util.py
|
import hashlib
import http.server
import json
import logging
import os
import re
import shutil
import socketserver
import subprocess
from contextlib import contextmanager, ExitStack
from itertools import chain
from multiprocessing import Process
from shutil import rmtree, which
from subprocess import check_call
from typing import List
import requests
import teamcity
import yaml
from teamcity.messages import TeamcityServiceMessages
from pkgpanda.exceptions import FetchError, ValidationError
json_prettyprint_args = {
"sort_keys": True,
"indent": 2,
"separators": (',', ':')
}
def variant_str(variant):
"""Return a string representation of variant."""
if variant is None:
return ''
return variant
def variant_object(variant_str):
"""Return a variant object from its string representation."""
if variant_str == '':
return None
return variant_str
def variant_name(variant):
"""Return a human-readable string representation of variant."""
if variant is None:
return '<default>'
return variant
def variant_prefix(variant):
"""Return a filename prefix for variant."""
if variant is None:
return ''
return variant + '.'
def variant_suffix(variant, delim='.'):
if variant is None:
return ''
return delim + variant
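# Illustrative examples (not part of the original module) of the variant helpers above:
#   variant_prefix(None) == ''            # the default variant has no filename prefix
#   variant_prefix('ee') == 'ee.'
#   variant_suffix('ee', delim='@') == '@ee'
#   variant_name(None) == '<default>'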
def download(out_filename, url, work_dir, rm_on_error=True):
assert os.path.isabs(out_filename)
assert os.path.isabs(work_dir)
work_dir = work_dir.rstrip('/')
# Strip off whitespace to make it so scheme matching doesn't fail because
# of simple user whitespace.
url = url.strip()
# Handle file:// urls specially since requests doesn't know about them.
try:
if url.startswith('file://'):
src_filename = url[len('file://'):]
if not os.path.isabs(src_filename):
src_filename = work_dir + '/' + src_filename
shutil.copyfile(src_filename, out_filename)
else:
# Download the file.
with open(out_filename, "w+b") as f:
r = requests.get(url, stream=True)
if r.status_code == 301:
raise Exception("got a 301")
r.raise_for_status()
for chunk in r.iter_content(chunk_size=4096):
f.write(chunk)
except Exception as fetch_exception:
if rm_on_error:
rm_passed = False
# try / except so if remove fails we don't get an exception during an exception.
# Sets rm_passed to true so if this fails we can include a special error message in the
# FetchError
try:
os.remove(out_filename)
rm_passed = True
except Exception:
pass
else:
rm_passed = True
raise FetchError(url, out_filename, fetch_exception, rm_passed) from fetch_exception
def download_atomic(out_filename, url, work_dir):
assert os.path.isabs(out_filename)
tmp_filename = out_filename + '.tmp'
try:
download(tmp_filename, url, work_dir)
os.rename(tmp_filename, out_filename)
except FetchError:
try:
os.remove(tmp_filename)
        except OSError:
            pass
raise
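# Illustrative usage sketch (assumed paths and URL): download_atomic() fetches to
# '<out_filename>.tmp' and renames into place only on success, so a partially written
# file never appears under the final name.
#   download_atomic('/tmp/pkg.tar.xz', 'https://example.com/pkg.tar.xz', work_dir='/tmp')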
def extract_tarball(path, target):
"""Extract the tarball into target.
If there are any errors, delete the folder being extracted to.
"""
# TODO(cmaloney): Validate extraction will pass before unpacking as much as possible.
# TODO(cmaloney): Unpack into a temporary directory then move into place to
# prevent partial extraction from ever laying around on the filesystem.
try:
assert os.path.exists(path), "Path doesn't exist but should: {}".format(path)
check_call(['mkdir', '-p', target])
check_call(['tar', '-xf', path, '-C', target])
except:
# If there are errors, we can't really cope since we are already in an error state.
rmtree(target, ignore_errors=True)
raise
def load_json(filename):
try:
with open(filename) as f:
return json.load(f)
except ValueError as ex:
raise ValueError("Invalid JSON in {0}: {1}".format(filename, ex)) from ex
class YamlParseError(Exception):
pass
def load_yaml(filename):
try:
with open(filename) as f:
return yaml.safe_load(f)
except yaml.YAMLError as ex:
raise YamlParseError("Invalid YAML in {}: {}".format(filename, ex)) from ex
def write_yaml(filename, data, **kwargs):
with open(filename, "w+") as f:
return yaml.safe_dump(data, f, **kwargs)
def make_file(name):
with open(name, 'a'):
pass
def write_json(filename, data):
with open(filename, "w+") as f:
return json.dump(data, f, **json_prettyprint_args)
def write_string(filename, data):
with open(filename, "w+") as f:
return f.write(data)
def load_string(filename):
with open(filename) as f:
return f.read().strip()
def json_prettyprint(data):
return json.dumps(data, **json_prettyprint_args)
def if_exists(fn, *args, **kwargs):
try:
return fn(*args, **kwargs)
except FileNotFoundError:
return None
def sha1(filename):
hasher = hashlib.sha1()
with open(filename, 'rb') as fh:
while 1:
buf = fh.read(4096)
if not buf:
break
hasher.update(buf)
return hasher.hexdigest()
def expect_folder(path, files):
path_contents = os.listdir(path)
assert set(path_contents) == set(files)
def expect_fs(folder, contents):
if isinstance(contents, list):
expect_folder(folder, contents)
elif isinstance(contents, dict):
expect_folder(folder, contents.keys())
for path in iter(contents):
if contents[path] is not None:
expect_fs(os.path.join(folder, path), contents[path])
else:
raise ValueError("Invalid type {0} passed to expect_fs".format(type(contents)))
def make_tar(result_filename, change_folder):
tar_cmd = ["tar", "--numeric-owner", "--owner=0", "--group=0"]
if which("pxz"):
tar_cmd += ["--use-compress-program=pxz", "-cf"]
else:
tar_cmd += ["-cJf"]
tar_cmd += [result_filename, "-C", change_folder, "."]
check_call(tar_cmd)
def rewrite_symlinks(root, old_prefix, new_prefix):
# Find the symlinks and rewrite them from old_prefix to new_prefix
# All symlinks not beginning with old_prefix are ignored because
# packages may contain arbitrary symlinks.
for root_dir, dirs, files in os.walk(root):
for name in chain(files, dirs):
full_path = os.path.join(root_dir, name)
if os.path.islink(full_path):
# Rewrite old_prefix to new_prefix if present.
target = os.readlink(full_path)
if target.startswith(old_prefix):
new_target = os.path.join(new_prefix, target[len(old_prefix) + 1:].lstrip('/'))
# Remove the old link and write a new one.
os.remove(full_path)
os.symlink(new_target, full_path)
def check_forbidden_services(path, services):
"""Check if package contains systemd services that may break DC/OS
This functions checks the contents of systemd's unit file dirs and
throws the exception if there are reserved services inside.
Args:
path: path where the package contents are
services: list of reserved services to look for
Raises:
ValidationError: Reserved serice names were found inside the package
"""
services_dir_regexp = re.compile(r'dcos.target.wants(?:_.+)?')
forbidden_srv_set = set(services)
pkg_srv_set = set()
for direntry in os.listdir(path):
if not services_dir_regexp.match(direntry):
continue
pkg_srv_set.update(set(os.listdir(os.path.join(path, direntry))))
found_units = forbidden_srv_set.intersection(pkg_srv_set)
if found_units:
msg = "Reverved unit names found: " + ','.join(found_units)
raise ValidationError(msg)
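# Illustrative usage sketch (assumed package path and unit name): raises ValidationError
# if any dcos.target.wants* directory inside the extracted package contains a unit that
# appears in the reserved list.
#   check_forbidden_services('/tmp/extracted-package', ['dcos-mesos-master.service'])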
def run(cmd, *args, **kwargs):
proc = subprocess.Popen(cmd, *args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs)
stdout, stderr = proc.communicate()
print("STDOUT: ", stdout.decode('utf-8'))
print("STDERR: ", stderr.decode('utf-8'))
if proc.returncode != 0:
raise subprocess.CalledProcessError(proc.returncode, cmd)
assert len(stderr) == 0
return stdout.decode('utf-8')
def launch_server(directory):
    os.chdir(directory)
httpd = socketserver.TCPServer(
("", 8000),
http.server.SimpleHTTPRequestHandler)
httpd.serve_forever()
class TestRepo:
def __init__(self, repo_dir):
self.__dir = repo_dir
def __enter__(self):
        self.__server = Process(target=launch_server, args=(self.__dir,))
self.__server.start()
    def __exit__(self, exc_type, exc_value, traceback):
        self.__server.terminate()
        self.__server.join()
def resources_test_dir(path):
assert not path.startswith('/')
return "pkgpanda/test_resources/{}".format(path)
class MessageLogger:
"""Abstraction over TeamCity Build Messages
    When pkgpanda is run in a TeamCity environment, additional meta-messages are written to stdout
    so that TeamCity can provide improved status reporting, log line highlighting, and failure
    reporting. When pkgpanda is run in any other environment, all meta-messages are
    silently omitted.
TeamCity docs: https://confluence.jetbrains.com/display/TCD10/Build+Script+Interaction+with+TeamCity
"""
def __init__(self):
self.loggers = []
if teamcity.is_running_under_teamcity():
self.loggers.append(TeamcityServiceMessages())
else:
self.loggers.append(PrintLogger())
def _custom_message(self, text, status, error_details='', flow_id=None):
for log in self.loggers:
log.customMessage(text, status, errorDetails=error_details, flowId=flow_id)
@contextmanager
def _block(self, log, name, flow_id):
log.blockOpened(name, flowId=flow_id)
log.progressMessage(name)
yield
log.blockClosed(name, flowId=flow_id)
@contextmanager
def scope(self, name, flow_id=None):
"""
Creates a new scope for TeamCity messages. This method is intended to be called in a ``with`` statement
:param name: The name of the scope
        :param flow_id: Optional flow id that can be used when ``name`` may be non-unique
"""
with ExitStack() as stack:
for log in self.loggers:
stack.enter_context(self._block(log, name, flow_id))
yield
def normal(self, text, flow_id=None):
self._custom_message(text=text, status='NORMAL', flow_id=flow_id)
def warning(self, text, flow_id=None):
self._custom_message(text=text, status='WARNING', flow_id=flow_id)
def error(self, text, flow_id=None, error_details=''):
self._custom_message(text=text, status='ERROR', flow_id=flow_id, error_details=error_details)
def failure(self, text, flow_id=None):
self._custom_message(text=text, status='FAILURE', flow_id=flow_id)
class PrintLogger:
def customMessage(self, text, status, errorDetails='', flowId=None): # noqa: N802, N803
print("{}: {} {}".format(status, text, errorDetails))
def progressMessage(self, message): # noqa: N802, N803
pass
def blockOpened(self, name, flowId=None): # noqa: N802, N803
print("starting: {}".format(name))
def blockClosed(self, name, flowId=None): # noqa: N802, N803
print("completed: {}".format(name))
logger = MessageLogger()
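# Illustrative usage sketch: messages emitted inside a scope are grouped into a named
# TeamCity block (or degrade to plain "starting:/completed:" prints outside TeamCity).
#   with logger.scope("build package"):
#       logger.normal("fetching sources")
#       logger.warning("checksum file missing, recomputing")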
def hash_str(s: str):
hasher = hashlib.sha1()
hasher.update(s.encode('utf-8'))
return hasher.hexdigest()
def hash_int(i: int):
return hash_str(str(i))
def hash_dict(d: dict):
item_hashes = []
for k in sorted(d.keys()):
assert isinstance(k, str)
item_hashes.append("{0}={1}".format(k, hash_checkout(d[k])))
return hash_str(",".join(item_hashes))
def hash_list(l: List[str]):
item_hashes = []
for item in sorted(l):
item_hashes.append(hash_checkout(item))
return hash_str(",".join(item_hashes))
def hash_checkout(item):
if isinstance(item, str) or isinstance(item, bytes):
return hash_str(item)
elif isinstance(item, dict):
return hash_dict(item)
elif isinstance(item, list):
return hash_list(item)
elif isinstance(item, int):
return hash_int(item)
elif isinstance(item, set):
return hash_list(list(item))
else:
raise NotImplementedError("{} of type {}".format(item, type(item)))
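# Illustrative examples (not part of the original module): hash_checkout() recurses into
# dicts and lists, and hash_dict() sorts keys, so logically equal structures hash the same.
#   hash_checkout({'b': [1, 2], 'a': 'x'}) == hash_checkout({'a': 'x', 'b': [1, 2]})  # True
#   hash_checkout(5) == hash_str('5')                                                 # True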
def split_by_token(token_prefix, token_suffix, string_, strip_token_decoration=False):
"""Yield a sequence of (substring, is_token) pairs comprising the string.
The string is split by token boundary, where a token is a substring that
begins with the token prefix and ends with the token suffix. is_token is
True if the substring is a token. If strip_token_decoration is True, tokens
are yielded without their prefix and suffix. Each token prefix must have a
matching suffix, and vice versa. Tokens may not be nested.
>>> list(split_by_token('{', '}', 'some text {token} some more text'))
[('some text ', False), ('{token}', True), (' some more text', False)]
>>> list(split_by_token('{', '}', 'some text {token} some more text', strip_token_decoration=True))
[('some text ', False), ('token', True), (' some more text', False)]
"""
def _next_substring(superstring, substring, start):
idx = superstring.find(substring, start)
if idx < 0:
return None
return idx, idx + len(substring)
def _raise_exception_if_suffix_in(substring):
if token_suffix in substring:
logging.debug("Token suffix found without matching prefix in string: {}".format(repr(string_)))
raise Exception("Token suffix found without matching prefix")
if len(token_prefix) == 0:
raise ValueError('Token prefix must be a nonzero length string')
if len(token_suffix) == 0:
raise ValueError('Token suffix must be a nonzero length string')
if string_ == '':
yield string_, False
num_chars_consumed = 0
while num_chars_consumed < len(string_):
# Find the next token.
token_start = _next_substring(string_, token_prefix, num_chars_consumed)
if not token_start:
# No token found. Yield the rest of the string and return.
remainder = string_[num_chars_consumed:]
_raise_exception_if_suffix_in(remainder)
yield remainder, False
return
# Yield the string preceding the token, if any.
if token_start[0] > num_chars_consumed:
preceding_string = string_[num_chars_consumed:token_start[0]]
_raise_exception_if_suffix_in(preceding_string)
yield preceding_string, False
# Find the end of the token.
token_end = _next_substring(string_, token_suffix, token_start[1])
if not token_end or token_prefix in string_[token_start[1]:token_end[0]]:
# Can't find a closing suffix, or found two consecutive prefixes without a suffix between them.
logging.debug("Token prefix found without matching suffix in string: {}".format(repr(string_)))
raise Exception("Token prefix found without matching suffix")
# Yield the token.
if strip_token_decoration:
# Omit the token's prefix and suffix.
yield string_[token_start[1]:token_end[0]], True
else:
# Yield the entire token.
yield string_[token_start[0]:token_end[1]], True
# Update the chars consumed count for the next iteration.
num_chars_consumed = token_end[1]
|
dow.py
|
from classes.Config import DowConfig
from classes.Database import DowDatabase
from classes.Sankaku2 import DowSankaku
from classes.Pixiv import DowPixiv
from classes.Worker import DowWorker
from classes.MimeType import DowMimeType
import pathlib
from multiprocessing import Process
config = DowConfig(pathlib.Path(".").joinpath("config.json"))
db = DowDatabase(config.ROOT_DIR, config.DB_NAME, False)
download_dir = pathlib.Path(config.ROOT_DIR).joinpath(config.DOWNLOAD_FOLDER)
sankaku = DowSankaku(config.SAN_USER, config.SAN_PASSWORD, download_dir, config.SAN_USER_TAG)
pixiv = DowPixiv(download_dir, config.PIXIV_TOKEN)
procs = []
if not sankaku.IsConnected():
exit(1)
def check_file_type(file):
file = pathlib.Path(file)
img_type = DowMimeType(file).GetType()
img_type = "jpg" if img_type == "jpeg" else img_type
    if img_type is not None and img_type != file.suffix:
img_type = "." + img_type
new_file = pathlib.Path(str(file).split(".")[0] + img_type)
return new_file
else:
return file
def download_file(module, file):
global db
global download_dir
short_name = module.GetShortFileName(file)
if module.DownloadFile(file):
if len(file[1]) > 1:
for f in file[1]:
f = pathlib.Path(f)
p = download_dir.joinpath(short_name)
new_name = check_file_type(p.joinpath(f))
if new_name.suffix != f.suffix:
p.joinpath(f).replace(new_name)
print("Insert to db: %s" % new_name)
db.Insert(new_name.name, p, file[2])
else:
f = pathlib.Path(file[1][0])
            new_name = (check_file_type(download_dir.joinpath(f))
                        if f.suffix not in DowMimeType("").video_formats_suffix_list
                        else download_dir.joinpath(f))
if new_name.suffix != f.suffix:
download_dir.joinpath(f).replace(new_name)
print("Insert to db: %s" % new_name)
db.Insert(new_name.name, download_dir, file[2])
p_skip_state = False
def p_file_in_db(module, file):
global db
global p_skip_state
name = module.GetShortFileName(file)
if name == "61666908":
p_skip_state = False
return True
return p_skip_state
s_skip_state = False
def s_file_in_db(module, file):
global db
global s_skip_state
name = module.GetShortFileName(file)
if name == "5122f15b93c1c8a8187b4f9a3984eef":
s_skip_state = False
return True
return s_skip_state
def file_no_in_db(module, file):
download_file(module, file)
return True
procs.append(Process(target=DowWorker().Worker, args=(sankaku, db, s_file_in_db, file_no_in_db,)))
procs.append(Process(target=DowWorker().Worker, args=(pixiv, db, p_file_in_db, file_no_in_db,)))
for proc in procs:
proc.start()
for proc in procs:
proc.join()
#FixVideoWorker(sankaku)
|
dataset.py
|
# Global dependencies
from config import IMG_DIR_PATH, CLINICAL_CONTROL_COLUMNS, IMG_CODES_FILENAME, NON_IMG_DATA_FILENAME, \
N_DATALOADER_WORKERS, CACHE_LIMIT
import os
import pdb
import copy
import random
import pickle
import cv2
import numpy as np
import pandas as pd
from torch.utils.data import Dataset, DataLoader, sampler
class PytorchImagesDataset(Dataset):
"""
A class for loading in images one at a time.
Follows pytorch dataset tutorial: https://pytorch.org/tutorials/beginner/data_loading_tutorial.html
"""
def __init__(self,
dataset,
transform_statistics,
C_cols,
y_cols,
merge_klg_01,
transform,
cache=None,
truncate_C_floats=True,
data_proportion=1.,
shuffle_Cs=False,
zscore_C=True,
zscore_Y=False,
max_horizontal_translation=None,
max_vertical_translation=None,
C_hat_path=None,
use_small_subset=False,
downsample_fraction=None):
assert dataset in ['train', 'val', 'test']
assert all([(y_col in CLINICAL_CONTROL_COLUMNS) for y_col in y_cols])
assert all([(C_col in CLINICAL_CONTROL_COLUMNS) for C_col in C_cols])
assert len(set(y_cols).intersection(set(C_cols))) == 0 # A and y should not share variables
assert transform in ['None', 'random_translation'], 'transform is: %s' % str(transform)
if transform != 'None': assert max_horizontal_translation > 0
if transform != 'None': assert max_vertical_translation > 0
self.cache = cache
self.dataset = dataset
self.transform_statistics = transform_statistics
self.C_cols = C_cols
self.y_cols = y_cols
self.merge_klg_01 = merge_klg_01
self.transform = transform
self.data_proportion = data_proportion
self.max_horizontal_translation = max_horizontal_translation
self.max_vertical_translation = max_vertical_translation
self.C_hat_path = C_hat_path
self.use_small_subset = use_small_subset
self.downsample_fraction = downsample_fraction
# ----- Data processing -----
self.base_dir_for_images, self.non_image_data, data_transform_statistics = \
load_non_image_data(self.dataset, C_cols, y_cols, zscore_C, zscore_Y,
transform_statistics=transform_statistics,
merge_klg_01=merge_klg_01,
truncate_C_floats=truncate_C_floats,
shuffle_Cs=shuffle_Cs,
check=True)
if self.transform_statistics is None:
self.transform_statistics = data_transform_statistics
# Select subset of ids if we are only using a proportion of data
N = len(self.non_image_data)
if self.data_proportion < 1.:
N_selected = int(data_proportion * N)
selected_ids = np.random.choice(N, N_selected, replace=False)
self.selected_ids = selected_ids
else:
self.selected_ids = np.arange(N)
l = os.listdir(self.base_dir_for_images)
self.selected_ids = [int(x.split("_")[1].split(".")[0]) for x in l if x[-1] == "y"]
if C_hat_path:
# Attribute prediction available from previous model
pass
if len(C_cols) > 0:
self.C_feats = copy.deepcopy(self.non_image_data[C_cols].values)
# This is the weight given to EACH class WITHIN EACH attribute in order to correct the imbalance.
variables = [C_col + '_loss_class_wt' for C_col in C_cols]
self.C_feats_loss_class_wts = copy.deepcopy(self.non_image_data[variables].values)
self.y_feats = copy.deepcopy(self.non_image_data[y_cols].values)
print('Dataset %s has %i rows' % (dataset, len(self.non_image_data)))
def __len__(self):
if self.use_small_subset:
return 500
return len(self.selected_ids)
def __getitem__(self, idx):
new_idx = self.selected_ids[idx]
if self.cache:
image = self.cache.get(new_idx)
cache_hit = image is not None
if not self.cache or not cache_hit:
### USHA CHANGED TO NPY
image_path = os.path.join(self.base_dir_for_images, 'image_%i.npy' % new_idx)
image = self.load_image(image_path)
# ----- Data augmentation -----
if self.transform == 'random_translation':
image = random_transform_image(image, self.transform,
max_horizontal_translation=self.max_horizontal_translation,
max_vertical_translation=self.max_vertical_translation)
if self.downsample_fraction:
image = downsample_image(image, self.downsample_fraction)
### USHA REMOVED
# image = np.tile(image, [3, 1, 1])
# ----- Data processing -----
if self.C_cols:
C_feats = self.C_feats[new_idx, :]
C_feats_loss_class_wts = self.C_feats_loss_class_wts[new_idx]
C_feats_not_nan = ~np.isnan(C_feats)
C_feats[~C_feats_not_nan] = 0
C_feats_not_nan = C_feats_not_nan * 1.
y_feats = self.y_feats[new_idx]
        assert not any(np.isnan(y_feats))
sample = {'image': image,
'C_feats': C_feats, # C_cols that are Z-scored
'C_feats_not_nan': C_feats_not_nan, # C_cols that are Z-scored but not nan
'C_feats_loss_class_wts': C_feats_loss_class_wts,
'y': y_feats}
return sample
def load_image(self, path):
### USHA CHANGED FROM np.load(path)['arr_0']
# print(path, arr.shape)
return np.load(path, allow_pickle=True)
def load_non_image_data(dataset_split, C_cols, y_cols, zscore_C, zscore_Y,
transform_statistics=None, merge_klg_01=True, truncate_C_floats=True,
shuffle_Cs=False, return_CY_only=False, check=True, verbose=True):
base_dir_for_images = get_base_dir_for_individual_image(dataset_split)
image_codes = pickle.load(open(os.path.join(base_dir_for_images, IMG_CODES_FILENAME), 'rb'))
non_image_data = pd.read_csv(os.path.join(base_dir_for_images, NON_IMG_DATA_FILENAME), index_col=0)
if check: ensure_barcodes_match(non_image_data, image_codes)
# Clip xrattl from [0,3] to [0,2]. Basically only for the 2 examples with Class = 3
# which do not appear in train dataset
if verbose: print('Truncating xrattl')
non_image_data['xrattl'] = np.minimum(2, non_image_data['xrattl'])
# Data processing for non-image data
if merge_klg_01:
if verbose: print('Merging KLG')
# Merge KLG 0,1 + Convert KLG scale to [0,3]
non_image_data['xrkl'] = np.maximum(0, non_image_data['xrkl'] - 1)
# Truncate odd decimals
if truncate_C_floats:
if verbose: print('Truncating A floats')
for variable in C_cols + y_cols:
# Truncate decimals
non_image_data[variable] = non_image_data[variable].values.astype(np.int64).astype(np.float64)
# Mix up the As for the training set to see if performance of KLG worsens
if shuffle_Cs:
if verbose: print('Shuffling As')
for variable in C_cols:
N = len(non_image_data)
permutation = np.random.permutation(N)
non_image_data[variable] = non_image_data[variable].values[permutation]
# Give weights for each class within each attribute, so that it can be used to reweigh the loss
for variable in C_cols:
new_variable = variable + '_loss_class_wt'
attribute = non_image_data[variable].values
unique_classes = np.unique(attribute)
N_total = len(attribute)
N_classes = len(unique_classes)
weights = np.zeros(len(attribute))
for cls_val in unique_classes:
belongs_to_cls = attribute == cls_val
counts = np.sum(belongs_to_cls)
# Since each class has 'counts', the total weight allocated to each class = 1
# weights[belongs_to_cls] = 1. / counts
weights[belongs_to_cls] = (N_total - counts) / N_total
non_image_data[new_variable] = weights
# Z-scoring of the Ys
new_transform_statistics = {}
y_feats = None
if zscore_Y:
y_feats = copy.deepcopy(non_image_data[y_cols].values)
for i in range(len(y_cols)):
not_nan = ~np.isnan(y_feats[:, i])
if transform_statistics is None:
std = np.std(y_feats[not_nan, i], ddof=1)
mu = np.mean(y_feats[not_nan, i])
new_transform_statistics[y_cols[i]] = { 'mu': mu, 'std': std }
else:
std = transform_statistics[y_cols[i]]['std']
mu = transform_statistics[y_cols[i]]['mu']
if verbose: print('Z-scoring additional feature %s with mean %2.3f and std %2.3f' % (y_cols[i], mu, std))
non_image_data['%s_original' % y_cols[i]] = y_feats[:, i]
non_image_data[y_cols[i]] = (y_feats[:, i] - mu) / std
y_feats[:, i] = non_image_data[y_cols[i]]
# Z-scoring of the attributes
C_feats = None
if zscore_C:
C_feats = copy.deepcopy(non_image_data[C_cols].values)
for i in range(len(C_cols)):
not_nan = ~np.isnan(C_feats[:, i])
if transform_statistics is None:
std = np.std(C_feats[not_nan, i], ddof=1)
mu = np.mean(C_feats[not_nan, i])
new_transform_statistics[C_cols[i]] = {'mu': mu, 'std': std}
else:
std = transform_statistics[C_cols[i]]['std']
mu = transform_statistics[C_cols[i]]['mu']
if verbose: print('Z-scoring additional feature %s with mean %2.3f and std %2.3f' % (C_cols[i], mu, std))
non_image_data['%s_original' % C_cols[i]] = C_feats[:, i]
non_image_data[C_cols[i]] = (C_feats[:, i] - mu) / std
C_feats[:, i] = non_image_data[C_cols[i]]
if return_CY_only:
if y_feats is None:
y_feats = copy.deepcopy(non_image_data[y_cols].values)
if C_feats is None:
C_feats = copy.deepcopy(non_image_data[C_cols].values)
return C_feats, y_feats
return base_dir_for_images, non_image_data, new_transform_statistics
def load_attributes(image_codes, non_image_data, all_cols, y_cols, merge_klg_01=False, zscore_C=False, zscore_Y=False):
C_feats = copy.deepcopy(non_image_data[all_cols].values)
if zscore_C:
for i in range(len(all_cols)):
not_nan = ~np.isnan(C_feats[:, i])
std = np.std(C_feats[not_nan, i], ddof=1)
mu = np.mean(C_feats[not_nan, i])
print('Z-scoring additional feature %s with mean %2.3f and std %2.3f' % (all_cols[i], mu, std))
C_feats[:, i] = (C_feats[:, i] - mu) / std
y_feats = copy.deepcopy(non_image_data[y_cols].values)
if merge_klg_01:
assert 'xrkl' in y_cols
y_feats = np.maximum(0, y_feats - 1)
if zscore_Y:
for i in range(len(y_cols)):
not_nan = ~np.isnan(y_feats[:, i])
std = np.std(y_feats[not_nan, i], ddof=1)
mu = np.mean(y_feats[not_nan, i])
            print('Z-scoring additional feature %s with mean %2.3f and std %2.3f' % (y_cols[i], mu, std))
y_feats[:, i] = (y_feats[:, i] - mu) / std
print('A shape: %s' % str(C_feats.shape))
print('y shape: %s' % str(y_feats.shape))
return C_feats, y_feats
def get_sampling_weights(sampling_strategy, sampling_args, train_dataset, C_cols, y_cols):
"""
Get different weights for each data point according to the sampling strategy.
"""
print('\n-------------- Sampling strategy: %s --------------' % sampling_strategy)
if sampling_strategy == 'weighted':
C_data = train_dataset.non_image_data[C_cols].values
y_data = train_dataset.non_image_data[y_cols].values
N_C_cols = len(C_cols)
N_y_cols = len(y_cols)
N = C_data.shape[0]
weights = np.zeros(N)
if sampling_args['mode'] in ['weigh_C', 'weigh_Cy']:
# Select an example according to the class distribution of As.
# For an attribute, if an example is part of the rarer class, it will receive a higher weight.
# However, if the class is too rare, we assume give it a uniform weight to prevent overfitting.
for i in range(N_C_cols):
string = '%02d [%7s] ' % (i + 1, C_cols[i])
attribute = C_data[:, i]
ids_all = np.arange(N)
classes = np.unique(attribute)
N_classes = len(classes)
total_wt_per_class = 1. / N_classes
for cls_val in classes:
counts = np.sum(attribute == cls_val)
if counts < sampling_args['min_count']:
# If there are too few of this attribute class, we just sample this point uniformly
# (to prevent constant sampling of this data point and overfitting).
# Weight is (N_C_cols / N) instead of (1 / N) because we are adding magnitudes of 1
                    # to the 'weights' vector N_C_cols times.
print('Setting Attribute %s (Class %f) to uniform due to small size of %d' %
(C_cols[i], cls_val, counts))
additive_weights = N_C_cols / N
else:
# Each data point given weight w such that each class should be equally sampled.
# EG. If there are 25 pos and 100 neg, we want sampling of 0.5:0.5.
# So, we give 0.5/25 = 0.02 weight to pos and 0.5/100=0.005 weight to neg.
additive_weights = total_wt_per_class / (counts + 0)
weights[attribute == cls_val] += additive_weights
string += '(%.7f)' % additive_weights
print(string)
if sampling_args['mode'] == 'weigh_Cy':
for i in range(N_y_cols):
string = '%02d [%7s] ' % (i + 1, y_cols[i])
y_value = y_data[:, i]
ids_all = np.arange(N)
classes = np.unique(y_value)
N_classes = len(classes)
total_wt_per_class = 1. / N_classes
for cls_val in classes:
counts = np.sum(y_value == cls_val)
if counts < sampling_args['min_count']:
print('Setting Attribute %s (Class %f) to uniform due to small size of %d' %
(y_cols[i], cls_val, counts))
additive_weights = N_C_cols / N
else:
additive_weights = total_wt_per_class / (counts + 0)
weights[y_value == cls_val] += additive_weights
string += '(%.7f)' % additive_weights
hist_counts, hist_weights = np.histogram(weights * 1000, bins=20)
print('Histogram counts : ', [x for x in hist_counts])
print('Histogram weights : ', ' '.join(['%.1f' % x for x in hist_weights]))
# While each rare class has high weight, its 'mass' is small since it has low count
# The below unif_prob_mass_per_bin and prob_mass_per_bin show what the rare class representation would be
# like before and after resampling. Ideally, the change should not be overly drastic, but the common class
# representation should smooth out.
unif_prob_mass = (1 / N) * hist_counts
unif_prob_mass_per_bin = unif_prob_mass / np.sum(unif_prob_mass)
print('Uniform prob mass (%) :', ' '.join(['%.1f' % (x * 100) for x in unif_prob_mass_per_bin]))
prob_mass = hist_weights[:-1] * hist_counts
prob_mass_per_bin = prob_mass / np.sum(prob_mass)
print('Histogram weights prob mass (%): ', ' '.join(['%.1f' % (x * 100) for x in prob_mass_per_bin]), '\n')
train_sampler = sampler.WeightedRandomSampler(weights, len(weights))
shuffle = False
elif sampling_strategy == 'uniform':
train_sampler = None
shuffle = True
return train_sampler, shuffle
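# Illustrative worked example (assumed counts): for one binary C column with 25 positive
# and 100 negative rows and min_count below 25, total_wt_per_class = 1 / 2 = 0.5, so each
# positive row accumulates 0.5 / 25 = 0.02 and each negative row 0.5 / 100 = 0.005;
# WeightedRandomSampler then draws the two classes about equally often.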
def get_image_cache_for_split(dataset_split, limit=None):
print('Building image cache for %s split' % dataset_split)
cache = {}
base_dir_for_images = get_base_dir_for_individual_image(dataset_split)
non_image_data = pd.read_csv(os.path.join(base_dir_for_images, NON_IMG_DATA_FILENAME), index_col=0)
N = len(non_image_data) if limit is None else min(int(limit), len(non_image_data))
num_workers = 8
def get_images(ids_group, result):
for idx in ids_group:
## USHA CHANGED FROM NPZ TO NPY
image_path = os.path.join(base_dir_for_images, 'image_%i.npy' % idx)
image = np.load(image_path)
result[idx] = image
rounds = 20 # Split into multiple rounds to pass smaller sized data
import threading
###USHA CHANGED to new_list from range(N)
l = os.listdir(base_dir_for_images)
new_list = [int(x.split("_")[1].split(".")[0]) for x in l if x[-1] == "y"]
ids_groups_list = np.array_split(new_list, num_workers * rounds)
for round in range(rounds):
print(' Iter %d/%d' % (round + 1, rounds))
ids_groups = ids_groups_list[round * num_workers:(round + 1) * num_workers]
results = []
threads = []
for i, ids_group in enumerate(ids_groups):
result = {}
t = threading.Thread(target=get_images, args=(ids_group, result))
t.start()
threads.append(t)
results.append(result)
for i, t in enumerate(threads):
t.join()
for i, result in enumerate(results):
cache.update(result)
return cache
def load_data_from_different_splits(batch_size,
C_cols,
y_cols,
zscore_C,
zscore_Y,
data_proportion,
shuffle_Cs,
merge_klg_01,
max_horizontal_translation,
max_vertical_translation,
augment=None,
sampling_strategy=None,
sampling_args=None,
C_hat_path=None,
use_small_subset=False,
downsample_fraction=None):
"""
Load dataset a couple images at a time using DataLoader class, as shown in pytorch dataset tutorial.
Checked.
"""
limit = 500 if use_small_subset else CACHE_LIMIT
cache_train = get_image_cache_for_split('train', limit=limit)
train_dataset = PytorchImagesDataset(dataset='train',
transform_statistics=None,
C_cols=C_cols,
y_cols=y_cols,
zscore_C=zscore_C,
zscore_Y=zscore_Y,
cache=cache_train,
truncate_C_floats=True,
data_proportion=data_proportion,
shuffle_Cs=shuffle_Cs,
merge_klg_01=merge_klg_01,
transform=augment,
max_horizontal_translation=max_horizontal_translation,
max_vertical_translation=max_vertical_translation,
use_small_subset=use_small_subset)
# Sampler for training
train_sampler, shuffle = get_sampling_weights(sampling_strategy, sampling_args, train_dataset, C_cols, y_cols)
train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=shuffle,
num_workers=N_DATALOADER_WORKERS,
sampler=train_sampler, pin_memory=False)
cache_val = get_image_cache_for_split('val', limit=limit)
val_dataset = PytorchImagesDataset(dataset='val',
transform_statistics=train_dataset.transform_statistics,
C_cols=C_cols,
y_cols=y_cols,
zscore_C=zscore_C,
zscore_Y=zscore_Y,
cache=cache_val,
truncate_C_floats=True,
data_proportion=data_proportion,
shuffle_Cs=False,
merge_klg_01=merge_klg_01,
transform='None',
use_small_subset=use_small_subset,
downsample_fraction=downsample_fraction)
val_dataloader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False, num_workers=N_DATALOADER_WORKERS)
test_dataset = PytorchImagesDataset(dataset='test',
transform_statistics=train_dataset.transform_statistics,
C_cols=C_cols,
y_cols=y_cols,
zscore_C=zscore_C,
zscore_Y=zscore_Y,
cache=None,
truncate_C_floats=True,
shuffle_Cs=False,
merge_klg_01=merge_klg_01,
transform='None',
use_small_subset=use_small_subset,
downsample_fraction=downsample_fraction)
test_dataloader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False, num_workers=N_DATALOADER_WORKERS)
dataloaders = {'train': train_dataloader, 'val': val_dataloader, 'test': test_dataloader}
datasets = {'train': train_dataset, 'val': val_dataset, 'test': test_dataset}
if use_small_subset:
dataset_sizes = {'train': 500, 'val': 500, 'test': 500}
else:
dataset_sizes = {'train': len(train_dataset), 'val': len(val_dataset), 'test': len(test_dataset)}
return dataloaders, datasets, dataset_sizes
def get_base_dir_for_individual_image(dataset_split):
"""
Get the path for an image.
"""
assert dataset_split in ['train', 'val', 'test']
base_dir = IMG_DIR_PATH % dataset_split
return base_dir
def ensure_barcodes_match(combined_df, image_codes):
"""
Sanity check: make sure non-image data matches image data.
"""
print("Ensuring that barcodes line up.")
assert len(combined_df) == len(image_codes)
for idx in range(len(combined_df)):
barcode = str(combined_df.iloc[idx]['barcdbu'])
if len(barcode) == 11:
barcode = '0' + barcode
side = str(combined_df.iloc[idx]['side'])
code_in_df = barcode + '*' + side
if image_codes[idx] != code_in_df:
raise Exception("Barcode mismatch at index %i, %s != %s" % (idx, image_codes[idx], code_in_df))
print("All %i barcodes line up." % len(combined_df))
# ----------------- Data augmentations -----------------
def random_horizontal_vertical_translation(img, max_horizontal_translation, max_vertical_translation):
"""
Translates the image horizontally/vertically by a fraction of its width/length.
To keep the image the same size + scale, we add a background color to fill in any space created.
"""
assert max_horizontal_translation >= 0 and max_horizontal_translation <= 1
assert max_vertical_translation >= 0 and max_vertical_translation <= 1
if max_horizontal_translation == 0 and max_vertical_translation == 0:
return img
img = img.copy()
assert len(img.shape) == 3
channels = img.shape[0]
assert img.shape[1] >= img.shape[2]
height = img.shape[1]
width = img.shape[2]
translated_img = img
horizontal_translation = int((random.random() - .5) * max_horizontal_translation * width)
vertical_translation = int((random.random() - .5) * max_vertical_translation * height)
background_color = img[:, -10:, -10:].mean(axis=1).mean(axis=1)
# first we translate the image.
if horizontal_translation != 0:
if horizontal_translation > 0:
translated_img = translated_img[:, :, horizontal_translation:] # cuts off pixels on the left of image.
else:
translated_img = translated_img[:, :, :horizontal_translation] # cuts off pixels on the right of image.
if vertical_translation != 0:
if vertical_translation > 0:
translated_img = translated_img[:, vertical_translation:, :] # cuts off pixels on the top of image.
else:
translated_img = translated_img[:, :vertical_translation, :] # cuts off pixels on the bottom of image.
# then we keep the dimensions the same.
new_height = translated_img.shape[1]
new_width = translated_img.shape[2]
new_image = []
for i in range(channels): # loop over RGB
background_square = np.ones([height, width]) * background_color[i]
if horizontal_translation < 0:
if vertical_translation < 0:
# I don't really know if the signs here matter all that much -- it's just whether we're putting the
# translated images on the left or right.
background_square[-new_height:, -new_width:] = translated_img[i, :, :]
else:
background_square[:new_height, -new_width:] = translated_img[i, :, :]
else:
if vertical_translation < 0:
background_square[-new_height:, :new_width] = translated_img[i, :, :]
else:
background_square[:new_height, :new_width] = translated_img[i, :, :]
new_image.append(background_square)
new_image = np.array(new_image)
return new_image
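# Illustrative sketch (assumed shape): for a (3, 224, 224) image with
# max_horizontal_translation=0.1, the horizontal shift is drawn from roughly
# [-11, 11] pixels (int((random() - .5) * 0.1 * 224)); the cropped image is then pasted
# onto a square filled with the mean colour of the bottom-right 10x10 corner, so the
# output keeps the original height and width.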
def random_transform_image(image, transform, max_horizontal_translation, max_vertical_translation):
assert transform in ['random_translation_and_then_random_horizontal_flip', 'random_translation']
image = random_horizontal_vertical_translation(image, max_horizontal_translation, max_vertical_translation)
if transform == 'random_translation_and_then_random_horizontal_flip':
if random.random() < 0.5:
image = image[:, :, ::-1].copy()
return image
def downsample_image(image, downsample_fraction):
assert 0 < downsample_fraction < 1 # this argument is the downsample fraction
new_image = []
for i in range(3): # RGB
img = image[i, :, :].copy()
original_size = img.shape # note have to reverse arguments for cv2.
img2 = cv2.resize(img,
(int(original_size[1] * downsample_fraction), int(original_size[0] * downsample_fraction)))
new_image.append(cv2.resize(img2, tuple(original_size[::-1])))
# image[0:1, :, :] = gaussian_filter(image[0:1, :, :], sigma=self.gaussian_blur_filter)
new_image = np.array(new_image)
assert new_image.shape == image.shape
return new_image
|
show.py
|
import discord
import logging
import asyncio
import os
import json
import time
from discord.ext import commands
from threading import Thread
import s3
import podcast_utils
import recording_utils
recording_thread = None
recording_buffer = recording_utils.BufSink()
class ShowCog(commands.Cog):
def __init__(self, bot, helper, configs):
self.bot = bot
self.helper = helper
self.configs = configs
    def start_recording(self, show_channel):
        global recording_thread
        recording_filename = show_channel.name + "-" + time.strftime("%Y%m%d-%H%M%S") + ".wav"
if recording_thread is None:
recording_thread = Thread(target=recording_utils.poster, args=(self.bot, recording_buffer, recording_filename))
recording_thread.start()
self.bot.voice_clients[0].listen(recording_buffer)
return
@commands.command(name='startshow')
@commands.has_role('podcast-host')
async def start_show(self,ctx):
logging.info("Command '%s' detected in call screening channel (%s).", ctx.command.name, self.configs['CHANNELS']['SCREENING']['name'])
await self.helper.serverCheck()
perms = discord.PermissionOverwrite(
connect=True,
speak=False,
mute_members=False,
deafen_members=False,
move_members=False,
use_voice_activation=False,
priority_speaker=False,
read_messages=True
)
await self.bot.get_channel(self.configs['CHANNELS']['VOICE']['id']).set_permissions(ctx.guild.default_role, overwrite=perms)
await self.bot.get_channel(self.configs['CHANNELS']['VOICE']['id']).connect()
        self.start_recording(self.bot.get_channel(self.configs['CHANNELS']['VOICE']['id']))
@commands.command(name='endshow')
@commands.has_role('podcast-host')
async def end_show(self, ctx):
logging.info("Command '%s' detected in call screening channel (%s).", ctx.command.name, self.configs['CHANNELS']['SCREENING']['name'])
perms = discord.PermissionOverwrite(
connect=False,
speak=False,
mute_members=False,
deafen_members=False,
move_members=False,
use_voice_activation=False,
priority_speaker=False,
read_messages=False
)
await self.bot.get_channel(self.configs['CHANNELS']['VOICE']['id']).set_permissions(ctx.guild.default_role, overwrite=perms)
await self.helper.clean_livecallers(ctx)
if self.bot.voice_clients:
for vc in self.bot.voice_clients:
await vc.disconnect()
recording_utils.recording_finished_flag = True
global recording_thread
recording_thread.join()
s3.save_recording_to_bucket("discord-recordings-dev", recording_utils.recording_filename)
def setup(bot):
bot.add_cog(ShowCog(bot))
|
executor.py
|
#!/usr/bin/env python
# hook for virtualenv
# switch to the virtualenv where the executor belongs,
# replace all the path for modules
import sys, os.path
P = 'site-packages'
apath = os.path.abspath(__file__)
if P in apath:
    virtualenv = apath[:apath.index(P)]
    sysp = [p[:-len(P)] for p in sys.path if p.endswith(P)][0]
    if sysp != virtualenv:
        sys.path = [p.replace(sysp, virtualenv) for p in sys.path]
import os
import pickle
import subprocess
import threading
from threading import Thread
import socket
import psutil
import time
import zmq
import dpark.pymesos as mesos
from dpark.pymesos import mesos_pb2
ctx = zmq.Context()
def forward(fd, addr, prefix=''):
f = os.fdopen(fd, 'r', 4096)
out = ctx.socket(zmq.PUSH)
out.connect(addr)
while True:
try:
line = f.readline()
if not line: break
out.send(prefix+line)
except IOError:
break
f.close()
out.close()
def reply_status(driver, task_id, status):
update = mesos_pb2.TaskStatus()
update.task_id.MergeFrom(task_id)
update.state = status
driver.sendStatusUpdate(update)
def launch_task(self, driver, task):
reply_status(driver, task.task_id, mesos_pb2.TASK_RUNNING)
host = socket.gethostname()
cwd, command, _env, shell, addr1, addr2, addr3 = pickle.loads(task.data)
prefix = "[%s@%s] " % (str(task.task_id.value), host)
outr, outw = os.pipe()
errr, errw = os.pipe()
    t1 = Thread(target=forward, args=[outr, addr1, prefix])
t1.daemon = True
t1.start()
    t2 = Thread(target=forward, args=[errr, addr2, prefix])
t2.daemon = True
t2.start()
wout = os.fdopen(outw,'w',0)
werr = os.fdopen(errw,'w',0)
if addr3:
subscriber = ctx.socket(zmq.SUB)
subscriber.connect(addr3)
subscriber.setsockopt(zmq.SUBSCRIBE, '')
poller = zmq.Poller()
poller.register(subscriber, zmq.POLLIN)
socks = dict(poller.poll(60 * 1000))
if socks and socks.get(subscriber) == zmq.POLLIN:
hosts = pickle.loads(subscriber.recv(zmq.NOBLOCK))
line = hosts.get(host)
if line:
command = line.split(' ')
else:
return reply_status(driver, task.task_id, mesos_pb2.TASK_FAILED)
else:
return reply_status(driver, task.task_id, mesos_pb2.TASK_FAILED)
mem = 100
for r in task.resources:
if r.name == 'mem':
mem = r.scalar.value
break
try:
env = dict(os.environ)
env.update(_env)
if not os.path.exists(cwd):
            print >>werr, 'CWD %s does not exist, using /tmp instead' % cwd
cwd = '/tmp'
p = subprocess.Popen(command,
stdout=wout, stderr=werr,
cwd=cwd, env=env, shell=shell)
tid = task.task_id.value
self.ps[tid] = p
code = None
last_time = 0
while True:
time.sleep(0.1)
code = p.poll()
if code is not None:
break
now = time.time()
if now < last_time + 2:
continue
last_time = now
try:
process = psutil.Process(p.pid)
rss = sum((proc.get_memory_info().rss
for proc in process.get_children(recursive=True)),
process.get_memory_info().rss)
rss = (rss >> 20)
except Exception, e:
continue
if rss > mem * 1.5:
print >>werr, "task %s used too much memory: %dMB > %dMB * 1.5, kill it. " \
"use -m argument to request more memory." % (
tid, rss, mem)
p.kill()
elif rss > mem:
print >>werr, "task %s used too much memory: %dMB > %dMB, " \
"use -m to request for more memory" % (
tid, rss, mem)
if code == 0:
status = mesos_pb2.TASK_FINISHED
else:
print >>werr, ' '.join(command) + ' exit with %s' % code
status = mesos_pb2.TASK_FAILED
except Exception, e:
status = mesos_pb2.TASK_FAILED
import traceback
        print >>werr, 'exception while running ' + ' '.join(command)
for line in traceback.format_exc():
werr.write(line)
reply_status(driver, task.task_id, status)
wout.close()
werr.close()
t1.join()
t2.join()
self.ps.pop(tid, None)
self.ts.pop(tid, None)
class MyExecutor(mesos.Executor):
def __init__(self):
self.ps = {}
self.ts = {}
def launchTask(self, driver, task):
t = Thread(target=launch_task, args=(self, driver, task))
t.daemon = True
t.start()
self.ts[task.task_id.value] = t
def killTask(self, driver, task_id):
try:
if task_id.value in self.ps:
self.ps[task_id.value].kill()
reply_status(driver, task_id, mesos_pb2.TASK_KILLED)
except: pass
def shutdown(self, driver):
for p in self.ps.values():
try: p.kill()
except: pass
for t in self.ts.values():
t.join()
if __name__ == "__main__":
executor = MyExecutor()
mesos.MesosExecutorDriver(executor).run()
|
TimeoutSendService.py
|
from __future__ import annotations
from threading import Thread
from typing import Callable, Optional
from .interfaces import ITimeoutSendService
from ..clock import IClock
from ..service import IService, IServiceManager
from ..send import ISendService
from ..util.Atomic import Atomic
from ..util.InterruptableSleep import InterruptableSleep
class TimeoutSendService(IService, ITimeoutSendService):
def __init__(
self,
clock: IClock,
send_service: ISendService,
service_manager: IServiceManager,
timeout_seconds: float,
message_callback: Callable[[], Optional[bytes]] = lambda: None,
) -> None:
self.interruptable_sleep = InterruptableSleep(clock)
self.timeout_seconds = timeout_seconds
self.message_callback = Atomic(message_callback)
self.send_count = 0
self.send_service = send_service
self.should_run = True
self.thread = Thread(target=self.run)
service_manager.add_service(self)
def get_send_count(self) -> int:
return self.send_count
def get_service_name(self) -> str:
return __name__
def join_service(self, timeout_seconds: Optional[float] = None) -> bool:
self.thread.join(timeout_seconds)
return self.thread.is_alive()
def run(self) -> None:
should_send = True
while self.should_run:
if should_send:
with self.message_callback as (message_callback, _):
self._send(message_callback())
should_send = self.interruptable_sleep.sleep(self.timeout_seconds)
def set_and_send_immediately(self, message_callback: Callable[[], Optional[bytes]]) -> Optional[bytes]:
with self.message_callback as (_, set_message_callback):
message = message_callback()
self._send(message)
set_message_callback(message_callback)
self.interruptable_sleep.interrupt()
return message
def _send(self, message: Optional[bytes]) -> None:
if message is not None:
self.send_count += 1
self.send_service.send(message)
def start_service(self) -> None:
self.thread.start()
def stop_service(self) -> None:
self.should_run = False
self.interruptable_sleep.interrupt()
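# Illustrative usage sketch (assumes concrete IClock, ISendService and IServiceManager
# implementations from the surrounding package; the names below are placeholders):
#   svc = TimeoutSendService(clock, send_service, service_manager,
#                            timeout_seconds=5.0,
#                            message_callback=lambda: b"heartbeat")
#   svc.start_service()                              # sends the heartbeat every 5 seconds
#   svc.set_and_send_immediately(lambda: b"urgent")  # sends now and replaces the callback
#   svc.stop_service()
#   svc.join_service()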
|
test_pdb.py
|
# A test suite for pdb; not very comprehensive at the moment.
import doctest
import os
import pdb
import sys
import types
import unittest
import subprocess
import textwrap
from test import support
# This little helper class is essential for testing pdb under doctest.
from test.test_doctest import _FakeInput
class PdbTestInput(object):
"""Context manager that makes testing Pdb in doctests easier."""
def __init__(self, input):
self.input = input
def __enter__(self):
self.real_stdin = sys.stdin
sys.stdin = _FakeInput(self.input)
self.orig_trace = sys.gettrace() if hasattr(sys, 'gettrace') else None
def __exit__(self, *exc):
sys.stdin = self.real_stdin
if self.orig_trace:
sys.settrace(self.orig_trace)
def test_pdb_displayhook():
"""This tests the custom displayhook for pdb.
>>> def test_function(foo, bar):
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... pass
>>> with PdbTestInput([
... 'foo',
... 'bar',
... 'for i in range(5): print(i)',
... 'continue',
... ]):
... test_function(1, None)
> <doctest test.test_pdb.test_pdb_displayhook[0]>(3)test_function()
-> pass
(Pdb) foo
1
(Pdb) bar
(Pdb) for i in range(5): print(i)
0
1
2
3
4
(Pdb) continue
"""
def test_pdb_basic_commands():
"""Test the basic commands of pdb.
>>> def test_function_2(foo, bar='default'):
... print(foo)
... for i in range(5):
... print(i)
... print(bar)
... for i in range(10):
... never_executed
... print('after for')
... print('...')
... return foo.upper()
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... ret = test_function_2('baz')
... print(ret)
>>> with PdbTestInput([ # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
... 'step', # entering the function call
... 'args', # display function args
... 'list', # list function source
... 'bt', # display backtrace
... 'up', # step up to test_function()
... 'down', # step down to test_function_2() again
... 'next', # stepping to print(foo)
... 'next', # stepping to the for loop
... 'step', # stepping into the for loop
... 'until', # continuing until out of the for loop
... 'next', # executing the print(bar)
... 'jump 8', # jump over second for loop
... 'return', # return out of function
... 'retval', # display return value
... 'continue',
... ]):
... test_function()
> <doctest test.test_pdb.test_pdb_basic_commands[1]>(3)test_function()
-> ret = test_function_2('baz')
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(1)test_function_2()
-> def test_function_2(foo, bar='default'):
(Pdb) args
foo = 'baz'
bar = 'default'
(Pdb) list
1 -> def test_function_2(foo, bar='default'):
2 print(foo)
3 for i in range(5):
4 print(i)
5 print(bar)
6 for i in range(10):
7 never_executed
8 print('after for')
9 print('...')
10 return foo.upper()
[EOF]
(Pdb) bt
...
<doctest test.test_pdb.test_pdb_basic_commands[2]>(18)<module>()
-> test_function()
<doctest test.test_pdb.test_pdb_basic_commands[1]>(3)test_function()
-> ret = test_function_2('baz')
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(1)test_function_2()
-> def test_function_2(foo, bar='default'):
(Pdb) up
> <doctest test.test_pdb.test_pdb_basic_commands[1]>(3)test_function()
-> ret = test_function_2('baz')
(Pdb) down
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(1)test_function_2()
-> def test_function_2(foo, bar='default'):
(Pdb) next
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(2)test_function_2()
-> print(foo)
(Pdb) next
baz
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(3)test_function_2()
-> for i in range(5):
(Pdb) step
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(4)test_function_2()
-> print(i)
(Pdb) until
0
1
2
3
4
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(5)test_function_2()
-> print(bar)
(Pdb) next
default
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(6)test_function_2()
-> for i in range(10):
(Pdb) jump 8
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(8)test_function_2()
-> print('after for')
(Pdb) return
after for
...
--Return--
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(10)test_function_2()->'BAZ'
-> return foo.upper()
(Pdb) retval
'BAZ'
(Pdb) continue
BAZ
"""
def test_pdb_breakpoint_commands():
"""Test basic commands related to breakpoints.
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... print(1)
... print(2)
... print(3)
... print(4)
First, need to clear bdb state that might be left over from previous tests.
Otherwise, the new breakpoints might get assigned different numbers.
>>> from bdb import Breakpoint
>>> Breakpoint.next = 1
>>> Breakpoint.bplist = {}
>>> Breakpoint.bpbynumber = [None]
Now test the breakpoint commands. NORMALIZE_WHITESPACE is needed because
the breakpoint list outputs a tab for the "stop only" and "ignore next"
lines, which we don't want to put in here.
>>> with PdbTestInput([ # doctest: +NORMALIZE_WHITESPACE
... 'break 3',
... 'disable 1',
... 'ignore 1 10',
... 'condition 1 1 < 2',
... 'break 4',
... 'break 4',
... 'break',
... 'clear 3',
... 'break',
... 'condition 1',
... 'enable 1',
... 'clear 1',
... 'commands 2',
... 'p "42"',
... 'print("42", 7*6)', # Issue 18764 (not about breakpoints)
... 'end',
... 'continue', # will stop at breakpoint 2 (line 4)
... 'clear', # clear all!
... 'y',
... 'tbreak 5',
... 'continue', # will stop at temporary breakpoint
... 'break', # make sure breakpoint is gone
... 'continue',
... ]):
... test_function()
> <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>(3)test_function()
-> print(1)
(Pdb) break 3
Breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
(Pdb) disable 1
Disabled breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
(Pdb) ignore 1 10
Will ignore next 10 crossings of breakpoint 1.
(Pdb) condition 1 1 < 2
New condition set for breakpoint 1.
(Pdb) break 4
Breakpoint 2 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) break 4
Breakpoint 3 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) break
Num Type Disp Enb Where
1 breakpoint keep no at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
stop only if 1 < 2
ignore next 10 hits
2 breakpoint keep yes at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
3 breakpoint keep yes at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) clear 3
Deleted breakpoint 3 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) break
Num Type Disp Enb Where
1 breakpoint keep no at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
stop only if 1 < 2
ignore next 10 hits
2 breakpoint keep yes at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) condition 1
Breakpoint 1 is now unconditional.
(Pdb) enable 1
Enabled breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
(Pdb) clear 1
Deleted breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
(Pdb) commands 2
(com) p "42"
(com) print("42", 7*6)
(com) end
(Pdb) continue
1
'42'
42 42
> <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>(4)test_function()
-> print(2)
(Pdb) clear
Clear all breaks? y
Deleted breakpoint 2 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) tbreak 5
Breakpoint 4 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:5
(Pdb) continue
2
Deleted breakpoint 4 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:5
> <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>(5)test_function()
-> print(3)
(Pdb) break
(Pdb) continue
3
4
"""
def do_nothing():
pass
def do_something():
print(42)
def test_list_commands():
"""Test the list and source commands of pdb.
>>> def test_function_2(foo):
... import test.test_pdb
... test.test_pdb.do_nothing()
... 'some...'
... 'more...'
... 'code...'
... 'to...'
... 'make...'
... 'a...'
... 'long...'
... 'listing...'
... 'useful...'
... '...'
... '...'
... return foo
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... ret = test_function_2('baz')
>>> with PdbTestInput([ # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
... 'list', # list first function
... 'step', # step into second function
... 'list', # list second function
... 'list', # continue listing to EOF
... 'list 1,3', # list specific lines
... 'list x', # invalid argument
... 'next', # step to import
... 'next', # step over import
... 'step', # step into do_nothing
... 'longlist', # list all lines
... 'source do_something', # list all lines of function
... 'source fooxxx', # something that doesn't exit
... 'continue',
... ]):
... test_function()
> <doctest test.test_pdb.test_list_commands[1]>(3)test_function()
-> ret = test_function_2('baz')
(Pdb) list
1 def test_function():
2 import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
3 -> ret = test_function_2('baz')
[EOF]
(Pdb) step
--Call--
> <doctest test.test_pdb.test_list_commands[0]>(1)test_function_2()
-> def test_function_2(foo):
(Pdb) list
1 -> def test_function_2(foo):
2 import test.test_pdb
3 test.test_pdb.do_nothing()
4 'some...'
5 'more...'
6 'code...'
7 'to...'
8 'make...'
9 'a...'
10 'long...'
11 'listing...'
(Pdb) list
12 'useful...'
13 '...'
14 '...'
15 return foo
[EOF]
(Pdb) list 1,3
1 -> def test_function_2(foo):
2 import test.test_pdb
3 test.test_pdb.do_nothing()
(Pdb) list x
*** ...
(Pdb) next
> <doctest test.test_pdb.test_list_commands[0]>(2)test_function_2()
-> import test.test_pdb
(Pdb) next
> <doctest test.test_pdb.test_list_commands[0]>(3)test_function_2()
-> test.test_pdb.do_nothing()
(Pdb) step
--Call--
> ...test_pdb.py(...)do_nothing()
-> def do_nothing():
(Pdb) longlist
... -> def do_nothing():
... pass
(Pdb) source do_something
... def do_something():
... print(42)
(Pdb) source fooxxx
*** ...
(Pdb) continue
"""
def test_post_mortem():
"""Test post mortem traceback debugging.
>>> def test_function_2():
... try:
... 1/0
... finally:
... print('Exception!')
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... test_function_2()
... print('Not reached.')
>>> with PdbTestInput([ # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
... 'next', # step over exception-raising call
... 'bt', # get a backtrace
... 'list', # list code of test_function()
... 'down', # step into test_function_2()
... 'list', # list code of test_function_2()
... 'continue',
... ]):
... try:
... test_function()
... except ZeroDivisionError:
... print('Correctly reraised.')
> <doctest test.test_pdb.test_post_mortem[1]>(3)test_function()
-> test_function_2()
(Pdb) next
Exception!
ZeroDivisionError: division by zero
> <doctest test.test_pdb.test_post_mortem[1]>(3)test_function()
-> test_function_2()
(Pdb) bt
...
<doctest test.test_pdb.test_post_mortem[2]>(10)<module>()
-> test_function()
> <doctest test.test_pdb.test_post_mortem[1]>(3)test_function()
-> test_function_2()
<doctest test.test_pdb.test_post_mortem[0]>(3)test_function_2()
-> 1/0
(Pdb) list
1 def test_function():
2 import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
3 -> test_function_2()
4 print('Not reached.')
[EOF]
(Pdb) down
> <doctest test.test_pdb.test_post_mortem[0]>(3)test_function_2()
-> 1/0
(Pdb) list
1 def test_function_2():
2 try:
3 >> 1/0
4 finally:
5 -> print('Exception!')
[EOF]
(Pdb) continue
Correctly reraised.
"""
def test_pdb_skip_modules():
"""This illustrates the simple case of module skipping.
>>> def skip_module():
... import string
... import pdb; pdb.Pdb(skip=['stri*'], nosigint=True, readrc=False).set_trace()
... string.capwords('FOO')
>>> with PdbTestInput([
... 'step',
... 'continue',
... ]):
... skip_module()
> <doctest test.test_pdb.test_pdb_skip_modules[0]>(4)skip_module()
-> string.capwords('FOO')
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_skip_modules[0]>(4)skip_module()->None
-> string.capwords('FOO')
(Pdb) continue
"""
# Module for testing skipping of module that makes a callback
mod = types.ModuleType('module_to_skip')
exec('def foo_pony(callback): x = 1; callback(); return None', mod.__dict__)
def test_pdb_skip_modules_with_callback():
"""This illustrates skipping of modules that call into other code.
>>> def skip_module():
... def callback():
... return None
... import pdb; pdb.Pdb(skip=['module_to_skip*'], nosigint=True, readrc=False).set_trace()
... mod.foo_pony(callback)
>>> with PdbTestInput([
... 'step',
... 'step',
... 'step',
... 'step',
... 'step',
... 'continue',
... ]):
... skip_module()
... pass # provides something to "step" to
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(5)skip_module()
-> mod.foo_pony(callback)
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(2)callback()
-> def callback():
(Pdb) step
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(3)callback()
-> return None
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(3)callback()->None
-> return None
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(5)skip_module()->None
-> mod.foo_pony(callback)
(Pdb) step
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[1]>(10)<module>()
-> pass # provides something to "step" to
(Pdb) continue
"""
def test_pdb_continue_in_bottomframe():
"""Test that "continue" and "next" work properly in bottom frame (issue #5294).
>>> def test_function():
... import pdb, sys; inst = pdb.Pdb(nosigint=True, readrc=False)
... inst.set_trace()
... inst.botframe = sys._getframe() # hackery to get the right botframe
... print(1)
... print(2)
... print(3)
... print(4)
>>> with PdbTestInput([ # doctest: +ELLIPSIS
... 'next',
... 'break 7',
... 'continue',
... 'next',
... 'continue',
... 'continue',
... ]):
... test_function()
> <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(4)test_function()
-> inst.botframe = sys._getframe() # hackery to get the right botframe
(Pdb) next
> <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(5)test_function()
-> print(1)
(Pdb) break 7
Breakpoint ... at <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>:7
(Pdb) continue
1
2
> <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(7)test_function()
-> print(3)
(Pdb) next
3
> <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(8)test_function()
-> print(4)
(Pdb) continue
4
"""
def pdb_invoke(method, arg):
"""Run pdb.method(arg)."""
getattr(pdb.Pdb(nosigint=True, readrc=False), method)(arg)
def test_pdb_run_with_incorrect_argument():
"""Testing run and runeval with incorrect first argument.
>>> pti = PdbTestInput(['continue',])
>>> with pti:
... pdb_invoke('run', lambda x: x)
Traceback (most recent call last):
TypeError: exec() arg 1 must be a string, bytes or code object
>>> with pti:
... pdb_invoke('runeval', lambda x: x)
Traceback (most recent call last):
TypeError: eval() arg 1 must be a string, bytes or code object
"""
def test_pdb_run_with_code_object():
"""Testing run and runeval with code object as a first argument.
>>> with PdbTestInput(['step','x', 'continue']): # doctest: +ELLIPSIS
... pdb_invoke('run', compile('x=1', '<string>', 'exec'))
> <string>(1)<module>()...
(Pdb) step
--Return--
> <string>(1)<module>()->None
(Pdb) x
1
(Pdb) continue
>>> with PdbTestInput(['x', 'continue']):
... x=0
... pdb_invoke('runeval', compile('x+1', '<string>', 'eval'))
> <string>(1)<module>()->None
(Pdb) x
1
(Pdb) continue
"""
def test_next_until_return_at_return_event():
"""Test that pdb stops after a next/until/return issued at a return debug event.
>>> def test_function_2():
... x = 1
... x = 2
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... test_function_2()
... test_function_2()
... test_function_2()
... end = 1
>>> from bdb import Breakpoint
>>> Breakpoint.next = 1
>>> with PdbTestInput(['break test_function_2',
... 'continue',
... 'return',
... 'next',
... 'continue',
... 'return',
... 'until',
... 'continue',
... 'return',
... 'return',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_next_until_return_at_return_event[1]>(3)test_function()
-> test_function_2()
(Pdb) break test_function_2
Breakpoint 1 at <doctest test.test_pdb.test_next_until_return_at_return_event[0]>:1
(Pdb) continue
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(2)test_function_2()
-> x = 1
(Pdb) return
--Return--
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(3)test_function_2()->None
-> x = 2
(Pdb) next
> <doctest test.test_pdb.test_next_until_return_at_return_event[1]>(4)test_function()
-> test_function_2()
(Pdb) continue
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(2)test_function_2()
-> x = 1
(Pdb) return
--Return--
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(3)test_function_2()->None
-> x = 2
(Pdb) until
> <doctest test.test_pdb.test_next_until_return_at_return_event[1]>(5)test_function()
-> test_function_2()
(Pdb) continue
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(2)test_function_2()
-> x = 1
(Pdb) return
--Return--
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(3)test_function_2()->None
-> x = 2
(Pdb) return
> <doctest test.test_pdb.test_next_until_return_at_return_event[1]>(6)test_function()
-> end = 1
(Pdb) continue
"""
def test_pdb_next_command_for_generator():
"""Testing skip unwindng stack on yield for generators for "next" command
>>> def test_gen():
... yield 0
... return 1
... yield 2
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... it = test_gen()
... try:
... if next(it) != 0:
... raise AssertionError
... next(it)
... except StopIteration as ex:
... if ex.value != 1:
... raise AssertionError
... print("finished")
>>> with PdbTestInput(['step',
... 'step',
... 'step',
... 'next',
... 'next',
... 'step',
... 'step',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_next_command_for_generator[1]>(3)test_function()
-> it = test_gen()
(Pdb) step
> <doctest test.test_pdb.test_pdb_next_command_for_generator[1]>(4)test_function()
-> try:
(Pdb) step
> <doctest test.test_pdb.test_pdb_next_command_for_generator[1]>(5)test_function()
-> if next(it) != 0:
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_next_command_for_generator[0]>(1)test_gen()
-> def test_gen():
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_generator[0]>(2)test_gen()
-> yield 0
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_generator[0]>(3)test_gen()
-> return 1
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_next_command_for_generator[0]>(3)test_gen()->1
-> return 1
(Pdb) step
StopIteration: 1
> <doctest test.test_pdb.test_pdb_next_command_for_generator[1]>(7)test_function()
-> next(it)
(Pdb) continue
finished
"""
def test_pdb_next_command_for_coroutine():
"""Testing skip unwindng stack on yield for coroutines for "next" command
>>> import asyncio
>>> async def test_coro():
... await asyncio.sleep(0)
... await asyncio.sleep(0)
... await asyncio.sleep(0)
>>> async def test_main():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... await test_coro()
>>> def test_function():
... loop = asyncio.new_event_loop()
... loop.run_until_complete(test_main())
... loop.close()
... print("finished")
>>> with PdbTestInput(['step',
... 'step',
... 'next',
... 'next',
... 'next',
... 'step',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[2]>(3)test_main()
-> await test_coro()
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[1]>(1)test_coro()
-> async def test_coro():
(Pdb) step
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[1]>(2)test_coro()
-> await asyncio.sleep(0)
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[1]>(3)test_coro()
-> await asyncio.sleep(0)
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[1]>(4)test_coro()
-> await asyncio.sleep(0)
(Pdb) next
Internal StopIteration
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[2]>(3)test_main()
-> await test_coro()
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[2]>(3)test_main()->None
-> await test_coro()
(Pdb) continue
finished
"""
def test_pdb_next_command_for_asyncgen():
"""Testing skip unwindng stack on yield for coroutines for "next" command
>>> import asyncio
>>> async def agen():
... yield 1
... await asyncio.sleep(0)
... yield 2
>>> async def test_coro():
... async for x in agen():
... print(x)
>>> async def test_main():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... await test_coro()
>>> def test_function():
... loop = asyncio.new_event_loop()
... loop.run_until_complete(test_main())
... loop.close()
... print("finished")
>>> with PdbTestInput(['step',
... 'step',
... 'next',
... 'next',
... 'step',
... 'next',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[3]>(3)test_main()
-> await test_coro()
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[2]>(1)test_coro()
-> async def test_coro():
(Pdb) step
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[2]>(2)test_coro()
-> async for x in agen():
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[2]>(3)test_coro()
-> print(x)
(Pdb) next
1
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[2]>(2)test_coro()
-> async for x in agen():
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[1]>(2)agen()
-> yield 1
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[1]>(3)agen()
-> await asyncio.sleep(0)
(Pdb) continue
2
finished
"""
def test_pdb_return_command_for_generator():
"""Testing no unwindng stack on yield for generators
for "return" command
>>> def test_gen():
... yield 0
... return 1
... yield 2
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... it = test_gen()
... try:
... if next(it) != 0:
... raise AssertionError
... next(it)
... except StopIteration as ex:
... if ex.value != 1:
... raise AssertionError
... print("finished")
>>> with PdbTestInput(['step',
... 'step',
... 'step',
... 'return',
... 'step',
... 'step',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(3)test_function()
-> it = test_gen()
(Pdb) step
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(4)test_function()
-> try:
(Pdb) step
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(5)test_function()
-> if next(it) != 0:
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_return_command_for_generator[0]>(1)test_gen()
-> def test_gen():
(Pdb) return
StopIteration: 1
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(7)test_function()
-> next(it)
(Pdb) step
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(8)test_function()
-> except StopIteration as ex:
(Pdb) step
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(9)test_function()
-> if ex.value != 1:
(Pdb) continue
finished
"""
def test_pdb_return_command_for_coroutine():
"""Testing no unwindng stack on yield for coroutines for "return" command
>>> import asyncio
>>> async def test_coro():
... await asyncio.sleep(0)
... await asyncio.sleep(0)
... await asyncio.sleep(0)
>>> async def test_main():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... await test_coro()
>>> def test_function():
... loop = asyncio.new_event_loop()
... loop.run_until_complete(test_main())
... loop.close()
... print("finished")
>>> with PdbTestInput(['step',
... 'step',
... 'next',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_return_command_for_coroutine[2]>(3)test_main()
-> await test_coro()
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_return_command_for_coroutine[1]>(1)test_coro()
-> async def test_coro():
(Pdb) step
> <doctest test.test_pdb.test_pdb_return_command_for_coroutine[1]>(2)test_coro()
-> await asyncio.sleep(0)
(Pdb) next
> <doctest test.test_pdb.test_pdb_return_command_for_coroutine[1]>(3)test_coro()
-> await asyncio.sleep(0)
(Pdb) continue
finished
"""
def test_pdb_until_command_for_generator():
"""Testing no unwindng stack on yield for generators
for "until" command if target breakpoing is not reached
>>> def test_gen():
... yield 0
... yield 1
... yield 2
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... for i in test_gen():
... print(i)
... print("finished")
>>> with PdbTestInput(['step',
... 'until 4',
... 'step',
... 'step',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_until_command_for_generator[1]>(3)test_function()
-> for i in test_gen():
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_until_command_for_generator[0]>(1)test_gen()
-> def test_gen():
(Pdb) until 4
0
1
> <doctest test.test_pdb.test_pdb_until_command_for_generator[0]>(4)test_gen()
-> yield 2
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_until_command_for_generator[0]>(4)test_gen()->2
-> yield 2
(Pdb) step
> <doctest test.test_pdb.test_pdb_until_command_for_generator[1]>(4)test_function()
-> print(i)
(Pdb) continue
2
finished
"""
def test_pdb_until_command_for_coroutine():
"""Testing no unwindng stack for coroutines
for "until" command if target breakpoing is not reached
>>> import asyncio
>>> async def test_coro():
... print(0)
... await asyncio.sleep(0)
... print(1)
... await asyncio.sleep(0)
... print(2)
... await asyncio.sleep(0)
... print(3)
>>> async def test_main():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... await test_coro()
>>> def test_function():
... loop = asyncio.new_event_loop()
... loop.run_until_complete(test_main())
... loop.close()
... print("finished")
>>> with PdbTestInput(['step',
... 'until 8',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_until_command_for_coroutine[2]>(3)test_main()
-> await test_coro()
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_until_command_for_coroutine[1]>(1)test_coro()
-> async def test_coro():
(Pdb) until 8
0
1
2
> <doctest test.test_pdb.test_pdb_until_command_for_coroutine[1]>(8)test_coro()
-> print(3)
(Pdb) continue
3
finished
"""
def test_pdb_next_command_in_generator_for_loop():
"""The next command on returning from a generator controlled by a for loop.
>>> def test_gen():
... yield 0
... return 1
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... for i in test_gen():
... print('value', i)
... x = 123
>>> with PdbTestInput(['break test_gen',
... 'continue',
... 'next',
... 'next',
... 'next',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[1]>(3)test_function()
-> for i in test_gen():
(Pdb) break test_gen
Breakpoint 6 at <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[0]>:1
(Pdb) continue
> <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[0]>(2)test_gen()
-> yield 0
(Pdb) next
value 0
> <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[0]>(3)test_gen()
-> return 1
(Pdb) next
Internal StopIteration: 1
> <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[1]>(3)test_function()
-> for i in test_gen():
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[1]>(5)test_function()
-> x = 123
(Pdb) continue
"""
def test_pdb_next_command_subiterator():
"""The next command in a generator with a subiterator.
>>> def test_subgenerator():
... yield 0
... return 1
>>> def test_gen():
... x = yield from test_subgenerator()
... return x
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... for i in test_gen():
... print('value', i)
... x = 123
>>> with PdbTestInput(['step',
... 'step',
... 'next',
... 'next',
... 'next',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_next_command_subiterator[2]>(3)test_function()
-> for i in test_gen():
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_next_command_subiterator[1]>(1)test_gen()
-> def test_gen():
(Pdb) step
> <doctest test.test_pdb.test_pdb_next_command_subiterator[1]>(2)test_gen()
-> x = yield from test_subgenerator()
(Pdb) next
value 0
> <doctest test.test_pdb.test_pdb_next_command_subiterator[1]>(3)test_gen()
-> return x
(Pdb) next
Internal StopIteration: 1
> <doctest test.test_pdb.test_pdb_next_command_subiterator[2]>(3)test_function()
-> for i in test_gen():
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_subiterator[2]>(5)test_function()
-> x = 123
(Pdb) continue
"""
def test_pdb_issue_20766():
"""Test for reference leaks when the SIGINT handler is set.
>>> def test_function():
... i = 1
... while i <= 2:
... sess = pdb.Pdb()
... sess.set_trace(sys._getframe())
... print('pdb %d: %s' % (i, sess._previous_sigint_handler))
... i += 1
>>> with PdbTestInput(['continue',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_issue_20766[0]>(6)test_function()
-> print('pdb %d: %s' % (i, sess._previous_sigint_handler))
(Pdb) continue
pdb 1: <built-in function default_int_handler>
> <doctest test.test_pdb.test_pdb_issue_20766[0]>(5)test_function()
-> sess.set_trace(sys._getframe())
(Pdb) continue
pdb 2: <built-in function default_int_handler>
"""
class PdbTestCase(unittest.TestCase):
def run_pdb(self, script, commands):
"""Run 'script' lines with pdb and the pdb 'commands'."""
filename = 'main.py'
with open(filename, 'w') as f:
f.write(textwrap.dedent(script))
self.addCleanup(support.unlink, filename)
self.addCleanup(support.rmtree, '__pycache__')
cmd = [sys.executable, '-m', 'pdb', filename]
stdout = stderr = None
with subprocess.Popen(cmd, stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT,
) as proc:
stdout, stderr = proc.communicate(str.encode(commands))
stdout = stdout and bytes.decode(stdout)
stderr = stderr and bytes.decode(stderr)
return stdout, stderr
def _assert_find_function(self, file_content, func_name, expected):
file_content = textwrap.dedent(file_content)
with open(support.TESTFN, 'w') as f:
f.write(file_content)
expected = None if not expected else (
expected[0], support.TESTFN, expected[1])
self.assertEqual(
expected, pdb.find_function(func_name, support.TESTFN))
def test_find_function_empty_file(self):
self._assert_find_function('', 'foo', None)
def test_find_function_found(self):
self._assert_find_function(
"""\
def foo():
pass
def bar():
pass
def quux():
pass
""",
'bar',
('bar', 4),
)
def test_issue7964(self):
# open the file as binary so we can force \r\n newline
with open(support.TESTFN, 'wb') as f:
f.write(b'print("testing my pdb")\r\n')
cmd = [sys.executable, '-m', 'pdb', support.TESTFN]
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
self.addCleanup(proc.stdout.close)
stdout, stderr = proc.communicate(b'quit\n')
self.assertNotIn(b'SyntaxError', stdout,
"Got a syntax error running test script under PDB")
def test_issue13183(self):
script = """
from bar import bar
def foo():
bar()
def nope():
pass
def foobar():
foo()
nope()
foobar()
"""
commands = """
from bar import bar
break bar
continue
step
step
quit
"""
bar = """
def bar():
pass
"""
with open('bar.py', 'w') as f:
f.write(textwrap.dedent(bar))
self.addCleanup(support.unlink, 'bar.py')
stdout, stderr = self.run_pdb(script, commands)
self.assertTrue(
any('main.py(5)foo()->None' in l for l in stdout.splitlines()),
'Failed to step into the caller after a return')
def test_issue13210(self):
# invoking "continue" on a non-main thread triggered an exception
# inside signal.signal
# raises SkipTest if python was built without threads
support.import_module('threading')
with open(support.TESTFN, 'wb') as f:
f.write(textwrap.dedent("""
import threading
import pdb
def start_pdb():
pdb.Pdb(readrc=False).set_trace()
x = 1
y = 1
t = threading.Thread(target=start_pdb)
t.start()""").encode('ascii'))
cmd = [sys.executable, '-u', support.TESTFN]
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
self.addCleanup(proc.stdout.close)
stdout, stderr = proc.communicate(b'cont\n')
self.assertNotIn('Error', stdout.decode(),
"Got an error running test script under PDB")
def test_issue16180(self):
# A syntax error in the debuggee.
script = "def f: pass\n"
commands = ''
expected = "SyntaxError:"
stdout, stderr = self.run_pdb(script, commands)
self.assertIn(expected, stdout,
'\n\nExpected:\n{}\nGot:\n{}\n'
'Failed to handle a syntax error in the debuggee.'
.format(expected, stdout))
def test_readrc_kwarg(self):
script = textwrap.dedent("""
import pdb; pdb.Pdb(readrc=False).set_trace()
print('hello')
""")
save_home = os.environ.pop('HOME', None)
try:
with support.temp_cwd():
with open('.pdbrc', 'w') as f:
f.write("invalid\n")
with open('main.py', 'w') as f:
f.write(script)
cmd = [sys.executable, 'main.py']
proc = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
)
with proc:
stdout, stderr = proc.communicate(b'q\n')
self.assertNotIn("NameError: name 'invalid' is not defined",
stdout.decode())
finally:
if save_home is not None:
os.environ['HOME'] = save_home
def tearDown(self):
support.unlink(support.TESTFN)
def load_tests(*args):
from test import test_pdb
suites = [unittest.makeSuite(PdbTestCase), doctest.DocTestSuite(test_pdb)]
return unittest.TestSuite(suites)
if __name__ == '__main__':
unittest.main()
|
streamwaveform.py
|
import uhd
from uhd import libpyuhd as lib
import threading
from Tools.WaveformMonitor import WaveformMonitor
import time
import os
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
class EventHandler(FileSystemEventHandler):
def __init__(self, streamingThread, waveMan):
super(EventHandler, self).__init__()
self.streamingThread = streamingThread
self.waveMan = waveMan
def on_any_event(self, event):
result = None
while result is None:
try:
if self.waveMan.compareFreqs(self.waveMan.getJsonData()):
self.waveMan.jsonData = self.waveMan.getJsonData()
result = 1
self.waveMan.initializeWaveforms()
else:
result = 1
self.waveMan.jsonData = self.waveMan.getJsonData()
except:
pass  # the JSON file may still be being written; retry until it can be read
if self.waveMan.getTotalPower(1) < 30 and self.waveMan.getTotalPower(0) < 30:
self.waveMan.initializeSDR()
self.streamingThread.wave = self.waveMan.getOutputWaveform()
else:
print ("WARNING: TOO MUCH POWER")
def streamWaveform(streamer, wave, metadata):
t = threading.currentThread()
while getattr(t, "run", True):
streamingWave = getattr(t, "wave", wave)
streamer.send(streamingWave, metadata)
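# --- module-level setup ---
# Open the USRP, load the waveform parameters from Resources/waveformArguments.json,
# configure the SDR, start the streaming thread, and then watch the Resources directory
# with watchdog so that edits to the JSON file retune the running stream on the fly.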
usrp = uhd.usrp.MultiUSRP('')
waveMan = WaveformMonitor("Resources/waveformArguments.json", usrp)
waveMan.initializeWaveforms()
jsonData = waveMan.getJsonData()
waveMan.initializeSDR()
st_args = lib.usrp.stream_args("fc32", "sc16")
st_args.channels = range(len(jsonData['channels']))
streamer = usrp.get_tx_stream(st_args)
buffer_samps = streamer.get_max_num_samps()
metadata = lib.types.tx_metadata()
wave = waveMan.getOutputWaveform()
stream = threading.Thread(target=streamWaveform, args=(streamer, wave, metadata))
stream.start()
path = os.path.abspath("Resources")
event_handler = EventHandler(stream, waveMan)
observer = Observer()
observer.schedule(event_handler, path)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
|
main.py
|
#!/usr/bin/env python3
"""
When studying distributed systems, it is useful to play with concepts and create prototype applications.
This main runner aims to help with that prototyping: an application can be created as a class
in ./classes/apps. This module contains the main runner that runs on each node. It is normally called by the
pymace main emulation script, but it can also be invoked manually when running on real hardware or for testing.
Other support classes live in ./classes to bootstrap some basic functionality, but they are completely
optional since most of it is already covered by better Python libraries.
"""
__author__ = "Bruno Chianca Ferreira"
__license__ = "MIT"
__version__ = "0.6"
__maintainer__ = "Bruno Chianca Ferreira"
__email__ = "brunobcf@gmail.com"
import threading, sys, traceback, time, random, json, os, shutil, socket, argparse
from classes import prompt, log, node, nodedump, tools
from apscheduler.schedulers.background import BackgroundScheduler
fwd_old = 0
inc=0
packet_counter = 0
anim = ['\\','|','/','-']
def main(tag):
#this function controls all the tasks
try:
_start() #main execution loop
_shutdown()
except KeyboardInterrupt:
logger.print_error("Interrupted by ctrl+c")
os._exit(1)
except:
logger.print_error("Scheduling error!")
traceback.print_exc()
def _start():
random.seed("this_is_enac "+Node.fulltag); #Seed for random
prompt_thread.start() #starts prompt
# finally a scheduler that actually works
scheduler.add_job(task1, 'interval', seconds=1, id='running')
scheduler.add_job(task2, 'interval', seconds=Node.second/1000, id='sim_sec')
scheduler.add_job(task3, 'interval', seconds=1, id='real_sec')
scheduler.start()
Node.start() # replace this by application start
while Node.stop == False:
time.sleep(2)
def _shutdown():
try:
scheduler.remove_job('running')
scheduler.remove_job('sim_sec')
scheduler.remove_job('real_sec')
scheduler.shutdown()
except:
pass
prompt_thread.join(timeout=1)
os._exit(1)
def task1(): #checks whether the run has finished and stops the simulation
"""
This function checks whether the simulation is supposed to keep running; otherwise it stops it and cleans up
"""
if Node.stop==False: #Only run while the node has not already been stopped
#if Node.Battery.battery_percent <= 1 or Node.lock == False:
if Node.lock == False:
logger.print_alert("Simulation ended. Recording logs.")
#Node.lock=False
prompt.lock=False
logger.datalog(Node)
logger.log_messages(Node)
logger.log_network(Node)
try:
logger.print_alert("Shuting down node.")
Node.shutdown()
endfile = open("reports/" + logger.simdir + "/finished/"+args.tag+".csv","w") #
endfile.write('done\n')
endfile.close()
logger.print_alert("Done:" + str(Node.stop))
except:
pass
return
def task2(): #1 tick per sim second
"""
This function counts simulated seconds and stops the run after the defined time limit
"""
if Node.lock==False: #Do not run if the simulation is supposed to stop
return
Node.simulation_seconds += 1
if Node.simulation_seconds > args.time_limit:
Node.lock = False
def task3(): #This task draws an HMI to show some realtime info
"""
This function displays the elapsed time and a small indication that something is happening in the network.
"""
if Node.lock==False: #Do not run if the simulation is supposed to stop
return
Node.simulation_tick_seconds += 1
fwd_new = Node.stats[0]
visible = len(Node.Membership.visible)
#topology = len(Node.Membership.topology) + 1
global inc, fwd_old, packet_counter
if packet_counter >= 5:
Node.Network.traffic = Node.Network.packets / 5
Node.Network.packets = 0
packet_counter = 0
else:
packet_counter += 1
if fwd_new > fwd_old:
fwd_old= fwd_new
inc+=1
if inc == 4:
inc = 0
logger.printxy(1,79,anim[inc])
logger.printxy(2, 80-(len(str(Node.simulation_seconds))),Node.simulation_seconds)
def startup():
"""
This function is a synchronizer so that all nodes can start ROUGHLY at the same time
"""
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
os.remove("/tmp/pymace.sock.node"+str(args.number))
except OSError:
#traceback.print_exc()
pass
try:
s.bind("/tmp/pymace.sock.node"+str(args.number))
s.listen(10)
except OSError:
traceback.print_exc()
pass
conn, addr = s.accept()
data = conn.recv(1024)
if (float(data) - time.time()) < 0:
conn.send("NOK".encode())
else:
conn.send("OK".encode())
#print(float(data))
#the received value is the global start time; it is the same for all nodes, and since all nodes run on the same computer the clock is effectively universal
conn.close()
return data
def print_header():
print("pymace v." + __version__ + " - application test")
def parse_arguments():
parser = argparse.ArgumentParser(description='Some arguments are obligatory and must follow the correct order as indicated')
parser.add_argument("tag", help="A tag for the node")
parser.add_argument("application", help="Which application you want to use")
parser.add_argument("time_scale", help="Time scaler to make the application run faster(<1) or slower(>1)", type=float)
parser.add_argument("time_limit", help="Simulation runtime limit in seconds", type=int)
#parser.add_argument("mobility", help="The mobility model being use for reporting reasons")
parser.add_argument("ip", help="IP protocol: ipv4 or ipv6", choices=['ipv4', 'ipv6'])
parser.add_argument("-v", "--verbosity", action="store_true", help="Verbose output")
parser.add_argument("-b", "--battery", type=int, help="Initial battery level", default=100)
parser.add_argument("-e", "--energy", type=str, help="Energy model", default="stub")
parser.add_argument("-r", "--role", type=str, help="Set a role if required by application", default="node")
parser.add_argument("-p", "--protocol", type=str, help="Communication protocol", default="sockets")
parser.add_argument("-m", "--membership", type=str, help="Membership control", default="local")
parser.add_argument("-f", "--fault_detector", type=str, help="Fault Detector", default="simple")
parser.add_argument("-o", "--mobility", type=str, help="Mobility Model", default="Random_Walk")
parser.add_argument("-n", "--number", type=int, help="Node number", default=0)
return parser.parse_args()
if __name__ == '__main__': #run the main function only when this file is executed directly, not when it is imported as a module
try:
print_header()
args = parse_arguments()
#defining the node
Node = node.Node(args.tag,
args.number,
args.energy,
args.mobility,
args.application,
args.role,
args.time_scale,
args.battery,
args.ip.upper(),
args.protocol,
args.membership,
args.fault_detector) #create node object
prompt = prompt.Prompt(Node)
logger = log.Log(Node, args.tag, args.role, args.energy, args.mobility)
logger.clean_nodedumps(Node)
#wait for the runner script to send the time when this node should start
start=startup()
#print("Starting in: " + str(float(start) * 1000 - time.time() * 1000) + "ms")
print("Starting ...")
#start = 1
while float(start) > time.time():
#busy-wait until it is time to start
pass
tools.printxy(2,1, " ")
#############################################################################
prompt_thread = threading.Thread(target=prompt.prompt, args=(Node,))
scheduler = BackgroundScheduler()
main(args.tag); #call scheduler function
except KeyboardInterrupt:
logger.print_error("Interrupted by ctrl+c")
logger.logfile.close()
except:
traceback.print_exc()
|
test_xmlrpc.py
|
# expected: fail
import base64
import datetime
import sys
import time
import unittest
import xmlrpclib
import SimpleXMLRPCServer
import mimetools
import httplib
import socket
import StringIO
import os
import re
from test import test_support
try:
import threading
except ImportError:
threading = None
try:
import gzip
except ImportError:
gzip = None
try:
unicode
except NameError:
have_unicode = False
else:
have_unicode = True
alist = [{'astring': 'foo@bar.baz.spam',
'afloat': 7283.43,
'anint': 2**20,
'ashortlong': 2L,
'anotherlist': ['.zyx.41'],
'abase64': xmlrpclib.Binary("my dog has fleas"),
'boolean': xmlrpclib.False,
'unicode': u'\u4000\u6000\u8000',
u'ukey\u4000': 'regular value',
'datetime1': xmlrpclib.DateTime('20050210T11:41:23'),
'datetime2': xmlrpclib.DateTime(
(2005, 02, 10, 11, 41, 23, 0, 1, -1)),
'datetime3': xmlrpclib.DateTime(
datetime.datetime(2005, 02, 10, 11, 41, 23)),
}]
class XMLRPCTestCase(unittest.TestCase):
def test_dump_load(self):
self.assertEqual(alist,
xmlrpclib.loads(xmlrpclib.dumps((alist,)))[0][0])
def test_dump_bare_datetime(self):
# This checks that an unwrapped datetime.date object can be handled
# by the marshalling code. This can't be done via test_dump_load()
# since with use_datetime set to 1 the unmarshaller would create
# datetime objects for the 'datetime[123]' keys as well
dt = datetime.datetime(2005, 02, 10, 11, 41, 23)
s = xmlrpclib.dumps((dt,))
(newdt,), m = xmlrpclib.loads(s, use_datetime=1)
self.assertEqual(newdt, dt)
self.assertEqual(m, None)
(newdt,), m = xmlrpclib.loads(s, use_datetime=0)
self.assertEqual(newdt, xmlrpclib.DateTime('20050210T11:41:23'))
def test_datetime_before_1900(self):
# same as before but with a date before 1900
dt = datetime.datetime(1, 02, 10, 11, 41, 23)
s = xmlrpclib.dumps((dt,))
(newdt,), m = xmlrpclib.loads(s, use_datetime=1)
self.assertEqual(newdt, dt)
self.assertEqual(m, None)
(newdt,), m = xmlrpclib.loads(s, use_datetime=0)
self.assertEqual(newdt, xmlrpclib.DateTime('00010210T11:41:23'))
def test_cmp_datetime_DateTime(self):
now = datetime.datetime.now()
dt = xmlrpclib.DateTime(now.timetuple())
self.assertTrue(dt == now)
self.assertTrue(now == dt)
then = now + datetime.timedelta(seconds=4)
self.assertTrue(then >= dt)
self.assertTrue(dt < then)
def test_bug_1164912 (self):
d = xmlrpclib.DateTime()
((new_d,), dummy) = xmlrpclib.loads(xmlrpclib.dumps((d,),
methodresponse=True))
self.assertIsInstance(new_d.value, str)
# Check that the output of dumps() is still an 8-bit string
s = xmlrpclib.dumps((new_d,), methodresponse=True)
self.assertIsInstance(s, str)
def test_newstyle_class(self):
class T(object):
pass
t = T()
t.x = 100
t.y = "Hello"
((t2,), dummy) = xmlrpclib.loads(xmlrpclib.dumps((t,)))
self.assertEqual(t2, t.__dict__)
def test_dump_big_long(self):
self.assertRaises(OverflowError, xmlrpclib.dumps, (2L**99,))
def test_dump_bad_dict(self):
self.assertRaises(TypeError, xmlrpclib.dumps, ({(1,2,3): 1},))
def test_dump_recursive_seq(self):
l = [1,2,3]
t = [3,4,5,l]
l.append(t)
self.assertRaises(TypeError, xmlrpclib.dumps, (l,))
def test_dump_recursive_dict(self):
d = {'1':1, '2':1}
t = {'3':3, 'd':d}
d['t'] = t
self.assertRaises(TypeError, xmlrpclib.dumps, (d,))
def test_dump_big_int(self):
if sys.maxint > 2L**31-1:
self.assertRaises(OverflowError, xmlrpclib.dumps,
(int(2L**34),))
xmlrpclib.dumps((xmlrpclib.MAXINT, xmlrpclib.MININT))
self.assertRaises(OverflowError, xmlrpclib.dumps, (xmlrpclib.MAXINT+1,))
self.assertRaises(OverflowError, xmlrpclib.dumps, (xmlrpclib.MININT-1,))
def dummy_write(s):
pass
m = xmlrpclib.Marshaller()
m.dump_int(xmlrpclib.MAXINT, dummy_write)
m.dump_int(xmlrpclib.MININT, dummy_write)
self.assertRaises(OverflowError, m.dump_int, xmlrpclib.MAXINT+1, dummy_write)
self.assertRaises(OverflowError, m.dump_int, xmlrpclib.MININT-1, dummy_write)
def test_dump_none(self):
value = alist + [None]
arg1 = (alist + [None],)
strg = xmlrpclib.dumps(arg1, allow_none=True)
self.assertEqual(value,
xmlrpclib.loads(strg)[0][0])
self.assertRaises(TypeError, xmlrpclib.dumps, (arg1,))
def test_default_encoding_issues(self):
# SF bug #1115989: wrong decoding in '_stringify'
utf8 = """<?xml version='1.0' encoding='iso-8859-1'?>
<params>
<param><value>
<string>abc \x95</string>
</value></param>
<param><value>
<struct>
<member>
<name>def \x96</name>
<value><string>ghi \x97</string></value>
</member>
</struct>
</value></param>
</params>
"""
# sys.setdefaultencoding() normally doesn't exist after site.py is
# loaded. Import a temporary fresh copy to get access to it
# but then restore the original copy to avoid messing with
# other potentially modified sys module attributes
old_encoding = sys.getdefaultencoding()
with test_support.CleanImport('sys'):
import sys as temp_sys
temp_sys.setdefaultencoding("iso-8859-1")
try:
(s, d), m = xmlrpclib.loads(utf8)
finally:
temp_sys.setdefaultencoding(old_encoding)
items = d.items()
if have_unicode:
self.assertEqual(s, u"abc \x95")
self.assertIsInstance(s, unicode)
self.assertEqual(items, [(u"def \x96", u"ghi \x97")])
self.assertIsInstance(items[0][0], unicode)
self.assertIsInstance(items[0][1], unicode)
else:
self.assertEqual(s, "abc \xc2\x95")
self.assertEqual(items, [("def \xc2\x96", "ghi \xc2\x97")])
class HelperTestCase(unittest.TestCase):
def test_escape(self):
self.assertEqual(xmlrpclib.escape("a&b"), "a&b")
self.assertEqual(xmlrpclib.escape("a<b"), "a<b")
self.assertEqual(xmlrpclib.escape("a>b"), "a>b")
class FaultTestCase(unittest.TestCase):
def test_repr(self):
f = xmlrpclib.Fault(42, 'Test Fault')
self.assertEqual(repr(f), "<Fault 42: 'Test Fault'>")
self.assertEqual(repr(f), str(f))
def test_dump_fault(self):
f = xmlrpclib.Fault(42, 'Test Fault')
s = xmlrpclib.dumps((f,))
(newf,), m = xmlrpclib.loads(s)
self.assertEqual(newf, {'faultCode': 42, 'faultString': 'Test Fault'})
self.assertEqual(m, None)
s = xmlrpclib.Marshaller().dumps(f)
self.assertRaises(xmlrpclib.Fault, xmlrpclib.loads, s)
class DateTimeTestCase(unittest.TestCase):
def test_default(self):
t = xmlrpclib.DateTime()
def test_time(self):
d = 1181399930.036952
t = xmlrpclib.DateTime(d)
self.assertEqual(str(t), time.strftime("%Y%m%dT%H:%M:%S", time.localtime(d)))
def test_time_tuple(self):
d = (2007,6,9,10,38,50,5,160,0)
t = xmlrpclib.DateTime(d)
self.assertEqual(str(t), '20070609T10:38:50')
def test_time_struct(self):
d = time.localtime(1181399930.036952)
t = xmlrpclib.DateTime(d)
self.assertEqual(str(t), time.strftime("%Y%m%dT%H:%M:%S", d))
def test_datetime_datetime(self):
d = datetime.datetime(2007,1,2,3,4,5)
t = xmlrpclib.DateTime(d)
self.assertEqual(str(t), '20070102T03:04:05')
def test_repr(self):
d = datetime.datetime(2007,1,2,3,4,5)
t = xmlrpclib.DateTime(d)
val ="<DateTime '20070102T03:04:05' at %x>" % id(t)
self.assertEqual(repr(t), val)
def test_decode(self):
d = ' 20070908T07:11:13 '
t1 = xmlrpclib.DateTime()
t1.decode(d)
tref = xmlrpclib.DateTime(datetime.datetime(2007,9,8,7,11,13))
self.assertEqual(t1, tref)
t2 = xmlrpclib._datetime(d)
self.assertEqual(t1, tref)
class BinaryTestCase(unittest.TestCase):
def test_default(self):
t = xmlrpclib.Binary()
self.assertEqual(str(t), '')
def test_string(self):
d = '\x01\x02\x03abc123\xff\xfe'
t = xmlrpclib.Binary(d)
self.assertEqual(str(t), d)
def test_decode(self):
d = '\x01\x02\x03abc123\xff\xfe'
de = base64.encodestring(d)
t1 = xmlrpclib.Binary()
t1.decode(de)
self.assertEqual(str(t1), d)
t2 = xmlrpclib._binary(de)
self.assertEqual(str(t2), d)
ADDR = PORT = URL = None
# The evt is set twice. First when the server is ready to serve.
# Second when the server has been shutdown. The user must clear
# the event after it has been set the first time to catch the second set.
def http_server(evt, numrequests, requestHandler=None):
class TestInstanceClass:
def div(self, x, y):
return x // y
def _methodHelp(self, name):
if name == 'div':
return 'This is the div function'
def my_function():
'''This is my function'''
return True
class MyXMLRPCServer(SimpleXMLRPCServer.SimpleXMLRPCServer):
def get_request(self):
# Ensure the socket is always non-blocking. On Linux, socket
# attributes are not inherited like they are on *BSD and Windows.
s, port = self.socket.accept()
s.setblocking(True)
return s, port
if not requestHandler:
requestHandler = SimpleXMLRPCServer.SimpleXMLRPCRequestHandler
serv = MyXMLRPCServer(("localhost", 0), requestHandler,
logRequests=False, bind_and_activate=False)
try:
serv.socket.settimeout(3)
serv.server_bind()
global ADDR, PORT, URL
ADDR, PORT = serv.socket.getsockname()
#connect to IP address directly. This avoids socket.create_connection()
#trying to connect to "localhost" using all address families, which
#causes slowdown e.g. on vista which supports AF_INET6. The server listens
#on AF_INET only.
URL = "http://%s:%d"%(ADDR, PORT)
serv.server_activate()
serv.register_introspection_functions()
serv.register_multicall_functions()
serv.register_function(pow)
serv.register_function(lambda x,y: x+y, 'add')
serv.register_function(my_function)
serv.register_instance(TestInstanceClass())
evt.set()
# handle up to 'numrequests' requests
while numrequests > 0:
serv.handle_request()
numrequests -= 1
except socket.timeout:
pass
finally:
serv.socket.close()
PORT = None
evt.set()
def http_multi_server(evt, numrequests, requestHandler=None):
class TestInstanceClass:
def div(self, x, y):
return x // y
def _methodHelp(self, name):
if name == 'div':
return 'This is the div function'
def my_function():
'''This is my function'''
return True
class MyXMLRPCServer(SimpleXMLRPCServer.MultiPathXMLRPCServer):
def get_request(self):
# Ensure the socket is always non-blocking. On Linux, socket
# attributes are not inherited like they are on *BSD and Windows.
s, port = self.socket.accept()
s.setblocking(True)
return s, port
if not requestHandler:
requestHandler = SimpleXMLRPCServer.SimpleXMLRPCRequestHandler
class MyRequestHandler(requestHandler):
rpc_paths = []
serv = MyXMLRPCServer(("localhost", 0), MyRequestHandler,
logRequests=False, bind_and_activate=False)
serv.socket.settimeout(3)
serv.server_bind()
try:
global ADDR, PORT, URL
ADDR, PORT = serv.socket.getsockname()
#connect to IP address directly. This avoids socket.create_connection()
#trying to connect to "localhost" using all address families, which
#causes slowdown e.g. on vista which supports AF_INET6. The server listens
#on AF_INET only.
URL = "http://%s:%d"%(ADDR, PORT)
serv.server_activate()
paths = ["/foo", "/foo/bar"]
for path in paths:
d = serv.add_dispatcher(path, SimpleXMLRPCServer.SimpleXMLRPCDispatcher())
d.register_introspection_functions()
d.register_multicall_functions()
serv.get_dispatcher(paths[0]).register_function(pow)
serv.get_dispatcher(paths[1]).register_function(lambda x,y: x+y, 'add')
evt.set()
# handle up to 'numrequests' requests
while numrequests > 0:
serv.handle_request()
numrequests -= 1
except socket.timeout:
pass
finally:
serv.socket.close()
PORT = None
evt.set()
# This function prevents errors like:
# <ProtocolError for localhost:57527/RPC2: 500 Internal Server Error>
def is_unavailable_exception(e):
'''Returns True if the given ProtocolError is the product of a server-side
exception caused by the 'temporarily unavailable' response sometimes
given by operations on non-blocking sockets.'''
# sometimes we get a -1 error code and/or empty headers
try:
if e.errcode == -1 or e.headers is None:
return True
exc_mess = e.headers.get('X-exception')
except AttributeError:
# Ignore socket.errors here.
exc_mess = str(e)
if exc_mess and 'temporarily unavailable' in exc_mess.lower():
return True
return False
@unittest.skipUnless(threading, 'Threading required for this test.')
class BaseServerTestCase(unittest.TestCase):
requestHandler = None
request_count = 1
threadFunc = staticmethod(http_server)
def setUp(self):
# enable traceback reporting
SimpleXMLRPCServer.SimpleXMLRPCServer._send_traceback_header = True
self.evt = threading.Event()
# start server thread to handle requests
serv_args = (self.evt, self.request_count, self.requestHandler)
threading.Thread(target=self.threadFunc, args=serv_args).start()
# wait for the server to be ready
self.evt.wait(10)
self.evt.clear()
def tearDown(self):
# wait on the server thread to terminate
self.evt.wait(10)
# disable traceback reporting
SimpleXMLRPCServer.SimpleXMLRPCServer._send_traceback_header = False
# NOTE: The tests in SimpleServerTestCase will ignore failures caused by
# "temporarily unavailable" exceptions raised in SimpleXMLRPCServer. This
# condition occurs infrequently on some platforms, frequently on others, and
# is apparently caused by using SimpleXMLRPCServer with a non-blocking socket
# If the server class is updated at some point in the future to handle this
# situation more gracefully, these tests should be modified appropriately.
class SimpleServerTestCase(BaseServerTestCase):
def test_simple1(self):
try:
p = xmlrpclib.ServerProxy(URL)
self.assertEqual(p.pow(6,8), 6**8)
except (xmlrpclib.ProtocolError, socket.error), e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_nonascii(self):
start_string = 'P\N{LATIN SMALL LETTER Y WITH CIRCUMFLEX}t'
end_string = 'h\N{LATIN SMALL LETTER O WITH HORN}n'
try:
p = xmlrpclib.ServerProxy(URL)
self.assertEqual(p.add(start_string, end_string),
start_string + end_string)
except (xmlrpclib.ProtocolError, socket.error) as e:
# ignore failures due to non-blocking socket unavailable errors.
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_unicode_host(self):
server = xmlrpclib.ServerProxy(u"http://%s:%d/RPC2"%(ADDR, PORT))
self.assertEqual(server.add("a", u"\xe9"), u"a\xe9")
# [ch] The test 404 is causing lots of false alarms.
def XXXtest_404(self):
# send POST with httplib, it should return 404 header and
# 'Not Found' message.
conn = httplib.HTTPConnection(ADDR, PORT)
conn.request('POST', '/this-is-not-valid')
response = conn.getresponse()
conn.close()
self.assertEqual(response.status, 404)
self.assertEqual(response.reason, 'Not Found')
def test_introspection1(self):
try:
p = xmlrpclib.ServerProxy(URL)
meth = p.system.listMethods()
expected_methods = set(['pow', 'div', 'my_function', 'add',
'system.listMethods', 'system.methodHelp',
'system.methodSignature', 'system.multicall'])
self.assertEqual(set(meth), expected_methods)
except (xmlrpclib.ProtocolError, socket.error), e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_introspection2(self):
try:
# test _methodHelp()
p = xmlrpclib.ServerProxy(URL)
divhelp = p.system.methodHelp('div')
self.assertEqual(divhelp, 'This is the div function')
except (xmlrpclib.ProtocolError, socket.error), e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_introspection3(self):
try:
# test native doc
p = xmlrpclib.ServerProxy(URL)
myfunction = p.system.methodHelp('my_function')
self.assertEqual(myfunction, 'This is my function')
except (xmlrpclib.ProtocolError, socket.error), e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_introspection4(self):
# the SimpleXMLRPCServer doesn't support signatures, but
# at least check that we can try making the call
try:
p = xmlrpclib.ServerProxy(URL)
divsig = p.system.methodSignature('div')
self.assertEqual(divsig, 'signatures not supported')
except (xmlrpclib.ProtocolError, socket.error), e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_multicall(self):
try:
p = xmlrpclib.ServerProxy(URL)
multicall = xmlrpclib.MultiCall(p)
multicall.add(2,3)
multicall.pow(6,8)
multicall.div(127,42)
add_result, pow_result, div_result = multicall()
self.assertEqual(add_result, 2+3)
self.assertEqual(pow_result, 6**8)
self.assertEqual(div_result, 127//42)
except (xmlrpclib.ProtocolError, socket.error), e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_non_existing_multicall(self):
try:
p = xmlrpclib.ServerProxy(URL)
multicall = xmlrpclib.MultiCall(p)
multicall.this_is_not_exists()
result = multicall()
# result.results contains;
# [{'faultCode': 1, 'faultString': '<type \'exceptions.Exception\'>:'
# 'method "this_is_not_exists" is not supported'>}]
self.assertEqual(result.results[0]['faultCode'], 1)
self.assertEqual(result.results[0]['faultString'],
'<type \'exceptions.Exception\'>:method "this_is_not_exists" '
'is not supported')
except (xmlrpclib.ProtocolError, socket.error), e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_dotted_attribute(self):
# Raises an AttributeError because private methods are not allowed.
self.assertRaises(AttributeError,
SimpleXMLRPCServer.resolve_dotted_attribute, str, '__add')
self.assertTrue(SimpleXMLRPCServer.resolve_dotted_attribute(str, 'title'))
# Get the test to run faster by sending a request with test_simple1.
# This avoids waiting for the socket timeout.
self.test_simple1()
def test_partial_post(self):
# Check that a partial POST doesn't make the server loop: issue #14001.
conn = httplib.HTTPConnection(ADDR, PORT)
conn.request('POST', '/RPC2 HTTP/1.0\r\nContent-Length: 100\r\n\r\nbye')
conn.close()
class MultiPathServerTestCase(BaseServerTestCase):
threadFunc = staticmethod(http_multi_server)
request_count = 2
def test_path1(self):
p = xmlrpclib.ServerProxy(URL+"/foo")
self.assertEqual(p.pow(6,8), 6**8)
self.assertRaises(xmlrpclib.Fault, p.add, 6, 8)
def test_path2(self):
p = xmlrpclib.ServerProxy(URL+"/foo/bar")
self.assertEqual(p.add(6,8), 6+8)
self.assertRaises(xmlrpclib.Fault, p.pow, 6, 8)
#A test case that verifies that a server using the HTTP/1.1 keep-alive mechanism
#does indeed serve subsequent requests on the same connection
class BaseKeepaliveServerTestCase(BaseServerTestCase):
#a request handler that supports keep-alive and logs requests into a
#class variable
class RequestHandler(SimpleXMLRPCServer.SimpleXMLRPCRequestHandler):
parentClass = SimpleXMLRPCServer.SimpleXMLRPCRequestHandler
protocol_version = 'HTTP/1.1'
myRequests = []
def handle(self):
self.myRequests.append([])
self.reqidx = len(self.myRequests)-1
return self.parentClass.handle(self)
def handle_one_request(self):
result = self.parentClass.handle_one_request(self)
self.myRequests[self.reqidx].append(self.raw_requestline)
return result
requestHandler = RequestHandler
def setUp(self):
#clear request log
self.RequestHandler.myRequests = []
return BaseServerTestCase.setUp(self)
#A test case that verifies that a server using the HTTP/1.1 keep-alive mechanism
#does indeed serve subsequent requests on the same connection
class KeepaliveServerTestCase1(BaseKeepaliveServerTestCase):
def test_two(self):
p = xmlrpclib.ServerProxy(URL)
#do three requests.
self.assertEqual(p.pow(6,8), 6**8)
self.assertEqual(p.pow(6,8), 6**8)
self.assertEqual(p.pow(6,8), 6**8)
#they should have all been handled by a single request handler
self.assertEqual(len(self.RequestHandler.myRequests), 1)
#check that we did at least two (the third may be pending append
#due to thread scheduling)
self.assertGreaterEqual(len(self.RequestHandler.myRequests[-1]), 2)
#test special attribute access on the serverproxy, through the __call__
#function.
class KeepaliveServerTestCase2(BaseKeepaliveServerTestCase):
#ask for two keepalive requests to be handled.
request_count=2
def test_close(self):
p = xmlrpclib.ServerProxy(URL)
#do some requests with close.
self.assertEqual(p.pow(6,8), 6**8)
self.assertEqual(p.pow(6,8), 6**8)
self.assertEqual(p.pow(6,8), 6**8)
p("close")() #this should trigger a new keep-alive request
self.assertEqual(p.pow(6,8), 6**8)
self.assertEqual(p.pow(6,8), 6**8)
self.assertEqual(p.pow(6,8), 6**8)
#they should have all been two request handlers, each having logged at least
#two complete requests
self.assertEqual(len(self.RequestHandler.myRequests), 2)
self.assertGreaterEqual(len(self.RequestHandler.myRequests[-1]), 2)
self.assertGreaterEqual(len(self.RequestHandler.myRequests[-2]), 2)
def test_transport(self):
p = xmlrpclib.ServerProxy(URL)
#do some requests with close.
self.assertEqual(p.pow(6,8), 6**8)
p("transport").close() #same as above, really.
self.assertEqual(p.pow(6,8), 6**8)
self.assertEqual(len(self.RequestHandler.myRequests), 2)
#A test case that verifies that gzip encoding works in both directions
#(for a request and the response)
@unittest.skipUnless(gzip, 'gzip not available')
class GzipServerTestCase(BaseServerTestCase):
#a request handler that supports keep-alive and logs requests into a
#class variable
class RequestHandler(SimpleXMLRPCServer.SimpleXMLRPCRequestHandler):
parentClass = SimpleXMLRPCServer.SimpleXMLRPCRequestHandler
protocol_version = 'HTTP/1.1'
def do_POST(self):
#store content of last request in class
self.__class__.content_length = int(self.headers["content-length"])
return self.parentClass.do_POST(self)
requestHandler = RequestHandler
class Transport(xmlrpclib.Transport):
#custom transport, stores the response length for our perusal
fake_gzip = False
def parse_response(self, response):
self.response_length=int(response.getheader("content-length", 0))
return xmlrpclib.Transport.parse_response(self, response)
def send_content(self, connection, body):
if self.fake_gzip:
#add a lone gzip header to induce decode error remotely
connection.putheader("Content-Encoding", "gzip")
return xmlrpclib.Transport.send_content(self, connection, body)
def setUp(self):
BaseServerTestCase.setUp(self)
def test_gzip_request(self):
t = self.Transport()
t.encode_threshold = None
p = xmlrpclib.ServerProxy(URL, transport=t)
self.assertEqual(p.pow(6,8), 6**8)
a = self.RequestHandler.content_length
t.encode_threshold = 0 #turn on request encoding
self.assertEqual(p.pow(6,8), 6**8)
b = self.RequestHandler.content_length
self.assertTrue(a>b)
def test_bad_gzip_request(self):
t = self.Transport()
t.encode_threshold = None
t.fake_gzip = True
p = xmlrpclib.ServerProxy(URL, transport=t)
cm = self.assertRaisesRegexp(xmlrpclib.ProtocolError,
re.compile(r"\b400\b"))
with cm:
p.pow(6, 8)
def test_gzip_response(self):
t = self.Transport()
p = xmlrpclib.ServerProxy(URL, transport=t)
old = self.requestHandler.encode_threshold
self.requestHandler.encode_threshold = None #no encoding
self.assertEqual(p.pow(6,8), 6**8)
a = t.response_length
self.requestHandler.encode_threshold = 0 #always encode
self.assertEqual(p.pow(6,8), 6**8)
b = t.response_length
self.requestHandler.encode_threshold = old
self.assertTrue(a>b)
#Test special attributes of the ServerProxy object
class ServerProxyTestCase(unittest.TestCase):
def setUp(self):
unittest.TestCase.setUp(self)
if threading:
self.url = URL
else:
# Without threading, http_server() and http_multi_server() will not
# be executed and URL is still equal to None. 'http://' is just
# enough to choose the scheme (HTTP)
self.url = 'http://'
def test_close(self):
p = xmlrpclib.ServerProxy(self.url)
self.assertEqual(p('close')(), None)
def test_transport(self):
t = xmlrpclib.Transport()
p = xmlrpclib.ServerProxy(self.url, transport=t)
self.assertEqual(p('transport'), t)
# This is a contrived way to make a failure occur on the server side
# in order to test the _send_traceback_header flag on the server
class FailingMessageClass(mimetools.Message):
def __getitem__(self, key):
key = key.lower()
if key == 'content-length':
return 'I am broken'
return mimetools.Message.__getitem__(self, key)
@unittest.skipUnless(threading, 'Threading required for this test.')
class FailingServerTestCase(unittest.TestCase):
def setUp(self):
self.evt = threading.Event()
# start server thread to handle requests
serv_args = (self.evt, 1)
threading.Thread(target=http_server, args=serv_args).start()
# wait for the server to be ready
self.evt.wait()
self.evt.clear()
def tearDown(self):
# wait on the server thread to terminate
self.evt.wait()
# reset flag
SimpleXMLRPCServer.SimpleXMLRPCServer._send_traceback_header = False
# reset message class
SimpleXMLRPCServer.SimpleXMLRPCRequestHandler.MessageClass = mimetools.Message
def test_basic(self):
# check that flag is false by default
flagval = SimpleXMLRPCServer.SimpleXMLRPCServer._send_traceback_header
self.assertEqual(flagval, False)
# enable traceback reporting
SimpleXMLRPCServer.SimpleXMLRPCServer._send_traceback_header = True
# test a call that shouldn't fail just as a smoke test
try:
p = xmlrpclib.ServerProxy(URL)
self.assertEqual(p.pow(6,8), 6**8)
except (xmlrpclib.ProtocolError, socket.error), e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_fail_no_info(self):
# use the broken message class
SimpleXMLRPCServer.SimpleXMLRPCRequestHandler.MessageClass = FailingMessageClass
try:
p = xmlrpclib.ServerProxy(URL)
p.pow(6,8)
except (xmlrpclib.ProtocolError, socket.error), e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e) and hasattr(e, "headers"):
# The two server-side error headers shouldn't be sent back in this case
self.assertTrue(e.headers.get("X-exception") is None)
self.assertTrue(e.headers.get("X-traceback") is None)
else:
self.fail('ProtocolError not raised')
def test_fail_with_info(self):
# use the broken message class
SimpleXMLRPCServer.SimpleXMLRPCRequestHandler.MessageClass = FailingMessageClass
# Check that errors in the server send back exception/traceback
# info when flag is set
SimpleXMLRPCServer.SimpleXMLRPCServer._send_traceback_header = True
try:
p = xmlrpclib.ServerProxy(URL)
p.pow(6,8)
except (xmlrpclib.ProtocolError, socket.error), e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e) and hasattr(e, "headers"):
# We should get error info in the response
expected_err = "invalid literal for int() with base 10: 'I am broken'"
self.assertEqual(e.headers.get("x-exception"), expected_err)
self.assertTrue(e.headers.get("x-traceback") is not None)
else:
self.fail('ProtocolError not raised')
class CGIHandlerTestCase(unittest.TestCase):
def setUp(self):
self.cgi = SimpleXMLRPCServer.CGIXMLRPCRequestHandler()
def tearDown(self):
self.cgi = None
def test_cgi_get(self):
with test_support.EnvironmentVarGuard() as env:
env['REQUEST_METHOD'] = 'GET'
# if the method is GET and no request_text is given, it runs handle_get
# get sysout output
with test_support.captured_stdout() as data_out:
self.cgi.handle_request()
# parse Status header
data_out.seek(0)
handle = data_out.read()
status = handle.split()[1]
message = ' '.join(handle.split()[2:4])
self.assertEqual(status, '400')
self.assertEqual(message, 'Bad Request')
def test_cgi_xmlrpc_response(self):
data = """<?xml version='1.0'?>
<methodCall>
<methodName>test_method</methodName>
<params>
<param>
<value><string>foo</string></value>
</param>
<param>
<value><string>bar</string></value>
</param>
</params>
</methodCall>
"""
with test_support.EnvironmentVarGuard() as env, \
test_support.captured_stdout() as data_out, \
test_support.captured_stdin() as data_in:
data_in.write(data)
data_in.seek(0)
env['CONTENT_LENGTH'] = str(len(data))
self.cgi.handle_request()
data_out.seek(0)
# will respond exception, if so, our goal is achieved ;)
handle = data_out.read()
            # start at the 44th char to skip the HTTP headers; we only need the XML payload
self.assertRaises(xmlrpclib.Fault, xmlrpclib.loads, handle[44:])
# Also test the content-length returned by handle_request
            # Using the same test method in order to avoid all the data-passing
            # boilerplate code.
# Test for bug: http://bugs.python.org/issue5040
content = handle[handle.find("<?xml"):]
self.assertEqual(
int(re.search('Content-Length: (\d+)', handle).group(1)),
len(content))
class FakeSocket:
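    # Minimal socket stand-in: every send()/sendall() is recorded into a StringIO
    # buffer, and makefile() raises so no response is ever read back.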
def __init__(self):
self.data = StringIO.StringIO()
def send(self, buf):
self.data.write(buf)
return len(buf)
def sendall(self, buf):
self.data.write(buf)
def getvalue(self):
return self.data.getvalue()
def makefile(self, x='r', y=-1):
raise RuntimeError
def close(self):
pass
class FakeTransport(xmlrpclib.Transport):
"""A Transport instance that records instead of sending a request.
This class replaces the actual socket used by httplib with a
FakeSocket object that records the request. It doesn't provide a
response.
"""
def make_connection(self, host):
conn = xmlrpclib.Transport.make_connection(self, host)
conn.sock = self.fake_socket = FakeSocket()
return conn
class TransportSubclassTestCase(unittest.TestCase):
def issue_request(self, transport_class):
"""Return an HTTP request made via transport_class."""
transport = transport_class()
proxy = xmlrpclib.ServerProxy("http://example.com/",
transport=transport)
try:
proxy.pow(6, 8)
except RuntimeError:
return transport.fake_socket.getvalue()
return None
def test_custom_user_agent(self):
class TestTransport(FakeTransport):
def send_user_agent(self, conn):
xmlrpclib.Transport.send_user_agent(self, conn)
conn.putheader("X-Test", "test_custom_user_agent")
req = self.issue_request(TestTransport)
self.assertIn("X-Test: test_custom_user_agent\r\n", req)
def test_send_host(self):
class TestTransport(FakeTransport):
def send_host(self, conn, host):
xmlrpclib.Transport.send_host(self, conn, host)
conn.putheader("X-Test", "test_send_host")
req = self.issue_request(TestTransport)
self.assertIn("X-Test: test_send_host\r\n", req)
def test_send_request(self):
class TestTransport(FakeTransport):
def send_request(self, conn, url, body):
xmlrpclib.Transport.send_request(self, conn, url, body)
conn.putheader("X-Test", "test_send_request")
req = self.issue_request(TestTransport)
self.assertIn("X-Test: test_send_request\r\n", req)
def test_send_content(self):
class TestTransport(FakeTransport):
def send_content(self, conn, body):
conn.putheader("X-Test", "test_send_content")
xmlrpclib.Transport.send_content(self, conn, body)
req = self.issue_request(TestTransport)
self.assertIn("X-Test: test_send_content\r\n", req)
@test_support.reap_threads
def test_main():
xmlrpc_tests = [XMLRPCTestCase, HelperTestCase, DateTimeTestCase,
BinaryTestCase, FaultTestCase, TransportSubclassTestCase]
xmlrpc_tests.append(SimpleServerTestCase)
xmlrpc_tests.append(KeepaliveServerTestCase1)
xmlrpc_tests.append(KeepaliveServerTestCase2)
xmlrpc_tests.append(GzipServerTestCase)
xmlrpc_tests.append(MultiPathServerTestCase)
xmlrpc_tests.append(ServerProxyTestCase)
xmlrpc_tests.append(FailingServerTestCase)
xmlrpc_tests.append(CGIHandlerTestCase)
test_support.run_unittest(*xmlrpc_tests)
if __name__ == "__main__":
test_main()
|
arc2owl.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# author: houzhiwei
# time: 2019/1/7 22:25
from owlready2 import *
import json
import re
from JSON2OWL.OwlConvert.OwlUtils import OWLUtils
from JSON2OWL.OwlConvert.Preprocessor import Preprocessor
import datetime
# os and threading are used below (module_path, stack_size/Thread); import them explicitly
import os
import threading
# import math
module_uri = 'http://www.egc.org/ont/process/arcgis'
onto = get_ontology(module_uri)
# onto, skos, dcterms, props = OWLUtils.load_common(onto)
onto, shacl, skos, dcterms, props, foaf = OWLUtils.load_common(onto)
onto, geospatial = OWLUtils.load_geo_vocabl(onto)
onto, gb, task, data, cyber, context = OWLUtils.load_common_for_process_tool(onto)
print('ontologies imported')
with onto:
class ArcGISTool(gb.GeoprocessingFunctionality):
pass
class ArcGISInput(cyber.Input):
pass
class ArcGISOutput(cyber.Output):
pass
class ArcGISOption(cyber.Option):
pass
onto.metadata.creator.append('houzhiwei')
onto.metadata.title.append('ArcGIS Tools')
onto.metadata.created.append(datetime.datetime.today())
module_path = os.path.dirname(__file__)
onto.metadata.versionInfo.append('10.1')
def get_task_type(full_name):
    task_type_pattern = r"\([a-zA-Z0-9*\-' ]+\)"
    task_types = re.findall(task_type_pattern, full_name)
    if len(task_types) > 1:
        # tool.hasKeywords.append(OWLUtils.remove_parenthesis(task_types[0]))
        task_type = Preprocessor.remove_parenthesis(task_types[-1])
    else:
        task_type = Preprocessor.remove_parenthesis(task_types[0])
    return task_type
def handle_task(tool, full_name, task_name, des):
config = OWLUtils.get_config(module_path + '/config.ini')
task_type = get_task_type(full_name)
task_cls = config.get('task', task_type)
# tool.keywords.append(task_type)
tool.subject.append(task_type)
# avoid duplicate
if not task[task_name + "_task"]:
task_ins = task[task_cls](task_name + "_task", prefLabel=locstr(task_name.replace('_', ' ') + " task", lang='en'))
task_ins.isAtomicTask = True
task_ins.identifier = task_name
else:
task_ins = task[task_name + "_task"]
    if task_ins not in tool.usedByTask:
        tool.usedByTask.append(task_ins)
    # check the task's processingTool list (not the tool's) before appending
    if tool not in task_ins.processingTool:
        task_ins.processingTool.append(tool)
task_ins.description.append(locstr(des, lang='en'))
def handle_parameters(tool, param):
    # some parameters do not have attributes such as isInputFile / isOutputFile
_name = Preprocessor.io_name(param['name'], onto)
if 'isInputFile' in param.keys() and param['isInputFile']:
p = ArcGISInput(_name, prefLabel=locstr(param['name'], lang='en'))
# p = ArcGISInput(0, prefLabel=locstr(param['name'], lang='en'))
tool.input.append(p)
p.isInput = param['isInputFile']
OWLUtils.link_to_domain_concept(p, param['name'].replace('_', ' '))
elif 'isOutputFile' in param.keys() and param['isOutputFile']:
p = ArcGISOutput(_name, prefLabel=locstr(param['name'], lang='en'))
# p = ArcGISOutput(0, prefLabel=locstr(param['name'], lang='en'))
tool.output.append(p)
p.isOutput = param['isOutputFile']
OWLUtils.link_to_domain_concept(p, param['name'].replace('_', ' '))
else:
p = ArcGISOption(_name, prefLabel=locstr(param['name'], lang='en'))
# p = ArcGISOption(0, prefLabel=locstr(param['name'], lang='en'))
tool.option.append(p)
dt = param['dataType']
if dt:
p.datatypeInString.append(param['dataType'])
p.datatype.append(OWLUtils.get_datatype_iris(param['dataType']))
OWLUtils.link_to_domain_concept(p, param['name'].replace('_', ' '))
p.identifier = param['name']
p.flag = param['name']
if 'dataType' in param.keys() and param['dataType']:
p.datatypeInString.append(param['dataType'])
p.description.append(param['description'])
p.isOptional = param['isOptional']
# datatype
datatype = param['dataType']
if datatype is None: datatype = "string"
dt = datatype.strip().lower().replace(' ', '_')
# print(dt)
dtype = data[dt]
if dtype is None: dtype = OWLUtils.get_datatype_iris(dt)
p.datatype.append(dtype)
if "available_values" in param.keys():
for value in param['available_values']:
p.availableValue.append(value)
def handle_example(example):
ex = 'Title: ' + example['title'] if example['title'] else ''
ex = ex + '\n' + 'Description: ' + example['description'] if example['description'] else ex
ex += '\n' + 'Code: \n' + example['code']
return ex
def map_to_owl(json_data):
    """Map JSON data to ontology properties."""
    for d in json_data:
        # duplicate tool names may occur
        name = re.match(r"[0-9a-zA-Z\-/* ]+ (?=\([\w' ]+\))", d['name'])
if name:
name_str = name.group().strip().lower().replace(' ', '_').replace('/', '_')
else:
continue
category = get_task_type(d['name'])
toolCls = tool_class(category)
        # if an instance with the same name already exists
        if onto[name_str]:
            # if it is the same tool (same syntax), just record the extra class and skip
            if onto[name_str].syntax == d['syntax']:
onto[name_str].is_a.append(toolCls)
continue
else:
name_str = name_str + '_' + category.lower().replace(' ', '_')
tool = toolCls(name_str, prefLabel=locstr(name_str.replace('_', ' '), lang='en'))
keywords = [name_str.replace('_', ' ')]
OWLUtils.link_to_domain_concept(tool, keywords)
tool.isToolOfSoftware.append(cyber.ArcGIS_Desktop)
tool.identifier = name_str.replace('_', ' ')
# tool.hasManualPageURL.append(d['manual_url'])
tool.description.append(locstr(d['description'], lang='en'))
tool.usage.append(OWLUtils.join_list(d['usage']))
tool.syntax.append(d['syntax'])
tool.example.append(handle_example(d['example']))
handle_task(tool, d['name'], name_str, d['description'])
for parameter in d['parameters']:
handle_parameters(tool, parameter)
def tool_class(category):
if category == '3D Analyst': category = 'ThreeDimensionalAnalyst'
tool_cls = category.replace(' ', '') + 'Tool'
return OWLUtils.create_onto_class(onto, tool_cls, ArcGISTool)
if __name__ == "__main__":
with open(module_path + '/arcgis.json', 'r') as f:
jdata = json.load(f) # list
# length = len(jdata)
    # a larger thread stack is needed, otherwise the conversion raises a stack overflow
    size = 1024 * 1024 * 1024  # this value is system-dependent
    threading.stack_size(size)
    # pass the callable and its argument separately; calling map_to_owl(jdata) here
    # would run it in the main thread and hand Thread a None target
    thread = threading.Thread(target=map_to_owl, args=(jdata,))
    thread.start()
    thread.join()  # wait for the conversion to finish before saving
    onto.save(file='arcgis.owl', format="rdfxml")
# update task ontology
task.save()
print('ArcGIS Done!')
|
thread_utils.py
|
#
# This file is part of khmer, https://github.com/dib-lab/khmer/, and is
# Copyright (C) Michigan State University, 2009-2015. It is licensed under
# the three-clause BSD license; see LICENSE.
# Contact: khmer-project@idyll.org
#
"""Utilities for dealing with multithreaded processing of short reads."""
from __future__ import print_function, unicode_literals
import threading
import sys
import screed
from khmer import utils
# stdlib queue module was renamed on Python 3
try:
import queue
except ImportError:
import Queue as queue
DEFAULT_WORKER_THREADS = 8
DEFAULT_GROUPSIZE = 100
def verbose_loader(filename):
"""Screed iterator that additionally prints progress info to stderr."""
screed_iter = screed.open(filename, parse_description=False)
for n, record in enumerate(screed_iter):
if n % 100000 == 0:
print('... filtering', n, file=sys.stderr)
yield record
verbose_fasta_iter = verbose_loader
class SequenceGroup(object):
def __init__(self, order, seqlist):
self.order = order
self.seqlist = seqlist
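# two records are considered a pair when their names match up to the trailing
# "/1" or "/2" suffix (e.g. 'read/1' and 'read/2')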
def is_pair(r1, r2):
a = r1['name'].split('/')[0]
b = r2['name'].split('/')[0]
return (a == b)
class ThreadedSequenceProcessor(object):
QUEUESIZE = 50
def __init__(self, process_fn, n_workers=DEFAULT_WORKER_THREADS,
group_size=DEFAULT_GROUPSIZE, verbose=True):
self.process_fn = process_fn
self.n_workers = n_workers
self.group_size = group_size
self.inqueue = queue.Queue(self.QUEUESIZE)
self.outqueue = queue.Queue(self.QUEUESIZE)
self.worker_count = 0
self.worker_count_lock = threading.Lock()
self.done = False
self.verbose = verbose
self.n_processed = 0
self.n_written = 0
self.bp_processed = 0
self.bp_written = 0
self.tallies_lock = threading.Lock()
def start(self, inputiter, outfp):
if self.verbose:
print('starting threads', file=sys.stderr)
try:
for _ in range(self.n_workers):
t = threading.Thread(target=self.do_process)
self.worker_count += 1
t.start()
if self.verbose:
print('starting writer', file=sys.stderr)
w = threading.Thread(target=self.do_write, args=(outfp,))
w.start()
if self.verbose:
print('loading...', file=sys.stderr)
self.push_sequences(inputiter)
if self.verbose:
print('done loading in sequences', file=sys.stderr)
self.done = True
w.join()
except Exception:
self.done = True
raise
def push_sequences(self, inputiter):
batch = []
last_record = None
i = 0
for record in inputiter:
if i >= self.group_size:
# keep pairs together in batches, to retain the interleaving.
if is_pair(record, last_record):
batch.append(record)
g = SequenceGroup(0, batch)
self.inqueue.put(g)
batch = []
else:
g = SequenceGroup(0, batch)
self.inqueue.put(g)
batch = [record]
i = 0
else:
batch.append(record)
last_record = record
i += 1
# submit last set of sequences
if batch:
g = SequenceGroup(0, batch)
self.inqueue.put(g)
def do_process(self):
inq = self.inqueue
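        # keep pulling batches until the feeder reports done AND the queue is drained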
while not self.done or not inq.empty():
try:
g = inq.get(True, 1)
except queue.Empty:
continue
bp_processed = 0
bp_written = 0
keep = []
for record in g.seqlist:
name, sequence = self.process_fn(record)
bp_processed += len(record['sequence'])
if name:
quality = record.get('quality')
if quality:
quality = quality[:len(sequence)]
bp_written += len(sequence)
keep.append((name, sequence, quality))
self.outqueue.put(SequenceGroup(0, keep))
# the tallies are shared among workers, hence we lock
with self.tallies_lock:
self.n_processed += len(g.seqlist)
self.n_written += len(keep)
self.bp_processed += bp_processed
self.bp_written += bp_written
if self.verbose and self.n_processed % 500000 == 0:
print("processed %d / wrote %d / removed %d" %
(self.n_processed, self.n_written,
self.n_processed - self.n_written), file=sys.stderr)
print("processed %d bp / wrote %d bp / removed %d bp" %
(self.bp_processed, self.bp_written,
self.bp_processed - self.bp_written),
file=sys.stderr)
discarded = self.bp_processed - self.bp_written
f = float(discarded) / float(self.bp_processed) * 100
print("discarded %.1f%%" % f, file=sys.stderr)
# end of thread; exit, decrement worker count.
with self.worker_count_lock:
self.worker_count -= 1
def do_write(self, outfp):
outq = self.outqueue
while self.worker_count > 0 or not outq.empty():
try:
g = outq.get(True, 1)
except queue.Empty:
continue
for name, seq, quality in g.seqlist:
if quality: # write FASTQ; CTB hack.
outfp.write('@%s\n%s\n+\n%s\n' % (name, seq, quality))
else:
outfp.write('>%s\n%s\n' % (name, seq,))
if self.verbose:
print("DONE writing.\nprocessed %d / wrote %d / removed %d" %
(self.n_processed, self.n_written,
self.n_processed - self.n_written), file=sys.stderr)
print("processed %d bp / wrote %d bp / removed %d bp" %
(self.bp_processed, self.bp_written,
self.bp_processed - self.bp_written), file=sys.stderr)
discarded = self.bp_processed - self.bp_written
f = float(discarded) / float(self.bp_processed) * 100
print("discarded %.1f%%" % f, file=sys.stderr)
# vim: set ft=python ts=4 sts=4 sw=4 et tw=79:
|
server.py
|
import abc
import socket
from threading import Lock, Thread
import six.moves.cPickle as pickle
from flask import Flask, request
from multiprocessing import Process
from ..utils.sockets import determine_master
from ..utils.sockets import receive, send
from ..utils.serialization import dict_to_model
# from multiprocessing import Lock
from ..utils.rwlock import RWLock as Lock
from ..utils.notebook_utils import is_running_in_notebook
class BaseParameterServer(object):
"""BaseParameterServer
Parameter servers can be started and stopped. Server implementations have
to cater to the needs of their respective BaseParameterClient instances.
"""
__metaclass__ = abc.ABCMeta
def __init__(self):
raise NotImplementedError
@abc.abstractmethod
def start(self):
"""Start the parameter server instance.
"""
raise NotImplementedError
@abc.abstractmethod
def stop(self):
"""Terminate the parameter server instance.
"""
raise NotImplementedError
class HttpServer(BaseParameterServer):
"""HttpServer
Flask HTTP server. Defines two routes, `/parameters` to GET current
parameters held by this server, and `/update` which can be used to
POST updates.
"""
def __init__(self, model, optimizer, mode, port=4000, debug=True,
threaded=True, use_reloader=True):
"""Initializes and HTTP server from a serialized Keras model, elephas optimizer,
a parallelisation mode and a port to run the Flask application on. In
hogwild mode no read- or write-locks will be acquired, in asynchronous
mode this is the case.
:param model: Serialized Keras model
:param optimizer: Elephas optimizer
:param mode: parallelization mode, either `asynchronous` or `hogwild`
:param port: int, port to run the application on
:param debug: boolean, Flask debug mode
:param threaded: boolean, Flask threaded application mode
:param use_reloader: boolean, Flask `use_reloader` argument
"""
self.master_network = dict_to_model(model)
self.mode = mode
self.master_url = None
self.optimizer = optimizer
self.port = port
if is_running_in_notebook():
self.threaded = False
self.use_reloader = False
self.debug = False
else:
self.debug = debug
self.threaded = threaded
self.use_reloader = use_reloader
self.lock = Lock()
self.pickled_weights = None
self.weights = self.master_network.get_weights()
self.server = Process(target=self.start_flask_service)
def start(self):
self.server.start()
self.master_url = determine_master(self.port)
def stop(self):
self.server.terminate()
self.server.join()
def start_flask_service(self):
"""Define Flask parameter server service.
This HTTP server can do two things: get the current model
parameters and update model parameters. After registering
the `parameters` and `update` routes, the service will
get started.
"""
app = Flask(__name__)
self.app = app
@app.route('/')
def home():
return 'Elephas'
@app.route('/parameters', methods=['GET'])
def handle_get_parameters():
if self.mode == 'asynchronous':
self.lock.acquire_read()
self.pickled_weights = pickle.dumps(self.weights, -1)
pickled_weights = self.pickled_weights
if self.mode == 'asynchronous':
self.lock.release()
return pickled_weights
@app.route('/update', methods=['POST'])
def handle_update_parameters():
delta = pickle.loads(request.data)
if self.mode == 'asynchronous':
self.lock.acquire_write()
if not self.master_network.built:
self.master_network.build()
def base_constraint(a): return a
constraints = [base_constraint for _ in self.weights]
self.weights = self.optimizer.get_updates(
self.weights, constraints, delta)
if self.mode == 'asynchronous':
self.lock.release()
return 'Update done'
# self.app.run(host='0.0.0.0', debug=self.debug, port=self.port,
# threaded=self.threaded, use_reloader=self.use_reloader)
from werkzeug.serving import run_simple
run_simple(self.master_url.split(':')[0], self.port, self.app)
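
# Example (sketch, not part of the original module): how a worker/client could
# talk to HttpServer's two routes, assuming a server is running on localhost:4000.
# The URL, port and the zero "delta" are illustrative assumptions, not elephas API:
#
#   import six.moves.cPickle as pickle
#   import requests
#   weights = pickle.loads(requests.get("http://localhost:4000/parameters").content)
#   delta = [w * 0 for w in weights]          # no-op update, just to exercise the route
#   requests.post("http://localhost:4000/update", data=pickle.dumps(delta, -1))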
class SocketServer(BaseParameterServer):
"""SocketServer
A basic Python socket server
"""
def __init__(self, model, port=4000):
"""Initializes a Socket server instance from a serializer Keras model
and a port to listen to.
:param model: Serialized Keras model
:param port: int, port to run the socket on
"""
self.model = dict_to_model(model)
self.port = port
self.socket = None
self.runs = False
self.connections = []
self.lock = Lock()
self.thread = None
def start(self):
if self.thread is not None:
self.stop()
self.thread = Thread(target=self.start_server)
self.thread.start()
def stop(self):
self.stop_server()
self.thread.join()
self.thread = None
def start_server(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
sock.bind(('0.0.0.0', self.port))
sock.listen(5)
self.socket = sock
self.runs = True
self.run()
def stop_server(self):
self.runs = False
if self.socket:
for thread in self.connections:
thread.join()
del thread
self.socket.close()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.connect(("localhost", self.port))
sock.close()
except Exception:
pass
self.socket = None
self.connections = []
def update_parameters(self, conn):
data = receive(conn)
delta = data['delta']
with self.lock:
weights = self.model.get_weights() + delta
self.model.set_weights(weights)
def get_parameters(self, conn):
with self.lock:
weights = self.model.get_weights()
send(conn, weights)
def action_listener(self, conn):
while self.runs:
get_or_update = conn.recv(1).decode()
if get_or_update == 'u':
self.update_parameters(conn)
elif get_or_update == 'g':
self.get_parameters(conn)
else:
raise ValueError('Received invalid action')
def run(self):
while self.runs:
try:
conn, addr = self.socket.accept()
                thread = Thread(target=self.action_listener, args=(conn,))  # action_listener only takes the connection
thread.start()
self.connections.append(thread)
except Exception:
print("Failed to set up socket connection.")
|
xadmin_action.py
|
import os
import threading
import abupy
import numpy as np
from abupy import AbuFactorBuyBreak, AbuBenchmark, AbuCapital, ABuPickTimeExecute, AbuMetricsBase, AbuFactorAtrNStop, \
AbuFactorCloseAtrNStop, \
AbuFactorPreAtrNStop, AbuFactorSellBreak, ABuGridHelper, GridSearch, ABuFileUtil, WrsmScorer, EMarketSourceType
from base.models import Stock
from xadmin.plugins.actions import BaseActionView
class GridSearchAction(BaseActionView):
action_name = "change_grid_search" #: 相当于这个 Action 的唯一标示, 尽量用比较针对性的名字
description = u'Grid Search 最优参数 %(verbose_name_plural)s' #: 描述, 出现在 Action 菜单中, 可以使用 ``%(verbose_name_plural)s`` 代替 Model 的名字.
model_perm = 'change'
lock = threading.Lock()
console_str = []
def booth(self, obj, score_fn):
self.lock.acquire()
if obj:
print('Thread_id', obj)
benchmark = AbuBenchmark(start=str(obj.start), end=str(obj.end))
read_cash = obj.read_cash
stocks = obj.stocks.all()
choice_symbols = []
for stock in stocks:
choice_symbols.append(stock.symbol)
sell_factors_product, buy_factors_product = self.gen_factor_params(obj, True)
grid_search = GridSearch(read_cash, choice_symbols, benchmark=benchmark,
buy_factors_product=buy_factors_product,
sell_factors_product=sell_factors_product)
"""
注意下面的运行耗时大约1小时多,如果所有cpu都用上的话,也可以设置n_jobs为 < cpu进程数,一边做其它的一边跑
"""
# 运行GridSearch n_jobs=-1启动cpu个数的进程数
scores, score_tuple_array = grid_search.fit(n_jobs=-1)
"""
针对运行完成输出的score_tuple_array可以使用dump_pickle保存在本地,以方便修改其它验证效果。
"""
ABuFileUtil.dump_pickle(score_tuple_array, score_fn)
print('组合因子参数数量{}'.format(len(buy_factors_product) * len(sell_factors_product)))
print('最终评分结果数量{}'.format(len(scores)))
else:
print("Thread_id", obj, "No more")
self.lock.release()
def gen_factor_params(self, obj, show=True):
"""
参数进行排列组合
:return:
"""
buy_factors = []
sell_factors = []
for factor_buy in obj.factor_buys.all():
buy_factors.append(eval(factor_buy.get_class_name_display()))
for factor_sell in obj.factor_sells.all():
sell_factors.append(eval(factor_sell.get_class_name_display()))
sell_factors_product = ABuGridHelper.gen_factor_grid(
ABuGridHelper.K_GEN_FACTOR_PARAMS_SELL,
sell_factors)
if show:
            print('sell factor parameters have {} combinations'.format(len(sell_factors_product)))
            print('sell factor combination 0 is {}'.format(sell_factors_product[0]))
buy_factors_product = ABuGridHelper.gen_factor_grid(
ABuGridHelper.K_GEN_FACTOR_PARAMS_BUY, buy_factors)
if show:
            print('buy factor parameters have {} combinations'.format(len(buy_factors_product)))
            print('buy factor combinations are {}'.format(buy_factors_product))
return sell_factors_product, buy_factors_product
def do_action(self, queryset):
for obj in queryset:
print('GridSearchAction')
score_fn = '../gen/score_tuple_array_%s' % str(obj.id)
if not ABuFileUtil.file_exist(score_fn):
                new_thread = threading.Thread(target=self.booth, args=(obj, score_fn))
new_thread.start()
else:
"""
直接读取本地序列化文件
"""
score_tuple_array = ABuFileUtil.load_pickle(score_fn)
# 实例化一个评分类WrsmScorer,它的参数为之前GridSearch返回的score_tuple_array对象
scorer = WrsmScorer(score_tuple_array)
print('scorer.score_pd.tail():\n', scorer.score_pd.tail())
# score_tuple_array[658]与grid_search.best_score_tuple_grid是一致的
scorer_returns_max = scorer.fit_score()
# 因为是倒序排序,所以index最后一个为最优参数
best_score_tuple_grid = score_tuple_array[scorer_returns_max.index[-1]]
# 由于篇幅,最优结果只打印文字信息
AbuMetricsBase.show_general(best_score_tuple_grid.orders_pd,
best_score_tuple_grid.action_pd,
best_score_tuple_grid.capital,
best_score_tuple_grid.benchmark,
only_info=True)
                # finally, print the buy and sell factors used by the best result when only returns are considered
print('best_score_tuple_grid.buy_factors, best_score_tuple_grid.sell_factors:\n',
best_score_tuple_grid.buy_factors,
best_score_tuple_grid.sell_factors)
obj.description = 'best_score_tuple_grid.buy_factors:%s, best_score_tuple_grid.sell_factors:%s' % (
best_score_tuple_grid.buy_factors,
best_score_tuple_grid.sell_factors)
obj.save()
|
slack.py
|
import json
import logging
import random
import re
import requests
import sys
import time
import traceback
import websocket
from markdownify import MarkdownConverter
from will import settings
from .base import IOBackend
from will.utils import Bunch, UNSURE_REPLIES, clean_for_pickling
from will.mixins import SleepMixin, StorageMixin
from multiprocessing import Process
from will.abstractions import Event, Message, Person, Channel
from slackclient import SlackClient
SLACK_SEND_URL = "https://slack.com/api/chat.postMessage"
SLACK_SET_TOPIC_URL = "https://slack.com/api/channels.setTopic"
SLACK_PRIVATE_SET_TOPIC_URL = "https://slack.com/api/groups.setTopic"
class SlackMarkdownConverter(MarkdownConverter):
def convert_strong(self, el, text):
return '*%s*' % text if text else ''
class SlackBackend(IOBackend, SleepMixin, StorageMixin):
friendly_name = "Slack"
internal_name = "will.backends.io_adapters.slack"
required_settings = [
{
"name": "SLACK_API_TOKEN",
"obtain_at": """1. Go to https://api.slack.com/custom-integrations/legacy-tokens and sign in as yourself (or a user for Will).
2. Find the workspace you want to use, and click "Create token."
3. Set this token as SLACK_API_TOKEN."""
}
]
def get_channel_from_name(self, name):
for k, c in self.channels.items():
if c.name.lower() == name.lower() or c.id.lower() == name.lower():
return c
def normalize_incoming_event(self, event):
if (
"type" in event and
event["type"] == "message" and
("subtype" not in event or event["subtype"] != "message_changed") and
# Ignore thread summary events (for now.)
# TODO: We should stack these into the history.
("subtype" not in event or ("message" in event and "thread_ts" not in event["message"]))
):
# print("slack: normalize_incoming_event - %s" % event)
# Sample of group message
# {u'source_team': u'T5ACF70KV', u'text': u'test',
# u'ts': u'1495661121.838366', u'user': u'U5ACF70RH',
# u'team': u'T5ACF70KV', u'type': u'message', u'channel': u'C5JDAR2S3'}
# Sample of 1-1 message
# {u'source_team': u'T5ACF70KV', u'text': u'test',
# u'ts': u'1495662397.335424', u'user': u'U5ACF70RH',
# u'team': u'T5ACF70KV', u'type': u'message', u'channel': u'D5HGP0YE7'}
# Threaded message
# {u'event_ts': u'1507601477.000073', u'ts': u'1507601477.000073',
# u'subtype': u'message_replied', u'message':
# {u'thread_ts': u'1507414046.000010', u'text': u'hello!',
# u'ts': u'1507414046.000010', u'unread_count': 2,
# u'reply_count': 2, u'user': u'U5GUL9D9N', u'replies':
# [{u'user': u'U5ACF70RH', u'ts': u'1507601449.000007'}, {
# u'user': u'U5ACF70RH', u'ts': u'1507601477.000063'}],
# u'type': u'message', u'bot_id': u'B5HL9ABFE'},
# u'type': u'message', u'hidden': True, u'channel': u'D5HGP0YE7'}
sender = self.people[event["user"]]
channel = clean_for_pickling(self.channels[event["channel"]])
# print "channel: %s" % channel
interpolated_handle = "<@%s>" % self.me.id
real_handle = "@%s" % self.me.handle
will_is_mentioned = False
will_said_it = False
is_private_chat = False
thread = None
if "thread_ts" in event:
thread = event["thread_ts"]
# If the parent thread is a 1-1 between Will and I, also treat that as direct.
# Since members[] still comes in on the thread event, we can trust this, even if we're
# in a thread.
if len(channel.members.keys()) == 0:
is_private_chat = True
# <@U5GUL9D9N> hi
# TODO: if there's a thread with just will and I on it, treat that as direct.
is_direct = False
if is_private_chat or event["text"].startswith(interpolated_handle) or event["text"].startswith(real_handle):
is_direct = True
if event["text"].startswith(interpolated_handle):
event["text"] = event["text"][len(interpolated_handle):].strip()
if event["text"].startswith(real_handle):
event["text"] = event["text"][len(real_handle):].strip()
if interpolated_handle in event["text"] or real_handle in event["text"]:
will_is_mentioned = True
if event["user"] == self.me.id:
will_said_it = True
m = Message(
content=event["text"],
type=event["type"],
is_direct=is_direct,
is_private_chat=is_private_chat,
is_group_chat=not is_private_chat,
backend=self.internal_name,
sender=sender,
channel=channel,
thread=thread,
will_is_mentioned=will_is_mentioned,
will_said_it=will_said_it,
backend_supports_acl=True,
original_incoming_event=clean_for_pickling(event),
)
return m
else:
            # An event type the slack backend has no idea how to handle.
pass
def set_topic(self, event):
headers = {'Accept': 'text/plain'}
data = self.set_data_channel_and_thread(event)
data.update({
"token": settings.SLACK_API_TOKEN,
"as_user": True,
"topic": event.content,
})
if data["channel"].startswith("G"):
url = SLACK_PRIVATE_SET_TOPIC_URL
else:
url = SLACK_SET_TOPIC_URL
r = requests.post(
url,
headers=headers,
data=data,
**settings.REQUESTS_OPTIONS
)
self.handle_request(r, data)
def handle_outgoing_event(self, event):
if event.type in ["say", "reply"]:
if "kwargs" in event and "html" in event.kwargs and event.kwargs["html"]:
event.content = SlackMarkdownConverter().convert(event.content)
event.content = event.content.replace("&", "&")
event.content = event.content.replace("\_", "_")
kwargs = {}
if "kwargs" in event:
kwargs.update(**event.kwargs)
if hasattr(event, "source_message") and event.source_message and "channel" not in kwargs:
self.send_message(event)
else:
# Came from webhook/etc
# TODO: finish this.
if "room" in kwargs:
event.channel = self.get_channel_from_name(kwargs["room"])
elif "channel" in kwargs:
event.channel = self.get_channel_from_name(kwargs["channel"])
else:
if hasattr(settings, "SLACK_DEFAULT_ROOM"):
event.channel = self.get_channel_from_name(settings.SLACK_DEFAULT_ROOM)
else:
# Set self.me
self.people
for c in self.channels.values():
if c.name != c.id and self.me.id in c.members:
event.channel = c
break
self.send_message(event)
if event.type in ["topic_change", ]:
self.set_topic(event)
elif (
event.type == "message.no_response" and
event.data.is_direct and
event.data.will_said_it is False
):
event.content = random.choice(UNSURE_REPLIES)
self.send_message(event)
def handle_request(self, r, data):
resp_json = r.json()
if not resp_json["ok"]:
if resp_json["error"] == "not_in_channel":
channel = self.get_channel_from_name(data["channel"])
if not hasattr(self, "me") or not hasattr(self.me, "handle"):
self.people
logging.critical(
"I was asked to post to the slack %s channel, but I haven't been invited. "
"Please invite me with '/invite @%s'" % (channel.name, self.me.handle)
)
else:
logging.error("Error sending to slack: %s" % resp_json["error"])
logging.error(resp_json)
assert resp_json["ok"]
    def set_data_channel_and_thread(self, event, data=None):
        # avoid a shared mutable default argument
        if data is None:
            data = {}
        if "channel" in event:
# We're coming off an explicit set.
channel_id = event.channel.id
else:
if "source_message" in event:
# Mentions that come back via self.say()
if hasattr(event.source_message, "data"):
channel_id = event.source_message.data.channel.id
if hasattr(event.source_message.data, "thread"):
data.update({
"thread_ts": event.source_message.data.thread
})
else:
# Mentions that come back via self.say() with a specific room (I think)
channel_id = event.source_message.channel.id
if hasattr(event.source_message, "thread"):
data.update({
"thread_ts": event.source_message.thread
})
else:
# Mentions that come back via self.reply()
if hasattr(event.data, "original_incoming_event"):
if hasattr(event.data.original_incoming_event.channel, "id"):
channel_id = event.data.original_incoming_event.channel.id
else:
channel_id = event.data.original_incoming_event.channel
else:
if hasattr(event.data["original_incoming_event"].data.channel, "id"):
channel_id = event.data["original_incoming_event"].data.channel.id
else:
channel_id = event.data["original_incoming_event"].data.channel
try:
# If we're starting a thread
if "kwargs" in event and "start_thread" in event.kwargs and event.kwargs["start_thread"] and ("thread_ts" not in data or not data["thread_ts"]):
if hasattr(event.source_message, "original_incoming_event"):
data.update({
"thread_ts": event.source_message.original_incoming_event["ts"]
})
elif (
hasattr(event.source_message, "data") and
hasattr(event.source_message.data, "original_incoming_event") and
"ts" in event.source_message.data.original_incoming_event
):
logging.error(
"Hm. I was told to start a new thread, but while using .say(), instead of .reply().\n"
"This doesn't really make sense, but I'm going to make the best of it by pretending you "
"used .say() and threading off of your message.\n"
"Please update your plugin to use .reply() when you have a second!"
)
data.update({
"thread_ts": event.source_message.data.original_incoming_event["ts"]
})
else:
if hasattr(event.data.original_incoming_event, "thread_ts"):
data.update({
"thread_ts": event.data.original_incoming_event.thread_ts
})
elif "thread" in event.data.original_incoming_event.data:
data.update({
"thread_ts": event.data.original_incoming_event.data.thread
})
except:
logging.info(traceback.format_exc().split(" ")[-1])
pass
data.update({
"channel": channel_id,
})
return data
def send_message(self, event):
data = {}
if hasattr(event, "kwargs"):
data.update(event.kwargs)
# Add slack-specific functionality
if "color" in event.kwargs:
data.update({
"attachments": json.dumps([
{
"fallback": event.content,
"color": self._map_color(event.kwargs["color"]),
"text": event.content,
}
]),
})
else:
data.update({
"text": event.content,
})
else:
data.update({
"text": event.content,
})
data = self.set_data_channel_and_thread(event, data=data)
# Auto-link mention names
if "text" in data:
if data["text"].find("<@") != -1:
data["text"] = data["text"].replace("<@", "<@")
data["text"] = data["text"].replace(">", ">")
elif "attachments" in data and "text" in data["attachments"][0]:
if data["attachments"][0]["text"].find("<@") != -1:
data["attachments"][0]["text"] = data["attachments"][0]["text"].replace("<@", "<@")
data["attachments"][0]["text"] = data["attachments"][0]["text"].replace(">", ">")
data.update({
"token": settings.SLACK_API_TOKEN,
"as_user": True,
})
if hasattr(event, "kwargs") and "html" in event.kwargs and event.kwargs["html"]:
data.update({
"parse": "full",
})
headers = {'Accept': 'text/plain'}
r = requests.post(
SLACK_SEND_URL,
headers=headers,
data=data,
**settings.REQUESTS_OPTIONS
)
self.handle_request(r, data)
def _map_color(self, color):
# Turn colors into hex values, handling old slack colors, etc
if color == "red":
return "danger"
elif color == "yellow":
return "warning"
elif color == "green":
return "good"
return color
def join_channel(self, channel_id):
return self.client.api_call(
"channels.join",
channel=channel_id,
)
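    # people, channels and client below are built lazily on first access and cached;
    # they also fall back to storage when the RTM server data isn't available yet.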
@property
def people(self):
if not hasattr(self, "_people") or self._people is {}:
self._update_people()
return self._people
@property
def channels(self):
if not hasattr(self, "_channels") or self._channels is {}:
self._update_channels()
return self._channels
@property
def client(self):
if not hasattr(self, "_client"):
self._client = SlackClient(settings.SLACK_API_TOKEN)
return self._client
def _update_channels(self):
channels = {}
for c in self.client.server.channels:
members = {}
for m in c.members:
members[m] = self.people[m]
channels[c.id] = Channel(
id=c.id,
name=c.name,
source=clean_for_pickling(c),
members=members
)
if len(channels.keys()) == 0:
# Server isn't set up yet, and we're likely in a processing thread,
if self.load("slack_channel_cache", None):
self._channels = self.load("slack_channel_cache", None)
else:
self._channels = channels
self.save("slack_channel_cache", channels)
def _update_people(self):
people = {}
self.handle = self.client.server.username
for k, v in self.client.server.users.items():
user_timezone = None
if v.tz:
user_timezone = v.tz
people[k] = Person(
id=v.id,
mention_handle="<@%s>" % v.id,
handle=v.name,
source=clean_for_pickling(v),
name=v.real_name,
)
if v.name == self.handle:
self.me = Person(
id=v.id,
mention_handle="<@%s>" % v.id,
handle=v.name,
source=clean_for_pickling(v),
name=v.real_name,
)
if user_timezone and user_timezone != 'unknown':
people[k].timezone = user_timezone
if v.name == self.handle:
self.me.timezone = user_timezone
if len(people.keys()) == 0:
# Server isn't set up yet, and we're likely in a processing thread,
if self.load("slack_people_cache", None):
self._people = self.load("slack_people_cache", None)
if not hasattr(self, "me") or not self.me:
self.me = self.load("slack_me_cache", None)
if not hasattr(self, "handle") or not self.handle:
self.handle = self.load("slack_handle_cache", None)
else:
self._people = people
self.save("slack_people_cache", people)
self.save("slack_me_cache", self.me)
self.save("slack_handle_cache", self.handle)
def _update_backend_metadata(self):
self._update_people()
self._update_channels()
def _watch_slack_rtm(self):
while True:
try:
if self.client.rtm_connect():
self._update_backend_metadata()
num_polls_between_updates = 30 / settings.EVENT_LOOP_INTERVAL # Every 30 seconds
current_poll_count = 0
while True:
events = self.client.rtm_read()
if len(events) > 0:
# TODO: only handle events that are new.
# print(len(events))
for e in events:
self.handle_incoming_event(e)
                        # Update channels/people/me/etc every num_polls_between_updates polls (~30s).
current_poll_count += 1
if current_poll_count > num_polls_between_updates:
self._update_backend_metadata()
current_poll_count = 0
self.sleep_for_event_loop()
except websocket.WebSocketConnectionClosedException:
logging.error('Encountered WebSocketConnectionClosedException attempting reconnect in 2 seconds')
time.sleep(2)
except (KeyboardInterrupt, SystemExit):
break
except:
logging.critical("Error in watching slack RTM: \n%s" % traceback.format_exc())
break
def bootstrap(self):
        # Bootstrap must provide a way to have:
# a) self.normalize_incoming_event fired, or incoming events put into self.incoming_queue
# b) any necessary threads running for a)
# c) self.me (Person) defined, with Will's info
# d) self.people (dict of People) defined, with everyone in an organization/backend
# e) self.channels (dict of Channels) defined, with all available channels/rooms.
# Note that Channel asks for members, a list of People.
# f) A way for self.handle, self.me, self.people, and self.channels to be kept accurate,
# with a maximum lag of 60 seconds.
# Property, auto-inits.
self.client
self.rtm_thread = Process(target=self._watch_slack_rtm)
self.rtm_thread.start()
def terminate(self):
if hasattr(self, "rtm_thread"):
self.rtm_thread.terminate()
while self.rtm_thread.is_alive():
time.sleep(0.2)
|
__init__.py
|
#coding:utf-8
import os
import select
import signal
import sys
import threading
from wsgiref.simple_server import make_server
from caty.core.system import System
from caty.front.console import CatyShell
from caty.front.web.console import HTTPConsoleThread
from caty.util import init_writer
from caty.util.syslog import init_log, get_start_log
from caty.util.optionparser import HelpFound
import caty.core.runtimeobject as ro
def main(args):
init_log()
terminator = Terminator()
if hasattr(signal, 'SIGHUP'):
signal.signal(signal.SIGHUP, lambda signum, frame: terminator.restart(signum))
if hasattr(signal, 'SIGQUIT'):
signal.signal(signal.SIGQUIT, lambda signum, frame: terminator.exit(signum))
if hasattr(signal, 'SIGTERM'):
signal.signal(signal.SIGTERM, lambda signum, frame: terminator.exit(signum))
if hasattr(signal, 'SIGINT'):
signal.signal(signal.SIGINT, lambda signum, frame: terminator.exit(signum))
sl = get_start_log()
sl.info(ro.i18n.get('Caty server started'))
while terminator.continue_process == Terminator.CONTINUE:
try:
system, is_debug, port, hcon_port, uuserver_port = setup(args)
system.uuserver_port = uuserver_port
except HelpFound:
return 0
except Exception, e:
import traceback
traceback.print_exc()
return 1
if not system:
return 0
try:
http_console = None
uuserver_thread = None
server = build_server(system, is_debug, port)
if hcon_port:
http_console = HTTPConsoleThread(system, hcon_port)
if uuserver_port:
uuserver_thread = build_uuserver(system, is_debug, uuserver_port)
terminator.set_server(server)
if http_console:
http_console.start()
if uuserver_thread:
uuserver_thread.start()
server.main()
except select.error, e:
if e.args[0] == 4:
pass
else:
handle_tb()
terminator.continue_process = Terminator.FAIL
except Exception, e:
handle_tb()
terminator.continue_process = Terminator.FAIL
if http_console:
http_console.httpd.shutdown()
if uuserver_thread:
uuserver_thread.shutdown()
if terminator.continue_process == Terminator.END:
sl.info(ro.i18n.get('Caty server ended'))
else:
sl.error(ro.i18n.get('Caty server ended'))
system.finalize()
return 0
def unlink_pid():
if os.path.exists(ro.PID_FILE):
os.unlink(ro.PID_FILE)
def handle_tb():
import traceback
sl = get_start_log()
sl.error(unicode(traceback.format_exc(), 'utf-8'))
traceback.print_exc()
def build_server(system, is_debug, port=8000):
server_module_name = system.server_module_name
exec 'import %s as server_module' % server_module_name
server = CatyServerFacade(server_module, system, is_debug, port)
return server
def check_hcon(option, opt_str, value, parser):
from optparse import OptionValueError
if opt_str in ('--hcon-port', '--hcon-name'):
if parser.values.hcon:
            raise OptionValueError('--hcon-port and --hcon-name are mutually exclusive')
setattr(parser.values, option.dest, value)
def make_server_opt_parser():
from caty.front.util import make_base_opt_parser
parser = make_base_opt_parser('server')
    parser.add_option('-p', '--port', type='int', default=8000, help=u'port number the server listens on')
    parser.add_option('--pid', help=u'server process ID file')
    parser.add_option('--hcon-port', dest='hcon', type='string', action='callback', callback=check_hcon, help=u'port for the hcon console (ignored when running under uwsgi; mutually exclusive with --hcon-name)')
    parser.add_option('--hcon-name', dest='hcon', type='string', action='callback', callback=check_hcon, help=u'hcon application name (only valid when running under uwsgi; mutually exclusive with --hcon-port)')
    parser.add_option('--public-commands', choices=['action', 'all'])
    parser.add_option('--uuserver-port', type='int', help='port number for the Ugly URI thread')
return parser
def setup(args):
parser = make_server_opt_parser()
options, _ = parser.parse_args(args)
init_writer(options.system_encoding)
_help = False
if os.path.exists(ro.PID_FILE):
os.unlink(ro.PID_FILE)
system = System(options)
system.public_commands = options.public_commands
if options.goodbye:
print
print options.goodbye
        return None, None, None, None, None  # match the five values unpacked by the caller
return system, options.debug, options.port, options.hcon, options.uuserver_port
class ConsoleThread(threading.Thread):
def __init__(self, shell, server):
threading.Thread.__init__(self)
self.shell = shell
self.server = server
def run(self):
self.shell.cmdloop()
self.server.close()
def stop(self):
self.shell.do_quit()
class Terminator(object):
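    # Coordinates shutdown/restart: signal handlers set continue_process and run the
    # closer thread; main() keeps looping only while continue_process == CONTINUE.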
CONTINUE = 0
END = 1
FAIL = 2
def __init__(self):
self.continue_process = Terminator.CONTINUE
def set_server(self, server):
self.closer = threading.Thread(target=lambda :server.close())
def exit(self, signum):
from caty.util import cout
sl = get_start_log()
cout.writeln('received signal(' + str(signum) + ')')
sl.info('received signal(' + str(signum) + ')')
self.continue_process = Terminator.END
self.closer.start()
self.closer.join()
def restart(self, signum):
from caty.util import cout
sl = get_start_log()
sl.info('received signal(' + str(signum) + '), restart')
cout.writeln('received signal(' + str(signum) + '), restart')
self.continue_process = Terminator.CONTINUE
self.closer.start()
self.closer.join()
class CatyServerFacade(object):
def __init__(self, server_module, system, is_debug, port):
self.is_debug = is_debug
server_class = server_module.get_server(system, is_debug)
handler_class = server_module.get_handler(system, is_debug)
dispatcher = system._global_config.session.wrapper(server_module.get_dispatcher(system, is_debug), system._global_config.session.conf)
self.httpd = make_server('',
port,
dispatcher,
server_class,
handler_class)
from caty.util import cout
cout.writeln("Serving on port %d..." % port)
def main(self):
import os
with open(ro.PID_FILE, 'wb') as f:
f.write(str(os.getpid()))
self.httpd.serve_forever()
def close(self):
self.httpd.server_close()
os.unlink(ro.PID_FILE)
def build_uuserver(system, is_debug, port=8000):
server_module_name = system.server_module_name
exec 'import %s as server_module' % server_module_name
server = PerformerThread(CatyUUServerFacade(server_module, system, is_debug, port))
return server
class CatyUUServerFacade(object):
def __init__(self, server_module, system, is_debug, port):
self.is_debug = is_debug
server_class = server_module.get_server(system, is_debug)
handler_class = server_module.get_handler(system, is_debug)
dispatcher = system._global_config.session.wrapper(server_module.get_uuserver(system, is_debug), system._global_config.session.conf)
self.httpd = make_server('',
port,
dispatcher,
server_class,
handler_class)
from caty.util import cout
self.port = port
cout.writeln("Performer serving on port %d..." % port)
def main(self):
self.httpd.serve_forever()
def close(self):
self.httpd.shutdown()
class PerformerThread(threading.Thread):
def __init__(self, server):
threading.Thread.__init__(self)
self.server = server
def run(self):
self.server.main()
def shutdown(self):
self.server.close()
def status(self):
return u'running on port %d' % self.server.port
|
misc.py
|
import shutil
import sys
import os
import tables
import warnings
from threading import Thread
from queue import Queue, Empty
from tierpsy import AUX_FILES_DIR
# get the correct path for ffmpeg. First we look in the aux
# directory, otherwise we look in the system path.
def get_local_or_sys_path(file_name):
file_source = os.path.join(AUX_FILES_DIR, file_name)
if not os.path.exists(file_source):
file_source = shutil.which(file_name)
if not file_source:
raise FileNotFoundError('command not found: %s' % file_name)
return file_source
try:
if sys.platform == 'win32':
FFMPEG_CMD = get_local_or_sys_path('ffmpeg.exe')
elif sys.platform == 'darwin':
FFMPEG_CMD = get_local_or_sys_path('ffmpeg22')
elif sys.platform == 'linux':
FFMPEG_CMD = get_local_or_sys_path('ffmpeg')
except FileNotFoundError:
FFMPEG_CMD = ''
    warnings.warn('ffmpeg not found. This might cause problems while reading .mjpeg files.')
# get the correct path for ffprobe. First we look in the aux
# directory, otherwise we look in the system path.
try:
if os.name == 'nt':
FFPROBE_CMD = get_local_or_sys_path('ffprobe.exe')
else:
FFPROBE_CMD = get_local_or_sys_path('ffprobe')
except FileNotFoundError:
FFPROBE_CMD = ''
    warnings.warn('ffprobe not found. This might cause problems while extracting the raw video timestamps.')
WLAB = {'U': 0, 'WORM': 1, 'WORMS': 2, 'BAD': 3, 'GOOD_SKE': 4}
# pytables filters.
TABLE_FILTERS = tables.Filters(
complevel=5,
complib='zlib',
shuffle=True,
fletcher32=True)
def print_flush(msg):
print(msg)
sys.stdout.flush()
class ReadEnqueue():
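    # Wraps a pipe in a background reader thread so read() never blocks the caller:
    # lines are pushed onto a Queue and read() pops them, optionally with a timeout.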
def __init__(self, pipe, timeout=-1):
def _target_fun(out, queue):
for line in iter(out.readline, b''):
queue.put(line)
            out.close()
self.timeout = timeout
self.queue = Queue()
self.thread = Thread( target=_target_fun, args=(pipe, self.queue))
self.thread.start()
def read(self):
try:
if self.timeout > 0:
line = self.queue.get(timeout=self.timeout)
else:
line = self.queue.get_nowait()
line = line.decode("utf-8")
except Empty:
line = None
return line
|
progress_queue1.py
|
import time
from multiprocessing import Process
from multiprocessing import Queue
# communicating via a shared global variable
# shared global variables do NOT work for multiprocessing (each process gets its own copy); they only work with threads
def product(a):
a[1] = 2
def consume(a):
time.sleep(2)
print(a)
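
# --- Added sketch (not in the original script): a queue-based alternative ----
# Unlike the plain dict above, a multiprocessing.Queue is really shared between
# processes, so the consumer sees what the producer put in. The helper names
# below (product_q, consume_q) are illustrative.
def product_q(queue):
    queue.put({1: 2})

def consume_q(queue):
    time.sleep(2)
    print('received from queue:', queue.get())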
if __name__ == "__main__":
a = {}
p = Process(target=product, args=(a,))
c = Process(target=consume, args=(a,))
p.start()
c.start()
p.join()
c.join()
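
    # queue-based version of the same experiment, using the sketch functions above
    q = Queue()
    pq = Process(target=product_q, args=(q,))
    cq = Process(target=consume_q, args=(q,))
    pq.start()
    cq.start()
    pq.join()
    cq.join()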
|
equilibrator-apiServer.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from wsgiref.simple_server import make_server
import sys
import json
import traceback
import datetime
from multiprocessing import Process
from getopt import getopt, GetoptError
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError,\
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from os import environ
from ConfigParser import ConfigParser
from biokbase import log
import requests as _requests
import random as _random
import os
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
AUTH = 'auth-service-url'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
return environ.get(DEPLOY, None)
def get_service_name():
return environ.get(SERVICE, None)
def get_config():
if not get_config_file():
return None
retconfig = {}
config = ConfigParser()
config.read(get_config_file())
for nameval in config.items(get_service_name() or 'equilibrator-api'):
retconfig[nameval[0]] = nameval[1]
return retconfig
config = get_config()
# NOTE: hyphens are not valid in Python identifiers/module names, so the generated
# names are normalized to underscores here to keep the import and variable valid.
from equilibrator_api.equilibrator_apiImpl import equilibrator_api  # noqa @IgnorePep8
impl_equilibrator_api = equilibrator_api(config)
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
if hasattr(obj, 'toJSONable'):
return obj.toJSONable()
return json.JSONEncoder.default(self, obj)
class JSONRPCServiceCustom(JSONRPCService):
def call(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
def _call_method(self, ctx, request):
"""Calls given method with given params and returns it value."""
method = self.method_data[request['method']]['method']
params = request['params']
result = None
try:
if isinstance(params, list):
# Does it have enough arguments?
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError('not enough arguments')
# Does it have too many arguments?
if(not self._vargs(method) and len(params) >
self._max_args(method) - 1):
raise InvalidParamsError('too many arguments')
result = method(ctx, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=1.1.
if request['jsonrpc'] < 11:
raise KeywordError
result = method(ctx, **params)
else: # No params
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
# log.exception('method %s threw an exception' % request['method'])
# Exception was raised inside the method.
newerr = JSONServerError()
newerr.trace = traceback.format_exc()
if isinstance(e.message, basestring):
newerr.data = e.message
else:
# Some exceptions embed other exceptions as the message
newerr.data = repr(e.message)
raise newerr
return result
def call_py(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
This method is same as call() except the return value is a python
object instead of JSON string. This method is mainly only useful for
debugging purposes.
"""
rdata = jsondata
# we already deserialize the json string earlier in the server code, no
# need to do it again
# try:
# rdata = json.loads(jsondata)
# except ValueError:
# raise ParseError
# set some default values for error handling
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
return responds
# Nothing to respond.
return None
else:
# empty dict, list or wrong type
raise InvalidRequestError
def _handle_request(self, ctx, request):
"""Handles given request and returns its response."""
if self.method_data[request['method']].has_key('types'): # noqa @IgnorePep8
self._validate_params_types(request['method'], request['params'])
result = self._call_method(ctx, request)
# Do not respond to notifications.
if request['id'] is None:
return None
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
return respond
class MethodContext(dict):
def __init__(self, logger):
self['client_ip'] = None
self['user_id'] = None
self['authenticated'] = None
self['token'] = None
self['module'] = None
self['method'] = None
self['call_id'] = None
self['rpc_context'] = None
self['provenance'] = None
self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
self._logger = logger
def log_err(self, message):
self._log(log.ERR, message)
def log_info(self, message):
self._log(log.INFO, message)
def log_debug(self, message, level=1):
if level in self._debug_levels:
pass
else:
level = int(level)
if level < 1 or level > 3:
raise ValueError("Illegal log level: " + str(level))
level = level + 6
self._log(level, message)
def set_log_level(self, level):
self._logger.set_log_level(level)
def get_log_level(self):
return self._logger.get_log_level()
def clear_log_level(self):
self._logger.clear_user_log_level()
def _log(self, level, message):
self._logger.log_message(level, message, self['client_ip'],
self['user_id'], self['module'],
self['method'], self['call_id'])
def provenance(self):
callbackURL = os.environ.get('SDK_CALLBACK_URL')
if callbackURL:
# OK, there's a callback server from which we can get provenance
arg_hash = {'method': 'CallbackServer.get_provenance',
'params': [],
'version': '1.1',
'id': str(_random.random())[2:]
}
body = json.dumps(arg_hash)
response = _requests.post(callbackURL, data=body,
timeout=60)
response.encoding = 'utf-8'
if response.status_code == 500:
if ('content-type' in response.headers and
response.headers['content-type'] ==
'application/json'):
err = response.json()
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, response.text)
else:
raise ServerError('Unknown', 0, response.text)
if not response.ok:
response.raise_for_status()
resp = response.json()
if 'result' not in resp:
raise ServerError('Unknown', 0,
'An unknown server error occurred')
return resp['result'][0]
else:
return self.get('provenance')
class ServerError(Exception):
'''
The call returned an error. Fields:
name - the name of the error.
code - the error code.
message - a human readable error message.
data - the server side stacktrace.
'''
def __init__(self, name, code, message, data=None, error=None):
super(Exception, self).__init__(message)
self.name = name
self.code = code
self.message = message if message else ''
self.data = data or error or ''
# data = JSON RPC 2.0, error = 1.1
def __str__(self):
return self.name + ': ' + str(self.code) + '. ' + self.message + \
'\n' + self.data
def getIPAddress(environ):
xFF = environ.get('HTTP_X_FORWARDED_FOR')
realIP = environ.get('HTTP_X_REAL_IP')
trustXHeaders = config is None or \
config.get('dont_trust_x_ip_headers') != 'true'
if (trustXHeaders):
if (xFF):
return xFF.split(',')[0].strip()
if (realIP):
return realIP.strip()
return environ.get('REMOTE_ADDR')
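# Illustrative behaviour of getIPAddress (environ values are made up):
# when the X-headers are trusted, the first X-Forwarded-For entry wins:
#   getIPAddress({'HTTP_X_FORWARDED_FOR': '203.0.113.7, 10.0.0.2',
#                 'REMOTE_ADDR': '10.0.0.2'})   -> '203.0.113.7'
# otherwise the call falls back to REMOTE_ADDR:
#   getIPAddress({'REMOTE_ADDR': '192.0.2.10'}) -> '192.0.2.10'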
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = get_service_name() or 'equilibrator-api'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
self.rpc_service.add(impl_equilibrator_api.status,
name='equilibrator-api.status',
types=[dict])
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
except (ValueError):
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {
'call_stack': [{'time': self.now_in_utc(),
'method': req['method']}
]
}
prov_action = {'service': ctx['module'],
'method': ctx['method'],
'method_params': req['params']
}
ctx['provenance'] = [prov_action]
try:
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print 'Request method was %s\n' % environ['REQUEST_METHOD']
# print 'Environment dictionary is:\n%s\n' % pprint.pformat(environ)
# print 'Request body was: %s' % request_body
# print 'Result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result)
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body]
def process_error(self, error, context, request, trace=None):
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
if 'version' in request:
error['version'] = request['version']
e = error['error'].get('error')
if not e:
error['error']['error'] = trace
elif 'jsonrpc' in request:
error['jsonrpc'] = request['jsonrpc']
error['error']['data'] = trace
else:
error['version'] = '1.0'
error['error']['error'] = trace
return json.dumps(error)
def now_in_utc(self):
# noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
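# Worked example of the offset arithmetic above (timezone assumed): on a
# host 5.5 hours ahead of UTC, delta is roughly 19800 seconds, so
# divmod((19800 + 30) // 60, 60) == (5, 30) and the returned string ends
# in "+05:30"; a host behind UTC yields a negative hh instead.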
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run using the single-threaded python BaseHTTP service
# listening on port 9999 by default, execute this file
#
try:
import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be asynch
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
if config is not None and config.get('gevent_monkeypatch_all', False):
print "Monkeypatching std libraries for async"
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {'': application}
except ImportError:
# Not available outside of wsgi, ignore
pass
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
'''
By default, will start the server on localhost on a system assigned port
in the main thread. Execution of the main thread will stay in the server
main loop until interrupted. To run the server in a separate process, and
thus allow the stop_server method to be called, set newprocess = True. This
also allows the port number to be returned.'''
global _proc
if _proc:
raise RuntimeError('server is already running')
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print "Listening on port %s" % port
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port
def stop_server():
global _proc
_proc.terminate()
_proc = None
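# Usage sketch, assuming this module is importable and the wsgi `application`
# above has been constructed (the helper name below is illustrative only):
def _example_start_and_stop():
    # Run on an OS-assigned port in a child process so stop_server() works.
    port = start_server(host='localhost', port=0, newprocess=True)
    try:
        print('serving on port %d' % port)
    finally:
        stop_server()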
def process_async_cli(input_file_path, output_file_path, token):
exit_code = 0
with open(input_file_path) as data_file:
req = json.load(data_file)
if 'version' not in req:
req['version'] = '1.1'
if 'id' not in req:
req['id'] = str(_random.random())[2:]
ctx = MethodContext(application.userlog)
if 'context' in req:
ctx['rpc_context'] = req['context']
ctx['CLI'] = 1
ctx['module'], ctx['method'] = req['method'].split('.')
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
resp = None
try:
resp = application.rpc_service.call_py(ctx, req)
except JSONRPCError as jre:
trace = jre.trace if hasattr(jre, 'trace') else None
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': jre.code,
'name': jre.message,
'message': jre.data,
'error': trace}
}
except Exception:
trace = traceback.format_exc()
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error occurred',
'error': trace}
}
if 'error' in resp:
exit_code = 500
with open(output_file_path, "w") as f:
f.write(json.dumps(resp, cls=JSONObjectEncoder))
return exit_code
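# Illustrative input file for process_async_cli (field names are the ones
# read above; the method matches the registration in this module and the
# empty params list is an assumption):
# {
#   "method": "equilibrator-api.status",
#   "params": [],
#   "version": "1.1"
# }
# The output file then holds either a "result" or an "error" member.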
if __name__ == "__main__":
if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
os.path.isfile(sys.argv[1])):
token = None
if len(sys.argv) == 4:
if os.path.isfile(sys.argv[3]):
with open(sys.argv[3]) as token_file:
token = token_file.read()
else:
token = sys.argv[3]
sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
print str(err) # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print "Host set to %s" % host
else:
assert False, "unhandled option"
start_server(host=host, port=port)
# print "Listening on port %s" % port
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()
|
client3.py
|
import socket
from threading import Thread
utf = "utf-8"
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(('127.0.0.1', 19400))
print("Welcome to the chat, enter 'Bye' to exit")
def send_message():
while True:
sock.send(input().encode(utf))
def receive_message():
while True:
data = sock.recv(16348)
print(data.decode(utf))
send_thread = Thread(target=send_message)
get_thread = Thread(target=receive_message)
send_thread.start()
get_thread.start()
|
parallel_manager.py
|
import os
import time
import logbook
import threading
from tempfile import mkdtemp
from six.moves import xmlrpc_client
from .. import log
from ..exceptions import INTERRUPTION_EXCEPTIONS, ParallelServerIsDown, ParallelTimeout
from ..conf import config
from .server import Server, ServerStates, KeepaliveServer
from .worker_configuration import TmuxWorkerConfiguration, ProcessWorkerConfiguration
_logger = logbook.Logger(__name__)
log.set_log_color(_logger.name, logbook.NOTICE, 'blue')
TIME_BETWEEN_CHECKS = 2
MAX_CONNECTION_RETRIES = 200
def get_xmlrpc_proxy(address, port):
return xmlrpc_client.ServerProxy('http://{}:{}'.format(address, port))
class ParallelManager(object):
def __init__(self, args):
super(ParallelManager, self).__init__()
self.server = None
self.workers_error_directory = mkdtemp()
self.args = args
self.workers_num = config.root.parallel.num_workers
self.workers = {}
self.server_thread = None
self.keepalive_server = None
self.keepalive_server_thread = None
self._create_workers()
def _create_workers(self):
for index in range(1, self.workers_num+1):
_logger.debug("Creating worker number {}", index)
index_str = str(index)
worker_cls = TmuxWorkerConfiguration if config.root.tmux.enabled else ProcessWorkerConfiguration
self.workers[index_str] = worker_cls(self.args, index_str)
def try_connect(self):
for _ in range(MAX_CONNECTION_RETRIES):
if self.server.state != ServerStates.NOT_INITIALIZED and self.keepalive_server.state != ServerStates.NOT_INITIALIZED:
return
time.sleep(0.1)
raise ParallelServerIsDown("Cannot connect to XML_RPC server")
def start_server_in_thread(self, collected):
self.server = Server(collected)
self.server_thread = threading.Thread(target=self.server.serve, args=())
self.server_thread.setDaemon(True)
self.server_thread.start()
self.keepalive_server = KeepaliveServer()
self.keepalive_server_thread = threading.Thread(target=self.keepalive_server.serve, args=())
self.keepalive_server_thread.setDaemon(True)
self.keepalive_server_thread.start()
def kill_workers(self):
for worker in list(self.workers.values()):
worker.kill()
def report_worker_error_logs(self):
found_worker_errors_file = False
for file_name in os.listdir(self.workers_error_directory):
if file_name.startswith(config.root.parallel.worker_error_file):
found_worker_errors_file = True
with open(os.path.join(self.workers_error_directory, file_name)) as worker_file:
content = worker_file.readlines()
for line in content:
_logger.error("{}: {}", file_name, line, extra={'capture': False})
if not found_worker_errors_file:
_logger.error("No worker error files were found", extra={'capture': False})
def handle_error(self, failure_message):
_logger.error(failure_message, extra={'capture': False})
self.kill_workers()
self.report_worker_error_logs()
get_xmlrpc_proxy(config.root.parallel.server_addr, self.server.port).report_session_error(failure_message)
raise ParallelTimeout(failure_message)
def wait_all_workers_to_connect(self):
while self.server.state == ServerStates.WAIT_FOR_CLIENTS:
if time.time() - self.server.start_time > config.root.parallel.worker_connect_timeout * self.workers_num:
self.handle_error("Timeout: Not all clients connected to server, terminating.\n\
Clients connected: {}".format(self.server.connected_clients))
time.sleep(TIME_BETWEEN_CHECKS)
def check_worker_timed_out(self):
workers_last_connection_time = self.keepalive_server.get_workers_last_connection_time()
for worker_id in self.server.get_connected_clients():
worker_last_connection_time = workers_last_connection_time.get(worker_id, None)
if worker_last_connection_time is None:  # worker keepalive thread hasn't started yet
continue
if time.time() - worker_last_connection_time > config.root.parallel.communication_timeout_secs:
_logger.error("Worker {} is down, terminating session", worker_id, extra={'capture': False})
self.report_worker_error_logs()
self.workers[worker_id].handle_timeout()
get_xmlrpc_proxy(config.root.parallel.server_addr, self.server.port).report_client_failure(worker_id)
def check_no_requests_timeout(self):
if time.time() - self.keepalive_server.last_request_time > config.root.parallel.no_request_timeout:
_logger.error("No request sent to server for {} seconds, terminating",
config.root.parallel.no_request_timeout, extra={'capture': False})
if self.server.has_connected_clients():
_logger.error("Clients that are still connected to server: {}",
self.server.connected_clients, extra={'capture': False})
if self.server.has_more_tests():
_logger.error("Number of unstarted tests: {}", len(self.server.get_unstarted_tests()),
extra={'capture': False})
if self.server.executing_tests:
_logger.error("Currently executed tests indexes: {}", self.server.executing_tests.values(),
extra={'capture': False})
self.handle_error("No request sent to server for {} seconds, terminating".format(config.root.parallel.no_request_timeout))
def start(self):
self.try_connect()
try:
for worker in list(self.workers.values()):
worker.start()
self.wait_all_workers_to_connect()
while self.server.should_wait_for_request():
self.check_worker_timed_out()
self.check_no_requests_timeout()
time.sleep(TIME_BETWEEN_CHECKS)
except INTERRUPTION_EXCEPTIONS:
_logger.error("Server interrupted, stopping workers and terminating", extra={'capture': False})
get_xmlrpc_proxy(config.root.parallel.server_addr, self.server.port).session_interrupted()
self.kill_workers()
raise
finally:
for worker in list(self.workers.values()):
worker.wait_to_finish()
get_xmlrpc_proxy(config.root.parallel.server_addr, self.server.port).stop_serve()
get_xmlrpc_proxy(config.root.parallel.server_addr, self.keepalive_server.port).stop_serve()
self.server_thread.join()
self.keepalive_server_thread.join()
|
flash_lights.py
|
import pyaudio
from threading import Thread
import time
import numpy as np
# import servo board
from board import SCL, SDA
import busio
from adafruit_pca9685 import PCA9685
# Values to control whether the dome lights are on or off
VOL_MIN = 400
VOL_MAX = 8000
RATE = 44100 # recording rate in Hz
MAX = 400 # minimum volume level for dome lights to illuminate
ON = 1.0
CHUNK = 2**13 # buffer size for audio capture and analysis
FREQUENCY = 50
PERIOD = 1.0 / float(FREQUENCY) * 1000.0
DOME_LIGHTS = 0
# create iris servo
i2c_bus = busio.I2C(SCL, SDA)
pca = PCA9685(i2c_bus)
pca.frequency = FREQUENCY
# Sets up a daemon thread to flash lights in line with sound
def dalek_light(channel,value):
"""
Changes the level of illumination of a light attached to the
PWM output of the servo controller.
Args:
channel (int): the channel number of the servo (range 0-16)
value (float): value between 0.0 and 1.0
"""
pca.channels[channel].duty_cycle = int(value * 65535.0)
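# Usage sketch for dalek_light (channel constant defined above; the helper
# below is illustrative and is not called at import time):
def demo_half_brightness():
    """Drive the dome-light channel at roughly half brightness."""
    dalek_light(DOME_LIGHTS, 0.5)  # duty_cycle becomes int(0.5 * 65535) == 32767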
def flash_dome_lights():
''' Daemon thread to flash lights based on microphone noise '''
while True:
try:
data = np.frombuffer(stream.read(CHUNK, False),dtype=np.int16)
vol = abs(int(np.average(np.abs(data))))
print(vol)
if vol > VOL_MIN:
vol = min(1.0, vol/VOL_MAX)
dalek_light(DOME_LIGHTS, vol)
else:
dalek_light(DOME_LIGHTS, 0)
except ValueError:
print("Volume out of range: " + str(vol))
print("Starting audio thread...")
p = pyaudio.PyAudio()
stream=p.open(format=pyaudio.paInt16,channels=1,rate=RATE,input=True,
frames_per_buffer=CHUNK, input_device_index=1)
domeLightsThread = Thread(target=flash_dome_lights, daemon=True)
domeLightsThread.start()
print("Audio thread started...")
try:
while True:
time.sleep(0.01)
except KeyboardInterrupt:
stream.stop_stream()
stream.close()
p.terminate()
print("Lights stopped by user.")
|
test_threading_local.py
|
import unittest
from doctest import DocTestSuite
from test import support
import weakref
import gc
import sys  # used by the failure path in _test_one_class below
# Modules under test
_thread = support.import_module('_thread')
threading = support.import_module('threading')
import _threading_local
class Weak(object):
pass
def target(local, weaklist):
weak = Weak()
local.weak = weak
weaklist.append(weakref.ref(weak))
class BaseLocalTest:
def test_local_refs(self):
self._local_refs(20)
self._local_refs(50)
self._local_refs(100)
def _local_refs(self, n):
local = self._local()
weaklist = []
for i in range(n):
t = threading.Thread(target=target, args=(local, weaklist))
t.start()
t.join()
del t
gc.collect()
self.assertEqual(len(weaklist), n)
# XXX _threading_local keeps the local of the last stopped thread alive.
deadlist = [weak for weak in weaklist if weak() is None]
self.assertIn(len(deadlist), (n-1, n))
# Assignment to the same thread local frees it sometimes (!)
local.someothervar = None
gc.collect()
deadlist = [weak for weak in weaklist if weak() is None]
self.assertTrue(len(deadlist) in (n-1, n), (n, len(deadlist)))
def test_derived(self):
# Issue 3088: if there is a threads switch inside the __init__
# of a threading.local derived class, the per-thread dictionary
# is created but not correctly set on the object.
# The first member set may be bogus.
import time
class Local(self._local):
def __init__(self):
time.sleep(0.01)
local = Local()
def f(i):
local.x = i
# Simply check that the variable is correctly set
self.assertEqual(local.x, i)
threads= []
for i in range(10):
t = threading.Thread(target=f, args=(i,))
t.start()
threads.append(t)
for t in threads:
t.join()
def test_derived_cycle_dealloc(self):
# http://bugs.python.org/issue6990
class Local(self._local):
pass
locals = None
passed = False
e1 = threading.Event()
e2 = threading.Event()
def f():
nonlocal passed
# 1) Involve Local in a cycle
cycle = [Local()]
cycle.append(cycle)
cycle[0].foo = 'bar'
# 2) GC the cycle (triggers threadmodule.c::local_clear
# before local_dealloc)
del cycle
gc.collect()
e1.set()
e2.wait()
# 4) New Locals should be empty
passed = all(not hasattr(local, 'foo') for local in locals)
t = threading.Thread(target=f)
t.start()
e1.wait()
# 3) New Locals should recycle the original's address. Creating
# them in the thread overwrites the thread state and avoids the
# bug
locals = [Local() for i in range(10)]
e2.set()
t.join()
self.assertTrue(passed)
def _test_one_class(self, c):
self._failed = "No error message set or cleared."
obj = c()
e1 = threading.Event()
e2 = threading.Event()
def f1():
obj.x = 'foo'
obj.y = 'bar'
del obj.y
e1.set()
e2.wait()
def f2():
try:
foo = obj.x
except AttributeError:
# This is expected -- we haven't set obj.x in this thread yet!
self._failed = "" # passed
else:
self._failed = ('Incorrectly got value %r from class %r\n' %
(foo, c))
sys.stderr.write(self._failed)
t1 = threading.Thread(target=f1)
t1.start()
e1.wait()
t2 = threading.Thread(target=f2)
t2.start()
t2.join()
# The test is done; just let t1 know it can exit, and wait for it.
e2.set()
t1.join()
self.assertFalse(self._failed, self._failed)
def test_threading_local(self):
self._test_one_class(self._local)
def test_threading_local_subclass(self):
class LocalSubclass(self._local):
"""To test that subclasses behave properly."""
self._test_one_class(LocalSubclass)
def _test_dict_attribute(self, cls):
obj = cls()
obj.x = 5
self.assertEqual(obj.__dict__, {'x': 5})
with self.assertRaises(AttributeError):
obj.__dict__ = {}
with self.assertRaises(AttributeError):
del obj.__dict__
def test_dict_attribute(self):
self._test_dict_attribute(self._local)
def test_dict_attribute_subclass(self):
class LocalSubclass(self._local):
"""To test that subclasses behave properly."""
self._test_dict_attribute(LocalSubclass)
class ThreadLocalTest(unittest.TestCase, BaseLocalTest):
_local = _thread._local
# Fails for the pure Python implementation
def test_cycle_collection(self):
class X:
pass
x = X()
x.local = self._local()
x.local.x = x
wr = weakref.ref(x)
del x
gc.collect()
self.assertIs(wr(), None)
class PyThreadingLocalTest(unittest.TestCase, BaseLocalTest):
_local = _threading_local.local
def test_main():
suite = unittest.TestSuite()
suite.addTest(DocTestSuite('_threading_local'))
suite.addTest(unittest.makeSuite(ThreadLocalTest))
suite.addTest(unittest.makeSuite(PyThreadingLocalTest))
local_orig = _threading_local.local
def setUp(test):
_threading_local.local = _thread._local
def tearDown(test):
_threading_local.local = local_orig
suite.addTest(DocTestSuite('_threading_local',
setUp=setUp, tearDown=tearDown)
)
support.run_unittest(suite)
if __name__ == '__main__':
test_main()
|
goldenrod.py
|
# twisted imports
from twisted.words.protocols import irc
from twisted.internet import reactor, protocol
from twisted.python import log
import logging
# system imports
import datetime, time, sys, threading, os.path
import commandparser
import messagequeue
import contestmanager
import config
import random
import sqlite3
import channelmanager
import whisperbot
commandParser = None
channelManager = None
channelInstances = {}
allInstances = []
conn = sqlite3.connect("goldenrod.db", check_same_thread=False)
conn.row_factory = sqlite3.Row
cursor = conn.cursor()
try:
cursor.execute("SELECT * FROM slotspool")
cursor.fetchone()
except sqlite3.OperationalError:
import dbcreator
dbcreator.create(conn, cursor)
if os.path.isfile("./contests.db"):
# attach it
cursor.execute("ATTACH DATABASE \"contests.db\" AS contests")
lock = threading.Lock()
class GoldenrodNostalgiaB(irc.IRCClient):
"""A gambling IRC bot."""
nickname = config.botNick
password = config.botOAuth
def __init__(self, commandParser):
self.commandParser = commandParser
self.acceptCommands = False
self.isMod = False
self.channelMods = []
self.messageQueue = None
self.lurklessCount = 0
self.conn = conn
self.cursor = cursor
self.contestManager = None
self.commandsEnabled = False
self.contestsEnabled = False
self.inQuietMode = False
self.infoSendTimes = {}
self.quietModeTold = []
self.shinyMessageTimes = {}
# DB stuff
def execQueryModify(self, query, args=None):
try:
lock.acquire(True)
try:
if(args == None):
self.cursor.execute(query)
else:
self.cursor.execute(query, args)
except sqlite3.IntegrityError:
# do nothing because row already exists
self.conn.commit()
return 0
rowc = self.cursor.rowcount
self.conn.commit()
return rowc
finally:
lock.release()
def execQuerySelectOne(self, query, args=None):
try:
lock.acquire(True)
if(args == None):
self.cursor.execute(query)
else:
self.cursor.execute(query, args)
return self.cursor.fetchone()
finally:
lock.release()
def execQuerySelectMultiple(self, query, args=None):
try:
lock.acquire(True)
if(args == None):
self.cursor.execute(query)
else:
self.cursor.execute(query, args)
return self.cursor.fetchall()
finally:
lock.release()
def getUserDetails(self, user):
userData = self.execQuerySelectOne("SELECT * FROM users WHERE twitchname = ?", (user,))
if userData == None:
self.createNewUser(user)
return self.execQuerySelectOne("SELECT * FROM users WHERE twitchname = ?", (user,))
else:
self.execQueryModify("UPDATE users SET last_activity = ? WHERE twitchname = ?", (int(time.time()), user))
return userData
def createNewUser(self, user):
self.execQueryModify("INSERT INTO users (twitchname, balance, last_activity, highest_balance) VALUES(?, ?, ?, ?)", (user, config.startingBalance, int(time.time()), config.startingBalance))
def updateHighestBalance(self, userData, newBalance):
if newBalance > userData["highest_balance"]:
self.execQueryModify("UPDATE users SET highest_balance = ? WHERE twitchname = ?", (newBalance, userData["twitchname"]))
def commandsAreEnabled(self):
commandInfo = self.execQuerySelectOne("SELECT * FROM channels WHERE channel = ?", (self.factory.channel,))
if commandInfo == None:
self.execQueryModify("INSERT INTO channels (channel, commandsEnabled, quietMode, lastChange) VALUES(?, ?, ?, ?)", (self.factory.channel, False, False, int(time.time())))
commandInfo = self.execQuerySelectOne("SELECT * FROM channels WHERE channel = ?", (self.factory.channel,))
return commandInfo["commandsEnabled"]
def setCommandsEnabled(self, commandsEnabled):
self.commandsEnabled = commandsEnabled
self.execQueryModify("UPDATE channels SET commandsEnabled = ?, lastChange = ? WHERE channel = ?", (commandsEnabled, int(time.time()), self.factory.channel))
def isInQuietMode(self):
commandInfo = self.execQuerySelectOne("SELECT * FROM channels WHERE channel = ?", (self.factory.channel,))
if commandInfo == None:
self.execQueryModify("INSERT INTO channels (channel, commandsEnabled, quietMode, lastChange) VALUES(?, ?, ?, ?)", (self.factory.channel, False, False, int(time.time())))
commandInfo = self.execQuerySelectOne("SELECT * FROM channels WHERE channel = ?", (self.factory.channel,))
return commandInfo["quietMode"]
def setQuietMode(self, quietMode):
self.inQuietMode = quietMode
self.execQueryModify("UPDATE channels SET quietMode = ?, lastChange = ? WHERE channel = ?", (quietMode, int(time.time()), self.factory.channel))
self.quietModeTold = []
# return 0 if they can play or cooldown in seconds remaining otherwise
def canPlayGame(self, userData):
if(userData["last_game"] == None):
return 0
currTimestamp = int(time.time())
if(currTimestamp - userData["last_game"] >= config.gameCooldown):
return 0
else:
return config.gameCooldown - (currTimestamp - userData["last_game"])
# callbacks for events
def connectionMade(self):
irc.IRCClient.connectionMade(self)
def connectionLost(self, reason):
irc.IRCClient.connectionLost(self, reason)
def signedOn(self):
"""Called when bot has successfully signed on to server."""
self.sendLine("CAP REQ :twitch.tv/commands")
self.join(self.factory.channel)
self.acceptCommands = False
self.isMod = (config.botNick == self.factory.channel)
self.channelMods = []
self.contestsEnabled = False
self.messageQueue = messagequeue.MessageQueue(self)
mqThread = threading.Thread(target=self.messageQueue.run)
mqThread.daemon = True
mqThread.start()
self.contestManager = contestmanager.ContestManager(self)
cmThread = threading.Thread(target=self.contestManager.run)
cmThread.daemon = True
cmThread.start()
def joined(self, channel):
"""This will get called when the bot joins the channel."""
self.commandsEnabled = self.commandsAreEnabled()
self.inQuietMode = self.isInQuietMode()
self.quietModeTold = []
self.acceptCommands = True
self.isMod = (config.botNick == self.factory.channel)
self.channelMods = []
self.channelMsg(".mods")
if config.doSayHello and self.commandsEnabled:
self.channelMsg(config.helloMessage)
def modeChanged(self, user, channel, set, modes, args):
# do something (change mod status?)
pass
def privmsg(self, user, channel, msg):
"""This will get called when the bot receives a message."""
user = user.split('!', 1)[0]
reactor.rootLogger.info(("%s --> %s : %s" % (user, channel, msg)).decode("utf-8"))
# Check to see if it is a potential command
msg = msg.strip()
if (self.commandsEnabled or (user == self.factory.channel or user == config.botOwner or user in self.channelMods)) and self.acceptCommands:
if not self.commandsEnabled and (user == self.factory.channel or user in self.channelMods) and user != config.botOwner:
if not msg.startswith("%sgoldenrodctl" % config.cmdChar):
return
timeNow = int(time.time())
if (user not in self.shinyMessageTimes) or self.shinyMessageTimes[user] <= timeNow - 60:
self.shinyMessageTimes[user] = timeNow
if random.randint(1, 8192) == 6969:
self.channelMsg("/me *** %s's MESSAGE WAS SHINIED! THEY WIN %d %s. ***" % (user.upper(), config.shinyPrize, config.currencyPlural.upper()))
userData = self.getUserDetails(user)
self.execQueryModify("INSERT INTO shinies (twitchname, reward, whenHappened, channel) VALUES(?, ?, ?, ?)", (user, config.shinyPrize, int(time.time()), self.factory.channel))
self.execQueryModify("UPDATE users SET balance = ?, last_activity = ? WHERE twitchname = ? AND balance = ?", (userData["balance"]+config.shinyPrize, int(time.time()), user, userData["balance"]))
if user == self.factory.channel:
self.channelMsg("/me Strimmer got a shiny? DansGame R I G G E D DansGame")
if user == config.botOwner:
self.channelMsg("/me The owner got a shiny? DansGame DansGame DansGame 1 0 0 % R I G G E D DansGame DansGame DansGame")
if self.contestManager.currentContest != None:
self.contestManager.currentContest.processMessage(user, msg)
if msg.startswith(config.cmdChar):
commandBits = msg[config.cmdCharLen:].split(' ', 1)
command = commandBits[0]
args = ""
if len(commandBits) == 2:
args = commandBits[1]
self.commandParser.parse(self, user, command, args, False)
def noticed(self, user, channel, msg):
reactor.rootLogger.info(("%s --> (notice) %s : %s" % (user, channel, msg)).decode("utf-8"))
# Check to see if they're sending me a private message
if user == "tmi.twitch.tv" and msg.startswith(config.twitchModsMsg):
self.channelMods = msg[len(config.twitchModsMsg):].split(", ")
self.isMod = (self.nickname in self.channelMods) or (self.nickname == self.factory.channel)
def leaveChannel(self, byeMessage):
if not self.acceptCommands:
return
if byeMessage != None and byeMessage != "":
self.queueMsg("#%s" % self.factory.channel, byeMessage, True)
self.acceptCommands = False
klThread = threading.Thread(target=self.killRequest)
klThread.daemon = True
klThread.start()
def killRequest(self):
try:
while not (self.messageQueue == None) and not self.messageQueue.queue.empty():
time.sleep(0.5)
except AttributeError:
pass
from goldenrod import allInstances
allInstances.remove(self)
self.factory.killBot = True
self.quit()
def channelMsg(self, message):
reactor.rootLogger.info(("%s --> %s (queueing) : %s" % (config.botNick, "#%s"%self.factory.channel, message)).decode("utf-8"))
self.queueMsg("#%s" % self.factory.channel, message, False)
def channelMsgRA(self, message):
reactor.rootLogger.info(("%s --> %s (queueing, repeat) : %s" % (config.botNick, "#%s"%self.factory.channel, message)).decode("utf-8"))
self.queueMsg("#%s" % self.factory.channel, message, True)
def queueMsg(self, channel, message, repeat):
if repeat:
self.messageQueue.queueMessageRA(channel, message)
else:
self.messageQueue.queueMessage(channel, message)
def addressUser(self, user, message):
if self.inQuietMode:
reactor.whisperer.sendWhisper(user, message)
else:
self.channelMsg("%s -> %s" % (user, message))
def isWhisperRequest(self):
return False
def sendInfoMessage(self, id, user, message):
if self.inQuietMode:
reactor.whisperer.sendWhisper(user, message)
else:
isMod = (user in self.channelMods) or user == self.factory.channel or user == config.botOwner
timeNow = int(time.time())
if isMod or (id not in self.infoSendTimes) or self.infoSendTimes[id] <= timeNow - 60:
self.infoSendTimes[id] = timeNow
self.channelMsg(message)
def tellAboutQuietMode(self, user):
if user not in self.quietModeTold:
self.quietModeTold.append(user)
reactor.whisperer.sendWhisper(user, "This stream is currently in quiet mode, spammy commands like !handout are turned off. Please respect the streamer's wishes and keep the spam low.")
def connectToTwitch(startChannel, commandParser, waitTimeout):
if waitTimeout > 0:
time.sleep(waitTimeout)
f = GoldenrodFactory(startChannel, commandParser, waitTimeout)
# connect factory to this host and port
twitchServers = ["192.16.64.11", "192.16.64.144", "192.16.64.145", "192.16.64.146", "192.16.64.152", "192.16.64.155"]
myServer = random.choice(twitchServers)
reactor.connectTCP(myServer, 6667, f)
def connectWhisperer(commandParser, waitTimeout):
if waitTimeout > 0:
time.sleep(waitTimeout)
f = whisperbot.WhisperFactory(waitTimeout, conn, cursor, lock, commandParser)
# connect factory to this host and port
twitchGroupServers = ["199.9.253.120"]
myServer = random.choice(twitchGroupServers)
reactor.connectTCP(myServer, 6667, f)
class GoldenrodFactory(protocol.ClientFactory):
def __init__(self, channel, commandParser, waitTimeout):
self.channel = channel
self.killBot = False
self.oldWait = waitTimeout
self.timeouts = { 0: 5, 0.1: 5, 5: 10, 10: 30, 30: 60, 60: 300, 300: 300 }
self.commandParser = commandParser
self.instance = None
def buildProtocol(self, addr):
from goldenrod import channelInstances, allInstances
if self.channel in channelInstances:
self.instance = None
return None
p = GoldenrodNostalgiaB(self.commandParser)
channelInstances[self.channel] = p
allInstances.append(p)
p.factory = self
return p
def clientConnectionLost(self, connector, reason):
from goldenrod import channelInstances, allInstances
if self.instance in channelInstances.values():
channelInstances.remove(self.instance)
if self.instance in allInstances:
allInstances.remove(self.instance)
self.instance = None
def clientConnectionFailed(self, connector, reason):
from goldenrod import channelInstances, allInstances
if self.instance in channelInstances.values():
channelInstances.remove(self.instance)
if self.instance in allInstances:
allInstances.remove(self.instance)
self.instance = None
def joinNewChannel(channel):
from goldenrod import channelInstances
if channel.startswith("#"):
channel = channel[1:]
if channel in channelInstances:
return
connectToTwitch(channel, reactor.commandParser, 0)
def leaveChannel(channel, message):
from goldenrod import channelInstances
if channel.startswith("#"):
channel = channel[1:]
if channel not in channelInstances:
return
channelInstances[channel].leaveChannel(message)
del channelInstances[channel]
def commandsAreEnabled(channel):
if channel.startswith("#"):
channel = channel[1:]
try:
lock.acquire(True)
cursor.execute("SELECT * FROM channels WHERE channel = ?", (channel,))
channelData = cursor.fetchone()
if channelData == None:
return False
else:
return channelData["commandsEnabled"]
finally:
lock.release()
def addToCommandsEnabled(channel):
from goldenrod import channelInstances
if channel.startswith("#"):
channel = channel[1:]
if channel not in channelInstances:
try:
lock.acquire(True)
cursor.execute("SELECT * FROM channels WHERE channel = ?", (channel,))
channelData = cursor.fetchone()
if channelData == None:
cursor.execute("INSERT INTO channels (channel, commandsEnabled, lastChange) VALUES(?, ?, ?)", (channel, True, int(time.time())))
else:
cursor.execute("UPDATE channels SET commandsEnabled = ?, lastChange = ? WHERE channel = ?", (True, int(time.time()), channel))
return
finally:
lock.release()
else:
channelInstances[channel].setCommandsEnabled(True)
def removeFromCommandsEnabled(channel):
from goldenrod import channelInstances
if channel.startswith("#"):
channel = channel[1:]
if channel not in channelInstances:
try:
lock.acquire(True)
cursor.execute("SELECT * FROM channels WHERE channel = ?", (channel,))
channelData = cursor.fetchone()
if channelData == None:
cursor.execute("INSERT INTO channels (channel, commandsEnabled, lastChange) VALUES(?, ?, ?)", (channel, False, int(time.time())))
else:
cursor.execute("UPDATE channels SET commandsEnabled = ?, lastChange = ? WHERE channel = ?", (False, int(time.time()), channel))
return
finally:
lock.release()
else:
channelInstances[channel].setCommandsEnabled(False)
if __name__ == '__main__':
#initialize logging
#log.startLogging(sys.stdout)
log.startLogging(open('./logs/%d_stdouterr.log' % (time.time()), 'w'))
handler = logging.FileHandler('./logs/%d_messages.log' % (time.time()), "w",
encoding = "UTF-8")
formatter = logging.Formatter("%(asctime)s %(message)s")
handler.setFormatter(formatter)
root_logger = logging.getLogger()
root_logger.addHandler(handler)
root_logger.setLevel(logging.INFO)
reactor.rootLogger = root_logger
commandParser = commandparser.CommandParser()
commandParser.loadCommands()
reactor.commandParser = commandParser
connectToTwitch(config.botNick, commandParser, 0)
connectWhisperer(commandParser, 0)
# setup channel manager
channelManager = channelmanager.ChannelManager(conn, cursor, lock, channelInstances)
cmThread = threading.Thread(target=channelManager.run)
cmThread.daemon = True
cmThread.start()
# run bot
reactor.run()
|
raw.py
|
# Run to collect raw data or graph live data
from data import *
from graphics import start_loop
import threading
import argparse
# Argparse for optional options
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--save', action='store_true', help='saves to data.out')
parser.add_argument('-g', '--graphics', action='store_true', help='displays data live to a graph')
parser.add_argument('-t', '--text', action='store_true', help='print all live data as text to console')
args = parser.parse_args()
graphic_mode = args.graphics
save_mode = args.save
text_mode = args.text
# Load arguments
if graphic_mode:
# Serial reader must be in separate thread, tkinter must be run on main thread
thread = threading.Thread(target=data_loop, args=[text_mode, save_mode, graphic_mode, 10, False])
thread.start()
start_loop()
# graphic_mode will always be False if this executes, and the averaging window should be 1 without graphics
elif text_mode:
data_loop(text_mode, save_mode, False, 1, False)
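# Illustrative invocations (the file name raw.py is taken from the header
# above; data_loop's extra arguments come from data.py and are not shown here):
#   python raw.py --text              # print parsed readings to the console
#   python raw.py --graphics --save   # live graph and append data to data.out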
|
driver.py
|
from multiprocessing import Value
from multiprocessing import Process
class Driver(object):
LINEAR = 'linear'
ANGULAR = 'angular'
#STEP
LINEAR_STEP_SPEED = 0.05
ANGULAR_STEP_SPEED = 0.2
#MAX
MAX_LINEAR_SPEED = 0.5
MAX_ANGULAR_SPEED = 1
def __init__(self):
self.linear = Value('d', 0.0)
self.angular = Value('d', 0.0)
def move_forward(self):
self.linear.value = self.MAX_LINEAR_SPEED
def move_backward(self):
self.linear.value = -self.MAX_LINEAR_SPEED
def move_right(self):
self.angular.value = -self.MAX_ANGULAR_SPEED
def move_left(self):
self.angular.value = self.MAX_ANGULAR_SPEED
def stop(self, direction=None):
if direction == self.LINEAR:
self.linear.value = 0
elif direction == self.ANGULAR:
self.angular.value = 0
else:
self.angular.value = 0
self.linear.value = 0
def start(self):
self.publisher_process = Process(target=self.publish_message, args=())
self.publisher_process.start()
@staticmethod
def acceleration(current_speed, desired_speed, step_speed):
if current_speed == desired_speed:
pass
elif current_speed < desired_speed:
current_speed += step_speed
if current_speed > desired_speed:
current_speed = desired_speed
elif current_speed > desired_speed:
current_speed -= step_speed
if current_speed < desired_speed:
current_speed = desired_speed
return current_speed
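# Worked examples of the ramp above with LINEAR_STEP_SPEED = 0.05
# (pure arithmetic, no ROS required; results are approximate floats):
#   Driver.acceleration(0.0,  0.5, 0.05) -> 0.05   # step towards the target
#   Driver.acceleration(0.48, 0.5, 0.05) -> 0.5    # clamped at the target
#   Driver.acceleration(0.5,  0.0, 0.05) -> ~0.45  # decelerating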
def publish_message(self):
import rospy
from rospy import Publisher
from geometry_msgs.msg import Twist
rospy.init_node('move')
rospy.loginfo("About to be moving!")
publisher = Publisher('mobile_base/commands/velocity', Twist)
twist = Twist()
while True:
rospy.loginfo("Current/Desired/Step Linear Speed: " + str(twist.linear.x) + "/" + str(self.linear.value) +"/"+ str(self.LINEAR_STEP_SPEED))
rospy.loginfo("Current/Desired/Step Angular Speed: " + str(twist.angular.z) + "/" + str(self.angular.value) +"/"+ str(self.ANGULAR_STEP_SPEED))
twist.linear.x = Driver.acceleration(current_speed=twist.linear.x, desired_speed=self.linear.value, step_speed=self.LINEAR_STEP_SPEED)
twist.angular.z = Driver.acceleration(current_speed=twist.angular.z, desired_speed=self.angular.value, step_speed=self.ANGULAR_STEP_SPEED)
publisher.publish(twist)
rospy.sleep(0.1)
|
unit_tests.py
|
#! /bin/python
import time
import multiprocessing
import unittest
import unittest.mock
import os
import signal
import logging
import psutil
import pynisher
try:
import sklearn # noqa
is_sklearn_available = True
except ImportError:
print("Scikit Learn was not found!")
is_sklearn_available = False
all_tests = 1
logger = multiprocessing.log_to_stderr()
logger.setLevel(logging.WARNING)
# TODO: add tests with large return value to test for deadlock!
def rogue_subprocess():
pid = os.getpid()
oldgrp = os.getpgrp()
os.setpgrp()
logger.debug("{}: Changed group id from {} to {}".format(pid, oldgrp, os.getpgrp()))
time.sleep(60)
def spawn_rogue_subprocess(num_procs=5):
for i in range(num_procs):
p = multiprocessing.Process(target=rogue_subprocess, daemon=False)
p.start()
p = psutil.Process()
time.sleep(10)
def simulate_work(size_in_mb, wall_time_in_s, num_processes, **kwargs):
# allocate memory (size_in_mb) with an array
# note the actual size in memory of this process is a little bit larger
A = [42.] * ((1024 * size_in_mb) // 8) # noqa
# try to spawn new processes
if (num_processes > 0):
# data parallelism
multiprocessing.Pool(num_processes)
# sleep for specified duration
time.sleep(wall_time_in_s + 1)
return (size_in_mb, wall_time_in_s, num_processes)
def svm_example(n_samples=10000, n_features=100):
from sklearn.svm import SVR
from sklearn.datasets import make_regression
X, Y = make_regression(n_samples, n_features)
m = SVR()
m.fit(X, Y)
def svc_example(n_samples=10000, n_features=4):
from sklearn.svm import SVC
from sklearn.datasets import make_classification
X, Y = make_classification(n_samples, n_features)
# pp = PolynomialFeatures(degree=3)
# X = pp.fit_transform(X)
m = SVC()
m.fit(X, Y)
def crash_unexpectedly(signum):
print("going to receive signal {}.".format(signum))
pid = os.getpid()
time.sleep(1)
os.kill(pid, signum)
time.sleep(1)
def crash_with_exception(exception):
print("going to raise {}.".format(exception))
raise exception
def return_big_array(num_elements):
return ([1] * num_elements)
def cpu_usage():
i = 1
while True:
i += 1
def nested_pynisher(level=2, cputime=5, walltime=5, memlimit=10e24, increment=-1, grace_period=1):
print("this is level {}".format(level))
if level == 0:
spawn_rogue_subprocess(10)
else:
func = pynisher.enforce_limits(mem_in_mb=memlimit, cpu_time_in_s=cputime, wall_time_in_s=walltime,
grace_period_in_s=grace_period)(nested_pynisher)
func(level - 1, None, walltime + increment, memlimit, increment)
class test_limit_resources_module(unittest.TestCase):
@unittest.skipIf(not all_tests, "skipping successful tests")
def test_success(self):
print("Testing unbounded function call which have to run through!")
local_mem_in_mb = None
local_wall_time_in_s = None
local_cpu_time_in_s = None
local_grace_period = None
wrapped_function = pynisher.enforce_limits(mem_in_mb=local_mem_in_mb, wall_time_in_s=local_wall_time_in_s,
cpu_time_in_s=local_cpu_time_in_s,
grace_period_in_s=local_grace_period)(simulate_work)
for mem in [1, 2, 4, 8, 16]:
self.assertEqual((mem, 0, 0), wrapped_function(mem, 0, 0))
self.assertEqual(wrapped_function.exit_status, 0)
self.assertEqual(wrapped_function.exitcode, 0)
@unittest.skipIf(not all_tests, "skipping out_of_memory test")
def test_out_of_memory(self):
print("Testing memory constraint.")
local_mem_in_mb = 32
local_wall_time_in_s = None
local_cpu_time_in_s = None
local_grace_period = None
wrapped_function = pynisher.enforce_limits(mem_in_mb=local_mem_in_mb, wall_time_in_s=local_wall_time_in_s,
cpu_time_in_s=local_cpu_time_in_s,
grace_period_in_s=local_grace_period)(simulate_work)
for mem in [1024, 2048, 4096]:
self.assertIsNone(wrapped_function(mem, 0, 0))
self.assertEqual(wrapped_function.exit_status, pynisher.MemorylimitException)
self.assertEqual(wrapped_function.exitcode, 0)
@unittest.skipIf(not all_tests, "skipping time_out test")
def test_time_out(self):
print("Testing wall clock time constraint.")
local_mem_in_mb = None
local_wall_time_in_s = 1
local_cpu_time_in_s = None
local_grace_period = None
wrapped_function = pynisher.enforce_limits(mem_in_mb=local_mem_in_mb, wall_time_in_s=local_wall_time_in_s,
cpu_time_in_s=local_cpu_time_in_s,
grace_period_in_s=local_grace_period)(simulate_work)
for mem in range(1, 10):
self.assertIsNone(wrapped_function(mem, 10, 0))
self.assertEqual(wrapped_function.exit_status, pynisher.TimeoutException, str(wrapped_function.result))
# Apparently, the exit code here is not deterministic (so far only Python 3.6)
self.assertIn(wrapped_function.exitcode, (-15, 0))
@unittest.skipIf(not all_tests, "skipping too many processes test")
def test_num_processes(self):
print("Testing number of processes constraint.")
local_mem_in_mb = None
local_num_processes = 1
local_wall_time_in_s = None
local_grace_period = None
wrapped_function = pynisher.enforce_limits(mem_in_mb=local_mem_in_mb, wall_time_in_s=local_wall_time_in_s,
num_processes=local_num_processes,
grace_period_in_s=local_grace_period)(simulate_work)
for processes in [2, 15, 50, 100, 250]:
self.assertIsNone(wrapped_function(0, 0, processes))
self.assertEqual(wrapped_function.exit_status, pynisher.SubprocessException)
self.assertEqual(wrapped_function.exitcode, 0)
@unittest.skipIf(not all_tests, "skipping unexpected signal test")
def test_crash_unexpectedly(self):
print("Testing an unexpected signal simulating a crash.")
wrapped_function = pynisher.enforce_limits()(crash_unexpectedly)
self.assertIsNone(wrapped_function(signal.SIGQUIT))
self.assertEqual(wrapped_function.exit_status, pynisher.SignalException)
self.assertEqual(wrapped_function.exitcode, 0)
@unittest.skipIf(not all_tests, "skipping unexpected signal test")
def test_high_cpu_percentage(self):
print("Testing cpu time constraint.")
cpu_time_in_s = 2
grace_period = 1
wrapped_function = pynisher.enforce_limits(cpu_time_in_s=cpu_time_in_s, grace_period_in_s=grace_period)(
cpu_usage)
self.assertIsNone(wrapped_function())
self.assertEqual(wrapped_function.exit_status, pynisher.CpuTimeoutException)
self.assertEqual(wrapped_function.exitcode, 0)
@unittest.skipIf(not all_tests, "skipping big data test")
def test_big_return_data(self):
print("Testing big return values")
wrapped_function = pynisher.enforce_limits()(return_big_array)
for num_elements in [4, 16, 64, 256, 1024, 4096, 16384, 65536, 262144]:
bla = wrapped_function(num_elements)
self.assertEqual(len(bla), num_elements)
self.assertEqual(wrapped_function.exitcode, 0)
@unittest.skipIf(not all_tests, "skipping subprocess changing process group")
def test_kill_subprocesses(self):
wrapped_function = pynisher.enforce_limits(wall_time_in_s=1)(spawn_rogue_subprocess)
wrapped_function(5)
time.sleep(1)
p = psutil.Process()
self.assertEqual(len(p.children(recursive=True)), 0)
self.assertEqual(wrapped_function.exitcode, -15)
@unittest.skipIf(not is_sklearn_available, "test requires scikit learn")
@unittest.skipIf(not all_tests, "skipping fitting an SVM to see how C libraries are handled")
def test_busy_in_C_library(self):
global logger
wrapped_function = pynisher.enforce_limits(wall_time_in_s=2)(svm_example)
start = time.time()
wrapped_function(16384, 128)
duration = time.time() - start
time.sleep(1)
p = psutil.Process()
self.assertEqual(len(p.children(recursive=True)), 0)
self.assertTrue(duration <= 2.1)
self.assertEqual(wrapped_function.exitcode, -15)
self.assertLess(duration, 2.1)
@unittest.skipIf(not is_sklearn_available, "test requires scikit learn")
@unittest.skipIf(not all_tests, "skipping fitting an SVM to see how C libraries are handled")
def test_liblinear_svc(self):
global logger
time_limit = 2
grace_period = 1
logger_mock = unittest.mock.Mock()
wrapped_function = pynisher.enforce_limits(cpu_time_in_s=time_limit, mem_in_mb=None,
grace_period_in_s=grace_period, logger=logger)
wrapped_function.logger = logger_mock
wrapped_function = wrapped_function(svc_example)
start = time.time()
wrapped_function(16384, 10000)
duration = time.time() - start
time.sleep(1)
p = psutil.Process()
self.assertEqual(len(p.children(recursive=True)), 0)
self.assertEqual(logger_mock.debug.call_count, 2)
self.assertEqual(logger_mock.debug.call_args_list[0][0][0],
'Function called with argument: (16384, 10000), {}')
self.assertEqual(logger_mock.debug.call_args_list[1][0][0],
'Your function call closed the pipe prematurely -> '
'Subprocess probably got an uncatchable signal.')
# self.assertEqual(wrapped_function.exit_status, pynisher.CpuTimeoutException)
self.assertGreater(duration, time_limit - 0.1)
self.assertLess(duration, time_limit + grace_period + 0.1)
self.assertEqual(wrapped_function.exitcode, -9)
@unittest.skipIf(not all_tests, "skipping nested pynisher test")
def test_nesting(self):
tl = 2 # time limit
gp = 1 # grace period
start = time.time()
nested_pynisher(level=2, cputime=2, walltime=2, memlimit=None, increment=1, grace_period=gp)
duration = time.time() - start
print(duration)
time.sleep(1)
p = psutil.Process()
self.assertEqual(len(p.children(recursive=True)), 0)
self.assertGreater(duration, tl - 0.1)
self.assertLess(duration, tl + gp + 0.1)
@unittest.skipIf(not all_tests, "skipping capture stdout test")
def test_capture_output(self):
print("Testing capturing of output.")
global logger
time_limit = 2
grace_period = 1
def print_and_sleep(t):
for i in range(t):
print(i)
time.sleep(1)
wrapped_function = pynisher.enforce_limits(wall_time_in_s=time_limit, mem_in_mb=None,
grace_period_in_s=grace_period, logger=logger, capture_output=True)(
print_and_sleep)
wrapped_function(5)
self.assertTrue('0' in wrapped_function.stdout)
self.assertEqual(wrapped_function.stderr, '')
self.assertEqual(wrapped_function.exitcode, 0)
def print_and_fail():
print(0)
raise RuntimeError()
wrapped_function = pynisher.enforce_limits(wall_time_in_s=time_limit, mem_in_mb=None,
grace_period_in_s=grace_period, logger=logger, capture_output=True)(
print_and_fail)
wrapped_function()
self.assertIn('0', wrapped_function.stdout)
self.assertIn('RuntimeError', wrapped_function.stderr)
self.assertEqual(wrapped_function.exitcode, 1)
def test_too_little_memory(self):
# Test what happens if the target process does not have a sufficiently high memory limit
# 2048 MB
dummy_content = [42.] * ((1024 * 2048) // 8) # noqa
wrapped_function = pynisher.enforce_limits(mem_in_mb=1)(simulate_work)
wrapped_function(size_in_mb=1000, wall_time_in_s=10, num_processes=1,
dummy_content=dummy_content)
self.assertIsNone(wrapped_function.result)
# The following is a bit weird, on my local machine I get a SubprocessException, but on
# travis-ci I get a MemoryLimitException
self.assertIn(wrapped_function.exit_status,
(pynisher.SubprocessException, pynisher.MemorylimitException))
# This is triggered on my local machine, but not on travis-ci
if wrapped_function.exit_status == pynisher.SubprocessException:
self.assertEqual(wrapped_function.os_errno, 12)
self.assertEqual(wrapped_function.exitcode, 0)
def test_raise(self):
# As above test does not reliably work on travis-ci, this test checks whether an
# OSError's error code is properly read out
wrapped_function = pynisher.enforce_limits(mem_in_mb=1000)(crash_with_exception)
wrapped_function.logger = unittest.mock.Mock()
error = OSError()
error.errno = 12
wrapped_function(error)
self.assertIsNone(wrapped_function.result)
self.assertEqual(wrapped_function.exit_status, pynisher.SubprocessException)
if wrapped_function.exit_status == pynisher.SubprocessException:
self.assertEqual(wrapped_function.os_errno, 12)
self.assertEqual(wrapped_function.exitcode, 0)
if __name__ == '__main__':
unittest.main()
|
model.py
|
import json
from threading import Thread
import time
from typing import Any, Dict, List, FrozenSet, Set, Union, Optional
import urwid
from zulipterminal.helper import (
asynch,
classify_unread_counts,
index_messages,
set_count
)
from zulipterminal.ui_tools.utils import create_msg_box_list
class Model:
"""
A class responsible for storing the data to be displayed.
"""
def __init__(self, controller: Any) -> None:
self.controller = controller
self.client = controller.client
# Get message after registering to the queue.
self.msg_view = None # type: Any
self.anchor = 0
self.num_before = 30
self.num_after = 10
self.msg_list = None # type: Any
self.narrow = [] # type: List[Any]
self.update = False
self.stream_id = -1
self.stream_dict = {} # type: Dict[int, Any]
self.recipients = frozenset() # type: FrozenSet[Any]
self.index = None # type: Any
self.user_id = -1 # type: int
self.initial_data = {} # type: Dict[str, Any]
self._update_user_id()
self._update_initial_data()
self.users = self.get_all_users()
self.muted_streams = list() # type: List[int]
self.streams = self.get_subscribed_streams()
self.muted_topics = self.initial_data['muted_topics']
self.unread_counts = classify_unread_counts(self)
self.new_user_input = True
self.update_presence()
@asynch
def _update_user_id(self) -> None:
self.user_id = self.client.get_profile()['user_id']
def _update_realm_users(self) -> None:
self.initial_data['realm_users'] = self.client.get_members(
request={
'client_gravatar': True,
}
)['members']
def get_focus_in_current_narrow(self) -> Union[int, Set[None]]:
"""
Returns the focus in the current narrow.
If no focus exists yet this returns {}, otherwise the message ID.
"""
return self.index['pointer'][str(self.narrow)]
def set_focus_in_current_narrow(self, focus_message: int) -> None:
self.index['pointer'][str(self.narrow)] = focus_message
def set_narrow(self, *,
stream: Optional[str]=None, topic: Optional[str]=None,
search: Optional[str]=None,
pm_with: Optional[str]=None) -> bool:
if search and not(stream or topic or pm_with):
new_narrow = [['search', search]]
elif stream and topic and not(search or pm_with):
new_narrow = [["stream", stream],
["topic", topic]]
elif stream and not(topic or search or pm_with):
new_narrow = [['stream', stream]]
elif pm_with == '' and not(stream or topic or search):
new_narrow = [['is', 'private']]
elif pm_with and not(stream or topic or search):
new_narrow = [['pm_with', pm_with]]
elif not stream and not topic and not search and not pm_with:
new_narrow = []
else:
raise RuntimeError("Model.set_narrow parameters used incorrectly.")
if new_narrow != self.narrow:
self.narrow = new_narrow
return False
else:
return True
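# Illustrative calls (stream and topic names are made up):
#   set_narrow(stream='general')                    -> narrow [['stream', 'general']]
#   set_narrow(stream='general', topic='greetings') -> narrow [['stream', 'general'], ['topic', 'greetings']]
#   set_narrow(pm_with='')                          -> narrow [['is', 'private']]
# Note the return value: False when the narrow changed, True when it already
# matched the requested narrow.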
def get_message_ids_in_current_narrow(self) -> Set[int]:
narrow = self.narrow
if narrow == []:
current_ids = self.index['all_messages']
elif narrow[0][0] == 'stream':
stream_id = self.stream_id
if len(narrow) == 1:
current_ids = self.index['all_stream'][stream_id]
elif len(narrow) == 2:
topic = narrow[1][1]
current_ids = self.index['stream'][stream_id][topic]
elif narrow[0][1] == 'private':
current_ids = self.index['all_private']
elif narrow[0][0] == 'pm_with':
recipients = self.recipients
current_ids = self.index['private'][recipients]
elif narrow[0][0] == 'search':
current_ids = self.index['search']
return current_ids.copy()
@asynch
def update_presence(self) -> None:
# TODO: update response in user list.
response = self.client.call_endpoint(
url='users/me/presence',
request={
'status': 'active',
'new_user_input': self.new_user_input,
}
)
self.new_user_input = False
time.sleep(60)
self.update_presence()
@asynch
def react_to_message(self,
message: Dict[str, Any],
reaction_to_toggle: str) -> None:
# FIXME Only support thumbs_up for now
assert reaction_to_toggle == 'thumbs_up'
endpoint = 'messages/{}/reactions'.format(message['id'])
reaction_to_toggle_spec = dict(
emoji_name='thumbs_up',
reaction_type='unicode_emoji',
emoji_code='1f44d')
existing_reactions = [reaction['emoji_code']
for reaction in message['reactions']
if ('user_id' in reaction['user'] and
reaction['user']['user_id'] == self.user_id)]
if reaction_to_toggle_spec['emoji_code'] in existing_reactions:
method = 'DELETE'
else:
method = 'POST'
response = self.client.call_endpoint(url=endpoint,
method=method,
request=reaction_to_toggle_spec)
def get_messages(self, first_anchor: bool) -> Any:
request = {
'anchor': self.anchor,
'num_before': self.num_before,
'num_after': self.num_after,
'apply_markdown': True,
'use_first_unread_anchor': first_anchor,
'client_gravatar': False,
'narrow': json.dumps(self.narrow),
}
response = self.client.do_api_query(request, '/json/messages',
method="GET")
if response['result'] == 'success':
self.index = index_messages(response['messages'], self, self.index)
if first_anchor:
self.index[str(self.narrow)] = response['anchor']
query_range = self.num_after + self.num_before + 1
if len(response['messages']) < (query_range):
self.update = True
return self.index
def _update_initial_data(self) -> None:
try:
            # Run these requests in threads to reduce start-up time.
get_messages = Thread(target=self.get_messages,
kwargs={'first_anchor': True})
get_messages.start()
update_realm_users = Thread(target=self._update_realm_users)
update_realm_users.start()
result = self.client.register(
fetch_event_types=[
'presence',
'subscription',
'message',
'update_message_flags',
'muted_topics',
],
client_gravatar=True,
)
self.initial_data.update(result)
            # Join the threads to ensure both requests have completed.
update_realm_users.join()
get_messages.join()
except Exception:
print("Invalid API key")
raise urwid.ExitMainLoop()
def get_all_users(self) -> List[Dict[str, Any]]:
# Dict which stores the active/idle status of users (by email)
presences = self.initial_data['presences']
# Construct a dict of each user in the realm to look up by email
# and a user-id to email mapping
self.user_dict = dict() # type: Dict[str, Dict[str, Any]]
self.user_id_email_dict = dict() # type: Dict[int, str]
for user in self.initial_data['realm_users']:
email = user['email']
if email in presences: # presences currently subset of all users
status = presences[email]['aggregated']['status']
else:
# TODO: Consider if bots & other no-presence results should
# also really be treated as 'idle' and adjust accordingly
status = 'idle'
self.user_dict[email] = {
'full_name': user['full_name'],
'email': email,
'user_id': user['user_id'],
'status': status,
}
self.user_id_email_dict[user['user_id']] = email
# Generate filtered lists for active & idle users
active = [properties for properties in self.user_dict.values()
if properties['status'] == 'active']
idle = [properties for properties in self.user_dict.values()
if properties['status'] == 'idle']
# Construct user_list from sorted components of each list
user_list = sorted(active, key=lambda u: u['full_name'])
user_list += sorted(idle, key=lambda u: u['full_name'])
return user_list
def get_subscribed_streams(self) -> List[List[str]]:
subscriptions = self.initial_data['subscriptions']
# Store streams in id->Stream format
for stream in subscriptions:
self.stream_dict[stream['stream_id']] = stream
# Add if stream is muted.
if stream['in_home_view'] is False:
self.muted_streams.append(stream['stream_id'])
stream_names = [[
stream['name'],
stream['stream_id'],
stream['color'],
stream['invite_only'],
] for stream in subscriptions
]
return sorted(stream_names, key=lambda s: s[0].lower())
def append_message(self, response: Dict[str, Any]) -> None:
"""
Adds message to the end of the view.
"""
response['flags'] = []
if hasattr(self.controller, 'view') and self.update:
self.index = index_messages([response], self, self.index)
msg_w_list = create_msg_box_list(self, [response['id']])
if not msg_w_list:
return
else:
msg_w = msg_w_list[0]
if not self.narrow:
self.msg_list.log.append(msg_w)
elif self.narrow[0][1] == response['type'] and\
len(self.narrow) == 1:
self.msg_list.log.append(msg_w)
elif response['type'] == 'stream' and len(self.narrow) == 2 and\
self.narrow[1][1] == response['subject']:
self.msg_list.log.append(msg_w)
elif response['type'] == 'private' and len(self.narrow) == 1 and\
self.narrow[0][0] == "pm_with":
recipients = self.recipients
msg_recipients = frozenset([
self.user_id,
self.user_dict[self.narrow[0][1]]['user_id']
])
if recipients == msg_recipients:
self.msg_list.log.append(msg_w)
set_count([response['id']], self.controller, 1)
self.controller.update_screen()
def update_message(self, response: Dict[str, Any]) -> None:
"""
Updates previously rendered message.
"""
message_id = response['message_id']
content = response['content']
# If the message is indexed
if self.index['messages'][message_id] != {}:
message = self.index['messages'][message_id]
message['content'] = content
self.index['messages'][message_id] = message
self.update_rendered_view(message_id)
def update_reaction(self, response: Dict[str, Any]) -> None:
message_id = response['message_id']
# If the message is indexed
if self.index['messages'][message_id] != {}:
message = self.index['messages'][message_id]
if response['op'] == 'add':
message['reactions'].append(
{
'user': response['user'],
'reaction_type': response['reaction_type'],
'emoji_code': response['emoji_code'],
'emoji_name': response['emoji_name'],
}
)
else:
emoji_code = response['emoji_code']
for reaction in message['reactions']:
                    # Since who reacted is not displayed,
                    # remove the first matching reaction encountered
if reaction['emoji_code'] == emoji_code:
message['reactions'].remove(reaction)
self.index['messages'][message_id] = message
self.update_rendered_view(message_id)
def update_rendered_view(self, msg_id: int) -> None:
# Update new content in the rendered view
for msg_w in self.msg_list.log:
if msg_w.original_widget.message['id'] == msg_id:
msg_w_list = create_msg_box_list(self, [msg_id])
if not msg_w_list:
return
else:
new_msg_w = msg_w_list[0]
msg_pos = self.msg_list.log.index(msg_w)
self.msg_list.log[msg_pos] = new_msg_w
self.controller.update_screen()
@asynch
def poll_for_events(self) -> None:
queue_id = self.controller.queue_id
last_event_id = self.controller.last_event_id
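        # Long-poll the server's event queue; if the queue has expired the
        # server returns an error and a fresh queue is registered on the next
        # pass through the loop.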
while True:
if queue_id is None:
self.controller.register_initial_desired_events()
queue_id = self.controller.queue_id
last_event_id = self.controller.last_event_id
response = self.client.get_events(
queue_id=queue_id,
last_event_id=last_event_id
)
if 'error' in response['result']:
if response["msg"].startswith("Bad event queue id:"):
# Our event queue went away, probably because
# we were asleep or the server restarted
# abnormally. We may have missed some
# events while the network was down or
# something, but there's not really anything
# we can do about it other than resuming
# getting new ones.
#
# Reset queue_id to register a new event queue.
queue_id = None
time.sleep(1)
continue
for event in response['events']:
last_event_id = max(last_event_id, int(event['id']))
if event['type'] == 'message':
self.append_message(event['message'])
elif event['type'] == 'update_message':
# FIXME: Support Topic Editing
if 'subject' in event.keys():
continue
else:
self.update_message(event)
elif event['type'] == 'reaction':
self.update_reaction(event)
elif event['type'] == 'typing':
if hasattr(self.controller, 'view'):
self.controller.view.handle_typing_event(event)
|
simplesubscribe.py
|
# coding=utf-8
import zmq
import threading
import uuid
from google.protobuf.message import DecodeError
from fysom import Fysom
import machinetalk.protobuf.types_pb2 as pb
from machinetalk.protobuf.message_pb2 import Container
class SimpleSubscribe(object):
def __init__(self, debuglevel=0, debugname='Simple Subscribe'):
self.debuglevel = debuglevel
self.debugname = debugname
self._error_string = ''
self.on_error_string_changed = []
# ZeroMQ
context = zmq.Context()
context.linger = 0
self._context = context
        # pipe to signal a shutdown
self._shutdown = context.socket(zmq.PUSH)
self._shutdown_uri = b'inproc://shutdown-%s' % str(uuid.uuid4()).encode()
self._shutdown.bind(self._shutdown_uri)
        self._thread = None  # socket worker thread
self._tx_lock = threading.Lock() # lock for outgoing messages
# Socket
self.socket_uri = ''
self._socket_topics = set()
# more efficient to reuse protobuf messages
self._socket_rx = Container()
# callbacks
self.on_socket_message_received = []
self.on_state_changed = []
# fsm
self._fsm = Fysom(
{
'initial': 'down',
'events': [
{'name': 'start', 'src': 'down', 'dst': 'up'},
{'name': 'any_msg_received', 'src': 'up', 'dst': 'up'},
{'name': 'stop', 'src': 'up', 'dst': 'down'},
],
}
)
self._fsm.ondown = self._on_fsm_down
self._fsm.onafterstart = self._on_fsm_start
self._fsm.onup = self._on_fsm_up
self._fsm.onafterany_msg_received = self._on_fsm_any_msg_received
self._fsm.onafterstop = self._on_fsm_stop
def _on_fsm_down(self, _):
if self.debuglevel > 0:
print('[%s]: state DOWN' % self.debugname)
for cb in self.on_state_changed:
cb('down')
return True
def _on_fsm_start(self, _):
if self.debuglevel > 0:
print('[%s]: event START' % self.debugname)
self.start_socket()
return True
def _on_fsm_up(self, _):
if self.debuglevel > 0:
print('[%s]: state UP' % self.debugname)
for cb in self.on_state_changed:
cb('up')
return True
def _on_fsm_any_msg_received(self, _):
if self.debuglevel > 0:
print('[%s]: event ANY MSG RECEIVED' % self.debugname)
return True
def _on_fsm_stop(self, _):
if self.debuglevel > 0:
print('[%s]: event STOP' % self.debugname)
self.stop_socket()
return True
@property
def error_string(self):
return self._error_string
@error_string.setter
def error_string(self, string):
if self._error_string is string:
return
self._error_string = string
for cb in self.on_error_string_changed:
cb(string)
def start(self):
if self._fsm.isstate('down'):
self._fsm.start()
def stop(self):
if self._fsm.isstate('up'):
self._fsm.stop()
def add_socket_topic(self, name):
self._socket_topics.add(name)
def remove_socket_topic(self, name):
self._socket_topics.remove(name)
def clear_socket_topics(self):
self._socket_topics.clear()
def _socket_worker(self, context, uri):
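        # Worker loop: poll both the SUB socket and the in-process shutdown
        # PULL socket; dispatch incoming messages until a shutdown byte arrives.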
poll = zmq.Poller()
socket = context.socket(zmq.SUB)
socket.setsockopt(zmq.LINGER, 0)
socket.connect(uri)
poll.register(socket, zmq.POLLIN)
        # subscriptions must be set up when the socket is created
for topic in self._socket_topics:
socket.setsockopt(zmq.SUBSCRIBE, topic.encode())
shutdown = context.socket(zmq.PULL)
shutdown.connect(self._shutdown_uri)
poll.register(shutdown, zmq.POLLIN)
while True:
s = dict(poll.poll())
if shutdown in s:
shutdown.recv()
return # shutdown signal
if socket in s:
self._socket_message_received(socket)
def start_socket(self):
self._thread = threading.Thread(
target=self._socket_worker, args=(self._context, self.socket_uri)
)
self._thread.start()
def stop_socket(self):
self._shutdown.send(b' ') # trigger socket thread shutdown
self._thread = None
# process all messages received on socket
def _socket_message_received(self, socket):
(identity, msg) = socket.recv_multipart() # identity is topic
try:
self._socket_rx.ParseFromString(msg)
except DecodeError as e:
note = 'Protobuf Decode Error: ' + str(e)
print(note) # TODO: decode error
return
if self.debuglevel > 0:
print('[%s] received message' % self.debugname)
if self.debuglevel > 1:
print(self._socket_rx)
rx = self._socket_rx
# react to any incoming message
if self._fsm.isstate('up'):
self._fsm.any_msg_received()
for cb in self.on_socket_message_received:
cb(identity, rx)
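# A minimal usage sketch (hedged: the URI and the 'status' topic below are
# illustrative assumptions, not values defined by this module):
#
#   sub = SimpleSubscribe(debuglevel=1)
#   sub.socket_uri = 'tcp://127.0.0.1:5600'
#   sub.add_socket_topic('status')
#   sub.on_socket_message_received.append(lambda topic, rx: print(topic, rx))
#   sub.start()   # spawns the worker thread and subscribes to the topics
#   ...
#   sub.stop()    # signals the worker thread to shut down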
|
server_env.py
|
import gym
from gym import spaces
import numpy as np
import asyncio
import websockets
from multiprocessing import Pipe, Process
from threading import Thread
import json
import subprocess
class ServerEnv(gym.Env):
def __init__(self, serverIP='127.0.0.1', serverPort='8000',
exeCmd=None, action_space=None, observation_space=None, proc_mode='process',
window_render=False, env_fps=60, renderPath='./render_frames/'):
# Server url
self.serverIP = serverIP
self.serverPort = serverPort
# Action & observation space
self.action_space = action_space
self.observation_space = observation_space
# Render frames path
self.renderPath = renderPath
# Pipe to send/get msg from the server process
self.parent_conn, self.child_conn = Pipe()
# Start the websocket server process
        assert proc_mode in ('thread', 'process')
        print('- starting Gym server')
        if proc_mode == 'thread':
            self.p = Thread(target=self._start_server)
        elif proc_mode == 'process':
            self.p = Process(target=self._start_server)
self.p.start()
# Start the simulation
physic_delta_flag = ' --fixed-fps {}'.format(env_fps)
render_loop_flag = ' --disable-render-loop' if not window_render else ''
server_ip_flag = ' --serverIP={}'.format(serverIP)
server_port_flag = ' --serverPort={}'.format(serverPort)
render_path_flag = ' --renderPath={}'.format(renderPath)
flags = physic_delta_flag + render_loop_flag + server_ip_flag + server_port_flag + render_path_flag
print('- starting Godot env with command : ' + exeCmd + flags)
subprocess.Popen([exeCmd + flags], shell=True)
def _start_server(self):
# Run server logic on process loop
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
ws_server = websockets.serve(self._server_handler, self.serverIP, self.serverPort)
self.loop.run_until_complete(ws_server)
self.loop.run_forever()
async def _server_handler(self, websocket, path):
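        # Bridge between the Pipe and the websocket: forward each command from
        # the main process to the Godot client, then relay its answer back.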
while True:
# Block until there is a msg to send and read it
msg = self.child_conn.recv()
# Wait for the msg to be sent
await websocket.send(json.dumps(msg))
# If msg is not a 'close' msg then wait for the answer, otherwise stop the server
            if msg['cmd'] != 'close':
try:
answer = await websocket.recv()
except:
print('- connection ended')
break
# Parse answer
answer = json.loads(answer)
# Send the answer back to main process
self.child_conn.send(answer)
            else:
break
self.loop.call_soon_threadsafe(self.loop.stop)
def _sendAndGetAnswer(self, msg):
# Send msg to server process
self.parent_conn.send(msg)
# Block until answer available
return self.parent_conn.recv()
def reset(self):
# Send reset msg and return initial observation
answer = self._sendAndGetAnswer({'cmd': 'reset'})
return np.array(answer['init_observation']).astype(np.float32)
def step(self, action):
# Send action msg and return current obs, reward and isDone
if isinstance(self.action_space, spaces.Discrete):
action = np.asarray([action]) # Handle discrete space
answer = self._sendAndGetAnswer(
{'cmd': 'step', 'action': action.tolist()})
observation_np = np.array(answer['observation']).astype(np.float32)
return observation_np, answer['reward'], answer['done'], {}
def close(self):
# Send close msg
self.parent_conn.send({'cmd': 'close'})
# Wait for server to close
self.p.join()
print('- server closed')
def render(self, mode=''):
# Send render msg
answer = self._sendAndGetAnswer({'cmd': 'render'})
# Report Godot render error if any
if answer['render_error'] != '0':
print('Error while saving render : ' + answer['render_error'])
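# A minimal usage sketch (hedged: 'godot_game.x86_64' and the spaces below are
# illustrative assumptions about the Godot-side setup, not part of this module):
#
#   env = ServerEnv(exeCmd='./godot_game.x86_64',
#                   action_space=spaces.Discrete(2),
#                   observation_space=spaces.Box(-1.0, 1.0, shape=(4,)))
#   obs = env.reset()
#   obs, reward, done, info = env.step(env.action_space.sample())
#   env.close()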
|
test_smtplib.py
|
import asyncore
import base64
import email.mime.text
from email.message import EmailMessage
from email.base64mime import body_encode as encode_base64
import email.utils
import hmac
import socket
import smtpd
import smtplib
import io
import re
import sys
import time
import select
import errno
import textwrap
import threading
import unittest
from test import support, mock_socket
from test.support import HOST, HOSTv4, HOSTv6
from unittest.mock import Mock
if sys.platform == 'darwin':
# select.poll returns a select.POLLHUP at the end of the tests
# on darwin, so just ignore it
def handle_expt(self):
pass
smtpd.SMTPChannel.handle_expt = handle_expt
def server(evt, buf, serv):
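    # Helper run in a background thread: accept a single connection and write
    # out `buf` in chunks, signalling readiness/completion via `evt`.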
serv.listen()
evt.set()
try:
conn, addr = serv.accept()
except socket.timeout:
pass
else:
n = 500
while buf and n > 0:
r, w, e = select.select([], [conn], [])
if w:
sent = conn.send(buf)
buf = buf[sent:]
n -= 1
conn.close()
finally:
serv.close()
evt.set()
class GeneralTests(unittest.TestCase):
def setUp(self):
smtplib.socket = mock_socket
self.port = 25
def tearDown(self):
smtplib.socket = socket
# This method is no longer used but is retained for backward compatibility,
# so test to make sure it still works.
def testQuoteData(self):
teststr = "abc\n.jkl\rfoo\r\n..blue"
expected = "abc\r\n..jkl\r\nfoo\r\n...blue"
self.assertEqual(expected, smtplib.quotedata(teststr))
def testBasic1(self):
mock_socket.reply_with(b"220 Hola mundo")
# connects
smtp = smtplib.SMTP(HOST, self.port)
smtp.close()
def testSourceAddress(self):
mock_socket.reply_with(b"220 Hola mundo")
# connects
smtp = smtplib.SMTP(HOST, self.port,
source_address=('127.0.0.1',19876))
self.assertEqual(smtp.source_address, ('127.0.0.1', 19876))
smtp.close()
def testBasic2(self):
mock_socket.reply_with(b"220 Hola mundo")
# connects, include port in host name
smtp = smtplib.SMTP("%s:%s" % (HOST, self.port))
smtp.close()
def testLocalHostName(self):
mock_socket.reply_with(b"220 Hola mundo")
# check that supplied local_hostname is used
smtp = smtplib.SMTP(HOST, self.port, local_hostname="testhost")
self.assertEqual(smtp.local_hostname, "testhost")
smtp.close()
def testTimeoutDefault(self):
mock_socket.reply_with(b"220 Hola mundo")
self.assertIsNone(mock_socket.getdefaulttimeout())
mock_socket.setdefaulttimeout(30)
self.assertEqual(mock_socket.getdefaulttimeout(), 30)
try:
smtp = smtplib.SMTP(HOST, self.port)
finally:
mock_socket.setdefaulttimeout(None)
self.assertEqual(smtp.sock.gettimeout(), 30)
smtp.close()
def testTimeoutNone(self):
mock_socket.reply_with(b"220 Hola mundo")
self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
smtp = smtplib.SMTP(HOST, self.port, timeout=None)
finally:
socket.setdefaulttimeout(None)
self.assertIsNone(smtp.sock.gettimeout())
smtp.close()
def testTimeoutValue(self):
mock_socket.reply_with(b"220 Hola mundo")
smtp = smtplib.SMTP(HOST, self.port, timeout=30)
self.assertEqual(smtp.sock.gettimeout(), 30)
smtp.close()
def test_debuglevel(self):
mock_socket.reply_with(b"220 Hello world")
smtp = smtplib.SMTP()
smtp.set_debuglevel(1)
with support.captured_stderr() as stderr:
smtp.connect(HOST, self.port)
smtp.close()
expected = re.compile(r"^connect:", re.MULTILINE)
self.assertRegex(stderr.getvalue(), expected)
def test_debuglevel_2(self):
mock_socket.reply_with(b"220 Hello world")
smtp = smtplib.SMTP()
smtp.set_debuglevel(2)
with support.captured_stderr() as stderr:
smtp.connect(HOST, self.port)
smtp.close()
expected = re.compile(r"^\d{2}:\d{2}:\d{2}\.\d{6} connect: ",
re.MULTILINE)
self.assertRegex(stderr.getvalue(), expected)
# Test server thread using the specified SMTP server class
def debugging_server(serv, serv_evt, client_evt):
serv_evt.set()
try:
if hasattr(select, 'poll'):
poll_fun = asyncore.poll2
else:
poll_fun = asyncore.poll
n = 1000
while asyncore.socket_map and n > 0:
poll_fun(0.01, asyncore.socket_map)
# when the client conversation is finished, it will
# set client_evt, and it's then ok to kill the server
if client_evt.is_set():
serv.close()
break
n -= 1
except socket.timeout:
pass
finally:
if not client_evt.is_set():
# allow some time for the client to read the result
time.sleep(0.5)
serv.close()
asyncore.close_all()
serv_evt.set()
MSG_BEGIN = '---------- MESSAGE FOLLOWS ----------\n'
MSG_END = '------------ END MESSAGE ------------\n'
# NOTE: Some SMTP objects in the tests below are created with a non-default
# local_hostname argument to the constructor, since (on some systems) the FQDN
# lookup caused by the default local_hostname sometimes takes so long that the
# test server times out, causing the test to fail.
# Test behavior of smtpd.DebuggingServer
class DebuggingServerTests(unittest.TestCase):
maxDiff = None
def setUp(self):
self.real_getfqdn = socket.getfqdn
socket.getfqdn = mock_socket.getfqdn
# temporarily replace sys.stdout to capture DebuggingServer output
self.old_stdout = sys.stdout
self.output = io.StringIO()
sys.stdout = self.output
self.serv_evt = threading.Event()
self.client_evt = threading.Event()
# Capture SMTPChannel debug output
self.old_DEBUGSTREAM = smtpd.DEBUGSTREAM
smtpd.DEBUGSTREAM = io.StringIO()
# Pick a random unused port by passing 0 for the port number
self.serv = smtpd.DebuggingServer((HOST, 0), ('nowhere', -1),
decode_data=True)
# Keep a note of what server host and port were assigned
self.host, self.port = self.serv.socket.getsockname()[:2]
serv_args = (self.serv, self.serv_evt, self.client_evt)
self.thread = threading.Thread(target=debugging_server, args=serv_args)
self.thread.start()
# wait until server thread has assigned a port number
self.serv_evt.wait()
self.serv_evt.clear()
def tearDown(self):
socket.getfqdn = self.real_getfqdn
# indicate that the client is finished
self.client_evt.set()
# wait for the server thread to terminate
self.serv_evt.wait()
self.thread.join()
# restore sys.stdout
sys.stdout = self.old_stdout
# restore DEBUGSTREAM
smtpd.DEBUGSTREAM.close()
smtpd.DEBUGSTREAM = self.old_DEBUGSTREAM
def get_output_without_xpeer(self):
test_output = self.output.getvalue()
return re.sub(r'(.*?)^X-Peer:\s*\S+\n(.*)', r'\1\2',
test_output, flags=re.MULTILINE|re.DOTALL)
def testBasic(self):
# connect
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.quit()
def testSourceAddress(self):
# connect
src_port = support.find_unused_port()
try:
smtp = smtplib.SMTP(self.host, self.port, local_hostname='localhost',
timeout=3, source_address=(self.host, src_port))
self.assertEqual(smtp.source_address, (self.host, src_port))
self.assertEqual(smtp.local_hostname, 'localhost')
smtp.quit()
except OSError as e:
if e.errno == errno.EADDRINUSE:
self.skipTest("couldn't bind to source port %d" % src_port)
raise
def testNOOP(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
expected = (250, b'OK')
self.assertEqual(smtp.noop(), expected)
smtp.quit()
def testRSET(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
expected = (250, b'OK')
self.assertEqual(smtp.rset(), expected)
smtp.quit()
    def testEHLO(self):
# EHLO isn't implemented in DebuggingServer
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
expected = (250, b'\nSIZE 33554432\nHELP')
self.assertEqual(smtp.ehlo(), expected)
smtp.quit()
def testEXPNNotImplemented(self):
# EXPN isn't implemented in DebuggingServer
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
expected = (502, b'EXPN not implemented')
smtp.putcmd('EXPN')
self.assertEqual(smtp.getreply(), expected)
smtp.quit()
def testVRFY(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
expected = (252, b'Cannot VRFY user, but will accept message ' + \
b'and attempt delivery')
self.assertEqual(smtp.vrfy('nobody@nowhere.com'), expected)
self.assertEqual(smtp.verify('nobody@nowhere.com'), expected)
smtp.quit()
def testSecondHELO(self):
# check that a second HELO returns a message that it's a duplicate
# (this behavior is specific to smtpd.SMTPChannel)
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.helo()
expected = (503, b'Duplicate HELO/EHLO')
self.assertEqual(smtp.helo(), expected)
smtp.quit()
def testHELP(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
self.assertEqual(smtp.help(), b'Supported commands: EHLO HELO MAIL ' + \
b'RCPT DATA RSET NOOP QUIT VRFY')
smtp.quit()
def testSend(self):
# connect and send mail
m = 'A test message'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.sendmail('John', 'Sally', m)
# XXX(nnorwitz): this test is flaky and dies with a bad file descriptor
# in asyncore. This sleep might help, but should really be fixed
# properly by using an Event variable.
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
def testSendBinary(self):
m = b'A test message'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.sendmail('John', 'Sally', m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.decode('ascii'), MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
def testSendNeedingDotQuote(self):
# Issue 12283
m = '.A test\n.mes.sage.'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.sendmail('John', 'Sally', m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
def testSendNullSender(self):
m = 'A test message'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.sendmail('<>', 'Sally', m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: <>$", re.MULTILINE)
self.assertRegex(debugout, sender)
def testSendMessage(self):
m = email.mime.text.MIMEText('A test message')
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.send_message(m, from_addr='John', to_addrs='Sally')
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Remove the X-Peer header that DebuggingServer adds as figuring out
# exactly what IP address format is put there is not easy (and
# irrelevant to our test). Typically 127.0.0.1 or ::1, but it is
# not always the same as socket.gethostbyname(HOST). :(
test_output = self.get_output_without_xpeer()
del m['X-Peer']
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(test_output, mexpect)
def testSendMessageWithAddresses(self):
m = email.mime.text.MIMEText('A test message')
m['From'] = 'foo@bar.com'
m['To'] = 'John'
m['CC'] = 'Sally, Fred'
m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <warped@silly.walks.com>'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.send_message(m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
# make sure the Bcc header is still in the message.
self.assertEqual(m['Bcc'], 'John Root <root@localhost>, "Dinsdale" '
'<warped@silly.walks.com>')
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Remove the X-Peer header that DebuggingServer adds.
test_output = self.get_output_without_xpeer()
del m['X-Peer']
# The Bcc header should not be transmitted.
del m['Bcc']
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(test_output, mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: foo@bar.com$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('John', 'Sally', 'Fred', 'root@localhost',
'warped@silly.walks.com'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertRegex(debugout, to_addr)
def testSendMessageWithSomeAddresses(self):
# Make sure nothing breaks if not all of the three 'to' headers exist
m = email.mime.text.MIMEText('A test message')
m['From'] = 'foo@bar.com'
m['To'] = 'John, Dinsdale'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.send_message(m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Remove the X-Peer header that DebuggingServer adds.
test_output = self.get_output_without_xpeer()
del m['X-Peer']
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(test_output, mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: foo@bar.com$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('John', 'Dinsdale'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertRegex(debugout, to_addr)
def testSendMessageWithSpecifiedAddresses(self):
# Make sure addresses specified in call override those in message.
m = email.mime.text.MIMEText('A test message')
m['From'] = 'foo@bar.com'
m['To'] = 'John, Dinsdale'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.send_message(m, from_addr='joe@example.com', to_addrs='foo@example.net')
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Remove the X-Peer header that DebuggingServer adds.
test_output = self.get_output_without_xpeer()
del m['X-Peer']
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(test_output, mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: joe@example.com$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('John', 'Dinsdale'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertNotRegex(debugout, to_addr)
recip = re.compile(r"^recips: .*'foo@example.net'.*$", re.MULTILINE)
self.assertRegex(debugout, recip)
def testSendMessageWithMultipleFrom(self):
# Sender overrides To
m = email.mime.text.MIMEText('A test message')
m['From'] = 'Bernard, Bianca'
m['Sender'] = 'the_rescuers@Rescue-Aid-Society.com'
m['To'] = 'John, Dinsdale'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.send_message(m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Remove the X-Peer header that DebuggingServer adds.
test_output = self.get_output_without_xpeer()
del m['X-Peer']
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(test_output, mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: the_rescuers@Rescue-Aid-Society.com$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('John', 'Dinsdale'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertRegex(debugout, to_addr)
def testSendMessageResent(self):
m = email.mime.text.MIMEText('A test message')
m['From'] = 'foo@bar.com'
m['To'] = 'John'
m['CC'] = 'Sally, Fred'
m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <warped@silly.walks.com>'
m['Resent-Date'] = 'Thu, 1 Jan 1970 17:42:00 +0000'
m['Resent-From'] = 'holy@grail.net'
m['Resent-To'] = 'Martha <my_mom@great.cooker.com>, Jeff'
m['Resent-Bcc'] = 'doe@losthope.net'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.send_message(m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# The Resent-Bcc headers are deleted before serialization.
del m['Bcc']
del m['Resent-Bcc']
# Remove the X-Peer header that DebuggingServer adds.
test_output = self.get_output_without_xpeer()
del m['X-Peer']
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(test_output, mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: holy@grail.net$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('my_mom@great.cooker.com', 'Jeff', 'doe@losthope.net'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertRegex(debugout, to_addr)
def testSendMessageMultipleResentRaises(self):
m = email.mime.text.MIMEText('A test message')
m['From'] = 'foo@bar.com'
m['To'] = 'John'
m['CC'] = 'Sally, Fred'
m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <warped@silly.walks.com>'
m['Resent-Date'] = 'Thu, 1 Jan 1970 17:42:00 +0000'
m['Resent-From'] = 'holy@grail.net'
m['Resent-To'] = 'Martha <my_mom@great.cooker.com>, Jeff'
m['Resent-Bcc'] = 'doe@losthope.net'
m['Resent-Date'] = 'Thu, 2 Jan 1970 17:42:00 +0000'
m['Resent-To'] = 'holy@grail.net'
m['Resent-From'] = 'Martha <my_mom@great.cooker.com>, Jeff'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
with self.assertRaises(ValueError):
smtp.send_message(m)
smtp.close()
class NonConnectingTests(unittest.TestCase):
def testNotConnected(self):
# Test various operations on an unconnected SMTP object that
# should raise exceptions (at present the attempt in SMTP.send
# to reference the nonexistent 'sock' attribute of the SMTP object
# causes an AttributeError)
smtp = smtplib.SMTP()
self.assertRaises(smtplib.SMTPServerDisconnected, smtp.ehlo)
self.assertRaises(smtplib.SMTPServerDisconnected,
smtp.send, 'test msg')
def testNonnumericPort(self):
# check that non-numeric port raises OSError
self.assertRaises(OSError, smtplib.SMTP,
"localhost", "bogus")
self.assertRaises(OSError, smtplib.SMTP,
"localhost:bogus")
class DefaultArgumentsTests(unittest.TestCase):
def setUp(self):
self.msg = EmailMessage()
self.msg['From'] = 'Páolo <főo@bar.com>'
self.smtp = smtplib.SMTP()
self.smtp.ehlo = Mock(return_value=(200, 'OK'))
self.smtp.has_extn, self.smtp.sendmail = Mock(), Mock()
def testSendMessage(self):
expected_mail_options = ('SMTPUTF8', 'BODY=8BITMIME')
self.smtp.send_message(self.msg)
self.smtp.send_message(self.msg)
self.assertEqual(self.smtp.sendmail.call_args_list[0][0][3],
expected_mail_options)
self.assertEqual(self.smtp.sendmail.call_args_list[1][0][3],
expected_mail_options)
def testSendMessageWithMailOptions(self):
mail_options = ['STARTTLS']
expected_mail_options = ('STARTTLS', 'SMTPUTF8', 'BODY=8BITMIME')
self.smtp.send_message(self.msg, None, None, mail_options)
self.assertEqual(mail_options, ['STARTTLS'])
self.assertEqual(self.smtp.sendmail.call_args_list[0][0][3],
expected_mail_options)
# test response of client to a non-successful HELO message
class BadHELOServerTests(unittest.TestCase):
def setUp(self):
smtplib.socket = mock_socket
mock_socket.reply_with(b"199 no hello for you!")
self.old_stdout = sys.stdout
self.output = io.StringIO()
sys.stdout = self.output
self.port = 25
def tearDown(self):
smtplib.socket = socket
sys.stdout = self.old_stdout
def testFailingHELO(self):
self.assertRaises(smtplib.SMTPConnectError, smtplib.SMTP,
HOST, self.port, 'localhost', 3)
class TooLongLineTests(unittest.TestCase):
respdata = b'250 OK' + (b'.' * smtplib._MAXLINE * 2) + b'\n'
def setUp(self):
self.old_stdout = sys.stdout
self.output = io.StringIO()
sys.stdout = self.output
self.evt = threading.Event()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(15)
self.port = support.bind_port(self.sock)
servargs = (self.evt, self.respdata, self.sock)
thread = threading.Thread(target=server, args=servargs)
thread.start()
self.addCleanup(thread.join)
self.evt.wait()
self.evt.clear()
def tearDown(self):
self.evt.wait()
sys.stdout = self.old_stdout
def testLineTooLong(self):
self.assertRaises(smtplib.SMTPResponseException, smtplib.SMTP,
HOST, self.port, 'localhost', 3)
sim_users = {'Mr.A@somewhere.com':'John A',
'Ms.B@xn--fo-fka.com':'Sally B',
'Mrs.C@somewhereesle.com':'Ruth C',
}
sim_auth = ('Mr.A@somewhere.com', 'somepassword')
sim_cram_md5_challenge = ('PENCeUxFREJoU0NnbmhNWitOMjNGNn'
'dAZWx3b29kLmlubm9zb2Z0LmNvbT4=')
sim_lists = {'list-1':['Mr.A@somewhere.com','Mrs.C@somewhereesle.com'],
'list-2':['Ms.B@xn--fo-fka.com',],
}
# Simulated SMTP channel & server
class ResponseException(Exception): pass
class SimSMTPChannel(smtpd.SMTPChannel):
quit_response = None
mail_response = None
rcpt_response = None
data_response = None
rcpt_count = 0
rset_count = 0
disconnect = 0
AUTH = 99 # Add protocol state to enable auth testing.
authenticated_user = None
def __init__(self, extra_features, *args, **kw):
self._extrafeatures = ''.join(
[ "250-{0}\r\n".format(x) for x in extra_features ])
super(SimSMTPChannel, self).__init__(*args, **kw)
# AUTH related stuff. It would be nice if support for this were in smtpd.
def found_terminator(self):
if self.smtp_state == self.AUTH:
line = self._emptystring.join(self.received_lines)
print('Data:', repr(line), file=smtpd.DEBUGSTREAM)
self.received_lines = []
try:
self.auth_object(line)
except ResponseException as e:
self.smtp_state = self.COMMAND
self.push('%s %s' % (e.smtp_code, e.smtp_error))
return
super().found_terminator()
def smtp_AUTH(self, arg):
if not self.seen_greeting:
self.push('503 Error: send EHLO first')
return
if not self.extended_smtp or 'AUTH' not in self._extrafeatures:
self.push('500 Error: command "AUTH" not recognized')
return
if self.authenticated_user is not None:
self.push(
'503 Bad sequence of commands: already authenticated')
return
args = arg.split()
if len(args) not in [1, 2]:
self.push('501 Syntax: AUTH <mechanism> [initial-response]')
return
auth_object_name = '_auth_%s' % args[0].lower().replace('-', '_')
try:
self.auth_object = getattr(self, auth_object_name)
except AttributeError:
self.push('504 Command parameter not implemented: unsupported '
' authentication mechanism {!r}'.format(auth_object_name))
return
self.smtp_state = self.AUTH
self.auth_object(args[1] if len(args) == 2 else None)
def _authenticated(self, user, valid):
if valid:
self.authenticated_user = user
self.push('235 Authentication Succeeded')
else:
self.push('535 Authentication credentials invalid')
self.smtp_state = self.COMMAND
def _decode_base64(self, string):
return base64.decodebytes(string.encode('ascii')).decode('utf-8')
def _auth_plain(self, arg=None):
if arg is None:
self.push('334 ')
else:
logpass = self._decode_base64(arg)
try:
*_, user, password = logpass.split('\0')
except ValueError as e:
self.push('535 Splitting response {!r} into user and password'
' failed: {}'.format(logpass, e))
return
self._authenticated(user, password == sim_auth[1])
def _auth_login(self, arg=None):
if arg is None:
# base64 encoded 'Username:'
self.push('334 VXNlcm5hbWU6')
elif not hasattr(self, '_auth_login_user'):
self._auth_login_user = self._decode_base64(arg)
# base64 encoded 'Password:'
self.push('334 UGFzc3dvcmQ6')
else:
password = self._decode_base64(arg)
self._authenticated(self._auth_login_user, password == sim_auth[1])
del self._auth_login_user
def _auth_cram_md5(self, arg=None):
if arg is None:
self.push('334 {}'.format(sim_cram_md5_challenge))
else:
logpass = self._decode_base64(arg)
try:
user, hashed_pass = logpass.split()
except ValueError as e:
self.push('535 Splitting response {!r} into user and password'
'failed: {}'.format(logpass, e))
return False
valid_hashed_pass = hmac.HMAC(
sim_auth[1].encode('ascii'),
self._decode_base64(sim_cram_md5_challenge).encode('ascii'),
'md5').hexdigest()
self._authenticated(user, hashed_pass == valid_hashed_pass)
# end AUTH related stuff.
def smtp_EHLO(self, arg):
resp = ('250-testhost\r\n'
'250-EXPN\r\n'
'250-SIZE 20000000\r\n'
'250-STARTTLS\r\n'
'250-DELIVERBY\r\n')
resp = resp + self._extrafeatures + '250 HELP'
self.push(resp)
self.seen_greeting = arg
self.extended_smtp = True
def smtp_VRFY(self, arg):
# For max compatibility smtplib should be sending the raw address.
if arg in sim_users:
self.push('250 %s %s' % (sim_users[arg], smtplib.quoteaddr(arg)))
else:
self.push('550 No such user: %s' % arg)
def smtp_EXPN(self, arg):
list_name = arg.lower()
if list_name in sim_lists:
user_list = sim_lists[list_name]
for n, user_email in enumerate(user_list):
quoted_addr = smtplib.quoteaddr(user_email)
if n < len(user_list) - 1:
self.push('250-%s %s' % (sim_users[user_email], quoted_addr))
else:
self.push('250 %s %s' % (sim_users[user_email], quoted_addr))
else:
self.push('550 No access for you!')
def smtp_QUIT(self, arg):
if self.quit_response is None:
super(SimSMTPChannel, self).smtp_QUIT(arg)
else:
self.push(self.quit_response)
self.close_when_done()
def smtp_MAIL(self, arg):
if self.mail_response is None:
super().smtp_MAIL(arg)
else:
self.push(self.mail_response)
if self.disconnect:
self.close_when_done()
def smtp_RCPT(self, arg):
if self.rcpt_response is None:
super().smtp_RCPT(arg)
return
self.rcpt_count += 1
self.push(self.rcpt_response[self.rcpt_count-1])
def smtp_RSET(self, arg):
self.rset_count += 1
super().smtp_RSET(arg)
def smtp_DATA(self, arg):
if self.data_response is None:
super().smtp_DATA(arg)
else:
self.push(self.data_response)
def handle_error(self):
raise
class SimSMTPServer(smtpd.SMTPServer):
channel_class = SimSMTPChannel
def __init__(self, *args, **kw):
self._extra_features = []
self._addresses = {}
smtpd.SMTPServer.__init__(self, *args, **kw)
def handle_accepted(self, conn, addr):
self._SMTPchannel = self.channel_class(
self._extra_features, self, conn, addr,
decode_data=self._decode_data)
def process_message(self, peer, mailfrom, rcpttos, data):
self._addresses['from'] = mailfrom
self._addresses['tos'] = rcpttos
def add_feature(self, feature):
self._extra_features.append(feature)
def handle_error(self):
raise
# Test various SMTP & ESMTP commands/behaviors that require a simulated server
# (i.e., something with more features than DebuggingServer)
class SMTPSimTests(unittest.TestCase):
def setUp(self):
self.real_getfqdn = socket.getfqdn
socket.getfqdn = mock_socket.getfqdn
self.serv_evt = threading.Event()
self.client_evt = threading.Event()
# Pick a random unused port by passing 0 for the port number
self.serv = SimSMTPServer((HOST, 0), ('nowhere', -1), decode_data=True)
# Keep a note of what port was assigned
self.port = self.serv.socket.getsockname()[1]
serv_args = (self.serv, self.serv_evt, self.client_evt)
self.thread = threading.Thread(target=debugging_server, args=serv_args)
self.thread.start()
# wait until server thread has assigned a port number
self.serv_evt.wait()
self.serv_evt.clear()
def tearDown(self):
socket.getfqdn = self.real_getfqdn
# indicate that the client is finished
self.client_evt.set()
# wait for the server thread to terminate
self.serv_evt.wait()
self.thread.join()
def testBasic(self):
# smoke test
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
smtp.quit()
def testEHLO(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
# no features should be present before the EHLO
self.assertEqual(smtp.esmtp_features, {})
# features expected from the test server
expected_features = {'expn':'',
'size': '20000000',
'starttls': '',
'deliverby': '',
'help': '',
}
smtp.ehlo()
self.assertEqual(smtp.esmtp_features, expected_features)
for k in expected_features:
self.assertTrue(smtp.has_extn(k))
self.assertFalse(smtp.has_extn('unsupported-feature'))
smtp.quit()
def testVRFY(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
for addr_spec, name in sim_users.items():
expected_known = (250, bytes('%s %s' %
(name, smtplib.quoteaddr(addr_spec)),
"ascii"))
self.assertEqual(smtp.vrfy(addr_spec), expected_known)
u = 'nobody@nowhere.com'
expected_unknown = (550, ('No such user: %s' % u).encode('ascii'))
self.assertEqual(smtp.vrfy(u), expected_unknown)
smtp.quit()
def testEXPN(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
for listname, members in sim_lists.items():
users = []
for m in members:
users.append('%s %s' % (sim_users[m], smtplib.quoteaddr(m)))
expected_known = (250, bytes('\n'.join(users), "ascii"))
self.assertEqual(smtp.expn(listname), expected_known)
u = 'PSU-Members-List'
expected_unknown = (550, b'No access for you!')
self.assertEqual(smtp.expn(u), expected_unknown)
smtp.quit()
def testAUTH_PLAIN(self):
self.serv.add_feature("AUTH PLAIN")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
resp = smtp.login(sim_auth[0], sim_auth[1])
self.assertEqual(resp, (235, b'Authentication Succeeded'))
smtp.close()
def testAUTH_LOGIN(self):
self.serv.add_feature("AUTH LOGIN")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
resp = smtp.login(sim_auth[0], sim_auth[1])
self.assertEqual(resp, (235, b'Authentication Succeeded'))
smtp.close()
def testAUTH_CRAM_MD5(self):
self.serv.add_feature("AUTH CRAM-MD5")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
resp = smtp.login(sim_auth[0], sim_auth[1])
self.assertEqual(resp, (235, b'Authentication Succeeded'))
smtp.close()
def testAUTH_multiple(self):
# Test that multiple authentication methods are tried.
self.serv.add_feature("AUTH BOGUS PLAIN LOGIN CRAM-MD5")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
resp = smtp.login(sim_auth[0], sim_auth[1])
self.assertEqual(resp, (235, b'Authentication Succeeded'))
smtp.close()
def test_auth_function(self):
supported = {'CRAM-MD5', 'PLAIN', 'LOGIN'}
for mechanism in supported:
self.serv.add_feature("AUTH {}".format(mechanism))
for mechanism in supported:
with self.subTest(mechanism=mechanism):
smtp = smtplib.SMTP(HOST, self.port,
local_hostname='localhost', timeout=15)
smtp.ehlo('foo')
smtp.user, smtp.password = sim_auth[0], sim_auth[1]
method = 'auth_' + mechanism.lower().replace('-', '_')
resp = smtp.auth(mechanism, getattr(smtp, method))
self.assertEqual(resp, (235, b'Authentication Succeeded'))
smtp.close()
def test_quit_resets_greeting(self):
smtp = smtplib.SMTP(HOST, self.port,
local_hostname='localhost',
timeout=15)
code, message = smtp.ehlo()
self.assertEqual(code, 250)
self.assertIn('size', smtp.esmtp_features)
smtp.quit()
self.assertNotIn('size', smtp.esmtp_features)
smtp.connect(HOST, self.port)
self.assertNotIn('size', smtp.esmtp_features)
smtp.ehlo_or_helo_if_needed()
self.assertIn('size', smtp.esmtp_features)
smtp.quit()
def test_with_statement(self):
with smtplib.SMTP(HOST, self.port) as smtp:
code, message = smtp.noop()
self.assertEqual(code, 250)
self.assertRaises(smtplib.SMTPServerDisconnected, smtp.send, b'foo')
with smtplib.SMTP(HOST, self.port) as smtp:
smtp.close()
self.assertRaises(smtplib.SMTPServerDisconnected, smtp.send, b'foo')
def test_with_statement_QUIT_failure(self):
with self.assertRaises(smtplib.SMTPResponseException) as error:
with smtplib.SMTP(HOST, self.port) as smtp:
smtp.noop()
self.serv._SMTPchannel.quit_response = '421 QUIT FAILED'
self.assertEqual(error.exception.smtp_code, 421)
self.assertEqual(error.exception.smtp_error, b'QUIT FAILED')
#TODO: add tests for correct AUTH method fallback now that the
#test infrastructure can support it.
# Issue 17498: make sure _rset does not raise SMTPServerDisconnected exception
    def test__rset_from_mail_cmd(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
smtp.noop()
self.serv._SMTPchannel.mail_response = '451 Requested action aborted'
self.serv._SMTPchannel.disconnect = True
with self.assertRaises(smtplib.SMTPSenderRefused):
smtp.sendmail('John', 'Sally', 'test message')
self.assertIsNone(smtp.sock)
# Issue 5713: make sure close, not rset, is called if we get a 421 error
def test_421_from_mail_cmd(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
smtp.noop()
self.serv._SMTPchannel.mail_response = '421 closing connection'
with self.assertRaises(smtplib.SMTPSenderRefused):
smtp.sendmail('John', 'Sally', 'test message')
self.assertIsNone(smtp.sock)
self.assertEqual(self.serv._SMTPchannel.rset_count, 0)
def test_421_from_rcpt_cmd(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
smtp.noop()
self.serv._SMTPchannel.rcpt_response = ['250 accepted', '421 closing']
with self.assertRaises(smtplib.SMTPRecipientsRefused) as r:
smtp.sendmail('John', ['Sally', 'Frank', 'George'], 'test message')
self.assertIsNone(smtp.sock)
self.assertEqual(self.serv._SMTPchannel.rset_count, 0)
self.assertDictEqual(r.exception.args[0], {'Frank': (421, b'closing')})
def test_421_from_data_cmd(self):
class MySimSMTPChannel(SimSMTPChannel):
def found_terminator(self):
if self.smtp_state == self.DATA:
self.push('421 closing')
else:
super().found_terminator()
self.serv.channel_class = MySimSMTPChannel
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
smtp.noop()
with self.assertRaises(smtplib.SMTPDataError):
smtp.sendmail('John@foo.org', ['Sally@foo.org'], 'test message')
self.assertIsNone(smtp.sock)
self.assertEqual(self.serv._SMTPchannel.rcpt_count, 0)
def test_smtputf8_NotSupportedError_if_no_server_support(self):
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
smtp.ehlo()
self.assertTrue(smtp.does_esmtp)
self.assertFalse(smtp.has_extn('smtputf8'))
self.assertRaises(
smtplib.SMTPNotSupportedError,
smtp.sendmail,
'John', 'Sally', '', mail_options=['BODY=8BITMIME', 'SMTPUTF8'])
self.assertRaises(
smtplib.SMTPNotSupportedError,
smtp.mail, 'John', options=['BODY=8BITMIME', 'SMTPUTF8'])
def test_send_unicode_without_SMTPUTF8(self):
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
self.assertRaises(UnicodeEncodeError, smtp.sendmail, 'Alice', 'Böb', '')
self.assertRaises(UnicodeEncodeError, smtp.mail, 'Älice')
def test_send_message_error_on_non_ascii_addrs_if_no_smtputf8(self):
# This test is located here and not in the SMTPUTF8SimTests
# class because it needs a "regular" SMTP server to work
msg = EmailMessage()
msg['From'] = "Páolo <főo@bar.com>"
msg['To'] = 'Dinsdale'
msg['Subject'] = 'Nudge nudge, wink, wink \u1F609'
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
with self.assertRaises(smtplib.SMTPNotSupportedError):
smtp.send_message(msg)
def test_name_field_not_included_in_envelop_addresses(self):
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost', timeout=3
)
self.addCleanup(smtp.close)
message = EmailMessage()
message['From'] = email.utils.formataddr(('Michaël', 'michael@example.com'))
message['To'] = email.utils.formataddr(('René', 'rene@example.com'))
self.assertDictEqual(smtp.send_message(message), {})
self.assertEqual(self.serv._addresses['from'], 'michael@example.com')
self.assertEqual(self.serv._addresses['tos'], ['rene@example.com'])
class SimSMTPUTF8Server(SimSMTPServer):
def __init__(self, *args, **kw):
# The base SMTP server turns these on automatically, but our test
# server is set up to munge the EHLO response, so we need to provide
# them as well. And yes, the call is to SMTPServer not SimSMTPServer.
self._extra_features = ['SMTPUTF8', '8BITMIME']
smtpd.SMTPServer.__init__(self, *args, **kw)
def handle_accepted(self, conn, addr):
self._SMTPchannel = self.channel_class(
self._extra_features, self, conn, addr,
decode_data=self._decode_data,
enable_SMTPUTF8=self.enable_SMTPUTF8,
)
def process_message(self, peer, mailfrom, rcpttos, data, mail_options=None,
rcpt_options=None):
self.last_peer = peer
self.last_mailfrom = mailfrom
self.last_rcpttos = rcpttos
self.last_message = data
self.last_mail_options = mail_options
self.last_rcpt_options = rcpt_options
class SMTPUTF8SimTests(unittest.TestCase):
maxDiff = None
def setUp(self):
self.real_getfqdn = socket.getfqdn
socket.getfqdn = mock_socket.getfqdn
self.serv_evt = threading.Event()
self.client_evt = threading.Event()
# Pick a random unused port by passing 0 for the port number
self.serv = SimSMTPUTF8Server((HOST, 0), ('nowhere', -1),
decode_data=False,
enable_SMTPUTF8=True)
# Keep a note of what port was assigned
self.port = self.serv.socket.getsockname()[1]
serv_args = (self.serv, self.serv_evt, self.client_evt)
self.thread = threading.Thread(target=debugging_server, args=serv_args)
self.thread.start()
# wait until server thread has assigned a port number
self.serv_evt.wait()
self.serv_evt.clear()
def tearDown(self):
socket.getfqdn = self.real_getfqdn
# indicate that the client is finished
self.client_evt.set()
# wait for the server thread to terminate
self.serv_evt.wait()
self.thread.join()
def test_test_server_supports_extensions(self):
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
smtp.ehlo()
self.assertTrue(smtp.does_esmtp)
self.assertTrue(smtp.has_extn('smtputf8'))
def test_send_unicode_with_SMTPUTF8_via_sendmail(self):
m = '¡a test message containing unicode!'.encode('utf-8')
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
smtp.sendmail('Jőhn', 'Sálly', m,
mail_options=['BODY=8BITMIME', 'SMTPUTF8'])
self.assertEqual(self.serv.last_mailfrom, 'Jőhn')
self.assertEqual(self.serv.last_rcpttos, ['Sálly'])
self.assertEqual(self.serv.last_message, m)
self.assertIn('BODY=8BITMIME', self.serv.last_mail_options)
self.assertIn('SMTPUTF8', self.serv.last_mail_options)
self.assertEqual(self.serv.last_rcpt_options, [])
def test_send_unicode_with_SMTPUTF8_via_low_level_API(self):
m = '¡a test message containing unicode!'.encode('utf-8')
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
smtp.ehlo()
self.assertEqual(
smtp.mail('Jő', options=['BODY=8BITMIME', 'SMTPUTF8']),
(250, b'OK'))
self.assertEqual(smtp.rcpt('János'), (250, b'OK'))
self.assertEqual(smtp.data(m), (250, b'OK'))
self.assertEqual(self.serv.last_mailfrom, 'Jő')
self.assertEqual(self.serv.last_rcpttos, ['János'])
self.assertEqual(self.serv.last_message, m)
self.assertIn('BODY=8BITMIME', self.serv.last_mail_options)
self.assertIn('SMTPUTF8', self.serv.last_mail_options)
self.assertEqual(self.serv.last_rcpt_options, [])
def test_send_message_uses_smtputf8_if_addrs_non_ascii(self):
msg = EmailMessage()
msg['From'] = "Páolo <főo@bar.com>"
msg['To'] = 'Dinsdale'
msg['Subject'] = 'Nudge nudge, wink, wink \u1F609'
# XXX I don't know why I need two \n's here, but this is an existing
# bug (if it is one) and not a problem with the new functionality.
msg.set_content("oh là là, know what I mean, know what I mean?\n\n")
        # XXX smtpd converts received \r\n to \n, so we can't easily test that
        # we are successfully sending \r\n :(.
expected = textwrap.dedent("""\
From: Páolo <főo@bar.com>
To: Dinsdale
Subject: Nudge nudge, wink, wink \u1F609
Content-Type: text/plain; charset="utf-8"
Content-Transfer-Encoding: 8bit
MIME-Version: 1.0
oh là là, know what I mean, know what I mean?
""")
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
self.assertEqual(smtp.send_message(msg), {})
self.assertEqual(self.serv.last_mailfrom, 'főo@bar.com')
self.assertEqual(self.serv.last_rcpttos, ['Dinsdale'])
self.assertEqual(self.serv.last_message.decode(), expected)
self.assertIn('BODY=8BITMIME', self.serv.last_mail_options)
self.assertIn('SMTPUTF8', self.serv.last_mail_options)
self.assertEqual(self.serv.last_rcpt_options, [])
EXPECTED_RESPONSE = encode_base64(b'\0psu\0doesnotexist', eol='')
class SimSMTPAUTHInitialResponseChannel(SimSMTPChannel):
def smtp_AUTH(self, arg):
# RFC 4954's AUTH command allows for an optional initial-response.
# Not all AUTH methods support this; some require a challenge. AUTH
        # PLAIN supports an initial response, so test that here. See issue #15014.
args = arg.split()
if args[0].lower() == 'plain':
if len(args) == 2:
# AUTH PLAIN <initial-response> with the response base 64
# encoded. Hard code the expected response for the test.
if args[1] == EXPECTED_RESPONSE:
self.push('235 Ok')
return
self.push('571 Bad authentication')
class SimSMTPAUTHInitialResponseServer(SimSMTPServer):
channel_class = SimSMTPAUTHInitialResponseChannel
class SMTPAUTHInitialResponseSimTests(unittest.TestCase):
def setUp(self):
self.real_getfqdn = socket.getfqdn
socket.getfqdn = mock_socket.getfqdn
self.serv_evt = threading.Event()
self.client_evt = threading.Event()
# Pick a random unused port by passing 0 for the port number
self.serv = SimSMTPAUTHInitialResponseServer(
(HOST, 0), ('nowhere', -1), decode_data=True)
# Keep a note of what port was assigned
self.port = self.serv.socket.getsockname()[1]
serv_args = (self.serv, self.serv_evt, self.client_evt)
self.thread = threading.Thread(target=debugging_server, args=serv_args)
self.thread.start()
# wait until server thread has assigned a port number
self.serv_evt.wait()
self.serv_evt.clear()
def tearDown(self):
socket.getfqdn = self.real_getfqdn
# indicate that the client is finished
self.client_evt.set()
# wait for the server thread to terminate
self.serv_evt.wait()
self.thread.join()
def testAUTH_PLAIN_initial_response_login(self):
self.serv.add_feature('AUTH PLAIN')
smtp = smtplib.SMTP(HOST, self.port,
local_hostname='localhost', timeout=15)
smtp.login('psu', 'doesnotexist')
smtp.close()
def testAUTH_PLAIN_initial_response_auth(self):
self.serv.add_feature('AUTH PLAIN')
smtp = smtplib.SMTP(HOST, self.port,
local_hostname='localhost', timeout=15)
smtp.user = 'psu'
smtp.password = 'doesnotexist'
code, response = smtp.auth('plain', smtp.auth_plain)
smtp.close()
self.assertEqual(code, 235)
if __name__ == '__main__':
unittest.main()
|
java_gateway_test.py
|
# -*- coding: UTF-8 -*-
"""
Created on Dec 10, 2009
@author: barthelemy
"""
from __future__ import unicode_literals, absolute_import
from collections import deque
from decimal import Decimal
import gc
import math
from multiprocessing import Process
import os
from socket import AF_INET, SOCK_STREAM, socket
import subprocess
import tempfile
from threading import Thread
import time
from traceback import print_exc
import unittest
from py4j.compat import (
range, isbytearray, ispython3bytestr, bytearray2, long,
Queue)
from py4j.finalizer import ThreadSafeFinalizer
from py4j.java_gateway import (
JavaGateway, JavaMember, get_field, get_method,
GatewayClient, set_field, java_import, JavaObject, is_instance_of,
GatewayParameters, CallbackServerParameters, quiet_close)
from py4j.protocol import (
Py4JError, Py4JJavaError, Py4JNetworkError, decode_bytearray,
encode_bytearray, escape_new_line, unescape_new_line)
SERVER_PORT = 25333
TEST_PORT = 25332
PY4J_JAVA_PATH = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"../../../../py4j-java/bin")
def sleep(sleep_time=0.250):
"""Default sleep time to enable the OS to reuse address and port.
"""
time.sleep(sleep_time)
def start_echo_server():
subprocess.call(["java", "-cp", PY4J_JAVA_PATH, "py4j.EchoServer"])
def start_echo_server_process():
# XXX DO NOT FORGET TO KILL THE PROCESS IF THE TEST DOES NOT SUCCEED
sleep()
p = Process(target=start_echo_server)
p.start()
sleep(1.5)
return p
def start_example_server():
subprocess.call([
"java", "-Xmx512m", "-cp", PY4J_JAVA_PATH,
"py4j.examples.ExampleApplication"])
def start_example_app_process():
# XXX DO NOT FORGET TO KILL THE PROCESS IF THE TEST DOES NOT SUCCEED
p = Process(target=start_example_server)
p.start()
sleep()
test_gateway_connection()
return p
def test_gateway_connection():
test_gateway = JavaGateway()
try:
# Call a dummy method just to make sure we can connect to the JVM
test_gateway.jvm.System.lineSeparator()
except Py4JNetworkError:
        # We could not connect. Let's wait a long time.
# If it fails after that, there is a bug with our code!
sleep(2)
finally:
test_gateway.close()
def get_socket():
testSocket = socket(AF_INET, SOCK_STREAM)
testSocket.connect(("127.0.0.1", TEST_PORT))
return testSocket
def safe_shutdown(instance):
try:
instance.gateway.shutdown()
except Exception:
print_exc()
class TestConnection(object):
"""Connection that does nothing. Useful for testing."""
counter = -1
def __init__(self, return_message="yro"):
self.address = "127.0.0.1"
self.port = 1234
self.return_message = return_message
self.is_connected = True
def start(self):
pass
def stop(self):
pass
def send_command(self, command):
TestConnection.counter += 1
if not command.startswith("m\nd\n"):
self.last_message = command
return self.return_message + str(TestConnection.counter)
class ProtocolTest(unittest.TestCase):
def tearDown(self):
# Safety check in case there was an exception...
safe_shutdown(self)
def testEscape(self):
self.assertEqual("Hello\t\rWorld\n\\", unescape_new_line(
escape_new_line("Hello\t\rWorld\n\\")))
self.assertEqual("Hello\t\rWorld\n\\", unescape_new_line(
escape_new_line("Hello\t\rWorld\n\\")))
def testProtocolSend(self):
testConnection = TestConnection()
self.gateway = JavaGateway()
# Replace gateway client by test connection
self.gateway.set_gateway_client(testConnection)
e = self.gateway.getExample()
self.assertEqual("c\nt\ngetExample\ne\n", testConnection.last_message)
e.method1(1, True, "Hello\nWorld", e, None, 1.5)
self.assertEqual(
"c\no0\nmethod1\ni1\nbTrue\nsHello\\nWorld\nro0\nn\nd1.5\ne\n",
testConnection.last_message)
del(e)
def testProtocolReceive(self):
p = start_echo_server_process()
try:
testSocket = get_socket()
testSocket.sendall("yo\n".encode("utf-8"))
testSocket.sendall("yro0\n".encode("utf-8"))
testSocket.sendall("yo\n".encode("utf-8"))
testSocket.sendall("ysHello World\n".encode("utf-8"))
            # No extra exchange (method3) because it is already cached.
testSocket.sendall("yi123\n".encode("utf-8"))
testSocket.sendall("yd1.25\n".encode("utf-8"))
testSocket.sendall("yo\n".encode("utf-8"))
testSocket.sendall("yn\n".encode("utf-8"))
testSocket.sendall("yo\n".encode("utf-8"))
testSocket.sendall("ybTrue\n".encode("utf-8"))
testSocket.sendall("yo\n".encode("utf-8"))
testSocket.sendall("yL123\n".encode("utf-8"))
testSocket.sendall("ydinf\n".encode("utf-8"))
testSocket.close()
sleep()
self.gateway = JavaGateway(
gateway_parameters=GatewayParameters(auto_field=True))
ex = self.gateway.getNewExample()
self.assertEqual("Hello World", ex.method3(1, True))
self.assertEqual(123, ex.method3())
self.assertAlmostEqual(1.25, ex.method3())
self.assertTrue(ex.method2() is None)
self.assertTrue(ex.method4())
self.assertEqual(long(123), ex.method8())
self.assertEqual(float("inf"), ex.method8())
self.gateway.shutdown()
except Exception:
print_exc()
self.fail("Problem occurred")
p.join()
class IntegrationTest(unittest.TestCase):
def setUp(self):
self.p = start_echo_server_process()
# This is to ensure that the server is started before connecting to it!
def tearDown(self):
# Safety check in case there was an exception...
safe_shutdown(self)
self.p.join()
def testIntegration(self):
try:
testSocket = get_socket()
testSocket.sendall("yo\n".encode("utf-8"))
testSocket.sendall("yro0\n".encode("utf-8"))
testSocket.sendall("yo\n".encode("utf-8"))
testSocket.sendall("ysHello World\n".encode("utf-8"))
testSocket.sendall("yro1\n".encode("utf-8"))
testSocket.sendall("yo\n".encode("utf-8"))
testSocket.sendall("ysHello World2\n".encode("utf-8"))
testSocket.close()
sleep()
self.gateway = JavaGateway(
gateway_parameters=GatewayParameters(auto_field=True))
ex = self.gateway.getNewExample()
response = ex.method3(1, True)
self.assertEqual("Hello World", response)
ex2 = self.gateway.entry_point.getNewExample()
response = ex2.method3(1, True)
self.assertEqual("Hello World2", response)
self.gateway.shutdown()
except Exception:
self.fail("Problem occurred")
def testException(self):
try:
testSocket = get_socket()
testSocket.sendall("yo\n".encode("utf-8"))
testSocket.sendall("yro0\n".encode("utf-8"))
testSocket.sendall("yo\n".encode("utf-8"))
testSocket.sendall(b"x\n")
testSocket.close()
sleep()
self.gateway = JavaGateway(
gateway_parameters=GatewayParameters(auto_field=True))
ex = self.gateway.getNewExample()
self.assertRaises(Py4JError, lambda: ex.method3(1, True))
self.gateway.shutdown()
except Exception:
self.fail("Problem occurred")
class CloseTest(unittest.TestCase):
def testNoCallbackServer(self):
# Test that the program can continue to move on and that no close
# is required.
JavaGateway()
self.assertTrue(True)
def testCallbackServer(self):
# A close is required to stop the thread.
gateway = JavaGateway(
callback_server_parameters=CallbackServerParameters())
gateway.close()
self.assertTrue(True)
sleep(2)
class MethodTest(unittest.TestCase):
def setUp(self):
self.p = start_example_app_process()
# This is to ensure that the server is started before connecting to it!
self.gateway = JavaGateway()
def tearDown(self):
safe_shutdown(self)
self.p.join()
def testNoneArg(self):
ex = self.gateway.getNewExample()
try:
ex.method2(None)
ex2 = ex.method4(None)
            self.assertEqual(ex2.getField1(), 3)
            self.assertEqual(2, ex.method7(None))
except Exception:
print_exc()
self.fail()
def testUnicode(self):
sb = self.gateway.jvm.java.lang.StringBuffer()
sb.append("\r\n\tHello\r\n\t")
self.assertEqual("\r\n\tHello\r\n\t", sb.toString())
def testEscape(self):
sb = self.gateway.jvm.java.lang.StringBuffer()
sb.append("\r\n\tHello\r\n\t")
self.assertEqual("\r\n\tHello\r\n\t", sb.toString())
class FieldTest(unittest.TestCase):
def setUp(self):
self.p = start_example_app_process()
def tearDown(self):
safe_shutdown(self)
self.p.join()
def testAutoField(self):
self.gateway = JavaGateway(
gateway_parameters=GatewayParameters(auto_field=True))
ex = self.gateway.getNewExample()
self.assertEqual(ex.field10, 10)
self.assertEqual(ex.field11, long(11))
sb = ex.field20
sb.append("Hello")
self.assertEqual("Hello", sb.toString())
self.assertTrue(ex.field21 is None)
def testAutoFieldDeprecated(self):
self.gateway = JavaGateway(auto_field=True)
ex = self.gateway.getNewExample()
self.assertEqual(ex.field10, 10)
def testNoField(self):
self.gateway = JavaGateway(
gateway_parameters=GatewayParameters(auto_field=True))
ex = self.gateway.getNewExample()
member = ex.field50
self.assertTrue(isinstance(member, JavaMember))
def testNoAutoField(self):
self.gateway = JavaGateway(
gateway_parameters=GatewayParameters(auto_field=False))
ex = self.gateway.getNewExample()
self.assertTrue(isinstance(ex.field10, JavaMember))
self.assertTrue(isinstance(ex.field50, JavaMember))
self.assertEqual(10, get_field(ex, "field10"))
# This field does not exist
self.assertRaises(Exception, get_field, ex, "field50")
# With auto field = True
ex._auto_field = True
sb = ex.field20
sb.append("Hello")
self.assertEqual("Hello", sb.toString())
def testSetField(self):
self.gateway = JavaGateway(
gateway_parameters=GatewayParameters(auto_field=False))
ex = self.gateway.getNewExample()
set_field(ex, "field10", 2334)
        self.assertEqual(get_field(ex, "field10"), 2334)
        sb = self.gateway.jvm.java.lang.StringBuffer("Hello World!")
        set_field(ex, "field21", sb)
        self.assertEqual(get_field(ex, "field21").toString(), "Hello World!")
self.assertRaises(Exception, set_field, ex, "field1", 123)
def testGetMethod(self):
# This is necessary if a field hides a method...
self.gateway = JavaGateway()
ex = self.gateway.getNewExample()
self.assertEqual(1, get_method(ex, "method1")())
class UtilityTest(unittest.TestCase):
def setUp(self):
self.p = start_example_app_process()
self.gateway = JavaGateway()
def tearDown(self):
safe_shutdown(self)
self.p.join()
def testIsInstance(self):
a_list = self.gateway.jvm.java.util.ArrayList()
a_map = self.gateway.jvm.java.util.HashMap()
# FQN
self.assertTrue(is_instance_of(self.gateway, a_list, "java.util.List"))
self.assertFalse(
is_instance_of(
self.gateway, a_list, "java.lang.String"))
# JavaClass
self.assertTrue(
is_instance_of(
self.gateway, a_list, self.gateway.jvm.java.util.List))
self.assertFalse(
is_instance_of(
self.gateway, a_list, self.gateway.jvm.java.lang.String))
# JavaObject
self.assertTrue(is_instance_of(self.gateway, a_list, a_list))
self.assertFalse(is_instance_of(self.gateway, a_list, a_map))
class MemoryManagementTest(unittest.TestCase):
def setUp(self):
self.p = start_example_app_process()
def tearDown(self):
safe_shutdown(self)
self.p.join()
gc.collect()
def testNoAttach(self):
self.gateway = JavaGateway()
gateway2 = JavaGateway()
sb = self.gateway.jvm.java.lang.StringBuffer()
sb.append("Hello World")
self.gateway.shutdown()
self.assertRaises(Exception, lambda: sb.append("Python"))
self.assertRaises(
Exception, lambda: gateway2.jvm.java.lang.StringBuffer())
def testDetach(self):
self.gateway = JavaGateway()
gc.collect()
finalizers_size_start = len(ThreadSafeFinalizer.finalizers)
sb = self.gateway.jvm.java.lang.StringBuffer()
sb.append("Hello World")
self.gateway.detach(sb)
sb2 = self.gateway.jvm.java.lang.StringBuffer()
sb2.append("Hello World")
sb2._detach()
gc.collect()
self.assertEqual(
len(ThreadSafeFinalizer.finalizers) - finalizers_size_start, 0)
self.gateway.shutdown()
class TypeConversionTest(unittest.TestCase):
def setUp(self):
self.p = start_example_app_process()
self.gateway = JavaGateway()
def tearDown(self):
safe_shutdown(self)
self.p.join()
def testLongInt(self):
ex = self.gateway.getNewExample()
self.assertEqual(1, ex.method7(1234))
self.assertEqual(4, ex.method7(2147483648))
self.assertEqual(4, ex.method7(long(2147483648)))
self.assertEqual(long(4), ex.method8(3))
self.assertEqual(4, ex.method8(3))
self.assertEqual(long(4), ex.method8(long(3)))
self.assertEqual(long(4), ex.method9(long(3)))
def testBigDecimal(self):
ex = self.gateway.getNewExample()
self.assertEqual(Decimal("2147483.647"), ex.method10(2147483647, 3))
self.assertEqual(Decimal("-13.456"), ex.method10(Decimal("-14.456")))
def testFloatConversion(self):
java_inf = self.gateway.jvm.java.lang.Double.parseDouble("Infinity")
self.assertEqual(float("inf"), java_inf)
java_inf = self.gateway.jvm.java.lang.Double.parseDouble("+Infinity")
self.assertEqual(float("inf"), java_inf)
java_neg_inf = self.gateway.jvm.java.lang.Double.parseDouble(
"-Infinity")
self.assertEqual(float("-inf"), java_neg_inf)
java_nan = self.gateway.jvm.java.lang.Double.parseDouble("NaN")
self.assertTrue(math.isnan(java_nan))
class UnicodeTest(unittest.TestCase):
def setUp(self):
self.p = start_example_app_process()
self.gateway = JavaGateway()
def tearDown(self):
safe_shutdown(self)
self.p.join()
# def testUtfMethod(self):
# ex = self.gateway.jvm.py4j.examples.UTFExample()
# Only works for Python 3
# self.assertEqual(2, ex.strangeMéthod())
def testUnicodeString(self):
        # NOTE: these literals are unicode because of the `from __future__ import unicode_literals` above.
ex = self.gateway.jvm.py4j.examples.UTFExample()
s1 = "allo"
s2 = "alloé"
array1 = ex.getUtfValue(s1)
array2 = ex.getUtfValue(s2)
self.assertEqual(len(s1), len(array1))
self.assertEqual(len(s2), len(array2))
self.assertEqual(ord(s1[0]), array1[0])
self.assertEqual(ord(s2[4]), array2[4])
class ByteTest(unittest.TestCase):
def setUp(self):
self.p = start_example_app_process()
self.gateway = JavaGateway()
def tearDown(self):
safe_shutdown(self)
self.p.join()
def testJavaByteConversion(self):
ex = self.gateway.jvm.py4j.examples.UTFExample()
ba = bytearray([0, 1, 127, 128, 255, 216, 1, 220])
self.assertEqual(0, ex.getPositiveByteValue(ba[0]))
self.assertEqual(1, ex.getPositiveByteValue(ba[1]))
self.assertEqual(127, ex.getPositiveByteValue(ba[2]))
self.assertEqual(128, ex.getPositiveByteValue(ba[3]))
self.assertEqual(255, ex.getPositiveByteValue(ba[4]))
self.assertEqual(216, ex.getPositiveByteValue(ba[5]))
self.assertEqual(0, ex.getJavaByteValue(ba[0]))
self.assertEqual(1, ex.getJavaByteValue(ba[1]))
self.assertEqual(127, ex.getJavaByteValue(ba[2]))
self.assertEqual(-128, ex.getJavaByteValue(ba[3]))
self.assertEqual(-1, ex.getJavaByteValue(ba[4]))
def testProtocolConversion(self):
# b1 = tobytestr("abc\n")
b2 = bytearray([1, 2, 3, 255, 0, 128, 127])
# encoded1 = encode_bytearray(b1)
encoded2 = encode_bytearray(b2)
# self.assertEqual(b1, decode_bytearray(encoded1))
self.assertEqual(b2, decode_bytearray(encoded2))
def testBytesType(self):
ex = self.gateway.jvm.py4j.examples.UTFExample()
int_list = [0, 1, 10, 127, 128, 255]
ba1 = bytearray(int_list)
# Same for Python2, bytes for Python 3
ba2 = bytearray2(int_list)
a1 = ex.getBytesValue(ba1)
a2 = ex.getBytesValue(ba2)
for i1, i2 in zip(a1, int_list):
self.assertEqual(i1, i2)
for i1, i2 in zip(a2, int_list):
self.assertEqual(i1, i2)
def testBytesType2(self):
ex = self.gateway.jvm.py4j.examples.UTFExample()
int_list = [0, 1, 10, 127, 255, 128]
a1 = ex.getBytesValue()
# Python 2: bytearray (because str is too easy to confuse with normal
# strings)
# Python 3: bytes (because bytes is closer to the byte[] representation
# in Java)
self.assertTrue(isbytearray(a1) or ispython3bytestr(a1))
for i1, i2 in zip(a1, int_list):
self.assertEqual(i1, i2)
def testLargeByteArray(self):
# Regression test for #109, an error when passing large byte arrays.
self.gateway.jvm.java.nio.ByteBuffer.wrap(bytearray(range(255)))
class ExceptionTest(unittest.TestCase):
def setUp(self):
self.p = start_example_app_process()
self.gateway = JavaGateway()
def tearDown(self):
safe_shutdown(self)
self.p.join()
def testJavaError(self):
try:
self.gateway.jvm.Integer.valueOf("allo")
except Py4JJavaError as e:
self.assertEqual(
"java.lang.NumberFormatException",
e.java_exception.getClass().getName())
except Exception:
self.fail()
def testJavaConstructorError(self):
try:
self.gateway.jvm.Integer("allo")
except Py4JJavaError as e:
self.assertEqual(
"java.lang.NumberFormatException",
e.java_exception.getClass().getName())
except Exception:
self.fail()
def doError(self):
id = ""
try:
self.gateway.jvm.Integer.valueOf("allo")
except Py4JJavaError as e:
id = e.java_exception._target_id
return id
def testJavaErrorGC(self):
id = self.doError()
java_object = JavaObject(id, self.gateway._gateway_client)
try:
# Should fail because it should have been garbage collected...
java_object.getCause()
self.fail()
except Py4JError:
self.assertTrue(True)
def testReflectionError(self):
try:
self.gateway.jvm.Integer.valueOf2("allo")
except Py4JJavaError:
self.fail()
except Py4JNetworkError:
self.fail()
except Py4JError:
self.assertTrue(True)
def testStrError(self):
try:
self.gateway.jvm.Integer.valueOf("allo")
except Py4JJavaError as e:
self.assertTrue(str(e).startswith(
"An error occurred while calling z:java.lang.Integer.valueOf."
"\n: java.lang.NumberFormatException:"))
except Exception:
self.fail()
class JVMTest(unittest.TestCase):
def setUp(self):
self.p = start_example_app_process()
self.gateway = JavaGateway()
def tearDown(self):
safe_shutdown(self)
self.p.join()
def testConstructors(self):
jvm = self.gateway.jvm
sb = jvm.java.lang.StringBuffer("hello")
sb.append("hello world")
sb.append(1)
self.assertEqual(sb.toString(), "hellohello world1")
l1 = jvm.java.util.ArrayList()
l1.append("hello world")
l1.append(1)
self.assertEqual(2, len(l1))
self.assertEqual("hello world", l1[0])
l2 = ["hello world", 1]
self.assertEqual(str(l2), str(l1))
def testStaticMethods(self):
System = self.gateway.jvm.java.lang.System
self.assertTrue(System.currentTimeMillis() > 0)
self.assertEqual("123", self.gateway.jvm.java.lang.String.valueOf(123))
def testStaticFields(self):
Short = self.gateway.jvm.java.lang.Short
self.assertEqual(-32768, Short.MIN_VALUE)
System = self.gateway.jvm.java.lang.System
self.assertFalse(System.out.checkError())
def testDefaultImports(self):
self.assertTrue(self.gateway.jvm.System.currentTimeMillis() > 0)
self.assertEqual("123", self.gateway.jvm.String.valueOf(123))
def testNone(self):
ex = self.gateway.entry_point.getNewExample()
ex.method4(None)
def testJVMView(self):
newView = self.gateway.new_jvm_view("myjvm")
time = newView.System.currentTimeMillis()
self.assertTrue(time > 0)
time = newView.java.lang.System.currentTimeMillis()
self.assertTrue(time > 0)
def testImport(self):
newView = self.gateway.new_jvm_view("myjvm")
java_import(self.gateway.jvm, "java.util.*")
java_import(self.gateway.jvm, "java.io.File")
self.assertTrue(self.gateway.jvm.ArrayList() is not None)
self.assertTrue(self.gateway.jvm.File("hello.txt") is not None)
self.assertRaises(Exception, lambda: newView.File("test.txt"))
java_import(newView, "java.util.HashSet")
self.assertTrue(newView.HashSet() is not None)
def testEnum(self):
self.assertEqual("FOO", str(self.gateway.jvm.py4j.examples.Enum2.FOO))
def testInnerClass(self):
self.assertEqual(
"FOO",
str(self.gateway.jvm.py4j.examples.EnumExample.MyEnum.FOO))
self.assertEqual(
"HELLO2",
self.gateway.jvm.py4j.examples.EnumExample.InnerClass.MY_CONSTANT2)
class HelpTest(unittest.TestCase):
def setUp(self):
self.p = start_example_app_process()
self.gateway = JavaGateway()
def tearDown(self):
safe_shutdown(self)
self.p.join()
def testHelpObject(self):
ex = self.gateway.getNewExample()
help_page = self.gateway.help(ex, short_name=True, display=False)
self.assertTrue(len(help_page) > 1)
def testHelpObjectWithPattern(self):
ex = self.gateway.getNewExample()
help_page = self.gateway.help(
ex, pattern="m*", short_name=True, display=False)
self.assertTrue(len(help_page) > 1)
def testHelpClass(self):
String = self.gateway.jvm.java.lang.String
help_page = self.gateway.help(String, short_name=False, display=False)
self.assertTrue(len(help_page) > 1)
self.assertTrue("String" in help_page)
class Runner(Thread):
def __init__(self, runner_range, gateway):
Thread.__init__(self)
self.range = runner_range
self.gateway = gateway
self.ok = True
def run(self):
ex = self.gateway.getNewExample()
for i in self.range:
try:
l = ex.getList(i)
if len(l) != i:
self.ok = False
break
self.gateway.detach(l)
# gc.collect()
except Exception:
self.ok = False
break
class ThreadTest(unittest.TestCase):
def setUp(self):
self.p = start_example_app_process()
gateway_client = GatewayClient()
self.gateway = JavaGateway()
self.gateway.set_gateway_client(gateway_client)
def tearDown(self):
safe_shutdown(self)
self.p.join()
def testStress(self):
# Real stress test!
# runner1 = Runner(xrange(1,10000,2),self.gateway)
# runner2 = Runner(xrange(1000,1000000,10000), self.gateway)
# runner3 = Runner(xrange(1000,1000000,10000), self.gateway)
# Small stress test
runner1 = Runner(range(1, 10000, 1000), self.gateway)
runner2 = Runner(range(1000, 1000000, 100000), self.gateway)
runner3 = Runner(range(1000, 1000000, 100000), self.gateway)
runner1.start()
runner2.start()
runner3.start()
runner1.join()
runner2.join()
runner3.join()
self.assertTrue(runner1.ok)
self.assertTrue(runner2.ok)
self.assertTrue(runner3.ok)
class GatewayLauncherTest(unittest.TestCase):
def tearDown(self):
safe_shutdown(self)
def testDefaults(self):
self.gateway = JavaGateway.launch_gateway()
self.assertTrue(self.gateway.jvm)
def testJavaopts(self):
self.gateway = JavaGateway.launch_gateway(javaopts=["-Xmx64m"])
self.assertTrue(self.gateway.jvm)
def testRedirectToNull(self):
self.gateway = JavaGateway.launch_gateway()
for i in range(4097): # Hangs if not properly redirected
self.gateway.jvm.System.out.println("Test")
def testRedirectToQueue(self):
qout = Queue()
qerr = Queue()
self.gateway = JavaGateway.launch_gateway(
redirect_stdout=qout, redirect_stderr=qerr)
for i in range(10):
self.gateway.jvm.System.out.println("Test")
self.gateway.jvm.System.err.println("Test2")
sleep()
for i in range(10):
self.assertEqual("Test\n", qout.get())
self.assertEqual("Test2\n", qerr.get())
        self.assertTrue(qout.empty())
        self.assertTrue(qerr.empty())
def testRedirectToDeque(self):
qout = deque()
qerr = deque()
self.gateway = JavaGateway.launch_gateway(
redirect_stdout=qout, redirect_stderr=qerr)
for i in range(10):
self.gateway.jvm.System.out.println("Test")
self.gateway.jvm.System.err.println("Test2")
sleep()
for i in range(10):
self.assertEqual("Test\n", qout.pop())
self.assertEqual("Test2\n", qerr.pop())
self.assertEqual(0, len(qout))
self.assertEqual(0, len(qerr))
def testRedirectToFile(self):
(_, outpath) = tempfile.mkstemp(text=True)
(_, errpath) = tempfile.mkstemp(text=True)
stdout = open(outpath, "w")
stderr = open(errpath, "w")
try:
self.gateway = JavaGateway.launch_gateway(
redirect_stdout=stdout, redirect_stderr=stderr)
for i in range(10):
self.gateway.jvm.System.out.println("Test")
self.gateway.jvm.System.err.println("Test2")
# Should not be necessary
quiet_close(stdout)
quiet_close(stderr)
# Test that the redirect files were written to correctly
with open(outpath, "r") as stdout:
lines = stdout.readlines()
self.assertEqual(10, len(lines))
self.assertEqual("Test\n", lines[0])
with open(errpath, "r") as stderr:
lines = stderr.readlines()
self.assertEqual(10, len(lines))
self.assertEqual("Test2\n", lines[0])
finally:
os.unlink(outpath)
os.unlink(errpath)
if __name__ == "__main__":
unittest.main()
|
dispatcher_node.py
|
# Copyright 2021 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import rmf_adapter as adpt
import rmf_adapter.type as Type
import rmf_adapter.nodes as Nodes
from threading import Thread
def submit_task_thread(mod):
time.sleep(2)
task_desc = Type.CPPTaskDescriptionMsg()
task_desc.delivery = adpt.type.CPPDeliveryMsg()
    print("Submitting 2 sample delivery tasks")
id1 = mod.submit_task(task_desc)
id2 = mod.submit_task(task_desc)
print(f" active list >> {mod.get_active_task_ids()}")
time.sleep(3)
print("---- Assertion ----")
state1 = mod.get_task_state(id1)
state2 = mod.get_task_state(id2)
state3 = mod.get_task_state("null_id")
print(f" {id1}: {state1} \n {id2}: {state2} \n null_id: {state3}")
    assert state1 == Nodes.TaskState.Failed, "state should be Failed due to absence of a bid"
    assert state2 == Nodes.TaskState.Pending, "state should be pending"
    assert state3 is None, "state should be none"
assert mod.cancel_task(id2), "failed to cancel task"
# check if task is canceled
state2 = mod.get_task_state(id2)
print(f" Canceled:: {id2}: {state2} \n")
assert state2 == Nodes.TaskState.Canceled, "task should be canceled"
print("Done Check")
def main():
print("Starting Simple Dispatcher Node")
adpt.init_rclcpp()
dispatcher = Nodes.DispatcherNode.make_node("sample_dispatcher_node")
th1 = Thread(target=submit_task_thread, args=(dispatcher,))
th1.start()
while True:
adpt.spin_some_rclcpp(dispatcher.node())
time.sleep(0.2)
print("Exiting")
if __name__ == "__main__":
main()
|
_speech_synthesizer.py
|
# -*- coding: utf-8 -*-
"""
* Copyright 2015 Alibaba Group Holding Limited
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import json
import six
import websocket
import uuid
import threading
import time
from ali_speech._logging import _log
from ali_speech._constant import Status
from ali_speech._constant import Constant
from ali_speech._speech_reqprotocol import SpeechReqProtocol
class SpeechSynthesizer(SpeechReqProtocol):
def __init__(self, callback, url):
super(SpeechSynthesizer, self).__init__(callback, url)
self._last_start_retry = False
self._is_connected = False
self._header[Constant.HEADER_KEY_NAMESPACE] = Constant.HEADER_VALUE_TTS_NAMESPACE
self._payload[Constant.PAYLOAD_KEY_VOICE] = 'xiaoyun'
self._payload[Constant.PAYLOAD_KEY_FORMAT] = 'pcm'
self._payload[Constant.PAYLOAD_KEY_SAMPLE_RATE] = 16000
def set_text(self, text):
self._payload[Constant.PAYLOAD_KEY_TEXT] = text
def set_voice(self, voice):
self._payload[Constant.PAYLOAD_KEY_VOICE] = voice
def set_volume(self, volume):
self._payload[Constant.PAYLOAD_KEY_VOLUME] = volume
def set_speech_rate(self, speech_rate):
self._payload[Constant.PAYLOAD_KEY_SPEECH_RATE] = speech_rate
def set_pitch_rate(self, pitch_rate):
self._payload[Constant.PAYLOAD_KEY_PITCH_RATE] = pitch_rate
    def start(self, ping_interval=5, ping_timeout=3):
        """
        Start synthesis by opening a new connection to the server.
        :param ping_interval: interval, in seconds, between automatically sent ping frames
        :param ping_timeout: time, in seconds, to wait for a pong reply before timing out
        :return: 0 if the connection to the server was established successfully,
                 -1 if the connection could not be established
        """
if self._status == Status.STATUS_INIT:
_log.debug('starting synthesizer...')
self._status = Status.STATUS_STARTING
else:
_log.error("Illegal status: %s" % self._status)
return -1
def _on_open(ws):
_log.debug('websocket connected')
self._status = Status.STATUS_STARTED
self._is_connected = True
time.sleep(0.01)
msg_id = six.u(uuid.uuid1().hex)
self._task_id = six.u(uuid.uuid1().hex)
self._header[Constant.HEADER_KEY_NAME] = Constant.HEADER_VALUE_TTS_NAME_START
self._header[Constant.HEADER_KEY_MESSAGE_ID] = msg_id
self._header[Constant.HEADER_KEY_TASK_ID] = self._task_id
text = self.serialize()
_log.info('sending start cmd: ' + text)
ws.send(text)
def _on_data(ws, raw, opcode, flag):
if opcode == websocket.ABNF.OPCODE_BINARY:
_log.debug("received binary data, size: %s" % len(raw))
self._callback.on_binary_data_received(raw)
elif opcode == websocket.ABNF.OPCODE_TEXT:
_log.debug("websocket message received: %s" % raw)
msg = json.loads(raw)
name = msg[Constant.HEADER][Constant.HEADER_KEY_NAME]
if name == Constant.HEADER_VALUE_TTS_NAME_COMPLETED:
self._status = Status.STATUS_STOPPED
_log.debug('websocket status changed to stopped')
_log.debug('callback on_completed')
self._callback.on_completed(msg)
elif name == Constant.HEADER_VALUE_NAME_TASK_FAILED:
self._status = Status.STATUS_STOPPED
_log.error(msg)
_log.debug('websocket status changed to stopped')
_log.debug('callback on_task_failed')
self._callback.on_task_failed(msg)
elif name == 'MetaInfo':
self._callback.on_meta_info(msg)
def _on_close(ws):
_log.debug('callback on_channel_closed')
self._callback.on_channel_closed()
def _on_error(ws, error):
if self._is_connected or self._last_start_retry:
_log.error(error)
self._status = Status.STATUS_STOPPED
message = json.loads('{"header":{"namespace":"Default","name":"TaskFailed",'
'"status":400,"message_id":"0","task_id":"0",'
'"status_text":"%s"}}'
% error)
self._callback.on_task_failed(message)
else:
_log.warning('retry start: %s' % error)
retry_count = 3
for count in range(retry_count):
self._status = Status.STATUS_STARTING
if count == (retry_count - 1):
self._last_start_retry = True
# Init WebSocket
self._ws = websocket.WebSocketApp(self._gateway_url,
on_open=_on_open,
on_data=_on_data,
on_error=_on_error,
on_close=_on_close,
header={Constant.HEADER_TOKEN: self._token})
self._thread = threading.Thread(target=self._ws.run_forever, args=(None, None, ping_interval, ping_timeout))
self._thread.daemon = True
self._thread.start()
            # wait for no more than 10 seconds
for i in range(1000):
if self._status == Status.STATUS_STARTED or self._status == Status.STATUS_STOPPED:
break
else:
time.sleep(0.01)
if self._status == Status.STATUS_STARTED:
                # connection to the server established successfully
_log.debug('start succeed!')
return 0
else:
if self._is_connected or self._last_start_retry:
                    # the WebSocket connected but the server-side handshake failed,
                    # or this was the last retry, so return -1
_log.error("start failed, status: %s" % self._status)
return -1
else:
                    # otherwise, retry the connection
continue
    def wait_completed(self):
        """
        Wait for synthesis to finish.
        :return: 0 if synthesis completed,
                 -1 if waiting timed out
        """
ret = 0
if self._status == Status.STATUS_STARTED:
for i in range(100):
if self._status == Status.STATUS_STOPPED:
break
else:
time.sleep(0.1)
                    _log.debug('wait 100ms')
if self._status != Status.STATUS_STOPPED:
ret = -1
else:
ret = 0
else:
_log.error('should not wait completed in state %d', self._status)
ret = -1
return ret
    def close(self):
        """
        Close the WebSocket connection.
        :return:
        """
if self._ws:
if self._thread and self._thread.is_alive():
self._ws.keep_running = False
self._thread.join()
self._ws.close()
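# Illustrative sketch (assumption, not part of the SDK): a minimal callback object that
# implements the methods invoked by _on_data/_on_close/_on_error above, writing the
# synthesized PCM audio to a file. How a synthesizer instance is created and authenticated
# (token, gateway URL) is handled elsewhere in the SDK and only hinted at in the commented
# usage below; the names used there are placeholders.
class _ExampleSynthesizerCallback(object):
    def __init__(self, output_path):
        self._output = open(output_path, 'wb')
    def on_binary_data_received(self, raw):
        # raw is one chunk of synthesized audio
        self._output.write(raw)
    def on_meta_info(self, message):
        _log.debug('meta info: %s' % message)
    def on_completed(self, message):
        _log.debug('synthesis completed: %s' % message)
        self._output.close()
    def on_task_failed(self, message):
        _log.error('synthesis failed: %s' % message)
        self._output.close()
    def on_channel_closed(self):
        _log.debug('channel closed')
# Hypothetical usage:
#   callback = _ExampleSynthesizerCallback('output.pcm')
#   synthesizer = SpeechSynthesizer(callback, url)  # url/token supplied by the SDK client
#   synthesizer.set_text('hello')
#   if synthesizer.start() == 0:
#       synthesizer.wait_completed()
#   synthesizer.close()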
|
gruv_socks.py
|
from time import sleep
from select import select
from threading import Thread
from traceback import print_exc
from struct import pack, unpack, error as struct_error
from socket import socket, SHUT_RDWR, SOL_SOCKET, SO_REUSEADDR, error as socket_error
SOCK_ERROR = b'\x01'
SOCK_TIMEOUT = b'\x00'
class Socket(object):
"""
    Provides an easy-to-use abstraction for framing, transporting, and reconstructing messages over TCP.
"""
def __init__(self, sock=None, timeout: int=60, debug: bool=False):
"""
sock: Existing socket to use if supplied.
        timeout: Time to wait (in seconds) for certain socket operations before stopping, e.g. connecting, reading data.
debug: If set to true, the stack trace will be printed to console upon errors for debugging.
"""
self.timeout = timeout
self.debug = debug
self.__sock = sock
def __str__(self) -> str:
return f"gruv_socks.Socket(timeout={self.timeout}, debug={self.debug})"
def __add__(self, x: bytes) -> bool:
return self.write(x)
def connect(self, host: str, port: int, timeout: int=10) -> bool:
"""
Attempts to establish a connection to a given host. Returns bool dictating status.
host: Hostname/Address of host to connect to.
port: Port on the given host to connect to.
        timeout: Time (in seconds) to wait for the connection to complete.
"""
if self.__sock is not None:
print(f"[ERROR] (Socket.connect) could not connect, reason: socket already connected")
return False
try:
self.__sock = socket()
self.__sock.settimeout(timeout) # set connection timeout
self.__sock.connect((host, port))
self.__sock.settimeout(self.timeout) # return back to original timeout
return True
except Exception as err:
if self.debug: print_exc()
print(f"[SOCKET ERROR] (Socket.connect) could not connect, reason: {err}")
return False
def read(self, timeout_override: int=0) -> tuple[bool, bytes]:
"""
Attempts to read data from the socket object.
        Returns a tuple containing a boolean success status and the received data as a byte string.
If the status is False, then either gruv_socks.SOCK_ERROR, or gruv_socks.SOCK_TIMEOUT will be returned as the data.
timeout_override: If not 0, then overrides the set timeout for this singular read call.
"""
        fragments = [] # collect chunks in a list and join once at the end; avoids repeated byte-string concatenation
data_length = 0
message_length = 0
timeout = timeout_override if timeout_override != 0 else self.timeout
if self.__sock is None:
            print("[ERROR] (Socket.read) could not receive data, reason: socket is not connected")
            return (False, SOCK_ERROR)
try:
            # wait for data to become available, honouring the override timeout if one was given
if len(select([self.__sock], [], [], timeout)[0]) == 0:
return (False, SOCK_TIMEOUT)
message_length = unpack(">I", self.__sock.recv(4))[0]
while data_length < message_length:
                # read into a local buffer and track the running length; cheaper than re-measuring fragments each pass
if len(select([self.__sock], [], [], timeout)[0]) == 0:
return (False, SOCK_TIMEOUT)
buffer = self.__sock.recv(message_length-data_length)
data_length += len(buffer)
fragments.append(buffer)
return (True, b''.join(fragments))
except struct_error as err:
if self.debug: print_exc()
print(f"[STRUCT ERROR] (Socket.read) could not receive data, reason: {err}")
return (False, SOCK_ERROR)
except socket_error as err:
if self.debug: print_exc()
print(f"[SOCKET ERROR] (Socket.read) could not receive data, reason: {err}")
return (False, SOCK_ERROR)
except Exception as err:
if self.debug: print_exc()
print(f"[ERROR] (Socket.read) could not receive data, reason: {err}")
return (False, SOCK_ERROR)
    def write(self, data: bytes | str) -> bool:
        """
        Attempts to write data to the socket object, sending it to the connected host. Returns a boolean dictating status.
data: Data to send to connected host.
"""
if self.__sock is None:
print("[ERROR] (Socket.write) could not send data, reason: socket is not connected")
return False
if isinstance(data, str):
data = data.encode()
sent = 0
length = len(data)
try:
data = pack(">I", length) + data
while sent < length:
sent += self.__sock.send(data[sent:])
return True
except struct_error as err:
if self.debug: print_exc()
print(f"[STRUCT ERROR] (Socket.write) could not send data, reason: {err}")
return False
except socket_error as err:
if self.debug: print_exc()
print(f"[SOCKET ERROR] (Socket.write) could not send data, reason: {err}")
return False
except Exception as err:
if self.debug: print_exc()
print(f"[ERROR] (Socket.write) could not send data, reason: {err}")
return False
def disconnect(self):
"""
Properly disconnects the socket object by shutting down READ/WRITE channels, and then closing the socket.
"""
if self.__sock is None: return
try: self.__sock.shutdown(SHUT_RDWR)
except Exception: pass
finally:
try: self.__sock.close()
except Exception: pass
finally: self.__sock = None
def __del__(self):
"""
Socket destructor, ensures the socket object properly closes before being destroyed.
"""
self.disconnect()
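# Illustrative sketch (assumption, not part of the library): the wire format used by
# Socket.write/Socket.read above is a 4-byte big-endian unsigned length prefix followed
# by the raw payload bytes. frame_message is a hypothetical helper that builds a single
# framed message the same way Socket.write does, e.g. frame_message(b"hi") == b"\x00\x00\x00\x02hi".
def frame_message(payload: bytes) -> bytes:
    """Return payload prefixed with its length packed as a 4-byte big-endian integer."""
    return pack(">I", len(payload)) + payload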
class ServerBase:
"""
Quasi-framework for creating a server using the Socket object.
"""
def __init__(self, debug: bool=False):
"""
Initializes the ServerBase object.
debug: Decides if stack trace information is printed to console or not.
"""
self.debug = debug
self.running = False
self.__listener = None
def __listen(self, callback):
"""
Listens for incoming connections and hands them off to the callback function supplied.
"""
while self.running:
try:
if len(select([self.__listener], [], [], 0.1)[0]) == 0: continue
sock, addr = self.__listener.accept()
t = Thread(target=callback, args=(addr, Socket(sock, debug=self.debug)))
                t.daemon = True
t.start()
except KeyboardInterrupt:
self.running = False
except Exception as err:
if self.debug: print_exc()
if self.running: print(f"[ERROR] (ServerBase.__listen): {err}")
def start(self, callback, port: int, address: str="0.0.0.0", blocking: bool=False):
"""
Makes the server listen with the given configuration.
The callback function is supplied 2 arguments. The first is a tuple of the remote IP, and the remote port.
The second argument is the Socket object of the remote connection.
callback: Callback function to trigger upon new connections.
callback( (host: str, port: int), Socket )
port: Port to listen on.
address: Address to listen on.
        blocking: Boolean dictating whether this function should block, or spawn a thread to listen.
"""
if self.__listener is not None:
raise Exception("server is already listening")
self.__listener = socket()
self.__listener.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
self.__listener.bind((address, port))
self.__listener.listen()
self.running = True
if blocking:
self.__listen(callback)
else:
t = Thread(target=self.__listen, args=(callback,))
            t.daemon = True
t.start()
def stop(self):
"""
Stops the server by shutting down the listening socket and triggering the background thread to stop.
"""
self.running = False
sleep(0.2)
try: self.__listener.shutdown(SHUT_RDWR)
except Exception: pass
finally:
try: self.__listener.close()
except Exception: pass
self.__listener = None
def __del__(self):
"""
Ensures the listening socket is properly closed, and the listening thread exits gracefully.
"""
if self.running is True:
self.stop()
def echo_callback(addr: tuple[str, int], sock: Socket):
data = sock.read()[1]
sock.write(data)
sock.disconnect()
def server_test():
conn = Socket(debug=True)
serv = ServerBase(debug=True)
serv.start(echo_callback, 5551, address="127.0.0.1")
conn.connect("127.0.0.1", 5551)
conn.write(b"If you see this message, that means gruv_socks is working!")
print(conn.read()[1].decode())
conn.disconnect()
serv.stop()
if __name__ == "__main__":
server_test()
|
test_s3.py
|
import boto3
import botocore.session
from botocore.exceptions import ClientError
from botocore.exceptions import ParamValidationError
from nose.tools import eq_ as eq
from nose.plugins.attrib import attr
from nose.plugins.skip import SkipTest
import isodate
import email.utils
import datetime
import threading
import re
import pytz
from collections import OrderedDict
import requests
import json
import base64
import hmac
import hashlib
import xml.etree.ElementTree as ET
import time
import operator
import nose
import os
import string
import random
import socket
import dateutil.parser
import ssl
from collections import namedtuple
from email.header import decode_header
from .utils import assert_raises
from .utils import generate_random
from .utils import _get_status_and_error_code
from .utils import _get_status
from .policy import Policy, Statement, make_json_policy
from . import (
get_client,
get_prefix,
get_unauthenticated_client,
get_bad_auth_client,
get_v2_client,
get_new_bucket,
get_new_bucket_name,
get_new_bucket_resource,
get_config_is_secure,
get_config_host,
get_config_port,
get_config_endpoint,
get_main_aws_access_key,
get_main_aws_secret_key,
get_main_display_name,
get_main_user_id,
get_main_email,
get_main_api_name,
get_alt_aws_access_key,
get_alt_aws_secret_key,
get_alt_display_name,
get_alt_user_id,
get_alt_email,
get_alt_client,
get_tenant_client,
get_tenant_iam_client,
get_tenant_user_id,
get_buckets_list,
get_objects_list,
get_main_kms_keyid,
get_secondary_kms_keyid,
get_svc_client,
nuke_prefixed_buckets,
)
def _bucket_is_empty(bucket):
is_empty = True
for obj in bucket.objects.all():
is_empty = False
break
return is_empty
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='empty buckets return no contents')
def test_bucket_list_empty():
bucket = get_new_bucket_resource()
is_empty = _bucket_is_empty(bucket)
eq(is_empty, True)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='distinct buckets have different contents')
def test_bucket_list_distinct():
bucket1 = get_new_bucket_resource()
bucket2 = get_new_bucket_resource()
obj = bucket1.put_object(Body='str', Key='asdf')
is_empty = _bucket_is_empty(bucket2)
eq(is_empty, True)
def _create_objects(bucket=None, bucket_name=None, keys=[]):
"""
Populate a (specified or new) bucket with objects with
specified names (and contents identical to their names).
"""
if bucket_name is None:
bucket_name = get_new_bucket_name()
if bucket is None:
bucket = get_new_bucket_resource(name=bucket_name)
for key in keys:
obj = bucket.put_object(Body=key, Key=key)
return bucket_name
def _get_keys(response):
"""
    return a list of strings that are the keys from a client.list_objects() response
"""
keys = []
if 'Contents' in response:
objects_list = response['Contents']
keys = [obj['Key'] for obj in objects_list]
return keys
def _get_prefixes(response):
"""
    return a list of strings that are the prefixes from a client.list_objects() response
"""
prefixes = []
if 'CommonPrefixes' in response:
prefix_list = response['CommonPrefixes']
prefixes = [prefix['Prefix'] for prefix in prefix_list]
return prefixes
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys')
@attr(assertion='pagination w/max_keys=2, no marker')
def test_bucket_list_many():
bucket_name = _create_objects(keys=['foo', 'bar', 'baz'])
client = get_client()
response = client.list_objects(Bucket=bucket_name, MaxKeys=2)
keys = _get_keys(response)
eq(len(keys), 2)
eq(keys, ['bar', 'baz'])
eq(response['IsTruncated'], True)
response = client.list_objects(Bucket=bucket_name, Marker='baz',MaxKeys=2)
keys = _get_keys(response)
eq(len(keys), 1)
eq(response['IsTruncated'], False)
eq(keys, ['foo'])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys')
@attr(assertion='pagination w/max_keys=2, no marker')
@attr('list-objects-v2')
def test_bucket_listv2_many():
bucket_name = _create_objects(keys=['foo', 'bar', 'baz'])
client = get_client()
response = client.list_objects_v2(Bucket=bucket_name, MaxKeys=2)
keys = _get_keys(response)
eq(len(keys), 2)
eq(keys, ['bar', 'baz'])
eq(response['IsTruncated'], True)
response = client.list_objects_v2(Bucket=bucket_name, StartAfter='baz',MaxKeys=2)
keys = _get_keys(response)
eq(len(keys), 1)
eq(response['IsTruncated'], False)
eq(keys, ['foo'])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='keycount in listobjectsv2')
@attr('list-objects-v2')
def test_basic_key_count():
client = get_client()
bucket_names = []
bucket_name = get_new_bucket_name()
client.create_bucket(Bucket=bucket_name)
for j in range(5):
client.put_object(Bucket=bucket_name, Key=str(j))
response1 = client.list_objects_v2(Bucket=bucket_name)
eq(response1['KeyCount'], 5)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='prefixes in multi-component object names')
def test_bucket_list_delimiter_basic():
bucket_name = _create_objects(keys=['foo/bar', 'foo/bar/xyzzy', 'quux/thud', 'asdf'])
client = get_client()
response = client.list_objects(Bucket=bucket_name, Delimiter='/')
eq(response['Delimiter'], '/')
keys = _get_keys(response)
eq(keys, ['asdf'])
prefixes = _get_prefixes(response)
eq(len(prefixes), 2)
eq(prefixes, ['foo/', 'quux/'])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='prefixes in multi-component object names')
@attr('list-objects-v2')
def test_bucket_listv2_delimiter_basic():
bucket_name = _create_objects(keys=['foo/bar', 'foo/bar/xyzzy', 'quux/thud', 'asdf'])
client = get_client()
response = client.list_objects_v2(Bucket=bucket_name, Delimiter='/')
eq(response['Delimiter'], '/')
keys = _get_keys(response)
eq(keys, ['asdf'])
prefixes = _get_prefixes(response)
eq(len(prefixes), 2)
eq(prefixes, ['foo/', 'quux/'])
eq(response['KeyCount'], len(prefixes) + len(keys))
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='test url encoding')
@attr('list-objects-v2')
def test_bucket_listv2_encoding_basic():
bucket_name = _create_objects(keys=['foo+1/bar', 'foo/bar/xyzzy', 'quux ab/thud', 'asdf+b'])
client = get_client()
response = client.list_objects_v2(Bucket=bucket_name, Delimiter='/', EncodingType='url')
eq(response['Delimiter'], '/')
keys = _get_keys(response)
eq(keys, ['asdf%2Bb'])
prefixes = _get_prefixes(response)
eq(len(prefixes), 3)
eq(prefixes, ['foo%2B1/', 'foo/', 'quux%20ab/'])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='test url encoding')
@attr('list-objects')
def test_bucket_list_encoding_basic():
bucket_name = _create_objects(keys=['foo+1/bar', 'foo/bar/xyzzy', 'quux ab/thud', 'asdf+b'])
client = get_client()
response = client.list_objects(Bucket=bucket_name, Delimiter='/', EncodingType='url')
eq(response['Delimiter'], '/')
keys = _get_keys(response)
eq(keys, ['asdf%2Bb'])
prefixes = _get_prefixes(response)
eq(len(prefixes), 3)
eq(prefixes, ['foo%2B1/', 'foo/', 'quux%20ab/'])
def validate_bucket_list(bucket_name, prefix, delimiter, marker, max_keys,
is_truncated, check_objs, check_prefixes, next_marker):
client = get_client()
response = client.list_objects(Bucket=bucket_name, Delimiter=delimiter, Marker=marker, MaxKeys=max_keys, Prefix=prefix)
eq(response['IsTruncated'], is_truncated)
if 'NextMarker' not in response:
response['NextMarker'] = None
eq(response['NextMarker'], next_marker)
keys = _get_keys(response)
prefixes = _get_prefixes(response)
eq(len(keys), len(check_objs))
eq(len(prefixes), len(check_prefixes))
eq(keys, check_objs)
eq(prefixes, check_prefixes)
return response['NextMarker']
def validate_bucket_listv2(bucket_name, prefix, delimiter, continuation_token, max_keys,
is_truncated, check_objs, check_prefixes, last=False):
client = get_client()
params = dict(Bucket=bucket_name, Delimiter=delimiter, MaxKeys=max_keys, Prefix=prefix)
if continuation_token is not None:
params['ContinuationToken'] = continuation_token
else:
params['StartAfter'] = ''
response = client.list_objects_v2(**params)
eq(response['IsTruncated'], is_truncated)
if 'NextContinuationToken' not in response:
response['NextContinuationToken'] = None
if last:
eq(response['NextContinuationToken'], None)
keys = _get_keys(response)
prefixes = _get_prefixes(response)
eq(len(keys), len(check_objs))
eq(len(prefixes), len(check_prefixes))
eq(keys, check_objs)
eq(prefixes, check_prefixes)
return response['NextContinuationToken']
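# Illustrative sketch (assumption, not part of the suite): how a caller would walk an
# entire ListObjectsV2 result set by following NextContinuationToken, i.e. the same
# pagination the validators above exercise one page at a time. _list_all_keys_v2 is a
# hypothetical helper and is not used by the tests below.
def _list_all_keys_v2(bucket_name, prefix='', delimiter='', max_keys=1000):
    client = get_client()
    keys = []
    params = dict(Bucket=bucket_name, Prefix=prefix, Delimiter=delimiter, MaxKeys=max_keys)
    while True:
        response = client.list_objects_v2(**params)
        keys += _get_keys(response)
        if not response['IsTruncated']:
            return keys
        # resume from where the previous page stopped
        params['ContinuationToken'] = response['NextContinuationToken']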
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='prefixes in multi-component object names')
def test_bucket_list_delimiter_prefix():
bucket_name = _create_objects(keys=['asdf', 'boo/bar', 'boo/baz/xyzzy', 'cquux/thud', 'cquux/bla'])
delim = '/'
marker = ''
prefix = ''
marker = validate_bucket_list(bucket_name, prefix, delim, '', 1, True, ['asdf'], [], 'asdf')
marker = validate_bucket_list(bucket_name, prefix, delim, marker, 1, True, [], ['boo/'], 'boo/')
marker = validate_bucket_list(bucket_name, prefix, delim, marker, 1, False, [], ['cquux/'], None)
marker = validate_bucket_list(bucket_name, prefix, delim, '', 2, True, ['asdf'], ['boo/'], 'boo/')
marker = validate_bucket_list(bucket_name, prefix, delim, marker, 2, False, [], ['cquux/'], None)
prefix = 'boo/'
marker = validate_bucket_list(bucket_name, prefix, delim, '', 1, True, ['boo/bar'], [], 'boo/bar')
marker = validate_bucket_list(bucket_name, prefix, delim, marker, 1, False, [], ['boo/baz/'], None)
marker = validate_bucket_list(bucket_name, prefix, delim, '', 2, False, ['boo/bar'], ['boo/baz/'], None)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='prefixes in multi-component object names')
@attr('list-objects-v2')
def test_bucket_listv2_delimiter_prefix():
bucket_name = _create_objects(keys=['asdf', 'boo/bar', 'boo/baz/xyzzy', 'cquux/thud', 'cquux/bla'])
delim = '/'
continuation_token = ''
prefix = ''
continuation_token = validate_bucket_listv2(bucket_name, prefix, delim, None, 1, True, ['asdf'], [])
continuation_token = validate_bucket_listv2(bucket_name, prefix, delim, continuation_token, 1, True, [], ['boo/'])
continuation_token = validate_bucket_listv2(bucket_name, prefix, delim, continuation_token, 1, False, [], ['cquux/'], last=True)
continuation_token = validate_bucket_listv2(bucket_name, prefix, delim, None, 2, True, ['asdf'], ['boo/'])
continuation_token = validate_bucket_listv2(bucket_name, prefix, delim, continuation_token, 2, False, [], ['cquux/'], last=True)
prefix = 'boo/'
continuation_token = validate_bucket_listv2(bucket_name, prefix, delim, None, 1, True, ['boo/bar'], [])
continuation_token = validate_bucket_listv2(bucket_name, prefix, delim, continuation_token, 1, False, [], ['boo/baz/'], last=True)
continuation_token = validate_bucket_listv2(bucket_name, prefix, delim, None, 2, False, ['boo/bar'], ['boo/baz/'], last=True)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='prefix and delimiter handling when object ends with delimiter')
@attr('list-objects-v2')
def test_bucket_listv2_delimiter_prefix_ends_with_delimiter():
bucket_name = _create_objects(keys=['asdf/'])
validate_bucket_listv2(bucket_name, 'asdf/', '/', None, 1000, False, ['asdf/'], [], last=True)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='prefix and delimiter handling when object ends with delimiter')
def test_bucket_list_delimiter_prefix_ends_with_delimiter():
bucket_name = _create_objects(keys=['asdf/'])
validate_bucket_list(bucket_name, 'asdf/', '/', '', 1000, False, ['asdf/'], [], None)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='non-slash delimiter characters')
def test_bucket_list_delimiter_alt():
bucket_name = _create_objects(keys=['bar', 'baz', 'cab', 'foo'])
client = get_client()
response = client.list_objects(Bucket=bucket_name, Delimiter='a')
eq(response['Delimiter'], 'a')
keys = _get_keys(response)
# foo contains no 'a' and so is a complete key
eq(keys, ['foo'])
# bar, baz, and cab should be broken up by the 'a' delimiters
prefixes = _get_prefixes(response)
eq(len(prefixes), 2)
eq(prefixes, ['ba', 'ca'])
@attr(resource='bucket')
@attr(method='get')
@attr(assertion='non-slash delimiter characters')
@attr('list-objects-v2')
def test_bucket_listv2_delimiter_alt():
bucket_name = _create_objects(keys=['bar', 'baz', 'cab', 'foo'])
client = get_client()
response = client.list_objects_v2(Bucket=bucket_name, Delimiter='a')
eq(response['Delimiter'], 'a')
keys = _get_keys(response)
# foo contains no 'a' and so is a complete key
eq(keys, ['foo'])
# bar, baz, and cab should be broken up by the 'a' delimiters
prefixes = _get_prefixes(response)
eq(len(prefixes), 2)
eq(prefixes, ['ba', 'ca'])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='prefixes starting with underscore')
def test_bucket_list_delimiter_prefix_underscore():
bucket_name = _create_objects(keys=['_obj1_','_under1/bar', '_under1/baz/xyzzy', '_under2/thud', '_under2/bla'])
delim = '/'
marker = ''
prefix = ''
marker = validate_bucket_list(bucket_name, prefix, delim, '', 1, True, ['_obj1_'], [], '_obj1_')
marker = validate_bucket_list(bucket_name, prefix, delim, marker, 1, True, [], ['_under1/'], '_under1/')
marker = validate_bucket_list(bucket_name, prefix, delim, marker, 1, False, [], ['_under2/'], None)
marker = validate_bucket_list(bucket_name, prefix, delim, '', 2, True, ['_obj1_'], ['_under1/'], '_under1/')
marker = validate_bucket_list(bucket_name, prefix, delim, marker, 2, False, [], ['_under2/'], None)
prefix = '_under1/'
marker = validate_bucket_list(bucket_name, prefix, delim, '', 1, True, ['_under1/bar'], [], '_under1/bar')
marker = validate_bucket_list(bucket_name, prefix, delim, marker, 1, False, [], ['_under1/baz/'], None)
marker = validate_bucket_list(bucket_name, prefix, delim, '', 2, False, ['_under1/bar'], ['_under1/baz/'], None)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='prefixes starting with underscore')
@attr('list-objects-v2')
def test_bucket_listv2_delimiter_prefix_underscore():
bucket_name = _create_objects(keys=['_obj1_','_under1/bar', '_under1/baz/xyzzy', '_under2/thud', '_under2/bla'])
delim = '/'
continuation_token = ''
prefix = ''
continuation_token = validate_bucket_listv2(bucket_name, prefix, delim, None, 1, True, ['_obj1_'], [])
continuation_token = validate_bucket_listv2(bucket_name, prefix, delim, continuation_token , 1, True, [], ['_under1/'])
continuation_token = validate_bucket_listv2(bucket_name, prefix, delim, continuation_token , 1, False, [], ['_under2/'], last=True)
continuation_token = validate_bucket_listv2(bucket_name, prefix, delim, None, 2, True, ['_obj1_'], ['_under1/'])
continuation_token = validate_bucket_listv2(bucket_name, prefix, delim, continuation_token , 2, False, [], ['_under2/'], last=True)
prefix = '_under1/'
continuation_token = validate_bucket_listv2(bucket_name, prefix, delim, None, 1, True, ['_under1/bar'], [])
continuation_token = validate_bucket_listv2(bucket_name, prefix, delim, continuation_token , 1, False, [], ['_under1/baz/'], last=True)
continuation_token = validate_bucket_listv2(bucket_name, prefix, delim, None, 2, False, ['_under1/bar'], ['_under1/baz/'], last=True)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='percentage delimiter characters')
def test_bucket_list_delimiter_percentage():
bucket_name = _create_objects(keys=['b%ar', 'b%az', 'c%ab', 'foo'])
client = get_client()
response = client.list_objects(Bucket=bucket_name, Delimiter='%')
eq(response['Delimiter'], '%')
keys = _get_keys(response)
    # foo contains no '%' and so is a complete key
eq(keys, ['foo'])
prefixes = _get_prefixes(response)
eq(len(prefixes), 2)
    # b%ar, b%az, and c%ab should be broken up by the '%' delimiter
eq(prefixes, ['b%', 'c%'])
@attr(resource='bucket')
@attr(method='get')
@attr(assertion='percentage delimiter characters')
@attr('list-objects-v2')
def test_bucket_listv2_delimiter_percentage():
bucket_name = _create_objects(keys=['b%ar', 'b%az', 'c%ab', 'foo'])
client = get_client()
response = client.list_objects_v2(Bucket=bucket_name, Delimiter='%')
eq(response['Delimiter'], '%')
keys = _get_keys(response)
    # foo contains no '%' and so is a complete key
eq(keys, ['foo'])
prefixes = _get_prefixes(response)
eq(len(prefixes), 2)
    # b%ar, b%az, and c%ab should be broken up by the '%' delimiter
eq(prefixes, ['b%', 'c%'])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='whitespace delimiter characters')
def test_bucket_list_delimiter_whitespace():
bucket_name = _create_objects(keys=['b ar', 'b az', 'c ab', 'foo'])
client = get_client()
response = client.list_objects(Bucket=bucket_name, Delimiter=' ')
eq(response['Delimiter'], ' ')
keys = _get_keys(response)
    # foo contains no ' ' and so is a complete key
eq(keys, ['foo'])
prefixes = _get_prefixes(response)
eq(len(prefixes), 2)
    # b ar, b az, and c ab should be broken up by the ' ' delimiter
eq(prefixes, ['b ', 'c '])
@attr(resource='bucket')
@attr(method='get')
@attr(assertion='whitespace delimiter characters')
@attr('list-objects-v2')
def test_bucket_listv2_delimiter_whitespace():
bucket_name = _create_objects(keys=['b ar', 'b az', 'c ab', 'foo'])
client = get_client()
response = client.list_objects_v2(Bucket=bucket_name, Delimiter=' ')
eq(response['Delimiter'], ' ')
keys = _get_keys(response)
    # foo contains no ' ' and so is a complete key
eq(keys, ['foo'])
prefixes = _get_prefixes(response)
eq(len(prefixes), 2)
    # b ar, b az, and c ab should be broken up by the ' ' delimiter
eq(prefixes, ['b ', 'c '])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='dot delimiter characters')
def test_bucket_list_delimiter_dot():
bucket_name = _create_objects(keys=['b.ar', 'b.az', 'c.ab', 'foo'])
client = get_client()
response = client.list_objects(Bucket=bucket_name, Delimiter='.')
eq(response['Delimiter'], '.')
keys = _get_keys(response)
    # foo contains no '.' and so is a complete key
eq(keys, ['foo'])
prefixes = _get_prefixes(response)
eq(len(prefixes), 2)
    # b.ar, b.az, and c.ab should be broken up by the '.' delimiter
eq(prefixes, ['b.', 'c.'])
@attr(resource='bucket')
@attr(method='get')
@attr(assertion='dot delimiter characters')
@attr('list-objects-v2')
def test_bucket_listv2_delimiter_dot():
bucket_name = _create_objects(keys=['b.ar', 'b.az', 'c.ab', 'foo'])
client = get_client()
response = client.list_objects_v2(Bucket=bucket_name, Delimiter='.')
eq(response['Delimiter'], '.')
keys = _get_keys(response)
    # foo contains no '.' and so is a complete key
eq(keys, ['foo'])
prefixes = _get_prefixes(response)
eq(len(prefixes), 2)
    # b.ar, b.az, and c.ab should be broken up by the '.' delimiter
eq(prefixes, ['b.', 'c.'])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='non-printable delimiter can be specified')
def test_bucket_list_delimiter_unreadable():
key_names=['bar', 'baz', 'cab', 'foo']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects(Bucket=bucket_name, Delimiter='\x0a')
eq(response['Delimiter'], '\x0a')
keys = _get_keys(response)
prefixes = _get_prefixes(response)
eq(keys, key_names)
eq(prefixes, [])
@attr(resource='bucket')
@attr(method='get')
@attr(assertion='non-printable delimiter can be specified')
@attr('list-objects-v2')
def test_bucket_listv2_delimiter_unreadable():
key_names=['bar', 'baz', 'cab', 'foo']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects_v2(Bucket=bucket_name, Delimiter='\x0a')
eq(response['Delimiter'], '\x0a')
keys = _get_keys(response)
prefixes = _get_prefixes(response)
eq(keys, key_names)
eq(prefixes, [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='empty delimiter can be specified')
def test_bucket_list_delimiter_empty():
key_names = ['bar', 'baz', 'cab', 'foo']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects(Bucket=bucket_name, Delimiter='')
# putting an empty value into Delimiter will not return a value in the response
eq('Delimiter' in response, False)
keys = _get_keys(response)
prefixes = _get_prefixes(response)
eq(keys, key_names)
eq(prefixes, [])
@attr(resource='bucket')
@attr(method='get')
@attr(assertion='empty delimiter can be specified')
@attr('list-objects-v2')
def test_bucket_listv2_delimiter_empty():
key_names = ['bar', 'baz', 'cab', 'foo']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects_v2(Bucket=bucket_name, Delimiter='')
# putting an empty value into Delimiter will not return a value in the response
eq('Delimiter' in response, False)
keys = _get_keys(response)
prefixes = _get_prefixes(response)
eq(keys, key_names)
eq(prefixes, [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='unspecified delimiter defaults to none')
def test_bucket_list_delimiter_none():
key_names = ['bar', 'baz', 'cab', 'foo']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects(Bucket=bucket_name)
    # when no Delimiter is specified, the response does not include one
eq('Delimiter' in response, False)
keys = _get_keys(response)
prefixes = _get_prefixes(response)
eq(keys, key_names)
eq(prefixes, [])
@attr(resource='bucket')
@attr(method='get')
@attr(assertion='unspecified delimiter defaults to none')
@attr('list-objects-v2')
def test_bucket_listv2_delimiter_none():
key_names = ['bar', 'baz', 'cab', 'foo']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects_v2(Bucket=bucket_name)
    # when no Delimiter is specified, the response does not include one
eq('Delimiter' in response, False)
keys = _get_keys(response)
prefixes = _get_prefixes(response)
eq(keys, key_names)
eq(prefixes, [])
@attr('list-objects-v2')
def test_bucket_listv2_fetchowner_notempty():
key_names = ['foo/bar', 'foo/baz', 'quux']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects_v2(Bucket=bucket_name, FetchOwner=True)
objs_list = response['Contents']
eq('Owner' in objs_list[0], True)
@attr('list-objects-v2')
def test_bucket_listv2_fetchowner_defaultempty():
key_names = ['foo/bar', 'foo/baz', 'quux']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects_v2(Bucket=bucket_name)
objs_list = response['Contents']
eq('Owner' in objs_list[0], False)
@attr('list-objects-v2')
def test_bucket_listv2_fetchowner_empty():
key_names = ['foo/bar', 'foo/baz', 'quux']
bucket_name = _create_objects(keys=key_names)
client = get_client()
    response = client.list_objects_v2(Bucket=bucket_name, FetchOwner=False)
objs_list = response['Contents']
eq('Owner' in objs_list[0], False)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='unused delimiter is not found')
def test_bucket_list_delimiter_not_exist():
key_names = ['bar', 'baz', 'cab', 'foo']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects(Bucket=bucket_name, Delimiter='/')
    # the delimiter is echoed back even though no key contains it
eq(response['Delimiter'], '/')
keys = _get_keys(response)
prefixes = _get_prefixes(response)
eq(keys, key_names)
eq(prefixes, [])
@attr(resource='bucket')
@attr(method='get')
@attr(assertion='unused delimiter is not found')
@attr('list-objects-v2')
def test_bucket_listv2_delimiter_not_exist():
key_names = ['bar', 'baz', 'cab', 'foo']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects_v2(Bucket=bucket_name, Delimiter='/')
    # the delimiter is echoed back even though no key contains it
eq(response['Delimiter'], '/')
keys = _get_keys(response)
prefixes = _get_prefixes(response)
eq(keys, key_names)
eq(prefixes, [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='list with delimiter not skip special keys')
def test_bucket_list_delimiter_not_skip_special():
key_names = ['0/'] + ['0/%s' % i for i in range(1000, 1999)]
key_names2 = ['1999', '1999#', '1999+', '2000']
key_names += key_names2
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects(Bucket=bucket_name, Delimiter='/')
eq(response['Delimiter'], '/')
keys = _get_keys(response)
prefixes = _get_prefixes(response)
eq(keys, key_names2)
eq(prefixes, ['0/'])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix')
@attr(assertion='returns only objects under prefix')
def test_bucket_list_prefix_basic():
key_names = ['foo/bar', 'foo/baz', 'quux']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects(Bucket=bucket_name, Prefix='foo/')
eq(response['Prefix'], 'foo/')
keys = _get_keys(response)
prefixes = _get_prefixes(response)
eq(keys, ['foo/bar', 'foo/baz'])
eq(prefixes, [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix with list-objects-v2')
@attr(assertion='returns only objects under prefix')
@attr('list-objects-v2')
def test_bucket_listv2_prefix_basic():
key_names = ['foo/bar', 'foo/baz', 'quux']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects_v2(Bucket=bucket_name, Prefix='foo/')
eq(response['Prefix'], 'foo/')
keys = _get_keys(response)
prefixes = _get_prefixes(response)
eq(keys, ['foo/bar', 'foo/baz'])
eq(prefixes, [])
# just testing that we can do the delimiter and prefix logic on non-slashes
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix')
@attr(assertion='prefixes w/o delimiters')
def test_bucket_list_prefix_alt():
key_names = ['bar', 'baz', 'foo']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects(Bucket=bucket_name, Prefix='ba')
eq(response['Prefix'], 'ba')
keys = _get_keys(response)
prefixes = _get_prefixes(response)
eq(keys, ['bar', 'baz'])
eq(prefixes, [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix with list-objects-v2')
@attr(assertion='prefixes w/o delimiters')
@attr('list-objects-v2')
def test_bucket_listv2_prefix_alt():
key_names = ['bar', 'baz', 'foo']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects_v2(Bucket=bucket_name, Prefix='ba')
eq(response['Prefix'], 'ba')
keys = _get_keys(response)
prefixes = _get_prefixes(response)
eq(keys, ['bar', 'baz'])
eq(prefixes, [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix')
@attr(assertion='empty prefix returns everything')
def test_bucket_list_prefix_empty():
key_names = ['foo/bar', 'foo/baz', 'quux']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects(Bucket=bucket_name, Prefix='')
eq(response['Prefix'], '')
keys = _get_keys(response)
prefixes = _get_prefixes(response)
eq(keys, key_names)
eq(prefixes, [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix with list-objects-v2')
@attr(assertion='empty prefix returns everything')
@attr('list-objects-v2')
def test_bucket_listv2_prefix_empty():
key_names = ['foo/bar', 'foo/baz', 'quux']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects_v2(Bucket=bucket_name, Prefix='')
eq(response['Prefix'], '')
keys = _get_keys(response)
prefixes = _get_prefixes(response)
eq(keys, key_names)
eq(prefixes, [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix')
@attr(assertion='unspecified prefix returns everything')
def test_bucket_list_prefix_none():
key_names = ['foo/bar', 'foo/baz', 'quux']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects(Bucket=bucket_name, Prefix='')
eq(response['Prefix'], '')
keys = _get_keys(response)
prefixes = _get_prefixes(response)
eq(keys, key_names)
eq(prefixes, [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix with list-objects-v2')
@attr(assertion='unspecified prefix returns everything')
@attr('list-objects-v2')
def test_bucket_listv2_prefix_none():
key_names = ['foo/bar', 'foo/baz', 'quux']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects_v2(Bucket=bucket_name, Prefix='')
eq(response['Prefix'], '')
keys = _get_keys(response)
prefixes = _get_prefixes(response)
eq(keys, key_names)
eq(prefixes, [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix')
@attr(assertion='nonexistent prefix returns nothing')
def test_bucket_list_prefix_not_exist():
key_names = ['foo/bar', 'foo/baz', 'quux']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects(Bucket=bucket_name, Prefix='d')
eq(response['Prefix'], 'd')
keys = _get_keys(response)
prefixes = _get_prefixes(response)
eq(keys, [])
eq(prefixes, [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix with list-objects-v2')
@attr(assertion='nonexistent prefix returns nothing')
@attr('list-objects-v2')
def test_bucket_listv2_prefix_not_exist():
key_names = ['foo/bar', 'foo/baz', 'quux']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects_v2(Bucket=bucket_name, Prefix='d')
eq(response['Prefix'], 'd')
keys = _get_keys(response)
prefixes = _get_prefixes(response)
eq(keys, [])
eq(prefixes, [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix')
@attr(assertion='non-printable prefix can be specified')
def test_bucket_list_prefix_unreadable():
key_names = ['foo/bar', 'foo/baz', 'quux']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects(Bucket=bucket_name, Prefix='\x0a')
eq(response['Prefix'], '\x0a')
keys = _get_keys(response)
prefixes = _get_prefixes(response)
eq(keys, [])
eq(prefixes, [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix with list-objects-v2')
@attr(assertion='non-printable prefix can be specified')
@attr('list-objects-v2')
def test_bucket_listv2_prefix_unreadable():
key_names = ['foo/bar', 'foo/baz', 'quux']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects_v2(Bucket=bucket_name, Prefix='\x0a')
eq(response['Prefix'], '\x0a')
keys = _get_keys(response)
prefixes = _get_prefixes(response)
eq(keys, [])
eq(prefixes, [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix w/delimiter')
@attr(assertion='returns only objects directly under prefix')
def test_bucket_list_prefix_delimiter_basic():
key_names = ['foo/bar', 'foo/baz/xyzzy', 'quux/thud', 'asdf']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects(Bucket=bucket_name, Delimiter='/', Prefix='foo/')
eq(response['Prefix'], 'foo/')
eq(response['Delimiter'], '/')
keys = _get_keys(response)
prefixes = _get_prefixes(response)
eq(keys, ['foo/bar'])
eq(prefixes, ['foo/baz/'])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list-objects-v2 under prefix w/delimiter')
@attr(assertion='returns only objects directly under prefix')
@attr('list-objects-v2')
def test_bucket_listv2_prefix_delimiter_basic():
key_names = ['foo/bar', 'foo/baz/xyzzy', 'quux/thud', 'asdf']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects_v2(Bucket=bucket_name, Delimiter='/', Prefix='foo/')
eq(response['Prefix'], 'foo/')
eq(response['Delimiter'], '/')
keys = _get_keys(response)
prefixes = _get_prefixes(response)
eq(keys, ['foo/bar'])
eq(prefixes, ['foo/baz/'])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix w/delimiter')
@attr(assertion='non-slash delimiters')
def test_bucket_list_prefix_delimiter_alt():
key_names = ['bar', 'bazar', 'cab', 'foo']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects(Bucket=bucket_name, Delimiter='a', Prefix='ba')
eq(response['Prefix'], 'ba')
eq(response['Delimiter'], 'a')
keys = _get_keys(response)
prefixes = _get_prefixes(response)
eq(keys, ['bar'])
eq(prefixes, ['baza'])
@attr('list-objects-v2')
def test_bucket_listv2_prefix_delimiter_alt():
key_names = ['bar', 'bazar', 'cab', 'foo']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects_v2(Bucket=bucket_name, Delimiter='a', Prefix='ba')
eq(response['Prefix'], 'ba')
eq(response['Delimiter'], 'a')
keys = _get_keys(response)
prefixes = _get_prefixes(response)
eq(keys, ['bar'])
eq(prefixes, ['baza'])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix w/delimiter')
@attr(assertion='finds nothing w/unmatched prefix')
def test_bucket_list_prefix_delimiter_prefix_not_exist():
key_names = ['b/a/r', 'b/a/c', 'b/a/g', 'g']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects(Bucket=bucket_name, Delimiter='d', Prefix='/')
keys = _get_keys(response)
prefixes = _get_prefixes(response)
eq(keys, [])
eq(prefixes, [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list-objects-v2 under prefix w/delimiter')
@attr(assertion='finds nothing w/unmatched prefix')
@attr('list-objects-v2')
def test_bucket_listv2_prefix_delimiter_prefix_not_exist():
key_names = ['b/a/r', 'b/a/c', 'b/a/g', 'g']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects_v2(Bucket=bucket_name, Delimiter='d', Prefix='/')
keys = _get_keys(response)
prefixes = _get_prefixes(response)
eq(keys, [])
eq(prefixes, [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix w/delimiter')
@attr(assertion='over-ridden slash ceases to be a delimiter')
def test_bucket_list_prefix_delimiter_delimiter_not_exist():
key_names = ['b/a/c', 'b/a/g', 'b/a/r', 'g']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects(Bucket=bucket_name, Delimiter='z', Prefix='b')
keys = _get_keys(response)
prefixes = _get_prefixes(response)
eq(keys, ['b/a/c', 'b/a/g', 'b/a/r'])
eq(prefixes, [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list-objects-v2 under prefix w/delimiter')
@attr(assertion='over-ridden slash ceases to be a delimiter')
@attr('list-objects-v2')
def test_bucket_listv2_prefix_delimiter_delimiter_not_exist():
key_names = ['b/a/c', 'b/a/g', 'b/a/r', 'g']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects_v2(Bucket=bucket_name, Delimiter='z', Prefix='b')
keys = _get_keys(response)
prefixes = _get_prefixes(response)
eq(keys, ['b/a/c', 'b/a/g', 'b/a/r'])
eq(prefixes, [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix w/delimiter')
@attr(assertion='finds nothing w/unmatched prefix and delimiter')
def test_bucket_list_prefix_delimiter_prefix_delimiter_not_exist():
key_names = ['b/a/c', 'b/a/g', 'b/a/r', 'g']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects(Bucket=bucket_name, Delimiter='z', Prefix='y')
keys = _get_keys(response)
prefixes = _get_prefixes(response)
eq(keys, [])
eq(prefixes, [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list-objects-v2 under prefix w/delimiter')
@attr(assertion='finds nothing w/unmatched prefix and delimiter')
@attr('list-objects-v2')
def test_bucket_listv2_prefix_delimiter_prefix_delimiter_not_exist():
key_names = ['b/a/c', 'b/a/g', 'b/a/r', 'g']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects_v2(Bucket=bucket_name, Delimiter='z', Prefix='y')
keys = _get_keys(response)
prefixes = _get_prefixes(response)
eq(keys, [])
eq(prefixes, [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys')
@attr(assertion='pagination w/max_keys=1, marker')
def test_bucket_list_maxkeys_one():
key_names = ['bar', 'baz', 'foo', 'quxx']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects(Bucket=bucket_name, MaxKeys=1)
eq(response['IsTruncated'], True)
keys = _get_keys(response)
eq(keys, key_names[0:1])
response = client.list_objects(Bucket=bucket_name, Marker=key_names[0])
eq(response['IsTruncated'], False)
keys = _get_keys(response)
eq(keys, key_names[1:])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys with list-objects-v2')
@attr(assertion='pagination w/max_keys=1, marker')
@attr('list-objects-v2')
def test_bucket_listv2_maxkeys_one():
key_names = ['bar', 'baz', 'foo', 'quxx']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects_v2(Bucket=bucket_name, MaxKeys=1)
eq(response['IsTruncated'], True)
keys = _get_keys(response)
eq(keys, key_names[0:1])
response = client.list_objects_v2(Bucket=bucket_name, StartAfter=key_names[0])
eq(response['IsTruncated'], False)
keys = _get_keys(response)
eq(keys, key_names[1:])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys')
@attr(assertion='pagination w/max_keys=0')
def test_bucket_list_maxkeys_zero():
key_names = ['bar', 'baz', 'foo', 'quxx']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects(Bucket=bucket_name, MaxKeys=0)
eq(response['IsTruncated'], False)
keys = _get_keys(response)
eq(keys, [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys with list-objects-v2')
@attr(assertion='pagination w/max_keys=0')
@attr('list-objects-v2')
def test_bucket_listv2_maxkeys_zero():
key_names = ['bar', 'baz', 'foo', 'quxx']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects_v2(Bucket=bucket_name, MaxKeys=0)
eq(response['IsTruncated'], False)
keys = _get_keys(response)
eq(keys, [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys')
@attr(assertion='pagination w/o max_keys')
def test_bucket_list_maxkeys_none():
key_names = ['bar', 'baz', 'foo', 'quxx']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects(Bucket=bucket_name)
eq(response['IsTruncated'], False)
keys = _get_keys(response)
eq(keys, key_names)
eq(response['MaxKeys'], 1000)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys with list-objects-v2')
@attr(assertion='pagination w/o max_keys')
@attr('list-objects-v2')
def test_bucket_listv2_maxkeys_none():
key_names = ['bar', 'baz', 'foo', 'quxx']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects_v2(Bucket=bucket_name)
eq(response['IsTruncated'], False)
keys = _get_keys(response)
eq(keys, key_names)
eq(response['MaxKeys'], 1000)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys')
@attr(assertion='bucket list unordered')
@attr('fails_on_aws') # allow-unordered is a non-standard extension
def test_bucket_list_unordered():
# boto3.set_stream_logger(name='botocore')
keys_in = ['ado', 'bot', 'cob', 'dog', 'emu', 'fez', 'gnu', 'hex',
'abc/ink', 'abc/jet', 'abc/kin', 'abc/lax', 'abc/mux',
'def/nim', 'def/owl', 'def/pie', 'def/qed', 'def/rye',
'ghi/sew', 'ghi/tor', 'ghi/uke', 'ghi/via', 'ghi/wit',
'xix', 'yak', 'zoo']
bucket_name = _create_objects(keys=keys_in)
client = get_client()
# adds the unordered query parameter
def add_unordered(**kwargs):
kwargs['params']['url'] += "&allow-unordered=true"
client.meta.events.register('before-call.s3.ListObjects', add_unordered)
# test simple retrieval
response = client.list_objects(Bucket=bucket_name, MaxKeys=1000)
unordered_keys_out = _get_keys(response)
eq(len(keys_in), len(unordered_keys_out))
    eq(sorted(keys_in), sorted(unordered_keys_out))
# test retrieval with prefix
response = client.list_objects(Bucket=bucket_name,
MaxKeys=1000,
Prefix="abc/")
unordered_keys_out = _get_keys(response)
eq(5, len(unordered_keys_out))
# test incremental retrieval with marker
response = client.list_objects(Bucket=bucket_name, MaxKeys=6)
unordered_keys_out = _get_keys(response)
eq(6, len(unordered_keys_out))
# now get the next bunch
response = client.list_objects(Bucket=bucket_name,
MaxKeys=6,
Marker=unordered_keys_out[-1])
unordered_keys_out2 = _get_keys(response)
eq(6, len(unordered_keys_out2))
# make sure there's no overlap between the incremental retrievals
intersect = set(unordered_keys_out).intersection(unordered_keys_out2)
eq(0, len(intersect))
# verify that unordered used with delimiter results in error
e = assert_raises(ClientError,
client.list_objects, Bucket=bucket_name, Delimiter="/")
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
eq(error_code, 'InvalidArgument')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys with list-objects-v2')
@attr(assertion='bucket list unordered')
@attr('fails_on_aws') # allow-unordered is a non-standard extension
@attr('list-objects-v2')
def test_bucket_listv2_unordered():
# boto3.set_stream_logger(name='botocore')
keys_in = ['ado', 'bot', 'cob', 'dog', 'emu', 'fez', 'gnu', 'hex',
'abc/ink', 'abc/jet', 'abc/kin', 'abc/lax', 'abc/mux',
'def/nim', 'def/owl', 'def/pie', 'def/qed', 'def/rye',
'ghi/sew', 'ghi/tor', 'ghi/uke', 'ghi/via', 'ghi/wit',
'xix', 'yak', 'zoo']
bucket_name = _create_objects(keys=keys_in)
client = get_client()
# adds the unordered query parameter
def add_unordered(**kwargs):
kwargs['params']['url'] += "&allow-unordered=true"
    client.meta.events.register('before-call.s3.ListObjectsV2', add_unordered)
# test simple retrieval
response = client.list_objects_v2(Bucket=bucket_name, MaxKeys=1000)
unordered_keys_out = _get_keys(response)
eq(len(keys_in), len(unordered_keys_out))
    eq(sorted(keys_in), sorted(unordered_keys_out))
# test retrieval with prefix
response = client.list_objects_v2(Bucket=bucket_name,
MaxKeys=1000,
Prefix="abc/")
unordered_keys_out = _get_keys(response)
eq(5, len(unordered_keys_out))
    # test incremental retrieval with StartAfter
response = client.list_objects_v2(Bucket=bucket_name, MaxKeys=6)
unordered_keys_out = _get_keys(response)
eq(6, len(unordered_keys_out))
# now get the next bunch
response = client.list_objects_v2(Bucket=bucket_name,
MaxKeys=6,
StartAfter=unordered_keys_out[-1])
unordered_keys_out2 = _get_keys(response)
eq(6, len(unordered_keys_out2))
# make sure there's no overlap between the incremental retrievals
intersect = set(unordered_keys_out).intersection(unordered_keys_out2)
eq(0, len(intersect))
# verify that unordered used with delimiter results in error
    e = assert_raises(ClientError,
                      client.list_objects_v2, Bucket=bucket_name, Delimiter="/")
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
eq(error_code, 'InvalidArgument')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys')
@attr(assertion='invalid max_keys')
def test_bucket_list_maxkeys_invalid():
key_names = ['bar', 'baz', 'foo', 'quxx']
bucket_name = _create_objects(keys=key_names)
client = get_client()
# adds invalid max keys to url
# before list_objects is called
def add_invalid_maxkeys(**kwargs):
kwargs['params']['url'] += "&max-keys=blah"
client.meta.events.register('before-call.s3.ListObjects', add_invalid_maxkeys)
e = assert_raises(ClientError, client.list_objects, Bucket=bucket_name)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
eq(error_code, 'InvalidArgument')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys')
@attr(assertion='no pagination, no marker')
def test_bucket_list_marker_none():
key_names = ['bar', 'baz', 'foo', 'quxx']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects(Bucket=bucket_name)
eq(response['Marker'], '')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys')
@attr(assertion='no pagination, empty marker')
def test_bucket_list_marker_empty():
key_names = ['bar', 'baz', 'foo', 'quxx']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects(Bucket=bucket_name, Marker='')
eq(response['Marker'], '')
eq(response['IsTruncated'], False)
keys = _get_keys(response)
eq(keys, key_names)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys with list-objects-v2')
@attr(assertion='no pagination, empty continuationtoken')
@attr('list-objects-v2')
def test_bucket_listv2_continuationtoken_empty():
key_names = ['bar', 'baz', 'foo', 'quxx']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects_v2(Bucket=bucket_name, ContinuationToken='')
eq(response['ContinuationToken'], '')
eq(response['IsTruncated'], False)
keys = _get_keys(response)
eq(keys, key_names)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list keys with list-objects-v2')
@attr(assertion='no pagination, non-empty continuationtoken')
@attr('list-objects-v2')
def test_bucket_listv2_continuationtoken():
key_names = ['bar', 'baz', 'foo', 'quxx']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response1 = client.list_objects_v2(Bucket=bucket_name, MaxKeys=1)
next_continuation_token = response1['NextContinuationToken']
response2 = client.list_objects_v2(Bucket=bucket_name, ContinuationToken=next_continuation_token)
eq(response2['ContinuationToken'], next_continuation_token)
eq(response2['IsTruncated'], False)
key_names2 = ['baz', 'foo', 'quxx']
keys = _get_keys(response2)
eq(keys, key_names2)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list keys with list-objects-v2')
@attr(assertion='no pagination, non-empty continuationtoken and startafter')
@attr('list-objects-v2')
def test_bucket_listv2_both_continuationtoken_startafter():
key_names = ['bar', 'baz', 'foo', 'quxx']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response1 = client.list_objects_v2(Bucket=bucket_name, StartAfter='bar', MaxKeys=1)
next_continuation_token = response1['NextContinuationToken']
response2 = client.list_objects_v2(Bucket=bucket_name, StartAfter='bar', ContinuationToken=next_continuation_token)
eq(response2['ContinuationToken'], next_continuation_token)
eq(response2['StartAfter'], 'bar')
eq(response2['IsTruncated'], False)
key_names2 = ['foo', 'quxx']
keys = _get_keys(response2)
eq(keys, key_names2)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys')
@attr(assertion='non-printing marker')
def test_bucket_list_marker_unreadable():
key_names = ['bar', 'baz', 'foo', 'quxx']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects(Bucket=bucket_name, Marker='\x0a')
eq(response['Marker'], '\x0a')
eq(response['IsTruncated'], False)
keys = _get_keys(response)
eq(keys, key_names)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys with list-objects-v2')
@attr(assertion='non-printing startafter')
@attr('list-objects-v2')
def test_bucket_listv2_startafter_unreadable():
key_names = ['bar', 'baz', 'foo', 'quxx']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects_v2(Bucket=bucket_name, StartAfter='\x0a')
eq(response['StartAfter'], '\x0a')
eq(response['IsTruncated'], False)
keys = _get_keys(response)
eq(keys, key_names)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys')
@attr(assertion='marker not-in-list')
def test_bucket_list_marker_not_in_list():
key_names = ['bar', 'baz', 'foo', 'quxx']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects(Bucket=bucket_name, Marker='blah')
eq(response['Marker'], 'blah')
keys = _get_keys(response)
    eq(keys, ['foo', 'quxx'])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys with list-objects-v2')
@attr(assertion='startafter not-in-list')
@attr('list-objects-v2')
def test_bucket_listv2_startafter_not_in_list():
key_names = ['bar', 'baz', 'foo', 'quxx']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects_v2(Bucket=bucket_name, StartAfter='blah')
eq(response['StartAfter'], 'blah')
keys = _get_keys(response)
eq(keys, ['foo', 'quxx'])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys')
@attr(assertion='marker after list')
def test_bucket_list_marker_after_list():
key_names = ['bar', 'baz', 'foo', 'quxx']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects(Bucket=bucket_name, Marker='zzz')
eq(response['Marker'], 'zzz')
keys = _get_keys(response)
eq(response['IsTruncated'], False)
eq(keys, [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys with list-objects-v2')
@attr(assertion='startafter after list')
@attr('list-objects-v2')
def test_bucket_listv2_startafter_after_list():
key_names = ['bar', 'baz', 'foo', 'quxx']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects_v2(Bucket=bucket_name, StartAfter='zzz')
eq(response['StartAfter'], 'zzz')
keys = _get_keys(response)
eq(response['IsTruncated'], False)
eq(keys, [])
def _compare_dates(datetime1, datetime2):
"""
changes ms from datetime1 to 0, compares it to datetime2
"""
# both times are in datetime format but datetime1 has
# microseconds and datetime2 does not
datetime1 = datetime1.replace(microsecond=0)
eq(datetime1, datetime2)
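# Illustrative note (hedged): the helper above only zeroes the microseconds on its
# first argument, so pass the higher-precision timestamp first; e.g. comparing
# 2015-01-01 12:00:00.123456 against 2015-01-01 12:00:00 succeeds, while swapping
# the arguments would not.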
@attr(resource='object')
@attr(method='head')
@attr(operation='compare w/bucket list')
@attr(assertion='return same metadata')
def test_bucket_list_return_data():
key_names = ['bar', 'baz', 'foo']
bucket_name = _create_objects(keys=key_names)
client = get_client()
data = {}
for key_name in key_names:
obj_response = client.head_object(Bucket=bucket_name, Key=key_name)
acl_response = client.get_object_acl(Bucket=bucket_name, Key=key_name)
data.update({
key_name: {
'DisplayName': acl_response['Owner']['DisplayName'],
'ID': acl_response['Owner']['ID'],
'ETag': obj_response['ETag'],
'LastModified': obj_response['LastModified'],
'ContentLength': obj_response['ContentLength'],
}
})
response = client.list_objects(Bucket=bucket_name)
objs_list = response['Contents']
for obj in objs_list:
key_name = obj['Key']
key_data = data[key_name]
eq(obj['ETag'],key_data['ETag'])
eq(obj['Size'],key_data['ContentLength'])
eq(obj['Owner']['DisplayName'],key_data['DisplayName'])
eq(obj['Owner']['ID'],key_data['ID'])
_compare_dates(obj['LastModified'],key_data['LastModified'])
# amazon is eventually consistent, retry a bit if failed
def check_configure_versioning_retry(bucket_name, status, expected_string):
client = get_client()
response = client.put_bucket_versioning(Bucket=bucket_name, VersioningConfiguration={'MFADelete': 'Disabled','Status': status})
read_status = None
for i in range(5):
try:
response = client.get_bucket_versioning(Bucket=bucket_name)
read_status = response['Status']
except KeyError:
read_status = None
if (expected_string == read_status):
break
time.sleep(1)
eq(expected_string, read_status)
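# Illustrative note: the helper above polls get_bucket_versioning up to 5 times
# with a 1-second sleep between attempts, so e.g.
#   check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
# waits up to roughly 5 seconds for the versioning status to read back as
# 'Enabled' before asserting.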
@attr(resource='object')
@attr(method='head')
@attr(operation='compare w/bucket list when bucket versioning is configured')
@attr(assertion='return same metadata')
@attr('versioning')
def test_bucket_list_return_data_versioning():
bucket_name = get_new_bucket()
check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
key_names = ['bar', 'baz', 'foo']
bucket_name = _create_objects(bucket_name=bucket_name,keys=key_names)
client = get_client()
data = {}
for key_name in key_names:
obj_response = client.head_object(Bucket=bucket_name, Key=key_name)
acl_response = client.get_object_acl(Bucket=bucket_name, Key=key_name)
data.update({
key_name: {
'ID': acl_response['Owner']['ID'],
'DisplayName': acl_response['Owner']['DisplayName'],
'ETag': obj_response['ETag'],
'LastModified': obj_response['LastModified'],
'ContentLength': obj_response['ContentLength'],
'VersionId': obj_response['VersionId']
}
})
response = client.list_object_versions(Bucket=bucket_name)
objs_list = response['Versions']
for obj in objs_list:
key_name = obj['Key']
key_data = data[key_name]
eq(obj['Owner']['DisplayName'],key_data['DisplayName'])
eq(obj['ETag'],key_data['ETag'])
eq(obj['Size'],key_data['ContentLength'])
eq(obj['Owner']['ID'],key_data['ID'])
eq(obj['VersionId'], key_data['VersionId'])
_compare_dates(obj['LastModified'],key_data['LastModified'])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all objects (anonymous)')
@attr(assertion='succeeds')
def test_bucket_list_objects_anonymous():
bucket_name = get_new_bucket()
client = get_client()
client.put_bucket_acl(Bucket=bucket_name, ACL='public-read')
unauthenticated_client = get_unauthenticated_client()
unauthenticated_client.list_objects(Bucket=bucket_name)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all objects (anonymous) with list-objects-v2')
@attr(assertion='succeeds')
@attr('list-objects-v2')
def test_bucket_listv2_objects_anonymous():
bucket_name = get_new_bucket()
client = get_client()
client.put_bucket_acl(Bucket=bucket_name, ACL='public-read')
unauthenticated_client = get_unauthenticated_client()
unauthenticated_client.list_objects_v2(Bucket=bucket_name)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all objects (anonymous)')
@attr(assertion='fails')
def test_bucket_list_objects_anonymous_fail():
bucket_name = get_new_bucket()
unauthenticated_client = get_unauthenticated_client()
e = assert_raises(ClientError, unauthenticated_client.list_objects, Bucket=bucket_name)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 403)
eq(error_code, 'AccessDenied')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all objects (anonymous) with list-objects-v2')
@attr(assertion='fails')
@attr('list-objects-v2')
def test_bucket_listv2_objects_anonymous_fail():
bucket_name = get_new_bucket()
unauthenticated_client = get_unauthenticated_client()
e = assert_raises(ClientError, unauthenticated_client.list_objects_v2, Bucket=bucket_name)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 403)
eq(error_code, 'AccessDenied')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='non-existent bucket')
@attr(assertion='fails 404')
def test_bucket_notexist():
bucket_name = get_new_bucket_name()
client = get_client()
e = assert_raises(ClientError, client.list_objects, Bucket=bucket_name)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 404)
eq(error_code, 'NoSuchBucket')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='non-existent bucket with list-objects-v2')
@attr(assertion='fails 404')
@attr('list-objects-v2')
def test_bucketv2_notexist():
bucket_name = get_new_bucket_name()
client = get_client()
e = assert_raises(ClientError, client.list_objects_v2, Bucket=bucket_name)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 404)
eq(error_code, 'NoSuchBucket')
@attr(resource='bucket')
@attr(method='delete')
@attr(operation='non-existent bucket')
@attr(assertion='fails 404')
def test_bucket_delete_notexist():
bucket_name = get_new_bucket_name()
client = get_client()
e = assert_raises(ClientError, client.delete_bucket, Bucket=bucket_name)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 404)
eq(error_code, 'NoSuchBucket')
@attr(resource='bucket')
@attr(method='delete')
@attr(operation='non-empty bucket')
@attr(assertion='fails 409')
def test_bucket_delete_nonempty():
key_names = ['foo']
bucket_name = _create_objects(keys=key_names)
client = get_client()
e = assert_raises(ClientError, client.delete_bucket, Bucket=bucket_name)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 409)
eq(error_code, 'BucketNotEmpty')
def _do_set_bucket_canned_acl(client, bucket_name, canned_acl, i, results):
try:
client.put_bucket_acl(ACL=canned_acl, Bucket=bucket_name)
results[i] = True
except:
results[i] = False
def _do_set_bucket_canned_acl_concurrent(client, bucket_name, canned_acl, num, results):
t = []
for i in range(num):
thr = threading.Thread(target = _do_set_bucket_canned_acl, args=(client, bucket_name, canned_acl, i, results))
thr.start()
t.append(thr)
return t
def _do_wait_completion(t):
for thr in t:
thr.join()
@attr(resource='bucket')
@attr(method='put')
@attr(operation='concurrent set of acls on a bucket')
@attr(assertion='works')
def test_bucket_concurrent_set_canned_acl():
bucket_name = get_new_bucket()
client = get_client()
    # boto2 retry defaults to 5, so we need a thread to fail at least 5 times;
    # 50 seems like a large enough number to get through the retries (if the bug exists)
    num_threads = 50
results = [None] * num_threads
t = _do_set_bucket_canned_acl_concurrent(client, bucket_name, 'public-read', num_threads, results)
_do_wait_completion(t)
for r in results:
eq(r, True)
@attr(resource='object')
@attr(method='put')
@attr(operation='non-existent bucket')
@attr(assertion='fails 404')
def test_object_write_to_nonexist_bucket():
key_names = ['foo']
bucket_name = 'whatchutalkinboutwillis'
client = get_client()
e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key='foo', Body='foo')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 404)
eq(error_code, 'NoSuchBucket')
@attr(resource='bucket')
@attr(method='del')
@attr(operation='deleted bucket')
@attr(assertion='fails 404')
def test_bucket_create_delete():
bucket_name = get_new_bucket()
client = get_client()
client.delete_bucket(Bucket=bucket_name)
e = assert_raises(ClientError, client.delete_bucket, Bucket=bucket_name)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 404)
eq(error_code, 'NoSuchBucket')
@attr(resource='object')
@attr(method='get')
@attr(operation='read contents that were never written')
@attr(assertion='fails 404')
def test_object_read_not_exist():
bucket_name = get_new_bucket()
client = get_client()
e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='bar')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 404)
eq(error_code, 'NoSuchKey')
http_response = None
def get_http_response(**kwargs):
global http_response
http_response = kwargs['http_response'].__dict__
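# Illustrative note: get_http_response is registered below on boto3's
# 'after-call.s3.GetObject' event, so after a (failed) GetObject call the
# module-level http_response dict holds the raw HTTP response, including the
# '_content' body that the error-parsing test inspects.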
@attr(resource='object')
@attr(method='get')
@attr(operation='read contents that were never written to raise one error response')
@attr(assertion='RequestId appears in the error response')
def test_object_requestid_matches_header_on_error():
bucket_name = get_new_bucket()
client = get_client()
# get http response after failed request
client.meta.events.register('after-call.s3.GetObject', get_http_response)
e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='bar')
response_body = http_response['_content']
resp_body_xml = ET.fromstring(response_body)
request_id = resp_body_xml.find('.//RequestId').text
assert request_id is not None
eq(request_id, e.response['ResponseMetadata']['RequestId'])
def _make_objs_dict(key_names):
objs_list = []
for key in key_names:
obj_dict = {'Key': key}
objs_list.append(obj_dict)
objs_dict = {'Objects': objs_list}
return objs_dict
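# Illustrative example: _make_objs_dict(['key0', 'key1']) returns
#   {'Objects': [{'Key': 'key0'}, {'Key': 'key1'}]}
# which is the shape expected by the Delete= parameter of client.delete_objects().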
@attr(resource='object')
@attr(method='post')
@attr(operation='delete multiple objects')
@attr(assertion='deletes multiple objects with a single call')
def test_multi_object_delete():
key_names = ['key0', 'key1', 'key2']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects(Bucket=bucket_name)
eq(len(response['Contents']), 3)
objs_dict = _make_objs_dict(key_names=key_names)
response = client.delete_objects(Bucket=bucket_name, Delete=objs_dict)
eq(len(response['Deleted']), 3)
assert 'Errors' not in response
response = client.list_objects(Bucket=bucket_name)
assert 'Contents' not in response
response = client.delete_objects(Bucket=bucket_name, Delete=objs_dict)
eq(len(response['Deleted']), 3)
assert 'Errors' not in response
response = client.list_objects(Bucket=bucket_name)
assert 'Contents' not in response
@attr(resource='object')
@attr(method='post')
@attr(operation='delete multiple objects with list-objects-v2')
@attr(assertion='deletes multiple objects with a single call')
@attr('list-objects-v2')
def test_multi_objectv2_delete():
key_names = ['key0', 'key1', 'key2']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects_v2(Bucket=bucket_name)
eq(len(response['Contents']), 3)
objs_dict = _make_objs_dict(key_names=key_names)
response = client.delete_objects(Bucket=bucket_name, Delete=objs_dict)
eq(len(response['Deleted']), 3)
assert 'Errors' not in response
response = client.list_objects_v2(Bucket=bucket_name)
assert 'Contents' not in response
response = client.delete_objects(Bucket=bucket_name, Delete=objs_dict)
eq(len(response['Deleted']), 3)
assert 'Errors' not in response
response = client.list_objects_v2(Bucket=bucket_name)
assert 'Contents' not in response
@attr(resource='object')
@attr(method='put')
@attr(operation='write zero-byte key')
@attr(assertion='correct content length')
def test_object_head_zero_bytes():
bucket_name = get_new_bucket()
client = get_client()
client.put_object(Bucket=bucket_name, Key='foo', Body='')
response = client.head_object(Bucket=bucket_name, Key='foo')
eq(response['ContentLength'], 0)
@attr(resource='object')
@attr(method='put')
@attr(operation='write key')
@attr(assertion='correct etag')
def test_object_write_check_etag():
bucket_name = get_new_bucket()
client = get_client()
response = client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
eq(response['ETag'], '"37b51d194a7513e45b56f6524f2d51f2"')
@attr(resource='object')
@attr(method='put')
@attr(operation='write key')
@attr(assertion='correct cache control header')
def test_object_write_cache_control():
bucket_name = get_new_bucket()
client = get_client()
cache_control = 'public, max-age=14400'
client.put_object(Bucket=bucket_name, Key='foo', Body='bar', CacheControl=cache_control)
response = client.head_object(Bucket=bucket_name, Key='foo')
eq(response['ResponseMetadata']['HTTPHeaders']['cache-control'], cache_control)
@attr(resource='object')
@attr(method='put')
@attr(operation='write key')
@attr(assertion='correct expires header')
def test_object_write_expires():
bucket_name = get_new_bucket()
client = get_client()
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
client.put_object(Bucket=bucket_name, Key='foo', Body='bar', Expires=expires)
response = client.head_object(Bucket=bucket_name, Key='foo')
_compare_dates(expires, response['Expires'])
def _get_body(response):
body = response['Body']
got = body.read()
if type(got) is bytes:
got = got.decode()
return got
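# Illustrative example: for a get_object() response whose streaming body holds
# b'bar', _get_body(response) reads it and returns the str 'bar', decoding
# bytes so tests can compare against plain string literals.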
@attr(resource='object')
@attr(method='all')
@attr(operation='complete object life cycle')
@attr(assertion='read back what we wrote and rewrote')
def test_object_write_read_update_read_delete():
bucket_name = get_new_bucket()
client = get_client()
# Write
client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
# Read
response = client.get_object(Bucket=bucket_name, Key='foo')
body = _get_body(response)
eq(body, 'bar')
# Update
client.put_object(Bucket=bucket_name, Key='foo', Body='soup')
# Read
response = client.get_object(Bucket=bucket_name, Key='foo')
body = _get_body(response)
eq(body, 'soup')
# Delete
client.delete_object(Bucket=bucket_name, Key='foo')
def _set_get_metadata(metadata, bucket_name=None):
"""
create a new bucket new or use an existing
name to create an object that bucket,
set the meta1 property to a specified, value,
and then re-read and return that property
"""
if bucket_name is None:
bucket_name = get_new_bucket()
client = get_client()
metadata_dict = {'meta1': metadata}
client.put_object(Bucket=bucket_name, Key='foo', Body='bar', Metadata=metadata_dict)
response = client.get_object(Bucket=bucket_name, Key='foo')
return response['Metadata']['meta1']
@attr(resource='object.metadata')
@attr(method='put')
@attr(operation='metadata write/re-read')
@attr(assertion='reread what we wrote')
def test_object_set_get_metadata_none_to_good():
got = _set_get_metadata('mymeta')
eq(got, 'mymeta')
@attr(resource='object.metadata')
@attr(method='put')
@attr(operation='metadata write/re-read')
@attr(assertion='write empty value, returns empty value')
def test_object_set_get_metadata_none_to_empty():
got = _set_get_metadata('')
eq(got, '')
@attr(resource='object.metadata')
@attr(method='put')
@attr(operation='metadata write/re-write')
@attr(assertion='empty value replaces old')
def test_object_set_get_metadata_overwrite_to_empty():
bucket_name = get_new_bucket()
got = _set_get_metadata('oldmeta', bucket_name)
eq(got, 'oldmeta')
got = _set_get_metadata('', bucket_name)
eq(got, '')
@attr(resource='object.metadata')
@attr(method='put')
@attr(operation='metadata write/re-write')
@attr(assertion='UTF-8 values passed through')
# TODO: the decoding of this unicode metadata is not happening properly for unknown reasons
@attr('fails_on_rgw')
def test_object_set_get_unicode_metadata():
bucket_name = get_new_bucket()
client = get_client()
def set_unicode_metadata(**kwargs):
kwargs['params']['headers']['x-amz-meta-meta1'] = u"Hello World\xe9"
client.meta.events.register('before-call.s3.PutObject', set_unicode_metadata)
client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
response = client.get_object(Bucket=bucket_name, Key='foo')
    got = response['Metadata']['meta1']
print(got)
print(u"Hello World\xe9")
eq(got, u"Hello World\xe9")
def _set_get_metadata_unreadable(metadata, bucket_name=None):
"""
set and then read back a meta-data value (which presumably
includes some interesting characters), and return a list
containing the stored value AND the encoding with which it
was returned.
This should return a 400 bad request because the webserver
rejects the request.
"""
    if bucket_name is None:
        bucket_name = get_new_bucket()
client = get_client()
metadata_dict = {'meta1': metadata}
e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key='bar', Metadata=metadata_dict)
return e
@attr(resource='object.metadata')
@attr(method='put')
@attr(operation='metadata write/re-write')
@attr(assertion='non-UTF-8 values detected, but rejected by webserver')
@attr('fails_strict_rfc2616')
@attr(assertion='fails 400')
def test_object_set_get_non_utf8_metadata():
metadata = '\x04mymeta'
e = _set_get_metadata_unreadable(metadata)
status, error_code = _get_status_and_error_code(e.response)
    assert status in (400, 403)
@attr(resource='object.metadata')
@attr(method='put')
@attr(operation='metadata write')
@attr(assertion='non-printing prefixes rejected by webserver')
@attr('fails_strict_rfc2616')
@attr(assertion='fails 400')
def test_object_set_get_metadata_empty_to_unreadable_prefix():
metadata = '\x04w'
e = _set_get_metadata_unreadable(metadata)
status, error_code = _get_status_and_error_code(e.response)
    assert status in (400, 403)
@attr(resource='object.metadata')
@attr(method='put')
@attr(operation='metadata write')
@attr(assertion='non-printing suffixes rejected by webserver')
@attr('fails_strict_rfc2616')
@attr(assertion='fails 400')
def test_object_set_get_metadata_empty_to_unreadable_suffix():
metadata = 'h\x04'
e = _set_get_metadata_unreadable(metadata)
status, error_code = _get_status_and_error_code(e.response)
    assert status in (400, 403)
@attr(resource='object.metadata')
@attr(method='put')
@attr(operation='metadata write')
@attr(assertion='non-printing infixes rejected by webserver')
@attr('fails_strict_rfc2616')
@attr(assertion='fails 400')
def test_object_set_get_metadata_empty_to_unreadable_infix():
metadata = 'h\x04w'
e = _set_get_metadata_unreadable(metadata)
status, error_code = _get_status_and_error_code(e.response)
    assert status in (400, 403)
@attr(resource='object')
@attr(method='put')
@attr(operation='data re-write')
@attr(assertion='replaces previous metadata')
def test_object_metadata_replaced_on_put():
bucket_name = get_new_bucket()
client = get_client()
metadata_dict = {'meta1': 'bar'}
client.put_object(Bucket=bucket_name, Key='foo', Body='bar', Metadata=metadata_dict)
client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
response = client.get_object(Bucket=bucket_name, Key='foo')
got = response['Metadata']
eq(got, {})
@attr(resource='object')
@attr(method='put')
@attr(operation='data write from file (w/100-Continue)')
@attr(assertion='succeeds and returns written data')
def test_object_write_file():
bucket_name = get_new_bucket()
client = get_client()
data_str = 'bar'
data = bytes(data_str, 'utf-8')
client.put_object(Bucket=bucket_name, Key='foo', Body=data)
response = client.get_object(Bucket=bucket_name, Key='foo')
body = _get_body(response)
eq(body, 'bar')
def _get_post_url(bucket_name):
endpoint = get_config_endpoint()
return '{endpoint}/{bucket_name}'.format(endpoint=endpoint, bucket_name=bucket_name)
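# Illustrative example (assuming the configured endpoint is
# 'http://localhost:8000'): _get_post_url('mybucket') returns
# 'http://localhost:8000/mybucket', which the POST-upload tests below use as
# the form target.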
@attr(resource='object')
@attr(method='post')
@attr(operation='anonymous browser based upload via POST request')
@attr(assertion='succeeds and returns written data')
def test_post_object_anonymous_request():
bucket_name = get_new_bucket_name()
client = get_client()
url = _get_post_url(bucket_name)
payload = OrderedDict([("key" , "foo.txt"),("acl" , "public-read"),\
("Content-Type" , "text/plain"),('file', ('bar'))])
client.create_bucket(ACL='public-read-write', Bucket=bucket_name)
r = requests.post(url, files = payload)
eq(r.status_code, 204)
response = client.get_object(Bucket=bucket_name, Key='foo.txt')
body = _get_body(response)
eq(body, 'bar')
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='succeeds and returns written data')
def test_post_object_authenticated_request():
bucket_name = get_new_bucket()
client = get_client()
url = _get_post_url(bucket_name)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"conditions": [\
{"bucket": bucket_name},\
["starts-with", "$key", "foo"],\
{"acl": "private"},\
["starts-with", "$Content-Type", "text/plain"],\
["content-length-range", 0, 1024]\
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
policy = base64.b64encode(bytes_json_policy_document)
aws_secret_access_key = get_main_aws_secret_key()
aws_access_key_id = get_main_aws_access_key()
signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),('file', ('bar'))])
r = requests.post(url, files = payload)
eq(r.status_code, 204)
response = client.get_object(Bucket=bucket_name, Key='foo.txt')
body = _get_body(response)
eq(body, 'bar')
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request, no content-type header')
@attr(assertion='succeeds and returns written data')
def test_post_object_authenticated_no_content_type():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(ACL='public-read-write', Bucket=bucket_name)
url = _get_post_url(bucket_name)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"conditions": [\
{"bucket": bucket_name},\
["starts-with", "$key", "foo"],\
{"acl": "private"},\
["content-length-range", 0, 1024]\
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
policy = base64.b64encode(bytes_json_policy_document)
aws_secret_access_key = get_main_aws_secret_key()
aws_access_key_id = get_main_aws_access_key()
signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
('file', ('bar'))])
r = requests.post(url, files = payload)
eq(r.status_code, 204)
response = client.get_object(Bucket=bucket_name, Key="foo.txt")
body = _get_body(response)
eq(body, 'bar')
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request, bad access key')
@attr(assertion='fails')
def test_post_object_authenticated_request_bad_access_key():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(ACL='public-read-write', Bucket=bucket_name)
url = _get_post_url(bucket_name)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"conditions": [\
{"bucket": bucket_name},\
["starts-with", "$key", "foo"],\
{"acl": "private"},\
["starts-with", "$Content-Type", "text/plain"],\
["content-length-range", 0, 1024]\
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
policy = base64.b64encode(bytes_json_policy_document)
aws_secret_access_key = get_main_aws_secret_key()
aws_access_key_id = get_main_aws_access_key()
signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , 'foo'),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),('file', ('bar'))])
r = requests.post(url, files = payload)
eq(r.status_code, 403)
@attr(resource='object')
@attr(method='post')
@attr(operation='anonymous browser based upload via POST request')
@attr(assertion='succeeds with status 201')
def test_post_object_set_success_code():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(ACL='public-read-write', Bucket=bucket_name)
url = _get_post_url(bucket_name)
payload = OrderedDict([("key" , "foo.txt"),("acl" , "public-read"),\
("success_action_status" , "201"),\
("Content-Type" , "text/plain"),('file', ('bar'))])
r = requests.post(url, files = payload)
eq(r.status_code, 201)
message = ET.fromstring(r.content).find('Key')
eq(message.text,'foo.txt')
@attr(resource='object')
@attr(method='post')
@attr(operation='anonymous browser based upload via POST request')
@attr(assertion='succeeds with status 204')
def test_post_object_set_invalid_success_code():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(ACL='public-read-write', Bucket=bucket_name)
url = _get_post_url(bucket_name)
payload = OrderedDict([("key" , "foo.txt"),("acl" , "public-read"),\
("success_action_status" , "404"),\
("Content-Type" , "text/plain"),('file', ('bar'))])
r = requests.post(url, files = payload)
eq(r.status_code, 204)
content = r.content.decode()
eq(content,'')
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='succeeds and returns written data')
def test_post_object_upload_larger_than_chunk():
bucket_name = get_new_bucket()
client = get_client()
url = _get_post_url(bucket_name)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"conditions": [\
{"bucket": bucket_name},\
["starts-with", "$key", "foo"],\
{"acl": "private"},\
["starts-with", "$Content-Type", "text/plain"],\
["content-length-range", 0, 5*1024*1024]\
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
policy = base64.b64encode(bytes_json_policy_document)
aws_secret_access_key = get_main_aws_secret_key()
aws_access_key_id = get_main_aws_access_key()
signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
foo_string = 'foo' * 1024*1024
payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),('file', foo_string)])
r = requests.post(url, files = payload)
eq(r.status_code, 204)
response = client.get_object(Bucket=bucket_name, Key='foo.txt')
body = _get_body(response)
eq(body, foo_string)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='succeeds and returns written data')
def test_post_object_set_key_from_filename():
bucket_name = get_new_bucket()
client = get_client()
url = _get_post_url(bucket_name)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"conditions": [\
{"bucket": bucket_name},\
["starts-with", "$key", "foo"],\
{"acl": "private"},\
["starts-with", "$Content-Type", "text/plain"],\
["content-length-range", 0, 1024]\
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
policy = base64.b64encode(bytes_json_policy_document)
aws_secret_access_key = get_main_aws_secret_key()
aws_access_key_id = get_main_aws_access_key()
signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
payload = OrderedDict([ ("key" , "${filename}"),("AWSAccessKeyId" , aws_access_key_id),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),('file', ('foo.txt', 'bar'))])
r = requests.post(url, files = payload)
eq(r.status_code, 204)
response = client.get_object(Bucket=bucket_name, Key='foo.txt')
body = _get_body(response)
eq(body, 'bar')
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='succeeds with status 204')
def test_post_object_ignored_header():
bucket_name = get_new_bucket()
client = get_client()
url = _get_post_url(bucket_name)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"conditions": [\
{"bucket": bucket_name},\
["starts-with", "$key", "foo"],\
{"acl": "private"},\
["starts-with", "$Content-Type", "text/plain"],\
["content-length-range", 0, 1024]\
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
policy = base64.b64encode(bytes_json_policy_document)
aws_secret_access_key = get_main_aws_secret_key()
aws_access_key_id = get_main_aws_access_key()
signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),("x-ignore-foo" , "bar"),('file', ('bar'))])
r = requests.post(url, files = payload)
eq(r.status_code, 204)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='succeeds with status 204')
def test_post_object_case_insensitive_condition_fields():
bucket_name = get_new_bucket()
client = get_client()
url = _get_post_url(bucket_name)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"conditions": [\
{"bUcKeT": bucket_name},\
["StArTs-WiTh", "$KeY", "foo"],\
{"AcL": "private"},\
["StArTs-WiTh", "$CoNtEnT-TyPe", "text/plain"],\
["content-length-range", 0, 1024]\
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
policy = base64.b64encode(bytes_json_policy_document)
aws_secret_access_key = get_main_aws_secret_key()
aws_access_key_id = get_main_aws_access_key()
signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
foo_string = 'foo' * 1024*1024
payload = OrderedDict([ ("kEy" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
("aCl" , "private"),("signature" , signature),("pOLICy" , policy),\
("Content-Type" , "text/plain"),('file', ('bar'))])
r = requests.post(url, files = payload)
eq(r.status_code, 204)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='succeeds with escaped leading $ and returns written data')
def test_post_object_escaped_field_values():
bucket_name = get_new_bucket()
client = get_client()
url = _get_post_url(bucket_name)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"conditions": [\
{"bucket": bucket_name},\
["starts-with", "$key", "\$foo"],\
{"acl": "private"},\
["starts-with", "$Content-Type", "text/plain"],\
["content-length-range", 0, 1024]\
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
policy = base64.b64encode(bytes_json_policy_document)
aws_secret_access_key = get_main_aws_secret_key()
aws_access_key_id = get_main_aws_access_key()
signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
payload = OrderedDict([ ("key" , "\$foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),('file', ('bar'))])
r = requests.post(url, files = payload)
eq(r.status_code, 204)
response = client.get_object(Bucket=bucket_name, Key='\$foo.txt')
body = _get_body(response)
eq(body, 'bar')
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='succeeds and returns redirect url')
def test_post_object_success_redirect_action():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(ACL='public-read-write', Bucket=bucket_name)
url = _get_post_url(bucket_name)
redirect_url = _get_post_url(bucket_name)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"conditions": [\
{"bucket": bucket_name},\
["starts-with", "$key", "foo"],\
{"acl": "private"},\
["starts-with", "$Content-Type", "text/plain"],\
["eq", "$success_action_redirect", redirect_url],\
["content-length-range", 0, 1024]\
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
policy = base64.b64encode(bytes_json_policy_document)
aws_secret_access_key = get_main_aws_secret_key()
aws_access_key_id = get_main_aws_access_key()
signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),("success_action_redirect" , redirect_url),\
('file', ('bar'))])
r = requests.post(url, files = payload)
eq(r.status_code, 200)
url = r.url
response = client.get_object(Bucket=bucket_name, Key='foo.txt')
eq(url,
'{rurl}?bucket={bucket}&key={key}&etag=%22{etag}%22'.format(rurl = redirect_url,\
bucket = bucket_name, key = 'foo.txt', etag = response['ETag'].strip('"')))
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with invalid signature error')
def test_post_object_invalid_signature():
bucket_name = get_new_bucket()
client = get_client()
url = _get_post_url(bucket_name)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"conditions": [\
{"bucket": bucket_name},\
["starts-with", "$key", "\$foo"],\
{"acl": "private"},\
["starts-with", "$Content-Type", "text/plain"],\
["content-length-range", 0, 1024]\
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
policy = base64.b64encode(bytes_json_policy_document)
aws_secret_access_key = get_main_aws_secret_key()
aws_access_key_id = get_main_aws_access_key()
signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())[::-1]
payload = OrderedDict([ ("key" , "\$foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),('file', ('bar'))])
r = requests.post(url, files = payload)
eq(r.status_code, 403)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with access key does not exist error')
def test_post_object_invalid_access_key():
bucket_name = get_new_bucket()
client = get_client()
url = _get_post_url(bucket_name)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"conditions": [\
{"bucket": bucket_name},\
["starts-with", "$key", "\$foo"],\
{"acl": "private"},\
["starts-with", "$Content-Type", "text/plain"],\
["content-length-range", 0, 1024]\
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
policy = base64.b64encode(bytes_json_policy_document)
aws_secret_access_key = get_main_aws_secret_key()
aws_access_key_id = get_main_aws_access_key()
signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
payload = OrderedDict([ ("key" , "\$foo.txt"),("AWSAccessKeyId" , aws_access_key_id[::-1]),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),('file', ('bar'))])
r = requests.post(url, files = payload)
eq(r.status_code, 403)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with invalid expiration error')
def test_post_object_invalid_date_format():
bucket_name = get_new_bucket()
client = get_client()
url = _get_post_url(bucket_name)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {"expiration": str(expires),\
"conditions": [\
{"bucket": bucket_name},\
["starts-with", "$key", "\$foo"],\
{"acl": "private"},\
["starts-with", "$Content-Type", "text/plain"],\
["content-length-range", 0, 1024]\
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
policy = base64.b64encode(bytes_json_policy_document)
aws_secret_access_key = get_main_aws_secret_key()
aws_access_key_id = get_main_aws_access_key()
signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
payload = OrderedDict([ ("key" , "\$foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),('file', ('bar'))])
r = requests.post(url, files = payload)
eq(r.status_code, 400)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with missing key error')
def test_post_object_no_key_specified():
bucket_name = get_new_bucket()
client = get_client()
url = _get_post_url(bucket_name)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"conditions": [\
{"bucket": bucket_name},\
{"acl": "private"},\
["starts-with", "$Content-Type", "text/plain"],\
["content-length-range", 0, 1024]\
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
policy = base64.b64encode(bytes_json_policy_document)
aws_secret_access_key = get_main_aws_secret_key()
aws_access_key_id = get_main_aws_access_key()
signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
payload = OrderedDict([ ("AWSAccessKeyId" , aws_access_key_id),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),('file', ('bar'))])
r = requests.post(url, files = payload)
eq(r.status_code, 400)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with missing signature error')
def test_post_object_missing_signature():
bucket_name = get_new_bucket()
client = get_client()
url = _get_post_url(bucket_name)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"conditions": [\
{"bucket": bucket_name},\
["starts-with", "$key", "\$foo"],\
{"acl": "private"},\
["starts-with", "$Content-Type", "text/plain"],\
["content-length-range", 0, 1024]\
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
policy = base64.b64encode(bytes_json_policy_document)
aws_secret_access_key = get_main_aws_secret_key()
aws_access_key_id = get_main_aws_access_key()
signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
payload = OrderedDict([ ("key", "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
("acl" , "private"),("policy" , policy),\
("Content-Type" , "text/plain"),('file', ('bar'))])
r = requests.post(url, files = payload)
eq(r.status_code, 400)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with extra input fields policy error')
def test_post_object_missing_policy_condition():
bucket_name = get_new_bucket()
client = get_client()
url = _get_post_url(bucket_name)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"conditions": [\
["starts-with", "$key", "\$foo"],\
{"acl": "private"},\
["starts-with", "$Content-Type", "text/plain"],\
["content-length-range", 0, 1024]\
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
policy = base64.b64encode(bytes_json_policy_document)
aws_secret_access_key = get_main_aws_secret_key()
aws_access_key_id = get_main_aws_access_key()
signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
payload = OrderedDict([ ("key", "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),('file', ('bar'))])
r = requests.post(url, files = payload)
eq(r.status_code, 403)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='succeeds using starts-with restriction on metadata header')
def test_post_object_user_specified_header():
bucket_name = get_new_bucket()
client = get_client()
url = _get_post_url(bucket_name)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"conditions": [\
{"bucket": bucket_name},\
["starts-with", "$key", "foo"],\
{"acl": "private"},\
["starts-with", "$Content-Type", "text/plain"],\
["content-length-range", 0, 1024],\
["starts-with", "$x-amz-meta-foo", "bar"]
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
policy = base64.b64encode(bytes_json_policy_document)
aws_secret_access_key = get_main_aws_secret_key()
aws_access_key_id = get_main_aws_access_key()
signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
payload = OrderedDict([ ("key", "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),('x-amz-meta-foo' , 'barclamp'),('file', ('bar'))])
r = requests.post(url, files = payload)
eq(r.status_code, 204)
response = client.get_object(Bucket=bucket_name, Key='foo.txt')
eq(response['Metadata']['foo'], 'barclamp')
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with policy condition failed error due to missing field in POST request')
def test_post_object_request_missing_policy_specified_field():
bucket_name = get_new_bucket()
client = get_client()
url = _get_post_url(bucket_name)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"conditions": [\
{"bucket": bucket_name},\
["starts-with", "$key", "foo"],\
{"acl": "private"},\
["starts-with", "$Content-Type", "text/plain"],\
["content-length-range", 0, 1024],\
["starts-with", "$x-amz-meta-foo", "bar"]
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
policy = base64.b64encode(bytes_json_policy_document)
aws_secret_access_key = get_main_aws_secret_key()
aws_access_key_id = get_main_aws_access_key()
signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
payload = OrderedDict([ ("key", "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),('file', ('bar'))])
r = requests.post(url, files = payload)
eq(r.status_code, 403)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with conditions must be list error')
def test_post_object_condition_is_case_sensitive():
bucket_name = get_new_bucket()
client = get_client()
url = _get_post_url(bucket_name)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"CONDITIONS": [\
{"bucket": bucket_name},\
["starts-with", "$key", "foo"],\
{"acl": "private"},\
["starts-with", "$Content-Type", "text/plain"],\
["content-length-range", 0, 1024],\
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
policy = base64.b64encode(bytes_json_policy_document)
aws_secret_access_key = get_main_aws_secret_key()
aws_access_key_id = get_main_aws_access_key()
signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
payload = OrderedDict([ ("key", "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),('file', ('bar'))])
r = requests.post(url, files = payload)
eq(r.status_code, 400)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with expiration must be string error')
def test_post_object_expires_is_case_sensitive():
bucket_name = get_new_bucket()
client = get_client()
url = _get_post_url(bucket_name)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {"EXPIRATION": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"conditions": [\
{"bucket": bucket_name},\
["starts-with", "$key", "foo"],\
{"acl": "private"},\
["starts-with", "$Content-Type", "text/plain"],\
["content-length-range", 0, 1024],\
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
policy = base64.b64encode(bytes_json_policy_document)
aws_secret_access_key = get_main_aws_secret_key()
aws_access_key_id = get_main_aws_access_key()
signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
payload = OrderedDict([ ("key", "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),('file', ('bar'))])
r = requests.post(url, files = payload)
eq(r.status_code, 400)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with policy expired error')
def test_post_object_expired_policy():
bucket_name = get_new_bucket()
client = get_client()
url = _get_post_url(bucket_name)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=-6000)
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"conditions": [\
{"bucket": bucket_name},\
["starts-with", "$key", "foo"],\
{"acl": "private"},\
["starts-with", "$Content-Type", "text/plain"],\
["content-length-range", 0, 1024],\
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
policy = base64.b64encode(bytes_json_policy_document)
aws_secret_access_key = get_main_aws_secret_key()
aws_access_key_id = get_main_aws_access_key()
signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
payload = OrderedDict([ ("key", "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),('file', ('bar'))])
r = requests.post(url, files = payload)
eq(r.status_code, 403)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails using equality restriction on metadata header')
def test_post_object_invalid_request_field_value():
bucket_name = get_new_bucket()
client = get_client()
url = _get_post_url(bucket_name)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"conditions": [\
{"bucket": bucket_name},\
["starts-with", "$key", "foo"],\
{"acl": "private"},\
["starts-with", "$Content-Type", "text/plain"],\
["content-length-range", 0, 1024],\
["eq", "$x-amz-meta-foo", ""]
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
policy = base64.b64encode(bytes_json_policy_document)
aws_secret_access_key = get_main_aws_secret_key()
aws_access_key_id = get_main_aws_access_key()
signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),('x-amz-meta-foo' , 'barclamp'),('file', ('bar'))])
r = requests.post(url, files = payload)
eq(r.status_code, 403)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with policy missing expiration error')
def test_post_object_missing_expires_condition():
bucket_name = get_new_bucket()
client = get_client()
url = _get_post_url(bucket_name)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {\
"conditions": [\
{"bucket": bucket_name},\
["starts-with", "$key", "foo"],\
{"acl": "private"},\
["starts-with", "$Content-Type", "text/plain"],\
["content-length-range", 0, 1024],\
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
policy = base64.b64encode(bytes_json_policy_document)
aws_secret_access_key = get_main_aws_secret_key()
aws_access_key_id = get_main_aws_access_key()
signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),('file', ('bar'))])
r = requests.post(url, files = payload)
eq(r.status_code, 400)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with policy missing conditions error')
def test_post_object_missing_conditions_list():
bucket_name = get_new_bucket()
client = get_client()
url = _get_post_url(bucket_name)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ")}
json_policy_document = json.JSONEncoder().encode(policy_document)
bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
policy = base64.b64encode(bytes_json_policy_document)
aws_secret_access_key = get_main_aws_secret_key()
aws_access_key_id = get_main_aws_access_key()
signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),('file', ('bar'))])
r = requests.post(url, files = payload)
eq(r.status_code, 400)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with allowable upload size exceeded error')
def test_post_object_upload_size_limit_exceeded():
bucket_name = get_new_bucket()
client = get_client()
url = _get_post_url(bucket_name)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"conditions": [\
{"bucket": bucket_name},\
["starts-with", "$key", "foo"],\
{"acl": "private"},\
["starts-with", "$Content-Type", "text/plain"],\
["content-length-range", 0, 0],\
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
policy = base64.b64encode(bytes_json_policy_document)
aws_secret_access_key = get_main_aws_secret_key()
aws_access_key_id = get_main_aws_access_key()
signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),('file', ('bar'))])
r = requests.post(url, files = payload)
eq(r.status_code, 400)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with invalid content length error')
def test_post_object_missing_content_length_argument():
bucket_name = get_new_bucket()
client = get_client()
url = _get_post_url(bucket_name)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"conditions": [\
{"bucket": bucket_name},\
["starts-with", "$key", "foo"],\
{"acl": "private"},\
["starts-with", "$Content-Type", "text/plain"],\
["content-length-range", 0],\
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
policy = base64.b64encode(bytes_json_policy_document)
aws_secret_access_key = get_main_aws_secret_key()
aws_access_key_id = get_main_aws_access_key()
signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),('file', ('bar'))])
r = requests.post(url, files = payload)
eq(r.status_code, 400)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with invalid JSON error')
def test_post_object_invalid_content_length_argument():
bucket_name = get_new_bucket()
client = get_client()
url = _get_post_url(bucket_name)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"conditions": [\
{"bucket": bucket_name},\
["starts-with", "$key", "foo"],\
{"acl": "private"},\
["starts-with", "$Content-Type", "text/plain"],\
["content-length-range", -1, 0],\
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
policy = base64.b64encode(bytes_json_policy_document)
aws_secret_access_key = get_main_aws_secret_key()
aws_access_key_id = get_main_aws_access_key()
signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),('file', ('bar'))])
r = requests.post(url, files = payload)
eq(r.status_code, 400)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with upload size less than minimum allowable error')
def test_post_object_upload_size_below_minimum():
bucket_name = get_new_bucket()
client = get_client()
url = _get_post_url(bucket_name)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"conditions": [\
{"bucket": bucket_name},\
["starts-with", "$key", "foo"],\
{"acl": "private"},\
["starts-with", "$Content-Type", "text/plain"],\
["content-length-range", 512, 1000],\
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
policy = base64.b64encode(bytes_json_policy_document)
aws_secret_access_key = get_main_aws_secret_key()
aws_access_key_id = get_main_aws_access_key()
signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),('file', ('bar'))])
r = requests.post(url, files = payload)
eq(r.status_code, 400)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='empty conditions return appropriate error response')
def test_post_object_empty_conditions():
bucket_name = get_new_bucket()
client = get_client()
url = _get_post_url(bucket_name)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"conditions": [\
{ }\
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
policy = base64.b64encode(bytes_json_policy_document)
aws_secret_access_key = get_main_aws_secret_key()
aws_access_key_id = get_main_aws_access_key()
signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),('file', ('bar'))])
r = requests.post(url, files = payload)
eq(r.status_code, 400)
@attr(resource='object')
@attr(method='get')
@attr(operation='get w/ If-Match: the latest ETag')
@attr(assertion='succeeds')
def test_get_object_ifmatch_good():
bucket_name = get_new_bucket()
client = get_client()
response = client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
etag = response['ETag']
response = client.get_object(Bucket=bucket_name, Key='foo', IfMatch=etag)
body = _get_body(response)
eq(body, 'bar')
@attr(resource='object')
@attr(method='get')
@attr(operation='get w/ If-Match: bogus ETag')
@attr(assertion='fails 412')
def test_get_object_ifmatch_failed():
bucket_name = get_new_bucket()
client = get_client()
client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='foo', IfMatch='"ABCORZ"')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 412)
eq(error_code, 'PreconditionFailed')
@attr(resource='object')
@attr(method='get')
@attr(operation='get w/ If-None-Match: the latest ETag')
@attr(assertion='fails 304')
def test_get_object_ifnonematch_good():
bucket_name = get_new_bucket()
client = get_client()
response = client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
etag = response['ETag']
e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='foo', IfNoneMatch=etag)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 304)
eq(e.response['Error']['Message'], 'Not Modified')
@attr(resource='object')
@attr(method='get')
@attr(operation='get w/ If-None-Match: bogus ETag')
@attr(assertion='succeeds')
def test_get_object_ifnonematch_failed():
bucket_name = get_new_bucket()
client = get_client()
client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
response = client.get_object(Bucket=bucket_name, Key='foo', IfNoneMatch='ABCORZ')
body = _get_body(response)
eq(body, 'bar')
@attr(resource='object')
@attr(method='get')
@attr(operation='get w/ If-Modified-Since: before')
@attr(assertion='succeeds')
def test_get_object_ifmodifiedsince_good():
bucket_name = get_new_bucket()
client = get_client()
client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
response = client.get_object(Bucket=bucket_name, Key='foo', IfModifiedSince='Sat, 29 Oct 1994 19:43:31 GMT')
body = _get_body(response)
eq(body, 'bar')
@attr(resource='object')
@attr(method='get')
@attr(operation='get w/ If-Modified-Since: after')
@attr(assertion='fails 304')
def test_get_object_ifmodifiedsince_failed():
bucket_name = get_new_bucket()
client = get_client()
client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
response = client.get_object(Bucket=bucket_name, Key='foo')
last_modified = str(response['LastModified'])
last_modified = last_modified.split('+')[0]
mtime = datetime.datetime.strptime(last_modified, '%Y-%m-%d %H:%M:%S')
after = mtime + datetime.timedelta(seconds=1)
after_str = time.strftime("%a, %d %b %Y %H:%M:%S GMT", after.timetuple())
time.sleep(1)
e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='foo', IfModifiedSince=after_str)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 304)
eq(e.response['Error']['Message'], 'Not Modified')
@attr(resource='object')
@attr(method='get')
@attr(operation='get w/ If-Unmodified-Since: before')
@attr(assertion='fails 412')
def test_get_object_ifunmodifiedsince_good():
bucket_name = get_new_bucket()
client = get_client()
client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='foo', IfUnmodifiedSince='Sat, 29 Oct 1994 19:43:31 GMT')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 412)
eq(error_code, 'PreconditionFailed')
@attr(resource='object')
@attr(method='get')
@attr(operation='get w/ If-Unmodified-Since: after')
@attr(assertion='succeeds')
def test_get_object_ifunmodifiedsince_failed():
bucket_name = get_new_bucket()
client = get_client()
client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
response = client.get_object(Bucket=bucket_name, Key='foo', IfUnmodifiedSince='Sat, 29 Oct 2100 19:43:31 GMT')
body = _get_body(response)
eq(body, 'bar')
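# The PUT tests below exercise conditional writes (If-Match / If-None-Match).
# The conditional headers are not passed to put_object() as keyword arguments;
# instead each test registers a botocore 'before-call' event hook that injects
# the header into the outgoing request, e.g.:
#   lf = lambda **kwargs: kwargs['params']['headers'].update({'If-Match': etag})
#   client.meta.events.register('before-call.s3.PutObject', lf)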
@attr(resource='object')
@attr(method='put')
@attr(operation='data re-write w/ If-Match: the latest ETag')
@attr(assertion='replaces previous data and metadata')
@attr('fails_on_aws')
def test_put_object_ifmatch_good():
bucket_name = get_new_bucket()
client = get_client()
client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
response = client.get_object(Bucket=bucket_name, Key='foo')
body = _get_body(response)
eq(body, 'bar')
etag = response['ETag'].replace('"', '')
# pass in custom header 'If-Match' before PutObject call
lf = (lambda **kwargs: kwargs['params']['headers'].update({'If-Match': etag}))
client.meta.events.register('before-call.s3.PutObject', lf)
response = client.put_object(Bucket=bucket_name,Key='foo', Body='zar')
response = client.get_object(Bucket=bucket_name, Key='foo')
body = _get_body(response)
eq(body, 'zar')
@attr(resource='object')
@attr(method='get')
@attr(operation='get w/ If-Match: bogus ETag')
@attr(assertion='fails 412')
def test_put_object_ifmatch_failed():
bucket_name = get_new_bucket()
client = get_client()
client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
response = client.get_object(Bucket=bucket_name, Key='foo')
body = _get_body(response)
eq(body, 'bar')
# pass in custom header 'If-Match' before PutObject call
lf = (lambda **kwargs: kwargs['params']['headers'].update({'If-Match': '"ABCORZ"'}))
client.meta.events.register('before-call.s3.PutObject', lf)
e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key='foo', Body='zar')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 412)
eq(error_code, 'PreconditionFailed')
response = client.get_object(Bucket=bucket_name, Key='foo')
body = _get_body(response)
eq(body, 'bar')
@attr(resource='object')
@attr(method='put')
@attr(operation='overwrite existing object w/ If-Match: *')
@attr(assertion='replaces previous data and metadata')
@attr('fails_on_aws')
def test_put_object_ifmatch_overwrite_existed_good():
bucket_name = get_new_bucket()
client = get_client()
client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
response = client.get_object(Bucket=bucket_name, Key='foo')
body = _get_body(response)
eq(body, 'bar')
lf = (lambda **kwargs: kwargs['params']['headers'].update({'If-Match': '*'}))
client.meta.events.register('before-call.s3.PutObject', lf)
response = client.put_object(Bucket=bucket_name,Key='foo', Body='zar')
response = client.get_object(Bucket=bucket_name, Key='foo')
body = _get_body(response)
eq(body, 'zar')
@attr(resource='object')
@attr(method='put')
@attr(operation='overwrite non-existing object w/ If-Match: *')
@attr(assertion='fails 412')
@attr('fails_on_aws')
def test_put_object_ifmatch_nonexisted_failed():
bucket_name = get_new_bucket()
client = get_client()
lf = (lambda **kwargs: kwargs['params']['headers'].update({'If-Match': '*'}))
client.meta.events.register('before-call.s3.PutObject', lf)
e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key='foo', Body='bar')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 412)
eq(error_code, 'PreconditionFailed')
e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='foo')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 404)
eq(error_code, 'NoSuchKey')
@attr(resource='object')
@attr(method='put')
@attr(operation='overwrite existing object w/ If-None-Match: outdated ETag')
@attr(assertion='replaces previous data and metadata')
@attr('fails_on_aws')
def test_put_object_ifnonmatch_good():
bucket_name = get_new_bucket()
client = get_client()
client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
response = client.get_object(Bucket=bucket_name, Key='foo')
body = _get_body(response)
eq(body, 'bar')
lf = (lambda **kwargs: kwargs['params']['headers'].update({'If-None-Match': 'ABCORZ'}))
client.meta.events.register('before-call.s3.PutObject', lf)
response = client.put_object(Bucket=bucket_name,Key='foo', Body='zar')
response = client.get_object(Bucket=bucket_name, Key='foo')
body = _get_body(response)
eq(body, 'zar')
@attr(resource='object')
@attr(method='put')
@attr(operation='overwrite existing object w/ If-None-Match: the latest ETag')
@attr(assertion='fails 412')
@attr('fails_on_aws')
def test_put_object_ifnonmatch_failed():
bucket_name = get_new_bucket()
client = get_client()
client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
response = client.get_object(Bucket=bucket_name, Key='foo')
body = _get_body(response)
eq(body, 'bar')
etag = response['ETag'].replace('"', '')
lf = (lambda **kwargs: kwargs['params']['headers'].update({'If-None-Match': etag}))
client.meta.events.register('before-call.s3.PutObject', lf)
e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key='foo', Body='zar')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 412)
eq(error_code, 'PreconditionFailed')
response = client.get_object(Bucket=bucket_name, Key='foo')
body = _get_body(response)
eq(body, 'bar')
@attr(resource='object')
@attr(method='put')
@attr(operation='overwrite non-existing object w/ If-None-Match: *')
@attr(assertion='succeeds')
@attr('fails_on_aws')
def test_put_object_ifnonmatch_nonexisted_good():
bucket_name = get_new_bucket()
client = get_client()
lf = (lambda **kwargs: kwargs['params']['headers'].update({'If-None-Match': '*'}))
client.meta.events.register('before-call.s3.PutObject', lf)
client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
response = client.get_object(Bucket=bucket_name, Key='foo')
body = _get_body(response)
eq(body, 'bar')
@attr(resource='object')
@attr(method='put')
@attr(operation='overwrite existing object w/ If-None-Match: *')
@attr(assertion='fails 412')
@attr('fails_on_aws')
def test_put_object_ifnonmatch_overwrite_existed_failed():
bucket_name = get_new_bucket()
client = get_client()
client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
response = client.get_object(Bucket=bucket_name, Key='foo')
body = _get_body(response)
eq(body, 'bar')
lf = (lambda **kwargs: kwargs['params']['headers'].update({'If-None-Match': '*'}))
client.meta.events.register('before-call.s3.PutObject', lf)
e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key='foo', Body='zar')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 412)
eq(error_code, 'PreconditionFailed')
response = client.get_object(Bucket=bucket_name, Key='foo')
body = _get_body(response)
eq(body, 'bar')
def _setup_bucket_object_acl(bucket_acl, object_acl):
"""
add a foo key, and specified key and bucket acls to
a (new or existing) bucket.
"""
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(ACL=bucket_acl, Bucket=bucket_name)
client.put_object(ACL=object_acl, Bucket=bucket_name, Key='foo')
return bucket_name
def _setup_bucket_acl(bucket_acl=None):
"""
set up a new bucket with specified acl
"""
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(ACL=bucket_acl, Bucket=bucket_name)
return bucket_name
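# Example usage, mirroring the tests below: a public-read bucket holding a
# private 'foo' object, and a public-read-write bucket with no objects yet.
#   bucket_name = _setup_bucket_object_acl('public-read', 'private')
#   bucket_name = _setup_bucket_acl('public-read-write')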
@attr(resource='object')
@attr(method='get')
@attr(operation='publicly readable bucket')
@attr(assertion='bucket is readable')
def test_object_raw_get():
bucket_name = _setup_bucket_object_acl('public-read', 'public-read')
unauthenticated_client = get_unauthenticated_client()
response = unauthenticated_client.get_object(Bucket=bucket_name, Key='foo')
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
@attr(resource='object')
@attr(method='get')
@attr(operation='deleted object and bucket')
@attr(assertion='fails 404')
def test_object_raw_get_bucket_gone():
bucket_name = _setup_bucket_object_acl('public-read', 'public-read')
client = get_client()
client.delete_object(Bucket=bucket_name, Key='foo')
client.delete_bucket(Bucket=bucket_name)
unauthenticated_client = get_unauthenticated_client()
e = assert_raises(ClientError, unauthenticated_client.get_object, Bucket=bucket_name, Key='foo')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 404)
eq(error_code, 'NoSuchBucket')
@attr(resource='object')
@attr(method='delete')
@attr(operation='deleted object and bucket')
@attr(assertion='fails 404')
def test_object_delete_key_bucket_gone():
bucket_name = _setup_bucket_object_acl('public-read', 'public-read')
client = get_client()
client.delete_object(Bucket=bucket_name, Key='foo')
client.delete_bucket(Bucket=bucket_name)
unauthenticated_client = get_unauthenticated_client()
e = assert_raises(ClientError, unauthenticated_client.delete_object, Bucket=bucket_name, Key='foo')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 404)
eq(error_code, 'NoSuchBucket')
@attr(resource='object')
@attr(method='get')
@attr(operation='deleted object')
@attr(assertion='fails 404')
def test_object_raw_get_object_gone():
bucket_name = _setup_bucket_object_acl('public-read', 'public-read')
client = get_client()
client.delete_object(Bucket=bucket_name, Key='foo')
unauthenticated_client = get_unauthenticated_client()
e = assert_raises(ClientError, unauthenticated_client.get_object, Bucket=bucket_name, Key='foo')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 404)
eq(error_code, 'NoSuchKey')
@attr(resource='bucket')
@attr(method='head')
@attr(operation='head bucket')
@attr(assertion='succeeds')
def test_bucket_head():
bucket_name = get_new_bucket()
client = get_client()
response = client.head_bucket(Bucket=bucket_name)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
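# test_bucket_head_extended relies on the RGW-specific x-rgw-object-count and
# x-rgw-bytes-used response headers, which AWS does not return; hence the
# 'fails_on_aws' attribute below.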
@attr('fails_on_aws')
@attr(resource='bucket')
@attr(method='head')
@attr(operation='read bucket extended information')
@attr(assertion='extended information is getting updated')
def test_bucket_head_extended():
bucket_name = get_new_bucket()
client = get_client()
response = client.head_bucket(Bucket=bucket_name)
eq(int(response['ResponseMetadata']['HTTPHeaders']['x-rgw-object-count']), 0)
eq(int(response['ResponseMetadata']['HTTPHeaders']['x-rgw-bytes-used']), 0)
_create_objects(bucket_name=bucket_name, keys=['foo','bar','baz'])
response = client.head_bucket(Bucket=bucket_name)
eq(int(response['ResponseMetadata']['HTTPHeaders']['x-rgw-object-count']), 3)
eq(int(response['ResponseMetadata']['HTTPHeaders']['x-rgw-bytes-used']), 9)
@attr(resource='bucket.acl')
@attr(method='get')
@attr(operation='unauthenticated on private bucket')
@attr(assertion='succeeds')
def test_object_raw_get_bucket_acl():
bucket_name = _setup_bucket_object_acl('private', 'public-read')
unauthenticated_client = get_unauthenticated_client()
response = unauthenticated_client.get_object(Bucket=bucket_name, Key='foo')
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
@attr(resource='object.acl')
@attr(method='get')
@attr(operation='unauthenticated on private object')
@attr(assertion='fails 403')
def test_object_raw_get_object_acl():
bucket_name = _setup_bucket_object_acl('public-read', 'private')
unauthenticated_client = get_unauthenticated_client()
e = assert_raises(ClientError, unauthenticated_client.get_object, Bucket=bucket_name, Key='foo')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 403)
eq(error_code, 'AccessDenied')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='authenticated on public bucket/object')
@attr(assertion='succeeds')
def test_object_raw_authenticated():
bucket_name = _setup_bucket_object_acl('public-read', 'public-read')
client = get_client()
response = client.get_object(Bucket=bucket_name, Key='foo')
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
@attr(resource='object')
@attr(method='get')
@attr(operation='authenticated on private bucket/private object with modified response headers')
@attr(assertion='succeeds')
def test_object_raw_response_headers():
bucket_name = _setup_bucket_object_acl('private', 'private')
client = get_client()
response = client.get_object(Bucket=bucket_name, Key='foo', ResponseCacheControl='no-cache', ResponseContentDisposition='bla', ResponseContentEncoding='aaa', ResponseContentLanguage='esperanto', ResponseContentType='foo/bar', ResponseExpires='123')
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
eq(response['ResponseMetadata']['HTTPHeaders']['content-type'], 'foo/bar')
eq(response['ResponseMetadata']['HTTPHeaders']['content-disposition'], 'bla')
eq(response['ResponseMetadata']['HTTPHeaders']['content-language'], 'esperanto')
eq(response['ResponseMetadata']['HTTPHeaders']['content-encoding'], 'aaa')
eq(response['ResponseMetadata']['HTTPHeaders']['cache-control'], 'no-cache')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='authenticated on private bucket/public object')
@attr(assertion='succeeds')
def test_object_raw_authenticated_bucket_acl():
bucket_name = _setup_bucket_object_acl('private', 'public-read')
client = get_client()
response = client.get_object(Bucket=bucket_name, Key='foo')
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='authenticated on public bucket/private object')
@attr(assertion='succeeds')
def test_object_raw_authenticated_object_acl():
bucket_name = _setup_bucket_object_acl('public-read', 'private')
client = get_client()
response = client.get_object(Bucket=bucket_name, Key='foo')
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
@attr(resource='object')
@attr(method='get')
@attr(operation='authenticated on deleted object and bucket')
@attr(assertion='fails 404')
def test_object_raw_authenticated_bucket_gone():
bucket_name = _setup_bucket_object_acl('public-read', 'public-read')
client = get_client()
client.delete_object(Bucket=bucket_name, Key='foo')
client.delete_bucket(Bucket=bucket_name)
e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='foo')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 404)
eq(error_code, 'NoSuchBucket')
@attr(resource='object')
@attr(method='get')
@attr(operation='authenticated on deleted object')
@attr(assertion='fails 404')
def test_object_raw_authenticated_object_gone():
bucket_name = _setup_bucket_object_acl('public-read', 'public-read')
client = get_client()
client.delete_object(Bucket=bucket_name, Key='foo')
e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='foo')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 404)
eq(error_code, 'NoSuchKey')
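# The presigned-URL tests below fetch or upload the object over plain HTTP with
# 'requests', using URLs from client.generate_presigned_url(). An ExpiresIn
# value of zero, a negative value, or a value past the allowed maximum is
# expected to be rejected with 403.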
@attr(resource='object')
@attr(method='get')
@attr(operation='x-amz-expires check not expired')
@attr(assertion='succeeds')
def test_object_raw_get_x_amz_expires_not_expired():
bucket_name = _setup_bucket_object_acl('public-read', 'public-read')
client = get_client()
params = {'Bucket': bucket_name, 'Key': 'foo'}
url = client.generate_presigned_url(ClientMethod='get_object', Params=params, ExpiresIn=100000, HttpMethod='GET')
res = requests.get(url).__dict__
eq(res['status_code'], 200)
@attr(resource='object')
@attr(method='get')
@attr(operation='check x-amz-expires value out of range zero')
@attr(assertion='fails 403')
def test_object_raw_get_x_amz_expires_out_range_zero():
bucket_name = _setup_bucket_object_acl('public-read', 'public-read')
client = get_client()
params = {'Bucket': bucket_name, 'Key': 'foo'}
url = client.generate_presigned_url(ClientMethod='get_object', Params=params, ExpiresIn=0, HttpMethod='GET')
res = requests.get(url).__dict__
eq(res['status_code'], 403)
@attr(resource='object')
@attr(method='get')
@attr(operation='check x-amz-expires value out of max range')
@attr(assertion='fails 403')
def test_object_raw_get_x_amz_expires_out_max_range():
bucket_name = _setup_bucket_object_acl('public-read', 'public-read')
client = get_client()
params = {'Bucket': bucket_name, 'Key': 'foo'}
url = client.generate_presigned_url(ClientMethod='get_object', Params=params, ExpiresIn=609901, HttpMethod='GET')
res = requests.get(url).__dict__
eq(res['status_code'], 403)
@attr(resource='object')
@attr(method='get')
@attr(operation='check x-amz-expires value out of positive range')
@attr(assertion='fails 403')
def test_object_raw_get_x_amz_expires_out_positive_range():
bucket_name = _setup_bucket_object_acl('public-read', 'public-read')
client = get_client()
params = {'Bucket': bucket_name, 'Key': 'foo'}
url = client.generate_presigned_url(ClientMethod='get_object', Params=params, ExpiresIn=-7, HttpMethod='GET')
res = requests.get(url).__dict__
eq(res['status_code'], 403)
@attr(resource='object')
@attr(method='put')
@attr(operation='unauthenticated, no object acls')
@attr(assertion='fails 403')
def test_object_anon_put():
bucket_name = get_new_bucket()
client = get_client()
client.put_object(Bucket=bucket_name, Key='foo')
unauthenticated_client = get_unauthenticated_client()
e = assert_raises(ClientError, unauthenticated_client.put_object, Bucket=bucket_name, Key='foo', Body='foo')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 403)
eq(error_code, 'AccessDenied')
@attr(resource='object')
@attr(method='put')
@attr(operation='unauthenticated, publicly writable object')
@attr(assertion='succeeds')
def test_object_anon_put_write_access():
bucket_name = _setup_bucket_acl('public-read-write')
client = get_client()
client.put_object(Bucket=bucket_name, Key='foo')
unauthenticated_client = get_unauthenticated_client()
response = unauthenticated_client.put_object(Bucket=bucket_name, Key='foo', Body='foo')
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
@attr(resource='object')
@attr(method='put')
@attr(operation='authenticated, no object acls')
@attr(assertion='succeeds')
def test_object_put_authenticated():
bucket_name = get_new_bucket()
client = get_client()
response = client.put_object(Bucket=bucket_name, Key='foo', Body='foo')
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
@attr(resource='object')
@attr(method='put')
@attr(operation='authenticated, no object acls')
@attr(assertion='succeeds')
def test_object_raw_put_authenticated_expired():
bucket_name = get_new_bucket()
client = get_client()
client.put_object(Bucket=bucket_name, Key='foo')
params = {'Bucket': bucket_name, 'Key': 'foo'}
url = client.generate_presigned_url(ClientMethod='put_object', Params=params, ExpiresIn=-1000, HttpMethod='PUT')
# generate_presigned_url's Params don't accept a 'Body', so the payload is passed directly to requests.put()
res = requests.put(url,data="foo").__dict__
eq(res['status_code'], 403)
def check_bad_bucket_name(bucket_name):
"""
Attempt to create a bucket with a specified name, and confirm
that the request fails because of an invalid bucket name.
"""
client = get_client()
e = assert_raises(ClientError, client.create_bucket, Bucket=bucket_name)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
eq(error_code, 'InvalidBucketName')
# AWS does not enforce all documented bucket restrictions.
# http://docs.amazonwebservices.com/AmazonS3/2006-03-01/dev/index.html?BucketRestrictions.html
@attr('fails_on_aws')
# Breaks DNS with SubdomainCallingFormat
@attr('fails_with_subdomain')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='name begins with underscore')
@attr(assertion='fails with subdomain: 400')
def test_bucket_create_naming_bad_starts_nonalpha():
bucket_name = get_new_bucket_name()
check_bad_bucket_name('_' + bucket_name)
def check_invalid_bucketname(invalid_name):
"""
Send a create bucket_request with an invalid bucket name
that will bypass the ParamValidationError that would be raised
if the invalid bucket name that was passed in normally.
This function returns the status and error code from the failure
"""
client = get_client()
valid_bucket_name = get_new_bucket_name()
def replace_bucketname_from_url(**kwargs):
url = kwargs['params']['url']
new_url = url.replace(valid_bucket_name, invalid_name)
kwargs['params']['url'] = new_url
client.meta.events.register('before-call.s3.CreateBucket', replace_bucketname_from_url)
e = assert_raises(ClientError, client.create_bucket, Bucket=invalid_name)
status, error_code = _get_status_and_error_code(e.response)
return (status, error_code)
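# Example: check_invalid_bucketname('') issues the CreateBucket call against a
# URL whose valid, auto-generated bucket name has been swapped for the empty
# string by the before-call hook, so the server-side error is observed instead
# of botocore's client-side ParamValidationError.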
@attr(resource='bucket')
@attr(method='put')
@attr(operation='empty name')
@attr(assertion='fails 405')
# TODO: remove this fails_on_rgw when I fix it
@attr('fails_on_rgw')
def test_bucket_create_naming_bad_short_empty():
invalid_bucketname = ''
status, error_code = check_invalid_bucketname(invalid_bucketname)
eq(status, 405)
eq(error_code, 'MethodNotAllowed')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='short (one character) name')
@attr(assertion='fails 400')
def test_bucket_create_naming_bad_short_one():
check_bad_bucket_name('a')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='short (two character) name')
@attr(assertion='fails 400')
def test_bucket_create_naming_bad_short_two():
check_bad_bucket_name('aa')
# Breaks DNS with SubdomainCallingFormat
@attr('fails_with_subdomain')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='excessively long names')
@attr(assertion='fails with subdomain: 400')
# TODO: remove this fails_on_rgw when I fix it
@attr('fails_on_rgw')
def test_bucket_create_naming_bad_long():
invalid_bucketname = 256*'a'
status, error_code = check_invalid_bucketname(invalid_bucketname)
eq(status, 400)
invalid_bucketname = 280*'a'
status, error_code = check_invalid_bucketname(invalid_bucketname)
eq(status, 400)
invalid_bucketname = 3000*'a'
status, error_code = check_invalid_bucketname(invalid_bucketname)
eq(status, 400)
def check_good_bucket_name(name, _prefix=None):
"""
Attempt to create a bucket with a specified name
and (specified or default) prefix, returning the
results of that effort.
"""
# tests using this with the default prefix must *not* rely on
# being able to set the initial character, or exceed the max len
# tests using this with a custom prefix are responsible for doing
# their own setup/teardown nukes, with their custom prefix; this
# should be very rare
if _prefix is None:
_prefix = get_prefix()
bucket_name = '{prefix}{name}'.format(
prefix=_prefix,
name=name,
)
client = get_client()
response = client.create_bucket(Bucket=bucket_name)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
def _test_bucket_create_naming_good_long(length):
"""
Attempt to create a bucket whose name (including the
prefix) is of a specified length.
"""
# tests using this with the default prefix must *not* rely on
# being able to set the initial character, or exceed the max len
# tests using this with a custom prefix are responsible for doing
# their own setup/teardown nukes, with their custom prefix; this
# should be very rare
prefix = get_new_bucket_name()
assert len(prefix) < 63
num = length - len(prefix)
    name = num * 'a'
bucket_name = '{prefix}{name}'.format(
prefix=prefix,
name=name,
)
client = get_client()
response = client.create_bucket(Bucket=bucket_name)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
# Breaks DNS with SubdomainCallingFormat
@attr('fails_with_subdomain')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/60 byte name')
@attr(assertion='fails with subdomain')
@attr('fails_on_aws') # <Error><Code>InvalidBucketName</Code><Message>The specified bucket is not valid.</Message>...</Error>
# Should now pass on AWS even though it has 'fails_on_aws' attr.
def test_bucket_create_naming_good_long_60():
_test_bucket_create_naming_good_long(60)
# Breaks DNS with SubdomainCallingFormat
@attr('fails_with_subdomain')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/61 byte name')
@attr(assertion='fails with subdomain')
@attr('fails_on_aws') # <Error><Code>InvalidBucketName</Code><Message>The specified bucket is not valid.</Message>...</Error>
# Should now pass on AWS even though it has 'fails_on_aws' attr.
def test_bucket_create_naming_good_long_61():
_test_bucket_create_naming_good_long(61)
# Breaks DNS with SubdomainCallingFormat
@attr('fails_with_subdomain')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/62 byte name')
@attr(assertion='fails with subdomain')
@attr('fails_on_aws') # <Error><Code>InvalidBucketName</Code><Message>The specified bucket is not valid.</Message>...</Error>
# Should now pass on AWS even though it has 'fails_on_aws' attr.
def test_bucket_create_naming_good_long_62():
_test_bucket_create_naming_good_long(62)
# Breaks DNS with SubdomainCallingFormat
@attr('fails_with_subdomain')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/63 byte name')
@attr(assertion='fails with subdomain')
def test_bucket_create_naming_good_long_63():
_test_bucket_create_naming_good_long(63)
# Breaks DNS with SubdomainCallingFormat
@attr('fails_with_subdomain')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list w/61 byte name')
@attr(assertion='fails with subdomain')
@attr('fails_on_aws') # <Error><Code>InvalidBucketName</Code><Message>The specified bucket is not valid.</Message>...</Error>
# Should now pass on AWS even though it has 'fails_on_aws' attr.
def test_bucket_list_long_name():
prefix = get_new_bucket_name()
length = 61
num = length - len(prefix)
    name = num * 'a'
bucket_name = '{prefix}{name}'.format(
prefix=prefix,
name=name,
)
bucket = get_new_bucket_resource(name=bucket_name)
is_empty = _bucket_is_empty(bucket)
eq(is_empty, True)
# AWS does not enforce all documented bucket restrictions.
# http://docs.amazonwebservices.com/AmazonS3/2006-03-01/dev/index.html?BucketRestrictions.html
@attr('fails_on_aws')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/ip address for name')
@attr(assertion='fails on aws')
def test_bucket_create_naming_bad_ip():
check_bad_bucket_name('192.168.5.123')
# Breaks DNS with SubdomainCallingFormat
@attr('fails_with_subdomain')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/! in name')
@attr(assertion='fails with subdomain')
# TODO: remove this fails_on_rgw when I fix it
@attr('fails_on_rgw')
def test_bucket_create_naming_bad_punctuation():
# characters other than [a-zA-Z0-9._-]
invalid_bucketname = 'alpha!soup'
status, error_code = check_invalid_bucketname(invalid_bucketname)
# TODO: figure out why a 403 is coming out in boto3 but not in boto2.
eq(status, 400)
eq(error_code, 'InvalidBucketName')
# test_bucket_create_naming_dns_* are valid but not recommended
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/underscore in name')
@attr(assertion='fails')
@attr('fails_on_aws') # <Error><Code>InvalidBucketName</Code><Message>The specified bucket is not valid.</Message>...</Error>
# Should now pass on AWS even though it has 'fails_on_aws' attr.
def test_bucket_create_naming_dns_underscore():
invalid_bucketname = 'foo_bar'
status, error_code = check_invalid_bucketname(invalid_bucketname)
eq(status, 400)
eq(error_code, 'InvalidBucketName')
# Breaks DNS with SubdomainCallingFormat
@attr('fails_with_subdomain')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/100 byte name')
@attr(assertion='fails with subdomain')
@attr('fails_on_aws') # <Error><Code>InvalidBucketName</Code><Message>The specified bucket is not valid.</Message>...</Error>
def test_bucket_create_naming_dns_long():
prefix = get_prefix()
assert len(prefix) < 50
num = 63 - len(prefix)
check_good_bucket_name(num * 'a')
# Breaks DNS with SubdomainCallingFormat
@attr('fails_with_subdomain')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/dash at end of name')
@attr(assertion='fails')
@attr('fails_on_aws') # <Error><Code>InvalidBucketName</Code><Message>The specified bucket is not valid.</Message>...</Error>
# Should now pass on AWS even though it has 'fails_on_aws' attr.
def test_bucket_create_naming_dns_dash_at_end():
invalid_bucketname = 'foo-'
status, error_code = check_invalid_bucketname(invalid_bucketname)
eq(status, 400)
eq(error_code, 'InvalidBucketName')
# Breaks DNS with SubdomainCallingFormat
@attr('fails_with_subdomain')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/.. in name')
@attr(assertion='fails')
@attr('fails_on_aws') # <Error><Code>InvalidBucketName</Code><Message>The specified bucket is not valid.</Message>...</Error>
# Should now pass on AWS even though it has 'fails_on_aws' attr.
def test_bucket_create_naming_dns_dot_dot():
invalid_bucketname = 'foo..bar'
status, error_code = check_invalid_bucketname(invalid_bucketname)
eq(status, 400)
eq(error_code, 'InvalidBucketName')
# Breaks DNS with SubdomainCallingFormat
@attr('fails_with_subdomain')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/.- in name')
@attr(assertion='fails')
@attr('fails_on_aws') # <Error><Code>InvalidBucketName</Code><Message>The specified bucket is not valid.</Message>...</Error>
# Should now pass on AWS even though it has 'fails_on_aws' attr.
def test_bucket_create_naming_dns_dot_dash():
invalid_bucketname = 'foo.-bar'
status, error_code = check_invalid_bucketname(invalid_bucketname)
eq(status, 400)
eq(error_code, 'InvalidBucketName')
# Breaks DNS with SubdomainCallingFormat
@attr('fails_with_subdomain')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/-. in name')
@attr(assertion='fails')
@attr('fails_on_aws') # <Error><Code>InvalidBucketName</Code><Message>The specified bucket is not valid.</Message>...</Error>
# Should now pass on AWS even though it has 'fails_on_aws' attr.
def test_bucket_create_naming_dns_dash_dot():
invalid_bucketname = 'foo-.bar'
status, error_code = check_invalid_bucketname(invalid_bucketname)
eq(status, 400)
eq(error_code, 'InvalidBucketName')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='re-create')
def test_bucket_create_exists():
# aws-s3 default region allows recreation of buckets
# but all other regions fail with BucketAlreadyOwnedByYou.
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(Bucket=bucket_name)
    try:
        client.create_bucket(Bucket=bucket_name)
    except ClientError as e:
        status, error_code = _get_status_and_error_code(e.response)
        eq(status, 409)
        eq(error_code, 'BucketAlreadyOwnedByYou')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='get location')
def test_bucket_get_location():
location_constraint = get_main_api_name()
if not location_constraint:
raise SkipTest
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(Bucket=bucket_name, CreateBucketConfiguration={'LocationConstraint': location_constraint})
response = client.get_bucket_location(Bucket=bucket_name)
if location_constraint == "":
location_constraint = None
eq(response['LocationConstraint'], location_constraint)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='re-create by non-owner')
@attr(assertion='fails 409')
def test_bucket_create_exists_nonowner():
# Names are shared across a global namespace. As such, no two
# users can create a bucket with that same name.
bucket_name = get_new_bucket_name()
client = get_client()
alt_client = get_alt_client()
client.create_bucket(Bucket=bucket_name)
e = assert_raises(ClientError, alt_client.create_bucket, Bucket=bucket_name)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 409)
eq(error_code, 'BucketAlreadyExists')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='re-create with existing acl')
@attr(assertion='fails 409')
def test_bucket_recreate_overwrite_acl():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(Bucket=bucket_name, ACL='public-read')
e = assert_raises(ClientError, client.create_bucket, Bucket=bucket_name)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 409)
eq(error_code, 'BucketAlreadyExists')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='re-create with new acl')
@attr(assertion='fails 409')
def test_bucket_recreate_new_acl():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(Bucket=bucket_name)
e = assert_raises(ClientError, client.create_bucket, Bucket=bucket_name, ACL='public-read')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 409)
eq(error_code, 'BucketAlreadyExists')
def check_access_denied(fn, *args, **kwargs):
e = assert_raises(ClientError, fn, *args, **kwargs)
status = _get_status(e.response)
eq(status, 403)
def check_grants(got, want):
"""
    Check that the grants list in got matches the list of dicts in want.
    Grants are compared pairwise, so both lists must be in the same order.
"""
eq(len(got), len(want))
for g, w in zip(got, want):
w = dict(w)
g = dict(g)
eq(g.pop('Permission', None), w['Permission'])
eq(g['Grantee'].pop('DisplayName', None), w['DisplayName'])
eq(g['Grantee'].pop('ID', None), w['ID'])
eq(g['Grantee'].pop('Type', None), w['Type'])
eq(g['Grantee'].pop('URI', None), w['URI'])
eq(g['Grantee'].pop('EmailAddress', None), w['EmailAddress'])
eq(g, {'Grantee': {}})
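# A sketch of the shape each 'want' entry takes (all values below are
# hypothetical); every field is present, with None standing in for grantee
# attributes that should be absent from the response:
_EXAMPLE_WANT_GRANT = dict(
    Permission='FULL_CONTROL',
    ID='example-canonical-user-id',
    DisplayName='example-user',
    URI=None,
    EmailAddress=None,
    Type='CanonicalUser',
)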
@attr(resource='bucket')
@attr(method='get')
@attr(operation='default acl')
@attr(assertion='read back expected defaults')
def test_bucket_acl_default():
bucket_name = get_new_bucket()
client = get_client()
response = client.get_bucket_acl(Bucket=bucket_name)
display_name = get_main_display_name()
user_id = get_main_user_id()
eq(response['Owner']['DisplayName'], display_name)
eq(response['Owner']['ID'], user_id)
grants = response['Grants']
check_grants(
grants,
[
dict(
Permission='FULL_CONTROL',
ID=user_id,
DisplayName=display_name,
URI=None,
EmailAddress=None,
Type='CanonicalUser',
),
],
)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='public-read acl')
@attr(assertion='read back expected defaults')
@attr('fails_on_aws') # <Error><Code>IllegalLocationConstraintException</Code><Message>The unspecified location constraint is incompatible for the region specific endpoint this request was sent to.</Message>
def test_bucket_acl_canned_during_create():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(ACL='public-read', Bucket=bucket_name)
response = client.get_bucket_acl(Bucket=bucket_name)
display_name = get_main_display_name()
user_id = get_main_user_id()
grants = response['Grants']
check_grants(
grants,
[
dict(
Permission='READ',
ID=None,
DisplayName=None,
URI='http://acs.amazonaws.com/groups/global/AllUsers',
EmailAddress=None,
Type='Group',
),
dict(
Permission='FULL_CONTROL',
ID=user_id,
DisplayName=display_name,
URI=None,
EmailAddress=None,
Type='CanonicalUser',
),
],
)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='acl: public-read,private')
@attr(assertion='read back expected values')
def test_bucket_acl_canned():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(ACL='public-read', Bucket=bucket_name)
response = client.get_bucket_acl(Bucket=bucket_name)
display_name = get_main_display_name()
user_id = get_main_user_id()
grants = response['Grants']
check_grants(
grants,
[
dict(
Permission='READ',
ID=None,
DisplayName=None,
URI='http://acs.amazonaws.com/groups/global/AllUsers',
EmailAddress=None,
Type='Group',
),
dict(
Permission='FULL_CONTROL',
ID=user_id,
DisplayName=display_name,
URI=None,
EmailAddress=None,
Type='CanonicalUser',
),
],
)
client.put_bucket_acl(ACL='private', Bucket=bucket_name)
response = client.get_bucket_acl(Bucket=bucket_name)
grants = response['Grants']
check_grants(
grants,
[
dict(
Permission='FULL_CONTROL',
ID=user_id,
DisplayName=display_name,
URI=None,
EmailAddress=None,
Type='CanonicalUser',
),
],
)
@attr(resource='bucket.acls')
@attr(method='put')
@attr(operation='acl: public-read-write')
@attr(assertion='read back expected values')
def test_bucket_acl_canned_publicreadwrite():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(ACL='public-read-write', Bucket=bucket_name)
response = client.get_bucket_acl(Bucket=bucket_name)
display_name = get_main_display_name()
user_id = get_main_user_id()
grants = response['Grants']
check_grants(
grants,
[
dict(
Permission='READ',
ID=None,
DisplayName=None,
URI='http://acs.amazonaws.com/groups/global/AllUsers',
EmailAddress=None,
Type='Group',
),
dict(
Permission='WRITE',
ID=None,
DisplayName=None,
URI='http://acs.amazonaws.com/groups/global/AllUsers',
EmailAddress=None,
Type='Group',
),
dict(
Permission='FULL_CONTROL',
ID=user_id,
DisplayName=display_name,
URI=None,
EmailAddress=None,
Type='CanonicalUser',
),
],
)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='acl: authenticated-read')
@attr(assertion='read back expected values')
def test_bucket_acl_canned_authenticatedread():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(ACL='authenticated-read', Bucket=bucket_name)
response = client.get_bucket_acl(Bucket=bucket_name)
display_name = get_main_display_name()
user_id = get_main_user_id()
grants = response['Grants']
check_grants(
grants,
[
dict(
Permission='READ',
ID=None,
DisplayName=None,
URI='http://acs.amazonaws.com/groups/global/AuthenticatedUsers',
EmailAddress=None,
Type='Group',
),
dict(
Permission='FULL_CONTROL',
ID=user_id,
DisplayName=display_name,
URI=None,
EmailAddress=None,
Type='CanonicalUser',
),
],
)
@attr(resource='object.acls')
@attr(method='get')
@attr(operation='default acl')
@attr(assertion='read back expected defaults')
def test_object_acl_default():
bucket_name = get_new_bucket()
client = get_client()
client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
response = client.get_object_acl(Bucket=bucket_name, Key='foo')
display_name = get_main_display_name()
user_id = get_main_user_id()
grants = response['Grants']
check_grants(
grants,
[
dict(
Permission='FULL_CONTROL',
ID=user_id,
DisplayName=display_name,
URI=None,
EmailAddress=None,
Type='CanonicalUser',
),
],
)
@attr(resource='object.acls')
@attr(method='put')
@attr(operation='acl public-read')
@attr(assertion='read back expected values')
def test_object_acl_canned_during_create():
bucket_name = get_new_bucket()
client = get_client()
client.put_object(ACL='public-read', Bucket=bucket_name, Key='foo', Body='bar')
response = client.get_object_acl(Bucket=bucket_name, Key='foo')
display_name = get_main_display_name()
user_id = get_main_user_id()
grants = response['Grants']
check_grants(
grants,
[
dict(
Permission='READ',
ID=None,
DisplayName=None,
URI='http://acs.amazonaws.com/groups/global/AllUsers',
EmailAddress=None,
Type='Group',
),
dict(
Permission='FULL_CONTROL',
ID=user_id,
DisplayName=display_name,
URI=None,
EmailAddress=None,
Type='CanonicalUser',
),
],
)
@attr(resource='object.acls')
@attr(method='put')
@attr(operation='acl public-read,private')
@attr(assertion='read back expected values')
def test_object_acl_canned():
bucket_name = get_new_bucket()
client = get_client()
# Since it defaults to private, set it public-read first
client.put_object(ACL='public-read', Bucket=bucket_name, Key='foo', Body='bar')
response = client.get_object_acl(Bucket=bucket_name, Key='foo')
display_name = get_main_display_name()
user_id = get_main_user_id()
grants = response['Grants']
check_grants(
grants,
[
dict(
Permission='READ',
ID=None,
DisplayName=None,
URI='http://acs.amazonaws.com/groups/global/AllUsers',
EmailAddress=None,
Type='Group',
),
dict(
Permission='FULL_CONTROL',
ID=user_id,
DisplayName=display_name,
URI=None,
EmailAddress=None,
Type='CanonicalUser',
),
],
)
# Then back to private.
    client.put_object_acl(ACL='private', Bucket=bucket_name, Key='foo')
response = client.get_object_acl(Bucket=bucket_name, Key='foo')
grants = response['Grants']
check_grants(
grants,
[
dict(
Permission='FULL_CONTROL',
ID=user_id,
DisplayName=display_name,
URI=None,
EmailAddress=None,
Type='CanonicalUser',
),
],
)
@attr(resource='object')
@attr(method='put')
@attr(operation='acl public-read-write')
@attr(assertion='read back expected values')
def test_object_acl_canned_publicreadwrite():
bucket_name = get_new_bucket()
client = get_client()
client.put_object(ACL='public-read-write', Bucket=bucket_name, Key='foo', Body='bar')
response = client.get_object_acl(Bucket=bucket_name, Key='foo')
display_name = get_main_display_name()
user_id = get_main_user_id()
grants = response['Grants']
check_grants(
grants,
[
dict(
Permission='READ',
ID=None,
DisplayName=None,
URI='http://acs.amazonaws.com/groups/global/AllUsers',
EmailAddress=None,
Type='Group',
),
dict(
Permission='WRITE',
ID=None,
DisplayName=None,
URI='http://acs.amazonaws.com/groups/global/AllUsers',
EmailAddress=None,
Type='Group',
),
dict(
Permission='FULL_CONTROL',
ID=user_id,
DisplayName=display_name,
URI=None,
EmailAddress=None,
Type='CanonicalUser',
),
],
)
@attr(resource='object.acls')
@attr(method='put')
@attr(operation='acl authenticated-read')
@attr(assertion='read back expected values')
def test_object_acl_canned_authenticatedread():
bucket_name = get_new_bucket()
client = get_client()
client.put_object(ACL='authenticated-read', Bucket=bucket_name, Key='foo', Body='bar')
response = client.get_object_acl(Bucket=bucket_name, Key='foo')
display_name = get_main_display_name()
user_id = get_main_user_id()
grants = response['Grants']
check_grants(
grants,
[
dict(
Permission='READ',
ID=None,
DisplayName=None,
URI='http://acs.amazonaws.com/groups/global/AuthenticatedUsers',
EmailAddress=None,
Type='Group',
),
dict(
Permission='FULL_CONTROL',
ID=user_id,
DisplayName=display_name,
URI=None,
EmailAddress=None,
Type='CanonicalUser',
),
],
)
@attr(resource='object.acls')
@attr(method='put')
@attr(operation='acl bucket-owner-read')
@attr(assertion='read back expected values')
def test_object_acl_canned_bucketownerread():
bucket_name = get_new_bucket_name()
main_client = get_client()
alt_client = get_alt_client()
main_client.create_bucket(Bucket=bucket_name, ACL='public-read-write')
alt_client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
bucket_acl_response = main_client.get_bucket_acl(Bucket=bucket_name)
bucket_owner_id = bucket_acl_response['Grants'][2]['Grantee']['ID']
bucket_owner_display_name = bucket_acl_response['Grants'][2]['Grantee']['DisplayName']
alt_client.put_object(ACL='bucket-owner-read', Bucket=bucket_name, Key='foo')
response = alt_client.get_object_acl(Bucket=bucket_name, Key='foo')
alt_display_name = get_alt_display_name()
alt_user_id = get_alt_user_id()
grants = response['Grants']
check_grants(
grants,
[
dict(
Permission='FULL_CONTROL',
ID=alt_user_id,
DisplayName=alt_display_name,
URI=None,
EmailAddress=None,
Type='CanonicalUser',
),
dict(
Permission='READ',
ID=bucket_owner_id,
DisplayName=bucket_owner_display_name,
URI=None,
EmailAddress=None,
Type='CanonicalUser',
),
],
)
@attr(resource='object.acls')
@attr(method='put')
@attr(operation='acl bucket-owner-read')
@attr(assertion='read back expected values')
def test_object_acl_canned_bucketownerfullcontrol():
bucket_name = get_new_bucket_name()
main_client = get_client()
alt_client = get_alt_client()
main_client.create_bucket(Bucket=bucket_name, ACL='public-read-write')
alt_client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
bucket_acl_response = main_client.get_bucket_acl(Bucket=bucket_name)
bucket_owner_id = bucket_acl_response['Grants'][2]['Grantee']['ID']
bucket_owner_display_name = bucket_acl_response['Grants'][2]['Grantee']['DisplayName']
alt_client.put_object(ACL='bucket-owner-full-control', Bucket=bucket_name, Key='foo')
response = alt_client.get_object_acl(Bucket=bucket_name, Key='foo')
alt_display_name = get_alt_display_name()
alt_user_id = get_alt_user_id()
grants = response['Grants']
check_grants(
grants,
[
dict(
Permission='FULL_CONTROL',
ID=alt_user_id,
DisplayName=alt_display_name,
URI=None,
EmailAddress=None,
Type='CanonicalUser',
),
dict(
Permission='FULL_CONTROL',
ID=bucket_owner_id,
DisplayName=bucket_owner_display_name,
URI=None,
EmailAddress=None,
Type='CanonicalUser',
),
],
)
@attr(resource='object.acls')
@attr(method='put')
@attr(operation='set write-acp')
@attr(assertion='does not modify owner')
@attr('fails_on_aws') # <Error><Code>InvalidArgument</Code><Message>Invalid id</Message><ArgumentName>CanonicalUser/ID</ArgumentName><ArgumentValue>${ALTUSER}</ArgumentValue>
def test_object_acl_full_control_verify_owner():
bucket_name = get_new_bucket_name()
main_client = get_client()
alt_client = get_alt_client()
main_client.create_bucket(Bucket=bucket_name, ACL='public-read-write')
main_client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
alt_user_id = get_alt_user_id()
alt_display_name = get_alt_display_name()
main_user_id = get_main_user_id()
main_display_name = get_main_display_name()
grant = { 'Grants': [{'Grantee': {'ID': alt_user_id, 'Type': 'CanonicalUser' }, 'Permission': 'FULL_CONTROL'}], 'Owner': {'DisplayName': main_display_name, 'ID': main_user_id}}
main_client.put_object_acl(Bucket=bucket_name, Key='foo', AccessControlPolicy=grant)
grant = { 'Grants': [{'Grantee': {'ID': alt_user_id, 'Type': 'CanonicalUser' }, 'Permission': 'READ_ACP'}], 'Owner': {'DisplayName': main_display_name, 'ID': main_user_id}}
alt_client.put_object_acl(Bucket=bucket_name, Key='foo', AccessControlPolicy=grant)
response = alt_client.get_object_acl(Bucket=bucket_name, Key='foo')
eq(response['Owner']['ID'], main_user_id)
def add_obj_user_grant(bucket_name, key, grant):
"""
Adds a grant to the existing grants meant to be passed into
    the AccessControlPolicy argument of put_object_acl for an object
owned by the main user, not the alt user
A grant is a dictionary in the form of:
{u'Grantee': {u'Type': 'type', u'DisplayName': 'name', u'ID': 'id'}, u'Permission': 'PERM'}
"""
client = get_client()
main_user_id = get_main_user_id()
main_display_name = get_main_display_name()
response = client.get_object_acl(Bucket=bucket_name, Key=key)
grants = response['Grants']
grants.append(grant)
grant = {'Grants': grants, 'Owner': {'DisplayName': main_display_name, 'ID': main_user_id}}
return grant
@attr(resource='object.acls')
@attr(method='put')
@attr(operation='set write-acp')
@attr(assertion='does not modify other attributes')
def test_object_acl_full_control_verify_attributes():
bucket_name = get_new_bucket_name()
main_client = get_client()
alt_client = get_alt_client()
main_client.create_bucket(Bucket=bucket_name, ACL='public-read-write')
header = {'x-amz-foo': 'bar'}
# lambda to add any header
add_header = (lambda **kwargs: kwargs['params']['headers'].update(header))
main_client.meta.events.register('before-call.s3.PutObject', add_header)
main_client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
response = main_client.get_object(Bucket=bucket_name, Key='foo')
content_type = response['ContentType']
etag = response['ETag']
alt_user_id = get_alt_user_id()
grant = {'Grantee': {'ID': alt_user_id, 'Type': 'CanonicalUser' }, 'Permission': 'FULL_CONTROL'}
grants = add_obj_user_grant(bucket_name, 'foo', grant)
main_client.put_object_acl(Bucket=bucket_name, Key='foo', AccessControlPolicy=grants)
response = main_client.get_object(Bucket=bucket_name, Key='foo')
eq(content_type, response['ContentType'])
eq(etag, response['ETag'])
@attr(resource='bucket')
@attr(method='ACLs')
@attr(operation='set acl private')
@attr(assertion='a private object can be set to private')
def test_bucket_acl_canned_private_to_private():
bucket_name = get_new_bucket()
client = get_client()
response = client.put_bucket_acl(Bucket=bucket_name, ACL='private')
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
def add_bucket_user_grant(bucket_name, grant):
"""
Adds a grant to the existing grants meant to be passed into
    the AccessControlPolicy argument of put_bucket_acl for a bucket
    owned by the main user, not the alt user
A grant is a dictionary in the form of:
{u'Grantee': {u'Type': 'type', u'DisplayName': 'name', u'ID': 'id'}, u'Permission': 'PERM'}
"""
client = get_client()
main_user_id = get_main_user_id()
main_display_name = get_main_display_name()
response = client.get_bucket_acl(Bucket=bucket_name)
grants = response['Grants']
grants.append(grant)
grant = {'Grants': grants, 'Owner': {'DisplayName': main_display_name, 'ID': main_user_id}}
return grant
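# Hedged usage sketch for add_bucket_user_grant (variable names are placeholders,
# as in the helpers below): merge a new grant with the bucket's current grants,
# then apply the combined policy:
#   grant = {'Grantee': {'ID': alt_user_id, 'Type': 'CanonicalUser'}, 'Permission': 'READ'}
#   policy = add_bucket_user_grant(bucket_name, grant)
#   client.put_bucket_acl(Bucket=bucket_name, AccessControlPolicy=policy)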
def _check_object_acl(permission):
"""
Sets the permission on an object then checks to see
if it was set
"""
bucket_name = get_new_bucket()
client = get_client()
client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
response = client.get_object_acl(Bucket=bucket_name, Key='foo')
policy = {}
policy['Owner'] = response['Owner']
policy['Grants'] = response['Grants']
policy['Grants'][0]['Permission'] = permission
client.put_object_acl(Bucket=bucket_name, Key='foo', AccessControlPolicy=policy)
response = client.get_object_acl(Bucket=bucket_name, Key='foo')
grants = response['Grants']
main_user_id = get_main_user_id()
main_display_name = get_main_display_name()
check_grants(
grants,
[
dict(
Permission=permission,
ID=main_user_id,
DisplayName=main_display_name,
URI=None,
EmailAddress=None,
Type='CanonicalUser',
),
],
)
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set acl FULL_CONTROL')
@attr(assertion='reads back correctly')
@attr('fails_on_aws') # <Error><Code>InvalidArgument</Code><Message>Invalid id</Message><ArgumentName>CanonicalUser/ID</ArgumentName><ArgumentValue>${USER}</ArgumentValue>
def test_object_acl():
_check_object_acl('FULL_CONTROL')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set acl WRITE')
@attr(assertion='reads back correctly')
@attr('fails_on_aws') # <Error><Code>InvalidArgument</Code><Message>Invalid id</Message><ArgumentName>CanonicalUser/ID</ArgumentName><ArgumentValue>${USER}</ArgumentValue>
def test_object_acl_write():
_check_object_acl('WRITE')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set acl WRITE_ACP')
@attr(assertion='reads back correctly')
@attr('fails_on_aws') # <Error><Code>InvalidArgument</Code><Message>Invalid id</Message><ArgumentName>CanonicalUser/ID</ArgumentName><ArgumentValue>${USER}</ArgumentValue>
def test_object_acl_writeacp():
_check_object_acl('WRITE_ACP')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set acl READ')
@attr(assertion='reads back correctly')
@attr('fails_on_aws') # <Error><Code>InvalidArgument</Code><Message>Invalid id</Message><ArgumentName>CanonicalUser/ID</ArgumentName><ArgumentValue>${USER}</ArgumentValue>
def test_object_acl_read():
_check_object_acl('READ')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set acl READ_ACP')
@attr(assertion='reads back correctly')
@attr('fails_on_aws') # <Error><Code>InvalidArgument</Code><Message>Invalid id</Message><ArgumentName>CanonicalUser/ID</ArgumentName><ArgumentValue>${USER}</ArgumentValue>
def test_object_acl_readacp():
_check_object_acl('READ_ACP')
def _bucket_acl_grant_userid(permission):
"""
create a new bucket, grant a specific user the specified
permission, read back the acl and verify correct setting
"""
bucket_name = get_new_bucket()
client = get_client()
main_user_id = get_main_user_id()
main_display_name = get_main_display_name()
alt_user_id = get_alt_user_id()
alt_display_name = get_alt_display_name()
grant = {'Grantee': {'ID': alt_user_id, 'Type': 'CanonicalUser' }, 'Permission': permission}
grant = add_bucket_user_grant(bucket_name, grant)
client.put_bucket_acl(Bucket=bucket_name, AccessControlPolicy=grant)
response = client.get_bucket_acl(Bucket=bucket_name)
grants = response['Grants']
check_grants(
grants,
[
dict(
Permission=permission,
ID=alt_user_id,
DisplayName=alt_display_name,
URI=None,
EmailAddress=None,
Type='CanonicalUser',
),
dict(
Permission='FULL_CONTROL',
ID=main_user_id,
DisplayName=main_display_name,
URI=None,
EmailAddress=None,
Type='CanonicalUser',
),
],
)
return bucket_name
def _check_bucket_acl_grant_can_read(bucket_name):
"""
verify ability to read the specified bucket
"""
alt_client = get_alt_client()
response = alt_client.head_bucket(Bucket=bucket_name)
def _check_bucket_acl_grant_cant_read(bucket_name):
"""
verify inability to read the specified bucket
"""
alt_client = get_alt_client()
check_access_denied(alt_client.head_bucket, Bucket=bucket_name)
def _check_bucket_acl_grant_can_readacp(bucket_name):
"""
verify ability to read acls on specified bucket
"""
alt_client = get_alt_client()
alt_client.get_bucket_acl(Bucket=bucket_name)
def _check_bucket_acl_grant_cant_readacp(bucket_name):
"""
verify inability to read acls on specified bucket
"""
alt_client = get_alt_client()
check_access_denied(alt_client.get_bucket_acl, Bucket=bucket_name)
def _check_bucket_acl_grant_can_write(bucket_name):
"""
verify ability to write the specified bucket
"""
alt_client = get_alt_client()
alt_client.put_object(Bucket=bucket_name, Key='foo-write', Body='bar')
def _check_bucket_acl_grant_cant_write(bucket_name):
"""
verify inability to write the specified bucket
"""
alt_client = get_alt_client()
check_access_denied(alt_client.put_object, Bucket=bucket_name, Key='foo-write', Body='bar')
def _check_bucket_acl_grant_can_writeacp(bucket_name):
"""
verify ability to set acls on the specified bucket
"""
alt_client = get_alt_client()
alt_client.put_bucket_acl(Bucket=bucket_name, ACL='public-read')
def _check_bucket_acl_grant_cant_writeacp(bucket_name):
"""
verify inability to set acls on the specified bucket
"""
alt_client = get_alt_client()
    check_access_denied(alt_client.put_bucket_acl, Bucket=bucket_name, ACL='public-read')
@attr(resource='bucket')
@attr(method='ACLs')
@attr(operation='set acl w/userid FULL_CONTROL')
@attr(assertion='can read/write data/acls')
@attr('fails_on_aws') # <Error><Code>InvalidArgument</Code><Message>Invalid id</Message><ArgumentName>CanonicalUser/ID</ArgumentName><ArgumentValue>${USER}</ArgumentValue>
def test_bucket_acl_grant_userid_fullcontrol():
bucket_name = _bucket_acl_grant_userid('FULL_CONTROL')
# alt user can read
_check_bucket_acl_grant_can_read(bucket_name)
# can read acl
_check_bucket_acl_grant_can_readacp(bucket_name)
# can write
_check_bucket_acl_grant_can_write(bucket_name)
# can write acl
_check_bucket_acl_grant_can_writeacp(bucket_name)
client = get_client()
bucket_acl_response = client.get_bucket_acl(Bucket=bucket_name)
owner_id = bucket_acl_response['Owner']['ID']
owner_display_name = bucket_acl_response['Owner']['DisplayName']
main_display_name = get_main_display_name()
main_user_id = get_main_user_id()
eq(owner_id, main_user_id)
eq(owner_display_name, main_display_name)
@attr(resource='bucket')
@attr(method='ACLs')
@attr(operation='set acl w/userid READ')
@attr(assertion='can read data, no other r/w')
@attr('fails_on_aws') # <Error><Code>InvalidArgument</Code><Message>Invalid id</Message><ArgumentName>CanonicalUser/ID</ArgumentName><ArgumentValue>${ALTUSER}</ArgumentValue>
def test_bucket_acl_grant_userid_read():
bucket_name = _bucket_acl_grant_userid('READ')
# alt user can read
_check_bucket_acl_grant_can_read(bucket_name)
# can't read acl
_check_bucket_acl_grant_cant_readacp(bucket_name)
# can't write
_check_bucket_acl_grant_cant_write(bucket_name)
# can't write acl
_check_bucket_acl_grant_cant_writeacp(bucket_name)
@attr(resource='bucket')
@attr(method='ACLs')
@attr(operation='set acl w/userid READ_ACP')
@attr(assertion='can read acl, no other r/w')
@attr('fails_on_aws') # <Error><Code>InvalidArgument</Code><Message>Invalid id</Message><ArgumentName>CanonicalUser/ID</ArgumentName><ArgumentValue>${ALTUSER}</ArgumentValue>
def test_bucket_acl_grant_userid_readacp():
bucket_name = _bucket_acl_grant_userid('READ_ACP')
# alt user can't read
_check_bucket_acl_grant_cant_read(bucket_name)
# can read acl
_check_bucket_acl_grant_can_readacp(bucket_name)
# can't write
_check_bucket_acl_grant_cant_write(bucket_name)
# can't write acp
#_check_bucket_acl_grant_cant_writeacp_can_readacp(bucket)
_check_bucket_acl_grant_cant_writeacp(bucket_name)
@attr(resource='bucket')
@attr(method='ACLs')
@attr(operation='set acl w/userid WRITE')
@attr(assertion='can write data, no other r/w')
@attr('fails_on_aws') # <Error><Code>InvalidArgument</Code><Message>Invalid id</Message><ArgumentName>CanonicalUser/ID</ArgumentName><ArgumentValue>${ALTUSER}</ArgumentValue>
def test_bucket_acl_grant_userid_write():
bucket_name = _bucket_acl_grant_userid('WRITE')
# alt user can't read
_check_bucket_acl_grant_cant_read(bucket_name)
# can't read acl
_check_bucket_acl_grant_cant_readacp(bucket_name)
# can write
_check_bucket_acl_grant_can_write(bucket_name)
# can't write acl
_check_bucket_acl_grant_cant_writeacp(bucket_name)
@attr(resource='bucket')
@attr(method='ACLs')
@attr(operation='set acl w/userid WRITE_ACP')
@attr(assertion='can write acls, no other r/w')
@attr('fails_on_aws') # <Error><Code>InvalidArgument</Code><Message>Invalid id</Message><ArgumentName>CanonicalUser/ID</ArgumentName><ArgumentValue>${ALTUSER}</ArgumentValue>
def test_bucket_acl_grant_userid_writeacp():
bucket_name = _bucket_acl_grant_userid('WRITE_ACP')
# alt user can't read
_check_bucket_acl_grant_cant_read(bucket_name)
# can't read acl
_check_bucket_acl_grant_cant_readacp(bucket_name)
# can't write
_check_bucket_acl_grant_cant_write(bucket_name)
# can write acl
_check_bucket_acl_grant_can_writeacp(bucket_name)
@attr(resource='bucket')
@attr(method='ACLs')
@attr(operation='set acl w/invalid userid')
@attr(assertion='fails 400')
def test_bucket_acl_grant_nonexist_user():
bucket_name = get_new_bucket()
client = get_client()
bad_user_id = '_foo'
#response = client.get_bucket_acl(Bucket=bucket_name)
grant = {'Grantee': {'ID': bad_user_id, 'Type': 'CanonicalUser' }, 'Permission': 'FULL_CONTROL'}
grant = add_bucket_user_grant(bucket_name, grant)
e = assert_raises(ClientError, client.put_bucket_acl, Bucket=bucket_name, AccessControlPolicy=grant)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
eq(error_code, 'InvalidArgument')
@attr(resource='bucket')
@attr(method='ACLs')
@attr(operation='revoke all ACLs')
@attr(assertion='can: read obj, get/set bucket acl, cannot write objs')
def test_bucket_acl_no_grants():
bucket_name = get_new_bucket()
client = get_client()
client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
response = client.get_bucket_acl(Bucket=bucket_name)
old_grants = response['Grants']
policy = {}
policy['Owner'] = response['Owner']
# clear grants
policy['Grants'] = []
# remove read/write permission
response = client.put_bucket_acl(Bucket=bucket_name, AccessControlPolicy=policy)
# can read
client.get_object(Bucket=bucket_name, Key='foo')
# can't write
check_access_denied(client.put_object, Bucket=bucket_name, Key='baz', Body='a')
    # TODO: fix this test once a fix is in for the same issues as in
    # test_access_bucket_private_object_private
client2 = get_client()
# owner can read acl
client2.get_bucket_acl(Bucket=bucket_name)
# owner can write acl
client2.put_bucket_acl(Bucket=bucket_name, ACL='private')
# set policy back to original so that bucket can be cleaned up
policy['Grants'] = old_grants
client2.put_bucket_acl(Bucket=bucket_name, AccessControlPolicy=policy)
def _get_acl_header(user_id=None, perms=None):
all_headers = ["read", "write", "read-acp", "write-acp", "full-control"]
headers = []
    if user_id is None:
        user_id = get_alt_user_id()
    if perms is not None:
for perm in perms:
header = ("x-amz-grant-{perm}".format(perm=perm), "id={uid}".format(uid=user_id))
headers.append(header)
else:
for perm in all_headers:
header = ("x-amz-grant-{perm}".format(perm=perm), "id={uid}".format(uid=user_id))
headers.append(header)
return headers
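# For illustration (user id value hypothetical): _get_acl_header(perms=['read', 'write'])
# returns [('x-amz-grant-read', 'id=ALT_USER_ID'), ('x-amz-grant-write', 'id=ALT_USER_ID')];
# with perms=None, one tuple per entry in all_headers (including full-control) is
# emitted. The before-sign hooks below splice these tuples into the raw request headers.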
@attr(resource='object')
@attr(method='PUT')
@attr(operation='add all grants to user through headers')
@attr(assertion='adds all grants individually to second user')
@attr('fails_on_dho')
@attr('fails_on_aws') # <Error><Code>InvalidArgument</Code><Message>Invalid id</Message><ArgumentName>CanonicalUser/ID</ArgumentName><ArgumentValue>${ALTUSER}</ArgumentValue>
def test_object_header_acl_grants():
bucket_name = get_new_bucket()
client = get_client()
alt_user_id = get_alt_user_id()
alt_display_name = get_alt_display_name()
headers = _get_acl_header()
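    # The hook below appends the raw x-amz-grant-* tuples to the request's internal
    # header list (headers._headers) right before signing, so the grants reach the
    # server as plain headers on the PUT.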
def add_headers_before_sign(**kwargs):
updated_headers = (kwargs['request'].__dict__['headers'].__dict__['_headers'] + headers)
kwargs['request'].__dict__['headers'].__dict__['_headers'] = updated_headers
client.meta.events.register('before-sign.s3.PutObject', add_headers_before_sign)
client.put_object(Bucket=bucket_name, Key='foo_key', Body='bar')
response = client.get_object_acl(Bucket=bucket_name, Key='foo_key')
grants = response['Grants']
check_grants(
grants,
[
dict(
Permission='READ',
ID=alt_user_id,
DisplayName=alt_display_name,
URI=None,
EmailAddress=None,
Type='CanonicalUser',
),
dict(
Permission='WRITE',
ID=alt_user_id,
DisplayName=alt_display_name,
URI=None,
EmailAddress=None,
Type='CanonicalUser',
),
dict(
Permission='READ_ACP',
ID=alt_user_id,
DisplayName=alt_display_name,
URI=None,
EmailAddress=None,
Type='CanonicalUser',
),
dict(
Permission='WRITE_ACP',
ID=alt_user_id,
DisplayName=alt_display_name,
URI=None,
EmailAddress=None,
Type='CanonicalUser',
),
dict(
Permission='FULL_CONTROL',
ID=alt_user_id,
DisplayName=alt_display_name,
URI=None,
EmailAddress=None,
Type='CanonicalUser',
),
],
)
@attr(resource='bucket')
@attr(method='PUT')
@attr(operation='add all grants to user through headers')
@attr(assertion='adds all grants individually to second user')
@attr('fails_on_dho')
@attr('fails_on_aws') # <Error><Code>InvalidArgument</Code><Message>Invalid id</Message><ArgumentName>CanonicalUser/ID</ArgumentName><ArgumentValue>${ALTUSER}</ArgumentValue>
def test_bucket_header_acl_grants():
    bucket_name = get_new_bucket_name()
    client = get_client()
    headers = _get_acl_header()
def add_headers_before_sign(**kwargs):
updated_headers = (kwargs['request'].__dict__['headers'].__dict__['_headers'] + headers)
kwargs['request'].__dict__['headers'].__dict__['_headers'] = updated_headers
client.meta.events.register('before-sign.s3.CreateBucket', add_headers_before_sign)
client.create_bucket(Bucket=bucket_name)
response = client.get_bucket_acl(Bucket=bucket_name)
grants = response['Grants']
alt_user_id = get_alt_user_id()
alt_display_name = get_alt_display_name()
check_grants(
grants,
[
dict(
Permission='READ',
ID=alt_user_id,
DisplayName=alt_display_name,
URI=None,
EmailAddress=None,
Type='CanonicalUser',
),
dict(
Permission='WRITE',
ID=alt_user_id,
DisplayName=alt_display_name,
URI=None,
EmailAddress=None,
Type='CanonicalUser',
),
dict(
Permission='READ_ACP',
ID=alt_user_id,
DisplayName=alt_display_name,
URI=None,
EmailAddress=None,
Type='CanonicalUser',
),
dict(
Permission='WRITE_ACP',
ID=alt_user_id,
DisplayName=alt_display_name,
URI=None,
EmailAddress=None,
Type='CanonicalUser',
),
dict(
Permission='FULL_CONTROL',
ID=alt_user_id,
DisplayName=alt_display_name,
URI=None,
EmailAddress=None,
Type='CanonicalUser',
),
],
)
alt_client = get_alt_client()
alt_client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
# set bucket acl to public-read-write so that teardown can work
alt_client.put_bucket_acl(Bucket=bucket_name, ACL='public-read-write')
# This test will fail on DH Objects. DHO allows multiple users with one account, which
# would violate the uniqueness requirement of a user's email. As such, DHO users are
# created without an email.
@attr(resource='bucket')
@attr(method='ACLs')
@attr(operation='add second FULL_CONTROL user')
@attr(assertion='works for S3, fails for DHO')
@attr('fails_on_aws') # <Error><Code>AmbiguousGrantByEmailAddress</Code><Message>The e-mail address you provided is associated with more than one account. Please retry your request using a different identification method or after resolving the ambiguity.</Message>
def test_bucket_acl_grant_email():
bucket_name = get_new_bucket()
client = get_client()
alt_user_id = get_alt_user_id()
alt_display_name = get_alt_display_name()
alt_email_address = get_alt_email()
main_user_id = get_main_user_id()
main_display_name = get_main_display_name()
grant = {'Grantee': {'EmailAddress': alt_email_address, 'Type': 'AmazonCustomerByEmail' }, 'Permission': 'FULL_CONTROL'}
grant = add_bucket_user_grant(bucket_name, grant)
    client.put_bucket_acl(Bucket=bucket_name, AccessControlPolicy=grant)
response = client.get_bucket_acl(Bucket=bucket_name)
grants = response['Grants']
check_grants(
grants,
[
dict(
Permission='FULL_CONTROL',
ID=alt_user_id,
DisplayName=alt_display_name,
URI=None,
EmailAddress=None,
Type='CanonicalUser',
),
dict(
Permission='FULL_CONTROL',
ID=main_user_id,
DisplayName=main_display_name,
URI=None,
EmailAddress=None,
Type='CanonicalUser',
),
]
)
@attr(resource='bucket')
@attr(method='ACLs')
@attr(operation='add acl for nonexistent user')
@attr(assertion='fail 400')
def test_bucket_acl_grant_email_not_exist():
# behavior not documented by amazon
bucket_name = get_new_bucket()
client = get_client()
alt_user_id = get_alt_user_id()
alt_display_name = get_alt_display_name()
alt_email_address = get_alt_email()
NONEXISTENT_EMAIL = 'doesnotexist@dreamhost.com.invalid'
grant = {'Grantee': {'EmailAddress': NONEXISTENT_EMAIL, 'Type': 'AmazonCustomerByEmail'}, 'Permission': 'FULL_CONTROL'}
grant = add_bucket_user_grant(bucket_name, grant)
    e = assert_raises(ClientError, client.put_bucket_acl, Bucket=bucket_name, AccessControlPolicy=grant)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
eq(error_code, 'UnresolvableGrantByEmailAddress')
@attr(resource='bucket')
@attr(method='ACLs')
@attr(operation='revoke all ACLs')
@attr(assertion='acls read back as empty')
def test_bucket_acl_revoke_all():
# revoke all access, including the owner's access
bucket_name = get_new_bucket()
client = get_client()
client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
response = client.get_bucket_acl(Bucket=bucket_name)
old_grants = response['Grants']
policy = {}
policy['Owner'] = response['Owner']
# clear grants
policy['Grants'] = []
# remove read/write permission for everyone
client.put_bucket_acl(Bucket=bucket_name, AccessControlPolicy=policy)
response = client.get_bucket_acl(Bucket=bucket_name)
eq(len(response['Grants']), 0)
# set policy back to original so that bucket can be cleaned up
policy['Grants'] = old_grants
client.put_bucket_acl(Bucket=bucket_name, AccessControlPolicy=policy)
# TODO rgw log_bucket.set_as_logging_target() gives 403 Forbidden
# http://tracker.newdream.net/issues/984
@attr(resource='bucket.log')
@attr(method='put')
@attr(operation='set/enable/disable logging target')
@attr(assertion='operations succeed')
@attr('fails_on_rgw')
def test_logging_toggle():
bucket_name = get_new_bucket()
client = get_client()
main_display_name = get_main_display_name()
main_user_id = get_main_user_id()
status = {'LoggingEnabled': {'TargetBucket': bucket_name, 'TargetGrants': [{'Grantee': {'DisplayName': main_display_name, 'ID': main_user_id,'Type': 'CanonicalUser'},'Permission': 'FULL_CONTROL'}], 'TargetPrefix': 'foologgingprefix'}}
client.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus=status)
client.get_bucket_logging(Bucket=bucket_name)
status = {'LoggingEnabled': {}}
client.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus=status)
# NOTE: this does not actually test whether or not logging works
def _setup_access(bucket_acl, object_acl):
"""
Simple test fixture: create a bucket with given ACL, with objects:
- a: owning user, given ACL
- a2: same object accessed by some other user
- b: owning user, default ACL in bucket w/given ACL
    - b2: same object accessed by some other user
"""
bucket_name = get_new_bucket()
client = get_client()
key1 = 'foo'
key2 = 'bar'
newkey = 'new'
client.put_bucket_acl(Bucket=bucket_name, ACL=bucket_acl)
client.put_object(Bucket=bucket_name, Key=key1, Body='foocontent')
client.put_object_acl(Bucket=bucket_name, Key=key1, ACL=object_acl)
client.put_object(Bucket=bucket_name, Key=key2, Body='barcontent')
return bucket_name, key1, key2, newkey
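# Hedged usage sketch of the fixture above (names come from its return value):
#   bucket_name, key1, key2, newkey = _setup_access(bucket_acl='private', object_acl='private')
# key1 carries the explicit object ACL, key2 keeps the bucket default ACL, and
# newkey is an unused key name for probing writes by the non-owning user.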
def get_bucket_key_names(bucket_name):
objs_list = get_objects_list(bucket_name)
return frozenset(obj for obj in objs_list)
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set bucket/object acls: private/private')
@attr(assertion='public has no access to bucket or objects')
def test_access_bucket_private_object_private():
# all the test_access_* tests follow this template
bucket_name, key1, key2, newkey = _setup_access(bucket_acl='private', object_acl='private')
alt_client = get_alt_client()
# acled object read fail
check_access_denied(alt_client.get_object, Bucket=bucket_name, Key=key1)
# default object read fail
check_access_denied(alt_client.get_object, Bucket=bucket_name, Key=key2)
# bucket read fail
check_access_denied(alt_client.list_objects, Bucket=bucket_name)
# acled object write fail
check_access_denied(alt_client.put_object, Bucket=bucket_name, Key=key1, Body='barcontent')
    # NOTE: The above put causes the connection to go bad, so this client can't be used
    # anymore. This can be solved either by:
# 1) putting an empty string ('') in the 'Body' field of those put_object calls
# 2) getting a new client hence the creation of alt_client{2,3} for the tests below
# TODO: Test it from another host and on AWS, Report this to Amazon, if findings are identical
alt_client2 = get_alt_client()
# default object write fail
check_access_denied(alt_client2.put_object, Bucket=bucket_name, Key=key2, Body='baroverwrite')
# bucket write fail
alt_client3 = get_alt_client()
check_access_denied(alt_client3.put_object, Bucket=bucket_name, Key=newkey, Body='newcontent')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set bucket/object acls: private/private with list-objects-v2')
@attr(assertion='public has no access to bucket or objects')
@attr('list-objects-v2')
def test_access_bucket_private_objectv2_private():
# all the test_access_* tests follow this template
bucket_name, key1, key2, newkey = _setup_access(bucket_acl='private', object_acl='private')
alt_client = get_alt_client()
# acled object read fail
check_access_denied(alt_client.get_object, Bucket=bucket_name, Key=key1)
# default object read fail
check_access_denied(alt_client.get_object, Bucket=bucket_name, Key=key2)
# bucket read fail
check_access_denied(alt_client.list_objects_v2, Bucket=bucket_name)
# acled object write fail
check_access_denied(alt_client.put_object, Bucket=bucket_name, Key=key1, Body='barcontent')
    # NOTE: The above put causes the connection to go bad, so this client can't be used
    # anymore. This can be solved either by:
# 1) putting an empty string ('') in the 'Body' field of those put_object calls
# 2) getting a new client hence the creation of alt_client{2,3} for the tests below
# TODO: Test it from another host and on AWS, Report this to Amazon, if findings are identical
alt_client2 = get_alt_client()
# default object write fail
check_access_denied(alt_client2.put_object, Bucket=bucket_name, Key=key2, Body='baroverwrite')
# bucket write fail
alt_client3 = get_alt_client()
check_access_denied(alt_client3.put_object, Bucket=bucket_name, Key=newkey, Body='newcontent')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set bucket/object acls: private/public-read')
@attr(assertion='public can only read readable object')
def test_access_bucket_private_object_publicread():
bucket_name, key1, key2, newkey = _setup_access(bucket_acl='private', object_acl='public-read')
alt_client = get_alt_client()
response = alt_client.get_object(Bucket=bucket_name, Key=key1)
body = _get_body(response)
# a should be public-read, b gets default (private)
eq(body, 'foocontent')
check_access_denied(alt_client.put_object, Bucket=bucket_name, Key=key1, Body='foooverwrite')
alt_client2 = get_alt_client()
check_access_denied(alt_client2.get_object, Bucket=bucket_name, Key=key2)
check_access_denied(alt_client2.put_object, Bucket=bucket_name, Key=key2, Body='baroverwrite')
alt_client3 = get_alt_client()
check_access_denied(alt_client3.list_objects, Bucket=bucket_name)
check_access_denied(alt_client3.put_object, Bucket=bucket_name, Key=newkey, Body='newcontent')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set bucket/object acls: private/public-read with list-objects-v2')
@attr(assertion='public can only read readable object')
@attr('list-objects-v2')
def test_access_bucket_private_objectv2_publicread():
bucket_name, key1, key2, newkey = _setup_access(bucket_acl='private', object_acl='public-read')
alt_client = get_alt_client()
response = alt_client.get_object(Bucket=bucket_name, Key=key1)
body = _get_body(response)
# a should be public-read, b gets default (private)
eq(body, 'foocontent')
check_access_denied(alt_client.put_object, Bucket=bucket_name, Key=key1, Body='foooverwrite')
alt_client2 = get_alt_client()
check_access_denied(alt_client2.get_object, Bucket=bucket_name, Key=key2)
check_access_denied(alt_client2.put_object, Bucket=bucket_name, Key=key2, Body='baroverwrite')
alt_client3 = get_alt_client()
check_access_denied(alt_client3.list_objects_v2, Bucket=bucket_name)
check_access_denied(alt_client3.put_object, Bucket=bucket_name, Key=newkey, Body='newcontent')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set bucket/object acls: private/public-read/write')
@attr(assertion='public can only read the readable object')
def test_access_bucket_private_object_publicreadwrite():
bucket_name, key1, key2, newkey = _setup_access(bucket_acl='private', object_acl='public-read-write')
alt_client = get_alt_client()
response = alt_client.get_object(Bucket=bucket_name, Key=key1)
body = _get_body(response)
# a should be public-read-only ... because it is in a private bucket
# b gets default (private)
eq(body, 'foocontent')
check_access_denied(alt_client.put_object, Bucket=bucket_name, Key=key1, Body='foooverwrite')
alt_client2 = get_alt_client()
check_access_denied(alt_client2.get_object, Bucket=bucket_name, Key=key2)
check_access_denied(alt_client2.put_object, Bucket=bucket_name, Key=key2, Body='baroverwrite')
alt_client3 = get_alt_client()
check_access_denied(alt_client3.list_objects, Bucket=bucket_name)
check_access_denied(alt_client3.put_object, Bucket=bucket_name, Key=newkey, Body='newcontent')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set bucket/object acls: private/public-read/write with list-objects-v2')
@attr(assertion='public can only read the readable object')
@attr('list-objects-v2')
def test_access_bucket_private_objectv2_publicreadwrite():
bucket_name, key1, key2, newkey = _setup_access(bucket_acl='private', object_acl='public-read-write')
alt_client = get_alt_client()
response = alt_client.get_object(Bucket=bucket_name, Key=key1)
body = _get_body(response)
# a should be public-read-only ... because it is in a private bucket
# b gets default (private)
eq(body, 'foocontent')
check_access_denied(alt_client.put_object, Bucket=bucket_name, Key=key1, Body='foooverwrite')
alt_client2 = get_alt_client()
check_access_denied(alt_client2.get_object, Bucket=bucket_name, Key=key2)
check_access_denied(alt_client2.put_object, Bucket=bucket_name, Key=key2, Body='baroverwrite')
alt_client3 = get_alt_client()
check_access_denied(alt_client3.list_objects_v2, Bucket=bucket_name)
check_access_denied(alt_client3.put_object, Bucket=bucket_name, Key=newkey, Body='newcontent')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set bucket/object acls: public-read/private')
@attr(assertion='public can only list the bucket')
def test_access_bucket_publicread_object_private():
bucket_name, key1, key2, newkey = _setup_access(bucket_acl='public-read', object_acl='private')
alt_client = get_alt_client()
# a should be private, b gets default (private)
check_access_denied(alt_client.get_object, Bucket=bucket_name, Key=key1)
check_access_denied(alt_client.put_object, Bucket=bucket_name, Key=key1, Body='barcontent')
alt_client2 = get_alt_client()
check_access_denied(alt_client2.get_object, Bucket=bucket_name, Key=key2)
check_access_denied(alt_client2.put_object, Bucket=bucket_name, Key=key2, Body='baroverwrite')
alt_client3 = get_alt_client()
objs = get_objects_list(bucket=bucket_name, client=alt_client3)
eq(objs, ['bar', 'foo'])
check_access_denied(alt_client3.put_object, Bucket=bucket_name, Key=newkey, Body='newcontent')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set bucket/object acls: public-read/public-read')
@attr(assertion='public can read readable objects and list bucket')
def test_access_bucket_publicread_object_publicread():
bucket_name, key1, key2, newkey = _setup_access(bucket_acl='public-read', object_acl='public-read')
alt_client = get_alt_client()
response = alt_client.get_object(Bucket=bucket_name, Key=key1)
# a should be public-read, b gets default (private)
body = _get_body(response)
eq(body, 'foocontent')
check_access_denied(alt_client.put_object, Bucket=bucket_name, Key=key1, Body='foooverwrite')
alt_client2 = get_alt_client()
check_access_denied(alt_client2.get_object, Bucket=bucket_name, Key=key2)
check_access_denied(alt_client2.put_object, Bucket=bucket_name, Key=key2, Body='baroverwrite')
alt_client3 = get_alt_client()
objs = get_objects_list(bucket=bucket_name, client=alt_client3)
eq(objs, ['bar', 'foo'])
check_access_denied(alt_client3.put_object, Bucket=bucket_name, Key=newkey, Body='newcontent')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set bucket/object acls: public-read/public-read-write')
@attr(assertion='public can read readable objects and list bucket')
def test_access_bucket_publicread_object_publicreadwrite():
bucket_name, key1, key2, newkey = _setup_access(bucket_acl='public-read', object_acl='public-read-write')
alt_client = get_alt_client()
response = alt_client.get_object(Bucket=bucket_name, Key=key1)
body = _get_body(response)
# a should be public-read-only ... because it is in a r/o bucket
# b gets default (private)
eq(body, 'foocontent')
check_access_denied(alt_client.put_object, Bucket=bucket_name, Key=key1, Body='foooverwrite')
alt_client2 = get_alt_client()
check_access_denied(alt_client2.get_object, Bucket=bucket_name, Key=key2)
check_access_denied(alt_client2.put_object, Bucket=bucket_name, Key=key2, Body='baroverwrite')
alt_client3 = get_alt_client()
objs = get_objects_list(bucket=bucket_name, client=alt_client3)
eq(objs, ['bar', 'foo'])
check_access_denied(alt_client3.put_object, Bucket=bucket_name, Key=newkey, Body='newcontent')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set bucket/object acls: public-read-write/private')
@attr(assertion='private objects cannot be read, but can be overwritten')
def test_access_bucket_publicreadwrite_object_private():
bucket_name, key1, key2, newkey = _setup_access(bucket_acl='public-read-write', object_acl='private')
alt_client = get_alt_client()
# a should be private, b gets default (private)
check_access_denied(alt_client.get_object, Bucket=bucket_name, Key=key1)
alt_client.put_object(Bucket=bucket_name, Key=key1, Body='barcontent')
check_access_denied(alt_client.get_object, Bucket=bucket_name, Key=key2)
alt_client.put_object(Bucket=bucket_name, Key=key2, Body='baroverwrite')
objs = get_objects_list(bucket=bucket_name, client=alt_client)
eq(objs, ['bar', 'foo'])
alt_client.put_object(Bucket=bucket_name, Key=newkey, Body='newcontent')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set bucket/object acls: public-read-write/public-read')
@attr(assertion='private objects cannot be read, but can be overwritten')
def test_access_bucket_publicreadwrite_object_publicread():
bucket_name, key1, key2, newkey = _setup_access(bucket_acl='public-read-write', object_acl='public-read')
alt_client = get_alt_client()
# a should be public-read, b gets default (private)
response = alt_client.get_object(Bucket=bucket_name, Key=key1)
body = _get_body(response)
eq(body, 'foocontent')
alt_client.put_object(Bucket=bucket_name, Key=key1, Body='barcontent')
check_access_denied(alt_client.get_object, Bucket=bucket_name, Key=key2)
alt_client.put_object(Bucket=bucket_name, Key=key2, Body='baroverwrite')
objs = get_objects_list(bucket=bucket_name, client=alt_client)
eq(objs, ['bar', 'foo'])
alt_client.put_object(Bucket=bucket_name, Key=newkey, Body='newcontent')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set bucket/object acls: public-read-write/public-read-write')
@attr(assertion='private objects cannot be read, but can be overwritten')
def test_access_bucket_publicreadwrite_object_publicreadwrite():
bucket_name, key1, key2, newkey = _setup_access(bucket_acl='public-read-write', object_acl='public-read-write')
alt_client = get_alt_client()
response = alt_client.get_object(Bucket=bucket_name, Key=key1)
body = _get_body(response)
# a should be public-read-write, b gets default (private)
eq(body, 'foocontent')
alt_client.put_object(Bucket=bucket_name, Key=key1, Body='foooverwrite')
check_access_denied(alt_client.get_object, Bucket=bucket_name, Key=key2)
alt_client.put_object(Bucket=bucket_name, Key=key2, Body='baroverwrite')
objs = get_objects_list(bucket=bucket_name, client=alt_client)
eq(objs, ['bar', 'foo'])
alt_client.put_object(Bucket=bucket_name, Key=newkey, Body='newcontent')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all buckets')
@attr(assertion='returns all expected buckets')
def test_buckets_create_then_list():
client = get_client()
bucket_names = []
for i in range(5):
bucket_name = get_new_bucket_name()
bucket_names.append(bucket_name)
for name in bucket_names:
client.create_bucket(Bucket=name)
response = client.list_buckets()
bucket_dicts = response['Buckets']
    buckets_list = get_buckets_list()
for name in bucket_names:
if name not in buckets_list:
            raise RuntimeError("S3 implementation's GET on Service did not return bucket we created: %r" % name)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all buckets (anonymous)')
@attr(assertion='succeeds')
@attr('fails_on_aws')
def test_list_buckets_anonymous():
# Get a connection with bad authorization, then change it to be our new Anonymous auth mechanism,
# emulating standard HTTP access.
#
# While it may have been possible to use httplib directly, doing it this way takes care of also
# allowing us to vary the calling format in testing.
unauthenticated_client = get_unauthenticated_client()
response = unauthenticated_client.list_buckets()
eq(len(response['Buckets']), 0)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all buckets (bad auth)')
@attr(assertion='fails 403')
def test_list_buckets_invalid_auth():
bad_auth_client = get_bad_auth_client()
e = assert_raises(ClientError, bad_auth_client.list_buckets)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 403)
eq(error_code, 'InvalidAccessKeyId')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all buckets (bad auth)')
@attr(assertion='fails 403')
def test_list_buckets_bad_auth():
main_access_key = get_main_aws_access_key()
bad_auth_client = get_bad_auth_client(aws_access_key_id=main_access_key)
e = assert_raises(ClientError, bad_auth_client.list_buckets)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 403)
eq(error_code, 'SignatureDoesNotMatch')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create bucket')
@attr(assertion='name starts with alphabetic works')
# this test goes outside the user-configured prefix because it needs to
# control the initial character of the bucket name
@nose.with_setup(
setup=lambda: nuke_prefixed_buckets(prefix='a'+get_prefix()),
teardown=lambda: nuke_prefixed_buckets(prefix='a'+get_prefix()),
)
def test_bucket_create_naming_good_starts_alpha():
check_good_bucket_name('foo', _prefix='a'+get_prefix())
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create bucket')
@attr(assertion='name starts with numeric works')
# this test goes outside the user-configured prefix because it needs to
# control the initial character of the bucket name
@nose.with_setup(
setup=lambda: nuke_prefixed_buckets(prefix='0'+get_prefix()),
teardown=lambda: nuke_prefixed_buckets(prefix='0'+get_prefix()),
)
def test_bucket_create_naming_good_starts_digit():
check_good_bucket_name('foo', _prefix='0'+get_prefix())
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create bucket')
@attr(assertion='name containing dot works')
def test_bucket_create_naming_good_contains_period():
check_good_bucket_name('aaa.111')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create bucket')
@attr(assertion='name containing hyphen works')
def test_bucket_create_naming_good_contains_hyphen():
check_good_bucket_name('aaa-111')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create bucket with objects and recreate it')
@attr(assertion='bucket recreation not overriding index')
def test_bucket_recreate_not_overriding():
key_names = ['mykey1', 'mykey2']
bucket_name = _create_objects(keys=key_names)
objs_list = get_objects_list(bucket_name)
eq(key_names, objs_list)
client = get_client()
client.create_bucket(Bucket=bucket_name)
objs_list = get_objects_list(bucket_name)
eq(key_names, objs_list)
@attr(resource='object')
@attr(method='put')
@attr(operation='create and list objects with special names')
@attr(assertion='special names work')
def test_bucket_create_special_key_names():
key_names = [
' ',
'"',
'$',
'%',
'&',
'\'',
'<',
'>',
'_',
'_ ',
'_ _',
'__',
]
bucket_name = _create_objects(keys=key_names)
objs_list = get_objects_list(bucket_name)
eq(key_names, objs_list)
client = get_client()
for name in key_names:
eq((name in objs_list), True)
response = client.get_object(Bucket=bucket_name, Key=name)
body = _get_body(response)
eq(name, body)
client.put_object_acl(Bucket=bucket_name, Key=name, ACL='private')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='create and list objects with underscore as prefix, list using prefix')
@attr(assertion='listing works correctly')
def test_bucket_list_special_prefix():
key_names = ['_bla/1', '_bla/2', '_bla/3', '_bla/4', 'abcd']
bucket_name = _create_objects(keys=key_names)
objs_list = get_objects_list(bucket_name)
eq(len(objs_list), 5)
objs_list = get_objects_list(bucket_name, prefix='_bla/')
eq(len(objs_list), 4)
@attr(resource='object')
@attr(method='put')
@attr(operation='copy zero sized object in same bucket')
@attr(assertion='works')
def test_object_copy_zero_size():
key = 'foo123bar'
bucket_name = _create_objects(keys=[key])
fp_a = FakeWriteFile(0, '')
client = get_client()
client.put_object(Bucket=bucket_name, Key=key, Body=fp_a)
copy_source = {'Bucket': bucket_name, 'Key': key}
client.copy(copy_source, bucket_name, 'bar321foo')
response = client.get_object(Bucket=bucket_name, Key='bar321foo')
eq(response['ContentLength'], 0)
@attr(resource='object')
@attr(method='put')
@attr(operation='copy object in same bucket')
@attr(assertion='works')
def test_object_copy_same_bucket():
bucket_name = get_new_bucket()
client = get_client()
client.put_object(Bucket=bucket_name, Key='foo123bar', Body='foo')
copy_source = {'Bucket': bucket_name, 'Key': 'foo123bar'}
client.copy(copy_source, bucket_name, 'bar321foo')
response = client.get_object(Bucket=bucket_name, Key='bar321foo')
body = _get_body(response)
eq('foo', body)
@attr(resource='object')
@attr(method='put')
@attr(operation='copy object with content-type')
@attr(assertion='works')
def test_object_copy_verify_contenttype():
bucket_name = get_new_bucket()
client = get_client()
content_type = 'text/bla'
client.put_object(Bucket=bucket_name, ContentType=content_type, Key='foo123bar', Body='foo')
copy_source = {'Bucket': bucket_name, 'Key': 'foo123bar'}
client.copy(copy_source, bucket_name, 'bar321foo')
response = client.get_object(Bucket=bucket_name, Key='bar321foo')
body = _get_body(response)
eq('foo', body)
response_content_type = response['ContentType']
eq(response_content_type, content_type)
@attr(resource='object')
@attr(method='put')
@attr(operation='copy object to itself')
@attr(assertion='fails')
def test_object_copy_to_itself():
bucket_name = get_new_bucket()
client = get_client()
client.put_object(Bucket=bucket_name, Key='foo123bar', Body='foo')
copy_source = {'Bucket': bucket_name, 'Key': 'foo123bar'}
e = assert_raises(ClientError, client.copy, copy_source, bucket_name, 'foo123bar')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
eq(error_code, 'InvalidRequest')
@attr(resource='object')
@attr(method='put')
@attr(operation='modify object metadata by copying')
@attr(assertion='fails')
def test_object_copy_to_itself_with_metadata():
bucket_name = get_new_bucket()
client = get_client()
client.put_object(Bucket=bucket_name, Key='foo123bar', Body='foo')
copy_source = {'Bucket': bucket_name, 'Key': 'foo123bar'}
metadata = {'foo': 'bar'}
client.copy_object(Bucket=bucket_name, CopySource=copy_source, Key='foo123bar', Metadata=metadata, MetadataDirective='REPLACE')
response = client.get_object(Bucket=bucket_name, Key='foo123bar')
eq(response['Metadata'], metadata)
@attr(resource='object')
@attr(method='put')
@attr(operation='copy object from different bucket')
@attr(assertion='works')
def test_object_copy_diff_bucket():
bucket_name1 = get_new_bucket()
bucket_name2 = get_new_bucket()
client = get_client()
client.put_object(Bucket=bucket_name1, Key='foo123bar', Body='foo')
copy_source = {'Bucket': bucket_name1, 'Key': 'foo123bar'}
client.copy(copy_source, bucket_name2, 'bar321foo')
response = client.get_object(Bucket=bucket_name2, Key='bar321foo')
body = _get_body(response)
eq('foo', body)
@attr(resource='object')
@attr(method='put')
@attr(operation='copy to an inaccessible bucket')
@attr(assertion='fails w/AttributeError')
def test_object_copy_not_owned_bucket():
client = get_client()
alt_client = get_alt_client()
bucket_name1 = get_new_bucket_name()
bucket_name2 = get_new_bucket_name()
client.create_bucket(Bucket=bucket_name1)
alt_client.create_bucket(Bucket=bucket_name2)
client.put_object(Bucket=bucket_name1, Key='foo123bar', Body='foo')
copy_source = {'Bucket': bucket_name1, 'Key': 'foo123bar'}
e = assert_raises(ClientError, alt_client.copy, copy_source, bucket_name2, 'bar321foo')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 403)
@attr(resource='object')
@attr(method='put')
@attr(operation='copy a non-owned object in a non-owned bucket, but with perms')
@attr(assertion='works')
def test_object_copy_not_owned_object_bucket():
client = get_client()
alt_client = get_alt_client()
bucket_name = get_new_bucket_name()
client.create_bucket(Bucket=bucket_name)
client.put_object(Bucket=bucket_name, Key='foo123bar', Body='foo')
alt_user_id = get_alt_user_id()
grant = {'Grantee': {'ID': alt_user_id, 'Type': 'CanonicalUser' }, 'Permission': 'FULL_CONTROL'}
grants = add_obj_user_grant(bucket_name, 'foo123bar', grant)
client.put_object_acl(Bucket=bucket_name, Key='foo123bar', AccessControlPolicy=grants)
grant = add_bucket_user_grant(bucket_name, grant)
client.put_bucket_acl(Bucket=bucket_name, AccessControlPolicy=grant)
alt_client.get_object(Bucket=bucket_name, Key='foo123bar')
copy_source = {'Bucket': bucket_name, 'Key': 'foo123bar'}
alt_client.copy(copy_source, bucket_name, 'bar321foo')
@attr(resource='object')
@attr(method='put')
@attr(operation='copy object and change acl')
@attr(assertion='works')
def test_object_copy_canned_acl():
bucket_name = get_new_bucket()
client = get_client()
alt_client = get_alt_client()
client.put_object(Bucket=bucket_name, Key='foo123bar', Body='foo')
copy_source = {'Bucket': bucket_name, 'Key': 'foo123bar'}
client.copy_object(Bucket=bucket_name, CopySource=copy_source, Key='bar321foo', ACL='public-read')
# check ACL is applied by doing GET from another user
alt_client.get_object(Bucket=bucket_name, Key='bar321foo')
metadata={'abc': 'def'}
copy_source = {'Bucket': bucket_name, 'Key': 'bar321foo'}
client.copy_object(ACL='public-read', Bucket=bucket_name, CopySource=copy_source, Key='foo123bar', Metadata=metadata, MetadataDirective='REPLACE')
# check ACL is applied by doing GET from another user
alt_client.get_object(Bucket=bucket_name, Key='foo123bar')
@attr(resource='object')
@attr(method='put')
@attr(operation='copy object and retain metadata')
def test_object_copy_retaining_metadata():
for size in [3, 1024 * 1024]:
bucket_name = get_new_bucket()
client = get_client()
content_type = 'audio/ogg'
metadata = {'key1': 'value1', 'key2': 'value2'}
client.put_object(Bucket=bucket_name, Key='foo123bar', Metadata=metadata, ContentType=content_type, Body=bytearray(size))
copy_source = {'Bucket': bucket_name, 'Key': 'foo123bar'}
client.copy_object(Bucket=bucket_name, CopySource=copy_source, Key='bar321foo')
response = client.get_object(Bucket=bucket_name, Key='bar321foo')
eq(content_type, response['ContentType'])
eq(metadata, response['Metadata'])
body = _get_body(response)
eq(size, response['ContentLength'])
@attr(resource='object')
@attr(method='put')
@attr(operation='copy object and replace metadata')
def test_object_copy_replacing_metadata():
for size in [3, 1024 * 1024]:
bucket_name = get_new_bucket()
client = get_client()
content_type = 'audio/ogg'
metadata = {'key1': 'value1', 'key2': 'value2'}
client.put_object(Bucket=bucket_name, Key='foo123bar', Metadata=metadata, ContentType=content_type, Body=bytearray(size))
metadata = {'key3': 'value3', 'key2': 'value2'}
content_type = 'audio/mpeg'
copy_source = {'Bucket': bucket_name, 'Key': 'foo123bar'}
client.copy_object(Bucket=bucket_name, CopySource=copy_source, Key='bar321foo', Metadata=metadata, MetadataDirective='REPLACE', ContentType=content_type)
response = client.get_object(Bucket=bucket_name, Key='bar321foo')
eq(content_type, response['ContentType'])
eq(metadata, response['Metadata'])
eq(size, response['ContentLength'])
@attr(resource='object')
@attr(method='put')
@attr(operation='copy from non-existent bucket')
def test_object_copy_bucket_not_found():
bucket_name = get_new_bucket()
client = get_client()
copy_source = {'Bucket': bucket_name + "-fake", 'Key': 'foo123bar'}
e = assert_raises(ClientError, client.copy, copy_source, bucket_name, 'bar321foo')
status = _get_status(e.response)
eq(status, 404)
@attr(resource='object')
@attr(method='put')
@attr(operation='copy from non-existent object')
def test_object_copy_key_not_found():
bucket_name = get_new_bucket()
client = get_client()
copy_source = {'Bucket': bucket_name, 'Key': 'foo123bar'}
e = assert_raises(ClientError, client.copy, copy_source, bucket_name, 'bar321foo')
status = _get_status(e.response)
eq(status, 404)
@attr(resource='object')
@attr(method='put')
@attr(operation='copy object to/from versioned bucket')
@attr(assertion='works')
@attr('versioning')
def test_object_copy_versioned_bucket():
bucket_name = get_new_bucket()
client = get_client()
check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
size = 1*5
data = bytearray(size)
data_str = data.decode()
key1 = 'foo123bar'
client.put_object(Bucket=bucket_name, Key=key1, Body=data)
response = client.get_object(Bucket=bucket_name, Key=key1)
version_id = response['VersionId']
# copy object in the same bucket
copy_source = {'Bucket': bucket_name, 'Key': key1, 'VersionId': version_id}
key2 = 'bar321foo'
client.copy_object(Bucket=bucket_name, CopySource=copy_source, Key=key2)
response = client.get_object(Bucket=bucket_name, Key=key2)
body = _get_body(response)
eq(data_str, body)
eq(size, response['ContentLength'])
# second copy
version_id2 = response['VersionId']
copy_source = {'Bucket': bucket_name, 'Key': key2, 'VersionId': version_id2}
key3 = 'bar321foo2'
client.copy_object(Bucket=bucket_name, CopySource=copy_source, Key=key3)
response = client.get_object(Bucket=bucket_name, Key=key3)
body = _get_body(response)
eq(data_str, body)
eq(size, response['ContentLength'])
# copy to another versioned bucket
bucket_name2 = get_new_bucket()
check_configure_versioning_retry(bucket_name2, "Enabled", "Enabled")
copy_source = {'Bucket': bucket_name, 'Key': key1, 'VersionId': version_id}
key4 = 'bar321foo3'
client.copy_object(Bucket=bucket_name2, CopySource=copy_source, Key=key4)
response = client.get_object(Bucket=bucket_name2, Key=key4)
body = _get_body(response)
eq(data_str, body)
eq(size, response['ContentLength'])
# copy to another non versioned bucket
bucket_name3 = get_new_bucket()
copy_source = {'Bucket': bucket_name, 'Key': key1, 'VersionId': version_id}
key5 = 'bar321foo4'
client.copy_object(Bucket=bucket_name3, CopySource=copy_source, Key=key5)
response = client.get_object(Bucket=bucket_name3, Key=key5)
body = _get_body(response)
eq(data_str, body)
eq(size, response['ContentLength'])
# copy from a non versioned bucket
copy_source = {'Bucket': bucket_name3, 'Key': key5}
key6 = 'foo123bar2'
client.copy_object(Bucket=bucket_name, CopySource=copy_source, Key=key6)
response = client.get_object(Bucket=bucket_name, Key=key6)
body = _get_body(response)
eq(data_str, body)
eq(size, response['ContentLength'])
@attr(resource='object')
@attr(method='put')
@attr(operation='copy object to/from versioned bucket with url-encoded name')
@attr(assertion='works')
@attr('versioning')
def test_object_copy_versioned_url_encoding():
bucket = get_new_bucket_resource()
check_configure_versioning_retry(bucket.name, "Enabled", "Enabled")
src_key = 'foo?bar'
src = bucket.put_object(Key=src_key)
src.load() # HEAD request tests that the key exists
# copy object in the same bucket
dst_key = 'bar&foo'
dst = bucket.Object(dst_key)
dst.copy_from(CopySource={'Bucket': src.bucket_name, 'Key': src.key, 'VersionId': src.version_id})
dst.load() # HEAD request tests that the key exists
def generate_random(size, part_size=5*1024*1024):
"""
    Generate the specified number of bytes of random data.
(actually each MB is a repetition of the first KB)
"""
chunk = 1024
allowed = string.ascii_letters
for x in range(0, size, part_size):
strpart = ''.join([allowed[random.randint(0, len(allowed) - 1)] for _ in range(chunk)])
s = ''
left = size - x
this_part_size = min(left, part_size)
for y in range(this_part_size // chunk):
s = s + strpart
if this_part_size > len(s):
s = s + strpart[0:this_part_size - len(s)]
yield s
if (x == size):
return
def _multipart_upload(bucket_name, key, size, part_size=5*1024*1024, client=None, content_type=None, metadata=None, resend_parts=[]):
"""
    generate a multi-part upload for a random file of specified size,
    if requested, re-send some of the parts,
    return the upload id, the payload, and the part descriptors
"""
if client == None:
client = get_client()
if content_type == None and metadata == None:
response = client.create_multipart_upload(Bucket=bucket_name, Key=key)
else:
response = client.create_multipart_upload(Bucket=bucket_name, Key=key, Metadata=metadata, ContentType=content_type)
upload_id = response['UploadId']
s = ''
parts = []
for i, part in enumerate(generate_random(size, part_size)):
# part_num is necessary because PartNumber for upload_part and in parts must start at 1 and i starts at 0
part_num = i+1
s += part
response = client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key, PartNumber=part_num, Body=part)
parts.append({'ETag': response['ETag'].strip('"'), 'PartNumber': part_num})
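        # resend_parts holds zero-based part indices that are uploaded a second
        # time, exercising the server's handling of re-sent parts.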
if i in resend_parts:
client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key, PartNumber=part_num, Body=part)
return (upload_id, s, parts)
@attr(resource='object')
@attr(method='put')
@attr(operation='test copy object of a multipart upload')
@attr(assertion='successful')
@attr('versioning')
def test_object_copy_versioning_multipart_upload():
bucket_name = get_new_bucket()
client = get_client()
check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
key1 = "srcmultipart"
key1_metadata = {'foo': 'bar'}
content_type = 'text/bla'
objlen = 30 * 1024 * 1024
(upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key1, size=objlen, content_type=content_type, metadata=key1_metadata)
client.complete_multipart_upload(Bucket=bucket_name, Key=key1, UploadId=upload_id, MultipartUpload={'Parts': parts})
response = client.get_object(Bucket=bucket_name, Key=key1)
key1_size = response['ContentLength']
version_id = response['VersionId']
# copy object in the same bucket
copy_source = {'Bucket': bucket_name, 'Key': key1, 'VersionId': version_id}
key2 = 'dstmultipart'
client.copy_object(Bucket=bucket_name, CopySource=copy_source, Key=key2)
response = client.get_object(Bucket=bucket_name, Key=key2)
version_id2 = response['VersionId']
body = _get_body(response)
eq(data, body)
eq(key1_size, response['ContentLength'])
eq(key1_metadata, response['Metadata'])
eq(content_type, response['ContentType'])
# second copy
copy_source = {'Bucket': bucket_name, 'Key': key2, 'VersionId': version_id2}
key3 = 'dstmultipart2'
client.copy_object(Bucket=bucket_name, CopySource=copy_source, Key=key3)
response = client.get_object(Bucket=bucket_name, Key=key3)
body = _get_body(response)
eq(data, body)
eq(key1_size, response['ContentLength'])
eq(key1_metadata, response['Metadata'])
eq(content_type, response['ContentType'])
# copy to another versioned bucket
bucket_name2 = get_new_bucket()
check_configure_versioning_retry(bucket_name2, "Enabled", "Enabled")
copy_source = {'Bucket': bucket_name, 'Key': key1, 'VersionId': version_id}
key4 = 'dstmultipart3'
client.copy_object(Bucket=bucket_name2, CopySource=copy_source, Key=key4)
response = client.get_object(Bucket=bucket_name2, Key=key4)
body = _get_body(response)
eq(data, body)
eq(key1_size, response['ContentLength'])
eq(key1_metadata, response['Metadata'])
eq(content_type, response['ContentType'])
# copy to another non versioned bucket
bucket_name3 = get_new_bucket()
copy_source = {'Bucket': bucket_name, 'Key': key1, 'VersionId': version_id}
key5 = 'dstmultipart4'
client.copy_object(Bucket=bucket_name3, CopySource=copy_source, Key=key5)
response = client.get_object(Bucket=bucket_name3, Key=key5)
body = _get_body(response)
eq(data, body)
eq(key1_size, response['ContentLength'])
eq(key1_metadata, response['Metadata'])
eq(content_type, response['ContentType'])
# copy from a non versioned bucket
copy_source = {'Bucket': bucket_name3, 'Key': key5}
key6 = 'dstmultipart5'
client.copy_object(Bucket=bucket_name3, CopySource=copy_source, Key=key6)
response = client.get_object(Bucket=bucket_name3, Key=key6)
body = _get_body(response)
eq(data, body)
eq(key1_size, response['ContentLength'])
eq(key1_metadata, response['Metadata'])
eq(content_type, response['ContentType'])
@attr(resource='object')
@attr(method='put')
@attr(operation='check multipart upload without parts')
def test_multipart_upload_empty():
bucket_name = get_new_bucket()
client = get_client()
key1 = "mymultipart"
objlen = 0
(upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key1, size=objlen)
e = assert_raises(ClientError, client.complete_multipart_upload,Bucket=bucket_name, Key=key1, UploadId=upload_id)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
eq(error_code, 'MalformedXML')
@attr(resource='object')
@attr(method='put')
@attr(operation='check multipart uploads with single small part')
def test_multipart_upload_small():
bucket_name = get_new_bucket()
client = get_client()
key1 = "mymultipart"
objlen = 1
(upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key1, size=objlen)
response = client.complete_multipart_upload(Bucket=bucket_name, Key=key1, UploadId=upload_id, MultipartUpload={'Parts': parts})
response = client.get_object(Bucket=bucket_name, Key=key1)
eq(response['ContentLength'], objlen)
def _create_key_with_random_content(keyname, size=7*1024*1024, bucket_name=None, client=None):
if bucket_name is None:
bucket_name = get_new_bucket()
if client == None:
client = get_client()
data_str = str(next(generate_random(size, size)))
data = bytes(data_str, 'utf-8')
client.put_object(Bucket=bucket_name, Key=keyname, Body=data)
return bucket_name
def _multipart_copy(src_bucket_name, src_key, dest_bucket_name, dest_key, size, client=None, part_size=5*1024*1024, version_id=None):
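    # Copy src_key to dest_key with UploadPartCopy, one part per part_size byte
    # range of the source; returns (upload_id, parts) for complete_multipart_upload.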
if(client == None):
client = get_client()
response = client.create_multipart_upload(Bucket=dest_bucket_name, Key=dest_key)
upload_id = response['UploadId']
if(version_id == None):
copy_source = {'Bucket': src_bucket_name, 'Key': src_key}
else:
copy_source = {'Bucket': src_bucket_name, 'Key': src_key, 'VersionId': version_id}
parts = []
i = 0
for start_offset in range(0, size, part_size):
end_offset = min(start_offset + part_size - 1, size - 1)
part_num = i+1
copy_source_range = 'bytes={start}-{end}'.format(start=start_offset, end=end_offset)
response = client.upload_part_copy(Bucket=dest_bucket_name, Key=dest_key, CopySource=copy_source, PartNumber=part_num, UploadId=upload_id, CopySourceRange=copy_source_range)
parts.append({'ETag': response['CopyPartResult']['ETag'], 'PartNumber': part_num})
i = i+1
return (upload_id, parts)
def _check_key_content(src_key, src_bucket_name, dest_key, dest_bucket_name, version_id=None):
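    # The destination may be a partial copy, so compare its bytes against the
    # matching leading byte range of the source object.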
client = get_client()
if(version_id == None):
response = client.get_object(Bucket=src_bucket_name, Key=src_key)
else:
response = client.get_object(Bucket=src_bucket_name, Key=src_key, VersionId=version_id)
src_size = response['ContentLength']
response = client.get_object(Bucket=dest_bucket_name, Key=dest_key)
dest_size = response['ContentLength']
dest_data = _get_body(response)
assert(src_size >= dest_size)
r = 'bytes={s}-{e}'.format(s=0, e=dest_size-1)
if(version_id == None):
response = client.get_object(Bucket=src_bucket_name, Key=src_key, Range=r)
else:
response = client.get_object(Bucket=src_bucket_name, Key=src_key, Range=r, VersionId=version_id)
src_data = _get_body(response)
eq(src_data, dest_data)
@attr(resource='object')
@attr(method='put')
@attr(operation='check multipart copies with single small part')
def test_multipart_copy_small():
src_key = 'foo'
src_bucket_name = _create_key_with_random_content(src_key)
dest_bucket_name = get_new_bucket()
dest_key = "mymultipart"
size = 1
client = get_client()
(upload_id, parts) = _multipart_copy(src_bucket_name, src_key, dest_bucket_name, dest_key, size)
client.complete_multipart_upload(Bucket=dest_bucket_name, Key=dest_key, UploadId=upload_id, MultipartUpload={'Parts': parts})
response = client.get_object(Bucket=dest_bucket_name, Key=dest_key)
eq(size, response['ContentLength'])
_check_key_content(src_key, src_bucket_name, dest_key, dest_bucket_name)
@attr(resource='object')
@attr(method='put')
@attr(operation='check multipart copies with an invalid range')
def test_multipart_copy_invalid_range():
client = get_client()
src_key = 'source'
src_bucket_name = _create_key_with_random_content(src_key, size=5)
response = client.create_multipart_upload(Bucket=src_bucket_name, Key='dest')
upload_id = response['UploadId']
copy_source = {'Bucket': src_bucket_name, 'Key': src_key}
copy_source_range = 'bytes={start}-{end}'.format(start=0, end=21)
e = assert_raises(ClientError, client.upload_part_copy,Bucket=src_bucket_name, Key='dest', UploadId=upload_id, CopySource=copy_source, CopySourceRange=copy_source_range, PartNumber=1)
status, error_code = _get_status_and_error_code(e.response)
valid_status = [400, 416]
    if status not in valid_status:
raise AssertionError("Invalid response " + str(status))
eq(error_code, 'InvalidRange')
@attr(resource='object')
@attr(method='put')
@attr(operation='check multipart copy with an improperly formatted range')
# TODO: remove fails_on_rgw when https://tracker.ceph.com/issues/40795 is resolved
@attr('fails_on_rgw')
def test_multipart_copy_improper_range():
client = get_client()
src_key = 'source'
src_bucket_name = _create_key_with_random_content(src_key, size=5)
response = client.create_multipart_upload(
Bucket=src_bucket_name, Key='dest')
upload_id = response['UploadId']
copy_source = {'Bucket': src_bucket_name, 'Key': src_key}
test_ranges = ['{start}-{end}'.format(start=0, end=2),
'bytes={start}'.format(start=0),
'bytes=hello-world',
'bytes=0-bar',
'bytes=hello-',
'bytes=0-2,3-5']
for test_range in test_ranges:
e = assert_raises(ClientError, client.upload_part_copy,
Bucket=src_bucket_name, Key='dest',
UploadId=upload_id,
CopySource=copy_source,
CopySourceRange=test_range,
PartNumber=1)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
eq(error_code, 'InvalidArgument')
@attr(resource='object')
@attr(method='put')
@attr(operation='check multipart copies without x-amz-copy-source-range')
def test_multipart_copy_without_range():
client = get_client()
src_key = 'source'
src_bucket_name = _create_key_with_random_content(src_key, size=10)
dest_bucket_name = get_new_bucket_name()
get_new_bucket(name=dest_bucket_name)
dest_key = "mymultipartcopy"
response = client.create_multipart_upload(Bucket=dest_bucket_name, Key=dest_key)
upload_id = response['UploadId']
parts = []
copy_source = {'Bucket': src_bucket_name, 'Key': src_key}
part_num = 1
    # CopySourceRange is intentionally omitted, so the entire source object is copied as a single part.
    response = client.upload_part_copy(Bucket=dest_bucket_name, Key=dest_key, CopySource=copy_source, PartNumber=part_num, UploadId=upload_id)
parts.append({'ETag': response['CopyPartResult']['ETag'], 'PartNumber': part_num})
client.complete_multipart_upload(Bucket=dest_bucket_name, Key=dest_key, UploadId=upload_id, MultipartUpload={'Parts': parts})
response = client.get_object(Bucket=dest_bucket_name, Key=dest_key)
eq(response['ContentLength'], 10)
_check_key_content(src_key, src_bucket_name, dest_key, dest_bucket_name)
@attr(resource='object')
@attr(method='put')
@attr(operation='check multipart copies with single small part')
def test_multipart_copy_special_names():
src_bucket_name = get_new_bucket()
dest_bucket_name = get_new_bucket()
dest_key = "mymultipart"
size = 1
client = get_client()
for src_key in (' ', '_', '__', '?versionId'):
_create_key_with_random_content(src_key, bucket_name=src_bucket_name)
(upload_id, parts) = _multipart_copy(src_bucket_name, src_key, dest_bucket_name, dest_key, size)
response = client.complete_multipart_upload(Bucket=dest_bucket_name, Key=dest_key, UploadId=upload_id, MultipartUpload={'Parts': parts})
response = client.get_object(Bucket=dest_bucket_name, Key=dest_key)
eq(size, response['ContentLength'])
_check_key_content(src_key, src_bucket_name, dest_key, dest_bucket_name)
def _check_content_using_range(key, bucket_name, data, step):
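    # Read the object back in ranged GETs of at most `step` bytes and check
    # each slice against the expected data.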
client = get_client()
response = client.get_object(Bucket=bucket_name, Key=key)
size = response['ContentLength']
for ofs in range(0, size, step):
toread = size - ofs
if toread > step:
toread = step
end = ofs + toread - 1
r = 'bytes={s}-{e}'.format(s=ofs, e=end)
response = client.get_object(Bucket=bucket_name, Key=key, Range=r)
eq(response['ContentLength'], toread)
body = _get_body(response)
eq(body, data[ofs:end+1])
@attr(resource='object')
@attr(method='put')
@attr(operation='complete multi-part upload')
@attr(assertion='successful')
@attr('fails_on_aws')
def test_multipart_upload():
bucket_name = get_new_bucket()
key="mymultipart"
content_type='text/bla'
objlen = 30 * 1024 * 1024
metadata = {'foo': 'bar'}
client = get_client()
(upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen, content_type=content_type, metadata=metadata)
client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
response = client.head_bucket(Bucket=bucket_name)
rgw_bytes_used = int(response['ResponseMetadata']['HTTPHeaders'].get('x-rgw-bytes-used', objlen))
eq(rgw_bytes_used, objlen)
rgw_object_count = int(response['ResponseMetadata']['HTTPHeaders'].get('x-rgw-object-count', 1))
eq(rgw_object_count, 1)
response = client.get_object(Bucket=bucket_name, Key=key)
eq(response['ContentType'], content_type)
eq(response['Metadata'], metadata)
body = _get_body(response)
eq(len(body), response['ContentLength'])
eq(body, data)
_check_content_using_range(key, bucket_name, data, 1000000)
_check_content_using_range(key, bucket_name, data, 10000000)
def check_versioning(bucket_name, status):
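    # GetBucketVersioning returns no 'Status' field until versioning has been
    # configured at least once, hence the KeyError path below.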
client = get_client()
try:
response = client.get_bucket_versioning(Bucket=bucket_name)
eq(response['Status'], status)
except KeyError:
eq(status, None)
# amazon is eventual consistent, retry a bit if failed
def check_configure_versioning_retry(bucket_name, status, expected_string):
client = get_client()
client.put_bucket_versioning(Bucket=bucket_name, VersioningConfiguration={'Status': status})
read_status = None
for i in range(5):
try:
response = client.get_bucket_versioning(Bucket=bucket_name)
read_status = response['Status']
except KeyError:
read_status = None
if (expected_string == read_status):
break
time.sleep(1)
eq(expected_string, read_status)
@attr(resource='object')
@attr(method='put')
@attr(operation='check multipart copies of versioned objects')
@attr('versioning')
def test_multipart_copy_versioned():
src_bucket_name = get_new_bucket()
dest_bucket_name = get_new_bucket()
dest_key = "mymultipart"
check_versioning(src_bucket_name, None)
src_key = 'foo'
check_configure_versioning_retry(src_bucket_name, "Enabled", "Enabled")
size = 15 * 1024 * 1024
_create_key_with_random_content(src_key, size=size, bucket_name=src_bucket_name)
_create_key_with_random_content(src_key, size=size, bucket_name=src_bucket_name)
_create_key_with_random_content(src_key, size=size, bucket_name=src_bucket_name)
version_id = []
client = get_client()
response = client.list_object_versions(Bucket=src_bucket_name)
for ver in response['Versions']:
version_id.append(ver['VersionId'])
for vid in version_id:
(upload_id, parts) = _multipart_copy(src_bucket_name, src_key, dest_bucket_name, dest_key, size, version_id=vid)
response = client.complete_multipart_upload(Bucket=dest_bucket_name, Key=dest_key, UploadId=upload_id, MultipartUpload={'Parts': parts})
response = client.get_object(Bucket=dest_bucket_name, Key=dest_key)
eq(size, response['ContentLength'])
_check_key_content(src_key, src_bucket_name, dest_key, dest_bucket_name, version_id=vid)
def _check_upload_multipart_resend(bucket_name, key, objlen, resend_parts):
content_type = 'text/bla'
metadata = {'foo': 'bar'}
client = get_client()
(upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen, content_type=content_type, metadata=metadata, resend_parts=resend_parts)
client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
response = client.get_object(Bucket=bucket_name, Key=key)
eq(response['ContentType'], content_type)
eq(response['Metadata'], metadata)
body = _get_body(response)
eq(len(body), response['ContentLength'])
eq(body, data)
_check_content_using_range(key, bucket_name, data, 1000000)
_check_content_using_range(key, bucket_name, data, 10000000)
@attr(resource='object')
@attr(method='put')
@attr(operation='complete multi-part upload')
@attr(assertion='successful')
def test_multipart_upload_resend_part():
bucket_name = get_new_bucket()
key="mymultipart"
objlen = 30 * 1024 * 1024
_check_upload_multipart_resend(bucket_name, key, objlen, [0])
_check_upload_multipart_resend(bucket_name, key, objlen, [1])
_check_upload_multipart_resend(bucket_name, key, objlen, [2])
_check_upload_multipart_resend(bucket_name, key, objlen, [1,2])
_check_upload_multipart_resend(bucket_name, key, objlen, [0,1,2,3,4,5])
@attr(resource='object')
@attr(method='put')
@attr(operation='complete multiple multi-part upload with different sizes')
@attr(assertion='successful')
def test_multipart_upload_multiple_sizes():
bucket_name = get_new_bucket()
key="mymultipart"
client = get_client()
objlen = 5*1024*1024
(upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen)
client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
objlen = 5*1024*1024+100*1024
(upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen)
client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
objlen = 5*1024*1024+600*1024
(upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen)
client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
objlen = 10*1024*1024+100*1024
(upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen)
client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
objlen = 10*1024*1024+600*1024
(upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen)
client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
objlen = 10*1024*1024
(upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen)
client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
@attr(resource='object')
@attr(method='put')
@attr(operation='check multipart copies with different sizes')
@attr(assertion='successful')
def test_multipart_copy_multiple_sizes():
src_key = 'foo'
src_bucket_name = _create_key_with_random_content(src_key, 12*1024*1024)
dest_bucket_name = get_new_bucket()
dest_key="mymultipart"
client = get_client()
size = 5*1024*1024
(upload_id, parts) = _multipart_copy(src_bucket_name, src_key, dest_bucket_name, dest_key, size)
client.complete_multipart_upload(Bucket=dest_bucket_name, Key=dest_key, UploadId=upload_id, MultipartUpload={'Parts': parts})
_check_key_content(src_key, src_bucket_name, dest_key, dest_bucket_name)
size = 5*1024*1024+100*1024
(upload_id, parts) = _multipart_copy(src_bucket_name, src_key, dest_bucket_name, dest_key, size)
client.complete_multipart_upload(Bucket=dest_bucket_name, Key=dest_key, UploadId=upload_id, MultipartUpload={'Parts': parts})
_check_key_content(src_key, src_bucket_name, dest_key, dest_bucket_name)
size = 5*1024*1024+600*1024
(upload_id, parts) = _multipart_copy(src_bucket_name, src_key, dest_bucket_name, dest_key, size)
client.complete_multipart_upload(Bucket=dest_bucket_name, Key=dest_key, UploadId=upload_id, MultipartUpload={'Parts': parts})
_check_key_content(src_key, src_bucket_name, dest_key, dest_bucket_name)
size = 10*1024*1024+100*1024
(upload_id, parts) = _multipart_copy(src_bucket_name, src_key, dest_bucket_name, dest_key, size)
client.complete_multipart_upload(Bucket=dest_bucket_name, Key=dest_key, UploadId=upload_id, MultipartUpload={'Parts': parts})
_check_key_content(src_key, src_bucket_name, dest_key, dest_bucket_name)
size = 10*1024*1024+600*1024
(upload_id, parts) = _multipart_copy(src_bucket_name, src_key, dest_bucket_name, dest_key, size)
client.complete_multipart_upload(Bucket=dest_bucket_name, Key=dest_key, UploadId=upload_id, MultipartUpload={'Parts': parts})
_check_key_content(src_key, src_bucket_name, dest_key, dest_bucket_name)
size = 10*1024*1024
(upload_id, parts) = _multipart_copy(src_bucket_name, src_key, dest_bucket_name, dest_key, size)
client.complete_multipart_upload(Bucket=dest_bucket_name, Key=dest_key, UploadId=upload_id, MultipartUpload={'Parts': parts})
_check_key_content(src_key, src_bucket_name, dest_key, dest_bucket_name)
@attr(resource='object')
@attr(method='put')
@attr(operation='check failure on multiple multi-part upload with size too small')
@attr(assertion='fails 400')
def test_multipart_upload_size_too_small():
bucket_name = get_new_bucket()
key="mymultipart"
client = get_client()
size = 100*1024
(upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=size, part_size=10*1024)
e = assert_raises(ClientError, client.complete_multipart_upload, Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
eq(error_code, 'EntityTooSmall')
def gen_rand_string(size, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for _ in range(size))
def _do_test_multipart_upload_contents(bucket_name, key, num_parts):
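    # Upload num_parts identical 5 MB parts followed by a smaller final part,
    # then verify the completed object is the in-order concatenation of them.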
payload=gen_rand_string(5)*1024*1024
client = get_client()
response = client.create_multipart_upload(Bucket=bucket_name, Key=key)
upload_id = response['UploadId']
parts = []
for part_num in range(0, num_parts):
part = bytes(payload, 'utf-8')
response = client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key, PartNumber=part_num+1, Body=part)
parts.append({'ETag': response['ETag'].strip('"'), 'PartNumber': part_num+1})
last_payload = '123'*1024*1024
last_part = bytes(last_payload, 'utf-8')
response = client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key, PartNumber=num_parts+1, Body=last_part)
parts.append({'ETag': response['ETag'].strip('"'), 'PartNumber': num_parts+1})
client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
response = client.get_object(Bucket=bucket_name, Key=key)
test_string = _get_body(response)
all_payload = payload*num_parts + last_payload
assert test_string == all_payload
return all_payload
@attr(resource='object')
@attr(method='put')
@attr(operation='check contents of multi-part upload')
@attr(assertion='successful')
def test_multipart_upload_contents():
bucket_name = get_new_bucket()
_do_test_multipart_upload_contents(bucket_name, 'mymultipart', 3)
@attr(resource='object')
@attr(method='put')
@attr(operation='multi-part upload overwrites existing key')
@attr(assertion='successful')
def test_multipart_upload_overwrite_existing_object():
bucket_name = get_new_bucket()
client = get_client()
key = 'mymultipart'
payload='12345'*1024*1024
num_parts=2
client.put_object(Bucket=bucket_name, Key=key, Body=payload)
response = client.create_multipart_upload(Bucket=bucket_name, Key=key)
upload_id = response['UploadId']
parts = []
for part_num in range(0, num_parts):
response = client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key, PartNumber=part_num+1, Body=payload)
parts.append({'ETag': response['ETag'].strip('"'), 'PartNumber': part_num+1})
client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
response = client.get_object(Bucket=bucket_name, Key=key)
test_string = _get_body(response)
assert test_string == payload*num_parts
@attr(resource='object')
@attr(method='put')
@attr(operation='abort multi-part upload')
@attr(assertion='successful')
def test_abort_multipart_upload():
bucket_name = get_new_bucket()
key="mymultipart"
objlen = 10 * 1024 * 1024
client = get_client()
(upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen)
client.abort_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id)
response = client.head_bucket(Bucket=bucket_name)
rgw_bytes_used = int(response['ResponseMetadata']['HTTPHeaders'].get('x-rgw-bytes-used', 0))
eq(rgw_bytes_used, 0)
rgw_object_count = int(response['ResponseMetadata']['HTTPHeaders'].get('x-rgw-object-count', 0))
eq(rgw_object_count, 0)
@attr(resource='object')
@attr(method='put')
@attr(operation='abort non-existent multi-part upload')
@attr(assertion='fails 404')
def test_abort_multipart_upload_not_found():
bucket_name = get_new_bucket()
client = get_client()
key="mymultipart"
client.put_object(Bucket=bucket_name, Key=key)
e = assert_raises(ClientError, client.abort_multipart_upload, Bucket=bucket_name, Key=key, UploadId='56788')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 404)
eq(error_code, 'NoSuchUpload')
@attr(resource='object')
@attr(method='put')
@attr(operation='concurrent multi-part uploads')
@attr(assertion='successful')
def test_list_multipart_upload():
bucket_name = get_new_bucket()
client = get_client()
key="mymultipart"
mb = 1024 * 1024
upload_ids = []
(upload_id1, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=5*mb)
upload_ids.append(upload_id1)
(upload_id2, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=6*mb)
upload_ids.append(upload_id2)
key2="mymultipart2"
(upload_id3, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key2, size=5*mb)
upload_ids.append(upload_id3)
response = client.list_multipart_uploads(Bucket=bucket_name)
uploads = response['Uploads']
resp_uploadids = []
for i in range(0, len(uploads)):
resp_uploadids.append(uploads[i]['UploadId'])
for i in range(0, len(upload_ids)):
eq(True, (upload_ids[i] in resp_uploadids))
client.abort_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id1)
client.abort_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id2)
client.abort_multipart_upload(Bucket=bucket_name, Key=key2, UploadId=upload_id3)
@attr(resource='object')
@attr(method='put')
@attr(operation='multi-part upload with missing part')
def test_multipart_upload_missing_part():
bucket_name = get_new_bucket()
client = get_client()
key="mymultipart"
size = 1
response = client.create_multipart_upload(Bucket=bucket_name, Key=key)
upload_id = response['UploadId']
parts = []
response = client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key, PartNumber=1, Body=bytes('\x00', 'utf-8'))
    # the part was uploaded as PartNumber 1; deliberately reference a part number that was never uploaded
parts.append({'ETag': response['ETag'].strip('"'), 'PartNumber': 9999})
e = assert_raises(ClientError, client.complete_multipart_upload, Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
eq(error_code, 'InvalidPart')
@attr(resource='object')
@attr(method='put')
@attr(operation='multi-part upload with incorrect ETag')
def test_multipart_upload_incorrect_etag():
bucket_name = get_new_bucket()
client = get_client()
key="mymultipart"
size = 1
response = client.create_multipart_upload(Bucket=bucket_name, Key=key)
upload_id = response['UploadId']
parts = []
response = client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key, PartNumber=1, Body=bytes('\x00', 'utf-8'))
# 'ETag' should be "93b885adfe0da089cdf634904fd59f71"
parts.append({'ETag': "ffffffffffffffffffffffffffffffff", 'PartNumber': 1})
e = assert_raises(ClientError, client.complete_multipart_upload, Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
eq(error_code, 'InvalidPart')
def _simple_http_req_100_cont(host, port, is_secure, method, resource):
"""
Send the specified request w/expect 100-continue
and await confirmation.
"""
req_str = '{method} {resource} HTTP/1.1\r\nHost: {host}\r\nAccept-Encoding: identity\r\nContent-Length: 123\r\nExpect: 100-continue\r\n\r\n'.format(
method=method,
resource=resource,
host=host,
)
req = bytes(req_str, 'utf-8')
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if is_secure:
        s = ssl.wrap_socket(s)
s.settimeout(5)
s.connect((host, port))
s.send(req)
    try:
        data = s.recv(1024)
    except socket.error as msg:
        print('got response: ', msg)
        print('most likely server doesn\'t support 100-continue')
        s.close()
        raise

    s.close()
data_str = data.decode()
l = data_str.split(' ')
assert l[0].startswith('HTTP')
return l[1]
@attr(resource='object')
@attr(method='put')
@attr(operation='w/expect continue')
@attr(assertion='succeeds if object is public-read-write')
@attr('100_continue')
@attr('fails_on_mod_proxy_fcgi')
def test_100_continue():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(Bucket=bucket_name)
objname='testobj'
resource = '/{bucket}/{obj}'.format(bucket=bucket_name, obj=objname)
host = get_config_host()
port = get_config_port()
is_secure = get_config_is_secure()
#NOTES: this test needs to be tested when is_secure is True
status = _simple_http_req_100_cont(host, port, is_secure, 'PUT', resource)
eq(status, '403')
client.put_bucket_acl(Bucket=bucket_name, ACL='public-read-write')
status = _simple_http_req_100_cont(host, port, is_secure, 'PUT', resource)
eq(status, '100')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='set cors')
@attr(assertion='succeeds')
@attr('cors')
def test_set_cors():
bucket_name = get_new_bucket()
client = get_client()
allowed_methods = ['GET', 'PUT']
allowed_origins = ['*.get', '*.put']
cors_config ={
'CORSRules': [
{'AllowedMethods': allowed_methods,
'AllowedOrigins': allowed_origins,
},
]
}
e = assert_raises(ClientError, client.get_bucket_cors, Bucket=bucket_name)
status = _get_status(e.response)
eq(status, 404)
client.put_bucket_cors(Bucket=bucket_name, CORSConfiguration=cors_config)
response = client.get_bucket_cors(Bucket=bucket_name)
eq(response['CORSRules'][0]['AllowedMethods'], allowed_methods)
eq(response['CORSRules'][0]['AllowedOrigins'], allowed_origins)
client.delete_bucket_cors(Bucket=bucket_name)
e = assert_raises(ClientError, client.get_bucket_cors, Bucket=bucket_name)
status = _get_status(e.response)
eq(status, 404)
def _cors_request_and_check(func, url, headers, expect_status, expect_allow_origin, expect_allow_methods):
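    # Issue the request with the given headers and assert both the HTTP status
    # and the CORS allow-origin/allow-methods headers returned by the server.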
r = func(url, headers=headers)
eq(r.status_code, expect_status)
assert r.headers.get('access-control-allow-origin', None) == expect_allow_origin
assert r.headers.get('access-control-allow-methods', None) == expect_allow_methods
@attr(resource='bucket')
@attr(method='get')
@attr(operation='check cors response when origin header set')
@attr(assertion='returning cors header')
@attr('cors')
def test_cors_origin_response():
bucket_name = _setup_bucket_acl(bucket_acl='public-read')
client = get_client()
cors_config ={
'CORSRules': [
{'AllowedMethods': ['GET'],
'AllowedOrigins': ['*suffix'],
},
{'AllowedMethods': ['GET'],
'AllowedOrigins': ['start*end'],
},
{'AllowedMethods': ['GET'],
'AllowedOrigins': ['prefix*'],
},
{'AllowedMethods': ['PUT'],
'AllowedOrigins': ['*.put'],
}
]
}
e = assert_raises(ClientError, client.get_bucket_cors, Bucket=bucket_name)
status = _get_status(e.response)
eq(status, 404)
client.put_bucket_cors(Bucket=bucket_name, CORSConfiguration=cors_config)
time.sleep(3)
url = _get_post_url(bucket_name)
_cors_request_and_check(requests.get, url, None, 200, None, None)
_cors_request_and_check(requests.get, url, {'Origin': 'foo.suffix'}, 200, 'foo.suffix', 'GET')
_cors_request_and_check(requests.get, url, {'Origin': 'foo.bar'}, 200, None, None)
_cors_request_and_check(requests.get, url, {'Origin': 'foo.suffix.get'}, 200, None, None)
_cors_request_and_check(requests.get, url, {'Origin': 'startend'}, 200, 'startend', 'GET')
_cors_request_and_check(requests.get, url, {'Origin': 'start1end'}, 200, 'start1end', 'GET')
_cors_request_and_check(requests.get, url, {'Origin': 'start12end'}, 200, 'start12end', 'GET')
_cors_request_and_check(requests.get, url, {'Origin': '0start12end'}, 200, None, None)
_cors_request_and_check(requests.get, url, {'Origin': 'prefix'}, 200, 'prefix', 'GET')
_cors_request_and_check(requests.get, url, {'Origin': 'prefix.suffix'}, 200, 'prefix.suffix', 'GET')
_cors_request_and_check(requests.get, url, {'Origin': 'bla.prefix'}, 200, None, None)
obj_url = '{u}/{o}'.format(u=url, o='bar')
_cors_request_and_check(requests.get, obj_url, {'Origin': 'foo.suffix'}, 404, 'foo.suffix', 'GET')
_cors_request_and_check(requests.put, obj_url, {'Origin': 'foo.suffix', 'Access-Control-Request-Method': 'GET',
'content-length': '0'}, 403, 'foo.suffix', 'GET')
_cors_request_and_check(requests.put, obj_url, {'Origin': 'foo.suffix', 'Access-Control-Request-Method': 'PUT',
'content-length': '0'}, 403, None, None)
_cors_request_and_check(requests.put, obj_url, {'Origin': 'foo.suffix', 'Access-Control-Request-Method': 'DELETE',
'content-length': '0'}, 403, None, None)
_cors_request_and_check(requests.put, obj_url, {'Origin': 'foo.suffix', 'content-length': '0'}, 403, None, None)
_cors_request_and_check(requests.put, obj_url, {'Origin': 'foo.put', 'content-length': '0'}, 403, 'foo.put', 'PUT')
_cors_request_and_check(requests.get, obj_url, {'Origin': 'foo.suffix'}, 404, 'foo.suffix', 'GET')
_cors_request_and_check(requests.options, url, None, 400, None, None)
_cors_request_and_check(requests.options, url, {'Origin': 'foo.suffix'}, 400, None, None)
_cors_request_and_check(requests.options, url, {'Origin': 'bla'}, 400, None, None)
_cors_request_and_check(requests.options, obj_url, {'Origin': 'foo.suffix', 'Access-Control-Request-Method': 'GET',
'content-length': '0'}, 200, 'foo.suffix', 'GET')
_cors_request_and_check(requests.options, url, {'Origin': 'foo.bar', 'Access-Control-Request-Method': 'GET'}, 403, None, None)
_cors_request_and_check(requests.options, url, {'Origin': 'foo.suffix.get', 'Access-Control-Request-Method': 'GET'}, 403, None, None)
_cors_request_and_check(requests.options, url, {'Origin': 'startend', 'Access-Control-Request-Method': 'GET'}, 200, 'startend', 'GET')
_cors_request_and_check(requests.options, url, {'Origin': 'start1end', 'Access-Control-Request-Method': 'GET'}, 200, 'start1end', 'GET')
_cors_request_and_check(requests.options, url, {'Origin': 'start12end', 'Access-Control-Request-Method': 'GET'}, 200, 'start12end', 'GET')
_cors_request_and_check(requests.options, url, {'Origin': '0start12end', 'Access-Control-Request-Method': 'GET'}, 403, None, None)
_cors_request_and_check(requests.options, url, {'Origin': 'prefix', 'Access-Control-Request-Method': 'GET'}, 200, 'prefix', 'GET')
_cors_request_and_check(requests.options, url, {'Origin': 'prefix.suffix', 'Access-Control-Request-Method': 'GET'}, 200, 'prefix.suffix', 'GET')
_cors_request_and_check(requests.options, url, {'Origin': 'bla.prefix', 'Access-Control-Request-Method': 'GET'}, 403, None, None)
_cors_request_and_check(requests.options, url, {'Origin': 'foo.put', 'Access-Control-Request-Method': 'GET'}, 403, None, None)
_cors_request_and_check(requests.options, url, {'Origin': 'foo.put', 'Access-Control-Request-Method': 'PUT'}, 200, 'foo.put', 'PUT')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='check cors response when origin is set to wildcard')
@attr(assertion='returning cors header')
@attr('cors')
def test_cors_origin_wildcard():
bucket_name = _setup_bucket_acl(bucket_acl='public-read')
client = get_client()
cors_config ={
'CORSRules': [
{'AllowedMethods': ['GET'],
'AllowedOrigins': ['*'],
},
]
}
e = assert_raises(ClientError, client.get_bucket_cors, Bucket=bucket_name)
status = _get_status(e.response)
eq(status, 404)
client.put_bucket_cors(Bucket=bucket_name, CORSConfiguration=cors_config)
time.sleep(3)
url = _get_post_url(bucket_name)
_cors_request_and_check(requests.get, url, None, 200, None, None)
_cors_request_and_check(requests.get, url, {'Origin': 'example.origin'}, 200, '*', 'GET')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='check cors response when Access-Control-Request-Headers is set in option request')
@attr(assertion='returning cors header')
@attr('cors')
def test_cors_header_option():
bucket_name = _setup_bucket_acl(bucket_acl='public-read')
client = get_client()
cors_config ={
'CORSRules': [
{'AllowedMethods': ['GET'],
'AllowedOrigins': ['*'],
'ExposeHeaders': ['x-amz-meta-header1'],
},
]
}
e = assert_raises(ClientError, client.get_bucket_cors, Bucket=bucket_name)
status = _get_status(e.response)
eq(status, 404)
client.put_bucket_cors(Bucket=bucket_name, CORSConfiguration=cors_config)
time.sleep(3)
url = _get_post_url(bucket_name)
obj_url = '{u}/{o}'.format(u=url, o='bar')
_cors_request_and_check(requests.options, obj_url, {'Origin': 'example.origin','Access-Control-Request-Headers':'x-amz-meta-header2','Access-Control-Request-Method':'GET'}, 403, None, None)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='put tags')
@attr(assertion='succeeds')
@attr('tagging')
def test_set_bucket_tagging():
bucket_name = get_new_bucket()
client = get_client()
tags={
'TagSet': [
{
'Key': 'Hello',
'Value': 'World'
},
]
}
e = assert_raises(ClientError, client.get_bucket_tagging, Bucket=bucket_name)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 404)
eq(error_code, 'NoSuchTagSetError')
client.put_bucket_tagging(Bucket=bucket_name, Tagging=tags)
response = client.get_bucket_tagging(Bucket=bucket_name)
eq(len(response['TagSet']), 1)
eq(response['TagSet'][0]['Key'], 'Hello')
eq(response['TagSet'][0]['Value'], 'World')
client.delete_bucket_tagging(Bucket=bucket_name)
e = assert_raises(ClientError, client.get_bucket_tagging, Bucket=bucket_name)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 404)
eq(error_code, 'NoSuchTagSetError')
class FakeFile(object):
"""
file that simulates seek, tell, and current character
"""
def __init__(self, char='A', interrupt=None):
self.offset = 0
self.char = bytes(char, 'utf-8')
self.interrupt = interrupt
def seek(self, offset, whence=os.SEEK_SET):
if whence == os.SEEK_SET:
self.offset = offset
elif whence == os.SEEK_END:
            self.offset = self.size + offset
elif whence == os.SEEK_CUR:
self.offset += offset
def tell(self):
return self.offset
class FakeWriteFile(FakeFile):
"""
file that simulates interruptable reads of constant data
"""
def __init__(self, size, char='A', interrupt=None):
FakeFile.__init__(self, char, interrupt)
self.size = size
def read(self, size=-1):
if size < 0:
size = self.size - self.offset
count = min(size, self.size - self.offset)
self.offset += count
# Sneaky! do stuff before we return (the last time)
        if self.interrupt is not None and self.offset == self.size and count > 0:
self.interrupt()
return self.char*count
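# A minimal usage sketch of the fake above (the size/char values here are
# illustrative only; the atomic read/write tests below drive it the same way):
#
#   fp = FakeWriteFile(4, 'A', lambda: print('last read'))
#   assert fp.read() == b'AAAA'   # the interrupt fires as the final bytes are read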
class FakeReadFile(FakeFile):
"""
file that simulates writes, interrupting after the second
"""
def __init__(self, size, char='A', interrupt=None):
FakeFile.__init__(self, char, interrupt)
self.interrupted = False
self.size = 0
self.expected_size = size
def write(self, chars):
eq(chars, self.char*len(chars))
self.offset += len(chars)
self.size += len(chars)
        # Sneaky! do stuff after the first non-empty write
        if not self.interrupted and self.interrupt is not None \
                and self.offset > 0:
self.interrupt()
self.interrupted = True
def close(self):
eq(self.size, self.expected_size)
class FakeFileVerifier(object):
"""
file that verifies expected data has been written
"""
def __init__(self, char=None):
self.char = char
self.size = 0
def write(self, data):
size = len(data)
        if self.char is None:
self.char = data[0]
self.size += size
eq(data.decode(), self.char*size)
def _verify_atomic_key_data(bucket_name, key, size=-1, char=None):
"""
Make sure file is of the expected size and (simulated) content
"""
fp_verify = FakeFileVerifier(char)
client = get_client()
client.download_fileobj(bucket_name, key, fp_verify)
if size >= 0:
eq(fp_verify.size, size)
def _test_atomic_read(file_size):
"""
Create a file of A's, use it to set_contents_from_file.
Create a file of B's, use it to re-set_contents_from_file.
Re-read the contents, and confirm we get B's
"""
bucket_name = get_new_bucket()
client = get_client()
fp_a = FakeWriteFile(file_size, 'A')
client.put_object(Bucket=bucket_name, Key='testobj', Body=fp_a)
fp_b = FakeWriteFile(file_size, 'B')
fp_a2 = FakeReadFile(file_size, 'A',
lambda: client.put_object(Bucket=bucket_name, Key='testobj', Body=fp_b)
)
read_client = get_client()
read_client.download_fileobj(bucket_name, 'testobj', fp_a2)
fp_a2.close()
_verify_atomic_key_data(bucket_name, 'testobj', file_size, 'B')
@attr(resource='object')
@attr(method='put')
@attr(operation='read atomicity')
@attr(assertion='1MB successful')
def test_atomic_read_1mb():
_test_atomic_read(1024*1024)
@attr(resource='object')
@attr(method='put')
@attr(operation='read atomicity')
@attr(assertion='4MB successful')
def test_atomic_read_4mb():
_test_atomic_read(1024*1024*4)
@attr(resource='object')
@attr(method='put')
@attr(operation='read atomicity')
@attr(assertion='8MB successful')
def test_atomic_read_8mb():
_test_atomic_read(1024*1024*8)
def _test_atomic_write(file_size):
"""
Create a file of A's, use it to set_contents_from_file.
Verify the contents are all A's.
Create a file of B's, use it to re-set_contents_from_file.
Before re-set continues, verify content's still A's
Re-read the contents, and confirm we get B's
"""
bucket_name = get_new_bucket()
client = get_client()
objname = 'testobj'
# create <file_size> file of A's
fp_a = FakeWriteFile(file_size, 'A')
client.put_object(Bucket=bucket_name, Key=objname, Body=fp_a)
# verify A's
_verify_atomic_key_data(bucket_name, objname, file_size, 'A')
# create <file_size> file of B's
# but try to verify the file before we finish writing all the B's
fp_b = FakeWriteFile(file_size, 'B',
lambda: _verify_atomic_key_data(bucket_name, objname, file_size, 'A')
)
client.put_object(Bucket=bucket_name, Key=objname, Body=fp_b)
# verify B's
_verify_atomic_key_data(bucket_name, objname, file_size, 'B')
@attr(resource='object')
@attr(method='put')
@attr(operation='write atomicity')
@attr(assertion='1MB successful')
def test_atomic_write_1mb():
_test_atomic_write(1024*1024)
@attr(resource='object')
@attr(method='put')
@attr(operation='write atomicity')
@attr(assertion='4MB successful')
def test_atomic_write_4mb():
_test_atomic_write(1024*1024*4)
@attr(resource='object')
@attr(method='put')
@attr(operation='write atomicity')
@attr(assertion='8MB successful')
def test_atomic_write_8mb():
_test_atomic_write(1024*1024*8)
def _test_atomic_dual_write(file_size):
"""
create an object, two sessions writing different contents
confirm that it is all one or the other
"""
bucket_name = get_new_bucket()
objname = 'testobj'
client = get_client()
client.put_object(Bucket=bucket_name, Key=objname)
# write <file_size> file of B's
# but before we're done, try to write all A's
fp_a = FakeWriteFile(file_size, 'A')
def rewind_put_fp_a():
fp_a.seek(0)
client.put_object(Bucket=bucket_name, Key=objname, Body=fp_a)
fp_b = FakeWriteFile(file_size, 'B', rewind_put_fp_a)
client.put_object(Bucket=bucket_name, Key=objname, Body=fp_b)
# verify the file
_verify_atomic_key_data(bucket_name, objname, file_size, 'B')
@attr(resource='object')
@attr(method='put')
@attr(operation='write one or the other')
@attr(assertion='1MB successful')
def test_atomic_dual_write_1mb():
_test_atomic_dual_write(1024*1024)
@attr(resource='object')
@attr(method='put')
@attr(operation='write one or the other')
@attr(assertion='4MB successful')
def test_atomic_dual_write_4mb():
_test_atomic_dual_write(1024*1024*4)
@attr(resource='object')
@attr(method='put')
@attr(operation='write one or the other')
@attr(assertion='8MB successful')
def test_atomic_dual_write_8mb():
_test_atomic_dual_write(1024*1024*8)
def _test_atomic_conditional_write(file_size):
"""
Create a file of A's, use it to set_contents_from_file.
Verify the contents are all A's.
Create a file of B's, use it to re-set_contents_from_file.
Before re-set continues, verify content's still A's
Re-read the contents, and confirm we get B's
"""
bucket_name = get_new_bucket()
objname = 'testobj'
client = get_client()
# create <file_size> file of A's
fp_a = FakeWriteFile(file_size, 'A')
client.put_object(Bucket=bucket_name, Key=objname, Body=fp_a)
fp_b = FakeWriteFile(file_size, 'B',
lambda: _verify_atomic_key_data(bucket_name, objname, file_size, 'A')
)
# create <file_size> file of B's
# but try to verify the file before we finish writing all the B's
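    # put_object does not expose If-Match directly here, so register a
    # before-call hook that injects an 'If-Match: *' header into the request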
lf = (lambda **kwargs: kwargs['params']['headers'].update({'If-Match': '*'}))
client.meta.events.register('before-call.s3.PutObject', lf)
client.put_object(Bucket=bucket_name, Key=objname, Body=fp_b)
# verify B's
_verify_atomic_key_data(bucket_name, objname, file_size, 'B')
@attr(resource='object')
@attr(method='put')
@attr(operation='write atomicity')
@attr(assertion='1MB successful')
@attr('fails_on_aws')
def test_atomic_conditional_write_1mb():
_test_atomic_conditional_write(1024*1024)
def _test_atomic_dual_conditional_write(file_size):
"""
create an object, two sessions writing different contents
confirm that it is all one or the other
"""
bucket_name = get_new_bucket()
objname = 'testobj'
client = get_client()
fp_a = FakeWriteFile(file_size, 'A')
response = client.put_object(Bucket=bucket_name, Key=objname, Body=fp_a)
_verify_atomic_key_data(bucket_name, objname, file_size, 'A')
etag_fp_a = response['ETag'].replace('"', '')
# write <file_size> file of C's
# but before we're done, try to write all B's
fp_b = FakeWriteFile(file_size, 'B')
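    # register a before-call hook so subsequent PutObject requests carry
    # 'If-Match: <etag of the A object>'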
lf = (lambda **kwargs: kwargs['params']['headers'].update({'If-Match': etag_fp_a}))
client.meta.events.register('before-call.s3.PutObject', lf)
def rewind_put_fp_b():
fp_b.seek(0)
client.put_object(Bucket=bucket_name, Key=objname, Body=fp_b)
fp_c = FakeWriteFile(file_size, 'C', rewind_put_fp_b)
e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=objname, Body=fp_c)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 412)
eq(error_code, 'PreconditionFailed')
# verify the file
_verify_atomic_key_data(bucket_name, objname, file_size, 'B')
@attr(resource='object')
@attr(method='put')
@attr(operation='write one or the other')
@attr(assertion='1MB successful')
@attr('fails_on_aws')
# TODO: test not passing with SSL, fix this
@attr('fails_on_rgw')
def test_atomic_dual_conditional_write_1mb():
_test_atomic_dual_conditional_write(1024*1024)
@attr(resource='object')
@attr(method='put')
@attr(operation='write file in deleted bucket')
@attr(assertion='fail 404')
@attr('fails_on_aws')
# TODO: test not passing with SSL, fix this
@attr('fails_on_rgw')
def test_atomic_write_bucket_gone():
bucket_name = get_new_bucket()
client = get_client()
def remove_bucket():
client.delete_bucket(Bucket=bucket_name)
objname = 'foo'
fp_a = FakeWriteFile(1024*1024, 'A', remove_bucket)
e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=objname, Body=fp_a)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 404)
eq(error_code, 'NoSuchBucket')
@attr(resource='object')
@attr(method='put')
@attr(operation='begin to overwrite file with multipart upload then abort')
@attr(assertion='read back original key contents')
def test_atomic_multipart_upload_write():
bucket_name = get_new_bucket()
client = get_client()
client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
response = client.create_multipart_upload(Bucket=bucket_name, Key='foo')
upload_id = response['UploadId']
response = client.get_object(Bucket=bucket_name, Key='foo')
body = _get_body(response)
eq(body, 'bar')
client.abort_multipart_upload(Bucket=bucket_name, Key='foo', UploadId=upload_id)
response = client.get_object(Bucket=bucket_name, Key='foo')
body = _get_body(response)
eq(body, 'bar')
class Counter:
def __init__(self, default_val):
self.val = default_val
def inc(self):
self.val = self.val + 1
class ActionOnCount:
def __init__(self, trigger_count, action):
self.count = 0
self.trigger_count = trigger_count
self.action = action
self.result = 0
def trigger(self):
self.count = self.count + 1
if self.count == self.trigger_count:
self.result = self.action()
@attr(resource='object')
@attr(method='put')
@attr(operation='multipart check for two writes of the same part, first write finishes last')
@attr(assertion='object contains correct content')
def test_multipart_resend_first_finishes_last():
bucket_name = get_new_bucket()
client = get_client()
key_name = "mymultipart"
response = client.create_multipart_upload(Bucket=bucket_name, Key=key_name)
upload_id = response['UploadId']
#file_size = 8*1024*1024
file_size = 8
counter = Counter(0)
    # upload_part may read the Body file object more than once (e.g. once to
    # calculate the md5 and once to send the data out). We want to interject
    # only on the last read, but we can't be sure how many reads there will
    # be, so do a dry run first and count them.
fp_dry_run = FakeWriteFile(file_size, 'C',
lambda: counter.inc()
)
parts = []
response = client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key_name, PartNumber=1, Body=fp_dry_run)
parts.append({'ETag': response['ETag'].strip('"'), 'PartNumber': 1})
client.complete_multipart_upload(Bucket=bucket_name, Key=key_name, UploadId=upload_id, MultipartUpload={'Parts': parts})
client.delete_object(Bucket=bucket_name, Key=key_name)
# clear parts
parts[:] = []
# ok, now for the actual test
fp_b = FakeWriteFile(file_size, 'B')
def upload_fp_b():
response = client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key_name, Body=fp_b, PartNumber=1)
parts.append({'ETag': response['ETag'].strip('"'), 'PartNumber': 1})
action = ActionOnCount(counter.val, lambda: upload_fp_b())
response = client.create_multipart_upload(Bucket=bucket_name, Key=key_name)
upload_id = response['UploadId']
fp_a = FakeWriteFile(file_size, 'A',
lambda: action.trigger()
)
response = client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key_name, PartNumber=1, Body=fp_a)
parts.append({'ETag': response['ETag'].strip('"'), 'PartNumber': 1})
client.complete_multipart_upload(Bucket=bucket_name, Key=key_name, UploadId=upload_id, MultipartUpload={'Parts': parts})
_verify_atomic_key_data(bucket_name, key_name, file_size, 'A')
@attr(resource='object')
@attr(method='get')
@attr(operation='range')
@attr(assertion='returns correct data, 206')
def test_ranged_request_response_code():
content = 'testcontent'
bucket_name = get_new_bucket()
client = get_client()
client.put_object(Bucket=bucket_name, Key='testobj', Body=content)
response = client.get_object(Bucket=bucket_name, Key='testobj', Range='bytes=4-7')
fetched_content = _get_body(response)
eq(fetched_content, content[4:8])
eq(response['ResponseMetadata']['HTTPHeaders']['content-range'], 'bytes 4-7/11')
eq(response['ResponseMetadata']['HTTPStatusCode'], 206)
def _generate_random_string(size):
return ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(size))
@attr(resource='object')
@attr(method='get')
@attr(operation='range')
@attr(assertion='returns correct data, 206')
def test_ranged_big_request_response_code():
content = _generate_random_string(8*1024*1024)
bucket_name = get_new_bucket()
client = get_client()
client.put_object(Bucket=bucket_name, Key='testobj', Body=content)
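    # the Range header is inclusive: bytes 3145728-5242880 correspond to the
    # Python slice content[3145728:5242881]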
response = client.get_object(Bucket=bucket_name, Key='testobj', Range='bytes=3145728-5242880')
fetched_content = _get_body(response)
eq(fetched_content, content[3145728:5242881])
eq(response['ResponseMetadata']['HTTPHeaders']['content-range'], 'bytes 3145728-5242880/8388608')
eq(response['ResponseMetadata']['HTTPStatusCode'], 206)
@attr(resource='object')
@attr(method='get')
@attr(operation='range')
@attr(assertion='returns correct data, 206')
def test_ranged_request_skip_leading_bytes_response_code():
content = 'testcontent'
bucket_name = get_new_bucket()
client = get_client()
client.put_object(Bucket=bucket_name, Key='testobj', Body=content)
response = client.get_object(Bucket=bucket_name, Key='testobj', Range='bytes=4-')
fetched_content = _get_body(response)
eq(fetched_content, content[4:])
eq(response['ResponseMetadata']['HTTPHeaders']['content-range'], 'bytes 4-10/11')
eq(response['ResponseMetadata']['HTTPStatusCode'], 206)
@attr(resource='object')
@attr(method='get')
@attr(operation='range')
@attr(assertion='returns correct data, 206')
def test_ranged_request_return_trailing_bytes_response_code():
content = 'testcontent'
bucket_name = get_new_bucket()
client = get_client()
client.put_object(Bucket=bucket_name, Key='testobj', Body=content)
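    # a suffix range of 'bytes=-7' returns the last 7 bytes; for the 11-byte
    # body that is bytes 4-10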
response = client.get_object(Bucket=bucket_name, Key='testobj', Range='bytes=-7')
fetched_content = _get_body(response)
eq(fetched_content, content[-7:])
eq(response['ResponseMetadata']['HTTPHeaders']['content-range'], 'bytes 4-10/11')
eq(response['ResponseMetadata']['HTTPStatusCode'], 206)
@attr(resource='object')
@attr(method='get')
@attr(operation='range')
@attr(assertion='returns invalid range, 416')
def test_ranged_request_invalid_range():
content = 'testcontent'
bucket_name = get_new_bucket()
client = get_client()
client.put_object(Bucket=bucket_name, Key='testobj', Body=content)
# test invalid range
e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='testobj', Range='bytes=40-50')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 416)
eq(error_code, 'InvalidRange')
@attr(resource='object')
@attr(method='get')
@attr(operation='range')
@attr(assertion='returns invalid range, 416')
def test_ranged_request_empty_object():
content = ''
bucket_name = get_new_bucket()
client = get_client()
client.put_object(Bucket=bucket_name, Key='testobj', Body=content)
# test invalid range
e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='testobj', Range='bytes=40-50')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 416)
eq(error_code, 'InvalidRange')
@attr(resource='bucket')
@attr(method='create')
@attr(operation='create versioned bucket')
@attr(assertion='can create and suspend bucket versioning')
@attr('versioning')
def test_versioning_bucket_create_suspend():
bucket_name = get_new_bucket()
check_versioning(bucket_name, None)
check_configure_versioning_retry(bucket_name, "Suspended", "Suspended")
check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
check_configure_versioning_retry(bucket_name, "Suspended", "Suspended")
def check_obj_content(client, bucket_name, key, version_id, content):
response = client.get_object(Bucket=bucket_name, Key=key, VersionId=version_id)
if content is not None:
body = _get_body(response)
eq(body, content)
else:
eq(response['DeleteMarker'], True)
def check_obj_versions(client, bucket_name, key, version_ids, contents):
# check to see if objects is pointing at correct version
response = client.list_object_versions(Bucket=bucket_name)
versions = response['Versions']
    # list_object_versions returns versions newest-first, while version_ids
    # and contents are ordered oldest-first, so reverse before comparing
versions.reverse()
i = 0
for version in versions:
eq(version['VersionId'], version_ids[i])
eq(version['Key'], key)
check_obj_content(client, bucket_name, key, version['VersionId'], contents[i])
i += 1
def create_multiple_versions(client, bucket_name, key, num_versions, version_ids = None, contents = None, check_versions = True):
contents = contents or []
version_ids = version_ids or []
for i in range(num_versions):
body = 'content-{i}'.format(i=i)
response = client.put_object(Bucket=bucket_name, Key=key, Body=body)
version_id = response['VersionId']
contents.append(body)
version_ids.append(version_id)
if check_versions:
check_obj_versions(client, bucket_name, key, version_ids, contents)
return (version_ids, contents)
def remove_obj_version(client, bucket_name, key, version_ids, contents, index):
eq(len(version_ids), len(contents))
index = index % len(version_ids)
rm_version_id = version_ids.pop(index)
rm_content = contents.pop(index)
check_obj_content(client, bucket_name, key, rm_version_id, rm_content)
client.delete_object(Bucket=bucket_name, Key=key, VersionId=rm_version_id)
if len(version_ids) != 0:
check_obj_versions(client, bucket_name, key, version_ids, contents)
def clean_up_bucket(client, bucket_name, key, version_ids):
for version_id in version_ids:
client.delete_object(Bucket=bucket_name, Key=key, VersionId=version_id)
client.delete_bucket(Bucket=bucket_name)
def _do_test_create_remove_versions(client, bucket_name, key, num_versions, remove_start_idx, idx_inc):
(version_ids, contents) = create_multiple_versions(client, bucket_name, key, num_versions)
idx = remove_start_idx
for j in range(num_versions):
remove_obj_version(client, bucket_name, key, version_ids, contents, idx)
idx += idx_inc
response = client.list_object_versions(Bucket=bucket_name)
if 'Versions' in response:
print(response['Versions'])
@attr(resource='object')
@attr(method='create')
@attr(operation='create and remove versioned object')
@attr(assertion='can create access and remove appropriate versions')
@attr('versioning')
def test_versioning_obj_create_read_remove():
bucket_name = get_new_bucket()
client = get_client()
client.put_bucket_versioning(Bucket=bucket_name, VersioningConfiguration={'MFADelete': 'Disabled', 'Status': 'Enabled'})
key = 'testobj'
num_versions = 5
_do_test_create_remove_versions(client, bucket_name, key, num_versions, -1, 0)
_do_test_create_remove_versions(client, bucket_name, key, num_versions, -1, 0)
_do_test_create_remove_versions(client, bucket_name, key, num_versions, 0, 0)
_do_test_create_remove_versions(client, bucket_name, key, num_versions, 1, 0)
_do_test_create_remove_versions(client, bucket_name, key, num_versions, 4, -1)
_do_test_create_remove_versions(client, bucket_name, key, num_versions, 3, 3)
@attr(resource='object')
@attr(method='create')
@attr(operation='create and remove versioned object and head')
@attr(assertion='can create access and remove appropriate versions')
@attr('versioning')
def test_versioning_obj_create_read_remove_head():
bucket_name = get_new_bucket()
client = get_client()
client.put_bucket_versioning(Bucket=bucket_name, VersioningConfiguration={'MFADelete': 'Disabled', 'Status': 'Enabled'})
key = 'testobj'
num_versions = 5
(version_ids, contents) = create_multiple_versions(client, bucket_name, key, num_versions)
    # remove the current (head) version, then check that the next-newest version becomes current
removed_version_id = version_ids.pop()
contents.pop()
num_versions = num_versions-1
response = client.delete_object(Bucket=bucket_name, Key=key, VersionId=removed_version_id)
response = client.get_object(Bucket=bucket_name, Key=key)
body = _get_body(response)
eq(body, contents[-1])
# add a delete marker
response = client.delete_object(Bucket=bucket_name, Key=key)
eq(response['DeleteMarker'], True)
delete_marker_version_id = response['VersionId']
version_ids.append(delete_marker_version_id)
response = client.list_object_versions(Bucket=bucket_name)
eq(len(response['Versions']), num_versions)
eq(len(response['DeleteMarkers']), 1)
eq(response['DeleteMarkers'][0]['VersionId'], delete_marker_version_id)
clean_up_bucket(client, bucket_name, key, version_ids)
@attr(resource='object')
@attr(method='create')
@attr(operation='create object, then switch to versioning')
@attr(assertion='behaves correctly')
@attr('versioning')
def test_versioning_obj_plain_null_version_removal():
bucket_name = get_new_bucket()
check_versioning(bucket_name, None)
client = get_client()
key = 'testobjfoo'
content = 'fooz'
client.put_object(Bucket=bucket_name, Key=key, Body=content)
check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
client.delete_object(Bucket=bucket_name, Key=key, VersionId='null')
e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=key)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 404)
eq(error_code, 'NoSuchKey')
response = client.list_object_versions(Bucket=bucket_name)
eq(('Versions' in response), False)
@attr(resource='object')
@attr(method='create')
@attr(operation='create object, then switch to versioning')
@attr(assertion='behaves correctly')
@attr('versioning')
def test_versioning_obj_plain_null_version_overwrite():
bucket_name = get_new_bucket()
check_versioning(bucket_name, None)
client = get_client()
key = 'testobjfoo'
content = 'fooz'
client.put_object(Bucket=bucket_name, Key=key, Body=content)
check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
content2 = 'zzz'
response = client.put_object(Bucket=bucket_name, Key=key, Body=content2)
response = client.get_object(Bucket=bucket_name, Key=key)
body = _get_body(response)
eq(body, content2)
version_id = response['VersionId']
client.delete_object(Bucket=bucket_name, Key=key, VersionId=version_id)
response = client.get_object(Bucket=bucket_name, Key=key)
body = _get_body(response)
eq(body, content)
client.delete_object(Bucket=bucket_name, Key=key, VersionId='null')
e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=key)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 404)
eq(error_code, 'NoSuchKey')
response = client.list_object_versions(Bucket=bucket_name)
eq(('Versions' in response), False)
@attr(resource='object')
@attr(method='create')
@attr(operation='create object, then switch to versioning')
@attr(assertion='behaves correctly')
@attr('versioning')
def test_versioning_obj_plain_null_version_overwrite_suspended():
bucket_name = get_new_bucket()
check_versioning(bucket_name, None)
client = get_client()
key = 'testobjbar'
content = 'foooz'
client.put_object(Bucket=bucket_name, Key=key, Body=content)
check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
check_configure_versioning_retry(bucket_name, "Suspended", "Suspended")
content2 = 'zzz'
response = client.put_object(Bucket=bucket_name, Key=key, Body=content2)
response = client.get_object(Bucket=bucket_name, Key=key)
body = _get_body(response)
eq(body, content2)
response = client.list_object_versions(Bucket=bucket_name)
# original object with 'null' version id still counts as a version
eq(len(response['Versions']), 1)
client.delete_object(Bucket=bucket_name, Key=key, VersionId='null')
e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=key)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 404)
eq(error_code, 'NoSuchKey')
response = client.list_object_versions(Bucket=bucket_name)
eq(('Versions' in response), False)
def delete_suspended_versioning_obj(client, bucket_name, key, version_ids, contents):
client.delete_object(Bucket=bucket_name, Key=key)
# clear out old null objects in lists since they will get overwritten
eq(len(version_ids), len(contents))
i = 0
for version_id in version_ids:
if version_id == 'null':
version_ids.pop(i)
contents.pop(i)
i += 1
return (version_ids, contents)
def overwrite_suspended_versioning_obj(client, bucket_name, key, version_ids, contents, content):
client.put_object(Bucket=bucket_name, Key=key, Body=content)
# clear out old null objects in lists since they will get overwritten
eq(len(version_ids), len(contents))
i = 0
for version_id in version_ids:
if version_id == 'null':
version_ids.pop(i)
contents.pop(i)
i += 1
# add new content with 'null' version id to the end
contents.append(content)
version_ids.append('null')
return (version_ids, contents)
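# Under suspended versioning, plain deletes and overwrites both target the
# special 'null' version id, so the helpers above drop any existing 'null'
# entry from the bookkeeping lists before recording the new state.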
@attr(resource='object')
@attr(method='create')
@attr(operation='suspend versioned bucket')
@attr(assertion='suspended versioning behaves correctly')
@attr('versioning')
def test_versioning_obj_suspend_versions():
bucket_name = get_new_bucket()
client = get_client()
check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
key = 'testobj'
num_versions = 5
(version_ids, contents) = create_multiple_versions(client, bucket_name, key, num_versions)
check_configure_versioning_retry(bucket_name, "Suspended", "Suspended")
delete_suspended_versioning_obj(client, bucket_name, key, version_ids, contents)
delete_suspended_versioning_obj(client, bucket_name, key, version_ids, contents)
overwrite_suspended_versioning_obj(client, bucket_name, key, version_ids, contents, 'null content 1')
overwrite_suspended_versioning_obj(client, bucket_name, key, version_ids, contents, 'null content 2')
delete_suspended_versioning_obj(client, bucket_name, key, version_ids, contents)
overwrite_suspended_versioning_obj(client, bucket_name, key, version_ids, contents, 'null content 3')
delete_suspended_versioning_obj(client, bucket_name, key, version_ids, contents)
check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
(version_ids, contents) = create_multiple_versions(client, bucket_name, key, 3, version_ids, contents)
num_versions += 3
for idx in range(num_versions):
remove_obj_version(client, bucket_name, key, version_ids, contents, idx)
eq(len(version_ids), 0)
eq(len(version_ids), len(contents))
@attr(resource='object')
@attr(method='remove')
@attr(operation='create and remove versions')
@attr(assertion='everything works')
@attr('versioning')
def test_versioning_obj_create_versions_remove_all():
bucket_name = get_new_bucket()
client = get_client()
check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
key = 'testobj'
num_versions = 10
(version_ids, contents) = create_multiple_versions(client, bucket_name, key, num_versions)
for idx in range(num_versions):
remove_obj_version(client, bucket_name, key, version_ids, contents, idx)
eq(len(version_ids), 0)
eq(len(version_ids), len(contents))
@attr(resource='object')
@attr(method='remove')
@attr(operation='create and remove versions')
@attr(assertion='everything works')
@attr('versioning')
def test_versioning_obj_create_versions_remove_special_names():
bucket_name = get_new_bucket()
client = get_client()
check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
keys = ['_testobj', '_', ':', ' ']
num_versions = 10
for key in keys:
(version_ids, contents) = create_multiple_versions(client, bucket_name, key, num_versions)
for idx in range(num_versions):
remove_obj_version(client, bucket_name, key, version_ids, contents, idx)
eq(len(version_ids), 0)
eq(len(version_ids), len(contents))
@attr(resource='object')
@attr(method='multipart')
@attr(operation='create and test multipart object')
@attr(assertion='everything works')
@attr('versioning')
def test_versioning_obj_create_overwrite_multipart():
bucket_name = get_new_bucket()
client = get_client()
check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
key = 'testobj'
num_versions = 3
contents = []
version_ids = []
for i in range(num_versions):
ret = _do_test_multipart_upload_contents(bucket_name, key, 3)
contents.append(ret)
response = client.list_object_versions(Bucket=bucket_name)
for version in response['Versions']:
version_ids.append(version['VersionId'])
version_ids.reverse()
check_obj_versions(client, bucket_name, key, version_ids, contents)
for idx in range(num_versions):
remove_obj_version(client, bucket_name, key, version_ids, contents, idx)
eq(len(version_ids), 0)
eq(len(version_ids), len(contents))
@attr(resource='object')
@attr(method='multipart')
@attr(operation='list versioned objects')
@attr(assertion='everything works')
@attr('versioning')
def test_versioning_obj_list_marker():
bucket_name = get_new_bucket()
client = get_client()
check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
key = 'testobj'
key2 = 'testobj-1'
num_versions = 5
contents = []
version_ids = []
contents2 = []
version_ids2 = []
# for key #1
for i in range(num_versions):
body = 'content-{i}'.format(i=i)
response = client.put_object(Bucket=bucket_name, Key=key, Body=body)
version_id = response['VersionId']
contents.append(body)
version_ids.append(version_id)
# for key #2
for i in range(num_versions):
body = 'content-{i}'.format(i=i)
response = client.put_object(Bucket=bucket_name, Key=key2, Body=body)
version_id = response['VersionId']
contents2.append(body)
version_ids2.append(version_id)
response = client.list_object_versions(Bucket=bucket_name)
versions = response['Versions']
    # list_object_versions returns versions newest-first, while version_ids
    # and contents are ordered oldest-first, so reverse before comparing
versions.reverse()
i = 0
# test the last 5 created objects first
for i in range(5):
version = versions[i]
eq(version['VersionId'], version_ids2[i])
eq(version['Key'], key2)
check_obj_content(client, bucket_name, key2, version['VersionId'], contents2[i])
i += 1
    # then the first 5 created objects (i continues from where the previous loop left off)
for j in range(5):
version = versions[i]
eq(version['VersionId'], version_ids[j])
eq(version['Key'], key)
check_obj_content(client, bucket_name, key, version['VersionId'], contents[j])
i += 1
@attr(resource='object')
@attr(method='multipart')
@attr(operation='create and test versioned object copying')
@attr(assertion='everything works')
@attr('versioning')
def test_versioning_copy_obj_version():
bucket_name = get_new_bucket()
client = get_client()
check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
key = 'testobj'
num_versions = 3
(version_ids, contents) = create_multiple_versions(client, bucket_name, key, num_versions)
for i in range(num_versions):
new_key_name = 'key_{i}'.format(i=i)
copy_source = {'Bucket': bucket_name, 'Key': key, 'VersionId': version_ids[i]}
client.copy_object(Bucket=bucket_name, CopySource=copy_source, Key=new_key_name)
response = client.get_object(Bucket=bucket_name, Key=new_key_name)
body = _get_body(response)
eq(body, contents[i])
another_bucket_name = get_new_bucket()
for i in range(num_versions):
new_key_name = 'key_{i}'.format(i=i)
copy_source = {'Bucket': bucket_name, 'Key': key, 'VersionId': version_ids[i]}
client.copy_object(Bucket=another_bucket_name, CopySource=copy_source, Key=new_key_name)
response = client.get_object(Bucket=another_bucket_name, Key=new_key_name)
body = _get_body(response)
eq(body, contents[i])
new_key_name = 'new_key'
copy_source = {'Bucket': bucket_name, 'Key': key}
client.copy_object(Bucket=another_bucket_name, CopySource=copy_source, Key=new_key_name)
response = client.get_object(Bucket=another_bucket_name, Key=new_key_name)
body = _get_body(response)
eq(body, contents[-1])
@attr(resource='object')
@attr(method='delete')
@attr(operation='delete multiple versions')
@attr(assertion='deletes multiple versions of an object with a single call')
@attr('versioning')
def test_versioning_multi_object_delete():
bucket_name = get_new_bucket()
client = get_client()
check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
key = 'key'
num_versions = 2
(version_ids, contents) = create_multiple_versions(client, bucket_name, key, num_versions)
response = client.list_object_versions(Bucket=bucket_name)
versions = response['Versions']
versions.reverse()
for version in versions:
client.delete_object(Bucket=bucket_name, Key=key, VersionId=version['VersionId'])
response = client.list_object_versions(Bucket=bucket_name)
eq(('Versions' in response), False)
# now remove again, should all succeed due to idempotency
for version in versions:
client.delete_object(Bucket=bucket_name, Key=key, VersionId=version['VersionId'])
response = client.list_object_versions(Bucket=bucket_name)
eq(('Versions' in response), False)
@attr(resource='object')
@attr(method='delete')
@attr(operation='delete multiple versions')
@attr(assertion='deletes multiple versions of an object and delete marker with a single call')
@attr('versioning')
def test_versioning_multi_object_delete_with_marker():
bucket_name = get_new_bucket()
client = get_client()
check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
key = 'key'
num_versions = 2
(version_ids, contents) = create_multiple_versions(client, bucket_name, key, num_versions)
client.delete_object(Bucket=bucket_name, Key=key)
response = client.list_object_versions(Bucket=bucket_name)
versions = response['Versions']
delete_markers = response['DeleteMarkers']
version_ids.append(delete_markers[0]['VersionId'])
eq(len(version_ids), 3)
eq(len(delete_markers), 1)
for version in versions:
client.delete_object(Bucket=bucket_name, Key=key, VersionId=version['VersionId'])
for delete_marker in delete_markers:
client.delete_object(Bucket=bucket_name, Key=key, VersionId=delete_marker['VersionId'])
response = client.list_object_versions(Bucket=bucket_name)
eq(('Versions' in response), False)
eq(('DeleteMarkers' in response), False)
    # now remove again, should all succeed due to idempotency
    for version in versions:
        client.delete_object(Bucket=bucket_name, Key=key, VersionId=version['VersionId'])
    for delete_marker in delete_markers:
        client.delete_object(Bucket=bucket_name, Key=key, VersionId=delete_marker['VersionId'])
response = client.list_object_versions(Bucket=bucket_name)
eq(('Versions' in response), False)
eq(('DeleteMarkers' in response), False)
@attr(resource='object')
@attr(method='delete')
@attr(operation='multi delete create marker')
@attr(assertion='returns correct marker version id')
@attr('versioning')
def test_versioning_multi_object_delete_with_marker_create():
bucket_name = get_new_bucket()
client = get_client()
check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
key = 'key'
response = client.delete_object(Bucket=bucket_name, Key=key)
delete_marker_version_id = response['VersionId']
response = client.list_object_versions(Bucket=bucket_name)
delete_markers = response['DeleteMarkers']
eq(len(delete_markers), 1)
eq(delete_marker_version_id, delete_markers[0]['VersionId'])
eq(key, delete_markers[0]['Key'])
@attr(resource='object')
@attr(method='put')
@attr(operation='change acl on an object version changes specific version')
@attr(assertion='works')
@attr('versioning')
def test_versioned_object_acl():
bucket_name = get_new_bucket()
client = get_client()
check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
key = 'xyz'
num_versions = 3
(version_ids, contents) = create_multiple_versions(client, bucket_name, key, num_versions)
version_id = version_ids[1]
response = client.get_object_acl(Bucket=bucket_name, Key=key, VersionId=version_id)
display_name = get_main_display_name()
user_id = get_main_user_id()
eq(response['Owner']['DisplayName'], display_name)
eq(response['Owner']['ID'], user_id)
grants = response['Grants']
default_policy = [
dict(
Permission='FULL_CONTROL',
ID=user_id,
DisplayName=display_name,
URI=None,
EmailAddress=None,
Type='CanonicalUser',
),
]
check_grants(grants, default_policy)
client.put_object_acl(ACL='public-read',Bucket=bucket_name, Key=key, VersionId=version_id)
response = client.get_object_acl(Bucket=bucket_name, Key=key, VersionId=version_id)
grants = response['Grants']
check_grants(
grants,
[
dict(
Permission='READ',
ID=None,
DisplayName=None,
URI='http://acs.amazonaws.com/groups/global/AllUsers',
EmailAddress=None,
Type='Group',
),
dict(
Permission='FULL_CONTROL',
ID=user_id,
DisplayName=display_name,
URI=None,
EmailAddress=None,
Type='CanonicalUser',
),
],
)
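    # a fresh PUT creates a new current version, which should come back with
    # only the default owner FULL_CONTROL grant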
client.put_object(Bucket=bucket_name, Key=key)
response = client.get_object_acl(Bucket=bucket_name, Key=key)
grants = response['Grants']
check_grants(grants, default_policy)
@attr(resource='object')
@attr(method='put')
@attr(operation='change acl on an object with no version specified changes latest version')
@attr(assertion='works')
@attr('versioning')
def test_versioned_object_acl_no_version_specified():
bucket_name = get_new_bucket()
client = get_client()
check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
key = 'xyz'
num_versions = 3
(version_ids, contents) = create_multiple_versions(client, bucket_name, key, num_versions)
response = client.get_object(Bucket=bucket_name, Key=key)
version_id = response['VersionId']
response = client.get_object_acl(Bucket=bucket_name, Key=key, VersionId=version_id)
display_name = get_main_display_name()
user_id = get_main_user_id()
eq(response['Owner']['DisplayName'], display_name)
eq(response['Owner']['ID'], user_id)
grants = response['Grants']
default_policy = [
dict(
Permission='FULL_CONTROL',
ID=user_id,
DisplayName=display_name,
URI=None,
EmailAddress=None,
Type='CanonicalUser',
),
]
check_grants(grants, default_policy)
client.put_object_acl(ACL='public-read',Bucket=bucket_name, Key=key)
response = client.get_object_acl(Bucket=bucket_name, Key=key, VersionId=version_id)
grants = response['Grants']
check_grants(
grants,
[
dict(
Permission='READ',
ID=None,
DisplayName=None,
URI='http://acs.amazonaws.com/groups/global/AllUsers',
EmailAddress=None,
Type='Group',
),
dict(
Permission='FULL_CONTROL',
ID=user_id,
DisplayName=display_name,
URI=None,
EmailAddress=None,
Type='CanonicalUser',
),
],
)
def _do_create_object(client, bucket_name, key, i):
body = 'data {i}'.format(i=i)
client.put_object(Bucket=bucket_name, Key=key, Body=body)
def _do_remove_ver(client, bucket_name, key, version_id):
client.delete_object(Bucket=bucket_name, Key=key, VersionId=version_id)
def _do_create_versioned_obj_concurrent(client, bucket_name, key, num):
t = []
for i in range(num):
thr = threading.Thread(target = _do_create_object, args=(client, bucket_name, key, i))
thr.start()
t.append(thr)
return t
def _do_clear_versioned_bucket_concurrent(client, bucket_name):
t = []
response = client.list_object_versions(Bucket=bucket_name)
for version in response.get('Versions', []):
thr = threading.Thread(target = _do_remove_ver, args=(client, bucket_name, version['Key'], version['VersionId']))
thr.start()
t.append(thr)
return t
def _do_wait_completion(t):
for thr in t:
thr.join()
@attr(resource='object')
@attr(method='put')
@attr(operation='concurrent creation of objects, concurrent removal')
@attr(assertion='works')
# TODO: remove fails_on_rgw when https://tracker.ceph.com/issues/39142 is resolved
@attr('fails_on_rgw')
@attr('versioning')
def test_versioned_concurrent_object_create_concurrent_remove():
bucket_name = get_new_bucket()
client = get_client()
check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
key = 'myobj'
num_versions = 5
for i in range(5):
t = _do_create_versioned_obj_concurrent(client, bucket_name, key, num_versions)
_do_wait_completion(t)
response = client.list_object_versions(Bucket=bucket_name)
versions = response['Versions']
eq(len(versions), num_versions)
t = _do_clear_versioned_bucket_concurrent(client, bucket_name)
_do_wait_completion(t)
response = client.list_object_versions(Bucket=bucket_name)
eq(('Versions' in response), False)
@attr(resource='object')
@attr(method='put')
@attr(operation='concurrent creation and removal of objects')
@attr(assertion='works')
@attr('versioning')
def test_versioned_concurrent_object_create_and_remove():
bucket_name = get_new_bucket()
client = get_client()
check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
key = 'myobj'
num_versions = 3
all_threads = []
for i in range(3):
t = _do_create_versioned_obj_concurrent(client, bucket_name, key, num_versions)
all_threads.append(t)
t = _do_clear_versioned_bucket_concurrent(client, bucket_name)
all_threads.append(t)
for t in all_threads:
_do_wait_completion(t)
t = _do_clear_versioned_bucket_concurrent(client, bucket_name)
_do_wait_completion(t)
response = client.list_object_versions(Bucket=bucket_name)
eq(('Versions' in response), False)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='set lifecycle config')
@attr('lifecycle')
def test_lifecycle_set():
bucket_name = get_new_bucket()
client = get_client()
rules=[{'ID': 'rule1', 'Expiration': {'Days': 1}, 'Prefix': 'test1/', 'Status':'Enabled'},
{'ID': 'rule2', 'Expiration': {'Days': 2}, 'Prefix': 'test2/', 'Status':'Disabled'}]
lifecycle = {'Rules': rules}
response = client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='get lifecycle config')
@attr('lifecycle')
def test_lifecycle_get():
bucket_name = get_new_bucket()
client = get_client()
rules=[{'ID': 'test1/', 'Expiration': {'Days': 31}, 'Prefix': 'test1/', 'Status':'Enabled'},
{'ID': 'test2/', 'Expiration': {'Days': 120}, 'Prefix': 'test2/', 'Status':'Enabled'}]
lifecycle = {'Rules': rules}
client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
response = client.get_bucket_lifecycle_configuration(Bucket=bucket_name)
eq(response['Rules'], rules)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='get lifecycle config no id')
@attr('lifecycle')
def test_lifecycle_get_no_id():
bucket_name = get_new_bucket()
client = get_client()
rules=[{'Expiration': {'Days': 31}, 'Prefix': 'test1/', 'Status':'Enabled'},
{'Expiration': {'Days': 120}, 'Prefix': 'test2/', 'Status':'Enabled'}]
lifecycle = {'Rules': rules}
client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
response = client.get_bucket_lifecycle_configuration(Bucket=bucket_name)
current_lc = response['Rules']
Rule = namedtuple('Rule',['prefix','status','days'])
rules = {'rule1' : Rule('test1/','Enabled',31),
'rule2' : Rule('test2/','Enabled',120)}
for lc_rule in current_lc:
if lc_rule['Prefix'] == rules['rule1'].prefix:
eq(lc_rule['Expiration']['Days'], rules['rule1'].days)
eq(lc_rule['Status'], rules['rule1'].status)
assert 'ID' in lc_rule
elif lc_rule['Prefix'] == rules['rule2'].prefix:
eq(lc_rule['Expiration']['Days'], rules['rule2'].days)
eq(lc_rule['Status'], rules['rule2'].status)
assert 'ID' in lc_rule
else:
# neither of the rules we supplied was returned, something wrong
print("rules not right")
assert False
# The test harness for lifecycle is configured to treat days as 10 second intervals.
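# For example, with that 10-second interval a rule with 'Days': 1 expires
# objects after roughly 10 seconds and 'Days': 4 after roughly 40 seconds,
# which is why the tests below sleep 28s (past the first expiration) and then
# a further 10s and 20s (past the second, at ~58s total).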
@attr(resource='bucket')
@attr(method='put')
@attr(operation='test lifecycle expiration')
@attr('lifecycle')
@attr('lifecycle_expiration')
@attr('fails_on_aws')
def test_lifecycle_expiration():
bucket_name = _create_objects(keys=['expire1/foo', 'expire1/bar', 'keep2/foo',
'keep2/bar', 'expire3/foo', 'expire3/bar'])
client = get_client()
rules=[{'ID': 'rule1', 'Expiration': {'Days': 1}, 'Prefix': 'expire1/', 'Status':'Enabled'},
{'ID': 'rule2', 'Expiration': {'Days': 4}, 'Prefix': 'expire3/', 'Status':'Enabled'}]
lifecycle = {'Rules': rules}
client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
response = client.list_objects(Bucket=bucket_name)
init_objects = response['Contents']
time.sleep(28)
response = client.list_objects(Bucket=bucket_name)
expire1_objects = response['Contents']
time.sleep(10)
response = client.list_objects(Bucket=bucket_name)
keep2_objects = response['Contents']
time.sleep(20)
response = client.list_objects(Bucket=bucket_name)
expire3_objects = response['Contents']
eq(len(init_objects), 6)
eq(len(expire1_objects), 4)
eq(len(keep2_objects), 4)
eq(len(expire3_objects), 2)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='test lifecycle expiration with list-objects-v2')
@attr('lifecycle')
@attr('lifecycle_expiration')
@attr('fails_on_aws')
@attr('list-objects-v2')
def test_lifecyclev2_expiration():
bucket_name = _create_objects(keys=['expire1/foo', 'expire1/bar', 'keep2/foo',
'keep2/bar', 'expire3/foo', 'expire3/bar'])
client = get_client()
rules=[{'ID': 'rule1', 'Expiration': {'Days': 1}, 'Prefix': 'expire1/', 'Status':'Enabled'},
{'ID': 'rule2', 'Expiration': {'Days': 4}, 'Prefix': 'expire3/', 'Status':'Enabled'}]
lifecycle = {'Rules': rules}
client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
response = client.list_objects_v2(Bucket=bucket_name)
init_objects = response['Contents']
time.sleep(28)
response = client.list_objects_v2(Bucket=bucket_name)
expire1_objects = response['Contents']
time.sleep(10)
response = client.list_objects_v2(Bucket=bucket_name)
keep2_objects = response['Contents']
time.sleep(20)
response = client.list_objects_v2(Bucket=bucket_name)
expire3_objects = response['Contents']
eq(len(init_objects), 6)
eq(len(expire1_objects), 4)
eq(len(keep2_objects), 4)
eq(len(expire3_objects), 2)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='test lifecycle expiration on versioning enabled bucket')
@attr('lifecycle')
@attr('lifecycle_expiration')
@attr('fails_on_aws')
def test_lifecycle_expiration_versioning_enabled():
bucket_name = get_new_bucket()
client = get_client()
check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
create_multiple_versions(client, bucket_name, "test1/a", 1)
client.delete_object(Bucket=bucket_name, Key="test1/a")
rules=[{'ID': 'rule1', 'Expiration': {'Days': 1}, 'Prefix': 'test1/', 'Status':'Enabled'}]
lifecycle = {'Rules': rules}
client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
time.sleep(30)
response = client.list_object_versions(Bucket=bucket_name)
versions = response['Versions']
delete_markers = response['DeleteMarkers']
eq(len(versions), 1)
eq(len(delete_markers), 1)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='test lifecycle expiration with 1 tag')
@attr('lifecycle')
@attr('lifecycle_expiration')
@attr('fails_on_aws')
def test_lifecycle_expiration_tags1():
bucket_name = get_new_bucket()
client = get_client()
tom_key = 'days1/tom'
tom_tagset = {'TagSet':
[{'Key': 'tom', 'Value': 'sawyer'}]}
client.put_object(Bucket=bucket_name, Key=tom_key, Body='tom_body')
response = client.put_object_tagging(Bucket=bucket_name, Key=tom_key,
Tagging=tom_tagset)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
lifecycle_config = {
'Rules': [
{
'Expiration': {
'Days': 1,
},
'ID': 'rule_tag1',
'Filter': {
'Prefix': 'days1/',
'Tag': {
'Key': 'tom',
'Value': 'sawyer'
},
},
'Status': 'Enabled',
},
]
}
response = client.put_bucket_lifecycle_configuration(
Bucket=bucket_name, LifecycleConfiguration=lifecycle_config)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
    time.sleep(28)
    # re-list the bucket so the check below inspects the post-expiration
    # listing rather than the lifecycle-configuration response
    response = client.list_objects(Bucket=bucket_name)
    try:
        expire_objects = response['Contents']
except KeyError:
expire_objects = []
eq(len(expire_objects), 0)
# factor out common setup code
def setup_lifecycle_tags2(client, bucket_name):
tom_key = 'days1/tom'
tom_tagset = {'TagSet':
[{'Key': 'tom', 'Value': 'sawyer'}]}
client.put_object(Bucket=bucket_name, Key=tom_key, Body='tom_body')
response = client.put_object_tagging(Bucket=bucket_name, Key=tom_key,
Tagging=tom_tagset)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
huck_key = 'days1/huck'
huck_tagset = {
'TagSet':
[{'Key': 'tom', 'Value': 'sawyer'},
{'Key': 'huck', 'Value': 'finn'}]}
client.put_object(Bucket=bucket_name, Key=huck_key, Body='huck_body')
response = client.put_object_tagging(Bucket=bucket_name, Key=huck_key,
Tagging=huck_tagset)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
lifecycle_config = {
'Rules': [
{
'Expiration': {
'Days': 1,
},
'ID': 'rule_tag1',
'Filter': {
'Prefix': 'days1/',
'Tag': {
'Key': 'tom',
'Value': 'sawyer'
},
'And': {
'Prefix': 'days1',
'Tags': [
{
'Key': 'huck',
'Value': 'finn'
},
]
}
},
'Status': 'Enabled',
},
]
}
response = client.put_bucket_lifecycle_configuration(
Bucket=bucket_name, LifecycleConfiguration=lifecycle_config)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
return response
@attr(resource='bucket')
@attr(method='put')
@attr(operation='test lifecycle expiration with 2 tags')
@attr('lifecycle')
@attr('lifecycle_expiration')
@attr('fails_on_aws')
def test_lifecycle_expiration_tags2():
bucket_name = get_new_bucket()
client = get_client()
response = setup_lifecycle_tags2(client, bucket_name)
time.sleep(28)
response = client.list_objects(Bucket=bucket_name)
expire1_objects = response['Contents']
eq(len(expire1_objects), 1)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='test lifecycle expiration with versioning and 2 tags')
@attr('lifecycle')
@attr('lifecycle_expiration')
@attr('fails_on_aws')
def test_lifecycle_expiration_versioned_tags2():
bucket_name = get_new_bucket()
client = get_client()
# mix in versioning
check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
response = setup_lifecycle_tags2(client, bucket_name)
time.sleep(28)
response = client.list_objects(Bucket=bucket_name)
expire1_objects = response['Contents']
eq(len(expire1_objects), 1)
# setup for a scenario based on Vidushi Mishra's report in rhbz#1877737
def setup_lifecycle_noncur_tags(client, bucket_name, days):
    # first create and tag the object versions (10 versions of a single key)
key = "myobject_"
tagset = {'TagSet':
[{'Key': 'vidushi', 'Value': 'mishra'}]}
for ix in range(10):
body = "%s v%d" % (key, ix)
response = client.put_object(Bucket=bucket_name, Key=key, Body=body)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
response = client.put_object_tagging(Bucket=bucket_name, Key=key,
Tagging=tagset)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
lifecycle_config = {
'Rules': [
{
'NoncurrentVersionExpiration': {
'NoncurrentDays': days,
},
'ID': 'rule_tag1',
'Filter': {
'Prefix': '',
'Tag': {
'Key': 'vidushi',
'Value': 'mishra'
},
},
'Status': 'Enabled',
},
]
}
response = client.put_bucket_lifecycle_configuration(
Bucket=bucket_name, LifecycleConfiguration=lifecycle_config)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
return response
def verify_lifecycle_expiration_noncur_tags(client, bucket_name, secs):
time.sleep(secs)
    response = client.list_object_versions(Bucket=bucket_name)
    # an empty bucket returns no 'Versions' key
    objs_list = response.get('Versions', [])
return len(objs_list)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='test lifecycle noncurrent expiration with 1 tag filter')
@attr('lifecycle')
@attr('lifecycle_expiration')
@attr('fails_on_aws')
def test_lifecycle_expiration_noncur_tags1():
bucket_name = get_new_bucket()
client = get_client()
check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
# create 10 object versions (9 noncurrent) and a tag-filter
# noncurrent version expiration at 4 "days"
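    # (with the harness treating days as 10-second intervals, 4 "days" is
    # roughly 40 seconds, so the noncurrent versions should survive the first
    # check at T+20 and be gone by T+60)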
response = setup_lifecycle_noncur_tags(client, bucket_name, 4)
num_objs = verify_lifecycle_expiration_noncur_tags(
client, bucket_name, 20)
# at T+20, 10 objects should exist
eq(num_objs, 10)
num_objs = verify_lifecycle_expiration_noncur_tags(
client, bucket_name, 40)
# at T+60, only the current object version should exist
eq(num_objs, 1)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='id too long in lifecycle rule')
@attr('lifecycle')
@attr(assertion='fails 400')
def test_lifecycle_id_too_long():
bucket_name = get_new_bucket()
client = get_client()
rules=[{'ID': 256*'a', 'Expiration': {'Days': 2}, 'Prefix': 'test1/', 'Status':'Enabled'}]
lifecycle = {'Rules': rules}
e = assert_raises(ClientError, client.put_bucket_lifecycle_configuration, Bucket=bucket_name, LifecycleConfiguration=lifecycle)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
eq(error_code, 'InvalidArgument')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='same id')
@attr('lifecycle')
@attr(assertion='fails 400')
def test_lifecycle_same_id():
bucket_name = get_new_bucket()
client = get_client()
rules=[{'ID': 'rule1', 'Expiration': {'Days': 1}, 'Prefix': 'test1/', 'Status':'Enabled'},
{'ID': 'rule1', 'Expiration': {'Days': 2}, 'Prefix': 'test2/', 'Status':'Enabled'}]
lifecycle = {'Rules': rules}
e = assert_raises(ClientError, client.put_bucket_lifecycle_configuration, Bucket=bucket_name, LifecycleConfiguration=lifecycle)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
eq(error_code, 'InvalidArgument')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='invalid status in lifecycle rule')
@attr('lifecycle')
@attr(assertion='fails 400')
def test_lifecycle_invalid_status():
bucket_name = get_new_bucket()
client = get_client()
rules=[{'ID': 'rule1', 'Expiration': {'Days': 2}, 'Prefix': 'test1/', 'Status':'enabled'}]
lifecycle = {'Rules': rules}
e = assert_raises(ClientError, client.put_bucket_lifecycle_configuration, Bucket=bucket_name, LifecycleConfiguration=lifecycle)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
eq(error_code, 'MalformedXML')
rules=[{'ID': 'rule1', 'Expiration': {'Days': 2}, 'Prefix': 'test1/', 'Status':'disabled'}]
lifecycle = {'Rules': rules}
e = assert_raises(ClientError, client.put_bucket_lifecycle, Bucket=bucket_name, LifecycleConfiguration=lifecycle)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
eq(error_code, 'MalformedXML')
rules=[{'ID': 'rule1', 'Expiration': {'Days': 2}, 'Prefix': 'test1/', 'Status':'invalid'}]
lifecycle = {'Rules': rules}
e = assert_raises(ClientError, client.put_bucket_lifecycle_configuration, Bucket=bucket_name, LifecycleConfiguration=lifecycle)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
eq(error_code, 'MalformedXML')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='set lifecycle config with expiration date')
@attr('lifecycle')
def test_lifecycle_set_date():
bucket_name = get_new_bucket()
client = get_client()
rules=[{'ID': 'rule1', 'Expiration': {'Date': '2017-09-27'}, 'Prefix': 'test1/', 'Status':'Enabled'}]
lifecycle = {'Rules': rules}
response = client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='set lifecycle config with not iso8601 date')
@attr('lifecycle')
@attr(assertion='fails 400')
def test_lifecycle_set_invalid_date():
bucket_name = get_new_bucket()
client = get_client()
rules=[{'ID': 'rule1', 'Expiration': {'Date': '20200101'}, 'Prefix': 'test1/', 'Status':'Enabled'}]
lifecycle = {'Rules': rules}
e = assert_raises(ClientError, client.put_bucket_lifecycle_configuration, Bucket=bucket_name, LifecycleConfiguration=lifecycle)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='test lifecycle expiration with date')
@attr('lifecycle')
@attr('lifecycle_expiration')
@attr('fails_on_aws')
def test_lifecycle_expiration_date():
bucket_name = _create_objects(keys=['past/foo', 'future/bar'])
client = get_client()
rules=[{'ID': 'rule1', 'Expiration': {'Date': '2015-01-01'}, 'Prefix': 'past/', 'Status':'Enabled'},
{'ID': 'rule2', 'Expiration': {'Date': '2030-01-01'}, 'Prefix': 'future/', 'Status':'Enabled'}]
lifecycle = {'Rules': rules}
client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
response = client.list_objects(Bucket=bucket_name)
init_objects = response['Contents']
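    # lifecycle processing is asynchronous; this assumes the backend runs the
    # rules quickly (e.g. an RGW configured with a short lc debug interval),
    # so the past/ object should be gone by the end of the sleep below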
time.sleep(20)
response = client.list_objects(Bucket=bucket_name)
expire_objects = response['Contents']
eq(len(init_objects), 2)
eq(len(expire_objects), 1)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='test lifecycle expiration days 0')
@attr('lifecycle')
@attr('lifecycle_expiration')
def test_lifecycle_expiration_days0():
bucket_name = _create_objects(keys=['days0/foo', 'days0/bar'])
client = get_client()
rules=[{'Expiration': {'Days': 0}, 'ID': 'rule1', 'Prefix': 'days0/', 'Status':'Enabled'}]
lifecycle = {'Rules': rules}
# days: 0 is legal in a transition rule, but not legal in an
# expiration rule
response_code = ""
try:
response = client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
except botocore.exceptions.ClientError as e:
response_code = e.response['Error']['Code']
eq(response_code, 'InvalidArgument')
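# Helper: install a single expiration rule on the bucket, then upload one
# object under rule_prefix so the PUT response carries an x-amz-expiration header.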
def setup_lifecycle_expiration(client, bucket_name, rule_id, delta_days,
rule_prefix):
rules=[{'ID': rule_id,
'Expiration': {'Days': delta_days}, 'Prefix': rule_prefix,
'Status':'Enabled'}]
lifecycle = {'Rules': rules}
response = client.put_bucket_lifecycle_configuration(
Bucket=bucket_name, LifecycleConfiguration=lifecycle)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
key = rule_prefix + 'foo'
body = 'bar'
response = client.put_object(Bucket=bucket_name, Key=key, Body=body)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
return response
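# Helper: parse the x-amz-expiration header, which typically looks like
#   expiry-date="Thu, 01 Jan 2030 00:00:00 GMT", rule-id="rule1"
# and verify the expiry date is delta_days after start_time and the rule id matches.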
def check_lifecycle_expiration_header(response, start_time, rule_id,
delta_days):
expr_exists = ('x-amz-expiration' in response['ResponseMetadata']['HTTPHeaders'])
if (not expr_exists):
return False
expr_hdr = response['ResponseMetadata']['HTTPHeaders']['x-amz-expiration']
m = re.search(r'expiry-date="(.+)", rule-id="(.+)"', expr_hdr)
expiration = dateutil.parser.parse(m.group(1))
days_to_expire = ((expiration.replace(tzinfo=None) - start_time).days == delta_days)
rule_eq_id = (m.group(2) == rule_id)
return days_to_expire and rule_eq_id
@attr(resource='bucket')
@attr(method='put')
@attr(operation='test lifecycle expiration header put')
@attr('lifecycle')
@attr('lifecycle_expiration')
def test_lifecycle_expiration_header_put():
bucket_name = get_new_bucket()
client = get_client()
now = datetime.datetime.now(None)
response = setup_lifecycle_expiration(
client, bucket_name, 'rule1', 1, 'days1/')
eq(check_lifecycle_expiration_header(response, now, 'rule1', 1), True)
@attr(resource='bucket')
@attr(method='head')
@attr(operation='test lifecycle expiration header head')
@attr('lifecycle')
@attr('lifecycle_expiration')
def test_lifecycle_expiration_header_head():
bucket_name = get_new_bucket()
client = get_client()
now = datetime.datetime.now(None)
response = setup_lifecycle_expiration(
client, bucket_name, 'rule1', 1, 'days1/')
key = 'days1/' + 'foo'
# stat the object, check header
response = client.head_object(Bucket=bucket_name, Key=key)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
eq(check_lifecycle_expiration_header(response, now, 'rule1', 1), True)
@attr(resource='bucket')
@attr(method='head')
@attr(operation='test lifecycle expiration header head with tags')
@attr('lifecycle')
@attr('lifecycle_expiration')
def test_lifecycle_expiration_header_tags_head():
bucket_name = get_new_bucket()
client = get_client()
lifecycle={
"Rules": [
{
"Filter": {
"Tag": {"Key": "key1", "Value": "tag1"}
},
"Status": "Enabled",
"Expiration": {
"Days": 1
},
"ID": "rule1"
},
]
}
response = client.put_bucket_lifecycle_configuration(
Bucket=bucket_name, LifecycleConfiguration=lifecycle)
key1 = "obj_key1"
body1 = "obj_key1_body"
tags1={'TagSet': [{'Key': 'key1', 'Value': 'tag1'},
{'Key': 'key5','Value': 'tag5'}]}
response = client.put_object(Bucket=bucket_name, Key=key1, Body=body1)
response = client.put_object_tagging(Bucket=bucket_name, Key=key1,Tagging=tags1)
# stat the object, check header
response = client.head_object(Bucket=bucket_name, Key=key1)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
eq(check_lifecycle_expiration_header(response, datetime.datetime.now(None), 'rule1', 1), True)
# test that header is not returning when it should not
lifecycle={
"Rules": [
{
"Filter": {
"Tag": {"Key": "key2", "Value": "tag1"}
},
"Status": "Enabled",
"Expiration": {
"Days": 1
},
"ID": "rule1"
},
]
}
response = client.put_bucket_lifecycle_configuration(
Bucket=bucket_name, LifecycleConfiguration=lifecycle)
# stat the object, check header
response = client.head_object(Bucket=bucket_name, Key=key1)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
eq(check_lifecycle_expiration_header(response, datetime.datetime.now(None), 'rule1', 1), False)
@attr(resource='bucket')
@attr(method='head')
@attr(operation='test lifecycle expiration header head with tags and And')
@attr('lifecycle')
@attr('lifecycle_expiration')
def test_lifecycle_expiration_header_and_tags_head():
now = datetime.datetime.now(None)
bucket_name = get_new_bucket()
client = get_client()
lifecycle={
"Rules": [
{
"Filter": {
"And": {
"Tags": [
{
"Key": "key1",
"Value": "tag1"
},
{
"Key": "key5",
"Value": "tag6"
}
]
}
},
"Status": "Enabled",
"Expiration": {
"Days": 1
},
"ID": "rule1"
},
]
}
response = client.put_bucket_lifecycle_configuration(
Bucket=bucket_name, LifecycleConfiguration=lifecycle)
key1 = "obj_key1"
body1 = "obj_key1_body"
tags1={'TagSet': [{'Key': 'key1', 'Value': 'tag1'},
{'Key': 'key5','Value': 'tag5'}]}
response = client.put_object(Bucket=bucket_name, Key=key1, Body=body1)
response = client.put_object_tagging(Bucket=bucket_name, Key=key1,Tagging=tags1)
# stat the object, check header
response = client.head_object(Bucket=bucket_name, Key=key1)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
eq(check_lifecycle_expiration_header(response, datetime.datetime.now(None), 'rule1', 1), False)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='set lifecycle config with noncurrent version expiration')
@attr('lifecycle')
def test_lifecycle_set_noncurrent():
bucket_name = _create_objects(keys=['past/foo', 'future/bar'])
client = get_client()
rules=[{'ID': 'rule1', 'NoncurrentVersionExpiration': {'NoncurrentDays': 2}, 'Prefix': 'past/', 'Status':'Enabled'},
{'ID': 'rule2', 'NoncurrentVersionExpiration': {'NoncurrentDays': 3}, 'Prefix': 'future/', 'Status':'Enabled'}]
lifecycle = {'Rules': rules}
response = client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='test lifecycle non-current version expiration')
@attr('lifecycle')
@attr('lifecycle_expiration')
@attr('fails_on_aws')
def test_lifecycle_noncur_expiration():
bucket_name = get_new_bucket()
client = get_client()
check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
create_multiple_versions(client, bucket_name, "test1/a", 3)
# not checking the object contents on the second run, because the function doesn't support multiple checks
create_multiple_versions(client, bucket_name, "test2/abc", 3, check_versions=False)
response = client.list_object_versions(Bucket=bucket_name)
init_versions = response['Versions']
rules=[{'ID': 'rule1', 'NoncurrentVersionExpiration': {'NoncurrentDays': 2}, 'Prefix': 'test1/', 'Status':'Enabled'}]
lifecycle = {'Rules': rules}
client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
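    # two keys with 3 versions each = 6 versions; the rule should expire the
    # 2 noncurrent versions under test1/, leaving 4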
time.sleep(50)
response = client.list_object_versions(Bucket=bucket_name)
expire_versions = response['Versions']
eq(len(init_versions), 6)
eq(len(expire_versions), 4)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='set lifecycle config with delete marker expiration')
@attr('lifecycle')
def test_lifecycle_set_deletemarker():
bucket_name = get_new_bucket()
client = get_client()
rules=[{'ID': 'rule1', 'Expiration': {'ExpiredObjectDeleteMarker': True}, 'Prefix': 'test1/', 'Status':'Enabled'}]
lifecycle = {'Rules': rules}
response = client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='set lifecycle config with Filter')
@attr('lifecycle')
def test_lifecycle_set_filter():
bucket_name = get_new_bucket()
client = get_client()
rules=[{'ID': 'rule1', 'Expiration': {'ExpiredObjectDeleteMarker': True}, 'Filter': {'Prefix': 'foo'}, 'Status':'Enabled'}]
lifecycle = {'Rules': rules}
response = client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='set lifecycle config with empty Filter')
@attr('lifecycle')
def test_lifecycle_set_empty_filter():
bucket_name = get_new_bucket()
client = get_client()
rules=[{'ID': 'rule1', 'Expiration': {'ExpiredObjectDeleteMarker': True}, 'Filter': {}, 'Status':'Enabled'}]
lifecycle = {'Rules': rules}
response = client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='test lifecycle delete marker expiration')
@attr('lifecycle')
@attr('lifecycle_expiration')
@attr('fails_on_aws')
def test_lifecycle_deletemarker_expiration():
bucket_name = get_new_bucket()
client = get_client()
check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
create_multiple_versions(client, bucket_name, "test1/a", 1)
create_multiple_versions(client, bucket_name, "test2/abc", 1, check_versions=False)
client.delete_object(Bucket=bucket_name, Key="test1/a")
client.delete_object(Bucket=bucket_name, Key="test2/abc")
response = client.list_object_versions(Bucket=bucket_name)
init_versions = response['Versions']
deleted_versions = response['DeleteMarkers']
total_init_versions = init_versions + deleted_versions
rules=[{'ID': 'rule1', 'NoncurrentVersionExpiration': {'NoncurrentDays': 1}, 'Expiration': {'ExpiredObjectDeleteMarker': True}, 'Prefix': 'test1/', 'Status':'Enabled'}]
lifecycle = {'Rules': rules}
client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
time.sleep(50)
response = client.list_object_versions(Bucket=bucket_name)
init_versions = response['Versions']
deleted_versions = response['DeleteMarkers']
total_expire_versions = init_versions + deleted_versions
eq(len(total_init_versions), 4)
eq(len(total_expire_versions), 2)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='set lifecycle config with multipart expiration')
@attr('lifecycle')
def test_lifecycle_set_multipart():
bucket_name = get_new_bucket()
client = get_client()
rules = [
{'ID': 'rule1', 'Prefix': 'test1/', 'Status': 'Enabled',
'AbortIncompleteMultipartUpload': {'DaysAfterInitiation': 2}},
{'ID': 'rule2', 'Prefix': 'test2/', 'Status': 'Disabled',
'AbortIncompleteMultipartUpload': {'DaysAfterInitiation': 3}}
]
lifecycle = {'Rules': rules}
response = client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='test lifecycle multipart expiration')
@attr('lifecycle')
@attr('lifecycle_expiration')
@attr('fails_on_aws')
def test_lifecycle_multipart_expiration():
bucket_name = get_new_bucket()
client = get_client()
key_names = ['test1/a', 'test2/']
upload_ids = []
for key in key_names:
response = client.create_multipart_upload(Bucket=bucket_name, Key=key)
upload_ids.append(response['UploadId'])
response = client.list_multipart_uploads(Bucket=bucket_name)
init_uploads = response['Uploads']
rules = [
{'ID': 'rule1', 'Prefix': 'test1/', 'Status': 'Enabled',
'AbortIncompleteMultipartUpload': {'DaysAfterInitiation': 2}},
]
lifecycle = {'Rules': rules}
response = client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
time.sleep(50)
response = client.list_multipart_uploads(Bucket=bucket_name)
expired_uploads = response['Uploads']
eq(len(init_uploads), 2)
eq(len(expired_uploads), 1)
def _test_encryption_sse_customer_write(file_size):
"""
Tests Create a file of A's, use it to set_contents_from_file.
Create a file of B's, use it to re-set_contents_from_file.
Re-read the contents, and confirm we get B's
"""
bucket_name = get_new_bucket()
client = get_client()
key = 'testobj'
data = 'A'*file_size
sse_client_headers = {
'x-amz-server-side-encryption-customer-algorithm': 'AES256',
'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw=='
}
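    # the raw SSE-C headers are injected with a botocore 'before-call' event
    # handler so the test controls exactly what goes on the wire; the key and
    # MD5 above are fixed base64 test vectors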
lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers))
client.meta.events.register('before-call.s3.PutObject', lf)
client.put_object(Bucket=bucket_name, Key=key, Body=data)
lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers))
client.meta.events.register('before-call.s3.GetObject', lf)
response = client.get_object(Bucket=bucket_name, Key=key)
body = _get_body(response)
eq(body, data)
@attr(resource='object')
@attr(method='put')
@attr(operation='Test SSE-C encrypted transfer 1 byte')
@attr(assertion='success')
@attr('encryption')
def test_encrypted_transfer_1b():
_test_encryption_sse_customer_write(1)
@attr(resource='object')
@attr(method='put')
@attr(operation='Test SSE-C encrypted transfer 1KB')
@attr(assertion='success')
@attr('encryption')
def test_encrypted_transfer_1kb():
_test_encryption_sse_customer_write(1024)
@attr(resource='object')
@attr(method='put')
@attr(operation='Test SSE-C encrypted transfer 1MB')
@attr(assertion='success')
@attr('encryption')
def test_encrypted_transfer_1MB():
_test_encryption_sse_customer_write(1024*1024)
@attr(resource='object')
@attr(method='put')
@attr(operation='Test SSE-C encrypted transfer 13 bytes')
@attr(assertion='success')
@attr('encryption')
def test_encrypted_transfer_13b():
_test_encryption_sse_customer_write(13)
@attr(resource='object')
@attr(method='head')
@attr(operation='Test SSE-C encrypted does perform head properly')
@attr(assertion='success')
@attr('encryption')
def test_encryption_sse_c_method_head():
bucket_name = get_new_bucket()
client = get_client()
data = 'A'*1000
key = 'testobj'
sse_client_headers = {
'x-amz-server-side-encryption-customer-algorithm': 'AES256',
'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw=='
}
lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers))
client.meta.events.register('before-call.s3.PutObject', lf)
client.put_object(Bucket=bucket_name, Key=key, Body=data)
e = assert_raises(ClientError, client.head_object, Bucket=bucket_name, Key=key)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers))
client.meta.events.register('before-call.s3.HeadObject', lf)
response = client.head_object(Bucket=bucket_name, Key=key)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
@attr(resource='object')
@attr(method='put')
@attr(operation='write encrypted with SSE-C and read without SSE-C')
@attr(assertion='operation fails')
@attr('encryption')
def test_encryption_sse_c_present():
bucket_name = get_new_bucket()
client = get_client()
data = 'A'*1000
key = 'testobj'
sse_client_headers = {
'x-amz-server-side-encryption-customer-algorithm': 'AES256',
'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw=='
}
lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers))
client.meta.events.register('before-call.s3.PutObject', lf)
client.put_object(Bucket=bucket_name, Key=key, Body=data)
e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=key)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
@attr(resource='object')
@attr(method='put')
@attr(operation='write encrypted with SSE-C but read with other key')
@attr(assertion='operation fails')
@attr('encryption')
def test_encryption_sse_c_other_key():
bucket_name = get_new_bucket()
client = get_client()
data = 'A'*100
key = 'testobj'
sse_client_headers_A = {
'x-amz-server-side-encryption-customer-algorithm': 'AES256',
'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw=='
}
sse_client_headers_B = {
'x-amz-server-side-encryption-customer-algorithm': 'AES256',
'x-amz-server-side-encryption-customer-key': '6b+WOZ1T3cqZMxgThRcXAQBrS5mXKdDUphvpxptl9/4=',
'x-amz-server-side-encryption-customer-key-md5': 'arxBvwY2V4SiOne6yppVPQ=='
}
lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers_A))
client.meta.events.register('before-call.s3.PutObject', lf)
client.put_object(Bucket=bucket_name, Key=key, Body=data)
lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers_B))
client.meta.events.register('before-call.s3.GetObject', lf)
e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=key)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
@attr(resource='object')
@attr(method='put')
@attr(operation='write encrypted with SSE-C, but md5 is bad')
@attr(assertion='operation fails')
@attr('encryption')
def test_encryption_sse_c_invalid_md5():
bucket_name = get_new_bucket()
client = get_client()
data = 'A'*100
key = 'testobj'
sse_client_headers = {
'x-amz-server-side-encryption-customer-algorithm': 'AES256',
'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
'x-amz-server-side-encryption-customer-key-md5': 'AAAAAAAAAAAAAAAAAAAAAA=='
}
lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers))
client.meta.events.register('before-call.s3.PutObject', lf)
e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key, Body=data)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
@attr(resource='object')
@attr(method='put')
@attr(operation='write encrypted with SSE-C, but do not provide MD5')
@attr(assertion='operation fails')
@attr('encryption')
def test_encryption_sse_c_no_md5():
bucket_name = get_new_bucket()
client = get_client()
data = 'A'*100
key = 'testobj'
sse_client_headers = {
'x-amz-server-side-encryption-customer-algorithm': 'AES256',
'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
}
lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers))
client.meta.events.register('before-call.s3.PutObject', lf)
e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key, Body=data)
@attr(resource='object')
@attr(method='put')
@attr(operation='declare SSE-C but do not provide key')
@attr(assertion='operation fails')
@attr('encryption')
def test_encryption_sse_c_no_key():
bucket_name = get_new_bucket()
client = get_client()
data = 'A'*100
key = 'testobj'
sse_client_headers = {
'x-amz-server-side-encryption-customer-algorithm': 'AES256',
}
lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers))
client.meta.events.register('before-call.s3.PutObject', lf)
e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key, Body=data)
@attr(resource='object')
@attr(method='put')
@attr(operation='Do not declare SSE-C but provide key and MD5')
@attr(assertion='operation successful, no encryption')
@attr('encryption')
def test_encryption_key_no_sse_c():
bucket_name = get_new_bucket()
client = get_client()
data = 'A'*100
key = 'testobj'
sse_client_headers = {
'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw=='
}
lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers))
client.meta.events.register('before-call.s3.PutObject', lf)
e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key, Body=data)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
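# Helper: multipart upload with separate header sets for the initiate call and
# the part uploads, so tests can send mismatched SSE keys or MD5s per phase.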
def _multipart_upload_enc(client, bucket_name, key, size, part_size, init_headers, part_headers, metadata, resend_parts):
"""
generate a multi-part upload for a random file of specifed size,
if requested, generate a list of the parts
return the upload descriptor
"""
    if client is None:
client = get_client()
lf = (lambda **kwargs: kwargs['params']['headers'].update(init_headers))
client.meta.events.register('before-call.s3.CreateMultipartUpload', lf)
    if metadata is None:
response = client.create_multipart_upload(Bucket=bucket_name, Key=key)
else:
response = client.create_multipart_upload(Bucket=bucket_name, Key=key, Metadata=metadata)
upload_id = response['UploadId']
s = ''
parts = []
for i, part in enumerate(generate_random(size, part_size)):
# part_num is necessary because PartNumber for upload_part and in parts must start at 1 and i starts at 0
part_num = i+1
s += part
lf = (lambda **kwargs: kwargs['params']['headers'].update(part_headers))
client.meta.events.register('before-call.s3.UploadPart', lf)
response = client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key, PartNumber=part_num, Body=part)
parts.append({'ETag': response['ETag'].strip('"'), 'PartNumber': part_num})
if i in resend_parts:
lf = (lambda **kwargs: kwargs['params']['headers'].update(part_headers))
client.meta.events.register('before-call.s3.UploadPart', lf)
client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key, PartNumber=part_num, Body=part)
return (upload_id, s, parts)
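# Helper: read the object back in step-sized ranges, re-registering the SSE-C
# headers before each ranged GET, and compare every slice against data.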
def _check_content_using_range_enc(client, bucket_name, key, data, step, enc_headers=None):
response = client.get_object(Bucket=bucket_name, Key=key)
size = response['ContentLength']
for ofs in range(0, size, step):
toread = size - ofs
if toread > step:
toread = step
end = ofs + toread - 1
lf = (lambda **kwargs: kwargs['params']['headers'].update(enc_headers))
client.meta.events.register('before-call.s3.GetObject', lf)
r = 'bytes={s}-{e}'.format(s=ofs, e=end)
response = client.get_object(Bucket=bucket_name, Key=key, Range=r)
read_range = response['ContentLength']
body = _get_body(response)
eq(read_range, toread)
eq(body, data[ofs:end+1])
@attr(resource='object')
@attr(method='put')
@attr(operation='complete multi-part upload')
@attr(assertion='successful')
@attr('encryption')
@attr('fails_on_aws') # allow-unordered is a non-standard extension
def test_encryption_sse_c_multipart_upload():
bucket_name = get_new_bucket()
client = get_client()
key = "multipart_enc"
content_type = 'text/plain'
objlen = 30 * 1024 * 1024
metadata = {'foo': 'bar'}
enc_headers = {
'x-amz-server-side-encryption-customer-algorithm': 'AES256',
'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw==',
'Content-Type': content_type
}
resend_parts = []
(upload_id, data, parts) = _multipart_upload_enc(client, bucket_name, key, objlen,
part_size=5*1024*1024, init_headers=enc_headers, part_headers=enc_headers, metadata=metadata, resend_parts=resend_parts)
lf = (lambda **kwargs: kwargs['params']['headers'].update(enc_headers))
client.meta.events.register('before-call.s3.CompleteMultipartUpload', lf)
client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
response = client.head_bucket(Bucket=bucket_name)
rgw_object_count = int(response['ResponseMetadata']['HTTPHeaders'].get('x-rgw-object-count', 1))
eq(rgw_object_count, 1)
rgw_bytes_used = int(response['ResponseMetadata']['HTTPHeaders'].get('x-rgw-bytes-used', objlen))
eq(rgw_bytes_used, objlen)
lf = (lambda **kwargs: kwargs['params']['headers'].update(enc_headers))
client.meta.events.register('before-call.s3.GetObject', lf)
response = client.get_object(Bucket=bucket_name, Key=key)
eq(response['Metadata'], metadata)
eq(response['ResponseMetadata']['HTTPHeaders']['content-type'], content_type)
body = _get_body(response)
eq(body, data)
size = response['ContentLength']
eq(len(body), size)
_check_content_using_range_enc(client, bucket_name, key, data, 1000000, enc_headers=enc_headers)
_check_content_using_range_enc(client, bucket_name, key, data, 10000000, enc_headers=enc_headers)
@attr(resource='object')
@attr(method='put')
@attr(operation='multipart upload with bad key for uploading chunks')
@attr(assertion='fails 400')
@attr('encryption')
# TODO: remove this fails_on_rgw when I fix it
@attr('fails_on_rgw')
def test_encryption_sse_c_multipart_invalid_chunks_1():
bucket_name = get_new_bucket()
client = get_client()
key = "multipart_enc"
content_type = 'text/plain'
objlen = 30 * 1024 * 1024
metadata = {'foo': 'bar'}
init_headers = {
'x-amz-server-side-encryption-customer-algorithm': 'AES256',
'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw==',
'Content-Type': content_type
}
part_headers = {
'x-amz-server-side-encryption-customer-algorithm': 'AES256',
'x-amz-server-side-encryption-customer-key': '6b+WOZ1T3cqZMxgThRcXAQBrS5mXKdDUphvpxptl9/4=',
'x-amz-server-side-encryption-customer-key-md5': 'arxBvwY2V4SiOne6yppVPQ=='
}
resend_parts = []
e = assert_raises(ClientError, _multipart_upload_enc, client=client, bucket_name=bucket_name,
key=key, size=objlen, part_size=5*1024*1024, init_headers=init_headers, part_headers=part_headers, metadata=metadata, resend_parts=resend_parts)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
@attr(resource='object')
@attr(method='put')
@attr(operation='multipart upload with bad md5 for chunks')
@attr(assertion='fails 400')
@attr('encryption')
# TODO: remove this fails_on_rgw when I fix it
@attr('fails_on_rgw')
def test_encryption_sse_c_multipart_invalid_chunks_2():
bucket_name = get_new_bucket()
client = get_client()
key = "multipart_enc"
content_type = 'text/plain'
objlen = 30 * 1024 * 1024
metadata = {'foo': 'bar'}
init_headers = {
'x-amz-server-side-encryption-customer-algorithm': 'AES256',
'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw==',
'Content-Type': content_type
}
part_headers = {
'x-amz-server-side-encryption-customer-algorithm': 'AES256',
'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
'x-amz-server-side-encryption-customer-key-md5': 'AAAAAAAAAAAAAAAAAAAAAA=='
}
resend_parts = []
e = assert_raises(ClientError, _multipart_upload_enc, client=client, bucket_name=bucket_name,
key=key, size=objlen, part_size=5*1024*1024, init_headers=init_headers, part_headers=part_headers, metadata=metadata, resend_parts=resend_parts)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
@attr(resource='object')
@attr(method='put')
@attr(operation='complete multi-part upload and download with bad key')
@attr(assertion='successful')
@attr('encryption')
def test_encryption_sse_c_multipart_bad_download():
bucket_name = get_new_bucket()
client = get_client()
key = "multipart_enc"
content_type = 'text/plain'
objlen = 30 * 1024 * 1024
metadata = {'foo': 'bar'}
put_headers = {
'x-amz-server-side-encryption-customer-algorithm': 'AES256',
'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw==',
'Content-Type': content_type
}
get_headers = {
'x-amz-server-side-encryption-customer-algorithm': 'AES256',
'x-amz-server-side-encryption-customer-key': '6b+WOZ1T3cqZMxgThRcXAQBrS5mXKdDUphvpxptl9/4=',
'x-amz-server-side-encryption-customer-key-md5': 'arxBvwY2V4SiOne6yppVPQ=='
}
resend_parts = []
(upload_id, data, parts) = _multipart_upload_enc(client, bucket_name, key, objlen,
part_size=5*1024*1024, init_headers=put_headers, part_headers=put_headers, metadata=metadata, resend_parts=resend_parts)
lf = (lambda **kwargs: kwargs['params']['headers'].update(put_headers))
client.meta.events.register('before-call.s3.CompleteMultipartUpload', lf)
client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
response = client.head_bucket(Bucket=bucket_name)
rgw_object_count = int(response['ResponseMetadata']['HTTPHeaders'].get('x-rgw-object-count', 1))
eq(rgw_object_count, 1)
rgw_bytes_used = int(response['ResponseMetadata']['HTTPHeaders'].get('x-rgw-bytes-used', objlen))
eq(rgw_bytes_used, objlen)
lf = (lambda **kwargs: kwargs['params']['headers'].update(put_headers))
client.meta.events.register('before-call.s3.GetObject', lf)
response = client.get_object(Bucket=bucket_name, Key=key)
eq(response['Metadata'], metadata)
eq(response['ResponseMetadata']['HTTPHeaders']['content-type'], content_type)
lf = (lambda **kwargs: kwargs['params']['headers'].update(get_headers))
client.meta.events.register('before-call.s3.GetObject', lf)
e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=key)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='succeeds and returns written data')
@attr('encryption')
def test_encryption_sse_c_post_object_authenticated_request():
bucket_name = get_new_bucket()
client = get_client()
url = _get_post_url(bucket_name)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"conditions": [\
{"bucket": bucket_name},\
["starts-with", "$key", "foo"],\
{"acl": "private"},\
["starts-with", "$Content-Type", "text/plain"],\
["starts-with", "$x-amz-server-side-encryption-customer-algorithm", ""], \
["starts-with", "$x-amz-server-side-encryption-customer-key", ""], \
["starts-with", "$x-amz-server-side-encryption-customer-key-md5", ""], \
["content-length-range", 0, 1024]\
]\
}
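    # browser-style POST upload: base64-encode the policy document and sign it
    # with an HMAC-SHA1 of the secret key (SigV2-style POST signature)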
json_policy_document = json.JSONEncoder().encode(policy_document)
bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
policy = base64.b64encode(bytes_json_policy_document)
aws_secret_access_key = get_main_aws_secret_key()
aws_access_key_id = get_main_aws_access_key()
signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),
('x-amz-server-side-encryption-customer-algorithm', 'AES256'), \
('x-amz-server-side-encryption-customer-key', 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs='), \
('x-amz-server-side-encryption-customer-key-md5', 'DWygnHRtgiJ77HCm+1rvHw=='), \
('file', ('bar'))])
r = requests.post(url, files = payload)
eq(r.status_code, 204)
get_headers = {
'x-amz-server-side-encryption-customer-algorithm': 'AES256',
'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw=='
}
lf = (lambda **kwargs: kwargs['params']['headers'].update(get_headers))
client.meta.events.register('before-call.s3.GetObject', lf)
response = client.get_object(Bucket=bucket_name, Key='foo.txt')
body = _get_body(response)
eq(body, 'bar')
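# Unlike SSE-C, SSE-KMS requests carry only a key id (no key material or MD5),
# and objects written with SSE-KMS can be read back without any extra headers.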
@attr(assertion='success')
@attr('encryption')
def _test_sse_kms_customer_write(file_size, key_id = 'testkey-1'):
"""
Tests Create a file of A's, use it to set_contents_from_file.
Create a file of B's, use it to re-set_contents_from_file.
Re-read the contents, and confirm we get B's
"""
bucket_name = get_new_bucket()
client = get_client()
sse_kms_client_headers = {
'x-amz-server-side-encryption': 'aws:kms',
'x-amz-server-side-encryption-aws-kms-key-id': key_id
}
data = 'A'*file_size
lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_kms_client_headers))
client.meta.events.register('before-call.s3.PutObject', lf)
client.put_object(Bucket=bucket_name, Key='testobj', Body=data)
response = client.get_object(Bucket=bucket_name, Key='testobj')
body = _get_body(response)
eq(body, data)
@attr(resource='object')
@attr(method='head')
@attr(operation='Test SSE-KMS encrypted does perform head properly')
@attr(assertion='success')
@attr('encryption')
def test_sse_kms_method_head():
kms_keyid = get_main_kms_keyid()
bucket_name = get_new_bucket()
client = get_client()
sse_kms_client_headers = {
'x-amz-server-side-encryption': 'aws:kms',
'x-amz-server-side-encryption-aws-kms-key-id': kms_keyid
}
data = 'A'*1000
key = 'testobj'
lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_kms_client_headers))
client.meta.events.register('before-call.s3.PutObject', lf)
client.put_object(Bucket=bucket_name, Key=key, Body=data)
response = client.head_object(Bucket=bucket_name, Key=key)
eq(response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption'], 'aws:kms')
eq(response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption-aws-kms-key-id'], kms_keyid)
lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_kms_client_headers))
client.meta.events.register('before-call.s3.HeadObject', lf)
e = assert_raises(ClientError, client.head_object, Bucket=bucket_name, Key=key)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
@attr(resource='object')
@attr(method='put')
@attr(operation='write encrypted with SSE-KMS and read without SSE-KMS')
@attr(assertion='operation success')
@attr('encryption')
def test_sse_kms_present():
kms_keyid = get_main_kms_keyid()
bucket_name = get_new_bucket()
client = get_client()
sse_kms_client_headers = {
'x-amz-server-side-encryption': 'aws:kms',
'x-amz-server-side-encryption-aws-kms-key-id': kms_keyid
}
data = 'A'*100
key = 'testobj'
lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_kms_client_headers))
client.meta.events.register('before-call.s3.PutObject', lf)
client.put_object(Bucket=bucket_name, Key=key, Body=data)
response = client.get_object(Bucket=bucket_name, Key=key)
body = _get_body(response)
eq(body, data)
@attr(resource='object')
@attr(method='put')
@attr(operation='declare SSE-KMS but do not provide key_id')
@attr(assertion='operation fails')
@attr('encryption')
def test_sse_kms_no_key():
bucket_name = get_new_bucket()
client = get_client()
sse_kms_client_headers = {
'x-amz-server-side-encryption': 'aws:kms',
}
data = 'A'*100
key = 'testobj'
lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_kms_client_headers))
client.meta.events.register('before-call.s3.PutObject', lf)
e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key, Body=data)
@attr(resource='object')
@attr(method='put')
@attr(operation='Do not declare SSE-KMS but provide key_id')
@attr(assertion='operation successful, no encryption')
@attr('encryption')
def test_sse_kms_not_declared():
bucket_name = get_new_bucket()
client = get_client()
sse_kms_client_headers = {
'x-amz-server-side-encryption-aws-kms-key-id': 'testkey-2'
}
data = 'A'*100
key = 'testobj'
lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_kms_client_headers))
client.meta.events.register('before-call.s3.PutObject', lf)
e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key, Body=data)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
@attr(resource='object')
@attr(method='put')
@attr(operation='complete KMS multi-part upload')
@attr(assertion='successful')
@attr('encryption')
def test_sse_kms_multipart_upload():
kms_keyid = get_main_kms_keyid()
bucket_name = get_new_bucket()
client = get_client()
key = "multipart_enc"
content_type = 'text/plain'
objlen = 30 * 1024 * 1024
metadata = {'foo': 'bar'}
enc_headers = {
'x-amz-server-side-encryption': 'aws:kms',
'x-amz-server-side-encryption-aws-kms-key-id': kms_keyid,
'Content-Type': content_type
}
resend_parts = []
(upload_id, data, parts) = _multipart_upload_enc(client, bucket_name, key, objlen,
part_size=5*1024*1024, init_headers=enc_headers, part_headers=enc_headers, metadata=metadata, resend_parts=resend_parts)
lf = (lambda **kwargs: kwargs['params']['headers'].update(enc_headers))
client.meta.events.register('before-call.s3.CompleteMultipartUpload', lf)
client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
response = client.head_bucket(Bucket=bucket_name)
rgw_object_count = int(response['ResponseMetadata']['HTTPHeaders'].get('x-rgw-object-count', 1))
eq(rgw_object_count, 1)
rgw_bytes_used = int(response['ResponseMetadata']['HTTPHeaders'].get('x-rgw-bytes-used', objlen))
eq(rgw_bytes_used, objlen)
    # SSE-KMS objects can be read back without supplying any extra headers
response = client.get_object(Bucket=bucket_name, Key=key)
eq(response['Metadata'], metadata)
eq(response['ResponseMetadata']['HTTPHeaders']['content-type'], content_type)
body = _get_body(response)
eq(body, data)
size = response['ContentLength']
eq(len(body), size)
_check_content_using_range(key, bucket_name, data, 1000000)
_check_content_using_range(key, bucket_name, data, 10000000)
@attr(resource='object')
@attr(method='put')
@attr(operation='multipart KMS upload with bad key_id for uploading chunks')
@attr(assertion='successful')
@attr('encryption')
def test_sse_kms_multipart_invalid_chunks_1():
kms_keyid = get_main_kms_keyid()
kms_keyid2 = get_secondary_kms_keyid()
bucket_name = get_new_bucket()
client = get_client()
key = "multipart_enc"
content_type = 'text/bla'
objlen = 30 * 1024 * 1024
metadata = {'foo': 'bar'}
init_headers = {
'x-amz-server-side-encryption': 'aws:kms',
'x-amz-server-side-encryption-aws-kms-key-id': kms_keyid,
'Content-Type': content_type
}
part_headers = {
'x-amz-server-side-encryption': 'aws:kms',
'x-amz-server-side-encryption-aws-kms-key-id': kms_keyid2
}
resend_parts = []
_multipart_upload_enc(client, bucket_name, key, objlen, part_size=5*1024*1024,
init_headers=init_headers, part_headers=part_headers, metadata=metadata,
resend_parts=resend_parts)
@attr(resource='object')
@attr(method='put')
@attr(operation='multipart KMS upload with nonexistent key_id for chunks')
@attr(assertion='successful')
@attr('encryption')
def test_sse_kms_multipart_invalid_chunks_2():
kms_keyid = get_main_kms_keyid()
bucket_name = get_new_bucket()
client = get_client()
key = "multipart_enc"
content_type = 'text/plain'
objlen = 30 * 1024 * 1024
metadata = {'foo': 'bar'}
init_headers = {
'x-amz-server-side-encryption': 'aws:kms',
'x-amz-server-side-encryption-aws-kms-key-id': kms_keyid,
'Content-Type': content_type
}
part_headers = {
'x-amz-server-side-encryption': 'aws:kms',
'x-amz-server-side-encryption-aws-kms-key-id': 'testkey-not-present'
}
resend_parts = []
_multipart_upload_enc(client, bucket_name, key, objlen, part_size=5*1024*1024,
init_headers=init_headers, part_headers=part_headers, metadata=metadata,
resend_parts=resend_parts)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated KMS browser based upload via POST request')
@attr(assertion='succeeds and returns written data')
@attr('encryption')
def test_sse_kms_post_object_authenticated_request():
kms_keyid = get_main_kms_keyid()
bucket_name = get_new_bucket()
client = get_client()
url = _get_post_url(bucket_name)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"conditions": [\
{"bucket": bucket_name},\
["starts-with", "$key", "foo"],\
{"acl": "private"},\
["starts-with", "$Content-Type", "text/plain"],\
["starts-with", "$x-amz-server-side-encryption", ""], \
["starts-with", "$x-amz-server-side-encryption-aws-kms-key-id", ""], \
["content-length-range", 0, 1024]\
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
policy = base64.b64encode(bytes_json_policy_document)
aws_secret_access_key = get_main_aws_secret_key()
aws_access_key_id = get_main_aws_access_key()
signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),
('x-amz-server-side-encryption', 'aws:kms'), \
('x-amz-server-side-encryption-aws-kms-key-id', kms_keyid), \
('file', ('bar'))])
r = requests.post(url, files = payload)
eq(r.status_code, 204)
response = client.get_object(Bucket=bucket_name, Key='foo.txt')
body = _get_body(response)
eq(body, 'bar')
@attr(resource='object')
@attr(method='put')
@attr(operation='Test SSE-KMS encrypted transfer 1 byte')
@attr(assertion='success')
@attr('encryption')
def test_sse_kms_transfer_1b():
kms_keyid = get_main_kms_keyid()
if kms_keyid is None:
raise SkipTest
_test_sse_kms_customer_write(1, key_id = kms_keyid)
@attr(resource='object')
@attr(method='put')
@attr(operation='Test SSE-KMS encrypted transfer 1KB')
@attr(assertion='success')
@attr('encryption')
def test_sse_kms_transfer_1kb():
kms_keyid = get_main_kms_keyid()
if kms_keyid is None:
raise SkipTest
_test_sse_kms_customer_write(1024, key_id = kms_keyid)
@attr(resource='object')
@attr(method='put')
@attr(operation='Test SSE-KMS encrypted transfer 1MB')
@attr(assertion='success')
@attr('encryption')
def test_sse_kms_transfer_1MB():
kms_keyid = get_main_kms_keyid()
if kms_keyid is None:
raise SkipTest
_test_sse_kms_customer_write(1024*1024, key_id = kms_keyid)
@attr(resource='object')
@attr(method='put')
@attr(operation='Test SSE-KMS encrypted transfer 13 bytes')
@attr(assertion='success')
@attr('encryption')
def test_sse_kms_transfer_13b():
kms_keyid = get_main_kms_keyid()
if kms_keyid is None:
raise SkipTest
_test_sse_kms_customer_write(13, key_id = kms_keyid)
@attr(resource='object')
@attr(method='get')
@attr(operation='write encrypted with SSE-KMS and read with SSE-KMS')
@attr(assertion='operation fails')
@attr('encryption')
def test_sse_kms_read_declare():
bucket_name = get_new_bucket()
client = get_client()
sse_kms_client_headers = {
'x-amz-server-side-encryption': 'aws:kms',
'x-amz-server-side-encryption-aws-kms-key-id': 'testkey-1'
}
data = 'A'*100
key = 'testobj'
client.put_object(Bucket=bucket_name, Key=key, Body=data)
lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_kms_client_headers))
client.meta.events.register('before-call.s3.GetObject', lf)
e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=key)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
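# Bucket policy tests: the main client owns the bucket and attaches a policy;
# the alt client is a different user whose access is granted or denied by it.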
@attr(resource='bucket')
@attr(method='get')
@attr(operation='Test Bucket Policy')
@attr(assertion='succeeds')
@attr('bucket-policy')
def test_bucket_policy():
bucket_name = get_new_bucket()
client = get_client()
key = 'asdf'
client.put_object(Bucket=bucket_name, Key=key, Body='asdf')
resource1 = "arn:aws:s3:::" + bucket_name
resource2 = "arn:aws:s3:::" + bucket_name + "/*"
policy_document = json.dumps(
{
"Version": "2012-10-17",
"Statement": [{
"Effect": "Allow",
"Principal": {"AWS": "*"},
"Action": "s3:ListBucket",
"Resource": [
"{}".format(resource1),
"{}".format(resource2)
]
}]
})
client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
alt_client = get_alt_client()
response = alt_client.list_objects(Bucket=bucket_name)
eq(len(response['Contents']), 1)
@attr('bucket-policy')
@attr('list-objects-v2')
def test_bucketv2_policy():
bucket_name = get_new_bucket()
client = get_client()
key = 'asdf'
client.put_object(Bucket=bucket_name, Key=key, Body='asdf')
resource1 = "arn:aws:s3:::" + bucket_name
resource2 = "arn:aws:s3:::" + bucket_name + "/*"
policy_document = json.dumps(
{
"Version": "2012-10-17",
"Statement": [{
"Effect": "Allow",
"Principal": {"AWS": "*"},
"Action": "s3:ListBucket",
"Resource": [
"{}".format(resource1),
"{}".format(resource2)
]
}]
})
client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
alt_client = get_alt_client()
response = alt_client.list_objects_v2(Bucket=bucket_name)
eq(len(response['Contents']), 1)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='Test Bucket Policy and ACL')
@attr(assertion='fails')
@attr('bucket-policy')
def test_bucket_policy_acl():
bucket_name = get_new_bucket()
client = get_client()
key = 'asdf'
client.put_object(Bucket=bucket_name, Key=key, Body='asdf')
resource1 = "arn:aws:s3:::" + bucket_name
resource2 = "arn:aws:s3:::" + bucket_name + "/*"
policy_document = json.dumps(
{
"Version": "2012-10-17",
"Statement": [{
"Effect": "Deny",
"Principal": {"AWS": "*"},
"Action": "s3:ListBucket",
"Resource": [
"{}".format(resource1),
"{}".format(resource2)
]
}]
})
client.put_bucket_acl(Bucket=bucket_name, ACL='authenticated-read')
client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
alt_client = get_alt_client()
e = assert_raises(ClientError, alt_client.list_objects, Bucket=bucket_name)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 403)
eq(error_code, 'AccessDenied')
client.delete_bucket_policy(Bucket=bucket_name)
client.put_bucket_acl(Bucket=bucket_name, ACL='public-read')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='Test Bucket Policy and ACL with list-objects-v2')
@attr(assertion='fails')
@attr('bucket-policy')
@attr('list-objects-v2')
def test_bucketv2_policy_acl():
bucket_name = get_new_bucket()
client = get_client()
key = 'asdf'
client.put_object(Bucket=bucket_name, Key=key, Body='asdf')
resource1 = "arn:aws:s3:::" + bucket_name
resource2 = "arn:aws:s3:::" + bucket_name + "/*"
policy_document = json.dumps(
{
"Version": "2012-10-17",
"Statement": [{
"Effect": "Deny",
"Principal": {"AWS": "*"},
"Action": "s3:ListBucket",
"Resource": [
"{}".format(resource1),
"{}".format(resource2)
]
}]
})
client.put_bucket_acl(Bucket=bucket_name, ACL='authenticated-read')
client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
alt_client = get_alt_client()
e = assert_raises(ClientError, alt_client.list_objects_v2, Bucket=bucket_name)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 403)
eq(error_code, 'AccessDenied')
client.delete_bucket_policy(Bucket=bucket_name)
client.put_bucket_acl(Bucket=bucket_name, ACL='public-read')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='Test Bucket Policy for a user belonging to a different tenant')
@attr(assertion='succeeds')
@attr('bucket-policy')
# TODO: remove this fails_on_rgw when I fix it
@attr('fails_on_rgw')
def test_bucket_policy_different_tenant():
bucket_name = get_new_bucket()
client = get_client()
key = 'asdf'
client.put_object(Bucket=bucket_name, Key=key, Body='asdf')
resource1 = "arn:aws:s3::*:" + bucket_name
resource2 = "arn:aws:s3::*:" + bucket_name + "/*"
policy_document = json.dumps(
{
"Version": "2012-10-17",
"Statement": [{
"Effect": "Allow",
"Principal": {"AWS": "*"},
"Action": "s3:ListBucket",
"Resource": [
"{}".format(resource1),
"{}".format(resource2)
]
}]
})
client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
# TODO: figure out how to change the bucketname
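    # RGW addresses buckets across tenants as "tenant:bucket"; the handler below
    # rewrites the request to ":{bucket}" (empty tenant) so the tenanted user can
    # reach a bucket owned by a user in the default tenant (hence the hardcoded URL)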
def change_bucket_name(**kwargs):
kwargs['params']['url'] = "http://localhost:8000/:{bucket_name}?encoding-type=url".format(bucket_name=bucket_name)
kwargs['params']['url_path'] = "/:{bucket_name}".format(bucket_name=bucket_name)
kwargs['params']['context']['signing']['bucket'] = ":{bucket_name}".format(bucket_name=bucket_name)
print(kwargs['request_signer'])
print(kwargs)
#bucket_name = ":" + bucket_name
tenant_client = get_tenant_client()
tenant_client.meta.events.register('before-call.s3.ListObjects', change_bucket_name)
response = tenant_client.list_objects(Bucket=bucket_name)
#alt_client = get_alt_client()
#response = alt_client.list_objects(Bucket=bucket_name)
eq(len(response['Contents']), 1)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='Test Bucket Policy for a user belonging to a different tenant')
@attr(assertion='succeeds')
@attr('bucket-policy')
# TODO: remove this fails_on_rgw when I fix it
@attr('fails_on_rgw')
@attr('list-objects-v2')
def test_bucketv2_policy_different_tenant():
bucket_name = get_new_bucket()
client = get_client()
key = 'asdf'
client.put_object(Bucket=bucket_name, Key=key, Body='asdf')
resource1 = "arn:aws:s3::*:" + bucket_name
resource2 = "arn:aws:s3::*:" + bucket_name + "/*"
policy_document = json.dumps(
{
"Version": "2012-10-17",
"Statement": [{
"Effect": "Allow",
"Principal": {"AWS": "*"},
"Action": "s3:ListBucket",
"Resource": [
"{}".format(resource1),
"{}".format(resource2)
]
}]
})
client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
# TODO: figure out how to change the bucketname
def change_bucket_name(**kwargs):
kwargs['params']['url'] = "http://localhost:8000/:{bucket_name}?encoding-type=url".format(bucket_name=bucket_name)
kwargs['params']['url_path'] = "/:{bucket_name}".format(bucket_name=bucket_name)
kwargs['params']['context']['signing']['bucket'] = ":{bucket_name}".format(bucket_name=bucket_name)
print(kwargs['request_signer'])
print(kwargs)
#bucket_name = ":" + bucket_name
tenant_client = get_tenant_client()
tenant_client.meta.events.register('before-call.s3.ListObjects', change_bucket_name)
response = tenant_client.list_objects_v2(Bucket=bucket_name)
#alt_client = get_alt_client()
#response = alt_client.list_objects_v2(Bucket=bucket_name)
eq(len(response['Contents']), 1)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='Test Bucket Policy on another bucket')
@attr(assertion='succeeds')
@attr('bucket-policy')
def test_bucket_policy_another_bucket():
bucket_name = get_new_bucket()
bucket_name2 = get_new_bucket()
client = get_client()
key = 'asdf'
key2 = 'abcd'
client.put_object(Bucket=bucket_name, Key=key, Body='asdf')
client.put_object(Bucket=bucket_name2, Key=key2, Body='abcd')
policy_document = json.dumps(
{
"Version": "2012-10-17",
"Statement": [{
"Effect": "Allow",
"Principal": {"AWS": "*"},
"Action": "s3:ListBucket",
"Resource": [
"arn:aws:s3:::*",
"arn:aws:s3:::*/*"
]
}]
})
client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
response = client.get_bucket_policy(Bucket=bucket_name)
response_policy = response['Policy']
client.put_bucket_policy(Bucket=bucket_name2, Policy=response_policy)
alt_client = get_alt_client()
response = alt_client.list_objects(Bucket=bucket_name)
eq(len(response['Contents']), 1)
alt_client = get_alt_client()
response = alt_client.list_objects(Bucket=bucket_name2)
eq(len(response['Contents']), 1)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='Test Bucket Policy on another bucket with list-objects-v2')
@attr(assertion='succeeds')
@attr('bucket-policy')
@attr('list-objects-v2')
def test_bucketv2_policy_another_bucket():
bucket_name = get_new_bucket()
bucket_name2 = get_new_bucket()
client = get_client()
key = 'asdf'
key2 = 'abcd'
client.put_object(Bucket=bucket_name, Key=key, Body='asdf')
client.put_object(Bucket=bucket_name2, Key=key2, Body='abcd')
policy_document = json.dumps(
{
"Version": "2012-10-17",
"Statement": [{
"Effect": "Allow",
"Principal": {"AWS": "*"},
"Action": "s3:ListBucket",
"Resource": [
"arn:aws:s3:::*",
"arn:aws:s3:::*/*"
]
}]
})
client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
response = client.get_bucket_policy(Bucket=bucket_name)
response_policy = response['Policy']
client.put_bucket_policy(Bucket=bucket_name2, Policy=response_policy)
alt_client = get_alt_client()
response = alt_client.list_objects_v2(Bucket=bucket_name)
eq(len(response['Contents']), 1)
alt_client = get_alt_client()
response = alt_client.list_objects_v2(Bucket=bucket_name2)
eq(len(response['Contents']), 1)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put condition operator end with ifExists')
@attr('bucket-policy')
# TODO: remove this fails_on_rgw when I fix it
@attr('fails_on_rgw')
def test_bucket_policy_set_condition_operator_end_with_IfExists():
bucket_name = get_new_bucket()
client = get_client()
key = 'foo'
client.put_object(Bucket=bucket_name, Key=key)
policy = '''{
"Version":"2012-10-17",
"Statement": [{
"Sid": "Allow Public Access to All Objects",
"Effect": "Allow",
"Principal": "*",
"Action": "s3:GetObject",
"Condition": {
"StringLikeIfExists": {
"aws:Referer": "http://www.example.com/*"
}
},
"Resource": "arn:aws:s3:::%s/*"
}
]
}''' % bucket_name
boto3.set_stream_logger(name='botocore')
client.put_bucket_policy(Bucket=bucket_name, Policy=policy)
request_headers={'referer': 'http://www.example.com/'}
lf = (lambda **kwargs: kwargs['params']['headers'].update(request_headers))
client.meta.events.register('before-call.s3.GetObject', lf)
response = client.get_object(Bucket=bucket_name, Key=key)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
request_headers={'referer': 'http://www.example.com/index.html'}
lf = (lambda **kwargs: kwargs['params']['headers'].update(request_headers))
client.meta.events.register('before-call.s3.GetObject', lf)
response = client.get_object(Bucket=bucket_name, Key=key)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
# the 'referer' headers need to be removed for this one
#response = client.get_object(Bucket=bucket_name, Key=key)
#eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
request_headers={'referer': 'http://example.com'}
lf = (lambda **kwargs: kwargs['params']['headers'].update(request_headers))
client.meta.events.register('before-call.s3.GetObject', lf)
# TODO: Compare Requests sent in Boto3, Wireshark, RGW Log for both boto and boto3
e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=key)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 403)
response = client.get_bucket_policy(Bucket=bucket_name)
print(response)
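# Object tagging helpers and tests. The tests below exercise the S3 limits:
# at most 10 tags per object, keys up to 128 characters, values up to 256.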
def _create_simple_tagset(count):
tagset = []
for i in range(count):
tagset.append({'Key': str(i), 'Value': str(i)})
return {'TagSet': tagset}
def _make_random_string(size):
return ''.join(random.choice(string.ascii_letters) for _ in range(size))
@attr(resource='object')
@attr(method='get')
@attr(operation='Test Get/PutObjTagging output')
@attr(assertion='success')
@attr('tagging')
def test_get_obj_tagging():
key = 'testputtags'
bucket_name = _create_key_with_random_content(key)
client = get_client()
input_tagset = _create_simple_tagset(2)
response = client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=input_tagset)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
response = client.get_object_tagging(Bucket=bucket_name, Key=key)
eq(response['TagSet'], input_tagset['TagSet'])
@attr(resource='object')
@attr(method='get')
@attr(operation='Test HEAD obj tagging output')
@attr(assertion='success')
@attr('tagging')
def test_get_obj_head_tagging():
key = 'testputtags'
bucket_name = _create_key_with_random_content(key)
client = get_client()
count = 2
input_tagset = _create_simple_tagset(count)
response = client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=input_tagset)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
response = client.head_object(Bucket=bucket_name, Key=key)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
eq(response['ResponseMetadata']['HTTPHeaders']['x-amz-tagging-count'], str(count))
@attr(resource='object')
@attr(method='get')
@attr(operation='Test Put max allowed tags')
@attr(assertion='success')
@attr('tagging')
def test_put_max_tags():
key = 'testputmaxtags'
bucket_name = _create_key_with_random_content(key)
client = get_client()
input_tagset = _create_simple_tagset(10)
response = client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=input_tagset)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
response = client.get_object_tagging(Bucket=bucket_name, Key=key)
eq(response['TagSet'], input_tagset['TagSet'])
@attr(resource='object')
@attr(method='get')
@attr(operation='Test Put more than max allowed tags')
@attr(assertion='fails')
@attr('tagging')
def test_put_excess_tags():
key = 'testputmaxtags'
bucket_name = _create_key_with_random_content(key)
client = get_client()
input_tagset = _create_simple_tagset(11)
e = assert_raises(ClientError, client.put_object_tagging, Bucket=bucket_name, Key=key, Tagging=input_tagset)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
eq(error_code, 'InvalidTag')
response = client.get_object_tagging(Bucket=bucket_name, Key=key)
eq(len(response['TagSet']), 0)
@attr(resource='object')
@attr(method='get')
@attr(operation='Test Put max allowed k-v size')
@attr(assertion='success')
@attr('tagging')
def test_put_max_kvsize_tags():
key = 'testputmaxkeysize'
bucket_name = _create_key_with_random_content(key)
client = get_client()
tagset = []
for i in range(10):
k = _make_random_string(128)
v = _make_random_string(256)
tagset.append({'Key': k, 'Value': v})
input_tagset = {'TagSet': tagset}
response = client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=input_tagset)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
response = client.get_object_tagging(Bucket=bucket_name, Key=key)
for kv_pair in response['TagSet']:
eq((kv_pair in input_tagset['TagSet']), True)
@attr(resource='object')
@attr(method='get')
@attr(operation='Test exceed key size')
@attr(assertion='fails')
@attr('tagging')
def test_put_excess_key_tags():
key = 'testputexcesskeytags'
bucket_name = _create_key_with_random_content(key)
client = get_client()
tagset = []
for i in range(10):
k = _make_random_string(129)
v = _make_random_string(256)
tagset.append({'Key': k, 'Value': v})
input_tagset = {'TagSet': tagset}
e = assert_raises(ClientError, client.put_object_tagging, Bucket=bucket_name, Key=key, Tagging=input_tagset)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
eq(error_code, 'InvalidTag')
response = client.get_object_tagging(Bucket=bucket_name, Key=key)
eq(len(response['TagSet']), 0)
@attr(resource='object')
@attr(method='get')
@attr(operation='Test exceed val size')
@attr(assertion='fails')
@attr('tagging')
def test_put_excess_val_tags():
key = 'testputexcesskeytags'
bucket_name = _create_key_with_random_content(key)
client = get_client()
tagset = []
for i in range(10):
k = _make_random_string(128)
v = _make_random_string(257)
tagset.append({'Key': k, 'Value': v})
input_tagset = {'TagSet': tagset}
e = assert_raises(ClientError, client.put_object_tagging, Bucket=bucket_name, Key=key, Tagging=input_tagset)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
eq(error_code, 'InvalidTag')
response = client.get_object_tagging(Bucket=bucket_name, Key=key)
eq(len(response['TagSet']), 0)
@attr(resource='object')
@attr(method='get')
@attr(operation='Test PUT modifies existing tags')
@attr(assertion='success')
@attr('tagging')
def test_put_modify_tags():
key = 'testputmodifytags'
bucket_name = _create_key_with_random_content(key)
client = get_client()
tagset = []
tagset.append({'Key': 'key', 'Value': 'val'})
tagset.append({'Key': 'key2', 'Value': 'val2'})
input_tagset = {'TagSet': tagset}
response = client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=input_tagset)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
response = client.get_object_tagging(Bucket=bucket_name, Key=key)
eq(response['TagSet'], input_tagset['TagSet'])
tagset2 = []
tagset2.append({'Key': 'key3', 'Value': 'val3'})
input_tagset2 = {'TagSet': tagset2}
response = client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=input_tagset2)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
response = client.get_object_tagging(Bucket=bucket_name, Key=key)
eq(response['TagSet'], input_tagset2['TagSet'])
@attr(resource='object')
@attr(method='get')
@attr(operation='Test Delete tags')
@attr(assertion='success')
@attr('tagging')
def test_put_delete_tags():
key = 'testputmodifytags'
bucket_name = _create_key_with_random_content(key)
client = get_client()
input_tagset = _create_simple_tagset(2)
response = client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=input_tagset)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
response = client.get_object_tagging(Bucket=bucket_name, Key=key)
eq(response['TagSet'], input_tagset['TagSet'])
response = client.delete_object_tagging(Bucket=bucket_name, Key=key)
eq(response['ResponseMetadata']['HTTPStatusCode'], 204)
response = client.get_object_tagging(Bucket=bucket_name, Key=key)
eq(len(response['TagSet']), 0)
@attr(resource='object')
@attr(method='post')
@attr(operation='anonymous browser based upload via POST request')
@attr('tagging')
@attr(assertion='succeeds and returns written data')
def test_post_object_tags_anonymous_request():
bucket_name = get_new_bucket_name()
client = get_client()
url = _get_post_url(bucket_name)
client.create_bucket(ACL='public-read-write', Bucket=bucket_name)
key_name = "foo.txt"
input_tagset = _create_simple_tagset(2)
    # xml_input_tagset is input_tagset expressed as XML.
    # There is no simple way to convert input_tagset to XML like there is in the boto2 tests.
xml_input_tagset = "<Tagging><TagSet><Tag><Key>0</Key><Value>0</Value></Tag><Tag><Key>1</Key><Value>1</Value></Tag></TagSet></Tagging>"
payload = OrderedDict([
("key" , key_name),
("acl" , "public-read"),
("Content-Type" , "text/plain"),
("tagging", xml_input_tagset),
('file', ('bar')),
])
r = requests.post(url, files = payload)
eq(r.status_code, 204)
response = client.get_object(Bucket=bucket_name, Key=key_name)
body = _get_body(response)
eq(body, 'bar')
response = client.get_object_tagging(Bucket=bucket_name, Key=key_name)
eq(response['TagSet'], input_tagset['TagSet'])
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr('tagging')
@attr(assertion='succeeds and returns written data')
def test_post_object_tags_authenticated_request():
bucket_name = get_new_bucket()
client = get_client()
url = _get_post_url(bucket_name)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"conditions": [
{"bucket": bucket_name},
["starts-with", "$key", "foo"],
{"acl": "private"},
["starts-with", "$Content-Type", "text/plain"],
["content-length-range", 0, 1024],
["starts-with", "$tagging", ""]
]}
    # xml_input_tagset is the XML form of `input_tagset = _create_simple_tagset(2)`.
    # There is no simple way to convert input_tagset to XML like there is in the boto2 tests.
xml_input_tagset = "<Tagging><TagSet><Tag><Key>0</Key><Value>0</Value></Tag><Tag><Key>1</Key><Value>1</Value></Tag></TagSet></Tagging>"
json_policy_document = json.JSONEncoder().encode(policy_document)
bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
policy = base64.b64encode(bytes_json_policy_document)
aws_secret_access_key = get_main_aws_secret_key()
aws_access_key_id = get_main_aws_access_key()
signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
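    # Browser-based POST uploads are authenticated by signing the base64-encoded
    # policy document with HMAC-SHA1 (AWS signature v2 style); the policy above
    # explicitly allows a 'tagging' form field.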
    payload = OrderedDict([
        ("key" , "foo.txt"),
        ("AWSAccessKeyId" , aws_access_key_id),
        ("acl" , "private"),
        ("signature" , signature),
        ("policy" , policy),
        ("tagging", xml_input_tagset),
        ("Content-Type" , "text/plain"),
        ('file', ('bar'))])
r = requests.post(url, files = payload)
eq(r.status_code, 204)
response = client.get_object(Bucket=bucket_name, Key='foo.txt')
body = _get_body(response)
eq(body, 'bar')
@attr(resource='object')
@attr(method='put')
@attr(operation='Test PutObj with tagging headers')
@attr(assertion='success')
@attr('tagging')
def test_put_obj_with_tags():
bucket_name = get_new_bucket()
client = get_client()
key = 'testtagobj1'
data = 'A'*100
tagset = []
tagset.append({'Key': 'bar', 'Value': ''})
tagset.append({'Key': 'foo', 'Value': 'bar'})
put_obj_tag_headers = {
'x-amz-tagging' : 'foo=bar&bar'
}
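    # The raw x-amz-tagging header carries tags in query-string form; 'bar' with
    # no value should come back as a tag with an empty string, matching the
    # tagset built above.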
lf = (lambda **kwargs: kwargs['params']['headers'].update(put_obj_tag_headers))
client.meta.events.register('before-call.s3.PutObject', lf)
client.put_object(Bucket=bucket_name, Key=key, Body=data)
response = client.get_object(Bucket=bucket_name, Key=key)
body = _get_body(response)
eq(body, data)
response = client.get_object_tagging(Bucket=bucket_name, Key=key)
    response_tagset = response['TagSet']
    eq(response_tagset, tagset)
def _make_arn_resource(path="*"):
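    """Build an S3 resource ARN, e.g. arn:aws:s3:::bucket/key (defaults to '*' for all resources)."""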
return "arn:aws:s3:::{}".format(path)
@attr(resource='object')
@attr(method='get')
@attr(operation='Test GetObjTagging public read')
@attr(assertion='success')
@attr('tagging')
@attr('bucket-policy')
def test_get_tags_acl_public():
key = 'testputtagsacl'
bucket_name = _create_key_with_random_content(key)
client = get_client()
resource = _make_arn_resource("{}/{}".format(bucket_name, key))
policy_document = make_json_policy("s3:GetObjectTagging",
resource)
client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
input_tagset = _create_simple_tagset(10)
response = client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=input_tagset)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
alt_client = get_alt_client()
response = alt_client.get_object_tagging(Bucket=bucket_name, Key=key)
eq(response['TagSet'], input_tagset['TagSet'])
@attr(resource='object')
@attr(method='get')
@attr(operation='Test PutObjTagging public write')
@attr(assertion='success')
@attr('tagging')
@attr('bucket-policy')
def test_put_tags_acl_public():
key = 'testputtagsacl'
bucket_name = _create_key_with_random_content(key)
client = get_client()
resource = _make_arn_resource("{}/{}".format(bucket_name, key))
policy_document = make_json_policy("s3:PutObjectTagging",
resource)
client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
input_tagset = _create_simple_tagset(10)
alt_client = get_alt_client()
response = alt_client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=input_tagset)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
response = client.get_object_tagging(Bucket=bucket_name, Key=key)
eq(response['TagSet'], input_tagset['TagSet'])
@attr(resource='object')
@attr(method='get')
@attr(operation='test deleteobjtagging public')
@attr(assertion='success')
@attr('tagging')
@attr('bucket-policy')
def test_delete_tags_obj_public():
key = 'testputtagsacl'
bucket_name = _create_key_with_random_content(key)
client = get_client()
resource = _make_arn_resource("{}/{}".format(bucket_name, key))
policy_document = make_json_policy("s3:DeleteObjectTagging",
resource)
client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
input_tagset = _create_simple_tagset(10)
response = client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=input_tagset)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
alt_client = get_alt_client()
response = alt_client.delete_object_tagging(Bucket=bucket_name, Key=key)
eq(response['ResponseMetadata']['HTTPStatusCode'], 204)
response = client.get_object_tagging(Bucket=bucket_name, Key=key)
eq(len(response['TagSet']), 0)
@attr(resource='object')
@attr(method='put')
@attr(operation='test whether a correct version-id returned')
@attr(assertion='version-id is same as bucket list')
@attr('versioning')
def test_versioning_bucket_atomic_upload_return_version_id():
bucket_name = get_new_bucket()
client = get_client()
key = 'bar'
    # for a versioning-enabled bucket, a non-empty version-id should be returned
check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
response = client.put_object(Bucket=bucket_name, Key=key)
version_id = response['VersionId']
response = client.list_object_versions(Bucket=bucket_name)
versions = response['Versions']
for version in versions:
eq(version['VersionId'], version_id)
    # for a versioning-default bucket, no version-id should be returned.
bucket_name = get_new_bucket()
key = 'baz'
response = client.put_object(Bucket=bucket_name, Key=key)
eq(('VersionId' in response), False)
    # for a versioning-suspended bucket, no version-id should be returned.
bucket_name = get_new_bucket()
key = 'baz'
check_configure_versioning_retry(bucket_name, "Suspended", "Suspended")
response = client.put_object(Bucket=bucket_name, Key=key)
eq(('VersionId' in response), False)
@attr(resource='object')
@attr(method='put')
@attr(operation='test whether a correct version-id returned')
@attr(assertion='version-id is same as bucket list')
@attr('versioning')
def test_versioning_bucket_multipart_upload_return_version_id():
content_type='text/bla'
objlen = 30 * 1024 * 1024
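    # 30 MiB object, large enough for a true multi-part upload (S3's minimum
    # non-final part size is 5 MiB)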
bucket_name = get_new_bucket()
client = get_client()
key = 'bar'
metadata={'foo': 'baz'}
    # for a versioning-enabled bucket, a non-empty version-id should be returned
check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
(upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen, client=client, content_type=content_type, metadata=metadata)
response = client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
version_id = response['VersionId']
response = client.list_object_versions(Bucket=bucket_name)
versions = response['Versions']
for version in versions:
eq(version['VersionId'], version_id)
    # for a versioning-default bucket, no version-id should be returned.
bucket_name = get_new_bucket()
key = 'baz'
(upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen, client=client, content_type=content_type, metadata=metadata)
response = client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
eq(('VersionId' in response), False)
    # for a versioning-suspended bucket, no version-id should be returned
bucket_name = get_new_bucket()
key = 'foo'
check_configure_versioning_retry(bucket_name, "Suspended", "Suspended")
(upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen, client=client, content_type=content_type, metadata=metadata)
response = client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
eq(('VersionId' in response), False)
@attr(resource='object')
@attr(method='get')
@attr(operation='Test ExistingObjectTag conditional on get object')
@attr(assertion='success')
@attr('tagging')
@attr('bucket-policy')
def test_bucket_policy_get_obj_existing_tag():
bucket_name = _create_objects(keys=['publictag', 'privatetag', 'invalidtag'])
client = get_client()
tag_conditional = {"StringEquals": {
"s3:ExistingObjectTag/security" : "public"
}}
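    # s3:ExistingObjectTag/security matches tags already stored on the object,
    # so only the key tagged security=public below should be readable by the
    # alt client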
resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
policy_document = make_json_policy("s3:GetObject",
resource,
conditions=tag_conditional)
client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
tagset = []
tagset.append({'Key': 'security', 'Value': 'public'})
tagset.append({'Key': 'foo', 'Value': 'bar'})
input_tagset = {'TagSet': tagset}
response = client.put_object_tagging(Bucket=bucket_name, Key='publictag', Tagging=input_tagset)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
tagset2 = []
tagset2.append({'Key': 'security', 'Value': 'private'})
input_tagset = {'TagSet': tagset2}
response = client.put_object_tagging(Bucket=bucket_name, Key='privatetag', Tagging=input_tagset)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
tagset3 = []
tagset3.append({'Key': 'security1', 'Value': 'public'})
input_tagset = {'TagSet': tagset3}
response = client.put_object_tagging(Bucket=bucket_name, Key='invalidtag', Tagging=input_tagset)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
alt_client = get_alt_client()
response = alt_client.get_object(Bucket=bucket_name, Key='publictag')
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
e = assert_raises(ClientError, alt_client.get_object, Bucket=bucket_name, Key='privatetag')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 403)
e = assert_raises(ClientError, alt_client.get_object, Bucket=bucket_name, Key='invalidtag')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 403)
@attr(resource='object')
@attr(method='get')
@attr(operation='Test ExistingObjectTag conditional on get object tagging')
@attr(assertion='success')
@attr('tagging')
@attr('bucket-policy')
def test_bucket_policy_get_obj_tagging_existing_tag():
bucket_name = _create_objects(keys=['publictag', 'privatetag', 'invalidtag'])
client = get_client()
tag_conditional = {"StringEquals": {
"s3:ExistingObjectTag/security" : "public"
}}
resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
policy_document = make_json_policy("s3:GetObjectTagging",
resource,
conditions=tag_conditional)
client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
tagset = []
tagset.append({'Key': 'security', 'Value': 'public'})
tagset.append({'Key': 'foo', 'Value': 'bar'})
input_tagset = {'TagSet': tagset}
response = client.put_object_tagging(Bucket=bucket_name, Key='publictag', Tagging=input_tagset)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
tagset2 = []
tagset2.append({'Key': 'security', 'Value': 'private'})
input_tagset = {'TagSet': tagset2}
response = client.put_object_tagging(Bucket=bucket_name, Key='privatetag', Tagging=input_tagset)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
tagset3 = []
tagset3.append({'Key': 'security1', 'Value': 'public'})
input_tagset = {'TagSet': tagset3}
response = client.put_object_tagging(Bucket=bucket_name, Key='invalidtag', Tagging=input_tagset)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
alt_client = get_alt_client()
response = alt_client.get_object_tagging(Bucket=bucket_name, Key='publictag')
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
# A get object itself should fail since we allowed only GetObjectTagging
e = assert_raises(ClientError, alt_client.get_object, Bucket=bucket_name, Key='publictag')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 403)
e = assert_raises(ClientError, alt_client.get_object_tagging, Bucket=bucket_name, Key='privatetag')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 403)
e = assert_raises(ClientError, alt_client.get_object_tagging, Bucket=bucket_name, Key='invalidtag')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 403)
@attr(resource='object')
@attr(method='get')
@attr(operation='Test ExistingObjectTag conditional on put object tagging')
@attr(assertion='success')
@attr('tagging')
@attr('bucket-policy')
def test_bucket_policy_put_obj_tagging_existing_tag():
bucket_name = _create_objects(keys=['publictag', 'privatetag', 'invalidtag'])
client = get_client()
tag_conditional = {"StringEquals": {
"s3:ExistingObjectTag/security" : "public"
}}
resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
policy_document = make_json_policy("s3:PutObjectTagging",
resource,
conditions=tag_conditional)
client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
tagset = []
tagset.append({'Key': 'security', 'Value': 'public'})
tagset.append({'Key': 'foo', 'Value': 'bar'})
input_tagset = {'TagSet': tagset}
response = client.put_object_tagging(Bucket=bucket_name, Key='publictag', Tagging=input_tagset)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
tagset2 = []
tagset2.append({'Key': 'security', 'Value': 'private'})
input_tagset = {'TagSet': tagset2}
response = client.put_object_tagging(Bucket=bucket_name, Key='privatetag', Tagging=input_tagset)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
alt_client = get_alt_client()
    # PUT object tagging under this policy is a bit weird: if a later request
    # overwrites the tags and drops the tag the policy condition expects to
    # exist, subsequent put requests will fail
testtagset1 = []
testtagset1.append({'Key': 'security', 'Value': 'public'})
testtagset1.append({'Key': 'foo', 'Value': 'bar'})
input_tagset = {'TagSet': testtagset1}
response = alt_client.put_object_tagging(Bucket=bucket_name, Key='publictag', Tagging=input_tagset)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
e = assert_raises(ClientError, alt_client.put_object_tagging, Bucket=bucket_name, Key='privatetag', Tagging=input_tagset)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 403)
testtagset2 = []
testtagset2.append({'Key': 'security', 'Value': 'private'})
input_tagset = {'TagSet': testtagset2}
response = alt_client.put_object_tagging(Bucket=bucket_name, Key='publictag', Tagging=input_tagset)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
# Now try putting the original tags again, this should fail
input_tagset = {'TagSet': testtagset1}
e = assert_raises(ClientError, alt_client.put_object_tagging, Bucket=bucket_name, Key='publictag', Tagging=input_tagset)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 403)
@attr(resource='object')
@attr(method='put')
@attr(operation='Test copy-source conditional on put obj')
@attr(assertion='success')
@attr('tagging')
@attr('bucket-policy')
def test_bucket_policy_put_obj_copy_source():
bucket_name = _create_objects(keys=['public/foo', 'public/bar', 'private/foo'])
client = get_client()
src_resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
policy_document = make_json_policy("s3:GetObject",
src_resource)
client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
bucket_name2 = get_new_bucket()
tag_conditional = {"StringLike": {
"s3:x-amz-copy-source" : bucket_name + "/public/*"
}}
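    # s3:x-amz-copy-source matches the copy source of the request, so the alt
    # user may only copy objects from the public/ prefix of the source bucket
    # into bucket_name2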
resource = _make_arn_resource("{}/{}".format(bucket_name2, "*"))
policy_document = make_json_policy("s3:PutObject",
resource,
conditions=tag_conditional)
client.put_bucket_policy(Bucket=bucket_name2, Policy=policy_document)
alt_client = get_alt_client()
copy_source = {'Bucket': bucket_name, 'Key': 'public/foo'}
alt_client.copy_object(Bucket=bucket_name2, CopySource=copy_source, Key='new_foo')
    # This is possible because we are still the owner; see the grants-with-policy
    # test on how to do this right
response = alt_client.get_object(Bucket=bucket_name2, Key='new_foo')
body = _get_body(response)
eq(body, 'public/foo')
copy_source = {'Bucket': bucket_name, 'Key': 'public/bar'}
alt_client.copy_object(Bucket=bucket_name2, CopySource=copy_source, Key='new_foo2')
response = alt_client.get_object(Bucket=bucket_name2, Key='new_foo2')
body = _get_body(response)
eq(body, 'public/bar')
copy_source = {'Bucket': bucket_name, 'Key': 'private/foo'}
check_access_denied(alt_client.copy_object, Bucket=bucket_name2, CopySource=copy_source, Key='new_foo2')
@attr(resource='object')
@attr(method='put')
@attr(operation='Test copy-source conditional on put obj')
@attr(assertion='success')
@attr('tagging')
@attr('bucket-policy')
def test_bucket_policy_put_obj_copy_source_meta():
src_bucket_name = _create_objects(keys=['public/foo', 'public/bar'])
client = get_client()
src_resource = _make_arn_resource("{}/{}".format(src_bucket_name, "*"))
policy_document = make_json_policy("s3:GetObject",
src_resource)
client.put_bucket_policy(Bucket=src_bucket_name, Policy=policy_document)
bucket_name = get_new_bucket()
tag_conditional = {"StringEquals": {
"s3:x-amz-metadata-directive" : "COPY"
}}
resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
policy_document = make_json_policy("s3:PutObject",
resource,
conditions=tag_conditional)
client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
alt_client = get_alt_client()
lf = (lambda **kwargs: kwargs['params']['headers'].update({"x-amz-metadata-directive": "COPY"}))
alt_client.meta.events.register('before-call.s3.CopyObject', lf)
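    # registering on botocore's before-call event injects the raw
    # x-amz-metadata-directive header instead of going through boto3's
    # MetadataDirective parameter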
copy_source = {'Bucket': src_bucket_name, 'Key': 'public/foo'}
alt_client.copy_object(Bucket=bucket_name, CopySource=copy_source, Key='new_foo')
    # This is possible because we are still the owner; see the grants-with-policy
    # test on how to do this right
response = alt_client.get_object(Bucket=bucket_name, Key='new_foo')
body = _get_body(response)
eq(body, 'public/foo')
# remove the x-amz-metadata-directive header
def remove_header(**kwargs):
if ("x-amz-metadata-directive" in kwargs['params']['headers']):
del kwargs['params']['headers']["x-amz-metadata-directive"]
alt_client.meta.events.register('before-call.s3.CopyObject', remove_header)
copy_source = {'Bucket': src_bucket_name, 'Key': 'public/bar'}
check_access_denied(alt_client.copy_object, Bucket=bucket_name, CopySource=copy_source, Key='new_foo2', Metadata={"foo": "bar"})
@attr(resource='object')
@attr(method='put')
@attr(operation='Test put obj with canned-acl not to be public')
@attr(assertion='success')
@attr('tagging')
@attr('bucket-policy')
def test_bucket_policy_put_obj_acl():
bucket_name = get_new_bucket()
client = get_client()
    # An Allow conditional would require at least the presence of an x-amz-acl
    # attribute; a Deny conditional negates any request that tries to set a
    # public-read/write acl
conditional = {"StringLike": {
"s3:x-amz-acl" : "public*"
}}
p = Policy()
resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
s1 = Statement("s3:PutObject",resource)
s2 = Statement("s3:PutObject", resource, effect="Deny", condition=conditional)
policy_document = p.add_statement(s1).add_statement(s2).to_json()
client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
alt_client = get_alt_client()
key1 = 'private-key'
    # if we want to be really pedantic, we should check that this doesn't raise
    # and mark a failure; however, if it does raise, nosetests will mark this
    # as an ERROR anyway
response = alt_client.put_object(Bucket=bucket_name, Key=key1, Body=key1)
#response = alt_client.put_object_acl(Bucket=bucket_name, Key=key1, ACL='private')
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
key2 = 'public-key'
lf = (lambda **kwargs: kwargs['params']['headers'].update({"x-amz-acl": "public-read"}))
alt_client.meta.events.register('before-call.s3.PutObject', lf)
e = assert_raises(ClientError, alt_client.put_object, Bucket=bucket_name, Key=key2, Body=key2)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 403)
@attr(resource='object')
@attr(method='put')
@attr(operation='Test put obj with amz-grant back to bucket-owner')
@attr(assertion='success')
@attr('bucket-policy')
def test_bucket_policy_put_obj_grant():
bucket_name = get_new_bucket()
bucket_name2 = get_new_bucket()
client = get_client()
    # In normal cases the key owner would be the uploader of the key. For the
    # first bucket we explicitly require that the bucket owner is granted full
    # control over objects uploaded by any user; the second bucket enforces no
    # such policy, meaning the uploader still retains ownership
main_user_id = get_main_user_id()
alt_user_id = get_alt_user_id()
owner_id_str = "id=" + main_user_id
s3_conditional = {"StringEquals": {
"s3:x-amz-grant-full-control" : owner_id_str
}}
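    # the policy on the first bucket only allows PutObject when the request also
    # grants full control to the bucket owner via x-amz-grant-full-control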
resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
policy_document = make_json_policy("s3:PutObject",
resource,
conditions=s3_conditional)
resource = _make_arn_resource("{}/{}".format(bucket_name2, "*"))
policy_document2 = make_json_policy("s3:PutObject", resource)
client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
client.put_bucket_policy(Bucket=bucket_name2, Policy=policy_document2)
alt_client = get_alt_client()
key1 = 'key1'
lf = (lambda **kwargs: kwargs['params']['headers'].update({"x-amz-grant-full-control" : owner_id_str}))
alt_client.meta.events.register('before-call.s3.PutObject', lf)
response = alt_client.put_object(Bucket=bucket_name, Key=key1, Body=key1)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
def remove_header(**kwargs):
if ("x-amz-grant-full-control" in kwargs['params']['headers']):
del kwargs['params']['headers']["x-amz-grant-full-control"]
alt_client.meta.events.register('before-call.s3.PutObject', remove_header)
key2 = 'key2'
response = alt_client.put_object(Bucket=bucket_name2, Key=key2, Body=key2)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
acl1_response = client.get_object_acl(Bucket=bucket_name, Key=key1)
# user 1 is trying to get acl for the object from user2 where ownership
# wasn't transferred
check_access_denied(client.get_object_acl, Bucket=bucket_name2, Key=key2)
acl2_response = alt_client.get_object_acl(Bucket=bucket_name2, Key=key2)
eq(acl1_response['Grants'][0]['Grantee']['ID'], main_user_id)
eq(acl2_response['Grants'][0]['Grantee']['ID'], alt_user_id)
@attr(resource='object')
@attr(method='put')
@attr(operation='Deny put obj requests without encryption')
@attr(assertion='success')
@attr('encryption')
@attr('bucket-policy')
# TODO: remove this 'fails_on_rgw' once I get the test passing
@attr('fails_on_rgw')
def test_bucket_policy_put_obj_enc():
bucket_name = get_new_bucket()
client = get_v2_client()
deny_incorrect_algo = {
"StringNotEquals": {
"s3:x-amz-server-side-encryption": "AES256"
}
}
deny_unencrypted_obj = {
"Null" : {
"s3:x-amz-server-side-encryption": "true"
}
}
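    # a Null condition evaluates to true when the header is absent, so this
    # statement denies any PUT that omits x-amz-server-side-encryption entirely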
p = Policy()
resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
s1 = Statement("s3:PutObject", resource, effect="Deny", condition=deny_incorrect_algo)
s2 = Statement("s3:PutObject", resource, effect="Deny", condition=deny_unencrypted_obj)
policy_document = p.add_statement(s1).add_statement(s2).to_json()
boto3.set_stream_logger(name='botocore')
client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
    key1_str = 'testobj'
#response = client.get_bucket_policy(Bucket=bucket_name)
    #print(response)
check_access_denied(client.put_object, Bucket=bucket_name, Key=key1_str, Body=key1_str)
sse_client_headers = {
'x-amz-server-side-encryption' : 'AES256',
'x-amz-server-side-encryption-customer-algorithm': 'AES256',
'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw=='
}
lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers))
client.meta.events.register('before-call.s3.PutObject', lf)
#TODO: why is this a 400 and not passing, it appears boto3 is not parsing the 200 response the rgw sends back properly
# DEBUGGING: run the boto2 and compare the requests
# DEBUGGING: try to run this with v2 auth (figure out why get_v2_client isn't working) to make the requests similar to what boto2 is doing
# DEBUGGING: try to add other options to put_object to see if that makes the response better
client.put_object(Bucket=bucket_name, Key=key1_str)
@attr(resource='object')
@attr(method='put')
@attr(operation='put obj with RequestObjectTag')
@attr(assertion='success')
@attr('tagging')
@attr('bucket-policy')
# TODO: remove this fails_on_rgw when I fix it
@attr('fails_on_rgw')
def test_bucket_policy_put_obj_request_obj_tag():
bucket_name = get_new_bucket()
client = get_client()
tag_conditional = {"StringEquals": {
"s3:RequestObjectTag/security" : "public"
}}
p = Policy()
resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
s1 = Statement("s3:PutObject", resource, effect="Allow", condition=tag_conditional)
policy_document = p.add_statement(s1).to_json()
client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
alt_client = get_alt_client()
    key1_str = 'testobj'
check_access_denied(alt_client.put_object, Bucket=bucket_name, Key=key1_str, Body=key1_str)
headers = {"x-amz-tagging" : "security=public"}
lf = (lambda **kwargs: kwargs['params']['headers'].update(headers))
    alt_client.meta.events.register('before-call.s3.PutObject', lf)
#TODO: why is this a 400 and not passing
alt_client.put_object(Bucket=bucket_name, Key=key1_str, Body=key1_str)
@attr(resource='object')
@attr(method='get')
@attr(operation='Test ExistingObjectTag conditional on get object acl')
@attr(assertion='success')
@attr('tagging')
@attr('bucket-policy')
def test_bucket_policy_get_obj_acl_existing_tag():
bucket_name = _create_objects(keys=['publictag', 'privatetag', 'invalidtag'])
client = get_client()
tag_conditional = {"StringEquals": {
"s3:ExistingObjectTag/security" : "public"
}}
resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
policy_document = make_json_policy("s3:GetObjectAcl",
resource,
conditions=tag_conditional)
client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
tagset = []
tagset.append({'Key': 'security', 'Value': 'public'})
tagset.append({'Key': 'foo', 'Value': 'bar'})
input_tagset = {'TagSet': tagset}
response = client.put_object_tagging(Bucket=bucket_name, Key='publictag', Tagging=input_tagset)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
tagset2 = []
tagset2.append({'Key': 'security', 'Value': 'private'})
input_tagset = {'TagSet': tagset2}
response = client.put_object_tagging(Bucket=bucket_name, Key='privatetag', Tagging=input_tagset)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
tagset3 = []
tagset3.append({'Key': 'security1', 'Value': 'public'})
input_tagset = {'TagSet': tagset3}
response = client.put_object_tagging(Bucket=bucket_name, Key='invalidtag', Tagging=input_tagset)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
alt_client = get_alt_client()
response = alt_client.get_object_acl(Bucket=bucket_name, Key='publictag')
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
# A get object itself should fail since we allowed only GetObjectTagging
e = assert_raises(ClientError, alt_client.get_object, Bucket=bucket_name, Key='publictag')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 403)
e = assert_raises(ClientError, alt_client.get_object_tagging, Bucket=bucket_name, Key='privatetag')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 403)
e = assert_raises(ClientError, alt_client.get_object_tagging, Bucket=bucket_name, Key='invalidtag')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 403)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put object lock with default retention')
@attr(assertion='success')
@attr('object-lock')
def test_object_lock_put_obj_lock():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
conf = {'ObjectLockEnabled':'Enabled',
'Rule': {
'DefaultRetention':{
'Mode':'GOVERNANCE',
'Days':1
}
}}
response = client.put_object_lock_configuration(
Bucket=bucket_name,
ObjectLockConfiguration=conf)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
conf = {'ObjectLockEnabled':'Enabled',
'Rule': {
'DefaultRetention':{
'Mode':'COMPLIANCE',
'Years':1
}
}}
response = client.put_object_lock_configuration(
Bucket=bucket_name,
ObjectLockConfiguration=conf)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
response = client.get_bucket_versioning(Bucket=bucket_name)
eq(response['Status'], 'Enabled')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put object lock with bucket object lock not enabled')
@attr(assertion='fails')
@attr('object-lock')
def test_object_lock_put_obj_lock_invalid_bucket():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(Bucket=bucket_name)
conf = {'ObjectLockEnabled':'Enabled',
'Rule': {
'DefaultRetention':{
'Mode':'GOVERNANCE',
'Days':1
}
}}
e = assert_raises(ClientError, client.put_object_lock_configuration, Bucket=bucket_name, ObjectLockConfiguration=conf)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 409)
eq(error_code, 'InvalidBucketState')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put object lock with days and years')
@attr(assertion='fails')
@attr('object-lock')
def test_object_lock_put_obj_lock_with_days_and_years():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
conf = {'ObjectLockEnabled':'Enabled',
'Rule': {
'DefaultRetention':{
'Mode':'GOVERNANCE',
'Days':1,
'Years':1
}
}}
e = assert_raises(ClientError, client.put_object_lock_configuration, Bucket=bucket_name, ObjectLockConfiguration=conf)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
eq(error_code, 'MalformedXML')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put object lock with invalid days')
@attr(assertion='fails')
@attr('object-lock')
def test_object_lock_put_obj_lock_invalid_days():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
conf = {'ObjectLockEnabled':'Enabled',
'Rule': {
'DefaultRetention':{
'Mode':'GOVERNANCE',
'Days':0
}
}}
e = assert_raises(ClientError, client.put_object_lock_configuration, Bucket=bucket_name, ObjectLockConfiguration=conf)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
eq(error_code, 'InvalidRetentionPeriod')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put object lock with invalid years')
@attr(assertion='fails')
@attr('object-lock')
def test_object_lock_put_obj_lock_invalid_years():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
conf = {'ObjectLockEnabled':'Enabled',
'Rule': {
'DefaultRetention':{
'Mode':'GOVERNANCE',
'Years':-1
}
}}
e = assert_raises(ClientError, client.put_object_lock_configuration, Bucket=bucket_name, ObjectLockConfiguration=conf)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
eq(error_code, 'InvalidRetentionPeriod')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put object lock with invalid mode')
@attr(assertion='fails')
@attr('object-lock')
def test_object_lock_put_obj_lock_invalid_mode():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
conf = {'ObjectLockEnabled':'Enabled',
'Rule': {
'DefaultRetention':{
'Mode':'abc',
'Years':1
}
}}
e = assert_raises(ClientError, client.put_object_lock_configuration, Bucket=bucket_name, ObjectLockConfiguration=conf)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
eq(error_code, 'MalformedXML')
conf = {'ObjectLockEnabled':'Enabled',
'Rule': {
'DefaultRetention':{
'Mode':'governance',
'Years':1
}
}}
e = assert_raises(ClientError, client.put_object_lock_configuration, Bucket=bucket_name, ObjectLockConfiguration=conf)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
eq(error_code, 'MalformedXML')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put object lock with invalid status')
@attr(assertion='fails')
@attr('object-lock')
def test_object_lock_put_obj_lock_invalid_status():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
conf = {'ObjectLockEnabled':'Disabled',
'Rule': {
'DefaultRetention':{
'Mode':'GOVERNANCE',
'Years':1
}
}}
e = assert_raises(ClientError, client.put_object_lock_configuration, Bucket=bucket_name, ObjectLockConfiguration=conf)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
eq(error_code, 'MalformedXML')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test suspend versioning when object lock enabled')
@attr(assertion='fails')
@attr('object-lock')
def test_object_lock_suspend_versioning():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
e = assert_raises(ClientError, client.put_bucket_versioning, Bucket=bucket_name, VersioningConfiguration={'Status': 'Suspended'})
status, error_code = _get_status_and_error_code(e.response)
eq(status, 409)
eq(error_code, 'InvalidBucketState')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='Test get object lock')
@attr(assertion='success')
@attr('object-lock')
def test_object_lock_get_obj_lock():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
conf = {'ObjectLockEnabled':'Enabled',
'Rule': {
'DefaultRetention':{
'Mode':'GOVERNANCE',
'Days':1
}
}}
client.put_object_lock_configuration(
Bucket=bucket_name,
ObjectLockConfiguration=conf)
response = client.get_object_lock_configuration(Bucket=bucket_name)
eq(response['ObjectLockConfiguration'], conf)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='Test get object lock with bucket object lock not enabled')
@attr(assertion='fails')
@attr('object-lock')
def test_object_lock_get_obj_lock_invalid_bucket():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(Bucket=bucket_name)
e = assert_raises(ClientError, client.get_object_lock_configuration, Bucket=bucket_name)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 404)
eq(error_code, 'ObjectLockConfigurationNotFoundError')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='Test put object retention')
@attr(assertion='success')
@attr('object-lock')
def test_object_lock_put_obj_retention():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
key = 'file1'
response = client.put_object(Bucket=bucket_name, Body='abc', Key=key)
version_id = response['VersionId']
retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
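    # GOVERNANCE-mode retention can be bypassed by callers with the
    # s3:BypassGovernanceRetention permission, which is how the test cleans up
    # the locked object below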
response = client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
client.delete_object(Bucket=bucket_name, Key=key, VersionId=version_id, BypassGovernanceRetention=True)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put object retention with bucket object lock not enabled')
@attr(assertion='fails')
@attr('object-lock')
def test_object_lock_put_obj_retention_invalid_bucket():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(Bucket=bucket_name)
key = 'file1'
client.put_object(Bucket=bucket_name, Body='abc', Key=key)
retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
e = assert_raises(ClientError, client.put_object_retention, Bucket=bucket_name, Key=key, Retention=retention)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
eq(error_code, 'InvalidRequest')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put object retention with invalid mode')
@attr(assertion='fails')
@attr('object-lock')
def test_object_lock_put_obj_retention_invalid_mode():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
key = 'file1'
client.put_object(Bucket=bucket_name, Body='abc', Key=key)
retention = {'Mode':'governance', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
e = assert_raises(ClientError, client.put_object_retention, Bucket=bucket_name, Key=key, Retention=retention)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
eq(error_code, 'MalformedXML')
retention = {'Mode':'abc', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
e = assert_raises(ClientError, client.put_object_retention, Bucket=bucket_name, Key=key, Retention=retention)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
eq(error_code, 'MalformedXML')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='Test get object retention')
@attr(assertion='success')
@attr('object-lock')
def test_object_lock_get_obj_retention():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
key = 'file1'
response = client.put_object(Bucket=bucket_name, Body='abc', Key=key)
version_id = response['VersionId']
retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention)
response = client.get_object_retention(Bucket=bucket_name, Key=key)
eq(response['Retention'], retention)
client.delete_object(Bucket=bucket_name, Key=key, VersionId=version_id, BypassGovernanceRetention=True)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='Test get object retention with invalid bucket')
@attr(assertion='fails')
@attr('object-lock')
def test_object_lock_get_obj_retention_invalid_bucket():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(Bucket=bucket_name)
key = 'file1'
client.put_object(Bucket=bucket_name, Body='abc', Key=key)
e = assert_raises(ClientError, client.get_object_retention, Bucket=bucket_name, Key=key)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
eq(error_code, 'InvalidRequest')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put object retention with version id')
@attr(assertion='success')
@attr('object-lock')
def test_object_lock_put_obj_retention_versionid():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
key = 'file1'
client.put_object(Bucket=bucket_name, Body='abc', Key=key)
response = client.put_object(Bucket=bucket_name, Body='abc', Key=key)
version_id = response['VersionId']
retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
client.put_object_retention(Bucket=bucket_name, Key=key, VersionId=version_id, Retention=retention)
response = client.get_object_retention(Bucket=bucket_name, Key=key, VersionId=version_id)
eq(response['Retention'], retention)
client.delete_object(Bucket=bucket_name, Key=key, VersionId=version_id, BypassGovernanceRetention=True)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put object retention to override default retention')
@attr(assertion='success')
@attr('object-lock')
def test_object_lock_put_obj_retention_override_default_retention():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
conf = {'ObjectLockEnabled':'Enabled',
'Rule': {
'DefaultRetention':{
'Mode':'GOVERNANCE',
'Days':1
}
}}
client.put_object_lock_configuration(
Bucket=bucket_name,
ObjectLockConfiguration=conf)
key = 'file1'
response = client.put_object(Bucket=bucket_name, Body='abc', Key=key)
version_id = response['VersionId']
retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention)
response = client.get_object_retention(Bucket=bucket_name, Key=key)
eq(response['Retention'], retention)
client.delete_object(Bucket=bucket_name, Key=key, VersionId=version_id, BypassGovernanceRetention=True)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put object retention to increase retention period')
@attr(assertion='success')
@attr('object-lock')
def test_object_lock_put_obj_retention_increase_period():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
key = 'file1'
response = client.put_object(Bucket=bucket_name, Body='abc', Key=key)
version_id = response['VersionId']
retention1 = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention1)
retention2 = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,3,tzinfo=pytz.UTC)}
client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention2)
response = client.get_object_retention(Bucket=bucket_name, Key=key)
eq(response['Retention'], retention2)
client.delete_object(Bucket=bucket_name, Key=key, VersionId=version_id, BypassGovernanceRetention=True)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put object retention to shorten period')
@attr(assertion='fails')
@attr('object-lock')
def test_object_lock_put_obj_retention_shorten_period():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
key = 'file1'
response = client.put_object(Bucket=bucket_name, Body='abc', Key=key)
version_id = response['VersionId']
retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,3,tzinfo=pytz.UTC)}
client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention)
retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
e = assert_raises(ClientError, client.put_object_retention, Bucket=bucket_name, Key=key, Retention=retention)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 403)
eq(error_code, 'AccessDenied')
client.delete_object(Bucket=bucket_name, Key=key, VersionId=version_id, BypassGovernanceRetention=True)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put object retention to shorten period with bypass header')
@attr(assertion='success')
@attr('object-lock')
def test_object_lock_put_obj_retention_shorten_period_bypass():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
key = 'file1'
response = client.put_object(Bucket=bucket_name, Body='abc', Key=key)
version_id = response['VersionId']
retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,3,tzinfo=pytz.UTC)}
client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention)
retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention, BypassGovernanceRetention=True)
response = client.get_object_retention(Bucket=bucket_name, Key=key)
eq(response['Retention'], retention)
client.delete_object(Bucket=bucket_name, Key=key, VersionId=version_id, BypassGovernanceRetention=True)
@attr(resource='bucket')
@attr(method='delete')
@attr(operation='Test delete object with retention')
@attr(assertion='retention period takes effect')
@attr('object-lock')
def test_object_lock_delete_object_with_retention():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
key = 'file1'
response = client.put_object(Bucket=bucket_name, Body='abc', Key=key)
retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention)
e = assert_raises(ClientError, client.delete_object, Bucket=bucket_name, Key=key, VersionId=response['VersionId'])
status, error_code = _get_status_and_error_code(e.response)
eq(status, 403)
eq(error_code, 'AccessDenied')
response = client.delete_object(Bucket=bucket_name, Key=key, VersionId=response['VersionId'], BypassGovernanceRetention=True)
eq(response['ResponseMetadata']['HTTPStatusCode'], 204)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put legal hold')
@attr(assertion='success')
@attr('object-lock')
def test_object_lock_put_legal_hold():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
key = 'file1'
client.put_object(Bucket=bucket_name, Body='abc', Key=key)
legal_hold = {'Status': 'ON'}
response = client.put_object_legal_hold(Bucket=bucket_name, Key=key, LegalHold=legal_hold)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
response = client.put_object_legal_hold(Bucket=bucket_name, Key=key, LegalHold={'Status':'OFF'})
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put legal hold with invalid bucket')
@attr(assertion='fails')
@attr('object-lock')
def test_object_lock_put_legal_hold_invalid_bucket():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(Bucket=bucket_name)
key = 'file1'
client.put_object(Bucket=bucket_name, Body='abc', Key=key)
legal_hold = {'Status': 'ON'}
e = assert_raises(ClientError, client.put_object_legal_hold, Bucket=bucket_name, Key=key, LegalHold=legal_hold)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
eq(error_code, 'InvalidRequest')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put legal hold with invalid status')
@attr(assertion='fails')
@attr('object-lock')
def test_object_lock_put_legal_hold_invalid_status():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
key = 'file1'
client.put_object(Bucket=bucket_name, Body='abc', Key=key)
legal_hold = {'Status': 'abc'}
e = assert_raises(ClientError, client.put_object_legal_hold, Bucket=bucket_name, Key=key, LegalHold=legal_hold)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
eq(error_code, 'MalformedXML')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='Test get legal hold')
@attr(assertion='success')
@attr('object-lock')
def test_object_lock_get_legal_hold():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
key = 'file1'
client.put_object(Bucket=bucket_name, Body='abc', Key=key)
legal_hold = {'Status': 'ON'}
client.put_object_legal_hold(Bucket=bucket_name, Key=key, LegalHold=legal_hold)
response = client.get_object_legal_hold(Bucket=bucket_name, Key=key)
eq(response['LegalHold'], legal_hold)
legal_hold_off = {'Status': 'OFF'}
client.put_object_legal_hold(Bucket=bucket_name, Key=key, LegalHold=legal_hold_off)
response = client.get_object_legal_hold(Bucket=bucket_name, Key=key)
eq(response['LegalHold'], legal_hold_off)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='Test get legal hold with invalid bucket')
@attr(assertion='fails')
@attr('object-lock')
def test_object_lock_get_legal_hold_invalid_bucket():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(Bucket=bucket_name)
key = 'file1'
client.put_object(Bucket=bucket_name, Body='abc', Key=key)
e = assert_raises(ClientError, client.get_object_legal_hold, Bucket=bucket_name, Key=key)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
eq(error_code, 'InvalidRequest')
@attr(resource='bucket')
@attr(method='delete')
@attr(operation='Test delete object with legal hold on')
@attr(assertion='fails')
@attr('object-lock')
def test_object_lock_delete_object_with_legal_hold_on():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
key = 'file1'
response = client.put_object(Bucket=bucket_name, Body='abc', Key=key)
client.put_object_legal_hold(Bucket=bucket_name, Key=key, LegalHold={'Status': 'ON'})
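    # while the legal hold is ON the version cannot be deleted, even with
    # BypassGovernanceRetention; it has to be switched back OFF first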
e = assert_raises(ClientError, client.delete_object, Bucket=bucket_name, Key=key, VersionId=response['VersionId'])
status, error_code = _get_status_and_error_code(e.response)
eq(status, 403)
eq(error_code, 'AccessDenied')
client.put_object_legal_hold(Bucket=bucket_name, Key=key, LegalHold={'Status':'OFF'})
@attr(resource='bucket')
@attr(method='delete')
@attr(operation='Test delete object with legal hold off')
@attr(assertion='succeeds')
@attr('object-lock')
def test_object_lock_delete_object_with_legal_hold_off():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
key = 'file1'
response = client.put_object(Bucket=bucket_name, Body='abc', Key=key)
client.put_object_legal_hold(Bucket=bucket_name, Key=key, LegalHold={'Status': 'OFF'})
response = client.delete_object(Bucket=bucket_name, Key=key, VersionId=response['VersionId'])
eq(response['ResponseMetadata']['HTTPStatusCode'], 204)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='Test get object metadata')
@attr(assertion='success')
@attr('object-lock')
def test_object_lock_get_obj_metadata():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
key = 'file1'
client.put_object(Bucket=bucket_name, Body='abc', Key=key)
legal_hold = {'Status': 'ON'}
client.put_object_legal_hold(Bucket=bucket_name, Key=key, LegalHold=legal_hold)
retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention)
response = client.head_object(Bucket=bucket_name, Key=key)
eq(response['ObjectLockMode'], retention['Mode'])
eq(response['ObjectLockRetainUntilDate'], retention['RetainUntilDate'])
eq(response['ObjectLockLegalHoldStatus'], legal_hold['Status'])
client.put_object_legal_hold(Bucket=bucket_name, Key=key, LegalHold={'Status':'OFF'})
client.delete_object(Bucket=bucket_name, Key=key, VersionId=response['VersionId'], BypassGovernanceRetention=True)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put legal hold and retention when uploading object')
@attr(assertion='success')
@attr('object-lock')
def test_object_lock_uploading_obj():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
key = 'file1'
client.put_object(Bucket=bucket_name, Body='abc', Key=key, ObjectLockMode='GOVERNANCE',
ObjectLockRetainUntilDate=datetime.datetime(2030,1,1,tzinfo=pytz.UTC), ObjectLockLegalHoldStatus='ON')
response = client.head_object(Bucket=bucket_name, Key=key)
eq(response['ObjectLockMode'], 'GOVERNANCE')
eq(response['ObjectLockRetainUntilDate'], datetime.datetime(2030,1,1,tzinfo=pytz.UTC))
eq(response['ObjectLockLegalHoldStatus'], 'ON')
client.put_object_legal_hold(Bucket=bucket_name, Key=key, LegalHold={'Status':'OFF'})
client.delete_object(Bucket=bucket_name, Key=key, VersionId=response['VersionId'], BypassGovernanceRetention=True)
@attr(resource='object')
@attr(method='copy')
@attr(operation='copy w/ x-amz-copy-source-if-match: the latest ETag')
@attr(assertion='succeeds')
def test_copy_object_ifmatch_good():
bucket_name = get_new_bucket()
client = get_client()
resp = client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
client.copy_object(Bucket=bucket_name, CopySource=bucket_name+'/foo', CopySourceIfMatch=resp['ETag'], Key='bar')
response = client.get_object(Bucket=bucket_name, Key='bar')
body = _get_body(response)
eq(body, 'bar')
@attr(resource='object')
@attr(method='copy')
@attr(operation='copy w/ x-amz-copy-source-if-match: bogus ETag')
@attr(assertion='fails 412')
# TODO: remove fails_on_rgw when https://tracker.ceph.com/issues/40808 is resolved
@attr('fails_on_rgw')
def test_copy_object_ifmatch_failed():
bucket_name = get_new_bucket()
client = get_client()
client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
e = assert_raises(ClientError, client.copy_object, Bucket=bucket_name, CopySource=bucket_name+'/foo', CopySourceIfMatch='ABCORZ', Key='bar')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 412)
eq(error_code, 'PreconditionFailed')
@attr(resource='object')
@attr(method='copy')
@attr(operation='copy w/ x-amz-copy-source-if-none-match: the latest ETag')
@attr(assertion='fails 412')
# TODO: remove fails_on_rgw when https://tracker.ceph.com/issues/40808 is resolved
@attr('fails_on_rgw')
def test_copy_object_ifnonematch_good():
bucket_name = get_new_bucket()
client = get_client()
resp = client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
e = assert_raises(ClientError, client.copy_object, Bucket=bucket_name, CopySource=bucket_name+'/foo', CopySourceIfNoneMatch=resp['ETag'], Key='bar')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 412)
eq(error_code, 'PreconditionFailed')
@attr(resource='object')
@attr(method='copy')
@attr(operation='copy w/ x-amz-copy-source-if-none-match: bogus ETag')
@attr(assertion='succeeds')
def test_copy_object_ifnonematch_failed():
bucket_name = get_new_bucket()
client = get_client()
resp = client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
client.copy_object(Bucket=bucket_name, CopySource=bucket_name+'/foo', CopySourceIfNoneMatch='ABCORZ', Key='bar')
response = client.get_object(Bucket=bucket_name, Key='bar')
body = _get_body(response)
eq(body, 'bar')
@attr(resource='object')
@attr(method='get')
@attr(operation='read to invalid key')
@attr(assertion='fails 400')
# TODO: results in a 404 instead of 400 on the RGW
@attr('fails_on_rgw')
def test_object_read_unreadable():
bucket_name = get_new_bucket()
client = get_client()
e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='\xae\x8a-')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
eq(e.response['Error']['Message'], 'Couldn\'t parse the specified URI.')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='Test User Policy')
@attr(assertion='succeeds')
@attr('user-policy')
def test_user_policy():
client = get_tenant_iam_client()
policy_document = json.dumps(
{"Version":"2012-10-17",
"Statement": {
"Effect":"Allow",
"Action":"*",
"Resource":"*"}}
)
client.put_user_policy(
PolicyDocument= policy_document,
PolicyName='AllAccessPolicy',
UserName=get_tenant_user_id(),
)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='get bucket policy status on a new bucket')
@attr(assertion='succeeds')
@attr('policy_status')
def test_get_bucket_policy_status():
bucket_name = get_new_bucket()
client = get_client()
resp = client.get_bucket_policy_status(Bucket=bucket_name)
eq(resp['PolicyStatus']['IsPublic'],False)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='get bucket policy status on a public acl bucket')
@attr(assertion='succeeds')
@attr('policy_status')
def test_get_public_acl_bucket_policy_status():
bucket_name = get_new_bucket()
    client = get_client()
client.put_bucket_acl(Bucket=bucket_name, ACL='public-read')
resp = client.get_bucket_policy_status(Bucket=bucket_name)
eq(resp['PolicyStatus']['IsPublic'],True)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='get bucket policy status on a authenticated acl bucket')
@attr(assertion='succeeds')
@attr('policy_status')
def test_get_authpublic_acl_bucket_policy_status():
bucket_name = get_new_bucket()
    client = get_client()
client.put_bucket_acl(Bucket=bucket_name, ACL='authenticated-read')
resp = client.get_bucket_policy_status(Bucket=bucket_name)
eq(resp['PolicyStatus']['IsPublic'],True)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='get bucket policy status on a public policy bucket')
@attr(assertion='succeeds')
@attr('policy_status')
def test_get_publicpolicy_acl_bucket_policy_status():
bucket_name = get_new_bucket()
    client = get_client()
resp = client.get_bucket_policy_status(Bucket=bucket_name)
eq(resp['PolicyStatus']['IsPublic'],False)
resource1 = "arn:aws:s3:::" + bucket_name
resource2 = "arn:aws:s3:::" + bucket_name + "/*"
policy_document = json.dumps(
{
"Version": "2012-10-17",
"Statement": [{
"Effect": "Allow",
"Principal": {"AWS": "*"},
"Action": "s3:ListBucket",
"Resource": [
"{}".format(resource1),
"{}".format(resource2)
]
}]
})
client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
resp = client.get_bucket_policy_status(Bucket=bucket_name)
eq(resp['PolicyStatus']['IsPublic'],True)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='get bucket policy status on a public policy bucket')
@attr(assertion='succeeds')
@attr('policy_status')
def test_get_nonpublicpolicy_acl_bucket_policy_status():
bucket_name = get_new_bucket()
    client = get_client()
resp = client.get_bucket_policy_status(Bucket=bucket_name)
eq(resp['PolicyStatus']['IsPublic'],False)
resource1 = "arn:aws:s3:::" + bucket_name
resource2 = "arn:aws:s3:::" + bucket_name + "/*"
policy_document = json.dumps(
{
"Version": "2012-10-17",
"Statement": [{
"Effect": "Allow",
"Principal": {"AWS": "*"},
"Action": "s3:ListBucket",
"Resource": [
"{}".format(resource1),
"{}".format(resource2)
],
"Condition": {
"IpAddress":
{"aws:SourceIp": "10.0.0.0/32"}
}
}]
})
client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
resp = client.get_bucket_policy_status(Bucket=bucket_name)
eq(resp['PolicyStatus']['IsPublic'],False)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='get bucket policy status on a public policy bucket')
@attr(assertion='succeeds')
@attr('policy_status')
def test_get_nonpublicpolicy_deny_bucket_policy_status():
bucket_name = get_new_bucket()
client = get_client()
resp = client.get_bucket_policy_status(Bucket=bucket_name)
eq(resp['PolicyStatus']['IsPublic'],False)
resource1 = "arn:aws:s3:::" + bucket_name
resource2 = "arn:aws:s3:::" + bucket_name + "/*"
policy_document = json.dumps(
{
"Version": "2012-10-17",
"Statement": [{
"Effect": "Allow",
"NotPrincipal": {"AWS": "arn:aws:iam::s3tenant1:root"},
"Action": "s3:ListBucket",
"Resource": [
"{}".format(resource1),
"{}".format(resource2)
],
}]
})
client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
resp = client.get_bucket_policy_status(Bucket=bucket_name)
eq(resp['PolicyStatus']['IsPublic'],True)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='get public access block on a bucket')
@attr(assertion='succeeds')
@attr('policy_status')
def test_get_default_public_block():
#client = get_svc_client(svc='s3control', client_config=Config(s3={'addressing_style': 'path'}))
bucket_name = get_new_bucket()
client = get_client()
resp = client.get_public_access_block(Bucket=bucket_name)
eq(resp['PublicAccessBlockConfiguration']['BlockPublicAcls'], False)
eq(resp['PublicAccessBlockConfiguration']['BlockPublicPolicy'], False)
eq(resp['PublicAccessBlockConfiguration']['IgnorePublicAcls'], False)
eq(resp['PublicAccessBlockConfiguration']['RestrictPublicBuckets'], False)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='get public access block on a bucket')
@attr(assertion='succeeds')
@attr('policy_status')
def test_put_public_block():
#client = get_svc_client(svc='s3control', client_config=Config(s3={'addressing_style': 'path'}))
bucket_name = get_new_bucket()
client = get_client()
access_conf = {'BlockPublicAcls': True,
'IgnorePublicAcls': True,
'BlockPublicPolicy': True,
'RestrictPublicBuckets': False}
client.put_public_access_block(Bucket=bucket_name, PublicAccessBlockConfiguration=access_conf)
resp = client.get_public_access_block(Bucket=bucket_name)
eq(resp['PublicAccessBlockConfiguration']['BlockPublicAcls'], access_conf['BlockPublicAcls'])
eq(resp['PublicAccessBlockConfiguration']['BlockPublicPolicy'], access_conf['BlockPublicPolicy'])
eq(resp['PublicAccessBlockConfiguration']['IgnorePublicAcls'], access_conf['IgnorePublicAcls'])
eq(resp['PublicAccessBlockConfiguration']['RestrictPublicBuckets'], access_conf['RestrictPublicBuckets'])
@attr(resource='bucket')
@attr(method='put')
@attr(operation='get public access block on a bucket')
@attr(assertion='succeeds')
@attr('policy_status')
def test_block_public_put_bucket_acls():
#client = get_svc_client(svc='s3control', client_config=Config(s3={'addressing_style': 'path'}))
bucket_name = get_new_bucket()
client = get_client()
access_conf = {'BlockPublicAcls': True,
'IgnorePublicAcls': False,
'BlockPublicPolicy': True,
'RestrictPublicBuckets': False}
client.put_public_access_block(Bucket=bucket_name, PublicAccessBlockConfiguration=access_conf)
resp = client.get_public_access_block(Bucket=bucket_name)
eq(resp['PublicAccessBlockConfiguration']['BlockPublicAcls'], access_conf['BlockPublicAcls'])
eq(resp['PublicAccessBlockConfiguration']['BlockPublicPolicy'], access_conf['BlockPublicPolicy'])
e = assert_raises(ClientError, client.put_bucket_acl, Bucket=bucket_name,ACL='public-read')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 403)
e = assert_raises(ClientError, client.put_bucket_acl, Bucket=bucket_name,ACL='public-read-write')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 403)
e = assert_raises(ClientError, client.put_bucket_acl, Bucket=bucket_name,ACL='authenticated-read')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 403)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='block public acls on canned acls')
@attr(assertion='succeeds')
@attr('policy_status')
def test_block_public_object_canned_acls():
bucket_name = get_new_bucket()
client = get_client()
access_conf = {'BlockPublicAcls': True,
'IgnorePublicAcls': False,
'BlockPublicPolicy': False,
'RestrictPublicBuckets': False}
client.put_public_access_block(Bucket=bucket_name, PublicAccessBlockConfiguration=access_conf)
# resp = client.get_public_access_block(Bucket=bucket_name)
# eq(resp['PublicAccessBlockConfiguration']['BlockPublicAcls'], access_conf['BlockPublicAcls'])
# eq(resp['PublicAccessBlockConfiguration']['BlockPublicPolicy'], access_conf['BlockPublicPolicy'])
#FIXME: use empty body until #42208
e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key='foo1', Body='', ACL='public-read')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 403)
e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key='foo2', Body='', ACL='public-read')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 403)
e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key='foo3', Body='', ACL='authenticated-read')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 403)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='block public acls on canned acls')
@attr(assertion='succeeds')
@attr('policy_status')
def test_block_public_policy():
bucket_name = get_new_bucket()
client = get_client()
access_conf = {'BlockPublicAcls': False,
'IgnorePublicAcls': False,
'BlockPublicPolicy': True,
'RestrictPublicBuckets': False}
client.put_public_access_block(Bucket=bucket_name, PublicAccessBlockConfiguration=access_conf)
resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
policy_document = make_json_policy("s3:GetObject",
resource)
check_access_denied(client.put_bucket_policy, Bucket=bucket_name, Policy=policy_document)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='ignore public acls on canned acls')
@attr(assertion='succeeds')
@attr('policy_status')
def test_ignore_public_acls():
bucket_name = get_new_bucket()
client = get_client()
alt_client = get_alt_client()
client.put_bucket_acl(Bucket=bucket_name, ACL='public-read')
# Public bucket should be accessible
alt_client.list_objects(Bucket=bucket_name)
client.put_object(Bucket=bucket_name,Key='key1',Body='abcde',ACL='public-read')
resp=alt_client.get_object(Bucket=bucket_name, Key='key1')
eq(_get_body(resp), 'abcde')
access_conf = {'BlockPublicAcls': False,
'IgnorePublicAcls': True,
'BlockPublicPolicy': False,
'RestrictPublicBuckets': False}
client.put_public_access_block(Bucket=bucket_name, PublicAccessBlockConfiguration=access_conf)
resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
client.put_bucket_acl(Bucket=bucket_name, ACL='public-read')
# IgnorePublicACLs is true, so regardless this should behave as a private bucket
check_access_denied(alt_client.list_objects, Bucket=bucket_name)
check_access_denied(alt_client.get_object, Bucket=bucket_name, Key='key1')
|
window.py
|
# -*- coding=UTF-8 -*-
# pyright: strict
"""umamusume pertty derby automation. """
import contextlib
import logging
import sys
import threading
import time
from ctypes import windll
from typing import Callable, Optional, Set, Text, Tuple
import mouse
import PIL.Image
import PIL.ImageGrab
import win32con
import win32gui
import win32ui
LOGGER = logging.getLogger(__name__)
class _g:
init_once = False
class g:
use_legacy_screenshot = False
on_foreground_will_change = lambda: None
def message_box(
msg: Text,
caption: Text,
*,
flags: int = 0,
h_wnd: int = 0,
on_close: Optional[Callable[[int], None]] = None,
) -> Callable[[], None]:
def _run():
res = win32gui.MessageBox(h_wnd, msg, caption, flags)
if callable(on_close):
on_close(res)
t = threading.Thread(target=_run)
t.start()
h_wnd_set: Set[int] = set()
def _iter_window(h_wnd: int, _: None):
if win32gui.GetClassName(h_wnd) != "#32770": # message box
return
h_wnd_set.add(h_wnd)
assert t.ident is not None
while not h_wnd_set:
time.sleep(0.01)
win32gui.EnumThreadWindows(t.ident, _iter_window, None)
assert len(h_wnd_set) == 1, h_wnd_set
def _close():
for i in h_wnd_set:
if win32gui.IsWindow(i):
win32gui.PostMessage(i, win32con.WM_CLOSE, 0, 0)
t.join()
return _close
def init():
if _g.init_once:
return
_g.init_once = True
    # Window size related functions will return incorrect results
    # if we don't make the python process DPI aware.
# https://github.com/NateScarlet/auto-derby/issues/11
windll.user32.SetProcessDPIAware()
def set_client_size(h_wnd: int, width: int, height: int):
init()
left, top, right, bottom = win32gui.GetWindowRect(h_wnd)
_, _, w, h = win32gui.GetClientRect(h_wnd)
LOGGER.info("width=%s height=%s", w, h)
if h == height and w == width:
LOGGER.info("already in wanted size")
return
borderWidth = right - left - w
borderHeight = bottom - top - h
win32gui.SetWindowPos(
h_wnd, 0, left, top, width + borderWidth, height + borderHeight, 0
)
set_client_size(h_wnd, width, height) # repeat until exact match
return
@contextlib.contextmanager
def topmost(h_wnd: int):
init()
left, top, right, bottom = win32gui.GetWindowRect(h_wnd)
win32gui.SetWindowPos(
h_wnd, win32con.HWND_TOPMOST, left, top, right - left, bottom - top, 0
)
yield
left, top, right, bottom = win32gui.GetWindowRect(h_wnd)
win32gui.SetWindowPos(
h_wnd, win32con.HWND_NOTOPMOST, left, top, right - left, bottom - top, 0
)
def set_foreground(h_wnd: int) -> None:
g.on_foreground_will_change()
LOGGER.debug("set foreground window: h_wnd=%s", h_wnd)
try:
win32gui.SetForegroundWindow(h_wnd)
except Exception as ex:
        LOGGER.warning(
"set foreground window failed: h_wnd=%s error='%s'",
h_wnd,
ex,
)
def set_forground(h_wnd: int) -> None:
import warnings
warnings.warn("use set_foreground instead", DeprecationWarning)
return set_foreground(h_wnd)
@contextlib.contextmanager
def recover_foreground():
h_wnd = win32gui.GetForegroundWindow()
LOGGER.debug("foreground window: h_wnd=%s", h_wnd)
g.on_foreground_will_change()
yield
time.sleep(0.1) # switch too fast may cause issue
set_foreground(h_wnd)
def info(msg: Text) -> Callable[[], None]:
return message_box(msg, "auto-derby")
@contextlib.contextmanager
def recover_cursor():
ox, oy = win32gui.GetCursorPos()
yield
mouse.move(ox, oy)
def click_at(h_wnd: int, point: Tuple[int, int]):
point = win32gui.ClientToScreen(h_wnd, point)
with recover_foreground(), recover_cursor(), topmost(h_wnd):
mouse.move(point[0], point[1])
mouse.click()
time.sleep(0.2)
def drag_at(
h_wnd: int, point: Tuple[int, int], *, dx: int, dy: int, duration: float = 1
):
x, y = win32gui.ClientToScreen(h_wnd, point)
with recover_foreground(), recover_cursor(), topmost(h_wnd):
mouse.drag(x, y, x + dx, y + dy, duration=duration)
move_at(h_wnd, (-1, -1))
time.sleep(0.05)
def wheel_at(h_wnd: int, delta: int) -> None:
with recover_foreground():
set_foreground(h_wnd)
for _ in range(abs(delta)):
mouse.wheel(1 if delta > 0 else -1)
time.sleep(1 / 120.0)
time.sleep(1)
def move_at(h_wnd: int, point: Tuple[int, int]):
x, y = win32gui.ClientToScreen(h_wnd, point)
mouse.move(x, y)
def screenshot_pil_crop(h_wnd: int) -> PIL.Image.Image:
init()
    # XXX: BitBlt capture does not work; background windows are not supported.
    # Maybe use WindowsGraphicsCapture like OBS does.
with topmost(h_wnd):
# not use GetWindowRect to exclude border
win32gui.ShowWindow(h_wnd, win32con.SW_NORMAL)
_, _, w, h = win32gui.GetClientRect(h_wnd)
x, y = win32gui.ClientToScreen(h_wnd, (0, 0))
left, top, right, bottom = x, y, x + w, y + h
bbox = (left, top, right, bottom)
return PIL.ImageGrab.grab(bbox, True, True)
# https://docs.microsoft.com/en-us/windows/win32/winprog/using-the-windows-headers
_WIN32_WINNT_WINBLUE = 0x0603
def _win_ver():
v = sys.getwindowsversion()
return v.major << 8 | v.minor
_WIN32_WINNT = _win_ver()
PW_CLIENT_ONLY = 1 << 0
# https://stackoverflow.com/a/40042587
PW_RENDERFULLCONTENT = 1 << 1 if _WIN32_WINNT >= _WIN32_WINNT_WINBLUE else 0
if PW_RENDERFULLCONTENT == 0:
LOGGER.info(
(
"background screenshot not work before windows8.1, "
"will use legacy screenshot."
)
)
g.use_legacy_screenshot = True
# https://stackoverflow.com/a/24352388
def screenshot_print_window(h_wnd: int) -> PIL.Image.Image:
window_dc = win32gui.GetWindowDC(h_wnd)
handle_dc = win32ui.CreateDCFromHandle(window_dc)
win32gui.ShowWindow(h_wnd, win32con.SW_NORMAL)
_, _, width, height = win32gui.GetClientRect(h_wnd)
compatible_dc = handle_dc.CreateCompatibleDC()
bitmap = win32ui.CreateBitmap()
try:
bitmap.CreateCompatibleBitmap(handle_dc, width, height)
compatible_dc.SelectObject(bitmap)
result = windll.user32.PrintWindow(
h_wnd,
compatible_dc.GetSafeHdc(),
PW_CLIENT_ONLY | PW_RENDERFULLCONTENT,
)
if result != 1:
raise RuntimeError("print window failed: %s" % result)
return PIL.Image.frombuffer(
"RGB", (width, height), bitmap.GetBitmapBits(True), "raw", "BGRX", 0, 1
)
finally:
win32gui.DeleteObject(bitmap.GetHandle())
handle_dc.DeleteDC()
compatible_dc.DeleteDC()
win32gui.ReleaseDC(h_wnd, window_dc)
def screenshot(h_wnd: int) -> PIL.Image.Image:
if g.use_legacy_screenshot:
return screenshot_pil_crop(h_wnd)
return screenshot_print_window(h_wnd)
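# A minimal usage sketch (not part of the original module), assuming a valid
# window handle can be found by title; the title and sizes below are illustrative.
if __name__ == "__main__":
    demo_h_wnd = win32gui.FindWindow(None, "umamusume")
    if demo_h_wnd:
        # resize the client area, grab a screenshot, then show a closable dialog
        set_client_size(demo_h_wnd, 540, 960)
        screenshot(demo_h_wnd).save("screenshot.png")
        close = message_box("saved screenshot.png", "auto-derby demo")
        close()  # message_box returns a callable that closes the dialog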
|
multipro3.py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
'''
Inter-process communication.
When we use processes and threads, we find that with threads the list can pass
values because memory is shared, but when we use processes the data is not
shared and the list prints only a single element each time.
'''
from multiprocessing import Process
from threading import Thread
def run(info_list,n):
info_list.append(n)
    print(info_list)
'''
if __name__ =='__main__':
info = []
for i in range(10):
p = Process(target=run,args=[info,i])
p.start()
'''
if __name__ =='__main__':
info = []
for i in range(10):
t = Thread(target=run,args=[info,i])
t.start()
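# A minimal sketch (not part of the original script, kept disabled like the
# Process variant above) showing one way to actually share the list between
# processes via multiprocessing.Manager; assumes Python 3.
'''
if __name__ == '__main__':
    from multiprocessing import Manager
    with Manager() as manager:
        shared_info = manager.list()
        procs = [Process(target=run, args=[shared_info, i]) for i in range(10)]
        for p in procs:
            p.start()
        for p in procs:
            p.join()
        print(list(shared_info))  # all ten elements, because the list is shared
'''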
|
function_http.py
|
import socket
import os
import threading
from picamera import PiCamera
class OutputStream:
def __init__(self, to, cam):
self.to = to
self.cam = cam
def start_header(self, response_code=200, msg="OK"):
self.to.send(f"HTTP/1.0 {str(response_code)} {str(msg)}\r\n".encode())
def add_header(self, key, val):
self.to.send(f"{str(key)}: {str(val)}\r\n".encode())
def end_header(self):
self.to.send(b"\r\n")
def register_conn(self, conn):
self.to = conn
def write(self, buf):
self.to.send(b"--FRAME\r\n")
self.add_header('Age', 0)
self.add_header('Cache-Control', 'no-cache, private')
self.add_header('Pragma', 'no-cache')
self.add_header("Content-Type", "image/jpeg")
self.add_header("Content-Length", len(buf))
self.end_header()
self.to.sendall(buf)
self.to.send(b"\r\n")
class HTTPHeaders:
def __init__(self):
self.header_dict = {}
def parse(self, header_string) -> dict:
headers = header_string.split("\r\n")[:-2]
first_line = headers.pop(0)
method, uri, version = first_line.split()
self.header_dict["method"] = method
self.header_dict["uri"] = uri
self.header_dict["version"] = version
for key_val in headers:
key, val = key_val.split(": ")
self.header_dict[key] = val
def get(self):
return self.header_dict
class HTTPServer:
def __init__(self, server_sock):
self.server_sock = server_sock
self.conn = None
self.cam = PiCamera(resolution="160x120", framerate=30)
self.cam.vflip = True
self.cam.hflip = True
def wait_for_connection(self):
self.conn, addr = self.server_sock.accept()
print(addr)
self.headers = HTTPHeaders()
header_string = self.conn.recv(1024*4).decode() # Header
if not header_string:
return
self.headers.parse(header_string)
session_info = self.headers.get()
if session_info["method"] == "GET":
self.doGET(session_info)
elif session_info["method"] == "POST":
pass
else:
pass
def start_header(self, response_code=200, msg="OK"):
self.conn.send(f"HTTP/1.0 {str(response_code)} {str(msg)}\r\n".encode())
def add_header(self, key, val):
self.conn.send(f"{str(key)}: {str(val)}\r\n".encode())
def end_header(self):
self.conn.send(b"\r\n")
def doGET(self, headers):
path = headers["uri"][1:]
if path != "stream.mjpg":
if path == "":
path = "index.html"
if not os.path.exists(path):
self.start_header(404, "NOTFOUND")
self.end_header()
self.conn.close()
return
print(path)
with open(path, "rb") as f:
filedata = f.read()
self.start_header()
self.add_header("Content-Length", len(filedata))
#self.add_header("Content-Type", )
self.end_header()
self.conn.sendall(filedata)
self.conn.close()
else: # path = "stream.mjpg"
try:
                # stop any existing recording if the camera is already recording
self.cam.stop_recording()
except:
pass
print("stream.mjpg")
self.start_header()
self.add_header("Content-Type", "multipart/x-mixed-replace; boundary=FRAME")
self.end_header()
self.output = OutputStream(self.conn, self.cam)
self.cam.start_recording(output=self.output, format="mjpeg")
def client_loop(self):
while True:
self.wait_for_connection()
def client_loop_in_thread(self):
thread = threading.Thread(target=self.client_loop)
thread.daemon = True
thread.start()
if __name__ == "__main__":
sock = socket.socket()
sock.bind(("", 8000))
sock.listen(1)
server = HTTPServer(sock)
server.client_loop()
|
master.py
|
# -*- coding: utf-8 -*-
'''
This module contains all of the routines needed to set up a master server, this
involves preparing the three listeners and the workers needed by the master.
'''
# Import python libs
import os
import re
import time
import errno
import fnmatch
import signal
import shutil
import stat
import logging
import hashlib
import datetime
try:
import pwd
except ImportError: # This is in case windows minion is importing
pass
import getpass
import resource
import subprocess
import multiprocessing
import sys
# Import third party libs
import zmq
import yaml
from M2Crypto import RSA
# Import salt libs
import salt.crypt
import salt.utils
import salt.client
import salt.payload
import salt.pillar
import salt.state
import salt.runner
import salt.auth
import salt.wheel
import salt.minion
import salt.search
import salt.key
import salt.fileserver
import salt.utils.atomicfile
import salt.utils.event
import salt.utils.verify
import salt.utils.minions
import salt.utils.gzip_util
from salt.utils.debug import enable_sigusr1_handler, inspect_stack
from salt.exceptions import SaltMasterError, MasterExit
from salt.utils.event import tagify
# Import halite libs
try:
import halite
HAS_HALITE = True
except ImportError:
HAS_HALITE = False
log = logging.getLogger(__name__)
def clean_proc(proc, wait_for_kill=10):
'''
Generic method for cleaning up multiprocessing procs
'''
# NoneType and other fun stuff need not apply
if not proc:
return
try:
waited = 0
while proc.is_alive():
proc.terminate()
waited += 1
time.sleep(0.1)
if proc.is_alive() and (waited >= wait_for_kill):
log.error(
'Process did not die with terminate(): {0}'.format(
proc.pid
)
)
                os.kill(proc.pid, signal.SIGKILL)
except (AssertionError, AttributeError):
# Catch AssertionError when the proc is evaluated inside the child
# Catch AttributeError when the process dies between proc.is_alive()
# and proc.terminate() and turns into a NoneType
pass
class SMaster(object):
'''
Create a simple salt-master, this will generate the top level master
'''
def __init__(self, opts):
'''
Create a salt master server instance
'''
self.opts = opts
self.master_key = salt.crypt.MasterKeys(self.opts)
self.key = self.__prep_key()
self.crypticle = self.__prep_crypticle()
def __prep_crypticle(self):
'''
Return the crypticle used for AES
'''
return salt.crypt.Crypticle(self.opts, self.opts['aes'])
def __prep_key(self):
'''
A key needs to be placed in the filesystem with permissions 0400 so
clients are required to run as root.
'''
users = []
keys = {}
acl_users = set(self.opts['client_acl'].keys())
if self.opts.get('user'):
acl_users.add(self.opts['user'])
acl_users.add(getpass.getuser())
for user in pwd.getpwall():
users.append(user.pw_name)
for user in acl_users:
log.info(
'Preparing the {0} key for local communication'.format(
user
)
)
cumask = os.umask(191)
if user not in users:
try:
founduser = pwd.getpwnam(user)
except KeyError:
log.error('ACL user {0} is not available'.format(user))
continue
keyfile = os.path.join(
self.opts['cachedir'], '.{0}_key'.format(user)
)
if os.path.exists(keyfile):
log.debug('Removing stale keyfile: {0}'.format(keyfile))
os.unlink(keyfile)
key = salt.crypt.Crypticle.generate_key_string()
with salt.utils.fopen(keyfile, 'w+') as fp_:
fp_.write(key)
os.umask(cumask)
os.chmod(keyfile, 256)
try:
os.chown(keyfile, pwd.getpwnam(user).pw_uid, -1)
except OSError:
# The master is not being run as root and can therefore not
# chown the key file
pass
keys[user] = key
return keys
class Master(SMaster):
'''
The salt master server
'''
def __init__(self, opts):
'''
Create a salt master server instance
'''
# Warn if ZMQ < 3.2
if not(hasattr(zmq, 'zmq_version_info')) or \
zmq.zmq_version_info() < (3, 2):
# PyZMQ 2.1.9 does not have zmq_version_info
log.warning('You have a version of ZMQ less than ZMQ 3.2! There '
'are known connection keep-alive issues with ZMQ < '
'3.2 which may result in loss of contact with '
'minions. Please upgrade your ZMQ!')
SMaster.__init__(self, opts)
def _clear_old_jobs(self):
'''
The clean old jobs function is the general passive maintenance process
controller for the Salt master. This is where any data that needs to
be cleanly maintained from the master is maintained.
'''
jid_root = os.path.join(self.opts['cachedir'], 'jobs')
search = salt.search.Search(self.opts)
last = int(time.time())
rotate = int(time.time())
fileserver = salt.fileserver.Fileserver(self.opts)
runners = salt.loader.runner(self.opts)
schedule = salt.utils.schedule.Schedule(self.opts, runners)
while True:
now = int(time.time())
loop_interval = int(self.opts['loop_interval'])
if self.opts['keep_jobs'] != 0 and (now - last) >= loop_interval:
cur = '{0:%Y%m%d%H}'.format(datetime.datetime.now())
if os.path.exists(jid_root):
for top in os.listdir(jid_root):
t_path = os.path.join(jid_root, top)
for final in os.listdir(t_path):
f_path = os.path.join(t_path, final)
jid_file = os.path.join(f_path, 'jid')
if not os.path.isfile(jid_file):
continue
with salt.utils.fopen(jid_file, 'r') as fn_:
jid = fn_.read()
if len(jid) < 18:
# Invalid jid, scrub the dir
shutil.rmtree(f_path)
elif int(cur) - int(jid[:10]) > \
self.opts['keep_jobs']:
shutil.rmtree(f_path)
if self.opts.get('publish_session'):
if now - rotate >= self.opts['publish_session'] * 60:
salt.crypt.dropfile(self.opts['cachedir'])
rotate = now
if self.opts.get('search'):
if now - last >= self.opts['search_index_interval']:
search.index()
try:
if not fileserver.servers:
                    log.error('No fileservers loaded, the master will not be '
                              'able to serve files to minions')
raise SaltMasterError('No fileserver backends available')
fileserver.update()
except Exception as exc:
log.error(
'Exception {0} occurred in file server update'.format(exc)
)
try:
schedule.eval()
# Check if scheduler requires lower loop interval than
# the loop_interval setting
if schedule.loop_interval < loop_interval:
loop_interval = schedule.loop_interval
except Exception as exc:
log.error(
'Exception {0} occurred in scheduled job'.format(exc)
)
last = now
try:
time.sleep(loop_interval)
except KeyboardInterrupt:
break
def __set_max_open_files(self):
# Let's check to see how our max open files(ulimit -n) setting is
mof_s, mof_h = resource.getrlimit(resource.RLIMIT_NOFILE)
if mof_h == resource.RLIM_INFINITY:
# Unclear what to do with infinity... OSX reports RLIM_INFINITY as
            # hard limit, but raising to anything above soft limit fails...
mof_h = mof_s
log.info(
'Current values for max open files soft/hard setting: '
'{0}/{1}'.format(
mof_s, mof_h
)
)
# Let's grab, from the configuration file, the value to raise max open
# files to
mof_c = self.opts['max_open_files']
if mof_c > mof_h:
# The configured value is higher than what's allowed
log.info(
'The value for the \'max_open_files\' setting, {0}, is higher '
'than what the user running salt is allowed to raise to, {1}. '
'Defaulting to {1}.'.format(mof_c, mof_h)
)
mof_c = mof_h
if mof_s < mof_c:
# There's room to raise the value. Raise it!
log.info('Raising max open files value to {0}'.format(mof_c))
resource.setrlimit(resource.RLIMIT_NOFILE, (mof_c, mof_h))
try:
mof_s, mof_h = resource.getrlimit(resource.RLIMIT_NOFILE)
log.info(
'New values for max open files soft/hard values: '
'{0}/{1}'.format(mof_s, mof_h)
)
except ValueError:
# https://github.com/saltstack/salt/issues/1991#issuecomment-13025595
# A user under OSX reported that our 100000 default value is
# still too high.
log.critical(
                'Failed to raise max open files setting to {0}. If this '
                'value is too low, the salt-master will most likely fail '
                'to run properly.'.format(
mof_c
)
)
def _pre_flight(self):
'''
Run pre flight checks, if anything in this method fails then the master
should not start up
'''
errors = []
fileserver = salt.fileserver.Fileserver(self.opts)
if not fileserver.servers:
errors.append(
'Failed to load fileserver backends, the configured backends '
'are:\n{0}'.format(
' '.join(self.opts['fileserver_backend'])
)
)
if not self.opts['fileserver_backend']:
errors.append('No fileserver backends are configured')
if errors:
for error in errors:
log.error(error)
log.error('Master failed pre flight checks, exiting\n')
sys.exit(1)
def start(self):
'''
Turn on the master server components
'''
self._pre_flight()
log.info(
'salt-master is starting as user \'{0}\''.format(getpass.getuser())
)
enable_sigusr1_handler()
self.__set_max_open_files()
clear_old_jobs_proc = multiprocessing.Process(
target=self._clear_old_jobs)
clear_old_jobs_proc.start()
reqserv = ReqServer(
self.opts,
self.crypticle,
self.key,
self.master_key)
reqserv.start_publisher()
reqserv.start_event_publisher()
reqserv.start_reactor()
reqserv.start_halite()
def sigterm_clean(signum, frame):
'''
Cleaner method for stopping multiprocessing processes when a
SIGTERM is encountered. This is required when running a salt
master under a process minder like daemontools
'''
log.warn(
'Caught signal {0}, stopping the Salt Master'.format(
signum
)
)
clean_proc(clear_old_jobs_proc)
clean_proc(reqserv.publisher)
clean_proc(reqserv.eventpublisher)
if hasattr(reqserv, 'halite'):
clean_proc(reqserv.halite)
if hasattr(reqserv, 'reactor'):
clean_proc(reqserv.reactor)
for proc in reqserv.work_procs:
clean_proc(proc)
raise MasterExit
signal.signal(signal.SIGTERM, sigterm_clean)
try:
reqserv.run()
except KeyboardInterrupt:
# Shut the master down gracefully on SIGINT
log.warn('Stopping the Salt Master')
raise SystemExit('\nExiting on Ctrl-c')
class Halite(multiprocessing.Process):
'''
Manage the Halite server
'''
def __init__(self, hopts):
super(Halite, self).__init__()
self.hopts = hopts
def run(self):
'''
Fire up halite!
'''
halite.start(self.hopts)
class Publisher(multiprocessing.Process):
'''
The publishing interface, a simple zeromq publisher that sends out the
commands.
'''
def __init__(self, opts):
super(Publisher, self).__init__()
self.opts = opts
def run(self):
'''
Bind to the interface specified in the configuration file
'''
# Set up the context
context = zmq.Context(1)
# Prepare minion publish socket
pub_sock = context.socket(zmq.PUB)
        # if 2.1 <= zmq < 3.0, we only have one HWM setting
try:
pub_sock.setsockopt(zmq.HWM, self.opts.get('pub_hwm', 100))
# in zmq >= 3.0, there are separate send and receive HWM settings
except AttributeError:
pub_sock.setsockopt(zmq.SNDHWM, self.opts.get('pub_hwm', 100))
pub_sock.setsockopt(zmq.RCVHWM, self.opts.get('pub_hwm', 100))
if self.opts['ipv6'] is True and hasattr(zmq, 'IPV4ONLY'):
# IPv6 sockets work for both IPv6 and IPv4 addresses
pub_sock.setsockopt(zmq.IPV4ONLY, 0)
pub_uri = 'tcp://{interface}:{publish_port}'.format(**self.opts)
# Prepare minion pull socket
pull_sock = context.socket(zmq.PULL)
pull_uri = 'ipc://{0}'.format(
os.path.join(self.opts['sock_dir'], 'publish_pull.ipc')
)
# Start the minion command publisher
log.info('Starting the Salt Publisher on {0}'.format(pub_uri))
pub_sock.bind(pub_uri)
pull_sock.bind(pull_uri)
# Restrict access to the socket
os.chmod(
os.path.join(self.opts['sock_dir'], 'publish_pull.ipc'),
448
)
try:
while True:
# Catch and handle EINTR from when this process is sent
# SIGUSR1 gracefully so we don't choke and die horribly
try:
package = pull_sock.recv()
pub_sock.send(package)
except zmq.ZMQError as exc:
if exc.errno == errno.EINTR:
continue
raise exc
except KeyboardInterrupt:
if pub_sock.closed is False:
pub_sock.setsockopt(zmq.LINGER, 1)
pub_sock.close()
if pull_sock.closed is False:
pull_sock.setsockopt(zmq.LINGER, 1)
pull_sock.close()
if context.closed is False:
context.term()
class ReqServer(object):
'''
Starts up the master request server, minions send results to this
interface.
'''
def __init__(self, opts, crypticle, key, mkey):
self.opts = opts
self.master_key = mkey
self.context = zmq.Context(self.opts['worker_threads'])
# Prepare the zeromq sockets
self.uri = 'tcp://{interface}:{ret_port}'.format(**self.opts)
self.clients = self.context.socket(zmq.ROUTER)
if self.opts['ipv6'] is True and hasattr(zmq, 'IPV4ONLY'):
# IPv6 sockets work for both IPv6 and IPv4 addresses
self.clients.setsockopt(zmq.IPV4ONLY, 0)
self.workers = self.context.socket(zmq.DEALER)
self.w_uri = 'ipc://{0}'.format(
os.path.join(self.opts['sock_dir'], 'workers.ipc')
)
# Prepare the AES key
self.key = key
self.crypticle = crypticle
def __bind(self):
'''
Binds the reply server
'''
dfn = os.path.join(self.opts['cachedir'], '.dfn')
if os.path.isfile(dfn):
try:
os.remove(dfn)
except os.error:
pass
log.info('Setting up the master communication server')
self.clients.bind(self.uri)
self.work_procs = []
for ind in range(int(self.opts['worker_threads'])):
self.work_procs.append(MWorker(self.opts,
self.master_key,
self.key,
self.crypticle))
for ind, proc in enumerate(self.work_procs):
log.info('Starting Salt worker process {0}'.format(ind))
proc.start()
self.workers.bind(self.w_uri)
while True:
try:
zmq.device(zmq.QUEUE, self.clients, self.workers)
except zmq.ZMQError as exc:
if exc.errno == errno.EINTR:
continue
raise exc
def start_publisher(self):
'''
Start the salt publisher interface
'''
# Start the publisher
self.publisher = Publisher(self.opts)
self.publisher.start()
def start_event_publisher(self):
'''
Start the salt publisher interface
'''
# Start the publisher
self.eventpublisher = salt.utils.event.EventPublisher(self.opts)
self.eventpublisher.start()
def start_reactor(self):
'''
Start the reactor, but only if the reactor interface is configured
'''
if self.opts.get('reactor'):
self.reactor = salt.utils.event.Reactor(self.opts)
self.reactor.start()
def start_halite(self):
'''
If halite is configured and installed, fire it up!
'''
if HAS_HALITE and 'halite' in self.opts:
log.info('Halite: Starting up ...')
self.halite = Halite(self.opts['halite'])
self.halite.start()
elif 'halite' in self.opts:
log.info('Halite: Not configured, skipping.')
else:
log.debug('Halite: Unavailable.')
def run(self):
'''
Start up the ReqServer
'''
self.__bind()
def destroy(self):
if self.clients.closed is False:
self.clients.setsockopt(zmq.LINGER, 1)
self.clients.close()
if self.workers.closed is False:
self.workers.setsockopt(zmq.LINGER, 1)
self.workers.close()
if self.context.closed is False:
self.context.term()
# Also stop the workers
for worker in self.work_procs:
if worker.is_alive() is True:
worker.terminate()
def __del__(self):
self.destroy()
class MWorker(multiprocessing.Process):
'''
The worker multiprocess instance to manage the backend operations for the
salt master.
'''
def __init__(self,
opts,
mkey,
key,
crypticle):
multiprocessing.Process.__init__(self)
self.opts = opts
self.serial = salt.payload.Serial(opts)
self.crypticle = crypticle
self.mkey = mkey
self.key = key
self.k_mtime = 0
def __bind(self):
'''
Bind to the local port
'''
context = zmq.Context(1)
socket = context.socket(zmq.REP)
w_uri = 'ipc://{0}'.format(
os.path.join(self.opts['sock_dir'], 'workers.ipc')
)
        log.info('Worker connecting to socket {0}'.format(w_uri))
try:
socket.connect(w_uri)
while True:
try:
package = socket.recv()
self._update_aes()
payload = self.serial.loads(package)
ret = self.serial.dumps(self._handle_payload(payload))
socket.send(ret)
# Properly handle EINTR from SIGUSR1
except zmq.ZMQError as exc:
if exc.errno == errno.EINTR:
continue
raise exc
# Changes here create a zeromq condition, check with thatch45 before
# making any zeromq changes
except KeyboardInterrupt:
socket.close()
def _handle_payload(self, payload):
'''
The _handle_payload method is the key method used to figure out what
needs to be done with communication to the server
'''
try:
key = payload['enc']
load = payload['load']
except KeyError:
return ''
return {'aes': self._handle_aes,
'pub': self._handle_pub,
'clear': self._handle_clear}[key](load)
def _handle_clear(self, load):
'''
Take care of a cleartext command
'''
log.info('Clear payload received with command {cmd}'.format(**load))
if load['cmd'].startswith('__'):
return False
return getattr(self.clear_funcs, load['cmd'])(load)
def _handle_pub(self, load):
'''
Handle a command sent via a public key pair
'''
if load['cmd'].startswith('__'):
return False
log.info('Pubkey payload received with command {cmd}'.format(**load))
def _handle_aes(self, load):
'''
Handle a command sent via an AES key
'''
try:
data = self.crypticle.loads(load)
except Exception:
return ''
if 'cmd' not in data:
log.error('Received malformed command {0}'.format(data))
return {}
log.info('AES payload received with command {0}'.format(data['cmd']))
if data['cmd'].startswith('__'):
return False
return self.aes_funcs.run_func(data['cmd'], data)
def _update_aes(self):
'''
Check to see if a fresh AES key is available and update the components
of the worker
'''
dfn = os.path.join(self.opts['cachedir'], '.dfn')
try:
stats = os.stat(dfn)
except os.error:
return
if stats.st_mode != 0100400:
# Invalid dfn, return
return
if stats.st_mtime > self.k_mtime:
# new key, refresh crypticle
with salt.utils.fopen(dfn) as fp_:
aes = fp_.read()
if len(aes) != 76:
return
self.crypticle = salt.crypt.Crypticle(self.opts, aes)
self.clear_funcs.crypticle = self.crypticle
self.clear_funcs.opts['aes'] = aes
self.aes_funcs.crypticle = self.crypticle
self.aes_funcs.opts['aes'] = aes
self.k_mtime = stats.st_mtime
def run(self):
'''
Start a Master Worker
'''
self.clear_funcs = ClearFuncs(
self.opts,
self.key,
self.mkey,
self.crypticle)
self.aes_funcs = AESFuncs(self.opts, self.crypticle)
self.__bind()
class AESFuncs(object):
'''
Set up functions that are available when the load is encrypted with AES
'''
# The AES Functions:
#
def __init__(self, opts, crypticle):
self.opts = opts
self.event = salt.utils.event.MasterEvent(self.opts['sock_dir'])
self.serial = salt.payload.Serial(opts)
self.crypticle = crypticle
self.ckminions = salt.utils.minions.CkMinions(opts)
# Create the tops dict for loading external top data
self.tops = salt.loader.tops(self.opts)
# Make a client
self.local = salt.client.LocalClient(self.opts['conf_file'])
# Create the master minion to access the external job cache
self.mminion = salt.minion.MasterMinion(
self.opts,
states=False,
rend=False)
self.__setup_fileserver()
def __setup_fileserver(self):
'''
Set the local file objects from the file server interface
'''
fs_ = salt.fileserver.Fileserver(self.opts)
self._serve_file = fs_.serve_file
self._file_hash = fs_.file_hash
self._file_list = fs_.file_list
self._file_list_emptydirs = fs_.file_list_emptydirs
self._dir_list = fs_.dir_list
self._symlink_list = fs_.symlink_list
self._file_envs = fs_.envs
def __verify_minion(self, id_, token):
'''
Take a minion id and a string signed with the minion private key
The string needs to verify as 'salt' with the minion public key
'''
if not salt.utils.verify.valid_id(self.opts, id_):
return False
pub_path = os.path.join(self.opts['pki_dir'], 'minions', id_)
with salt.utils.fopen(pub_path, 'r') as fp_:
minion_pub = fp_.read()
tmp_pub = salt.utils.mkstemp()
with salt.utils.fopen(tmp_pub, 'w+') as fp_:
fp_.write(minion_pub)
pub = None
try:
pub = RSA.load_pub_key(tmp_pub)
except RSA.RSAError as err:
log.error('Unable to load temporary public key "{0}": {1}'
.format(tmp_pub, err))
try:
os.remove(tmp_pub)
if pub.public_decrypt(token, 5) == 'salt':
return True
except RSA.RSAError as err:
log.error('Unable to decrypt token: {0}'.format(err))
        log.error('Salt minion claiming to be {0} has attempted to '
                  'communicate with the master and could not be verified'
.format(id_))
return False
def __verify_minion_publish(self, clear_load):
'''
Verify that the passed information authorized a minion to execute
'''
# Verify that the load is valid
if 'peer' not in self.opts:
return False
if not isinstance(self.opts['peer'], dict):
return False
if any(key not in clear_load for key in ('fun', 'arg', 'tgt', 'ret', 'tok', 'id')):
return False
# If the command will make a recursive publish don't run
if re.match('publish.*', clear_load['fun']):
return False
# Check the permissions for this minion
if not self.__verify_minion(clear_load['id'], clear_load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warn(
(
'Minion id {0} is not who it says it is and is attempting '
'to issue a peer command'
).format(clear_load['id'])
)
return False
perms = []
for match in self.opts['peer']:
if re.match(match, clear_load['id']):
# This is the list of funcs/modules!
if isinstance(self.opts['peer'][match], list):
perms.extend(self.opts['peer'][match])
if ',' in clear_load['fun']:
# 'arg': [['cat', '/proc/cpuinfo'], [], ['foo']]
clear_load['fun'] = clear_load['fun'].split(',')
arg_ = []
for arg in clear_load['arg']:
arg_.append(arg.split())
clear_load['arg'] = arg_
good = self.ckminions.auth_check(
perms,
clear_load['fun'],
clear_load['tgt'],
clear_load.get('tgt_type', 'glob'))
if not good:
return False
return True
def _ext_nodes(self, load):
'''
Return the results from an external node classifier if one is
specified
'''
if 'id' not in load:
log.error('Received call for external nodes without an id')
return {}
if not salt.utils.verify.valid_id(self.opts, load['id']):
return {}
if 'tok' not in load:
log.error(
'Received incomplete call from {0} for {1!r}, missing {2!r}'
.format(
load['id'],
inspect_stack()['co_name'],
'tok'
))
return False
if not self.__verify_minion(load['id'], load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warn(
'Minion id {0} is not who it says it is!'.format(
load['id']
)
)
return {}
ret = {}
# The old ext_nodes method is set to be deprecated in 0.10.4
# and should be removed within 3-5 releases in favor of the
# "master_tops" system
if self.opts['external_nodes']:
if not salt.utils.which(self.opts['external_nodes']):
log.error(('Specified external nodes controller {0} is not'
' available, please verify that it is installed'
'').format(self.opts['external_nodes']))
return {}
cmd = '{0} {1}'.format(self.opts['external_nodes'], load['id'])
ndata = yaml.safe_load(
subprocess.Popen(
cmd,
shell=True,
stdout=subprocess.PIPE
).communicate()[0])
if 'environment' in ndata:
env = ndata['environment']
else:
env = 'base'
if 'classes' in ndata:
if isinstance(ndata['classes'], dict):
ret[env] = list(ndata['classes'])
elif isinstance(ndata['classes'], list):
ret[env] = ndata['classes']
else:
return ret
# Evaluate all configured master_tops interfaces
opts = {}
grains = {}
if 'opts' in load:
opts = load['opts']
if 'grains' in load['opts']:
grains = load['opts']['grains']
for fun in self.tops:
if fun not in self.opts.get('master_tops', {}):
continue
try:
ret.update(self.tops[fun](opts=opts, grains=grains))
except Exception as exc:
# If anything happens in the top generation, log it and move on
log.error(
'Top function {0} failed with error {1} for minion '
'{2}'.format(
fun, exc, load['id']
)
)
return ret
def _master_opts(self, load):
'''
Return the master options to the minion
'''
mopts = {}
file_roots = {}
envs = self._file_envs()
for env in envs:
if env not in file_roots:
file_roots[env] = []
mopts['file_roots'] = file_roots
if load.get('env_only'):
return mopts
mopts['renderer'] = self.opts['renderer']
mopts['failhard'] = self.opts['failhard']
mopts['state_top'] = self.opts['state_top']
mopts['nodegroups'] = self.opts['nodegroups']
mopts['state_auto_order'] = self.opts['state_auto_order']
mopts['state_events'] = self.opts['state_events']
return mopts
def _mine_get(self, load):
'''
Gathers the data from the specified minions' mine
'''
if any(key not in load for key in ('id', 'tgt', 'fun')):
return {}
if 'tok' not in load:
log.error(
'Received incomplete call from {0} for {1!r}, missing {2!r}'
.format(
load['id'],
inspect_stack()['co_name'],
'tok'
))
return False
if not self.__verify_minion(load['id'], load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warn(
'Minion id {0} is not who it says it is!'.format(
load['id']
)
)
return {}
if 'mine_get' in self.opts:
# If master side acl defined.
if not isinstance(self.opts['mine_get'], dict):
return {}
perms = set()
for match in self.opts['mine_get']:
if re.match(match, load['id']):
if isinstance(self.opts['mine_get'][match], list):
perms.update(self.opts['mine_get'][match])
if not any(re.match(perm, load['fun']) for perm in perms):
return {}
ret = {}
if not salt.utils.verify.valid_id(self.opts, load['id']):
return ret
checker = salt.utils.minions.CkMinions(self.opts)
minions = checker.check_minions(
load['tgt'],
load.get('expr_form', 'glob')
)
for minion in minions:
mine = os.path.join(
self.opts['cachedir'],
'minions',
minion,
'mine.p')
try:
with salt.utils.fopen(mine) as fp_:
fdata = self.serial.load(fp_).get(load['fun'])
if fdata:
ret[minion] = fdata
except Exception:
continue
return ret
def _mine(self, load):
'''
Return the mine data
'''
if 'id' not in load or 'data' not in load:
return False
if not salt.utils.verify.valid_id(self.opts, load['id']):
return False
if 'tok' not in load:
log.error(
'Received incomplete call from {0} for {1!r}, missing {2!r}'
.format(
load['id'],
inspect_stack()['co_name'],
'tok'
))
return False
if not self.__verify_minion(load['id'], load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warn(
'Minion id {0} is not who it says it is!'.format(
load['id']
)
)
return {}
if self.opts.get('minion_data_cache', False) or self.opts.get('enforce_mine_cache', False):
cdir = os.path.join(self.opts['cachedir'], 'minions', load['id'])
if not os.path.isdir(cdir):
os.makedirs(cdir)
datap = os.path.join(cdir, 'mine.p')
if not load.get('clear', False):
if os.path.isfile(datap):
with salt.utils.fopen(datap, 'r') as fp_:
new = self.serial.load(fp_)
if isinstance(new, dict):
new.update(load['data'])
load['data'] = new
with salt.utils.fopen(datap, 'w+') as fp_:
fp_.write(self.serial.dumps(load['data']))
return True
def _mine_delete(self, load):
'''
Allow the minion to delete a specific function from its own mine
'''
if 'id' not in load or 'fun' not in load:
return False
if not salt.utils.verify.valid_id(self.opts, load['id']):
return False
if 'tok' not in load:
log.error(
'Received incomplete call from {0} for {1!r}, missing {2!r}'
.format(
load['id'],
inspect_stack()['co_name'],
'tok'
))
return False
if not self.__verify_minion(load['id'], load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warn(
'Minion id {0} is not who it says it is!'.format(
load['id']
)
)
return {}
if self.opts.get('minion_data_cache', False) or self.opts.get('enforce_mine_cache', False):
cdir = os.path.join(self.opts['cachedir'], 'minions', load['id'])
if not os.path.isdir(cdir):
return True
datap = os.path.join(cdir, 'mine.p')
if os.path.isfile(datap):
try:
with salt.utils.fopen(datap, 'r') as fp_:
mine_data = self.serial.load(fp_)
if isinstance(mine_data, dict):
if mine_data.pop(load['fun'], False):
with salt.utils.fopen(datap, 'w+') as fp_:
fp_.write(self.serial.dumps(mine_data))
except OSError:
return False
return True
def _mine_flush(self, load):
'''
Allow the minion to delete all of its own mine contents
'''
if 'id' not in load:
return False
if not salt.utils.verify.valid_id(self.opts, load['id']):
return False
if 'tok' not in load:
log.error(
'Received incomplete call from {0} for {1!r}, missing {2!r}'
.format(
load['id'],
inspect_stack()['co_name'],
'tok'
))
return False
if not self.__verify_minion(load['id'], load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warn(
'Minion id {0} is not who it says it is!'.format(
load['id']
)
)
return {}
if self.opts.get('minion_data_cache', False) or self.opts.get('enforce_mine_cache', False):
cdir = os.path.join(self.opts['cachedir'], 'minions', load['id'])
if not os.path.isdir(cdir):
return True
datap = os.path.join(cdir, 'mine.p')
if os.path.isfile(datap):
try:
os.remove(datap)
except OSError:
return False
return True
def _file_recv(self, load):
'''
Allows minions to send files to the master, files are sent to the
master file cache
'''
if any(key not in load for key in ('id', 'path', 'loc')):
return False
if not self.opts['file_recv'] or os.path.isabs(load['path']):
return False
if os.path.isabs(load['path']) or '../' in load['path']:
# Can overwrite master files!!
return False
if not salt.utils.verify.valid_id(self.opts, load['id']):
return False
if 'tok' not in load:
log.error(
'Received incomplete call from {0} for {1!r}, missing {2!r}'
.format(
load['id'],
inspect_stack()['co_name'],
'tok'
))
return False
if not self.__verify_minion(load['id'], load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warn(
'Minion id {0} is not who it says it is!'.format(
load['id']
)
)
return {}
cpath = os.path.join(
self.opts['cachedir'],
'minions',
load['id'],
'files',
load['path'])
cdir = os.path.dirname(cpath)
if not os.path.isdir(cdir):
try:
os.makedirs(cdir)
except os.error:
pass
if os.path.isfile(cpath) and load['loc'] != 0:
mode = 'ab'
else:
mode = 'wb'
with salt.utils.fopen(cpath, mode) as fp_:
if load['loc']:
fp_.seek(load['loc'])
fp_.write(load['data'])
return True
def _pillar(self, load):
'''
Return the pillar data for the minion
'''
if any(key not in load for key in ('id', 'grains', 'env')):
return False
if not salt.utils.verify.valid_id(self.opts, load['id']):
return False
pillar = salt.pillar.Pillar(
self.opts,
load['grains'],
load['id'],
load['env'],
load.get('ext'))
data = pillar.compile_pillar()
if self.opts.get('minion_data_cache', False):
cdir = os.path.join(self.opts['cachedir'], 'minions', load['id'])
if not os.path.isdir(cdir):
os.makedirs(cdir)
datap = os.path.join(cdir, 'data.p')
with salt.utils.fopen(datap, 'w+') as fp_:
fp_.write(
self.serial.dumps(
{'grains': load['grains'],
'pillar': data})
)
return data
def _minion_event(self, load):
'''
Receive an event from the minion and fire it on the master event
interface
'''
if 'id' not in load:
return False
if not salt.utils.verify.valid_id(self.opts, load['id']):
return False
if 'tok' not in load:
log.error(
'Received incomplete call from {0} for {1!r}, missing {2!r}'
.format(
load['id'],
inspect_stack()['co_name'],
'tok'
))
return False
if not self.__verify_minion(load['id'], load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warn(
'Minion id {0} is not who it says it is!'.format(
load['id']
)
)
return {}
if 'events' not in load and ('tag' not in load or 'data' not in load):
return False
if 'events' in load:
for event in load['events']:
self.event.fire_event(event, event['tag']) # old dup event
if load.get('pretag') is not None:
self.event.fire_event(event, tagify(event['tag'], base=load['pretag']))
else:
tag = load['tag']
self.event.fire_event(load, tag)
return True
def _return(self, load):
'''
Handle the return data sent from the minions
'''
# If the return data is invalid, just ignore it
if any(key not in load for key in ('return', 'jid', 'id')):
return False
if not salt.utils.verify.valid_id(self.opts, load['id']):
return False
if load['jid'] == 'req':
# The minion is returning a standalone job, request a jobid
load['jid'] = salt.utils.prep_jid(
self.opts['cachedir'],
self.opts['hash_type'],
load.get('nocache', False))
log.info('Got return from {id} for job {jid}'.format(**load))
self.event.fire_event(load, load['jid']) # old dup event
self.event.fire_event(load, tagify([load['jid'], 'ret', load['id']], 'job'))
self.event.fire_ret_load(load)
if self.opts['master_ext_job_cache']:
fstr = '{0}.returner'.format(self.opts['master_ext_job_cache'])
self.mminion.returners[fstr](load)
return
if not self.opts['job_cache'] or self.opts.get('ext_job_cache'):
return
jid_dir = salt.utils.jid_dir(
load['jid'],
self.opts['cachedir'],
self.opts['hash_type']
)
if not os.path.isdir(jid_dir):
log.error(
'An inconsistency occurred, a job was received with a job id '
'that is not present on the master: {jid}'.format(**load)
)
return False
if os.path.exists(os.path.join(jid_dir, 'nocache')):
return
hn_dir = os.path.join(jid_dir, load['id'])
if not os.path.isdir(hn_dir):
os.makedirs(hn_dir)
# Otherwise the minion has already returned this jid and it should
# be dropped
else:
log.error(
'An extra return was detected from minion {0}, please verify '
'the minion, this could be a replay attack'.format(
load['id']
)
)
return False
self.serial.dump(
load['return'],
# Use atomic open here to avoid the file being read before it's
# completely written to. Refs #1935
salt.utils.atomicfile.atomic_open(
os.path.join(hn_dir, 'return.p'), 'w+'
)
)
if 'out' in load:
self.serial.dump(
load['out'],
# Use atomic open here to avoid the file being read before
# it's completely written to. Refs #1935
salt.utils.atomicfile.atomic_open(
os.path.join(hn_dir, 'out.p'), 'w+'
)
)
def _syndic_return(self, load):
'''
Receive a syndic minion return and format it to look like returns from
individual minions.
'''
# Verify the load
if any(key not in load for key in ('return', 'jid', 'id')):
return None
if not salt.utils.verify.valid_id(self.opts, load['id']):
return False
# set the write flag
jid_dir = salt.utils.jid_dir(
load['jid'],
self.opts['cachedir'],
self.opts['hash_type']
)
if not os.path.isdir(jid_dir):
os.makedirs(jid_dir)
if 'load' in load:
with salt.utils.fopen(os.path.join(jid_dir, '.load.p'), 'w+') as fp_:
self.serial.dump(load['load'], fp_)
wtag = os.path.join(jid_dir, 'wtag_{0}'.format(load['id']))
try:
with salt.utils.fopen(wtag, 'w+') as fp_:
fp_.write('')
except (IOError, OSError):
log.error(
'Failed to commit the write tag for the syndic return, are '
'permissions correct in the cache dir: {0}?'.format(
self.opts['cachedir']
)
)
return False
# Format individual return loads
for key, item in load['return'].items():
ret = {'jid': load['jid'],
'id': key,
'return': item}
if 'out' in load:
ret['out'] = load['out']
self._return(ret)
if os.path.isfile(wtag):
os.remove(wtag)
def minion_runner(self, clear_load):
'''
Execute a runner from a minion, return the runner's function data
'''
if 'peer_run' not in self.opts:
return {}
if not isinstance(self.opts['peer_run'], dict):
return {}
if any(key not in clear_load for key in ('fun', 'arg', 'id', 'tok')):
return {}
if not self.__verify_minion(clear_load['id'], clear_load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warn(
'Minion id {0} is not who it says it is!'.format(
clear_load['id']
)
)
return {}
perms = set()
for match in self.opts['peer_run']:
if re.match(match, clear_load['id']):
# This is the list of funcs/modules!
if isinstance(self.opts['peer_run'][match], list):
perms.update(self.opts['peer_run'][match])
good = False
for perm in perms:
if re.match(perm, clear_load['fun']):
good = True
if not good:
return {}
# Prepare the runner object
opts = {'fun': clear_load['fun'],
'arg': clear_load['arg'],
'id': clear_load['id'],
'doc': False,
'conf_file': self.opts['conf_file']}
opts.update(self.opts)
runner = salt.runner.Runner(opts)
return runner.run()
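    # Illustrative master config snippet (an assumption, not taken from this
    # file): peer_run maps a regex on minion ids to a list of regexes on
    # runner functions, exactly as consumed by minion_runner() above:
    #
    #   peer_run:
    #     'web.*\.example\.com':
    #       - jobs.list_jobs
    #       - 'manage\..*'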
def pub_ret(self, load):
'''
Request the return data from a specific jid, only allowed
        if the requesting minion also initiated the execution.
'''
if any(key not in load for key in ('jid', 'id', 'tok')):
return {}
if not self.__verify_minion(load['id'], load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warn(
'Minion id {0} is not who it says it is!'.format(
load['id']
)
)
return {}
# Check that this minion can access this data
auth_cache = os.path.join(
self.opts['cachedir'],
'publish_auth')
if not os.path.isdir(auth_cache):
os.makedirs(auth_cache)
jid_fn = os.path.join(auth_cache, load['jid'])
with salt.utils.fopen(jid_fn, 'r') as fp_:
if not load['id'] == fp_.read():
return {}
# Grab the latest and return
return self.local.get_cache_returns(load['jid'])
def minion_pub(self, clear_load):
'''
        Publish a command initiated from a minion. This method enforces
        minion restrictions so that minion publication only works if it is
        enabled in the config.
The configuration on the master allows minions to be matched to
salt functions, so the minions can only publish allowed salt functions
The config will look like this:
peer:
.*:
- .*
This configuration will enable all minions to execute all commands.
peer:
foo.example.com:
- test.*
This configuration will only allow the minion foo.example.com to
execute commands from the test module
'''
if not self.__verify_minion_publish(clear_load):
return {}
# Set up the publication payload
load = {
'fun': clear_load['fun'],
'arg': clear_load['arg'],
'expr_form': clear_load.get('tgt_type', 'glob'),
'tgt': clear_load['tgt'],
'ret': clear_load['ret'],
'id': clear_load['id'],
}
if 'tgt_type' in clear_load:
if clear_load['tgt_type'].startswith('node'):
if clear_load['tgt'] in self.opts['nodegroups']:
load['tgt'] = self.opts['nodegroups'][clear_load['tgt']]
load['expr_form_type'] = 'compound'
load['expr_form'] = clear_load['tgt_type']
else:
return {}
else:
load['expr_form'] = clear_load['tgt_type']
ret = {}
ret['jid'] = self.local.cmd_async(**load)
ret['minions'] = self.ckminions.check_minions(
clear_load['tgt'],
load['expr_form'])
auth_cache = os.path.join(
self.opts['cachedir'],
'publish_auth')
if not os.path.isdir(auth_cache):
os.makedirs(auth_cache)
jid_fn = os.path.join(auth_cache, ret['jid'])
with salt.utils.fopen(jid_fn, 'w+') as fp_:
fp_.write(clear_load['id'])
return ret
def minion_publish(self, clear_load):
'''
        Publish a command initiated from a minion. This method enforces
        minion restrictions so that minion publication only works if it is
        enabled in the config.
The configuration on the master allows minions to be matched to
salt functions, so the minions can only publish allowed salt functions
The config will look like this:
peer:
.*:
- .*
This configuration will enable all minions to execute all commands.
peer:
foo.example.com:
- test.*
This configuration will only allow the minion foo.example.com to
execute commands from the test module
'''
if not self.__verify_minion_publish(clear_load):
return {}
# Set up the publication payload
load = {
'fun': clear_load['fun'],
'arg': clear_load['arg'],
'expr_form': clear_load.get('tgt_type', 'glob'),
'tgt': clear_load['tgt'],
'ret': clear_load['ret'],
'id': clear_load['id'],
}
if 'tmo' in clear_load:
try:
load['timeout'] = int(clear_load['tmo'])
except ValueError:
msg = 'Failed to parse timeout value: {0}'.format(
clear_load['tmo'])
log.warn(msg)
return {}
if 'timeout' in clear_load:
try:
load['timeout'] = int(clear_load['timeout'])
except ValueError:
msg = 'Failed to parse timeout value: {0}'.format(
                    clear_load['timeout'])
log.warn(msg)
return {}
if 'tgt_type' in clear_load:
if clear_load['tgt_type'].startswith('node'):
if clear_load['tgt'] in self.opts['nodegroups']:
load['tgt'] = self.opts['nodegroups'][clear_load['tgt']]
load['expr_form_type'] = 'compound'
else:
return {}
else:
load['expr_form'] = clear_load['tgt_type']
load['raw'] = True
ret = {}
for minion in self.local.cmd_iter(**load):
if clear_load.get('form', '') == 'full':
data = minion
if 'jid' in minion:
ret['__jid__'] = minion['jid']
data['ret'] = data.pop('return')
ret[minion['id']] = data
else:
ret[minion['id']] = minion['return']
if 'jid' in minion:
ret['__jid__'] = minion['jid']
for key, val in self.local.get_cache_returns(ret['__jid__']).items():
                if key not in ret:
ret[key] = val
if clear_load.get('form', '') != 'full':
ret.pop('__jid__')
return ret
def revoke_auth(self, load):
'''
Allow a minion to request revocation of its own key
'''
if 'id' not in load or 'tok' not in load:
return False
if not self.__verify_minion(load['id'], load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warn(
(
'Minion id {0} is not who it says it is and is attempting '
'to revoke the key for {0}'
).format(load['id'])
)
return False
keyapi = salt.key.Key(self.opts)
keyapi.delete_key(load['id'])
return True
def run_func(self, func, load):
'''
Wrapper for running functions executed with AES encryption
'''
# Don't honor private functions
if func.startswith('__'):
return self.crypticle.dumps({})
# Run the func
if hasattr(self, func):
try:
ret = getattr(self, func)(load)
except Exception:
ret = ''
log.error(
'Error in function {0}:\n'.format(func),
exc_info=True
)
else:
log.error(
'Received function {0} which is unavailable on the master, '
'returning False'.format(
func
)
)
return self.crypticle.dumps(False)
# Don't encrypt the return value for the _return func
# (we don't care about the return value, so why encrypt it?)
if func == '_return':
return ret
if func == '_pillar' and 'id' in load:
if load.get('ver') != '2' and self.opts['pillar_version'] == 1:
# Authorized to return old pillar proto
return self.crypticle.dumps(ret)
# encrypt with a specific AES key
pubfn = os.path.join(self.opts['pki_dir'],
'minions',
load['id'])
key = salt.crypt.Crypticle.generate_key_string()
pcrypt = salt.crypt.Crypticle(
self.opts,
key)
try:
pub = RSA.load_pub_key(pubfn)
except RSA.RSAError:
return self.crypticle.dumps({})
pret = {}
pret['key'] = pub.public_encrypt(key, 4)
pret['pillar'] = pcrypt.dumps(ret)
return pret
# AES Encrypt the return
return self.crypticle.dumps(ret)
class ClearFuncs(object):
'''
    Set up functions that are safe to execute when commands are sent to the
    master without encryption and authentication
'''
# The ClearFuncs object encapsulates the functions that can be executed in
# the clear:
# publish (The publish from the LocalClient)
# _auth
def __init__(self, opts, key, master_key, crypticle):
self.opts = opts
self.serial = salt.payload.Serial(opts)
self.key = key
self.master_key = master_key
self.crypticle = crypticle
# Create the event manager
self.event = salt.utils.event.MasterEvent(self.opts['sock_dir'])
# Make a client
self.local = salt.client.LocalClient(self.opts['conf_file'])
        # Make a minion checker object
self.ckminions = salt.utils.minions.CkMinions(opts)
# Make an Auth object
self.loadauth = salt.auth.LoadAuth(opts)
# Stand up the master Minion to access returner data
self.mminion = salt.minion.MasterMinion(
self.opts,
states=False,
rend=False)
# Make a wheel object
self.wheel_ = salt.wheel.Wheel(opts)
def _send_cluster(self):
'''
Send the cluster data out
'''
log.debug('Sending out cluster data')
ret = self.local.cmd(self.opts['cluster_masters'],
'cluster.distrib',
self._cluster_load(),
0,
'list'
)
log.debug('Cluster distributed: {0}'.format(ret))
def _cluster_load(self):
'''
Generates the data sent to the cluster nodes.
'''
minions = {}
master_pem = ''
with salt.utils.fopen(self.opts['conf_file'], 'r') as fp_:
master_conf = fp_.read()
minion_dir = os.path.join(self.opts['pki_dir'], 'minions')
for host in os.listdir(minion_dir):
pub = os.path.join(minion_dir, host)
minions[host] = salt.utils.fopen(pub, 'r').read()
if self.opts['cluster_mode'] == 'full':
master_pem_path = os.path.join(self.opts['pki_dir'], 'master.pem')
with salt.utils.fopen(master_pem_path) as fp_:
master_pem = fp_.read()
return [minions,
master_conf,
master_pem,
self.opts['conf_file']]
def _check_permissions(self, filename):
'''
Check if the specified filename has correct permissions
'''
if salt.utils.is_windows():
return True
# After we've ascertained we're not on windows
import grp
try:
user = self.opts['user']
pwnam = pwd.getpwnam(user)
uid = pwnam[2]
gid = pwnam[3]
groups = [g.gr_gid for g in grp.getgrall() if user in g.gr_mem]
except KeyError:
log.error(
'Failed to determine groups for user {0}. The user is not '
'available.\n'.format(
user
)
)
return False
fmode = os.stat(filename)
if os.getuid() == 0:
if fmode.st_uid == uid or fmode.st_gid != gid:
return True
elif self.opts.get('permissive_pki_access', False) \
and fmode.st_gid in groups:
return True
else:
if stat.S_IWOTH & fmode.st_mode:
# don't allow others to write to the file
return False
# check group flags
if self.opts.get('permissive_pki_access', False) \
and stat.S_IWGRP & fmode.st_mode:
return True
elif stat.S_IWGRP & fmode.st_mode:
return False
# check if writable by group or other
if not (stat.S_IWGRP & fmode.st_mode or
stat.S_IWOTH & fmode.st_mode):
return True
return False
def _check_autosign(self, keyid):
'''
Checks if the specified keyid should automatically be signed.
'''
if self.opts['auto_accept']:
return True
autosign_file = self.opts.get('autosign_file', None)
if not autosign_file or not os.path.exists(autosign_file):
return False
if not self._check_permissions(autosign_file):
message = 'Wrong permissions for {0}, ignoring content'
log.warn(message.format(autosign_file))
return False
with salt.utils.fopen(autosign_file, 'r') as fp_:
for line in fp_:
line = line.strip()
if line.startswith('#'):
continue
if line == keyid:
return True
if fnmatch.fnmatch(keyid, line):
return True
try:
if re.match(r'\A{0}\Z'.format(line), keyid):
return True
except re.error:
log.warn(
'{0} is not a valid regular expression, ignoring line '
'in {1}'.format(
line, autosign_file
)
)
continue
return False
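    # Illustrative autosign_file contents (hypothetical ids): each non-comment
    # line is tried by _check_autosign() above as an exact minion id, then as
    # a shell glob (fnmatch), then as an anchored regular expression:
    #
    #   db01.example.com
    #   web*.example.com
    #   build-[0-9]+\.example\.com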
def _auth(self, load):
'''
Authenticate the client, use the sent public key to encrypt the AES key
which was generated at start up.
This method fires an event over the master event manager. The event is
tagged "auth" and returns a dict with information about the auth
event
'''
# 0. Check for max open files
# 1. Verify that the key we are receiving matches the stored key
# 2. Store the key if it is not there
# 3. make an RSA key with the pub key
# 4. encrypt the AES key as an encrypted salt.payload
# 5. package the return and return it
salt.utils.verify.check_max_open_files(self.opts)
if not salt.utils.verify.valid_id(self.opts, load['id']):
log.info(
'Authentication request from invalid id {id}'.format(**load)
)
return {'enc': 'clear',
'load': {'ret': False}}
log.info('Authentication request from {id}'.format(**load))
pubfn = os.path.join(self.opts['pki_dir'],
'minions',
load['id'])
pubfn_pend = os.path.join(self.opts['pki_dir'],
'minions_pre',
load['id'])
pubfn_rejected = os.path.join(self.opts['pki_dir'],
'minions_rejected',
load['id'])
if self.opts['open_mode']:
# open mode is turned on, nuts to checks and overwrite whatever
# is there
pass
elif os.path.isfile(pubfn_rejected):
# The key has been rejected, don't place it in pending
log.info('Public key rejected for {id}'.format(**load))
ret = {'enc': 'clear',
'load': {'ret': False}}
eload = {'result': False,
'id': load['id'],
'pub': load['pub']}
self.event.fire_event(eload, tagify(prefix='auth'))
return ret
elif os.path.isfile(pubfn):
# The key has been accepted check it
if salt.utils.fopen(pubfn, 'r').read() != load['pub']:
log.error(
'Authentication attempt from {id} failed, the public '
'keys did not match. This may be an attempt to compromise '
'the Salt cluster.'.format(**load)
)
ret = {'enc': 'clear',
'load': {'ret': False}}
eload = {'result': False,
'id': load['id'],
'pub': load['pub']}
self.event.fire_event(eload, tagify(prefix='auth'))
return ret
elif not os.path.isfile(pubfn_pend)\
and not self._check_autosign(load['id']):
if os.path.isdir(pubfn_pend):
# The key path is a directory, error out
log.info(
'New public key id is a directory {id}'.format(**load)
)
ret = {'enc': 'clear',
'load': {'ret': False}}
eload = {'result': False,
'id': load['id'],
'pub': load['pub']}
self.event.fire_event(eload, tagify(prefix='auth'))
return ret
# This is a new key, stick it in pre
log.info(
'New public key placed in pending for {id}'.format(**load)
)
with salt.utils.fopen(pubfn_pend, 'w+') as fp_:
fp_.write(load['pub'])
ret = {'enc': 'clear',
'load': {'ret': True}}
eload = {'result': True,
'act': 'pend',
'id': load['id'],
'pub': load['pub']}
self.event.fire_event(eload, tagify(prefix='auth'))
return ret
elif os.path.isfile(pubfn_pend)\
and not self._check_autosign(load['id']):
# This key is in pending, if it is the same key ret True, else
# ret False
if salt.utils.fopen(pubfn_pend, 'r').read() != load['pub']:
log.error(
'Authentication attempt from {id} failed, the public '
'keys in pending did not match. This may be an attempt to '
'compromise the Salt cluster.'.format(**load)
)
eload = {'result': False,
'id': load['id'],
'pub': load['pub']}
self.event.fire_event(eload, tagify(prefix='auth'))
return {'enc': 'clear',
'load': {'ret': False}}
else:
log.info(
'Authentication failed from host {id}, the key is in '
'pending and needs to be accepted with salt-key '
'-a {id}'.format(**load)
)
eload = {'result': True,
'act': 'pend',
'id': load['id'],
'pub': load['pub']}
self.event.fire_event(eload, tagify(prefix='auth'))
return {'enc': 'clear',
'load': {'ret': True}}
elif os.path.isfile(pubfn_pend)\
and self._check_autosign(load['id']):
# This key is in pending, if it is the same key auto accept it
if salt.utils.fopen(pubfn_pend, 'r').read() != load['pub']:
log.error(
'Authentication attempt from {id} failed, the public '
'keys in pending did not match. This may be an attempt to '
'compromise the Salt cluster.'.format(**load)
)
eload = {'result': False,
'id': load['id'],
'pub': load['pub']}
self.event.fire_event(eload, tagify(prefix='auth'))
return {'enc': 'clear',
'load': {'ret': False}}
else:
pass
elif not os.path.isfile(pubfn_pend)\
and self._check_autosign(load['id']):
            # This is a new key and it should automatically be accepted
pass
else:
# Something happened that I have not accounted for, FAIL!
log.warn('Unaccounted for authentication failure')
eload = {'result': False,
'id': load['id'],
'pub': load['pub']}
self.event.fire_event(eload, tagify(prefix='auth'))
return {'enc': 'clear',
'load': {'ret': False}}
log.info('Authentication accepted from {id}'.format(**load))
# only write to disk if you are adding the file
if not os.path.isfile(pubfn):
with salt.utils.fopen(pubfn, 'w+') as fp_:
fp_.write(load['pub'])
pub = None
# The key payload may sometimes be corrupt when using auto-accept
# and an empty request comes in
try:
pub = RSA.load_pub_key(pubfn)
except RSA.RSAError as err:
log.error('Corrupt public key "{0}": {1}'.format(pubfn, err))
return {'enc': 'clear',
'load': {'ret': False}}
ret = {'enc': 'pub',
'pub_key': self.master_key.get_pub_str(),
'publish_port': self.opts['publish_port'],
}
if self.opts['auth_mode'] >= 2:
if 'token' in load:
try:
mtoken = self.master_key.key.private_decrypt(load['token'], 4)
aes = '{0}_|-{1}'.format(self.opts['aes'], mtoken)
except Exception:
# Token failed to decrypt, send back the salty bacon to
# support older minions
pass
else:
aes = self.opts['aes']
ret['aes'] = pub.public_encrypt(aes, 4)
else:
if 'token' in load:
try:
mtoken = self.master_key.key.private_decrypt(
load['token'], 4
)
ret['token'] = pub.public_encrypt(mtoken, 4)
except Exception:
# Token failed to decrypt, send back the salty bacon to
# support older minions
pass
aes = self.opts['aes']
ret['aes'] = pub.public_encrypt(self.opts['aes'], 4)
# Be aggressive about the signature
digest = hashlib.sha256(aes).hexdigest()
ret['sig'] = self.master_key.key.private_encrypt(digest, 5)
eload = {'result': True,
'act': 'accept',
'id': load['id'],
'pub': load['pub']}
self.event.fire_event(eload, tagify(prefix='auth'))
return ret
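    # Rough shape of a successful _auth() reply, derived from the code above
    # (encrypted fields shown as placeholders; 4505 is the default
    # publish_port):
    #
    #   {'enc': 'pub',
    #    'pub_key': '<master RSA public key, PEM>',
    #    'publish_port': 4505,
    #    'aes': '<AES session key, RSA-encrypted with the minion public key>',
    #    'sig': '<SHA256 digest of the AES key, private-encrypted with the master RSA key>',
    #    'token': '<only when auth_mode < 2 and the minion sent a token>'}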
def cloud(self, clear_load):
'''
Hook into the salt-cloud libs and execute cloud routines
# NOT HOOKED IN YET
'''
authorize = salt.auth.Authorize(self.opts, clear_load, self.loadauth)
if not authorize.rights('cloud', clear_load):
return False
return True
def runner(self, clear_load):
'''
Send a master control function back to the runner system
'''
# All runner ops pass through eauth
if 'token' in clear_load:
try:
token = self.loadauth.get_tok(clear_load['token'])
except Exception as exc:
log.error(
'Exception occurred when generating auth token: {0}'.format(
exc
)
)
return ''
if not token:
log.warning('Authentication failure of type "token" occurred.')
return ''
if token['eauth'] not in self.opts['external_auth']:
log.warning('Authentication failure of type "token" occurred.')
return ''
if token['name'] not in self.opts['external_auth'][token['eauth']]:
log.warning('Authentication failure of type "token" occurred.')
return ''
good = self.ckminions.runner_check(
                self.opts['external_auth'][token['eauth']][token['name']]
                if token['name'] in self.opts['external_auth'][token['eauth']]
                else self.opts['external_auth'][token['eauth']]['*'],
clear_load['fun'])
if not good:
msg = ('Authentication failure of type "token" occurred for '
'user {0}.').format(token['name'])
log.warning(msg)
return ''
try:
fun = clear_load.pop('fun')
runner_client = salt.runner.RunnerClient(self.opts)
return runner_client.async(
fun,
clear_load.get('kwarg', {}),
token['name'])
except Exception as exc:
log.error('Exception occurred while '
'introspecting {0}: {1}'.format(fun, exc))
return ''
if 'eauth' not in clear_load:
msg = ('Authentication failure of type "eauth" occurred for '
'user {0}.').format(clear_load.get('username', 'UNKNOWN'))
log.warning(msg)
return ''
if clear_load['eauth'] not in self.opts['external_auth']:
# The eauth system is not enabled, fail
msg = ('Authentication failure of type "eauth" occurred for '
'user {0}.').format(clear_load.get('username', 'UNKNOWN'))
log.warning(msg)
return ''
try:
name = self.loadauth.load_name(clear_load)
            if not ((name in self.opts['external_auth'][clear_load['eauth']]) |
                    ('*' in self.opts['external_auth'][clear_load['eauth']])):
msg = ('Authentication failure of type "eauth" occurred for '
'user {0}.').format(clear_load.get('username', 'UNKNOWN'))
log.warning(msg)
return ''
if not self.loadauth.time_auth(clear_load):
msg = ('Authentication failure of type "eauth" occurred for '
'user {0}.').format(clear_load.get('username', 'UNKNOWN'))
log.warning(msg)
return ''
good = self.ckminions.runner_check(
                self.opts['external_auth'][clear_load['eauth']][name]
                if name in self.opts['external_auth'][clear_load['eauth']]
                else self.opts['external_auth'][clear_load['eauth']]['*'],
clear_load['fun'])
if not good:
msg = ('Authentication failure of type "eauth" occurred for '
'user {0}.').format(clear_load.get('username', 'UNKNOWN'))
log.warning(msg)
return ''
try:
fun = clear_load.pop('fun')
runner_client = salt.runner.RunnerClient(self.opts)
return runner_client.async(fun,
clear_load.get('kwarg', {}),
clear_load.get('username', 'UNKNOWN'))
except Exception as exc:
log.error('Exception occurred while '
'introspecting {0}: {1}'.format(fun, exc))
return ''
except Exception as exc:
log.error(
'Exception occurred in the wheel system: {0}'.format(exc)
)
return ''
def wheel(self, clear_load):
'''
Send a master control function back to the wheel system
'''
# All wheel ops pass through eauth
if 'token' in clear_load:
try:
token = self.loadauth.get_tok(clear_load['token'])
except Exception as exc:
log.error(
'Exception occurred when generating auth token: {0}'.format(
exc
)
)
return ''
if not token:
log.warning('Authentication failure of type "token" occurred.')
return ''
if token['eauth'] not in self.opts['external_auth']:
log.warning('Authentication failure of type "token" occurred.')
return ''
if token['name'] not in self.opts['external_auth'][token['eauth']]:
log.warning('Authentication failure of type "token" occurred.')
return ''
good = self.ckminions.wheel_check(
self.opts['external_auth'][token['eauth']][token['name']]
if token['name'] in self.opts['external_auth'][token['eauth']]
else self.opts['external_auth'][token['eauth']]['*'],
clear_load['fun'])
if not good:
msg = ('Authentication failure of type "token" occurred for '
'user {0}.').format(token['name'])
log.warning(msg)
return ''
jid = salt.utils.gen_jid()
fun = clear_load.pop('fun')
tag = tagify(jid, prefix='wheel')
data = {'fun': "wheel.{0}".format(fun),
'jid': jid,
'tag': tag,
'user': token['name']}
try:
self.event.fire_event(data, tagify([jid, 'new'], 'wheel'))
ret = self.wheel_.call_func(fun, **clear_load.get('kwarg', {}))
data['ret'] = ret
data['success'] = True
self.event.fire_event(data, tagify([jid, 'ret'], 'wheel'))
return tag
except Exception as exc:
log.error(exc)
log.error('Exception occurred while '
'introspecting {0}: {1}'.format(fun, exc))
                data['ret'] = 'Exception occurred in wheel {0}: {1}'.format(
fun,
exc,
)
self.event.fire_event(data, tagify([jid, 'ret'], 'wheel'))
return tag
if 'eauth' not in clear_load:
msg = ('Authentication failure of type "eauth" occurred for '
'user {0}.').format(clear_load.get('username', 'UNKNOWN'))
log.warning(msg)
return ''
if clear_load['eauth'] not in self.opts['external_auth']:
# The eauth system is not enabled, fail
msg = ('Authentication failure of type "eauth" occurred for '
'user {0}.').format(clear_load.get('username', 'UNKNOWN'))
log.warning(msg)
return ''
try:
name = self.loadauth.load_name(clear_load)
if not ((name in self.opts['external_auth'][clear_load['eauth']]) |
('*' in self.opts['external_auth'][clear_load['eauth']])):
msg = ('Authentication failure of type "eauth" occurred for '
'user {0}.').format(clear_load.get('username', 'UNKNOWN'))
log.warning(msg)
return ''
if not self.loadauth.time_auth(clear_load):
msg = ('Authentication failure of type "eauth" occurred for '
'user {0}.').format(clear_load.get('username', 'UNKNOWN'))
log.warning(msg)
return ''
good = self.ckminions.wheel_check(
self.opts['external_auth'][clear_load['eauth']][name]
if name in self.opts['external_auth'][clear_load['eauth']]
                else self.opts['external_auth'][clear_load['eauth']]['*'],
clear_load['fun'])
if not good:
msg = ('Authentication failure of type "eauth" occurred for '
'user {0}.').format(clear_load.get('username', 'UNKNOWN'))
log.warning(msg)
return ''
jid = salt.utils.gen_jid()
fun = clear_load.pop('fun')
tag = tagify(jid, prefix='wheel')
data = {'fun': "wheel.{0}".format(fun),
'jid': jid,
'tag': tag,
'user': clear_load.get('username', 'UNKNOWN')}
try:
self.event.fire_event(data, tagify([jid, 'new'], 'wheel'))
ret = self.wheel_.call_func(fun, **clear_load.get('kwarg', {}))
data['ret'] = ret
data['success'] = True
self.event.fire_event(data, tagify([jid, 'ret'], 'wheel'))
return tag
except Exception as exc:
log.error('Exception occurred while '
'introspecting {0}: {1}'.format(fun, exc))
                data['ret'] = 'Exception occurred in wheel {0}: {1}'.format(
fun,
exc,
)
self.event.fire_event(data, tagify([jid, 'ret'], 'wheel'))
return tag
except Exception as exc:
log.error(
'Exception occurred in the wheel system: {0}'.format(exc)
)
return ''
def mk_token(self, clear_load):
'''
        Create and return an authentication token. The clear load needs to
        contain the eauth key and the needed authentication creds.
'''
if 'eauth' not in clear_load:
log.warning('Authentication failure of type "eauth" occurred.')
return ''
if clear_load['eauth'] not in self.opts['external_auth']:
# The eauth system is not enabled, fail
log.warning('Authentication failure of type "eauth" occurred.')
return ''
try:
name = self.loadauth.load_name(clear_load)
if not ((name in self.opts['external_auth'][clear_load['eauth']]) |
('*' in self.opts['external_auth'][clear_load['eauth']])):
log.warning('Authentication failure of type "eauth" occurred.')
return ''
if not self.loadauth.time_auth(clear_load):
log.warning('Authentication failure of type "eauth" occurred.')
return ''
return self.loadauth.mk_token(clear_load)
except Exception as exc:
log.error(
'Exception occurred while authenticating: {0}'.format(exc)
)
return ''
def get_token(self, clear_load):
'''
Return the name associated with a token or False if the token is invalid
'''
if 'token' not in clear_load:
return False
return self.loadauth.get_tok(clear_load['token'])
def publish(self, clear_load):
'''
        This method sends out publications to the minions; it can only be
        used by the LocalClient.
'''
extra = clear_load.get('kwargs', {})
# check blacklist/whitelist
good = True
# Check if the user is blacklisted
for user_re in self.opts['client_acl_blacklist'].get('users', []):
if re.match(user_re, clear_load['user']):
good = False
break
# check if the cmd is blacklisted
for module_re in self.opts['client_acl_blacklist'].get('modules', []):
            # if this is a regular command, it's a single function
            if isinstance(clear_load['fun'], str):
funs_to_check = [clear_load['fun']]
# if this a compound function
else:
funs_to_check = clear_load['fun']
for fun in funs_to_check:
if re.match(module_re, fun):
good = False
break
if good is False:
log.error(
'{user} does not have permissions to run {function}. Please '
'contact your local administrator if you believe this is in '
'error.\n'.format(
user=clear_load['user'],
function=clear_load['fun']
)
)
return ''
# to make sure we don't step on anyone else's toes
del good
# Check for external auth calls
if extra.get('token', False):
# A token was passed, check it
try:
token = self.loadauth.get_tok(extra['token'])
except Exception as exc:
log.error(
'Exception occurred when generating auth token: {0}'.format(
exc
)
)
return ''
if not token:
log.warning('Authentication failure of type "token" occurred.')
return ''
if token['eauth'] not in self.opts['external_auth']:
log.warning('Authentication failure of type "token" occurred.')
return ''
if not ((token['name'] in self.opts['external_auth'][token['eauth']]) |
('*' in self.opts['external_auth'][token['eauth']])):
log.warning('Authentication failure of type "token" occurred.')
return ''
good = self.ckminions.auth_check(
self.opts['external_auth'][token['eauth']][token['name']]
if token['name'] in self.opts['external_auth'][token['eauth']]
else self.opts['external_auth'][token['eauth']]['*'],
clear_load['fun'],
clear_load['tgt'],
clear_load.get('tgt_type', 'glob'))
if not good:
# Accept find_job so the CLI will function cleanly
if clear_load['fun'] != 'saltutil.find_job':
log.warning(
'Authentication failure of type "token" occurred.'
)
return ''
clear_load['user'] = token['name']
log.debug('Minion tokenized user = "{0}"'.format(clear_load['user']))
elif 'eauth' in extra:
if extra['eauth'] not in self.opts['external_auth']:
# The eauth system is not enabled, fail
log.warning(
'Authentication failure of type "eauth" occurred.'
)
return ''
try:
name = self.loadauth.load_name(extra)
if not ((name in self.opts['external_auth'][extra['eauth']]) |
('*' in self.opts['external_auth'][extra['eauth']])):
log.warning(
'Authentication failure of type "eauth" occurred.'
)
return ''
if not self.loadauth.time_auth(extra):
log.warning(
'Authentication failure of type "eauth" occurred.'
)
return ''
except Exception as exc:
log.error(
'Exception occurred while authenticating: {0}'.format(exc)
)
return ''
good = self.ckminions.auth_check(
self.opts['external_auth'][extra['eauth']][name]
if name in self.opts['external_auth'][extra['eauth']]
else self.opts['external_auth'][extra['eauth']]['*'],
clear_load['fun'],
clear_load['tgt'],
clear_load.get('tgt_type', 'glob'))
if not good:
# Accept find_job so the CLI will function cleanly
if clear_load['fun'] != 'saltutil.find_job':
log.warning(
'Authentication failure of type "eauth" occurred.'
)
return ''
clear_load['user'] = name
# Verify that the caller has root on master
elif 'user' in clear_load:
if clear_load['user'].startswith('sudo_'):
# If someone can sudo, allow them to act as root
if clear_load.get('key', 'invalid') == self.key.get('root'):
clear_load.pop('key')
elif clear_load.pop('key') != self.key[self.opts.get('user', 'root')]:
log.warning(
'Authentication failure of type "user" occurred.'
)
return ''
elif clear_load['user'] == self.opts.get('user', 'root'):
if clear_load.pop('key') != self.key[self.opts.get('user', 'root')]:
log.warning(
'Authentication failure of type "user" occurred.'
)
return ''
elif clear_load['user'] == 'root':
if clear_load.pop('key') != self.key.get(self.opts.get('user', 'root')):
log.warning(
'Authentication failure of type "user" occurred.'
)
return ''
elif clear_load['user'] == getpass.getuser():
if clear_load.pop('key') != self.key.get(clear_load['user']):
log.warning(
'Authentication failure of type "user" occurred.'
)
return ''
else:
if clear_load['user'] in self.key:
# User is authorised, check key and check perms
if clear_load.pop('key') != self.key[clear_load['user']]:
log.warning(
'Authentication failure of type "user" occurred.'
)
return ''
if clear_load['user'] not in self.opts['client_acl']:
log.warning(
'Authentication failure of type "user" occurred.'
)
return ''
good = self.ckminions.auth_check(
self.opts['client_acl'][clear_load['user']],
clear_load['fun'],
clear_load['tgt'],
clear_load.get('tgt_type', 'glob'))
if not good:
# Accept find_job so the CLI will function cleanly
if clear_load['fun'] != 'saltutil.find_job':
log.warning(
'Authentication failure of type "user" '
'occurred.'
)
return ''
else:
log.warning(
'Authentication failure of type "user" occurred.'
)
return ''
else:
if clear_load.pop('key') != self.key[getpass.getuser()]:
log.warning(
'Authentication failure of type "other" occurred.'
)
return ''
# Retrieve the minions list
minions = self.ckminions.check_minions(
clear_load['tgt'],
clear_load.get('tgt_type', 'glob')
)
# If we order masters (via a syndic), don't short circuit if no minions
# are found
if not self.opts.get('order_masters'):
# Check for no minions
if not minions:
return {
'enc': 'clear',
'load': {
'jid': None,
'minions': minions
}
}
# Retrieve the jid
if not clear_load['jid']:
clear_load['jid'] = salt.utils.prep_jid(
self.opts['cachedir'],
self.opts['hash_type'],
extra.get('nocache', False)
)
self.event.fire_event({'minions': minions}, clear_load['jid'])
jid_dir = salt.utils.jid_dir(
clear_load['jid'],
self.opts['cachedir'],
self.opts['hash_type']
)
new_job_load = {
'jid': clear_load['jid'],
'tgt_type': clear_load['tgt_type'],
'tgt': clear_load['tgt'],
'user': clear_load['user'],
'fun': clear_load['fun'],
'arg': clear_load['arg'],
'minions': minions,
}
# Announce the job on the event bus
self.event.fire_event(new_job_load, 'new_job') # old dup event
self.event.fire_event(new_job_load, tagify([clear_load['jid'], 'new'], 'job'))
# Verify the jid dir
if not os.path.isdir(jid_dir):
os.makedirs(jid_dir)
# Save the invocation information
self.serial.dump(
clear_load,
salt.utils.fopen(os.path.join(jid_dir, '.load.p'), 'w+')
)
# save the minions to a cache so we can see in the UI
self.serial.dump(
minions,
salt.utils.fopen(os.path.join(jid_dir, '.minions.p'), 'w+')
)
if self.opts['ext_job_cache']:
try:
fstr = '{0}.save_load'.format(self.opts['ext_job_cache'])
self.mminion.returners[fstr](clear_load['jid'], clear_load)
except KeyError:
log.critical(
'The specified returner used for the external job cache '
'"{0}" does not have a save_load function!'.format(
self.opts['ext_job_cache']
)
)
except Exception:
log.critical(
'The specified returner threw a stack trace:\n',
exc_info=True
)
# Set up the payload
payload = {'enc': 'aes'}
# Altering the contents of the publish load is serious!! Changes here
# break compatibility with minion/master versions and even tiny
# additions can have serious implications on the performance of the
# publish commands.
#
# In short, check with Thomas Hatch before you even think about
# touching this stuff, we can probably do what you want to do another
# way that won't have a negative impact.
load = {
'fun': clear_load['fun'],
'arg': clear_load['arg'],
'tgt': clear_load['tgt'],
'jid': clear_load['jid'],
'ret': clear_load['ret'],
}
if 'id' in extra:
load['id'] = extra['id']
if 'tgt_type' in clear_load:
load['tgt_type'] = clear_load['tgt_type']
if 'to' in clear_load:
load['to'] = clear_load['to']
if 'user' in clear_load:
log.info(
'User {user} Published command {fun} with jid {jid}'.format(
**clear_load
)
)
load['user'] = clear_load['user']
else:
log.info(
'Published command {fun} with jid {jid}'.format(
**clear_load
)
)
log.debug('Published command details {0}'.format(load))
payload['load'] = self.crypticle.dumps(load)
# Send 0MQ to the publisher
context = zmq.Context(1)
pub_sock = context.socket(zmq.PUSH)
pull_uri = 'ipc://{0}'.format(
os.path.join(self.opts['sock_dir'], 'publish_pull.ipc')
)
pub_sock.connect(pull_uri)
pub_sock.send(self.serial.dumps(payload))
return {
'enc': 'clear',
'load': {
'jid': clear_load['jid'],
'minions': minions
}
}
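    # Wire-format sketch derived from the code above: the payload pushed to
    # the publisher socket wraps an AES-encrypted load (values here are
    # placeholders, not real job data):
    #
    #   {'enc': 'aes',
    #    'load': crypticle.dumps({'fun': 'test.ping', 'arg': [], 'tgt': '*',
    #                             'jid': '<jid>', 'ret': '',
    #                             'tgt_type': 'glob', 'user': 'root'})}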
|
train.py
|
# Author: Bichen Wu (bichen@berkeley.edu) 08/25/2016
"""Train"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os.path
import sys
import time
import math
import numpy as np
from six.moves import xrange
import tensorflow as tf
import threading
from config import *
from imdb import kitti
from utils.util import *
from nets import *
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('dataset', 'KITTI',
"""Currently only support KITTI dataset.""")
tf.app.flags.DEFINE_string('data_path', '', """Root directory of data""")
tf.app.flags.DEFINE_string('image_set', 'train',
""" Can be train, trainval, val, or test""")
tf.app.flags.DEFINE_string('train_dir', '/tmp/bichen/logs/squeezeseg/train',
"""Directory where to write event logs """
"""and checkpoint.""")
tf.app.flags.DEFINE_integer('max_steps', 1000000,
"""Maximum number of batches to run.""")
tf.app.flags.DEFINE_string('net', 'squeezeSeg',
"""Neural net architecture. """)
tf.app.flags.DEFINE_string('pretrained_model_path', '',
"""Path to the pretrained model.""")
tf.app.flags.DEFINE_integer('summary_step', 50,
"""Number of steps to save summary.""")
tf.app.flags.DEFINE_integer('checkpoint_step', 1000,
"""Number of steps to save summary.""")
tf.app.flags.DEFINE_string('gpu', '0', """gpu id.""")
def train():
"""Train SqueezeSeg model"""
assert FLAGS.dataset == 'KITTI', \
'Currently only support KITTI dataset'
os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu
with tf.Graph().as_default():
assert FLAGS.net == 'squeezeSeg', \
'Selected neural net architecture not supported: {}'.format(FLAGS.net)
if FLAGS.net == 'squeezeSeg':
mc = kitti_squeezeSeg_config()
mc.PRETRAINED_MODEL_PATH = FLAGS.pretrained_model_path
model = SqueezeSeg(mc)
imdb = kitti(FLAGS.image_set, FLAGS.data_path, mc)
# save model size, flops, activations by layers
with open(os.path.join(FLAGS.train_dir, 'model_metrics.txt'), 'w') as f:
f.write('Number of parameter by layer:\n')
count = 0
for c in model.model_size_counter:
f.write('\t{}: {}\n'.format(c[0], c[1]))
count += c[1]
f.write('\ttotal: {}\n'.format(count))
count = 0
f.write('\nActivation size by layer:\n')
for c in model.activation_counter:
f.write('\t{}: {}\n'.format(c[0], c[1]))
count += c[1]
f.write('\ttotal: {}\n'.format(count))
count = 0
f.write('\nNumber of flops by layer:\n')
for c in model.flop_counter:
f.write('\t{}: {}\n'.format(c[0], c[1]))
count += c[1]
f.write('\ttotal: {}\n'.format(count))
print ('Model statistics saved to {}.'.format(
os.path.join(FLAGS.train_dir, 'model_metrics.txt')))
def enqueue(sess, coord):
with coord.stop_on_exception():
while not coord.should_stop():
# read batch input
lidar_per_batch, lidar_mask_per_batch, label_per_batch,\
weight_per_batch = imdb.read_batch()
feed_dict = {
model.ph_keep_prob: mc.KEEP_PROB,
model.ph_lidar_input: lidar_per_batch,
model.ph_lidar_mask: lidar_mask_per_batch,
model.ph_label: label_per_batch,
model.ph_loss_weight: weight_per_batch,
}
sess.run(model.enqueue_op, feed_dict=feed_dict)
saver = tf.train.Saver(tf.all_variables())
summary_op = tf.summary.merge_all()
init = tf.initialize_all_variables()
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
sess.run(init)
summary_writer = tf.summary.FileWriter(FLAGS.train_dir, sess.graph)
coord = tf.train.Coordinator()
enq_threads = []
for _ in range(mc.NUM_ENQUEUE_THREAD):
eqth = threading.Thread(target=enqueue, args=[sess, coord])
eqth.start()
enq_threads.append(eqth)
run_options = tf.RunOptions(timeout_in_ms=60000)
try:
for step in xrange(FLAGS.max_steps):
start_time = time.time()
if step % FLAGS.summary_step == 0 or step == FLAGS.max_steps-1:
op_list = [
model.lidar_input, model.lidar_mask, model.label, model.train_op,
model.loss, model.pred_cls, summary_op
]
lidar_per_batch, lidar_mask_per_batch, label_per_batch, \
_, loss_value, pred_cls, summary_str = sess.run(op_list,
options=run_options)
label_image = visualize_seg(label_per_batch[:6, :, :], mc)
pred_image = visualize_seg(pred_cls[:6, :, :], mc)
# Run evaluation on the batch
ious, _, _, _ = evaluate_iou(
label_per_batch, pred_cls*np.squeeze(lidar_mask_per_batch),
mc.NUM_CLASS)
feed_dict = {}
# Assume that class-0 is the background class
for i in range(1, mc.NUM_CLASS):
feed_dict[model.iou_summary_placeholders[i]] = ious[i]
iou_summary_list = sess.run(model.iou_summary_ops[1:], feed_dict)
# Run visualization
viz_op_list = [model.show_label, model.show_depth_img, model.show_pred]
viz_summary_list = sess.run(
viz_op_list,
feed_dict={
model.depth_image_to_show: lidar_per_batch[:6, :, :, [4]],
model.label_to_show: label_image,
model.pred_image_to_show: pred_image,
}
)
# Add summaries
summary_writer.add_summary(summary_str, step)
for sum_str in iou_summary_list:
summary_writer.add_summary(sum_str, step)
for viz_sum in viz_summary_list:
summary_writer.add_summary(viz_sum, step)
# force tensorflow to synchronise summaries
summary_writer.flush()
else:
_, loss_value = sess.run(
[model.train_op, model.loss], options=run_options)
duration = time.time() - start_time
      assert not np.isnan(loss_value), \
          'Model diverged. Total loss: {}'.format(loss_value)
if step % 10 == 0:
num_images_per_step = mc.BATCH_SIZE
images_per_sec = num_images_per_step / duration
sec_per_batch = float(duration)
format_str = ('%s: step %d, loss = %.2f (%.1f images/sec; %.3f '
'sec/batch)')
print (format_str % (datetime.now(), step, loss_value,
images_per_sec, sec_per_batch))
sys.stdout.flush()
# Save the model checkpoint periodically.
if step % FLAGS.checkpoint_step == 0 or step == FLAGS.max_steps-1:
checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')
saver.save(sess, checkpoint_path, global_step=step)
    except Exception as e:
coord.request_stop(e)
finally:
coord.request_stop()
sess.run(model.q.close(cancel_pending_enqueues=True))
coord.join(enq_threads)
def main(argv=None): # pylint: disable=unused-argument
if tf.gfile.Exists(FLAGS.train_dir):
tf.gfile.DeleteRecursively(FLAGS.train_dir)
tf.gfile.MakeDirs(FLAGS.train_dir)
train()
if __name__ == '__main__':
tf.app.run()
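# Example invocation (hypothetical paths; all flags are defined above):
#
#   python train.py \
#       --dataset=KITTI \
#       --data_path=./data/KITTI \
#       --image_set=train \
#       --train_dir=/tmp/squeezeseg/train \
#       --net=squeezeSeg \
#       --max_steps=100000 \
#       --gpu=0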
|
Client.py
|
#!/usr/bin/env python
###########################################################################
import os
import shutil
def EnsureDir(path):
if not os.path.exists(path):
os.makedirs(path)
def RemoveDir(path):
if os.path.exists(path):
shutil.rmtree(path)
def split_list(a, n):
k, m = divmod(len(a), n)
return (a[i*k+min(i, m):(i+1)*k+min(i+1, m)] for i in range(n))
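# Example: split_list(list(range(10)), 3) yields near-equal chunks,
# front-loading the remainder:
#   [0, 1, 2, 3], [4, 5, 6], [7, 8, 9]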
###########################################################################
# filename - string, name of ZIP file
# process_index - process index, for verbose output
# verbose - number of passwords, after which the program will report progress
# passwords - list of strings, containing all passwords to be checked
# tmppath - path to temporary directory. MUST BE ENSURED AND DELETED OUTSIDE OF THIS FUNCTION
from zipfile import ZipFile
def decrypt(filename, process_index, verbose, passwords, tmppath):
file = ZipFile(filename)
pass_num = len(passwords)
index = 0
for password in passwords:
if verbose > 0:
index=index+1
if index % verbose == 0:
print(f'Process {process_index}: checked {index}/{pass_num} passwords in current bulk')
try:
file.extractall(path=tmppath, pwd=password.encode())
return password
        except Exception:
            # wrong password (or a corrupt entry) - try the next candidate
            continue
return None
###########################################################################
import multiprocessing
from multiprocessing import Process, Manager
from ctypes import c_char_p
import time
def subprocess(ret, index, verbose, filename, tmpdir, passwords):
pid = os.getpid()
tmppath = tmpdir + "/" + str(pid)
EnsureDir(tmppath)
result = decrypt(filename, index, verbose, passwords, tmppath)
RemoveDir(tmppath)
ret.value = result if result else ""
# return: None if not found
# otherwise, string with password
def start(filename, passwords, tmpdir, thread_num, verbose):
splitted_passwords = list(split_list(passwords, thread_num))
threads = []
manager = Manager()
index = 0 # for output
for pass_list in splitted_passwords:
index = index+1
ret = manager.Value(c_char_p, "")
thread = Process(target=subprocess, args=(ret, index, verbose, filename, tmpdir, pass_list,))
thread.start()
threads.append( (thread, ret) )
still_alive = True
output = None
while still_alive:
time.sleep(10)
still_alive = False
for (thread,ret) in threads:
if thread.is_alive():
still_alive = True
else:
if ret.value != "":
output = ret.value
still_alive = False
break
for (thread,ret) in threads:
if thread.is_alive():
thread.terminate()
return output
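# Minimal local usage sketch (hypothetical file names; separate from the
# networked flow commented out below):
#
#   EnsureDir(".tmp")
#   with open("wordlist.txt") as f:              # hypothetical wordlist
#       candidates = [line.strip() for line in f]
#   found = start("secret.zip", candidates, ".tmp",
#                 multiprocessing.cpu_count(), verbose=10000)
#   RemoveDir(".tmp")
#   print(found if found else "password not found")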
###########################################################################
#import socket
#import pickle
#def init_connection(conn, thread_num):
# return 1
#def run_task(conn, thread_num, verbose):
# return 1
#def send_results(conn, result):
# return 1
#def main(ip, port, thread_num, verbose):
# EnsureDir(".tmp")
# with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
# s.connect((ip, port))
# filename = s.recv(1000)
# filelength = pickle.loads(s.recv(1000))
# filedata = s.recv(filelength+1)
# f = open(filename, "wb")
# f.write(filedata)
# f.close()
# max_num = pickle.loads(s.recv(1000))
# for i in range(1,max_num):
# length = pickle.loads(s.recv(1000))
# passwords = pickle.loads(s.recv(length+1))
# result = start(filename.decode("utf-8"), passwords, ".tmp", thread_num, verbose)
# s.sendall(pickle.dumps(result))
# RemoveDir(".tmp")
###########################################################################
import argparse
parser = argparse.ArgumentParser(description='Client program. Connects to the server in order to crack password-protected ZIP archives.')
parser.add_argument('-ip', required=True, metavar='ip', help='IP address of the server')
parser.add_argument('-port', required=False, metavar='port', help='Port of the server')
parser.add_argument('-threads', required=False, metavar='threads', help='Number of threads to be used')
parser.add_argument('-verbose', required=False, metavar='verbose', help='Number of passwords, after which the program will report progress')
if __name__ == '__main__':
args = parser.parse_args()
thread_num = int(args.threads) if args.threads else multiprocessing.cpu_count()
verbose = int(args.verbose) if args.verbose else 10000
port = int(args.port) if args.port else 65432
#main(args.ip, port, thread_num, verbose)
|
python_ls.py
|
# Copyright 2017 Palantir Technologies, Inc.
from functools import partial
import logging
import os
import sys
import socketserver
import threading
from pyls_jsonrpc.dispatchers import MethodDispatcher
from pyls_jsonrpc.endpoint import Endpoint
from pyls_jsonrpc.streams import JsonRpcStreamReader, JsonRpcStreamWriter
from kedro.framework.startup import _get_project_metadata
from kedro.framework.project.settings import _get_project_settings
from kedro.framework.session import KedroSession
from kedro.framework.context.context import KedroContext
from . import lsp, _utils, uris
from .config import config
from .workspace import Workspace
log = logging.getLogger(__name__)
LINT_DEBOUNCE_S = 0.5 # 500 ms
PARENT_PROCESS_WATCH_INTERVAL = 10 # 10 s
MAX_WORKERS = 64
PYTHON_FILE_EXTENSIONS = (".py", ".pyi")
CONFIG_FILEs = ("pycodestyle.cfg", "setup.cfg", "tox.ini", ".flake8")
class _StreamHandlerWrapper(socketserver.StreamRequestHandler, object):
"""A wrapper class that is used to construct a custom handler class."""
delegate = None
def setup(self):
super(_StreamHandlerWrapper, self).setup()
# pylint: disable=no-member
self.delegate = self.DELEGATE_CLASS(self.rfile, self.wfile)
def handle(self):
try:
self.delegate.start()
except OSError as e:
if os.name == "nt":
# Catch and pass on ConnectionResetError when parent process
# dies
# pylint: disable=no-member, undefined-variable
if isinstance(e, WindowsError) and e.winerror == 10054:
pass
# pylint: disable=no-member
self.SHUTDOWN_CALL()
def start_tcp_lang_server(bind_addr, port, check_parent_process, handler_class):
if not issubclass(handler_class, PythonLanguageServer):
        raise ValueError("Handler class must be a subclass of PythonLanguageServer")
def shutdown_server(check_parent_process, *args):
# pylint: disable=unused-argument
if check_parent_process:
log.debug("Shutting down server")
# Shutdown call must be done on a thread, to prevent deadlocks
stop_thread = threading.Thread(target=server.shutdown)
stop_thread.start()
# Construct a custom wrapper class around the user's handler_class
wrapper_class = type(
handler_class.__name__ + "Handler",
(_StreamHandlerWrapper,),
{
"DELEGATE_CLASS": partial(
handler_class, check_parent_process=check_parent_process
),
"SHUTDOWN_CALL": partial(shutdown_server, check_parent_process),
},
)
server = socketserver.TCPServer(
(bind_addr, port), wrapper_class, bind_and_activate=False
)
server.allow_reuse_address = True
try:
server.server_bind()
server.server_activate()
log.info("Serving %s on (%s, %s)", handler_class.__name__, bind_addr, port)
server.serve_forever()
finally:
log.info("Shutting down")
server.server_close()
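# Usage sketch (hypothetical host/port): the server can be exposed over TCP
# or driven over stdio, depending on how the editor connects:
#
#   start_tcp_lang_server("127.0.0.1", 2087, False, PythonLanguageServer)
#   # or, for a stdio transport:
#   start_io_lang_server(sys.stdin.buffer, sys.stdout.buffer, False,
#                        PythonLanguageServer)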
def start_io_lang_server(rfile, wfile, check_parent_process, handler_class):
if not issubclass(handler_class, PythonLanguageServer):
        raise ValueError("Handler class must be a subclass of PythonLanguageServer")
log.info("Starting %s IO language server", handler_class.__name__)
server = handler_class(rfile, wfile, check_parent_process)
server.start()
class PythonLanguageServer(MethodDispatcher):
""" Implementation of the Microsoft VSCode Language Server Protocol
https://github.com/Microsoft/language-server-protocol/blob/master/versions/protocol-1-x.md
"""
# pylint: disable=too-many-public-methods,redefined-builtin
def __init__(self, rx, tx, check_parent_process=False):
self.workspace = None
self.config = None
self.root_uri = None
self.watching_thread = None
self.workspaces = {}
self.uri_workspace_mapper = {}
self._jsonrpc_stream_reader = JsonRpcStreamReader(rx)
self._jsonrpc_stream_writer = JsonRpcStreamWriter(tx)
self._check_parent_process = check_parent_process
self._endpoint = Endpoint(
self, self._jsonrpc_stream_writer.write, max_workers=MAX_WORKERS
)
self._dispatchers = []
self._shutdown = False
def start(self):
"""Entry point for the server."""
self._jsonrpc_stream_reader.listen(self._endpoint.consume)
def __getitem__(self, item):
"""Override getitem to fallback through multiple dispatchers."""
if self._shutdown and item != "exit":
# exit is the only allowed method during shutdown
log.debug("Ignoring non-exit method during shutdown: %s", item)
raise KeyError
try:
return super(PythonLanguageServer, self).__getitem__(item)
except KeyError:
# Fallback through extra dispatchers
for dispatcher in self._dispatchers:
try:
return dispatcher[item]
except KeyError:
continue
raise KeyError()
def m_shutdown(self, **_kwargs):
self._shutdown = True
return None
def m_exit(self, **_kwargs):
self._endpoint.shutdown()
self._jsonrpc_stream_reader.close()
self._jsonrpc_stream_writer.close()
def _match_uri_to_workspace(self, uri):
workspace_uri = _utils.match_uri_to_workspace(uri, self.workspaces)
return self.workspaces.get(workspace_uri, self.workspace)
def _hook(self, hook_name, doc_uri=None, **kwargs):
"""Calls hook_name and returns a list of results from all registered handlers"""
workspace = self._match_uri_to_workspace(doc_uri)
doc = workspace.get_document(doc_uri) if doc_uri else None
hook_handlers = self.config.plugin_manager.subset_hook_caller(
hook_name, self.config.disabled_plugins
)
return hook_handlers(
config=self.config, workspace=workspace, document=doc, **kwargs
)
def capabilities(self):
server_capabilities = {
"codeActionProvider": True,
"codeLensProvider": {
"resolveProvider": False, # We may need to make this configurable
},
"completionProvider": {
"resolveProvider": False, # We know everything ahead of time
"triggerCharacters": ["."],
},
"documentFormattingProvider": True,
"documentHighlightProvider": True,
"documentRangeFormattingProvider": True,
"documentSymbolProvider": True,
"definitionProvider": True,
"executeCommandProvider": {
"commands": flatten(self._hook("pyls_commands"))
},
"hoverProvider": True,
"referencesProvider": True,
"renameProvider": True,
"foldingRangeProvider": True,
"signatureHelpProvider": {"triggerCharacters": ["(", ",", "="]},
"textDocumentSync": {
"change": lsp.TextDocumentSyncKind.INCREMENTAL,
"save": {"includeText": True,},
"openClose": True,
},
"workspace": {
"workspaceFolders": {"supported": True, "changeNotifications": True}
},
"experimental": merge(self._hook("pyls_experimental_capabilities")),
}
log.info("Server capabilities: %s", server_capabilities)
return server_capabilities
def m_initialize(
self,
processId=None,
rootUri=None,
rootPath=None,
initializationOptions=None,
**_kwargs,
):
log.debug(
"Language server initialized with %s %s %s %s",
processId,
rootUri,
rootPath,
initializationOptions,
)
if rootUri is None:
rootUri = uris.from_fs_path(rootPath) if rootPath is not None else ""
self.workspaces.pop(self.root_uri, None)
self.root_uri = rootUri
self.config = config.Config(
rootUri,
initializationOptions or {},
processId,
_kwargs.get("capabilities", {}),
)
self.workspace = Workspace(rootUri, self._endpoint, self.config)
self.workspaces[rootUri] = self.workspace
self._dispatchers = self._hook("pyls_dispatchers")
self._hook("pyls_initialize")
self.get_kedro_context(rootPath)
if (
self._check_parent_process
and processId is not None
and self.watching_thread is None
):
def watch_parent_process(pid):
# exit when the given pid is not alive
if not _utils.is_process_alive(pid):
log.info("parent process %s is not alive, exiting!", pid)
self.m_exit()
else:
threading.Timer(
PARENT_PROCESS_WATCH_INTERVAL, watch_parent_process, args=[pid]
).start()
self.watching_thread = threading.Thread(
target=watch_parent_process, args=(processId,)
)
self.watching_thread.daemon = True
self.watching_thread.start()
# Get our capabilities
return {"capabilities": self.capabilities()}
def get_kedro_context(self, rootPath):
metadata = _get_project_metadata(rootPath)
sys.path.insert(0, str(metadata.source_dir))
with KedroSession.create(metadata.package_name, rootPath) as session:
self._kedro_context = session.load_context()
def m_initialized(self, **_kwargs):
self._hook("pyls_initialized")
def code_actions(self, doc_uri, range, context):
return flatten(
self._hook("pyls_code_actions", doc_uri, range=range, context=context)
)
def code_lens(self, doc_uri):
return flatten(self._hook("pyls_code_lens", doc_uri))
def completions(self, doc_uri, position):
completions = self._hook("pyls_completions", doc_uri, position=position)
return {"isIncomplete": False, "items": flatten(completions)}
def definitions(self, doc_uri, position):
# log.info(">>>>>>>>>>>>>>>>>>> GETTING DEFINITIONS")
# log.info(self._kedro_context)
return flatten(
self._hook(
"pyls_definitions",
doc_uri,
position=position,
kedro_context=self._kedro_context,
)
)
def document_symbols(self, doc_uri):
return flatten(self._hook("pyls_document_symbols", doc_uri))
def execute_command(self, command, arguments):
return self._hook("pyls_execute_command", command=command, arguments=arguments)
def format_document(self, doc_uri):
return self._hook("pyls_format_document", doc_uri)
def format_range(self, doc_uri, range):
return self._hook("pyls_format_range", doc_uri, range=range)
def highlight(self, doc_uri, position):
return (
flatten(self._hook("pyls_document_highlight", doc_uri, position=position))
or None
)
def hover(self, doc_uri, position):
return self._hook("pyls_hover", doc_uri, position=position) or {"contents": ""}
@_utils.debounce(LINT_DEBOUNCE_S, keyed_by="doc_uri")
def lint(self, doc_uri, is_saved):
# Since we're debounced, the document may no longer be open
workspace = self._match_uri_to_workspace(doc_uri)
if doc_uri in workspace.documents:
workspace.publish_diagnostics(
doc_uri, flatten(self._hook("pyls_lint", doc_uri, is_saved=is_saved))
)
def references(self, doc_uri, position, exclude_declaration):
return flatten(
self._hook(
"pyls_references",
doc_uri,
position=position,
exclude_declaration=exclude_declaration,
)
)
def rename(self, doc_uri, position, new_name):
return self._hook("pyls_rename", doc_uri, position=position, new_name=new_name)
def signature_help(self, doc_uri, position):
return self._hook("pyls_signature_help", doc_uri, position=position)
def folding(self, doc_uri):
return flatten(self._hook("pyls_folding_range", doc_uri))
def m_text_document__did_close(self, textDocument=None, **_kwargs):
workspace = self._match_uri_to_workspace(textDocument["uri"])
workspace.rm_document(textDocument["uri"])
def m_text_document__did_open(self, textDocument=None, **_kwargs):
workspace = self._match_uri_to_workspace(textDocument["uri"])
workspace.put_document(
textDocument["uri"],
textDocument["text"],
version=textDocument.get("version"),
)
self._hook("pyls_document_did_open", textDocument["uri"])
self.lint(textDocument["uri"], is_saved=True)
def m_text_document__did_change(
self, contentChanges=None, textDocument=None, **_kwargs
):
workspace = self._match_uri_to_workspace(textDocument["uri"])
for change in contentChanges:
workspace.update_document(
textDocument["uri"], change, version=textDocument.get("version")
)
self.lint(textDocument["uri"], is_saved=False)
def m_text_document__did_save(self, textDocument=None, **_kwargs):
self.lint(textDocument["uri"], is_saved=True)
def m_text_document__code_action(
self, textDocument=None, range=None, context=None, **_kwargs
):
return self.code_actions(textDocument["uri"], range, context)
def m_text_document__code_lens(self, textDocument=None, **_kwargs):
return self.code_lens(textDocument["uri"])
def m_text_document__completion(self, textDocument=None, position=None, **_kwargs):
return self.completions(textDocument["uri"], position)
def m_text_document__definition(self, textDocument=None, position=None, **_kwargs):
return self.definitions(textDocument["uri"], position)
def m_text_document__document_highlight(
self, textDocument=None, position=None, **_kwargs
):
return self.highlight(textDocument["uri"], position)
def m_text_document__hover(self, textDocument=None, position=None, **_kwargs):
return self.hover(textDocument["uri"], position)
def m_text_document__document_symbol(self, textDocument=None, **_kwargs):
return self.document_symbols(textDocument["uri"])
def m_text_document__formatting(self, textDocument=None, _options=None, **_kwargs):
# For now we're ignoring formatting options.
return self.format_document(textDocument["uri"])
def m_text_document__rename(
self, textDocument=None, position=None, newName=None, **_kwargs
):
return self.rename(textDocument["uri"], position, newName)
def m_text_document__folding_range(self, textDocument=None, **_kwargs):
return self.folding(textDocument["uri"])
def m_text_document__range_formatting(
self, textDocument=None, range=None, _options=None, **_kwargs
):
# Again, we'll ignore formatting options for now.
return self.format_range(textDocument["uri"], range)
def m_text_document__references(
self, textDocument=None, position=None, context=None, **_kwargs
):
exclude_declaration = not context["includeDeclaration"]
return self.references(textDocument["uri"], position, exclude_declaration)
def m_text_document__signature_help(
self, textDocument=None, position=None, **_kwargs
):
return self.signature_help(textDocument["uri"], position)
def m_workspace__did_change_configuration(self, settings=None):
self.config.update((settings or {}).get("pyls", {}))
for workspace_uri in self.workspaces:
workspace = self.workspaces[workspace_uri]
workspace.update_config(settings)
for doc_uri in workspace.documents:
self.lint(doc_uri, is_saved=False)
def m_workspace__did_change_workspace_folders(
self, event=None, **_kwargs
): # pylint: disable=too-many-locals
if event is None:
return
added = event.get("added", [])
removed = event.get("removed", [])
for removed_info in removed:
if "uri" in removed_info:
removed_uri = removed_info["uri"]
self.workspaces.pop(removed_uri, None)
for added_info in added:
if "uri" in added_info:
added_uri = added_info["uri"]
workspace_config = config.Config(
added_uri,
self.config._init_opts,
self.config._process_id,
self.config._capabilities,
)
workspace_config.update(self.config._settings)
self.workspaces[added_uri] = Workspace(
added_uri, self._endpoint, workspace_config
)
root_workspace_removed = any(
removed_info["uri"] == self.root_uri for removed_info in removed
)
workspace_added = len(added) > 0 and "uri" in added[0]
if root_workspace_removed and workspace_added:
added_uri = added[0]["uri"]
self.root_uri = added_uri
new_root_workspace = self.workspaces[added_uri]
self.config = new_root_workspace._config
self.workspace = new_root_workspace
elif root_workspace_removed:
# NOTE: Removing the root workspace can only happen when the server
# is closed, thus the else condition of this if can never happen.
if self.workspaces:
log.debug("Root workspace deleted!")
available_workspaces = sorted(self.workspaces)
first_workspace = available_workspaces[0]
new_root_workspace = self.workspaces[first_workspace]
self.root_uri = first_workspace
self.config = new_root_workspace._config
self.workspace = new_root_workspace
# Migrate documents that are on the root workspace and have a better
# match now
doc_uris = list(self.workspace._docs.keys())
for uri in doc_uris:
doc = self.workspace._docs.pop(uri)
new_workspace = self._match_uri_to_workspace(uri)
new_workspace._docs[uri] = doc
def m_workspace__did_change_watched_files(self, changes=None, **_kwargs):
changed_py_files = set()
config_changed = False
for d in changes or []:
if d["uri"].endswith(PYTHON_FILE_EXTENSIONS):
changed_py_files.add(d["uri"])
elif d["uri"].endswith(CONFIG_FILEs):
config_changed = True
if config_changed:
self.config.settings.cache_clear()
elif not changed_py_files:
# Only externally changed python files and lint configs may result in changed diagnostics.
return
for workspace_uri in self.workspaces:
workspace = self.workspaces[workspace_uri]
for doc_uri in workspace.documents:
# Changes in doc_uri are already handled by m_text_document__did_save
if doc_uri not in changed_py_files:
self.lint(doc_uri, is_saved=False)
def m_workspace__execute_command(self, command=None, arguments=None):
return self.execute_command(command, arguments)
def flatten(list_of_lists):
return [item for lst in list_of_lists for item in lst]
def merge(list_of_dicts):
return {k: v for dictionary in list_of_dicts for k, v in dictionary.items()}
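# A minimal sketch (not part of the original module) of what the two helpers
# above do: `flatten` joins the per-plugin result lists returned by a hook
# into one list, and `merge` folds per-plugin dicts into a single dict, with
# later entries overriding earlier ones.
#
#   >>> flatten([[1, 2], [3], []])
#   [1, 2, 3]
#   >>> merge([{'a': 1}, {'b': 2}, {'a': 3}])
#   {'a': 3, 'b': 2}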
|
client.py
|
#!/usr/bin/python2
import socket
import json
import os
import sys
import time
import threading
# Constants
DEFAULTPORT = 60000
if os.path.isfile("port"):
with open("port", 'r') as f:
DEFAULTPORT = int(f.read().strip())
# Utility functions
def dprint(s):
if hasattr(dprint, 'number'):
sys.stderr.write("%02d: %s"%(dprint.number, s) + '\n')
else:
sys.stderr.write(str(s)+'\n')
# Helper functions
def getport(peerID):
return DEFAULTPORT + peerID
def sendMsgA(addr, msg, peerID):
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(addr)
s.send(msg)
print(s.recv(1024))
s.close()
except:
dprint("could not send message to %s"%peerID)
def sendMessage(opcode, message, peernum):
"""Sends a message without blocking. May throw error on timeout"""
if not hasattr(sendMessage, 'peers'):
sendMessage.peers = {}
sendMessage.threads = []
with open('peers.txt') as f:
for i, ip in enumerate(filter(lambda x: x != '', f.read().split('\n'))):
sendMessage.peers[i+1] = (ip.strip(), getport(i+1))
message['senderid'] = 0
message['opcode'] = opcode
#dprint("Sending message: %s to %s"% (str(message), str(peernum)))
t = threading.Thread(target=sendMsgA, args=(sendMessage.peers[peernum], json.dumps(message), peernum))
t.start()
sendMessage.threads.append(t)
destPeerID = int(sys.argv[1])
while True:
s = raw_input("%02d> "%destPeerID)
if not s:
break
parts = s.split(' ')
if len(parts) < 2:
print("Invalid command - not enough arguments")
continue
command = {}
command['opcode'] = parts[0]
command['filename'] = parts[1]
command['id'] = time.time()
if not command['opcode'] in ['create', 'delete', 'append', 'read']:
print("Invalid command - invalid operation")
continue
if parts[0] == 'append':
command['line'] = ' '.join(parts[2:])
sendMessage('EVENT', {'event': json.dumps(command)}, destPeerID)
time.sleep(.3)
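# A hedged sketch of the receiving side this client talks to (an assumption
# inferred from the message format above, not part of the original file):
# each peer listens on DEFAULTPORT + its peer ID, reads one JSON message with
# 'opcode', 'senderid' and an 'event' payload, and replies with a short
# acknowledgement that the client prints.
#
#   import socket, json
#   my_peer_id = 1  # hypothetical peer number
#   listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   listener.bind(('', getport(my_peer_id)))
#   listener.listen(5)
#   while True:
#       conn, _ = listener.accept()
#       msg = json.loads(conn.recv(4096))
#       print(msg['opcode'], msg.get('event'))
#       conn.send('ack from peer %d' % my_peer_id)
#       conn.close()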
|
test__transaction.py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2001, 2002, 2005 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""Test transaction behavior for variety of cases.
I wrote these unittests to investigate some odd transaction
behavior when doing unittests of integrating non sub transaction
aware objects, and to insure proper txn behavior. these
tests test the transaction system independent of the rest of the
zodb.
you can see the method calls to a jar by passing the
keyword arg tracing to the modify method of a dataobject.
the value of the arg is a prefix used for tracing print calls
to that objects jar.
the number of times a jar method was called can be inspected
by looking at an attribute of the jar that is the method
name prefixed with a c (count/check).
i've included some tracing examples for tests that i thought
were illuminating as doc strings below.
TODO
add in tests for objects which are modified multiple times,
for example an object that gets modified in multiple sub txns.
"""
import os
import warnings
import unittest
class TransactionTests(unittest.TestCase):
def _getTargetClass(self):
from transaction._transaction import Transaction
return Transaction
def _makeOne(self, synchronizers=None, manager=None):
return self._getTargetClass()(synchronizers, manager)
def test_verifyImplements_ITransaction(self):
from zope.interface.verify import verifyClass
from transaction.interfaces import ITransaction
verifyClass(ITransaction, self._getTargetClass())
def test_verifyProvides_ITransaction(self):
from zope.interface.verify import verifyObject
from transaction.interfaces import ITransaction
verifyObject(ITransaction, self._makeOne())
def test_ctor_defaults(self):
from transaction.weakset import WeakSet
from transaction.tests.common import DummyLogger
from transaction.tests.common import Monkey
from transaction import _transaction
logger = DummyLogger()
with Monkey(_transaction, _LOGGER=logger):
txn = self._makeOne()
self.assertTrue(isinstance(txn._synchronizers, WeakSet))
self.assertEqual(len(txn._synchronizers), 0)
self.assertTrue(txn._manager is None)
self.assertEqual(txn.user, u"")
self.assertEqual(txn.description, u"")
self.assertTrue(txn._savepoint2index is None)
self.assertEqual(txn._savepoint_index, 0)
self.assertEqual(txn._resources, [])
self.assertEqual(txn._adapters, {})
self.assertEqual(txn._voted, {})
self.assertEqual(txn.extension, {})
self.assertTrue(txn._extension is txn.extension) # legacy
self.assertTrue(txn.log is logger)
self.assertEqual(len(logger._log), 1)
self.assertEqual(logger._log[0][0], 'debug')
self.assertEqual(logger._log[0][1], 'new transaction')
self.assertTrue(txn._failure_traceback is None)
self.assertEqual(txn._before_commit, [])
self.assertEqual(txn._after_commit, [])
def test_ctor_w_syncs(self):
from transaction.weakset import WeakSet
synchs = WeakSet()
txn = self._makeOne(synchronizers=synchs)
self.assertTrue(txn._synchronizers is synchs)
def test_isDoomed(self):
from transaction._transaction import Status
txn = self._makeOne()
self.assertFalse(txn.isDoomed())
txn.status = Status.DOOMED
self.assertTrue(txn.isDoomed())
def test_doom_active(self):
from transaction._transaction import Status
txn = self._makeOne()
txn.doom()
self.assertTrue(txn.isDoomed())
self.assertEqual(txn.status, Status.DOOMED)
def test_doom_invalid(self):
from transaction._transaction import Status
txn = self._makeOne()
for status in Status.COMMITTING, Status.COMMITTED, Status.COMMITFAILED:
txn.status = status
self.assertRaises(ValueError, txn.doom)
def test_doom_already_doomed(self):
from transaction._transaction import Status
txn = self._makeOne()
txn.status = Status.DOOMED
txn.doom()
self.assertTrue(txn.isDoomed())
self.assertEqual(txn.status, Status.DOOMED)
def test__prior_operation_failed(self):
from transaction.interfaces import TransactionFailedError
class _Traceback(object):
def getvalue(self):
return 'TRACEBACK'
txn = self._makeOne()
txn._failure_traceback = _Traceback()
with self.assertRaises(TransactionFailedError) as exc:
txn._prior_operation_failed()
err = exc.exception
self.assertTrue(str(err).startswith('An operation previously failed'))
self.assertTrue(str(err).endswith("with traceback:\n\nTRACEBACK"))
def test_join_COMMITFAILED(self):
from transaction.interfaces import TransactionFailedError
from transaction._transaction import Status
class _Traceback(object):
def getvalue(self):
return 'TRACEBACK'
txn = self._makeOne()
txn.status = Status.COMMITFAILED
txn._failure_traceback = _Traceback()
self.assertRaises(TransactionFailedError, txn.join, object())
def test_join_COMMITTING(self):
from transaction._transaction import Status
txn = self._makeOne()
txn.status = Status.COMMITTING
self.assertRaises(ValueError, txn.join, object())
def test_join_COMMITTED(self):
from transaction._transaction import Status
txn = self._makeOne()
txn.status = Status.COMMITTED
self.assertRaises(ValueError, txn.join, object())
def test_join_DOOMED_non_preparing_wo_sp2index(self):
from transaction._transaction import Status
txn = self._makeOne()
txn.status = Status.DOOMED
resource = object()
txn.join(resource)
self.assertEqual(txn._resources, [resource])
def test__unjoin_miss(self):
txn = self._makeOne()
txn._unjoin(object()) # no raise
def test__unjoin_hit(self):
txn = self._makeOne()
resource = object()
txn._resources.append(resource)
txn._unjoin(resource)
self.assertEqual(txn._resources, [])
def test_savepoint_COMMITFAILED(self):
from transaction.interfaces import TransactionFailedError
from transaction._transaction import Status
class _Traceback(object):
def getvalue(self):
return 'TRACEBACK'
txn = self._makeOne()
txn.status = Status.COMMITFAILED
txn._failure_traceback = _Traceback()
self.assertRaises(TransactionFailedError, txn.savepoint)
def test_savepoint_empty(self):
from weakref import WeakKeyDictionary
from transaction import _transaction
from transaction._transaction import Savepoint
from transaction.tests.common import DummyLogger
from transaction.tests.common import Monkey
logger = DummyLogger()
with Monkey(_transaction, _LOGGER=logger):
txn = self._makeOne()
sp = txn.savepoint()
self.assertTrue(isinstance(sp, Savepoint))
self.assertTrue(sp.transaction is txn)
self.assertEqual(sp._savepoints, [])
self.assertEqual(txn._savepoint_index, 1)
self.assertTrue(isinstance(txn._savepoint2index, WeakKeyDictionary))
self.assertEqual(txn._savepoint2index[sp], 1)
def test_savepoint_non_optimistic_resource_wo_support(self):
from transaction import _transaction
from transaction._transaction import Status
from transaction._compat import StringIO
from transaction.tests.common import DummyLogger
from transaction.tests.common import Monkey
logger = DummyLogger()
with Monkey(_transaction, _LOGGER=logger):
txn = self._makeOne()
logger._clear()
resource = object()
txn._resources.append(resource)
self.assertRaises(TypeError, txn.savepoint)
self.assertEqual(txn.status, Status.COMMITFAILED)
self.assertTrue(isinstance(txn._failure_traceback, StringIO))
self.assertTrue('TypeError' in txn._failure_traceback.getvalue())
self.assertEqual(len(logger._log), 2)
self.assertEqual(logger._log[0][0], 'error')
self.assertTrue(logger._log[0][1].startswith('Error in abort'))
self.assertEqual(logger._log[1][0], 'error')
self.assertTrue(logger._log[1][1].startswith('Error in tpc_abort'))
def test__remove_and_invalidate_after_miss(self):
from weakref import WeakKeyDictionary
txn = self._makeOne()
txn._savepoint2index = WeakKeyDictionary()
class _SP(object):
def __init__(self, txn):
self.transaction = txn
holdme = []
for i in range(10):
sp = _SP(txn)
holdme.append(sp) # prevent gc
txn._savepoint2index[sp] = i
self.assertEqual(len(txn._savepoint2index), 10)
self.assertRaises(KeyError, txn._remove_and_invalidate_after, _SP(txn))
self.assertEqual(len(txn._savepoint2index), 10)
def test__remove_and_invalidate_after_hit(self):
from weakref import WeakKeyDictionary
txn = self._makeOne()
txn._savepoint2index = WeakKeyDictionary()
class _SP(object):
def __init__(self, txn, index):
self.transaction = txn
self._index = index
def __lt__(self, other):
return self._index < other._index
def __repr__(self): # pragma: no cover
return '_SP: %d' % self._index
holdme = []
for i in range(10):
sp = _SP(txn, i)
holdme.append(sp) # prevent gc
txn._savepoint2index[sp] = i
self.assertEqual(len(txn._savepoint2index), 10)
txn._remove_and_invalidate_after(holdme[1])
self.assertEqual(sorted(txn._savepoint2index), sorted(holdme[:2]))
def test__invalidate_all_savepoints(self):
from weakref import WeakKeyDictionary
txn = self._makeOne()
txn._savepoint2index = WeakKeyDictionary()
class _SP(object):
def __init__(self, txn, index):
self.transaction = txn
self._index = index
def __repr__(self): # pragma: no cover
return '_SP: %d' % self._index
holdme = []
for i in range(10):
sp = _SP(txn, i)
holdme.append(sp) # prevent gc
txn._savepoint2index[sp] = i
self.assertEqual(len(txn._savepoint2index), 10)
txn._invalidate_all_savepoints()
self.assertEqual(list(txn._savepoint2index), [])
def test_commit_DOOMED(self):
from transaction.interfaces import DoomedTransaction
from transaction._transaction import Status
txn = self._makeOne()
txn.status = Status.DOOMED
self.assertRaises(DoomedTransaction, txn.commit)
def test_commit_COMMITFAILED(self):
from transaction._transaction import Status
from transaction.interfaces import TransactionFailedError
class _Traceback(object):
def getvalue(self):
return 'TRACEBACK'
txn = self._makeOne()
txn.status = Status.COMMITFAILED
txn._failure_traceback = _Traceback()
self.assertRaises(TransactionFailedError, txn.commit)
def test_commit_wo_savepoints_wo_hooks_wo_synchronizers(self):
from transaction._transaction import Status
from transaction.tests.common import DummyLogger
from transaction.tests.common import Monkey
from transaction import _transaction
class _Mgr(object):
def __init__(self, txn):
self._txn = txn
def free(self, txn):
assert txn is self._txn
self._txn = None
logger = DummyLogger()
with Monkey(_transaction, _LOGGER=logger):
txn = self._makeOne()
logger._clear()
mgr = txn._manager = _Mgr(txn)
txn.commit()
self.assertEqual(txn.status, Status.COMMITTED)
self.assertTrue(mgr._txn is None)
self.assertEqual(logger._log[0][0], 'debug')
self.assertEqual(logger._log[0][1], 'commit')
def test_commit_w_savepoints(self):
from weakref import WeakKeyDictionary
from transaction.tests.common import DummyLogger
from transaction.tests.common import Monkey
from transaction import _transaction
class _SP(object):
def __init__(self, txn, index):
self.transaction = txn
self._index = index
def __repr__(self): # pragma: no cover
return '_SP: %d' % self._index
logger = DummyLogger()
with Monkey(_transaction, _LOGGER=logger):
txn = self._makeOne()
txn._savepoint2index = WeakKeyDictionary()
holdme = []
for i in range(10):
sp = _SP(txn, i)
holdme.append(sp) # prevent gc
txn._savepoint2index[sp] = i
logger._clear()
txn.commit()
self.assertEqual(list(txn._savepoint2index), [])
def test_commit_w_beforeCommitHooks(self):
from transaction.tests.common import DummyLogger
from transaction.tests.common import Monkey
from transaction import _transaction
_hooked1, _hooked2 = [], []
def _hook1(*args, **kw):
_hooked1.append((args, kw))
def _hook2(*args, **kw):
_hooked2.append((args, kw))
logger = DummyLogger()
with Monkey(_transaction, _LOGGER=logger):
txn = self._makeOne()
txn._before_commit.append((_hook1, ('one',), {'uno': 1}))
txn._before_commit.append((_hook2, (), {}))
logger._clear()
txn.commit()
self.assertEqual(_hooked1, [(('one',), {'uno': 1})])
self.assertEqual(_hooked2, [((), {})])
self.assertEqual(txn._before_commit, [])
def test_commit_w_synchronizers(self):
from transaction.weakset import WeakSet
from transaction.tests.common import DummyLogger
from transaction.tests.common import Monkey
from transaction import _transaction
class _Synch(object):
_before = _after = False
def beforeCompletion(self, txn):
self._before = txn
def afterCompletion(self, txn):
self._after = txn
synchs = [_Synch(), _Synch(), _Synch()]
ws = WeakSet()
for synch in synchs:
ws.add(synch)
logger = DummyLogger()
with Monkey(_transaction, _LOGGER=logger):
txn = self._makeOne(synchronizers=ws)
logger._clear()
txn.commit()
for synch in synchs:
self.assertTrue(synch._before is txn)
self.assertTrue(synch._after is txn)
def test_commit_w_afterCommitHooks(self):
from transaction.tests.common import DummyLogger
from transaction.tests.common import Monkey
from transaction import _transaction
_hooked1, _hooked2 = [], []
def _hook1(*args, **kw):
_hooked1.append((args, kw))
def _hook2(*args, **kw):
_hooked2.append((args, kw))
logger = DummyLogger()
with Monkey(_transaction, _LOGGER=logger):
txn = self._makeOne()
txn._after_commit.append((_hook1, ('one',), {'uno': 1}))
txn._after_commit.append((_hook2, (), {}))
logger._clear()
txn.commit()
self.assertEqual(_hooked1, [((True, 'one',), {'uno': 1})])
self.assertEqual(_hooked2, [((True,), {})])
self.assertEqual(txn._after_commit, [])
self.assertEqual(txn._resources, [])
def test_commit_error_w_afterCompleteHooks(self):
from transaction import _transaction
from transaction.tests.common import DummyLogger
from transaction.tests.common import Monkey
class BrokenResource(object):
def sortKey(self):
return 'zzz'
def tpc_begin(self, txn):
raise ValueError('test')
broken = BrokenResource()
resource = Resource('aaa')
_hooked1, _hooked2 = [], []
def _hook1(*args, **kw):
_hooked1.append((args, kw))
def _hook2(*args, **kw):
_hooked2.append((args, kw))
logger = DummyLogger()
with Monkey(_transaction, _LOGGER=logger):
txn = self._makeOne()
txn._after_commit.append((_hook1, ('one',), {'uno': 1}))
txn._after_commit.append((_hook2, (), {}))
txn._resources.append(broken)
txn._resources.append(resource)
logger._clear()
self.assertRaises(ValueError, txn.commit)
self.assertEqual(_hooked1, [((False, 'one',), {'uno': 1})])
self.assertEqual(_hooked2, [((False,), {})])
self.assertEqual(txn._after_commit, [])
self.assertTrue(resource._b)
self.assertFalse(resource._c)
self.assertFalse(resource._v)
self.assertFalse(resource._f)
self.assertTrue(resource._a)
self.assertTrue(resource._x)
def test_commit_error_w_synchronizers(self):
from transaction.weakset import WeakSet
from transaction.tests.common import DummyLogger
from transaction.tests.common import Monkey
from transaction import _transaction
class _Synch(object):
_before = _after = False
def beforeCompletion(self, txn):
self._before = txn
def afterCompletion(self, txn):
self._after = txn
synchs = [_Synch(), _Synch(), _Synch()]
ws = WeakSet()
for synch in synchs:
ws.add(synch)
class BrokenResource(object):
def sortKey(self):
return 'zzz'
def tpc_begin(self, txn):
raise ValueError('test')
broken = BrokenResource()
logger = DummyLogger()
with Monkey(_transaction, _LOGGER=logger):
txn = self._makeOne(synchronizers=ws)
logger._clear()
txn._resources.append(broken)
self.assertRaises(ValueError, txn.commit)
for synch in synchs:
self.assertTrue(synch._before is txn)
self.assertTrue(synch._after is txn) # called in _cleanup
def test_commit_clears_resources(self):
class DM(object):
tpc_begin = commit = tpc_finish = tpc_vote = lambda s, txn: True
dm = DM()
txn = self._makeOne()
txn.join(dm)
self.assertEqual(txn._resources, [dm])
txn.commit()
self.assertEqual(txn._resources, [])
def test_getBeforeCommitHooks_empty(self):
txn = self._makeOne()
self.assertEqual(list(txn.getBeforeCommitHooks()), [])
def test_addBeforeCommitHook(self):
def _hook(*args, **kw):
raise AssertionError("Not called")
txn = self._makeOne()
txn.addBeforeCommitHook(_hook, ('one',), dict(uno=1))
self.assertEqual(list(txn.getBeforeCommitHooks()),
[(_hook, ('one',), {'uno': 1})])
def test_callBeforeCommitHook_w_error(self):
from transaction.tests.common import DummyLogger
from transaction.tests.common import Monkey
from transaction import _transaction
_calls = []
def _hook(*args, **kw):
_calls.append((args, kw))
def _hook_err(*args, **kw):
raise ValueError()
logger = DummyLogger()
with Monkey(_transaction, _LOGGER=logger):
txn = self._makeOne()
logger._clear()
txn.addBeforeCommitHook(_hook, ('one',), dict(uno=1))
txn.addBeforeCommitHook(_hook_err, ('two',), dict(dos=2))
txn.addBeforeCommitHook(_hook, ('three',), dict(tres=3))
# only first hook gets called, and instead of logging the error,
# the exception is raised
self.assertRaises(ValueError, txn._callBeforeCommitHooks)
self.assertEqual(_calls, [(('one',), {'uno': 1})])
self.assertEqual(len(logger._log), 0)
def test_addBeforeCommitHook_w_kws(self):
def _hook(*args, **kw):
raise AssertionError("Not called")
txn = self._makeOne()
txn.addBeforeCommitHook(_hook, ('one',))
self.assertEqual(list(txn.getBeforeCommitHooks()),
[(_hook, ('one',), {})])
def test_getAfterCommitHooks_empty(self):
txn = self._makeOne()
self.assertEqual(list(txn.getAfterCommitHooks()), [])
def test_addAfterCommitHook(self):
def _hook(*args, **kw):
raise AssertionError("Not called")
txn = self._makeOne()
txn.addAfterCommitHook(_hook, ('one',), dict(uno=1))
self.assertEqual(list(txn.getAfterCommitHooks()),
[(_hook, ('one',), {'uno': 1})])
def test_addAfterCommitHook_wo_kws(self):
def _hook(*args, **kw):
raise AssertionError("Not called")
txn = self._makeOne()
txn.addAfterCommitHook(_hook, ('one',))
self.assertEqual(list(txn.getAfterCommitHooks()),
[(_hook, ('one',), {})])
def test_callAfterCommitHook_w_error(self):
from transaction.tests.common import DummyLogger
from transaction.tests.common import Monkey
from transaction import _transaction
_hooked2 = []
def _hook1(*args, **kw):
raise ValueError()
def _hook2(*args, **kw):
_hooked2.append((args, kw))
logger = DummyLogger()
with Monkey(_transaction, _LOGGER=logger):
txn = self._makeOne()
logger._clear()
txn.addAfterCommitHook(_hook1, ('one',))
txn.addAfterCommitHook(_hook2, ('two',), dict(dos=2))
txn._callAfterCommitHooks()
# second hook gets called even if first raises
self.assertEqual(_hooked2, [((True, 'two',), {'dos': 2})])
self.assertEqual(len(logger._log), 1)
self.assertEqual(logger._log[0][0], 'error')
self.assertTrue(logger._log[0][1].startswith("Error in hook"))
def test_callAfterCommitHook_w_abort(self):
from transaction.tests.common import DummyLogger
from transaction.tests.common import Monkey
from transaction import _transaction
_hooked2 = []
def _hook1(*args, **kw):
raise ValueError()
def _hook2(*args, **kw):
_hooked2.append((args, kw))
logger = DummyLogger()
with Monkey(_transaction, _LOGGER=logger):
txn = self._makeOne()
logger._clear()
txn.addAfterCommitHook(_hook1, ('one',))
txn.addAfterCommitHook(_hook2, ('two',), dict(dos=2))
txn._callAfterCommitHooks()
self.assertEqual(logger._log[0][0], 'error')
self.assertTrue(logger._log[0][1].startswith("Error in hook"))
def test__commitResources_normal(self):
from transaction.tests.common import DummyLogger
from transaction.tests.common import Monkey
from transaction import _transaction
resources = [Resource('bbb'), Resource('aaa')]
logger = DummyLogger()
with Monkey(_transaction, _LOGGER=logger):
txn = self._makeOne()
logger._clear()
txn._resources.extend(resources)
txn._commitResources()
self.assertEqual(len(txn._voted), 2)
for r in resources:
self.assertTrue(r._b and r._c and r._v and r._f)
self.assertFalse(r._a and r._x)
self.assertTrue(id(r) in txn._voted)
self.assertEqual(len(logger._log), 2)
self.assertEqual(logger._log[0][0], 'debug')
self.assertEqual(logger._log[0][1], 'commit Resource: aaa')
self.assertEqual(logger._log[1][0], 'debug')
self.assertEqual(logger._log[1][1], 'commit Resource: bbb')
def test__commitResources_error_in_tpc_begin(self):
from transaction.tests.common import DummyLogger
from transaction.tests.common import Monkey
from transaction import _transaction
resources = [Resource('bbb', 'tpc_begin'), Resource('aaa')]
logger = DummyLogger()
with Monkey(_transaction, _LOGGER=logger):
txn = self._makeOne()
logger._clear()
txn._resources.extend(resources)
self.assertRaises(ValueError, txn._commitResources)
for r in resources:
if r._key == 'aaa':
self.assertTrue(r._b)
else:
self.assertFalse(r._b)
self.assertFalse(r._c and r._v and r._f)
self.assertTrue(r._a and r._x)
self.assertEqual(len(logger._log), 0)
def test__commitResources_error_in_afterCompletion(self):
from transaction.tests.common import DummyLogger
from transaction.tests.common import Monkey
from transaction import _transaction
class _Synchronizers(object):
def __init__(self, res):
self._res = res
def map(self, func):
for res in self._res:
func(res)
resources = [Resource('bbb', 'tpc_begin'),
Resource('aaa', 'afterCompletion')]
sync = _Synchronizers(resources)
logger = DummyLogger()
with Monkey(_transaction, _LOGGER=logger):
txn = self._makeOne(sync)
logger._clear()
txn._resources.extend(resources)
self.assertRaises(ValueError, txn._commitResources)
for r in resources:
if r._key == 'aaa':
self.assertTrue(r._b)
else:
self.assertFalse(r._b)
self.assertFalse(r._c and r._v and r._f)
self.assertTrue(r._a and r._x)
self.assertEqual(len(logger._log), 0)
self.assertTrue(resources[0]._after)
self.assertFalse(resources[1]._after)
def test__commitResources_error_in_commit(self):
from transaction.tests.common import DummyLogger
from transaction.tests.common import Monkey
from transaction import _transaction
resources = [Resource('bbb', 'commit'), Resource('aaa')]
logger = DummyLogger()
with Monkey(_transaction, _LOGGER=logger):
txn = self._makeOne()
logger._clear()
txn._resources.extend(resources)
self.assertRaises(ValueError, txn._commitResources)
for r in resources:
self.assertTrue(r._b)
if r._key == 'aaa':
self.assertTrue(r._c)
else:
self.assertFalse(r._c)
self.assertFalse(r._v and r._f)
self.assertTrue(r._a and r._x)
self.assertEqual(len(logger._log), 1)
self.assertEqual(logger._log[0][0], 'debug')
self.assertEqual(logger._log[0][1], 'commit Resource: aaa')
def test__commitResources_error_in_tpc_vote(self):
from transaction.tests.common import DummyLogger
from transaction.tests.common import Monkey
from transaction import _transaction
resources = [Resource('bbb', 'tpc_vote'), Resource('aaa')]
logger = DummyLogger()
with Monkey(_transaction, _LOGGER=logger):
txn = self._makeOne()
logger._clear()
txn._resources.extend(resources)
self.assertRaises(ValueError, txn._commitResources)
self.assertEqual(len(txn._voted), 1)
for r in resources:
self.assertTrue(r._b and r._c)
if r._key == 'aaa':
self.assertTrue(id(r) in txn._voted)
self.assertTrue(r._v)
self.assertFalse(r._f)
self.assertFalse(r._a)
self.assertTrue(r._x)
else:
self.assertFalse(id(r) in txn._voted)
self.assertFalse(r._v)
self.assertFalse(r._f)
self.assertTrue(r._a and r._x)
self.assertEqual(len(logger._log), 2)
self.assertEqual(logger._log[0][0], 'debug')
self.assertEqual(logger._log[0][1], 'commit Resource: aaa')
self.assertEqual(logger._log[1][0], 'debug')
self.assertEqual(logger._log[1][1], 'commit Resource: bbb')
def test__commitResources_error_in_tpc_finish(self):
from transaction.tests.common import DummyLogger
from transaction.tests.common import Monkey
from transaction import _transaction
resources = [Resource('bbb', 'tpc_finish'), Resource('aaa')]
logger = DummyLogger()
with Monkey(_transaction, _LOGGER=logger):
txn = self._makeOne()
logger._clear()
txn._resources.extend(resources)
self.assertRaises(ValueError, txn._commitResources)
for r in resources:
self.assertTrue(r._b and r._c and r._v)
self.assertTrue(id(r) in txn._voted)
if r._key == 'aaa':
self.assertTrue(r._f)
else:
self.assertFalse(r._f)
self.assertFalse(r._a and r._x) # no cleanup if tpc_finish raises
self.assertEqual(len(logger._log), 3)
self.assertEqual(logger._log[0][0], 'debug')
self.assertEqual(logger._log[0][1], 'commit Resource: aaa')
self.assertEqual(logger._log[1][0], 'debug')
self.assertEqual(logger._log[1][1], 'commit Resource: bbb')
self.assertEqual(logger._log[2][0], 'critical')
self.assertTrue(logger._log[2][1].startswith(
'A storage error occurred'))
def test_abort_wo_savepoints_wo_hooks_wo_synchronizers(self):
from transaction._transaction import Status
from transaction.tests.common import DummyLogger
from transaction.tests.common import Monkey
from transaction import _transaction
class _Mgr(object):
def __init__(self, txn):
self._txn = txn
def free(self, txn):
assert txn is self._txn
self._txn = None
logger = DummyLogger()
with Monkey(_transaction, _LOGGER=logger):
txn = self._makeOne()
logger._clear()
mgr = txn._manager = _Mgr(txn)
txn.abort()
self.assertEqual(txn.status, Status.ACTIVE)
self.assertTrue(mgr._txn is None)
self.assertEqual(logger._log[0][0], 'debug')
self.assertEqual(logger._log[0][1], 'abort')
def test_abort_w_savepoints(self):
from weakref import WeakKeyDictionary
from transaction.tests.common import DummyLogger
from transaction.tests.common import Monkey
from transaction import _transaction
class _SP(object):
def __init__(self, txn, index):
self.transaction = txn
self._index = index
def __repr__(self): # pragma: no cover
return '_SP: %d' % self._index
logger = DummyLogger()
with Monkey(_transaction, _LOGGER=logger):
txn = self._makeOne()
txn._savepoint2index = WeakKeyDictionary()
holdme = []
for i in range(10):
sp = _SP(txn, i)
holdme.append(sp) # prevent gc
txn._savepoint2index[sp] = i
logger._clear()
txn.abort()
self.assertEqual(list(txn._savepoint2index), [])
def test_abort_w_beforeCommitHooks(self):
from transaction.tests.common import DummyLogger
from transaction.tests.common import Monkey
from transaction import _transaction
_hooked1, _hooked2 = [], []
def _hook1(*args, **kw):
raise AssertionError("Not called")
def _hook2(*args, **kw):
raise AssertionError("Not called")
logger = DummyLogger()
with Monkey(_transaction, _LOGGER=logger):
txn = self._makeOne()
txn._before_commit.append((_hook1, ('one',), {'uno': 1}))
txn._before_commit.append((_hook2, (), {}))
logger._clear()
txn.abort()
self.assertEqual(_hooked1, [])
self.assertEqual(_hooked2, [])
# Hooks are not called but cleared on abort
self.assertEqual(list(txn.getBeforeCommitHooks()), [])
self.assertIsNone(txn._manager)
def test_abort_w_synchronizers(self):
from transaction.tests.common import DummyLogger
from transaction.tests.common import Monkey
from transaction import _transaction
test = self
class _Synch(object):
_before = _after = None
def beforeCompletion(self, txn):
self._before = txn
txn.set_data(self, 42)
test.assertIsNotNone(txn._manager)
def afterCompletion(self, txn):
self._after = txn
# data is accessible afterCompletion,
# but the transaction is not current anymore.
test.assertEqual(42, txn.data(self))
test.assertIsNone(txn._manager)
class _BadSynch(_Synch):
def afterCompletion(self, txn):
_Synch.afterCompletion(self, txn)
raise SystemExit
# Ensure iteration order
class Synchs(object):
synchs = [_Synch(), _Synch(), _Synch(), _BadSynch()]
def map(self, func):
for s in self.synchs:
func(s)
logger = DummyLogger()
class Manager(object):
txn = None
def free(self, txn):
test.assertIs(txn, self.txn)
self.txn = None
manager = Manager()
synchs = Synchs()
with Monkey(_transaction, _LOGGER=logger):
txn = self._makeOne(synchronizers=synchs, manager=manager)
manager.txn = txn
logger._clear()
with self.assertRaises(SystemExit):
txn.abort()
for synch in synchs.synchs:
self.assertIs(synch._before, txn)
self.assertIs(synch._after, txn)
# And everything was cleaned up despite raising the bad
# exception
self.assertIsNone(txn._manager)
self.assertIsNot(txn._synchronizers, synchs)
self.assertIsNone(manager.txn)
def test_abort_w_afterCommitHooks(self):
from transaction.tests.common import DummyLogger
from transaction.tests.common import Monkey
from transaction import _transaction
_hooked1, _hooked2 = [], []
def _hook1(*args, **kw):
raise AssertionError("Not called")
def _hook2(*args, **kw):
raise AssertionError("Not called")
logger = DummyLogger()
with Monkey(_transaction, _LOGGER=logger):
txn = self._makeOne()
txn._after_commit.append((_hook1, ('one',), {'uno': 1}))
txn._after_commit.append((_hook2, (), {}))
logger._clear()
txn.abort()
# Hooks are not called but cleared on abort
self.assertEqual(_hooked1, [])
self.assertEqual(_hooked2, [])
self.assertEqual(list(txn.getAfterCommitHooks()), [])
self.assertEqual(txn._resources, [])
self.assertIsNone(txn._manager)
def test_abort_error_w_afterCommitHooks(self):
from transaction import _transaction
from transaction.tests.common import DummyLogger
from transaction.tests.common import Monkey
class BrokenResource(object):
def sortKey(self):
raise AssertionError("Not called")
def abort(self, txn):
raise ValueError('test')
broken = BrokenResource()
aaa = Resource('aaa')
broken2 = BrokenResource()
_hooked1, _hooked2 = [], []
def _hook1(*args, **kw):
raise AssertionError("Not called")
def _hook2(*args, **kw):
raise AssertionError("Not called")
logger = DummyLogger()
with Monkey(_transaction, _LOGGER=logger):
txn = self._makeOne()
txn._after_commit.append((_hook1, ('one',), {'uno': 1}))
txn._after_commit.append((_hook2, (), {}))
txn._resources.append(aaa)
txn._resources.append(broken)
txn._resources.append(broken2)
logger._clear()
self.assertRaises(ValueError, txn.abort)
# Hooks are not called but cleared on abort
self.assertEqual(_hooked1, [])
self.assertEqual(_hooked2, [])
self.assertEqual(list(txn.getAfterCommitHooks()), [])
self.assertTrue(aaa._a)
self.assertFalse(aaa._x)
self.assertIsNone(txn._manager)
def test_abort_error_w_synchronizers(self):
from transaction.weakset import WeakSet
from transaction.tests.common import DummyLogger
from transaction.tests.common import Monkey
from transaction import _transaction
class _Synch(object):
_before = _after = False
def beforeCompletion(self, txn):
self._before = txn
def afterCompletion(self, txn):
self._after = txn
synchs = [_Synch(), _Synch(), _Synch()]
ws = WeakSet()
for synch in synchs:
ws.add(synch)
class BrokenResource(object):
def sortKey(self):
raise AssertionError("Should not be called")
def abort(self, txn):
raise ValueError('test')
broken = BrokenResource()
logger = DummyLogger()
with Monkey(_transaction, _LOGGER=logger):
t = self._makeOne(synchronizers=ws)
logger._clear()
t._resources.append(broken)
self.assertRaises(ValueError, t.abort)
for synch in synchs:
self.assertTrue(synch._before is t)
self.assertTrue(synch._after is t) # called in _cleanup
self.assertIsNot(t._synchronizers, ws)
def test_abort_synchronizer_error_w_resources(self):
from transaction.tests.common import DummyLogger
from transaction.tests.common import Monkey
from transaction import _transaction
class _Synch(object):
_before = _after = False
def beforeCompletion(self, txn):
self._before = txn
def afterCompletion(self, txn):
self._after = txn
class _BadSynch(_Synch):
def beforeCompletion(self, txn):
_Synch.beforeCompletion(self, txn)
raise SystemExit
# Ensure iteration order
class Synchs(object):
synchs = [_Synch(), _Synch(), _Synch(), _BadSynch()]
def map(self, func):
for s in self.synchs:
func(s)
resource = Resource('a')
logger = DummyLogger()
synchs = Synchs()
with Monkey(_transaction, _LOGGER=logger):
t = self._makeOne(synchronizers=synchs)
logger._clear()
t._resources.append(resource)
with self.assertRaises(SystemExit):
t.abort()
for synch in synchs.synchs:
self.assertTrue(synch._before is t)
self.assertTrue(synch._after is t) # called in _cleanup
self.assertIsNot(t._synchronizers, synchs)
self.assertTrue(resource._a)
def test_abort_clears_resources(self):
class DM(object):
def abort(self, txn):
return True
dm = DM()
txn = self._makeOne()
txn.join(dm)
self.assertEqual(txn._resources, [dm])
txn.abort()
self.assertEqual(txn._resources, [])
def test_getBeforeAbortHooks_empty(self):
txn = self._makeOne()
self.assertEqual(list(txn.getBeforeAbortHooks()), [])
def test_addBeforeAbortHook(self):
def _hook(*args, **kw):
raise AssertionError("Not called")
txn = self._makeOne()
txn.addBeforeAbortHook(_hook, ('one',), dict(uno=1))
self.assertEqual(list(txn.getBeforeAbortHooks()),
[(_hook, ('one',), {'uno': 1})])
def test_addBeforeAbortHook_w_kws(self):
def _hook(*args, **kw):
raise AssertionError("Not called")
txn = self._makeOne()
txn.addBeforeAbortHook(_hook, ('one',))
self.assertEqual(list(txn.getBeforeAbortHooks()),
[(_hook, ('one',), {})])
def test_getAfterAbortHooks_empty(self):
txn = self._makeOne()
self.assertEqual(list(txn.getAfterAbortHooks()), [])
def test_addAfterAbortHook(self):
def _hook(*args, **kw):
raise AssertionError("Not called")
txn = self._makeOne()
txn.addAfterAbortHook(_hook, ('one',), dict(uno=1))
self.assertEqual(list(txn.getAfterAbortHooks()),
[(_hook, ('one',), {'uno': 1})])
def test_addAfterAbortHook_wo_kws(self):
def _hook(*args, **kw):
raise AssertionError("Not called")
txn = self._makeOne()
txn.addAfterAbortHook(_hook, ('one',))
self.assertEqual(list(txn.getAfterAbortHooks()),
[(_hook, ('one',), {})])
def test_callBeforeAbortHook_w_error(self):
from transaction.tests.common import DummyLogger
from transaction.tests.common import Monkey
from transaction import _transaction
_hooked2 = []
def _hook1(*args, **kw):
raise ValueError()
def _hook2(*args, **kw):
_hooked2.append((args, kw))
logger = DummyLogger()
with Monkey(_transaction, _LOGGER=logger):
txn = self._makeOne()
logger._clear()
txn.addBeforeAbortHook(_hook1, ('one',))
txn.addBeforeAbortHook(_hook2, ('two',), dict(dos=2))
txn._callBeforeAbortHooks()
# second hook gets called even if first raises
self.assertEqual(_hooked2, [(('two',), {'dos': 2})])
self.assertEqual(len(logger._log), 1)
self.assertEqual(logger._log[0][0], 'error')
self.assertTrue(logger._log[0][1].startswith("Error in hook"))
def test_callBeforeAbortHook_w_abort(self):
from transaction.tests.common import DummyLogger
from transaction.tests.common import Monkey
from transaction import _transaction
_hooked2 = []
def _hook1(*args, **kw):
raise ValueError()
def _hook2(*args, **kw):
_hooked2.append((args, kw))
logger = DummyLogger()
with Monkey(_transaction, _LOGGER=logger):
txn = self._makeOne()
logger._clear()
txn.addBeforeAbortHook(_hook1, ('one',))
txn.addBeforeAbortHook(_hook2, ('two',), dict(dos=2))
txn._callBeforeAbortHooks()
self.assertEqual(logger._log[0][0], 'error')
self.assertTrue(logger._log[0][1].startswith("Error in hook"))
def test_callAfterAbortHook_w_abort_error(self):
from transaction.tests.common import DummyLogger
from transaction.tests.common import Monkey
from transaction import _transaction
_hooked2 = []
def _hook2(*args, **kw):
_hooked2.append((args, kw))
logger = DummyLogger()
with Monkey(_transaction, _LOGGER=logger):
txn = self._makeOne()
logger._clear()
r = Resource("r", "abort")
txn.join(r)
txn.addAfterAbortHook(_hook2, ('two',), dict(dos=2))
txn._callAfterAbortHooks()
self.assertEqual(logger._log[0][0], 'error')
self.assertTrue(
logger._log[0][1].startswith("Error in abort() on manager"))
def test_callAfterAbortHook_w_error_w_abort_error(self):
from transaction.tests.common import DummyLogger
from transaction.tests.common import Monkey
from transaction import _transaction
_hooked2 = []
def _hook1(*args, **kw):
raise ValueError()
def _hook2(*args, **kw):
_hooked2.append((args, kw)) # pragma: no cover
logger = DummyLogger()
with Monkey(_transaction, _LOGGER=logger):
txn = self._makeOne()
logger._clear()
r = Resource("r", "abort")
txn.join(r)
txn.addAfterAbortHook(_hook1, ('one',), dict(dos=1))
txn.addAfterAbortHook(_hook2, ('two',), dict(dos=2))
with self.assertRaises(ValueError):
txn._callAfterAbortHooks()
self.assertEqual(logger._log[0][0], 'error')
self.assertTrue(
logger._log[0][1].startswith("Error in abort() on manager"))
def test_abort_w_abortHooks(self):
comm = []
txn = self._makeOne()
def bah():
comm.append("before")
def aah():
comm.append("after")
txn.addAfterAbortHook(aah)
txn.addBeforeAbortHook(bah)
txn.abort()
self.assertEqual(comm, ["before", "after"])
self.assertEqual(list(txn.getBeforeAbortHooks()), [])
self.assertEqual(list(txn.getAfterAbortHooks()), [])
def test_commit_w_abortHooks(self):
comm = []
txn = self._makeOne()
def bah():
comm.append("before") # pragma: no cover
def aah():
comm.append("after") # pragma: no cover
txn.addAfterAbortHook(aah)
txn.addBeforeAbortHook(bah)
txn.commit()
self.assertEqual(comm, []) # not called
# but cleared
self.assertEqual(list(txn.getBeforeAbortHooks()), [])
self.assertEqual(list(txn.getAfterAbortHooks()), [])
def test_commit_w_error_w_abortHooks(self):
comm = []
txn = self._makeOne()
def bah():
comm.append("before") # pragma: no cover
def aah():
comm.append("after") # pragma: no cover
txn.addAfterAbortHook(aah)
txn.addBeforeAbortHook(bah)
r = Resource("aaa", "tpc_vote")
txn.join(r)
with self.assertRaises(ValueError):
txn.commit()
self.assertEqual(comm, []) # not called
# not cleared
self.assertEqual(list(txn.getBeforeAbortHooks()), [(bah, (), {})])
self.assertEqual(list(txn.getAfterAbortHooks()), [(aah, (), {})])
def test_note(self):
txn = self._makeOne()
try:
txn.note(u'This is a note.')
self.assertEqual(txn.description, u'This is a note.')
txn.note(u'Another.')
self.assertEqual(txn.description, u'This is a note.\nAnother.')
finally:
txn.abort()
def test_note_bytes(self):
txn = self._makeOne()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
txn.note(b'haha')
self.assertNonTextDeprecationWarning(w)
self.assertEqual(txn.description, u'haha')
def test_note_None(self):
txn = self._makeOne()
self.assertEqual(u'', txn.description)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
txn.note(None)
self.assertFalse(w)
self.assertEqual(txn.description, u'')
def test_note_42(self):
txn = self._makeOne()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
txn.note(42)
self.assertNonTextDeprecationWarning(w)
self.assertEqual(txn.description, u'42')
def assertNonTextDeprecationWarning(self, w):
[w] = w
self.assertEqual(
(DeprecationWarning, "Expected text",
os.path.splitext(__file__)[0]),
(w.category, str(w.message), os.path.splitext(w.filename)[0]),
)
def test_description_bytes(self):
txn = self._makeOne()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
txn.description = b'haha'
self.assertNonTextDeprecationWarning(w)
self.assertEqual(txn.description, u'haha')
def test_description_42(self):
txn = self._makeOne()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
txn.description = 42
self.assertNonTextDeprecationWarning(w)
self.assertEqual(txn.description, u'42')
def test_description_None(self):
txn = self._makeOne()
self.assertEqual(u'', txn.description)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
txn.description = None
self.assertFalse(w)
self.assertEqual(txn.description, u'')
def test_setUser_default_path(self):
txn = self._makeOne()
txn.setUser(u'phreddy')
self.assertEqual(txn.user, u'/ phreddy')
def test_setUser_explicit_path(self):
txn = self._makeOne()
txn.setUser(u'phreddy', u'/bedrock')
self.assertEqual(txn.user, u'/bedrock phreddy')
def test_user_w_none(self):
txn = self._makeOne()
txn.user = b'phreddy'
with self.assertRaises(ValueError):
txn.user = None  # None is rejected rather than coerced to empty text
self.assertEqual(txn.user, u'phreddy')
def _test_user_non_text(self, user, path, expect, both=False):
txn = self._makeOne()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
if path:
txn.setUser(user, path)
else:
if path is None:
txn.setUser(user)
else:
txn.user = user
if both:
self.assertNonTextDeprecationWarning(w[:1])
self.assertNonTextDeprecationWarning(w[1:])
else:
self.assertNonTextDeprecationWarning(w)
self.assertEqual(expect, txn.user)
def test_user_non_text(self, user=b'phreddy', path=b'/bedrock',
expect=u"/bedrock phreddy", both=True):
self._test_user_non_text(b'phreddy', b'/bedrock',
u"/bedrock phreddy", True)
self._test_user_non_text(b'phreddy', None, u'/ phreddy')
self._test_user_non_text(b'phreddy', False, u'phreddy')
self._test_user_non_text(b'phreddy', u'/bedrock', u'/bedrock phreddy')
self._test_user_non_text(u'phreddy', b'/bedrock', u'/bedrock phreddy')
self._test_user_non_text(u'phreddy', 2, u'2 phreddy')
self._test_user_non_text(1, u'/bedrock', u'/bedrock 1')
self._test_user_non_text(1, 2, u'2 1', True)
def test_setExtendedInfo_single(self):
txn = self._makeOne()
txn.setExtendedInfo('frob', 'qux')
self.assertEqual(txn.extension, {u'frob': 'qux'})
self.assertTrue(txn._extension is txn.extension) # legacy
def test_setExtendedInfo_multiple(self):
txn = self._makeOne()
txn.setExtendedInfo('frob', 'qux')
txn.setExtendedInfo('baz', 'spam')
txn.setExtendedInfo('frob', 'quxxxx')
self.assertEqual(txn._extension, {u'frob': 'quxxxx', u'baz': 'spam'})
self.assertTrue(txn._extension is txn.extension) # legacy
def test__extension_settable(self):
# Because ZEO sets it. I'll fix ZEO, but maybe something else will
# break
txn = self._makeOne()
txn._extension = dict(baz='spam')
txn.setExtendedInfo('frob', 'qux')
self.assertEqual(txn.extension, {u'frob': 'qux', 'baz': 'spam'})
def test_data(self):
txn = self._makeOne()
# Can't get data that wasn't set:
with self.assertRaises(KeyError) as c:
txn.data(self)
self.assertEqual(c.exception.args, (self,))
data = dict(a=1)
txn.set_data(self, data)
self.assertEqual(txn.data(self), data)
# Can't get something we haven't stored.
with self.assertRaises(KeyError) as c:
txn.data(data)
self.assertEqual(c.exception.args, (data,))
# When the transaction ends, data are discarded:
txn.commit()
with self.assertRaises(KeyError) as c:
txn.data(self)
self.assertEqual(c.exception.args, (self,))
def test_isRetryableError_w_transient_error(self):
from transaction.interfaces import TransientError
from transaction._manager import TransactionManager
txn = self._makeOne(manager=TransactionManager())
txn._manager._txn = txn
self.assertTrue(txn.isRetryableError(TransientError()))
def test_isRetryableError_w_transient_subclass(self):
from transaction.interfaces import TransientError
from transaction._manager import TransactionManager
class _Derived(TransientError):
pass
txn = self._makeOne(manager=TransactionManager())
txn._manager._txn = txn
self.assertTrue(txn.isRetryableError(_Derived()))
def test_isRetryableError_w_normal_exception_no_resources(self):
from transaction._manager import TransactionManager
txn = self._makeOne(manager=TransactionManager())
txn._manager._txn = txn
self.assertFalse(txn.isRetryableError(Exception()))
def test_isRetryableError_w_normal_exception_w_resource_voting_yes(self):
from transaction._manager import TransactionManager
class _Resource(object):
def should_retry(self, err):
return True
txn = self._makeOne(manager=TransactionManager())
txn._manager._txn = txn
txn._resources.append(_Resource())
self.assertTrue(txn.isRetryableError(Exception()))
def test_isRetryableError_w_multiple(self):
from transaction._manager import TransactionManager
class _Resource(object):
_should = True
def should_retry(self, err):
return self._should
txn = self._makeOne(manager=TransactionManager())
txn._manager._txn = txn
res1 = _Resource()
res1._should = False
res2 = _Resource()
txn._resources.append(res1)
txn._resources.append(res2)
self.assertTrue(txn.isRetryableError(Exception()))
class Test_rm_key(unittest.TestCase):
def _callFUT(self, oid):
from transaction._transaction import rm_key
return rm_key(oid)
def test_miss(self):
self.assertTrue(self._callFUT(object()) is None)
def test_hit(self):
self.assertEqual(self._callFUT(Resource('zzz')), 'zzz')
class SavepointTests(unittest.TestCase):
def _getTargetClass(self):
from transaction._transaction import Savepoint
return Savepoint
def _makeOne(self, txn, optimistic, *resources):
return self._getTargetClass()(txn, optimistic, *resources)
def test_ctor_w_savepoint_oblivious_resource_non_optimistic(self):
txn = object()
resource = object()
self.assertRaises(TypeError, self._makeOne, txn, False, resource)
def test_ctor_w_savepoint_oblivious_resource_optimistic(self):
from transaction._transaction import NoRollbackSavepoint
txn = object()
resource = object()
sp = self._makeOne(txn, True, resource)
self.assertEqual(len(sp._savepoints), 1)
self.assertTrue(isinstance(sp._savepoints[0], NoRollbackSavepoint))
self.assertTrue(sp._savepoints[0].datamanager is resource)
def test_ctor_w_savepoint_aware_resources(self):
class _Aware(object):
def savepoint(self):
return self
txn = object()
one = _Aware()
another = _Aware()
sp = self._makeOne(txn, True, one, another)
self.assertEqual(len(sp._savepoints), 2)
self.assertTrue(isinstance(sp._savepoints[0], _Aware))
self.assertTrue(sp._savepoints[0] is one)
self.assertTrue(isinstance(sp._savepoints[1], _Aware))
self.assertTrue(sp._savepoints[1] is another)
def test_valid_wo_transaction(self):
sp = self._makeOne(None, True, object())
self.assertFalse(sp.valid)
def test_valid_w_transaction(self):
sp = self._makeOne(object(), True, object())
self.assertTrue(sp.valid)
def test_rollback_w_txn_None(self):
from transaction.interfaces import InvalidSavepointRollbackError
txn = None
class _Aware(object):
def savepoint(self):
return self
resource = _Aware()
sp = self._makeOne(txn, False, resource)
self.assertRaises(InvalidSavepointRollbackError, sp.rollback)
def test_rollback_w_sp_error(self):
class _TXN(object):
_sarce = False
_raia = None
def _saveAndRaiseCommitishError(self):
import sys
from transaction._compat import reraise
self._sarce = True
reraise(*sys.exc_info())
def _remove_and_invalidate_after(self, sp):
self._raia = sp
class _Broken(object):
def rollback(self):
raise ValueError()
_broken = _Broken()
class _GonnaRaise(object):
def savepoint(self):
return _broken
txn = _TXN()
resource = _GonnaRaise()
sp = self._makeOne(txn, False, resource)
self.assertRaises(ValueError, sp.rollback)
self.assertTrue(txn._raia is sp)
self.assertTrue(txn._sarce)
class AbortSavepointTests(unittest.TestCase):
def _getTargetClass(self):
from transaction._transaction import AbortSavepoint
return AbortSavepoint
def _makeOne(self, datamanager, transaction):
return self._getTargetClass()(datamanager, transaction)
def test_ctor(self):
dm = object()
txn = object()
asp = self._makeOne(dm, txn)
self.assertTrue(asp.datamanager is dm)
self.assertTrue(asp.transaction is txn)
def test_rollback(self):
class _DM(object):
_aborted = None
def abort(self, txn):
self._aborted = txn
class _TXN(object):
_unjoined = None
def _unjoin(self, datamanager):
self._unjoin = datamanager
dm = _DM()
txn = _TXN()
asp = self._makeOne(dm, txn)
asp.rollback()
self.assertTrue(dm._aborted is txn)
self.assertTrue(txn._unjoin is dm)
class NoRollbackSavepointTests(unittest.TestCase):
def _getTargetClass(self):
from transaction._transaction import NoRollbackSavepoint
return NoRollbackSavepoint
def _makeOne(self, datamanager):
return self._getTargetClass()(datamanager)
def test_ctor(self):
dm = object()
nrsp = self._makeOne(dm)
self.assertTrue(nrsp.datamanager is dm)
def test_rollback(self):
dm = object()
nrsp = self._makeOne(dm)
self.assertRaises(TypeError, nrsp.rollback)
class MiscellaneousTests(unittest.TestCase):
def test_bug239086(self):
# The original implementation of thread transaction manager made
# invalid assumptions about thread ids.
import threading
import transaction
import transaction.tests.savepointsample as SPS
dm = SPS.SampleSavepointDataManager()
self.assertEqual(list(dm.keys()), [])
class Sync(object):
def __init__(self, label):
self.label = label
self.log = []
def beforeCompletion(self, txn):
raise AssertionError("Not called")
def afterCompletion(self, txn):
raise AssertionError("Not called")
def newTransaction(self, txn):
self.log.append('%s %s' % (self.label, 'new'))
def run_in_thread(f):
txn = threading.Thread(target=f)
txn.start()
txn.join()
sync = Sync(1)
@run_in_thread
def _():
transaction.manager.registerSynch(sync)
transaction.manager.begin()
dm['a'] = 1
self.assertEqual(sync.log, ['1 new'])
@run_in_thread
def _2():
transaction.abort() # should do nothing.
self.assertEqual(sync.log, ['1 new'])
self.assertEqual(list(dm.keys()), ['a'])
dm = SPS.SampleSavepointDataManager()
self.assertEqual(list(dm.keys()), [])
@run_in_thread
def _3():
dm['a'] = 1
self.assertEqual(sync.log, ['1 new'])
transaction.abort() # should do nothing
self.assertEqual(list(dm.keys()), ['a'])
def test_gh5(self):
from transaction import _transaction
from transaction._compat import native_
buffer = _transaction._makeTracebackBuffer()
s = u'ąčę'
buffer.write(s)
buffer.seek(0)
self.assertEqual(buffer.read(), native_(s, 'utf-8'))
class Resource(object):
_b = _c = _v = _f = _a = _x = _after = False
def __init__(self, key, error=None):
self._key = key
self._error = error
def __repr__(self):
return 'Resource: %s' % self._key
def sortKey(self):
return self._key
def tpc_begin(self, txn):
if self._error == 'tpc_begin':
raise ValueError()
self._b = True
def commit(self, txn):
if self._error == 'commit':
raise ValueError()
self._c = True
def tpc_vote(self, txn):
if self._error == 'tpc_vote':
raise ValueError()
self._v = True
def tpc_finish(self, txn):
if self._error == 'tpc_finish':
raise ValueError()
self._f = True
def abort(self, txn):
if self._error == 'abort':
raise AssertionError("Not called in that state")
self._a = True
def tpc_abort(self, txn):
if self._error == 'tpc_abort':
raise AssertionError("Not called in that state")
self._x = True
def afterCompletion(self, txn):
if self._error == 'afterCompletion':
raise ValueError()
self._after = True
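# For reference (a summary inferred from the tests above, not part of the
# original file): in a successful commit each joined resource sees
# tpc_begin -> commit -> tpc_vote -> tpc_finish, which is what the _b, _c,
# _v and _f flags record; when a step fails the transaction falls back to
# abort/tpc_abort on the remaining resources, recorded by _a and _x.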
|
app.py
|
import os
import sys
import logging
import multiprocessing
import time
from flask import Flask, request, jsonify
from flask_cors import CORS
import io
from io import BytesIO
from PIL import Image
import cv2
import numpy as np
from worker import get_model_api
# define the app
app = Flask(__name__)
CORS(app) # needed for cross-domain requests, allow everything by default
UPLOAD_FOLDER = os.path.basename('uploads')
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
# Default worker timeout in seconds; may be overridden from the TIMEOUT_SECS
# environment variable in the __main__ block below. Defining it here keeps the
# handler working when the module is imported by a WSGI server.
timeout_secs = 3
# load the model
model_api = get_model_api()
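# get_model_api() comes from worker.py (not shown here). A hedged stub for
# local testing, matching how model_api is invoked below with
# (request_id, filename, return_list) and expected to append its result dict
# to the shared list:
#
#   def get_model_api():
#       def model_api(request_id, filename, return_list):
#           return_list.append({'status': 'OK', 'id': request_id,
#                               'file': filename})
#       return model_api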
# API route
@app.route('/answer/blank', methods=['POST'])
def mathreco():
"""API function
All model-specific logic to be defined in the get_model_api()
function
"""
try:
request_id=request.form['id']
except:
request_id=888
try:
color_space=request.form['color']
except:
color_space='sRGB'
file = request.files['image']
filename = os.path.join(app.config['UPLOAD_FOLDER'], file.filename)
file.save(filename)
app.logger.debug("api_input: " + filename)
if color_space != 'sRGB':
img = cv2.imread(filename)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img2 = np.zeros_like(img)
img2[:,:,0] = gray
img2[:,:,1] = gray
img2[:,:,2] = gray
cv2.imwrite(filename, img2)
app.logger.debug("color space converted : " + filename)
manager = multiprocessing.Manager()
return_list = manager.list()
p = multiprocessing.Process(target=model_api, args=(request_id, filename,return_list))
p.start()
for _ in range(20*timeout_secs):
# check worker every 50 ms
time.sleep(0.05)
if len(return_list) > 0 :
output_data =return_list[0]
#app.logger.debug("api_output: " + str(output_data))
# Cleanup
p.terminate()
p.join()
response = jsonify(output_data)
return response
#output_data = model_api(input_data, return_dict)
# Terminate worker after timeout
app.logger.debug("Timeout")
p.terminate()
output_data={}
output_data['status']='Timeout'
output_data['info']=1000*timeout_secs
response = jsonify(output_data)
return response
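# The handler above runs the model in a separate process and polls a shared list
# until a result appears or the timeout elapses. A minimal, generic sketch of the
# same pattern is given below; `poll_for_result` and `work` are illustrative names
# and are not used elsewhere in this service.
def poll_for_result(work, timeout_s=3, poll_interval=0.05):
    """Run work(return_list) in a child process and wait up to timeout_s for a result."""
    manager = multiprocessing.Manager()
    return_list = manager.list()
    proc = multiprocessing.Process(target=work, args=(return_list,))
    proc.start()
    deadline = time.time() + timeout_s
    while time.time() < deadline:
        time.sleep(poll_interval)
        if len(return_list) > 0:
            proc.terminate()
            proc.join()
            return return_list[0]
    # Timed out: kill the worker and signal "no result" to the caller
    proc.terminate()
    proc.join()
    return None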
@app.route('/')
def index():
return "Index API"
# HTTP Errors handlers
@app.errorhandler(404)
def url_error(e):
return """
Wrong URL!
<pre>{}</pre>""".format(e), 404
@app.errorhandler(500)
def server_error(e):
return """
An internal error occurred: <pre>{}</pre>
See logs for full stacktrace.
""".format(e), 500
if __name__ == '__main__':
global timeout_secs
try:
timeout_secs = int(os.environ["TIMEOUT_SECS"])
app.logger.info("timout "+str(timeout_secs))
except:
timeout_secs=3
app.logger.info("default timout "+str(timeout_secs))
try:
mathreco_port=int(os.environ["MATHRECO_PORT"])
print ("port: ", mathreco_port)
app.logger.info("port "+str(mathreco_port))
except:
        mathreco_port = 8686
        app.logger.info("default port "+str(mathreco_port))
# This is used when running locally.
app.run(host='0.0.0.0',port=mathreco_port, debug=True)
|
test.py
|
import threading
import time
from flask import Flask
app = Flask(__name__)
def run_job():
while True:
print("Run recurring task")
#time.sleep(3)
@app.before_first_request
def activate_job():
print("b4")
@app.route("/")
def hello():
return "Hello World!"
if __name__ == "__main__":
thread = threading.Thread(target=run_job)
thread.start()
app.run()
|
BBCLIPS.py
|
# -*- coding: utf-8 -*-
'''
@author: arcra
'''
import time, threading, os
import Tkinter as tk
import argparse
import clipsFunctions
from clipsFunctions import clips, _clipsLock
import pyrobotics.BB as BB
from pyrobotics.messages import Command, Response
import GUI
from BBFunctions import assertQueue, ResponseReceived, CreateSharedVar, WriteSharedVar, SubscribeToSharedVar, RunCommand
defaultTimeout = 2000
defaultAttempts = 1
_sleepingLock = threading.Lock()
_sleeping = False
def setCmdTimer(t, cmd, cmdId):
t = threading.Thread(target=cmdTimerThread, args = (t, cmd, cmdId))
t.daemon = True
t.start()
return True
def cmdTimerThread(t, cmd, cmdId):
time.sleep(t/1000.0)
assertQueue.append('(BB_timer "{0}" {1})'.format(cmd, cmdId))
#clipsFunctions.Assert('(BB_timer "{0}" {1})'.format(cmd, cmdId))
def setTimer(t, sym):
t = threading.Thread(target=timerThread, args = (t, sym))
t.daemon = True
t.start()
return True
def timerThread(t, sym):
time.sleep(t/1000.0)
assertQueue.append('(BB_timer {0})'.format(sym))
#clipsFunctions.Assert('(BB_timer {0})'.format(sym))
def SendCommand(cmdName, params):
cmd = Command(cmdName, params)
BB.Send(cmd)
return cmd._id
def SendResponse(cmdName, cmd_id, result, response):
result = str(result).lower() not in ['false', '0']
r = Response(cmdName, result, response)
r._id = cmd_id
BB.Send(r)
def stop():
GUI._pausedLock.acquire()
GUI.gui.paused = True
GUI._pausedLock.release()
return True
def sleep(ms, sym):
t = threading.Thread(target=sleepingTimerThread, args = (ms, sym))
t.daemon = True
t.start()
return True
def sleepingTimerThread(t, sym):
    # update the module-level flag that mainLoop() reads
    global _sleeping
    _sleepingLock.acquire()
    _sleeping = True
    _sleepingLock.release()
    time.sleep(t/1000.0)
    _sleepingLock.acquire()
    _sleeping = False
    _sleepingLock.release()
def Initialize(params):
clips.Memory.Conserve = True
clips.Memory.EnvironmentErrorsEnabled = True
clips.SetExternalTraceback(True)
clips.DebugConfig.FactsWatched = params.watchfacts
clips.DebugConfig.GlobalsWatched = params.watchglobals
clips.DebugConfig.FunctionsWatched = params.watchfunctions
clips.DebugConfig.RulesWatched = params.watchrules
clips.RegisterPythonFunction(SendCommand)
clips.RegisterPythonFunction(SendResponse)
clips.RegisterPythonFunction(setCmdTimer)
clips.RegisterPythonFunction(setTimer)
clips.RegisterPythonFunction(CreateSharedVar)
clips.RegisterPythonFunction(WriteSharedVar)
clips.RegisterPythonFunction(SubscribeToSharedVar)
clips.RegisterPythonFunction(sleep)
clips.RegisterPythonFunction(stop)
clips.BuildGlobal('defaultTimeout', defaultTimeout)
clips.BuildGlobal('defaultAttempts', defaultAttempts)
filePath = os.path.dirname(os.path.abspath(__file__))
clips.BatchStar(os.path.join(filePath, 'CLIPS', 'utils.clp'))
clips.BatchStar(os.path.join(filePath, 'CLIPS', 'BB_interface.clp'))
clipsFunctions.PrintOutput()
GUI.use_gui = not params.nogui
if GUI.use_gui:
GUI.gui = GUI.clipsGUI()
else:
GUI.debug = params.debug
if params.file:
GUI.load_file(params.file)
BB.Initialize(params.port, functionMap = {'*':(RunCommand, True)}, asyncHandler = ResponseReceived)
print 'Waiting for BlackBoard to connect...'
BB.Start()
print 'BlackBoard connected!'
BB.SetReady()
print 'READY!'
def main():
parser = argparse.ArgumentParser(description="Runs an instance of BBCLIPS. (CLIPS interpreter embedded in python with BB communication.)")
parser.add_argument('-p', '--port', default = '2001', type=int, help='States the port number that this instance module should use.')
parser.add_argument('--nogui', default=False, action='store_const', const=True, help='Runs the program without the GUI.')
parser.add_argument('--debug', default=False, action='store_const', const=True, help='Show a CLIPS prompt as in an interactive CLIPS session.')
parser.add_argument('-n', '--steps', default=1, action='store', type=int, help='Number of steps to run when pressing enter on a debug session.')
parser.add_argument('-f', '--file', help='Specifies the file that should be loaded (mainly for nogui usage).')
watch_group = parser.add_argument_group('Watch options', 'Set the watch flags of the clips interpreter.')
watch_group.add_argument('--watchfunctions', '--wfunctions', '--wfunc', default=False, action='store_const', const=True, help='Enables the watch functions flag of the clips interpreter.')
watch_group.add_argument('--watchglobals', '--wglobals', '--wg', default=False, action='store_const', const=True, help='Enables the watch globals flag of the clips interpreter.')
watch_group.add_argument('--watchfacts', '--wfacts', '--wf', default=False, action='store_const', const=True, help='Enables the watch facts flag of the clips interpreter.')
watch_group.add_argument('--watchrules', '--wrules', '--wr', default=False, action='store_const', const=True, help='Enables the watch rules flag of the clips interpreter.')
log_group = parser.add_argument_group('Log options', 'Set the log level of the BBCLIPS module.')
log_group.add_argument('--log', default='ERROR', choices=['INFO', 'WARNING', 'ERROR'], help='Default is ERROR.')
args = parser.parse_args()
Initialize(args)
if args.nogui:
if args.debug:
s = raw_input('[CLIPS]>')
while s != '(exit)':
if s == '(facts)':
clips.PrintFacts()
elif s == '(rules)':
clips.PrintRules()
elif s == '(agenda)':
clips.PrintAgenda()
elif s == '':
assertEnqueuedFacts()
clipsFunctions.PrintOutput()
clipsFunctions.Run(args.steps)
clipsFunctions.PrintOutput()
else:
try:
_clipsLock.acquire()
#clips.SendCommand(s, True)
clips.Eval(s)
clipsFunctions.PrintOutput()
_clipsLock.release()
except:
print 'ERROR: Clips could not run the command.'
clipsFunctions.PrintOutput()
_clipsLock.release()
s = raw_input('[CLIPS]>')
else:
mainLoop()
else:
loop_thread = threading.Thread(target=mainLoop)
loop_thread.daemon = True
loop_thread.start()
tk.mainloop()
def assertEnqueuedFacts():
_clipsLock.acquire()
while True:
try:
f = assertQueue.popleft()
except:
break
asserted = False
while not asserted:
try:
clips.Assert(f)
asserted = True
except:
#print 'Fact: ' + str(f) + ' could not be asserted, trying again...'
pass
if not asserted:
                time.sleep(0.05)  # brief pause (50 ms) before retrying the assert
_clipsLock.release()
def mainLoop():
while True:
assertEnqueuedFacts()
_sleepingLock.acquire()
sleeping = _sleeping
_sleepingLock.release()
GUI._pausedLock.acquire()
paused = GUI.gui.paused
GUI._pausedLock.release()
if sleeping or paused or (GUI.use_gui and GUI.gui.runTimes):
clipsFunctions.PrintOutput()
continue
clipsFunctions.Run(2)
clipsFunctions.PrintOutput()
if __name__ == "__main__":
main()
|
main.py
|
from __future__ import absolute_import, print_function
import argparse
import os
import sys
import threading
import time
from os import listdir
from os.path import isfile, join
from sys import platform as _platform
from threading import Thread
import cv2
# pyfakewebcam is imported conditionally below (Linux only)
from PIL import Image, ImageTk
if sys.version_info.major >= 3:
from tkinter import SUNKEN, RAISED, Tk, PhotoImage, Button, Label
else:
from Tkinter import SUNKEN, RAISED, Tk, PhotoImage, Button, Label
_streaming = False
if _platform == "linux" or _platform == "linux2":
try:
import pyfakewebcam
_streaming = True
except ImportError:
print("Could not import pyfakewebcam")
### Function to set which sprite must be drawn
def put_sprite(num):
global SPRITES, BTNS
SPRITES[num] = 1 - SPRITES[num] # not actual value
if SPRITES[num]:
BTNS[num].config(relief=SUNKEN)
else:
BTNS[num].config(relief=RAISED)
# Draws a sprite over an image
# It uses the alpha channel to see which pixels need to be replaced
# Input: image, sprite: numpy arrays
# Output: resulting merged image
def draw_sprite(frame, sprite, x_offset, y_offset):
(h, w) = (sprite.shape[0], sprite.shape[1])
(imgH, imgW) = (frame.shape[0], frame.shape[1])
if y_offset + h >= imgH: # if sprite gets out of image in the bottom
sprite = sprite[0 : imgH - y_offset, :, :]
if x_offset + w >= imgW: # if sprite gets out of image to the right
sprite = sprite[:, 0 : imgW - x_offset, :]
if x_offset < 0: # if sprite gets out of image to the left
sprite = sprite[:, abs(x_offset) : :, :]
w = sprite.shape[1]
x_offset = 0
    # for each RGB channel
for c in range(3):
        # channel 4 is alpha: 255 is opaque, 0 is a transparent background
frame[y_offset : y_offset + h, x_offset : x_offset + w, c] = sprite[:, :, c] * (
sprite[:, :, 3] / 255.0
) + frame[y_offset : y_offset + h, x_offset : x_offset + w, c] * (
1.0 - sprite[:, :, 3] / 255.0
)
return frame
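# The blend above is standard per-pixel alpha compositing; written out for a single
# channel it is: out = sprite_c * (alpha / 255.0) + frame_c * (1 - alpha / 255.0).
# For example, with sprite_c=200, frame_c=100 and alpha=128 the blended value is
# about 200*0.50 + 100*0.50 = 150 (illustrative numbers only).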
# Returns the rectangles
# Img is a BGR image
# haar_cascade is a cv2.CascadeClassifier object
# the other inputs are the filter parameters
def apply_Haar_filter(img, haar_cascade, scaleFact=1.1, minNeigh=5, minSizeW=30):
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
features = haar_cascade.detectMultiScale(
gray,
scaleFactor=scaleFact,
minNeighbors=minNeigh,
minSize=(minSizeW, minSizeW),
flags=cv2.CASCADE_SCALE_IMAGE,
)
return features
# Adjust the given sprite to the head's width and position;
# if the sprite does not fit the screen at the top, it should be trimmed
def adjust_sprite2head(sprite, head_width, head_ypos):
(h_sprite, w_sprite) = (sprite.shape[0], sprite.shape[1])
factor = 1.0 * head_width / w_sprite
sprite = cv2.resize(
sprite, (0, 0), fx=factor, fy=factor
) # adjust to have the same width as head
(h_sprite, w_sprite) = (sprite.shape[0], sprite.shape[1])
y_orig = (
head_ypos - h_sprite
) # adjust the position of sprite to end where the head begins
if (
y_orig < 0
    ):  # check if the head is too close to the top of the image, so the sprite would not fit on the screen
sprite = sprite[abs(y_orig) : :, :, :] # in that case, we cut the sprite
y_orig = 0 # the sprite then begins at the top of the image
return (sprite, y_orig)
def apply_sprite(image, path2sprite, w, x, y):
sprite = cv2.imread(path2sprite, -1)
(sprite, y_final) = adjust_sprite2head(sprite, w, y)
image = draw_sprite(image, sprite, x, y_final)
def apply_sprite2feature(
image,
sprite_path,
haar_filter,
x_offset,
y_offset,
y_offset_image,
adjust2feature,
desired_width,
x,
y,
w,
h,
):
sprite = cv2.imread(sprite_path, -1)
(h_sprite, w_sprite) = (sprite.shape[0], sprite.shape[1])
xpos = x + x_offset
ypos = y + y_offset
factor = 1.0 * desired_width / w_sprite
sub_img = image[y + int(y_offset_image) : y + h, x : x + w, :]
feature = apply_Haar_filter(sub_img, haar_filter, 1.3, 10, 10)
if len(feature) != 0:
xpos, ypos = x, y + feature[0, 1] # adjust only to feature in y axis (eyes)
if adjust2feature:
size_mustache = 1.2 # how many times bigger than mouth
factor = 1.0 * (feature[0, 2] * size_mustache) / w_sprite
xpos = (
x + feature[0, 0] - int(feature[0, 2] * (size_mustache - 1) / 2)
) # centered respect to width
ypos = (
y + y_offset_image + feature[0, 1] - int(h_sprite * factor)
) # right on top
sprite = cv2.resize(sprite, (0, 0), fx=factor, fy=factor)
image = draw_sprite(image, sprite, int(xpos), int(ypos))
# Main loop where the OpenCV magic occurs
def cvloop(run_event, read_camera=0, virtual_camera=0):
global panelA
global SPRITES
dir_ = "./sprites/flyes/"
flies = [
f for f in listdir(dir_) if isfile(join(dir_, f))
] # image of flies to make the "animation"
i = 0
video_capture = cv2.VideoCapture(read_camera) # read from webcam
(x, y, w, h) = (0, 0, 10, 10) # whatever initial values
# Filters path
haar_faces = cv2.CascadeClassifier("./filters/haarcascade_frontalface_default.xml")
haar_eyes = cv2.CascadeClassifier("./filters/haarcascade_eye.xml")
haar_mouth = cv2.CascadeClassifier("./filters/Mouth.xml")
haar_nose = cv2.CascadeClassifier("./filters/Nose.xml")
stream_camera = None
while run_event.is_set(): # while the thread is active we loop
ret, image = video_capture.read()
if not ret:
print("Error reading camera, exiting")
break
if _streaming:
if stream_camera is None:
if virtual_camera:
h, w = image.shape[:2]
stream_camera = pyfakewebcam.FakeWebcam(
"/dev/video{}".format(virtual_camera), w, h
)
faces = apply_Haar_filter(image, haar_faces, 1.3, 5, 30)
for (x, y, w, h) in faces: # if there are faces
# take first face found (x,y,w,h) = (faces[0,0],faces[0,1],faces[0,2],faces[0,3])
# hat condition
if SPRITES[0]:
apply_sprite(image, "./sprites/hat.png", w, x, y)
# mustache condition
if SPRITES[1]:
                # empirically the mouth is at 2/3 of the face from the top
                # empirically the mustache width is half of the face's width (offset of w/4)
                # we look for mouths only in the lower half of the face (to avoid false positives)
apply_sprite2feature(
image,
"./sprites/mustache.png",
haar_mouth,
w / 4,
2 * h / 3,
h / 2,
True,
w / 2,
x,
y,
w,
h,
)
# glasses condition
if SPRITES[3]:
# empirically eyes are at 1/3 of the face from the top
apply_sprite2feature(
image,
"./sprites/glasses.png",
haar_eyes,
0,
h / 3,
0,
False,
w,
x,
y,
w,
h,
)
# flies condition
if SPRITES[2]:
# to make the "animation" we read each time a different image of that folder
# the images are placed in the correct order to give the animation impresion
apply_sprite(image, dir_ + flies[i], w, x, y)
i += 1
i = (
0 if i >= len(flies) else i
) # when done with all images of that folder, begin again
        # OpenCV represents images as BGR but PIL uses RGB, so we need to swap the channel order
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
if _streaming:
if virtual_camera:
stream_camera.schedule_frame(image)
        # converts to PIL format
image = Image.fromarray(image)
# Converts to a TK format to visualize it in the GUI
image = ImageTk.PhotoImage(image)
        # Update the image in the panel to show it
panelA.configure(image=image)
panelA.image = image
video_capture.release()
# Parser
parser = argparse.ArgumentParser()
parser.add_argument("--read_camera", type=int, default=0, help="Id to read camera from")
parser.add_argument(
"--virtual_camera",
type=int,
default=0,
help="If different from 0, creates a virtual camera with results on that id (linux only)",
)
args = parser.parse_args()
# Initialize GUI object
root = Tk()
root.title("Snap chat filters")
this_dir = os.path.dirname(os.path.realpath(__file__))
# Adds a custom logo
imgicon = PhotoImage(file=os.path.join(this_dir, "imgs", "icon.gif"))
root.tk.call("wm", "iconphoto", root._w, imgicon)
## Create 4 buttons and assign their corresponding functions to activate sprites
btn1 = Button(root, text="Hat", command=lambda: put_sprite(0))
btn1.pack(side="top", fill="both", expand="no", padx="10", pady="10")
btn2 = Button(root, text="Mustache", command=lambda: put_sprite(1))
btn2.pack(side="top", fill="both", expand="no", padx="10", pady="10")
btn3 = Button(root, text="Flies", command=lambda: put_sprite(2))
btn3.pack(side="top", fill="both", expand="no", padx="10", pady="10")
btn4 = Button(root, text="Glasses", command=lambda: put_sprite(3))
btn4.pack(side="top", fill="both", expand="no", padx="10", pady="10")
# Create the panel where webcam image will be shown
panelA = Label(root)
panelA.pack(padx=10, pady=10)
# Variable to control which sprite you want to visualize
SPRITES = [
0,
0,
0,
0,
] # hat, mustache, flies, glasses -> 1 is visible, 0 is not visible
BTNS = [btn1, btn2, btn3, btn4]
# Creates a thread where the magic occurs
run_event = threading.Event()
run_event.set()
action = Thread(target=cvloop, args=(run_event, args.read_camera, args.virtual_camera))
action.daemon = True
action.start()
# Function to close all properly, aka threads and GUI
def terminate():
global root, run_event, action
print("Closing thread opencv...")
run_event.clear()
time.sleep(1)
# action.join() # strangely in Linux this thread does not terminate properly, so .join never finishes
root.destroy()
print("All closed! Chao")
# When the GUI is closed it activates the terminate function
root.protocol("WM_DELETE_WINDOW", terminate)
root.mainloop() # creates loop of GUI
|
elements.py
|
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import str
import tkinter as tk
import tkinter.filedialog as tkfile
import sys, os, re
import threading
from . import objects
from forcebalance.output import getLogger, StreamHandler, INFO
class ObjectViewer(tk.LabelFrame):
"""Provides a general overview of the loaded calculation objects"""
def __init__(self,root, **kwargs):
tk.LabelFrame.__init__(self, root, text="Loaded Objects", **kwargs)
self.root = root
self.calculation=None
self.activeselection=None
# needUpdate signals when the list of objects needs to be refreshed
self.needUpdate=tk.BooleanVar()
self.needUpdate.trace('r',self.update)
# selectionchanged can be used by the root window to determine when
# the DetailViewer needs to be loaded with a new object
self.selectionchanged=tk.BooleanVar()
self.selectionchanged.set(True)
self.content = tk.Text(self, cursor="arrow", state="disabled", width=30, height=20)
self.scrollbar = tk.Scrollbar(self, orient=tk.VERTICAL)
# bind scrollbar actions
self.scrollbar.config(command = self.content.yview)
self.content['yscrollcommand']=self.scrollbar.set
# arrange and display list elements
self.content.pack(side=tk.LEFT, fill=tk.BOTH, expand=1)
self.content.update()
self.scrollbar.pack(side=tk.RIGHT,fill=tk.Y)
def open(self, filename):
"""Parse forcebalance input file and add referenced objects"""
if filename=='': return
self.calculation = objects.CalculationObject(filename)
self.update()
def clear(self):
self.calculation = None
self.update()
def run(self):
def runthread():
cwd = os.getcwd()
os.chdir(self.calculation['options'].opts['root'])
try: self.calculation.run()
except:
print("An Error occurred")
self.update()
if threading.active_count() < 2:
calculation_thread = threading.Thread(target=runthread)
calculation_thread.start()
else:
print("Calculation already running")
def update(self, *args):
"""Update the list of objects being displayed, based on the contents of self.calculation"""
self.content["state"]= "normal"
self.content.delete("1.0","end")
self['text']="Objects"
if self.calculation:
self['text'] += " - " + self.calculation['options']['name']
self.content.bind('<Button-1>', _bindEventHandler(self.select, object = [ self.calculation ]))
self.content.insert("end",' ')
l = tk.Label(self.content,text="General Options", bg="#FFFFFF")
self.content.window_create("end",window = l)
l.bind('<Button-1>', _bindEventHandler(self.select, object = [ self.calculation['options'] ]))
self.content.insert("end",'\n')
# Event handler to toggle whether targets list should be expanded
def toggle(e):
self.calculation['_expand_targets'] = not self.calculation['_expand_targets']
self.needUpdate.get()
targetLabel = tk.Label(self.content,text="Targets", bg="#FFFFFF")
targetLabel.bind("<Button-3>", toggle)
targetLabel.bind('<Button-1>', _bindEventHandler(self.select, object = self.calculation['targets']))
if self.calculation['_expand_targets']:
self.content.insert("end",' ')
self.content.window_create("end", window = targetLabel)
self.content.insert("end",'\n')
for target in self.calculation['targets']:
self.content.insert("end",' ')
l=tk.Label(self.content, text=target['name'], bg="#FFFFFF")
self.content.window_create("end", window = l)
self.content.insert("end",'\n')
l.bind('<Button-1>', _bindEventHandler(self.select, object=[ target ]))
else:
self.content.insert("end",'+')
self.content.window_create("end", window = targetLabel)
self.content.insert("end",'\n')
self.content.insert("end",' ')
l=tk.Label(self.content, text="Forcefield", bg="#FFFFFF")
self.content.window_create("end", window = l)
l.bind('<Button-1>', _bindEventHandler(self.select, object=[ self.calculation['forcefield'] ]))
self.content.insert("end",'\n')
if self.calculation["result"]:
self.content.insert("end",' ')
l=tk.Label(self.content, text="Result", bg="#FFFFFF")
self.content.window_create("end", window = l)
l.bind('<Button-1>', _bindEventHandler(self.select, object=[ self.calculation["result"] ]))
self.content.insert("end",'\n\n')
self.content["state"]="disabled"
def select(self, e, object):
for widget in self.content.winfo_children():
widget["relief"]=tk.FLAT
e.widget["relief"]="solid"
if type(object) is not list: self.activeselection=[ object ]
else: self.activeselection=object
self.selectionchanged.get() # reading this variable triggers a refresh
def scrollUp(self, e):
self.content.yview('scroll', -2, 'units')
def scrollDown(self, e):
self.content.yview('scroll', 2, 'units')
class DetailViewer(tk.LabelFrame):
"""Shows detailed properties of the currently selected object (as defined in
and ObjectViewer)"""
def __init__(self, root, opts='', **kwargs):
# initialize variables
self.root = root
self.printAll = tk.IntVar()
self.printAll.set(False)
self.currentObject = None # keep current object in case view needs refreshing
self.currentElement= None # currently selected element within current object
# Viewer GUI elements
tk.LabelFrame.__init__(self, root, text="Details", **kwargs)
self.content = tk.Text(self,cursor="arrow",state="disabled", height=20)
self.content.tag_config("error", foreground="red")
self.scrollbar = tk.Scrollbar(self, orient=tk.VERTICAL)
self.helptext = tk.Text(self.root, width=70, state="disabled", bg="#F0F0F0", wrap=tk.WORD)
# bind scrollbar actions
self.scrollbar.config(command = self.content.yview)
self.content['yscrollcommand']=self.scrollbar.set
self.scrollbar.pack(side=tk.RIGHT,fill=tk.Y)
self.root.bind_class("scrollable", "<Button-4>", self.scrollUp)
self.root.bind_class("scrollable", "<Button-5>", self.scrollDown)
# arrange and display list elements
self.content.pack(side=tk.LEFT, fill=tk.BOTH, expand=1)
self.content.update()
# update when printAll variable is changed
self.printAll.trace('w', lambda *x : self.load())
def load(self,newObject=None):
"""update current object if a new one is passed in. Then clear existing text and
replace it with whatever the current object says to write"""
if newObject:
self.currentObject = newObject
self['text']="Details"
self.content["state"]="normal"
self.content.delete("1.0","end")
if self.currentObject:
if len(self.currentObject) ==1: # if there is an object to display and it is not a collection
self['text']+=" - %s" % self.currentObject[0]['name']
else:
self['text']+=" - %d Configured Targets" % len(self.currentObject)
try:
for object in self.currentObject:
self.populate(object)
except:
self.content.insert("end", "Error trying to display <%s %s>\n" % (self.currentObject[0]['type'], self.currentObject[0]['name']), "error")
from traceback import format_exc
self.content.insert("end", format_exc(), "error")
self.content["state"]="disabled"
def populate(self, object):
"""Populate the view with information in displayText argument"""
# ask object to tell us how to display it
displayText = object.display(self.printAll.get())
# if object provides string back, just print it to the content area
if type(displayText)==str:
self.content.insert("end", displayText)
# if object provides dictionary, iterate through key value pairs and organize using tk.Label objects
if type(displayText)==dict:
for key in displayText.keys():
frame = tk.Frame(self.content)
frame.bindtags((key, "scrollable"))
keylabel = tk.Label(frame, text=key, bg="#FFFFFF", padx=0, pady=0)
keylabel.bindtags((key, "scrollable"))
separator = tk.Label(frame, text=" : ", bg="#FFFFFF", padx=0, pady=0)
separator.bindtags((key, "scrollable"))
valuelabel = tk.Label(frame, text= str(displayText[key]), bg="#FFFFFF", padx=0, pady=0)
valuelabel.bindtags((key, "scrollable"))
keylabel.pack(side=tk.LEFT)
separator.pack(side=tk.LEFT)
valuelabel.pack(side=tk.LEFT)
self.content.window_create("end", window = frame)
self.content.insert("end", '\n')
# tuple is used to separate out options set at default values
# (<options dictionary>, <default options dictionary>)
if type(displayText)==tuple:
for key in displayText[0].keys():
frame = tk.Frame(self.content)
frame.bindtags((key, "scrollable"))
keylabel = tk.Label(frame, text=key, bg="#FFFFFF", padx=0, pady=0)
keylabel.bindtags((key, "scrollable"))
separator = tk.Label(frame, text=" : ", bg="#FFFFFF", padx=0, pady=0)
separator.bindtags((key, "scrollable"))
valuelabel = tk.Label(frame, text= str(displayText[0][key]), bg="#FFFFFF", padx=0, pady=0)
valuelabel.bindtags((key, "scrollable"))
keylabel.pack(side=tk.LEFT)
separator.pack(side=tk.LEFT)
valuelabel.pack(side=tk.LEFT)
self.content.window_create("end", window = frame)
self.content.insert("end", '\n')
# right click help popup
self.root.bind_class(key, "<Button-3>", _bindEventHandler(self.showHelp, object = object, option=key))
if self.printAll.get():
self.content.insert("end", "\n--- Default Values ---\n")
for key in displayText[1].keys():
frame = tk.Frame(self.content)
frame.bindtags((key, "scrollable"))
keylabel = tk.Label(frame, text=key, bg="#FFFFFF", padx=0, pady=0)
keylabel.bindtags((key, "scrollable"))
separator = tk.Label(frame, text=" : ", bg="#FFFFFF", padx=0, pady=0)
separator.bindtags((key, "scrollable"))
valuelabel = tk.Label(frame, text= str(displayText[1][key]), bg="#FFFFFF", padx=0, pady=0)
valuelabel.bindtags((key, "scrollable"))
keylabel.pack(side=tk.LEFT)
separator.pack(side=tk.LEFT)
valuelabel.pack(side=tk.LEFT)
self.content.window_create("end", window = frame)
self.content.insert("end", '\n')
self.root.bind_class(key, "<Button-3>", _bindEventHandler(self.showHelp, object = object, option=key))
self.content.insert("end",'\n')
def clear(self):
"""Clear the current object and reload a blank page"""
self.currentObject=None
self.load()
def showHelp(self, e, object, option):
"""Update and display help window showing option documentation string"""
self.helptext["state"]="normal"
self.helptext.delete("1.0","end")
# get message and calculate how high window should be
helpmessage = object.getOptionHelp(option)
height=0
for line in object.getOptionHelp(option).splitlines():
height += 1 + int(len(line)/70)
self.helptext.insert("end", helpmessage)
self.helptext['height']=height
self.helptext.place(x=e.x_root-self.root.winfo_x(), y=e.y_root-self.root.winfo_y())
self.root.bind("<Motion>", lambda e : self.helptext.place_forget())
self.root.bind("<Button>", lambda e : self.helptext.place_forget())
def scrollUp(self, e):
self.content.yview('scroll', -2, 'units')
def scrollDown(self, e):
self.content.yview('scroll', 2, 'units')
class ConsoleViewer(tk.LabelFrame):
"""Tries to emulate a terminal by displaying standard output"""
def __init__(self, root, **kwargs):
tk.LabelFrame.__init__(self, root, text="Console", **kwargs)
self.console = tk.Text(self,
state=tk.DISABLED,
cursor="arrow",
fg="#FFFFFF",
bg="#000000")
self.console.pack(fill=tk.BOTH, expand=1)
# console colors corresponding to ANSI escape sequences
self.console.tag_config("0", foreground="white", background="black")
#self.console.tag_config("1") # make text bold
self.console.tag_config("44", background="blue")
self.console.tag_config("91", foreground="red")
self.console.tag_config("92", foreground="green")
self.console.tag_config("93", foreground="yellow")
self.console.tag_config("94", foreground="blue")
self.console.tag_config("95", foreground="purple")
getLogger("forcebalance").addHandler(ConsolePaneHandler(self))
getLogger("forcebalance").setLevel(INFO)
# scroll to bottom of text when widget is resized
self.bind("<Configure>", lambda e: self.console.yview(tk.END))
## we implement write and flush so the console viewer
# can serve as a drop in replacement for sys.stdout
def write(self, input, tags="0"):
self.console['state']=tk.NORMAL
# processing of input
input = re.sub("\r","\n", input)
self.console.insert(tk.END, input, tags)
self.console.yview(tk.END)
self.console['state']=tk.DISABLED
def flush(self):
pass # does nothing since messages are sent to the console immediately on write
def clear(self):
self.console['state']=tk.NORMAL
self.console.delete(1.0, tk.END)
self.console['state']=tk.DISABLED
class ConsolePaneHandler(StreamHandler):
def __init__(self, console):
super(ConsolePaneHandler, self).__init__()
self.console = console
self.color = ["0"]
def emit(self, record):
# split messages by looking for terminal escape sequences
message = re.split("(\x1b\[[01]?;?[0-9]{1,2};?[0-9]{,2}m)", record.getMessage())
for section in message:
# if we get a new escape sequence, assign self.color to new color codes
# else write text using the current color codes
if section[0] == "\x1b":
self.color = tuple(section[2:-1].split(';'))
else:
self.console.write(section, tags=self.color)
self.flush()
def _bindEventHandler(handler, **kwargs):
"""Creates an event handler by taking a function that takes many arguments
and using kwargs to create a function that only takes in one argument"""
def f(e):
return handler(e, **kwargs)
return f
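# Hedged usage note: this helper adapts a multi-argument callback to Tk's
# single-argument event protocol, e.g.
#   label.bind('<Button-1>', _bindEventHandler(viewer.select, object=[target]))
# where `viewer` and `target` stand in for an ObjectViewer instance and a target
# object, as used earlier in this module.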
|
stepper_motor.py
|
#!/usr/bin/env python
import time
import sys
import Queue
import threading
import RPi.GPIO as GPIO
import config
import led
import gpio_lock
steps_queue = Queue.Queue()
change_notify = None
def set_motor_input(i1, i2, i3, i4):
GPIO.output(config.STEP_MOTOR_IN1, i1)
GPIO.output(config.STEP_MOTOR_IN2, i2)
GPIO.output(config.STEP_MOTOR_IN3, i3)
GPIO.output(config.STEP_MOTOR_IN4, i4)
def forward_one_step(sleep_time):
set_motor_input(1, 1, 0, 0)
time.sleep(sleep_time)
set_motor_input(0, 1, 1, 0)
time.sleep(sleep_time)
set_motor_input(0, 0, 1, 1)
time.sleep(sleep_time)
set_motor_input(1, 0, 0, 1)
time.sleep(sleep_time)
def backward_one_step(sleep_time):
set_motor_input(1, 0, 0, 1)
time.sleep(sleep_time)
set_motor_input(0, 0, 1, 1)
time.sleep(sleep_time)
set_motor_input(0, 1, 1, 0)
time.sleep(sleep_time)
set_motor_input(1, 1, 0, 0)
time.sleep(sleep_time)
def forward(step):
global steps_queue
steps_queue.put(step)
def backward(step):
global steps_queue
steps_queue.put(-step)
def do_steps():
global steps_queue
if steps_queue.qsize() == 0:
        return
steps = steps_queue.get()
if (steps > 0):
step_fun = forward_one_step
n = steps
else:
step_fun = backward_one_step
n = -steps
freq = led.get_frequency(config.LED_PORCH)
dc = led.get_duty_cycle(config.LED_PORCH)
status = led.get_status(config.LED_PORCH)
led.turn_on(config.LED_PORCH, 2, 50)
gpio_lock.acquire()
for i in range(0, n):
step_fun(0.01)
set_motor_input(0, 0, 0, 0)
gpio_lock.release()
if status == 'on':
led.turn_on(config.LED_PORCH, freq, dc)
else:
led.turn_off(config.LED_PORCH)
status_notify()
def status_notify():
if change_notify != None:
change_notify('door')
def demon_thread():
while True:
do_steps()
time.sleep(0.1)
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(config.STEP_MOTOR_IN1, GPIO.OUT)
GPIO.setup(config.STEP_MOTOR_IN2, GPIO.OUT)
GPIO.setup(config.STEP_MOTOR_IN3, GPIO.OUT)
GPIO.setup(config.STEP_MOTOR_IN4, GPIO.OUT)
set_motor_input(0, 0, 0, 0)
t = threading.Thread(target=demon_thread,args=())
t.setDaemon(True)
t.start()
if __name__ == '__main__':
forward(100)
time.sleep(2)
backward(100)
# t.join() # can not terminate by ctrl+c...
while True:
time.sleep(2)
|
8.flask_multiple_session_impliment.py
|
import subprocess
from selenium.webdriver import Chrome
import pywebio
import template
import time
import util
from pywebio.input import *
from pywebio.output import *
from pywebio.utils import to_coroutine, run_as_function
def target():
template.basic_output()
template.background_output()
run_as_function(template.basic_input())
actions(buttons=['Continue'])
template.background_input()
async def async_target():
template.basic_output()
await template.coro_background_output()
await to_coroutine(template.basic_input())
await actions(buttons=['Continue'])
await template.coro_background_input()
def test(server_proc: subprocess.Popen, browser: Chrome):
template.test_output(browser)
time.sleep(1)
template.test_input(browser)
time.sleep(1)
template.save_output(browser, '8.flask_multiple_session_impliment_p1.html')
browser.get('http://localhost:8080/io2?_pywebio_debug=1&_pywebio_http_pull_interval=400')
template.test_output(browser)
time.sleep(1)
template.test_input(browser)
time.sleep(1)
template.save_output(browser, '8.flask_multiple_session_impliment_p2.html')
def start_test_server():
pywebio.enable_debug()
from flask import Flask, send_from_directory
from pywebio.platform.flask import webio_view, run_event_loop
from pywebio import STATIC_PATH
import threading
import logging
app = Flask(__name__)
app.add_url_rule('/io', 'webio_view', webio_view(target, cdn=False), methods=['GET', 'POST', 'OPTIONS'])
app.add_url_rule('/io2', 'webio_view_async_target', webio_view(async_target, cdn=False), methods=['GET', 'POST', 'OPTIONS'])
@app.route('/')
@app.route('/<path:static_file>')
def serve_static_file(static_file='index.html'):
return send_from_directory(STATIC_PATH, static_file)
threading.Thread(target=run_event_loop, daemon=True).start()
logging.getLogger('werkzeug').setLevel(logging.WARNING)
app.run(port=8080, host='127.0.0.1')
if __name__ == '__main__':
util.run_test(start_test_server, test, address='http://localhost:8080/io?_pywebio_debug=1&_pywebio_http_pull_interval=400')
|
display_ssd1306.py
|
#!/usr/bin/env python3
'''
*****************************************
PiFire Display Interface Library
*****************************************
Description:
This library supports using
the SSD1306 display with 64Hx128W resolution.
This module utilizes Luma.OLED to interface
with this display.
*****************************************
'''
'''
Imported Libraries
'''
import time
import threading
import socket
from luma.core.interface.serial import i2c
from luma.core.render import canvas
from luma.oled.device import ssd1306
from PIL import Image, ImageDraw, ImageFont
'''
Display class definition
'''
class Display:
def __init__(self, buttonslevel='HIGH', rotation=0, units='F'):
# Init Global Variables and Constants
self.units = units
self.displayactive = False
self.in_data = None
self.status_data = None
self.displaytimeout = None
self.displaycommand = 'splash'
# Init Display Device, Input Device, Assets
self._init_globals()
self._init_assets()
self._init_display_device()
def _init_globals(self):
# Init constants and variables
self.WIDTH = 128
self.HEIGHT = 64
self.SIZE = (128, 64)
self.DEVICE_MODE = 1
def _init_display_device(self):
# Init Device
self.serial = i2c(port=1, address=0x3C)
self.device = ssd1306(self.serial)
# Setup & Start Display Loop Thread
display_thread = threading.Thread(target=self._display_loop)
display_thread.start()
def _display_loop(self):
'''
Main display loop
'''
while True:
if self.displaytimeout:
if time.time() > self.displaytimeout:
self.displaycommand = 'clear'
if self.displaycommand == 'clear':
self.displayactive = False
self.displaytimeout = None
self.displaycommand = None
self._display_clear()
if self.displaycommand == 'splash':
self.displayactive = True
self._display_splash()
self.displaytimeout = time.time() + 3
self.displaycommand = None
time.sleep(3) # Hold splash screen for 3 seconds
if self.displaycommand == 'text':
self.displayactive = True
self._display_text()
self.displaycommand = None
self.displaytimeout = time.time() + 10
if self.displaycommand == 'network':
self.displayactive = True
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
networkip = s.getsockname()[0]
if (networkip != ''):
self._display_network(networkip)
self.displaytimeout = time.time() + 30
self.displaycommand = None
else:
self.display_text("No IP Found")
if self.displayactive:
if not self.displaytimeout:
if (self.in_data is not None) and (self.status_data is not None):
self._display_current(self.in_data, self.status_data)
time.sleep(0.1)
'''
============== Graphics / Display / Draw Methods =============
'''
def _init_assets(self):
self._init_splash()
def _init_splash(self):
self.splash = Image.open('color-boot-splash.png') \
.transform(self.SIZE, Image.AFFINE, (1, 0, 0, 0, 1, 0), Image.BILINEAR) \
.convert("L") # \ .convert(self.DEVICE_MODE)
self.splashSize = self.splash.size
def _display_clear(self):
self.device.clear()
def _display_splash(self):
screen = Image.new('1', (self.WIDTH, self.HEIGHT), color=0)
screen.paste(self.splash, (32, 0, self.splashSize[0]+32, self.splashSize[1]))
self.device.display(screen)
def _display_text(self):
with canvas(self.device) as draw:
font = ImageFont.truetype("impact.ttf", 42)
(font_width, font_height) = font.getsize(self.displaydata)
draw.text((128//2 - font_width//2, 64//2 - font_height//2), self.displaydata, font=font, fill=255)
def _display_network(self, networkip):
pass
def _display_current(self, in_data, status_data):
with canvas(self.device) as draw:
# Grill Temperature (Large Centered)
if(self.units == 'F'):
font = ImageFont.truetype("impact.ttf", 42)
else:
font = ImageFont.truetype("impact.ttf", 38)
text = str(in_data['GrillTemp'])[:5]
(font_width, font_height) = font.getsize(text)
draw.text((128//2 - font_width//2,0), text, font=font, fill=255)
# Active Outputs F = Fan, I = Igniter, A = Auger (Upper Left)
font = ImageFont.truetype("FA-Free-Solid.otf", 24)
if(status_data['outpins']['fan']==0):
text = '\uf863'
draw.text((0, 0), text, font=font, fill=255)
if(status_data['outpins']['igniter']==0):
text = '\uf46a'
(font_width, font_height) = font.getsize(text)
draw.text((0, 5 + (64//2 - font_height//2)), text, font=font, fill=255)
if(status_data['outpins']['auger']==0):
text = '\uf101'
(font_width, font_height) = font.getsize(text)
draw.text((128 - font_width, 5 + (64//2 - font_height//2)), text, font=font, fill=255)
# Current Mode (Bottom Left)
font = ImageFont.truetype("trebuc.ttf", 18)
text = status_data['mode'] + ' Mode'
(font_width, font_height) = font.getsize(text)
draw.text((128//2 - font_width//2, 64 - font_height), text, font=font, fill=255)
# Notification Indicator (Upper Right)
font = ImageFont.truetype("FA-Free-Solid.otf", 24)
text = ' '
for item in status_data['notify_req']:
if status_data['notify_req'][item] == True:
text = '\uf0f3'
(font_width, font_height) = font.getsize(text)
draw.text((128 - font_width, 0), text, font=font, fill=255)
'''
================ Externally Available Methods ================
'''
def display_status(self, in_data, status_data):
'''
- Updates the current data for the display loop, if in a work mode
'''
self.units = status_data['units']
self.displayactive = True
self.in_data = in_data
self.status_data = status_data
def display_splash(self):
'''
- Calls Splash Screen
'''
self.displaycommand = 'splash'
def clear_display(self):
'''
- Clear display and turn off backlight
'''
self.displaycommand = 'clear'
def display_text(self, text):
'''
- Display some text
'''
self.displaycommand = 'text'
self.displaydata = text
def display_network(self):
'''
- Display Network IP QR Code
'''
self.displaycommand = 'network'
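'''
Minimal usage sketch (assumption: an SSD1306 on I2C bus 1 at address 0x3C and the
font/asset files referenced above are available). The in_data/status_data shapes
below are inferred from _display_current() and are illustrative only.
'''
if __name__ == '__main__':
    example_in_data = {'GrillTemp': 225}
    example_status_data = {
        'units': 'F',
        'mode': 'Smoke',
        'outpins': {'fan': 0, 'igniter': 1, 'auger': 1},
        'notify_req': {},
    }
    display = Display()
    display.display_status(example_in_data, example_status_data)
    time.sleep(5)
    display.clear_display()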
|
douyin01.py
|
# -*- coding:utf-8 -*-
import random
import time
from appium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from config import *
import multiprocessing
class DouYin(object):
def __init__(self, device_udid, driver_port):
        # Appium driver (desired capabilities) configuration
self.desired_caps = {
'platformName': PLANTFORM,
'platformVersion': PLANTFORM_VERSION,
'deviceName': device_udid,
            # 'udid': device_udid,  # used to distinguish between multiple devices
'automationName':'uiautomator2',
'appPackage': APP_PACKAGE,
'appActivity': APP_ACTIVITY,
'noReset': True,
# "unicodeKeyboard": True,
# "resetKeyboard": True
}
self.server = "http://192.168.56.1:{}/wd/hub".format(str(driver_port))
print(self.server)
print(self.desired_caps)
self.driver = webdriver.Remote(self.server, self.desired_caps)
time.sleep(20)
self.wait = WebDriverWait(self.driver, TIMEOUT)
def get_screen_size(self):
"""获取屏幕宽高度"""
x = self.driver.get_window_size()['width']
y = self.driver.get_window_size()['height']
return x, y
def enter_fans_list_page(self):
"""进入粉丝列表界面"""
# 方式1:通过联系人查找粉丝
# 点击消息
# message = self.wait.until(EC.element_to_be_clickable((
# By.NAME, '消息'
# )))
message = self.driver.find_element_by_name('消息')
message.click()
time.sleep(CLICK_TIME)
        # Tap "Contacts"
# contact = self.wait.until(EC.element_to_be_clickable((
# By.NAME, '联系人'
# )))
contact = self.driver.find_element_by_name('联系人')
contact.click()
time.sleep(CLICK_TIME)
        # Tap the "search user remark or nickname" input
search_nickname = self.wait.until(EC.element_to_be_clickable((
By.XPATH, '//android.widget.EditText[@resource-id="com.ss.android.ugc.aweme:id/cc2"]'
)))
search_nickname.click()
        search_nickname.send_keys('高考')  # search keyword ("gaokao", college entrance exam)
time.sleep(CLICK_TIME)
        # # Tap "Following"
# pay_attention = self.wait.until(EC.element_to_be_clickable((
# By.XPATH, '//android.widget.RelativeLayout[@resource-id="com.ss.android.ugc.aweme:id/ad8"]'
# )))
# pay_attention.click()
# time.sleep(CLICK_TIME)
        # # Friends list
# friends_list = self.wait.until(EC.element_to_be_clickable((
# By.XPATH, '//android.widget.TextView[@resource-id="com.ss.android.ugc.aweme:id/d16"]'
# )))
# friends_list.click()
# time.sleep(CLICK_TIME)
        # # Find friends
# find_friends = self.wait.until(EC.element_to_be_clickable((
# By.NAME, '发现好友'
# )))
# find_friends.click()
# time.sleep(CLICK_TIME)
        # # Search input box
# search_text = self.wait.until(EC.element_to_be_clickable((
# By.XPATH, '//android.widget.EditText[@resource-id="com.ss.android.ugc.aweme:id/a8d"]'
# )))
# search_text.click()
# search_text.send_keys('高考')
# time.sleep(CLICK_TIME)
        # # Search button
# search_s = self.wait.until(EC.element_to_be_clickable((
# By.XPATH, '//android.widget.TextView[@resource-id="com.ss.android.ugc.aweme:id/d6f"]'
# )))
# search_s.click()
# time.sleep(CLICK_TIME + 3)
        # # User profile page
# user = self.wait.until(EC.presence_of_element_located((By.XPATH, USER_XPATH)))
# user.click()
# time.sleep(CLICK_TIME)
        # # Fans list page
# fans = self.wait.until(EC.presence_of_element_located((
# By.XPATH, '//android.widget.TextView[@resource-id="com.ss.android.ugc.aweme:id/ak3"]'
# )))
# fans.click()
# time.sleep(CLICK_TIME)
def crawl_fans_info(self):
"""获取粉丝信息"""
size = self.get_screen_size()
x1 = int(size[0] * 0.5)
y1 = int(size[1] * 0.9)
y2 = int(size[1] * 0.15)
while True:
if "暂时没有更多了" in self.driver.page_source or "TA还没有粉丝" in self.driver.page_source:
break
self.driver.swipe(x1, y1, x1, y2)
time.sleep(random.randint(3, 4))
return_ = self.wait.until(EC.presence_of_element_located((
By.XPATH, '//android.widget.ImageView[@resource-id="com.ss.android.ugc.aweme:id/nj"]'
)))
return_.click()
return_.click()
self.wait.until(EC.presence_of_element_located((
By.XPATH, '//android.widget.EditText[@resource-id="com.ss.android.ugc.aweme:id/agq"]'
))).clear()
def main(self):
"""主函数"""
while True:
self.enter_fans_list_page()
# self.crawl_fans_info()
if __name__ == '__main__':
    # Process list
process_list = []
device_list = ['192.168.56.1:62001']
for device in range(len(device_list)):
driver_port = 4723 + 2 * device
douyin = DouYin(device_list[device], driver_port)
process_list.append(multiprocessing.Process(target=douyin.main))
for p1 in process_list:
p1.start()
for p2 in process_list:
p2.join()
|
VideoStream.py
|
#To make python 2 and python 3 compatible code
from __future__ import absolute_import
from threading import Thread
import sys
import cv2  # OpenCV imports the same way on Python 2 and Python 3
# import the Queue class from Python 3
if sys.version_info >= (3, 0):
from queue import Queue
# otherwise, import the Queue class for Python 2.7
else:
from Queue import Queue
# This class reads video frames in a separate thread and keeps only the latest frame in its queue, to be grabbed by another thread
class VideoStream(object):
def __init__(self, path, queueSize=3):
self.stream = cv2.VideoCapture(path)
self.stopped = False
self.Q = Queue(maxsize=queueSize)
def start(self):
# start a thread to read frames from the video stream
t = Thread(target=self.update, args=())
t.daemon = True
t.start()
return self
def update(self):
try:
while True:
if self.stopped:
return
if not self.Q.full():
(grabbed, frame) = self.stream.read()
# if the `grabbed` boolean is `False`, then we have
# reached the end of the video file
if not grabbed:
self.stop()
return
self.Q.put(frame)
#Clean the queue to keep only the latest frame
while self.Q.qsize() > 1:
self.Q.get()
except Exception as e:
print("got error: "+str(e))
def read(self):
return self.Q.get()
def more(self):
return self.Q.qsize() > 0
def stop(self):
self.stopped = True
def __exit__(self, exception_type, exception_value, traceback):
self.stream.release()
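# Minimal usage sketch; "example.mp4" is a placeholder path and is not part of
# the original module.
if __name__ == "__main__":
    import time
    vs = VideoStream("example.mp4").start()
    while not vs.stopped or vs.more():
        if vs.more():
            frame = vs.read()
            # process the latest available frame here (e.g. run detection on it)
        else:
            time.sleep(0.01)  # give the reader thread time to fetch the next frame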
|
storage.py
|
import base64
import datetime
import json
import requests
import sys
import threading
import time
import utils
global _bc,_fs,_fs_d,_fs_s,_fs_u
ORG_NAME="tmp-5y34hjweu"
REPO_NAME="_app_data2"
with open("server/token.dt","r") as f:
GITHUB_TOKEN=f.read().strip()
GITHUB_HEADERS="application/vnd.github.v3+json,application/vnd.github.mercy-preview+json"
_bc=None
_fs={}
_fs_d={}
_fs_s=[]
_fs_u=[]
_tl=threading.Lock()
def _as_path(fp):
return ("/" if len(fp)==0 or fp[0] not in "\\/" else "")+fp.lower().replace("\\","/")
def _add_dirs(fp):
global _fs_d
assert(fp[0]=="/")
dl=fp.split("/")[1:-1]
d="/"
i=0
while (True):
if (i==len(dl)):
if (d not in _fs_d):
_fs_d[d]={"d":[],"f":[fp]}
elif (fp not in _fs_d[d]["f"]):
_fs_d[d]["f"]+=[fp]
break
nd=(d if i>0 else "")+"/"+dl[i]
if (d not in _fs_d):
_fs_d[d]={"d":[nd],"f":[]}
elif (nd not in _fs_d[d]["d"]):
_fs_d[d]["d"]+=[nd]
d=nd
i+=1
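# Illustration (not part of the original module): after _add_dirs("/docs/readme.md")
# the directory index holds roughly
#   _fs_d["/"]     == {"d": ["/docs"], "f": []}
#   _fs_d["/docs"] == {"d": [], "f": ["/docs/readme.md"]}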
def _remove_dirs(fp):
global _fs_d
assert(fp[0]=="/")
dl=fp.split("/")[:-1]
d="/".join(dl)
if (fp in _fs_d[d]["f"]):
_fs_d[d]["f"].remove(fp)
if (len(_fs_d[d]["d"])==0 and len(_fs_d[d]["f"])==0):
for i in range(len(dl)-1,0,-1):
nd=("/" if i==1 else "/".join(dl[:i]))
del _fs_d[d]
if (d in _fs_d[nd]["d"]):
_fs_d[nd]["d"].remove(d)
if (len(_fs_d[nd]["d"])>0 or len(_fs_d[nd]["f"])>0):
break
d=nd
def _request(m="get",**kw):
kw["headers"]={**kw.get("headers",{}),"Authorization":f"token {GITHUB_TOKEN}","Accept":GITHUB_HEADERS,"User-Agent":"FileSystem API"}
r=getattr(requests,("get" if m=="raw" else m))(**kw)
if ("X-RateLimit-Remaining" in r.headers.keys() and r.headers["X-RateLimit-Remaining"]=="0"):
print(r.headers)
sys.exit(1)
    time.sleep(0.72)  # ~0.72 s between calls keeps usage under GitHub's 5000 requests/hour limit
if (m=="raw"):
return r
if (type(r.json())==dict and "message" in r.json().keys() and r.json()["message"]=="Server Error"):
print(r.json())
return None
return r.json()
def _read_fs(bt,fp="",_l=False):
global _fs
utils.print(f"Reading Directory '{_as_path(fp)}' (sha={bt})")
t=_request("get",url=f"https://api.github.com/repos/{ORG_NAME}/{REPO_NAME}/git/trees/{bt}")
if ("message" in t and t["message"]=="Not Found"):
return []
if (_l==False):
_tl.acquire()
for k in t["tree"]:
if (k["type"]=="blob"):
_fs[fp+"/"+k["path"].lower()]=[k["url"],None,True]
_add_dirs(fp+"/"+k["path"].lower())
elif (k["type"]=="tree"):
_read_fs(k["sha"],fp=fp+"/"+k["path"].lower(),_l=True)
else:
raise RuntimeError(f"Unknown File Type '{k['type']}'")
if (_l==False):
_tl.release()
return []
def _is_b(dt):
if (len(dt)==0):
return False
dt=dt[:4096]
r1=len(dt.translate(None,b"\t\r\n\f\b"+bytes(range(32,127))))/len(dt)
r2=len(dt.translate(None,bytes(range(127,256))))/len(dt)
if (r1>0.90 and r2>0.9):
return True
enc_u=False
try:
dt.decode(encoding="utf-8")
enc_u=True
except UnicodeDecodeError:
pass
if ((r1>0.3 and r2<0.05) or (r1>0.8 and r2>0.8)):
return (False if enc_u==True else True)
else:
return (True if enc_u==False and (b"\x00" in dt or b"\xff" in dt) else False)
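# Rough illustration of the heuristic above (not exhaustive): plain ASCII text such
# as b"hello world" is classified as text (returns False), while data containing NUL
# or 0xFF bytes that does not decode as UTF-8 is classified as binary (returns True).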
def _write_fs():
global _bc,_fs_u
while (True):
if (len(_fs_u)>0):
_tl.acquire()
bl=[]
for k in _fs_u:
if (k in _fs):
if (k not in _fs_s):
utils.print(f"Saving FileSystem File: '{k}'")
_fs[k][2]=True
dt=f"File too Large (size = {len(_fs[k][1])} b)"
b_sha=False
if (len(_fs[k][1])<=50*1024*1024):
b64=True
if (_is_b(_fs[k][1])==False):
try:
dt=str(_fs[k][1],"cp1252").replace("\r\n","\n")
b64=False
except UnicodeDecodeError:
pass
if (b64==True):
b_sha=True
dt=str(base64.b64encode(_fs[k][1]),"cp1252")
if (len(dt)>50*1024*1024):
b_sha=False
dt=f"File too Large (size = {len(dt)} b)"
else:
b=_request("post",url=f"https://api.github.com/repos/{ORG_NAME}/{REPO_NAME}/git/blobs",data=json.dumps({"content":dt,"encoding":"base64"}))
if (b is None):
raise RuntimeError(f"Error While creating Blob for File '{k}'")
else:
dt=b["sha"]
bl+=[({"path":k[1:],"mode":"100644","type":"blob","content":dt} if b_sha==False else {"path":k[1:],"mode":"100644","type":"blob","sha":dt})]
else:
if (k not in _fs_s):
utils.print(f"Deleting FileSystem File: '{k}'")
bl+=[{"path":k[1:],"mode":"100644","type":"blob","sha":None}]
_fs_u=[]
_bc=_request("post",url=f"https://api.github.com/repos/{ORG_NAME}/{REPO_NAME}/git/commits",data=json.dumps({"message":datetime.datetime.now().strftime("Commit %m/%d/%Y, %H:%M:%S"),"tree":_request("post",url=f"https://api.github.com/repos/{ORG_NAME}/{REPO_NAME}/git/trees",data=json.dumps({"base_tree":_bc["sha"],"tree":bl}))["sha"],"parents":[_bc["sha"]]}))
_request("patch",url=f"https://api.github.com/repos/{ORG_NAME}/{REPO_NAME}/git/refs/heads/main",data=json.dumps({"sha":_bc["sha"],"force":True}))
_tl.release()
time.sleep(150)
def listdir(fp):
fp=_as_path(fp)
if (fp not in _fs_d):
return ([],[])
return (_fs_d[fp]["d"][:],_fs_d[fp]["f"][:])
def exists(fp):
return (True if _as_path(fp) in _fs else False)
def set_silent(fp):
global _fs,_fs_s
fp=_as_path(fp)
_tl.acquire()
if (fp not in _fs):
_fs[fp]=[None,None,False]
_fs_s+=[fp]
_tl.release()
def read(fp):
global _fs
fp=_as_path(fp)
if (fp not in _fs):
raise RuntimeError(f"File '{fp}' not Found")
if (_fs[fp][1] is None):
_tl.acquire()
if (_fs[fp][0] is None):
_fs[fp][1]=b""
else:
_fs[fp][1]=base64.b64decode(_request("get",url=_fs[fp][0])["content"])
_tl.release()
return _fs[fp][1]
def write(fp,dt):
global _fs,_fs_u
if (type(dt)!=bytes):
raise TypeError(f"Expected 'bytes', found '{type(dt).__name__}'")
if (_is_b(dt)==True):
dt=dt.replace(b"\r\n",b"\n")
fp=_as_path(fp)
_tl.acquire()
_add_dirs(fp)
if (fp not in _fs or _fs[fp][1]!=dt and fp not in _fs_u):
_fs_u+=[fp]
if (fp not in _fs):
_fs[fp]=[None,dt,False]
else:
_fs[fp][1]=dt
_tl.release()
def delete(fp):
global _fs,_fs_u
fp=_as_path(fp)
_tl.acquire()
if (fp in _fs):
if (_fs[fp][2]==True):
if (fp not in _fs_u):
_fs_u+=[fp]
del _fs[fp]
_remove_dirs(fp)
_tl.release()
_bc=_request("get",url=f"https://api.github.com/repos/{ORG_NAME}/{REPO_NAME}/branches/main")["commit"]
_read_fs(_bc["commit"]["tree"]["sha"])
threading.Thread(target=_write_fs).start()
|
utils.py
|
import ast, json, threading, platform, os
from http.server import SimpleHTTPRequestHandler
from enum import Enum
try:
from SimpleWebSocketServer import SimpleWebSocketServer, WebSocket
except ImportError:
SimpleWebSocketServer = object
WebSocket = object
class Watchable(Enum):
Source = "Source"
Font = "Font"
Library = "Library"
Generic = "Generic"
class EditAction(Enum):
Newline = "newline"
Tokenize = "tokenize"
TokenizeLine = "tokenize_line"
NewSection = "newsection"
Capitalize = "capitalize"
SelectWorkarea = "select_workarea"
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
class SimpleEcho(WebSocket):
def handleMessage(self):
if "webviewer" in self.data:
data = json.loads(self.data)
if data.get("webviewer") == True:
self.webviewer = True
#print("INCOMING!", self, self.address, self.data)
self.messages.append(self.data)
def handleConnected(self):
#if not str(self.address).startswith("('::ffff"):
# print(self.address, "connected")
self.messages = []
self.webviewer = False
def handleClose(self):
#print(self.address, "closed")
pass
def echo_server(port):
return SimpleWebSocketServer('', port, SimpleEcho)
def run_echo_server(port, name):
try:
server = echo_server(port)
daemon = threading.Thread(name=name,
target=server.serveforever)
        daemon.daemon = True
daemon.start()
print(f"<coldtype: websocket-opened@localhost:{port}>")
return server
except OSError:
return None
def bytesto(bytes):
r = float(bytes)
for i in range(2):
r = r / 1024
return(r)
def chunks(lst, n):
"""Yield successive n-sized chunks from lst."""
for i in range(0, len(lst), n):
yield lst[i:i + n]
def file_and_line_to_def(filepath, lineno):
# https://julien.danjou.info/finding-definitions-from-a-source-file-and-a-line-number-in-python/
candidate = None
for item in ast.walk(ast.parse(filepath.read_text())):
if isinstance(item, (ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef)):
if item.lineno > lineno:
continue
if candidate:
distance = lineno - item.lineno
if distance < (lineno - candidate.lineno):
candidate = item
else:
candidate = item
if candidate:
return candidate.name
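# Hedged usage sketch: given a pathlib.Path to a source file and a line number, the
# function above returns the name of the def/class whose definition starts closest
# before that line, e.g.
#   file_and_line_to_def(Path("renderer.py"), 120)  # -> "Renderer" (illustrative only)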
def play_sound(name="Pop"):
if platform.system() == "Darwin":
os.system(f"afplay /System/Library/Sounds/{name}.aiff")
|
gpu.py
|
import io
import os
import time
import pandas as pd
import subprocess
import threading
import psutil
class GPUMonitor(object):
def __init__(self, interval=5):
"""
Constructor
:type interval: int
:param interval: Check interval, in seconds
"""
self.interval = interval
thread = threading.Thread(target=self.NvidiaDaemon, args=())
thread.daemon = True # Daemonize thread
thread.start() # Start the execution
def __GetProcessAttributes(self, pids):
processAttributes = []
for pid in pids:
proc = psutil.Process(pid)
processAttributes.append({'pid': pid,
'name': proc.name(),
'exe': proc.exe(),
'cmdline': proc.cmdline(),
'status': proc.status()})
processAttributes = pd.DataFrame(processAttributes)
return processAttributes
def __MonitorNvidiaGPU(self):
'Function that monitors Running Processes on Nvidia GPU'
'''
Returns a DataFrame (pid, process_name, cmdline, used_gpu_memory, utilization)
'''
getGPUProcesses = 'nvidia-smi pmon -c 1 -s mu'
proc = subprocess.Popen(getGPUProcesses, shell=True, stdout=subprocess.PIPE)
output = proc.stdout.read().decode('utf-8').split('\n')
# Remove the line describing the units of each feature
del output[1]
# convert to csv format...
output[0] = output[0].replace('# ', '')
output = [line.strip() for line in output]
output = [','.join(line.split()) for line in output]
# ...and drop the command feature (will be added later)...
output = [','.join(line.split(',')[:8]) for line in output]
# ...and convert to DataFrame
procsGPU = pd.read_csv(io.StringIO('\n'.join(output)), header=0)
procsGPUFeats = self.__GetProcessAttributes(procsGPU.pid.values)
return procsGPU.merge(procsGPUFeats, on='pid', how='inner')
def NvidiaDaemon(self):
""" Method that runs forever in the thread """
while True:
gpuProcs = self.__MonitorNvidiaGPU()
# if file does not exist write header
if not os.path.isfile('nvidia_log.csv'):
gpuProcs.to_csv('nvidia_log.csv', index=False)
print("\nWriting Records.\n")
# else it exists so append without writing the header
else:
gpuProcs.to_csv('nvidia_log.csv', mode='a', header=False, index=False)
print("\nAppending Records.\n")
time.sleep(self.interval)
'''
GPU Monitor Toy Example
'''
if __name__ == '__main__':
from tqdm import tqdm
''' How many seconds you want to Fetch Data '''
totalTime = 20
gpuMonitor = GPUMonitor(interval=5)
    # run the monitoring loop for totalTime seconds
for i in tqdm(range(totalTime), ascii=True, desc='Monitoring GPU...'):
time.sleep(1)
|
timeout.py
|
"""An implementation of the decorator design pattern for enforcing timeout.
Typical usage example:
@timeout(10)
def function_that_will_return_none_after_ten_seconds():
...
"""
from functools import wraps
from time import sleep
from kthread import KThread
def timeout(seconds):
"""A decorator which enforces a timeout on the decorated function.
Forces the function it decorates to return None after a specified number
of seconds.
Args:
seconds: The number of seconds after which the function should return None.
Returns:
The decorator. When the decorated function is executed, the return value
will either be the return value of that function if completed
        successfully within the time allotted, or else None.
"""
def decorator(func):
@wraps(func)
def wrapped_func(*args, **kwargs):
res = [] # When the thread completes successfully, this contains the result of func
            terminating_thread = False  # This is set to True right before the thread is terminated
def thread_func():
try:
res.append(func(*args, **kwargs))
except:
# Only raise errors that were not the result of killing the thread
if not terminating_thread:
raise
# Create a new thread
thread = KThread(target=thread_func)
thread.start()
# Join the thread until it exits or the timeout is up
thread.join(seconds)
# If the thread is still alive after the join call exits, kill it
if thread.is_alive():
terminating_thread = True
thread.terminate()
# If res contains something (meaning the function completed successfully), return it
if len(res) > 0:
return res[0]
# Otherwise, return None
return None
return wrapped_func
return decorator
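# Minimal usage sketch (assumes the ``kthread`` package is installed; mirrors the
# docstring at the top of this module):
#
#   @timeout(2)
#   def slow():
#       sleep(10)
#       return "done"
#
#   slow()  # returns None after roughly 2 seconds instead of blocking for 10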
|
lib.py
|
# -*- coding: utf-8 -*-
"""Utility functions used for Avalon - Harmony integration."""
import subprocess
import threading
import os
import random
import zipfile
import sys
import importlib
import shutil
import logging
import contextlib
import json
import signal
import time
from uuid import uuid4
from Qt import QtWidgets
from .server import Server
from openpype.tools import workfiles
from openpype.tools.tray_app.app import ConsoleTrayApp
from ..toonboom import setup_startup_scripts, check_libs
self = sys.modules[__name__]
self.server = None
self.pid = None
self.application_path = None
self.callback_queue = None
self.workfile_path = None
self.port = None
# Setup logging.
self.log = logging.getLogger(__name__)
self.log.setLevel(logging.DEBUG)
def signature(postfix="func") -> str:
"""Return random ECMA6 compatible function name.
Args:
postfix (str): name to append to random string.
Returns:
str: random function name.
"""
return "f{}_{}".format(str(uuid4()).replace("-", "_"), postfix)
class _ZipFile(zipfile.ZipFile):
"""Extended check for windows invalid characters."""
# this is extending default zipfile table for few invalid characters
# that can come from Mac
_windows_illegal_characters = ":<>|\"?*\r\n\x00"
_windows_illegal_name_trans_table = str.maketrans(
_windows_illegal_characters,
"_" * len(_windows_illegal_characters)
)
def main(*subprocess_args):
def is_host_connected():
# Harmony always connected, not waiting
return True
# coloring in ConsoleTrayApp
os.environ["OPENPYPE_LOG_NO_COLORS"] = "False"
app = QtWidgets.QApplication([])
app.setQuitOnLastWindowClosed(False)
ConsoleTrayApp('harmony', launch, subprocess_args, is_host_connected)
sys.exit(app.exec_())
def launch(application_path, *args):
"""Set Harmony for launch.
Launches Harmony and the server, then starts listening on the main thread
for callbacks from the server. This is to have Qt applications run in the
main thread.
Args:
application_path (str): Path to Harmony.
"""
from avalon import api, harmony
api.install(harmony)
self.port = random.randrange(49152, 65535)
os.environ["AVALON_HARMONY_PORT"] = str(self.port)
self.application_path = application_path
# Launch Harmony.
setup_startup_scripts()
check_libs()
if os.environ.get("AVALON_HARMONY_WORKFILES_ON_LAUNCH", False):
workfiles.show(save=False)
# No launch through Workfiles happened.
if not self.workfile_path:
zip_file = os.path.join(os.path.dirname(__file__), "temp.zip")
temp_path = get_local_harmony_path(zip_file)
if os.path.exists(temp_path):
self.log.info(f"removing existing {temp_path}")
try:
shutil.rmtree(temp_path)
except Exception as e:
self.log.critical(f"cannot clear {temp_path}")
raise Exception(f"cannot clear {temp_path}") from e
launch_zip_file(zip_file)
def get_local_harmony_path(filepath):
"""From the provided path get the equivalent local Harmony path."""
basename = os.path.splitext(os.path.basename(filepath))[0]
harmony_path = os.path.join(os.path.expanduser("~"), ".avalon", "harmony")
return os.path.join(harmony_path, basename)
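# For illustration (hypothetical path): a workfile such as
# "/projects/shot010/scene.zip" resolves to "~/.avalon/harmony/scene",
# i.e. the zip's basename without extension under the local Harmony cache.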
def launch_zip_file(filepath):
"""Launch a Harmony application instance with the provided zip file.
Args:
filepath (str): Path to file.
"""
print(f"Localizing {filepath}")
temp_path = get_local_harmony_path(filepath)
scene_path = os.path.join(
temp_path, os.path.basename(temp_path) + ".xstage"
)
unzip = False
if os.path.exists(scene_path):
# Check remote scene is newer than local.
if os.path.getmtime(scene_path) < os.path.getmtime(filepath):
try:
shutil.rmtree(temp_path)
except Exception as e:
self.log.error(e)
raise Exception("Cannot delete working folder") from e
unzip = True
else:
unzip = True
if unzip:
with _ZipFile(filepath, "r") as zip_ref:
zip_ref.extractall(temp_path)
# Close existing scene.
if self.pid:
os.kill(self.pid, signal.SIGTERM)
# Stop server.
if self.server:
self.server.stop()
# Launch Avalon server.
self.server = Server(self.port)
self.server.start()
# thread = threading.Thread(target=self.server.start)
# thread.daemon = True
# thread.start()
# Save workfile path for later.
self.workfile_path = filepath
    # find any xstage files in the directory, and prefer the one with the same
    # name as the directory (plus extension)
xstage_files = []
for _, _, files in os.walk(temp_path):
for file in files:
if os.path.splitext(file)[1] == ".xstage":
xstage_files.append(file)
if not os.path.basename("temp.zip"):
if not xstage_files:
self.server.stop()
print("no xstage file was found")
return
# try to use first available
scene_path = os.path.join(
temp_path, xstage_files[0]
)
# prefer the one named as zip file
zip_based_name = "{}.xstage".format(
os.path.splitext(os.path.basename(filepath))[0])
if zip_based_name in xstage_files:
scene_path = os.path.join(
temp_path, zip_based_name
)
if not os.path.exists(scene_path):
print("error: cannot determine scene file")
self.server.stop()
return
print("Launching {}".format(scene_path))
ConsoleTrayApp.process = subprocess.Popen(
[self.application_path, scene_path],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL
)
self.pid = ConsoleTrayApp.process.pid
def on_file_changed(path, threaded=True):
"""Threaded zipping and move of the project directory.
This method is called when the `.xstage` file is changed.
"""
self.log.debug("File changed: " + path)
if self.workfile_path is None:
return
if threaded:
thread = threading.Thread(
target=zip_and_move,
args=(os.path.dirname(path), self.workfile_path)
)
thread.start()
else:
zip_and_move(os.path.dirname(path), self.workfile_path)
def zip_and_move(source, destination):
"""Zip a directory and move to `destination`.
Args:
source (str): Directory to zip and move to destination.
destination (str): Destination file path to zip file.
"""
os.chdir(os.path.dirname(source))
shutil.make_archive(os.path.basename(source), "zip", source)
with _ZipFile(os.path.basename(source) + ".zip") as zr:
if zr.testzip() is not None:
raise Exception("File archive is corrupted.")
shutil.move(os.path.basename(source) + ".zip", destination)
self.log.debug(f"Saved '{source}' to '{destination}'")
def show(module_name):
"""Call show on "module_name".
This allows to make a QApplication ahead of time and always "exec_" to
prevent crashing.
Args:
module_name (str): Name of module to call "show" on.
"""
# Requests often get doubled up when showing tools, so we wait a second for
# requests to be received properly.
time.sleep(1)
# Import and show tool.
module = importlib.import_module(module_name)
use_context = False
if "loader" in module_name:
use_context = True
ConsoleTrayApp.execute_in_main_thread(lambda: module.show(use_context))
# Required return statement.
return "nothing"
def get_scene_data():
try:
return self.send(
{
"function": "AvalonHarmony.getSceneData"
})["result"]
except json.decoder.JSONDecodeError:
        # Means no scene metadata has been made before.
return {}
except KeyError:
# Means no existing scene metadata has been made.
return {}
def set_scene_data(data):
"""Write scene data to metadata.
Args:
data (dict): Data to write.
"""
# Write scene data.
self.send(
{
"function": "AvalonHarmony.setSceneData",
"args": data
})
def read(node_id):
"""Read object metadata in to a dictionary.
Args:
node_id (str): Path to node or id of object.
Returns:
dict
"""
scene_data = get_scene_data()
if node_id in scene_data:
return scene_data[node_id]
return {}
def remove(node_id):
"""
Remove node data from scene metadata.
Args:
node_id (str): full name (eg. 'Top/renderAnimation')
"""
data = get_scene_data()
del data[node_id]
set_scene_data(data)
def delete_node(node):
""" Physically delete node from scene. """
self.send(
{
"function": "AvalonHarmony.deleteNode",
"args": node
}
)
def imprint(node_id, data, remove=False):
"""Write `data` to the `node` as json.
Arguments:
node_id (str): Path to node or id of object.
data (dict): Dictionary of key/value pairs.
remove (bool): Removes the data from the scene.
Example:
>>> from avalon.harmony import lib
>>> node = "Top/Display"
        >>> data = {"str": "something", "int": 1, "float": 0.32, "bool": True}
        >>> lib.imprint(node, data)
"""
scene_data = get_scene_data()
if remove and (node_id in scene_data):
scene_data.pop(node_id, None)
else:
if node_id in scene_data:
scene_data[node_id].update(data)
else:
scene_data[node_id] = data
set_scene_data(scene_data)
@contextlib.contextmanager
def maintained_selection():
"""Maintain selection during context."""
selected_nodes = self.send(
{
"function": "AvalonHarmony.getSelectedNodes"
})["result"]
try:
yield selected_nodes
finally:
selected_nodes = self.send(
{
"function": "AvalonHarmony.selectNodes",
"args": selected_nodes
}
)
def send(request):
"""Public method for sending requests to Harmony."""
return self.server.send(request)
def select_nodes(nodes):
""" Selects nodes in Node View """
selected_nodes = self.send(
{
"function": "AvalonHarmony.selectNodes",
"args": nodes
}
)
@contextlib.contextmanager
def maintained_nodes_state(nodes):
"""Maintain nodes states during context."""
# Collect current state.
states = self.send(
{
"function": "AvalonHarmony.areEnabled", "args": nodes
})["result"]
# Disable all nodes.
self.send(
{
"function": "AvalonHarmony.disableNodes", "args": nodes
})
try:
yield
finally:
self.send(
{
"function": "AvalonHarmony.setState",
"args": [nodes, states]
})
def save_scene():
"""Save the Harmony scene safely.
The built-in (to Avalon) background zip and moving of the Harmony scene
    folder, interferes with server/client communication by sending two requests
at the same time. This only happens when sending "scene.saveAll()". This
method prevents this double request and safely saves the scene.
"""
    # Need to turn off the background watcher else the communication with
# the server gets spammed with two requests at the same time.
scene_path = self.send(
{"function": "AvalonHarmony.saveScene"})["result"]
# Manually update the remote file.
self.on_file_changed(scene_path, threaded=False)
# Re-enable the background watcher.
self.send({"function": "AvalonHarmony.enableFileWather"})
def save_scene_as(filepath):
"""Save Harmony scene as `filepath`."""
scene_dir = os.path.dirname(filepath)
destination = os.path.join(
os.path.dirname(self.workfile_path),
os.path.splitext(os.path.basename(filepath))[0] + ".zip"
)
if os.path.exists(scene_dir):
try:
shutil.rmtree(scene_dir)
except Exception as e:
self.log.error(f"Cannot remove {scene_dir}")
raise Exception(f"Cannot remove {scene_dir}") from e
send(
{"function": "scene.saveAs", "args": [scene_dir]}
)["result"]
zip_and_move(scene_dir, destination)
self.workfile_path = destination
send(
{"function": "AvalonHarmony.addPathToWatcher", "args": filepath}
)
def find_node_by_name(name, node_type):
"""Find node by its name.
Args:
name (str): Name of the Node. (without part before '/')
node_type (str): Type of the Node.
'READ' - for loaded data with Loaders (background)
'GROUP' - for loaded data with Loaders (templates)
'WRITE' - render nodes
Returns:
str: FQ Node name.
"""
nodes = send(
{"function": "node.getNodes", "args": [[node_type]]}
)["result"]
for node in nodes:
node_name = node.split("/")[-1]
if name == node_name:
return node
return None
|
bios_console.py
|
#!/usr/bin/env python
import os
import pty
import threading
import argparse
import subprocess
import shutil
from litex.tools.litex_term import LiteXTerm
from rowhammer_tester.scripts.utils import RemoteClient, litex_server
def pty2crossover(m, stop):
while not stop.is_set():
r = os.read(m, 1)
wb.regs.uart_xover_rxtx.write(ord(r))
def crossover2pty(m, stop):
while not stop.is_set():
if wb.regs.uart_xover_rxempty.read() == 0:
r = wb.regs.uart_xover_rxtx.read()
os.write(m, bytes(chr(r).encode("utf-8")))
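# The two helpers above bridge the local pseudo-terminal and the LiteX crossover
# UART: pty2crossover forwards bytes typed into the pty to uart_xover_rxtx, while
# crossover2pty polls the same register and echoes any SoC output back to the pty.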
if __name__ == "__main__":
term_priority = ['picocom', 'minicom', 'litex_term']
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--srv', action='store_true', help='Start litex server in background')
parser.add_argument('-b', '--baudrate', default='1e6', help='Serial baud rate')
parser.add_argument(
'-t',
'--term',
choices=['auto', *term_priority],
default='auto',
help='Select serial terminal emulator')
args = parser.parse_args()
if args.srv:
litex_server()
wb = RemoteClient()
wb.open()
m, s = pty.openpty()
tty = os.ttyname(s)
print("LiteX Crossover UART created: {}".format(tty))
stop_event = threading.Event()
threads = [
threading.Thread(target=pty2crossover, args=[m, stop_event], daemon=True),
threading.Thread(target=crossover2pty, args=[m, stop_event], daemon=True),
]
for thread in threads:
thread.start()
baudrate = int(float(args.baudrate))
term = args.term
if term == 'auto':
try:
term = next(filter(lambda t: shutil.which(t) is not None, term_priority))
except StopIteration:
term = 'litex_term'
    print('Using serial backend: {}'.format(term))
if term == 'litex_term':
        # installed with litex so no additional dependencies, but it is slow
term = LiteXTerm(
serial_boot=False, kernel_image=None, kernel_address=None, json_images=None)
term.open(tty, baudrate)
term.console.configure()
term.start()
term.join()
elif term == 'picocom':
subprocess.run(['picocom', '-b', str(baudrate), tty])
elif term == 'minicom':
subprocess.run(['minicom', '-b', str(baudrate), '-D', tty])
else:
raise ValueError(term)
stop_event.set()
for thread in threads:
thread.join(timeout=0.05)
wb.close()
|
Decorators.py
|
# -*- coding: utf-8 -*-
import codecs
import sys
import functools
import threading
import re
import ast
import collections
def Singleton(class_):
instances = {}
def getinstance(*args, **kwargs):
if class_ not in instances:
instances[class_] = class_(*args, **kwargs)
return instances[class_]
return getinstance
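# Usage sketch for Singleton (illustrative only):
#
#   @Singleton
#   class Config(object):
#       pass
#
#   assert Config() is Config()  # every call returns the same cached instance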
def setInterval(interval, max_run_count=0, call_on_init=False):
def decorator(function):
def wrapper(*args, **kwargs):
stopped = threading.Event()
def loop(): # executed in another thread
run_count = 0
if call_on_init:
function(*args, **kwargs)
while not stopped.wait(interval): # until stopped
function(*args, **kwargs)
if max_run_count > 0:
run_count += 1
if run_count == max_run_count:
break
t = threading.Thread(target=loop)
t.daemon = True # stop if the program exits
t.start()
return stopped
return wrapper
return decorator
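# Usage sketch for setInterval (illustrative only):
#
#   @setInterval(5, max_run_count=3)
#   def heartbeat():
#       print("tick")
#
#   stop_event = heartbeat()  # starts a daemon thread and returns the stop Event
#   stop_event.set()          # call this to stop the periodic calls early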
def ModuleDocstringParser(cls):
@functools.wraps(cls)
def wrapper(*args, **kwargs):
instance = cls(*args, **kwargs)
instance.configuration_metadata = {}
        config_option_regex = r"\s*(?P<name>.*?):.*?#\s*<(?P<props>.*?)>"
regex = re.compile(config_option_regex, re.MULTILINE)
docstring = ""
# Get docstring from parents. Only single inheritance supported.
for parent_class in cls.__bases__:
if parent_class.__doc__:
docstring += parent_class.__doc__
if cls.__doc__:
docstring += cls.__doc__
for matches in regex.finditer(docstring):
config_option_info = matches.groupdict()
for prop_info in config_option_info['props'].split(";"):
try:
prop_name, prop_value = [m.strip() for m in prop_info.split(":", 1)]
# Replace escaped backslashes.
if "//" in prop_value:
prop_value = codecs.escape_decode(prop_value)
except ValueError:
instance.logger.debug("Could not parse config setting %s." % config_option_info)
continue
if prop_name in ["default", "values"]:
try:
prop_value = ast.literal_eval(prop_value)
except:
etype, evalue, etb = sys.exc_info()
instance.logger.error("Could not parse %s from docstring. Exception: %s, Error: %s." % (prop_value, etype, evalue))
continue
# Set default values in module configuration. Will be overwritten by custom values
if prop_name == "default":
instance.configuration_data[config_option_info['name'].strip()] = prop_value
# Support for multiple datatypes using the pattern: "type: string||list;"
if prop_name == "type":
prop_value = prop_value.split("||")
prop_value.append('Unicode')
try:
instance.configuration_metadata[config_option_info['name'].strip()].update({prop_name: prop_value})
except:
instance.configuration_metadata[config_option_info['name'].strip()] = {prop_name: prop_value}
# Set self_hostname
return instance
return wrapper
class BoundedOrderedDict(collections.OrderedDict):
def __init__(self, *args, **kwds):
self.maxlen = kwds.pop("maxlen", None)
collections.OrderedDict.__init__(self, *args, **kwds)
self._checklen()
def __setitem__(self, key, value):
collections.OrderedDict.__setitem__(self, key, value)
self._checklen()
def _checklen(self):
if self.maxlen is not None:
while len(self) > self.maxlen:
self.popitem(last=False)
def memoize(func=None, maxlen=None):
""" Bounded memoize decorator.
Found @http://stackoverflow.com/questions/9389307/how-do-i-create-a-bounded-memoization-decorator-in-python
"""
if func:
cache = BoundedOrderedDict(maxlen=maxlen)
@functools.wraps(func)
def memo_target(*args):
lookup_value = args
if lookup_value not in cache:
cache[lookup_value] = func(*args)
return cache[lookup_value]
return memo_target
else:
def memoize_factory(func):
return memoize(func, maxlen=maxlen)
return memoize_factory
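# Usage sketch for memoize (illustrative only):
#
#   @memoize(maxlen=128)
#   def fib(n):
#       return n if n < 2 else fib(n - 1) + fib(n - 2)
#
#   fib(30)  # BoundedOrderedDict keeps at most the 128 most recent results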
|
bench.py
|
#!/usr/bin/env python3
import os
import sys
import time
import subprocess
import gc
import statistics
import json
import threading
import re
import csv
# Need to avoid as much extra CPU usage as possible
gc.disable()
# sysfs power supply nodes for power sampling
POWER_SUPPLY = None
POWER_SUPPLY_NODES = [
# Qualcomm Battery Management System + fuel gauge: preferred when available for more info
"/sys/class/power_supply/bms",
# Most common
"/sys/class/power_supply/battery",
]
# Some fuel gauges need current scaling
CURRENT_FACTOR = 1
# Default power sampling intervals
POWER_SAMPLE_INTERVAL = 1000 # ms
POWER_SAMPLE_FG_DEFAULT_INTERVALS = {
# qgauge updates every 100 ms, but sampling also uses power, so do it conservatively
"qpnp,qg": 250,
# qpnp-fg-gen3/4 update every 1000 ms
"qpnp,fg": 1000,
}
# Must also set in init
HOUSEKEEPING_CPU = 0
# cpu0 is for housekeeping, so we can't benchmark it
# Benchmark cpu1 instead, which is also in the little cluster
REPLACE_CPUS = {
HOUSEKEEPING_CPU: 1,
}
# How long to idle at each freq and measure power before benchmarking
FREQ_IDLE_TIME = 5 # sec
# To reduce chances of an array realloc + copy during benchmark runs
PREALLOC_SECONDS = 300 # seconds of power sampling
# CoreMark PERFORMANCE_RUN params with 250,000 iterations
COREMARK_PERFORMANCE_RUN = ["0x0", "0x0", "0x66", "250000", "7", "1", "2000"]
# Blank lines are for rounded corner & camera cutout protection
BANNER = """
__ _ _
/ _|_ __ ___ __ _| |__ ___ _ __ ___| |__
| |_| '__/ _ \/ _` | '_ \ / _ \ '_ \ / __| '_ \
| _| | | __/ (_| | |_) | __/ | | | (__| | | |
|_| |_| \___|\__, |_.__/ \___|_| |_|\___|_| |_|
|_|
CPU benchmark • by kdrag0n
------------------------------------------------
"""
SYS_CPU = "/sys/devices/system/cpu"
# "Constants" evaluated at runtime
for psy_node in POWER_SUPPLY_NODES:
if os.path.exists(psy_node):
POWER_SUPPLY = psy_node
break
psy_name = os.readlink(POWER_SUPPLY)
for fg_string, interval in POWER_SAMPLE_FG_DEFAULT_INTERVALS.items():
if fg_string in psy_name:
POWER_SAMPLE_INTERVAL = interval
break
if len(sys.argv) > 1:
override_interval = int(sys.argv[1])
if override_interval > 0:
POWER_SAMPLE_INTERVAL = override_interval
# Calculate prealloc slots now that the interval is known
PREALLOC_SLOTS = int(PREALLOC_SECONDS / (POWER_SAMPLE_INTERVAL / 1000))
_stop_power_mon = False
_prealloc_samples = [-1] * PREALLOC_SLOTS
_power_samples = _prealloc_samples
def pr_debug(*args, **kwargs):
if __debug__:
kwargs["flush"] = True
print(*args, **kwargs)
def run_cmd(args):
proc = subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True)
if proc.returncode == 0:
return proc.stdout
else:
raise ValueError(f"Subprocess {args} failed with exit code {proc.returncode}:\n{proc.stdout}")
def sample_power():
with open(f"{POWER_SUPPLY}/current_now", "r") as f:
ma = int(f.read()) * CURRENT_FACTOR / 1000
with open(f"{POWER_SUPPLY}/voltage_now", "r") as f:
mv = int(f.read()) / 1000
mw = ma * mv / 1000
return ma, mv, abs(mw)
def start_power_thread(sample_interval=POWER_SAMPLE_INTERVAL):
def _power_thread():
global _power_samples
sample_dest = _prealloc_samples
count = 0
while True:
# Sleep before first sample to avoid a low first reading
time.sleep(sample_interval / 1000)
# Check stop flag immediately after sleep to avoid a low last reading
if _stop_power_mon:
pr_debug("Stopping power monitor due to global stop flag")
break
current, voltage, power = sample_power()
pr_debug(f"Power: {power} mW\t(sample {count} from {current} mA * {voltage} mV)")
try:
sample_dest[count] = power
except IndexError:
pr_debug("Pre-allocated sample slots exhausted, falling back to dynamic allocation")
# If out of pre-allocated slots
sample_dest.append(power)
count += 1
if count < len(sample_dest):
pr_debug(f"Truncating to first {count} samples from pre-allocated array")
_power_samples = sample_dest[:count]
pr_debug("Starting power monitor thread")
thread = threading.Thread(target=_power_thread, daemon=True)
thread.start()
return thread
def stop_power_thread(thread):
global _stop_power_mon
pr_debug("Setting flag to stop power monitor")
_stop_power_mon = True
pr_debug("Waiting for power monitor to stop")
thread.join()
_stop_power_mon = False
return _power_samples
def write_cpu(cpu, node, content):
pr_debug(f"Writing CPU value: cpu{cpu}/{node} => {content}")
with open(f"{SYS_CPU}/cpu{cpu}/{node}", "w") as f:
f.write(content)
def read_cpu(cpu, node):
with open(f"{SYS_CPU}/cpu{cpu}/{node}", "r") as f:
content = f.read().strip()
pr_debug(f"Reading CPU value: cpu{cpu}/{node} = {content}")
return content
def create_power_stats(time_ns, samples):
sec = time_ns / 1e9
power = statistics.mean(samples)
mj = power * sec
joules = mj / 1000
return {
"elapsed_sec": sec,
"elapsed_ns": time_ns,
"power_samples": samples,
"power_mean": power,
"energy_millijoules": mj,
"energy_joules": joules,
}
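# Example of the arithmetic above with illustrative numbers: a mean power of
# 500 mW over 20 s of samples gives 500 * 20 = 10000 mJ, i.e. 10 J of energy.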
def init_cpus():
print("Frequency domains: ", end="", flush=True)
bench_cpus = []
for policy_dir in sorted(os.listdir(f"{SYS_CPU}/cpufreq")):
if policy_dir.startswith("policy"):
first_cpu = int(policy_dir[len("policy"):])
if first_cpu in REPLACE_CPUS:
first_cpu = REPLACE_CPUS[first_cpu]
print(f"cpu{first_cpu}", end=" ", flush=True)
bench_cpus.append(first_cpu)
else:
print(f"Unrecognized file/dir in cpufreq: {policy_dir}")
continue
print()
print("Offline CPUs: ", end="", flush=True)
with open("/proc/cpuinfo", "r") as f:
cpuinfo = f.read()
cpu_count = len(re.findall(r'processor\s+:\s+\d+', cpuinfo))
for cpu in range(cpu_count):
if cpu == HOUSEKEEPING_CPU:
continue
print(f"cpu{cpu}", end=" ", flush=True)
write_cpu(cpu, "online", "0")
print(flush=True)
pr_debug("Minimizing frequency of housekeeping CPU")
write_cpu(HOUSEKEEPING_CPU, "cpufreq/scaling_governor", "powersave")
pr_debug()
return bench_cpus, cpu_count
def check_charging(node, charging_value, charging_warned):
if os.path.exists(node):
with open(node, "r") as f:
psy_status = f.read().strip()
pr_debug(f"Power supply status at {node}: {psy_status}")
if psy_status == charging_value and not charging_warned:
print()
print("=============== WARNING ===============")
print("Detected power supply in charging state!")
print("Power measurements will be invalid and benchmark results may be affected.")
print("Unplug the device and restart the benchmark for valid results.")
print("=============== WARNING ===============")
print()
return True
return charging_warned
def init_power():
global CURRENT_FACTOR
pr_debug(f"Using power supply: {POWER_SUPPLY}")
charging_warned = False
charging_warned = check_charging(f"{POWER_SUPPLY}/status", "Charging", charging_warned)
charging_warned = check_charging(f"/sys/class/power_supply/battery/status", "Charging", charging_warned)
charging_warned = check_charging(f"/sys/class/power_supply/usb/present", "1", charging_warned)
charging_warned = check_charging(f"/sys/class/power_supply/dc/present", "1", charging_warned)
# Some PMICs may give unstable readings at this point
pr_debug("Waiting for power usage to settle for initial current measurement")
time.sleep(5)
# Maxim PMICs used on Exynos devices report current in mA, not µA
with open(f"{POWER_SUPPLY}/current_now", "r") as f:
# Assumption: will never be below 1 mA
ref_current = int(f.read())
if abs(ref_current) <= 1000:
CURRENT_FACTOR = 1000
pr_debug(f"Scaling current by {CURRENT_FACTOR}x (derived from initial sample: {ref_current})")
print(f"Sampling power every {POWER_SAMPLE_INTERVAL} ms")
pr_debug(f"Pre-allocated {PREALLOC_SLOTS} sample slots for {PREALLOC_SECONDS} seconds")
pr_debug(f"Power sample interval adjusted for power supply: {psy_name}")
print("Baseline power usage: ", end="", flush=True)
pr_debug("Waiting for power usage to settle")
time.sleep(15)
pr_debug()
pr_debug("Measuring base power usage with only housekeeping CPU")
# The power used for sampling might affect results here, so sample less often
thread = start_power_thread(sample_interval=POWER_SAMPLE_INTERVAL * 2)
time.sleep(60)
base_power_samples = stop_power_thread(thread)
base_power = min(base_power_samples)
print(f"{base_power:.0f} mW")
print()
return base_power, base_power_samples
def main():
bench_start_time = time.time()
print(BANNER)
pr_debug("Running in debug mode")
pr_debug("Initializing CPU states")
bench_cpus, cpu_count = init_cpus()
pr_debug("Initializing power measurements")
base_power, base_power_samples = init_power()
pr_debug("Starting benchmark")
pr_debug()
cpus_data = {}
for cpu in bench_cpus:
print()
print(f"===== CPU {cpu} =====")
cpu_data = {
"freqs": {}
}
cpus_data[cpu] = cpu_data
pr_debug("Onlining CPU")
write_cpu(cpu, "online", "1")
pr_debug("Setting governor")
write_cpu(cpu, "cpufreq/scaling_governor", "userspace")
pr_debug("Getting frequencies")
with open(f"{SYS_CPU}/cpu{cpu}/cpufreq/scaling_available_frequencies", "r") as f:
raw_freqs = f.read().replace("\n", "").split(" ")
freqs = [int(freq) for freq in raw_freqs if freq]
# Some kernels may change the defaults
pr_debug("Setting frequency limits")
write_cpu(cpu, "cpufreq/scaling_min_freq", str(min(freqs)))
write_cpu(cpu, "cpufreq/scaling_max_freq", str(max(freqs)))
# Bail out if the kernel is clamping our values
pr_debug("Validating frequency limits")
real_min_freq = int(read_cpu(cpu, "cpufreq/scaling_min_freq"))
if real_min_freq != min(freqs):
raise ValueError(f"Minimum frequency setting {min(freqs)} rejected by kernel; got {real_min_freq}")
real_max_freq = int(read_cpu(cpu, "cpufreq/scaling_max_freq"))
if real_max_freq != max(freqs):
raise ValueError(f"Maximum frequency setting {max(freqs)} rejected by kernel; got {real_max_freq}")
# Need to sort because different platforms have different orders
freqs.sort()
print("Frequencies:", " ".join(str(int(freq / 1000)) for freq in freqs))
print()
for freq in freqs:
mhz = freq / 1000
print(f"{int(mhz):4d}: ", end="", flush=True)
write_cpu(cpu, "cpufreq/scaling_setspeed", str(freq))
pr_debug("Waiting for frequency to settle")
time.sleep(0.1)
pr_debug("Validating frequency")
real_freq = int(read_cpu(cpu, "cpufreq/scaling_cur_freq"))
if real_freq != freq:
raise ValueError(f"Frequency setting is {freq} but kernel is using {real_freq}")
pr_debug("Waiting for power usage to settle")
time.sleep(3)
pr_debug("Measuring idle power usage")
thread = start_power_thread()
time.sleep(FREQ_IDLE_TIME)
idle_power_samples = stop_power_thread(thread)
idle_power = statistics.mean(idle_power_samples)
idle_mj = idle_power * FREQ_IDLE_TIME
idle_joules = idle_mj / 1000
pr_debug(f"Idle: {idle_power:4.0f} mW {idle_joules:4.1f} J")
pr_debug("Running CoreMark...")
thread = start_power_thread()
start_time = time.time_ns()
cm_out = run_cmd(["taskset", "-c", f"{cpu}", "coremark", *COREMARK_PERFORMANCE_RUN])
end_time = time.time_ns()
power_samples = stop_power_thread(thread)
pr_debug(cm_out)
elapsed_sec = (end_time - start_time) / 1e9
# Extract score and iterations
match = re.search(r'CoreMark 1\.0 : ([0-9.]+?) / ', cm_out)
score = float(match.group(1))
match = re.search(r'Iterations\s+:\s+(\d+)', cm_out)
iters = float(match.group(1))
# Adjust for base power usage
power_samples = [sample - base_power for sample in power_samples]
# Calculate power values
power = statistics.mean(power_samples)
# CoreMarks/MHz as per EEMBC specs
cm_mhz = score / mhz
# mW * sec = mJ
mj = power * elapsed_sec
joules = mj / 1000
# ULPMark-CM score = iterations per millijoule
ulpmark_score = iters / mj
print(f"{score:5.0f} {cm_mhz:3.1f} C/MHz {power:4.0f} mW {joules:4.1f} J {ulpmark_score:4.1f} I/mJ {elapsed_sec:5.1f} s")
cpu_data["freqs"][freq] = {
"active": {
**create_power_stats(end_time - start_time, power_samples),
"coremark_score": score,
"coremarks_per_mhz": cm_mhz,
"ulpmark_cm_score": ulpmark_score
},
"idle": create_power_stats(int(FREQ_IDLE_TIME * 1e9), idle_power_samples),
}
# In case the CPU shares a freq domain with the housekeeping CPU, e.g. cpu1
pr_debug("Reverting governor")
write_cpu(cpu, "cpufreq/scaling_governor", "powersave")
pr_debug("Offlining CPU")
write_cpu(cpu, "online", "0")
print()
# Make the rest run faster
pr_debug("Maxing housekeeping CPU frequency")
write_cpu(HOUSEKEEPING_CPU, "cpufreq/scaling_governor", "performance")
# OK to GC beyond this point as all the benchmarking is done
pr_debug("Enabling Python GC")
gc.enable()
print()
print("Benchmark finished!")
bench_finish_time = time.time()
pr_debug("Writing JSON data")
data = {
"version": 1,
"total_elapsed_sec": bench_finish_time - bench_start_time,
"housekeeping": create_power_stats(int(5 * 1e9), base_power_samples),
"cpus": cpus_data,
"meta": {
"housekeeping_cpu": HOUSEKEEPING_CPU,
"power_sample_interval": POWER_SAMPLE_INTERVAL,
"cpu_count": cpu_count,
},
}
results_json = json.dumps(data)
pr_debug(results_json)
with open("/tmp/results.json", "w+") as f:
f.write(results_json)
pr_debug("Writing CSV data")
with open("/tmp/results.csv", "w+") as f:
fields = [
"CPU",
"Frequency (kHz)",
"CoreMarks (iter/s)",
"CoreMarks/MHz",
"Power (mW)",
"Energy (J)",
"ULPMark-CM (iter/mJ)",
"Time (s)"
]
writer = csv.DictWriter(f, fieldnames=fields)
writer.writeheader()
for cpu, cpu_data in cpus_data.items():
for freq, freq_data in cpu_data["freqs"].items():
freq_data = freq_data["active"]
writer.writerow({
"CPU": cpu,
"Frequency (kHz)": freq,
"CoreMarks (iter/s)": freq_data["coremark_score"],
"CoreMarks/MHz": freq_data["coremarks_per_mhz"],
"Power (mW)": freq_data["power_mean"],
"Energy (J)": freq_data["energy_joules"],
"ULPMark-CM (iter/mJ)": freq_data["ulpmark_cm_score"],
"Time (s)": freq_data["elapsed_sec"],
})
if __name__ == "__main__":
main()
|
pubsub.py
|
from __future__ import absolute_import
import redis
import logging
import random
from django.conf import settings
from threading import Thread
from six.moves.queue import Queue, Full
class QueuedPublisher(object):
"""
A publisher that queues items locally and publishes them to a
remote pubsub service on a background thread.
Maintains a lossy internal queue for posting, will discard the
value if the queue is full or not immediately available. Will also
drop items if the publish operation to the remote service fails.
"""
def __init__(self, publisher):
self._started = False
self.publisher = publisher
def _start(self):
if self._started:
return True
self.q = q = Queue(maxsize=100)
def worker():
while True:
(channel, key, value) = q.get()
try:
self.publisher.publish(channel, key=key, value=value)
except Exception:
logger = logging.getLogger('sentry.errors')
logger.debug('could not submit event to pubsub')
finally:
q.task_done()
t = Thread(target=worker)
        t.daemon = True
t.start()
self._started = True
return True
def publish(self, channel, value, key=None):
if not self._start():
return
sample_channel = getattr(settings, 'PUBSUB_SAMPLING', 1.0)
if random.random() <= sample_channel:
try:
self.q.put((channel, key, value), block=False)
except Full:
return
class RedisPublisher(object):
def __init__(self, connection):
self.rds = None if connection is None else redis.StrictRedis(**connection)
def publish(self, channel, value, key=None):
if self.rds is not None:
self.rds.publish(channel, value)
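# Usage sketch (connection settings below are hypothetical, not from this module):
#
#   publisher = QueuedPublisher(RedisPublisher({"host": "localhost", "port": 6379}))
#   publisher.publish("events", '{"type": "error"}')
#
# Items are queued locally and published from the background worker thread; anything
# that cannot be enqueued or published immediately is dropped by design.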
|
helpers.py
|
"""Supporting functions for polydata and grid objects."""
import collections.abc
import enum
import logging
import signal
import sys
from threading import Thread
import threading
import traceback
import numpy as np
from pyvista import _vtk
import pyvista
from .fileio import from_meshio
from . import transformations
class FieldAssociation(enum.Enum):
"""Represents which type of vtk field a scalar or vector array is associated with."""
POINT = _vtk.vtkDataObject.FIELD_ASSOCIATION_POINTS
CELL = _vtk.vtkDataObject.FIELD_ASSOCIATION_CELLS
NONE = _vtk.vtkDataObject.FIELD_ASSOCIATION_NONE
ROW = _vtk.vtkDataObject.FIELD_ASSOCIATION_ROWS
def get_vtk_type(typ):
"""Look up the VTK type for a give python data type.
Corrects for string type mapping issues.
Returns
-------
int : the integer type id specified in vtkType.h
"""
typ = _vtk.get_vtk_array_type(typ)
# This handles a silly string type bug
if typ == 3:
return 13
return typ
def vtk_bit_array_to_char(vtkarr_bint):
"""Cast vtk bit array to a char array."""
vtkarr = _vtk.vtkCharArray()
vtkarr.DeepCopy(vtkarr_bint)
return vtkarr
def vtk_id_list_to_array(vtk_id_list):
"""Convert a vtkIdList to a NumPy array."""
return np.array([vtk_id_list.GetId(i) for i in range(vtk_id_list.GetNumberOfIds())])
def convert_string_array(arr, name=None):
"""Convert a numpy array of strings to a vtkStringArray or vice versa.
Note that this is terribly inefficient - inefficient support
is better than no support :). If you have ideas on how to make this faster,
please consider opening a pull request.
"""
if isinstance(arr, np.ndarray):
vtkarr = _vtk.vtkStringArray()
########### OPTIMIZE ###########
for val in arr:
vtkarr.InsertNextValue(val)
################################
if isinstance(name, str):
vtkarr.SetName(name)
return vtkarr
# Otherwise it is a vtk array and needs to be converted back to numpy
############### OPTIMIZE ###############
nvalues = arr.GetNumberOfValues()
return np.array([arr.GetValue(i) for i in range(nvalues)], dtype='|U')
########################################
def convert_array(arr, name=None, deep=0, array_type=None):
"""Convert a NumPy array to a vtkDataArray or vice versa.
    Parameters
    ----------
    arr : ndarray or vtkDataArray
        A numpy array or vtkDataArray to convert.
    name : str
        The name of the data array for VTK.
    deep : bool
        If the input is a numpy array, deep copy the values.
    Returns
    -------
    vtkDataArray or ndarray
        The converted array: if the input is a NumPy ``ndarray`` a
        ``vtkDataArray`` is returned, and if the input is a ``vtkDataArray``
        a NumPy ``ndarray`` is returned.
"""
if arr is None:
return
if isinstance(arr, np.ndarray):
if arr.dtype is np.dtype('O'):
arr = arr.astype('|S')
arr = np.ascontiguousarray(arr)
if arr.dtype.type in (np.str_, np.bytes_):
# This handles strings
vtk_data = convert_string_array(arr)
else:
# This will handle numerical data
arr = np.ascontiguousarray(arr)
vtk_data = _vtk.numpy_to_vtk(num_array=arr, deep=deep, array_type=array_type)
if isinstance(name, str):
vtk_data.SetName(name)
return vtk_data
# Otherwise input must be a vtkDataArray
if not isinstance(arr, (_vtk.vtkDataArray, _vtk.vtkBitArray, _vtk.vtkStringArray)):
raise TypeError(f'Invalid input array type ({type(arr)}).')
# Handle booleans
if isinstance(arr, _vtk.vtkBitArray):
arr = vtk_bit_array_to_char(arr)
# Handle string arrays
if isinstance(arr, _vtk.vtkStringArray):
return convert_string_array(arr)
# Convert from vtkDataArry to NumPy
return _vtk.vtk_to_numpy(arr)
def is_pyvista_dataset(obj):
"""Return True if the Object is a PyVista wrapped dataset."""
return isinstance(obj, (pyvista.DataSet, pyvista.MultiBlock))
def point_array(mesh, name):
"""Return point array of a vtk object."""
vtkarr = mesh.GetPointData().GetAbstractArray(name)
return convert_array(vtkarr)
def field_array(mesh, name):
"""Return field array of a vtk object."""
vtkarr = mesh.GetFieldData().GetAbstractArray(name)
return convert_array(vtkarr)
def cell_array(mesh, name):
"""Return cell array of a vtk object."""
vtkarr = mesh.GetCellData().GetAbstractArray(name)
return convert_array(vtkarr)
def row_array(data_object, name):
"""Return row array of a vtk object."""
vtkarr = data_object.GetRowData().GetAbstractArray(name)
return convert_array(vtkarr)
def parse_field_choice(field):
"""Return the id of the given field."""
if isinstance(field, str):
field = field.strip().lower()
if field in ['cell', 'c', 'cells']:
field = FieldAssociation.CELL
elif field in ['point', 'p', 'points']:
field = FieldAssociation.POINT
elif field in ['field', 'f', 'fields']:
field = FieldAssociation.NONE
elif field in ['row', 'r']:
field = FieldAssociation.ROW
else:
raise ValueError(f'Data field ({field}) not supported.')
elif isinstance(field, FieldAssociation):
pass
else:
raise ValueError(f'Data field ({field}) not supported.')
return field
def get_array(mesh, name, preference='cell', info=False, err=False):
"""Search point, cell and field data for an array.
Parameters
----------
name : str
The name of the array to get the range.
preference : str, optional
When scalars is specified, this is the preferred array type to
search for in the dataset. Must be either ``'point'``,
``'cell'``, or ``'field'``
info : bool
Return info about the array rather than the array itself.
err : bool
Boolean to control whether to throw an error if array is not present.
"""
if isinstance(mesh, _vtk.vtkTable):
arr = row_array(mesh, name)
if arr is None and err:
raise KeyError(f'Data array ({name}) not present in this dataset.')
field = FieldAssociation.ROW
if info:
return arr, field
return arr
parr = point_array(mesh, name)
carr = cell_array(mesh, name)
farr = field_array(mesh, name)
preference = parse_field_choice(preference)
if np.sum([parr is not None, carr is not None, farr is not None]) > 1:
if preference == FieldAssociation.CELL:
if info:
return carr, FieldAssociation.CELL
else:
return carr
elif preference == FieldAssociation.POINT:
if info:
return parr, FieldAssociation.POINT
else:
return parr
elif preference == FieldAssociation.NONE:
if info:
return farr, FieldAssociation.NONE
else:
return farr
else:
raise ValueError(f'Data field ({preference}) not supported.')
arr = None
field = None
if parr is not None:
arr = parr
field = FieldAssociation.POINT
elif carr is not None:
arr = carr
field = FieldAssociation.CELL
elif farr is not None:
arr = farr
field = FieldAssociation.NONE
elif err:
raise KeyError(f'Data array ({name}) not present in this dataset.')
if info:
return arr, field
return arr
def vtk_points(points, deep=True):
"""Convert numpy array or array-like to a vtkPoints object."""
points = np.asarray(points)
# verify is numeric
if not np.issubdtype(points.dtype, np.number):
raise TypeError('Points must be a numeric type')
# check dimensionality
if points.ndim == 1:
points = points.reshape(-1, 3)
elif points.ndim > 2:
raise ValueError('Dimension of ``points`` should be 1 or 2, not '
f'{points.ndim}')
# verify shape
if points.shape[1] != 3:
raise ValueError('Points array must contain three values per point. '
f'Shape is {points.shape} and should be (X, 3)')
# points must be contiguous
points = np.ascontiguousarray(points)
vtkpts = _vtk.vtkPoints()
vtkpts.SetData(_vtk.numpy_to_vtk(points, deep=deep))
return vtkpts
def line_segments_from_points(points):
"""Generate non-connected line segments from points.
    Assumes points are ordered as line segments and that an even number of
    points is given.
Parameters
----------
points : np.ndarray
Points representing line segments. An even number must be given as
every two vertices represent a single line segment. For example, two
line segments would be represented as:
np.array([[0, 0, 0], [1, 0, 0], [1, 0, 0], [1, 1, 0]])
Returns
-------
lines : pyvista.PolyData
PolyData with lines and cells.
Examples
--------
    This example plots two line segments at right angles to each other.
>>> import pyvista
>>> import numpy as np
>>> points = np.array([[0, 0, 0], [1, 0, 0], [1, 0, 0], [1, 1, 0]])
    >>> lines = pyvista.line_segments_from_points(points)
>>> lines.plot() # doctest:+SKIP
"""
if len(points) % 2 != 0:
raise ValueError("An even number of points must be given to define each segment.")
# Assuming ordered points, create array defining line order
n_points = len(points)
n_lines = n_points // 2
lines = np.c_[(2 * np.ones(n_lines, np.int_),
np.arange(0, n_points-1, step=2),
np.arange(1, n_points+1, step=2))]
poly = pyvista.PolyData()
poly.points = points
poly.lines = lines
return poly
def lines_from_points(points, close=False):
"""Make a connected line set given an array of points.
Parameters
----------
points : np.ndarray
Points representing the vertices of the connected segments. For
example, two line segments would be represented as:
np.array([[0, 0, 0], [1, 0, 0], [1, 1, 0]])
close : bool, optional
If True, close the line segments into a loop
Returns
-------
lines : pyvista.PolyData
PolyData with lines and cells.
"""
poly = pyvista.PolyData()
poly.points = points
cells = np.full((len(points)-1, 3), 2, dtype=np.int_)
cells[:, 1] = np.arange(0, len(points)-1, dtype=np.int_)
cells[:, 2] = np.arange(1, len(points), dtype=np.int_)
if close:
cells = np.append(cells, [[2, len(points)-1, 0]], axis=0)
poly.lines = cells
return poly
def make_tri_mesh(points, faces):
"""Construct a ``pyvista.PolyData`` mesh using points and faces arrays.
Construct a mesh from an Nx3 array of points and an Mx3 array of
triangle indices, resulting in a mesh with N vertices and M
triangles. This function does not require the standard VTK
"padding" column and simplifies mesh creation.
Parameters
----------
points : np.ndarray
Array of points with shape (N, 3) storing the vertices of the
triangle mesh.
faces : np.ndarray
Array of indices with shape (M, 3) containing the triangle
indices.
Returns
-------
tri_mesh : pyvista.PolyData
PolyData instance containing the triangle mesh.
Examples
--------
This example discretizes the unit square into a triangle mesh with
nine vertices and eight faces.
>>> import numpy as np
>>> import pyvista as pv
>>> points = np.array([[0, 0, 0], [0.5, 0, 0], [1, 0, 0], [0, 0.5, 0],
... [0.5, 0.5, 0], [1, 0.5, 0], [0, 1, 0], [0.5, 1, 0],
... [1, 1, 0]])
>>> faces = np.array([[0, 1, 4], [4, 7, 6], [2, 5, 4], [4, 5, 8],
... [0, 4, 3], [3, 4, 6], [1, 2, 4], [4, 8, 7]])
    >>> tri_mesh = pv.make_tri_mesh(points, faces)
>>> tri_mesh.plot(show_edges=True) # doctest:+SKIP
"""
if points.shape[1] != 3:
raise ValueError("Points array should have shape (N, 3).")
if faces.ndim != 2 or faces.shape[1] != 3:
raise ValueError("Face array should have shape (M, 3).")
cells = np.empty((faces.shape[0], 4), dtype=faces.dtype)
cells[:, 0] = 3
cells[:, 1:] = faces
return pyvista.PolyData(points, cells)
def vector_poly_data(orig, vec):
"""Create a vtkPolyData object composed of vectors."""
# shape, dimension checking
if not isinstance(orig, np.ndarray):
orig = np.asarray(orig)
if not isinstance(vec, np.ndarray):
vec = np.asarray(vec)
if orig.ndim != 2:
orig = orig.reshape((-1, 3))
elif orig.shape[1] != 3:
raise ValueError('orig array must be 3D')
if vec.ndim != 2:
vec = vec.reshape((-1, 3))
elif vec.shape[1] != 3:
raise ValueError('vec array must be 3D')
# Create vtk points and cells objects
vpts = _vtk.vtkPoints()
vpts.SetData(_vtk.numpy_to_vtk(np.ascontiguousarray(orig), deep=True))
npts = orig.shape[0]
cells = np.empty((npts, 2), dtype=pyvista.ID_TYPE)
cells[:, 0] = 1
cells[:, 1] = np.arange(npts, dtype=pyvista.ID_TYPE)
vcells = pyvista.utilities.cells.CellArray(cells, npts)
# Create vtkPolyData object
pdata = _vtk.vtkPolyData()
pdata.SetPoints(vpts)
pdata.SetVerts(vcells)
# Add vectors to polydata
name = 'vectors'
vtkfloat = _vtk.numpy_to_vtk(np.ascontiguousarray(vec), deep=True)
vtkfloat.SetName(name)
pdata.GetPointData().AddArray(vtkfloat)
pdata.GetPointData().SetActiveVectors(name)
# Add magnitude of vectors to polydata
name = 'mag'
scalars = (vec * vec).sum(1)**0.5
vtkfloat = _vtk.numpy_to_vtk(np.ascontiguousarray(scalars), deep=True)
vtkfloat.SetName(name)
pdata.GetPointData().AddArray(vtkfloat)
pdata.GetPointData().SetActiveScalars(name)
return pyvista.PolyData(pdata)
def trans_from_matrix(matrix): # pragma: no cover
"""Convert a vtk matrix to a numpy.ndarray.
DEPRECATED: Please use ``array_from_vtkmatrix``.
"""
# import needs to happen here to prevent a circular import
from pyvista.core.errors import DeprecationError
raise DeprecationError('DEPRECATED: Please use ``array_from_vtkmatrix``.')
def array_from_vtkmatrix(matrix):
"""Convert a vtk matrix to a ``numpy.ndarray``.
Parameters
----------
matrix : vtk.vtkMatrix3x3 or vtk.vtkMatrix4x4
The vtk matrix to be converted to a ``numpy.ndarray``.
Returned ndarray has shape (3, 3) or (4, 4) as appropriate.
"""
if isinstance(matrix, _vtk.vtkMatrix3x3):
shape = (3, 3)
elif isinstance(matrix, _vtk.vtkMatrix4x4):
shape = (4, 4)
else:
raise TypeError('Expected vtk.vtkMatrix3x3 or vtk.vtkMatrix4x4 input,'
f' got {type(matrix).__name__} instead.')
array = np.zeros(shape)
for i in range(shape[0]):
for j in range(shape[1]):
array[i, j] = matrix.GetElement(i, j)
return array
def vtkmatrix_from_array(array):
"""Convert a ``numpy.ndarray`` or array-like to a vtk matrix.
Parameters
----------
array : numpy.ndarray or array-like
The array or array-like to be converted to a vtk matrix.
Shape (3, 3) gets converted to a ``vtk.vtkMatrix3x3``, shape (4, 4)
gets converted to a ``vtk.vtkMatrix4x4``. No other shapes are valid.
"""
array = np.asarray(array)
if array.shape == (3, 3):
matrix = _vtk.vtkMatrix3x3()
elif array.shape == (4, 4):
matrix = _vtk.vtkMatrix4x4()
else:
raise ValueError(f'Invalid shape {array.shape}, must be (3, 3) or (4, 4).')
m, n = array.shape
for i in range(m):
for j in range(n):
matrix.SetElement(i, j, array[i, j])
return matrix
def is_meshio_mesh(mesh):
"""Test if passed object is instance of ``meshio.Mesh``."""
try:
import meshio
return isinstance(mesh, meshio.Mesh)
except ImportError:
return False
def wrap(dataset):
"""Wrap any given VTK data object to its appropriate pyvista data object.
Other formats that are supported include:
* 2D :class:`numpy.ndarray` of XYZ vertices
* 3D :class:`numpy.ndarray` representing a volume. Values will be scalars.
* 3D :class:`trimesh.Trimesh` mesh.
* 3D :class:`meshio` mesh.
Parameters
----------
dataset : :class:`numpy.ndarray`, :class:`trimesh.Trimesh`, or VTK object
Dataset to wrap.
Returns
-------
wrapped_dataset : pyvista class
The `pyvista` wrapped dataset.
Examples
--------
Wrap a numpy array representing a random point cloud.
>>> import numpy as np
>>> import pyvista
>>> points = np.random.random((10, 3))
>>> cloud = pyvista.wrap(points)
>>> cloud # doctest:+SKIP
PolyData (0x7fc52db83d70)
N Cells: 10
N Points: 10
X Bounds: 1.123e-01, 7.457e-01
Y Bounds: 1.009e-01, 9.877e-01
Z Bounds: 2.346e-03, 9.640e-01
N Arrays: 0
Wrap a Trimesh object.
>>> import trimesh
>>> import pyvista
>>> points = [[0, 0, 0], [0, 0, 1], [0, 1, 0]]
>>> faces = [[0, 1, 2]]
>>> tmesh = trimesh.Trimesh(points, faces=faces, process=False)
>>> mesh = pyvista.wrap(tmesh)
>>> mesh # doctest:+SKIP
PolyData (0x7fc55ff27ad0)
N Cells: 1
N Points: 3
X Bounds: 0.000e+00, 0.000e+00
Y Bounds: 0.000e+00, 1.000e+00
Z Bounds: 0.000e+00, 1.000e+00
N Arrays: 0
Wrap a VTK object.
>>> import pyvista
>>> import vtk
>>> points = vtk.vtkPoints()
>>> p = [1.0, 2.0, 3.0]
>>> vertices = vtk.vtkCellArray()
>>> pid = points.InsertNextPoint(p)
>>> _ = vertices.InsertNextCell(1)
>>> _ = vertices.InsertCellPoint(pid)
>>> point = vtk.vtkPolyData()
>>> _ = point.SetPoints(points)
>>> _ = point.SetVerts(vertices)
>>> mesh = pyvista.wrap(point)
>>> mesh # doctest:+SKIP
PolyData (0x7fc55ff27ad0)
N Cells: 1
N Points: 3
X Bounds: 0.000e+00, 0.000e+00
Y Bounds: 0.000e+00, 1.000e+00
Z Bounds: 0.000e+00, 1.000e+00
N Arrays: 0
"""
# Return if None
if dataset is None:
return
# Check if dataset is a numpy array. We do this first since
# pyvista_ndarray contains a VTK type that we don't want to
# directly wrap.
if isinstance(dataset, (np.ndarray, pyvista.pyvista_ndarray)):
if dataset.ndim == 1 and dataset.shape[0] == 3:
return pyvista.PolyData(dataset)
if dataset.ndim > 1 and dataset.ndim < 3 and dataset.shape[1] == 3:
return pyvista.PolyData(dataset)
elif dataset.ndim == 3:
mesh = pyvista.UniformGrid(dataset.shape)
mesh['values'] = dataset.ravel(order='F')
mesh.active_scalars_name = 'values'
return mesh
else:
            raise NotImplementedError('NumPy array could not be wrapped by pyvista.')
wrappers = {
'vtkExplicitStructuredGrid': pyvista.ExplicitStructuredGrid,
'vtkUnstructuredGrid': pyvista.UnstructuredGrid,
'vtkRectilinearGrid': pyvista.RectilinearGrid,
'vtkStructuredGrid': pyvista.StructuredGrid,
'vtkPolyData': pyvista.PolyData,
'vtkImageData': pyvista.UniformGrid,
'vtkStructuredPoints': pyvista.UniformGrid,
'vtkMultiBlockDataSet': pyvista.MultiBlock,
'vtkTable': pyvista.Table,
# 'vtkParametricSpline': pyvista.Spline,
}
# Check if a dataset is a VTK type
if hasattr(dataset, 'GetClassName'):
key = dataset.GetClassName()
try:
return wrappers[key](dataset)
except KeyError:
logging.warning(f'VTK data type ({key}) is not currently supported by pyvista.')
return
# wrap meshio
if is_meshio_mesh(dataset):
return from_meshio(dataset)
# wrap trimesh
if dataset.__class__.__name__ == 'Trimesh':
# trimesh doesn't pad faces
n_face = dataset.faces.shape[0]
faces = np.empty((n_face, 4), dataset.faces.dtype)
faces[:, 1:] = dataset.faces
faces[:, 0] = 3
return pyvista.PolyData(np.asarray(dataset.vertices), faces)
    # otherwise, tell the user we can't wrap this object
raise NotImplementedError(f'Unable to wrap ({type(dataset)}) into a pyvista type.')
def image_to_texture(image):
"""Convert ``vtkImageData`` (:class:`pyvista.UniformGrid`) to a ``vtkTexture``."""
return pyvista.Texture(image)
def numpy_to_texture(image):
"""Convert a NumPy image array to a vtk.vtkTexture."""
return pyvista.Texture(image)
def is_inside_bounds(point, bounds):
"""Check if a point is inside a set of bounds.
This is implemented through recursion so that this is N-dimensional.
"""
if isinstance(point, (int, float)):
point = [point]
if isinstance(point, (np.ndarray, collections.abc.Sequence)) and not isinstance(point, collections.deque):
if len(bounds) < 2 * len(point) or len(bounds) % 2 != 0:
raise ValueError('Bounds mismatch point dimensionality')
point = collections.deque(point)
bounds = collections.deque(bounds)
return is_inside_bounds(point, bounds)
if not isinstance(point, collections.deque):
raise TypeError(f'Unknown input data type ({type(point)}).')
if len(point) < 1:
return True
p = point.popleft()
lower, upper = bounds.popleft(), bounds.popleft()
if lower <= p <= upper:
return is_inside_bounds(point, bounds)
return False
def fit_plane_to_points(points, return_meta=False):
"""Fit a plane to a set of points.
Parameters
----------
points : np.ndarray
Size n by 3 array of points to fit a plane through
return_meta : bool
If true, also returns the center and normal used to generate the plane
"""
data = np.array(points)
center = data.mean(axis=0)
result = np.linalg.svd(data - center)
normal = np.cross(result[2][0], result[2][1])
plane = pyvista.Plane(center=center, direction=normal)
if return_meta:
return plane, center, normal
return plane
def raise_not_matching(scalars, mesh):
"""Raise exception about inconsistencies."""
if isinstance(mesh, _vtk.vtkTable):
raise ValueError(f'Number of scalars ({scalars.size}) must match number of rows ({mesh.n_rows}).')
raise ValueError(f'Number of scalars ({scalars.size}) '
f'must match either the number of points ({mesh.n_points}) '
f'or the number of cells ({mesh.n_cells}).')
def generate_plane(normal, origin):
"""Return a _vtk.vtkPlane."""
plane = _vtk.vtkPlane()
# NORMAL MUST HAVE MAGNITUDE OF 1
normal = normal / np.linalg.norm(normal)
plane.SetNormal(normal)
plane.SetOrigin(origin)
return plane
def try_callback(func, *args):
"""Wrap a given callback in a try statement."""
try:
func(*args)
except Exception:
etype, exc, tb = sys.exc_info()
stack = traceback.extract_tb(tb)[1:]
formatted_exception = \
'Encountered issue in callback (most recent call last):\n' + \
''.join(traceback.format_list(stack) +
traceback.format_exception_only(etype, exc)).rstrip('\n')
logging.warning(formatted_exception)
return
def check_depth_peeling(number_of_peels=100, occlusion_ratio=0.0):
"""Check if depth peeling is available.
Attempts to use depth peeling to see if it is available for the current
environment. Returns ``True`` if depth peeling is available and has been
successfully leveraged, otherwise ``False``.
"""
# Try Depth Peeling with a basic scene
source = _vtk.vtkSphereSource()
mapper = _vtk.vtkPolyDataMapper()
mapper.SetInputConnection(source.GetOutputPort())
actor = _vtk.vtkActor()
actor.SetMapper(mapper)
# requires opacity < 1
actor.GetProperty().SetOpacity(0.5)
renderer = _vtk.vtkRenderer()
renderWindow = _vtk.vtkRenderWindow()
renderWindow.AddRenderer(renderer)
renderWindow.SetOffScreenRendering(True)
renderWindow.SetAlphaBitPlanes(True)
renderWindow.SetMultiSamples(0)
renderer.AddActor(actor)
renderer.SetUseDepthPeeling(True)
renderer.SetMaximumNumberOfPeels(number_of_peels)
renderer.SetOcclusionRatio(occlusion_ratio)
renderWindow.Render()
return renderer.GetLastRenderingUsedDepthPeeling() == 1
def threaded(fn):
"""Call a function using a thread."""
def wrapper(*args, **kwargs):
thread = Thread(target=fn, args=args, kwargs=kwargs)
thread.start()
return thread
return wrapper
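# Usage sketch for the ``threaded`` decorator (illustrative only):
#
#   @threaded
#   def long_task(mesh):
#       mesh.compute_normals()
#
#   t = long_task(my_mesh)  # returns the started Thread immediately
#   t.join()                # wait for completion when needed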
class conditional_decorator:
"""Conditional decorator for methods."""
def __init__(self, dec, condition):
"""Initialize."""
self.decorator = dec
self.condition = condition
def __call__(self, func):
"""Call the decorated function if condition is matched."""
if not self.condition:
# Return the function unchanged, not decorated.
return func
return self.decorator(func)
class ProgressMonitor():
"""A standard class for monitoring the progress of a VTK algorithm.
    This must be used in a ``with`` context, and it will block keyboard
    interrupts from being raised until the exit event, because an interrupt
    would crash the kernel if the VTK algorithm is still executing.
"""
def __init__(self, algorithm, message="", scaling=100):
"""Initialize observer."""
try:
from tqdm import tqdm
except ImportError:
raise ImportError("Please install `tqdm` to monitor algorithms.")
self.event_type = _vtk.vtkCommand.ProgressEvent
self.progress = 0.0
self._last_progress = self.progress
self.algorithm = algorithm
self.message = message
self._interrupt_signal_received = False
self._old_progress = 0
self._old_handler = None
self._progress_bar = None
def handler(self, sig, frame):
"""Pass signal to custom interrupt handler."""
self._interrupt_signal_received = (sig, frame)
logging.debug('SIGINT received. Delaying KeyboardInterrupt until '
'VTK algorithm finishes.')
def __call__(self, obj, event, *args):
"""Call progress update callback.
On an event occurrence, this function executes.
"""
if self._interrupt_signal_received:
obj.AbortExecuteOn()
else:
progress = obj.GetProgress()
step = progress - self._old_progress
self._progress_bar.update(step)
self._old_progress = progress
def __enter__(self):
"""Enter event for ``with`` context."""
from tqdm import tqdm
# check if in main thread
if threading.current_thread().__class__.__name__ == '_MainThread':
self._old_handler = signal.signal(signal.SIGINT, self.handler)
self._progress_bar = tqdm(total=1, leave=True,
bar_format='{l_bar}{bar}[{elapsed}<{remaining}]')
self._progress_bar.set_description(self.message)
self.algorithm.AddObserver(self.event_type, self)
return self._progress_bar
def __exit__(self, type, value, traceback):
"""Exit event for ``with`` context."""
self._progress_bar.total = 1
self._progress_bar.refresh()
self._progress_bar.close()
self.algorithm.RemoveObservers(self.event_type)
if threading.current_thread().__class__.__name__ == '_MainThread':
signal.signal(signal.SIGINT, self._old_handler)
def abstract_class(cls_):
"""Decorate a class, overriding __new__.
    Prevents a class from being instantiated, similar to abc.ABCMeta,
    but does not require an abstract method.
"""
def __new__(cls, *args, **kwargs):
if cls is cls_:
raise TypeError(f'{cls.__name__} is an abstract class and may not be instantiated.')
return object.__new__(cls)
cls_.__new__ = __new__
return cls_
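# Usage sketch for abstract_class (illustrative only):
#
#   @abstract_class
#   class Base:
#       pass
#
#   class Child(Base):
#       pass
#
#   Child()  # allowed
#   Base()   # raises TypeError: Base is an abstract class and may not be instantiated.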
def axis_rotation(points, angle, inplace=False, deg=True, axis='z'):
"""Rotate points angle (in deg) about an axis.
Parameters
----------
points : numpy.ndarray
Array of points with shape ``(N, 3)``
angle : float
Rotation angle.
inplace : bool, optional
Updates points in-place while returning nothing.
deg : bool, optional
If `True`, the angle is interpreted as degrees instead of
radians. Default is `True`.
axis : str, optional
Name of axis to rotate about. Valid options are ``'x'``, ``'y'``,
and ``'z'``. Default value is ``'z'``.
Returns
-------
points : numpy.ndarray
Rotated points.
Examples
--------
Rotate a set of points by 90 degrees about the x-axis in-place.
>>> import numpy as np
>>> import pyvista
>>> from pyvista import examples
>>> points = examples.load_airplane().points
>>> points_orig = points.copy()
>>> pyvista.axis_rotation(points, 90, axis='x', deg=True, inplace=True)
>>> assert np.all(np.isclose(points[:, 0], points_orig[:, 0]))
>>> assert np.all(np.isclose(points[:, 1], -points_orig[:, 2]))
>>> assert np.all(np.isclose(points[:, 2], points_orig[:, 1]))
"""
axis = axis.lower()
axis_to_vec = {
'x': (1, 0, 0),
'y': (0, 1, 0),
'z': (0, 0, 1)
}
if axis not in axis_to_vec:
raise ValueError('Invalid axis. Must be either "x", "y", or "z"')
rot_mat = transformations.axis_angle_rotation(axis_to_vec[axis], angle, deg=deg)
return transformations.apply_transformation_to_points(rot_mat, points, inplace=inplace)
|
imagefolder.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import http.server
import os
import re
import threading
import torch
import torch.utils.data.backward_compatibility
import torchvision.datasets as datasets
import torchvision.datasets.folder
import torchvision.transforms as transforms
from PIL import Image
from torch.utils.data import DataLoader
from torchdata.datapipes.iter import FileLister, HttpReader, IterDataPipe
IMAGES_ROOT = os.path.join("fakedata", "imagefolder")
USE_FORK_DATAPIPE = False
NUM_WORKERS = 5
BATCH_SIZE = None
data_transform = transforms.Compose(
[
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
]
)
# DataPipes implementation of ImageFolder constructs and executes graph of DataPipes (aka DataPipeline)
# FileLister -> ObtainCategories
# |
# V
# FileLister -> AttributeCategories -> LoadAndDecodeImages (using `map`) -> ApplyTorchVisionTransforms (using `map`)
def get_category_name(path):
rel_path = os.path.relpath(path, start=IMAGES_ROOT)
elements = rel_path.split(os.sep)
return elements[0]
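# For example, a path such as os.path.join(IMAGES_ROOT, "cat", "1.jpg") would map to the
# category name "cat" (the "cat"/"dog" folder names here mirror the HTTP paths used below).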
class ObtainCategories(IterDataPipe):
def __init__(self, source_dp, parse_category_fn=get_category_name) -> None:
self.source_dp = source_dp
self.parse_category_fn = parse_category_fn
def __iter__(self):
categories = set()
for path in self.source_dp:
categories.add(self.parse_category_fn(path))
cat_to_id = {name: i for i, name in enumerate(sorted(categories))}
yield cat_to_id
class AttributeCategories(IterDataPipe):
def __init__(self, listfiles_dp, categories_dp, parse_category_fn=get_category_name) -> None:
self.listfiles_dp = listfiles_dp
self.categories_dp = categories_dp
self.parse_category_fn = parse_category_fn
def __iter__(self):
for categories in self.categories_dp:
cat_to_dp = categories
for data in self.listfiles_dp:
if isinstance(data, tuple):
category = cat_to_dp[self.parse_category_fn(data[0])]
yield data + (category,)
else:
category = cat_to_dp[self.parse_category_fn(data)]
yield (data, category)
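# In short: ObtainCategories scans the file listing once and emits a single
# {category_name: id} dict, and AttributeCategories then tags every listed item
# (a bare path, or a (path, stream) tuple in the HTTP variant) with its category id.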
def MyImageFolder(root=IMAGES_ROOT, transform=None):
if not USE_FORK_DATAPIPE:
# Yes, we have to scan the files twice. Alternatively, it is possible to use
# the `fork` DataPipe, but that requires a buffer equal to the size of all
# full file names.
# TODO(125): Make sure that `fork` complains when buffer becomes
# too large
list_files_0 = FileLister(root=IMAGES_ROOT, recursive=True)
list_files_1 = FileLister(root=IMAGES_ROOT, recursive=True).sharding_filter()
else:
list_files_0, list_files_1 = FileLister(root=IMAGES_ROOT, recursive=True).fork(2)
list_files_1 = list_files_1.sharding_filter()
categories = ObtainCategories(list_files_0)
with_categories = AttributeCategories(list_files_1, categories)
using_default_loader = with_categories.map(lambda x: (torchvision.datasets.folder.default_loader(x[0]), x[1]))
transformed = using_default_loader.map(lambda x: (transform(x[0]), x[1]))
return transformed
class ExpandURLPatternDataPipe(IterDataPipe):
def __init__(self, pattern) -> None:
result = re.match(r"(.*?)\{(.*?)}(.*)", pattern)
if result:
self.prefix = result.group(1)
self.pattern = result.group(2)
self.postfix = result.group(3)
result = re.match(r"(\d+)\.\.(\d+)", self.pattern)
if result:
self.start_str = result.group(1)
self.end_str = result.group(2)
else:
raise Exception("Invalid pattern")
else:
raise Exception("Invalid pattern")
def __iter__(self):
current_int = int(self.start_str)
end_int = int(self.end_str)
for i in range(current_int, end_int + 1):
str_i = str(i)
while len(str_i) < len(self.start_str):
str_i = "0" + str_i
yield self.prefix + str_i + self.postfix
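# Illustration: the pattern "http://localhost:8000/cat/{1..3}.jpg" (see HTTP_PATH_CAT below)
# expands to ".../cat/1.jpg", ".../cat/2.jpg" and ".../cat/3.jpg"; the while-loop above keeps
# any zero-padding present in the start number (e.g. "{01..10}").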
HTTP_PATH_ROOT = "http://localhost:8000/"
HTTP_PATH_CAT = "http://localhost:8000/cat/{1..3}.jpg"
HTTP_PATH_DOG = "http://localhost:8000/dog/{1..3}.jpg"
def get_category_name_url(url):
rel_path = os.path.relpath(url, start=HTTP_PATH_ROOT)
elements = rel_path.split(os.sep)
return elements[0]
def stream_to_pil(stream):
img = Image.open(stream)
return img.convert("RGB")
def MyHTTPImageFolder(transform=None):
# The HTTP protocol doesn't support listing files, so we have to provide the URLs explicitly
list_files = ExpandURLPatternDataPipe(HTTP_PATH_CAT) + ExpandURLPatternDataPipe(HTTP_PATH_DOG)
list_files_0, list_files_1 = list_files.fork(2)
list_files_1 = list_files_1.sharding_filter().shuffle()
categories = ObtainCategories(list_files_0, parse_category_fn=get_category_name_url)
loaded_files = HttpReader(list_files_1)
with_categories = AttributeCategories(loaded_files, categories, parse_category_fn=get_category_name_url)
pil_images = with_categories.map(lambda x: (x[0], stream_to_pil(x[1]), x[2]))
transformed = pil_images.map(lambda x: (transform(x[1]), x[2]))
return transformed
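# Note: this HTTP pipeline mirrors MyImageFolder above, except that HttpReader yields
# (url, byte-stream) tuples, so images are decoded from the stream with PIL before the
# torchvision transforms are applied.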
if __name__ == "__main__":
dataset = datasets.ImageFolder(root=IMAGES_ROOT, transform=data_transform)
dl = DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=NUM_WORKERS)
items = list(dl)
assert len(items) == 6
dataset = MyImageFolder(root=IMAGES_ROOT, transform=data_transform)
dl = DataLoader(
dataset,
batch_size=BATCH_SIZE,
shuffle=False,
num_workers=NUM_WORKERS,
worker_init_fn=torch.utils.data.backward_compatibility.worker_init_fn,
)
items = list(dl)
assert len(items) == 6
http_handler = http.server.SimpleHTTPRequestHandler
http_handler.log_message = lambda self, fmt, *args: None
httpd = http.server.HTTPServer(("", 8000), http_handler)
os.chdir(IMAGES_ROOT)
thread = threading.Thread(target=httpd.serve_forever)
thread.start()
dataset = MyHTTPImageFolder(transform=data_transform)
dl = DataLoader(
dataset,
batch_size=BATCH_SIZE,
shuffle=False,
num_workers=NUM_WORKERS,
worker_init_fn=torch.utils.data.backward_compatibility.worker_init_fn,
)
try:
items = list(dl)
assert len(items) == 6
finally:
httpd.shutdown()
|
test_venv.py
|
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import
import errno
import multiprocessing
import os
import shutil
import subprocess
import sys
import tempfile
from subprocess import CalledProcessError
from textwrap import dedent
import pytest
from pex.common import safe_mkdtemp, safe_open, temporary_dir, touch
from pex.compatibility import PY2
from pex.executor import Executor
from pex.interpreter import PythonInterpreter
from pex.layout import Layout
from pex.pex_builder import CopyMode, PEXBuilder
from pex.testing import IS_PYPY, PY310, PY_VER, ensure_python_interpreter, run_pex_command
from pex.typing import TYPE_CHECKING, cast
from pex.util import named_temporary_file
from pex.venv.virtualenv import Virtualenv
if TYPE_CHECKING:
from typing import Any, Dict, Iterable, Iterator, List, Optional, Protocol, Set, Text, Tuple
class CreatePexVenv(Protocol):
def __call__(self, *options):
# type: (*str) -> Virtualenv
pass
FABRIC_VERSION = "2.5.0"
@pytest.fixture(scope="module")
def pex():
# type: () -> Iterator[str]
with temporary_dir() as tmpdir:
pex_path = os.path.join(tmpdir, "fabric.pex")
src_dir = os.path.join(tmpdir, "src")
touch(os.path.join(src_dir, "user/__init__.py"))
touch(os.path.join(src_dir, "user/package/__init__.py"))
# Fabric dips into Invoke's vendored code. It depends on "invoke<2.0,>=1.3", but in version
# 1.7.0 the vendored `decorator` module that Fabric relies on inside Invoke is no longer
# importable under Python 2.7; so we pin low.
constraints = os.path.join(tmpdir, "constraints.txt")
with open(constraints, "w") as fp:
fp.write("Invoke==1.6.0")
# N.B.: --unzip just speeds up runs 2+ of the pex file and is otherwise not relevant to
# these tests.
run_pex_command(
args=[
"fabric=={}".format(FABRIC_VERSION),
"--constraints",
constraints,
"-c",
"fab",
"--sources-directory",
src_dir,
"-o",
pex_path,
"--include-tools",
]
)
yield os.path.realpath(pex_path)
def make_env(**kwargs):
# type: (**Any) -> Dict[str, str]
env = os.environ.copy()
env.update((k, str(v)) for k, v in kwargs.items())
return env
@pytest.fixture
def create_pex_venv(pex):
# type: (str) -> Iterator[CreatePexVenv]
with temporary_dir() as tmpdir:
venv_dir = os.path.join(tmpdir, "venv")
def _create_pex_venv(*options):
# type: (*str) -> Virtualenv
subprocess.check_call(
args=[pex, "venv", venv_dir] + list(options or ()), env=make_env(PEX_TOOLS="1")
)
return Virtualenv(venv_dir)
yield _create_pex_venv
def test_force(create_pex_venv):
# type: (CreatePexVenv) -> None
venv = create_pex_venv("--pip")
venv.interpreter.execute(args=["-m", "pip", "install", "ansicolors==1.1.8"])
venv.interpreter.execute(args=["-c", "import colors"])
with pytest.raises(CalledProcessError):
create_pex_venv()
venv_force = create_pex_venv("--force")
# The re-created venv should not have ansicolors installed, unlike the prior venv.
with pytest.raises(Executor.NonZeroExit):
venv_force.interpreter.execute(args=["-c", "import colors"])
# The re-created venv should have no pip installed either.
with pytest.raises(Executor.NonZeroExit):
venv_force.interpreter.execute(args=["-m", "pip", "install", "ansicolors==1.1.8"])
def execute_venv_pex_interpreter(
venv, # type: Virtualenv
code=None, # type: Optional[str]
extra_args=(), # type: Iterable[str]
**extra_env # type: Any
):
# type: (...) -> Tuple[int, Text, Text]
process = subprocess.Popen(
args=[venv.join_path("pex")] + list(extra_args),
env=make_env(PEX_INTERPRETER=True, **extra_env),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE,
)
stdout, stderr = process.communicate(input=None if code is None else code.encode())
return process.returncode, stdout.decode("utf-8"), stderr.decode("utf-8")
def expected_file_path(
venv, # type: Virtualenv
package, # type: str
):
# type: (...) -> str
return os.path.realpath(
os.path.join(
venv.site_packages_dir,
os.path.sep.join(package.split(".")),
"__init__.{ext}".format(ext="pyc" if venv.interpreter.version[0] == 2 else "py"),
)
)
def parse_fabric_version_output(output):
# type: (Text) -> Dict[Text, Text]
return dict(cast("Tuple[Text, Text]", line.split(" ", 1)) for line in output.splitlines())
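# For example (values mirror the sample `fab -V` output shown in test_venv_pex below):
# parse_fabric_version_output("Fabric 2.5.0\nInvoke 1.4.1") == {"Fabric": "2.5.0", "Invoke": "1.4.1"}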
def test_venv_pex(create_pex_venv):
# type: (CreatePexVenv) -> None
venv = create_pex_venv()
venv_pex = venv.join_path("pex")
fabric_output = subprocess.check_output(args=[venv_pex, "-V"])
# N.B.: `fab -V` output looks like so:
# $ fab -V
# Fabric 2.5.0
# Paramiko 2.7.2
# Invoke 1.4.1
versions = parse_fabric_version_output(fabric_output.decode("utf-8"))
assert FABRIC_VERSION == versions["Fabric"]
invoke_version = "Invoke {}".format(versions["Invoke"])
invoke_script_output = subprocess.check_output(
args=[venv_pex, "-V"], env=make_env(PEX_SCRIPT="invoke")
)
assert invoke_version == invoke_script_output.decode("utf-8").strip()
invoke_entry_point_output = subprocess.check_output(
args=[venv_pex, "-V"],
env=make_env(PEX_MODULE="invoke.main:program.run"),
)
assert invoke_version == invoke_entry_point_output.decode("utf-8").strip()
pex_extra_sys_path = ["/dev/null", "Bob"]
returncode, _, stderr = execute_venv_pex_interpreter(
venv,
code=dedent(
"""\
from __future__ import print_function
import os
import sys
def assert_equal(test_num, expected, actual):
if expected == actual:
return
print(
"[{{}}] Expected {{}} but got {{}}".format(test_num, expected, actual),
file=sys.stderr,
)
sys.exit(test_num)
assert_equal(1, {pex_extra_sys_path!r}, sys.path[-2:])
import fabric
assert_equal(2, {fabric!r}, os.path.realpath(fabric.__file__))
import user.package
assert_equal(3, {user_package!r}, os.path.realpath(user.package.__file__))
""".format(
pex_extra_sys_path=pex_extra_sys_path,
fabric=expected_file_path(venv, "fabric"),
user_package=expected_file_path(venv, "user.package"),
)
),
PEX_EXTRA_SYS_PATH=os.pathsep.join(pex_extra_sys_path),
)
assert 0 == returncode, stderr
def test_binary_path(create_pex_venv):
# type: (CreatePexVenv) -> None
code = dedent(
"""\
import errno
import subprocess
import sys
# PEXed code should be able to find all (console) scripts on the $PATH when the venv is
# created with --bin-path set, and the scripts should all run with the venv interpreter in
# order to find their code.
def try_invoke(*args):
try:
subprocess.check_call(list(args))
return 0
except OSError as e:
if e.errno == errno.ENOENT:
# This is what we expect when scripts are not set up on PATH via --bin-path.
return 1
return 2
exit_code = try_invoke("fab", "-V")
exit_code += 10 * try_invoke("inv", "-V")
exit_code += 100 * try_invoke("invoke", "-V")
sys.exit(exit_code)
"""
)
venv = create_pex_venv()
returncode, stdout, stderr = execute_venv_pex_interpreter(
venv, code=code, PATH=tempfile.gettempdir()
)
assert 111 == returncode, stdout + stderr
venv_bin_path = create_pex_venv("-f", "--bin-path", "prepend")
returncode, _, _ = execute_venv_pex_interpreter(
venv_bin_path, code=code, PATH=tempfile.gettempdir()
)
assert 0 == returncode
def test_venv_pex_interpreter_special_modes(create_pex_venv):
# type: (CreatePexVenv) -> None
venv = create_pex_venv()
# special mode execute module: -m module
returncode, stdout, stderr = execute_venv_pex_interpreter(venv, extra_args=["-m"])
assert 2 == returncode, stderr
assert "" == stdout
returncode, stdout, stderr = execute_venv_pex_interpreter(
venv, extra_args=["-m", "fabric", "--version"]
)
assert 0 == returncode, stderr
versions = parse_fabric_version_output(stdout)
assert FABRIC_VERSION == versions["Fabric"]
# special mode execute code string: -c <str>
returncode, stdout, stderr = execute_venv_pex_interpreter(venv, extra_args=["-c"])
assert 2 == returncode, stderr
assert "" == stdout
fabric_file_code = "import fabric, os; print(os.path.realpath(fabric.__file__))"
expected_fabric_file_path = expected_file_path(venv, "fabric")
returncode, stdout, stderr = execute_venv_pex_interpreter(
venv, extra_args=["-c", fabric_file_code]
)
assert 0 == returncode, stderr
assert expected_fabric_file_path == stdout.strip()
# special mode execute stdin: -
returncode, stdout, stderr = execute_venv_pex_interpreter(
venv, code=fabric_file_code, extra_args=["-"]
)
assert 0 == returncode, stderr
assert expected_fabric_file_path == stdout.strip()
# special mode execute python file: <py file name>
with named_temporary_file(prefix="code", suffix=".py", mode="w") as fp:
fp.write(fabric_file_code)
fp.close()
returncode, stdout, stderr = execute_venv_pex_interpreter(
venv, code=fabric_file_code, extra_args=[fp.name]
)
assert 0 == returncode, stderr
assert expected_fabric_file_path == stdout.strip()
@pytest.mark.parametrize(
"start_method", getattr(multiprocessing, "get_all_start_methods", lambda: [None])()
)
def test_venv_multiprocessing_issues_1236(
tmpdir, # type: Any
start_method, # type: Optional[str]
):
# type: (...) -> None
src = os.path.join(str(tmpdir), "src")
with safe_open(os.path.join(src, "foo.py"), "w") as fp:
fp.write(
dedent(
"""\
def bar():
print('hello')
"""
)
)
with safe_open(os.path.join(src, "main.py"), "w") as fp:
fp.write(
dedent(
"""\
import multiprocessing
from foo import bar
if __name__ == '__main__':
if {start_method!r}:
multiprocessing.set_start_method({start_method!r})
p = multiprocessing.Process(target=bar)
p.start()
""".format(
start_method=start_method
)
)
)
pex_file = os.path.join(str(tmpdir), "mp.pex")
result = run_pex_command(args=["-D", src, "-m", "main", "-o", pex_file, "--include-tools"])
result.assert_success()
# Confirm multiprocessing works via normal PEX file execution.
output = subprocess.check_output(args=[pex_file])
assert "hello" == output.decode("utf-8").strip()
# Confirm multiprocessing works via the `pex` venv script.
venv = os.path.join(str(tmpdir), "venv")
subprocess.check_call(args=[pex_file, "venv", venv], env=make_env(PEX_TOOLS=True))
output = subprocess.check_output(args=[os.path.join(venv, "pex")])
assert "hello" == output.decode("utf-8").strip()
def test_venv_symlinked_source_issues_1239(tmpdir):
# type: (Any) -> None
src = os.path.join(str(tmpdir), "src")
main = os.path.join(src, "main.py")
with safe_open(main, "w") as fp:
fp.write("import sys; sys.exit(42)")
pex_builder = PEXBuilder(copy_mode=CopyMode.SYMLINK)
pex_builder.set_executable(main)
pex_file = os.path.join(str(tmpdir), "a.pex")
pex_builder.build(pex_file, bytecode_compile=False)
assert 42 == subprocess.Popen(args=[pex_file]).wait()
venv = os.path.join(str(tmpdir), "a.venv")
subprocess.check_call(
args=[sys.executable, "-m", "pex.tools", pex_builder.path(), "venv", venv]
)
venv_pex = os.path.join(venv, "pex")
shutil.rmtree(src)
assert 42 == subprocess.Popen(args=[venv_pex]).wait()
def test_venv_entrypoint_function_exit_code_issue_1241(tmpdir):
# type: (Any) -> None
pex_file = os.path.join(str(tmpdir), "ep-function.pex")
src = os.path.join(str(tmpdir), "src")
with safe_open(os.path.join(src, "module.py"), "w") as fp:
fp.write(
dedent(
"""\
import sys
def target():
args = sys.argv[1:]
if args:
exit = args[0]
try:
return int(exit)
except ValueError:
return exit
"""
)
)
result = run_pex_command(
args=["-D", src, "-e", "module:target", "--include-tools", "-o", pex_file]
)
result.assert_success()
venv = os.path.join(str(tmpdir), "ep-function.venv")
subprocess.check_call(args=[pex_file, "venv", venv], env=make_env(PEX_TOOLS=1))
venv_pex = os.path.join(venv, "pex")
assert 0 == subprocess.Popen(args=[venv_pex]).wait()
def assert_venv_process(
args, # type: List[str]
expected_returncode, # type: int
expected_stdout="", # type: str
expected_stderr="", # type: str
):
# type: (...) -> None
process = subprocess.Popen(
args=[venv_pex] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
stdout, stderr = process.communicate()
assert expected_returncode == process.returncode
assert expected_stdout == stdout.decode("utf-8")
assert expected_stderr == stderr.decode("utf-8")
assert_venv_process(args=["bob"], expected_returncode=1, expected_stderr="bob\n")
assert_venv_process(args=["42"], expected_returncode=42)
def test_venv_copies(tmpdir):
# type: (Any) -> None
python310 = ensure_python_interpreter(PY310)
pex_file = os.path.join(str(tmpdir), "venv.pex")
result = run_pex_command(args=["-o", pex_file, "--include-tools"], python=python310)
result.assert_success()
PEX_TOOLS = make_env(PEX_TOOLS=1)
venv_symlinks = os.path.join(str(tmpdir), "venv.symlinks")
subprocess.check_call(args=[python310, pex_file, "venv", venv_symlinks], env=PEX_TOOLS)
venv_symlinks_interpreter = PythonInterpreter.from_binary(
os.path.join(venv_symlinks, "bin", "python")
)
assert os.path.islink(venv_symlinks_interpreter.binary)
venv_copies = os.path.join(str(tmpdir), "venv.copies")
subprocess.check_call(
args=[python310, pex_file, "venv", "--copies", venv_copies], env=PEX_TOOLS
)
venv_copies_interpreter = PythonInterpreter.from_binary(
os.path.join(venv_copies, "bin", "python")
)
assert not os.path.islink(venv_copies_interpreter.binary)
def test_relocatable_venv(tmpdir):
# type: (Any) -> None
pex_file = os.path.join(str(tmpdir), "relocatable.pex")
src = os.path.join(str(tmpdir), "src")
with safe_open(os.path.join(src, "main.py"), "w") as fp:
fp.write(
dedent(
"""\
import sys
from colors import blue
print(blue(sys.executable))
"""
)
)
result = run_pex_command(
args=["-D", src, "ansicolors==1.1.8", "-m", "main", "--include-tools", "-o", pex_file]
)
result.assert_success()
venv = os.path.join(str(tmpdir), "relocatable.venv")
subprocess.check_call(args=[pex_file, "venv", venv], env=make_env(PEX_TOOLS=1))
subprocess.check_call(args=[os.path.join(venv, "pex")])
relocated_relpath = "relocated.venv"
relocated_venv = os.path.join(str(tmpdir), relocated_relpath)
# Since the venv pex script contains a shebang with an absolute path to the venv python
# interpreter, a move of the venv makes the script un-runnable directly.
shutil.move(venv, relocated_venv)
with pytest.raises(OSError) as exec_info:
subprocess.check_call(args=[os.path.join(relocated_venv, "pex")])
assert errno.ENOENT == exec_info.value.errno
# But we should be able to run the script using the moved venv's interpreter.
subprocess.check_call(
args=[
os.path.join(relocated_relpath, "bin", "python"),
os.path.join(relocated_relpath, "pex"),
],
cwd=str(tmpdir),
)
def test_compile(tmpdir):
# type: (Any) -> None
def collect_files(
root_dir, # type: str
extension, # type: str
):
# type: (...) -> Set[str]
return {
os.path.relpath(os.path.join(root, f), root_dir)
for root, _, files in os.walk(root_dir, followlinks=False)
for f in files
if f.endswith(extension)
}
pex_file = os.path.join(str(tmpdir), "compile.pex")
src = os.path.join(str(tmpdir), "src")
with safe_open(os.path.join(src, "main.py"), "w") as fp:
fp.write(
dedent(
"""\
from colors import yellow
print(yellow("Slartibartfast"))
"""
)
)
result = run_pex_command(
args=["-D", src, "ansicolors==1.0.2", "-m", "main", "--include-tools", "-o", pex_file]
)
result.assert_success()
venv = os.path.join(str(tmpdir), "venv")
subprocess.check_call(args=[pex_file, "venv", venv], env=make_env(PEX_TOOLS=1))
# N.B.: The right way to discover the site-packages dir is via site.getsitepackages().
# Unfortunately we use an old version of virtualenv to create PyPy <= 3.7 and CPython 2.7 venvs
# and it does not add a getsitepackages function to site.py; so we cheat.
if IS_PYPY and PY_VER <= (3, 7):
site_packages = "site-packages"
else:
site_packages = os.path.join(
"lib",
"{python}{major}.{minor}".format(
python="pypy" if IS_PYPY else "python",
major=sys.version_info[0],
minor=sys.version_info[1],
),
"site-packages",
)
# Ensure we have at least the basic direct dependency python files we expect.
venv_py_files = collect_files(venv, ".py")
assert os.path.join(site_packages, "main.py") in venv_py_files
assert os.path.join(site_packages, "colors.py") in venv_py_files
assert "__main__.py" in venv_py_files
compile_venv = os.path.join(str(tmpdir), "compile.venv")
subprocess.check_call(
args=[pex_file, "venv", "--compile", compile_venv], env=make_env(PEX_TOOLS=1)
)
# Ensure all original py files have a compiled counterpart.
for py_file in venv_py_files:
if PY2:
assert os.path.exists(os.path.join(compile_venv, py_file + "c"))
else:
name, _ = os.path.splitext(os.path.basename(py_file))
assert os.path.exists(
os.path.join(
compile_venv,
os.path.dirname(py_file),
"__pycache__",
"{name}.{cache_tag}.pyc".format(
name=name, cache_tag=sys.implementation.cache_tag
),
)
)
compile_venv_pyc_files = collect_files(compile_venv, ".pyc")
subprocess.check_call(args=[os.path.join(compile_venv, "pex")])
assert compile_venv_pyc_files == collect_files(
compile_venv, ".pyc"
), "Expected no new compiled python files."
def test_strip_pex_env(tmpdir):
# type: (Any) -> None
def create_pex_venv(strip_pex_env):
# type: (bool) -> str
pex = os.path.join(str(tmpdir), "strip_{}.pex".format(strip_pex_env))
run_pex_command(
args=[
"--strip-pex-env" if strip_pex_env else "--no-strip-pex-env",
"--include-tools",
"-o",
pex,
]
).assert_success()
venv = os.path.join(str(tmpdir), "strip_{}.venv".format(strip_pex_env))
subprocess.check_call(args=[pex, "venv", venv], env=make_env(PEX_TOOLS=1))
return venv
check_pex_env_vars_code = dedent(
"""\
from __future__ import print_function
import os
import sys
pex_env_vars = 0
for name, value in os.environ.items():
if name.startswith("PEX_"):
pex_env_vars += 1
print(
"Un-stripped: {name}={value}".format(name=name, value=value), file=sys.stderr
)
sys.exit(pex_env_vars)
"""
)
two_pex_env_vars = {
name: value
for name, value in make_env(PEX_ROOT="42", PEX_TOOLS=1).items()
if name in ("PEX_ROOT", "PEX_TOOLS") or not name.startswith("PEX_")
}
assert 2 == len([name for name in two_pex_env_vars if name.startswith("PEX_")])
strip_venv = create_pex_venv(strip_pex_env=True)
subprocess.check_call(
args=[os.path.join(strip_venv, "pex"), "-c", check_pex_env_vars_code], env=two_pex_env_vars
)
no_strip_venv = create_pex_venv(strip_pex_env=False)
process = subprocess.Popen(
args=[os.path.join(no_strip_venv, "pex"), "-c", check_pex_env_vars_code],
env=two_pex_env_vars,
)
assert 2 == process.wait()
def test_warn_unused_pex_env_vars():
# type: () -> None
# N.B.: We don't use the pytest tmpdir fixture here since it creates fairly lengthy paths under
# /tmp, and under macOS, where TMPDIR is already fairly deeply nested, that triggers Pex warnings
# about script shebang length. Those warnings pollute stderr.
tmpdir = safe_mkdtemp()
venv_pex = os.path.join(tmpdir, "venv.pex")
run_pex_command(["--venv", "-o", venv_pex]).assert_success()
def assert_execute_venv_pex(expected_stderr, **env_vars):
env = os.environ.copy()
env.update(env_vars)
process = subprocess.Popen(
[venv_pex, "-c", ""], stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env
)
stdout, stderr = process.communicate()
assert 0 == process.returncode
assert not stdout
assert expected_stderr.strip() == stderr.decode("utf-8").strip()
assert_execute_venv_pex(expected_stderr="")
assert_execute_venv_pex(expected_stderr="", PEX_ROOT=os.path.join(tmpdir, "pex_root"))
assert_execute_venv_pex(expected_stderr="", PEX_VENV="1")
assert_execute_venv_pex(expected_stderr="", PEX_EXTRA_SYS_PATH="more")
assert_execute_venv_pex(expected_stderr="", PEX_VERBOSE="0")
assert_execute_venv_pex(
expected_stderr=dedent(
"""\
Ignoring the following environment variables in Pex venv mode:
PEX_INHERIT_PATH=fallback
"""
),
PEX_INHERIT_PATH="fallback",
)
assert_execute_venv_pex(
expected_stderr=dedent(
"""\
Ignoring the following environment variables in Pex venv mode:
PEX_COVERAGE=1
PEX_INHERIT_PATH=fallback
"""
),
PEX_COVERAGE="1",
PEX_INHERIT_PATH="fallback",
PEX_VERBOSE="0",
)
def test_custom_prompt(tmpdir):
# type: (Any) -> None
pex_root = os.path.join(str(tmpdir), "pex_root")
venv_pex = os.path.join(str(tmpdir), "venv.pex")
run_pex_command(
args=[
"--pex-root",
pex_root,
"--runtime-pex-root",
pex_root,
"-o",
venv_pex,
"--include-tools",
]
).assert_success()
venv_dir = os.path.join(str(tmpdir), "venv_dir")
subprocess.check_call(
args=[venv_pex, "venv", "--prompt", "jane", venv_dir], env=make_env(PEX_TOOLS=True)
)
if PY_VER == (2, 7) or (IS_PYPY and PY_VER <= (3, 7)):
# Neither CPython 2.7 nor PyPy interpreters have (functioning) venv modules; so we create
# their venvs with an old copy of virtualenv that does not surround the prompt with parens.
expected_prompt = "jane"
elif PY_VER == (3, 5):
# We can't set the prompt for CPython 3.5 so we expect the name of the venv dir.
expected_prompt = "(venv_dir)"
else:
expected_prompt = "(jane)"
output = subprocess.check_output(
args=[
"/usr/bin/env",
"bash",
"-c",
"source {} && echo $PS1".format(os.path.join(venv_dir, "bin", "activate")),
],
env=make_env(TERM="dumb", COLS=80),
)
assert expected_prompt == output.decode("utf-8").strip()
@pytest.mark.parametrize(
"layout", [pytest.param(layout, id=layout.value) for layout in Layout.values()]
)
def test_remove(
tmpdir,
layout, # type: Layout.Value
):
# type: (...) -> None
pex_root = os.path.join(str(tmpdir), "pex_root")
def create_venv_pex():
# type: () -> str
venv_pex = os.path.join(str(tmpdir), "venv.pex")
run_pex_command(
args=[
"--pex-root",
pex_root,
"--runtime-pex-root",
pex_root,
"-o",
venv_pex,
"--include-tools",
]
).assert_success()
return venv_pex
venv_dir = os.path.join(str(tmpdir), "venv_dir")
assert not os.path.exists(venv_dir)
venv_pex = create_venv_pex()
subprocess.check_call(args=[venv_pex, "venv", venv_dir], env=make_env(PEX_TOOLS=True))
assert os.path.exists(venv_dir)
assert os.path.exists(venv_pex)
assert os.path.exists(pex_root)
shutil.rmtree(venv_dir)
assert not os.path.exists(venv_dir)
subprocess.check_call(
args=[venv_pex, "venv", "--rm", "pex", venv_dir], env=make_env(PEX_TOOLS=True)
)
assert os.path.exists(venv_dir)
assert not os.path.exists(venv_pex)
assert os.path.exists(pex_root)
shutil.rmtree(venv_dir)
assert not os.path.exists(venv_dir)
venv_pex = create_venv_pex()
subprocess.check_call(
args=[venv_pex, "venv", "--rm", "all", venv_dir], env=make_env(PEX_TOOLS=True)
)
assert os.path.exists(venv_dir)
assert not os.path.exists(venv_pex)
assert not os.path.exists(pex_root)
|
executorwebdriver.py
|
import json
import os
import socket
import threading
import time
import traceback
import urlparse
import uuid
from .base import (CallbackHandler,
RefTestExecutor,
RefTestImplementation,
TestharnessExecutor,
extra_timeout,
strip_server)
from .protocol import (BaseProtocolPart,
TestharnessProtocolPart,
Protocol,
SelectorProtocolPart,
ClickProtocolPart,
SendKeysProtocolPart,
ActionSequenceProtocolPart,
TestDriverProtocolPart)
from ..testrunner import Stop
import webdriver as client
here = os.path.join(os.path.split(__file__)[0])
class WebDriverBaseProtocolPart(BaseProtocolPart):
def setup(self):
self.webdriver = self.parent.webdriver
def execute_script(self, script, async=False):
method = self.webdriver.execute_async_script if async else self.webdriver.execute_script
return method(script)
def set_timeout(self, timeout):
try:
self.webdriver.timeouts.script = timeout
except client.WebDriverException:
# workaround https://bugs.chromium.org/p/chromedriver/issues/detail?id=2057
body = {"type": "script", "ms": timeout * 1000}
self.webdriver.send_session_command("POST", "timeouts", body)
@property
def current_window(self):
return self.webdriver.window_handle
def set_window(self, handle):
self.webdriver.window_handle = handle
def wait(self):
while True:
try:
self.webdriver.execute_async_script("")
except (client.TimeoutException, client.ScriptTimeoutException):
pass
except (socket.timeout, client.NoSuchWindowException,
client.UnknownErrorException, IOError):
break
except Exception as e:
self.logger.error(traceback.format_exc())
break
class WebDriverTestharnessProtocolPart(TestharnessProtocolPart):
def setup(self):
self.webdriver = self.parent.webdriver
self.runner_handle = None
with open(os.path.join(here, "runner.js")) as f:
self.runner_script = f.read()
def load_runner(self, url_protocol):
if self.runner_handle:
self.webdriver.window_handle = self.runner_handle
url = urlparse.urljoin(self.parent.executor.server_url(url_protocol),
"/testharness_runner.html")
self.logger.debug("Loading %s" % url)
self.webdriver.url = url
self.runner_handle = self.webdriver.window_handle
format_map = {"title": threading.current_thread().name.replace("'", '"')}
self.parent.base.execute_script(self.runner_script % format_map)
def close_old_windows(self):
handles = [item for item in self.webdriver.handles if item != self.runner_handle]
for handle in handles:
try:
self.webdriver.window_handle = handle
self.webdriver.close()
except client.NoSuchWindowException:
pass
self.webdriver.window_handle = self.runner_handle
return self.runner_handle
def get_test_window(self, window_id, parent, timeout=5):
"""Find the test window amongst all the open windows.
This is assumed to be either the named window or the one after the parent in the list of
window handles
:param window_id: The DOM name of the Window
:param parent: The handle of the runner window
:param timeout: The time in seconds to wait for the window to appear. This is because in
some implementations there's a race between calling window.open and the
window being added to the list of WebDriver accessible windows."""
test_window = None
end_time = time.time() + timeout
while time.time() < end_time:
try:
# Try using the JSON serialization of the WindowProxy object,
# it's in Level 1 but nothing supports it yet
win_s = self.webdriver.execute_script("return window['%s'];" % window_id)
win_obj = json.loads(win_s)
test_window = win_obj["window-fcc6-11e5-b4f8-330a88ab9d7f"]
except Exception:
pass
if test_window is None:
after = self.webdriver.handles
if len(after) == 2:
test_window = next(iter(set(after) - set([parent])))
elif after[0] == parent and len(after) > 2:
# Hope the first one here is the test window
test_window = after[1]
if test_window is not None:
assert test_window != parent
return test_window
time.sleep(0.1)
raise Exception("unable to find test window")
class WebDriverSelectorProtocolPart(SelectorProtocolPart):
def setup(self):
self.webdriver = self.parent.webdriver
def elements_by_selector(self, selector):
return self.webdriver.find.css(selector)
class WebDriverClickProtocolPart(ClickProtocolPart):
def setup(self):
self.webdriver = self.parent.webdriver
def element(self, element):
self.logger.info("click " + repr(element))
return element.click()
class WebDriverSendKeysProtocolPart(SendKeysProtocolPart):
def setup(self):
self.webdriver = self.parent.webdriver
def send_keys(self, element, keys):
try:
return element.send_keys(keys)
except client.UnknownErrorException as e:
# workaround https://bugs.chromium.org/p/chromedriver/issues/detail?id=1999
if (e.http_status != 500 or
e.status_code != "unknown error"):
raise
return element.send_element_command("POST", "value", {"value": list(keys)})
class WebDriverActionSequenceProtocolPart(ActionSequenceProtocolPart):
def setup(self):
self.webdriver = self.parent.webdriver
def send_actions(self, actions):
self.webdriver.actions.perform(actions['actions'])
class WebDriverTestDriverProtocolPart(TestDriverProtocolPart):
def setup(self):
self.webdriver = self.parent.webdriver
def send_message(self, message_type, status, message=None):
obj = {
"type": "testdriver-%s" % str(message_type),
"status": str(status)
}
if message:
obj["message"] = str(message)
self.webdriver.execute_script("window.postMessage(%s, '*')" % json.dumps(obj))
class WebDriverProtocol(Protocol):
implements = [WebDriverBaseProtocolPart,
WebDriverTestharnessProtocolPart,
WebDriverSelectorProtocolPart,
WebDriverClickProtocolPart,
WebDriverSendKeysProtocolPart,
WebDriverActionSequenceProtocolPart,
WebDriverTestDriverProtocolPart]
def __init__(self, executor, browser, capabilities, **kwargs):
super(WebDriverProtocol, self).__init__(executor, browser)
self.capabilities = capabilities
self.url = browser.webdriver_url
self.webdriver = None
def connect(self):
"""Connect to browser via WebDriver."""
self.logger.debug("Connecting to WebDriver on URL: %s" % self.url)
host, port = self.url.split(":")[1].strip("/"), self.url.split(':')[-1].strip("/")
capabilities = {"alwaysMatch": self.capabilities}
self.webdriver = client.Session(host, port, capabilities=capabilities)
self.webdriver.start()
def after_conect(self):
pass
def teardown(self):
self.logger.debug("Hanging up on WebDriver session")
try:
self.webdriver.quit()
except Exception:
pass
del self.webdriver
def is_alive(self):
try:
# Get a simple property over the connection
self.webdriver.window_handle
except (socket.timeout, client.UnknownErrorException):
return False
return True
def after_connect(self):
self.testharness.load_runner(self.executor.last_environment["protocol"])
class WebDriverRun(object):
def __init__(self, func, protocol, url, timeout):
self.func = func
self.result = None
self.protocol = protocol
self.url = url
self.timeout = timeout
self.result_flag = threading.Event()
def run(self):
timeout = self.timeout
try:
self.protocol.base.set_timeout((timeout + extra_timeout))
except client.UnknownErrorException:
self.logger.error("Lost WebDriver connection")
return Stop
executor = threading.Thread(target=self._run)
executor.start()
flag = self.result_flag.wait(timeout + 2 * extra_timeout)
if self.result is None:
if flag:
# flag is True unless we timeout; this *shouldn't* happen, but
# it can if self._run fails to set self.result due to raising
self.result = False, ("INTERNAL-ERROR", "self._run didn't set a result")
else:
self.result = False, ("EXTERNAL-TIMEOUT", None)
return self.result
def _run(self):
try:
self.result = True, self.func(self.protocol, self.url, self.timeout)
except (client.TimeoutException, client.ScriptTimeoutException):
self.result = False, ("EXTERNAL-TIMEOUT", None)
except (socket.timeout, client.UnknownErrorException):
self.result = False, ("CRASH", None)
except Exception as e:
if (isinstance(e, client.WebDriverException) and
e.http_status == 408 and
e.status_code == "asynchronous script timeout"):
# workaround for https://bugs.chromium.org/p/chromedriver/issues/detail?id=2001
self.result = False, ("EXTERNAL-TIMEOUT", None)
else:
message = str(getattr(e, "message", ""))
if message:
message += "\n"
message += traceback.format_exc()
self.result = False, ("INTERNAL-ERROR", message)
finally:
self.result_flag.set()
class WebDriverTestharnessExecutor(TestharnessExecutor):
supports_testdriver = True
def __init__(self, browser, server_config, timeout_multiplier=1,
close_after_done=True, capabilities=None, debug_info=None,
supports_eager_pageload=True, **kwargs):
"""WebDriver-based executor for testharness.js tests"""
TestharnessExecutor.__init__(self, browser, server_config,
timeout_multiplier=timeout_multiplier,
debug_info=debug_info)
self.protocol = WebDriverProtocol(self, browser, capabilities)
with open(os.path.join(here, "testharness_webdriver_resume.js")) as f:
self.script_resume = f.read()
self.close_after_done = close_after_done
self.window_id = str(uuid.uuid4())
self.supports_eager_pageload = supports_eager_pageload
def is_alive(self):
return self.protocol.is_alive()
def on_environment_change(self, new_environment):
if new_environment["protocol"] != self.last_environment["protocol"]:
self.protocol.testharness.load_runner(new_environment["protocol"])
def do_test(self, test):
url = self.test_url(test)
success, data = WebDriverRun(self.do_testharness,
self.protocol,
url,
test.timeout * self.timeout_multiplier).run()
if success:
return self.convert_result(test, data)
return (test.result_cls(*data), [])
def do_testharness(self, protocol, url, timeout):
format_map = {"url": strip_server(url)}
parent_window = protocol.testharness.close_old_windows()
# Now start the test harness
protocol.base.execute_script("window.open('about:blank', '%s', 'noopener')" % self.window_id)
test_window = protocol.testharness.get_test_window(self.window_id,
parent_window,
timeout=5*self.timeout_multiplier)
self.protocol.base.set_window(test_window)
handler = CallbackHandler(self.logger, protocol, test_window)
protocol.webdriver.url = url
if not self.supports_eager_pageload:
self.wait_for_load(protocol)
while True:
result = protocol.base.execute_script(
self.script_resume % format_map, async=True)
done, rv = handler(result)
if done:
break
return rv
def wait_for_load(self, protocol):
# pageLoadStrategy=eager doesn't work in Chrome, so try to emulate it in a user script
loaded = False
seen_error = False
while not loaded:
try:
loaded = protocol.base.execute_script("""
var callback = arguments[arguments.length - 1];
if (location.href === "about:blank") {
callback(false);
} else if (document.readyState !== "loading") {
callback(true);
} else {
document.addEventListener("readystatechange", () => {if (document.readyState !== "loading") {callback(true)}});
}""", async=True)
except client.JavascriptErrorException:
# We can get an error here if the script runs in the initial about:blank
# document before it has navigated, with the driver returning an error
# indicating that the document was unloaded
if seen_error:
raise
seen_error = True
class WebDriverRefTestExecutor(RefTestExecutor):
def __init__(self, browser, server_config, timeout_multiplier=1,
screenshot_cache=None, close_after_done=True,
debug_info=None, capabilities=None, **kwargs):
"""WebDriver-based executor for reftests"""
RefTestExecutor.__init__(self,
browser,
server_config,
screenshot_cache=screenshot_cache,
timeout_multiplier=timeout_multiplier,
debug_info=debug_info)
self.protocol = WebDriverProtocol(self, browser,
capabilities=capabilities)
self.implementation = RefTestImplementation(self)
self.close_after_done = close_after_done
self.has_window = False
with open(os.path.join(here, "reftest-wait_webdriver.js")) as f:
self.wait_script = f.read()
def is_alive(self):
return self.protocol.is_alive()
def do_test(self, test):
width_offset, height_offset = self.protocol.webdriver.execute_script(
"""return [window.outerWidth - window.innerWidth,
window.outerHeight - window.innerHeight];"""
)
self.protocol.webdriver.window.size = (600 + width_offset, 600 + height_offset)
result = self.implementation.run_test(test)
return self.convert_result(test, result)
def screenshot(self, test, viewport_size, dpi):
# https://github.com/w3c/wptrunner/issues/166
assert viewport_size is None
assert dpi is None
return WebDriverRun(self._screenshot,
self.protocol,
self.test_url(test),
test.timeout).run()
def _screenshot(self, protocol, url, timeout):
webdriver = protocol.webdriver
webdriver.url = url
webdriver.execute_async_script(self.wait_script)
screenshot = webdriver.screenshot()
# strip off the "data:image/png;base64," prefix of the data URL
if screenshot.startswith("data:image/png;base64,"):
screenshot = screenshot.split(",", 1)[1]
return screenshot
|
env_stock_papertrading.py
|
import datetime
import threading
from finrl.finrl_meta.data_processors.processor_alpaca import AlpacaProcessor
import alpaca_trade_api as tradeapi
import time
import pandas as pd
import numpy as np
import torch
import gym
class AlpacaPaperTrading():
def __init__(self,ticker_list, time_interval, drl_lib, agent, cwd, net_dim,
state_dim, action_dim, API_KEY, API_SECRET,
APCA_API_BASE_URL, tech_indicator_list, turbulence_thresh=30,
max_stock=1e2, latency = None):
#load agent
self.drl_lib = drl_lib
if agent =='ppo':
if drl_lib == 'elegantrl':
from elegantrl.agent import AgentPPO
from elegantrl.run import Arguments, init_agent
#load agent
config = {'state_dim':state_dim,
'action_dim':action_dim,}
args = Arguments(agent=AgentPPO, env=StockEnvEmpty(config))
args.cwd = cwd
args.net_dim = net_dim
# load agent
try:
agent = init_agent(args, gpu_id = 0)
self.act = agent.act
self.device = agent.device
except BaseException:
raise ValueError("Fail to load agent!")
elif drl_lib == 'rllib':
from ray.rllib.agents import ppo
from ray.rllib.agents.ppo.ppo import PPOTrainer
config = ppo.DEFAULT_CONFIG.copy()
config['env'] = StockEnvEmpty
config["log_level"] = "WARN"
config['env_config'] = {'state_dim':state_dim,
'action_dim':action_dim,}
trainer = PPOTrainer(env=StockEnvEmpty, config=config)
try:
trainer.restore(cwd)
self.agent = trainer
print("Restoring from checkpoint path", cwd)
except:
raise ValueError('Failed to load agent!')
elif drl_lib == 'stable_baselines3':
from stable_baselines3 import PPO
try:
#load agent
self.model = PPO.load(cwd)
print("Successfully load model", cwd)
except:
raise ValueError('Failed to load agent!')
else:
raise ValueError('The DRL library input is NOT supported yet. Please check your input.')
else:
raise ValueError('Agent input is NOT supported yet.')
#connect to Alpaca trading API
try:
self.alpaca = tradeapi.REST(API_KEY,API_SECRET,APCA_API_BASE_URL, 'v2')
except:
raise ValueError('Failed to connect to Alpaca. Please check account info and internet connection.')
#read trading time interval
if time_interval == '1s':
self.time_interval = 1
elif time_interval == '5s':
self.time_interval = 5
elif time_interval == '1Min':
self.time_interval = 60
elif time_interval == '5Min':
self.time_interval = 60 * 5
elif time_interval == '15Min':
self.time_interval = 60 * 15
else:
raise ValueError('Time interval input is NOT supported yet.')
#read trading settings
self.tech_indicator_list = tech_indicator_list
self.turbulence_thresh = turbulence_thresh
self.max_stock = max_stock
#initialize account
self.stocks = np.asarray([0] * len(ticker_list)) #stocks holding
self.stocks_cd = np.zeros_like(self.stocks)
self.cash = None #cash record
self.stocks_df = pd.DataFrame(self.stocks, columns=['stocks'], index = ticker_list)
self.asset_list = []
self.price = np.asarray([0] * len(ticker_list))
self.stockUniverse = ticker_list
self.turbulence_bool = 0
self.equities = []
def test_latency(self, test_times = 10):
total_time = 0
for i in range(0, test_times):
time0 = time.time()
self.get_state()
time1 = time.time()
temp_time = time1 - time0
total_time += temp_time
latency = total_time/test_times
print('latency for data processing: ', latency)
return latency
def run(self):
orders = self.alpaca.list_orders(status="open")
for order in orders:
self.alpaca.cancel_order(order.id)
# Wait for market to open.
print("Waiting for market to open...")
tAMO = threading.Thread(target=self.awaitMarketOpen)
tAMO.start()
tAMO.join()
print("Market opened.")
while True:
# Figure out when the market will close so we can prepare to sell beforehand.
clock = self.alpaca.get_clock()
closingTime = clock.next_close.replace(tzinfo=datetime.timezone.utc).timestamp()
currTime = clock.timestamp.replace(tzinfo=datetime.timezone.utc).timestamp()
self.timeToClose = closingTime - currTime
if(self.timeToClose < (60)):
# Stop trading when 1 minute till market close.
print("Market closing soon. Stop trading.")
break
'''# Close all positions when 1 minutes til market close.
print("Market closing soon. Closing positions.")
positions = self.alpaca.list_positions()
for position in positions:
if(position.side == 'long'):
orderSide = 'sell'
else:
orderSide = 'buy'
qty = abs(int(float(position.qty)))
respSO = []
tSubmitOrder = threading.Thread(target=self.submitOrder(qty, position.symbol, orderSide, respSO))
tSubmitOrder.start()
tSubmitOrder.join()
# Run script again after market close for next trading day.
print("Sleeping until market close (15 minutes).")
time.sleep(60 * 15)'''
else:
trade = threading.Thread(target=self.trade)
trade.start()
trade.join()
last_equity = float(self.alpaca.get_account().last_equity)
cur_time = time.time()
self.equities.append([cur_time,last_equity])
time.sleep(self.time_interval)
def awaitMarketOpen(self):
isOpen = self.alpaca.get_clock().is_open
while(not isOpen):
clock = self.alpaca.get_clock()
openingTime = clock.next_open.replace(tzinfo=datetime.timezone.utc).timestamp()
currTime = clock.timestamp.replace(tzinfo=datetime.timezone.utc).timestamp()
timeToOpen = int((openingTime - currTime) / 60)
print(str(timeToOpen) + " minutes til market open.")
time.sleep(60)
isOpen = self.alpaca.get_clock().is_open
def trade(self):
state = self.get_state()
if self.drl_lib == 'elegantrl':
with torch.no_grad():
s_tensor = torch.as_tensor((state,), device=self.device)
a_tensor = self.act(s_tensor)
action = a_tensor.detach().cpu().numpy()[0]
action = (action * self.max_stock).astype(int)
elif self.drl_lib == 'rllib':
action = self.agent.compute_single_action(state)
elif self.drl_lib == 'stable_baselines3':
action = self.model.predict(state)[0]
else:
raise ValueError('The DRL library input is NOT supported yet. Please check your input.')
self.stocks_cd += 1
if self.turbulence_bool == 0:
min_action = 10 # stock_cd
for index in np.where(action < -min_action)[0]: # sell_index:
sell_num_shares = min(self.stocks[index], -action[index])
qty = abs(int(sell_num_shares))
respSO = []
tSubmitOrder = threading.Thread(target=self.submitOrder, args=(qty, self.stockUniverse[index], 'sell', respSO))
tSubmitOrder.start()
tSubmitOrder.join()
self.cash = float(self.alpaca.get_account().cash)
self.stocks_cd[index] = 0
for index in np.where(action > min_action)[0]: # buy_index:
if self.cash < 0:
tmp_cash = 0
else:
tmp_cash = self.cash
buy_num_shares = min(tmp_cash // self.price[index], abs(int(action[index])))
qty = abs(int(buy_num_shares))
respSO = []
tSubmitOrder = threading.Thread(target=self.submitOrder, args=(qty, self.stockUniverse[index], 'buy', respSO))
tSubmitOrder.start()
tSubmitOrder.join()
self.cash = float(self.alpaca.get_account().cash)
self.stocks_cd[index] = 0
else: # sell all when turbulence
positions = self.alpaca.list_positions()
for position in positions:
if(position.side == 'long'):
orderSide = 'sell'
else:
orderSide = 'buy'
qty = abs(int(float(position.qty)))
respSO = []
tSubmitOrder = threading.Thread(target=self.submitOrder, args=(qty, position.symbol, orderSide, respSO))
tSubmitOrder.start()
tSubmitOrder.join()
self.stocks_cd[:] = 0
def get_state(self):
alpaca = AlpacaProcessor(api=self.alpaca)
price, tech, turbulence = alpaca.fetch_latest_data(ticker_list = self.stockUniverse, time_interval='1Min',
tech_indicator_list=self.tech_indicator_list)
turbulence_bool = 1 if turbulence >= self.turbulence_thresh else 0
turbulence = (self.sigmoid_sign(turbulence, self.turbulence_thresh) * 2 ** -5).astype(np.float32)
tech = tech * 2 ** -7
positions = self.alpaca.list_positions()
stocks = [0] * len(self.stockUniverse)
for position in positions:
ind = self.stockUniverse.index(position.symbol)
stocks[ind] = ( abs(int(float(position.qty))))
stocks = np.asarray(stocks, dtype = float)
cash = float(self.alpaca.get_account().cash)
self.cash = cash
self.stocks = stocks
self.turbulence_bool = turbulence_bool
self.price = price
amount = np.array(self.cash * (2 ** -12), dtype=np.float32)
scale = np.array(2 ** -6, dtype=np.float32)
state = np.hstack((amount,
turbulence,
self.turbulence_bool,
price * scale,
self.stocks * scale,
self.stocks_cd,
tech,
)).astype(np.float32)
print(len(self.stockUniverse))
return state
def submitOrder(self, qty, stock, side, resp):
if(qty > 0):
try:
self.alpaca.submit_order(stock, qty, side, "market", "day")
print("Market order of | " + str(qty) + " " + stock + " " + side + " | completed.")
resp.append(True)
except:
print("Order of | " + str(qty) + " " + stock + " " + side + " | did not go through.")
resp.append(False)
else:
print("Quantity is 0, order of | " + str(qty) + " " + stock + " " + side + " | not completed.")
resp.append(True)
@staticmethod
def sigmoid_sign(ary, thresh):
def sigmoid(x):
return 1 / (1 + np.exp(-x * np.e)) - 0.5
return sigmoid(ary / thresh) * thresh
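# Note: sigmoid_sign squashes its input smoothly into (-thresh/2, thresh/2), so the scaled
# turbulence feature added to the state vector in get_state() stays bounded even when raw
# turbulence spikes far above turbulence_thresh.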
class StockEnvEmpty(gym.Env):
#Empty Env used for loading rllib agent
def __init__(self,config):
state_dim = config['state_dim']
action_dim = config['action_dim']
self.env_num = 1
self.max_step = 10000
self.env_name = 'StockEnvEmpty'
self.state_dim = state_dim
self.action_dim = action_dim
self.if_discrete = False
self.target_return = 9999
self.observation_space = gym.spaces.Box(low=-3000, high=3000, shape=(state_dim,), dtype=np.float32)
self.action_space = gym.spaces.Box(low=-1, high=1, shape=(action_dim,), dtype=np.float32)
def reset(self):
return
def step(self, actions):
return
|
core.py
|
# Copyright 2018 John Reese
# Licensed under the MIT license
import asyncio
import sys
import time
from unittest import TestCase
from unittest.mock import patch
import aiomultiprocess as amp
from .base import (
async_test,
do_nothing,
get_dummy_constant,
initializer,
raise_fn,
sleepy,
two,
)
class CoreTest(TestCase):
def setUp(self):
# reset to default context before each test
amp.set_start_method()
@async_test
async def test_process(self):
p = amp.Process(target=sleepy, name="test_process")
p.start()
self.assertEqual(p.name, "test_process")
self.assertTrue(p.pid)
self.assertTrue(p.is_alive())
await p.join()
self.assertFalse(p.is_alive())
@async_test
async def test_process_await(self):
p = amp.Process(target=sleepy, name="test_process")
await p
self.assertIsNotNone(p.exitcode)
p = amp.Process(target=sleepy, name="test_process")
p.start()
await p
self.assertIsNotNone(p.exitcode)
@async_test
async def test_process_join(self):
p = amp.Process(target=sleepy, name="test_process")
with self.assertRaisesRegex(ValueError, "must start process"):
await p.join()
p.start()
await p.join()
self.assertIsNotNone(p.exitcode)
@async_test
async def test_process_daemon(self):
p = amp.Process(daemon=False)
self.assertEqual(p.daemon, False)
p.daemon = True
self.assertEqual(p.daemon, True)
p = amp.Process(daemon=True)
self.assertEqual(p.daemon, True)
p.daemon = False
self.assertEqual(p.daemon, False)
@async_test
async def test_process_terminate(self):
start = time.time()
p = amp.Process(target=asyncio.sleep, args=(1,), name="test_process")
p.start()
p.terminate()
await p.join()
self.assertLess(p.exitcode, 0)
self.assertLess(time.time() - start, 0.6)
@async_test
async def test_process_kill(self):
p = amp.Process(target=sleepy)
p.start()
if sys.version_info >= (3, 7):
p.kill()
await p.join()
self.assertLess(p.exitcode, 0)
else:
with self.assertRaises(AttributeError):
p.kill()
await p.join()
@async_test
async def test_process_close(self):
p = amp.Process(target=sleepy)
p.start()
if sys.version_info >= (3, 7):
with self.assertRaises(ValueError):
self.assertIsNone(p.exitcode)
p.close()
await p.join()
self.assertIsNotNone(p.exitcode)
p.close()
with self.assertRaises(ValueError):
_ = p.exitcode
else:
with self.assertRaises(AttributeError):
p.close()
await p.join()
@async_test
async def test_process_timeout(self):
p = amp.Process(target=sleepy)
p.start()
with self.assertRaises(asyncio.TimeoutError):
await p.join(timeout=0.01)
@async_test
async def test_worker(self):
p = amp.Worker(target=sleepy)
p.start()
with self.assertRaisesRegex(ValueError, "coroutine not completed"):
_ = p.result
await p.join()
self.assertFalse(p.is_alive())
self.assertEqual(p.result, p.pid)
@async_test
async def test_worker_join(self):
# test results from join
p = amp.Worker(target=sleepy)
p.start()
self.assertEqual(await p.join(), p.pid)
# test awaiting p directly, no need to start
p = amp.Worker(target=sleepy)
self.assertEqual(await p, p.pid)
@async_test
async def test_spawn_method(self):
self.assertEqual(amp.core.get_context().get_start_method(), "spawn")
async def inline(x):
return x
with self.assertRaisesRegex(AttributeError, "Can't pickle local object"):
_ = amp.Worker(target=inline, args=(1,), name="test_inline")
await _
result = await amp.Worker(target=two, name="test_global")
self.assertEqual(result, 2)
@async_test
async def test_set_start_method(self):
with self.assertRaises(ValueError):
amp.set_start_method("foo")
if sys.platform.startswith("win32"):
amp.set_start_method(None)
self.assertEqual(amp.core.get_context().get_start_method(), "spawn")
with self.assertRaises(ValueError):
amp.set_start_method("fork")
elif sys.platform.startswith("linux") or sys.platform.startswith("darwin"):
amp.set_start_method("fork")
async def inline(x):
return x
result = await amp.Worker(target=inline, args=(17,), name="test_inline")
self.assertEqual(result, 17)
@patch("aiomultiprocess.core.set_start_method")
@async_test
async def test_set_context(self, ssm_mock):
amp.set_context()
ssm_mock.assert_called_with(None)
amp.set_context("foo")
ssm_mock.assert_called_with("foo")
ssm_mock.side_effect = Exception("fake exception")
with self.assertRaisesRegex(Exception, "fake exception"):
amp.set_context("whatever")
@async_test
async def test_initializer(self):
result = await amp.Worker(
target=get_dummy_constant,
name="test_process",
initializer=initializer,
initargs=(10,),
)
self.assertEqual(result, 10)
@async_test
async def test_async_initializer(self):
with self.assertRaises(ValueError) as _:
p = amp.Process(target=sleepy, name="test_process", initializer=sleepy)
p.start()
@async_test
async def test_raise(self):
result = await amp.Worker(
target=raise_fn, name="test_process", initializer=do_nothing
)
self.assertIsInstance(result, RuntimeError)
@async_test
async def test_sync_target(self):
with self.assertRaises(ValueError) as _:
p = amp.Process(
target=do_nothing, name="test_process", initializer=do_nothing
)
p.start()
@async_test
async def test_not_implemented(self):
with self.assertRaises(NotImplementedError):
await amp.core.not_implemented()
|
kafka_broker_integration_test.py
|
#!/usr/bin/python
import random
import os
import shutil
import socket
import subprocess
import tempfile
from threading import Thread, Semaphore
import time
import unittest
from kafka import KafkaAdminClient, KafkaConsumer, KafkaProducer, TopicPartition
from kafka.admin import ConfigResource, ConfigResourceType, NewPartitions, NewTopic
import urllib.request
class KafkaBrokerIntegrationTest(unittest.TestCase):
"""
All tests in this class depend on Envoy/Zookeeper/Kafka running.
For each of these tests we are going to create Kafka consumers/producers/admins and point them
to Envoy (that proxies Kafka).
We expect every operation to succeed (as they should reach Kafka) and the corresponding metrics
to increase on Envoy side (to show that messages were received and forwarded successfully).
"""
services = None
@classmethod
def setUpClass(cls):
KafkaBrokerIntegrationTest.services = ServicesHolder()
KafkaBrokerIntegrationTest.services.start()
@classmethod
def tearDownClass(cls):
KafkaBrokerIntegrationTest.services.shut_down()
def setUp(self):
# We want to check if our services are okay before running any kind of test.
KafkaBrokerIntegrationTest.services.check_state()
self.metrics = MetricsHolder(self)
def tearDown(self):
# We want to check if our services are okay after running any test.
KafkaBrokerIntegrationTest.services.check_state()
@classmethod
def kafka_address(cls):
return '127.0.0.1:%s' % KafkaBrokerIntegrationTest.services.kafka_envoy_port
@classmethod
def envoy_stats_address(cls):
return 'http://127.0.0.1:%s/stats' % KafkaBrokerIntegrationTest.services.envoy_monitoring_port
def test_kafka_consumer_with_no_messages_received(self):
"""
This test verifies that consumer sends fetches correctly, and receives nothing.
"""
consumer = KafkaConsumer(bootstrap_servers=KafkaBrokerIntegrationTest.kafka_address(),
fetch_max_wait_ms=500)
consumer.assign([TopicPartition('test_kafka_consumer_with_no_messages_received', 0)])
for _ in range(10):
records = consumer.poll(timeout_ms=1000)
self.assertEqual(len(records), 0)
self.metrics.collect_final_metrics()
# 'consumer.poll()' can translate into 0 or more fetch requests.
# We have set API timeout to 1000ms, while fetch_max_wait is 500ms.
# This means that consumer will send roughly 2 (1000/500) requests per API call (so 20 total).
# So increase of 10 (half of that value) should be safe enough to test.
self.metrics.assert_metric_increase('fetch', 10)
# Metadata is used by consumer to figure out current partition leader.
self.metrics.assert_metric_increase('metadata', 1)
def test_kafka_producer_and_consumer(self):
"""
This test verifies that producer can send messages, and consumer can receive them.
"""
messages_to_send = 100
partition = TopicPartition('test_kafka_producer_and_consumer', 0)
producer = KafkaProducer(bootstrap_servers=KafkaBrokerIntegrationTest.kafka_address())
for _ in range(messages_to_send):
future = producer.send(value=b'some_message_bytes',
topic=partition.topic,
partition=partition.partition)
send_status = future.get()
self.assertTrue(send_status.offset >= 0)
consumer = KafkaConsumer(bootstrap_servers=KafkaBrokerIntegrationTest.kafka_address(),
auto_offset_reset='earliest',
fetch_max_bytes=100)
consumer.assign([partition])
received_messages = []
while (len(received_messages) < messages_to_send):
poll_result = consumer.poll(timeout_ms=1000)
received_messages += poll_result[partition]
self.metrics.collect_final_metrics()
self.metrics.assert_metric_increase('produce', 100)
# 'fetch_max_bytes' was set to a very low value, so client will need to send a FetchRequest
# multiple times to broker to get all 100 messages (otherwise all 100 records could have been
# received in one go).
self.metrics.assert_metric_increase('fetch', 20)
# Both producer & consumer had to fetch cluster metadata.
self.metrics.assert_metric_increase('metadata', 2)
def test_consumer_with_consumer_groups(self):
"""
This test verifies that multiple consumers can form a Kafka consumer group.
"""
consumer_count = 10
consumers = []
for id in range(consumer_count):
consumer = KafkaConsumer(bootstrap_servers=KafkaBrokerIntegrationTest.kafka_address(),
group_id='test',
client_id='test-%s' % id)
consumer.subscribe(['test_consumer_with_consumer_groups'])
consumers.append(consumer)
worker_threads = []
for consumer in consumers:
thread = Thread(target=KafkaBrokerIntegrationTest.worker, args=(consumer,))
thread.start()
worker_threads.append(thread)
for thread in worker_threads:
thread.join()
for consumer in consumers:
consumer.close()
self.metrics.collect_final_metrics()
self.metrics.assert_metric_increase('api_versions', consumer_count)
self.metrics.assert_metric_increase('metadata', consumer_count)
self.metrics.assert_metric_increase('join_group', consumer_count)
self.metrics.assert_metric_increase('find_coordinator', consumer_count)
self.metrics.assert_metric_increase('leave_group', consumer_count)
@staticmethod
def worker(consumer):
"""
Worker thread for Kafka consumer.
Multiple poll-s are done here, so that the group can safely form.
"""
poll_operations = 10
for i in range(poll_operations):
consumer.poll(timeout_ms=1000)
def test_admin_client(self):
"""
This test verifies that Kafka Admin Client can still be used to manage Kafka.
"""
admin_client = KafkaAdminClient(
bootstrap_servers=KafkaBrokerIntegrationTest.kafka_address())
# Create a topic with 3 partitions.
new_topic_spec = NewTopic(name='test_admin_client', num_partitions=3, replication_factor=1)
create_response = admin_client.create_topics([new_topic_spec])
error_data = create_response.topic_errors
self.assertEqual(len(error_data), 1)
self.assertEqual(error_data[0], (new_topic_spec.name, 0, None))
# Alter topic (change some Kafka-level property).
config_resource = ConfigResource(ConfigResourceType.TOPIC, new_topic_spec.name,
{'flush.messages': 42})
alter_response = admin_client.alter_configs([config_resource])
error_data = alter_response.resources
self.assertEqual(len(error_data), 1)
self.assertEqual(error_data[0][0], 0)
# Add 2 more partitions to topic.
new_partitions_spec = {new_topic_spec.name: NewPartitions(5)}
    new_partitions_response = admin_client.create_partitions(new_partitions_spec)
    error_data = new_partitions_response.topic_errors
    self.assertEqual(len(error_data), 1)
    self.assertEqual(error_data[0], (new_topic_spec.name, 0, None))
# Delete a topic.
    delete_response = admin_client.delete_topics([new_topic_spec.name])
    # Delete responses report (topic, error_code) pairs under 'topic_error_codes'.
    error_data = delete_response.topic_error_codes
    self.assertEqual(len(error_data), 1)
    self.assertEqual(error_data[0], (new_topic_spec.name, 0))
self.metrics.collect_final_metrics()
self.metrics.assert_metric_increase('create_topics', 1)
self.metrics.assert_metric_increase('alter_configs', 1)
self.metrics.assert_metric_increase('create_partitions', 1)
self.metrics.assert_metric_increase('delete_topics', 1)
class MetricsHolder:
"""
Utility for storing Envoy metrics.
Expected to be created before the test (to get initial metrics), and then to collect them at the
end of test, so the expected increases can be verified.
"""
def __init__(self, owner):
self.owner = owner
    self.initial_requests, self.initial_responses = MetricsHolder.get_envoy_stats()
self.final_requests = None
self.final_responses = None
def collect_final_metrics(self):
self.final_requests, self.final_responses = MetricsHolder.get_envoy_stats()
def assert_metric_increase(self, message_type, count):
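    # Envoy keeps separate counters for requests and responses; both the
    # '<type>_request' and '<type>_response' counters must have grown by at
    # least 'count' since the initial snapshot taken in __init__.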
request_type = message_type + '_request'
response_type = message_type + '_response'
initial_request_value = self.initial_requests.get(request_type, 0)
final_request_value = self.final_requests.get(request_type, 0)
self.owner.assertGreaterEqual(final_request_value, initial_request_value + count)
    initial_response_value = self.initial_responses.get(response_type, 0)
final_response_value = self.final_responses.get(response_type, 0)
self.owner.assertGreaterEqual(final_response_value, initial_response_value + count)
@staticmethod
def get_envoy_stats():
"""
Grab request/response metrics from envoy's stats interface.
"""
stats_url = KafkaBrokerIntegrationTest.envoy_stats_address()
requests = {}
responses = {}
with urllib.request.urlopen(stats_url) as remote_metrics_url:
payload = remote_metrics_url.read().decode()
lines = payload.splitlines()
for line in lines:
request_prefix = 'kafka.testfilter.request.'
response_prefix = 'kafka.testfilter.response.'
if line.startswith(request_prefix):
data = line[len(request_prefix):].split(': ')
requests[data[0]] = int(data[1])
if line.startswith(response_prefix) and '_response:' in line:
data = line[len(response_prefix):].split(': ')
responses[data[0]] = int(data[1])
return [requests, responses]
class ServicesHolder:
"""
Utility class for setting up our external dependencies: Envoy, Zookeeper & Kafka.
"""
def __init__(self):
self.kafka_tmp_dir = None
self.envoy_worker = None
self.zk_worker = None
self.kafka_worker = None
@staticmethod
def get_random_listener_port():
"""
    Ask the OS for a random free port by binding to port 0.
    This method still needs to be invoked in a retry loop, because in a degenerate scenario
    another process might bind to the port after we have closed the socket and before the
    service that is supposed to use it binds to it.
"""
import socket
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as server_socket:
server_socket.bind(('0.0.0.0', 0))
socket_port = server_socket.getsockname()[1]
print('returning %s' % socket_port)
return socket_port
def start(self):
"""
Starts all the services we need for integration tests.
"""
# Find java installation that we are going to use to start Zookeeper & Kafka.
java_directory = ServicesHolder.find_java()
launcher_environment = os.environ.copy()
# Make `java` visible to build script:
# https://github.com/apache/kafka/blob/2.2.0/bin/kafka-run-class.sh#L226
new_path = os.path.abspath(java_directory) + os.pathsep + launcher_environment['PATH']
launcher_environment['PATH'] = new_path
# Both ZK & Kafka use Kafka launcher script.
# By default it sets up JMX options:
# https://github.com/apache/kafka/blob/2.2.0/bin/kafka-run-class.sh#L167
# But that forces the JVM to load file that is not present due to:
# https://docs.oracle.com/javase/9/management/monitoring-and-management-using-jmx-technology.htm
# Let's make it simple and just disable JMX.
launcher_environment['KAFKA_JMX_OPTS'] = ' '
# Setup a temporary directory, which will be used by Kafka & Zookeeper servers.
self.kafka_tmp_dir = tempfile.mkdtemp()
print('Temporary directory used for tests: ' + self.kafka_tmp_dir)
# This directory will store the configuration files fed to services.
config_dir = self.kafka_tmp_dir + '/config'
os.mkdir(config_dir)
# This directory will store Zookeeper's data (== Kafka server metadata).
zookeeper_store_dir = self.kafka_tmp_dir + '/zookeeper_data'
os.mkdir(zookeeper_store_dir)
# This directory will store Kafka's data (== partitions).
kafka_store_dir = self.kafka_tmp_dir + '/kafka_data'
os.mkdir(kafka_store_dir)
# Find the Kafka server 'bin' directory.
kafka_bin_dir = os.path.join('.', 'external', 'kafka_server_binary', 'bin')
# Main initialization block:
# - generate random ports,
# - render configuration with these ports,
# - start services and check if they are running okay,
# - if anything is having problems, kill everything and start again.
while True:
# Generate random ports.
zk_port = ServicesHolder.get_random_listener_port()
kafka_real_port = ServicesHolder.get_random_listener_port()
kafka_envoy_port = ServicesHolder.get_random_listener_port()
envoy_monitoring_port = ServicesHolder.get_random_listener_port()
# These ports need to be exposed to tests.
self.kafka_envoy_port = kafka_envoy_port
self.envoy_monitoring_port = envoy_monitoring_port
# Render config file for Envoy.
template = RenderingHelper.get_template('envoy_config_yaml.j2')
contents = template.render(
data={
'kafka_real_port': kafka_real_port,
'kafka_envoy_port': kafka_envoy_port,
'envoy_monitoring_port': envoy_monitoring_port
})
envoy_config_file = os.path.join(config_dir, 'envoy_config.yaml')
with open(envoy_config_file, 'w') as fd:
fd.write(contents)
print('Envoy config file rendered at: ' + envoy_config_file)
# Render config file for Zookeeper.
template = RenderingHelper.get_template('zookeeper_properties.j2')
contents = template.render(data={'data_dir': zookeeper_store_dir, 'zk_port': zk_port})
zookeeper_config_file = os.path.join(config_dir, 'zookeeper.properties')
with open(zookeeper_config_file, 'w') as fd:
fd.write(contents)
print('Zookeeper config file rendered at: ' + zookeeper_config_file)
# Render config file for Kafka.
template = RenderingHelper.get_template('kafka_server_properties.j2')
contents = template.render(
data={
'data_dir': kafka_store_dir,
'zk_port': zk_port,
'kafka_real_port': kafka_real_port,
'kafka_envoy_port': kafka_envoy_port
})
kafka_config_file = os.path.join(config_dir, 'kafka_server.properties')
with open(kafka_config_file, 'w') as fd:
fd.write(contents)
print('Kafka config file rendered at: ' + kafka_config_file)
# Start the services now.
try:
# Start Envoy in the background, pointing to rendered config file.
envoy_binary = ServicesHolder.find_envoy()
# --base-id is added to allow multiple Envoy instances to run at the same time.
envoy_args = [
os.path.abspath(envoy_binary), '-c', envoy_config_file, '--base-id',
str(random.randint(1, 999999))
]
envoy_handle = subprocess.Popen(envoy_args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.envoy_worker = ProcessWorker(envoy_handle, 'Envoy',
'starting main dispatch loop')
self.envoy_worker.await_startup()
# Start Zookeeper in background, pointing to rendered config file.
zk_binary = os.path.join(kafka_bin_dir, 'zookeeper-server-start.sh')
zk_args = [os.path.abspath(zk_binary), zookeeper_config_file]
zk_handle = subprocess.Popen(zk_args,
env=launcher_environment,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.zk_worker = ProcessWorker(zk_handle, 'Zookeeper', 'binding to port')
self.zk_worker.await_startup()
# Start Kafka in background, pointing to rendered config file.
kafka_binary = os.path.join(kafka_bin_dir, 'kafka-server-start.sh')
kafka_args = [os.path.abspath(kafka_binary), kafka_config_file]
kafka_handle = subprocess.Popen(kafka_args,
env=launcher_environment,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.kafka_worker = ProcessWorker(kafka_handle, 'Kafka',
'[KafkaServer id=0] started')
self.kafka_worker.await_startup()
# All services have started without problems - now we can finally finish.
break
except Exception as e:
print('Could not start services, will try again', e)
if self.kafka_worker:
self.kafka_worker.kill()
self.kafka_worker = None
if self.zk_worker:
self.zk_worker.kill()
self.zk_worker = None
if self.envoy_worker:
self.envoy_worker.kill()
self.envoy_worker = None
@staticmethod
def find_java():
"""
    This method locates the Java installation in the current directory.
We cannot hardcode the name, as the dirname changes as per:
https://github.com/bazelbuild/bazel/blob/master/tools/jdk/BUILD#L491
"""
external_dir = os.path.join('.', 'external')
for directory in os.listdir(external_dir):
if 'remotejdk11' in directory:
result = os.path.join(external_dir, directory, 'bin')
print('Using Java: ' + result)
return result
raise Exception('Could not find Java in: ' + external_dir)
@staticmethod
def find_envoy():
"""
This method locates envoy binary.
It's present at ./source/exe/envoy-static (at least for mac/bazel-asan/bazel-tsan),
or at ./external/envoy/source/exe/envoy-static (for bazel-compile_time_options).
"""
candidate = os.path.join('.', 'source', 'exe', 'envoy-static')
if os.path.isfile(candidate):
return candidate
candidate = os.path.join('.', 'external', 'envoy', 'source', 'exe', 'envoy-static')
if os.path.isfile(candidate):
return candidate
raise Exception("Could not find Envoy")
def shut_down(self):
# Teardown - kill Kafka, Zookeeper, and Envoy. Then delete their data directory.
print('Cleaning up')
if self.kafka_worker:
self.kafka_worker.kill()
if self.zk_worker:
self.zk_worker.kill()
if self.envoy_worker:
self.envoy_worker.kill()
if self.kafka_tmp_dir:
print('Removing temporary directory: ' + self.kafka_tmp_dir)
shutil.rmtree(self.kafka_tmp_dir)
def check_state(self):
self.envoy_worker.check_state()
self.zk_worker.check_state()
self.kafka_worker.check_state()
class ProcessWorker:
"""
Helper class that wraps the external service process.
Provides ability to wait until service is ready to use (this is done by tracing logs) and
printing service's output to stdout.
"""
# Service is considered to be properly initialized after it has logged its startup message
# and has been alive for INITIALIZATION_WAIT_SECONDS after that message has been seen.
# This (clunky) design is needed because Zookeeper happens to log "binding to port" and then
# might fail to bind.
INITIALIZATION_WAIT_SECONDS = 3
def __init__(self, process_handle, name, startup_message):
# Handle to process and pretty name.
self.process_handle = process_handle
self.name = name
self.startup_message = startup_message
self.startup_message_ts = None
# Semaphore raised when startup has finished and information regarding startup's success.
self.initialization_semaphore = Semaphore(value=0)
self.initialization_ok = False
self.state_worker = Thread(target=ProcessWorker.initialization_worker, args=(self,))
self.state_worker.start()
self.out_worker = Thread(target=ProcessWorker.pipe_handler,
args=(self, self.process_handle.stdout, 'out'))
self.out_worker.start()
self.err_worker = Thread(target=ProcessWorker.pipe_handler,
args=(self, self.process_handle.stderr, 'err'))
self.err_worker.start()
@staticmethod
def initialization_worker(owner):
"""
Worker thread.
Responsible for detecting if service died during initialization steps and ensuring if enough
time has passed since the startup message has been seen.
When either of these happens, we just raise the initialization semaphore.
"""
while True:
status = owner.process_handle.poll()
if status:
# Service died.
print('%s did not initialize properly - finished with: %s' % (owner.name, status))
owner.initialization_ok = False
owner.initialization_semaphore.release()
break
else:
# Service is still running.
startup_message_ts = owner.startup_message_ts
if startup_message_ts:
# The log message has been registered (by pipe_handler thread), let's just ensure that
# some time has passed and mark the service as running.
current_time = int(round(time.time()))
if current_time - startup_message_ts >= ProcessWorker.INITIALIZATION_WAIT_SECONDS:
print('Startup message seen %s seconds ago, and service is still running' %
(ProcessWorker.INITIALIZATION_WAIT_SECONDS),
flush=True)
owner.initialization_ok = True
owner.initialization_semaphore.release()
break
time.sleep(1)
print('Initialization worker for %s has finished' % (owner.name))
@staticmethod
def pipe_handler(owner, pipe, pipe_name):
"""
Worker thread.
If a service startup message is seen, then it just registers the timestamp of its appearance.
Also prints every received message.
"""
try:
for raw_line in pipe:
line = raw_line.decode().rstrip()
print('%s(%s):' % (owner.name, pipe_name), line, flush=True)
if owner.startup_message in line:
print('%s initialization message [%s] has been logged' %
(owner.name, owner.startup_message))
owner.startup_message_ts = int(round(time.time()))
finally:
pipe.close()
print('Pipe handler for %s(%s) has finished' % (owner.name, pipe_name))
def await_startup(self):
"""
Awaits on initialization semaphore, and then verifies the initialization state.
If everything is okay, we just continue (we can use the service), otherwise throw.
"""
print('Waiting for %s to start...' % (self.name))
self.initialization_semaphore.acquire()
try:
if self.initialization_ok:
print('Service %s started successfully' % (self.name))
else:
raise Exception('%s could not start' % (self.name))
finally:
self.initialization_semaphore.release()
def check_state(self):
"""
Verifies if the service is still running. Throws if it is not.
"""
status = self.process_handle.poll()
if status:
raise Exception('%s died with: %s' % (self.name, str(status)))
def kill(self):
"""
Utility method to kill the main service thread and all related workers.
"""
print('Stopping service %s' % self.name)
# Kill the real process.
self.process_handle.kill()
self.process_handle.wait()
# The sub-workers are going to finish on their own, as they will detect main thread dying
# (through pipes closing, or .poll() returning a non-null value).
self.state_worker.join()
self.out_worker.join()
self.err_worker.join()
print('Service %s has been stopped' % self.name)
class RenderingHelper:
"""
Helper for jinja templates.
"""
@staticmethod
def get_template(template):
import jinja2
import os
import sys
# Templates are resolved relatively to main start script, due to main & test templates being
# stored in different directories.
env = jinja2.Environment(loader=jinja2.FileSystemLoader(
searchpath=os.path.dirname(os.path.abspath(__file__))))
return env.get_template(template)
if __name__ == '__main__':
unittest.main()
|
framework.py
|
#!/usr/bin/env python
from __future__ import print_function
import gc
import sys
import os
import select
import unittest
import tempfile
import time
import faulthandler
import random
import copy
import psutil
from collections import deque
from threading import Thread, Event
from inspect import getdoc, isclass
from traceback import format_exception
from logging import FileHandler, DEBUG, Formatter
from scapy.packet import Raw
from hook import StepHook, PollHook, VppDiedError
from vpp_pg_interface import VppPGInterface
from vpp_sub_interface import VppSubInterface
from vpp_lo_interface import VppLoInterface
from vpp_papi_provider import VppPapiProvider
from vpp_papi.vpp_stats import VPPStats
from log import RED, GREEN, YELLOW, double_line_delim, single_line_delim, \
get_logger, colorize
from vpp_object import VppObjectRegistry
from util import ppp, is_core_present
from scapy.layers.inet import IPerror, TCPerror, UDPerror, ICMPerror
from scapy.layers.inet6 import ICMPv6DestUnreach, ICMPv6EchoRequest
from scapy.layers.inet6 import ICMPv6EchoReply
if os.name == 'posix' and sys.version_info[0] < 3:
# using subprocess32 is recommended by python official documentation
# @ https://docs.python.org/2/library/subprocess.html
import subprocess32 as subprocess
else:
import subprocess
PASS = 0
FAIL = 1
ERROR = 2
SKIP = 3
TEST_RUN = 4
debug_framework = False
if os.getenv('TEST_DEBUG', "0") == "1":
debug_framework = True
import debug_internal
"""
Test framework module.
The module provides a set of tools for constructing and running tests and
representing the results.
"""
class _PacketInfo(object):
"""Private class to create packet info object.
Help process information about the next packet.
Set variables to default values.
"""
#: Store the index of the packet.
index = -1
#: Store the index of the source packet generator interface of the packet.
src = -1
#: Store the index of the destination packet generator interface
#: of the packet.
dst = -1
#: Store expected ip version
ip = -1
#: Store expected upper protocol
proto = -1
#: Store the copy of the former packet.
data = None
def __eq__(self, other):
index = self.index == other.index
src = self.src == other.src
dst = self.dst == other.dst
data = self.data == other.data
return index and src and dst and data
def pump_output(testclass):
""" pump output from vpp stdout/stderr to proper queues """
stdout_fragment = ""
stderr_fragment = ""
while not testclass.pump_thread_stop_flag.is_set():
readable = select.select([testclass.vpp.stdout.fileno(),
testclass.vpp.stderr.fileno(),
testclass.pump_thread_wakeup_pipe[0]],
[], [])[0]
if testclass.vpp.stdout.fileno() in readable:
read = os.read(testclass.vpp.stdout.fileno(), 102400)
if len(read) > 0:
split = read.splitlines(True)
if len(stdout_fragment) > 0:
split[0] = "%s%s" % (stdout_fragment, split[0])
if len(split) > 0 and split[-1].endswith("\n"):
limit = None
else:
limit = -1
stdout_fragment = split[-1]
testclass.vpp_stdout_deque.extend(split[:limit])
if not testclass.cache_vpp_output:
for line in split[:limit]:
testclass.logger.debug(
"VPP STDOUT: %s" % line.rstrip("\n"))
if testclass.vpp.stderr.fileno() in readable:
read = os.read(testclass.vpp.stderr.fileno(), 102400)
if len(read) > 0:
split = read.splitlines(True)
if len(stderr_fragment) > 0:
split[0] = "%s%s" % (stderr_fragment, split[0])
if len(split) > 0 and split[-1].endswith("\n"):
limit = None
else:
limit = -1
stderr_fragment = split[-1]
testclass.vpp_stderr_deque.extend(split[:limit])
if not testclass.cache_vpp_output:
for line in split[:limit]:
testclass.logger.debug(
"VPP STDERR: %s" % line.rstrip("\n"))
# ignoring the dummy pipe here intentionally - the flag will take care
# of properly terminating the loop
def running_extended_tests():
s = os.getenv("EXTENDED_TESTS", "n")
return True if s.lower() in ("y", "yes", "1") else False
def running_on_centos():
os_id = os.getenv("OS_ID", "")
return True if "centos" in os_id.lower() else False
class KeepAliveReporter(object):
"""
Singleton object which reports test start to parent process
"""
_shared_state = {}
def __init__(self):
self.__dict__ = self._shared_state
self._pipe = None
@property
def pipe(self):
return self._pipe
@pipe.setter
def pipe(self, pipe):
if self._pipe is not None:
raise Exception("Internal error - pipe should only be set once.")
self._pipe = pipe
def send_keep_alive(self, test, desc=None):
"""
Write current test tmpdir & desc to keep-alive pipe to signal liveness
"""
if self.pipe is None:
# if not running forked..
return
if isclass(test):
desc = '%s (%s)' % (desc, unittest.util.strclass(test))
else:
desc = test.id()
self.pipe.send((desc, test.vpp_bin, test.tempdir, test.vpp.pid))
class VppTestCase(unittest.TestCase):
"""This subclass is a base class for VPP test cases that are implemented as
classes. It provides methods to create and run test case.
"""
@property
def packet_infos(self):
"""List of packet infos"""
return self._packet_infos
@classmethod
def get_packet_count_for_if_idx(cls, dst_if_index):
"""Get the number of packet info for specified destination if index"""
if dst_if_index in cls._packet_count_for_dst_if_idx:
return cls._packet_count_for_dst_if_idx[dst_if_index]
else:
return 0
@classmethod
def instance(cls):
"""Return the instance of this testcase"""
return cls.test_instance
@classmethod
def set_debug_flags(cls, d):
cls.debug_core = False
cls.debug_gdb = False
cls.debug_gdbserver = False
if d is None:
return
dl = d.lower()
if dl == "core":
cls.debug_core = True
elif dl == "gdb":
cls.debug_gdb = True
elif dl == "gdbserver":
cls.debug_gdbserver = True
else:
raise Exception("Unrecognized DEBUG option: '%s'" % d)
@staticmethod
def get_least_used_cpu():
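        # Bucket CPUs by how many 'vpp_main' processes are currently running on
        # them: cpu_usage_list[0] holds unused CPUs, cpu_usage_list[1] CPUs with
        # one VPP instance, and so on. A random CPU is then picked from the
        # least-used non-empty bucket.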
cpu_usage_list = [set(range(psutil.cpu_count()))]
vpp_processes = [p for p in psutil.process_iter(attrs=['pid', 'name'])
if 'vpp_main' == p.info['name']]
for vpp_process in vpp_processes:
for cpu_usage_set in cpu_usage_list:
try:
cpu_num = vpp_process.cpu_num()
if cpu_num in cpu_usage_set:
cpu_usage_set_index = cpu_usage_list.index(
cpu_usage_set)
if cpu_usage_set_index == len(cpu_usage_list) - 1:
cpu_usage_list.append({cpu_num})
else:
cpu_usage_list[cpu_usage_set_index + 1].add(
cpu_num)
cpu_usage_set.remove(cpu_num)
break
except psutil.NoSuchProcess:
pass
for cpu_usage_set in cpu_usage_list:
if len(cpu_usage_set) > 0:
min_usage_set = cpu_usage_set
break
return random.choice(tuple(min_usage_set))
@classmethod
def print_header(cls):
if not hasattr(cls, '_header_printed'):
print(double_line_delim)
print(colorize(getdoc(cls).splitlines()[0], GREEN))
print(double_line_delim)
cls._header_printed = True
@classmethod
def setUpConstants(cls):
""" Set-up the test case class based on environment variables """
s = os.getenv("STEP", "n")
cls.step = True if s.lower() in ("y", "yes", "1") else False
d = os.getenv("DEBUG", None)
c = os.getenv("CACHE_OUTPUT", "1")
cls.cache_vpp_output = False if c.lower() in ("n", "no", "0") else True
cls.set_debug_flags(d)
cls.vpp_bin = os.getenv('VPP_TEST_BIN', "vpp")
cls.plugin_path = os.getenv('VPP_TEST_PLUGIN_PATH')
cls.extern_plugin_path = os.getenv('EXTERN_PLUGINS')
plugin_path = None
if cls.plugin_path is not None:
if cls.extern_plugin_path is not None:
plugin_path = "%s:%s" % (
cls.plugin_path, cls.extern_plugin_path)
else:
plugin_path = cls.plugin_path
elif cls.extern_plugin_path is not None:
plugin_path = cls.extern_plugin_path
debug_cli = ""
if cls.step or cls.debug_gdb or cls.debug_gdbserver:
debug_cli = "cli-listen localhost:5002"
coredump_size = None
size = os.getenv("COREDUMP_SIZE")
if size is not None:
coredump_size = "coredump-size %s" % size
if coredump_size is None:
coredump_size = "coredump-size unlimited"
cpu_core_number = cls.get_least_used_cpu()
cls.vpp_cmdline = [cls.vpp_bin, "unix",
"{", "nodaemon", debug_cli, "full-coredump",
coredump_size, "runtime-dir", cls.tempdir, "}",
"api-trace", "{", "on", "}", "api-segment", "{",
"prefix", cls.shm_prefix, "}", "cpu", "{",
"main-core", str(cpu_core_number), "}", "statseg",
"{", "socket-name", cls.stats_sock, "}", "plugins",
"{", "plugin", "dpdk_plugin.so", "{", "disable",
"}", "plugin", "unittest_plugin.so", "{", "enable",
"}", "}", ]
if plugin_path is not None:
cls.vpp_cmdline.extend(["plugin_path", plugin_path])
cls.logger.info("vpp_cmdline args: %s" % cls.vpp_cmdline)
cls.logger.info("vpp_cmdline: %s" % " ".join(cls.vpp_cmdline))
@classmethod
def wait_for_enter(cls):
if cls.debug_gdbserver:
print(double_line_delim)
print("Spawned GDB server with PID: %d" % cls.vpp.pid)
elif cls.debug_gdb:
print(double_line_delim)
print("Spawned VPP with PID: %d" % cls.vpp.pid)
else:
cls.logger.debug("Spawned VPP with PID: %d" % cls.vpp.pid)
return
print(single_line_delim)
print("You can debug the VPP using e.g.:")
if cls.debug_gdbserver:
print("gdb " + cls.vpp_bin + " -ex 'target remote localhost:7777'")
print("Now is the time to attach a gdb by running the above "
"command, set up breakpoints etc. and then resume VPP from "
"within gdb by issuing the 'continue' command")
elif cls.debug_gdb:
print("gdb " + cls.vpp_bin + " -ex 'attach %s'" % cls.vpp.pid)
print("Now is the time to attach a gdb by running the above "
"command and set up breakpoints etc.")
print(single_line_delim)
raw_input("Press ENTER to continue running the testcase...")
@classmethod
def run_vpp(cls):
cmdline = cls.vpp_cmdline
if cls.debug_gdbserver:
gdbserver = '/usr/bin/gdbserver'
if not os.path.isfile(gdbserver) or \
not os.access(gdbserver, os.X_OK):
raise Exception("gdbserver binary '%s' does not exist or is "
"not executable" % gdbserver)
cmdline = [gdbserver, 'localhost:7777'] + cls.vpp_cmdline
cls.logger.info("Gdbserver cmdline is %s", " ".join(cmdline))
try:
cls.vpp = subprocess.Popen(cmdline,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
bufsize=1)
except subprocess.CalledProcessError as e:
cls.logger.critical("Couldn't start vpp: %s" % e)
raise
cls.wait_for_enter()
@classmethod
def wait_for_stats_socket(cls):
deadline = time.time() + 3
ok = False
while time.time() < deadline or \
cls.debug_gdb or cls.debug_gdbserver:
if os.path.exists(cls.stats_sock):
ok = True
break
time.sleep(0.8)
if not ok:
cls.logger.critical("Couldn't stat : {}".format(cls.stats_sock))
@classmethod
def setUpClass(cls):
"""
Perform class setup before running the testcase
Remove shared memory files, start vpp and connect the vpp-api
"""
gc.collect() # run garbage collection first
random.seed()
cls.print_header()
cls.logger = get_logger(cls.__name__)
if hasattr(cls, 'parallel_handler'):
cls.logger.addHandler(cls.parallel_handler)
cls.logger.propagate = False
cls.tempdir = tempfile.mkdtemp(
prefix='vpp-unittest-%s-' % cls.__name__)
cls.stats_sock = "%s/stats.sock" % cls.tempdir
cls.file_handler = FileHandler("%s/log.txt" % cls.tempdir)
cls.file_handler.setFormatter(
Formatter(fmt='%(asctime)s,%(msecs)03d %(message)s',
datefmt="%H:%M:%S"))
cls.file_handler.setLevel(DEBUG)
cls.logger.addHandler(cls.file_handler)
cls.shm_prefix = os.path.basename(cls.tempdir)
os.chdir(cls.tempdir)
cls.logger.info("Temporary dir is %s, shm prefix is %s",
cls.tempdir, cls.shm_prefix)
cls.setUpConstants()
cls.reset_packet_infos()
cls._captures = []
cls._zombie_captures = []
cls.verbose = 0
cls.vpp_dead = False
cls.registry = VppObjectRegistry()
cls.vpp_startup_failed = False
cls.reporter = KeepAliveReporter()
# need to catch exceptions here because if we raise, then the cleanup
# doesn't get called and we might end with a zombie vpp
try:
cls.run_vpp()
cls.reporter.send_keep_alive(cls, 'setUpClass')
VppTestResult.current_test_case_info = TestCaseInfo(
cls.logger, cls.tempdir, cls.vpp.pid, cls.vpp_bin)
cls.vpp_stdout_deque = deque()
cls.vpp_stderr_deque = deque()
cls.pump_thread_stop_flag = Event()
cls.pump_thread_wakeup_pipe = os.pipe()
cls.pump_thread = Thread(target=pump_output, args=(cls,))
cls.pump_thread.daemon = True
cls.pump_thread.start()
if cls.debug_gdb or cls.debug_gdbserver:
read_timeout = 0
else:
read_timeout = 5
cls.vapi = VppPapiProvider(cls.shm_prefix, cls.shm_prefix, cls,
read_timeout)
if cls.step:
hook = StepHook(cls)
else:
hook = PollHook(cls)
cls.vapi.register_hook(hook)
cls.wait_for_stats_socket()
cls.statistics = VPPStats(socketname=cls.stats_sock)
try:
hook.poll_vpp()
except VppDiedError:
cls.vpp_startup_failed = True
cls.logger.critical(
"VPP died shortly after startup, check the"
" output to standard error for possible cause")
raise
try:
cls.vapi.connect()
except Exception:
try:
cls.vapi.disconnect()
except Exception:
pass
if cls.debug_gdbserver:
print(colorize("You're running VPP inside gdbserver but "
"VPP-API connection failed, did you forget "
"to 'continue' VPP from within gdb?", RED))
raise
except Exception:
try:
cls.quit()
except Exception:
pass
raise
@classmethod
def quit(cls):
"""
Disconnect vpp-api, kill vpp and cleanup shared memory files
"""
if (cls.debug_gdbserver or cls.debug_gdb) and hasattr(cls, 'vpp'):
cls.vpp.poll()
if cls.vpp.returncode is None:
print(double_line_delim)
print("VPP or GDB server is still running")
print(single_line_delim)
raw_input("When done debugging, press ENTER to kill the "
"process and finish running the testcase...")
# first signal that we want to stop the pump thread, then wake it up
if hasattr(cls, 'pump_thread_stop_flag'):
cls.pump_thread_stop_flag.set()
if hasattr(cls, 'pump_thread_wakeup_pipe'):
os.write(cls.pump_thread_wakeup_pipe[1], 'ding dong wake up')
if hasattr(cls, 'pump_thread'):
cls.logger.debug("Waiting for pump thread to stop")
cls.pump_thread.join()
if hasattr(cls, 'vpp_stderr_reader_thread'):
cls.logger.debug("Waiting for stdderr pump to stop")
cls.vpp_stderr_reader_thread.join()
if hasattr(cls, 'vpp'):
if hasattr(cls, 'vapi'):
cls.vapi.disconnect()
del cls.vapi
cls.vpp.poll()
if cls.vpp.returncode is None:
cls.logger.debug("Sending TERM to vpp")
cls.vpp.kill()
cls.logger.debug("Waiting for vpp to die")
cls.vpp.communicate()
del cls.vpp
if cls.vpp_startup_failed:
stdout_log = cls.logger.info
stderr_log = cls.logger.critical
else:
stdout_log = cls.logger.info
stderr_log = cls.logger.info
if hasattr(cls, 'vpp_stdout_deque'):
stdout_log(single_line_delim)
stdout_log('VPP output to stdout while running %s:', cls.__name__)
stdout_log(single_line_delim)
vpp_output = "".join(cls.vpp_stdout_deque)
with open(cls.tempdir + '/vpp_stdout.txt', 'w') as f:
f.write(vpp_output)
stdout_log('\n%s', vpp_output)
stdout_log(single_line_delim)
if hasattr(cls, 'vpp_stderr_deque'):
stderr_log(single_line_delim)
stderr_log('VPP output to stderr while running %s:', cls.__name__)
stderr_log(single_line_delim)
vpp_output = "".join(cls.vpp_stderr_deque)
with open(cls.tempdir + '/vpp_stderr.txt', 'w') as f:
f.write(vpp_output)
stderr_log('\n%s', vpp_output)
stderr_log(single_line_delim)
@classmethod
def tearDownClass(cls):
""" Perform final cleanup after running all tests in this test-case """
cls.reporter.send_keep_alive(cls, 'tearDownClass')
cls.quit()
cls.file_handler.close()
cls.reset_packet_infos()
if debug_framework:
debug_internal.on_tear_down_class(cls)
def tearDown(self):
""" Show various debug prints after each test """
self.logger.debug("--- tearDown() for %s.%s(%s) called ---" %
(self.__class__.__name__, self._testMethodName,
self._testMethodDoc))
if not self.vpp_dead:
self.logger.debug(self.vapi.cli("show trace"))
self.logger.info(self.vapi.ppcli("show interface"))
self.logger.info(self.vapi.ppcli("show hardware"))
self.logger.info(self.statistics.set_errors_str())
self.logger.info(self.vapi.ppcli("show run"))
self.logger.info(self.vapi.ppcli("show log"))
self.registry.remove_vpp_config(self.logger)
# Save/Dump VPP api trace log
api_trace = "vpp_api_trace.%s.log" % self._testMethodName
tmp_api_trace = "/tmp/%s" % api_trace
vpp_api_trace_log = "%s/%s" % (self.tempdir, api_trace)
self.logger.info(self.vapi.ppcli("api trace save %s" % api_trace))
self.logger.info("Moving %s to %s\n" % (tmp_api_trace,
vpp_api_trace_log))
os.rename(tmp_api_trace, vpp_api_trace_log)
self.logger.info(self.vapi.ppcli("api trace custom-dump %s" %
vpp_api_trace_log))
else:
self.registry.unregister_all(self.logger)
def setUp(self):
""" Clear trace before running each test"""
self.reporter.send_keep_alive(self)
self.logger.debug("--- setUp() for %s.%s(%s) called ---" %
(self.__class__.__name__, self._testMethodName,
self._testMethodDoc))
if self.vpp_dead:
raise Exception("VPP is dead when setting up the test")
self.sleep(.1, "during setUp")
self.vpp_stdout_deque.append(
"--- test setUp() for %s.%s(%s) starts here ---\n" %
(self.__class__.__name__, self._testMethodName,
self._testMethodDoc))
self.vpp_stderr_deque.append(
"--- test setUp() for %s.%s(%s) starts here ---\n" %
(self.__class__.__name__, self._testMethodName,
self._testMethodDoc))
self.vapi.cli("clear trace")
# store the test instance inside the test class - so that objects
# holding the class can access instance methods (like assertEqual)
type(self).test_instance = self
@classmethod
def pg_enable_capture(cls, interfaces=None):
"""
Enable capture on packet-generator interfaces
:param interfaces: iterable interface indexes (if None,
use self.pg_interfaces)
"""
if interfaces is None:
interfaces = cls.pg_interfaces
for i in interfaces:
i.enable_capture()
@classmethod
def register_capture(cls, cap_name):
""" Register a capture in the testclass """
# add to the list of captures with current timestamp
cls._captures.append((time.time(), cap_name))
# filter out from zombies
cls._zombie_captures = [(stamp, name)
for (stamp, name) in cls._zombie_captures
if name != cap_name]
@classmethod
def pg_start(cls):
""" Remove any zombie captures and enable the packet generator """
# how long before capture is allowed to be deleted - otherwise vpp
# crashes - 100ms seems enough (this shouldn't be needed at all)
capture_ttl = 0.1
now = time.time()
for stamp, cap_name in cls._zombie_captures:
wait = stamp + capture_ttl - now
if wait > 0:
cls.sleep(wait, "before deleting capture %s" % cap_name)
now = time.time()
cls.logger.debug("Removing zombie capture %s" % cap_name)
cls.vapi.cli('packet-generator delete %s' % cap_name)
cls.vapi.cli("trace add pg-input 50") # 50 is maximum
cls.vapi.cli('packet-generator enable')
cls._zombie_captures = cls._captures
cls._captures = []
@classmethod
def create_pg_interfaces(cls, interfaces):
"""
Create packet-generator interfaces.
:param interfaces: iterable indexes of the interfaces.
:returns: List of created interfaces.
"""
result = []
for i in interfaces:
intf = VppPGInterface(cls, i)
setattr(cls, intf.name, intf)
result.append(intf)
cls.pg_interfaces = result
return result
@classmethod
def create_loopback_interfaces(cls, count):
"""
Create loopback interfaces.
:param count: number of interfaces created.
:returns: List of created interfaces.
"""
result = [VppLoInterface(cls) for i in range(count)]
for intf in result:
setattr(cls, intf.name, intf)
cls.lo_interfaces = result
return result
@staticmethod
def extend_packet(packet, size, padding=' '):
"""
Extend packet to given size by padding with spaces or custom padding
NOTE: Currently works only when Raw layer is present.
:param packet: packet
:param size: target size
:param padding: padding used to extend the payload
"""
packet_len = len(packet) + 4
extend = size - packet_len
if extend > 0:
            num = (extend // len(padding)) + 1
packet[Raw].load += (padding * num)[:extend]
@classmethod
def reset_packet_infos(cls):
""" Reset the list of packet info objects and packet counts to zero """
cls._packet_infos = {}
cls._packet_count_for_dst_if_idx = {}
@classmethod
def create_packet_info(cls, src_if, dst_if):
"""
Create packet info object containing the source and destination indexes
and add it to the testcase's packet info list
:param VppInterface src_if: source interface
:param VppInterface dst_if: destination interface
:returns: _PacketInfo object
"""
info = _PacketInfo()
info.index = len(cls._packet_infos)
info.src = src_if.sw_if_index
info.dst = dst_if.sw_if_index
if isinstance(dst_if, VppSubInterface):
dst_idx = dst_if.parent.sw_if_index
else:
dst_idx = dst_if.sw_if_index
if dst_idx in cls._packet_count_for_dst_if_idx:
cls._packet_count_for_dst_if_idx[dst_idx] += 1
else:
cls._packet_count_for_dst_if_idx[dst_idx] = 1
cls._packet_infos[info.index] = info
return info
@staticmethod
def info_to_payload(info):
"""
Convert _PacketInfo object to packet payload
:param info: _PacketInfo object
:returns: string containing serialized data from packet info
"""
return "%d %d %d %d %d" % (info.index, info.src, info.dst,
info.ip, info.proto)
@staticmethod
def payload_to_info(payload):
"""
Convert packet payload to _PacketInfo object
:param payload: packet payload
:returns: _PacketInfo object containing de-serialized data from payload
"""
numbers = payload.split()
info = _PacketInfo()
info.index = int(numbers[0])
info.src = int(numbers[1])
info.dst = int(numbers[2])
info.ip = int(numbers[3])
info.proto = int(numbers[4])
return info
def get_next_packet_info(self, info):
"""
Iterate over the packet info list stored in the testcase
Start iteration with first element if info is None
Continue based on index in info if info is specified
:param info: info or None
:returns: next info in list or None if no more infos
"""
if info is None:
next_index = 0
else:
next_index = info.index + 1
if next_index == len(self._packet_infos):
return None
else:
return self._packet_infos[next_index]
def get_next_packet_info_for_interface(self, src_index, info):
"""
Search the packet info list for the next packet info with same source
interface index
:param src_index: source interface index to search for
:param info: packet info - where to start the search
:returns: packet info or None
"""
while True:
info = self.get_next_packet_info(info)
if info is None:
return None
if info.src == src_index:
return info
def get_next_packet_info_for_interface2(self, src_index, dst_index, info):
"""
Search the packet info list for the next packet info with same source
and destination interface indexes
:param src_index: source interface index to search for
:param dst_index: destination interface index to search for
:param info: packet info - where to start the search
:returns: packet info or None
"""
while True:
info = self.get_next_packet_info_for_interface(src_index, info)
if info is None:
return None
if info.dst == dst_index:
return info
def assert_equal(self, real_value, expected_value, name_or_class=None):
if name_or_class is None:
self.assertEqual(real_value, expected_value)
return
try:
msg = "Invalid %s: %d('%s') does not match expected value %d('%s')"
msg = msg % (getdoc(name_or_class).strip(),
real_value, str(name_or_class(real_value)),
expected_value, str(name_or_class(expected_value)))
except Exception:
msg = "Invalid %s: %s does not match expected value %s" % (
name_or_class, real_value, expected_value)
self.assertEqual(real_value, expected_value, msg)
def assert_in_range(self,
real_value,
expected_min,
expected_max,
name=None):
if name is None:
msg = None
else:
msg = "Invalid %s: %s out of range <%s,%s>" % (
name, real_value, expected_min, expected_max)
self.assertTrue(expected_min <= real_value <= expected_max, msg)
def assert_packet_checksums_valid(self, packet,
ignore_zero_udp_checksums=True):
received = packet.__class__(str(packet))
self.logger.debug(
ppp("Verifying packet checksums for packet:", received))
udp_layers = ['UDP', 'UDPerror']
checksum_fields = ['cksum', 'chksum']
checksums = []
counter = 0
temp = received.__class__(str(received))
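        # Strategy: walk the layers of a copy of the packet, delete every
        # checksum field found, then rebuild the packet so that scapy
        # recomputes the checksums, and compare them with the received values.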
while True:
layer = temp.getlayer(counter)
if layer:
for cf in checksum_fields:
if hasattr(layer, cf):
if ignore_zero_udp_checksums and \
0 == getattr(layer, cf) and \
layer.name in udp_layers:
continue
delattr(layer, cf)
checksums.append((counter, cf))
else:
break
counter = counter + 1
if 0 == len(checksums):
return
temp = temp.__class__(str(temp))
for layer, cf in checksums:
calc_sum = getattr(temp[layer], cf)
self.assert_equal(
getattr(received[layer], cf), calc_sum,
"packet checksum on layer #%d: %s" % (layer, temp[layer].name))
self.logger.debug(
"Checksum field `%s` on `%s` layer has correct value `%s`" %
(cf, temp[layer].name, calc_sum))
def assert_checksum_valid(self, received_packet, layer,
field_name='chksum',
ignore_zero_checksum=False):
""" Check checksum of received packet on given layer """
received_packet_checksum = getattr(received_packet[layer], field_name)
if ignore_zero_checksum and 0 == received_packet_checksum:
return
recalculated = received_packet.__class__(str(received_packet))
delattr(recalculated[layer], field_name)
recalculated = recalculated.__class__(str(recalculated))
self.assert_equal(received_packet_checksum,
getattr(recalculated[layer], field_name),
"packet checksum on layer: %s" % layer)
def assert_ip_checksum_valid(self, received_packet,
ignore_zero_checksum=False):
self.assert_checksum_valid(received_packet, 'IP',
ignore_zero_checksum=ignore_zero_checksum)
def assert_tcp_checksum_valid(self, received_packet,
ignore_zero_checksum=False):
self.assert_checksum_valid(received_packet, 'TCP',
ignore_zero_checksum=ignore_zero_checksum)
def assert_udp_checksum_valid(self, received_packet,
ignore_zero_checksum=True):
self.assert_checksum_valid(received_packet, 'UDP',
ignore_zero_checksum=ignore_zero_checksum)
def assert_embedded_icmp_checksum_valid(self, received_packet):
if received_packet.haslayer(IPerror):
self.assert_checksum_valid(received_packet, 'IPerror')
if received_packet.haslayer(TCPerror):
self.assert_checksum_valid(received_packet, 'TCPerror')
if received_packet.haslayer(UDPerror):
self.assert_checksum_valid(received_packet, 'UDPerror',
ignore_zero_checksum=True)
if received_packet.haslayer(ICMPerror):
self.assert_checksum_valid(received_packet, 'ICMPerror')
def assert_icmp_checksum_valid(self, received_packet):
self.assert_checksum_valid(received_packet, 'ICMP')
self.assert_embedded_icmp_checksum_valid(received_packet)
def assert_icmpv6_checksum_valid(self, pkt):
if pkt.haslayer(ICMPv6DestUnreach):
self.assert_checksum_valid(pkt, 'ICMPv6DestUnreach', 'cksum')
self.assert_embedded_icmp_checksum_valid(pkt)
if pkt.haslayer(ICMPv6EchoRequest):
self.assert_checksum_valid(pkt, 'ICMPv6EchoRequest', 'cksum')
if pkt.haslayer(ICMPv6EchoReply):
self.assert_checksum_valid(pkt, 'ICMPv6EchoReply', 'cksum')
def assert_packet_counter_equal(self, counter, expected_value):
counters = self.vapi.cli("sh errors").split('\n')
counter_value = -1
for i in range(1, len(counters)-1):
results = counters[i].split()
if results[1] == counter:
counter_value = int(results[0])
break
self.assert_equal(counter_value, expected_value,
"packet counter `%s'" % counter)
@classmethod
def sleep(cls, timeout, remark=None):
if hasattr(cls, 'logger'):
cls.logger.debug("Starting sleep for %ss (%s)" % (timeout, remark))
before = time.time()
time.sleep(timeout)
after = time.time()
if after - before > 2 * timeout:
cls.logger.error("unexpected time.sleep() result - "
"slept for %ss instead of ~%ss!" % (
after - before, timeout))
if hasattr(cls, 'logger'):
cls.logger.debug(
"Finished sleep (%s) - slept %ss (wanted %ss)" % (
remark, after - before, timeout))
def send_and_assert_no_replies(self, intf, pkts, remark="", timeout=None):
self.vapi.cli("clear trace")
intf.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
if not timeout:
timeout = 1
for i in self.pg_interfaces:
i.get_capture(0, timeout=timeout)
i.assert_nothing_captured(remark=remark)
timeout = 0.1
def send_and_expect(self, input, pkts, output):
self.vapi.cli("clear trace")
input.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
        if isinstance(output, list):
rx = []
for o in output:
                rx.append(o.get_capture(len(pkts)))
else:
rx = output.get_capture(len(pkts))
return rx
def send_and_expect_only(self, input, pkts, output, timeout=None):
self.vapi.cli("clear trace")
input.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
        if isinstance(output, list):
outputs = output
rx = []
for o in outputs:
                rx.append(o.get_capture(len(pkts)))
else:
rx = output.get_capture(len(pkts))
outputs = [output]
if not timeout:
timeout = 1
for i in self.pg_interfaces:
if i not in outputs:
i.get_capture(0, timeout=timeout)
i.assert_nothing_captured()
timeout = 0.1
return rx
def get_testcase_doc_name(test):
return getdoc(test.__class__).splitlines()[0]
def get_test_description(descriptions, test):
short_description = test.shortDescription()
if descriptions and short_description:
return short_description
else:
return str(test)
class TestCaseInfo(object):
def __init__(self, logger, tempdir, vpp_pid, vpp_bin_path):
self.logger = logger
self.tempdir = tempdir
self.vpp_pid = vpp_pid
self.vpp_bin_path = vpp_bin_path
self.core_crash_test = None
class VppTestResult(unittest.TestResult):
"""
@property result_string
String variable to store the test case result string.
@property errors
List variable containing 2-tuples of TestCase instances and strings
holding formatted tracebacks. Each tuple represents a test which
raised an unexpected exception.
@property failures
List variable containing 2-tuples of TestCase instances and strings
holding formatted tracebacks. Each tuple represents a test where
a failure was explicitly signalled using the TestCase.assert*()
methods.
"""
failed_test_cases_info = set()
core_crash_test_cases_info = set()
current_test_case_info = None
def __init__(self, stream, descriptions, verbosity, runner):
"""
        :param stream: File descriptor to which test results are reported.
            Set to the standard error stream by default.
        :param descriptions: Boolean deciding whether to use test case
            descriptions.
        :param verbosity: Integer specifying the required verbosity level.
"""
unittest.TestResult.__init__(self, stream, descriptions, verbosity)
self.stream = stream
self.descriptions = descriptions
self.verbosity = verbosity
self.result_string = None
self.runner = runner
def addSuccess(self, test):
"""
Record a test succeeded result
:param test:
"""
if self.current_test_case_info:
self.current_test_case_info.logger.debug(
"--- addSuccess() %s.%s(%s) called" % (test.__class__.__name__,
test._testMethodName,
test._testMethodDoc))
unittest.TestResult.addSuccess(self, test)
self.result_string = colorize("OK", GREEN)
self.send_result_through_pipe(test, PASS)
def addSkip(self, test, reason):
"""
Record a test skipped.
:param test:
:param reason:
"""
if self.current_test_case_info:
self.current_test_case_info.logger.debug(
"--- addSkip() %s.%s(%s) called, reason is %s" %
(test.__class__.__name__, test._testMethodName,
test._testMethodDoc, reason))
unittest.TestResult.addSkip(self, test, reason)
self.result_string = colorize("SKIP", YELLOW)
self.send_result_through_pipe(test, SKIP)
def symlink_failed(self):
if self.current_test_case_info:
try:
failed_dir = os.getenv('VPP_TEST_FAILED_DIR')
link_path = os.path.join(
failed_dir,
'%s-FAILED' %
os.path.basename(self.current_test_case_info.tempdir))
if self.current_test_case_info.logger:
self.current_test_case_info.logger.debug(
"creating a link to the failed test")
self.current_test_case_info.logger.debug(
"os.symlink(%s, %s)" %
(self.current_test_case_info.tempdir, link_path))
if os.path.exists(link_path):
if self.current_test_case_info.logger:
self.current_test_case_info.logger.debug(
'symlink already exists')
else:
os.symlink(self.current_test_case_info.tempdir, link_path)
except Exception as e:
if self.current_test_case_info.logger:
self.current_test_case_info.logger.error(e)
def send_result_through_pipe(self, test, result):
if hasattr(self, 'test_framework_result_pipe'):
pipe = self.test_framework_result_pipe
if pipe:
pipe.send((test.id(), result))
def log_error(self, test, err, fn_name):
if self.current_test_case_info:
if isinstance(test, unittest.suite._ErrorHolder):
test_name = test.description
else:
test_name = '%s.%s(%s)' % (test.__class__.__name__,
test._testMethodName,
test._testMethodDoc)
self.current_test_case_info.logger.debug(
"--- %s() %s called, err is %s" %
(fn_name, test_name, err))
self.current_test_case_info.logger.debug(
"formatted exception is:\n%s" %
"".join(format_exception(*err)))
def add_error(self, test, err, unittest_fn, error_type):
if error_type == FAIL:
self.log_error(test, err, 'addFailure')
error_type_str = colorize("FAIL", RED)
elif error_type == ERROR:
self.log_error(test, err, 'addError')
error_type_str = colorize("ERROR", RED)
else:
raise Exception('Error type %s cannot be used to record an '
'error or a failure' % error_type)
unittest_fn(self, test, err)
if self.current_test_case_info:
self.result_string = "%s [ temp dir used by test case: %s ]" % \
(error_type_str,
self.current_test_case_info.tempdir)
self.symlink_failed()
self.failed_test_cases_info.add(self.current_test_case_info)
if is_core_present(self.current_test_case_info.tempdir):
if not self.current_test_case_info.core_crash_test:
if isinstance(test, unittest.suite._ErrorHolder):
test_name = str(test)
else:
test_name = "'{}' ({})".format(
get_testcase_doc_name(test), test.id())
self.current_test_case_info.core_crash_test = test_name
self.core_crash_test_cases_info.add(
self.current_test_case_info)
else:
self.result_string = '%s [no temp dir]' % error_type_str
self.send_result_through_pipe(test, error_type)
def addFailure(self, test, err):
"""
Record a test failed result
:param test:
:param err: error message
"""
self.add_error(test, err, unittest.TestResult.addFailure, FAIL)
def addError(self, test, err):
"""
Record a test error result
:param test:
:param err: error message
"""
self.add_error(test, err, unittest.TestResult.addError, ERROR)
def getDescription(self, test):
"""
Get test description
:param test:
:returns: test description
"""
return get_test_description(self.descriptions, test)
def startTest(self, test):
"""
Start a test
:param test:
"""
test.print_header()
unittest.TestResult.startTest(self, test)
if self.verbosity > 0:
self.stream.writeln(
"Starting " + self.getDescription(test) + " ...")
self.stream.writeln(single_line_delim)
def stopTest(self, test):
"""
Called when the given test has been run
:param test:
"""
unittest.TestResult.stopTest(self, test)
if self.verbosity > 0:
self.stream.writeln(single_line_delim)
self.stream.writeln("%-73s%s" % (self.getDescription(test),
self.result_string))
self.stream.writeln(single_line_delim)
else:
self.stream.writeln("%-73s%s" % (self.getDescription(test),
self.result_string))
self.send_result_through_pipe(test, TEST_RUN)
def printErrors(self):
"""
Print errors from running the test case
"""
if len(self.errors) > 0 or len(self.failures) > 0:
self.stream.writeln()
self.printErrorList('ERROR', self.errors)
self.printErrorList('FAIL', self.failures)
# ^^ that is the last output from unittest before summary
if not self.runner.print_summary:
devnull = unittest.runner._WritelnDecorator(open(os.devnull, 'w'))
self.stream = devnull
self.runner.stream = devnull
def printErrorList(self, flavour, errors):
"""
Print error list to the output stream together with error type
and test case description.
:param flavour: error type
:param errors: iterable errors
"""
for test, err in errors:
self.stream.writeln(double_line_delim)
self.stream.writeln("%s: %s" %
(flavour, self.getDescription(test)))
self.stream.writeln(single_line_delim)
self.stream.writeln("%s" % err)
class VppTestRunner(unittest.TextTestRunner):
"""
A basic test runner implementation which prints results to standard error.
"""
@property
def resultclass(self):
"""Class maintaining the results of the tests"""
return VppTestResult
def __init__(self, keep_alive_pipe=None, descriptions=True, verbosity=1,
result_pipe=None, failfast=False, buffer=False,
resultclass=None, print_summary=True):
# ignore stream setting here, use hard-coded stdout to be in sync
# with prints from VppTestCase methods ...
super(VppTestRunner, self).__init__(sys.stdout, descriptions,
verbosity, failfast, buffer,
resultclass)
KeepAliveReporter.pipe = keep_alive_pipe
self.orig_stream = self.stream
self.resultclass.test_framework_result_pipe = result_pipe
self.print_summary = print_summary
def _makeResult(self):
return self.resultclass(self.stream,
self.descriptions,
self.verbosity,
self)
def run(self, test):
"""
Run the tests
:param test:
"""
faulthandler.enable() # emit stack trace to stderr if killed by signal
result = super(VppTestRunner, self).run(test)
if not self.print_summary:
self.stream = self.orig_stream
result.stream = self.orig_stream
return result
class Worker(Thread):
def __init__(self, args, logger, env={}):
self.logger = logger
self.args = args
self.result = None
self.env = copy.deepcopy(env)
super(Worker, self).__init__()
def run(self):
executable = self.args[0]
self.logger.debug("Running executable w/args `%s'" % self.args)
env = os.environ.copy()
env.update(self.env)
env["CK_LOG_FILE_NAME"] = "-"
self.process = subprocess.Popen(
self.args, shell=False, env=env, preexec_fn=os.setpgrp,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = self.process.communicate()
self.logger.debug("Finished running `%s'" % executable)
self.logger.info("Return code is `%s'" % self.process.returncode)
self.logger.info(single_line_delim)
self.logger.info("Executable `%s' wrote to stdout:" % executable)
self.logger.info(single_line_delim)
self.logger.info(out)
self.logger.info(single_line_delim)
self.logger.info("Executable `%s' wrote to stderr:" % executable)
self.logger.info(single_line_delim)
self.logger.info(err)
self.logger.info(single_line_delim)
self.result = self.process.returncode
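# Illustrative usage sketch (not part of the original framework): a Worker is
# started like a normal Thread, joined, and its `result` attribute then holds
# the child process's return code. The binary path and logger below are
# assumptions for demonstration only.
if __name__ == "__main__":
    import logging
    logging.basicConfig(level=logging.DEBUG)
    demo_logger = logging.getLogger("worker-demo")
    worker = Worker(["/bin/true"], demo_logger)
    worker.start()
    worker.join()
    demo_logger.info("worker exited with return code %s", worker.result)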
|
tests (copy).py
|
#!/usr/bin/env python
import unittest
import contextlib
import json
import constants as cst
import calendar
import time
import rospy
from std_msgs.msg import String
from threading import Thread
PKG = 'social_robotics'
import sys
def load_json(path):
with contextlib.closing(open(path)) as json_data:
return json.loads(json_data.read())
intents = load_json(cst.INTENTS_JSON)
actions = load_json(cst.ACTIONS_JSON)
sentences = load_json(cst.SENTENCES_JSON)
#!/usr/bin/env python
import rospy
from std_msgs.msg import String, Bool
import constants as cst
import contextlib
import json
import string
import pyttsx
import features
from time import sleep
class TestStaticVocabulary(unittest.TestCase):
def __init__(self, *args, **kwargs):
rospy.init_node("test_dialog")
rospy.loginfo('start')
self.pub = rospy.Publisher("transcription", String, queue_size=10)
rospy.sleep(2)
self.timeout = 8
self.result = None
#self.wait()
self.listener()
def run(self):
print self.test_name()
def wait(self):
while self.sub.get_num_connections() == 0:
print 'waiting ...'
rospy.sleep(0.2)
def listener(self):
rospy.Subscriber("speaker", String, self.callback)
rospy.loginfo(rospy.get_caller_id() + ' Listening ...')
rospy.sleep(2)
Thread(target = self.run).start()
rospy.spin()
def test_name(self):
#rospy.loginfo(rospy.get_caller_id() + ' re re')
self.send('what time is it')
now = calendar.timegm(time.gmtime())
while calendar.timegm(time.gmtime()) - now < self.timeout:
if self.result != None:
return self.result, intents['intents']['0']['answer']
time.sleep(0.2)
return self.result, intents['intents']['0']['answer']
#self.assertEquals(1, 2, 'timeout '+self.result)
def callback(self, data):
rospy.logwarn('callback ' + data.data)
self.result = data.data
#self.assertEquals(self.result, intents['intents']['0']['answer'])
def send(self, data):
rospy.loginfo(rospy.get_caller_id() + ' send '+data)
self.pub.publish(data)
if __name__ == '__main__':
TestStaticVocabulary()
|
contextlog.py
|
"""This file and its contents are licensed under the Apache License 2.0. Please see the included NOTICE for copyright information and LICENSE for a copy of the license.
"""
import logging
import os
import sys
import io
import requests
import calendar
import threading
import json
import platform
from datetime import datetime
from django.conf import settings
from uuid import uuid4
from .common import get_app_version, get_bool_env, get_client_ip
from .io import get_config_dir
logger = logging.getLogger(__name__)
class ContextLog(object):
def __init__(self):
self.collect_analytics = get_bool_env('collect_analytics', True)
self.version = get_app_version()
self.server_id = self._get_server_id()
def _get_label_studio_env(self):
env = {}
for env_key, env_value in os.environ.items():
if env_key.startswith('LABEL_STUDIO_'):
env[env_key] = env_value
return env
def _get_server_id(self):
user_id_file = os.path.join(get_config_dir(), 'user_id')
if not os.path.exists(user_id_file):
user_id = str(uuid4())
with io.open(user_id_file, mode='w', encoding='utf-8') as fout:
fout.write(user_id)
else:
with io.open(user_id_file, encoding='utf-8') as f:
user_id = f.read()
return user_id
def _is_docker(self):
path = '/proc/self/cgroup'
return (
os.path.exists('/.dockerenv') or
os.path.isfile(path) and any('docker' in line for line in open(path, encoding='utf-8'))
)
def _get_timestamp_now(self):
return calendar.timegm(datetime.now().utctimetuple())
def _prepare_json(self, payload, request):
j = payload['json']
view_name = payload['view_name']
if view_name in ('tasks:api:task-annotations', 'tasks:api-annotations:annotation-detail'):
types = [r.get('type') for r in j.get('result', [])]
payload['json'] = {'result': types, 'lead_time': j.get('lead_time')}
def _get_response_content(self, response):
try:
return json.loads(response.content)
except:
return
def _prepare_response(self, payload):
view_name = payload['view_name']
if view_name in (
'data_export:api-projects:project-export',
'data_manager:api:view-tasks',
'data_manager:data_manager.api.ProjectActionsAPI',
'data_manager:data_manager.api.TaskAPI',
'projects:api-templates:template-list',
'data_import:api-projects:project-file-upload-list',
'tasks:api:task-annotations',
'tasks:api-annotations:annotation-detail'
) and payload['status_code'] in (200, 201):
payload['response'] = None
def _exclude_endpoint(self, request):
if request.resolver_match and request.resolver_match.view_name in [
'django.views.static.serve',
'data_import:data-upload',
'version'
]:
return True
if request.GET.get('interaction', None) == 'timer':
return True
def send(self, request=None, response=None, body=None):
if settings.DEBUG:
try:
payload = self.create_payload(request, response, body)
except Exception as exc:
logger.error(exc, exc_info=True)
else:
if get_bool_env('DEBUG_CONTEXTLOG', False):
logger.debug(json.dumps(payload, indent=2))
pass
else:
# ignore specific events
if not self.collect_analytics or self._exclude_endpoint(request):
return
thread = threading.Thread(target=self.send_job, args=(request, response, body))
thread.start()
@staticmethod
def browser_exists(request):
return hasattr(request, 'user_agent') and request.user_agent and \
hasattr(request.user_agent, 'browser') and request.user_agent.browser
def create_payload(self, request, response, body):
payload = {
'url': request.build_absolute_uri(),
'server_id': self._get_server_id(),
'server_time': self._get_timestamp_now(),
'session_id': request.session.get('uid', None),
'client_ip': get_client_ip(request),
'is_docker': self._is_docker(),
'python': str(sys.version_info[0]) + '.' + str(sys.version_info[1]),
'env': self._get_label_studio_env(),
'version': self.version,
'view_name': request.resolver_match.view_name if request.resolver_match else None,
'namespace': request.resolver_match.namespace if request.resolver_match else None,
'scheme': request.scheme,
'method': request.method,
'values': request.GET.dict(),
'json': body,
'language': request.LANGUAGE_CODE,
'content_type': request.content_type,
'content_length': int(request.environ.get('CONTENT_LENGTH')) if request.environ.get('CONTENT_LENGTH') else None,
'status_code': response.status_code,
'response': self._get_response_content(response)
}
if self.browser_exists(request):
payload.update({
'is_mobile': request.user_agent.is_mobile,
'is_tablet': request.user_agent.is_tablet,
'is_touch_capable': request.user_agent.is_touch_capable,
'is_pc': request.user_agent.is_pc,
'is_bot': request.user_agent.is_bot,
'browser': request.user_agent.browser.family,
'browser_version': request.user_agent.browser.version_string,
'os': request.user_agent.os.family,
'platform_system': platform.system(),
'platform_release': platform.release(),
'os_version': request.user_agent.os.version_string,
'device': request.user_agent.device.family,
})
self._prepare_json(payload, request)
self._prepare_response(payload)
return payload
def send_job(self, request, response, body):
try:
payload = self.create_payload(request, response, body)
except:
pass
else:
try:
url = 'https://tele.labelstud.io'
requests.post(url=url, json=payload)
except:
pass
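# Minimal usage sketch (an assumption, not part of this module): a Django
# middleware could hold one ContextLog instance and forward every
# request/response pair to `send`. The middleware class below is hypothetical.
class ContextLogMiddlewareSketch:
    def __init__(self, get_response):
        self.get_response = get_response
        self.log = ContextLog()

    def __call__(self, request):
        try:
            body = json.loads(request.body) if request.body else None
        except ValueError:
            body = None
        response = self.get_response(request)
        self.log.send(request=request, response=response, body=body)
        return response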
|
cli.py
|
from __future__ import absolute_import
import sys
import logging
from flask_assistant.utils import get_assistant
from .schema_handlers import IntentGenerator, EntityGenerator, TemplateCreator
from .api import ApiAi
from . import logger
from multiprocessing import Process
logger.setLevel(logging.INFO)
api = ApiAi()
def file_from_args():
try:
return sys.argv[1]
except IndexError:
raise IndexError('Please provide the file containing the Assistant object')
def gen_templates():
filename = file_from_args()
assist = get_assistant(filename)
templates = TemplateCreator(assist)
templates.generate()
def intents():
logger.info('Getting Registered Intents...')
filename = file_from_args()
assist = get_assistant(filename)
intents = assist.api.agent_intents
for i in intents:
logger.info(i.name)
return intents
def entities():
logger.info('Getting Registered Entities...')
filename = file_from_args()
assist = get_assistant(filename)
ents = assist.api.agent_entities
for i in ents:
logger.info(i.name)
return ents
def schema():
filename = file_from_args()
assist = get_assistant(filename)
intents = IntentGenerator(assist)
entities = EntityGenerator(assist)
templates = TemplateCreator(assist)
templates.generate()
intents.generate()
entities.generate()
def check():
filename = file_from_args()
assist = get_assistant(filename)
# reg_total = len(assist.api.agent_intents)
# map_total = len(assist._intent_action_funcs)
reg_names = [i.name for i in assist.api.agent_intents]
map_names = [i for i in assist._intent_action_funcs.keys()]
extra_reg = set(reg_names) - set(map_names)
extra_map = set(map_names) - set(reg_names)
if extra_reg != set():
print('\nThe following Intents are registered but not mapped to an action function:')
print(extra_reg)
print()
else:
print('\n All registered intents are mapped\n')
if extra_map != set():
print('\nThe following Intents are mapped to an action function, but not registered:')
print(extra_map)
print()
else:
print('\n All mapped intents are registered\n')
print('Registered Entities:')
print([i.name for i in assist.api.agent_entities])
def query():
filename = file_from_args()
assist = get_assistant(filename)
p = Process(target=assist.app.run)
p.start()
while True:
q = input('Enter query...\n')
resp = assist.api.post_query(q).json()
try:
print('Matched: {}'.format(resp['result']['metadata']['intentName']))
print('Params: {}'.format(resp['result']['parameters']))
print(resp['result']['fulfillment']['speech'])
except KeyError:
logger.error('Error:')
logger.error(resp['status'])
|
function.py
|
#
# public function used by CUIT web site
#
from threading import Thread
from flask.ext.mail import Message
import config
def user_rank_color(score):
if score in xrange(0,1000):
return "#24e5bf"
elif score in xrange(1000,1500):
return "#f2cf2e"
else :
return "#fd8321"
def submit_result_color(res):
result = res.lower()
if result == 'ok' or result == 'accepted':
return 'blue'
else :
return 'red'
problem_page_mapper = {
'hdu': 'http://acm.hdu.edu.cn/showproblem.php?pid={pid}',
'poj': 'http://poj.org/problem?id={pid}',
'zoj': 'http://acm.zju.edu.cn/onlinejudge/showProblem.do?problemCode={pid}',
'bnu': 'http://acm.bnu.edu.cn/v3/problem_show.php?pid={pid}',
'vj': 'http://acm.hust.edu.cn/vjudge/problem/viewProblem.action?id={pid}',
}
def submit_problem_page(oj_name, pid):
if oj_name in problem_page_mapper:
return "<a href='{url}'>{pid}</a>".format(url = problem_page_mapper[oj_name].format(pid=pid), pid = pid)
return str(pid)
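# For example, submit_problem_page('poj', 1000) returns
#   "<a href='http://poj.org/problem?id=1000'>1000</a>"
# while an unknown judge name falls back to the bare problem id string.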
def async(func):
def wrapper(*args, **kwargs):
func_thread = Thread(target = func, args = args, kwargs = kwargs)
func_thread.start()
return wrapper
#
# @brief: send mail
# @allowed user: all
#
def send_mail(sender, msg):
sender.send(msg)
@async
def reply_of_apply(sender, user, app, opt):
if opt == 'accept':
template_mail = config.APPLY_ACCEPT_MAIL
else:
template_mail = config.APPLY_REJECT_MAIL
subject = template_mail['subject']
body = template_mail['body'].format(name=user['name'])
recipient = user['email']
with app:
msg = Message(subject, recipients=[recipient], body=body)
send_mail(sender, msg)
|
__init__.py
|
import json
import re
import threading
import time
from urllib.request import Request, urlopen
from i3pystatus import SettingsBase, IntervalModule, formatp
from i3pystatus.core.util import user_open, internet, require
class WeatherBackend(SettingsBase):
settings = ()
@require(internet)
def http_request(self, url, headers=None):
req = Request(url, headers=headers or {})
with urlopen(req) as content:
try:
content_type = dict(content.getheaders())['Content-Type']
charset = re.search(r'charset=(.*)', content_type).group(1)
except AttributeError:
charset = 'utf-8'
return content.read().decode(charset)
@require(internet)
def api_request(self, url, headers=None):
self.logger.debug('Making API request to %s', url)
try:
response_json = self.http_request(url, headers=headers).strip()
if not response_json:
self.logger.debug('JSON response from %s was blank', url)
return {}
try:
response = json.loads(response_json)
except json.decoder.JSONDecodeError as exc:
self.logger.error('Error loading JSON: %s', exc)
self.logger.debug('JSON text that failed to load: %s',
response_json)
return {}
self.logger.log(5, 'API response: %s', response)
error = self.check_response(response)
if error:
self.logger.error('Error in JSON response: %s', error)
return {}
return response
except Exception as exc:
self.logger.error(
'Failed to make API request to %s. Exception follows:', url,
exc_info=True
)
return {}
def check_response(self, response):
return False
class Weather(IntervalModule):
'''
This is a generic weather-checker which must use a configured weather
backend. For a list of all available backends, see :ref:`weatherbackends`.
Double-clicking on the module will launch the forecast page for the
location being checked, and single-clicking will trigger an update.
.. _weather-formatters:
.. rubric:: Available formatters
* `{city}` — Location of weather observation
* `{condition}` — Current weather condition (Rain, Snow, Overcast, etc.)
* `{icon}` — Icon representing the current weather condition
* `{observation_time}` — Time of weather observation (supports strftime format flags)
* `{current_temp}` — Current temperature, excluding unit
* `{low_temp}` — Forecasted low temperature, excluding unit
* `{high_temp}` — Forecasted high temperature, excluding unit (may be
empty in the late afternoon)
* `{temp_unit}` — Either ``°C`` or ``°F``, depending on whether metric or
imperial units are being used
* `{feelslike}` — "Feels Like" temperature, excluding unit
* `{dewpoint}` — Dewpoint temperature, excluding unit
* `{wind_speed}` — Wind speed, excluding unit
* `{wind_unit}` — Either ``kph`` or ``mph``, depending on whether metric or
imperial units are being used
* `{wind_direction}` — Wind direction
* `{wind_gust}` — Speed of wind gusts in mph/kph, excluding unit
* `{pressure}` — Barometric pressure, excluding unit
* `{pressure_unit}` — ``mb`` or ``in``, depending on whether metric or
imperial units are being used
* `{pressure_trend}` — ``+`` if rising, ``-`` if falling, or an empty
string if the pressure is steady (neither rising nor falling)
* `{visibility}` — Visibility distance, excluding unit
* `{visibility_unit}` — Either ``km`` or ``mi``, depending on whether
metric or imperial units are being used
* `{humidity}` — Current humidity, excluding percentage symbol
* `{uv_index}` — UV Index
* `{update_error}` — When the configured weather backend encounters an
error during an update, this formatter will be set to the value of the
backend's **update_error** config value. Otherwise, this formatter will
be an empty string.
This module supports the :ref:`formatp <formatp>` extended string format
syntax. This allows for values to be hidden when they evaluate as False.
The default **format** string value for this module makes use of this
syntax to conditionally show the value of the **update_error** config value
when the backend encounters an error during an update.
See the following links for usage examples for the available weather
backends:
- :ref:`Weather.com <weather-usage-weathercom>`
- :ref:`Weather Underground <weather-usage-wunderground>`
.. rubric:: Troubleshooting
If an error is encountered while updating, the ``{update_error}`` formatter
will be set, and (provided it is in your ``format`` string) will show up
next to the forecast to alert you to the error. The error message will (by
default) be logged to ``~/.i3pystatus-<pid>`` where ``<pid>`` is the PID of
the update thread. However, it may be more convenient to manually set the
logfile to make the location of the log data predictable and avoid clutter
in your home directory. Additionally, using the ``DEBUG`` log level can
be helpful in revealing why the module is not working as expected. For
example:
.. code-block:: python
import logging
from i3pystatus import Status
from i3pystatus.weather import weathercom
status = Status(logfile='/home/username/var/i3pystatus.log')
status.register(
'weather',
format='{condition} {current_temp}{temp_unit}[ {icon}][ Hi: {high_temp}][ Lo: {low_temp}][ {update_error}]',
colorize=True,
hints={'markup': 'pango'},
update_error='<span color="#ff0000">!</span>',
log_level=logging.DEBUG,
backend=weathercom.Weathercom(
location_code='94107:4:US',
units='imperial',
log_level=logging.DEBUG,
),
)
.. note::
The log level must be set separately in both the module and backend
contexts.
'''
settings = (
('colorize', 'Vary the color depending on the current conditions.'),
('color_icons', 'Dictionary mapping weather conditions to tuples '
'containing a UTF-8 code for the icon, and the color '
'to be used.'),
('color', 'Display color (or fallback color if ``colorize`` is True). '
'If not specified, falls back to default i3bar color.'),
('backend', 'Weather backend instance'),
('refresh_icon', 'Text to display (in addition to any text currently '
'shown by the module) when refreshing weather data. '
'**NOTE:** Depending on how quickly the update is '
'performed, the icon may not be displayed.'),
('online_interval', 'seconds between updates when online (defaults to interval)'),
('offline_interval', 'seconds between updates when offline (default: 300)'),
'format',
)
required = ('backend',)
colorize = False
color_icons = {
'Fair': (u'\u263c', '#ffcc00'),
'Fog': (u'', '#949494'),
'Cloudy': (u'\u2601', '#f8f8ff'),
'Partly Cloudy': (u'\u2601', '#f8f8ff'), # \u26c5 is not in many fonts
'Rainy': (u'\u26c8', '#cbd2c0'),
'Thunderstorm': (u'\u26a1', '#cbd2c0'),
'Sunny': (u'\u2600', '#ffff00'),
'Snow': (u'\u2603', '#ffffff'),
'default': ('', None),
}
color = None
backend = None
interval = 1800
offline_interval = 300
online_interval = None
refresh_icon = '⟳'
format = '{current_temp}{temp_unit}[ {update_error}]'
output = {'full_text': ''}
on_doubleleftclick = ['launch_web']
on_leftclick = ['check_weather']
def launch_web(self):
if self.backend.forecast_url and self.backend.forecast_url != 'N/A':
self.logger.debug('Launching %s in browser', self.backend.forecast_url)
user_open(self.backend.forecast_url)
def init(self):
if self.online_interval is None:
self.online_interval = int(self.interval)
if self.backend is None:
raise RuntimeError('A backend is required')
self.backend.data = {
'city': '',
'condition': '',
'observation_time': '',
'current_temp': '',
'low_temp': '',
'high_temp': '',
'temp_unit': '',
'feelslike': '',
'dewpoint': '',
'wind_speed': '',
'wind_unit': '',
'wind_direction': '',
'wind_gust': '',
'pressure': '',
'pressure_unit': '',
'pressure_trend': '',
'visibility': '',
'visibility_unit': '',
'humidity': '',
'uv_index': '',
'update_error': '',
}
self.backend.init()
self.condition = threading.Condition()
self.thread = threading.Thread(target=self.update_thread, daemon=True)
self.thread.start()
def update_thread(self):
if internet():
self.interval = self.online_interval
else:
self.interval = self.offline_interval
try:
self.check_weather()
while True:
with self.condition:
self.condition.wait(self.interval)
self.check_weather()
except Exception:
msg = 'Exception in {thread} at {time}, module {name}'.format(
thread=threading.current_thread().name,
time=time.strftime('%c'),
name=self.__class__.__name__,
)
self.logger.error(msg, exc_info=True)
def check_weather(self):
'''
Check the weather using the configured backend
'''
self.output['full_text'] = \
self.refresh_icon + self.output.get('full_text', '')
self.backend.check_weather()
self.refresh_display()
def get_color_data(self, condition):
'''
Disambiguate similarly-named weather conditions, and return the icon
and color that match.
'''
if condition not in self.color_icons:
# Check for similarly-named conditions if no exact match found
condition_lc = condition.lower()
if 'cloudy' in condition_lc or 'clouds' in condition_lc:
if 'partly' in condition_lc:
condition = 'Partly Cloudy'
else:
condition = 'Cloudy'
elif condition_lc == 'overcast':
condition = 'Cloudy'
elif 'thunder' in condition_lc or 't-storm' in condition_lc:
condition = 'Thunderstorm'
elif 'snow' in condition_lc:
condition = 'Snow'
elif 'rain' in condition_lc or 'showers' in condition_lc:
condition = 'Rainy'
elif 'sunny' in condition_lc:
condition = 'Sunny'
elif 'clear' in condition_lc or 'fair' in condition_lc:
condition = 'Fair'
elif 'fog' in condition_lc:
condition = 'Fog'
return self.color_icons['default'] \
if condition not in self.color_icons \
else self.color_icons[condition]
def refresh_display(self):
self.logger.debug('Weather data: %s', self.backend.data)
self.backend.data['icon'], condition_color = \
self.get_color_data(self.backend.data['condition'])
color = condition_color if self.colorize else self.color
self.output = {
'full_text': formatp(self.format, **self.backend.data).strip(),
'color': color,
}
def run(self):
pass
|
client.py
|
"""
This module contains the HiSockClient, used to power the client
of HiSock, but also contains a `connect` function, to pass in
things automatically. It is strongly advised to use `connect`
over `HiSockClient`, as `connect` passes in some key arguments
that `HiSockClient` does not provide
====================================
Copyright SSS_Says_Snek, 2021-present
====================================
"""
# Imports
from __future__ import annotations # Remove when 3.10 is used by majority
import socket
import json # Handle sending dictionaries
import errno # Handle fatal errors with the server
import sys # Utilize stderr
import threading # Threaded client and decorators
import traceback # Error handling
from typing import Callable, Union # Type hints
from ipaddress import IPv4Address # Comparisons
from time import time # Unix timestamp support
try:
# Pip builds require relative import
from .utils import (
ClientException,
ClientNotFound,
ServerException,
ServerNotRunning,
MessageCacheMember,
ClientInfo,
Sendable,
Client,
_removeprefix,
_type_cast,
make_header,
iptup_to_str,
validate_ipv4,
)
from ._shared import _HiSockBase
except ImportError:
# Relative import doesn't work for non-pip builds
from utils import (
ClientException,
ClientNotFound,
ServerException,
ServerNotRunning,
MessageCacheMember,
ClientInfo,
Sendable,
Client,
_removeprefix,
_type_cast,
make_header,
iptup_to_str,
validate_ipv4,
)
from _shared import _HiSockBase
# ░█████╗░░█████╗░██╗░░░██╗████████╗██╗░█████╗░███╗░░██╗██╗
# ██╔══██╗██╔══██╗██║░░░██║╚══██╔══╝██║██╔══██╗████╗░██║██║
# ██║░░╚═╝███████║██║░░░██║░░░██║░░░██║██║░░██║██╔██╗██║██║
# ██║░░██╗██╔══██║██║░░░██║░░░██║░░░██║██║░░██║██║╚████║╚═╝
# ╚█████╔╝██║░░██║╚██████╔╝░░░██║░░░██║╚█████╔╝██║░╚███║██╗
# ░╚════╝░╚═╝░░╚═╝░╚═════╝░░░░╚═╝░░░╚═╝░╚════╝░╚═╝░░╚══╝╚═╝
# Change this code only if you know what you are doing!
# If this code is changed, the client may not work properly
class HiSockClient(_HiSockBase):
"""
The client class for :mod:`HiSock`.
:param addr: A two-element tuple, containing the IP address and the
port number of where the server is hosted.
**Only IPv4 is currently supported.**
:type addr: tuple
:param name: Either a string or NoneType, representing the name the client
goes by. Having a name provides an easy interface for sending
data to a specific client and identifying clients. It is therefore
highly recommended to pass in a name.
Pass in NoneType for no name (:meth:`connect` should handle that)
:type name: str, optional
:param group: Either a string or NoneType representing the group the client
is in. Being in a group provides an easy interface of sending
data to multiple specific clients, and identifying multiple clients.
It is highly recommended to provide a group for complex servers.
Pass in NoneType for no group (:meth:`connect` should handle that).
:type group: str, optional
:param header_len: An integer defining the header length of every message.
A larger header length would mean a larger maximum message length
(about 10**header_len).
**MUST** be the same header length as the server, or else it will crash
(hard to debug too!).
Defaults to 16 (maximum length of content: 10 quadrillion bytes).
:type header_len: int, optional
:param cache_size: The size of the message cache.
-1 or below for no message cache, 0 for an unlimited cache size,
and any other number for the cache size.
:type cache_size: int, optional
:ivar tuple addr: A two-element tuple containing the IP address and the
port number of the server.
:ivar int header_len: An integer storing the header length of each "message".
:ivar str name: A string representing the name of the client to identify by.
Default is None.
:ivar str group: A string representing the group of the client to identify by.
Default is None.
:ivar dict funcs: A list of functions registered with decorator :meth:`on`.
**This is mainly used for under-the-hood-code.**
:ivar int connect_time: An integer storing the Unix timestamp of when the
client connected to the server.
"""
def __init__(
self,
addr: tuple[str, int],
name: Union[str, None] = None,
group: Union[str, None] = None,
header_len: int = 16,
cache_size: int = -1,
):
super().__init__(addr=addr, header_len=header_len, cache_size=cache_size)
self.name = name
self.group = group
self.original_name = name
self.original_group = group
# Socket initialization
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
self.sock.connect(self.addr)
except ConnectionRefusedError:
raise ServerNotRunning("Server is not running! Aborting...") from None
self.sock.setblocking(True)
# Stores the names of the reserved functions and information about them
self._reserved_funcs = {
"client_connect": {
"number_arguments": 1,
"type_cast_arguments": ("client_data",),
},
"client_disconnect": {
"number_arguments": 1,
"type_cast_arguments": ("client_data",),
},
"force_disconnect": {
"number_arguments": 0,
"type_cast_arguments": (),
},
"*": {"number_arguments": 2, "type_cast_arguments": ("command", "message")},
}
self._unreserved_func_arguments = ("message",)
# Flags
self.connected = False
self.connect_time = 0 # Unix timestamp
# Send client hello
self._send_client_hello()
# Dunders
def __str__(self) -> str:
"""Example: <HiSockClient connected to 192.168.1.133:5000>"""
return f"<HiSockClient connected to {iptup_to_str(self.addr)}>"
def __repr__(self):
return self.__str__()
# Comparisons
def __gt__(self, other: Union[HiSockClient, str]) -> bool:
"""Example: HiSockClient(...) > "192.168.1.133:5000" """
if type(other) not in (HiSockClient, str):
raise TypeError("Type not supported for > comparison")
if isinstance(other, HiSockClient):
return IPv4Address(self.addr[0]) > IPv4Address(other.addr[0])
ip = other.split(":")
return IPv4Address(self.addr[0]) > IPv4Address(ip[0])
def __ge__(self, other: Union[HiSockClient, str]) -> bool:
"""Example: HiSockClient(...) >= "192.168.1.133:5000" """
if type(other) not in (HiSockClient, str):
raise TypeError("Type not supported for >= comparison")
if isinstance(other, HiSockClient):
return IPv4Address(self.addr[0]) >= IPv4Address(other.addr[0])
ip = other.split(":")
return IPv4Address(self.addr[0]) >= IPv4Address(ip[0])
def __lt__(self, other: Union[HiSockClient, str]) -> bool:
"""Example: HiSockClient(...) < "192.168.1.133:5000" """
if type(other) not in (HiSockClient, str):
raise TypeError("Type not supported for < comparison")
if isinstance(other, HiSockClient):
return IPv4Address(self.addr[0]) < IPv4Address(other.addr[0])
ip = other.split(":")
return IPv4Address(self.addr[0]) < IPv4Address(ip[0])
def __le__(self, other: Union[HiSockClient, str]) -> bool:
"""Example: HiSockClient(...) <= "192.168.1.133:5000" """
if type(other) not in (HiSockClient, str):
raise TypeError("Type not supported for <= comparison")
if isinstance(other, HiSockClient):
return IPv4Address(self.addr[0]) <= IPv4Address(other.addr[0])
ip = other.split(":")
return IPv4Address(self.addr[0]) <= IPv4Address(ip[0])
def __eq__(self, other: Union[HiSockClient, str]) -> bool:
"""Example: HiSockClient(...) == "192.168.1.133:5000" """
if type(other) not in (HiSockClient, str):
raise TypeError("Type not supported for == comparison")
if isinstance(other, HiSockClient):
return IPv4Address(self.addr[0]) == IPv4Address(other.addr[0])
ip = other.split(":")
return IPv4Address(self.addr[0]) == IPv4Address(ip[0])
# Internal methods
def _send_client_hello(self):
"""
Sends a hello to the server for the first connection.
:raises ClientException: If the client is already connected.
"""
if self.connected:
raise ClientException(
f"Client is already connected! (connected {time() - self.connect_time} seconds ago)"
)
hello_dict = {"name": self.name, "group": self.group}
self._send_raw(f"$CLTHELLO${json.dumps(hello_dict)}")
self.connected = True
self.connect_time = time()
def _handle_keepalive(self):
"""Handle a keepalive sent from the server."""
self._send_raw("$KEEPACK$")
# On decorator
def on(
self, command: str, threaded: bool = False, override: bool = False
) -> Callable:
"""
A decorator that adds a function that gets called when the client
receives a matching command.
Reserved functions are functions that get activated on
specific events, and they are:
1. ``client_connect`` - Activated when a client connects to the server
2. ``client_disconnect`` - Activated when a client disconnects from the server
The parameters of the function depend on the command to listen.
For example, reserved functions ``client_connect`` and
``client_disconnect`` gets the client's data passed in as an argument.
All other unreserved functions get the message passed in.
In addition, certain type casting is available to unreserved functions.
That means, that, using type hints, you can automatically convert
between needed instances. The type casting currently supports:
- ``bytes``
- ``str``
- ``int``
- ``float``
- ``bool``
- ``None``
- ``list`` (with the types listed here)
- ``dict`` (with the types listed here)
For more information, read the documentation for type casting.
:param command: A string, representing the command the function should activate
when receiving it.
:type command: str
:param threaded: A boolean, representing if the function should be run in a thread
in order to not block the update loop.
Default is False.
:type threaded: bool, optional
:param override: A boolean representing if the function should override the
reserved function with the same name and to treat it as an unreserved function.
Default is False.
:type override: bool, optional
:return: The same function (the decorator just appended the function to a stack).
:rtype: function
:raises TypeError: If the number of function arguments is invalid.
"""
return self._on(self, command, threaded, override)
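# Example sketch (illustrative, not from the original source): registering an
# unreserved handler with `on`; the `message: int` type hint makes the
# received bytes get type cast to an int before the handler runs. The address,
# port, and command name below are assumptions.
#
#     client = connect(("127.0.0.1", 6969))
#
#     @client.on("score_update", threaded=True)
#     def score_update(message: int):
#         print("New score:", message)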
# Getters
def get_cache(
self,
idx: Union[int, slice, None] = None,
) -> list[MessageCacheMember]:
"""
Gets the message cache.
:param idx: An integer or ``slice``, specifying what specific message caches to return.
Default is None (retrieves the entire cache).
:type idx: Union[int, slice], optional
:return: A list of dictionaries, representing the cache
:rtype: list[dict]
"""
if idx is None:
return self.cache
return self.cache[idx]
def get_client(
self, client: Client, get_as_dict: bool = False
) -> Union[ClientInfo, dict]:
"""
Gets the client data for a client.
:param client: The client name or IP+port to get.
:type client: Client
:param get_as_dict: A boolean representing if the client data should be
returned as a dictionary. Otherwise, it'll be returned as an
instance of :class:`ClientInfo`.
Default is False.
:type get_as_dict: bool, optional
:return: The client data.
:rtype: Union[ClientInfo, dict]
:raises ValueError: If the client IP is invalid.
:raises ClientNotFound: If the client couldn't be found.
:raises ServerException: If another error occurred.
"""
try:
validate_ipv4(iptup_to_str(client))
except ValueError as e:
# Names are allowed, too.
if not isinstance(client, str):
raise e
self._send_raw(f"$GETCLT${client}")
response = self.recv()
response = _type_cast(
type_cast=dict,
content_to_type_cast=response,
func_name="<get_client response>",
)
# Validate response
if "traceback" in response:
if response["traceback"] == "$NOEXIST$":
raise ClientNotFound(f"Client {client} not connected to the server.")
raise ServerException(
f"Failed to get client from server: {response['traceback']}"
)
# Type cast
if get_as_dict:
return response
return ClientInfo(**response)
def get_server_addr(self) -> tuple[str, int]:
"""
Gets the address of where the client is connected to.
:return: A tuple, with the format (str IP, int port)
:rtype: tuple[str, int]
"""
return self.addr
def get_client_addr(self) -> tuple[str, int]:
"""
Gets the address of the client.
:return: A tuple, with the format (IP, port).
:rtype: tuple[str, int]
"""
return self.sock.getsockname()
# Transmit data
def send(self, command: str, content: Sendable = None):
"""
Sends a command & content to the server.
:param command: A string, containing the command to send
:type command: str
:param content: The message / content to send
:type content: Sendable, optional
"""
data_to_send = (
b"$CMD$" + command.encode() + b"$MSG$" + self._send_type_cast(content)
)
content_header = make_header(data_to_send, self.header_len)
self.sock.send(content_header + data_to_send)
def _send_raw(self, content: Sendable = None):
"""
Sends a message to the server: NO COMMAND REQUIRED.
This is preferable in some situations, where clients need to send
multiple data over the server, without overcomplicating it with commands
:param content: The message / content to send
:type content: Sendable, optional
"""
data_to_send = self._send_type_cast(content)
header = make_header(data_to_send, self.header_len)
self.sock.send(header + data_to_send)
# Changers
def change_name(self, new_name: Union[str, None]):
"""
Changes the name of the client
:param new_name: The new name for the client to be called.
If left blank, then the name will be reset.
:type new_name: Union[str, None]
"""
if new_name is None:
new_name = self.original_name
data_to_send = "$CHNAME$" + new_name
self._send_raw(data_to_send)
def change_group(self, new_group: Union[str, None]):
"""
Changes the client's group.
:param new_group: The new group name for the client to be called.
if left blank, then the group will be reset
:type new_group: Union[str, None]
"""
if new_group is None:
new_group = self.original_group
data_to_send = "$CHGROUP$" + new_group
self._send_raw(data_to_send)
# Update
def _update(self):
"""
Handles new messages and sends them to the appropriate functions. This method
should be called in a while loop in a thread. If this function isn't in its
own thread, then :meth:`recv` won't work.
.. warning::
Don't call this method on its own; instead use :meth:`start`.
"""
if self.closed:
# This shouldn't happen due to `start` handling it, but just in case...
return
try:
### Receiving data ###
self._receiving_data = True
try:
content_header = self.sock.recv(self.header_len)
except ConnectionResetError:
raise ServerNotRunning(
"Server has stopped running, aborting..."
) from None
except ConnectionAbortedError:
# Keepalive timeout reached
self.closed = True
self._receiving_data = False
self.close(emit_leave=False)
return
if content_header == b"":
# Happens when the client is closing the connection while receiving
# data. The content header will be empty.
return
data = self.sock.recv(int(content_header.decode()))
self._receiving_data = False
if not data:
# Happens when the client is closing the connection while receiving
# data. The data will be empty.
return
### Reserved commands ###
# Handle keepalive
if data == b"$KEEPALIVE$":
self._handle_keepalive()
return
# Handle force disconnection
elif data == b"$DISCONN$":
self.close(emit_leave=False) # The server already knows we're gone
self._call_function_reserved("force_disconnect")
return
# Handle new client connection
elif data.startswith(b"$CLTCONN$") and "client_connect" in self.funcs:
client_data = self._type_cast_client_data(
"client_connect",
_type_cast(
type_cast=dict,
content_to_type_cast=_removeprefix(data, b"$CLTCONN$"),
func_name="<client connect in update>",
),
)
self._call_function_reserved("client_connect", client_data)
return
# Handle client disconnection
elif data.startswith(b"$CLTDISCONN$"):
client_data = self._type_cast_client_data(
"client_disconnect",
_type_cast(
type_cast=dict,
content_to_type_cast=_removeprefix(data, b"$CLTDISCONN$"),
func_name="<client disconnect in update>",
),
)
self._call_function_reserved("client_disconnect", client_data)
return
### Unreserved commands ###
# Handle random data
elif not data.startswith(b"$CMD$") and "*" in self.funcs:
self._call_wildcard_function(
client_data=None, command=None, content=data
)
return
has_listener = False # For cache
# Get the command and the message
command = data.lstrip(b"$CMD$").split(b"$MSG$")[0].decode()
content = _removeprefix(data, f"$CMD${command}$MSG$".encode())
# No content? (`_removeprefix` didn't do anything)
if not content or content == data:
content = None
# Call functions that are listening for this command from the `on`
# decorator
for matching_command, func in self.funcs.items():
if command != matching_command:
continue
has_listener = True
# Call function with dynamic args
arguments = ()
if len(func["type_hint"]) == 1:
arguments = (
_type_cast(
type_cast=func["type_hint"]["message"],
content_to_type_cast=content,
func_name=func["name"],
),
)
self._call_function(matching_command, *arguments)
break
else:
has_listener = self._handle_recv_commands(command, content)
# No listener found
if not has_listener and "*" in self.funcs:
# No recv and no catchall. A command and some data.
self._call_wildcard_function(
client_data=None, command=command, content=content
)
# Caching
self._cache(has_listener, command, content, data, content_header)
except IOError as e:
# Normal, means message has ended
if not (
e.errno != errno.EAGAIN
and e.errno != errno.EWOULDBLOCK
and not self.closed
):
return
# Fatal error, abort client
traceback.print_exception(type(e), e, e.__traceback__, file=sys.stderr)
print(
"\nServer error encountered, aborting client...",
file=sys.stderr,
)
self.close()
raise SystemExit from e
# Stop
def close(self, emit_leave: bool = True):
"""
Closes the client; running ``client.update()`` won't do anything now
:param emit_leave: Decides if the client will emit `leave` to the server or not
:type emit_leave: bool
"""
self.closed = True
if emit_leave:
try:
self._send_raw("$USRCLOSE$")
except OSError: # Server already closed socket
return
self.sock.shutdown(socket.SHUT_RDWR)
self.sock.close()
# Main loop
def start(self, callback: Callable = None, error_handler: Callable = None):
"""
Start the main loop for the client.
:param callback: A function that will be called every time the
client receives and handles a message.
:type callback: Callable, optional
:param error_handler: A function that will be called every time the
client encounters an error.
:type error_handler: Callable, optional
"""
try:
while not self.closed:
self._update()
if isinstance(callback, Callable):
callback()
except Exception as e:
if isinstance(error_handler, Callable):
error_handler(e)
else:
raise e
class ThreadedHiSockClient(HiSockClient):
"""
:class:`HiSockClient`, but running in its own thread so as not to block the
main loop. Please note that while this is running in its own thread, the
event handlers will still be running in the main thread. To avoid this,
use the ``threaded=True`` argument for the ``on`` decorator.
For documentation purposes, see :class:`HiSockClient`.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._thread: threading.Thread
self._stop_event = threading.Event()
def close(self, *args, **kwargs):
"""
Closes the client. Blocks the thread until the client is closed.
For documentation, see :meth:`HiSockClient.close`.
"""
super().close(*args, **kwargs)
self._stop_event.set()
try:
self._thread.join()
except RuntimeError:
# Cannot join current thread
return
def _start(self, callback: Callable = None, error_handler: Callable = None):
"""Start the main loop for the threaded client."""
def updated_callback():
if self._stop_event.is_set() and not self.closed:
self.close()
# Original callback
if isinstance(callback, Callable):
callback()
super().start(callback=updated_callback, error_handler=error_handler)
def start(self, callback: Callable = None, error_handler: Callable = None):
"""
Starts the main client loop.
For documentation, see :meth:`HiSockClient.start`.
"""
self._thread = threading.Thread(
target=self._start, args=(callback, error_handler)
)
self._thread.start()
def connect(addr, name=None, group=None, header_len=16, cache_size=-1):
"""
Creates a `HiSockClient` instance. See HiSockClient for more details
:param addr: A two-element tuple containing the IP address and
the port number of the server.
:type addr: tuple
:param name: A string containing the name of what the client should go by.
This argument is optional.
:type name: str, optional
:param group: A string, containing the "group" the client is in.
Groups can be utilized to send specific messages to them only.
This argument is optional.
:type group: str, optional
:param header_len: An integer defining the header length of every message.
Default is 16.
:type header_len: int, optional
:param cache_size: The size of the message cache.
-1 or below for no message cache, 0 for an unlimited cache size,
and any other number for the cache size.
:type cache_size: int, optional
:return: A :class:`HiSockClient` instance.
:rtype: instance
.. note::
A simple way to use this function is to use :func:`utils.input_client_config`
which will ask you for the server IP, port, name, and group. Then, you can
call this function by simply doing ``connect(*input_client_config())``
"""
return HiSockClient(addr, name, group, header_len, cache_size)
def threaded_connect(*args, **kwargs):
"""
Creates a :class:`ThreadedHiSockClient` instance. See :class:`ThreadedHiSockClient`
for more details
:return: A :class:`ThreadedHiSockClient` instance
"""
return ThreadedHiSockClient(*args, **kwargs)
if __name__ == "__main__":
print("Testing client!")
client = connect(
("127.0.0.1", int(input("Port: "))),
name=input("Name: "),
group=input("Group: "),
)
# print(
# "The HiSock police are on to you. "
# "You must change your name and group before they catch you."
# )
# client.change_name(input("New name: "))
# client.change_group(input("New group: "))
@client.on("client_connect")
def on_connect(client_data):
print(
f"{client_data.name} has joined! "
f"Their IP is {iptup_to_str(client_data.ip)}. "
f'Their group is {client_data["group"]}.'
)
@client.on("client_disconnect", override=True)
def on_disconnect(leave_data: dict):
print(
f'{leave_data["name"]} disconnected from the server because {leave_data["reason"]} :('
)
@client.on("force_disconnect")
def on_force_disconnect():
print("You have been disconnected from the server.")
client.close()
__import__("os")._exit(0)
@client.on("message", threaded=True)
def on_message(message: str):
print(f"Message received:\n{message}")
@client.on("genocide")
def on_genocide():
print("It's time to die!")
client.close()
__import__("os")._exit(0)
@client.on("*")
def on_wildcard(command: str, data: str):
print(f"There was some unhandled data from the server. {command=}, {data=}")
def choices():
print(
"Your choices are:"
"\n\tsend\n\tsend_to_group\n\tchange_name\n\tchange_group\n\tset_timer\n\tstop"
"\n\tgenocide\n\tsend_random_data"
)
while True:
choice = input("What would you like to do? ")
if choice == "send":
client.send("broadcast_message", input("Message: "))
elif choice == "send_to_group":
client.send("broadcast_message_to_group", input("Message: "))
elif choice == "ping":
client.send("ping")
ping_time = time()
client.recv("pong")
print(f"Pong! Took {time() - ping_time} seconds.")
elif choice == "change_name":
client.change_name(input("New name: "))
elif choice == "change_group":
client.change_group(input("New group: "))
elif choice == "set_timer":
client.send("set_timer", input("Seconds: "))
client.recv("timer_done")
print("Timer done!")
elif choice == "get_all_clients":
client.send("get_all_clients")
print(client.recv("all_clients", recv_as=dict))
elif choice == "stop":
client.close()
return
elif choice == "genocide":
input("You will kill many people. Do you wish to proceed? ")
print("Just kidding, your input had no effect. Time for genocide!")
client.send(
"set_timer", input("How many seconds for the genocide to last?")
)
client.recv("timer_done")
print("Genociding...")
client.send("commit_genocide")
elif choice == "send_random_data":
print("Sending some random data...")
choice, randint = (
__import__("random").choice,
__import__("random").randint,
)
client.send(
"uncaught_command",
"Random data: "
+ "".join(
[
chr(choice((randint(65, 90), randint(97, 122))))
for _ in range(100)
]
),
)
else:
print("Invalid choice.")
choices_thread = threading.Thread(target=choices, daemon=False)
choices_thread.start()
client.start()
|
articlecrawler.py
|
#!/usr/bin/env python
# -*- coding: utf-8, euc-kr -*-
# Modules used
from time import sleep
from bs4 import BeautifulSoup
from multiprocessing import Process
from korea_news_crawler.exceptions import *
from korea_news_crawler.articleparser import ArticleParser
from korea_news_crawler.writer import Writer
import os
import platform
import calendar
import requests
import re
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
# Crawler class
class ArticleCrawler(object):
# Constructor
# Creates key member variables such as categories and date
def __init__(self):
self.categories = {'정치': 100, '경제': 101, '사회': 102, '생활문화': 103, '세계': 104, 'IT과학': 105,
'오피니언': 110,'politics': 100, 'economy': 101, 'society': 102, 'living_culture': 103,
'world': 104, 'IT_science': 105,'opinion': 110}
self.selected_categories = []
self.date = {'start_year': 0, 'start_month': 0, 'start_date':0, 'end_year': 0, 'end_month': 0, 'end_date': 0}
self.user_operating_system = str(platform.system())
self.keyword = ""
self.captureFlag = False
# Category setter
def set_category(self, *args):
for key in args:
if self.categories.get(key) is None:
raise InvalidCategory(key)
self.selected_categories = args
# Keyword setter
def set_keyword(self, str):
self.keyword = str
# captureFlag setter
def set_captureFlag(self, flag):
self.captureFlag = flag
# Set the date range of articles to crawl
def set_date_range(self, start_year, start_month, start_date, end_year, end_month, end_date):
args = [start_year, start_month, start_date, end_year, end_month, end_date]
if start_year > end_year:
raise InvalidYear(start_year, end_year)
if start_month < 1 or start_month > 12:
raise InvalidMonth(start_month)
if end_month < 1 or end_month > 12:
raise InvalidMonth(end_month)
if start_date < 1 or start_date > 32:
raise InvalidDate(start_date)
if end_date < 1 or end_date > 32:
raise InvalidDate(end_date)
if start_year == end_year and start_month > end_month:
raise OverbalanceMonth(start_month, end_month)
if start_year == end_year and start_month == end_month and start_date > end_date:
raise OverbalanceMonth(start_date, end_date)
for key, date in zip(self.date, args):
self.date[key] = date
print(self.date)
# URL builder function
@staticmethod
def make_news_page_url(category_url, start_year, end_year, start_month, end_month, start_date, end_date):
made_urls = []
# Run over the requested period
for year in range(start_year, end_year + 1):
if start_year == end_year:
year_startmonth = start_month
year_endmonth = end_month
else:
if year == start_year:
year_startmonth = start_month
year_endmonth = 12
elif year == end_year:
year_startmonth = 1
year_endmonth = end_month
else:
year_startmonth = 1
year_endmonth = 12
for month in range(year_startmonth, year_endmonth + 1):
if year_startmonth == year_endmonth:
for month_day in range(1, end_date + 1):
if len(str(month)) == 1:
month = "0" + str(month)
if len(str(month_day)) == 1:
month_day = "0" + str(month_day)
# Generate the page URL for each date
url = category_url + str(year) + str(month) + str(month_day)
# Determine the total page count (redirect)
totalpage = ArticleParser.find_news_totalpage(url + "&page=10000")
print(totalpage)
for page in range(1, totalpage + 1):
made_urls.append(url + "&page=" + str(page))
else:
for month_day in range(1, calendar.monthrange(year, month)[1] + 1):
if len(str(month)) == 1:
month = "0" + str(month)
if len(str(month_day)) == 1:
month_day = "0" + str(month_day)
# Generate the page URL for each date
url = category_url + str(year) + str(month) + str(month_day)
# Determine the total page count (redirect)
totalpage = ArticleParser.find_news_totalpage(url + "&page=10000")
print(totalpage)
for page in range(1, totalpage + 1):
made_urls.append(url + "&page=" + str(page))
return made_urls
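# Example (illustrative): for the politics category (sid1=100) on 2017-01-01,
# the generated URLs look like
#   http://news.naver.com/main/list.nhn?mode=LSD&mid=sec&sid1=100&date=20170101&page=1
# with one entry per page reported by ArticleParser.find_news_totalpage().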
# Fetch the data for a URL
@staticmethod
def get_url_data(url, max_tries=10):
remaining_tries = int(max_tries)
headers = {'User-Agent':'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.90 Safari/537.36'}
while remaining_tries > 0:
try:
return requests.get(url,headers=headers)
except requests.exceptions.RequestException:
sleep(60)
remaining_tries = remaining_tries - 1
raise ResponseTimeout()
# Crawling function
def crawling(self, category_name):
# Multi Process PID
print(category_name + " PID: " + str(os.getpid()))
writer = Writer(category_name=category_name, date=self.date)
# Article URL format
url = "http://news.naver.com/main/list.nhn?mode=LSD&mid=sec&sid1=" + str(self.categories.get(category_name)) + "&date="
# Crawl over the configured period
day_urls = self.make_news_page_url(url, self.date['start_year'], self.date['end_year'],
self.date['start_month'], self.date['end_month'], self.date['start_date'], self.date['end_date'] )
print(category_name + " Urls are generated")
print("The crawler starts")
for URL in day_urls:
regex = re.compile(r"date=(\d+)")
news_date = regex.findall(URL)[0]
request = self.get_url_data(URL)
document = BeautifulSoup(request.content, 'html.parser')
# Collect the articles listed on each page
post_temp = document.select('.newsflash_body .type06_headline li dl')
post_temp.extend(document.select('.newsflash_body .type06 li dl'))
# Store the URLs of the articles on each page
post = []
for line in post_temp:
post.append(line.a.get('href'))  # add every article URL on this page to the post list
del post_temp
for content_url in post:  # article URL
# Crawling wait time
sleep(0.01)
# Fetch the article HTML
request_content = self.get_url_data(content_url)
try:
document_content = BeautifulSoup(request_content.content, 'html.parser')
except:
continue
try:
# Get the article headline
tag_headline = document_content.find_all('h3', {'id': 'articleTitle'}, {'class': 'tts_head'})
text_headline = ''  # initialize the article headline
text_headline = text_headline + ArticleParser.clear_headline(str(tag_headline[0].find_all(text=True)))
# keyword check
if not self.keyword in text_headline:
continue
if not text_headline:  # skip the article if the headline is empty
continue
# Get the article body
tag_content = document_content.find_all('div', {'id': 'articleBodyContents'})
text_sentence = ''  # initialize the article body
text_sentence = text_sentence + ArticleParser.clear_content(str(tag_content[0].find_all(text=True)))
# keyword check
if not self.keyword in text_sentence:
continue
if not text_sentence:  # skip the article if the body is empty
continue
# Get the news agency
tag_company = document_content.find_all('meta', {'property': 'me2:category1'})
text_company = ''  # initialize the news agency
text_company = text_company + str(tag_company[0].get('content'))
if not text_company:  # skip the article if the agency is empty
continue
# Save the article photo as a screenshot
if(self.captureFlag):
browser = webdriver.Chrome(ChromeDriverManager().install())
browser.get(content_url)
# handle element-not-found errors
try:
element = browser.find_element_by_class_name('end_photo_org')
location = element.location
y = location.get('y')
# account for photo padding (y-60)
browser.execute_script("window.scrollTo(%d,%d);"%(0,y-60))
size = element.size
title = text_headline+'.png'
# use the article headline as the screenshot name
element.screenshot(title)
except Exception as ex:
print('Not find element')
browser.quit()
# Write to CSV
wcsv = writer.get_writer_csv()
wcsv.writerow([news_date, category_name, text_company, text_headline, text_sentence, content_url])
print('Write complete')
del text_company, text_sentence, text_headline
del tag_company
del tag_content, tag_headline
del request_content, document_content
except Exception as ex:
del request_content, document_content
pass
writer.close()
# Function that starts crawling
def start(self):
# Start multi-process crawling
for category_name in self.selected_categories:
proc = Process(target=self.crawling, args=(category_name,))
proc.start()
# for testing
if __name__ == "__main__":
Crawler = ArticleCrawler()
Crawler.set_category("생활문화", "IT과학")
Crawler.set_date_range(2017, 1, 1, 2018, 4, 30)
Crawler.start()
|
test_kex_gss.py
|
# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
# Copyright (C) 2013-2014 science + computing ag
# Author: Sebastian Deiss <sebastian.deiss@t-online.de>
#
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
"""
Unit Tests for the GSS-API / SSPI SSHv2 Diffie-Hellman Key Exchange and user
authentication
"""
import socket
import threading
import unittest
import paramiko
class NullServer (paramiko.ServerInterface):
def get_allowed_auths(self, username):
return 'gssapi-keyex'
def check_auth_gssapi_keyex(self, username,
gss_authenticated=paramiko.AUTH_FAILED,
cc_file=None):
if gss_authenticated == paramiko.AUTH_SUCCESSFUL:
return paramiko.AUTH_SUCCESSFUL
return paramiko.AUTH_FAILED
def enable_auth_gssapi(self):
UseGSSAPI = True
return UseGSSAPI
def check_channel_request(self, kind, chanid):
return paramiko.OPEN_SUCCEEDED
def check_channel_exec_request(self, channel, command):
if command != 'yes':
return False
return True
class GSSKexTest(unittest.TestCase):
@staticmethod
def init(username, hostname):
global krb5_principal, targ_name
krb5_principal = username
targ_name = hostname
def setUp(self):
self.username = krb5_principal
self.hostname = socket.getfqdn(targ_name)
self.sockl = socket.socket()
self.sockl.bind((targ_name, 0))
self.sockl.listen(1)
self.addr, self.port = self.sockl.getsockname()
self.event = threading.Event()
thread = threading.Thread(target=self._run)
thread.start()
def tearDown(self):
for attr in "tc ts socks sockl".split():
if hasattr(self, attr):
getattr(self, attr).close()
def _run(self):
self.socks, addr = self.sockl.accept()
self.ts = paramiko.Transport(self.socks, gss_kex=True)
host_key = paramiko.RSAKey.from_private_key_file('tests/test_rsa.key')
self.ts.add_server_key(host_key)
self.ts.set_gss_host(targ_name)
try:
self.ts.load_server_moduli()
except:
print ('(Failed to load moduli -- gex will be unsupported.)')
server = NullServer()
self.ts.start_server(self.event, server)
def _test_gsskex_and_auth(self, gss_host, rekey=False):
"""
Verify that Paramiko can handle SSHv2 GSS-API / SSPI authenticated
Diffie-Hellman Key Exchange and user authentication with the GSS-API
context created during key exchange.
"""
host_key = paramiko.RSAKey.from_private_key_file('tests/test_rsa.key')
public_host_key = paramiko.RSAKey(data=host_key.asbytes())
self.tc = paramiko.SSHClient()
self.tc.get_host_keys().add('[%s]:%d' % (self.hostname, self.port),
'ssh-rsa', public_host_key)
self.tc.connect(self.hostname, self.port, username=self.username,
gss_auth=True, gss_kex=True, gss_host=gss_host)
self.event.wait(1.0)
self.assert_(self.event.is_set())
self.assert_(self.ts.is_active())
self.assertEquals(self.username, self.ts.get_username())
self.assertEquals(True, self.ts.is_authenticated())
self.assertEquals(True, self.tc.get_transport().gss_kex_used)
stdin, stdout, stderr = self.tc.exec_command('yes')
schan = self.ts.accept(1.0)
if rekey:
self.tc.get_transport().renegotiate_keys()
schan.send('Hello there.\n')
schan.send_stderr('This is on stderr.\n')
schan.close()
self.assertEquals('Hello there.\n', stdout.readline())
self.assertEquals('', stdout.readline())
self.assertEquals('This is on stderr.\n', stderr.readline())
self.assertEquals('', stderr.readline())
stdin.close()
stdout.close()
stderr.close()
def test_1_gsskex_and_auth(self):
"""
Verify that Paramiko can handle SSHv2 GSS-API / SSPI authenticated
Diffie-Hellman Key Exchange and user authentication with the GSS-API
context created during key exchange.
"""
self._test_gsskex_and_auth(gss_host=None)
def test_2_gsskex_and_auth_rekey(self):
"""
Verify that Paramiko can rekey.
"""
self._test_gsskex_and_auth(gss_host=None, rekey=True)
|
test_autograd.py
|
import contextlib
import gc
import sys
import io
import math
import random
import tempfile
import time
import threading
import unittest
import warnings
from copy import deepcopy
from collections import OrderedDict
from itertools import product, permutations
from operator import mul
from functools import reduce, partial
import torch
import json
# TODO: remove this global setting
# Autograd tests use double as the default dtype
torch.set_default_dtype(torch.double)
from torch import nn
from torch._six import inf, nan
from torch.autograd.function import once_differentiable
from torch.autograd.profiler import (profile, format_time, EventList,
FunctionEvent, FunctionEventAvg,
record_function, emit_nvtx)
import torch.autograd.functional as autogradF
from torch.utils.checkpoint import checkpoint
from torch.testing._internal.common_cuda import TEST_CUDA, _get_torch_cuda_version
from torch.testing._internal.common_utils import (TestCase, run_tests, skipIfNoLapack,
suppress_warnings, slowTest,
load_tests, random_symmetric_matrix,
IS_WINDOWS, IS_MACOS, CudaMemoryLeakCheck,
TemporaryFileName, TEST_WITH_ROCM,
gradcheck, gradgradcheck)
from torch.autograd import Variable, Function, detect_anomaly, kineto_available
from torch.autograd.function import InplaceFunction
import torch.autograd.forward_ad as fwAD
from torch.testing import randn_like
from torch.testing._internal.common_methods_invocations import (method_tests,
create_input, unpack_variables,
EXCLUDE_FUNCTIONAL, EXCLUDE_GRADCHECK,
EXCLUDE_GRADGRADCHECK,
EXCLUDE_GRADGRADCHECK_BY_TEST_NAME,
exclude_tensor_method,
mask_not_all_zeros,
S)
from torch.testing._internal.common_device_type import (instantiate_device_type_tests, skipCUDAIfRocm,
onlyCPU, onlyCUDA, onlyOnCPUAndCUDA, dtypes, dtypesIfCUDA,
deviceCountAtLeast, skipCUDAIfCudnnVersionLessThan,
skipCUDAIf)
_END_SENTINEL = object()
def getattr_qualified(obj, qname, default=None):
""" Like getattr but works with qualified names
e.g. getattr(torch, 'fft.rfft')
"""
path = qname.split('.')
for name in path:
obj = getattr(obj, name, _END_SENTINEL)
if obj is _END_SENTINEL:
return default
return obj
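# Illustrative usage of getattr_qualified (a minimal sketch; the attribute paths
# below are examples only and are not used by the tests):
#
#   fn = getattr_qualified(torch, 'nn.functional.relu')       # walks torch -> nn -> functional -> relu
#   missing = getattr_qualified(torch, 'no.such.attr', None)  # returns the default instead of raising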
# load_tests from common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests
import pickle
PRECISION = 1e-4
@contextlib.contextmanager
def backward_engine(engine):
_prev_engine = Variable._execution_engine
Variable._execution_engine = engine()
try:
yield
finally:
Variable._execution_engine = _prev_engine
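# Sketch of how backward_engine is intended to be used (illustrative only;
# `FakeEngine` is a hypothetical execution-engine class, not something defined here):
#
#   with backward_engine(FakeEngine):
#       loss.backward()  # backward runs through the temporarily-installed engine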
def graph_desc(fn):
    """Return a nested string description of the autograd graph rooted at fn."""
    if fn is None:
        return 'None'
result = type(fn).__name__ + '('
next_functions = fn.next_functions
for next_fn, _ in next_functions:
result += graph_desc(next_fn)
result += ', '
if next_functions:
result = result[:-2]
return result + ')'
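# Illustrative output of graph_desc (assumption: the exact node names vary with the
# op and PyTorch version): for y = x * 2 with x a leaf requiring grad,
# graph_desc(y.grad_fn) returns a nested description along the lines of
# 'MulBackward0(AccumulateGrad())'.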
class TestAutograd(TestCase):
def test_tensor_grad_warnings(self):
dummy = torch.empty(1)
with warnings.catch_warnings(record=True) as w:
# Accessing .grad on leaf
dummy.requires_grad_()
foo = dummy.grad
self.assertEqual(len(w), 0)
# Accessing .grad on non-leaf
dummy = dummy.clone()
foo = dummy.grad
self.assertEqual(len(w), 1)
# Accessing .grad on non-leaf that retains gradients
dummy.retain_grad()
foo = dummy.grad
self.assertEqual(len(w), 1)
def _function_test(self, cls):
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5, 5, requires_grad=True)
result = cls.apply(x, 2, y)
go = torch.ones((), requires_grad=True)
result.sum().backward(go, create_graph=True)
self.assertEqual(x.grad, y + torch.ones(5, 5))
self.assertEqual(y.grad, x + torch.ones(5, 5) * 2)
self.assertIsNotNone(x.grad.grad_fn)
self.assertIsNotNone(y.grad.grad_fn)
return x, y
def test_function(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, tensor1, pyscalar, tensor2):
ctx.pyscalar = pyscalar
ctx.save_for_backward(tensor1, tensor2)
return tensor1 + pyscalar * tensor2 + tensor1 * tensor2
@staticmethod
def backward(ctx, grad_output):
var1, var2 = ctx.saved_tensors
# NOTE: self is the test case here
self.assertIsInstance(var1, torch.Tensor)
self.assertIsInstance(var2, torch.Tensor)
self.assertIsInstance(grad_output, torch.Tensor)
return (grad_output + grad_output * var2, None,
grad_output * ctx.pyscalar + grad_output * var1)
x, y = self._function_test(MyFunction)
x_grad_desc = graph_desc(x.grad.grad_fn)
y_grad_desc = graph_desc(y.grad.grad_fn)
self.assertExpected(x_grad_desc, "x_grad_desc")
self.assertExpected(y_grad_desc, "y_grad_desc")
def test_once_differentiable(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, tensor1, pyscalar, tensor2):
ctx.pyscalar = pyscalar
ctx.save_for_backward(tensor1, tensor2)
return tensor1 + pyscalar * tensor2 + tensor1 * tensor2
@staticmethod
@once_differentiable
def backward(ctx, grad_output):
self.assertFalse(torch.is_grad_enabled())
t1, t2 = ctx.saved_tensors
return (grad_output + grad_output * t2, None,
grad_output * ctx.pyscalar + grad_output * t1)
x, y = self._function_test(MyFunction)
self.assertEqual(graph_desc(x.grad.grad_fn),
'CopyBackwards(None, Error(AccumulateGrad(), None, AccumulateGrad()))')
self.assertEqual(graph_desc(y.grad.grad_fn),
'CopyBackwards(None, Error(AccumulateGrad(), None, AccumulateGrad()))')
def test_function_returns_input(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, grad):
return grad * 2
for shape in [(1,), ()]:
v = torch.ones(shape, requires_grad=True)
MyFunction.apply(v).backward()
self.assertEqual(v.grad, torch.full(shape, 2.))
with torch.no_grad():
v.grad.zero_()
MyFunction.apply(v.clone()).backward()
self.assertEqual(v.grad, torch.full(shape, 2.))
def test_function_returns_undefined_tensor(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, x):
return x * 2
@staticmethod
def backward(ctx, grad):
return None
        # Test that undefined tensors returned from a custom backward function
        # are propagated as undefined, and not as tensors full of zeros
x = torch.ones(1, requires_grad=True)
MyFunction.apply(x).backward()
self.assertIsNone(x.grad)
MyFunction.apply(x ** 2).backward()
self.assertIsNone(x.grad)
MyFunction.apply(x).sum().backward()
self.assertIsNone(x.grad)
self.assertIsNone(torch.autograd.grad(MyFunction.apply(x), x, allow_unused=True)[0])
def test_materialize_grads(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, grad):
self.assertEqual(grad, torch.zeros(1))
return grad
x = torch.ones(1, requires_grad=True)
torch._C._functions.UndefinedGrad()(MyFunction.apply(x)).backward()
def test_dont_materialize_grads(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, x):
ctx.set_materialize_grads(False)
return x
@staticmethod
def backward(ctx, grad):
self.assertIsNone(grad)
return grad
x = torch.ones(1, requires_grad=True)
torch._C._functions.UndefinedGrad()(MyFunction.apply(x)).backward()
def test_legacy_function_deprecation_exception(self):
# Trigger exception
class MyFunction(Function):
def forward(self, x):
return x
def backward(self, grad_output):
return grad_output
# Check exception occurs
with self.assertRaisesRegex(
RuntimeError,
'Legacy autograd function with non-static forward method is deprecated'):
MyFunction()(torch.randn(3, 4))
class SimulateBackwardError(Function):
@staticmethod
def forward(ctx, input):
return input.clone()
@staticmethod
@once_differentiable
def backward(ctx, input):
raise Exception("Simulate error on backward pass")
def test_custom_function_exception(self):
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
tmp = (t1 + t2) * (t1 + t2)
t3 = TestAutograd.SimulateBackwardError.apply(tmp)
with self.assertRaisesRegex(Exception, "Simulate error on backward pass"):
t3.sum().backward()
def test_custom_function_non_tensor_inputs_outputs(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, t1, t2, scale, t3):
t4 = t1 + t2 * t3
t5 = t1 * t2 + t3
t4 *= scale
t5 *= scale
# Save scale
ctx.scale = scale
ctx.save_for_backward(t1, t2, t3)
return scale, t4, None, True, t5, "bar", t1
@staticmethod
@once_differentiable
def backward(ctx, *grads):
# Verify grads
self.assertEqual(7, len(grads))
self.assertIsNone(grads[0])
self.assertIsNone(grads[2])
self.assertIsNone(grads[3])
self.assertIsNone(grads[5])
scale = ctx.scale
var1, var2, var3 = ctx.saved_tensors
return (
grads[1] * scale + grads[4] * var2 * scale + grads[6],
grads[1] * var3 * scale + grads[4] * var1 * scale,
None,
grads[1] * var2 * scale + grads[4] * scale,
)
t1 = torch.rand(10, requires_grad=True)
t2 = torch.rand(10, requires_grad=True)
t3 = torch.rand(10)
scale = random.randint(0, 10)
res = MyFunction.apply(t1, t2, scale, t3)
self.assertEqual(scale, res[0])
self.assertEqual((t1 + t2 * t3) * scale, res[1])
self.assertEqual(None, res[2])
self.assertEqual(True, res[3])
self.assertEqual((t1 * t2 + t3) * scale, res[4])
self.assertEqual("bar", res[5])
self.assertEqual(t1, res[6])
# Validate running backward.
torch.autograd.backward([res[1].sum(), res[4].sum(), res[6].sum()])
self.assertIsNotNone(t1.grad)
self.assertIsNotNone(t2.grad)
self.assertIsNone(t3.grad)
# Test gradcheck
def foo(t1, t2, t3):
res = MyFunction.apply(t1, t2, scale, t3)
return res[1], res[4], res[6]
gradcheck(foo, (t1, t2, t3))
def test_custom_function_no_tensors(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, t1, t2, scale, t3):
t4 = t1 + t2 * t3
t5 = t1 * t2 + t3
t4 *= scale
t5 *= scale
return scale, t4, None, True, t5, "bar", t1
@staticmethod
@once_differentiable
def backward(ctx, *args):
return (args[0], args[1], None, args[2])
t1 = random.random()
t2 = random.random()
t3 = random.random()
scale = random.randint(0, 10)
res = MyFunction.apply(t1, t2, scale, t3)
self.assertEqual(scale, res[0])
self.assertEqual((t1 + t2 * t3) * scale, res[1])
self.assertEqual(None, res[2])
self.assertEqual(True, res[3])
self.assertEqual((t1 * t2 + t3) * scale, res[4])
self.assertEqual("bar", res[5])
self.assertEqual(t1, res[6])
def test_invalid_gradients(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, x):
return x * 2
@staticmethod
def backward(ctx, grad_output):
return torch.randn(10, dtype=torch.float)
with self.assertRaisesRegex(RuntimeError, 'expected shape'):
input = torch.randn(5, 5, dtype=torch.float, requires_grad=True)
MyFunction.apply(input).sum().backward()
def test_accumulate_grad(self):
grad_output = torch.ones(5, 5)
def compute_grad(create_graph):
x = torch.randn(5, 5, requires_grad=True)
y = x + 2
y.backward(grad_output, retain_graph=True)
x_grad = x.grad
x_grad_clone = x.grad.clone()
y.backward(grad_output, create_graph=create_graph)
return x_grad, x_grad_clone
# Accumulate in-place when create_graph is False
x_grad, x_grad_clone = compute_grad(create_graph=False)
self.assertEqual(x_grad, x_grad_clone * 2)
        # Accumulate out-of-place when create_graph is True
x_grad, x_grad_clone = compute_grad(create_graph=True)
self.assertEqual(x_grad, x_grad_clone)
def test_accumulate_grad_tensor_reference(self):
def _test_grad_tensor(params_grad_tensor, backward_grad_tensor, should_preserve_reference, create_graph):
params = torch.tensor([1.5, 1.5]).requires_grad_()
params.grad = params_grad_tensor
grad_saved = params.grad
params.backward(backward_grad_tensor, create_graph=create_graph)
self.assertEqual(id(grad_saved) == id(params.grad), should_preserve_reference)
for create_graph in (False, True):
# Accumulate dense gradient to sparse gradient will change the `params.grad` reference
_test_grad_tensor(
torch.sparse_coo_tensor(torch.tensor([[1, 1]]).long(), torch.tensor([1., 1.])),
torch.tensor([1.5, 1.5]),
False, # never accumulates in-place
create_graph)
# Accumulate dense gradient to dense gradient will preserve the `params.grad` reference,
# but only if create_graph=False.
_test_grad_tensor(
torch.tensor([1.5, 1.5]),
torch.tensor([1.5, 1.5]),
not create_graph,
create_graph)
# Accumulate sparse gradient to sparse gradient will preserve the `params.grad` reference,
# but only if create_graph=False.
_test_grad_tensor(
torch.sparse_coo_tensor(torch.tensor([[1, 1]]).long(), torch.tensor([1., 1.])),
torch.sparse_coo_tensor(torch.tensor([[1, 1]]).long(), torch.tensor([1., 1.])),
not create_graph,
create_graph)
@skipIfNoLapack
def test_slogdet_sign(self):
a = torch.randn(3, 3, requires_grad=True)
s, logdet = a.slogdet()
# test that sign should not require grad
self.assertFalse(s.requires_grad)
# test that backward through computation involving sign works
def sign_mul_logdet(mat):
s, logdet = mat.slogdet()
return s * logdet
u, s, v = a.detach().svd()
s.abs_().clamp_(0.0001)
for sign in (-1, 1):
s[-1] = sign
mat = torch.linalg.multi_dot([u, s.diag(), v.t()]).requires_grad_()
gradcheck(sign_mul_logdet, mat)
gradgradcheck(sign_mul_logdet, mat)
def test_sum_to_with_empty_dim_grad(self):
a = torch.rand(4, 0, requires_grad=True)
b = torch.rand(4, 1, requires_grad=True)
c = a + b
assert c.shape == (4, 0)
c.sum().backward()
self.assertEqual(b.grad, torch.zeros(4, 1))
self.assertEqual(a.grad, torch.zeros(4, 0))
def test_hessian_vector(self):
x = torch.randn(2, 2, requires_grad=True)
y = torch.randn(2, 2, requires_grad=True)
z = x ** 2 + y * x + y ** 2
z.backward(torch.ones(2, 2), create_graph=True)
with torch.no_grad():
x_grad = 2 * x + y
y_grad = x + 2 * y
self.assertEqual(x.grad, x_grad)
self.assertEqual(y.grad, y_grad)
grad_sum = 2 * x.grad + y.grad
grad_sum.backward(torch.ones(2, 2))
x_hv = torch.ones(2, 2) * 5
y_hv = torch.ones(2, 2) * 4
self.assertEqual(x.grad, x_grad + x_hv)
self.assertEqual(y.grad, y_grad + y_hv)
def test_grad(self):
x = torch.randn(2, 2, requires_grad=True)
y = torch.randn(2, 2, requires_grad=True)
z = x ** 2 + y * x + y ** 2
z.backward(torch.ones(2, 2), create_graph=True)
x_grad = 2 * x + y
y_grad = x + 2 * y
self.assertEqual(x.grad, x_grad)
self.assertEqual(y.grad, y_grad)
grad_sum = 2 * x.grad + y.grad
x_hv = torch.autograd.grad(
outputs=[grad_sum], grad_outputs=[torch.ones(2, 2)],
inputs=[x], create_graph=True)
expected_x_hv = torch.ones(2, 2) * 5
expected_y_hv = torch.ones(2, 2) * 4
self.assertEqual(x_hv[0], expected_x_hv)
self.assertEqual(x.grad, x_grad)
self.assertEqual(y.grad, y_grad)
# Test that grad_outputs and outputs have the same shape
grad_out = torch.ones(2)
try:
torch.autograd.grad(
outputs=[grad_sum], grad_outputs=[grad_out],
inputs=[x], create_graph=True)
            self.fail("the grad call above should have raised a shape-mismatch RuntimeError")
except RuntimeError as error:
self.assertEqual(str(error), "Mismatch in shape: grad_output[0] has a shape of "
+ str(grad_out.shape) + " and output[0] has a shape of "
+ str(grad_sum.shape) + ".")
def test_grad_nonleaf(self):
x_init = torch.randn(2, 2, requires_grad=True)
x = x_init
y = torch.randn(2, 2, requires_grad=True)
grad_output = torch.ones(2, 2)
def fn(x):
return x ** 2 + y * x + y ** 2
for _ in range(5):
grad_x, = torch.autograd.grad(
fn(x), x, grad_outputs=grad_output, create_graph=True)
grad_x_expected = 2 * x + y
self.assertIsNone(y.grad)
self.assertIsNone(x.grad)
self.assertEqual(grad_x, grad_x_expected)
x = x + 0.05 * grad_x
val_init = fn(x_init).sum()
val_final = fn(x).sum()
self.assertGreater(val_final, val_init)
x.backward(grad_output)
self.assertIsNotNone(y.grad)
self.assertIsNotNone(x_init.grad)
def test_grad_nonleaf_many_outputs(self):
# This checks an edge case for function callbacks
# We want to capture two grads of a function, but can only
# register a single callback.
x = torch.randn(4, 2, requires_grad=True)
a, b = x.chunk(2)
def hook(*grads):
hook_called[0] = True
hook_called = [False]
x.register_hook(hook)
go = torch.randn(2, 2)
grad_a, grad_b = torch.autograd.grad(
(a + 2 * b), [a, b], grad_outputs=go, create_graph=True)
self.assertEqual(grad_a, go)
self.assertEqual(grad_b, go * 2)
self.assertFalse(hook_called[0])
self.assertIsNone(x.grad)
def test_grad_nonleaf_register_hook(self):
# This checks an edge case for register_hook.
# We want to capture grad of a nonleaf tensor,
# but avoid segfault during backward of other nonleaf tensors
x = torch.randn(5, requires_grad=True)
x_list = x.unbind()
x0 = x_list[0]
hook_results = [None]
def hook(grad):
hook_results[0] = grad
x0.register_hook(hook)
x_list[0].backward()
self.assertEqual(hook_results[0], torch.tensor(1.))
expected_grad = torch.tensor([1., 0, 0, 0, 0])
self.assertEqual(x.grad, expected_grad)
self.assertIsNone(x_list[0].grad)
for i in range(1, 5, 1):
x_list[i].backward()
self.assertEqual(hook_results[0], None)
expected_grad[i] = 1.0
self.assertEqual(x.grad, expected_grad)
self.assertIsNone(x_list[i].grad)
def test_hook_with_no_name(self):
        # Create a hook that does not have a __name__ attribute
class MyHookClass:
def __call__(self, grad):
return grad.clone()
x = torch.randn(5, requires_grad=True).clone()
x.register_hook(MyHookClass())
x.sum().backward()
# Should run fine
def test_sharded_grad(self):
leaves = [torch.zeros(5, 5, requires_grad=True) for _ in range(10)]
intermediates = [l * i + l * l for i, l in enumerate(leaves)]
loss = sum(v * i for i, v in enumerate(intermediates)).sum()
# define a helper for dividing intermediates into groups
def group(l, group_size):
return (l[i:i + group_size] for i in range(0, len(l), group_size))
# Compute the d loss / d intermediates in chunks of shard_size
shard_size = 2
d_intermediates = [d_i for intermediates_batch in group(intermediates, shard_size)
for d_i in torch.autograd.grad(loss, intermediates_batch)]
# Compute rest of backward pass
torch.autograd.backward(intermediates, d_intermediates)
for i, l in enumerate(leaves):
self.assertEqual(l.grad, i * i * (1 + l))
def test_backward_badcalls(self):
x = torch.ones(1)
with self.assertRaisesRegex(RuntimeError, 'does not require grad'):
x.backward()
def test_grad_badcalls(self):
x = torch.ones(1)
y = x ** 2
with self.assertRaisesRegex(RuntimeError, 'does not require grad'):
torch.autograd.grad(x, y)
with self.assertRaisesRegex(RuntimeError, 'does not require grad'):
torch.autograd.grad(y, x)
x = torch.ones(1, requires_grad=True)
y = x ** 2
torch.autograd.grad(y, x) # this should succeed now
def test_grad_fn_badcalls(self):
error_regex = 'expected .* arguments, got .* instead'
x = torch.ones(1, requires_grad=True)
y = x ** 2
with self.assertRaisesRegex(TypeError, error_regex):
y.grad_fn(x.detach(), x.detach()) # too many
with self.assertRaisesRegex(TypeError, error_regex):
y.grad_fn() # too few
y.grad_fn(x.detach()) # this should succeed
def test_grad_unreachable(self):
x = torch.ones(1, requires_grad=True)
y = torch.ones(1, requires_grad=True)
# Make sure x and y have grad accumulators allocated
z = x * 2
w = y * 2
grad_x, grad_y = torch.autograd.grad(x * 2, [x, y], allow_unused=True)
self.assertEqual(grad_x, x * 2)
self.assertIsNone(grad_y)
# This is slightly different than the case above, because z doesn't even
# have a grad accumulator allocated.
z = torch.ones(1, requires_grad=True)
grad_x, grad_z = torch.autograd.grad(x * 2, [x, z], allow_unused=True)
self.assertEqual(grad_x, x * 2)
self.assertIsNone(grad_z)
# allow_unused=False, but grads contains None inside, should throw
with self.assertRaisesRegex(RuntimeError,
"Set allow_unused=True"):
grad_x, grad_y = torch.autograd.grad(x * 2, [x, y], allow_unused=False)
def test_grad_unreachable_discovery(self):
# Test that certain nodes are not erroneously executed when an input
# is unreachable. See #39784
class MyFunc(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, x):
self.fail("This node should not be executed!")
x = MyFunc.apply(torch.randn(1, requires_grad=True) * 2)
y = torch.randn(1, requires_grad=True)
(gY,) = torch.autograd.grad(x, (y, ), allow_unused=True)
self.assertIsNone(gY)
x = MyFunc.apply(torch.randn(1, requires_grad=True) * 2)
y = torch.randn(1, requires_grad=True)
z = torch.randn(1, requires_grad=True)
(gY, gZ) = torch.autograd.grad(x + z, (y, z), allow_unused=True)
self.assertIsNone(gY)
self.assertIsNotNone(gZ)
x = MyFunc.apply(torch.randn(1, requires_grad=True) * 2)
y = torch.randn(1, requires_grad=True)
torch.autograd.backward(x, inputs=(y, )) # allow_unused is implicitly True!
self.assertIsNone(y.grad)
def test_hooks(self):
x = torch.ones(5, 5, requires_grad=True)
y = torch.ones(5, 5) * 4
y.requires_grad_(True)
counter = [0]
def bw_hook(inc, grad):
self.assertIsInstance(grad, torch.Tensor)
counter[0] += inc
z = x ** 2 + x * 2 + x * y + y
x.register_hook(lambda *args: bw_hook(0, *args))
test = z.register_hook(lambda *args: bw_hook(1, *args))
z.backward(torch.ones(5, 5), retain_graph=True)
self.assertEqual(counter[0], 1)
test2 = z.register_hook(lambda *args: bw_hook(2, *args))
z.backward(torch.ones(5, 5), retain_graph=True)
self.assertEqual(counter[0], 4)
test2.remove()
z.backward(torch.ones(5, 5), retain_graph=True)
self.assertEqual(counter[0], 5)
def bw_hook_modify(grad):
return grad.mul(2)
test.remove()
z.register_hook(bw_hook_modify)
with torch.no_grad():
y.grad.zero_()
z.backward(torch.ones(5, 5), retain_graph=True)
self.assertEqual(y.grad, (x + 1) * 2)
y.register_hook(bw_hook_modify)
with torch.no_grad():
y.grad.zero_()
z.backward(torch.ones(5, 5))
self.assertEqual(y.grad, (x + 1) * 4)
def test_hooks_cpp(self):
# Tests hooks for autograd function implemented in C++
bn = torch.nn.BatchNorm1d(5, affine=False)
bn.eval()
counter = [0]
def bw_hook(grad):
counter[0] += 1
return grad * 2
x = torch.ones(5, 5, requires_grad=True)
z = bn(x)
z.register_hook(bw_hook)
z.sum().backward()
self.assertEqual(counter[0], 1, msg='bw_hook not called')
self.assertEqual(x.grad, torch.ones(5, 5) * 2, atol=1e-5, rtol=0)
def test_hook_none(self):
# WARNING: this is a test for autograd internals.
# You should never have to use such things in your code.
class NoneGradientFunction(Function):
@staticmethod
def forward(ctx, x, y):
assert ctx.needs_input_grad[0]
assert not ctx.needs_input_grad[1]
return x, y
@staticmethod
def backward(ctx, grad_x, grad_y):
return grad_x, None
was_called = [False]
def hook(grad):
self.assertIsNotNone(grad)
was_called[0] = True
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5, 5)
rx, ry = NoneGradientFunction.apply(x, y)
rx.register_hook(hook)
ry.register_hook(hook)
sum(rx, ry).sum().backward()
self.assertTrue(was_called[0])
def test_retain_grad(self):
input = torch.rand(1, 3, requires_grad=True)
h1 = input * 3
out = (h1 * h1).sum()
# It should be possible to call retain_grad() multiple times
h1.retain_grad()
h1.retain_grad()
# Gradient should be accumulated
out.backward(retain_graph=True)
self.assertEqual(h1 * 2, h1.grad)
out.backward(retain_graph=True)
self.assertEqual(h1 * 4, h1.grad)
with torch.no_grad():
input.grad.zero_()
# It should be a no-op for leaves
input.retain_grad()
input.retain_grad()
out.backward()
self.assertEqual(input * 18, input.grad)
def test_retain_grad_cycle(self):
import gc
import weakref
counter = [0]
refs = [None]
x = torch.ones(5, 5, requires_grad=True)
def run_test():
y = x * 2
y.retain_grad()
def inc(*args):
counter[0] += 1
refs[0] = weakref.ref(y, inc)
return y / 2
z = run_test()
gc.collect()
self.assertIsNone(refs[0]())
self.assertEqual(counter[0], 1)
z.sum().backward()
def test_backward(self):
v = torch.randn(5, 5, requires_grad=True)
x = torch.randn(5, 5, requires_grad=True)
y = (torch.rand(5, 5) + 0.1).requires_grad_(True)
z = torch.randn(5, 5, requires_grad=True)
grad_output = torch.randn(5, 5)
v.backward(grad_output)
self.assertEqual(v.grad, grad_output)
a = x + (y * z) + 4 * z ** 2 * x / y
a.backward(grad_output)
x_grad = 4 * z.pow(2) / y + 1
y_grad = z - 4 * x * z.pow(2) / y.pow(2)
z_grad = 8 * x * z / y + y
self.assertEqual(x.grad, x_grad * grad_output)
self.assertEqual(y.grad, y_grad * grad_output)
self.assertEqual(z.grad, z_grad * grad_output)
def test_sparse_backward(self):
class FixedGradientFunction(Function):
@staticmethod
def forward(ctx, x, grad_x):
ctx.save_for_backward(grad_x)
return x
@staticmethod
def backward(ctx, grad_x):
saved_grad_x, = ctx.saved_tensors
return saved_grad_x, None
size = torch.Size([6, 3, 2])
i1 = torch.LongTensor([
[0, 3, 4],
[0, 2, 2],
])
v1 = torch.DoubleTensor([[1, 2], [4, 5], [7, 8]])
sparse_grad1 = torch.sparse.DoubleTensor(i1, v1, size)
i2 = torch.LongTensor([
[0, 1, 3, 4],
[0, 1, 2, 2],
])
v2 = torch.DoubleTensor([[1, 2], [4, 3], [4, 5], [7, 8]])
sparse_grad2 = torch.sparse.DoubleTensor(i2, v2, size)
dense_grad = torch.rand(size).double()
fn = FixedGradientFunction
# sparse first
x = torch.randn(size, requires_grad=True)
(fn.apply(x, sparse_grad1) + fn.apply(x, dense_grad) + fn.apply(x, sparse_grad2)).sum().backward()
self.assertEqual(x.grad, dense_grad + sparse_grad1 + sparse_grad2)
# dense first
x = torch.randn(size, requires_grad=True)
(fn.apply(x, dense_grad) + fn.apply(x, sparse_grad1) + fn.apply(x, sparse_grad2)).sum().backward()
self.assertEqual(x.grad, dense_grad + sparse_grad1 + sparse_grad2)
# sparse only
x = torch.randn(size, requires_grad=True)
(fn.apply(x, sparse_grad1) + fn.apply(x, sparse_grad2)).sum().backward()
self.assertEqual(x.grad, sparse_grad1 + sparse_grad2)
def test_sparse_mm_backward(self):
size = (3, 3)
sparse = torch.sparse_coo_tensor(size, requires_grad=True)
dense = torch.randn(size, requires_grad=True)
with self.assertRaisesRegex(
RuntimeError,
"The backward pass for this operation requires the 'mat1' tensor to be strided,"):
z = dense.addmm(sparse, dense)
mm_test_cases = [
# a requires grad, a is sparse, b requires grad, b is sparse, error message
(False, True, True, False, None),
(False, False, True, True, "The backward pass for this operation requires the 'mat2'"),
(False, True, True, True, "The backward pass for this operation requires the 'mat2'"),
(True, False, True, True, "The backward pass for this operation requires the 'mat2'"),
(True, True, False, False, "The backward pass for this operation requires the 'self'"),
(True, True, True, False, "The backward pass for this operation requires the 'self'"),
(True, True, True, True, "The backward pass for this operation requires the 'mat2'"),
]
for a_req_grad, a_is_sparse, b_req_grad, b_is_sparse, err_msg in mm_test_cases:
# We should only be testing cases with sparse inputs, and at least one
# input needs to require grad so we can call a backward pass
assert a_is_sparse or b_is_sparse
assert a_req_grad or b_req_grad
a = torch.randn(size, requires_grad=a_req_grad)
if a_is_sparse:
a = a.to_sparse()
b = torch.randn(size, requires_grad=b_req_grad)
if b_is_sparse:
b = b.to_sparse()
# If no error expected, check that sparse and dense cases match
if err_msg is None:
r = a.mm(b)
r.sum().backward()
a_grad = None if a.grad is None else a.grad.clone().detach()
b_grad = None if b.grad is None else b.grad.clone().detach()
# Redo with only dense tensors
a = (a.to_dense() if a.is_sparse else a).clone().detach()
a.requires_grad = a_req_grad
b = (b.to_dense() if b.is_sparse else b).clone().detach()
b.requires_grad = b_req_grad
r = a.mm(b)
r.sum().backward()
self.assertEqual(a_grad, a.grad)
self.assertEqual(b_grad, b.grad)
else:
with self.assertRaisesRegex(RuntimeError, err_msg):
a.mm(b)
def test_multi_backward(self):
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5, 5, requires_grad=True)
q = torch.randn(5, 5, requires_grad=True)
a = torch.randn(5, 5, requires_grad=True)
b = torch.randn(5, 5, requires_grad=True)
q2 = q * 2
z = x + y + q2
c = a * b + q2
grad_z = torch.randn(5, 5)
grad_c = torch.randn(5, 5)
torch.autograd.backward([z, c], [grad_z, grad_c])
self.assertEqual(x.grad, grad_z)
self.assertEqual(y.grad, grad_z)
self.assertEqual(a.grad, grad_c * b)
self.assertEqual(b.grad, grad_c * a)
self.assertEqual(q.grad, (grad_c + grad_z) * 2)
def test_multi_backward_no_grad(self):
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5, 5, requires_grad=False)
z = x + y
q = y * 2
# NB: we currently raise an exception if any arguments to backwards
# have requires_grad=False and don't have a grad_fn. We may want to
# relax that check to a warning.
def call_backwards():
torch.autograd.backward([z, q], [torch.ones(5, 5), torch.ones(5, 5)])
self.assertRaises(RuntimeError, call_backwards)
def test_backward_with_inputs(self):
x = torch.randn(2, 2, requires_grad=True)
y = torch.randn(2, 2, requires_grad=True)
def fn():
return x ** 2 + y * x + y ** 2
gradient = torch.ones(2, 2)
x_grad_expected = 2 * x + y
y_grad_expected = x + 2 * y
@torch.no_grad()
def reset_grad():
x.grad.zero_()
y.grad.zero_()
torch.autograd.backward(fn(), gradient, inputs=[x, y])
self.assertEqual(x.grad, x_grad_expected)
self.assertEqual(y.grad, y_grad_expected)
reset_grad()
torch.autograd.backward(fn(), gradient, inputs=[x])
self.assertEqual(x.grad, x_grad_expected)
self.assertEqual(y.grad, torch.zeros(2, 2))
reset_grad()
torch.autograd.backward(fn(), gradient, inputs=[y])
self.assertEqual(y.grad, y_grad_expected)
self.assertEqual(x.grad, torch.zeros(2, 2))
reset_grad()
torch.autograd.backward(fn(), gradient, inputs=y)
self.assertEqual(y.grad, y_grad_expected)
self.assertEqual(x.grad, torch.zeros(2, 2))
reset_grad()
self.assertRaisesRegex(RuntimeError, 'cannot be empty',
lambda: torch.autograd.backward(fn(), gradient, inputs=[]))
def test_backward_with_nonleaf_inputs(self):
x = torch.randn(2, 2, requires_grad=True)
x_nonleaf = x * 1
y = torch.randn(2, 2, requires_grad=True)
z = torch.randn(2, 2, requires_grad=True)
out = x_nonleaf ** 2 + y * x_nonleaf + y ** 2
out.backward(torch.ones(2, 2), create_graph=True, inputs=[x, y])
x_grad_expected = 2 * x + y
y_grad_expected = x + 2 * y
self.assertEqual(y.grad, y_grad_expected)
self.assertEqual(x.grad, x_grad_expected)
self.assertRaisesRegex(RuntimeError, 'not a leaf Tensor',
lambda: out.backward(torch.ones(2, 2), create_graph=True, inputs=[x, y, x_nonleaf]))
        # backward doesn't have an allow_unused flag, so when a variable is not
        # part of the graph the behavior is as if allow_unused were True:
        # its .grad (here z.grad) will simply be None.
out.backward(torch.ones(2, 2), create_graph=True, inputs=[z])
self.assertIsNone(z.grad)
def test_dependent_backward(self):
x = torch.randn(10, requires_grad=True)
y = x ** 2
z = y ** 3
go_y = torch.randn(10)
go_z = torch.randn(10)
torch.autograd.backward([y, z], [go_y, go_z])
xd = x
self.assertEqual(x.grad, 2 * xd * go_y + 6 * xd.pow(5) * go_z)
def test_save_output_nr(self):
x = torch.randn(10, requires_grad=True)
class MultiOutputFn(Function):
@staticmethod
def forward(ctx, x):
return x[:5], x[5:]
@staticmethod
def backward(ctx, *grad):
return torch.cat(grad)
a, b = MultiOutputFn.apply(x)
self.assertEqual(b.output_nr, 1)
class TestFn(Function):
@staticmethod
def forward(ctx, b):
ctx.save_for_backward(b)
return b * 2
@staticmethod
def backward(ctx, grad_b):
b, = ctx.saved_tensors
self.assertEqual(b.output_nr, 1)
TestFn.apply(b).sum().backward()
def test_free_deep_graph(self):
def scope():
depth = 150000
x = torch.randn(1, requires_grad=True)
y = x.clone()
# build a "chain" computation graph
for _ in range(depth):
y = y + y * 0.000001
# graph deletion occurs when the above locals go out of scope.
# In this case `del y` will trigger it but it's easier to leave
# it to Python to delete the locals.
# Should not stack overflow
scope()
def test_free_deep_graph_complicated(self):
def scope():
depth = 100000
randchoice = torch.randint(2, [depth, 2])
x = torch.randn(1, requires_grad=True)
y = x.clone()
# Hold the two previous values
prev_values = [None, None]
# Build a "chain with skip connections" graph
for _ in range(depth):
prev_tensors = [tensor for tensor in prev_values[:-1]
if tensor is not None]
prev_values.append(y)
prev_values.pop(0)
# Definitely pick one tensor to add
y += y * 0.000001
# Possibly add other tensors
nprev = len(prev_tensors)
if nprev == 2:
y += randchoice[depth].mul(torch.cat(prev_tensors)).sum()
# graph deletion occurs when the above locals go out of scope.
# Should not stack overflow
scope()
def test_free_deep_graph_pyfunction(self):
class MyOp(Function):
@staticmethod
def forward(ctx, tensor1, tensor2):
return tensor1 + tensor2
@staticmethod
def backward(ctx, grad_output):
return grad_output, grad_output
def scope():
depth = 150000
x = torch.randn(1, requires_grad=True)
y = x.clone()
# build deeply nested computation graph
for _ in range(depth):
y = MyOp.apply(y, y)
# graph deletion occurs when the above locals go out of scope.
# Should not stack overflow
scope()
def test_no_unnecessary_save(self):
# If we kept x in the derivative Function of x * 2 we would
# get an error in the backward that would complain that we've
# modified x, which was needed for gradient computation.
# Since we should elide unnecessary saves, this test should pass.
mu = torch.ones(1, requires_grad=True)
x = torch.empty(1)
loss = 0
for i in range(3):
x.detach_()
x.copy_(mu + i)
ft = torch.tensor([float(i)])
multiplied = x * ft
s = multiplied.sum()
loss += s
loss.backward()
def test_no_grad(self):
x = torch.ones(5, 5, requires_grad=True)
y = torch.ones(5, 5) * 4
with torch.no_grad():
w = x + y
@torch.no_grad()
def adder(x, y):
return x + y
z = adder(x, y)
self.assertFalse(w.requires_grad)
self.assertRaises(RuntimeError, lambda: w.backward(torch.ones(5, 5)))
self.assertIsNone(w.grad_fn)
self.assertFalse(z.requires_grad)
self.assertRaises(RuntimeError, lambda: z.backward(torch.ones(5, 5)))
self.assertIsNone(z.grad_fn)
# test nested decorator and with-statement on no_grad
with torch.no_grad():
self.assertFalse(torch.is_grad_enabled())
w = adder(x, y)
self.assertFalse(torch.is_grad_enabled())
def test_set_grad_generator_functions(self):
@torch.no_grad()
def gen_no_grad():
for i in range(10):
self.assertEqual(torch.is_grad_enabled(), False)
yield i
with torch.enable_grad():
for _ in gen_no_grad():
self.assertEqual(torch.is_grad_enabled(), True)
@torch.enable_grad()
def gen_enable_grad():
for i in range(10):
self.assertEqual(torch.is_grad_enabled(), True)
yield i
with torch.no_grad():
for _ in gen_enable_grad():
self.assertEqual(torch.is_grad_enabled(), False)
def test_set_grad_generator_functions_recursive(self):
# enable_grad_decorator_recursive and no_grad_decorator_recursive call each other
# recursively, to ensure that the decorators preserve the caller's setting
@torch.enable_grad()
def enable_grad_decorator_recursive(depth):
self.assertTrue(torch.is_grad_enabled())
if depth > 0:
no_grad_decorator_recursive(depth - 1)
self.assertTrue(torch.is_grad_enabled())
@torch.no_grad()
def no_grad_decorator_recursive(depth):
self.assertFalse(torch.is_grad_enabled())
if depth > 0:
enable_grad_decorator_recursive(depth - 1)
self.assertFalse(torch.is_grad_enabled())
# enable_grad_context_manager_recursive and no_grad_context_manager_recursive call
# each other recursively, to ensure that the decorators preserve the caller's setting
def enable_grad_context_manager_recursive(depth):
with torch.enable_grad():
self.assertTrue(torch.is_grad_enabled())
if depth > 0:
no_grad_context_manager_recursive(depth - 1)
self.assertTrue(torch.is_grad_enabled())
def no_grad_context_manager_recursive(depth):
with torch.no_grad():
self.assertFalse(torch.is_grad_enabled())
if depth > 0:
enable_grad_context_manager_recursive(depth - 1)
self.assertFalse(torch.is_grad_enabled())
with torch.enable_grad():
self.assertTrue(torch.is_grad_enabled())
enable_grad_decorator_recursive(10)
self.assertTrue(torch.is_grad_enabled())
enable_grad_context_manager_recursive(10)
self.assertTrue(torch.is_grad_enabled())
with torch.no_grad():
self.assertFalse(torch.is_grad_enabled())
enable_grad_decorator_recursive(10)
self.assertFalse(torch.is_grad_enabled())
enable_grad_context_manager_recursive(10)
self.assertFalse(torch.is_grad_enabled())
def test_set_grad_coroutines(self):
@torch.no_grad()
def coro_no_grad(n=10):
self.assertFalse(torch.is_grad_enabled())
for i in range(n):
self.assertFalse(torch.is_grad_enabled())
r = yield i
self.assertFalse(torch.is_grad_enabled())
self.assertEqual(i, r)
self.assertFalse(torch.is_grad_enabled())
@torch.enable_grad()
def coro_enable_grad(n=10):
self.assertTrue(torch.is_grad_enabled())
for i in range(n):
self.assertTrue(torch.is_grad_enabled())
r = yield i
self.assertTrue(torch.is_grad_enabled())
self.assertEqual(i, r)
self.assertTrue(torch.is_grad_enabled())
with torch.enable_grad():
self.assertTrue(torch.is_grad_enabled())
coro, r = coro_no_grad(), None
try:
while True:
self.assertTrue(torch.is_grad_enabled())
r = coro.send(r)
self.assertTrue(torch.is_grad_enabled())
except StopIteration:
pass
with torch.no_grad():
self.assertFalse(torch.is_grad_enabled())
coro, r = coro_enable_grad(), None
try:
while True:
self.assertFalse(torch.is_grad_enabled())
r = coro.send(r)
self.assertFalse(torch.is_grad_enabled())
except StopIteration:
pass
def test_set_grad_coroutines_benign_exceptions(self):
class RecoverableException(Exception):
pass
@torch.no_grad()
def coro_no_grad(n=10):
has_raised = False
for i in range(n):
try:
self.assertFalse(torch.is_grad_enabled())
yield (-i if has_raised else i)
except RecoverableException:
self.assertFalse(torch.is_grad_enabled())
has_raised = True
@torch.enable_grad()
def coro_enable_grad(n=10):
has_raised = False
for i in range(n):
try:
self.assertTrue(torch.is_grad_enabled())
yield (-i if has_raised else i)
except RecoverableException:
self.assertTrue(torch.is_grad_enabled())
has_raised = True
with torch.enable_grad():
coro = coro_no_grad()
assert 0 == next(coro)
try:
while True:
r = coro.throw(RecoverableException)
self.assertLess(r, 0)
except StopIteration:
pass
with torch.no_grad():
coro = coro_enable_grad()
assert 0 == next(coro)
try:
while True:
r = coro.throw(RecoverableException)
self.assertLess(r, 0)
except StopIteration:
pass
def test_set_grad_coroutines_critical_exceptions(self):
class UnrecoverableException(Exception):
pass
class SecondaryException(Exception):
pass
@torch.no_grad()
def coro_no_grad(n=10):
has_raised = False
for i in range(n):
try:
self.assertFalse(torch.is_grad_enabled())
yield (-i if has_raised else i)
except UnrecoverableException:
self.assertFalse(torch.is_grad_enabled())
raise SecondaryException
@torch.enable_grad()
def coro_enable_grad(n=10):
has_raised = False
for i in range(n):
try:
self.assertTrue(torch.is_grad_enabled())
yield (-i if has_raised else i)
except UnrecoverableException:
self.assertTrue(torch.is_grad_enabled())
raise SecondaryException
with torch.enable_grad():
coro = coro_no_grad()
assert 0 == next(coro)
with self.assertRaises(SecondaryException):
coro.throw(UnrecoverableException)
with torch.no_grad():
coro = coro_enable_grad()
assert 0 == next(coro)
with self.assertRaises(SecondaryException):
coro.throw(UnrecoverableException)
def test_set_grad_coroutines_exit(self):
@torch.no_grad()
def coro_no_grad(state):
for i in range(10):
try:
self.assertFalse(torch.is_grad_enabled())
yield i
except GeneratorExit:
self.assertFalse(torch.is_grad_enabled())
state.add('GeneratorExit')
raise
@torch.enable_grad()
def coro_enable_grad(state):
for i in range(10):
try:
self.assertTrue(torch.is_grad_enabled())
yield i
except GeneratorExit:
self.assertTrue(torch.is_grad_enabled())
state.add('GeneratorExit')
raise
state = set()
with torch.enable_grad():
coro = coro_no_grad(state)
for i in range(5):
next(coro)
coro.close()
self.assertTrue('GeneratorExit' in state)
state = set()
with torch.no_grad():
coro = coro_enable_grad(state)
for i in range(5):
next(coro)
coro.close()
self.assertTrue('GeneratorExit' in state)
def test_no_grad_python_function(self):
"""Python Functions should respect grad mode."""
x = torch.ones(5, 5, requires_grad=True)
class MyOp(Function):
@staticmethod
def forward(self, x):
return x + 1
@staticmethod
def backward(self, dy):
return dy
with torch.no_grad():
y = MyOp.apply(x)
self.assertFalse(y.requires_grad)
def test_indexing(self):
x = torch.arange(1., 17).view(4, 4)
y = Variable(x, requires_grad=True)
def compare(x, y, idx, indexed_tensor, indexed_var):
indexed_var_t = indexed_var.data
if not isinstance(indexed_tensor, torch.Tensor):
indexed_var_t = indexed_var_t[0]
self.assertEqual(indexed_tensor, indexed_var_t)
indexed_var.sum().backward()
expected_grad = torch.Tensor(x.size()).fill_(0)
expected_grad[idx] = 1
self.assertEqual(y.grad, expected_grad)
def check_index(x, y, idx):
if y.grad is not None:
with torch.no_grad():
y.grad.zero_()
indexed_tensor = x[idx]
indexed_var = y[idx]
compare(x, y, idx, indexed_tensor, indexed_var)
check_index(x, y, 1)
check_index(x, y, (1, 1))
check_index(x, y, slice(1, None))
check_index(x, y, slice(None, 2))
check_index(x, y, (slice(None, 2), 2))
check_index(x, y, (slice(1, 2), 2))
check_index(x, y, (1, slice(2, None)))
check_index(x, y, (slice(None, None), slice(2, None)))
check_index(x, y, torch.LongTensor([0, 2]))
check_index(x, y, torch.rand(4, 4).bernoulli().bool())
check_index(x, y, (Ellipsis, slice(2, None)))
check_index(x, y, ([0], [0]))
check_index(x, y, ([1, 2, 3], [0]))
check_index(x, y, ([1, 2], [2, 1]))
check_index(x, y, ([[1, 2], [3, 0]], [[0, 1], [2, 3]]))
check_index(x, y, ([slice(None), [2, 3]]))
check_index(x, y, ([[2, 3], slice(None)]))
# advanced indexing, with less dim, or ellipsis
check_index(x, y, ([0]))
check_index(x, y, ([0], ))
x = torch.arange(1., 49).view(4, 3, 4)
y = Variable(x, requires_grad=True)
check_index(x, y, (slice(None), [0], [0]))
check_index(x, y, ([0], [0], slice(None)))
check_index(x, y, (slice(None), [0, 1, 2], [0]))
check_index(x, y, ([0, 1, 2], [0], slice(None)))
check_index(x, y, (slice(None), [1, 2], [2, 1]))
check_index(x, y, ([1, 2], [2, 1], slice(None)))
check_index(x, y, (slice(None), [[1, 2], [2, 0]], [[0, 1], [2, 3]]))
check_index(x, y, ([[1, 2], [3, 0]], [[0, 1], [2, 2]], slice(None)))
check_index(x, y, (slice(None), slice(None), [2, 1]))
check_index(x, y, (slice(None), [2, 1], slice(None)))
check_index(x, y, ([2, 1], slice(None), slice(None)))
# advanced indexing, with less dim, or ellipsis
check_index(x, y, ([0], ))
check_index(x, y, ([0], slice(None)))
check_index(x, y, ([0], Ellipsis))
check_index(x, y, ([1, 2], [0, 1]))
check_index(x, y, ([1, 2], [0, 1], Ellipsis))
check_index(x, y, (Ellipsis, [1, 2], [0, 1]))
# advanced indexing, with a tensor wrapped in a variable
z = torch.LongTensor([0, 1])
zv = Variable(z, requires_grad=False)
seq = [z, Ellipsis]
seqv = [zv, Ellipsis]
if y.grad is not None:
with torch.no_grad():
y.grad.zero_()
indexed_tensor = x[seq]
indexed_var = y[seqv]
compare(x, y, seq, indexed_tensor, indexed_var)
def test_indexing_duplicates(self):
x = torch.arange(1., 17).view(4, 4)
y = Variable(x, requires_grad=True)
idx = torch.LongTensor([1, 1, 3, 2, 1, 2])
y[idx].sum().backward()
expected_grad = torch.zeros(4, 4)
for i in idx:
expected_grad[i] += 1
self.assertEqual(y.grad, expected_grad)
# with advanced indexing
x = torch.arange(1., 17).view(4, 4)
y = Variable(x, requires_grad=True)
idx = [[1, 1, 3, 2, 1, 2], [0]]
y[idx].sum().backward()
expected_grad = torch.zeros(4, 4)
for i in idx[0]:
for j in idx[1]:
expected_grad[i][j] += 1
self.assertEqual(y.grad, expected_grad)
x = torch.arange(1., 17).view(4, 4)
y = Variable(x, requires_grad=True)
idx = [[[1, 2], [0, 0]], [[0, 1], [1, 1]]]
y[idx].sum().backward()
expected_grad = torch.Tensor([[0, 2, 0, 0],
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, 0]])
self.assertEqual(y.grad, expected_grad)
x = torch.arange(1., 65).view(4, 4, 4)
y = Variable(x, requires_grad=True)
idx = [[1, 1, 1], slice(None), slice(None)]
y[idx].sum().backward()
expected_grad = torch.Tensor(4, 4, 4).zero_()
expected_grad[1].fill_(3)
self.assertEqual(y.grad, expected_grad)
def test_index_backward_does_not_save_tensor(self):
# Example from https://github.com/pytorch/pytorch/issues/24853.
# if `index(tensor, indices)` saves `tensor` for backwards, then it will
# trigger a version check on `tensor` during the backward pass, which
# will cause the following code to error because `tensor` gets modified
# by the indexing line.
a = torch.tensor([1., 0, 0])
b = torch.zeros(3, requires_grad=True)
tensor = b + 0
tensor[a != 0] = tensor[a != 0]
tensor.backward(torch.zeros_like(tensor))
def test_volatile_deprecated(self):
v = torch.autograd.torch.randn(3, 3)
with warnings.catch_warnings(record=True) as w:
self.assertFalse(v.volatile)
self.assertIn('volatile', str(w[0].message))
def test_saved_variables_deprecated(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, tensor1, tensor2):
ctx.save_for_backward(tensor1, tensor2)
return tensor1 + tensor2
@staticmethod
def backward(ctx, grad_output):
var1, var2 = ctx.saved_variables
return (grad_output, grad_output)
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter("always")
x = torch.randn((3, 3), requires_grad=True)
y = torch.randn((3, 3), requires_grad=True)
model = MyFunction()
model.apply(x, y).sum().backward()
has_deprecated = map(lambda warn:
'deprecated' in str(warn) and
'saved_variables' in str(warn),
warns)
has_deprecated = reduce(lambda x, y: x or y, has_deprecated)
self.assertTrue(has_deprecated)
def test_requires_grad(self):
x = torch.randn(5, 5)
y = torch.randn(5, 5)
z = torch.randn(5, 5, requires_grad=True)
a = x + y
self.assertFalse(a.requires_grad)
b = a + z
self.assertTrue(b.requires_grad)
def error():
raise RuntimeError
# Make sure backward isn't called on these
a._backward_hooks = OrderedDict()
x._backward_hooks = OrderedDict()
y._backward_hooks = OrderedDict()
a._backward_hooks['test'] = error
x._backward_hooks['test'] = error
y._backward_hooks['test'] = error
b.backward(torch.ones(5, 5))
def test_requires_grad_(self):
x = torch.randn(5, 5)
y = torch.randn(5, 5, requires_grad=True)
self.assertIs(x, x.requires_grad_())
self.assertTrue(x.requires_grad)
self.assertIs(y, y.requires_grad_())
self.assertTrue(y.requires_grad)
self.assertIs(x, x.requires_grad_(True))
self.assertTrue(x.requires_grad)
self.assertIs(y, y.requires_grad_(True))
self.assertTrue(y.requires_grad)
z = x * y
self.assertRaises(RuntimeError, lambda: z.requires_grad_(False))
self.assertIs(z, z.requires_grad_())
self.assertTrue(z.requires_grad)
self.assertIs(z, z.requires_grad_(True))
self.assertTrue(z.requires_grad)
self.assertIs(x, x.requires_grad_(False))
self.assertFalse(x.requires_grad)
self.assertIs(y, y.requires_grad_(False))
self.assertFalse(y.requires_grad)
def test_requires_grad_inplace(self):
a = torch.randn(5, 5)
b = torch.randn(5, 5, requires_grad=True)
a += b
self.assertTrue(a.requires_grad)
# non-leaf
a = torch.randn(5, 5) + 0
b = torch.randn(5, 5, requires_grad=True)
a += b
self.assertTrue(a.requires_grad)
def test_no_requires_grad_inplace(self):
# basic case, should be able to modify inplace while requires_grad is False
a = torch.randn(2, 3)
a.add_(5)
a.requires_grad = True
a.sum().backward()
self.assertEqual(a.grad, torch.ones(2, 3))
# same but with a view
a = torch.randn(2, 3)
b = a[:]
b.add_(5)
a.requires_grad = True
a.sum().backward()
self.assertEqual(a.grad, torch.ones(2, 3))
# should fail if requires_grad = True when we modify inplace
a = torch.randn(2, 3)
b = a[:]
a.requires_grad = True
with self.assertRaises(RuntimeError):
a.add_(5)
with self.assertRaises(RuntimeError):
b.add_(5)
def test_attribute_deletion(self):
x = torch.randn((5, 5), requires_grad=True)
del x.grad
self.assertIsNone(x.grad)
with self.assertRaises(RuntimeError):
del x.data
with self.assertRaises(TypeError):
x.data = None
with self.assertRaises(RuntimeError):
del x.requires_grad
with self.assertRaises(RuntimeError):
del x._grad_fn
with self.assertRaises(RuntimeError):
del x._backward_hooks
def test_duplicate_backward_root(self):
a = torch.randn(5, 5, requires_grad=True)
b = torch.randn(5, 5, requires_grad=True)
x = a * b
grad_output = torch.randn_like(x)
torch.autograd.backward([x, x], [grad_output, grad_output])
self.assertEqual(a.grad, b * grad_output * 2)
self.assertEqual(b.grad, a * grad_output * 2)
def test_backward_no_grad(self):
a = torch.randn(5, 5, requires_grad=True)
b = a + 2
with self.assertRaises(RuntimeError):
torch.autograd.backward([b], [None])
def test_backward_twice_with_saved_values(self):
b = torch.randn(3, requires_grad=True, dtype=torch.double)
c = torch.zeros(3, dtype=torch.double)
c[[1, 2]] = b[[1, 1]]
c.backward(torch.tensor([1, 1, 1], dtype=torch.double))
self.assertRaisesRegex(RuntimeError, 'Specify retain_graph=True',
lambda: c.backward(torch.tensor([1, 1, 1], dtype=torch.double)))
def test_backward_twice_retained_graph_with_saved_values(self):
b = torch.randn(3, requires_grad=True, dtype=torch.double)
c = torch.zeros(3, dtype=torch.double)
c[[1, 2]] = b[[1, 1]]
c.backward(torch.tensor([1, 1, 1], dtype=torch.double), retain_graph=True)
c.backward(torch.tensor([1, 1, 1], dtype=torch.double))
def test_backward_twice_without_saved_values(self):
b = torch.randn(3, requires_grad=True, dtype=torch.double)
c = b + 1
c.backward(torch.tensor([1, 1, 1], dtype=torch.double))
c.backward(torch.tensor([1, 1, 1], dtype=torch.double))
def test_backward_twice_retained_graph_without_saved_values(self):
b = torch.randn(3, requires_grad=True, dtype=torch.double)
c = torch.zeros(3, dtype=torch.double)
c[[1, 2]] = b[[1, 1]]
c.backward(torch.tensor([1, 1, 1], dtype=torch.double), retain_graph=True)
c.backward(torch.tensor([1, 1, 1], dtype=torch.double))
def test_next_functions(self):
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5, 5, requires_grad=True)
a = x + y
self.assertIsNotNone(a.grad_fn)
next_functions = a.grad_fn.next_functions
self.assertEqual(len(next_functions), 2)
self.assertIsInstance(next_functions[0][0], torch._C._functions.AccumulateGrad)
self.assertEqual(next_functions[0][1], 0)
self.assertIsInstance(next_functions[1][0], torch._C._functions.AccumulateGrad)
self.assertEqual(next_functions[1][1], 0)
b = a + 5
next_functions = b.grad_fn.next_functions
self.assertEqual(len(next_functions), 2)
self.assertIs(next_functions[0][0], a.grad_fn)
self.assertIs(next_functions[1][0], None)
def test_inplace(self):
x = torch.ones(5, 5, requires_grad=True)
y = Variable(torch.ones(5, 5) * 4, requires_grad=True)
z = x * y
q = z + y
w = z * y
z.add_(2)
        # Add doesn't need its inputs to do backward, so it shouldn't raise
q.backward(torch.ones(5, 5), retain_graph=True)
# Mul saves both inputs in forward, so it should raise
self.assertRaises(RuntimeError, lambda: w.backward(torch.ones(5, 5)))
z = x * y
q = z * y
r = z + y
w = z.add_(y)
        # w is the last expression, so this should succeed
w.backward(torch.ones(5, 5), retain_graph=True)
# r doesn't use the modified value in backward, so it should succeed
r.backward(torch.ones(5, 5), retain_graph=True)
# q uses dirty z, so it should raise
self.assertRaises(RuntimeError, lambda: q.backward(torch.ones(5, 5)))
with torch.no_grad():
x.grad.zero_()
m = x / 2
z = m + y / 8
q = z * y
r = z + y
prev_version = z._version
w = z.exp_()
self.assertNotEqual(z._version, prev_version)
r.backward(torch.ones(5, 5), retain_graph=True)
self.assertEqual(x.grad, torch.ones(5, 5) / 2)
w.backward(torch.ones(5, 5), retain_graph=True)
self.assertEqual(x.grad, torch.Tensor(5, 5).fill_((1 + math.e) / 2))
self.assertRaises(RuntimeError, lambda: q.backward(torch.ones(5, 5)))
leaf = torch.ones(5, 5, requires_grad=True)
x = leaf.clone()
x.add_(10)
self.assertEqual(x, torch.ones(5, 5) * 11)
# x should be still usable
y = x + 2
y.backward(torch.ones(5, 5))
self.assertEqual(leaf.grad, torch.ones(5, 5))
z = x * y
x.add_(2)
self.assertRaises(RuntimeError, lambda: z.backward(torch.ones(5, 5)))
def test_mark_non_differentiable(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, input):
output = input > 0
ctx.mark_non_differentiable(output)
return output
@staticmethod
def backward(ctx, grad_output):
return (grad_output * 0).to(torch.double)
x = torch.randn(5, 5, requires_grad=True)
mask = MyFunction.apply(x)
self.assertFalse(mask.requires_grad)
y = x.masked_fill(mask, 0)
y.sum().backward()
def test_mark_non_differentiable_mixed(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, input):
a = input + 1
b = input + 2
ctx.mark_non_differentiable(a)
return a, b
@staticmethod
def backward(ctx, grad_a, grad_b):
self.assertTrue((grad_a == 0).all())
self.assertTrue((grad_b == 1).all())
return grad_b
x = torch.randn(5, 5, requires_grad=True)
a, b = MyFunction.apply(x)
self.assertFalse(a.requires_grad)
self.assertTrue(b.requires_grad)
b.sum().backward()
self.assertEqual(x.grad, torch.ones(5, 5))
def test_mark_non_differentiable_none(self):
# This used to segfault because MyFunction would send back null
# gradients to MulBackward, which is implemented in C++. C++
        # implemented functions expect incoming grad_outputs to be non-null.
class MyFunction(Function):
@staticmethod
def forward(ctx, input):
output = input.clone()
ctx.mark_non_differentiable(output)
return output
@staticmethod
def backward(ctx, grad_output):
return None
x = torch.randn(5, 5, requires_grad=True)
r = MyFunction.apply(x * x)
(r * x).sum().backward()
def test_return_duplicate(self):
class DoubleDuplicate(Function):
@staticmethod
def forward(ctx, x):
output = x * 2
return output, output
@staticmethod
def backward(ctx, grad1, grad2):
return grad1 * 2 + grad2 * 2
def fn(x):
a, b = DoubleDuplicate.apply(x)
self.assertIs(a, b)
return a + b
x = torch.randn(5, 5, requires_grad=True)
gradcheck(fn, [x])
gradgradcheck(fn, [x])
def test_return_duplicate_inplace(self):
class DoubleInplace(Function):
@staticmethod
def forward(ctx, x):
x.mul_(2)
ctx.mark_dirty(x)
return x, x
@staticmethod
def backward(ctx, grad1, grad2):
return grad1 * 2 + grad2 * 2
def inplace_fn(x):
a, b = DoubleInplace.apply(x.clone())
self.assertIs(a, b)
return a + b
x = torch.randn(5, 5, requires_grad=True)
gradcheck(inplace_fn, [x])
gradgradcheck(inplace_fn, [x])
# Can't modify leaf variables in-place
self.assertRaises(RuntimeError, lambda: InplaceFunction.apply(x))
# Functions which modify views in-place must return only one output
self.assertRaises(RuntimeError, lambda: InplaceFunction.apply(x.clone()[0]))
@suppress_warnings
def test_resize(self):
x = torch.ones(2, 3)
self.assertTrue(x.resize(3, 2).size() == (3, 2))
def _test_setitem(self, size, index):
x = torch.ones(*size, requires_grad=True)
y = x + 2
y_version = y._version
y[index] = 2
self.assertNotEqual(y._version, y_version)
y.backward(torch.ones(*size))
expected_grad = torch.ones(*size)
expected_grad[index] = 0
self.assertEqual(x.grad, expected_grad)
def _test_setitem_tensor(self, size, index):
x = torch.ones(*size, requires_grad=True)
y = x + 2
y_version = y._version
value = x.new(x[index].size()).fill_(7)
value.requires_grad = True
y[index] = value
self.assertNotEqual(y._version, y_version)
y.backward(torch.ones(*size))
expected_grad_input = torch.ones(*size)
expected_grad_input[index] = 0
self.assertEqual(x.grad, expected_grad_input)
self.assertEqual(value.grad, torch.ones_like(value))
        # case when x is broadcast to match the shape of y[1]
x = torch.randn(4, requires_grad=True)
y = torch.zeros(2, 3, 4)
y[1] = x
y.backward(torch.randn(2, 3, 4))
self.assertEqual(x.size(), x.grad.size())
def test_setitem(self):
self._test_setitem((5, 5), 1)
self._test_setitem((5,), 1)
self._test_setitem((1,), 0)
self._test_setitem((10,), [[0, 4, 2]])
self._test_setitem((5, 5), [[0, 4], [2, 2]])
self._test_setitem((5, 5, 5), [slice(None), slice(None), [1, 3]])
self._test_setitem((5, 5, 5), [slice(None), [1, 3], slice(None)])
self._test_setitem((5, 5, 5), [[1, 3], slice(None), slice(None)])
self._test_setitem((5, 5, 5), [slice(None), [2, 4], [1, 3]])
self._test_setitem((5, 5, 5), [[1, 3], [2, 4], slice(None)])
self._test_setitem_tensor((5, 5), 3)
self._test_setitem_tensor((5, 5), [[0, 1], [1, 0]])
self._test_setitem_tensor((5,), 3)
self._test_setitem_tensor((5,), Variable(torch.LongTensor([3]), requires_grad=False).sum())
self._test_setitem_tensor((5,), [[0, 1, 2, 3]])
self._test_setitem_tensor((5, 5, 5), [slice(None), slice(None), [1, 3]])
self._test_setitem_tensor((5, 5, 5), [slice(None), [1, 3], slice(None)])
self._test_setitem_tensor((5, 5, 5), [[1, 3], slice(None), slice(None)])
self._test_setitem_tensor((5, 5, 5), [slice(None), [2, 4], [1, 3]])
self._test_setitem_tensor((5, 5, 5), [[1, 3], [2, 4], slice(None)])
self._test_setitem_tensor((5, 5, 5), [Variable(torch.LongTensor([1,
3]), requires_grad=False), [2, 4], slice(None)])
def test_setitem_mask(self):
mask = torch.BoolTensor(5, 5).bernoulli_()
self._test_setitem((5, 5), Variable(mask))
self._test_setitem((5,), Variable(mask[0]))
self._test_setitem((1,), Variable(mask[0, 0:1]))
self._test_setitem_tensor((5, 5), Variable(mask))
self._test_setitem_tensor((5,), Variable(mask[0]))
def test_select_sum(self):
# both select and sum return Scalars in ATen; ensure they work together.
x = torch.randn(10, requires_grad=True)
def func(x):
return x.select(0, 1).sum()
gradcheck(func, [x])
gradgradcheck(func, [x])
def test_diagonal_expanded_v(self):
value = torch.rand([])
v_expanded = torch.tensor(value).expand(10)
a = torch.rand(10, 10, requires_grad=True)
result, = torch.autograd.grad(a.diagonal(), a, v_expanded)
self.assertEqual(result, torch.eye(10) * value)
def test_select_expanded_v(self):
v_expanded = torch.rand(10).expand(10, 10)
a = torch.rand(10, 10, 10, requires_grad=True)
result, = torch.autograd.grad(a[0], a, v_expanded)
expected = torch.zeros(10, 10, 10)
expected[0] = v_expanded
self.assertEqual(result, expected)
def test_slice_expanded_v(self):
v_expanded = torch.rand(10, 1).expand(2, 10, 10)
a = torch.rand(10, 10, 10, requires_grad=True)
result, = torch.autograd.grad(a[3:5], a, v_expanded)
expected = torch.zeros(10, 10, 10)
expected[3:5] = v_expanded
self.assertEqual(result, expected)
def test_unbind(self):
stacked = torch.randn(3, 10, 10, requires_grad=True)
x, y, z = stacked.unbind()
grad = torch.randn(3, 10, 10)
torch.autograd.backward([x, y, z], grad.unbind())
self.assertEqual(stacked.grad, grad)
# check that it works with only one gradient provided (#9977)
for i in range(3):
stacked = torch.randn(3, 10, 10, requires_grad=True)
outs = stacked.unbind()
gi = grad.unbind()[i]
g, = torch.autograd.grad(outs[i], stacked, gi)
g_expected = torch.stack([gi if j == i else torch.zeros_like(gi)
for j in range(3)], dim=0)
self.assertEqual(g, g_expected)
def test_put(self):
root = torch.randn(4, 5, requires_grad=True)
values = torch.randn(6, requires_grad=True)
idx = Variable(torch.LongTensor([1, 2, 3, -1, -2, -3]))
def func(root, values):
x = root.clone()
x.put_(idx, values)
return x
gradcheck(func, [root, values])
gradgradcheck(func, [root, values])
def test_put_accumulate(self):
root = torch.randn(4, 5, requires_grad=True)
values = torch.randn(6, requires_grad=True)
idx = Variable(torch.LongTensor([1, 2, 3, 1, 2, 3]))
def func(root, values):
x = root.clone()
x.put_(idx, values, accumulate=True)
return x
gradcheck(func, [root, values])
gradgradcheck(func, [root, values])
def test_fill(self):
root = torch.randn(4, 5, requires_grad=True)
def func(root):
x = root.clone()
x.fill_(2)
return x
gradcheck(func, [root])
gradgradcheck(func, [root])
def test_unused_output(self):
x = torch.randn(10, 10, requires_grad=True)
outputs = x.chunk(5)
o = outputs[2]
o = o * 4 + 2
o.sum().backward()
expected_grad = torch.zeros(10, 10)
expected_grad[4:6] = 4
self.assertEqual(x.grad, expected_grad)
with torch.no_grad():
x.grad.zero_()
grad_output = torch.randn(2, 10)
outputs = x.chunk(5)
outputs[0].backward(grad_output)
expected_grad = torch.zeros(10, 10)
expected_grad[:2] = grad_output
self.assertEqual(x.grad, expected_grad)
def _test_sparse_gather(self, size_x, size_ind, dim):
x = torch.randn(size_x, requires_grad=True)
if len(size_ind) > 0 and len(size_x) > 0:
ind = torch.randint(x.size(dim), size_ind)
else:
ind = torch.zeros(size_ind, dtype=torch.int64)
out = torch.gather(x, dim, ind, sparse_grad=False)
grad = torch.rand_like(out)
out.backward(grad)
grad_dense = x.grad.clone()
x.grad = None
out = torch.gather(x, dim, ind, sparse_grad=True)
out.backward(grad)
self.assertEqual(grad_dense, x.grad.to_dense())
def test_sparse_gather_dim0(self):
self._test_sparse_gather((10, 10), (5, 10), 0)
def test_sparse_gather_dim1(self):
self._test_sparse_gather((10, 10, 5), (10, 5, 5), 1)
def test_sparse_gather_dim_neg(self):
self._test_sparse_gather((10, 10, 5), (10, 10, 2), -1)
def test_sparse_gather_ind_scalar(self):
self._test_sparse_gather((10,), (), 0)
def test_sparse_gather_x_scalar(self):
self._test_sparse_gather((), (2,), 0)
def test_sparse_gather_both_scalar(self):
self._test_sparse_gather((), (), 0)
def test_gc_in_destructor(self):
"""
Previously, if a Function destructor triggered a garbage collection,
the Variable's tp_dealloc handler would get called twice leading to a
segfault.
"""
class CollectOnDelete(Function):
def forward(self, x):
return x
def backward(self, grad_output):
return grad_output
def __del__(self):
gc.collect()
for _ in range(10):
CollectOnDelete().forward(torch.randn(1, requires_grad=True)).backward()
# Delete this test when legacy custom autograd functions are deleted.
def test_naughty_legacy_variable_grad_fn(self):
class Id(Function):
def forward(self, x):
return x
def backward(self, grad_x):
return grad_x
self.assertRaises(RuntimeError, lambda: Variable(torch.zeros(1), _grad_fn=Id()))
# Delete this test when legacy custom autograd functions are deleted.
def test_naughty_legacy_function_backward_before_forward(self):
class Id(Function):
def forward(self, x):
return x
def backward(self, grad_x):
return grad_x
f = Id()
self.assertRaises(RuntimeError, lambda: f._do_backward((torch.zeros(0), ), False))
# Delete this test when legacy custom autograd functions are deleted.
def test_naughty_legacy_function_early_access(self):
class Id(Function):
def forward(self, x):
return x
def backward(self, grad_x):
return grad_x
f = Id()
# A legacy autograd function is not fully initialized until you actually
# apply it. That means a lot of accessors on them don't actually work.
# Test that we properly error in this case.
self.assertRaises(RuntimeError, lambda: f.register_hook(lambda x, y: None))
self.assertRaises(RuntimeError, lambda: f.next_functions)
self.assertRaises(RuntimeError, lambda: f.metadata)
@unittest.expectedFailure
def test_naughty_anomaly_access(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, g):
return g
x = torch.zeros(1, requires_grad=True)
y = MyFunction.apply(x)
y.backward()
y.grad_fn.metadata
g = y.grad_fn
del y
g.metadata # this currently fails, but shouldn't
def test_naughty_autograd_function_stashing_ctx(self):
saved_ctx = []
class Id(Function):
@staticmethod
def forward(ctx, x):
ctx.save_for_backward(x)
return x
@staticmethod
def backward(ctx, grad_x):
saved_ctx.append(ctx)
return ctx.saved_tensors
p = torch.zeros(1, requires_grad=True)
loss = Id.apply(p)
loss.backward(retain_graph=True)
del loss
# At this point in time, it complains that the graph has been freed
# (which is indeed true, although a somewhat indirect way of stating the
# problem).
self.assertRaises(RuntimeError, lambda: saved_ctx[0].saved_tensors)
def test_custom_autograd_repeated_grad_grad(self):
# This test failed the equality check in PR #22983; it's an interesting
# and different test case worth enshrining. mult1 is not testing
# anything that interesting, but mult2 is the interesting case.
def mult1(x):
return x.prod(dim=-1).prod(dim=-1)
class Mult(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
y = mult1(x)
ctx.save_for_backward(x, y)
return y
@staticmethod
def backward(ctx, grad_output):
x, y = ctx.saved_tensors
return (grad_output * y)[:, None, None] / x
mult2 = Mult.apply
def check_gradgrad_repeated(x, y):
gy, = torch.autograd.grad(y[0], x, create_graph=True)
ggy_1, = torch.autograd.grad(gy[0, 0, 0], x, retain_graph=True)
gy, = torch.autograd.grad(y[0], x, create_graph=True)
ggy_2, = torch.autograd.grad(gy[0, 0, 0], x, retain_graph=True)
self.assertEqual(ggy_1[0, 0, 1], ggy_2[0, 0, 1])
x = torch.ones(2, 4, 4).requires_grad_()
check_gradgrad_repeated(x, mult1(x))
check_gradgrad_repeated(x, mult2(x))
def test_custom_autograd_no_early_free(self):
# This test failed complaining that buffers had already been freed
# prior to #22983. Also pretty interesting test case.
class Double(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
y = x ** 2
ctx.save_for_backward(x, y)
return y
@staticmethod
def backward(ctx, grad_output):
x, _ = ctx.saved_tensors
return grad_output * 2 * x
# this is equivalent, but uses the output of .forward() in .backward()
class Double2(Double):
@staticmethod
def backward(ctx, grad_output):
x, y = ctx.saved_tensors
return grad_output * 2 * y / x
double = Double.apply
double2 = Double2.apply
x = torch.tensor(2).double().requires_grad_()
self.assertTrue(gradcheck(double, x))
self.assertTrue(gradgradcheck(double, x))
self.assertTrue(gradcheck(double2, x))
self.assertTrue(gradgradcheck(double2, x))
y = double(x)
torch.autograd.grad(y, x, create_graph=True)
torch.autograd.grad(y, x)
y = double2(x)
torch.autograd.grad(y, x, create_graph=True)
torch.autograd.grad(y, x) # should not error!
def test_detach(self):
x = torch.randn(10, 10, requires_grad=True)
y = x + 2
y = y.detach()
z = y * 4 + 2
self.assertFalse(y.requires_grad)
self.assertFalse(z.requires_grad)
x = torch.randn(10, 10, requires_grad=True)
y = x * 2
y = y.detach()
self.assertFalse(y.requires_grad)
self.assertIsNone(y.grad_fn)
z = x + y
z.sum().backward()
# This is an incorrect gradient, but we assume that's what the user
# wanted. detach() is an advanced option.
self.assertEqual(x.grad, torch.ones(10, 10))
# in-place detach
x = torch.randn(10, 10, requires_grad=True)
y = torch.randn(10, 10, requires_grad=True)
a = x * 2
(y + a).sum().backward(retain_graph=True)
a.detach_()
self.assertFalse(a.requires_grad)
(y + a).sum().backward() # this won't backprop to x
self.assertEqual(x.grad, torch.ones(10, 10) * 2)
self.assertEqual(y.grad, torch.ones(10, 10) * 2)
# in-place detach on a view raises an exception
view = x.narrow(0, 1, 4)
self.assertRaisesRegex(RuntimeError, 'view', lambda: view.detach_())
def test_detach_base(self):
"detaching base does not detach view"
x = torch.randn(10, 10, requires_grad=True)
view = x.narrow(0, 1, 4)
x.detach_()
self.assertFalse(x.requires_grad)
self.assertTrue(view.requires_grad)
self.assertIsNotNone(view.grad_fn)
self.assertIs(view._base, x)
def _test_type_conversion_backward(self, t, ):
fvar = Variable(t(torch.randn(5, 5).float()), requires_grad=True)
fvar.double().sum().backward()
self.assertEqual(fvar.grad, torch.ones_like(fvar))
self.assertEqual(type(fvar.grad), type(fvar))
dvar = Variable(t(torch.randn(5, 5).double()), requires_grad=True)
dvar.float().sum().backward()
self.assertEqual(dvar.grad, torch.ones_like(dvar))
self.assertEqual(type(dvar.grad), type(dvar))
def test_type_conversions(self):
x = torch.randn(5, 5)
self.assertIsInstance(x.float(), torch.FloatTensor)
self.assertIsInstance(x.int(), torch.IntTensor)
if torch.cuda.is_available():
self.assertIsInstance(x.float().cuda(), torch.cuda.FloatTensor)
self.assertIsInstance(x.int().cuda(), torch.cuda.IntTensor)
self.assertIsInstance(x.int().cuda().cpu(), torch.IntTensor)
if torch.cuda.device_count() >= 2:
x2 = x.float().cuda(1)
self.assertIsInstance(x2, torch.cuda.FloatTensor)
self.assertIs(x2.get_device(), 1)
x2 = x.float().cuda()
self.assertIsInstance(x2, torch.cuda.FloatTensor)
self.assertIs(x2.get_device(), 0)
x2 = x2.cuda(1)
self.assertIsInstance(x2, torch.cuda.FloatTensor)
self.assertIs(x2.get_device(), 1)
y = Variable(torch.randn(5).cuda(1), requires_grad=True)
y.cpu().sum().backward()
self.assertIs(y.grad.get_device(), 1)
self.assertIs(y.long().get_device(), 1)
for t in [torch.DoubleTensor, torch.FloatTensor, torch.IntTensor, torch.ByteTensor]:
for y_var in (True, False):
y = torch.randint(5, (5, 5), dtype=t.dtype)
y = Variable(y) if y_var else y
self.assertIsInstance(x.type(t), t)
self.assertIsInstance(x.type_as(y), t)
# TODO: t.dtype should work
t_dtype = t().dtype
self.assertIsInstance(x.type(t_dtype), t)
self.assertIs(t_dtype, x.type(t_dtype).dtype)
self.assertEqual(y.data_ptr(), y.type(t).data_ptr())
if torch.cuda.is_available():
for x_cuda in (True, False):
for y_cuda in (True, False):
x_c = x.cuda() if x_cuda else x
y_c = y.cuda() if y_cuda else y
_, y_type = y_c.type().rsplit('.', 1)
y_typestr = ('torch.cuda.' if y_cuda else 'torch.') + y_type
self.assertEqual(y_c.type(), x_c.type(y_typestr).type())
self.assertIs(y_c.dtype, x_c.type(y_c.dtype).dtype)
self.assertEqual(y_c.data_ptr(), y_c.cuda().data_ptr() if y_cuda else y_c.data_ptr())
self._test_type_conversion_backward(lambda x: x)
if torch.cuda.is_available():
self._test_type_conversion_backward(lambda x: x.cuda())
if torch.cuda.device_count() >= 2:
# one of these has to be the non-default device
self._test_type_conversion_backward(lambda x: x.cuda(0))
self._test_type_conversion_backward(lambda x: x.cuda(1))
def test_isolated_node(self):
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5, 5, requires_grad=True)
a = x + y
b = torch.max(a, 1, True)[1].repeat(1, 5).double()
o = (b + a).sum()
o.backward()
def test_shape(self):
x = torch.randn(3, 4)
self.assertEqual(2, len(x.shape))
self.assertEqual(x.shape[0], 3)
self.assertEqual(x.shape[1], 4)
def test_numpy_requires_grad(self):
x = torch.randn(2, 2, requires_grad=True)
err_msg_outputs = r"Can't call numpy\(\) on Tensor that requires grad. Use tensor.detach\(\).numpy\(\) instead."
with self.assertRaisesRegex(RuntimeError, err_msg_outputs):
x.numpy()
with torch.no_grad():
x.numpy()
x = torch.randn(2, 2)
x.numpy()
with torch.no_grad():
x.numpy()
def test_return_leaf(self):
class Identity(Function):
@staticmethod
def forward(ctx, a, b):
return a, a + b
@staticmethod
def backward(ctx, grad_a, grad_b):
return grad_a + grad_b, grad_b
hook_called = [False]
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5, 5, requires_grad=True)
q, p = Identity.apply(x, y)
# Make sure hooks only receive grad from usage of q, not x.
def hook(grad):
hook_called[0] = True
self.assertEqual(grad, torch.ones(5, 5))
q.register_hook(hook)
(q + p + x).sum().backward()
self.assertEqual(x.grad, torch.ones(5, 5) * 3)
self.assertEqual(y.grad, torch.ones(5, 5))
self.assertTrue(hook_called[0])
def test_return_leaf_inplace(self):
class Inplace(InplaceFunction):
@staticmethod
def forward(ctx, a, b):
ctx.mark_dirty(a)
return a.add_(b), b + 2
@staticmethod
def backward(ctx, grad_a, grad_b):
return grad_a, grad_a + grad_b
x = torch.randn(5, 5)
y = torch.randn(5, 5, requires_grad=True)
fn = Inplace(True)
q, p = fn.apply(x, y)
self.assertIs(q, x)
self.assertIs(q.grad_fn.__class__, fn._backward_cls)
self.assertTrue(q.requires_grad)
q.sum().backward()
self.assertEqual(y.grad, torch.ones(5, 5))
def test_leaf_assignment(self):
x = torch.randn(5, 5)
y = torch.randn(5, requires_grad=True)
z = torch.randn(5, requires_grad=True)
x[0] = y
x[1] = 2 * z
self.assertTrue(x.requires_grad)
self.assertIsNot(x.grad_fn, None)
x.sum().backward()
self.assertEqual(y.grad, torch.ones(5))
self.assertEqual(z.grad, torch.ones(5) * 2)
def test_no_grad_assignment(self):
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5)
with torch.no_grad():
x[0] = y
self.assertTrue(x.requires_grad)
self.assertIsNone(x.grad_fn)
def test_no_grad_modifies_version(self):
x = torch.randn(5, requires_grad=True)
y = torch.randn(5, requires_grad=True)
z = (x * y).sum()
with torch.no_grad():
x *= 2
self.assertRaisesRegex(RuntimeError, 'modified by an inplace operation',
lambda: z.backward())
def test_no_grad_input(self):
class MyFunction(Function):
@staticmethod
def forward(self, x):
return x
@staticmethod
def backward(self, grad_output):
return grad_output
x = torch.randn(5, requires_grad=True)
with torch.no_grad():
y = MyFunction.apply(x)
self.assertTrue(x.requires_grad)
self.assertIsNone(y.grad_fn)
def test_backward_copy(self):
# This test checks the backward engine for a very subtle bug that appeared
# in one of the initial versions of autograd. Gradient tensors were
# simply stored in lists while the function waited for all its gradients
# to be computed. However, sometimes an output was used multiple times,
# so the gradients needed to be summed. The engine used to keep a need_copy
# set of tensors that would need a clone upon the next addition, and removed
# them from the set as soon as the clone was performed. However, this
# could lead to incorrect results if the same gradient tensor was
# buffered in three places in the graph:
# 1. When accumulating gradients in one of these places it was cloned
# and removed from the need_copy set.
# 2. When accumulating in the second place, it wasn't in the need_copy set,
# so the gradients were simply accumulated in-place (which already
# modified the grad in the 3rd place)
# 3. When accumulating in the third place, it wasn't in the need_copy set
# either, so the incoming gradient was summed in-place, yielding
# incorrect results in all functions, except the first one.
x = torch.ones(5, 5, requires_grad=True)
y = torch.ones(5, 5, requires_grad=True)
# Simulate that we're in the middle of the graph
a = x + 2
b = y + 2
c = x + 2
# This op will just return grad_output two times in backward
add1 = a + b
add2 = add1 + c
# Simulate a long branch, so grad_output will get buffered.
for _ in range(4):
a = a * 2
b = b * 2
c = c * 2
branch = a + b + c
out = add2 + branch
# expected gradients are:
# for x: 34 (16 from final a, 16 from final c, 2 from add2)
# for y: 17 (16 from final b, 1 from add2)
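# (a, b and c each start as input + 2 and are doubled four times in the loop,
# hence the 2**4 = 16 factors; add2 = (a + b) + c uses the pre-loop tensors and
# contributes 1 through a and 1 through c to x, and 1 through b to y)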
grad_output = torch.ones(5, 5)
out.backward(grad_output)
self.assertEqual(x.grad, torch.ones(5, 5) * 34)
self.assertEqual(y.grad, torch.ones(5, 5) * 17)
def test_save_none_for_backward(self):
test_case = self
class MyFn(Function):
@staticmethod
def forward(ctx, input):
ctx.save_for_backward(None, input, None)
return input * input
@staticmethod
def backward(ctx, grad_output):
n1, input, n2 = ctx.saved_tensors
test_case.assertIsNone(n1)
test_case.assertIsNone(n2)
return 2 * input * grad_output
x = torch.randn(5, 5, requires_grad=True)
y = MyFn.apply(x)
y.sum().backward()
self.assertEqual(x.grad, 2 * x)
def test_too_many_grads(self):
class MyFn(Function):
@staticmethod
def forward(ctx, input):
return input
@staticmethod
def backward(ctx, grad_output):
return grad_output, None, None
x = torch.randn(5, 5, requires_grad=True)
y = MyFn.apply(x)
y.sum().backward()
self.assertEqual(x.grad, torch.ones_like(x))
def test_pickle(self):
x = torch.randn(10, 10, requires_grad=True)
y = torch.randn(10, 10, requires_grad=False)
def assert_strict_equal(var1, var2):
self.assertEqual(var1, var2)
self.assertEqual(var1.requires_grad, var2.requires_grad)
serialized = [pickle.dumps([x, y], protocol=p) for p in range(3)]
for dump in serialized:
xc, yc = pickle.loads(dump)
assert_strict_equal(xc, x)
assert_strict_equal(yc, y)
def test_dep_nograd(self):
class F1(Function):
@staticmethod
def forward(ctx, input):
out = torch.randn(input.size())
ctx.mark_non_differentiable(out)
return input, out
@staticmethod
def backward(ctx, grad_output, ignored):
return grad_output
class F2(Function):
@staticmethod
def forward(ctx, input, ignored):
return input
@staticmethod
def backward(ctx, grad_output):
return grad_output, None
x = torch.randn(5, requires_grad=True)
a, b = F1.apply(x)
b = b + 1 # separate F1 from F2 by another op
self.assertTrue(a.requires_grad)
self.assertFalse(b.requires_grad)
c = F2.apply(a, b)
c.backward(torch.ones(c.size()))
self.assertEqual(x.grad, torch.ones(x.size()))
def test_set_grad_enabled(self):
x = torch.tensor([1.], requires_grad=True)
with torch.set_grad_enabled(False):
y = x * 2
self.assertFalse(y.requires_grad)
with torch.set_grad_enabled(True):
y = x * 2
self.assertTrue(y.requires_grad)
with torch.set_grad_enabled(False):
torch.set_grad_enabled(True)
y = x * 2
self.assertTrue(y.requires_grad)
def test_simple_reentrant(self):
y_data = torch.randn(2, 2)
class Reenter(Function):
@staticmethod
def forward(ctx, x):
with torch.enable_grad():
ctx.x = Variable(x, requires_grad=True)
ctx.y = Variable(y_data, requires_grad=True)
ctx.output_var = ctx.x * ctx.y
return ctx.output_var.detach()
@staticmethod
def backward(ctx, grad_output):
with torch.enable_grad():
ctx.output_var.sum().backward()
return ctx.x.grad * grad_output
# Reentrant starts on CPU thread, finishes on GPU thread
x = torch.randn(2, 2, requires_grad=True)
out = Reenter.apply(x)
out.sum().backward()
self.assertEqual(x.grad, y_data)
def test_reentrant_child_error(self):
# Parent graph.
a = torch.rand(3, 3, requires_grad=True)
c = a * a
# Reentrant child graph.
b = torch.rand(3, 3, requires_grad=True)
e = b * b
f = TestAutograd.SimulateBackwardError.apply(e)
reentrant_root = f.sum()
class ReentrantFunc(Function):
@staticmethod
def forward(ctx, inp):
return inp.clone()
@staticmethod
def backward(ctx, grad):
# Reentrant backward in child will throw an error.
reentrant_root.backward()
return grad
d = ReentrantFunc.apply(c)
with self.assertRaisesRegex(Exception, 'Simulate error'):
d.sum().backward()
def test_broadcast_tensors(self):
f_args_variable = (torch.randn(3, requires_grad=True),
torch.randn(1, 2, 1, requires_grad=True),
torch.randn(1, 1, requires_grad=True),
torch.randn(5, 1, 1, requires_grad=True))
f_args_tensor = deepcopy(unpack_variables(f_args_variable))
run_functional_checks(self, "test_broadcast_tensors", "broadcast",
lambda a, b, c, d: torch.broadcast_tensors(a, b, c, d),
True, f_args_variable, f_args_tensor)
def test_block_diag(self):
f_args_variable = (torch.randn(1, S, requires_grad=True),
torch.randn(2, S, requires_grad=True),
torch.randn(3, S, requires_grad=True))
f_args_tensor = deepcopy(unpack_variables(f_args_variable))
run_functional_checks(self, "test_block_diag", "block_diag",
lambda a, b, c: torch.block_diag(a, b, c),
True, f_args_variable, f_args_tensor)
def test_cat(self):
f_args_variable = (torch.randn(1, S, S, requires_grad=True),
torch.randn(2, S, S, requires_grad=True),
torch.randn(3, S, S, requires_grad=True),
0)
f_args_tensor = deepcopy(unpack_variables(f_args_variable))
run_functional_checks(self, "test_cat", "cat",
lambda a, b, c, dim: torch.cat((a, b, c), dim),
True, f_args_variable, f_args_tensor)
def test_cat_negdim_1(self):
f_args_variable = (torch.randn(S, S, 1, requires_grad=True),
torch.randn(S, S, 2, requires_grad=True),
torch.randn(S, S, 3, requires_grad=True),
-1)
f_args_tensor = deepcopy(unpack_variables(f_args_variable))
run_functional_checks(self, "test_cat_negdim_1", "cat",
lambda a, b, c, dim: torch.cat((a, b, c), dim),
True, f_args_variable, f_args_tensor)
def test_cat_negdim_2(self):
f_args_variable = (torch.randn(S, 1, S, requires_grad=True),
torch.randn(S, 2, S, requires_grad=True),
torch.randn(S, 3, S, requires_grad=True),
-2)
f_args_tensor = deepcopy(unpack_variables(f_args_variable))
run_functional_checks(self, "test_cat_negdim_2", "cat",
lambda a, b, c, dim: torch.cat((a, b, c), dim),
True, f_args_variable, f_args_tensor)
def test_cat_empty_legacy(self):
f_args_variable = (torch.randn(0, requires_grad=True),
torch.randn(S, S, requires_grad=True))
# gradgradcheck doesn't work, probably because legacy size tracking is wrong somewhere,
# hence False is passed below, but gradcheck is checked explicitly.
f_args_tensor = deepcopy(unpack_variables(f_args_variable))
run_functional_checks(self, "test_cat_empty_legacy", "cat",
lambda a, b: torch.cat((a, b)),
False, f_args_variable, f_args_tensor)
self.assertTrue(gradcheck(lambda a, b: torch.cat((a, b)), f_args_variable, eps=1e-6, atol=PRECISION))
def test_cat_empty(self):
f_args_variable = (torch.randn(0, S, requires_grad=True),
torch.randn(S, S, requires_grad=True))
f_args_tensor = deepcopy(unpack_variables(f_args_variable))
run_functional_checks(self, "test_cat_empty", "cat",
lambda a, b: torch.cat((a, b)),
True, f_args_variable, f_args_tensor)
def test_trapz(self):
f_args_variable = (torch.randn(2, 3, requires_grad=True),
torch.tensor([[1.0, 2.0, 5.5], [2.3, 0.5, 6.2]], requires_grad=True))
f_args_tensor = deepcopy(unpack_variables(f_args_variable))
run_functional_checks(self, "test_trapz", "trapz",
lambda y, x: torch.trapz(y, x),
True, f_args_variable, f_args_tensor)
def test_var_mean_differentiable(self):
dim = [2, 4]
keepdim = False
input1 = torch.randn(3, 4, 5, 6, 2, 3, requires_grad=True)
input2 = deepcopy(input1)
var1, mean1 = torch.var_mean(input1, dim=dim, keepdim=keepdim)
var2 = input2.var(dim=dim, keepdim=keepdim)
mean2 = input2.mean(dim=dim, keepdim=keepdim)
grad = torch.randn(3, 4, 6, 3, requires_grad=True)
r1 = var1 * var1 * mean1 * mean1
r2 = var2 * var2 * mean2 * mean2
self.assertTrue(torch.allclose(r1, r2, rtol=0.01, atol=0.0))
torch.autograd.backward(r1, grad)
torch.autograd.backward(r2, grad)
self.assertTrue(torch.allclose(input1.grad, input2.grad, rtol=0.01, atol=0.0))
@skipIfNoLapack
def test_symeig(self):
def func(root, upper):
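# Symmetrize the input first: gradcheck perturbs each entry of root independently,
# and symeig (and its gradient formula) assumes a symmetric matrix.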
x = 0.5 * (root + root.transpose(-2, -1))
return torch.symeig(x, eigenvectors=True, upper=upper)
def run_test(upper, dims):
root = torch.rand(*dims, requires_grad=True)
gradcheck(func, [root, upper])
gradgradcheck(func, [root, upper])
root = random_symmetric_matrix(dims[-1], *dims[:-2]).requires_grad_()
w, v = root.symeig(eigenvectors=True)
(w.sum() + v.sum()).backward()
self.assertEqual(root.grad, root.grad.transpose(-1, -2)) # Check the gradient is symmetric
for upper, dims in product([True, False], [(3, 3), (5, 3, 3), (4, 3, 2, 2)]):
run_test(upper, dims)
@slowTest
@skipIfNoLapack
def test_lobpcg(self):
def func(k, A, largest=True, B=None):
X_shape = list(A.shape)
X_shape[-1] = k
X = torch.eye(A.size(-2), k, dtype=A.dtype, device=A.device)
if A.dim() > 2:
X = X.expand(X_shape)
D, U = torch.lobpcg(A=A, k=k, B=B, X=X)
# LOBPCG uses a random initial eigenspace approximation
# if parameter `X` is not provided.
# This may cause non-deterministic behavior
# when it comes to the sign of an eigenvector
# (note if v is an eigenvector, so is -v),
# hence we eliminate this non-determinism
# by making sure that each column of U
# gets multiplied by the sign of its max (in absolute value) element.
# Also, gradcheck changes the content of the input by +/- eps (which defaults to 1e-06)
# to compute the numerical gradient which can also cause the signs to flip.
_, idx = U.abs().max(-2, keepdim=True)
sign = U.gather(-2, idx).sign()
U = U * sign
return D, U
def run_symeig_test(k, sizes, largest=True):
A = torch.rand(*sizes).double()
A = A.matmul(A.transpose(-1, -2)) / 10
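# A @ A.T makes the matrix symmetric positive semi-definite, a valid input for
# lobpcg; the division by 10 presumably just keeps the entries small.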
A.requires_grad_(True)
gradcheck(lambda A: func(k, A, largest), A, check_batched_grad=False)
# Custom gradient vectors for better stability due to some
# non-determinism in lobpcg's forward.
# Note this is not required if symeig is used in the forward instead (tested).
D_grad = torch.rand(*A.shape[:-2], k) / 100
U_grad = torch.rand(*A.shape[:-1], k) / 100
gradgradcheck(lambda A: func(k, A, largest), A, [D_grad, U_grad], atol=1e-4, check_batched_grad=False)
# check whether A.grad is symmetric
A = A.detach().requires_grad_(True)
D, U = func(k, A, largest)
(D.sum() + U.sum()).backward()
self.assertEqual(A.grad, A.grad.transpose(-1, -2))
# the tests below take about 1-2 minutes to finish,
# but we want to be extra sure that the backward is correct.
for largest in [True, False]:
run_symeig_test(1, (6, 6), largest=largest)
run_symeig_test(1, (2, 6, 6), largest=largest)
run_symeig_test(1, (2, 2, 6, 6), largest=largest)
run_symeig_test(2, (6, 6), largest=largest)
run_symeig_test(2, (2, 6, 6), largest=largest)
run_symeig_test(2, (2, 2, 6, 6), largest=largest)
run_symeig_test(3, (9, 9), largest=largest)
run_symeig_test(3, (2, 9, 9), largest=largest)
run_symeig_test(3, (2, 2, 9, 9), largest=largest)
def test_variable_traverse(self):
def get_out_and_unrefed_cycle():
inp = torch.randn(10, requires_grad=True)
tmp = inp.view(10, 1)
out = tmp.view(10)
# Create a reference cycle that contains an
# intermediary Variable in the graph
my_list = []
my_list.append(tmp)
my_list.append(my_list)
return out
out = get_out_and_unrefed_cycle()
gc.collect()
# This will segfault if things have been erroneously released
out.backward(torch.randn(out.size()))
def test_norm_subgradient(self):
def run_test(input_size, norm_deg):
input = torch.zeros(*input_size, requires_grad=True)
input.norm(norm_deg).backward()
self.assertEqual(input.grad.abs().sum(), 0)
run_test((10,), 2)
run_test((10, 10), 2)
run_test((10,), 3)
run_test((10,), 1)
run_test((10,), 1.5)
run_test((10,), inf)
def test_norm_inf_subgradient(self):
def run_test(input, expected, dim=None):
x = torch.tensor(input, requires_grad=True)
out = x.norm(inf, dim=dim, keepdim=True)
out.backward(torch.ones(out.size()))
self.assertEqual(x.grad, expected)
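# The expected subgradient splits the incoming gradient evenly among all entries
# attaining the maximum absolute value, e.g. [1., 0., 1.] has two maximizers, so
# each receives 0.5.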
run_test([0., 0., 0.], [0., 0., 0.])
run_test([1., 0., 1.], [0.5, 0., 0.5])
run_test([[1., 0., 1.], [0., 1., 1.]], [[0.25, 0., 0.25], [0., 0.25, 0.25]])
run_test([[1., 0., 1.], [0., 1., 0.]], [[0.5, 0., 0.5], [0., 1., 0.]], (1,))
run_test(torch.ones((2, 2, 2)), torch.full((2, 2, 2), 0.25), (0, 2))
def test_pow_zero_tensor_gradient(self):
def run_test(input_size, exponent):
input = torch.zeros(*input_size, requires_grad=True)
input.pow(exponent).sum().backward()
self.assertEqual(input.grad.abs().sum(), 0)
run_test((10,), torch.zeros(10))
run_test((10, 10), torch.zeros(10, 10))
run_test((10,), 0)
def test_pow_scalar_base(self):
a = torch.arange(1, 13, dtype=torch.double).view(3, 4).requires_grad_()
gradcheck(lambda a: torch.pow(2, a), (a,))
def test_igamma(self):
# 1e-3 offset to avoid zeros
# NOTE: derivative for s is not implemented
s = (torch.rand(100, dtype=torch.double) + 1e-3)
x = (torch.rand(100, dtype=torch.double) + 1e-3).requires_grad_()
gradcheck(torch.igamma, (s, x))
gradgradcheck(torch.igamma, (s, x))
def test_igammac(self):
# 1e-3 offset to avoid zeros in s
# NOTE: derivative for s is not implemented
s = (torch.rand(100, dtype=torch.double) + 1e-3)
x = (torch.rand(100, dtype=torch.double)).requires_grad_()
gradcheck(torch.igammac, (s, x))
gradgradcheck(torch.igammac, (s, x))
def test_profiler_tracing(self):
t1, t2 = torch.ones(1), torch.ones(1)
with torch.autograd.profiler.profile(use_kineto=kineto_available()) as prof:
torch.add(t1, t2)
with TemporaryFileName(mode="w+") as fname:
prof.export_chrome_trace(fname)
# read the trace and expect valid json
# if the JSON generated by export_chrome_trace is not valid, this will throw and fail the test.
with io.open(fname, 'r') as f:
json.load(f)
# Same test but for cuda.
if not torch.cuda.is_available():
return
device = torch.device("cuda:0")
t1, t2 = torch.ones(1, device=device), torch.ones(1, device=device)
with torch.autograd.profiler.profile(use_cuda=True, use_kineto=kineto_available()) as prof:
torch.add(t1, t2)
with TemporaryFileName(mode="w+") as fname:
prof.export_chrome_trace(fname)
# Now validate the json
with io.open(fname, 'r') as f:
json.load(f)
def test_profiler(self):
x = torch.randn(10, 10)
with profile(use_kineto=kineto_available()) as p:
self.assertTrue(torch.autograd._profiler_enabled())
y = x * 2 + 4
self.assertFalse(torch.autograd._profiler_enabled())
last_end = 0
names = ['aten::mul', 'aten::to', 'aten::empty_strided', 'aten::copy_',
'aten::empty', 'aten::add', 'aten::to', 'aten::empty_strided',
'aten::copy_', 'aten::empty']
top_level_names = ['aten::mul', 'aten::add']
for evt in p.function_events:
if evt.time_range.start > last_end:
self.assertTrue(evt.name in top_level_names)
last_end = evt.time_range.end
self.assertTrue(evt.name in names)
def test_profiler_seq_nr(self):
with profile(use_kineto=kineto_available()) as p:
x = torch.randn(10, 10, requires_grad=True)
y = torch.randn(10, 10, requires_grad=True)
z = x + y
s = z.sum()
s.backward()
print(p.key_averages().table(
sort_by="self_cpu_time_total", row_limit=-1))
# expecting aten::add, aten::sum to have the sequence numbers,
# expecting the corresponding backward nodes to have the same numbers
# as the forward ops
add_seq_nr = -1
sum_seq_nr = -1
found_add = found_sum = False
found_bwd_add = found_bwd_sum = False
found_empty = False
for e in p.function_events:
if e.name == "aten::add":
add_seq_nr = e.sequence_nr
self.assertFalse(found_add)
found_add = True
elif e.name == "aten::sum":
sum_seq_nr = e.sequence_nr
self.assertFalse(found_sum)
found_sum = True
elif "Add" in e.name and "Backward" in e.name:
self.assertEqual(e.sequence_nr, add_seq_nr)
self.assertFalse(found_bwd_add)
found_bwd_add = True
elif "Sum" in e.name and "Backward" in e.name:
self.assertEqual(e.sequence_nr, sum_seq_nr)
self.assertFalse(found_bwd_sum)
found_bwd_sum = True
# check that nested ops (e.g. empty) don't have
# a sequence number
if e.name == "aten::empty":
self.assertEqual(e.sequence_nr, -1)
found_empty = True
self.assertGreaterEqual(add_seq_nr, 0)
self.assertGreaterEqual(sum_seq_nr, 0)
self.assertNotEqual(add_seq_nr, sum_seq_nr)
self.assertTrue(found_add)
self.assertTrue(found_sum)
self.assertTrue(found_bwd_add)
self.assertTrue(found_bwd_sum)
self.assertTrue(found_empty)
def test_profiler_unboxed_only(self):
x = torch.rand(3, 4)
with torch.autograd.profiler.profile(use_kineto=kineto_available()) as prof:
x.resize_([3, 2])
def test_profiler_propagation(self):
def foo(x):
with record_function("in_foo") as rf:
return x * 2
x = torch.rand(3, 4)
traced_foo = torch.jit.trace(foo, x)
def bar(x):
with record_function("in_bar") as rf:
# we expect that the profiler will be able to
# propagate across the fork
fut = torch.jit._fork(traced_foo, x)
y = torch.jit._wait(fut)
# note: continuation (and rf's end) can
# be executed in a different thread
with record_function("in_bar_after_wait") as rf2:
y = y * 2
return y
traced_bar = torch.jit.trace(bar, x)
with profile(use_kineto=kineto_available()) as p:
traced_bar(x)
found_foo = False
found_bar = False
found_bar_after_wait = False
for info in p.function_events:
if info.name == "in_foo":
self.assertFalse(found_foo)
found_foo = True
elif info.name == "in_bar":
self.assertFalse(found_bar)
found_bar = True
elif info.name == "in_bar_after_wait":
self.assertFalse(found_bar_after_wait)
found_bar_after_wait = True
self.assertTrue(found_foo)
self.assertTrue(found_bar)
self.assertTrue(found_bar_after_wait)
def test_record_function_callbacks(self):
x = torch.randn(10, 10)
with profile(use_kineto=kineto_available()) as p:
with record_function("foo"):
y = x * 2 + 4
function_events = p.function_events
foo_event = [event for event in function_events if "foo" in event.name][0]
self.assertEqual(foo_event.count, 1)
def test_profiler_aggregation_fake(self):
events = EventList()
id = [0]
def get_id():
id[0] = id[0] + 1
return id[0]
# [[thread_id, [(start, end, id), ....]], ...]
# Using a list instead of a dict so order is guaranteed for any Python
# version
threads = [
[1, [(0, 1, get_id()), (1, 2, get_id())]],
[0, [(0, 2, get_id()), (1, 2, get_id()), (1, 3, get_id())]],
]
for thread, ranges in threads:
for range in ranges:
assert(len(range) == 3)
events.append(
FunctionEvent(
id=range[2],
node_id=0,
name="",
thread=thread,
start_us=range[0],
end_us=range[1],
)
)
events._populate_cpu_children()
# Note that [1, 3] pushes out [0, 2] first. Then we record [1, 2]
# as a child of [1, 3]
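# res lists the expected cpu_children ids for each event in insertion order:
# only the [1, 3] range (the fifth event) ends up with a child, namely the
# [1, 2] event on thread 0, whose id is 4.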
res = [[], [], [], [], [4]]
def get_children_ids(event):
return [child.id for child in event.cpu_children]
assert([get_children_ids(event) for event in events] == res)
def test_profiler_aggregation_table(self):
"""
Test if the profiling result is aggregated for `str(prof)`
See: https://github.com/pytorch/pytorch/issues/37500
"""
x = torch.randn(1024)
with torch.autograd.profiler.profile(use_kineto=kineto_available()) as prof:
torch.einsum("i->", x)
prof_str = str(prof)
prof_table = prof.table()
self.assertEqual(prof_table, prof_str)
def test_profiler_function_event_avg(self):
avg = FunctionEventAvg()
avg.add(FunctionEvent(id=0, node_id=0, name="foo", thread=0, start_us=10, end_us=15))
avg.add(FunctionEvent(id=1, node_id=0, name="foo", thread=0, start_us=20, end_us=30))
avg.add(avg)
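# The two events run for 5us and 10us (15us total); avg.add(avg) doubles the
# aggregates, so count == 4, cpu_time_total == 30 and cpu_time == 30 / 4 == 7.5.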
self.assertEqual(avg.key, "foo")
# aggregate stats
self.assertEqual(avg.count, 4)
self.assertEqual(avg.cpu_time_total, 30)
self.assertEqual(avg.self_cpu_time_total, 30)
self.assertEqual(avg.cuda_time_total, 0)
# average stats
self.assertEqual(avg.cpu_time, 7.5)
self.assertEqual(avg.cuda_time_total, 0)
def test_profiler_shapes(self):
print("")
layer1 = torch.nn.Linear(20, 30)
layer2 = torch.nn.Linear(30, 40)
input = torch.randn(128, 20)
with profile(record_shapes=True, use_kineto=kineto_available()) as prof:
layer2(layer1(input))
print(prof.function_events)
top_level_expected_events_and_shapes = [
('aten::linear', [[128, 20], [30, 20], [30]]),
('aten::linear', [[128, 30], [40, 30], [40]])
]
expected_iter = iter(top_level_expected_events_and_shapes)
last_end = 0
for event in prof.function_events:
if event.time_range.start > last_end:
name_expected, input_shape_expected = next(expected_iter)
if name_expected is not None:
self.assertEqual(event.name, name_expected)
self.assertEqual(event.input_shapes, input_shape_expected)
last_end = event.time_range.end
def test_profiler_no_cuda(self):
print("")
layer = torch.nn.Linear(20, 30)
x = torch.randn(128, 20)
with profile(use_cuda=False, use_kineto=kineto_available()) as prof:
layer(x)
prof_str = str(prof)
print(prof_str)
self.assertTrue('cpu' in prof_str.lower())
self.assertTrue('cuda' not in prof_str.lower())
def test_profiler_aggregation_lstm(self):
print("")
rnn = torch.nn.LSTM(10, 20, 2)
total_time_s = 0
with profile(record_shapes=True, use_kineto=kineto_available()) as prof:
for i in range(20):
input = torch.randn(5, 3, 10)
h = torch.randn(2, 3, 20)
c = torch.randn(2, 3, 20)
start = time.time()
rnn(input, (h, c))
end = time.time()
total_time_s += end - start
print(prof.table(
sort_by="self_cpu_time_total", row_limit=10, header="TEST"))
print(prof.key_averages(group_by_input_shape=True).table(
sort_by="self_cpu_time_total", row_limit=10))
print(prof.table(
sort_by="self_cpu_time_total", row_limit=10, max_src_column_width=300, header="TEST", top_level_events_only=True))
print(prof.key_averages(group_by_input_shape=True).table(
sort_by="self_cpu_time_total", row_limit=10, top_level_events_only=True))
total_time_us = total_time_s * 1000.0 * 1000.0  # convert to microseconds, the profiler's default unit
print(
"Total time based on python measurements: ",
format_time(total_time_us)
)
print(
"CPU time measurement python side overhead: {:.2f}%".format(
(total_time_us / prof.self_cpu_time_total - 1.0) * 100.0
)
)
if sys.platform != "win32":
with tempfile.NamedTemporaryFile() as trace_file:
prof.export_chrome_trace(trace_file.name)
def test_record_function(self):
x = torch.randn(10, 10)
def forward(x):
with record_function("outer"):
y = x * 2 + 4
with record_function("inner"):
y = y - 1
y = y / 1
forward(x)
with profile(use_kineto=kineto_available()) as p:
forward(x)
events = p.function_events
important_events = [
'outer',
'aten::mul',
'aten::add',
'inner',
'aten::sub',
'aten::div'
]
idx = 0
for info in events:
if info.name == important_events[idx]:
idx = idx + 1
if idx == len(important_events):
break
self.assertEqual(idx, len(important_events))
# We can also use record_function to decorate arbitrary functions
@record_function('my_func')
def f(x, y):
return x + y
with profile(use_kineto=kineto_available()) as p:
f(1, 2)
self.assertTrue('my_func' in str(p))
def test_record_function_multithreaded(self):
rf = record_function("outer")
rf.__enter__()
with record_function("inner"):
# test that exiting the record function after starting another one
# doesn't throw.
rf.__exit__(None, None, None)
with record_function("inner"):
rf.__enter__()
# test that exiting the record function after ending another one
# doesn't throw.
rf.__exit__(None, None, None)
def test_dir(self):
x = torch.randn(10, 10)
keys = dir(x)
self.assertIn('shape', keys)
# real and imag are only implemented for complex tensors.
y = torch.randn(10, 10, dtype=torch.cfloat)
for key in ['real', 'imag']:
self.assertRaises(RuntimeError, lambda: hasattr(x, key))
self.assertTrue(hasattr(y, key))
keys.remove(key)
for key in keys:
self.assertTrue(hasattr(x, key))
def test_as_strided(self):
def test(x, prepro_fn, size, strides, offset=None):
x = x.to(torch.double).detach().requires_grad_()
# Check that forward will **not** resize storage, because doing so may
# produce NaN in the output and consequently fail the numerical Jacobian check
with torch.no_grad():
y = prepro_fn(x) if prepro_fn is not None else x
max_offset = sum((si - 1) * st for si, st in zip(size, strides))
max_offset += offset if offset is not None else y.storage_offset()
assert max_offset < len(y.storage()), "test case resizes storage"
def closure(x):
if prepro_fn is not None:
x = prepro_fn(x)
return x.as_strided(size, strides, offset)
gradcheck(closure, [x])
gradgradcheck(closure, [x])
# test
test(torch.arange(0, 25), lambda x: x.view(5, 5), [3, 3], [6, 2], 2)
# test crazy stride at dim with size 1 case
test(torch.randn(12), None, [1, 2, 1, 5], [0, 5, 100, 1], 2)
# test expand case
test(torch.randn(5), None, [3, 3, 3], [0, 1, 0], 2)
test(torch.randn(5), None, [3, 3, 3], [0, 0, 0], 4)
test(torch.randn(5), lambda x: x.expand(5, 5), [5, 5], [0, 1], 0)
# test non-expand overlapping case
test(torch.randn(35), None, [6, 6], [5, 1], 2)
test(torch.randn(15), None, [3, 2], [3, 6], 2)
# test transpose case
test(torch.randn(3, 4), None, [4, 3], [1, 4])
# test "getting things outside the input" case
x = torch.randn(6, 2)
test(x[3:], None, [3, 2], [2, 1], 0) # should be all zeros
self.assertEqual(x[3:].as_strided([3, 2], [2, 1], 0), x[:3])
# test select on expanded input case
test(torch.randn(2, 3), lambda x: x.expand(10, 2, 3), [2, 3], [3, 1], 0)
def _test_lerp_tensor_weights(self, cast):
def construct_inputs(*shapes):
start = cast(torch.randn(shapes[0])).requires_grad_()
end = cast(torch.randn(shapes[1])).requires_grad_()
weight = cast(torch.randn(shapes[2])).requires_grad_()
return [start, end, weight]
all_test_shapes = [((3, 3, 3), (3, 3, 3), (3, 3, 3)), # no broadcasting
((3,), (3, 3, 3), (3, 3, 3)), # start broadcasting - 1
((3, 3, 3), (3,), (3, 3, 3)), # end broadcasting - 1
((3, 3, 3), (3, 3, 3), (3,)), # weight broadcasting - 1
((), (3, 3, 3), (3, 3, 3)), # start broadcasting - 2
((3, 3, 3), (), (3, 3, 3)), # end broadcasting - 2
((3, 3, 3), (3, 3, 3), ()), # weight broadcasting - 2
((3, 3), (3, 3, 3), (3,))] # all broadcasting
for shapes in all_test_shapes:
cur_inputs = construct_inputs(*shapes)
gradcheck(torch.lerp, cur_inputs)
gradgradcheck(torch.lerp, cur_inputs)
def test_lerp_tensor_weights(self):
self._test_lerp_tensor_weights(lambda t: t)
def test_reduce_dtype(self):
def test_reduction(op, has_no_dim, takes_dtype=True):
x = torch.randn(3, 3, dtype=torch.float, requires_grad=True)
if has_no_dim:
grad1, = torch.autograd.grad([op(x)], [x])
grad2, = torch.autograd.grad([op(x, dtype=torch.double)], [x])
self.assertEqual(grad1, grad2)
self.assertEqual(grad2.dtype, torch.float)
gi = torch.randn(op(x, dim=0).shape, dtype=torch.float)
grad1, = torch.autograd.grad([op(x, dim=0)], [x], gi)
if takes_dtype:
grad2, = torch.autograd.grad([op(x, dim=0, dtype=torch.double)], [x], gi.double())
else:
grad2, = torch.autograd.grad([op(x.double(), dim=0)], [x], gi.double())
self.assertEqual(grad1, grad2)
self.assertEqual(grad2.dtype, torch.float)
test_reduction(torch.sum, True)
test_reduction(torch.prod, True)
test_reduction(torch.cumsum, False)
test_reduction(torch.cumprod, False)
test_reduction(torch.logcumsumexp, False, takes_dtype=False)
def test_inplace_view_saved_output(self):
# Test an in-place operation on a view in which the in-place op saves
# its output. Previously, this created a reference cycle.
dealloc = [0]
class IncrementOnDelete(object):
def __del__(self):
dealloc[0] += 1
def test():
root = torch.randn(3, 3, requires_grad=True)
copy = root.clone()
copy.grad_fn.register_hook(IncrementOnDelete())
view = copy.view(9)
torch.nn.functional.relu(view, inplace=True)
test()
self.assertEqual(dealloc[0], 1)
def test_inplace_view_leaf_errors(self):
# Issue #21875: Fail faster (when we try to modify the view vs. in backward())
x = torch.zeros(1, requires_grad=True)
y = x.view_as(x)
with self.assertRaisesRegex(RuntimeError,
"a view of a leaf Variable that "
"requires grad is being used in "
"an in-place operation."):
y.add_(1)
def test_inplace_view_backward(self):
# Issue #10532: Make sure that this does not raise RuntimeError.
net = nn.Sequential(
nn.InstanceNorm2d(2),
nn.ReLU(True)
)
x = torch.tensor([[[[1.0, 1.0]]]], requires_grad=True)
g, = torch.autograd.grad(net(x).pow(2), [x], grad_outputs=x.new_ones(x.shape), create_graph=True)
torch.autograd.grad(g.sum(), [x])
self.assertEqual(x, torch.tensor([[[[1.0, 1.0]]]]))
# https://discuss.pytorch.org/t/freeing-buffer-strange-behavior/31955/8
inputs = torch.ones((1, 3, 256, 256), requires_grad=True)
tmp1 = (inputs + 1).view_as(inputs)
tmp2 = torch.nn.functional.threshold(tmp1, 0., 0., True)
prob_interpolated = torch.sigmoid(tmp2)
gradients = torch.autograd.grad(outputs=prob_interpolated, inputs=inputs,
grad_outputs=torch.ones(prob_interpolated.size()),
create_graph=True, retain_graph=True)[0]
gradient_penalty = gradients.sum()
gradient_penalty.backward()
fn = gradient_penalty.grad_fn.next_functions[0][0].next_functions[1][0]
self.assertEqual(fn.name(), "ThresholdBackwardBackward")
def test_inplace_view_weak_grad_fn(self):
# Issue 23502: Test that b's grad_fn is preserved.
a = torch.arange(10.0, requires_grad=True)
b = a.narrow(0, 0, 2).clone().view(-1)
b.relu_()
c = b.clone()
del b
gc.collect()
s = c.sum()
s.backward()
self.assertEqual(s, torch.tensor(1.0))
# Issue #21875: Fail faster (when we try to modify the view vs. in backward())
a = torch.rand(10, requires_grad=True).narrow(0, 0, 10)
with self.assertRaises(RuntimeError):
b = a.relu_()
def test_mul_out(self):
a = torch.randn(2, 2, requires_grad=True)
b = torch.randn(2, 2, requires_grad=True)
x = torch.zeros_like(a)
# out=... functions don't support automatic differentiation currently
self.assertRaisesRegex(RuntimeError, 'out=', lambda: torch.mul(a, b, out=x))
# the inputs can require grad if we're in no_grad() mode
with torch.no_grad():
torch.mul(a, b, out=x)
self.assertEqual(x, a * b)
def test_mul_out_result_requires_grad(self):
a = torch.randn(2, 2)
b = torch.randn(2, 2)
x = torch.zeros(2, 2, requires_grad=True)
# we should throw an exception if the output requires grad
self.assertRaisesRegex(RuntimeError, 'out=', lambda: torch.mul(a, b, out=x))
def test_diagonal_derivative_requires_grad(self):
# test that the backward requires grad
# we do this because diagonal_backward uses in-place
# operations and gradgradcheck does not catch whether
# they work as expected (it will succeed even if
# the gradient has requires_grad == False)
a = torch.randn(5, 6, requires_grad=True)
b = torch.diagonal(a)**2
c = b.sum()
d, = torch.autograd.grad(c, a, retain_graph=True, create_graph=True)
self.assertTrue(d.requires_grad)
def test_anomaly_detect_nan(self):
size = 10
class MyFunc(Function):
@staticmethod
def forward(ctx, inp1, inp2, fail_0th):
ctx.fail_0th = fail_0th
return inp1.sum(0, keepdim=True)
@staticmethod
def backward(ctx, gO):
gI = gO.clone().expand(size)
gI[0] = 0
gI[0] /= 0 # Generate a nan
if ctx.fail_0th:
return gI, None, None
else:
return None, gI, None
inp = torch.rand(size, requires_grad=True)
out = MyFunc.apply(inp, inp, True)
out.backward() # Should not fail
inp = torch.rand(size, requires_grad=True)
out = MyFunc.apply(inp, inp, True)
with self.assertRaisesRegex(RuntimeError, "Function 'MyFuncBackward' returned nan values in its 0th output."):
with warnings.catch_warnings(record=True) as w:
with detect_anomaly():
out.backward()
self.assertIn('No forward pass information', str(w[0].message))
inp = torch.rand(size, requires_grad=True)
with self.assertRaisesRegex(RuntimeError, "Function 'MyFuncBackward' returned nan values in its 1th output."):
with warnings.catch_warnings(record=True) as w:
with detect_anomaly():
out = MyFunc.apply(inp, inp, False)
out.backward()
self.assertIn('MyFunc.apply', str(w[0].message))
def test_nested_anomaly_detect_nan(self):
size = 10
class MyFunc(Function):
@staticmethod
def forward(ctx, inp1, fail_0th):
ctx.fail_0th = fail_0th
ctx.save_for_backward(inp1)
return inp1.sum(0, keepdim=True)
@staticmethod
def backward(ctx, gO):
inp, = ctx.saved_tensors
fail_0th = ctx.fail_0th
g = gO.clone().expand(size)
gI = MyFunc2.apply(g * inp, g + inp, fail_0th)
return gI, None
class MyFunc2(Function):
@staticmethod
def forward(ctx, inp1, inp2, fail_0th):
ctx.fail_0th = fail_0th
return inp1 * 2.0 + inp2
@staticmethod
def backward(ctx, gO):
fail_0th = ctx.fail_0th
g1 = gO.clone()
g2 = gO.clone()
g1[0] = 0
g2[0] = 0
# generate a nan
if fail_0th:
g1[0] /= 0
else:
g2[0] /= 0
return g1, g2, None
inp = torch.rand(size, requires_grad=True)
out = MyFunc.apply(inp, True)
ginp, = torch.autograd.grad(out, (inp,), create_graph=True)
gsum = ginp.sum()
gsum.backward() # should not fail
inp = torch.rand(size, requires_grad=True)
out = MyFunc.apply(inp, True)
ginp, = torch.autograd.grad(out, (inp,), create_graph=True)
gsum = ginp.sum()
with warnings.catch_warnings(record=True) as w:
with self.assertRaisesRegex(RuntimeError, "Function 'MyFunc2Backward' returned nan values in its 0th output."):
with detect_anomaly():
gsum.backward()
self.assertIn('No forward pass information', str(w[1].message))
inp = torch.rand(size, requires_grad=True)
with warnings.catch_warnings(record=True) as w:
with self.assertRaisesRegex(RuntimeError, "Function 'MyFunc2Backward' returned nan values in its 1th output."):
with detect_anomaly():
out = MyFunc.apply(inp, False)
ginp, = torch.autograd.grad(out, (inp,), create_graph=True)
gsum = ginp.sum()
gsum.backward()
self.assertIn('MyFunc2.apply', str(w[1].message))
self.assertIn('MyFunc.apply', str(w[2].message))
def test_anomaly_grad_warnings(self):
# PyTorch won't throw the warnings if an error is already being raised,
# but we'd at least want to see them in stderr
class StdErrDiverter:
def __enter__(self):
self.stderr_orig = sys.stderr
self.stderr_new = io.StringIO()
sys.stderr = self.stderr_new
return self
def __exit__(self, *args):
self.captured = self.stderr_new.getvalue()
sys.stderr = self.stderr_orig
# if the warnings don't throw, they will be handled as regular warnings
with self.assertRaisesRegex(RuntimeError,
"one of the variables needed for gradient computation has been "
"modified by an inplace operation"):
with warnings.catch_warnings(record=True) as w:
with detect_anomaly():
a = torch.randn(5, requires_grad=True)
d1 = a + 1
d2 = d1 ** 2
d1 += 1
torch.autograd.grad(d2.sum(), a)
self.assertEqual(len(w), 2)
self.assertIn('Anomaly Detection has been enabled', str(w[0].message))
self.assertIn('Error detected in PowBackward0', str(w[1].message))
# if the warning throws, it will be printed to sys.stderr
with self.assertRaisesRegex(RuntimeError,
"one of the variables needed for gradient computation has been "
"modified by an inplace operation"):
with warnings.catch_warnings(record=True) as w:
with detect_anomaly():
warnings.simplefilter("error")
with StdErrDiverter() as s:
a = torch.randn(5, requires_grad=True)
d1 = a + 1
d2 = d1 ** 2
d1 += 1
torch.autograd.grad(d2.sum(), a)
self.assertEqual(len(w), 1)
self.assertIn('Anomaly Detection has been enabled', str(w[0].message))
self.assertIn('Error detected in PowBackward0', s.captured)
def test_anomaly_assign_parent_cleanup(self):
# Test that python objects created are properly cleaned up when assign_parent is called
import weakref
def get_ref():
# we use torch.exp here but any function that will construct a new node in its
# backward call in grad mode will work
x = torch.randn(2, 2, requires_grad=True)
t = x.exp()
# ExpBackward calls mul, creating the MulBackward node when create_graph=True.
# In anomaly mode, a PyObject referencing MulBackward's "parent" ExpBackward is added to
# MulBackward's anomaly metadata dict, creating the following reference chain:
#
# grad -> MulBackward -> PyObject -> ExpBackward
#
with detect_anomaly():
grad = torch.autograd.grad(t, x, torch.ones_like(t), create_graph=True)
# We add a weak reference to a new Foo object, which we insert into ExpBackward's metadata dict
#
# (PyObject) -> ExpBackward -> dict -> *Foo*
# t ----^ WeakRef ---^
#
# We want to test that, when grad goes out of scope at the end of this function, the PyObject is destroyed
# We can test this by checking that Foo is not kept alive once t is destroyed
class Foo(object):
pass
my_obj = Foo()
meta_dict = t.grad_fn.metadata
meta_dict[0] = my_obj
ref = weakref.ref(my_obj)
return t, ref
t, ref = get_ref()
self.assertIsNotNone(ref())
del t
self.assertIsNone(ref())
def test_nested_anomaly_printstack_cleanup(self):
# Test if metadata dict PyObject is properly destroyed
import weakref
def get_ref():
# This is similar to the construction in test_anomaly_assign_parent_cleanup:
#
# MyFuncBackward2 -> PyObject -> MyFuncBackward -> dict -> Foo
# out ---^ WeakRef ---^
#
# We want to check that Foo is still properly destroyed even when MyFunc2Backward's
# AnomalyMetadata calls printstack, which does some python object manipulation.
#
# You might be wondering why we still need test_anomaly_assign_parent_cleanup,
# since if the PyObject were not destroyed here, wouldn't this test detect that as well?
# The answer is that the custom function's PyObject (THPFunction) actually only holds
# a weak reference to the C++ node!
class MyFunc(Function):
@staticmethod
def forward(ctx, x):
ctx.save_for_backward(x)
return x
@staticmethod
def backward(ctx, gO):
x, = ctx.saved_tensors
return MyFunc2.apply(x)
class MyFunc2(Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, gO):
return gO + float("NaN")
inp = torch.rand(1, requires_grad=True)
out = MyFunc.apply(inp)
ginp, = torch.autograd.grad(out, (inp,), create_graph=True)
with warnings.catch_warnings(record=True) as w:
with self.assertRaisesRegex(RuntimeError, "Function 'MyFunc2Backward' returned nan values in its 0th output."):
with detect_anomaly():
ginp.backward()
class Foo(object):
pass
my_obj = Foo()
meta_dict = out.grad_fn.metadata
meta_dict[0] = my_obj
ref = weakref.ref(my_obj)
return out, ref
t, ref = get_ref()
self.assertIsNotNone(ref())
del t
self.assertIsNone(ref())
@skipIfNoLapack
def test_eig_no_eigenvectors(self):
A = torch.tensor([[1., 2.], [2., 4.]], dtype=torch.float32, requires_grad=True)
w, v = torch.eig(A, eigenvectors=False)
with self.assertRaisesRegex(RuntimeError, 'cannot compute backward'):
torch.autograd.backward([w, v], [torch.ones_like(w), torch.ones_like(v)])
@skipIfNoLapack
def test_eig_complex_eigenvalues(self):
A = torch.tensor([[0., -1.], [1., 0.]], dtype=torch.float32, requires_grad=True)
w, v = torch.eig(A, eigenvectors=True)
with self.assertRaisesRegex(RuntimeError, 'does not support complex eigenvalues'):
torch.autograd.backward([w, v], [torch.ones_like(w), torch.ones_like(v)])
@skipIfNoLapack
def test_symeig_no_eigenvectors(self):
A = torch.tensor([[1., 2.], [2., 4.]], dtype=torch.float32, requires_grad=True)
w, v = torch.symeig(A, eigenvectors=False)
with self.assertRaisesRegex(RuntimeError, 'cannot compute backward'):
torch.autograd.backward([w, v], [torch.ones_like(w), torch.ones_like(v)])
@skipIfNoLapack
def test_svd_no_singularvectors(self):
A = torch.randn(2, 2, dtype=torch.float32, requires_grad=True)
u, s, v = torch.svd(A, compute_uv=False)
with self.assertRaisesRegex(RuntimeError, 'cannot compute backward'):
torch.autograd.backward([u, s, v], [torch.ones_like(u), torch.ones_like(s), torch.ones_like(v)])
def test_no_grad_copy(self):
# create autograd function that saves grad pointer as class static
class MyFunc(Function):
static_grad_ptr = None
@staticmethod
def forward(ctx, inp1, inp2):
return inp1 + inp2
@staticmethod
def backward(ctx, grad):
MyFunc.static_grad_ptr = grad.data_ptr()
return grad, grad
class NonContGradFunc(Function):
@staticmethod
def forward(ctx, inp1):
ctx.size = inp1.size()
return torch.tensor([1.])
@staticmethod
def backward(ctx, grad):
return torch.ones(1).expand(ctx.size)
a = torch.randn(5, 6, requires_grad=True)
b = torch.randn(5, 6, requires_grad=True)
# non-contiguous grad should be copied
NonContGradFunc.apply(MyFunc.apply(a, b)).backward()
self.assertFalse(a.grad.data_ptr() == MyFunc.static_grad_ptr)
self.assertFalse(b.grad.data_ptr() == MyFunc.static_grad_ptr)
# test case that should trigger no copy for one of a,b
a.grad = b.grad = None
MyFunc.apply(a, b)[1][0].backward()
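# MyFunc.backward returns the same grad tensor for both inputs; the engine is
# expected to install it directly as the .grad of one of them (no copy) but to
# clone it for the other, so exactly one of the pointers below should match p_g.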
p_g = MyFunc.static_grad_ptr
p_a = a.grad.data_ptr()
p_b = b.grad.data_ptr()
# check a,b uses different grad buffer
self.assertFalse(p_a == p_b)
# check one of them is using the computed buffer
self.assertTrue(p_a == p_g or p_b == p_g)
def test_no_grad_copy_sparse(self):
# create autograd function that saves grad pointer as class static
class MyFunc(Function):
static_grad_ptr = None
@staticmethod
def forward(ctx, inp1, inp2):
return inp1 + inp2
@staticmethod
def backward(ctx, grad):
MyFunc.static_grad_ptr = grad._values().data_ptr()
return grad, grad
class NonContGradFunc(Function):
static_grad_ptr = None
@staticmethod
def forward(ctx, inp1, inp2):
return inp1 + inp2
@staticmethod
def backward(ctx, grad):
# Create a sparse tensor with non-contiguous indices and values
# and return as grad.
v = torch.rand(1, 3)
i = torch.ones(1, 1, dtype=torch.long)
nv = v.expand(8, 3)
ni = i.expand(1, 8)
ngrad = torch.sparse.FloatTensor(ni, nv, torch.Size([10, 3]))
NonContGradFunc.static_grad_ptr = ngrad._values().data_ptr()
return ngrad, ngrad
a = torch.randn(10, 3, requires_grad=True)
b = torch.randn(10, 3, requires_grad=True)
input = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9])
offsets = torch.tensor([0, 4])
import torch.nn.functional as F
# test case that should trigger no copy for one of a,b
emb_matrix = MyFunc.apply(a, b)
loss = F.embedding_bag(emb_matrix, input, offsets, sparse=True).sum()
loss.backward(retain_graph=True)
p_g = MyFunc.static_grad_ptr
p_a = a.grad._values().data_ptr()
p_b = b.grad._values().data_ptr()
# check a,b uses different grad buffer
self.assertFalse(p_a == p_b)
# check one of them is using the computed buffer
self.assertTrue(p_a == p_g or p_b == p_g)
# Run backwards multiple times to ensure accumulation works.
for i in range(10):
loss.backward(retain_graph=True)
# Non-contiguous indices and values should trigger a copy.
a.grad = b.grad = None
emb_matrix = NonContGradFunc.apply(a, b)
loss = F.embedding_bag(emb_matrix, input, offsets, sparse=True).sum()
loss.backward(retain_graph=True)
p_g = NonContGradFunc.static_grad_ptr
p_a = a.grad._values().data_ptr()
p_b = b.grad._values().data_ptr()
# check that a and b use different grad buffers
self.assertFalse(p_a == p_b)
# Verify we cloned both grads.
self.assertFalse(p_a == p_g)
self.assertFalse(p_b == p_g)
# Run backwards multiple times to ensure accumulation works.
for i in range(10):
loss.backward(retain_graph=True)
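# gradcheck and gradgradcheck accept a single Tensor input (no tuple wrapping needed).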
def test_gradcheck_single_input(self):
def f(inp):
return inp.mul(5)
gradcheck(f, torch.rand(10, dtype=torch.float64, requires_grad=True))
gradgradcheck(f, torch.rand(10, dtype=torch.float64, requires_grad=True))
def test_gradcheck_sparse_input(self):
def fn(sparse):
return torch.sparse.sum(sparse)
gradcheck(fn, torch.rand(10).to_sparse().requires_grad_(True), check_sparse_nnz=True)
with self.assertRaisesRegex(RuntimeError, 'gradcheck expects all tensor inputs are dense'):
gradcheck(fn, torch.rand(10).to_sparse().requires_grad_(True), check_sparse_nnz=False)
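# nondet_tol lets gradcheck/gradgradcheck tolerate small nondeterminism in backward;
# without it, a jittered backward should be reported as non-reentrant.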
def test_gradcheck_nondeterministic(self):
class NonDetFunc(Function):
@staticmethod
def forward(ctx, x, jitter=0.0):
ctx._jitter = jitter
return x
@staticmethod
def backward(ctx, grad_out):
return NonDetFunc.apply(grad_out, ctx._jitter) * (1 + torch.rand_like(grad_out) * ctx._jitter), None
inp = torch.randn(5, 5, requires_grad=True)
gradcheck(lambda x: NonDetFunc.apply(x, 0.0), inp, check_batched_grad=False)
with self.assertRaisesRegex(RuntimeError, 'Backward is not reentrant'):
gradcheck(lambda x: NonDetFunc.apply(x, 1e-6), inp, check_batched_grad=False)
with self.assertRaisesRegex(RuntimeError, 'Backward is not reentrant'):
gradgradcheck(lambda x: NonDetFunc.apply(x, 1e-12), inp, check_batched_grad=False)
gradcheck(lambda x: NonDetFunc.apply(x, 0.0), inp, nondet_tol=1e-5, check_batched_grad=False)
gradcheck(lambda x: NonDetFunc.apply(x, 1e-6), inp, nondet_tol=1e-5, check_batched_grad=False)
gradgradcheck(lambda x: NonDetFunc.apply(x, 1e-12), inp, nondet_tol=1e-5, check_batched_grad=False)
def test_gradcheck_validates_inputs(self):
# when inputs are not dense, but check_sparse_nnz is false
x = torch.rand(10, requires_grad=True).to_sparse()
with self.assertRaisesRegex(RuntimeError, 'dense when check_sparse_nnz is set to False.'):
gradcheck(lambda x: x.to_dense(), (x,), check_sparse_nnz=False, check_batched_grad=False)
self.assertFalse(gradcheck(lambda x: x.to_dense(), (x,), check_sparse_nnz=False,
check_batched_grad=False, raise_exception=False))
# when none of the inputs require grad (always raises even if raise_exception=False)
x = torch.rand(10, requires_grad=False)
with self.assertRaisesRegex(ValueError, 'at least one input tensor to require gradient'):
gradcheck(lambda x: x, (x,), raise_exception=False)
# (warning) when inputs are not double precision
x = torch.ones(1, dtype=torch.float32, requires_grad=True)
with self.assertWarnsRegex(UserWarning, "Input #0 requires gradient and is not a double precision"):
self.assertTrue(gradcheck(lambda x: x, (x,), atol=1e-1))
# when layout is not mkldnn (aka has strides) and input has a dimension with stride 0 (always raises
# even if raise_exception=False)
x = torch.ones(1, dtype=torch.float64, requires_grad=True)
x = x.expand((2, 2))
with self.assertRaisesRegex(RuntimeError, 'The 0th input has a dimension with stride 0'):
gradcheck(lambda x: x, (x,), raise_exception=False)
@unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
def test_gradcheck_test_outputs(self):
# when outputs are sparse (always raises even if raise_exception=False)
x = torch.rand(10, requires_grad=True).to_sparse()
with self.assertRaisesRegex(ValueError, 'Sparse output is not supported at gradcheck yet'):
gradcheck(lambda x: x, (x,), check_sparse_nnz=True, check_batched_grad=False, raise_exception=False)
# when outputs are mkldnn (always raises even if raise_exception=False)
root = torch.randn(4, 5, dtype=torch.float32, requires_grad=True)
with self.assertRaisesRegex(ValueError, 'MKLDNN output is not supported at gradcheck yet'):
gradcheck(lambda x: x.to_mkldnn(), (root,), check_batched_grad=False, raise_exception=False)
def test_gradcheck_check_no_differentiable_outputs(self):
# When none of the outputs are differentiable, but numerical gradient is not zero
x = torch.ones((1,), requires_grad=True)
with self.assertRaisesRegex(RuntimeError, 'Numerical gradient for function expected to be zero'):
gradcheck(lambda x: torch.tensor([x]), x)
self.assertFalse(gradcheck(lambda x: torch.tensor([x]), x, raise_exception=False))
def test_gradcheck_check_batched_grad(self):
x = torch.rand(10, requires_grad=True).to_sparse()
# runtime error while computing batched grad (prints a big error)
with self.assertRaisesRegex(RuntimeError, 'gradcheck or gradgradcheck failed while testing batched gradient'):
gradcheck(lambda x: x.to_dense(), (x,), check_sparse_nnz=True, check_batched_grad=True)
self.assertFalse(gradcheck(lambda x: x.to_dense(), (x,), check_sparse_nnz=True, check_batched_grad=True,
raise_exception=False))
def test_gradcheck_backward_mul_by_grad_output(self):
# when grad_input is sparse and has incorrect sparse_dim/dense_dim
def fn(x):
def hook(grad):
if grad is not None:
return grad.to_dense().to_sparse(1)
return grad
y = x.clone()
y.register_hook(hook)
return y.to_dense()
x = torch.ones((2, 2), requires_grad=True).to_sparse()
with self.assertRaisesRegex(RuntimeError, 'grad is sparse tensor, but has incorrect sparse_dim'):
gradcheck(fn, (x,), atol=1e-1, check_sparse_nnz=True, check_batched_grad=False)
self.assertFalse(gradcheck(fn, (x,), atol=1e-1, check_sparse_nnz=True, check_batched_grad=False,
raise_exception=False))
# when backward not multiplied by grad_output (non-sparse case)
def fn2(x):
y = x.clone()
y.register_hook(lambda x: x + 1e-2)
return y
x = torch.ones(1, requires_grad=True)
with self.assertRaisesRegex(RuntimeError, 'backward not multiplied by grad_output'):
gradcheck(fn2, (x,), atol=1e-1)
self.assertFalse(gradcheck(fn2, (x,), atol=1e-1, raise_exception=False))
# when backward not multiplied by grad_output (sparse case)
def fn3(x):
y = x.clone().to_dense()
y.register_hook(lambda x: x + 1e-2)
return y
x = torch.ones(1, requires_grad=True).to_sparse()
with self.assertRaisesRegex(RuntimeError, 'backward not multiplied by grad_output'):
gradcheck(fn3, (x,), atol=1e-1, check_sparse_nnz=True, check_batched_grad=False)
self.assertFalse(gradcheck(fn3, (x,), atol=1e-1, check_sparse_nnz=True, check_batched_grad=False,
raise_exception=False))
# when layout of grad_input is not the same as input
class Test(Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, x):
return x.to_sparse()
x = torch.ones(1, requires_grad=True)
with self.assertRaisesRegex(RuntimeError, 'grad is incorrect layout'):
gradcheck(Test.apply, (x,), check_batched_grad=False)
self.assertFalse(gradcheck(Test.apply, (x,), check_batched_grad=False, raise_exception=False))
def test_gradcheck_undefined_grad(self):
# when a runtime error is encountered while running backward
def fn(x):
def hook(x):
if x is None:
raise RuntimeError("x is undefined")
y = x.clone()
y.register_hook(hook)
return y
x = torch.ones(1, requires_grad=True)
with self.assertWarnsRegex(UserWarning, "Backwards compatibility: New undefined gradient support checking feature"):
with self.assertRaisesRegex(RuntimeError, 'Expected backward function to handle undefined output grads'):
gradcheck(fn, (x,))
self.assertFalse(gradcheck(fn, (x,), raise_exception=False))
def test_gradcheck_jacobian_mismatch(self):
def fn(x): # R -> R, C -> C
y = x.clone()
y.register_hook(lambda x: x + 1e-2)
return y
x = torch.ones(2, 2, requires_grad=True)
with self.assertRaisesRegex(RuntimeError, 'Jacobian mismatch for output 0 with respect to input 0'):
gradcheck(fn, (x,))
self.assertFalse(gradcheck(fn, (x,), raise_exception=False))
x_c = torch.ones(2, 2, requires_grad=True, dtype=torch.complex128)
with self.assertRaisesRegex(RuntimeError, 'Gradients failed to compare equal for grad output = 1j'):
gradcheck(fn, (x_c,))
self.assertFalse(gradcheck(fn, (x_c,), raise_exception=False))
def fn2(x): # R -> C
y = torch.complex(x, x)
y.register_hook(lambda x: x + 1e-2)
return y
x = torch.ones(2, 2, requires_grad=True)
with self.assertRaisesRegex(RuntimeError, 'Gradients failed to compare equal for grad output = 1j'):
gradcheck(fn2, (x,))
self.assertFalse(gradcheck(fn2, (x,), raise_exception=False))
def fn3(x): # C -> R
y = torch.real(x)
y.register_hook(lambda x: x + 1e-2)
return y
with self.assertRaisesRegex(RuntimeError, 'Gradients failed to compare equal for grad output = 1'):
gradcheck(fn3, (x_c,))
self.assertFalse(gradcheck(fn3, (x_c,), raise_exception=False))
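# Version counters: bumped by in-place ops, shared with differentiable views, and
# preserved by `x.data = ...` assignments.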
def test_version_counter(self):
x = torch.randn(1, 2)
# In-place op bumps version
x_saved_version = x._version
x.add_(1).add_(1)
self.assertTrue(x._version > x_saved_version)
# Differentiable view shares version counter
xz = x[:]
self.assertTrue(x._version == xz._version)
xz.add_(1)
self.assertTrue(x._version == xz._version)
# `x.data = y` preserves version counter of `x`
x_saved_version = x._version
x.data = torch.randn(2, 3)
self.assertTrue(x._version == x_saved_version)
x.add_(1)
self.assertTrue(x._version > x_saved_version)
# Make sure `x` is still using the same version counter it shares with `xz`
self.assertTrue(x._version == xz._version)
# In-place op on `xz` also updates version of `x`,
# because they share the version counter
xz.add_(1)
self.assertTrue(x._version == xz._version)
def test_set_data_tensorimpl_type(self):
# Dense tensor has impl of type `TensorImpl`, while sparse tensor has impl
# of type `SparseTensorImpl`.
x = torch.randn(1, 2)
x_s = torch.sparse_coo_tensor(torch.zeros([1, 1]), torch.ones([1]))
with self.assertRaisesRegex(RuntimeError, 'incompatible tensor type'):
x.data = x_s
def test_set_data_preserve_pyobj(self):
a = torch.randn(1, 2)
b = torch.randn(1, 2)
b_id_saved = id(b)
b.data = a
self.assertTrue(b_id_saved == id(b))
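# Run a small backward via TestCase.runWithPytorchAPIUsageStderr and check the
# API-usage log to confirm autograd reports its worker-thread shutdown.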
@unittest.skipIf(IS_WINDOWS, "Skipping because doesn't work for windows")
def test_thread_shutdown(self):
code = """import torch
from torch.autograd import Function
class MyFunction(Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, grad):
return grad
for shape in [(1,), ()]:
v = torch.ones(shape, requires_grad=True)
MyFunction.apply(v).backward()
"""
s = TestCase.runWithPytorchAPIUsageStderr(code)
self.assertRegex(s, "PYTORCH_API_USAGE torch.autograd.thread_shutdown")
@unittest.skipIf(IS_MACOS, "Fails with SIGBUS on macOS; https://github.com/pytorch/pytorch/issues/25941")
def test_deep_reentrant(self):
class DeepReentrant(Function):
@staticmethod
def forward(ctx, x):
with torch.enable_grad():
ctx.x = Variable(x.detach(), requires_grad=True)
ctx.x = ctx.x - 1
return ctx.x.detach()
@staticmethod
def backward(ctx, x):
if ctx.x < 0:
return x
with torch.enable_grad():
DeepReentrant.apply(ctx.x).sum().backward()
return x
# Test stack overflow escape mechanism
v = torch.tensor(2000.0, requires_grad=True)
# This will cause stack overflow if reentrant calls are handled
# in the same thread recursively
DeepReentrant.apply(v).sum().backward()
# Test stack overflow escape mechanism multiple times
# to ensure reusing workers in the pool works fine
v2 = torch.tensor(200.0, requires_grad=True)
DeepReentrant.apply(v2).sum().backward()
def test_reentrant_priority(self):
order = []
class MyFunction(Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, x):
order.append("MyFunction")
return x
class Reentrant(Function):
@staticmethod
def forward(ctx, x):
with torch.enable_grad():
ctx.x = Variable(x.detach(), requires_grad=True)
ctx.x = ctx.x - 1
return ctx.x.detach()
@staticmethod
def backward(ctx, x):
order.append("Reentrant")
if ctx.x < 0:
return x
with torch.enable_grad():
Reentrant.apply(ctx.x).backward()
return x
a = MyFunction.apply(torch.tensor(6.0, requires_grad=True))
b = Reentrant.apply(torch.tensor(9.0, requires_grad=True))
v = a * b
v.backward()
# The tasks for the Reentrant and MyFunction backward() will be added
# to the queue in the autograd engine at the same time. The backward
# for Reentrant will be executed first, which will then add other
# backward tasks to the queue. We want to ensure all the reentrant tasks
# are prioritized over the MyFunction backward task regardless of their
# sequence numbers
self.assertEqual(len(order), 11)
self.assertEqual(order.count("Reentrant"), 10)
self.assertEqual(order[-1], "MyFunction")
@slowTest
def test_checkpointing(self):
num_inp = 2000
nz_inp = 10
nz_out = 10
nz_bottleneck = 1000
# small proxy network for some complex reasoning we want to do per input
module = nn.Sequential(
nn.Linear(nz_inp, nz_bottleneck),
nn.ReLU(),
nn.Linear(nz_bottleneck, nz_inp)
)
feat_combined = []
for r in range(num_inp):
data_r = torch.Tensor(1, nz_inp)
data_r.uniform_()
data_r.requires_grad = True
feat_r = checkpoint(module, data_r)
feat_combined.append(feat_r)
# compute mean as a proxy for some joint reasoning
mean_combined = torch.stack(feat_combined).mean()
mean_combined.backward()
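# After checkpoint + torch.autograd.grad raises, internal state should be reset so a
# later plain backward through a checkpointed op still works.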
def test_checkpoint_valid_reset_on_error(self):
a = torch.randn(2, 2, requires_grad=True)
with self.assertRaisesRegex(Exception, "Checkpointing is not compatible with .grad()"):
b = checkpoint(torch.exp, a).sum()
torch.autograd.grad(b, (a,))
c = checkpoint(torch.exp, a).sum()
c.backward()
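# Helper: runs a reentrant backward and installs engine callbacks at the requested
# depths; returns how many times the inner/outer callbacks fired.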
def _test_reentrant_with_callbacks(self, install_callbacks_in_depths):
counter = {}
counter["inner"] = 0
counter["outer"] = 0
def inc_inner_counter():
counter["inner"] += 1
def inc_outer_counter():
counter["outer"] += 1
class MyFunc(Function):
@staticmethod
def forward(ctx, input):
return input
@staticmethod
@once_differentiable
def backward(ctx, input):
if 1 in install_callbacks_in_depths:
# Add a callback to execute.
Variable._execution_engine.queue_callback(inc_inner_counter)
return input
class MyReentrantFunc(Function):
@staticmethod
def forward(ctx, input):
return input
@staticmethod
@once_differentiable
def backward(ctx, input):
if 0 in install_callbacks_in_depths:
# Add a callback to execute.
Variable._execution_engine.queue_callback(inc_outer_counter)
# Reentrant backward call.
tmp_inp = input.detach().requires_grad_()
with torch.enable_grad():
tmp_out = (MyFunc.apply(tmp_inp)).sum()
tmp_out.backward()
return input
t1 = torch.rand((3, 3), requires_grad=True)
t2 = MyReentrantFunc.apply(t1)
t3 = t2.sum()
torch.autograd.backward([t3])
return counter
def test_reentrant_with_callbacks_depth_0(self):
# Verify callback is called only once.
ret = self._test_reentrant_with_callbacks([0])
self.assertEqual(1, ret["outer"])
self.assertEqual(0, ret["inner"])
def test_reentrant_with_callbacks_depth_1(self):
# Verify callback is called only once.
ret = self._test_reentrant_with_callbacks([1])
self.assertEqual(0, ret["outer"])
self.assertEqual(1, ret["inner"])
def test_reentrant_with_callbacks_both_depths(self):
# Verify callback is called twice.
ret = self._test_reentrant_with_callbacks([0, 1])
self.assertEqual(1, ret["outer"])
self.assertEqual(1, ret["inner"])
def test_reentrant_with_leaf_variable_hook(self):
handle = None
param = torch.rand(10, requires_grad=True)
def add_gradient_penalty_to_grad(grad):
handle.remove()
old_param_grad = grad
param.grad = None
# Add some sort of gradient penalty by directly updating the gradients
with torch.enable_grad():
g = grad.detach().requires_grad_()
new_param = param.detach().requires_grad_()
out = ((g * 2) + new_param).sum()
out.backward()
res = g.grad + grad
param.grad = old_param_grad
return res
handle = param.register_hook(add_gradient_penalty_to_grad)
# Forward pass
tmp = (param * param)
loss = tmp.sum()
# Compute the gradients
loss.backward()
def test_reentrant_with_non_leaf_variable_hook(self):
handle = None
param = torch.rand(10, requires_grad=True)
def manual_increase_gradient(grad):
handle.remove()
# Add some sort of gradient penalty by directly updating the gradients
with torch.enable_grad():
g = grad.detach().requires_grad_()
out = ((g * 2) + 5).sum()
out.backward()
res = g.grad + grad
return res
# Forward pass
tmp = (param * param)
handle = tmp.register_hook(manual_increase_gradient)
loss = tmp.sum()
# Compute the gradients
loss.backward()
self.assertEqual(param.grad, 6 * param)
def test_grad_fn_attr_bindings(self):
# Check that the getter of each type returns what we want
# See `gen_autograd_functions.py` for how the getters are generated
#
# This test is only meant to check if the codegen'd bindings work
# Please help update this test if you update the names of any of the fields we check!
#
a = torch.ones(1, requires_grad=True)
b = torch.ones(1, requires_grad=True)
out = torch.stack([a, b], dim=0)
self.assertEqual(out.grad_fn._saved_tensors, (a, b)) # TensorList -> Tuple[Tensor]
self.assertIsInstance(out.grad_fn._saved_tensors[0], torch.Tensor)
self.assertEqual(out.grad_fn._saved_dim, 0) # int64_t -> int
self.assertIsInstance(out.grad_fn._saved_dim, int)
a = torch.ones(2, 2, requires_grad=True)
indices = torch.tensor([0, 1])
out = a[:, indices]
self.assertEqual(out.grad_fn._saved_indices, (None, indices)) # c10::List<c10::optional<Tensor>> -> Tuple[Tensor?]
self.assertIsInstance(out.grad_fn._saved_indices[1], torch.Tensor)
self.assertEqual(out.grad_fn._saved_self_sizes, a.shape) # IntArrayRef -> Tuple[int]
self.assertIsInstance(out.grad_fn._saved_self_sizes[0], int)
a = torch.ones(1, 1, 2, requires_grad=True)
out = torch.nn.functional.interpolate(a, 4, mode="linear")
self.assertEqual(out.grad_fn._saved_output_size, (4,)) # c10::optional<IntArrayRef> -> int[]?
self.assertIsInstance(out.grad_fn._saved_output_size[0], int)
self.assertEqual(out.grad_fn._saved_align_corners, False) # bool -> bool
self.assertIsInstance(out.grad_fn._saved_align_corners, bool)
self.assertIsNone(out.grad_fn._saved_scale_factors) # c10::optional<ArrayRef<double>> -> float[]?
out = torch.nn.functional.interpolate(a, scale_factor=0.5, mode="linear")
self.assertIsNone(out.grad_fn._saved_output_size)
self.assertEqual(out.grad_fn._saved_scale_factors, (0.5,))
self.assertIsInstance(out.grad_fn._saved_scale_factors[0], float)
a = torch.ones(2, 2, requires_grad=True)
out = torch.pdist(a, p=1)
self.assertEqual(out.grad_fn._saved_p, 1.) # double -> float
self.assertIsInstance(out.grad_fn._saved_p, float)
a = torch.ones(1, 1, 2, requires_grad=True)
out = torch.logit(a, 1.)
self.assertEqual(out.grad_fn._saved_eps, 1.) # c10:optional<double> -> float?
self.assertIsInstance(out.grad_fn._saved_eps, float)
out = torch.logit(a)
self.assertIsNone(out.grad_fn._saved_eps)
a = torch.tensor([1.], requires_grad=True)
out = torch.div(a, 2., rounding_mode="trunc")
self.assertEqual(out.grad_fn._saved_rounding_mode, "trunc") # std::string -> str
x = torch.zeros(5, requires_grad=True)
out = torch.threshold(x, threshold=(1 + 0j), value=(1 + 0j))
self.assertIsInstance(out.grad_fn._saved_threshold, complex) # Scalar(complex double) -> complex
cfloat = torch.tensor(1 + 0j, dtype=torch.complex64)
out = torch.threshold(x, threshold=cfloat, value=(1 + 0j))
self.assertIsInstance(out.grad_fn._saved_threshold, complex) # Scalar(complex float) -> complex
out = torch.threshold(x, threshold=1., value=1.)
self.assertIsInstance(out.grad_fn._saved_threshold, float) # Scalar(floating point) -> float
out = torch.threshold(x, threshold=1, value=1)
self.assertIsInstance(out.grad_fn._saved_threshold, int) # Scalar(integral) -> int
out = torch.threshold(x, threshold=False, value=False)
self.assertIsInstance(out.grad_fn._saved_threshold, bool) # Scalar(bool) -> bool
a = torch.ones(2, 2, requires_grad=True)
out = a.as_strided((3,), (1,), 1)
self.assertEqual(out.grad_fn._saved_storage_offset, 1) # c10:optional<int64_t> -> int?
self.assertIsInstance(out.grad_fn._saved_storage_offset, int)
out = a.as_strided((3,), (1,))
self.assertIsNone(out.grad_fn._saved_storage_offset)
a = torch.ones(2, requires_grad=True)
out = torch.tanh(a)
self.assertEqual(out, out.grad_fn._saved_result) # saved variable when output
def test_autograd_views_codegen(self):
# This is not necessarily the absolute correct behavior, but this is the current
# one. This test is here to make sure that any change to this behavior is detected
# and not silent. The TODOs below mark the places with unexpected behavior.
# Note that any change in these tests will be BC-breaking and should be done carefully.
# This test checks the behavior of two codegen functions (view_as and unbind)
# with respect to view tracking and inplace operation on the output.
def run_test(grad_mode, requires_grad, is_view, should_raise_tuple):
def maybe_check_raise(fn, should_raise):
self.assertTrue(should_raise is None or isinstance(should_raise, str))
if should_raise is not None:
with self.assertRaisesRegex(RuntimeError, should_raise):
fn()
else:
fn()
inp = torch.rand(2, requires_grad=requires_grad).clone()
with torch.set_grad_enabled(grad_mode):
out = inp.view_as(inp)
# Are they differentiable views?
self.assertTrue(out._is_view() == is_view)
# Are inplace allowed?
maybe_check_raise(lambda: out.add_(1), should_raise_tuple[0])
inp = torch.rand(2, requires_grad=requires_grad).clone()
with torch.set_grad_enabled(grad_mode):
out = inp.unbind()
# Are they differentiable views?
self.assertTrue(out[0]._is_view() == is_view)
self.assertTrue(out[1]._is_view() == is_view)
# Are inplace allowed?
maybe_check_raise(lambda: out[0].add_(1), should_raise_tuple[1])
maybe_check_raise(lambda: out[1].add_(1), should_raise_tuple[2])
# should_raise contains None if it should not raise
# should_raise contains a string of the error if it should raise
# The 3 elements are for view_as, first output of unbind and second output of unbind
run_test(grad_mode=True, requires_grad=False, is_view=True,
should_raise_tuple=(None, None, None))
inp_change_err = "Output {} of UnbindBackward is a view and is being modified inplace."
run_test(grad_mode=True, requires_grad=True, is_view=True,
should_raise_tuple=(None, inp_change_err.format("0"), inp_change_err.format("1")))
leaf_grad_err = "A view was created in no_grad mode and is being modified inplace"
run_test(grad_mode=False, requires_grad=True, is_view=True,
should_raise_tuple=(leaf_grad_err, leaf_grad_err, leaf_grad_err))
run_test(grad_mode=False, requires_grad=False, is_view=True,
should_raise_tuple=(None, None, None))
def test_inplace_not_requires_grad(self):
class MyFn(torch.autograd.Function):
@staticmethod
def forward(ctx, inp):
return inp.view_as(inp)
@staticmethod
def backward(ctx, grad):
return grad
# Original Tensor does not require grad
a = torch.rand(1, 2)
# Tensor being written does require grad
b = torch.rand(1, requires_grad=True)
# Take an invalid view on 'a' that should raise an error (warns during deprecation)
view_a = MyFn.apply(a)
with self.assertWarnsRegex(UserWarning, "This view was created inside a custom Function"):
view_a += b
# Extra test for copy_ that is a manual implementation and could be easily
# forgotten when the codegen is updated (warns during deprecation)
a = torch.rand(1, 2)
b = torch.rand(1, requires_grad=True)
view_a = MyFn.apply(a)
with self.assertWarnsRegex(UserWarning, "This view was created inside a custom Function"):
view_a.copy_(b)
# Functions that should throw must properly throw
a = torch.rand(1, 2)
b = torch.rand(1, requires_grad=True)
view_a = a.unbind()[0]
with self.assertRaisesRegex(RuntimeError, "This view is the output of a function that returns "
"multiple views."):
view_a.copy_(b)
# Sanity check that views that should work still work
a = torch.rand(1, 2)
b = torch.rand(1, requires_grad=True)
a.select(1, 0).copy_(b)
def _do_test_autograd_simple_views_python(self, dtype):
# This is not necessarily the absolute correct behavior, but this is the current
# one. This test is here to make sure that any change to this behavior is detected
# and not silent. The TODOs below mark the places with unexpected behavior.
# Note that any change in these tests will be BC-breaking and should be done carefully.
# This checks the autograd.Function behavior when we return one or multiple outputs
# while one of these is an input, a view of an input or of a temporary tensor.
# This indicator is used to track how many times the backward function was called
bw_called = [0]
# This indicator is used to check if the argument `ga` contains non-zero values
ga_nz = [False]
class IdOneOutput(Function):
@staticmethod
def forward(ctx, a, b, make_view):
if make_view:
a = a.narrow(0, 0, 2)
else:
a = a.clone()
return a
@staticmethod
def backward(ctx, ga):
bw_called[0] += 1
return ga, None, None
class IdTwoOutput(Function):
@staticmethod
def forward(ctx, a, b, make_view):
if make_view:
a = a.narrow(0, 0, 2)
else:
a = a.clone()
return a, a + b
@staticmethod
def backward(ctx, ga, gab):
bw_called[0] += 1
if ga.eq(0).all():
ga_nz[0] = False
else:
ga_nz[0] = True
return ga + gab, gab, None
err_msg_two_outputs = "Output 0 of IdTwoOutputBackward is a view and is being modified inplace."
err_msg_two_outputs += " This view is the output of a function that returns multiple views."
class ViewOfTemp(Function):
@staticmethod
def forward(ctx, a, make_view):
ctx.save_for_backward(a)
if make_view:
a = a.narrow(0, 0, 2)
else:
a = a.clone()
b = a.clone()
return b.select(0, 0)
@staticmethod
def backward(ctx, grad):
bw_called[0] += 1
a, = ctx.saved_tensors
res = torch.zeros_like(a)
res.select(0, 0).copy_(grad)
return res, None
for fn_id in ["one_output", "two_output", "view_of_temp"]:
for inplace in [True, False]:
for make_view in [True, False]:
# Used for special casing the tests below
output_is_a_view = (make_view or fn_id == "view_of_temp")
def fn(a, b):
# never modify a, b in-place for gradcheck
a = a.clone()
b = b.clone()
if fn_id == "two_output":
tmp1, tmp2 = IdTwoOutput.apply(a, b, make_view)
if inplace:
tmp1 += 3
tmp2 += 3
else:
tmp1 = tmp1 + 3
tmp2 = tmp2 + 3
tmp = tmp1 * tmp2
else:
if fn_id == "one_output":
tmp = IdOneOutput.apply(a, b, make_view)
else:
tmp = ViewOfTemp.apply(a + b, make_view)
if inplace:
tmp += 3
else:
tmp = tmp + 3
return tmp.sum()
a = torch.ones(2, dtype=dtype, requires_grad=True)
b = torch.ones(2, dtype=dtype, requires_grad=True)
if fn_id == "two_output" and inplace and output_is_a_view:
with self.assertRaisesRegex(RuntimeError, err_msg_two_outputs):
fn(a, b)
else:
# Are the computed gradients correct?
if inplace and output_is_a_view:
with warnings.catch_warnings(record=True) as w:
if fn_id == "view_of_temp":
# This will be fixed after the deprecation cycle and the warning becomes
# an error.
with self.assertRaisesRegex(RuntimeError,
"a view of a leaf Variable that requires grad "
"is being used in an in-place operation."):
gradcheck(fn, (a, b), check_batched_grad=False)
else:
# This works but the custom backward is not called (or is called with partial
# gradients) as tested below
gradcheck(fn, (a, b), check_batched_grad=False)
self.assertTrue(len(w) > 0)
else:
gradcheck(fn, (a, b), check_batched_grad=False)
# Was the custom backward called properly
bw_called[0] = 0
ga_nz[0] = True # For the case where the backward is called
with warnings.catch_warnings(record=True) as w:
if inplace and output_is_a_view and fn_id != "one_output":
with self.assertRaisesRegex(RuntimeError,
"a view of a leaf Variable that requires grad "
"is being used in an in-place operation."):
fn(a, b).backward()
else:
fn(a, b).backward()
expected_called = 1
expected_ga_nz = True
expected_warning = False
if output_is_a_view and inplace:
expected_called = 0
expected_warning = True
self.assertTrue(bw_called[0] == expected_called)
self.assertTrue(ga_nz[0] == expected_ga_nz)
self.assertTrue((len(w) == 1) == expected_warning)
def test_autograd_simple_views_python(self):
self._do_test_autograd_simple_views_python(torch.double)
self._do_test_autograd_simple_views_python(torch.cdouble)
def test_autograd_complex_views_python(self):
# This is not necessarily the absolute correct behavior, but this is the current
# one. This test is here to make sure that any change to this behavior is detected
# and not silent. The TODOs below mark the places with unexpected behavior.
# Note that any change in these tests will be BC-breaking and should be done carefully.
# This checks that multiple views in the forward are properly traced and how they
# behave with respect to inplace operations.
# This indicator is used to track how many times the backward function was called
bw_called = [0]
class ComplexView(Function):
@staticmethod
def forward(ctx, a, idx):
res = a.narrow(0, idx, 1)
res = a.select(0, idx)
ctx.save_for_backward(a)
ctx.idx = idx
return res
@staticmethod
def backward(ctx, grad):
bw_called[0] += 1
a, = ctx.saved_tensors
res = torch.zeros_like(a)
res.select(0, ctx.idx).copy_(grad)
return res, None
a = torch.ones(2, requires_grad=True)
idx = 1
bw_called[0] = 0
out = ComplexView.apply(a.clone(), idx)
out.sum().backward()
self.assertTrue(bw_called[0] == 1)
out = ComplexView.apply(a.clone(), idx)
with warnings.catch_warnings(record=True) as w:
out += 1
self.assertEqual(len(w), 1)
def test_autograd_inplace_views_python(self):
# This is not necessarily the absolute correct behavior, but this is the current
# one. This test is here to make sure that any change to this behavior is detected
# and not silent. The TODOs below mark the places with unexpected behavior.
# Note that any change in these tests will be BC-breaking and should be done carefully.
# This test checks custom autograd.Function that perform inplace operations
bw_called = [0]
# I) Single output
class MyAdder(Function):
@staticmethod
def forward(ctx, a, b):
a.add_(b)
ctx.mark_dirty(a)
return a
@staticmethod
def backward(ctx, grad):
bw_called[0] += 1
return grad, grad
a = torch.ones(2, requires_grad=True)
b = torch.ones(2, requires_grad=True)
# No extra inplace
c = MyAdder.apply(a.clone(), b)
c.sum().backward()
self.assertTrue(bw_called[0] == 1)
# With extra inplace on the output
bw_called[0] = 0
c = MyAdder.apply(a.clone(), b)
c += 2
c.sum().backward()
self.assertTrue(bw_called[0] == 1)
# The input is a view
bw_called[0] = 0
c = MyAdder.apply(a.clone().view_as(a), b)
c.sum().backward()
self.assertTrue(bw_called[0] == 1)
# Should not give non-inputs to mark_dirty
class MyAdderBad(Function):
@staticmethod
def forward(ctx, a, b):
c = 3 * a
c.add_(b)
ctx.mark_dirty(c)
return c
@staticmethod
def backward(ctx, grad):
bw_called[0] += 1
grad = 3 * grad
return grad, grad
a = torch.ones(2, requires_grad=True)
b = torch.ones(2, requires_grad=True)
with warnings.catch_warnings(record=True) as w:
MyAdderBad.apply(a.clone(), b)
self.assertEqual(len(w), 1)
# II) Multiple outputs
class MyBadAdder(Function):
@staticmethod
def forward(ctx, a, b):
a.add_(b)
ctx.mark_dirty(a)
return a, a + b
@staticmethod
def backward(ctx, ga, gab):
bw_called[0] += 1
return ga + gab, ga + gab
# No extra inplace
bw_called[0] = 0
c, d = MyBadAdder.apply(a.clone(), b)
(c * d).sum().backward()
self.assertTrue(bw_called[0] == 1)
# With extra inplace on the output
bw_called[0] = 0
c, d = MyBadAdder.apply(a.clone(), b)
c += 2
(c * d).sum().backward()
self.assertTrue(bw_called[0] == 1)
# The input is a view
inplace_on_view_err = "your Function modifies inplace an input that is a view of another Tensor"
with self.assertRaisesRegex(RuntimeError, inplace_on_view_err):
c, d = MyBadAdder.apply(a.clone().view_as(a), b)
# III) Inplace + other op
class MyOutPlaceAdder(Function):
@staticmethod
def forward(ctx, a, b):
a.add_(b)
ctx.mark_dirty(a)
return a.clone(), a + b
@staticmethod
def backward(ctx, ga, gab):
bw_called[0] += 1
return ga + gab, ga + 2 * gab
# We don't reuse the input
def fn(a, b):
orig_a = a.clone().view_as(a)
c, d = MyOutPlaceAdder.apply(orig_a, b)
return (c * d).sum()
bad_mark_dirty_err = "Some elements marked as dirty during the forward method were not returned as output."
with self.assertRaisesRegex(RuntimeError, bad_mark_dirty_err):
fn(a, b)
def test_named_tensor_for_complex_views(self):
names = ["batch", "height", "width", "complex"]
z = torch.ones((5, 12, 14, 2), requires_grad=True)
z_named = z.refine_names(*names)
z_complex = torch.view_as_complex(z_named.rename(None)).refine_names(*names[:-1])
z_complex.sum().backward()
self.assertEqual(z.grad, torch.view_as_real(torch.ones_like(z_complex).rename(None)))
def test_custom_function_return_view_in_nograd(self):
class Alias(Function):
@staticmethod
def forward(ctx, x):
return x[:]
@staticmethod
def backward(ctx, gx):
return gx
inp = torch.rand(2, requires_grad=True)
with torch.no_grad():
output = Alias.apply(inp)
with torch.no_grad():
expected_output = inp[:]
# Calling the custom function should operate as if we called an equivalent op
self.assertEqual(output.requires_grad, expected_output.requires_grad)
# Check that in-place modification on view throws
leaf_grad_err = "A view was created in no_grad mode and is being modified inplace"
with self.assertRaisesRegex(RuntimeError, leaf_grad_err):
output.zero_()
def test_grad_mode_restored_reentrant(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, inp):
return inp.clone()
@staticmethod
def backward(ctx, go):
original = torch._C.is_grad_enabled()
with torch.enable_grad():
self.assertTrue(torch._C.is_grad_enabled())
foo = torch.rand(go.size(), requires_grad=True)
grad, = torch.autograd.grad(
foo ** 3, foo, grad_outputs=go
)
self.assertTrue(torch._C.is_grad_enabled())
self.assertTrue(torch._C.is_grad_enabled() == original)
return grad
inp = torch.rand(3, requires_grad=True)
# Case where original==False
MyFunction.apply(inp).sum().backward()
# Case where original==True
MyFunction.apply(inp).sum().backward(create_graph=True)
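# Gradient of a ** b w.r.t. the exponent when the base is zero: -inf for negative
# exponents and 0 otherwise; also covers the Python-scalar base overload (s ** b).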
def test_power_function(self):
a = torch.tensor([0., 0., 0.])
b = torch.tensor([-1., 0., 1.], requires_grad=True)
c = torch.sum(a**b)
c.backward()
self.assertEqual(b.grad, torch.tensor([-inf, 0., 0.]))
s = 0
b = torch.tensor([-1., 0., 1.], requires_grad=True)
c = torch.sum(s**b)
c.backward()
self.assertEqual(b.grad, torch.tensor([-inf, 0., 0.]))
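# gradcheck/gradgradcheck nansum on inputs containing NaNs, over various dim/keepdim
# combinations.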
def test_nansum_with_nans(self):
a = torch.randn(2, 2, 2, 2)
with torch.no_grad():
a[a < 0.2] = float('nan')
a.requires_grad = True
# No args
gradcheck(lambda x: x.nansum(), a)
gradgradcheck(lambda x: x.nansum(), a)
# Single dim
gradcheck(lambda x: x.nansum((0)), a)
gradgradcheck(lambda x: x.nansum((0)), a)
# Multi dim
gradcheck(lambda x: x.nansum((0, 2)), a)
gradgradcheck(lambda x: x.nansum((0, 2)), a)
gradcheck(lambda x: x.nansum((0, -1)), a)
gradgradcheck(lambda x: x.nansum((0, -1)), a)
# With keep-dim
gradcheck(lambda x: x.nansum((0, -1), True), a)
gradgradcheck(lambda x: x.nansum((0, -1), True), a)
def test_nansum_dtype(self):
inp = torch.randn(2, 2, 2, 2)
with torch.no_grad():
inp[inp < 0.2] = float('nan')
def test(inp, inp_dtype, out_dtype):
with torch.no_grad():
a = inp.to(inp_dtype)
a.requires_grad = True
b = torch.nansum(a, dtype=out_dtype)
b.backward()
self.assertEqual(a.dtype, a.grad.dtype)
test(inp, torch.float, torch.double)
test(inp, torch.double, torch.float)
def test_nan_to_num(self):
a = torch.randn(3, 3, 3, 3)
with torch.no_grad():
a[torch.rand_like(a) < 0.2] = float('nan')
a[torch.rand_like(a) < 0.2] = float('inf')
a[torch.rand_like(a) < 0.2] = -float('inf')
a.requires_grad = True
gradcheck(lambda x: x.nan_to_num(), a)
gradgradcheck(lambda x: x.nan_to_num(), a)
gradcheck(lambda x: x.nan_to_num(nan=1.2), a)
gradgradcheck(lambda x: x.nan_to_num(nan=1.2), a)
gradcheck(lambda x: x.nan_to_num(nan=1.2, posinf=2.0), a)
gradgradcheck(lambda x: x.nan_to_num(nan=1.2, posinf=2.0), a)
gradcheck(lambda x: x.nan_to_num(nan=1.2, posinf=2.0, neginf=-2.0), a)
gradgradcheck(lambda x: x.nan_to_num(nan=1.2, posinf=2.0, neginf=-2.0), a)
gradcheck(lambda x: x.nan_to_num(posinf=2.0, neginf=-2.0), a)
gradgradcheck(lambda x: x.nan_to_num(posinf=2.0, neginf=-2.0), a)
gradcheck(lambda x: x.nan_to_num(neginf=-2.0), a)
gradgradcheck(lambda x: x.nan_to_num(neginf=-2.0), a)
def test_custom_function_error(self):
class BadFw(Function):
@staticmethod
def backward(ctx, foo):
return foo
class BadBw(Function):
@staticmethod
def forward(ctx, foo):
return foo.clone()
inp = torch.rand(1, requires_grad=True)
with self.assertRaisesRegex(NotImplementedError, "must implement the forward"):
BadFw.apply(inp)
with self.assertRaisesRegex(RuntimeError, "must implement the backward"):
BadBw.apply(inp).sum().backward()
def test_custom_function_local_inplace(self):
class MyFn(torch.autograd.Function):
@staticmethod
def forward(ctx, inp, inplace):
view = inp.clone()[:3]
if inplace:
view += 2
return view
@staticmethod
def backward(ctx, grad):
return grad, None
base = torch.rand(10, requires_grad=True)
foo = MyFn.apply(base, False)
self.assertEqual(foo.grad_fn.__class__.__name__, "MyFnBackward")
foo = MyFn.apply(base, True)
self.assertEqual(foo.grad_fn.__class__.__name__, "MyFnBackward")
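# Integer-valued outputs (argmax, argsort, searchsorted, unique, ...) must never
# require grad.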
def test_integer_outputs(self):
inp = torch.rand(4, requires_grad=True)
out = inp.argmax()
self.assertFalse(out.dtype.is_floating_point)
self.assertFalse(out.requires_grad)
out = inp.argmin()
self.assertFalse(out.dtype.is_floating_point)
self.assertFalse(out.requires_grad)
out = inp.argsort()
self.assertFalse(out.dtype.is_floating_point)
self.assertFalse(out.requires_grad)
val = torch.rand((), requires_grad=True)
out = torch.searchsorted(inp, val)
self.assertFalse(out.dtype.is_floating_point)
self.assertFalse(out.requires_grad)
bins = torch.linspace(0, 1.0, steps=100, requires_grad=True)
vals = torch.rand(5, 5, requires_grad=True)
out = torch.bucketize(vals, bins)
self.assertFalse(out.dtype.is_floating_point)
self.assertFalse(out.requires_grad)
val = torch.empty(5).requires_grad_()
out = val.count_nonzero()
self.assertFalse(out.requires_grad)
def assert_only_first_requires_grad(res):
if not isinstance(res, tuple):
res = (res,)
self.assertTrue(res[0].requires_grad)
for out in res[1:]:
if out is not None:
self.assertFalse(out.requires_grad)
for sort in [True, False]:
for return_inverse in [True, False]:
for return_counts in [True, False]:
res = torch.unique(inp, sorted=sort, return_inverse=return_inverse,
return_counts=return_counts)
assert_only_first_requires_grad(res)
res = torch.unique(inp, sorted=sort, return_inverse=return_inverse,
return_counts=return_counts, dim=0)
assert_only_first_requires_grad(res)
res = torch.unique_consecutive(inp, return_inverse=return_inverse,
return_counts=return_counts)
assert_only_first_requires_grad(res)
res = torch.unique_consecutive(inp, return_inverse=return_inverse,
return_counts=return_counts, dim=0)
assert_only_first_requires_grad(res)
# Here we test the internal functions to make sure all of them are
# covered on top of the public API
res = torch._unique(inp, sorted=sort, return_inverse=return_inverse)
assert_only_first_requires_grad(res)
# This looks public but is actually manually deleted from the
# torch namespace in torch/functional.py
res = torch._VF.unique_dim(inp, dim=0, sorted=sort, return_inverse=return_inverse,
return_counts=return_counts)
assert_only_first_requires_grad(res)
# We don't test `unique_dim_consecutive` here.
# It looks public but the python binding is actually manually disabled in
# tools/autograd/gen_python_functions.py
res = torch._unique2(inp, sorted=sort, return_inverse=return_inverse,
return_counts=return_counts)
assert_only_first_requires_grad(res)
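# Module-level helpers for constructing test inputs.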
def index_perm_variable(shape, max_indices):
if not isinstance(shape, tuple):
shape = (shape,)
index = torch.randperm(max_indices).narrow(0, 0, reduce(mul, shape)).view(shape)
return index
def bernoulli_scalar():
return torch.tensor(0, dtype=torch.uint8).bernoulli_()
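# Per-test atol/rtol overrides for gradgradcheck; broadcast variants get looser
# tolerances since errors accumulate across broadcast dimensions.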
def gradgradcheck_method_precision_override(test_name):
# these are just empirical observations, we should improve
gradgradcheck_precision_override = {
'test_norm': {'atol': 2e-2, 'rtol': 1e-2},
'test_norm_1_5': {'atol': 1.5e-2, 'rtol': 1e-2},
'test_norm_3': {'atol': 5e-2, 'rtol': 1e-2},
'test_dist': {'atol': 5e-2, 'rtol': 1e-2},
'test_dist_4': {'atol': 8e-2, 'rtol': 1e-2},
}
non_broadcasted_test_name = test_name.split("_broadcast")[0]
override = gradgradcheck_precision_override.get(non_broadcasted_test_name)
if override:
if 'broadcast_lhs' in test_name or 'broadcast_rhs' in test_name:
# errors accumulated across 1 dimension
override = {'atol': override['atol'] * S, 'rtol': override['atol'] * S}
elif 'broadcast_all' in test_name:
# errors accumulated across multiple dimensions
override = {'atol': override['atol'] * S * S, 'rtol': override['atol'] * S * S}
return override
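# Shared driver: runs gradcheck and, unless excluded, gradgradcheck (with any
# per-test precision override) for a given op invocation.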
def run_grad_and_gradgrad_checks(test_case, name, test_name, apply_method, output_variable,
input_variables, run_gradgradcheck=True, check_batched_grad=True):
test_case.assertTrue(gradcheck(apply_method, input_variables, eps=1e-6, atol=PRECISION,
check_batched_grad=check_batched_grad))
if name in EXCLUDE_GRADGRADCHECK or test_name in EXCLUDE_GRADGRADCHECK_BY_TEST_NAME:
return
gradgradcheck_precision_override = gradgradcheck_method_precision_override(test_name)
if gradgradcheck_precision_override is not None:
atol = gradgradcheck_precision_override['atol']
rtol = gradgradcheck_precision_override['rtol']
test_case.assertTrue(gradgradcheck(apply_method, input_variables, None, atol=atol, rtol=rtol,
gen_non_contig_grad_outputs=True,
check_batched_grad=check_batched_grad))
else:
test_case.assertTrue(gradgradcheck(apply_method, input_variables,
gen_non_contig_grad_outputs=True,
check_batched_grad=check_batched_grad))
def run_functional_checks(test_case, test_name, name, apply_fn, run_grad_checks,
f_args_variable, f_args_tensor):
output_variable = apply_fn(*f_args_variable)
if run_grad_checks:
run_grad_and_gradgrad_checks(test_case, name, test_name, apply_fn,
output_variable, f_args_variable)
self_variable = f_args_variable[0]
if isinstance(output_variable, torch.Tensor) and output_variable.requires_grad and self_variable is not None:
output_variable.backward(randn_like(output_variable))
test_case.assertEqualTypeString(self_variable, self_variable.grad)
test_case.assertEqual(self_variable.size(), self_variable.grad.size())
# this list corresponds to ops which have separate tests defined for complex dtypes in
# common_methods_invocations.py
# test for these ops with 'complex' in variant should only run for complex and
# the tests for these ops which do not have 'complex' in variant should not run for complex
# and only run for floating point
separate_complex_tests = ['view_as_real', 'real', 'imag', 'div', 'pow', '__rdiv__', 'add', 'sub']
# NOTE: Some non-holomorphic functions are separately tested in TestAutogradComplex until gradcheck works properly
# for non-holomorphic functions
complex_list_filter = []
# TODO: Add back 'sgn' to complex_list; removed because of Windows test failure with CUDA 11.2
# See: https://github.com/pytorch/pytorch/issues/51980
if _get_torch_cuda_version() != (11, 2):
complex_list_filter.append('sgn')
# allow list for complex
complex_list = ['t', 'view', 'reshape', 'reshape_as', 'view_as', 'roll', 'clone',
'expand', 'rot90', 'transpose',
'permute', 'squeeze', 'unsqueeze', 'resize', 'resize_as', 'tril', 'triu',
'chunk', 'split', 'split_with_sizes', 'zero_',
'eq_', 'ne_', 'add', '__radd__', 'sum', 'mul',
'__rmul__', 'dot', 'vdot', 'matmul',
'bmm', 'mv', 'ger', 'diagonal', 'fill_', 'sub',
'mean', 'inverse', 'addcmul',
'addcdiv', 'linalg.tensorinv', 'matrix_exp',
'narrow', 'swapaxes', 'swapdims', 'tensor_split',
'baddbmm', 'addbmm', 'addmv'] + complex_list_filter + separate_complex_tests
# deny list for batched grad computation
EXCLUDE_BATCHED_GRAD_TESTS = set([
'test_to_sparse',
])
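# Generates a test method per (op, variant, dtype) combination and attaches it to
# TestAutogradDeviceType; each generated test checks the op's output, runs
# grad/gradgrad checks, exercises the functional interface, and compares against the
# in-place variant when one exists.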
def add_test(
name,
self_size,
args,
variant_name='',
check_ad=(), # only used in test_jit
dim_args_idx=(),
skipTestIf=(),
output_process_fn=lambda x: x,
kwargs=None):
kwargs = kwargs if kwargs else {}
basic_test_name = 'test_' + name
if variant_name != '':
basic_test_name += '_' + variant_name
if name in separate_complex_tests and 'complex' in variant_name:
run_only_complex = True
else:
run_only_complex = False
for dtype in [torch.double, torch.cdouble]:
for dim_perm in product([-1, 1], repeat=len(dim_args_idx)):
test_name = basic_test_name
new_args = [arg * dim_perm[dim_args_idx.index(i)] if i in dim_args_idx else arg for i, arg in enumerate(args)]
test_name = basic_test_name + ''.join('_neg' + str(i) for i, idx in enumerate(dim_perm) if idx < 0)
if dtype.is_complex:
# TODO: remove this. this is temporary while we ramp up the complex support.
if name in complex_list:
if name in separate_complex_tests and 'complex' not in variant_name:
continue
if not run_only_complex:
test_name = test_name + '_complex'
else:
continue
elif run_only_complex:
continue
new_args = tuple(new_args)
# for-loop bodies don't define scopes, so we have to save the variables
# we want to close over in some way
def do_test(self, device, dtype=dtype, name=name, self_size=self_size, args=new_args, test_name=test_name,
output_process_fn=output_process_fn):
def check(name):
is_magic_method = name[:2] == '__' and name[-2:] == '__'
is_inplace = name[-1] == "_" and not is_magic_method
self_variable = create_input((self_size,), dtype=dtype, device=device)[0][0]
# FixMe: run grad checks on inplace self
if is_inplace:
self_variable.requires_grad = False
# need to record this because methods can change the size (e.g. unsqueeze)
args_variable, kwargs_variable = create_input(args, requires_grad=not is_inplace,
call_kwargs=kwargs, dtype=dtype, device=device)
self_tensor = deepcopy(self_variable)
args_tensor = deepcopy(unpack_variables(args_variable))
if not exclude_tensor_method(name, test_name):
output_variable = getattr(self_variable, name)(*args_variable, **kwargs_variable)
output_tensor = getattr(self_tensor, name)(*args_tensor, **kwargs_variable)
if not isinstance(output_tensor, torch.Tensor) and not isinstance(output_tensor, tuple):
if dtype.is_complex:
output_tensor = torch.tensor((output_tensor, ), dtype=torch.cfloat, device=device)
else:
output_tensor = torch.tensor((output_tensor, ), dtype=torch.float, device=device)
self.assertEqual(unpack_variables(output_variable), output_tensor)
# TODO: check that both have changed after adding all inplace ops
def fn(*inputs):
output = getattr(inputs[0], name)(*inputs[1:], **kwargs)
return output_process_fn(output)
if not is_inplace and name not in EXCLUDE_GRADCHECK:
check_batched_grad = test_name not in EXCLUDE_BATCHED_GRAD_TESTS
run_grad_and_gradgrad_checks(self, name, test_name, fn,
output_variable, (self_variable,) + args_variable,
check_batched_grad=check_batched_grad)
# functional interface tests
torch_fn = getattr_qualified(torch, name)
if torch_fn is not None and name not in EXCLUDE_FUNCTIONAL:
def fn(*inputs):
output = torch_fn(*inputs, **kwargs)
return output_process_fn(output)
f_args_variable = (self_variable,) + args_variable
f_args_tensor = (self_tensor,) + args_tensor
# could run the gradchecks again, but skip since we did it for the methods above.
run_gradcheck = exclude_tensor_method(name, test_name) and not is_inplace and name not in EXCLUDE_GRADCHECK
run_functional_checks(self, test_name, name, fn,
run_gradcheck, f_args_variable, f_args_tensor)
# check for correct type of input and input.grad
if not is_inplace:
self_variable = create_input((self_size,), requires_grad=True, dtype=dtype)[0][0]
args_variable, kwargs_variable = create_input(args, requires_grad=False, call_kwargs=kwargs, dtype=dtype)
if hasattr(self_variable, name):
attribute_result = getattr(self_variable, name)
if callable(attribute_result):
output_variable = attribute_result(*args_variable, **kwargs_variable)
else:
self.assertTrue(len(args_variable) == 0)
self.assertTrue(len(kwargs_variable) == 0)
output_variable = attribute_result
else:
self_and_args_variable = (self_variable,) + args_variable
output_variable = torch_fn(*self_and_args_variable, **kwargs_variable)
if isinstance(output_variable, torch.autograd.Variable):
if output_variable.is_sparse:
rand = randn_like(output_variable.to_dense()).to_sparse()
else:
rand = randn_like(output_variable)
output_variable.backward(rand)
self.assertTrue(type(self_variable) == type(self_variable.grad))
self.assertTrue(self_variable.size() == self_variable.grad.size())
# compare grads to inplace grads
inplace_name = name + '_'
# can't broadcast inplace to left hand side
skip_inplace = ('broadcast_lhs' in test_name or
'broadcast_all' in test_name or
'atanh' in test_name or
'acosh' in test_name or
'asinh' in test_name or
'abs_complex' in test_name or
'abs_scalar_complex' in test_name)
if hasattr(torch.ones(1), inplace_name) and not skip_inplace:
output_variable = getattr(self_variable, name)(*args_variable, **kwargs_variable)
if not isinstance(output_variable, tuple):
output_variable = (output_variable,)
inplace_self_variable = deepcopy(self_variable)
inplace_self_variable_copy = tuple(i.clone() if isinstance(i, torch.Tensor) else i
for i in (inplace_self_variable,))
inplace_args_variable = deepcopy(args_variable)
inplace_args_variable_copy = tuple(i.clone() if isinstance(i, torch.Tensor) else i
for i in inplace_args_variable)
inplace_output_variable = (
getattr(inplace_self_variable_copy[0], inplace_name)(*inplace_args_variable_copy,
**kwargs_variable))
if not isinstance(inplace_output_variable, tuple):
inplace_output_variable = (inplace_output_variable,)
self.assertEqual(inplace_output_variable, output_variable)
# Check that gradient is the same
for inp_i, i in zip((inplace_self_variable,) + inplace_args_variable,
(self_variable,) + args_variable):
if not isinstance(inp_i, torch.Tensor):
assert not isinstance(i, torch.Tensor)
continue
if inp_i.grad is not None:
with torch.no_grad():
inp_i.grad.zero_()
if i.grad is not None:
with torch.no_grad():
i.grad.zero_()
for i_o, o in zip(inplace_output_variable, output_variable):
if dtype.is_complex:
grad = randn_like(i_o).to(torch.cdouble)
else:
grad = randn_like(i_o).double()
i_o.backward(grad)
o.backward(grad)
for inp_i, i in zip((inplace_self_variable,) + inplace_args_variable,
(self_variable,) + args_variable):
if not isinstance(inp_i, torch.Tensor):
continue
self.assertEqual(inp_i.grad, i.grad)
check(name)
inplace_name = name + '_'
# can't broadcast inplace to left hand side
broadcast_skip_inplace = 'broadcast_lhs' in test_name or 'broadcast_all' in test_name
# skip C -> R inplace tests
skip_c_to_r_inplace = 'abs_complex' in test_name or 'abs_scalar_complex' in test_name
skip_inplace = broadcast_skip_inplace or skip_c_to_r_inplace
if hasattr(torch.ones(1), inplace_name) and not skip_inplace:
check(inplace_name)
assert not hasattr(TestAutograd, test_name), 'Two tests have the same name: ' + test_name
for skip in skipTestIf:
do_test = skip(do_test)
setattr(TestAutogradDeviceType, test_name, do_test)
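# Tests specific to complex autograd: view_as_real/view_as_complex view tracking and
# multi-output views.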
class TestAutogradComplex(TestCase):
def test_view_func_for_complex_views(self):
# case 1: both parent and child have view_func
x = torch.randn(2, 2, 2, dtype=torch.double, requires_grad=True)
y = x.detach().requires_grad_(True)
x0 = x.clone()
x1 = torch.view_as_complex(x0)
x2 = torch.view_as_real(x1)
x2.mul_(2)
x2.sum().backward()
y0 = y.clone()
y0.mul_(2)
y0.sum().backward()
self.assertEqual(x.grad, y.grad)
# case 2: parent has view_func but child does not
x = torch.randn(2, 2, 2, dtype=torch.double, requires_grad=True)
y = x.detach().requires_grad_(True)
def fn(a):
b = a.clone()
b1 = torch.view_as_complex(b)
b2 = b1.reshape(b1.numel())
return b2
x0 = fn(x)
x0.mul_(2)
x0.sum().backward()
y0 = fn(y)
y1 = y0.mul(2)
y1.sum().backward()
self.assertEqual(x.grad, y.grad)
# case 3: parent does not have a view_func but child does
x = torch.randn(10, dtype=torch.cdouble, requires_grad=True)
y = x.detach().requires_grad_(True)
def fn(a, dim0_size=5):
b = a.clone()
b1 = b.reshape(dim0_size, 2)
b2 = torch.view_as_real(b1)
return b2
x0 = fn(x)
x0.mul_(2)
x0.sum().backward()
y0 = fn(y)
y1 = y0.mul(2)
y1.sum().backward()
self.assertEqual(x.grad, y.grad)
def test_view_with_multi_output(self):
x = torch.randn(2, 2, 2, dtype=torch.double)
x1 = torch.view_as_complex(x)
# Taking an invalid view should always be allowed as long as it is not
# modified inplace
res = x1.unbind(0)
with self.assertRaisesRegex(RuntimeError, "output of a function that returns multiple views"):
res[0] += torch.rand(2, requires_grad=True)
x.requires_grad_(True)
x1 = torch.view_as_complex(x)
# Taking an invalid view should always be allowed as long as it is not
# modified inplace
res = x1.unbind(0)
with self.assertRaisesRegex(RuntimeError, "output of a function that returns multiple views"):
res[0] += torch.rand(2, requires_grad=True)
def as_identity(self):
# view_as_real and view_as_complex behavior should be like an identity
def func(z):
z_ = torch.view_as_complex(z)
z_select = torch.select(z_, z_.dim() - 1, 0)
z_select_real = torch.view_as_real(z_select)
return z_select_real.sum()
z = torch.randn(10, 2, 2, dtype=torch.double, requires_grad=True)
gradcheck(func, [z])
func(z).backward()
z1 = z.clone().detach().requires_grad_(True)
torch.select(z1, z1.dim() - 2, 0).sum().backward()
self.assertEqual(z.grad, z1.grad)
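# Tests for the torch.autograd.functional API (used here as autogradF), starting with
# vjp.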
class TestAutogradFunctional(TestCase):
def _assert_same_struct(self, res, base):
# base and res should be Tensors or tuple of Tensors with the same size
if isinstance(base, torch.Tensor):
self.assertTrue(isinstance(res, torch.Tensor))
self.assertEqual(base.size(), res.size())
elif isinstance(base, tuple):
self.assertTrue(isinstance(res, tuple))
self.assertEqual(len(base), len(res))
for el_base, el_res in zip(base, res):
self.assertTrue(isinstance(el_base, torch.Tensor))
self.assertTrue(isinstance(el_res, torch.Tensor))
self.assertEqual(el_base.size(), el_res.size())
else:
# Wrong base
raise RuntimeError("The base given to `_assert_same_struct` doesn't have"
" the right structure.")
def _assert_interleaved_struct(self, res, base1, base2):
# base1 and base2 can be Tensors or tuples of Tensors.
# If they are tuples, res should be a tuple as well.
# The indexing works as follows for base1, base2 being
# - tuple, tuple: res[i][j][k][l] = (base1[i][k], base2[j][l])
# - tuple, Tensor: res[i][k][l] = (base1[i][k], base2[l])
# - Tensor, tuple: res[i][j][l] = (base1[i], base2[j][l])
# - Tensor, Tensor: res[k][l] = (base1[k], base2[l])
if isinstance(base1, torch.Tensor) and isinstance(base2, torch.Tensor):
self.assertTrue(isinstance(res, torch.Tensor))
self.assertEqual(res.size(), base1.size() + base2.size())
elif isinstance(base1, tuple) and isinstance(base2, torch.Tensor):
self.assertTrue(isinstance(res, tuple))
self.assertEqual(len(res), len(base1))
for el_res, el_base1 in zip(res, base1):
self.assertTrue(isinstance(el_res, torch.Tensor))
self.assertTrue(isinstance(el_base1, torch.Tensor))
self.assertEqual(el_res.size(), el_base1.size() + base2.size())
elif isinstance(base1, torch.Tensor) and isinstance(base2, tuple):
self.assertTrue(isinstance(res, tuple))
self.assertEqual(len(res), len(base2))
for el_res, el_base2 in zip(res, base2):
self.assertTrue(isinstance(el_res, torch.Tensor))
self.assertTrue(isinstance(el_base2, torch.Tensor))
self.assertEqual(el_res.size(), base1.size() + el_base2.size())
elif isinstance(base1, tuple) and isinstance(base2, tuple):
self.assertTrue(isinstance(res, tuple))
self.assertEqual(len(res), len(base1))
for el_res, el_base1 in zip(res, base1):
self.assertTrue(isinstance(el_res, tuple))
self.assertEqual(len(el_res), len(base2))
for el_el_res, el_base2 in zip(el_res, base2):
self.assertTrue(isinstance(el_el_res, torch.Tensor))
self.assertTrue(isinstance(el_base2, torch.Tensor))
self.assertEqual(el_el_res.size(), el_base1.size() + el_base2.size())
else:
# Wrong bases
raise RuntimeError("The bases given to `_assert_interleaved_struct` don't have"
" the right structure.")
def test_vjp_err_check(self):
def foo(a):
return 3 * a.narrow(0, 0, 3)
def bar(a):
return 3 * a.narrow(0, 0, 3), "bar"
inp = torch.rand(4)
v = torch.ones(3)
with self.assertRaisesRegex(TypeError, "The inputs given to vjp must be either a Tensor"):
res = autogradF.vjp(foo, (inp, 2), v)
with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to vjp must"):
res = autogradF.vjp(bar, inp, v)
with self.assertRaisesRegex(RuntimeError, "The vector v can only be None if the user-provided function returns"):
res = autogradF.vjp(foo, inp)
with self.assertRaisesRegex(RuntimeError, "The given v should contain a single Tensor."):
res = autogradF.vjp(foo, inp, (torch.ones_like(inp), torch.ones_like(inp)))
with self.assertRaisesRegex(RuntimeError, "v has invalid size: should be torch.Size"):
res = autogradF.vjp(foo, inp, v[:2])
res = autogradF.vjp(foo, inp, v)[1]
self._assert_same_struct(res, inp)
def test_vjp_err_check_strict(self):
def foo(a):
return a.detach()
def bar(a):
# Make a non-leaf Tensor that requires_grad but that is not connected to the input
return a.long().float().requires_grad_().clone()
inp = torch.rand(4)
v = torch.rand(4)
with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."):
res = autogradF.vjp(foo, inp, v, strict=True)
res = autogradF.vjp(foo, inp, v, strict=False)
self._assert_same_struct(res[1], inp)
self.assertEqual(res[1].abs().sum(), 0.)
with self.assertRaisesRegex(RuntimeError, "The output of the user-provided function is independent of input 0"):
res = autogradF.vjp(bar, inp, v, strict=True)
res = autogradF.vjp(bar, inp, v, strict=False)
self._assert_same_struct(res[1], inp)
self.assertEqual(res[1].abs().sum(), 0.)
# The Jacobian does not depend on the input
def foo(a):
return a.clone()
inp.requires_grad_()
with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function is independent of input 0."):
res = autogradF.vjp(foo, inp, v, create_graph=True, strict=True)
res = autogradF.vjp(foo, inp, v, create_graph=True, strict=False)
self._assert_same_struct(res[1], inp)
self.assertEqual(res[1], v)
def test_vjp_no_grad(self):
def reducer(x):
return x.sum(dim=1)
inputs = torch.rand(4, 4)
v = torch.ones(4)
with torch.no_grad():
res = autogradF.vjp(reducer, inputs, v)
self.assertIsNone(res[0].grad_fn)
self.assertIsNone(res[1].grad_fn)
self.assertNotEqual(res[1], torch.zeros(4, 4))
inputs.requires_grad_()
v.requires_grad_()
with torch.no_grad():
res = autogradF.vjp(reducer, inputs, v, create_graph=True)
self.assertIsNotNone(res[0].grad_fn)
self.assertIsNotNone(res[1].grad_fn)
self.assertNotEqual(res[1], torch.zeros(4, 4))
def test_vjp_output(self):
def reducer(x):
return x.sum(dim=1)
inputs = torch.rand(4, 4)
v = torch.ones(4)
res = autogradF.vjp(reducer, inputs, v)
self._assert_same_struct(res[1], inputs)
self.assertIsNone(res[0].grad_fn)
self.assertIsNone(res[1].grad_fn)
def adder(x, y):
return 2 * x + 3 * y
inputs = (torch.rand(2), torch.rand(2))
v = torch.ones(2)
out, vjp_val = autogradF.vjp(adder, inputs, v)
self._assert_same_struct(vjp_val, inputs)
self.assertIsNone(out.grad_fn)
self.assertIsNone(vjp_val[0].grad_fn)
self.assertIsNone(vjp_val[1].grad_fn)
def adder(x, y):
return 2 * x + 3 * y, x + y
inputs = (torch.rand(2), torch.rand(2))
v = (torch.tensor([1., 0.]), torch.tensor([1., 0.]))
out, vjp_val = autogradF.vjp(adder, inputs, v)
self._assert_same_struct(vjp_val, inputs)
self.assertIsNone(out[0].grad_fn)
self.assertIsNone(out[1].grad_fn)
self.assertIsNone(vjp_val[0].grad_fn)
self.assertIsNone(vjp_val[1].grad_fn)
def test_vjp_scalar(self):
def reducer(x):
return x.sum()
inputs = torch.rand(4, 4)
v = torch.ones([])
res = autogradF.vjp(reducer, inputs, v)
self._assert_same_struct(res[0], v)
self._assert_same_struct(res[1], inputs)
res = autogradF.vjp(reducer, inputs)
self._assert_same_struct(res[0], v)
self._assert_same_struct(res[1], inputs)
def expander(x):
return x.unsqueeze(0).repeat(4)
inputs = torch.rand([])
v = torch.ones(4)
res = autogradF.vjp(expander, inputs, v)
self._assert_same_struct(res[0], v)
self._assert_same_struct(res[1], inputs)
def test_vjp_create_graph(self):
def reducer(x):
return x.sum(dim=1)
inputs = torch.rand(2, 2)
v = torch.ones(2)
inputs.requires_grad_()
v.requires_grad_()
res = autogradF.vjp(reducer, inputs, v, create_graph=True)
self._assert_same_struct(res[1], inputs)
self.assertIsNotNone(res[0].grad_fn)
self.assertIsNotNone(res[1].grad_fn)
gradcheck(lambda inp, v: autogradF.vjp(reducer, inp, v, create_graph=True), (inputs, v))
gradgradcheck(lambda inp, v: autogradF.vjp(reducer, inp, v, create_graph=True), (inputs, v))
def adder(x, y):
return 2 * x + 3 * y, x * y
inputs = (torch.rand(2, requires_grad=True), torch.rand(2, requires_grad=True))
v = (torch.tensor([1., 0.], requires_grad=True), torch.tensor([1., 0.], requires_grad=True))
gradcheck(lambda *args: autogradF.vjp(adder, args[:2], args[2:], create_graph=True)[1], inputs + v)
gradgradcheck(lambda *args: autogradF.vjp(adder, args[:2], args[2:], create_graph=True)[1], inputs + v)
def foo(*args):
x, y = args[:2]
v = args[2:]
x = x.cos()
val, grad = autogradF.vjp(adder, (x, y), v, create_graph=True)
return val[0].exp() + val[1].exp() + grad[0].exp() + grad[1].exp() + x.exp() + y.exp()
gradcheck(foo, inputs + v)
gradgradcheck(foo, inputs + v)
def test_jvp_err_check(self):
def foo(a):
return 3 * a.narrow(0, 0, 3)
def bar(a):
return 3 * a.narrow(0, 0, 3), "bar"
inp = torch.rand(4)
v = torch.rand(4)
with self.assertRaisesRegex(TypeError, "The inputs given to jvp must be either a Tensor"):
res = autogradF.jvp(foo, (inp, 2), v)
with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to jvp must"):
res = autogradF.jvp(bar, inp, v)
with self.assertRaisesRegex(RuntimeError, "The vector v can only be None if the input to the user-provided function"):
res = autogradF.jvp(foo, inp)
with self.assertRaisesRegex(RuntimeError, "The given v should contain a single Tensor."):
res = autogradF.jvp(foo, inp, (v, v))
with self.assertRaisesRegex(RuntimeError, "v has invalid size: should be torch.Size"):
res = autogradF.jvp(foo, inp, v[:2])
res = autogradF.jvp(foo, inp, v)[1]
self._assert_same_struct(res, foo(inp))
def test_jvp_err_check_strict(self):
def foo(a):
return a.detach()
def bar(a):
# Make a non-leaf Tensor that requires_grad but that is not connected to the input
return a.long().float().requires_grad_().clone()
inp = torch.rand(4)
v = torch.rand(4)
with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."):
res = autogradF.jvp(foo, inp, v, strict=True)
res = autogradF.jvp(foo, inp, v, strict=False)
self._assert_same_struct(res[1], res[0])
self.assertEqual(res[1].abs().sum(), 0.)
with self.assertRaisesRegex(RuntimeError, "The output of the user-provided function is independent of input 0"):
res = autogradF.jvp(bar, inp, v, strict=True)
res = autogradF.jvp(bar, inp, v, strict=False)
self._assert_same_struct(res[1], res[0])
self.assertEqual(res[1].abs().sum(), 0.)
# The Jacobian does not depend on the input
def foo(a):
return a.clone()
inp.requires_grad_()
with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function is independent of input 0."):
res = autogradF.jvp(foo, inp, v, create_graph=True, strict=True)
res = autogradF.jvp(foo, inp, v, create_graph=True, strict=False)
self._assert_same_struct(res[1], inp)
self.assertEqual(res[1], v)
def test_jvp_no_grad(self):
def reducer(x):
return x.sum(dim=1)
inputs = torch.rand(4, 4)
v = torch.ones(4, 4)
with torch.no_grad():
res = autogradF.jvp(reducer, inputs, v)
self.assertIsNone(res[0].grad_fn)
self.assertIsNone(res[1].grad_fn)
self.assertNotEqual(res[1], torch.zeros(4, 4))
inputs.requires_grad_()
v.requires_grad_()
with torch.no_grad():
res = autogradF.jvp(reducer, inputs, v, create_graph=True)
self.assertIsNotNone(res[0].grad_fn)
self.assertIsNotNone(res[1].grad_fn)
self.assertNotEqual(res[1], torch.zeros(4, 4))
def test_jvp_output(self):
def reducer(x):
return x.sum(dim=1)
inputs = torch.rand(4, 4)
v = torch.ones(4, 4)
res = autogradF.jvp(reducer, inputs, v)
self._assert_same_struct(res[1], res[0])
self.assertIsNone(res[0].grad_fn)
self.assertIsNone(res[1].grad_fn)
def adder(x, y):
return 2 * x + 3 * y
inputs = (torch.rand(2), torch.rand(2))
v = (torch.ones(2), torch.ones(2))
out, jvp_val = autogradF.jvp(adder, inputs, v)
self._assert_same_struct(jvp_val, out)
self.assertIsNone(out.grad_fn)
self.assertIsNone(jvp_val[0].grad_fn)
self.assertIsNone(jvp_val[1].grad_fn)
def adder(x, y):
return 2 * x + 3 * y, x + y
inputs = (torch.rand(2), torch.rand(2))
v = (torch.tensor([1., 0.]), torch.tensor([1., 0.]))
out, jvp_val = autogradF.jvp(adder, inputs, v)
self._assert_same_struct(jvp_val, out)
self.assertIsNone(out[0].grad_fn)
self.assertIsNone(out[1].grad_fn)
self.assertIsNone(jvp_val[0].grad_fn)
self.assertIsNone(jvp_val[1].grad_fn)
def test_jvp_scalar(self):
def reducer(x):
return x.sum()
inputs = torch.rand(4, 4)
v = torch.ones(4, 4)
res = autogradF.jvp(reducer, inputs, v)
self._assert_same_struct(res[0], torch.zeros([]))
self._assert_same_struct(res[1], res[0])
def expander(x):
return x.unsqueeze(0).repeat(4)
inputs = torch.rand([])
v = torch.ones([])
res = autogradF.jvp(expander, inputs, v)
self._assert_same_struct(res[0], torch.zeros(4))
self._assert_same_struct(res[1], res[0])
res = autogradF.jvp(expander, inputs)
self._assert_same_struct(res[0], torch.zeros(4))
self._assert_same_struct(res[1], res[0])
def test_jvp_create_graph(self):
def reducer(x):
return x.sum(dim=1)
inputs = torch.rand(2, 2)
v = torch.ones(2, 2)
inputs.requires_grad_()
v.requires_grad_()
res = autogradF.jvp(reducer, inputs, v, create_graph=True)
self._assert_same_struct(res[1], res[0])
self.assertIsNotNone(res[0].grad_fn)
self.assertIsNotNone(res[1].grad_fn)
gradcheck(lambda inp, v: autogradF.jvp(reducer, inp, v, create_graph=True), (inputs, v))
gradgradcheck(lambda inp, v: autogradF.jvp(reducer, inp, v, create_graph=True), (inputs, v))
def adder(x, y):
return 2 * x + 3 * y, x * y
inputs = (torch.rand(2, requires_grad=True), torch.rand(2, requires_grad=True))
v = (torch.tensor([1., 0.], requires_grad=True), torch.tensor([1., 0.], requires_grad=True))
gradcheck(lambda *args: autogradF.jvp(adder, args[:2], args[2:], create_graph=True)[1], inputs + v)
gradgradcheck(lambda *args: autogradF.jvp(adder, args[:2], args[2:], create_graph=True)[1], inputs + v)
def foo(*args):
x, y = args[:2]
v = args[2:]
x = x.cos()
val, grad = autogradF.jvp(adder, (x, y), v, create_graph=True)
return val[0].exp() + val[1].exp() + grad[0].exp() + grad[1].exp() + x.exp() + y.exp()
gradcheck(foo, inputs + v)
gradgradcheck(foo, inputs + v)
def _test_construct_standard_basis_for(self, inputs):
numels = tuple(tensor.numel() for tensor in inputs)
results = autogradF._construct_standard_basis_for(inputs, numels)
for result, inp in zip(results, inputs):
self.assertEqual(result.dtype, inp.dtype)
self.assertEqual(result.device, inp.device)
results = torch.cat([result.to(device='cpu', dtype=torch.float)
for result in results], dim=1)
expected = torch.eye(results[0].shape[0], dtype=torch.float)
self.assertEqual(results, expected)
def test_construct_standard_basis_for(self):
test_cases = [
(torch.randn(2, 3),),
(torch.randn(1),),
(torch.randn([]),),
(torch.randn(1), torch.randn([]), torch.randn([])),
(torch.randn(2), torch.randn(3), torch.randn([])),
(torch.randn(2), torch.randn([]), torch.randn(3)),
(torch.randn(2, 3), torch.randn(3), torch.randn(3, 4, 2)),
(torch.randn(2, dtype=torch.float64), torch.randn(3, dtype=torch.float32)),
]
for inputs in test_cases:
self._test_construct_standard_basis_for(inputs)
@unittest.skipIf(not TEST_CUDA, "test requires CUDA")
def test_construct_standard_basis_for_cuda(self):
test_cases = [
(torch.randn(2), torch.randn(3, device='cuda')),
(torch.randn(3, device='cuda'), torch.randn(2)),
]
for inputs in test_cases:
self._test_construct_standard_basis_for(inputs)
def _test_vectorize_raises_no_warnings(self, api):
# vmap is an experimental prototype. When someone calls torch.vmap,
# it raises a python warning. This test checks that
# autogradF.{jacobian, hessian} don't raise that experimental prototype
# warning; it is not nice for a public-facing API to raise a warning
# no matter how it is called.
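# (For context only: at the time of writing, simply calling torch.vmap,
# e.g. torch.vmap(torch.sum)(torch.randn(3, 2)), was expected to emit that
# prototype UserWarning; the check below asserts the functional API does not.)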
def foo(a):
return (a ** 2).sum()
x = torch.randn(3)
with warnings.catch_warnings(record=True) as wa:
result = api(foo, x, vectorize=True)
self.assertEqual(len(wa), 0)
def test_jacobian_vectorize_raises_no_warnings(self):
return self._test_vectorize_raises_no_warnings(autogradF.jacobian)
def test_hessian_vectorize_raises_no_warnings(self):
return self._test_vectorize_raises_no_warnings(autogradF.hessian)
def _test_jacobian_err_check(self, vectorize):
def foo(a):
return 3 * a.narrow(0, 0, 3)
def bar(a):
return 3 * a.narrow(0, 0, 3), "bar"
inp = torch.rand(4)
with self.assertRaisesRegex(TypeError, "The inputs given to jacobian must be either a Tensor"):
res = autogradF.jacobian(foo, (inp, 2), vectorize=vectorize)
with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to jacobian must"):
res = autogradF.jacobian(bar, inp, vectorize=vectorize)
res = autogradF.jacobian(foo, inp, vectorize=vectorize)
self._assert_interleaved_struct(res, foo(inp), inp)
def foo(a, b):
return b, 3 * a.narrow(0, 0, 3)
inp = (torch.rand(4), torch.rand(5))
res = autogradF.jacobian(foo, inp, vectorize=vectorize)
self._assert_interleaved_struct(res, foo(*inp), inp)
def test_jacobian_err_check(self):
return self._test_jacobian_err_check(vectorize=False)
def test_jacobian_err_check_vectorize(self):
return self._test_jacobian_err_check(vectorize=True)
def test_jacobian_err_check_strict(self):
def foo(a):
return a.detach()
def bar(a):
# Make a non-leaf Tensor that requires_grad but that is not connected to the input
return a.long().float().requires_grad_().clone()
inp = torch.rand(4)
with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."):
res = autogradF.jacobian(foo, inp, strict=True)
res = autogradF.jacobian(foo, inp, strict=False)
self._assert_interleaved_struct(res, foo(inp), inp)
self.assertEqual(res.abs().sum(), 0.)
with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function is independent of input 0."):
res = autogradF.jacobian(bar, inp, strict=True)
res = autogradF.jacobian(bar, inp, strict=False)
self._assert_interleaved_struct(res, foo(inp), inp)
self.assertEqual(res.abs().sum(), 0.)
# The Jacobian does not depend on the input
def foo(a):
return a.clone()
inp.requires_grad_()
with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function is independent of input 0."):
res = autogradF.jacobian(foo, inp, create_graph=True, strict=True)
res = autogradF.jacobian(foo, inp, create_graph=True, strict=False)
self._assert_interleaved_struct(res, inp, inp)
self.assertEqual(res, torch.eye(4))
def test_jacobian_err_check_strict_vectorize(self):
def foo(x):
return x
inp = torch.rand(4)
with self.assertRaisesRegex(RuntimeError, "not supported together"):
res = autogradF.jacobian(foo, inp, strict=True, vectorize=True)
def test_jacobian_no_grad(self):
def exp_reducer(x):
return x.exp().sum(dim=1)
inputs = torch.rand(4, 4)
with torch.no_grad():
res = autogradF.jacobian(exp_reducer, inputs)
self.assertIsNone(res.grad_fn)
self.assertNotEqual(res, torch.zeros(4, 4))
with torch.no_grad():
res = autogradF.jacobian(exp_reducer, inputs, create_graph=True)
self.assertIsNotNone(res.grad_fn)
self.assertNotEqual(res, torch.zeros(4, 4))
def _test_jacobian_output(self, vectorize):
def exp_reducer(x):
return x.exp().sum(dim=1)
inputs = torch.rand(4, 4)
res = autogradF.jacobian(exp_reducer, inputs, vectorize=vectorize)
self._assert_interleaved_struct(res, exp_reducer(inputs), inputs)
self.assertIsNone(res.grad_fn)
def identity(x):
return x.clone()
inputs = torch.rand(4)
res = autogradF.jacobian(identity, inputs, vectorize=vectorize)
self._assert_interleaved_struct(res, identity(inputs), inputs)
self.assertIsNone(res.grad_fn)
self.assertEqual(res, torch.eye(4))
def add_exp_reducer(x, y):
return (x + y.exp()).sum(dim=1)
inputs = (torch.rand(4, 4), torch.rand(4, 4))
res = autogradF.jacobian(add_exp_reducer, inputs, vectorize=vectorize)
self._assert_interleaved_struct(res, add_exp_reducer(*inputs), inputs)
self.assertIsNone(res[0].grad_fn)
self.assertIsNone(res[1].grad_fn)
def test_jacobian_output(self):
self._test_jacobian_output(vectorize=False)
def test_jacobian_output_vectorize(self):
self._test_jacobian_output(vectorize=True)
def _test_jacobian_scalar(self, vectorize):
def reducer(x):
return x.sum()
inputs = torch.rand(4, 4)
res = autogradF.jacobian(reducer, inputs, vectorize=vectorize)
self._assert_same_struct(res, inputs)
def expander(x):
return x.unsqueeze(0).repeat(4)
inputs = torch.rand([])
res = autogradF.jacobian(expander, inputs, vectorize=vectorize)
self._assert_same_struct(res, torch.zeros(4))
def test_jacobian_scalar(self):
self._test_jacobian_scalar(vectorize=False)
def test_jacobian_scalar_vectorize(self):
self._test_jacobian_scalar(vectorize=True)
def _test_jacobian_create_graph(self, vectorize):
def exp_reducer(x):
return x.exp().sum(dim=1)
inputs = torch.rand(4, 4, requires_grad=True)
res = autogradF.jacobian(exp_reducer, inputs, create_graph=True, vectorize=vectorize)
self._assert_interleaved_struct(res, exp_reducer(inputs), inputs)
self.assertIsNotNone(res.grad_fn)
gradcheck(lambda inp: autogradF.jacobian(exp_reducer, inp, create_graph=True, vectorize=vectorize), inputs)
gradgradcheck(lambda inp: autogradF.jacobian(exp_reducer, inp, create_graph=True, vectorize=vectorize), inputs)
def add_exp_reducer(x, y):
return (x + y).exp().sum(dim=1)
inputs = (torch.rand(4, 4, requires_grad=True), torch.rand(4, 4, requires_grad=True))
res = autogradF.jacobian(add_exp_reducer, inputs, create_graph=True, vectorize=vectorize)
self._assert_interleaved_struct(res, add_exp_reducer(*inputs), inputs)
self.assertIsNotNone(res[0].grad_fn)
self.assertIsNotNone(res[1].grad_fn)
gradcheck(lambda *inp: autogradF.jacobian(add_exp_reducer, inp, create_graph=True, vectorize=vectorize), inputs)
gradgradcheck(lambda *inp: autogradF.jacobian(add_exp_reducer, inp, create_graph=True, vectorize=vectorize), inputs)
def foo(x, y):
x = x.cos()
val, jac = autogradF.jacobian(add_exp_reducer, (x, y), create_graph=True, vectorize=vectorize)
res = val[0].exp().sum() + val[1].exp().sum() + jac[0].exp().sum()
res = res + jac[1].exp().sum() + x.exp().sum() + y.exp().sum()
return res
gradcheck(foo, inputs)
gradgradcheck(foo, inputs)
def test_jacobian_create_graph(self):
self._test_jacobian_create_graph(vectorize=False)
def test_jacobian_create_graph_vectorize(self):
self._test_jacobian_create_graph(vectorize=True)
def _check_jacobian_vectorize_correctness(self, f, inputs):
expected = autogradF.jacobian(f, inputs, vectorize=False)
result = autogradF.jacobian(f, inputs, vectorize=True)
self.assertEqual(result, expected)
def test_jacobian_vectorize_correctness_simple(self):
def f(x):
return 3 * x ** 2
x = torch.randn(2, 3, 5)
self._check_jacobian_vectorize_correctness(f, x)
def test_jacobian_vectorize_correctness_multi_input(self):
def f(x, y):
return (x.cos() * x) @ y.sin()
x = torch.randn(2, 3)
y = torch.randn(3, 5)
self._check_jacobian_vectorize_correctness(f, (x, y))
def test_jacobian_vectorize_correctness_multi_input_multi_output(self):
def f(x, y):
return (x * x) @ y, x @ (x.sum(1) * y), y.sum()
x = torch.randn(5, 3)
y = torch.randn(3, 5)
self._check_jacobian_vectorize_correctness(f, (x, y))
def test_jacobian_vectorize_correctness_unrelated_outputs(self):
def f(x, y):
return x, y, x, y
x = torch.randn(2)
y = torch.randn(3)
self._check_jacobian_vectorize_correctness(f, (x, y))
def test_jacobian_vectorize_correctness_zero_dim(self):
# zero-dim output
def f(x, y):
return x.sum(), y.sum(), x * y
x = torch.randn(3)
y = torch.randn(3)
self._check_jacobian_vectorize_correctness(f, (x, y))
# zero-dim input
def g(x):
return torch.stack([x, x, x])
x = torch.randn([])
self._check_jacobian_vectorize_correctness(g, x)
# Mixed zero-dim input / zero-dim output
def h(x, y):
return y.sum(), x * y
x = torch.randn([])
y = torch.randn(1)
self._check_jacobian_vectorize_correctness(h, (x, y))
@unittest.skipIf(not TEST_CUDA, "test requires CUDA")
def test_jacobian_vectorize_correctness_different_devices(self):
def f(x, y):
return x * y, (x * y).cuda()
x = torch.randn(3)
y = torch.randn(3)
self._check_jacobian_vectorize_correctness(f, (x, y))
def test_jacobian_vectorize_correctness_different_dtype(self):
def f(x, y):
return (x * y).float(), (x * y).double()
x = torch.randn(3)
y = torch.randn(3)
self._check_jacobian_vectorize_correctness(f, (x, y))
def _check_hessian_vectorize_correctness(self, f, inputs):
expected = autogradF.hessian(f, inputs, vectorize=False)
result = autogradF.hessian(f, inputs, vectorize=True)
self.assertEqual(result, expected)
def test_hessian_vectorize_correctness_simple(self):
def f(x):
return (3 * x ** 2).sum()
x = torch.randn(2, 3, 5)
self._check_hessian_vectorize_correctness(f, x)
def test_hessian_vectorize_correctness_multi_input(self):
def f(x, y, z):
return ((x.relu() * x) @ y.sin() @ z).sum()
x = torch.randn(2, 3)
y = torch.randn(3, 5)
z = torch.randn(5, 5)
self._check_hessian_vectorize_correctness(f, (x, y, z))
def test_hessian_vectorize_correctness_unrelated_outputs(self):
# output unrelated to one input
def f(x, y):
return (x ** 2).sum()
x = torch.randn(2)
y = torch.randn(3)
self._check_hessian_vectorize_correctness(f, (x, y))
# output unrelated to all inputs
def f(x, y):
return torch.randn([])
x = torch.randn(2)
y = torch.randn(3)
self._check_hessian_vectorize_correctness(f, (x, y))
def _test_hessian_err_check(self, vectorize):
def foo(a):
return 3 * a.narrow(0, 0, 3).exp().sum()
def bar(a):
return 3 * a.narrow(0, 0, 3), "bar"
def bar2(a):
return 3 * a.narrow(0, 0, 3)
def bar3(a):
return 3 * a.narrow(0, 0, 3), 3 * a.narrow(0, 0, 3)
inp = torch.rand(4)
with self.assertRaisesRegex(TypeError, "The inputs given to hessian must be either a Tensor"):
res = autogradF.hessian(foo, (inp, 2), vectorize=vectorize)
with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to hessian must"):
res = autogradF.hessian(bar, inp, vectorize=vectorize)
err_msg_out = "The Tensor returned by the function given to hessian should contain a single element"
with self.assertRaisesRegex(RuntimeError, err_msg_out):
res = autogradF.hessian(bar2, inp, vectorize=vectorize)
with self.assertRaisesRegex(RuntimeError, "The function given to hessian should return a single Tensor"):
res = autogradF.hessian(bar3, inp, vectorize=vectorize)
res = autogradF.hessian(foo, inp, vectorize=vectorize)
self._assert_interleaved_struct(res, inp, inp)
def foo(a, b):
return (3 * b.narrow(0, 0, 3) * a.narrow(0, 0, 3)).sum()
inp = (torch.rand(4), torch.rand(5))
res = autogradF.hessian(foo, inp, vectorize=vectorize)
self._assert_interleaved_struct(res, inp, inp)
def test_hessian_err_check(self):
self._test_hessian_err_check(vectorize=False)
def test_hessian_err_check_vectorize(self):
self._test_hessian_err_check(vectorize=True)
def test_hessian_err_check_strict(self):
def foo(a):
return a.detach().sum()
def bar(a):
# Make a non-leaf Tensor that requires_grad but that is not connected to the input
return a.long().float().requires_grad_().clone().sum()
def bar2(a):
# A Linear function for which the jacobian is independent of the input
return (3 * a).sum()
inp = torch.rand(4)
with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."):
res = autogradF.hessian(foo, inp, strict=True)
res = autogradF.hessian(foo, inp, strict=False)
self._assert_interleaved_struct(res, inp, inp)
self.assertEqual(res.abs().sum(), 0.)
with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function with respect to input 0"):
res = autogradF.hessian(bar, inp, strict=True)
res = autogradF.hessian(bar, inp, strict=False)
self._assert_interleaved_struct(res, inp, inp)
self.assertEqual(res.abs().sum(), 0.)
with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function with respect to input 0 is"):
res = autogradF.hessian(bar2, inp, strict=True)
res = autogradF.hessian(bar2, inp, strict=False)
self._assert_interleaved_struct(res, inp, inp)
self.assertEqual(res.abs().sum(), 0.)
def test_hessian_err_check_strict_vectorize(self):
def foo(x):
return (x ** 3).sum()
inp = torch.rand(4)
with self.assertRaisesRegex(RuntimeError, "not supported together"):
res = autogradF.hessian(foo, inp, strict=True, vectorize=True)
def test_hessian_no_grad(self):
def pow_reducer(x):
return x.pow(3).sum()
inputs = torch.rand(2, 2)
with torch.no_grad():
res = autogradF.hessian(pow_reducer, inputs)
self.assertIsNone(res[0][0].grad_fn)
self.assertIsNone(res[0][1].grad_fn)
self.assertIsNone(res[1][0].grad_fn)
self.assertIsNone(res[1][1].grad_fn)
self.assertNotEqual(res, torch.zeros(2, 2, 2))
with torch.no_grad():
res = autogradF.hessian(pow_reducer, inputs, create_graph=True)
self.assertIsNotNone(res[0][0].grad_fn)
self.assertIsNotNone(res[0][1].grad_fn)
self.assertIsNotNone(res[1][0].grad_fn)
self.assertIsNotNone(res[1][1].grad_fn)
self.assertNotEqual(res, torch.zeros(2, 2, 2))
def _test_hessian_output(self, vectorize):
def pow_reducer(x):
return x.pow(3).sum()
inputs = torch.rand(2, 2)
res = autogradF.hessian(pow_reducer, inputs, vectorize=vectorize)
self._assert_interleaved_struct(res, inputs, inputs)
self.assertIsNone(res.grad_fn)
def add_pow_reducer(x, y):
return (x + y).pow(3).sum()
inputs = (torch.rand(2, 2), torch.rand(2, 2))
res = autogradF.hessian(add_pow_reducer, inputs, vectorize=vectorize)
self._assert_interleaved_struct(res, inputs, inputs)
self.assertIsNone(res[0][0].grad_fn)
self.assertIsNone(res[0][1].grad_fn)
self.assertIsNone(res[1][0].grad_fn)
self.assertIsNone(res[1][1].grad_fn)
def test_hessian_output(self):
self._test_hessian_output(vectorize=False)
def test_hessian_output_vectorize(self):
self._test_hessian_output(vectorize=True)
def _test_hessian_scalar(self, vectorize):
def reducer(x):
return x.sum()
inputs = torch.rand(4, 4)
res = autogradF.hessian(reducer, inputs, vectorize=vectorize)
self._assert_interleaved_struct(res, inputs, inputs)
inputs = torch.rand([])
res = autogradF.hessian(reducer, inputs, vectorize=vectorize)
self._assert_same_struct(res, inputs)
def bad_reducer(x):
return x.sum().view(1, 1, 1)
inputs = torch.rand(4, 4)
res = autogradF.hessian(bad_reducer, inputs, vectorize=vectorize)
self._assert_interleaved_struct(res, inputs, inputs)
def test_hessian_scalar(self):
return self._test_hessian_scalar(vectorize=False)
def test_hessian_scalar_vectorize(self):
return self._test_hessian_scalar(vectorize=True)
def _test_hessian_create_graph(self, vectorize):
def pow_reducer(x):
return x.pow(3).sum()
inputs = torch.rand(2, 2, requires_grad=True)
res = autogradF.hessian(pow_reducer, inputs, create_graph=True, vectorize=vectorize)
self._assert_interleaved_struct(res, inputs, inputs)
self.assertIsNotNone(res.grad_fn)
gradcheck(lambda inp: autogradF.hessian(pow_reducer, inp, create_graph=True, vectorize=vectorize), inputs)
gradgradcheck(lambda inp: autogradF.hessian(pow_reducer, inp, create_graph=True, vectorize=vectorize), inputs)
def add_pow_reducer(x, y):
return (x + y).pow(3).sum()
inputs = (torch.rand(2, 2, requires_grad=True), torch.rand(2, 2, requires_grad=True))
res = autogradF.hessian(add_pow_reducer, inputs, create_graph=True, vectorize=vectorize)
self._assert_interleaved_struct(res, inputs, inputs)
self.assertIsNotNone(res[0][0].grad_fn)
self.assertIsNotNone(res[0][1].grad_fn)
self.assertIsNotNone(res[1][0].grad_fn)
self.assertIsNotNone(res[1][1].grad_fn)
def flatten(inp):
return tuple(el_lvl2 for el_lvl1 in inp for el_lvl2 in el_lvl1)
gradcheck(lambda *inp: flatten(autogradF.hessian(add_pow_reducer, inp, create_graph=True, vectorize=vectorize)), inputs)
gradgradcheck(lambda *inp: flatten(autogradF.hessian(add_pow_reducer, inp, create_graph=True, vectorize=vectorize)), inputs)
def foo(x, y):
x = x.cos()
val, hess = autogradF.hessian(add_pow_reducer, (x, y), create_graph=True, vectorize=vectorize)
res = val[0].cos().sum() + val[1].cos().sum() + hess[0].cos().sum()
res = res + hess[1].cos().sum() + x.cos().sum() + y.cos().sum()
return res
gradcheck(foo, inputs)
gradgradcheck(foo, inputs)
def test_hessian_create_graph(self):
self._test_hessian_create_graph(vectorize=False)
def test_hessian_create_graph_vectorize(self):
self._test_hessian_create_graph(vectorize=True)
def test_vhp_err_check(self):
def foo(a):
return 3 * a.narrow(0, 0, 3).exp().sum()
def bar(a):
return 3 * a.narrow(0, 0, 3), "bar"
def bar2(a):
return 3 * a.narrow(0, 0, 3)
inp = torch.rand(4)
v = torch.rand(4)
with self.assertRaisesRegex(TypeError, "The inputs given to vhp must be either a Tensor"):
res = autogradF.vhp(foo, (inp, 2), v)
with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to vhp must"):
res = autogradF.vhp(bar, inp, v)
err_msg_out = "The Tensor returned by the function given to vhp should contain a single element"
with self.assertRaisesRegex(RuntimeError, err_msg_out):
res = autogradF.vhp(bar2, inp, v)
with self.assertRaisesRegex(RuntimeError, "v has invalid size:"):
res = autogradF.vhp(foo, inp, torch.rand(5))
with self.assertRaisesRegex(TypeError, "The v given to vhp must be either a Tensor or a tuple of Tensors"):
res = autogradF.vhp(foo, inp, (v, 2))
res = autogradF.vhp(foo, inp, v)
self._assert_same_struct(res[1], inp)
def foo(a, b):
return (3 * b.narrow(0, 0, 3) * a.narrow(0, 0, 3)).sum()
inp = (torch.rand(4), torch.rand(5))
v = (torch.rand(4), torch.rand(5))
res = autogradF.vhp(foo, inp, v)
self._assert_same_struct(res[1], inp)
def test_vhp_err_check_strict(self):
def foo(a):
return a.detach().sum()
def bar(a):
# Make a non-leaf Tensor that requires_grad but that is not connected to the input
return a.long().float().requires_grad_().clone().sum()
def bar2(a):
# A Linear function for which the jacobian is independent of the input
return (3 * a).sum()
inp = torch.rand(4)
v = torch.rand(4)
with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."):
res = autogradF.vhp(foo, inp, v, strict=True)
res = autogradF.vhp(foo, inp, v, strict=False)
self._assert_same_struct(res[1], inp)
self.assertEqual(res[1].abs().sum(), 0.)
with self.assertRaisesRegex(RuntimeError, "The output of the user-provided function is independent of input 0"):
res = autogradF.vhp(bar, inp, v, strict=True)
res = autogradF.vhp(bar, inp, v, strict=False)
self._assert_same_struct(res[1], inp)
self.assertEqual(res[1].abs().sum(), 0.)
with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function with respect to input 0 is"):
res = autogradF.vhp(bar2, inp, v, strict=True)
res = autogradF.vhp(bar2, inp, v, strict=False)
self._assert_same_struct(res[1], inp)
self.assertEqual(res[1].abs().sum(), 0.)
def test_vhp_no_grad(self):
def reducer(x):
return x.exp().sum()
inputs = torch.rand(4, 4)
v = torch.ones(4, 4)
with torch.no_grad():
res = autogradF.vhp(reducer, inputs, v)
self.assertIsNone(res[0].grad_fn)
self.assertIsNone(res[1].grad_fn)
self.assertNotEqual(res[1], torch.zeros(4, 4))
with torch.no_grad():
res = autogradF.vhp(reducer, inputs, v, create_graph=True)
self.assertIsNotNone(res[0].grad_fn)
self.assertIsNotNone(res[1].grad_fn)
self.assertNotEqual(res[1], torch.zeros(4, 4))
def test_vhp_output(self):
def foo(a):
return 3 * a.narrow(0, 0, 3).exp().sum()
inputs = torch.rand(4, 4)
v = torch.ones(4, 4)
res = autogradF.vhp(foo, inputs, v)
self._assert_same_struct(res[1], inputs)
self.assertIsNone(res[0].grad_fn)
self.assertIsNone(res[1].grad_fn)
def bar(a, b):
return (a + 3 * b.narrow(0, 0, 3)).exp().sum()
inputs = (torch.rand(3), torch.rand(4))
v = (torch.ones(3), torch.ones(4))
out, vhp_val = autogradF.vhp(bar, inputs, v)
self._assert_same_struct(vhp_val, inputs)
self.assertIsNone(out.grad_fn)
self.assertIsNone(vhp_val[0].grad_fn)
self.assertIsNone(vhp_val[1].grad_fn)
def test_vhp_scalar(self):
def reducer(x):
return x.sum()
inputs = torch.rand(4, 4)
v = torch.ones(4, 4)
res = autogradF.vhp(reducer, inputs, v)
self._assert_same_struct(res[1], inputs)
inputs = torch.rand([])
v = torch.rand([])
res = autogradF.vhp(reducer, inputs, v)
self._assert_same_struct(res[1], inputs)
res = autogradF.vhp(reducer, inputs)
self._assert_same_struct(res[1], inputs)
def bad_reducer(x):
return x.sum().view(1, 1, 1)
inputs = torch.rand(4, 4)
v = torch.rand(4, 4)
res = autogradF.vhp(bad_reducer, inputs, v)
self._assert_same_struct(res[1], inputs)
def test_vhp_create_graph(self):
def foo(a):
return 3 * a.narrow(0, 0, 3).exp().sum()
inputs = torch.rand(4, 4, requires_grad=True)
v = torch.ones(4, 4, requires_grad=True)
res = autogradF.vhp(foo, inputs, v, create_graph=True)
self._assert_same_struct(res[1], inputs)
self.assertIsNotNone(res[0].grad_fn)
self.assertIsNotNone(res[1].grad_fn)
gradcheck(lambda inp, v: autogradF.vhp(foo, inp, v, create_graph=True), (inputs, v))
gradgradcheck(lambda inp, v: autogradF.vhp(foo, inp, v, create_graph=True), (inputs, v))
def bar(a, b):
return (a + 3 * b.narrow(0, 0, 3)).exp().sum()
inputs = (torch.rand(3, requires_grad=True), torch.rand(4, requires_grad=True))
v = (torch.ones(3, requires_grad=True), torch.ones(4, requires_grad=True))
out, vhp_val = autogradF.vhp(bar, inputs, v, create_graph=True)
self._assert_same_struct(vhp_val, inputs)
self.assertIsNotNone(out.grad_fn)
self.assertIsNotNone(vhp_val[0].grad_fn)
self.assertIsNotNone(vhp_val[1].grad_fn)
gradcheck(lambda *args: autogradF.vhp(bar, args[:2], args[2:], create_graph=True)[1], inputs + v)
gradgradcheck(lambda *args: autogradF.vhp(bar, args[:2], args[2:], create_graph=True)[1], inputs + v)
def foo(*args):
x, y = args[:2]
v = args[2:]
x = x.cos()
val, grad = autogradF.vhp(bar, (x, y), v, create_graph=True)
return val.cos() + grad[0].cos().sum() + grad[1].cos() + x.cos().sum() + y.cos()
gradcheck(foo, inputs + v)
gradgradcheck(foo, inputs + v)
def test_hvp_err_check(self):
def foo(a):
return 3 * a.narrow(0, 0, 3).exp().sum()
def bar(a):
return 3 * a.narrow(0, 0, 3), "bar"
def bar2(a):
return 3 * a.narrow(0, 0, 3)
inp = torch.rand(4)
v = torch.rand(4)
res = autogradF.hvp(foo, inp, v)
with self.assertRaisesRegex(TypeError, "The inputs given to hvp must be either a Tensor"):
res = autogradF.hvp(foo, (inp, 2), v)
with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to hvp must"):
res = autogradF.hvp(bar, inp, v)
err_msg_out = "The Tensor returned by the function given to hvp should contain a single element"
with self.assertRaisesRegex(RuntimeError, err_msg_out):
res = autogradF.hvp(bar2, inp, v)
with self.assertRaisesRegex(RuntimeError, "v has invalid size:"):
res = autogradF.hvp(foo, inp, torch.rand(5))
with self.assertRaisesRegex(TypeError, "The v given to hvp must be either a Tensor or a tuple of Tensors"):
res = autogradF.hvp(foo, inp, (v, 2))
res = autogradF.hvp(foo, inp, v)
self._assert_same_struct(res[1], inp)
def foo(a, b):
return (3 * b.narrow(0, 0, 3) * a.narrow(0, 0, 3)).sum()
inp = (torch.rand(4), torch.rand(5))
v = (torch.rand(4), torch.rand(5))
res = autogradF.hvp(foo, inp, v)
self._assert_same_struct(res[1], inp)
def test_hvp_err_check_strict(self):
def foo(a):
return a.detach().sum()
def bar(a):
# Make a non-leaf Tensor that requires_grad but that is not connected to the input
return a.long().float().requires_grad_().clone().sum()
def bar2(a):
# A Linear function for which the jacobian is independent of the input
return (3 * a).sum()
inp = torch.rand(4)
v = torch.rand(4)
with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."):
res = autogradF.hvp(foo, inp, v, strict=True)
res = autogradF.hvp(foo, inp, v, strict=False)
self._assert_same_struct(res[1], inp)
self.assertEqual(res[1].abs().sum(), 0.)
with self.assertRaisesRegex(RuntimeError, "The output of the user-provided function is independent of input 0"):
res = autogradF.hvp(bar, inp, v, strict=True)
res = autogradF.hvp(bar, inp, v, strict=False)
self._assert_same_struct(res[1], inp)
self.assertEqual(res[1].abs().sum(), 0.)
with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function with respect to input 0 is"):
res = autogradF.hvp(bar2, inp, v, strict=True)
res = autogradF.hvp(bar2, inp, v, strict=False)
self._assert_same_struct(res[1], inp)
self.assertEqual(res[1].abs().sum(), 0.)
def test_hvp_no_grad(self):
def reducer(x):
return x.exp().sum()
inputs = torch.rand(4, 4)
v = torch.ones(4, 4)
with torch.no_grad():
res = autogradF.hvp(reducer, inputs, v)
self.assertIsNone(res[0].grad_fn)
self.assertIsNone(res[1].grad_fn)
self.assertNotEqual(res[1], torch.zeros(4, 4))
with torch.no_grad():
res = autogradF.hvp(reducer, inputs, v, create_graph=True)
self.assertIsNotNone(res[0].grad_fn)
self.assertIsNotNone(res[1].grad_fn)
self.assertNotEqual(res[1], torch.zeros(4, 4))
def test_hvp_output(self):
def foo(a):
return 3 * a.narrow(0, 0, 3).exp().sum()
inputs = torch.rand(4, 4)
v = torch.ones(4, 4)
res = autogradF.hvp(foo, inputs, v)
self._assert_same_struct(res[1], inputs)
self.assertIsNone(res[0].grad_fn)
self.assertIsNone(res[1].grad_fn)
def bar(a, b):
return (a + 3 * b.narrow(0, 0, 3)).exp().sum()
inputs = (torch.rand(3), torch.rand(4))
v = (torch.ones(3), torch.ones(4))
out, hvp_val = autogradF.hvp(bar, inputs, v)
self._assert_same_struct(hvp_val, inputs)
self.assertIsNone(out.grad_fn)
self.assertIsNone(hvp_val[0].grad_fn)
self.assertIsNone(hvp_val[1].grad_fn)
def test_hvp_scalar(self):
def reducer(x):
return x.exp().sum()
inputs = torch.rand(4, 4)
v = torch.ones(4, 4)
res = autogradF.hvp(reducer, inputs, v)
self._assert_same_struct(res[1], inputs)
inputs = torch.rand([])
v = torch.rand([])
res = autogradF.hvp(reducer, inputs, v)
self._assert_same_struct(res[1], inputs)
res = autogradF.hvp(reducer, inputs)
self._assert_same_struct(res[1], inputs)
def bad_reducer(x):
return x.exp().sum().view(1, 1, 1)
inputs = torch.rand(4, 4)
v = torch.rand(4, 4)
res = autogradF.hvp(bad_reducer, inputs, v)
self._assert_same_struct(res[1], inputs)
def test_hvp_create_graph(self):
def foo(a):
return 3 * a.narrow(0, 0, 3).exp().sum()
inputs = torch.rand(4, 4, requires_grad=True)
v = torch.ones(4, 4, requires_grad=True)
res = autogradF.hvp(foo, inputs, v, create_graph=True)
self._assert_same_struct(res[1], inputs)
self.assertIsNotNone(res[0].grad_fn)
self.assertIsNotNone(res[1].grad_fn)
gradcheck(lambda inp, v: autogradF.hvp(foo, inp, v, create_graph=True), (inputs, v))
gradgradcheck(lambda inp, v: autogradF.hvp(foo, inp, v, create_graph=True), (inputs, v))
def bar(a, b):
return (a + 3 * b.narrow(0, 0, 3)).exp().sum()
inputs = (torch.rand(3, requires_grad=True), torch.rand(4, requires_grad=True))
v = (torch.ones(3, requires_grad=True), torch.ones(4, requires_grad=True))
out, hvp_val = autogradF.hvp(bar, inputs, v, create_graph=True)
self._assert_same_struct(hvp_val, inputs)
self.assertIsNotNone(out.grad_fn)
self.assertIsNotNone(hvp_val[0].grad_fn)
self.assertIsNotNone(hvp_val[1].grad_fn)
gradcheck(lambda *args: autogradF.hvp(bar, args[:2], args[2:], create_graph=True)[1], inputs + v)
gradgradcheck(lambda *args: autogradF.hvp(bar, args[:2], args[2:], create_graph=True)[1], inputs + v)
def foo(*args):
x, y = args[:2]
v = args[2:]
x = x.cos()
val, grad = autogradF.hvp(bar, (x, y), v, create_graph=True)
return val.cos() + grad[0].cos().sum() + grad[1].cos() + x.cos().sum() + y.cos()
gradcheck(foo, inputs + v)
gradgradcheck(foo, inputs + v)
def test_jacobian_match_vjp_jvp(self):
def foo(x):
return x ** 3 + x.sum()
inputs = torch.rand(4)
v = torch.rand(4)
jac = autogradF.jacobian(foo, inputs)
jvp = autogradF.jvp(foo, inputs, v)[1]
vjp = autogradF.vjp(foo, inputs, v)[1]
self.assertEqual(jvp, torch.mm(jac, v.unsqueeze(1)).squeeze(1))
self.assertEqual(vjp, torch.mm(v.unsqueeze(0), jac).squeeze(0))
def test_hessian_match_vhp_hvp(self):
def foo(a):
return 3 * a.narrow(0, 0, 3).exp().sum()
inputs = torch.rand(4)
v = torch.rand(4)
hes = autogradF.hessian(foo, inputs)
hvp = autogradF.hvp(foo, inputs, v)[1]
vhp = autogradF.vhp(foo, inputs, v)[1]
self.assertEqual(hvp, torch.mm(hes, v.unsqueeze(1)).squeeze(1))
self.assertEqual(vhp, torch.mm(v.unsqueeze(0), hes).squeeze(0))
class TestAutogradForwardMode(TestCase):
def test_forward_level_cleanup(self):
import weakref
def get_tensor_and_weak_ref():
# Helper function to get a Tensor and a weak ref that tells us
# if the c++ version of this Tensor is still alive or not.
#
# Create the following reference chain to do so:
# - python Tensor t
# - c++ Tensor corresponding to t
# - c++ Node corresponding to t.grad_fn
# - python dict of metadata from this Node
# - an object in this dict that we can take a weakref of
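# (Concretely: the weakref returned below resolves to the object while the
# python Tensor t keeps this chain alive, and returns None once the c++
# Tensor, and with it the grad_fn metadata dict, has been freed.)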
# Create a new Tensor and Node
t = torch.rand(2, requires_grad=True).clone()
# Create the metadata dict
meta_dict = t.grad_fn.metadata
# Create the object in the dict
class Foo(object):
pass
my_obj = Foo()
meta_dict[0] = my_obj
# After exiting this function, the python Tensor t is the only
# thing keeping ref alive
ref = weakref.ref(my_obj)
return t, ref
# Sanity check that the helper function works as expected
t, t_ref = get_tensor_and_weak_ref()
self.assertIsNotNone(t_ref())
del t
self.assertIsNone(t_ref())
# Main test code
foo = torch.rand(2)
with fwAD.dual_level():
tangent, tangent_ref = get_tensor_and_weak_ref()
self.assertIsNotNone(tangent_ref())
dual = fwAD.make_dual(foo, tangent)
self.assertIsNotNone(tangent_ref())
# Make sure that the tangent we provided has been re-used as is
self.assertTrue(fwAD.unpack_dual(dual)[1] is tangent)
# Make sure that dual is keeping the tangent alive
del tangent
self.assertIsNotNone(tangent_ref())
# Make sure that the dual level does not keep the c++
# version of the tangent alive
del dual
self.assertIsNone(tangent_ref())
def test_size_check(self):
foo = torch.rand(2)
tangent = torch.rand(3)
with fwAD.dual_level():
with self.assertRaisesRegex(RuntimeError, "Trying to set a forward gradient that has a different size"):
dual = fwAD.make_dual(foo, tangent)
dual = fwAD.make_dual(foo, tangent[1:])
# Generic device type autograd tests.
class TestAutogradDeviceType(TestCase):
def test_min_max_median_backprops_to_all_values(self, device):
for f in [torch.min, torch.max, torch.median, torch.nanmedian]:
x1 = torch.tensor([1., 0., 1., 0., 1., 0.], device=device, requires_grad=True)
x2 = torch.tensor([float('nan'), float('nan'), float('nan')], requires_grad=True)
for x in [x1, x2]:
y = f(x)
y.backward()
self.assertEqual(x.grad.sum(), 1.)
self.assertEqual((x.grad == 1 / 3).sum(), 3)
def test_cdist(self, device):
def _test_cdist_for_size(sizex, sizey=None):
if sizey is None:
sizey = sizex
for p in [0, 1, 2, 3, 1.5, 2.5, float('inf')]:
x = torch.randn(sizex, device=device, dtype=torch.double)
y = torch.randn(sizey, device=device, dtype=torch.double)
eps = 1e-6
# nudge x away from y to avoid extrema, where the distance gradient is not well defined
x = x - (((x - y) < eps).double() * 2 * eps)
x.requires_grad = True
y.requires_grad = True
f_args_variable = (x, y)
def f(a, b):
return torch.cdist(a, b, p)
f_args_tensor = deepcopy(unpack_variables(f_args_variable))
run_functional_checks(self, "test_cdist", "cdist", f,
True, f_args_variable, f_args_tensor)
def _test_euclidean_large_cdist(sizex, sizey=None):
if sizey is None:
sizey = sizex
x = torch.randn(sizex, device=device, dtype=torch.float)
y = torch.randn(sizey, device=device, dtype=torch.float)
eps = 1e-6
# nudge x away from y to avoid extrema, where the distance gradient is not well defined
x = x - (((x - y) < eps).float() * 2 * eps)
x.requires_grad = True
y.requires_grad = True
dist = torch.cdist(x, y, p=2)
# Do a backward pass to check that it is valid for large
# matrices
loss = dist.sum()
loss.backward()
_test_cdist_for_size((S, S))
_test_cdist_for_size((S, S, S))
_test_cdist_for_size((3, 5))
_test_cdist_for_size((2, 3, 5))
_test_cdist_for_size((1, 2, 3))
_test_cdist_for_size((1, 1), (S, 1))
_test_euclidean_large_cdist((2000, 5))
# Ensure that cdist backward with p<1 does not produce NaNs
def test_cdist_grad_p_lt_1_no_nan(self, device):
for p in [0.99, 0.7, 0.5, 0.1, 0.01]:
x = torch.randn(1, 2, device=device)
y = x.clone().detach() + torch.tensor([[1., 0.]], device=device)
x.requires_grad = True
y.requires_grad = True
result = torch.cdist(x, y, p=p)
result.backward(torch.ones_like(result))
self.assertFalse(torch.isnan(x.grad).any())
self.assertFalse(torch.isnan(y.grad).any())
def test_cdist_same_inputs(self, device):
# Test to detect issues in the cdist gradient calculation
# when the distances are 0
sizex = (1, 27, 32)
for p in [0, 1, 2, 3, 1.5, 2.5, float('inf')]:
x = torch.randn(sizex, device=device, dtype=torch.float)
dist_grad = torch.randn((1, 27, 27), device=device, dtype=torch.float)
y = x.clone()
eps = 1e-6
x.requires_grad = True
d = torch.cdist(x, y, p=p)
d.backward(dist_grad)
# Check that the backward pass does not contain invalid
# values such as nan or inf
assert torch.isfinite(x.grad).all()
def test_parameter_resize(self, device):
asd = torch.nn.Parameter(torch.ones(16, device=device))
for i in range(2):
with torch.no_grad():
asd.set_(asd[1:])
asd.grad = None
m = torch.cat((asd, asd))
m.sum().backward()
def test_sparse_ctor_getter_backward(self, device):
# See NOTE [ Sparse: autograd and API ] on the expected behavior of this test
def _test(size, sparse_dim, nnz, device):
v_size = [nnz] + list(size[sparse_dim:])
i = torch.rand(sparse_dim, nnz)
i.mul_(torch.tensor(size[:sparse_dim]).unsqueeze(1).to(i))
i = i.to(torch.long)
inp = torch.randn(v_size, requires_grad=True)
other = self.genSparseTensor(size, sparse_dim, nnz, is_uncoalesced=True)[0]
other = other.to(device)
def fn(v):
x = torch.sparse_coo_tensor(i, v, size, device=device)
y = (x + other).coalesce()
yv = y.values()
new_v = yv.tanh()
z = torch.sparse_coo_tensor(y.indices(), new_v, y.size())
return z.coalesce().values()
gradcheck(fn, (inp,), check_batched_grad=False)
# FIXME: make gradgradcheck work.
# gradgradcheck(fn, (inp,), check_batched_grad=False)
# assert that _values is non-differentiable
with self.assertRaisesRegex(RuntimeError, "does not have a grad_fn"):
other.detach().requires_grad_()._values().backward(torch.ones_like(other._values()))
for empty_i, empty_v, empty_nnz in product([True, False], repeat=3):
sparse_size = [] if empty_i else [2, 1]
dense_size = [1, 0, 2] if empty_v else [1, 2]
nnz = 0 if empty_nnz else 5
_test(sparse_size + dense_size, len(sparse_size), nnz, device)
# autograd tests via common_method_invocations don't allow input tensors to
# be sparse (RuntimeError: gradcheck expects all tensor inputs are dense when
# check_sparse_nnz is set to False.)
def test_sparse_mask_autograd(self, device):
tensor = torch.randn(3, requires_grad=True, device=device)
mask = torch.ones(3, device=device)
mask[1] = 0
mask = mask.to_sparse()
converted = tensor.sparse_mask(mask).to_dense()
converted.sum().backward()
self.assertEqual(tensor.grad, mask.to_dense())
def test_pyscalar_conversions(self, device):
def _test_pyscalar_conversions(t, integral_conv):
# integral -> integral
l = t(torch.zeros(1, 1, 1, dtype=torch.long))
pyscalar = -12345
l[0] = pyscalar
self.assertEqual(integral_conv(l), pyscalar)
# floating point -> floating point
f = Variable(t(torch.randn(1, 1)))
pyscalar = -12345.1
f[0] = pyscalar
self.assertEqual(float(f), pyscalar)
f[0] = nan
self.assertTrue(math.isnan(float(f)))
f[0] = inf
self.assertEqual(float(f), inf)
f[0] = -inf
self.assertEqual(float(f), -inf)
# integral -> floating point
# check we can convert something that loses precision
pyscalar = 1234567890123456789
self.assertNotEqual(pyscalar, integral_conv(float(pyscalar)))
l[0] = pyscalar
self.assertEqual(float(l), float(pyscalar))
# floating point -> integral
f[0] = nan
self.assertRaises(ValueError, lambda: integral_conv(f[0]))
f[0] = inf
self.assertRaises(OverflowError, lambda: integral_conv(f[0]))
f[0] = -inf
self.assertRaises(OverflowError, lambda: integral_conv(f[0]))
f[0] = sys.float_info.max
self.assertEqual(integral_conv(f), sys.float_info.max)
# bool, nonzero
def test_nonzero(tensor, value, expected):
tensor[0] = value
self.assertEqual(expected, bool(tensor))
self.assertEqual(expected, True if tensor else False)
test_nonzero(l, 0, False)
test_nonzero(l, -2, True)
test_nonzero(f, 0.0, False)
test_nonzero(f, sys.float_info.min, True)
test_nonzero(f, nan, bool(nan))
test_nonzero(f, inf, bool(inf))
test_nonzero(f, -inf, bool(-inf))
_test_pyscalar_conversions(lambda x: x.to(device), lambda x: int(x))
@dtypesIfCUDA(torch.half, torch.float, torch.double, torch.int8, torch.int16, torch.int32, torch.int64)
@dtypes(torch.float, torch.double, torch.int8, torch.int16, torch.int32, torch.int64)
def test_set_requires_grad_only_for_floats(self, device, dtype):
def f1():
a = torch.ones(1, dtype=dtype, device=device)
a.requires_grad_()
def f2():
a = torch.ones(1, dtype=dtype, device=device)
a.requires_grad = True
def f3():
torch.ones(1, dtype=dtype, device=device, requires_grad=True)
a = torch.ones(1, dtype=dtype, device=device)
a.requires_grad = False # should always work
a.requires_grad_(False)
for f in [f1, f2, f3]:
if dtype.is_floating_point:
f()
else:
with self.assertRaisesRegex(RuntimeError, 'floating point', msg="dt: {} device: {}".format(a.dtype, a.device)):
f()
@onlyCUDA
def test_advanced_indexing_backwards_large(self, device):
# See https://github.com/pytorch/pytorch/issues/22843
n = (1 << 16)
x = torch.rand(n, 1, device=device, requires_grad=True)
a = x[:, [0]]
a.sum().backward()
self.assertEqual(x.grad, torch.ones(n, 1, device=device))
def test_advanced_indexing_backwards_memory_format(self, device):
# See https://github.com/pytorch/pytorch/issues/36956
shape = (2, 8, 1, 2)
i = torch.randint(1, shape, device=device).contiguous(memory_format=torch.channels_last)
x = torch.randn(shape, requires_grad=True, device=device)
x[i].sum().backward()
def _test_reentrant_parent_error_on_cpu(self, device):
t1 = torch.rand([3, 3], requires_grad=True)
t2 = torch.rand([3, 3], device=device, requires_grad=True)
t3 = torch.rand([3, 3], device=device, requires_grad=True)
# Parent cpu graph.
t4 = t1 * t1
t5 = TestAutograd.SimulateBackwardError.apply(t4)
# Child gpu graph (much longer than parent graph).
prev = t2 * t2
for i in range(10):
prev = prev * t2
reentrant_root = prev
class ReentrantFunc(Function):
@staticmethod
def forward(ctx, inp):
return inp.clone()
@staticmethod
def backward(ctx, grad):
# Reentrant backward in child will take much longer.
reentrant_root.backward()
return grad
# Parent gpu graph.
t6 = ReentrantFunc.apply(t3)
t7 = t6 * t6
# Parent graph will error out first, while child graph will continue executing.
with self.assertRaisesRegex(Exception, "Simulate error"):
torch.autograd.backward([t5.sum(), t7.sum()])
# No grads should be accumulated since child graph will stop execution
# after parent receives error.
self.assertIsNone(t2.grad)
self.assertIsNone(t1.grad)
self.assertIsNone(t3.grad)
@onlyCUDA
def test_reentrant_parent_error_on_cpu(self, device):
before = CudaMemoryLeakCheck.get_cuda_memory_usage()
# Run as separate function so that gc can clean up everything when we
# check for memory usage.
self._test_reentrant_parent_error_on_cpu(device)
# Wait for autograd thread to cleanup failed tasks.
after = CudaMemoryLeakCheck.get_cuda_memory_usage()
start = time.time()
while before != after and time.time() - start < 30:
time.sleep(0.1)
after = CudaMemoryLeakCheck.get_cuda_memory_usage()
self.assertEqual(before, after)
# test for backward in https://github.com/pytorch/pytorch/issues/15511
def test_pdist_large(self, device):
def func(x):
return torch.pdist(x, p=2)
# shape[0] should be able to be (roughly) arbitrarily large, but the kernel
# is currently limited to smaller sizes (see issue above); this is just testing
# a floor.
shape = (1000, 1)
x = torch.randn(shape, device=device).requires_grad_()
output = torch.pdist(x, p=2)
# just run a single backward, as gradcheck/gradgradcheck is expensive here
output.sum().backward()
def test_where_functional(self, device):
x = torch.randn(5, 5, device=device, requires_grad=True)
y = torch.randn(5, 5, device=device, requires_grad=True)
cond = mask_not_all_zeros((5, 5)).to(device=device)
def where(cond, x, y):
return torch.where(cond, x, y)
gradcheck(where, [cond, x, y], raise_exception=True)
gradgradcheck(where, [cond, x, y], [torch.randn(5, 5, device=device)])
x = torch.randn(5, 1, 5, device=device, requires_grad=True)
y = torch.randn(5, 5, 1, device=device, requires_grad=True)
gradcheck(where, [cond, x, y], raise_exception=True)
gradgradcheck(where, [cond, x, y], [torch.randn(5, 5, 5, device=device)])
def test_where_scalar(self, device):
x = torch.randn(5, 5, device=device, requires_grad=True)
scalar = 4.
cond = mask_not_all_zeros((5, 5)).to(device=device)
def where_scalar_first(cond, x):
return torch.where(cond, scalar, x)
def where_scalar_second(cond, x):
return torch.where(cond, x, scalar)
gradcheck(where_scalar_first, (cond, x))
gradgradcheck(where_scalar_first, (cond, x))
gradcheck(where_scalar_second, (cond, x))
gradgradcheck(where_scalar_second, (cond, x))
@skipCUDAIf(True, """Test is flaky on Linux and Windows, typical error message:
https://github.com/pytorch/pytorch/issues/34870""")
def test_ctc_loss(self, device):
batch_size = 64
num_labels = 101
target_length = 15
gradcheck_input_size = 10
ZERO_NONE = 0
ZERO_SOME = 1
ZERO_ALL = 2
# input_length, vary_lengths, zero_lengths
tests = [(150, False, ZERO_NONE),
(150, True, ZERO_NONE),
(50, True, ZERO_SOME),
(50, True, ZERO_ALL)]
if 'cuda' in device:
tests += [(50, False, ZERO_NONE),
(50, True, ZERO_NONE),
(150, True, ZERO_SOME),
(150, True, ZERO_ALL)]
for input_length, vary_lengths, zero_mode in tests:
targets = torch.randint(1, num_labels, (batch_size, target_length),
device=device, dtype=torch.long)
x = torch.randn(gradcheck_input_size, device=device, requires_grad=True)
tile_factors = torch.randn(input_length * batch_size * num_labels // gradcheck_input_size + 1,
device=device)
input_lengths = [(torch.randint(input_length // 2, input_length + 1, ()).item()
if vary_lengths or i == 0 else input_length) for i in range(batch_size)]
if zero_mode == ZERO_ALL:
target_lengths = [0 for _ in range(batch_size)]
else:
target_lengths = [(torch.randint(target_length // 2, target_length + 1, ()).item()
if vary_lengths else target_length) for _ in range(batch_size)]
if zero_mode == ZERO_SOME:
idxes = torch.randint(0, batch_size, (10,))
for i in idxes:
target_lengths[i] = 0
def ctc_after_softmax(x):
x_full = ((x[:, None] * tile_factors[None, :]).view(-1)[:input_length * batch_size * num_labels]
.view(input_length, batch_size, num_labels))
log_probs = torch.log_softmax(x_full, 2)
return torch.nn.functional.ctc_loss(log_probs, targets, input_lengths, target_lengths)
gradcheck(ctc_after_softmax, [x])
@onlyCUDA
@skipCUDAIfRocm
@skipCUDAIfCudnnVersionLessThan(7600)
def test_ctc_loss_cudnn(self, device):
batch_size = 16
input_length = 30
num_labels = 101
target_length = 15
targets = torch.randint(1, num_labels, (batch_size * target_length,),
device='cuda', dtype=torch.long)
log_probs = torch.log_softmax(torch.randn(input_length, batch_size, num_labels, device='cuda', dtype=torch.float), 2)
log_probs.requires_grad_()
input_lengths = batch_size * [input_length]
target_lengths = batch_size * [target_length]
grad_out = torch.randn(batch_size, device='cuda', dtype=torch.float)
with torch.backends.cudnn.flags(enabled=False):
loss_native = torch.nn.functional.ctc_loss(log_probs, targets, input_lengths, target_lengths, reduction='none')
grad_native, = torch.autograd.grad(loss_native, log_probs, grad_out)
loss_cudnn = torch.nn.functional.ctc_loss(log_probs, targets.to('cpu', torch.int32),
input_lengths, target_lengths, reduction='none')
self.assertTrue("Cudnn" in str(loss_cudnn.grad_fn))
grad_cudnn, = torch.autograd.grad(loss_cudnn, log_probs, grad_out)
self.assertEqual(grad_cudnn, grad_native, atol=1e-4, rtol=0)
def test_leaky_relu_inplace_with_neg_slope(self, device):
a = torch.tensor([-1., 1.], device=device, requires_grad=True)
b = torch.nn.functional.leaky_relu_(a.clone(), -2)
with self.assertRaisesRegex(RuntimeError, "call out-of-place version"):
b.backward(torch.ones(2, device=device))
a = torch.tensor([-1., 1.], device=device, requires_grad=True)
b = torch.nn.functional.rrelu_(a.clone(), -5.0, 1.0)
with self.assertRaisesRegex(RuntimeError, "call out-of-place version"):
b.backward(torch.ones(2, device=device))
def test_leaky_relu_inplace_with_zero_slope(self, device):
a = torch.tensor([-2., 0., 2.], device=device, requires_grad=True)
b = torch.nn.functional.leaky_relu_(a.clone(), 0.0)
b.backward(torch.ones(3, device=device))
expected = torch.tensor([0., 0., 1.], device=device)
self.assertEqual(a.grad, expected)
@onlyOnCPUAndCUDA
def test_elu_inplace_with_neg_alpha(self, device):
a = torch.tensor([-1., 1.], device=device, requires_grad=True)
b = torch.nn.functional.elu_(a.clone(), alpha=-2)
with self.assertRaisesRegex(RuntimeError, "call out-of-place version"):
b.backward(torch.ones(2, device=device))
a = torch.tensor([-1., 1.], device=device, requires_grad=True)
b = torch.nn.functional.celu_(a.clone(), alpha=-2)
with self.assertRaisesRegex(RuntimeError, "call out-of-place version"):
b.backward(torch.ones(2, device=device))
@onlyCUDA
def test_free_unneeded_tensor(self, device):
x = torch.randn(2, 3, 10, 10, device=device, requires_grad=True)
m = torch.randn(1, 3, 1, 1, device=device)
z = x.sum()
base_mem = torch.cuda.memory_allocated()
z = ((x + 2) * m).sum()
end_mem = torch.cuda.memory_allocated()
# In the end the memory usage should remain equal, because neither (x + 2)
# nor ((x + 2) * m) should be kept alive for backward, while the previous
# allocation of z had the same size as the current one.
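# (Illustrative numbers: x has 2 * 3 * 10 * 10 = 600 float32 elements, so the
# temporary (x + 2) alone is ~2.4 kB; if it were saved for backward, end_mem
# would exceed base_mem by at least that allocation.)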
self.assertEqual(base_mem, end_mem)
@onlyCUDA
def test_pin_memory(self, device):
x = torch.randn(2, 2, requires_grad=True)
self.assertEqual(x, x.pin_memory())
self.assertIsNot(x, x.pin_memory())
self.assertTrue(x.pin_memory().requires_grad)
gradcheck(lambda x: x.pin_memory(), [x])
gradgradcheck(lambda x: x.pin_memory(), [x])
@skipCUDAIfRocm
@onlyCUDA
def test_profiler_emit_nvtx(self, device):
# This test is not intended to ensure correctness of nvtx ranges.
# That would require something a great deal more complex (you'd have to create a
# profile in a subprocess, open it, and parse the sql somehow).
# This test is merely intended to catch if emit_nvtx breaks on construction.
a = torch.tensor([1, 2, 3], dtype=torch.float32, device=device)
with torch.cuda.profiler.profile():
with emit_nvtx():
a.add(1.0)
@onlyCUDA
def test_rnn_backward_to_input_but_not_parameters(self, device):
# this checks whether it is possible to not require
# weight parameters, but require inputs, see #7722
l = torch.nn.LSTM(2, 3).to(device)
for p in l.parameters():
p.requires_grad = False
s = torch.randn(1, 1, 2, requires_grad=True, device=device)
out, _ = l(s)
out.sum().backward()
self.assertFalse(s.grad is None or s.grad.abs().sum().item() == 0)
@onlyCUDA
def test_lstmcell_backward_only_one_output_grad(self, device):
# checks that undefined gradients don't hamper the backward
# see #11872
l = torch.nn.LSTMCell(2, 3).to(device).double()
s = torch.randn(1, 2, device=device, dtype=torch.double, requires_grad=True)
for i in range(2):
out = l(s)[i]
out.sum().backward()
self.assertFalse(s.grad is None or s.grad.abs().sum().item() == 0)
def _test_rnn_mod(self, mod, inp):
def flatten_out(mod, inp):
out = mod(inp)
return tuple([t if isinstance(t, torch.Tensor) else tt for t in out for tt in t])
gradcheckfunc = partial(flatten_out, mod)
with torch.backends.cudnn.flags(enabled=False):
gradcheck(gradcheckfunc, inp, check_batched_grad=False)
gradgradcheck(gradcheckfunc, inp, check_batched_grad=False)
if inp.is_cuda and not TEST_WITH_ROCM:
# Assert that we have good error message around unsupported CuDNN double backward
# NB: we trigger double backward using .backward() instead of autograd.grad due to
# https://github.com/pytorch/pytorch/issues/37874
with torch.backends.cudnn.flags(enabled=True):
result = gradcheckfunc(inp)
result[0].sum().backward(create_graph=True)
grad0 = next(mod.parameters()).grad
with self.assertRaisesRegex(RuntimeError,
"please disable the CuDNN backend temporarily"):
grad0.sum().backward()
# Here we avoid the backward(create_graph=True) memory leak
# described in https://github.com/pytorch/pytorch/issues/7343
for param in mod.parameters():
param.grad = None
inp.grad = None
def test_LSTM_grad_and_gradgrad(self, device):
hsize = 4
inp = torch.rand(1, 3, hsize, device=device, dtype=torch.float64, requires_grad=True)
for bias in [True, False]:
mod = torch.nn.LSTM(hsize, hsize, bias=bias).to(device).to(torch.float64)
self._test_rnn_mod(mod, inp)
def test_GRU_grad_and_gradgrad(self, device):
hsize = 4
inp = torch.rand(1, 3, hsize, device=device, dtype=torch.float64, requires_grad=True)
for bias in [True, False]:
mod = torch.nn.GRU(hsize, hsize, bias=bias).to(device).to(torch.float64)
self._test_rnn_mod(mod, inp)
def test_copysign_subgradient(self, device):
# Input is 0.0
x = torch.tensor([0.0, 0.0, 0.0], dtype=torch.float, device=device, requires_grad=True)
y = torch.tensor([-1.0, 0.0, 1.0], dtype=torch.float, device=device, requires_grad=True)
out = torch.copysign(x, y)
out.sum().backward()
self.assertEqual(x.grad.tolist(), [0.0, 0.0, 0.0])
self.assertEqual(y.grad.tolist(), [0.0] * 3)
# Input is -0.0
x = torch.tensor([-0.0, -0.0, -0.0], dtype=torch.float, device=device, requires_grad=True)
y = torch.tensor([-1.0, 0.0, 1.0], dtype=torch.float, device=device, requires_grad=True)
out = torch.copysign(x, y)
out.sum().backward()
self.assertEqual(x.grad.tolist(), [0.0, 0.0, 0.0])
self.assertEqual(y.grad.tolist(), [0.0] * 3)
# Other is 0.0
x = torch.tensor([-1.0, 0.0, 1.0], dtype=torch.float, device=device, requires_grad=True)
y = torch.tensor([0.0, 0.0, 0.0], dtype=torch.float, device=device, requires_grad=True)
out = torch.copysign(x, y)
out.sum().backward()
self.assertEqual(x.grad.tolist(), [-1.0, 0.0, 1.0])
self.assertEqual(y.grad.tolist(), [0.0] * 3)
# Other is -0.0
x = torch.tensor([-1.0, 0.0, 1.0], dtype=torch.float, device=device, requires_grad=True)
y = torch.tensor([-0.0, -0.0, -0.0], dtype=torch.float, device=device, requires_grad=True)
out = torch.copysign(x, y)
out.sum().backward()
self.assertEqual(x.grad.tolist(), [1.0, 0.0, -1.0])
self.assertEqual(y.grad.tolist(), [0.0] * 3)
@deviceCountAtLeast(1)
def test_grad_assignment(self, devices):
x = torch.randn(5, 5, device=devices[0])
# Tests that the wrong shape raises
with self.assertRaises(RuntimeError):
x.grad = torch.randn(2, 2, device=devices[0])
# Tests that the wrong dtype raises
with self.assertRaises(RuntimeError):
x.grad = torch.randn(5, 5, dtype=torch.long, device=devices[0])
# Tests that self-assignment raises
with self.assertRaises(RuntimeError):
x.grad = x
# Tests device -> cpu grad assignment raises
if self.device_type != 'cpu':
with self.assertRaises(RuntimeError):
t_cpu = torch.rand(5, 5)
t_cpu.grad = torch.randn(5, 5, device=devices[0])
# Tests half type on CUDA
if self.device_type == 'cuda':
x = x.to(dtype=torch.half, device=devices[0])
x.grad = torch.zeros_like(x)
# Tests cross-device assignment raises
if len(devices) > 1:
x = torch.randn(5, 5, device=devices[0])
with self.assertRaises(RuntimeError):
x.grad = torch.randn(5, 5, device=devices[1])
@deviceCountAtLeast(1)
@dtypes(torch.float, torch.double)
def test_requires_grad_factory(self, devices, dtype):
fns = [torch.ones_like, torch.testing.randn_like]
x = torch.randn(2, 3, dtype=dtype, device=devices[0])
for fn in fns:
for requires_grad in [True, False]:
output = fn(x, dtype=dtype, device=devices[0], requires_grad=requires_grad)
self.assertEqual(requires_grad, output.requires_grad)
self.assertIs(dtype, output.dtype)
self.assertEqual(devices[0], str(x.device))
@deviceCountAtLeast(2)
def test_unused_output_device(self, devices):
from torch.nn.parallel._functions import Broadcast
x = torch.randn(5, 5, dtype=torch.float, device=devices[0], requires_grad=True)
outputs = Broadcast.apply(list(range(len(devices))), x)
y = outputs[-1] * 2
y.sum().backward()
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(x.grad, torch.ones(5, 5) * 2)
@deviceCountAtLeast(2)
def test_backward_device(self, devices):
# check that current device matches the variable's device
device = [None]
class Identity(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
return x.clone()
@staticmethod
def backward(ctx, grad_output):
device[0] = grad_output.device
return grad_output.clone()
v = torch.randn(1, device=devices[1], requires_grad=True)
Identity.apply(v).backward()
self.assertEqual(str(device[0]), devices[1])
@deviceCountAtLeast(2)
def test_inputbuffer_add_multidevice(self, devices):
input = torch.randn(1, device=devices[0], requires_grad=True)
output = input.to(device=devices[1]) + input.to(device=devices[1])
output.backward()
@onlyCPU
def test_copy_(self, device):
# At the time of writing this test, copy_ is not generated from native_functions.yaml;
# there was a bug where bfloat16 was not recognized as floating point.
x = torch.randn(10, device=device, requires_grad=True)
floating_dt = [dt for dt in torch.testing.get_all_dtypes() if dt.is_floating_point]
for dt in floating_dt:
y = torch.empty(10, device=device, dtype=dt)
y.copy_(x)
self.assertTrue(y.requires_grad)
z = x.to(torch.bfloat16)
self.assertTrue(z.requires_grad)
@onlyCUDA
def test_simple_reentrant_cross_device(self, device):
class ReentrantFunc(Function):
_cpu_mode = True
@staticmethod
def forward(ctx, x):
return x * (x + 2)
@staticmethod
def backward(ctx, grad_output):
with torch.enable_grad():
if ReentrantFunc._cpu_mode:
new_param = torch.randn(2, 2, requires_grad=True)
(new_param ** 2).sum().backward()
else:
new_param = torch.randn(2, 2, device=device, requires_grad=True)
(new_param ** 2).sum().backward()
return grad_output
# Reentrant starts on GPU thread, finishes on GPU thread
x = torch.randn(2, 2, device=device, requires_grad=True)
out = ReentrantFunc.apply(x)
out.sum().backward()
# Reentrant starts on CPU thread, finishes on GPU thread
x = torch.randn(2, 2, requires_grad=True)
# set ReentrantFunc node to GPU to emit tasks to GPU queue
ReentrantFunc._cpu_mode = False
out = ReentrantFunc.apply(x)
out.sum().backward()
# Reentrant starts on GPU thread, finishes on CPU thread
x = torch.randn(2, 2, device=device, requires_grad=True)
# set ReentrantFunc node to CPU to emit tasks to CPU queue
ReentrantFunc._cpu_mode = True
out = ReentrantFunc.apply(x)
out.sum().backward()
@onlyCUDA
def test_cross_device_reentrant_autograd(self, device):
# Output on gpu so that this task will be associated with the gpu thread
def fn_on_gpu(inp):
# Artificially increase the priority of the next op to make sure it runs
# as soon as we reach it before the ops of branch1.
dummy = inp * 2 * 2 * 2 * 2
return inp.to(device=device)
def parent_on_cpu(inp):
# Slow branch of ops on gpu so that the work queue for the gpu thread
# won't empty too quickly. They also have smaller priorities than the
# ones created by fn_on_gpu
branch1 = inp.to(device=device)
branch1 = branch1 / branch1
branch1 = branch1 / branch1
branch1 = branch1 / branch1
# Perform checkpoint on cpu tensors. So the last op performed in the reentrant
# autograd is an AccumulateGrad that runs on the cpu thread for the gpu thread.
# So the cpu thread will notify the gpu thread with an empty NodeTask.
branch2 = checkpoint(fn_on_gpu, inp)
out = branch2 + branch1
return out
inp = torch.rand(2, requires_grad=True)
out = parent_on_cpu(inp)
# This will segfault if the empty NodeTask is not handled properly in the
# gpu thread ReadyQueue
out.sum().backward()
def test_inplace_view_backprop_base(self, device):
# modify view and back-prop through base
root = torch.randn(2, 2, device=device, requires_grad=True)
x = root.clone()
v1 = x.narrow(0, 0, 1)
v1.mul_(2)
x.sum().backward()
self.assertEqual(root.grad.tolist(), [[2, 2], [1, 1]])
def test_inplace_view_backprop_view_of_view(self, device):
# modify view and backprop through view-of-view
root = torch.randn(2, 2, device=device, requires_grad=True)
x = root.clone()
v1 = x.narrow(0, 0, 1)
v2 = x.narrow(0, 0, 1)
v1.mul_(2)
v2.sum().backward()
self.assertEqual(root.grad.tolist(), [[2, 2], [0, 0]])
def test_inplace_view_of_view(self, device):
# modify view-of-view and backprop through base
root = torch.randn(2, 2, device=device, requires_grad=True)
x = root.clone()
v1 = x.narrow(0, 0, 1)
v2 = v1.narrow(1, 1, 1)
v2.mul_(2)
x.sum().backward()
self.assertEqual(root.grad.tolist(), [[1, 2], [1, 1]])
def test_inplace_view_then_no_grad(self, device):
# Perform an in-place operation on a view of a non-leaf variable.
a = torch.ones(3, 1, device=device, requires_grad=True)
b = a * 2
c = b.view_as(b)
c[0][0] = 3
# Force a graph update with grad disabled.
with torch.no_grad():
c.grad_fn
c.sum().backward()
def test_inplace_view_gradcheck(self, device):
# gradcheck modifications to views
a = torch.randn(4, 4, device=device, requires_grad=True)
b = torch.randn(2, 2, device=device, requires_grad=True)
def func(root, b):
x = root.clone()
x.narrow(1, 2, 2).narrow(0, 1, 2).mul_(b)
x.narrow(1, 0, 2).narrow(0, 1, 2).mul_(b)
return x
gradcheck(func, [a, b], raise_exception=True)
go = torch.randn(a.size(), device=device, requires_grad=True)
gradgradcheck(func, (a, b), (go,))
def test_inplace_view_multiple_outputs(self, device):
root = torch.arange(9.).reshape(3, 3).requires_grad_()
x = root.clone()
v1 = x.unbind()
with self.assertRaises(RuntimeError):
v1[0].mul_(2)
def test_inplace_view_of_multiple_output_view(self, device):
a = torch.rand(10, device=device, requires_grad=True).clone()
b = a.unbind(0)
c = b[0].view_as(b[0])
with self.assertRaises(RuntimeError):
c.mul_(2)
def test_inplace_multiple_output_view_of_view(self, device):
a = torch.rand(10, device=device, requires_grad=True).clone()
b = a.view_as(a)
c = b.unbind(0)
with self.assertRaises(RuntimeError):
c[0].mul_(2)
def test_inplace_view_makes_base_require_grad(self, device):
# in-place modification to view makes base require grad
a = torch.randn(4, 4, device=device, requires_grad=False)
b = torch.randn(4, 2, device=device, requires_grad=True)
def func(root, b):
x = root.clone()
self.assertFalse(x.requires_grad)
x.narrow(1, 2, 2).mul_(b)
self.assertTrue(x.requires_grad)
return x
gradcheck(func, [a, b], raise_exception=True)
go = torch.randn(a.size(), device=device, requires_grad=True)
gradgradcheck(func, (a, b), (go,))
def test_inplace_view_backprop_view(self, device):
# modify view and backprop through view
a = torch.tensor([2., 5.], device=device, requires_grad=False)
b = torch.tensor([3.], device=device, requires_grad=True)
res = a.narrow(0, 1, 1).mul_(b)
res.sum().backward()
self.assertEqual(b.grad.tolist(), [5])
self.assertIsNone(a.grad)
def test_inplace_view_modify_base(self, device):
# Test that an in-place operation on a base that forced it to require
# grad also forces any previous views to require grad and backprop
# correctly
r = torch.ones(1, device=device, requires_grad=True)
def fn(r):
x = torch.ones(5, device=device)
v = x.select(0, 1)
self.assertFalse(v.requires_grad)
self.assertIsNone(v.grad_fn)
x.add_(r) # v is now dependent on r due to the in-place op on x
self.assertTrue(v.requires_grad)
return v
gradcheck(fn, [r])
gradgradcheck(fn, [r])
def test_inplace_view_python(self, device):
# in-place modifications of Python-autograd created view
a = torch.randn(4, 4, device=device, requires_grad=True)
b = torch.randn(2, 2, device=device, requires_grad=True)
class PyAdd(torch.autograd.Function):
@staticmethod
def forward(ctx, x, y):
ctx.mark_dirty(x)
x.add_(y)
return x
@staticmethod
def backward(ctx, grad):
return grad, grad
def func(root, b):
x = root.clone()
PyAdd.apply(x.narrow(1, 2, 2).narrow(0, 1, 2), b)
PyAdd.apply(x.narrow(1, 0, 2).narrow(0, 1, 2), b)
return x
gradcheck(func, [a, b], raise_exception=True)
go = torch.randn(a.size(), device=device, requires_grad=True)
gradgradcheck(func, (a, b), (go,))
def test_inplace_view_non_contig(self, device):
root = torch.ones(2, 3, 2, device=device).select(2, 1).t().requires_grad_(True)
x = root.clone()
v1 = x.narrow(0, 0, 1)
v2 = v1.narrow(1, 1, 1)
v2.mul_(2)
x.sum().backward()
self.assertEqual(root.grad.tolist(), [[1, 2], [1, 1], [1, 1]])
def test_inplace_view_multi_output_unsafe(self, device):
for f in [lambda t: t.unsafe_split(1),
lambda t: t.unsafe_split_with_sizes((1, 1, 1)),
lambda t: t.unsafe_chunk(3)]:
a = torch.randn(3, 3, device=device, requires_grad=True)
b = a + a
s1, s2, s3 = f(b)
s1.mul_(s2)
s1.sum().backward()
def test_inplace_view_multi_output_safe(self, device):
for f in [lambda t: t.split(1),
lambda t: t.split_with_sizes((1, 1, 1)),
lambda t: t.chunk(3)]:
a = torch.randn(3, 3, device=device, requires_grad=True)
b = a + a
s1, s2, s3 = f(b)
with warnings.catch_warnings(record=True) as w:
s1.mul_(s2)
self.assertIn('Consider using `unsafe_` version', str(w[0].message))
def test_mv_grad_stride_0(self, device):
# Reference: https://github.com/pytorch/pytorch/issues/38315
mat = torch.randn(2, 2, device=device)
vec = torch.randn(1, device=device).requires_grad_(True)
def fn(vec):
# Expand inside the function to make sure the input to
# gradcheck does not have overlapping memory
vec = vec.expand(2)
return (mat @ vec).sum()
gradcheck(fn, (vec))
gradgradcheck(fn, (vec))
def test_logcumsumexp_large_value(self, device):
a = torch.rand(4, 4, 4, dtype=torch.double, requires_grad=True)
with torch.no_grad():
# Large Number
a[0] = 10000
gradcheck(lambda x: x.logcumsumexp(0), a)
gradgradcheck(lambda x: x.logcumsumexp(0), a)
gradcheck(lambda x: x.logcumsumexp(1), a)
gradgradcheck(lambda x: x.logcumsumexp(1), a)
gradcheck(lambda x: x.logcumsumexp(2), a)
gradgradcheck(lambda x: x.logcumsumexp(2), a)
@slowTest
def test_lu_backward(self, device):
def run_test(*sizes):
x = torch.rand(*sizes, device=device, dtype=torch.double).requires_grad_(True)
gradcheck(lambda x: x.lu(get_infos=True), x)
gradgradcheck(lambda x: x.lu(get_infos=True), x)
gradcheck(lambda x: x.lu(get_infos=False), x)
gradgradcheck(lambda x: x.lu(get_infos=False), x)
# there is no pivot-less LU factorization on CPU
if x.device.type == 'cuda':
gradcheck(lambda x: x.lu(pivot=False, get_infos=True), x)
gradgradcheck(lambda x: x.lu(pivot=False, get_infos=True), x)
gradcheck(lambda x: x.lu(pivot=False, get_infos=False), x)
gradgradcheck(lambda x: x.lu(pivot=False, get_infos=False), x)
run_test(3, 3)
run_test(3, 3, 3)
run_test(3, 3, 3, 3)
run_test(5, 5)
run_test(3, 5, 5)
run_test(3, 3, 5, 5)
def test_strided_leaf_grad_layout(self, device):
# (1) If leaf is non-overlapping and dense, grad's layout should match its leaf.
for fmt_a in (torch.contiguous_format, torch.channels_last):
for fmt_b in (torch.contiguous_format, torch.channels_last):
a = torch.rand((2, 3, 4, 5), device=device).to(memory_format=fmt_a)
b = torch.rand((2, 3, 4, 5), device=device).to(memory_format=fmt_b)
a.requires_grad_()
b.requires_grad_()
# checks (1) for broadcasted gradients
a.sum().backward()
self.assertEqual(a.grad.stride(), a.stride())
b.sum().backward()
self.assertEqual(b.grad.stride(), b.stride())
# checks (1) for non-broadcasted gradients
a.grad = None
b.grad = None
(a * b).sum().backward()
self.assertEqual(a.grad.stride(), a.stride())
self.assertEqual(b.grad.stride(), b.stride())
# (2) If leaf isn't dense, checks that grads are rowmajor contiguous.
c = torch.empty_strided((2, 2), (4, 2), device=device).copy_(torch.rand((2, 2), device=device))
c.requires_grad_()
d = torch.rand((2, 2), device=device)
# checks (2) for broadcasted gradients
c.sum().backward()
self.assertEqual(c.grad.stride(), (2, 1))
# checks (2) for non-broadcasted gradients
c.grad = None
(c * d).sum().backward()
self.assertEqual(c.grad.stride(), (2, 1))
def _test_atleast(self, device, torch_fn):
# 0-dim
s = torch.tensor(0.5, dtype=torch.double, requires_grad=True)
gradcheck(lambda x: torch_fn(x), s)
gradgradcheck(lambda x: torch_fn(x), s)
# 1-dim
a = torch.rand(4, dtype=torch.double, requires_grad=True)
gradcheck(lambda x: torch_fn(x), a)
gradgradcheck(lambda x: torch_fn(x), a)
# 2,3,4-dim
b = torch.rand(4, 3, dtype=torch.double, requires_grad=True)
c = torch.rand(4, 3, 2, dtype=torch.double, requires_grad=True)
d = torch.rand(4, 3, 2, 1, dtype=torch.double, requires_grad=True)
input_tuple = (s, a, b, c, d)
gradcheck(lambda s, w, x, y, z: torch_fn(s, w, x, y, z), input_tuple)
gradgradcheck(lambda s, w, x, y, z: torch_fn(s, w, x, y, z), input_tuple)
def test_atleast(self, device):
self._test_atleast(device, torch.atleast_1d)
self._test_atleast(device, torch.atleast_2d)
self._test_atleast(device, torch.atleast_3d)
def test_xlogy(self, device):
def _tensor_tensor_helper(x, y):
gradcheck(lambda x, y: torch.xlogy(x, y), (x, y))
gradgradcheck(lambda x, y: torch.xlogy(x, y), (x, y))
with torch.no_grad():
x = x.clone()
x[torch.rand_like(x) > 0.5] = 0
gradcheck(lambda y: torch.xlogy(x, y), (y))
gradgradcheck(lambda y: torch.xlogy(x, y), (y))
shapes = ((4,), (1, 4), (1, 1, 4), (1, 1, 1, 4))
# For broadcastable shapes and scalars.
for x_shape, y_shape in permutations(shapes, 2):
x = torch.rand(*x_shape, dtype=torch.double, device=device, requires_grad=True)
y = torch.rand(*y_shape, dtype=torch.double, device=device, requires_grad=True)
_tensor_tensor_helper(x, y)
_tensor_tensor_helper(y, x)
gradcheck(lambda y: torch.xlogy(0, y), (y))
gradgradcheck(lambda y: torch.xlogy(0, y), (y))
gradcheck(lambda y: torch.xlogy(2, y), (y))
gradgradcheck(lambda y: torch.xlogy(2, y), (y))
gradcheck(lambda y: torch.xlogy(y, 2), (y))
gradgradcheck(lambda y: torch.xlogy(y, 2), (y))
# Different shape
x = torch.rand(2, 3, 4, 5, dtype=torch.double, device=device, requires_grad=True)
y = torch.rand(4, 5, dtype=torch.double, device=device, requires_grad=True)
_tensor_tensor_helper(x, y)
_tensor_tensor_helper(y, x)
_tensor_tensor_helper(x, x)
_tensor_tensor_helper(y, y)
# Same shape
x = torch.rand(4, 5, dtype=torch.double, device=device, requires_grad=True)
y = torch.rand(4, 5, dtype=torch.double, device=device, requires_grad=True)
_tensor_tensor_helper(x, y)
_tensor_tensor_helper(y, x)
_tensor_tensor_helper(x, x)
_tensor_tensor_helper(y, y)
class TestMultithreadAutograd(TestCase):
def _run_py_multithread_fn(self, fn, args=(), num_threads=10, kwargs=None):
threads = []
for _ in range(num_threads):
p = threading.Thread(target=fn, args=(args))
p.start()
threads.append(p)
for p in threads:
p.join()
def test_simple_backward(self):
# simple multithreaded backward where threads are created at the beginning of training
# and everything else (inputs, operations, etc.) is separate per thread
def train_fn():
x = torch.ones(5, 5, requires_grad=True)
y = (x + 3) * (x + 4) * 0.5
y.sum().backward()
self.assertEqual(x.grad, x + 3.5)
self._run_py_multithread_fn(train_fn)
def test_simple_backward_same_input(self):
# simple multithreaded backward with only shared inputs (i.e. this is common
# for things like Hogwild multithreaded training with multiple CPU threads)
def train_fn_backward(x):
y = (x + 3) * (x + 4) * 0.5
y.sum().backward()
x = torch.ones(5, 5, requires_grad=True)
self._run_py_multithread_fn(train_fn_backward, (x,))
# Since we are calling backward from multiple threads
# and all threads share the same input, when we do backward
# concurrently, different backwards will all accumulate to
# the same .grad for each input, and the gradients should
# be equal to num_threads * gradient
self.assertEqual(x.grad, 10 * (x + 3.5))
def train_fn_grad(x):
y = (x + 3) * (x + 4) * 0.5
grads = torch.autograd.grad(y.sum(), x)
self.assertEqual(len(grads), 1)
self.assertEqual(grads[0], x + 3.5)
# since we use the functional grad() api, gradients will not
# accumulate in the same place and should all be the same
self._run_py_multithread_fn(train_fn_grad, (x,))
def test_python_thread_in_middle(self):
# User might write a network that starts on one CPU thread, then runs its second half
# concurrently with other threads (either via python threading or fork/join calls),
# then calls backward()/grad() on BOTH threads, like a Y pattern from input at the
# bottom to output at the top. This way part of the GraphTask is being shared across
# different threads, and we need to ensure the user specifies retain_graph=True; otherwise
# we error out with the correct error message
# Case 1: multiple backward with python threads, retain_graph=False
# should throw error in some threads with no retain_graph.
success_vs_raises = [0, 0]
def train_fn_no_retain_graph(x):
y = x + x ** 2
try:
y.sum().backward()
success_vs_raises[0] += 1
except RuntimeError as error:
success_vs_raises[1] += 1
self.assertRegex(str(error), "Specify retain_graph=True")
x_no_retain = torch.ones(5, 5, requires_grad=True)
y_no_retain = x_no_retain + x_no_retain ** 2
self._run_py_multithread_fn(train_fn_no_retain_graph, (y_no_retain,), num_threads=5)
# at least one thread will succeed in this case; all other threads should raise
# an error recommending that the user specify retain_graph=True
self.assertTrue(success_vs_raises[0] >= 1)
# multiple backward with python threads, no error with retain_graph=True
def train_fn_retain_graph(x):
y = x + x ** 2
y.sum().backward(retain_graph=True)
x_retain = torch.ones(5, 5, requires_grad=True)
y_retain = x_retain + x_retain ** 2
self._run_py_multithread_fn(train_fn_retain_graph, (y_retain,), num_threads=5)
# result should be equal to num_threads * gradient
self.assertEqual(x_retain.grad, 5 * (4 * x_retain ** 3 + 6 * (x_retain ** 2) + 4 * x_retain + 1))
def test_fork_join_in_middle(self):
# multiple backward with jit threads (fork/join primitive)
# similar to test_python_thread_in_middle, we test with retain_graph=False/True
# Case 1: multiple grad() calls with jit threads, retain_graph=False
# should throw error in some threads with no retain_graph.
@torch.jit.script
def train_fn_jit_no_retain(middle, orig_x):
y = middle + middle ** 2
return torch.autograd.grad([y.sum()], [orig_x])
@torch.jit.script
def train_fn_fork_join_calls_no_retain(x):
y_no_retain = (x + 3) * (x + 4) * 0.5
fut = torch.jit._fork(train_fn_jit_no_retain, y_no_retain, x)
grad_hat = train_fn_jit_no_retain(y_no_retain, x)
grad = torch.jit._wait(fut)
return grad, grad_hat
try:
train_fn_fork_join_calls_no_retain(torch.randn(5, 5, requires_grad=True))
except RuntimeError as error:
self.assertRegex(str(error), "Specify retain_graph=True")
# Case 2: no error with retain_graph=True
@torch.jit.script
def train_fn_jit_retain(middle, orig_x):
y = middle + middle ** 2
return torch.autograd.grad([y.sum()], [orig_x], retain_graph=True)
@torch.jit.script
def train_fn_fork_join_calls_retain(x):
y_retain = (x + 3) * (x + 4) * 0.5
fut1 = torch.jit._fork(train_fn_jit_retain, y_retain, x)
fut2 = torch.jit._fork(train_fn_jit_retain, y_retain, x)
grad = train_fn_jit_retain(y_retain, x)
grad1 = torch.jit._wait(fut1)
grad2 = torch.jit._wait(fut2)
return grad, grad1, grad2
grad, grad1, grad2 = train_fn_fork_join_calls_retain(torch.randn(5, 5, requires_grad=True))
self.assertEqual(grad, grad1)
self.assertEqual(grad, grad2)
def test_preserve_backtrace(self):
class Foo(torch.autograd.Function):
@staticmethod
def forward(ctx, input):
return input
@staticmethod
def backward(ctx, *grad):
raise ValueError("something")
t = torch.rand(10, requires_grad=True)
try:
Foo.apply(t).sum().backward()
except Exception:
import traceback
tb = sys.exc_info()[2]
tb_str = "\n".join(traceback.format_tb(tb))
self.assertTrue('raise ValueError("something")' in tb_str)
# TODO(@anjali411): add an OpInfo based test for torch.cat
# Issue: https://github.com/pytorch/pytorch/issues/51627
def test_cat_r_to_c(self):
inp_c = torch.rand(3, 2, dtype=torch.cdouble, requires_grad=True)
inp_r = torch.randn(3, 2, dtype=torch.double, requires_grad=True)
def fn(x1, x2):
return torch.cat((x1, x2), dim=-1)
torch.autograd.gradcheck(fn, [inp_r, inp_c])
torch.autograd.gradcheck(fn, [inp_c, inp_r])
for test in method_tests():
add_test(*test)
# e.g., TestAutogradDeviceTypeCPU and TestAutogradDeviceTypeCUDA
instantiate_device_type_tests(
TestAutogradDeviceType,
globals(),
except_for=None
)
if __name__ == '__main__':
run_tests()
|
test_cases.py
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains test case classes based on pytest for AEA end-to-end testing."""
import copy
import logging
import os
import random
import shutil
import string
import subprocess # nosec
import sys
import tempfile
import time
from abc import ABC
from contextlib import suppress
from filecmp import dircmp
from io import TextIOWrapper
from pathlib import Path
from threading import Thread
from typing import Any, Callable, Dict, List, Optional, Sequence, Set, Tuple, Union
import yaml
from aea.cli import cli
from aea.configurations.base import (
AgentConfig,
PackageType,
_get_default_configuration_file_name_from_type,
)
from aea.configurations.constants import (
DEFAULT_AEA_CONFIG_FILE,
DEFAULT_INPUT_FILE_NAME,
DEFAULT_LEDGER,
DEFAULT_OUTPUT_FILE_NAME,
DEFAULT_PRIVATE_KEY_FILE,
DEFAULT_REGISTRY_NAME,
LAUNCH_SUCCEED_MESSAGE,
)
from aea.configurations.loader import ConfigLoader, ConfigLoaders
from aea.exceptions import enforce
from aea.helpers.base import cd, send_control_c, win_popen_kwargs
from aea.helpers.io import open_file
from aea.mail.base import Envelope
from aea.test_tools.click_testing import CliRunner, Result
from aea.test_tools.constants import DEFAULT_AUTHOR
from aea.test_tools.exceptions import AEATestingException
from aea.test_tools.generic import (
nested_set_config,
read_envelope_from_file,
write_envelope_to_file,
)
_default_logger = logging.getLogger(__name__)
CLI_LOG_OPTION = ["-v", "OFF"]
DEFAULT_PROCESS_TIMEOUT = 120
DEFAULT_LAUNCH_TIMEOUT = 10
class BaseAEATestCase(ABC): # pylint: disable=too-many-public-methods
"""Base class for AEA test cases."""
runner: CliRunner # CLI runner
last_cli_runner_result: Optional[Result] = None
author: str = DEFAULT_AUTHOR # author
subprocesses: List[subprocess.Popen] = [] # list of launched subprocesses
threads: List[Thread] = [] # list of started threads
packages_dir_path: Path = Path(DEFAULT_REGISTRY_NAME)
package_registry_src: Path = Path(".")
use_packages_dir: bool = True
package_registry_src_rel: Path = Path(os.getcwd(), packages_dir_path)
old_cwd: Path  # original working directory path (before moving into the temporary dir)
t: Path # temporary directory path
current_agent_context: str = "" # the name of the current agent
agents: Set[str] = set() # the set of created agents
stdout: Dict[int, str] # dict of process.pid: string stdout
stderr: Dict[int, str] # dict of process.pid: string stderr
_is_teardown_class_called: bool = False
capture_log: bool = False
cli_log_options: List[str] = []
method_list: List[str] = []
@classmethod
def set_agent_context(cls, agent_name: str) -> None:
"""Set the current agent context."""
cls.current_agent_context = agent_name
@classmethod
def unset_agent_context(cls) -> None:
"""Unset the current agent context."""
cls.current_agent_context = ""
@classmethod
def set_config(
cls, dotted_path: str, value: Any, type_: Optional[str] = None
) -> Result:
"""
Set a config.
Run from agent's directory.
:param dotted_path: str dotted path to config param.
:param value: a new value to set.
:param type_: the type
:return: Result
"""
if type_ is None:
type_ = type(value).__name__
return cls.run_cli_command(
"config",
"set",
dotted_path,
str(value),
"--type",
type_,
cwd=cls._get_cwd(),
)
@classmethod
def nested_set_config(cls, dotted_path: str, value: Any) -> None:
"""Force set config."""
with cd(cls._get_cwd()):
nested_set_config(dotted_path, value)
@classmethod
def disable_aea_logging(cls) -> None:
"""
Disable AEA logging of specific agent.
Run from agent's directory.
"""
config_update_dict = {
"agent.logging_config.disable_existing_loggers": "False",
"agent.logging_config.version": "1",
}
for path, value in config_update_dict.items():
cls.run_cli_command("config", "set", path, value, cwd=cls._get_cwd())
@classmethod
def run_cli_command(cls, *args: str, cwd: str = ".", **kwargs: str) -> Result:
"""
Run AEA CLI command.
:param args: CLI args
:param cwd: the working directory from where to run the command.
:param kwargs: other keyword arguments to click.CliRunner.invoke.
:raises AEATestingException: if command fails.
:return: Result
"""
with cd(cwd):
result = cls.runner.invoke(
cli,
[*CLI_LOG_OPTION, *args],
standalone_mode=False,
catch_exceptions=False,
**kwargs,
)
cls.last_cli_runner_result = result
if result.exit_code != 0: # pragma: nocover
raise AEATestingException(
"Failed to execute AEA CLI command with args {}.\n"
"Exit code: {}\nException: {}".format(
args, result.exit_code, result.exception
)
)
return result
@classmethod
def _run_python_subprocess(cls, *args: str, cwd: str = ".") -> subprocess.Popen:
"""
Run python with args as subprocess.
:param args: CLI args
:param cwd: the current working directory
:return: subprocess object.
"""
kwargs = dict(
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
env=os.environ.copy(),
cwd=cwd,
)
kwargs.update(win_popen_kwargs())
process = subprocess.Popen( # type: ignore # nosec # mypy fails on **kwargs
[sys.executable, *args], **kwargs,
)
cls.subprocesses.append(process)
return process
@classmethod
def start_subprocess(cls, *args: str, cwd: str = ".") -> subprocess.Popen:
"""
Run python with args as subprocess.
:param args: CLI args
:param cwd: the current working directory
:return: subprocess object.
"""
process = cls._run_python_subprocess(*args, cwd=cwd)
cls._start_output_read_thread(process)
cls._start_error_read_thread(process)
return process
@classmethod
def start_thread(cls, target: Callable, **kwargs: subprocess.Popen) -> Thread:
"""
Start python Thread.
:param target: target method.
:param kwargs: thread keyword arguments
:return: thread
"""
if "process" in kwargs:
thread = Thread(target=target, args=(kwargs["process"],))
else:
thread = Thread(target=target)
thread.start()
cls.threads.append(thread)
return thread
@classmethod
def create_agents(
cls, *agents_names: str, is_local: bool = True, is_empty: bool = False
) -> None:
"""
Create agents in current working directory.
:param agents_names: str agent names.
:param is_local: whether to add from the local folder (True by default).
:param is_empty: optional boolean flag to skip adding default dependencies.
"""
cli_args = ["create", "--local", "--empty"]
if not is_local: # pragma: nocover
cli_args.remove("--local")
if not is_empty: # pragma: nocover
cli_args.remove("--empty")
for name in set(agents_names):
cls.run_cli_command(*cli_args, name)
cls.agents.add(name)
@classmethod
def fetch_agent(
cls, public_id: str, agent_name: str, is_local: bool = True
) -> None:
"""
Fetch an agent into the current working directory.
:param public_id: str public id
:param agent_name: str agent name.
:param is_local: whether to fetch from the local folder (True by default).
"""
cli_args = ["fetch", "--local"]
if not is_local: # pragma: nocover
cli_args.remove("--local")
cls.run_cli_command(*cli_args, public_id, "--alias", agent_name)
cls.agents.add(agent_name)
@classmethod
def difference_to_fetched_agent(cls, public_id: str, agent_name: str) -> List[str]:
"""
Compare agent against the one fetched from public id.
:param public_id: str public id
:param agent_name: str agent name.
:return: list of files differing in the projects
"""
# for pydocstyle
def is_allowed_diff_in_agent_config(
path_to_fetched_aea: str, path_to_manually_created_aea: str
) -> Tuple[
bool, Union[Dict[str, str], List[Any]], Union[Dict[str, str], List[Any]]
]:
with open_file(
os.path.join(path_to_fetched_aea, "aea-config.yaml"), "r"
) as file:
content1 = list(yaml.safe_load_all(file)) # load all contents
with open_file(
os.path.join(path_to_manually_created_aea, "aea-config.yaml"), "r"
) as file:
content2 = list(yaml.safe_load_all(file))
content1_agentconfig = content1[0]
content2_agentconfig = content2[0]
content1_agentconfig_copy = copy.deepcopy(content1_agentconfig)
# check only agent part
for key, value in content1_agentconfig_copy.items():
if content2_agentconfig[key] == value:
content1_agentconfig.pop(key)
content2_agentconfig.pop(key)
allowed_diff_keys = [
"aea_version",
"author",
"description",
"version",
"connection_private_key_paths",
"private_key_paths",
"dependencies",
"required_ledgers",
]
result = all(
[key in allowed_diff_keys for key in content1_agentconfig.keys()]
)
result = result and all(
[key in allowed_diff_keys for key in content2_agentconfig.keys()]
)
if not result:
return result, content1_agentconfig, content2_agentconfig
# else, additionally check the other YAML pages
# (i.e. the component configuration overrides)
content1_component_overrides = content1[1:]
content2_component_overrides = content2[1:]
if len(content1_component_overrides) != len(content2_component_overrides):
return False, content1_component_overrides, content2_component_overrides
diff_1, diff_2 = [], []
for index, (override_1, override_2) in enumerate(
zip(content1_component_overrides, content2_component_overrides)
):
if override_1 != override_2:
result = False
diff_1.append((index, override_1))
diff_2.append((index, override_2))
return result, diff_1, diff_2
path_to_manually_created_aea = os.path.join(cls.t, agent_name)
new_cwd = os.path.join(cls.t, "fetch_dir")
os.mkdir(new_cwd)
fetched_agent_name = agent_name
path_to_fetched_aea = os.path.join(new_cwd, fetched_agent_name)
registry_tmp_dir = os.path.join(new_cwd, cls.packages_dir_path)
shutil.copytree(str(cls.package_registry_src_rel), str(registry_tmp_dir))
with cd(new_cwd):
cls.run_cli_command(
"fetch", "--local", public_id, "--alias", fetched_agent_name
)
comp = dircmp(path_to_manually_created_aea, path_to_fetched_aea)
file_diff = comp.diff_files
result, diff1, diff2 = is_allowed_diff_in_agent_config(
path_to_fetched_aea, path_to_manually_created_aea
)
if result:
if "aea-config.yaml" in file_diff: # pragma: nocover
file_diff.remove("aea-config.yaml") # won't match!
else:
file_diff.append(
"Difference in aea-config.yaml: " + str(diff1) + " vs. " + str(diff2)
)
with suppress(OSError, IOError):
shutil.rmtree(new_cwd)
return file_diff
@classmethod
def delete_agents(cls, *agents_names: str) -> None:
"""
Delete agents in current working directory.
:param agents_names: str agent names.
"""
for name in set(agents_names):
cls.run_cli_command("delete", name)
cls.agents.remove(name)
@classmethod
def run_agent(cls, *args: str) -> subprocess.Popen:
"""
Run agent as subprocess.
Run from agent's directory.
:param args: CLI args
:return: subprocess object.
"""
return cls._start_cli_process("run", *args)
@classmethod
def run_interaction(cls) -> subprocess.Popen:
"""
Run interaction as subprocess.
Run from agent's directory.
:return: subprocess object.
"""
return cls._start_cli_process("interact")
@classmethod
def _start_cli_process(cls, *args: str) -> subprocess.Popen:
"""
Start cli subprocess with args specified.
:param args: CLI args
:return: subprocess object.
"""
process = cls._run_python_subprocess(
"-m", "aea.cli", *cls.cli_log_options, *args, cwd=cls._get_cwd()
)
cls._start_output_read_thread(process)
cls._start_error_read_thread(process)
return process
@classmethod
def terminate_agents(
cls, *subprocesses: subprocess.Popen, timeout: int = 20,
) -> None:
"""
Terminate agent subprocesses.
Run from agent's directory.
:param subprocesses: the subprocesses running the agents
:param timeout: the timeout for interruption
"""
if not subprocesses:
subprocesses = tuple(cls.subprocesses)
for process in subprocesses:
process.poll()
if process.returncode is None: # stop only pending processes
send_control_c(process)
for process in subprocesses:
process.wait(timeout=timeout)
@classmethod
def is_successfully_terminated(cls, *subprocesses: subprocess.Popen) -> bool:
"""Check if all subprocesses terminated successfully."""
if not subprocesses:
subprocesses = tuple(cls.subprocesses)
all_terminated = all([process.returncode == 0 for process in subprocesses])
return all_terminated
@classmethod
def initialize_aea(cls, author: str) -> None:
"""Initialize AEA locally with author name."""
cls.run_cli_command("init", "--local", "--author", author, cwd=cls._get_cwd())
@classmethod
def add_item(cls, item_type: str, public_id: str, local: bool = True) -> Result:
"""
Add an item to the agent.
Run from agent's directory.
:param item_type: str item type.
:param public_id: public id of the item.
:param local: whether to add from the local folder (True by default).
:return: Result
"""
cli_args = ["add", "--local", item_type, public_id]
if not local: # pragma: nocover
cli_args.remove("--local")
return cls.run_cli_command(*cli_args, cwd=cls._get_cwd())
@classmethod
def remove_item(cls, item_type: str, public_id: str) -> Result:
"""
Remove an item from the agent.
Run from agent's directory.
:param item_type: str item type.
:param public_id: public id of the item.
:return: Result
"""
cli_args = ["remove", item_type, public_id]
return cls.run_cli_command(*cli_args, cwd=cls._get_cwd())
@classmethod
def scaffold_item(
cls, item_type: str, name: str, skip_consistency_check: bool = False
) -> Result:
"""
Scaffold an item for the agent.
Run from agent's directory.
:param item_type: str item type.
:param name: name of the item.
:param skip_consistency_check: if True, skip consistency check.
:return: Result
"""
flags = ["-s"] if skip_consistency_check else []
if item_type == "protocol":
return cls.run_cli_command(
*flags, "scaffold", item_type, "-y", name, cwd=cls._get_cwd()
)
return cls.run_cli_command(
*flags, "scaffold", item_type, name, cwd=cls._get_cwd()
)
@classmethod
def fingerprint_item(cls, item_type: str, public_id: str) -> Result:
"""
Fingerprint an item for the agent.
Run from agent's directory.
:param item_type: str item type.
:param public_id: public id of the item.
:return: Result
"""
return cls.run_cli_command(
"fingerprint", item_type, public_id, cwd=cls._get_cwd()
)
@classmethod
def eject_item(cls, item_type: str, public_id: str) -> Result:
"""
Eject an item in the agent in quiet mode (i.e. no interaction).
Run from agent's directory.
:param item_type: str item type.
:param public_id: public id of the item.
:return: Result
"""
cli_args = ["eject", "--quiet", item_type, public_id]
return cls.run_cli_command(*cli_args, cwd=cls._get_cwd())
@classmethod
def run_install(cls) -> Result:
"""
Execute AEA CLI install command.
Run from agent's directory.
:return: Result
"""
return cls.run_cli_command("install", cwd=cls._get_cwd())
@classmethod
def generate_private_key(
cls,
ledger_api_id: str = DEFAULT_LEDGER,
private_key_file: Optional[str] = None,
password: Optional[str] = None,
) -> Result:
"""
Generate AEA private key with CLI command.
Run from agent's directory.
:param ledger_api_id: ledger API ID.
:param private_key_file: the private key file.
:param password: the password.
:return: Result
"""
cli_args = ["generate-key", ledger_api_id]
if private_key_file is not None: # pragma: nocover
cli_args.append(private_key_file)
cli_args += _get_password_option_args(password)
return cls.run_cli_command(*cli_args, cwd=cls._get_cwd())
@classmethod
def add_private_key(
cls,
ledger_api_id: str = DEFAULT_LEDGER,
private_key_filepath: str = DEFAULT_PRIVATE_KEY_FILE,
connection: bool = False,
password: Optional[str] = None,
) -> Result:
"""
Add private key with CLI command.
Run from agent's directory.
:param ledger_api_id: ledger API ID.
:param private_key_filepath: private key filepath.
:param connection: whether or not the private key filepath is for a connection.
:param password: the password to encrypt private keys.
:return: Result
"""
password_option = _get_password_option_args(password)
if connection:
return cls.run_cli_command(
"add-key",
ledger_api_id,
private_key_filepath,
"--connection",
*password_option,
cwd=cls._get_cwd(),
)
return cls.run_cli_command(
"add-key",
ledger_api_id,
private_key_filepath,
*password_option,
cwd=cls._get_cwd(),
)
@classmethod
def remove_private_key(
cls, ledger_api_id: str = DEFAULT_LEDGER, connection: bool = False,
) -> Result:
"""
Remove private key with CLI command.
Run from agent's directory.
:param ledger_api_id: ledger API ID.
:param connection: whether or not the private key filepath is for a connection.
:return: Result
"""
args = ["remove-key", ledger_api_id] + (["--connection"] if connection else [])
return cls.run_cli_command(*args, cwd=cls._get_cwd())
@classmethod
def replace_private_key_in_file(
cls, private_key: str, private_key_filepath: str = DEFAULT_PRIVATE_KEY_FILE
) -> None:
"""
Replace the private key in the provided file with the provided key.
:param private_key: the private key
:param private_key_filepath: the filepath to the private key file
:raises: exception if file does not exist
"""
with cd(cls._get_cwd()): # pragma: nocover
with open_file(private_key_filepath, "wt") as f:
f.write(private_key)
@classmethod
def generate_wealth(
cls, ledger_api_id: str = DEFAULT_LEDGER, password: Optional[str] = None
) -> Result:
"""
Generate wealth with CLI command.
Run from agent's directory.
:param ledger_api_id: ledger API ID.
:param password: the password.
:return: Result
"""
password_option = _get_password_option_args(password)
return cls.run_cli_command(
"generate-wealth",
ledger_api_id,
*password_option,
"--sync",
cwd=cls._get_cwd(),
)
@classmethod
def get_wealth(
cls, ledger_api_id: str = DEFAULT_LEDGER, password: Optional[str] = None
) -> str:
"""
Get wealth with CLI command.
Run from agent's directory.
:param ledger_api_id: ledger API ID.
:param password: the password to encrypt/decrypt private keys.
:return: command line output
"""
password_option = _get_password_option_args(password)
cls.run_cli_command(
"get-wealth", ledger_api_id, *password_option, cwd=cls._get_cwd()
)
if cls.last_cli_runner_result is None:
raise ValueError("Runner result not set!") # pragma: nocover
return str(cls.last_cli_runner_result.stdout_bytes, "utf-8")
@classmethod
def replace_file_content(cls, src: Path, dest: Path) -> None: # pragma: nocover
"""
Replace the content of the source file to the destination file.
:param src: the source file.
:param dest: the destination file.
"""
enforce(
src.is_file() and dest.is_file(), "Source or destination is not a file."
)
dest.write_text(src.read_text())
@classmethod
def change_directory(cls, path: Path) -> None:
"""
Change current working directory.
:param path: path to the new working directory.
"""
os.chdir(Path(path))
@classmethod
def _terminate_subprocesses(cls) -> None:
"""Terminate all launched subprocesses."""
for process in cls.subprocesses:
if not process.returncode == 0:
poll = process.poll()
if poll is None:
process.terminate()
process.wait(2)
cls.subprocesses = []
@classmethod
def _join_threads(cls) -> None:
"""Join all started threads."""
for thread in cls.threads:
thread.join()
cls.threads = []
@classmethod
def _read_out(
cls, process: subprocess.Popen
) -> None: # pragma: nocover # runs in thread!
for line in TextIOWrapper(process.stdout, encoding="utf-8"):
cls._log_capture("stdout", process.pid, line)
cls.stdout[process.pid] += line
@classmethod
def _read_err(
cls, process: subprocess.Popen
) -> None: # pragma: nocover # runs in thread!
if process.stderr is not None:
for line in TextIOWrapper(process.stderr, encoding="utf-8"):
cls._log_capture("stderr", process.pid, line)
cls.stderr[process.pid] += line
@classmethod
def _log_capture(cls, name: str, pid: int, line: str) -> None: # pragma: nocover
if not cls.capture_log:
return
sys.stdout.write(f"[{pid}]{name}>{line}")
sys.stdout.flush()
@classmethod
def _start_output_read_thread(cls, process: subprocess.Popen) -> None:
"""
Start an output reading thread.
:param process: target process passed to a thread args.
"""
cls.stdout[process.pid] = ""
cls.start_thread(target=cls._read_out, process=process)
@classmethod
def _start_error_read_thread(cls, process: subprocess.Popen) -> None:
"""
Start an error reading thread.
:param process: target process passed to a thread args.
"""
cls.stderr[process.pid] = ""
cls.start_thread(target=cls._read_err, process=process)
@classmethod
def _get_cwd(cls) -> str:
"""Get the current working directory."""
return str(cls.t / cls.current_agent_context)
@classmethod
def send_envelope_to_agent(cls, envelope: Envelope, agent: str) -> None:
"""Send an envelope to an agent, using the stub connection."""
# check added because this sometimes fails on Windows with a permission error
dir_path = Path(cls.t / agent)
enforce(dir_path.exists(), "Dir path does not exist.")
enforce(dir_path.is_dir(), "Dir path is not a directory.")
write_envelope_to_file(envelope, str(cls.t / agent / DEFAULT_INPUT_FILE_NAME))
@classmethod
def read_envelope_from_agent(cls, agent: str) -> Envelope:
"""Read an envelope from an agent, using the stub connection."""
return read_envelope_from_file(str(cls.t / agent / DEFAULT_OUTPUT_FILE_NAME))
@classmethod
def missing_from_output(
cls,
process: subprocess.Popen,
strings: Sequence[str],
timeout: int = DEFAULT_PROCESS_TIMEOUT,
period: int = 1,
is_terminating: bool = True,
) -> List[str]:
"""
Check if strings are present in process output.
Read the process stdout in a thread and stop checking when all strings are present
or the timeout has expired.
:param process: agent subprocess.
:param strings: tuple of strings expected to appear in output.
:param timeout: int amount of seconds before stopping check.
:param period: int period of checking.
:param is_terminating: whether or not the agents are terminated
:return: list of missed strings.
"""
missing_strings = list(strings)
end_time = time.time() + timeout
while missing_strings:
if time.time() > end_time:
break
missing_strings = [
line for line in missing_strings if line not in cls.stdout[process.pid]
]
time.sleep(period)
if is_terminating:
cls.terminate_agents(process)
if missing_strings != []:
_default_logger.info(
"Non-empty missing strings, stderr:\n{}".format(cls.stderr[process.pid])
)
_default_logger.info("=====================")
_default_logger.info(
"Non-empty missing strings, stdout:\n{}".format(cls.stdout[process.pid])
)
_default_logger.info("=====================")
return missing_strings
@classmethod
def is_running(
cls, process: subprocess.Popen, timeout: int = DEFAULT_LAUNCH_TIMEOUT
) -> bool:
"""
Check if the AEA is launched and running (ready to process messages).
:param process: agent subprocess.
:param timeout: the timeout to wait for launch to complete
:return: bool indicating status
"""
missing_strings = cls.missing_from_output(
process, (LAUNCH_SUCCEED_MESSAGE,), timeout, is_terminating=False
)
return missing_strings == []
@classmethod
def invoke(cls, *args: str) -> Result:
"""Call the cli command."""
with cd(cls._get_cwd()):
result = cls.runner.invoke(
cli, args, standalone_mode=False, catch_exceptions=False
)
return result
@classmethod
def load_agent_config(cls, agent_name: str) -> AgentConfig:
"""Load agent configuration."""
if agent_name not in cls.agents:
raise AEATestingException(
f"Cannot find agent '{agent_name}' in the current test case."
)
loader = ConfigLoaders.from_package_type(PackageType.AGENT)
config_file_name = _get_default_configuration_file_name_from_type(
PackageType.AGENT
)
configuration_file_path = Path(cls.t, agent_name, config_file_name)
with open_file(configuration_file_path) as file_input:
agent_config = loader.load(file_input)
return agent_config
@classmethod
def setup_class(cls) -> None:
"""Set up the test class."""
cls.method_list = [
func
for func in dir(cls)
if callable(getattr(cls, func))
and not func.startswith("__")
and func.startswith("test_")
]
cls.runner = CliRunner()
cls.old_cwd = Path(os.getcwd())
cls.subprocesses = []
cls.threads = []
cls.t = Path(tempfile.mkdtemp())
cls.change_directory(cls.t)
cls.package_registry_src = cls.old_cwd / cls.package_registry_src_rel
if cls.use_packages_dir:
registry_tmp_dir = cls.t / cls.packages_dir_path
shutil.copytree(str(cls.package_registry_src), str(registry_tmp_dir))
cls.initialize_aea(cls.author)
cls.stdout = {}
cls.stderr = {}
@classmethod
def teardown_class(cls) -> None:
"""Teardown the test."""
cls.change_directory(cls.old_cwd)
cls.terminate_agents(*cls.subprocesses)
cls._terminate_subprocesses()
cls._join_threads()
cls.unset_agent_context()
cls.last_cli_runner_result = None
cls.packages_dir_path = Path(DEFAULT_REGISTRY_NAME)
cls.use_packages_dir = True
cls.agents = set()
cls.current_agent_context = ""
cls.stdout = {}
cls.stderr = {}
with suppress(OSError, IOError):
shutil.rmtree(cls.t)
cls._is_teardown_class_called = True
def _get_password_option_args(password: Optional[str]) -> List[str]:
"""
Get password option arguments.
:param password: the password (optional).
:return: empty list if password is None, else ['--password', password].
"""
return [] if password is None else ["--password", password]
class AEATestCaseEmpty(BaseAEATestCase):
"""
Test case for a default AEA project.
This test case will create a default AEA project.
"""
agent_name = ""
IS_LOCAL = True
IS_EMPTY = False
@classmethod
def setup_class(cls) -> None:
"""Set up the test class."""
super(AEATestCaseEmpty, cls).setup_class()
cls.agent_name = "agent_" + "".join(random.choices(string.ascii_lowercase, k=5))
cls.create_agents(cls.agent_name, is_local=cls.IS_LOCAL, is_empty=cls.IS_EMPTY)
cls.set_agent_context(cls.agent_name)
@classmethod
def teardown_class(cls) -> None:
"""Teardown the test class."""
super(AEATestCaseEmpty, cls).teardown_class()
cls.agent_name = ""
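# Illustrative usage sketch (an assumption, not part of this module): a downstream
# test module would typically subclass AEATestCaseEmpty and drive the generated
# agent through the CLI helpers defined above. The skill public id below is only
# a placeholder.
#
#   class TestMySkillEndToEnd(AEATestCaseEmpty):
#       """Run the default agent with a skill added."""
#
#       def test_run(self):
#           self.add_item("skill", "some_author/some_skill:0.1.0")
#           process = self.run_agent()
#           assert self.is_running(process)
#           self.terminate_agents(process)
#           assert self.is_successfully_terminated(process)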
class AEATestCaseEmptyFlaky(AEATestCaseEmpty):
"""
Test case for a default AEA project.
This test case will create a default AEA project.
Use for flaky tests with the flaky decorator.
"""
run_count: int = 0
@classmethod
def setup_class(cls) -> None:
"""Set up the test class."""
super(AEATestCaseEmptyFlaky, cls).setup_class()
if len(cls.method_list) > 1: # pragma: nocover
raise ValueError(f"{cls.__name__} can only contain one test method!")
cls.run_count += 1
@classmethod
def teardown_class(cls) -> None:
"""Teardown the test class."""
super(AEATestCaseEmptyFlaky, cls).teardown_class()
class AEATestCaseMany(BaseAEATestCase):
"""Test case for many AEA projects."""
@classmethod
def setup_class(cls) -> None:
"""Set up the test class."""
super(AEATestCaseMany, cls).setup_class()
@classmethod
def teardown_class(cls) -> None:
"""Teardown the test class."""
super(AEATestCaseMany, cls).teardown_class()
class AEATestCaseManyFlaky(AEATestCaseMany):
"""
Test case for many AEA projects which are flaky.
Use for flaky tests with the flaky decorator.
"""
run_count: int = 0
@classmethod
def setup_class(cls) -> None:
"""Set up the test class."""
super(AEATestCaseManyFlaky, cls).setup_class()
if len(cls.method_list) > 1: # pragma: nocover
raise ValueError(f"{cls.__name__} can only contain one test method!")
cls.run_count += 1
@classmethod
def teardown_class(cls) -> None:
"""Teardown the test class."""
super(AEATestCaseManyFlaky, cls).teardown_class()
class AEATestCase(BaseAEATestCase):
"""
Test case from an existing AEA project.
Subclass this class and set `path_to_aea` properly. By default,
it is assumed the project is inside the current working directory.
"""
agent_name = ""
path_to_aea: Path = Path(".")
packages_dir_path: Path = Path("..", DEFAULT_REGISTRY_NAME)
agent_configuration: Optional[AgentConfig] = None
t: Path # temporary directory path
@classmethod
def setup_class(cls) -> None:
"""Set up the test class."""
# make paths absolute
cls.path_to_aea = cls.path_to_aea.absolute()
# load agent configuration
with Path(cls.path_to_aea, DEFAULT_AEA_CONFIG_FILE).open(
mode="r", encoding="utf-8"
) as fp:
loader = ConfigLoader.from_configuration_type(PackageType.AGENT)
agent_configuration = loader.load(fp)
cls.agent_configuration = agent_configuration
cls.agent_name = agent_configuration.agent_name
# this will create a temporary directory and move into it
cls.use_packages_dir = False
super(AEATestCase, cls).setup_class()
# copy the content of the agent into the temporary directory
shutil.copytree(str(cls.path_to_aea), str(cls.t / cls.agent_name))
cls.set_agent_context(cls.agent_name)
@classmethod
def teardown_class(cls) -> None:
"""Teardown the test class."""
cls.agent_name = ""
cls.path_to_aea = Path(".")
cls.agent_configuration = None
super(AEATestCase, cls).teardown_class()
|
server.py
|
import socket
from threading import Thread
socketList = []
def waitConnect(s):
"""
cmd format --> # -H $IP -p $PORT -c <start|stop>
"""
while True:
sock, addr = s.accept()
if sock not in socketList:
socketList.append(sock)
def sendCmd(cmd):
print("Send command......")
for sock in socketList:
sock.send(cmd.encode("utf-8"))
def main():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('0.0.0.0', 58868))
s.listen(1024)
t = Thread(target=waitConnect, args=(s,))
t.start()
print("Wait at least a client connection!")
while not len(socketList):
pass
print("It has been a client connection!")
while True:
print("=" * 50)
print('The command format:"#-H $IP -p $PORT -c <start|stop>"')
cmd_str = input("Please input cmd:")
if len(cmd_str):
if cmd_str[0] == "#":
sendCmd(cmd_str)
if __name__ == "__main__":
main()
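# Illustrative client sketch (an assumption, not part of this script): a matching
# client would connect to port 58868 and act on the command strings broadcast by
# sendCmd(), e.g.:
#
#   import socket
#   c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   c.connect(("127.0.0.1", 58868))
#   while True:
#       cmd = c.recv(1024).decode("utf-8")
#       if cmd.startswith("#"):
#           print("received:", cmd)  # parse "-H $IP -p $PORT -c <start|stop>" here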
|
http2_connection.py
|
import Queue
import threading
import socket
import errno
import struct
from http_common import *
from hyper.common.bufsocket import BufferedSocket
from hyper.packages.hyperframe.frame import (
FRAMES, DataFrame, HeadersFrame, PushPromiseFrame, RstStreamFrame,
SettingsFrame, Frame, WindowUpdateFrame, GoAwayFrame, PingFrame,
BlockedFrame, FRAME_MAX_ALLOWED_LEN, FRAME_MAX_LEN
)
from http2_stream import Stream
from hyper.http20.window import BaseFlowControlManager
from hyper.packages.hpack import Encoder, Decoder
# this is defined in rfc7540
# default window size 64k
DEFAULT_WINDOW_SIZE = 65535
# default max frame is 16k, defined in rfc7540
DEFAULT_MAX_FRAME = FRAME_MAX_LEN
class FlowControlManager(BaseFlowControlManager):
"""
``hyper``'s default flow control manager.
This implements hyper's flow control algorithms. This algorithm attempts to
reduce the number of WINDOWUPDATE frames we send without blocking the remote
endpoint behind the flow control window.
This algorithm will become more complicated over time. In the current form,
the algorithm is very simple:
- When the flow control window gets less than 3/4 of the maximum size,
increment back to the maximum.
- Otherwise, if the flow control window gets to less than 1kB, increment
back to the maximum.
"""
def increase_window_size(self, frame_size):
future_window_size = self.window_size - frame_size
if ((future_window_size < (self.initial_window_size * 3 / 4)) or
(future_window_size < 1000)):
return self.initial_window_size - future_window_size
return 0
def blocked(self):
return self.initial_window_size - self.window_size
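# --- Worked example (added note, not part of the original file) ---
# With initial_window_size = 65535 and a full window:
#   increase_window_size(frame_size=1000) leaves a future window of 64535,
#   which is above 3/4 of 65535 (~49151) and above 1000, so 0 is returned
#   and no WINDOW_UPDATE frame is emitted.
#   If the window would instead drop to 40000 (below 3/4 of the maximum),
#   the manager returns 65535 - 40000 = 25535, topping the window back up
#   to its initial size.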
class RawFrame(object):
def __init__(self, dat):
self.dat = dat
def serialize(self):
return self.dat
def __repr__(self):
out_str = "{type}".format(type=type(self).__name__)
return out_str
class Http2Worker(HttpWorker):
version = "2"
def __init__(self, logger, ip_manager, config, ssl_sock, close_cb, retry_task_cb, idle_cb, log_debug_data):
super(Http2Worker, self).__init__(
logger, ip_manager, config, ssl_sock, close_cb, retry_task_cb, idle_cb, log_debug_data)
self.network_buffer_size = 65535
# Google http/2 time out is 4 mins.
self.ssl_sock.settimeout(240)
self._sock = BufferedSocket(ssl_sock, self.network_buffer_size)
self.next_stream_id = 1
self.streams = {}
self.last_ping_time = time.time()
self.continue_timeout = 0
# count of PING frames not yet ACKed
# increased when a ping is sent
# decreased when a ping ACK is received
# if this is not 0, don't accept new requests.
self.ping_on_way = 0
self.accept_task = False
# request_lock
self.request_lock = threading.Lock()
# every frame to send must be put into this queue,
# then it is sent by send_loop.
# every frame put into this queue must be allowed by both the stream window and the connection window;
# any data frame blocked by the connection window is put into self.blocked_send_frames instead.
self.send_queue = Queue.Queue()
self.encoder = Encoder()
self.decoder = Decoder()
# keep blocked data frames in this buffer:
# frames allowed by the stream window but blocked by the connection window.
# They will be sent once the connection window opens up.
self.blocked_send_frames = []
# Values for the settings used on an HTTP/2 connection.
# will send to remote using Setting Frame
self.local_settings = {
SettingsFrame.INITIAL_WINDOW_SIZE: 16 * 1024 * 1024,
SettingsFrame.SETTINGS_MAX_FRAME_SIZE: 256 * 1024
}
self.local_connection_initial_windows = 32 * 1024 * 1024
self.local_window_manager = FlowControlManager(self.local_connection_initial_windows)
# changed by server, with SettingFrame
self.remote_settings = {
SettingsFrame.INITIAL_WINDOW_SIZE: DEFAULT_WINDOW_SIZE,
SettingsFrame.SETTINGS_MAX_FRAME_SIZE: DEFAULT_MAX_FRAME,
SettingsFrame.MAX_CONCURRENT_STREAMS: 100
}
#self.remote_window_size = DEFAULT_WINDOW_SIZE
self.remote_window_size = 32 * 1024 * 1024
# send Setting frame before accept task.
self._send_preamble()
threading.Thread(target=self.send_loop).start()
threading.Thread(target=self.recv_loop).start()
# export api
def request(self, task):
if not self.keep_running:
# race condition
self.retry_task_cb(task)
return
if len(self.streams) > self.config.http2_max_concurrent:
self.accept_task = False
task.set_state("h2_req")
self.request_task(task)
def encode_header(self, headers):
return self.encoder.encode(headers)
def request_task(self, task):
with self.request_lock:
# create stream to process task
stream_id = self.next_stream_id
# http/2 client use odd stream_id
self.next_stream_id += 2
stream = Stream(self.logger, self.config, self, self.ip, stream_id, task,
self._send_cb, self._close_stream_cb, self.encode_header, self.decoder,
FlowControlManager(self.local_settings[SettingsFrame.INITIAL_WINDOW_SIZE]),
self.remote_settings[SettingsFrame.INITIAL_WINDOW_SIZE],
self.remote_settings[SettingsFrame.SETTINGS_MAX_FRAME_SIZE])
self.streams[stream_id] = stream
stream.start_request()
def send_loop(self):
while self.keep_running:
frame = self.send_queue.get(True)
if not frame:
# a None frame means exit
break
if self.config.http2_show_debug:
self.logger.debug("%s Send:%s", self.ip, str(frame))
data = frame.serialize()
try:
self._sock.send(data, flush=False)
# don't flush for small package
# reduce send api call
if self.send_queue._qsize():
continue
# wait for payload frame
time.sleep(0.01)
# combine header and payload in one tcp package.
if not self.send_queue._qsize():
self._sock.flush()
self.last_send_time = time.time()
except socket.error as e:
if e.errno not in (errno.EPIPE, errno.ECONNRESET):
self.logger.warn("%s http2 send fail:%r", self.ip, e)
else:
self.logger.exception("send error:%r", e)
self.close("send fail:%r" % e)
except Exception as e:
self.logger.debug("http2 %s send error:%r", self.ip, e)
self.close("send fail:%r" % e)
def recv_loop(self):
while self.keep_running:
try:
self._consume_single_frame()
except Exception as e:
self.logger.exception("recv fail:%r", e)
self.close("recv fail:%r" % e)
def get_rtt_rate(self):
return self.rtt + len(self.streams) * 3000
def close(self, reason="conn close"):
self.keep_running = False
self.accept_task = False
# Notify the loops to exit.
# This function may be called from outside the http2 worker,
# e.g. when gae_proxy finds the appid or ip is wrong.
self.send_queue.put(None)
for stream in self.streams.values():
if stream.task.responsed:
# the response has already been sent to the client,
# can't retry
stream.close(reason=reason)
else:
self.retry_task_cb(stream.task)
self.streams = {}
super(Http2Worker, self).close(reason)
def send_ping(self):
p = PingFrame(0)
p.opaque_data = struct.pack("!d", time.time())
self.send_queue.put(p)
self.last_ping_time = time.time()
self.ping_on_way += 1
def _send_preamble(self):
self.send_queue.put(RawFrame(b'PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n'))
f = SettingsFrame(0)
f.settings[SettingsFrame.ENABLE_PUSH] = 0
f.settings[SettingsFrame.INITIAL_WINDOW_SIZE] = self.local_settings[SettingsFrame.INITIAL_WINDOW_SIZE]
f.settings[SettingsFrame.SETTINGS_MAX_FRAME_SIZE] = self.local_settings[SettingsFrame.SETTINGS_MAX_FRAME_SIZE]
self._send_cb(f)
# update local connection windows size
f = WindowUpdateFrame(0)
f.window_increment = self.local_connection_initial_windows - DEFAULT_WINDOW_SIZE
self._send_cb(f)
def increase_remote_window_size(self, inc_size):
# check and send blocked frames if window allow
self.remote_window_size += inc_size
#self.logger.debug("%s increase send win:%d result:%d", self.ip, inc_size, self.remote_window_size)
while len(self.blocked_send_frames):
frame = self.blocked_send_frames[0]
if len(frame.data) > self.remote_window_size:
return
self.remote_window_size -= len(frame.data)
self.send_queue.put(frame)
self.blocked_send_frames.pop(0)
if self.keep_running and \
self.accept_task == False and \
len(self.streams) < self.config.http2_max_concurrent and \
self.remote_window_size > 10000:
self.accept_task = True
self.idle_cb()
def _send_cb(self, frame):
# can be called by a stream;
# put the frame into blocked_send_frames if the connection window does not allow sending it.
if frame.type == DataFrame.type:
if len(frame.data) > self.remote_window_size:
self.blocked_send_frames.append(frame)
self.accept_task = False
return
else:
self.remote_window_size -= len(frame.data)
self.send_queue.put(frame)
else:
self.send_queue.put(frame)
def _close_stream_cb(self, stream_id, reason):
# call by stream to remove from streams list
# self.logger.debug("%s close stream:%d %s", self.ssl_sock.ip, stream_id, reason)
try:
del self.streams[stream_id]
except KeyError:
pass
if self.keep_running and \
len(self.streams) < self.config.http2_max_concurrent and \
self.remote_window_size > 10000:
self.accept_task = True
self.idle_cb()
self.processed_tasks += 1
def _consume_single_frame(self):
try:
header = self._sock.recv(9)
except Exception as e:
self.logger.debug("%s _consume_single_frame:%r, inactive time:%d", self.ip, e, time.time() - self.last_recv_time)
self.close("ConnectionReset:%r" % e)
return
self.last_recv_time = time.time()
# Parse the header. We can use the returned memoryview directly here.
frame, length = Frame.parse_frame_header(header)
if length > FRAME_MAX_ALLOWED_LEN:
self.logger.error("%s Frame size exceeded on stream %d (received: %d, max: %d)",
self.ip, frame.stream_id, length, FRAME_MAX_LEN)
# self._send_rst_frame(frame.stream_id, 6) # 6 = FRAME_SIZE_ERROR
try:
data = self._recv_payload(length)
except Exception as e:
self.close("ConnectionReset:%r" % e)
return
self._consume_frame_payload(frame, data)
def _recv_payload(self, length):
if not length:
return memoryview(b'')
buffer = bytearray(length)
buffer_view = memoryview(buffer)
index = 0
data_length = -1
# _sock.recv(length) might not read all of the data if the given length
# is very large, so we read from the socket repeatedly.
while length and data_length:
data = self._sock.recv(length)
self.last_recv_time = time.time()
data_length = len(data)
end = index + data_length
buffer_view[index:end] = data[:]
length -= data_length
index = end
return buffer_view[:end]
def _consume_frame_payload(self, frame, data):
frame.parse_body(data)
if self.config.http2_show_debug:
self.logger.debug("%s Recv:%s", self.ip, str(frame))
# Maintain our flow control window. We do this by delegating to the
# chosen WindowManager.
if frame.type == DataFrame.type:
size = frame.flow_controlled_length
increment = self.local_window_manager._handle_frame(size)
if increment < 0:
self.logger.warn("increment:%d", increment)
elif increment:
#self.logger.debug("%s frame size:%d increase win:%d", self.ip, size, increment)
w = WindowUpdateFrame(0)
w.window_increment = increment
self._send_cb(w)
elif frame.type == PushPromiseFrame.type:
self.logger.error("%s receive push frame", self.ip,)
# Work out to whom this frame should go.
if frame.stream_id != 0:
try:
stream = self.streams[frame.stream_id]
stream.receive_frame(frame)
except KeyError as e:
if frame.type not in [WindowUpdateFrame.type]:
self.logger.exception("%s Unexpected stream identifier %d, frame.type:%s e:%r",
self.ip, frame.stream_id, frame, e)
else:
self.receive_frame(frame)
def receive_frame(self, frame):
if frame.type == WindowUpdateFrame.type:
# self.logger.debug("WindowUpdateFrame %d", frame.window_increment)
self.increase_remote_window_size(frame.window_increment)
elif frame.type == PingFrame.type:
if 'ACK' in frame.flags:
ping_time = struct.unpack("!d", frame.opaque_data)[0]
time_now = time.time()
rtt = (time_now - ping_time) * 1000
if rtt < 0:
self.logger.error("rtt:%f ping_time:%f now:%f", rtt, ping_time, time_now)
self.rtt = rtt
self.ping_on_way -= 1
#self.logger.debug("RTT:%d, on_way:%d", self.rtt, self.ping_on_way)
if self.keep_running and self.ping_on_way == 0:
self.accept_task = True
else:
# The spec requires us to reply with PING+ACK and identical data.
p = PingFrame(0)
p.flags.add('ACK')
p.opaque_data = frame.opaque_data
self._send_cb(p)
elif frame.type == SettingsFrame.type:
if 'ACK' not in frame.flags:
# send ACK as soon as possible
f = SettingsFrame(0)
f.flags.add('ACK')
self._send_cb(f)
# this may trigger send DataFrame blocked by remote window
self._update_settings(frame)
else:
self.accept_task = True
self.idle_cb()
elif frame.type == GoAwayFrame.type:
# If we get GoAway with error code zero, we are doing a graceful
# shutdown and all is well. Otherwise, throw an exception.
# If an error occurred, try to read the error description from
# code registry otherwise use the frame's additional data.
error_string = frame._extra_info()
time_cost = time.time() - self.last_recv_time
if frame.additional_data != "session_timed_out":
self.logger.warn("goaway:%s, t:%d", error_string, time_cost)
self.close("GoAway:%s inactive time:%d" % (error_string, time_cost))
elif frame.type == BlockedFrame.type:
self.logger.warn("%s get BlockedFrame", self.ip)
elif frame.type in FRAMES:
# This frame isn't valid at this point.
#raise ValueError("Unexpected frame %s." % frame)
self.logger.error("%s Unexpected frame %s.", self.ip, frame)
else: # pragma: no cover
# Unexpected frames belong to extensions. Just drop it on the
# floor, but log so that users know that something happened.
self.logger.error("%s Received unknown frame, type %d", self.ip, frame.type)
def _update_settings(self, frame):
if SettingsFrame.HEADER_TABLE_SIZE in frame.settings:
new_size = frame.settings[SettingsFrame.HEADER_TABLE_SIZE]
self.remote_settings[SettingsFrame.HEADER_TABLE_SIZE] = new_size
#self.encoder.header_table_size = new_size
if SettingsFrame.INITIAL_WINDOW_SIZE in frame.settings:
newsize = frame.settings[SettingsFrame.INITIAL_WINDOW_SIZE]
oldsize = self.remote_settings[SettingsFrame.INITIAL_WINDOW_SIZE]
delta = newsize - oldsize
for stream in self.streams.values():
stream.remote_window_size += delta
self.remote_settings[SettingsFrame.INITIAL_WINDOW_SIZE] = newsize
if SettingsFrame.SETTINGS_MAX_FRAME_SIZE in frame.settings:
new_size = frame.settings[SettingsFrame.SETTINGS_MAX_FRAME_SIZE]
if not (FRAME_MAX_LEN <= new_size <= FRAME_MAX_ALLOWED_LEN):
self.logger.error("%s Frame size %d is outside of allowed range", self.ip, new_size)
# Tear the connection down with error code PROTOCOL_ERROR
self.close("bad max frame size")
#error_string = ("Advertised frame size %d is outside of range" % (new_size))
#raise ConnectionError(error_string)
return
self.remote_settings[SettingsFrame.SETTINGS_MAX_FRAME_SIZE] = new_size
for stream in self.streams.values():
stream.max_frame_size += new_size
def get_trace(self):
out_list = []
out_list.append(" continue_timeout:%d" % self.continue_timeout)
out_list.append(" processed:%d" % self.processed_tasks)
out_list.append(" h2.stream_num:%d" % len(self.streams))
out_list.append(" sni:%s, host:%s" % (self.ssl_sock.sni, self.ssl_sock.host))
return ",".join(out_list)
def check_active(self, now):
if not self.keep_running or len(self.streams) == 0:
return
for sid in self.streams.keys():
try:
stream = self.streams[sid]
stream.check_timeout(now)
except:
pass
if len(self.streams) > 0 and\
now - self.last_send_time > 3 and \
now - self.last_ping_time > self.config.http2_ping_min_interval:
if self.ping_on_way > 0:
self.close("active timeout")
return
self.send_ping()
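# --- Illustrative RTT measurement (added sketch, not part of the original file) ---
# send_ping() packs the current wall-clock time into the PING frame's opaque
# data, and the PING ACK handler in receive_frame() unpacks it to compute the
# round trip time in milliseconds. The same arithmetic in isolation:
#
# import struct, time
#
# sent = struct.pack("!d", time.time())      # 8-byte big-endian double, as in send_ping()
# # ... the peer echoes the opaque data back in a PING frame with the ACK flag ...
# ping_time = struct.unpack("!d", sent)[0]
# rtt_ms = (time.time() - ping_time) * 1000  # as computed in receive_frame()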
|
mikemanager.py
|
#!/usr/bin/python -u
# coding=utf-8
"""
Manage microphone with pyaudio
"""
import logging
import os
import sys
import threading
import time
import traceback
import wave
from collections import deque
from pathlib import Path
from subprocess import Popen, PIPE
import argparse
import glob
import yaml
import pandas as pd
from pandas.tseries.offsets import Minute
from pyaudio import PyAudio
from logmanager import MessageHandler
TIMESTAMP_FORMAT = '%m/%d/%Y %H:%M:%S.%f'
TIMESTAMP_FORMAT_SQL = '%Y-%m-%d %H:%M:%S'
WAV_FILE_TIMESTAMP = '%Y%m%d%H%M'
MODULE_DIRECTORY = os.path.dirname(os.path.abspath(__file__))
os.chdir(MODULE_DIRECTORY)
LOG_DIR = str(Path('./logs').absolute())
OUTPUT_QUEUE = deque([], 5) # used to queue frames for saving to wav file
STOP_QUEUE = deque([], 1) # used to issue stop command
class MikeManager:
"""
Main class to manage microphone
"""
_recorder_info: dict = None # configuration information
_msg_handler: MessageHandler = None # logging interface
py_audio: PyAudio = None # PyAudio representation of microphone
def __init__(self, recorder_info: dict, msg_handler: MessageHandler):
self._recorder_info = recorder_info
self._msg_handler = msg_handler
self.initialize()
def initialize(self):
self.py_audio = PyAudio()
def close(self):
self.py_audio.terminate()
def check_audio_connections(self):
"""
print out all PyAudio connections
:return:
"""
self._msg_handler.log("printing audio devices out and exiting...")
for device_ind in range(self.py_audio.get_device_count()):
print("index: {0:d}, name:{1}"
.format(device_ind, self.py_audio.get_device_info_by_index(device_ind).get('name')))
self.py_audio.terminate()
def create_recording(self, end: pd.Timestamp):
"""
create an audio recording in accordance with the config file and append it to the queue
:param end:
:return:
"""
now = pd.Timestamp.now()
record_secs = (end - now).seconds
record_frames_num = int((self._recorder_info['sample-rate'] / self._recorder_info['chunk']) * record_secs)
self._msg_handler.log("recording until {0}, {1:d} seconds"
.format(end.strftime(TIMESTAMP_FORMAT_SQL), record_secs))
stream = self.py_audio.open(format=self._recorder_info['format'], rate=self._recorder_info['sample-rate'],
channels=self._recorder_info['channels'],
input_device_index=self._recorder_info['device-index'],
input=self._recorder_info['input'],
frames_per_buffer=self._recorder_info['chunk'])
self._msg_handler.log("recording {0:d} frames".format(record_frames_num))
frames = []
for ii in range(0, record_frames_num):
data = stream.read(self._recorder_info['chunk'], exception_on_overflow=False)
frames.append(data)
if len(frames) != record_frames_num:
self._msg_handler.log("Warning: {0:d) requested, {1:d} frames received"
.format(len(frames), record_frames_num))
if len(frames) == 0:
self._msg_handler.log(msg="No Frames received, reinitializing...", lvl=logging.WARNING)
self.close()
self.initialize()
self._msg_handler.log("finished recording {0:d} frames".format(len(frames)))
# stop the stream, close it, and terminate the pyaudio instantiation
stream.stop_stream()
stream.close()
self._msg_handler.log("stopped stream appending to queue..")
OUTPUT_QUEUE.append((frames, end))
def check_queue_and_write(recorder_info: dict, msg_handler: MessageHandler):
"""
check recording queue, write audio file to specified location
:param recorder_info: configuration dictionary
:param msg_handler: logger
:return: None
"""
msg_handler.log("Initializing writer thread...")
delete_old = recorder_info['delete-old']
if delete_old:
msg_handler.log("deleting old files...")
wav_output_filename_form = '{0}{1}.wav'
while True:
if OUTPUT_QUEUE: # check output queue, write frames
frames, end = OUTPUT_QUEUE.pop()
filename = wav_output_filename_form\
.format(end.strftime(WAV_FILE_TIMESTAMP), recorder_info['output-name'])
save_folder_path = recorder_info['dest-dir']
if not Path(recorder_info['dest-dir']).exists():
save_folder_path = mount_destination(recorder_info, msg_handler)
wav_output_filepath = str(Path(save_folder_path, filename).absolute())
if len(frames) > 0:
msg_handler.log("writer: saving {0} writing {1:d} frames"
.format(str(wav_output_filepath), len(frames)))
with wave.open(wav_output_filepath, 'wb') as wavefile:
wavefile.setnchannels(recorder_info['channels'])
wavefile.setsampwidth(2) # really audio.get_sample_size(FORM_1)
wavefile.setframerate(recorder_info['sample-rate'])
wavefile.writeframes(b''.join(frames))
# wavefile.close()
msg_handler.log("writer: {0} save complete".format(str(filename)))
if delete_old: # if delete old specified, only keep last 10 sound files, delete rest
index_to_delete = 10
sound_files = list(reversed(sorted(glob.glob(str(
Path(save_folder_path, '*{0}.wav'.format(recorder_info['output-name'])))))))
if len(sound_files) > index_to_delete:
msg_handler.log("deleting: {0} and older, {1:d} files"
.format(Path(sound_files[index_to_delete]).stem,
len(sound_files[index_to_delete:])))
for sound_file in sound_files[index_to_delete:]:
os.remove(sound_file)
else:
if STOP_QUEUE:
msg_handler.log("writer: stop queue detected by writer thread, breaking...")
break
time.sleep(1)
# noinspection PyTypeChecker
def check_audio_devices(recorder_info: dict, msg_handler: MessageHandler):
mike_manager = MikeManager(recorder_info, msg_handler)
mike_manager.check_audio_connections()
mike_manager.close()
def run_mike(recorder_info: dict, msg_handler: MessageHandler):
"""
manage microphone, recording sounds to queue
:param recorder_info: config specifications
:param msg_handler: logger
:return: None
"""
start_time = pd.Timestamp.now()
msg_handler.log("start mike manager now {0}".format(start_time.strftime(TIMESTAMP_FORMAT)))
start_time = pd.Timestamp.now().ceil('min') + Minute(1)
run_mins = recorder_info['run-minutes']
end_times = pd.date_range(start=start_time, periods=run_mins, freq='min').to_list()\
if run_mins else [start_time]
if run_mins:
msg_handler.log("start recording: {0}, end {1}"
.format(start_time.strftime(TIMESTAMP_FORMAT), end_times[-1].strftime(TIMESTAMP_FORMAT)))
else:
msg_handler.log("running for indefinite period")
if not run_mins:
msg_handler.log("first end time: {0}".format(start_time.strftime(TIMESTAMP_FORMAT)))
if run_mins:
msg_handler.log(str(end_times))
mike_manager = MikeManager(recorder_info, msg_handler)
next_recording_time = end_times.pop(0)
while True:
if next_recording_time:
mike_manager.create_recording(end=next_recording_time)
else:
msg_handler.log("Maximum reads exceeded, closing...")
mike_manager.close()
STOP_QUEUE.append("STOP")
secs_to_sleep = 10
msg_handler.log("Appended stop queue, Sleeping for {0:d} seconds".format(secs_to_sleep))
time.sleep(secs_to_sleep)
break
if run_mins:
if len(end_times) > 0:
next_recording_time = end_times.pop(0)
message_handler.log("popping next recording time {0}".format(str(next_recording_time)))
else:
message_handler.log("setting next recording time to None...")
next_recording_time = None
else:
next_recording_time = next_recording_time + Minute(1)
message_handler.log("recording program thread ended") # if no break, shouldn't reach this
def load_config_data(this_config_filename):
"""
load config data from filename
:param this_config_filename: config file
:return: config dictionary
"""
if Path(MODULE_DIRECTORY).exists() and Path(MODULE_DIRECTORY, this_config_filename).exists():
try:
with open(Path(MODULE_DIRECTORY, this_config_filename), 'r') as f:
return yaml.safe_load(f)['soundrecorder-info']
except Exception as ex1:
raise ValueError('Could not parse config, exception {0}'.format(str(ex1)))
else:
raise ValueError("Path to config file {0} does not exist"
.format(Path(MODULE_DIRECTORY, this_config_filename)))
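# --- Illustrative config layout (added sketch, not part of the original file) ---
# load_config_data() expects a YAML file with a top-level "soundrecorder-info"
# mapping. The keys below are inferred from how recorder_info is used in this
# module; the concrete values are assumptions, not taken from a real setup:
#
# soundrecorder-info:
#   sample-rate: 44100
#   chunk: 4096
#   channels: 1
#   format: 8              # a PyAudio sample format constant, e.g. paInt16
#   device-index: 1
#   input: true
#   output-name: mic
#   run-minutes: 60
#   delete-old: true
#   dest-dir: /mnt/recordings
#   network-share: //server/share
#   local-mount: /mnt/recordings
#   credentials-file: /home/pi/.smbcredentials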
def mount_destination(recorder_info: dict, msg_handler: MessageHandler):
mount_cmd = ['sudo', 'mount', '-t', 'cifs', recorder_info['network-share'],
recorder_info['local-mount'], '-o',
'credentials={0},rw,iocharset=utf8,file_mode=0777,dir_mode=0777,nodfs'
.format(recorder_info['credentials-file'])]
if not Path(recorder_info['dest-dir']).exists():
msg_handler.log('mounting directory')
# os.cmd(MOUNT_CMD)
p1 = Popen(mount_cmd, stdout=PIPE)
stdoutdata, stderrdata = p1.communicate()
msg_handler.log(stdoutdata)
msg_handler.log(stderrdata)
time.sleep(5)
if Path(recorder_info['dest-dir']).exists():
return recorder_info['dest-dir']
else:
return LOG_DIR
else:
return recorder_info['dest-dir']
def run_sound_recorder(recorder_info: dict, run_message_handler: MessageHandler):
"""
manage recording of sound to queue, reading of queue and writing soundfiles to
disk as concurrent threads
:param recorder_info: config dictionary
:param run_message_handler: logger
:return:
"""
run_message_handler.log("Establishing destination directory...")
dest_dir = mount_destination(recorder_info, run_message_handler)
run_message_handler.log("Destination directory is {0}".format(dest_dir))
run_message_handler.log("writing sound files to {0}".format(dest_dir))
recorder_info['dest-dir'] = dest_dir
# create threads and start
write_thread: threading.Thread = threading.Thread(target=check_queue_and_write,
args=(recorder_info, run_message_handler,))
record_thread: threading.Thread = threading.Thread(target=run_mike, args=(recorder_info, run_message_handler,))
write_thread.daemon = True
record_thread.daemon = True
write_thread.start()
record_thread.start()
while True:
if STOP_QUEUE and not OUTPUT_QUEUE: # gracefully stop threads if stop point reached
run_message_handler.log("Program stop signal received, stopping threads")
join_timeout = 30.
write_thread.join(join_timeout)
record_thread.join(join_timeout)
if write_thread.is_alive():
run_message_handler.log("Write Thread is ALIVE!")
elif record_thread.is_alive():
run_message_handler.log("Record Thread is ALIVE!")
else:
run_message_handler.log("All threads joined, closing logger and exiting")
run_message_handler.close()
sys.exit(0)
else:
time.sleep(1)
if __name__ == "__main__":
try:
desctxt: str = "Check PyAudio devices, Manage NSRT Microphone"
my_parser = argparse.ArgumentParser(prog='soundrecorder', description=desctxt)
my_parser.add_argument('action', help="enter either run or check")
my_parser.add_argument('--config_file', type=str, help='config file to run')
args = my_parser.parse_args()
print(args)
if args.config_file:
config_data: dict = load_config_data(args.config_file)
else:
raise ValueError("No configuration file specified")
message_handler = MessageHandler(config_filename=str(Path(MODULE_DIRECTORY, args.config_file)),
is_debug=False)
if args.action == 'check':
message_handler.log("Checking audio devices only")
check_audio_devices({}, message_handler)
if args.action == 'run':
message_handler.log("Running and recording audio...")
run_sound_recorder(config_data, message_handler)
except Exception as ex:
# noinspection PyUnboundLocalVariable
message_handler.log(msg="Critical Exception: {0}".format(str(ex)), lvl=logging.CRITICAL)
message_handler.log(msg=traceback.format_exc(), lvl=logging.CRITICAL)
|
main_window.py
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import binascii
import time
import threading
import os
import traceback
import json
import weakref
import csv
from decimal import Decimal
import base64
from functools import partial
import queue
import asyncio
from typing import Optional, TYPE_CHECKING, Sequence, List, Union
import eth_abi
from PyQt5.QtGui import QPixmap, QKeySequence, QIcon, QCursor, QFont
from PyQt5.QtCore import Qt, QRect, QStringListModel, QSize, pyqtSignal, QPoint
from PyQt5.QtWidgets import (QMessageBox, QComboBox, QSystemTrayIcon, QTabWidget,
QMenuBar, QFileDialog, QCheckBox, QLabel,
QVBoxLayout, QGridLayout, QLineEdit,
QHBoxLayout, QPushButton, QScrollArea, QTextEdit,
QShortcut, QMainWindow, QCompleter, QInputDialog,
QWidget, QSizePolicy, QStatusBar, QToolTip, QSplitter, QDialog,
QMenu, QAction, QStackedWidget, QToolButton)
import electrum
from electrum.bitcoin import COIN, is_address, b58_address_to_hash160, Token, opcodes, \
TYPE_SCRIPT, is_hash160, hash_160, eth_abi_encode, Delegation, DELEGATE_ABI, DELEGATION_CONTRACT
from electrum import (keystore, ecc, constants, util, bitcoin, commands,
paymentrequest, lnutil)
from electrum.plugin import run_hook, BasePlugin
from electrum.i18n import _
from electrum.util import (format_time,
UserCancelled, profiler,
bfh, InvalidPassword,
UserFacingException,
get_new_wallet_name, send_exception_to_crash_reporter,
InvalidBitcoinURI, maybe_extract_bolt11_invoice, NotEnoughFunds,
NoDynamicFeeEstimates, MultipleSpendMaxTxOutputs,
AddTransactionException)
from electrum.invoices import PR_TYPE_ONCHAIN, PR_TYPE_LN, PR_DEFAULT_EXPIRATION_WHEN_CREATING, Invoice
from electrum.invoices import PR_PAID, PR_FAILED, pr_expiration_values, LNInvoice, OnchainInvoice
from electrum.transaction import (Transaction, PartialTxInput,
PartialTransaction, PartialTxOutput)
from electrum.transaction import contract_script, decode_opcreate_script, decode_opsender_script
from electrum.wallet import (Multisig_Wallet, CannotBumpFee, Abstract_Wallet,
sweep_preparations, InternalAddressCorruption,
CannotDoubleSpendTx)
from electrum.version import ELECTRUM_VERSION
from electrum.network import Network, TxBroadcastError, BestEffortRequestFailed, UntrustedServerReturnedError
from electrum.exchange_rate import FxThread
from electrum.simple_config import SimpleConfig
from electrum.logging import Logger
from electrum.lnutil import ln_dummy_address, extract_nodeid, ConnStringFormatError
from electrum.lnaddr import lndecode, LnDecodeException
from electrum.plugins.trezor.trezor import TrezorKeyStore
from .exception_window import Exception_Hook
from .amountedit import AmountEdit, BTCAmountEdit, FreezableLineEdit, FeerateEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider, FeeComboBox
from .util import (read_QIcon, ColorScheme, text_dialog, icon_path, WaitingDialog,
WindowModalDialog, ChoicesLayout, HelpLabel, Buttons,
OkButton, InfoButton, WWLabel, TaskThread, CancelButton,
CloseButton, HelpButton, MessageBoxMixin, EnterButton,
import_meta_gui, export_meta_gui,
filename_field, address_field, char_width_in_lineedit, webopen,
TRANSACTION_FILE_EXTENSION_FILTER_ANY, MONOSPACE_FONT, getOpenFileName, getSaveFileName)
from .util import ButtonsTextEdit, ButtonsLineEdit
from .installwizard import WIF_HELP_TEXT
from .history_list import HistoryList, HistoryModel
from .update_checker import UpdateCheck, UpdateCheckThread
from .channels_list import ChannelsList
from .confirm_tx_dialog import ConfirmTxDialog
from .transaction_dialog import PreviewTxDialog
from .token_dialog import TokenAddDialog, TokenInfoDialog, TokenSendDialog
from .smart_contract_dialog import ContractCreateDialog, ContractEditDialog, ContractFuncDialog
from .delegation_dialog import DelegationDialog
from electrum.coinchooser import SenderNoUTXOException
if TYPE_CHECKING:
from . import ElectrumGui
LN_NUM_PAYMENT_ATTEMPTS = 10
class StatusBarButton(QToolButton):
def __init__(self, icon, tooltip, func):
QToolButton.__init__(self)
self.setText('')
self.setIcon(icon)
self.setToolTip(tooltip)
self.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
self.setAutoRaise(True)
self.setMaximumWidth(25)
self.clicked.connect(self.onPress)
self.func = func
self.setIconSize(QSize(25,25))
self.setCursor(QCursor(Qt.PointingHandCursor))
def onPress(self, checked=False):
'''Drops the unwanted PyQt5 "checked" argument'''
self.func()
def keyPressEvent(self, e):
if e.key() in [ Qt.Key_Return, Qt.Key_Enter ]:
self.func()
def protected(func):
'''Password request wrapper. The password is passed to the function
as the 'password' named argument. "None" indicates either an
unencrypted wallet, or the user cancelled the password request.
An empty input is passed as the empty string.'''
def request_password(self, *args, **kwargs):
parent = self.top_level_window()
password = None
while self.wallet.has_keystore_encryption():
password = self.password_dialog(parent=parent)
if password is None:
# User cancelled password input
return
try:
self.wallet.check_password(password)
break
except Exception as e:
self.show_error(str(e), parent=parent)
continue
kwargs['password'] = password
return func(self, *args, **kwargs)
return request_password
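# --- Illustrative use of @protected (added sketch, not part of the original file) ---
# A method decorated with @protected prompts the user for the wallet password
# before its body runs and receives the verified password via the 'password'
# keyword argument (None for unencrypted wallets or if the user cancelled).
# The method name and body below are hypothetical:
#
# @protected
# def do_sign_something(self, data, password=None):
#     if password is None and self.wallet.has_keystore_encryption():
#         return  # user cancelled the password dialog
#     return self.wallet.sign_message(self.wallet.get_addresses()[0], data, password)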
class ElectrumWindow(QMainWindow, MessageBoxMixin, Logger):
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
#ln_payment_attempt_signal = pyqtSignal(str)
alias_received_signal = pyqtSignal()
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
show_error_signal = pyqtSignal(str)
payment_request: Optional[paymentrequest.PaymentRequest]
def __init__(self, gui_object: 'ElectrumGui', wallet: Abstract_Wallet):
QMainWindow.__init__(self)
self.gui_object = gui_object
self.config = config = gui_object.config # type: SimpleConfig
self.gui_thread = gui_object.gui_thread
assert wallet, "no wallet"
self.wallet = wallet
if wallet.has_lightning():
self.wallet.config.set_key('show_channels_tab', True)
self.setup_exception_hook()
self.network = gui_object.daemon.network # type: Network
self.fx = gui_object.daemon.fx # type: FxThread
self.contacts = wallet.contacts
self.tray = gui_object.tray
self.app = gui_object.app
self._cleaned_up = False
self.payment_request = None # type: Optional[paymentrequest.PaymentRequest]
self.payto_URI = None
self.checking_accounts = False
self.qr_window = None
self.pluginsdialog = None
self.showing_cert_mismatch_error = False
self.tl_windows = []
self.pending_invoice = None
Logger.__init__(self)
self.tx_notification_queue = queue.Queue()
self.tx_notification_last_time = 0
self.create_status_bar()
self.need_update = threading.Event()
self.completions = QStringListModel()
coincontrol_sb = self.create_coincontrol_statusbar()
self.tabs = tabs = QTabWidget(self)
self.send_tab = self.create_send_tab()
self.receive_tab = self.create_receive_tab()
self.addresses_tab = self.create_addresses_tab()
self.utxo_tab = self.create_utxo_tab()
self.console_tab = self.create_console_tab()
self.contacts_tab = self.create_contacts_tab()
self.channels_tab = self.create_channels_tab()
self.tokens_tab = self.create_tokens_tab()
self.smart_contract_tab = self.create_smart_contract_tab()
self.delegations_tab = self.create_delegations_tab()
tabs.addTab(self.create_history_tab(), read_QIcon("tab_history.png"), _('History'))
tabs.addTab(self.send_tab, read_QIcon("tab_send.png"), _('Send'))
tabs.addTab(self.receive_tab, read_QIcon("tab_receive.png"), _('Receive'))
tabs.addTab(self.tokens_tab, read_QIcon("tab_contacts.png"), _('Tokens'))
def add_optional_tab(tabs, tab, icon, description, name):
tab.tab_icon = icon
tab.tab_description = description
tab.tab_pos = len(tabs)
tab.tab_name = name
if self.config.get('show_{}_tab'.format(name), False):
tabs.addTab(tab, icon, description.replace("&", ""))
add_optional_tab(tabs, self.addresses_tab, read_QIcon("tab_addresses.png"), _("&Addresses"), "addresses")
if self.wallet.has_lightning():
add_optional_tab(tabs, self.channels_tab, read_QIcon("lightning.png"), _("Channels"), "channels")
add_optional_tab(tabs, self.utxo_tab, read_QIcon("tab_coins.png"), _("Co&ins"), "utxo")
add_optional_tab(tabs, self.contacts_tab, read_QIcon("tab_contacts.png"), _("Con&tacts"), "contacts")
add_optional_tab(tabs, self.console_tab, read_QIcon("tab_console.png"), _("Con&sole"), "console")
add_optional_tab(tabs, self.smart_contract_tab, read_QIcon("tab_console.png"), _('Smart Contract'), 'contract')
add_optional_tab(tabs, self.delegations_tab, read_QIcon("tab_console.png"), _('Delegations'), 'delegations')
tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
central_widget = QWidget()
vbox = QVBoxLayout(central_widget)
vbox.setContentsMargins(0, 0, 0, 0)
vbox.addWidget(tabs)
vbox.addWidget(coincontrol_sb)
self.setCentralWidget(central_widget)
if self.config.get("is_maximized"):
self.showMaximized()
self.setWindowIcon(read_QIcon("electrum.png"))
self.init_menubar()
wrtabs = weakref.proxy(tabs)
QShortcut(QKeySequence("Ctrl+W"), self, self.close)
QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
QShortcut(QKeySequence("F5"), self, self.update_wallet)
QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
for i in range(wrtabs.count()):
QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
self.payment_request_ok_signal.connect(self.payment_request_ok)
self.payment_request_error_signal.connect(self.payment_request_error)
self.show_error_signal.connect(self.show_error)
self.history_list.setFocus(True)
# network callbacks
if self.network:
self.network_signal.connect(self.on_network_qt)
interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
'new_transaction', 'status',
'banner', 'verified', 'fee', 'fee_histogram', 'on_quotes',
'on_history', 'channel', 'channels_updated',
'payment_failed', 'payment_succeeded', 'on_token', 'on_delegation',
'invoice_status', 'request_status', 'ln_gossip_sync_progress',
'cert_mismatch', 'gossip_db_loaded']
# To avoid leaking references to "self" that prevent the
# window from being GC-ed when closed, callbacks should be
# methods of this class only, and specifically not be
# partials, lambdas or methods of subobjects. Hence...
util.register_callback(self.on_network, interests)
# set initial message
self.console.showMessage(self.network.banner)
# update fee slider in case we missed the callback
#self.fee_slider.update()
self.load_wallet(wallet)
gui_object.timer.timeout.connect(self.timer_actions)
self.fetch_alias()
# If the option hasn't been set yet
if config.get('check_updates') is None:
choice = self.question(title="Htmlcoin Electrum - " + _("Enable update check"),
msg=_("For security reasons we advise that you always use the latest version of Electrum.") + " " +
_("Would you like to be notified when there is a newer version of Electrum available?"))
config.set_key('check_updates', bool(choice), save=True)
self._update_check_thread = None
if config.get('check_updates', False):
# The references to both the thread and the window need to be stored somewhere
# to prevent GC from getting in our way.
def on_version_received(v):
if UpdateCheck.is_newer(v):
self.update_check_button.setText(_("Update to Htmlcoin Electrum {} is available").format(v))
self.update_check_button.clicked.connect(lambda: self.show_update_check(v))
self.update_check_button.show()
self._update_check_thread = UpdateCheckThread()
self._update_check_thread.checked.connect(on_version_received)
self._update_check_thread.start()
def setup_exception_hook(self):
Exception_Hook.maybe_setup(config=self.config,
wallet=self.wallet)
def run_coroutine_from_thread(self, coro, on_result=None):
def task():
try:
f = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
r = f.result()
if on_result:
on_result(r)
except Exception as e:
self.logger.exception("exception in coro scheduled via window.wallet")
self.show_error_signal.emit(str(e))
self.wallet.thread.add(task)
def on_fx_history(self):
self.history_model.refresh('fx_history')
self.address_list.update()
def on_fx_token(self):
self.token_hist_model.refresh('fx_token')
self.token_hist_list.update()
self.token_balance_list.update()
def on_fx_delegation(self):
self.delegation_list.update()
def on_fx_quotes(self):
self.update_status()
# Refresh edits with the new rate
edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
edit.textEdited.emit(edit.text())
edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
edit.textEdited.emit(edit.text())
# History tab needs updating if it used spot
if self.fx.history_used_spot:
self.history_model.refresh('fx_quotes')
self.address_list.update()
def toggle_tab(self, tab):
show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_text = (_("Hide {}") if show else _("Show {}")).format(tab.tab_description)
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
def push_top_level_window(self, window):
'''Used for e.g. tx dialog box to ensure new dialogs are appropriately
parented. This used to be done by explicitly providing the parent
window, but that isn't something hardware wallet prompts know.'''
self.tl_windows.append(window)
def pop_top_level_window(self, window):
self.tl_windows.remove(window)
def top_level_window(self, test_func=None):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
if override and test_func and not test_func(override):
override = None # only override if ok for test_func
return self.top_level_window_recurse(override, test_func)
def diagnostic_name(self):
#return '{}:{}'.format(self.__class__.__name__, self.wallet.diagnostic_name())
return self.wallet.diagnostic_name()
def is_hidden(self):
return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
def bring_to_top(self):
self.show()
self.raise_()
def on_error(self, exc_info):
e = exc_info[1]
if isinstance(e, UserCancelled):
pass
elif isinstance(e, UserFacingException):
self.show_error(str(e))
else:
# TODO would be nice if we just sent these to the crash reporter...
# anything we don't want to send there, we should explicitly catch
# send_exception_to_crash_reporter(e)
try:
self.logger.error("on_error", exc_info=exc_info)
except OSError:
pass # see #4418
self.show_error(repr(e))
def on_network(self, event, *args):
# Handle in GUI thread
self.network_signal.emit(event, args)
def on_network_qt(self, event, args=None):
# Handle a network message in the GUI thread
# note: all windows get events from all wallets!
if event == 'wallet_updated':
wallet = args[0]
if wallet == self.wallet:
self.need_update.set()
elif event == 'network_updated':
self.gui_object.network_updated_signal_obj.network_updated_signal \
.emit(event, args)
self.network_signal.emit('status', None)
elif event == 'blockchain_updated':
# to update number of confirmations in history
self.need_update.set()
elif event == 'new_transaction':
wallet, tx = args
if wallet == self.wallet:
self.tx_notification_queue.put(tx)
elif event == 'on_quotes':
self.on_fx_quotes()
elif event == 'on_history':
self.on_fx_history()
elif event == 'on_token':
self.on_fx_token()
elif event == 'on_delegation':
self.on_fx_delegation()
elif event == 'gossip_db_loaded':
self.channels_list.gossip_db_loaded.emit(*args)
elif event == 'channels_updated':
wallet = args[0]
if wallet == self.wallet:
self.channels_list.update_rows.emit(*args)
elif event == 'channel':
wallet = args[0]
if wallet == self.wallet:
self.channels_list.update_single_row.emit(*args)
self.update_status()
elif event == 'request_status':
self.on_request_status(*args)
elif event == 'invoice_status':
self.on_invoice_status(*args)
elif event == 'payment_succeeded':
wallet = args[0]
if wallet == self.wallet:
self.on_payment_succeeded(*args)
elif event == 'payment_failed':
wallet = args[0]
if wallet == self.wallet:
self.on_payment_failed(*args)
elif event == 'status':
self.update_status()
elif event == 'banner':
self.console.showMessage(args[0])
elif event == 'verified':
wallet, tx_hash, tx_mined_status = args
if wallet == self.wallet:
self.history_model.update_tx_mined_status(tx_hash, tx_mined_status)
elif event == 'fee':
pass
elif event == 'fee_histogram':
self.history_model.on_fee_histogram()
elif event == 'ln_gossip_sync_progress':
self.update_lightning_icon()
elif event == 'cert_mismatch':
self.show_cert_mismatch_error()
else:
self.logger.info(f"unexpected network event: {event} {args}")
def fetch_alias(self):
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
t.setDaemon(True)
t.start()
def close_wallet(self):
if self.wallet:
self.logger.info(f'close_wallet {self.wallet.storage.path}')
run_hook('close_wallet', self.wallet)
@profiler
def load_wallet(self, wallet: Abstract_Wallet):
wallet.thread = TaskThread(self, self.on_error)
self.update_recently_visited(wallet.storage.path)
if wallet.has_lightning():
util.trigger_callback('channels_updated', wallet)
self.need_update.set()
# Once GUI has been initialized check if we want to announce something since the callback has been called before the GUI was initialized
# update menus
self.seed_menu.setEnabled(self.wallet.has_seed())
self.update_lock_icon()
self.update_buttons_on_seed()
self.update_console()
self.clear_receive_tab()
self.request_list.update()
self.channels_list.update()
self.tabs.show()
self.init_geometry()
if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
self.hide()
else:
self.show()
self.watching_only_changed()
run_hook('load_wallet', wallet, self)
try:
wallet.try_detecting_internal_addresses_corruption()
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
def init_geometry(self):
winpos = self.wallet.db.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
except:
self.logger.info("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
name = "Htmlcoin Electrum Testnet" if constants.net.TESTNET else "Htmlcoin Electrum"
title = '%s %s - %s' % (name, ELECTRUM_VERSION,
self.wallet.basename())
extra = [self.wallet.db.get('wallet_type', '?')]
if self.wallet.is_watching_only():
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.may_have_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
if self.wallet.is_watching_only():
msg = ' '.join([
_("This wallet is watching-only."),
_("This means you will not be able to spend HTMLCOINs with it."),
_("Make sure you own the seed phrase or the private keys, before you request HTMLCOINs to be sent to this wallet.")
])
self.show_warning(msg, title=_('Watch-only wallet'))
def warn_if_testnet(self):
if not constants.net.TESTNET:
return
# user might have opted out already
if self.config.get('dont_show_testnet_warning', False):
return
# only show once per process lifecycle
if getattr(self.gui_object, '_warned_testnet', False):
return
self.gui_object._warned_testnet = True
msg = ''.join([
_("You are in testnet mode."), ' ',
_("Testnet coins are worthless."), '\n',
_("Testnet is separate from the main Htmlcoin network. It is used for testing.")
])
cb = QCheckBox(_("Don't show this again."))
cb_checked = False
def on_cb(x):
nonlocal cb_checked
cb_checked = x == Qt.Checked
cb.stateChanged.connect(on_cb)
self.show_warning(msg, title=_('Testnet'), checkbox=cb)
if cb_checked:
self.config.set_key('dont_show_testnet_warning', True)
def open_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if not filename:
return
self.gui_object.new_window(filename)
def select_backup_dir(self, b):
name = self.config.get('backup_dir', '')
dirname = QFileDialog.getExistingDirectory(self, "Select your wallet backup directory", name)
if dirname:
self.config.set_key('backup_dir', dirname)
self.backup_dir_e.setText(dirname)
def backup_wallet(self):
d = WindowModalDialog(self, _("File Backup"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
backup_help = ""
backup_dir = self.config.get('backup_dir')
backup_dir_label = HelpLabel(_('Backup directory') + ':', backup_help)
msg = _('Please select a backup directory')
if self.wallet.has_lightning() and self.wallet.lnworker.channels:
msg += '\n\n' + ' '.join([
_("Note that lightning channels will be converted to channel backups."),
_("You cannot use channel backups to perform lightning payments."),
_("Channel backups can only be used to request your channels to be closed.")
])
self.backup_dir_e = QPushButton(backup_dir)
self.backup_dir_e.clicked.connect(self.select_backup_dir)
grid.addWidget(backup_dir_label, 1, 0)
grid.addWidget(self.backup_dir_e, 1, 1)
vbox.addLayout(grid)
vbox.addWidget(WWLabel(msg))
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
try:
new_path = self.wallet.save_backup()
except BaseException as reason:
self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
return
if new_path:
msg = _("A copy of your wallet file was created in")+" '%s'" % str(new_path)
self.show_message(msg, title=_("Wallet backup created"))
else:
self.show_message(_("You need to configure a backup directory in your preferences"), title=_("Backup not created"))
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
try:
sorted(recent)
except:
recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent = [path for path in recent if os.path.exists(path)]
recent = recent[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
def loader(k):
return lambda: self.gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
return os.path.dirname(os.path.abspath(self.wallet.storage.path))
def new_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename = get_new_wallet_name(wallet_folder)
full_path = os.path.join(wallet_folder, filename)
self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
menubar = QMenuBar()
file_menu = menubar.addMenu(_("&File"))
self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
file_menu.addAction(_("&Save backup"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
file_menu.addAction(_("Delete"), self.remove_wallet)
file_menu.addSeparator()
file_menu.addAction(_("&Quit"), self.close)
wallet_menu = menubar.addMenu(_("&Wallet"))
wallet_menu.addAction(_("&Information"), self.show_wallet_info)
wallet_menu.addSeparator()
self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
wallet_menu.addSeparator()
addresses_menu = wallet_menu.addMenu(_("&Addresses"))
addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config))
labels_menu = wallet_menu.addMenu(_("&Labels"))
labels_menu.addAction(_("&Import"), self.do_import_labels)
labels_menu.addAction(_("&Export"), self.do_export_labels)
history_menu = wallet_menu.addMenu(_("&History"))
history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config))
history_menu.addAction(_("&Summary"), self.history_list.show_summary)
history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog)
history_menu.addAction(_("&Export"), self.history_list.export_history_dialog)
contacts_menu = wallet_menu.addMenu(_("Contacts"))
contacts_menu.addAction(_("&New"), self.new_contact_dialog)
contacts_menu.addAction(_("Import"), lambda: self.import_contacts())
contacts_menu.addAction(_("Export"), lambda: self.export_contacts())
invoices_menu = wallet_menu.addMenu(_("Invoices"))
invoices_menu.addAction(_("Import"), lambda: self.import_invoices())
invoices_menu.addAction(_("Export"), lambda: self.export_invoices())
requests_menu = wallet_menu.addMenu(_("Requests"))
requests_menu.addAction(_("Import"), lambda: self.import_requests())
requests_menu.addAction(_("Export"), lambda: self.export_requests())
try:
addr_type, __ = b58_address_to_hash160(self.addresses[0])
except:
addr_type = constants.net.SEGWIT_HRP
if not isinstance(self.wallet.keystore, TrezorKeyStore) and addr_type == constants.net.ADDRTYPE_P2PKH and not self.wallet.is_watching_only():
token_menu = wallet_menu.addMenu(_("&Token"))
token_menu.addAction(_("Add Token"), lambda: self.token_add_dialog())
smart_contract_menu = wallet_menu.addMenu(_("&Smart Contract"))
smart_contract_menu.addAction(_("Add Contract"), lambda: self.contract_add_dialog())
smart_contract_menu.addAction(_("Create Contract"), lambda: self.contract_create_dialog())
wallet_menu.addSeparator()
wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
def add_toggle_action(view_menu, tab):
is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
view_menu = menubar.addMenu(_("&View"))
add_toggle_action(view_menu, self.addresses_tab)
add_toggle_action(view_menu, self.utxo_tab)
if self.wallet.has_lightning():
add_toggle_action(view_menu, self.channels_tab)
add_toggle_action(view_menu, self.contacts_tab)
add_toggle_action(view_menu, self.console_tab)
add_toggle_action(view_menu, self.smart_contract_tab)
add_toggle_action(view_menu, self.delegations_tab)
tools_menu = menubar.addMenu(_("&Tools")) # type: QMenu
preferences_action = tools_menu.addAction(_("Preferences"), self.settings_dialog) # type: QAction
if sys.platform == 'darwin':
# "Settings"/"Preferences" are all reserved keywords in macOS.
# preferences_action will get picked up based on name (and put into a standardized location,
# and given a standard reserved hotkey)
# Hence, this menu item will be at a "uniform location re macOS processes"
preferences_action.setMenuRole(QAction.PreferencesRole) # make sure OS recognizes it as preferences
# Add another preferences item, to also have a "uniform location for Electrum between different OSes"
tools_menu.addAction(_("Electrum preferences"), self.settings_dialog)
tools_menu.addAction(_("&Network"), self.gui_object.show_network_dialog).setEnabled(bool(self.network))
tools_menu.addAction(_("&Lightning Network"), self.gui_object.show_lightning_dialog).setEnabled(bool(self.wallet.has_lightning() and self.network))
tools_menu.addAction(_("Local &Watchtower"), self.gui_object.show_watchtower_dialog).setEnabled(bool(self.network and self.network.local_watchtower))
tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
tools_menu.addSeparator()
tools_menu.addAction(_("&Sign POD"), self.sign_pod)
tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
tools_menu.addSeparator()
paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
self.raw_transaction_menu = raw_transaction_menu
run_hook('init_menubar_tools', self, tools_menu)
help_menu = menubar.addMenu(_("&Help"))
help_menu.addAction(_("&About"), self.show_about)
help_menu.addAction(_("&Check for updates"), self.show_update_check)
help_menu.addAction(_("&Official website"), lambda: webopen("https://github.com/HTMLCOIN/htmlcoin-electrum/"))
help_menu.addSeparator()
help_menu.addAction(_("&Documentation"), lambda: webopen("http://docs.electrum.org/")).setShortcut(QKeySequence.HelpContents)
help_menu.addAction(_("&Report Bug"), self.show_report_bug)
help_menu.addSeparator()
help_menu.addAction(_("&Donate to server"), self.donate_to_server)
self.setMenuBar(menubar)
def donate_to_server(self):
d = self.network.get_donation_address()
if d:
host = self.network.get_parameters().server.host
self.pay_to_URI('htmlcoin:%s?message=donation for %s'%(d, host))
else:
self.show_error(_('No donation address for this server'))
def show_about(self):
QMessageBox.about(self, "Htmlcoin Electrum",
(_("Version")+" %s" % ELECTRUM_VERSION + "\n\n" +
_("This software is based on Electrum to support Htmlcoin.") + " " +
_("Electrum's focus is speed, with low resource usage and simplifying Bitcoin.") + " " +
_("You do not need to perform regular backups, because your wallet can be "
"recovered from a secret phrase that you can memorize or write on paper.") + " " +
_("Startup times are instant because it operates in conjunction with high-performance "
"servers that handle the most complicated parts of the Bitcoin system.") + "\n\n" +
_("Uses icons from the Icons8 icon pack (icons8.com).")))
def show_update_check(self, version=None):
self.gui_object._update_check = UpdateCheck(latest_version=version)
def show_report_bug(self):
msg = ' '.join([
_("Please report any bugs as issues on github:<br/>"),
f'''<a href="{constants.GIT_REPO_ISSUES_URL}">{constants.GIT_REPO_ISSUES_URL}</a><br/><br/>''',
_("Before reporting a bug, upgrade to the most recent version of Electrum (latest release or git HEAD), and include the version number in your report."),
_("Try to explain not only what the bug is, but how it occurs.")
])
self.show_message(msg, title="Htmlcoin Electrum - " + _("Reporting Bugs"), rich_text=True)
def notify_transactions(self):
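        """Drain the tx notification queue and show desktop notifications.
        Rate-limited to one notification every 20 seconds; three or more new
        transactions are combined into a single summary notification.
        """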
if self.tx_notification_queue.qsize() == 0:
return
if not self.wallet.up_to_date:
return # no notifications while syncing
now = time.time()
rate_limit = 20 # seconds
if self.tx_notification_last_time + rate_limit > now:
return
self.tx_notification_last_time = now
self.logger.info("Notifying GUI about new transactions")
txns = []
while True:
try:
txns.append(self.tx_notification_queue.get_nowait())
except queue.Empty:
break
# Combine the transactions if there are at least three
if len(txns) >= 3:
total_amount = 0
for tx in txns:
tx_wallet_delta = self.wallet.get_wallet_delta(tx)
if not tx_wallet_delta.is_relevant:
continue
total_amount += tx_wallet_delta.delta
self.notify(_("{} new transactions: Total amount received in the new transactions {}")
.format(len(txns), self.format_amount_and_units(total_amount)))
else:
for tx in txns:
tx_wallet_delta = self.wallet.get_wallet_delta(tx)
if not tx_wallet_delta.is_relevant:
continue
self.notify(_("New transaction: {}").format(self.format_amount_and_units(tx_wallet_delta.delta)))
def notify(self, message):
if self.tray:
try:
# this requires Qt 5.9
self.tray.showMessage("Htmlcoin Electrum", message, read_QIcon("electrum_dark_icon"), 20000)
except TypeError:
self.tray.showMessage("Htmlcoin Electrum", message, QSystemTrayIcon.Information, 20000)
def timer_actions(self):
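        """Periodic housekeeping driven by the GUI timer: refresh request
        statuses, update the wallet view when flagged, resolve payto aliases
        and flush pending transaction notifications."""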
self.request_list.refresh_status()
# Note this runs in the GUI thread
if self.need_update.is_set():
self.need_update.clear()
self.update_wallet()
elif not self.wallet.up_to_date:
# this updates "synchronizing" progress
self.update_status()
# resolve aliases
# FIXME this is a blocking network call that has a timeout of 5 sec
self.payto_e.resolve()
self.notify_transactions()
def format_amount(self, x, is_diff=False, whitespaces=False, num_zeros=None, decimal_point=None):
# x is in sats
return self.config.format_amount(x, is_diff, whitespaces, num_zeros, decimal_point)
def format_amount_and_units(self, amount):
# amount is in sats
text = self.config.format_amount_and_units(amount)
x = self.fx.format_amount_and_units(amount) if self.fx else None
if text and x:
text += ' (%s)'%x
return text
def format_fee_rate(self, fee_rate):
return self.config.format_fee_rate(fee_rate)
def get_decimal_point(self):
return self.config.get_decimal_point()
def base_unit(self):
return self.config.get_base_unit()
def connect_fields(self, window, btc_e, fiat_e, fee_e):
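        """Keep a coin amount edit and its fiat counterpart in sync.
        Editing either field updates the other using the current exchange
        rate; the 'follows' flags stop the two textChanged handlers from
        triggering each other in a loop. If a fee field is given, it is
        cleared or recomputed alongside the amount.
        """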
def edit_changed(edit):
if edit.follows:
return
edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
fiat_e.is_last_edited = (edit == fiat_e)
amount = edit.get_amount()
rate = self.fx.exchange_rate() if self.fx else Decimal('NaN')
if rate.is_nan() or amount is None:
if edit is fiat_e:
btc_e.setText("")
if fee_e:
fee_e.setText("")
else:
fiat_e.setText("")
else:
if edit is fiat_e:
btc_e.follows = True
btc_e.setAmount(int(amount / Decimal(rate) * COIN))
btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
btc_e.follows = False
if fee_e:
window.update_fee()
else:
fiat_e.follows = True
fiat_e.setText(self.fx.ccy_amount_str(
amount * Decimal(rate) / COIN, False))
fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
fiat_e.follows = False
btc_e.follows = False
fiat_e.follows = False
fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
btc_e.textChanged.connect(partial(edit_changed, btc_e))
fiat_e.is_last_edited = False
def update_status(self):
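        """Refresh the status bar: balance text, synchronizing/lagging state
        and the connection icon, depending on network availability."""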
if not self.wallet:
return
if self.network is None:
text = _("Offline")
icon = read_QIcon("status_disconnected.png")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
fork_str = "_fork" if len(self.network.get_blockchains()) > 1 else ""
# Server height can be 0 after switching to a new server
# until we get a headers subscription request response.
# Display the synchronizing message in that case.
if not self.wallet.up_to_date or server_height == 0:
num_sent, num_answered = self.wallet.get_history_sync_state_details()
text = ("{} ({}/{})"
.format(_("Synchronizing..."), num_answered, num_sent))
icon = read_QIcon("status_waiting.png")
elif server_lag > 1:
text = _("Server is lagging ({} blocks)").format(server_lag)
icon = read_QIcon("status_lagging%s.png"%fork_str)
elif server_lag < (-1):
text = _("Synchronizing headers...")
icon = read_QIcon("status_waiting.png")
else:
c, u, x = self.wallet.get_balance()
text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
if u:
text += " [%s unconfirmed]"%(self.format_amount(u, is_diff=True).strip())
if x:
text += " [%s unmatured]"%(self.format_amount(x, is_diff=True).strip())
if self.wallet.has_lightning():
l = self.wallet.lnworker.get_balance()
text += u' \U000026a1 %s'%(self.format_amount_and_units(l).strip())
# append fiat balance and price
if self.fx.is_enabled():
text += self.fx.get_fiat_status_text(c + u + x,
self.base_unit(), self.get_decimal_point()) or ''
if not self.network.proxy:
icon = read_QIcon("status_connected%s.png"%fork_str)
else:
icon = read_QIcon("status_connected_proxy%s.png"%fork_str)
else:
icon = read_QIcon("status_disconnected.png")
if self.network.proxy:
text = "{} ({})".format(_("Not connected"), _("proxy enabled"))
elif self.network.downloading_headers:
text = _("Downloading headers...")
icon = read_QIcon("status_waiting.png")
else:
text = _("Not connected")
if self.tray:
self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
self.balance_label.setText(text)
if self.status_button:
self.status_button.setIcon(icon)
def update_wallet(self):
self.update_status()
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
def update_tabs(self, wallet=None):
if wallet is None:
wallet = self.wallet
if wallet != self.wallet:
return
self.history_model.refresh('update_tabs')
self.request_list.update()
self.address_list.update()
self.utxo_list.update()
self.contact_list.update()
self.invoice_list.update()
self.token_balance_list.update()
self.token_hist_model.refresh('update_tabs')
self.token_hist_list.update()
self.smart_contract_list.update()
self.delegation_list.update()
self.channels_list.update_rows.emit(wallet)
self.update_completions()
def create_channels_tab(self):
self.channels_list = ChannelsList(self)
t = self.channels_list.get_toolbar()
return self.create_list_tab(self.channels_list, t)
def create_history_tab(self):
self.history_model = HistoryModel(self)
self.history_list = l = HistoryList(self, self.history_model)
self.history_model.set_view(self.history_list)
l.searchable_list = l
toolbar = l.create_toolbar(self.config)
tab = self.create_list_tab(l, toolbar)
toolbar_shown = bool(self.config.get('show_toolbar_history', False))
l.show_toolbar(toolbar_shown)
return tab
def show_address(self, addr):
from . import address_dialog
d = address_dialog.AddressDialog(self, addr)
d.exec_()
def show_channel(self, channel_id):
from . import channel_details
channel_details.ChannelDetailsDialog(self, channel_id).show()
def show_transaction(self, tx, *, tx_desc=None):
'''tx_desc is set only for txs created in the Send tab'''
show_transaction(tx, parent=self, desc=tx_desc)
def show_lightning_transaction(self, tx_item):
from .lightning_tx_dialog import LightningTxDialog
d = LightningTxDialog(self, tx_item)
d.show()
def create_receive_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.receive_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
self.receive_message_e = QLineEdit()
grid.addWidget(QLabel(_('Description')), 0, 0)
grid.addWidget(self.receive_message_e, 0, 1, 1, 4)
self.receive_message_e.textChanged.connect(self.update_receive_qr)
self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
grid.addWidget(QLabel(_('Requested amount')), 1, 0)
grid.addWidget(self.receive_amount_e, 1, 1)
self.receive_amount_e.textChanged.connect(self.update_receive_qr)
self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_receive_e.setVisible(False)
grid.addWidget(self.fiat_receive_e, 1, 2, Qt.AlignLeft)
self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
self.connect_fields(self, self.amount_e, self.fiat_send_e, None)
self.expires_combo = QComboBox()
evl = sorted(pr_expiration_values.items())
evl_keys = [i[0] for i in evl]
evl_values = [i[1] for i in evl]
default_expiry = self.config.get('request_expiry', PR_DEFAULT_EXPIRATION_WHEN_CREATING)
try:
i = evl_keys.index(default_expiry)
except ValueError:
i = 0
self.expires_combo.addItems(evl_values)
self.expires_combo.setCurrentIndex(i)
self.expires_combo.setFixedWidth(self.receive_amount_e.width())
def on_expiry(i):
self.config.set_key('request_expiry', evl_keys[i])
self.expires_combo.currentIndexChanged.connect(on_expiry)
msg = ' '.join([
_('Expiration date of your request.'),
_('This information is seen by the recipient if you send them a signed payment request.'),
_('Expired requests have to be deleted manually from your list, in order to free the corresponding Htmlcoin addresses.'),
_('The htmlcoin address never expires and will always be part of this electrum wallet.'),
])
grid.addWidget(HelpLabel(_('Expires after'), msg), 2, 0)
grid.addWidget(self.expires_combo, 2, 1)
self.expires_label = QLineEdit('')
        self.expires_label.setReadOnly(True)
self.expires_label.setFocusPolicy(Qt.NoFocus)
self.expires_label.hide()
grid.addWidget(self.expires_label, 2, 1)
self.clear_invoice_button = QPushButton(_('Clear'))
self.clear_invoice_button.clicked.connect(self.clear_receive_tab)
self.create_invoice_button = QPushButton(_('New Address'))
self.create_invoice_button.setIcon(read_QIcon("htmlcoin.png"))
self.create_invoice_button.setToolTip('Create on-chain request')
self.create_invoice_button.clicked.connect(lambda: self.create_invoice(False))
self.receive_buttons = buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_invoice_button)
buttons.addWidget(self.create_invoice_button)
if self.wallet.has_lightning():
self.create_invoice_button.setText(_('New Address'))
self.create_lightning_invoice_button = QPushButton(_('Lightning'))
self.create_lightning_invoice_button.setToolTip('Create lightning request')
self.create_lightning_invoice_button.setIcon(read_QIcon("lightning.png"))
self.create_lightning_invoice_button.clicked.connect(lambda: self.create_invoice(True))
buttons.addWidget(self.create_lightning_invoice_button)
grid.addLayout(buttons, 4, 3, 1, 2)
self.receive_payreq_e = ButtonsTextEdit()
self.receive_payreq_e.setFont(QFont(MONOSPACE_FONT))
self.receive_payreq_e.addCopyButton(self.app)
self.receive_payreq_e.setReadOnly(True)
self.receive_payreq_e.textChanged.connect(self.update_receive_qr)
self.receive_payreq_e.setFocusPolicy(Qt.ClickFocus)
self.receive_qr = QRCodeWidget(fixedSize=220)
self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
self.receive_address_e = ButtonsTextEdit()
self.receive_address_e.setFont(QFont(MONOSPACE_FONT))
self.receive_address_e.addCopyButton(self.app)
self.receive_address_e.setReadOnly(True)
self.receive_address_e.textChanged.connect(self.update_receive_address_styling)
qr_show = lambda: self.show_qrcode(str(self.receive_address_e.text()), _('Receiving address'), parent=self)
qr_icon = "qrcode_white.png" if ColorScheme.dark_scheme else "qrcode.png"
self.receive_address_e.addButton(qr_icon, qr_show, _("Show as QR code"))
self.receive_requests_label = QLabel(_('Incoming payments'))
from .request_list import RequestList
self.request_list = RequestList(self)
receive_tabs = QTabWidget()
receive_tabs.addTab(self.receive_address_e, _('Address'))
receive_tabs.addTab(self.receive_payreq_e, _('Request'))
receive_tabs.addTab(self.receive_qr, _('QR Code'))
receive_tabs.setCurrentIndex(self.config.get('receive_tabs_index', 0))
receive_tabs.currentChanged.connect(lambda i: self.config.set_key('receive_tabs_index', i))
receive_tabs_sp = receive_tabs.sizePolicy()
receive_tabs_sp.setRetainSizeWhenHidden(True)
receive_tabs.setSizePolicy(receive_tabs_sp)
def maybe_hide_receive_tabs():
receive_tabs.setVisible(bool(self.receive_payreq_e.text()))
self.receive_payreq_e.textChanged.connect(maybe_hide_receive_tabs)
maybe_hide_receive_tabs()
# layout
vbox_g = QVBoxLayout()
vbox_g.addLayout(grid)
vbox_g.addStretch()
hbox = QHBoxLayout()
hbox.addLayout(vbox_g)
hbox.addStretch()
hbox.addWidget(receive_tabs)
w = QWidget()
w.searchable_list = self.request_list
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.receive_requests_label)
vbox.addWidget(self.request_list)
vbox.setStretchFactor(self.request_list, 1000)
return w
def delete_requests(self, keys):
for key in keys:
self.wallet.delete_request(key)
self.request_list.update()
self.clear_receive_tab()
def delete_lightning_payreq(self, payreq_key):
self.wallet.lnworker.delete_invoice(payreq_key)
self.request_list.update()
self.invoice_list.update()
self.clear_receive_tab()
def sign_payment_request(self, addr):
alias = self.config.get('alias')
if alias and self.alias_info:
alias_addr, alias_name, validated = self.alias_info
if alias_addr:
if self.wallet.is_mine(alias_addr):
msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
password = None
if self.wallet.has_keystore_encryption():
password = self.password_dialog(msg)
if not password:
return
try:
self.wallet.sign_payment_request(addr, alias, alias_addr, password)
except Exception as e:
self.show_error(repr(e))
return
else:
return
def create_invoice(self, is_lightning):
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
expiry = self.config.get('request_expiry', PR_DEFAULT_EXPIRATION_WHEN_CREATING)
if is_lightning:
if not self.wallet.lnworker.channels:
self.show_error(_("You need to open a Lightning channel first."))
return
# TODO maybe show a warning if amount exceeds lnworker.num_sats_can_receive (as in kivy)
key = self.wallet.lnworker.add_request(amount, message, expiry)
else:
key = self.create_bitcoin_request(amount, message, expiry)
if not key:
return
self.address_list.update()
assert key is not None
self.request_list.update()
self.request_list.select_key(key)
# clear request fields
self.receive_amount_e.setText('')
self.receive_message_e.setText('')
# copy to clipboard
r = self.wallet.get_request(key)
content = r.invoice if r.is_lightning() else r.get_address()
title = _('Invoice') if is_lightning else _('Address')
self.do_copy(content, title=title)
def create_bitcoin_request(self, amount, message, expiration) -> Optional[str]:
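        """Create an on-chain payment request and return its address (the
        request key), or None if the user aborts. Falls back to reusing an
        existing address when the wallet cannot derive new ones."""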
addr = self.wallet.get_unused_address()
if addr is None:
if not self.wallet.is_deterministic(): # imported wallet
msg = [
_('No more addresses in your wallet.'), ' ',
_('You are using a non-deterministic wallet, which cannot create new addresses.'), ' ',
_('If you want to create new addresses, use a deterministic wallet instead.'), '\n\n',
_('Creating a new payment request will reuse one of your addresses and overwrite an existing request. Continue anyway?'),
]
if not self.question(''.join(msg)):
return
addr = self.wallet.get_receiving_address()
else: # deterministic wallet
if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
return
addr = self.wallet.create_new_address(False)
req = self.wallet.make_payment_request(addr, amount, message, expiration)
try:
self.wallet.add_payment_request(req)
except Exception as e:
self.logger.exception('Error adding payment request')
self.show_error(_('Error adding payment request') + ':\n' + repr(e))
else:
self.sign_payment_request(addr)
return addr
def do_copy(self, content: str, *, title: str = None) -> None:
self.app.clipboard().setText(content)
if title is None:
tooltip_text = _("Text copied to clipboard").format(title)
else:
tooltip_text = _("{} copied to clipboard").format(title)
QToolTip.showText(QCursor.pos(), tooltip_text, self)
def clear_receive_tab(self):
self.receive_payreq_e.setText('')
self.receive_address_e.setText('')
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
self.expires_label.hide()
self.expires_combo.show()
self.request_list.clearSelection()
def toggle_qr_window(self):
from . import qrwindow
if not self.qr_window:
self.qr_window = qrwindow.QR_Window(self)
self.qr_window.setVisible(True)
self.qr_window_geometry = self.qr_window.geometry()
else:
if not self.qr_window.isVisible():
self.qr_window.setVisible(True)
self.qr_window.setGeometry(self.qr_window_geometry)
else:
self.qr_window_geometry = self.qr_window.geometry()
self.qr_window.setVisible(False)
self.update_receive_qr()
def show_send_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_receive_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
def update_receive_qr(self):
uri = str(self.receive_payreq_e.text())
if maybe_extract_bolt11_invoice(uri):
# encode lightning invoices as uppercase so QR encoding can use
# alphanumeric mode; resulting in smaller QR codes
uri = uri.upper()
self.receive_qr.setData(uri)
if self.qr_window and self.qr_window.isVisible():
self.qr_window.qrw.setData(uri)
def update_receive_address_styling(self):
addr = str(self.receive_address_e.text())
if is_address(addr) and self.wallet.is_used(addr):
self.receive_address_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
self.receive_address_e.setToolTip(_("This address has already been used. "
"For better privacy, do not reuse it for new payments."))
else:
self.receive_address_e.setStyleSheet("")
self.receive_address_e.setToolTip("")
def create_send_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.send_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
from .paytoedit import PayToEdit
self.amount_e = BTCAmountEdit(self.get_decimal_point)
self.payto_e = PayToEdit(self)
self.payto_e.addPasteButton(self.app)
msg = _('Recipient of the funds.') + '\n\n'\
+ _('You may enter a Htmlcoin address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Htmlcoin address)')
payto_label = HelpLabel(_('Pay to'), msg)
grid.addWidget(payto_label, 1, 0)
grid.addWidget(self.payto_e, 1, 1, 1, -1)
completer = QCompleter()
completer.setCaseSensitivity(False)
self.payto_e.set_completer(completer)
completer.setModel(self.completions)
msg = _('Description of the transaction (not mandatory).') + '\n\n'\
+ _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
description_label = HelpLabel(_('Description'), msg)
grid.addWidget(description_label, 2, 0)
self.message_e = FreezableLineEdit()
self.message_e.setMinimumWidth(700)
grid.addWidget(self.message_e, 2, 1, 1, -1)
msg = _('Amount to be sent.') + '\n\n' \
+ _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
+ _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
+ _('Keyboard shortcut: type "!" to send all your coins.')
amount_label = HelpLabel(_('Amount'), msg)
grid.addWidget(amount_label, 3, 0)
grid.addWidget(self.amount_e, 3, 1)
self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_send_e.setVisible(False)
grid.addWidget(self.fiat_send_e, 3, 2)
self.amount_e.frozen.connect(
lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
self.max_button = EnterButton(_("Max"), self.spend_max)
self.max_button.setFixedWidth(100)
self.max_button.setCheckable(True)
grid.addWidget(self.max_button, 3, 3)
self.save_button = EnterButton(_("Save"), self.do_save_invoice)
self.send_button = EnterButton(_("Pay") + "...", self.do_pay)
self.clear_button = EnterButton(_("Clear"), self.do_clear)
buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_button)
buttons.addWidget(self.save_button)
buttons.addWidget(self.send_button)
grid.addLayout(buttons, 6, 1, 1, 4)
self.amount_e.shortcut.connect(self.spend_max)
def reset_max(text):
self.max_button.setChecked(False)
enable = not bool(text) and not self.amount_e.isReadOnly()
#self.max_button.setEnabled(enable)
self.amount_e.textEdited.connect(reset_max)
self.fiat_send_e.textEdited.connect(reset_max)
self.set_onchain(False)
self.invoices_label = QLabel(_('Outgoing payments'))
from .invoice_list import InvoiceList
self.invoice_list = InvoiceList(self)
vbox0 = QVBoxLayout()
vbox0.addLayout(grid)
hbox = QHBoxLayout()
hbox.addLayout(vbox0)
hbox.addStretch(1)
w = QWidget()
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.invoices_label)
vbox.addWidget(self.invoice_list)
vbox.setStretchFactor(self.invoice_list, 1000)
w.searchable_list = self.invoice_list
run_hook('create_send_tab', grid)
return w
def spend_max(self):
if run_hook('abort_send', self):
return
outputs = self.payto_e.get_outputs(True)
if not outputs:
return
make_tx = lambda fee_est: self.wallet.make_unsigned_transaction(
coins=self.get_coins(),
outputs=outputs,
fee=fee_est,
is_sweep=False)
try:
try:
tx = make_tx(None)
except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
# Check if we had enough funds excluding fees,
# if so, still provide opportunity to set lower fees.
tx = make_tx(0)
except (MultipleSpendMaxTxOutputs, NotEnoughFunds) as e:
self.max_button.setChecked(False)
self.show_error(str(e))
return
self.max_button.setChecked(True)
amount = tx.output_value()
__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
amount_after_all_fees = amount - x_fee_amount
self.amount_e.setAmount(amount_after_all_fees)
# show tooltip explaining max amount
mining_fee = tx.get_fee()
mining_fee_str = self.format_amount_and_units(mining_fee)
msg = _("Mining fee: {} (can be adjusted on next screen)").format(mining_fee_str)
if x_fee_amount:
twofactor_fee_str = self.format_amount_and_units(x_fee_amount)
msg += "\n" + _("2fa fee: {} (for the next batch of transactions)").format(twofactor_fee_str)
frozen_bal = self.get_frozen_balance_str()
if frozen_bal:
msg += "\n" + _("Some coins are frozen: {} (can be unfrozen in the Addresses or in the Coins tab)").format(frozen_bal)
QToolTip.showText(self.max_button.mapToGlobal(QPoint(0, 0)), msg)
def get_contact_payto(self, key):
_type, label = self.contacts.get(key)
return label + ' <' + key + '>' if _type == 'address' else key
def update_completions(self):
l = [self.get_contact_payto(key) for key in self.contacts.keys()]
self.completions.setStringList(l)
@protected
def protect(self, func, args, password):
return func(*args, password)
def read_outputs(self) -> List[PartialTxOutput]:
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
outputs = self.payto_e.get_outputs(self.max_button.isChecked())
return outputs
def check_send_tab_onchain_outputs_and_show_errors(self, outputs: List[PartialTxOutput]) -> bool:
"""Returns whether there are errors with outputs.
Also shows error dialog to user if so.
"""
if not outputs:
self.show_error(_('No outputs'))
return True
for o in outputs:
if o.scriptpubkey is None:
self.show_error(_('Htmlcoin Address is None'))
return True
if o.value is None:
self.show_error(_('Invalid Amount'))
return True
return False # no errors
def check_send_tab_payto_line_and_show_errors(self) -> bool:
"""Returns whether there are errors.
Also shows error dialog to user if so.
"""
pr = self.payment_request
if pr:
if pr.has_expired():
self.show_error(_('Payment request has expired'))
return True
if not pr:
errors = self.payto_e.get_errors()
if errors:
if len(errors) == 1 and not errors[0].is_multiline:
err = errors[0]
self.show_warning(_("Failed to parse 'Pay to' line") + ":\n" +
f"{err.line_content[:40]}...\n\n"
f"{err.exc!r}")
else:
self.show_warning(_("Invalid Lines found:") + "\n\n" +
'\n'.join([_("Line #") +
f"{err.idx+1}: {err.line_content[:40]}... ({err.exc!r})"
for err in errors]))
return True
if self.payto_e.is_alias and self.payto_e.validated is False:
alias = self.payto_e.toPlainText()
msg = _('WARNING: the alias "{}" could not be validated via an additional '
'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
msg += _('Do you wish to continue?')
if not self.question(msg):
return True
return False # no errors
def pay_lightning_invoice(self, invoice: str, *, amount_msat: Optional[int]):
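        """Ask the user for confirmation, save the pending invoice, then pay
        the BOLT11 invoice via lnworker on the network's asyncio loop."""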
if amount_msat is None:
raise Exception("missing amount for LN invoice")
amount_sat = Decimal(amount_msat) / 1000
# FIXME this is currently lying to user as we truncate to satoshis
msg = _("Pay lightning invoice?") + '\n\n' + _("This will send {}?").format(self.format_amount_and_units(amount_sat))
if not self.question(msg):
return
self.save_pending_invoice()
def task():
coro = self.wallet.lnworker.pay_invoice(invoice, amount_msat=amount_msat, attempts=LN_NUM_PAYMENT_ATTEMPTS)
fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
return fut.result()
self.wallet.thread.add(task)
def on_request_status(self, wallet, key, status):
if wallet != self.wallet:
return
req = self.wallet.receive_requests.get(key)
if req is None:
return
if status == PR_PAID:
self.notify(_('Payment received') + '\n' + key)
self.need_update.set()
else:
self.request_list.update_item(key, req)
def on_invoice_status(self, wallet, key):
if wallet != self.wallet:
return
invoice = self.wallet.get_invoice(key)
if invoice is None:
return
status = self.wallet.get_invoice_status(invoice)
if status == PR_PAID:
self.invoice_list.update()
else:
self.invoice_list.update_item(key, invoice)
def on_payment_succeeded(self, wallet, key):
description = self.wallet.get_label(key)
self.notify(_('Payment succeeded') + '\n\n' + description)
self.need_update.set()
def on_payment_failed(self, wallet, key, reason):
self.show_error(_('Payment failed') + '\n\n' + reason)
def read_invoice(self):
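        """Build an invoice from the Send tab fields: an LNInvoice when a
        lightning invoice was entered, otherwise an on-chain invoice from the
        payto outputs. Returns None (after showing an error) on invalid input."""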
if self.check_send_tab_payto_line_and_show_errors():
return
if not self._is_onchain:
invoice_str = self.payto_e.lightning_invoice
if not invoice_str:
return
if not self.wallet.has_lightning():
self.show_error(_('Lightning is disabled'))
return
invoice = LNInvoice.from_bech32(invoice_str)
if invoice.get_amount_msat() is None:
amount_sat = self.amount_e.get_amount()
if amount_sat:
invoice.amount_msat = int(amount_sat * 1000)
else:
self.show_error(_('No amount'))
return
return invoice
else:
outputs = self.read_outputs()
if self.check_send_tab_onchain_outputs_and_show_errors(outputs):
return
message = self.message_e.text()
return self.wallet.create_invoice(
outputs=outputs,
message=message,
pr=self.payment_request,
URI=self.payto_URI)
def do_save_invoice(self):
self.pending_invoice = self.read_invoice()
if not self.pending_invoice:
return
self.save_pending_invoice()
def save_pending_invoice(self):
if not self.pending_invoice:
return
self.do_clear()
self.wallet.save_invoice(self.pending_invoice)
self.invoice_list.update()
self.pending_invoice = None
def do_pay(self):
self.pending_invoice = self.read_invoice()
if not self.pending_invoice:
return
self.do_pay_invoice(self.pending_invoice)
def pay_multiple_invoices(self, invoices):
outputs = []
for invoice in invoices:
outputs += invoice.outputs
self.pay_onchain_dialog(self.get_coins(), outputs)
def do_pay_invoice(self, invoice: 'Invoice'):
if invoice.type == PR_TYPE_LN:
assert isinstance(invoice, LNInvoice)
self.pay_lightning_invoice(invoice.invoice, amount_msat=invoice.get_amount_msat())
elif invoice.type == PR_TYPE_ONCHAIN:
assert isinstance(invoice, OnchainInvoice)
self.pay_onchain_dialog(self.get_coins(), invoice.outputs)
else:
raise Exception('unknown invoice type')
def get_coins(self, *, nonlocal_only=False) -> Sequence[PartialTxInput]:
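        """Return the coins to spend: the manual coin-control selection if one
        is active, otherwise all spendable coins of the wallet."""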
coins = self.get_manually_selected_coins()
if coins is not None:
return coins
else:
return self.wallet.get_spendable_coins(None, nonlocal_only=nonlocal_only)
def get_manually_selected_coins(self) -> Optional[Sequence[PartialTxInput]]:
"""Return a list of selected coins or None.
Note: None means selection is not being used,
while an empty sequence means the user specifically selected that.
"""
return self.utxo_list.get_spend_list()
def get_text_not_enough_funds_mentioning_frozen(self) -> str:
text = _("Not enough funds")
frozen_str = self.get_frozen_balance_str()
if frozen_str:
text += " ({} {})".format(
frozen_str, _("are frozen")
)
return text
def get_frozen_balance_str(self) -> Optional[str]:
frozen_bal = sum(self.wallet.get_frozen_balance())
if not frozen_bal:
return None
return self.format_amount_and_units(frozen_bal)
def pay_onchain_dialog(
self, inputs: Sequence[PartialTxInput],
outputs: List[PartialTxOutput], *,
external_keypairs=None) -> None:
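        """Confirm, sign and broadcast an on-chain payment. Shows the
        ConfirmTxDialog (or the advanced preview dialog), then signs in a
        background thread and broadcasts on success."""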
# trustedcoin requires this
if run_hook('abort_send', self):
return
is_sweep = bool(external_keypairs)
make_tx = lambda fee_est: self.wallet.make_unsigned_transaction(
coins=inputs,
outputs=outputs,
fee=fee_est,
is_sweep=is_sweep)
output_values = [x.value for x in outputs]
if output_values.count('!') > 1:
self.show_error(_("More than one output set to spend max"))
return
output_value = '!' if '!' in output_values else sum(output_values)
conf_dlg = ConfirmTxDialog(window=self, make_tx=make_tx, output_value=output_value, is_sweep=is_sweep)
if conf_dlg.not_enough_funds:
# Check if we had enough funds excluding fees,
# if so, still provide opportunity to set lower fees.
if not conf_dlg.have_enough_funds_assuming_zero_fees():
text = self.get_text_not_enough_funds_mentioning_frozen()
self.show_message(text)
return
# shortcut to advanced preview (after "enough funds" check!)
if self.config.get('advanced_preview'):
preview_dlg = PreviewTxDialog(
window=self,
make_tx=make_tx,
external_keypairs=external_keypairs)
preview_dlg.show()
return
cancelled, is_send, password, tx = conf_dlg.run()
if cancelled:
return
if is_send:
self.save_pending_invoice()
def sign_done(success):
if success:
self.broadcast_or_show(tx)
self.sign_tx_with_password(tx, callback=sign_done, password=password,
external_keypairs=external_keypairs)
else:
preview_dlg = PreviewTxDialog(
window=self,
make_tx=make_tx,
external_keypairs=external_keypairs)
preview_dlg.show()
def preview_tx_dialog(self, *, make_tx, external_keypairs=None):
d = PreviewTxDialog(make_tx=make_tx, external_keypairs=external_keypairs,
window=self)
d.show()
def broadcast_or_show(self, tx: Transaction, * , broadcast_done=None):
if tx is None:
self.show_error("tx is None")
return
if not tx.is_complete():
self.show_transaction(tx)
return
if not self.network:
self.show_error(_("You can't broadcast a transaction without a live network connection."))
self.show_transaction(tx)
return
self.broadcast_transaction(tx)
if broadcast_done:
broadcast_done(tx)
@protected
def sign_tx(self, tx, *, callback, external_keypairs, password):
self.sign_tx_with_password(tx, callback=callback, password=password, external_keypairs=external_keypairs)
def sign_tx_with_password(self, tx: PartialTransaction, *, callback, password, external_keypairs=None):
'''Sign the transaction in a separate thread. When done, calls
the callback with a success code of True or False.
'''
def on_success(result):
callback(True)
def on_failure(exc_info):
self.on_error(exc_info)
callback(False)
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
if external_keypairs:
# can sign directly
task = partial(tx.sign, external_keypairs)
else:
task = partial(self.wallet.sign_transaction, tx, password)
msg = _('Signing transaction...')
WaitingDialog(self, msg, task, on_success, on_failure)
def broadcast_transaction(self, tx: Transaction):
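        """Broadcast tx from a background thread via a WaitingDialog.
        For BIP70 payment requests, also send the payment message to the
        merchant and wait for the payment ACK."""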
def broadcast_thread():
# non-GUI thread
pr = self.payment_request
if pr and pr.has_expired():
self.payment_request = None
return False, _("Invoice has expired")
try:
self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
except TxBroadcastError as e:
return False, e.get_message_for_gui()
except BestEffortRequestFailed as e:
return False, repr(e)
# success
txid = tx.txid()
if pr:
self.payment_request = None
refund_address = self.wallet.get_receiving_address()
coro = pr.send_payment_and_receive_paymentack(tx.serialize(), refund_address)
fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
ack_status, ack_msg = fut.result(timeout=20)
self.logger.info(f"Payment ACK: {ack_status}. Ack message: {ack_msg}")
return True, txid
# Capture current TL window; override might be removed on return
parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin))
def broadcast_done(result):
# GUI thread
if result:
success, msg = result
if success:
parent.show_message(_('Payment sent.') + '\n' + msg)
self.invoice_list.update()
else:
msg = msg or ''
parent.show_error(msg)
WaitingDialog(self, _('Broadcasting transaction...'),
broadcast_thread, broadcast_done, self.on_error)
def mktx_for_open_channel(self, funding_sat):
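        """Return a tx factory (fee estimate -> funding transaction) that
        funds a lightning channel from non-local coins only."""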
coins = self.get_coins(nonlocal_only=True)
make_tx = lambda fee_est: self.wallet.lnworker.mktx_for_open_channel(coins=coins,
funding_sat=funding_sat,
fee_est=fee_est)
return make_tx
def open_channel(self, connect_str, funding_sat, push_amt):
try:
extract_nodeid(connect_str)
except ConnStringFormatError as e:
self.main_window.show_error(str(e))
return
# use ConfirmTxDialog
# we need to know the fee before we broadcast, because the txid is required
make_tx = self.mktx_for_open_channel(funding_sat)
d = ConfirmTxDialog(window=self, make_tx=make_tx, output_value=funding_sat, is_sweep=False)
# disable preview button because the user must not broadcast tx before establishment_flow
d.preview_button.setEnabled(False)
cancelled, is_send, password, funding_tx = d.run()
if not is_send:
return
if cancelled:
return
# read funding_sat from tx; converts '!' to int value
funding_sat = funding_tx.output_value_for_address(ln_dummy_address())
def task():
return self.wallet.lnworker.open_channel(connect_str=connect_str,
funding_tx=funding_tx,
funding_sat=funding_sat,
push_amt_sat=push_amt,
password=password)
def on_success(args):
chan, funding_tx = args
n = chan.constraints.funding_txn_minimum_depth
message = '\n'.join([
_('Channel established.'),
_('Remote peer ID') + ':' + chan.node_id.hex(),
_('This channel will be usable after {} confirmations').format(n)
])
if not funding_tx.is_complete():
message += '\n\n' + _('Please sign and broadcast the funding transaction')
self.show_message(message)
if not funding_tx.is_complete():
self.show_transaction(funding_tx)
def on_failure(exc_info):
type_, e, traceback = exc_info
self.show_error(_('Could not open channel: {}').format(repr(e)))
WaitingDialog(self, _('Opening channel...'), task, on_success, on_failure)
def query_choice(self, msg, choices):
# Needed by QtHandler for hardware wallets
dialog = WindowModalDialog(self.top_level_window())
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout(dialog)
vbox.addLayout(clayout.layout())
vbox.addLayout(Buttons(OkButton(dialog)))
if not dialog.exec_():
return None
return clayout.selected_index()
def lock_amount(self, b: bool) -> None:
self.amount_e.setFrozen(b)
self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
self.show_send_tab()
self.payto_e.is_pr = True
for e in [self.payto_e, self.message_e]:
e.setFrozen(True)
self.lock_amount(True)
self.payto_e.setText(_("please wait..."))
return True
def delete_invoices(self, keys):
for key in keys:
self.wallet.delete_invoice(key)
self.invoice_list.update()
def payment_request_ok(self):
pr = self.payment_request
if not pr:
return
key = pr.get_id()
invoice = self.wallet.get_invoice(key)
if invoice and self.wallet.get_invoice_status(invoice) == PR_PAID:
self.show_message("invoice already paid")
self.do_clear()
self.payment_request = None
return
self.payto_e.is_pr = True
if not pr.has_expired():
self.payto_e.setGreen()
else:
self.payto_e.setExpired()
self.payto_e.setText(pr.get_requestor())
self.amount_e.setAmount(pr.get_amount())
self.message_e.setText(pr.get_memo())
# signal to set fee
self.amount_e.textEdited.emit("")
def payment_request_error(self):
pr = self.payment_request
if not pr:
return
self.show_message(str(pr.error))
self.payment_request = None
self.do_clear()
def on_pr(self, request: 'paymentrequest.PaymentRequest'):
self.set_onchain(True)
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.payment_request_ok_signal.emit()
else:
self.payment_request_error_signal.emit()
def parse_lightning_invoice(self, invoice):
"""Parse ln invoice, and prepare the send tab for it."""
try:
lnaddr = lndecode(invoice, expected_hrp=constants.net.SEGWIT_HRP)
except Exception as e:
raise LnDecodeException(e) from e
pubkey = lnaddr.pubkey.serialize().hex()
for k,v in lnaddr.tags:
if k == 'd':
description = v
break
else:
description = ''
self.payto_e.setFrozen(True)
self.payto_e.setText(pubkey)
self.message_e.setText(description)
if lnaddr.get_amount_sat() is not None:
self.amount_e.setAmount(lnaddr.get_amount_sat())
#self.amount_e.textEdited.emit("")
self.set_onchain(False)
def set_onchain(self, b):
self._is_onchain = b
self.max_button.setEnabled(b)
def pay_to_URI(self, URI):
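        """Parse a payment URI and fill in the Send tab. URIs carrying a
        BIP70 request ('r', or 'name' plus 'sig') are fetched asynchronously
        and handled via on_pr; plain URIs populate address, amount and
        description directly."""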
if not URI:
return
try:
out = util.parse_URI(URI, self.on_pr)
except InvalidBitcoinURI as e:
self.show_error(_("Error parsing URI") + f":\n{e}")
return
self.show_send_tab()
self.payto_URI = out
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if r or (name and sig):
self.prepare_for_payment_request()
return
address = out.get('address')
amount = out.get('amount')
label = out.get('label')
message = out.get('message')
# use label as description (not BIP21 compliant)
if label and not message:
message = label
if address:
self.payto_e.setText(address)
if message:
self.message_e.setText(message)
if amount:
self.amount_e.setAmount(amount)
self.amount_e.textEdited.emit("")
def do_clear(self):
self.max_button.setChecked(False)
self.payment_request = None
self.payto_URI = None
self.payto_e.is_pr = False
self.set_onchain(False)
for e in [self.payto_e, self.message_e, self.amount_e]:
e.setText('')
e.setFrozen(False)
self.update_status()
run_hook('do_clear', self)
def set_frozen_state_of_addresses(self, addrs, freeze: bool):
self.wallet.set_frozen_state_of_addresses(addrs, freeze)
self.address_list.update()
self.utxo_list.update()
def set_frozen_state_of_coins(self, utxos: Sequence[PartialTxInput], freeze: bool):
self.wallet.set_frozen_state_of_coins(utxos, freeze)
self.utxo_list.update()
def create_list_tab(self, l, toolbar=None):
w = QWidget()
w.searchable_list = l
vbox = QVBoxLayout()
w.setLayout(vbox)
#vbox.setContentsMargins(0, 0, 0, 0)
#vbox.setSpacing(0)
if toolbar:
vbox.addLayout(toolbar)
vbox.addWidget(l)
return w
def create_addresses_tab(self):
from .address_list import AddressList
self.address_list = l = AddressList(self)
toolbar = l.create_toolbar(self.config)
tab = self.create_list_tab(l, toolbar)
toolbar_shown = bool(self.config.get('show_toolbar_addresses', False))
l.show_toolbar(toolbar_shown)
return tab
def create_utxo_tab(self):
from .utxo_list import UTXOList
self.utxo_list = UTXOList(self)
return self.create_list_tab(self.utxo_list)
def create_contacts_tab(self):
from .contact_list import ContactList
self.contact_list = l = ContactList(self)
return self.create_list_tab(l)
def remove_address(self, addr):
if not self.question(_("Do you want to remove {} from your wallet?").format(addr)):
return
try:
self.wallet.delete_address(addr)
except UserFacingException as e:
self.show_error(str(e))
else:
self.need_update.set() # history, addresses, coins
self.clear_receive_tab()
def paytomany(self):
self.show_send_tab()
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.show_send_tab()
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
def set_contact(self, label, address):
if not is_address(address):
self.show_error(_('Invalid Address'))
self.contact_list.update() # Displays original unchanged value
return False
self.contacts[address] = ('address', label)
self.contact_list.update()
self.history_list.update()
self.update_completions()
return True
def delete_contacts(self, labels):
if not self.question(_("Remove {} from your list of contacts?")
.format(" + ".join(labels))):
return
for label in labels:
self.contacts.pop(label)
self.history_list.update()
self.contact_list.update()
self.update_completions()
def show_onchain_invoice(self, invoice: OnchainInvoice):
amount_str = self.format_amount(invoice.amount_sat) + ' ' + self.base_unit()
d = WindowModalDialog(self, _("Onchain Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
grid.addWidget(QLabel(amount_str), 1, 1)
if len(invoice.outputs) == 1:
grid.addWidget(QLabel(_("Address") + ':'), 2, 0)
grid.addWidget(QLabel(invoice.get_address()), 2, 1)
else:
outputs_str = '\n'.join(map(lambda x: x.address + ' : ' + self.format_amount(x.value)+ self.base_unit(), invoice.outputs))
grid.addWidget(QLabel(_("Outputs") + ':'), 2, 0)
grid.addWidget(QLabel(outputs_str), 2, 1)
grid.addWidget(QLabel(_("Description") + ':'), 3, 0)
grid.addWidget(QLabel(invoice.message), 3, 1)
if invoice.exp:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(invoice.exp + invoice.time)), 4, 1)
if invoice.bip70:
pr = paymentrequest.PaymentRequest(bytes.fromhex(invoice.bip70))
pr.verify(self.contacts)
grid.addWidget(QLabel(_("Requestor") + ':'), 5, 0)
grid.addWidget(QLabel(pr.get_requestor()), 5, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 6, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 6, 1)
def do_export():
key = pr.get_id()
name = str(key) + '.bip70'
fn = getSaveFileName(
parent=self,
title=_("Save invoice to file"),
filename=name,
filter="*.bip70",
config=self.config,
)
if not fn:
return
with open(fn, 'wb') as f:
                    f.write(pr.raw)
self.show_message(_('BIP70 invoice saved as {}').format(fn))
exportButton = EnterButton(_('Export'), do_export)
buttons = Buttons(exportButton, CloseButton(d))
else:
buttons = Buttons(CloseButton(d))
vbox.addLayout(grid)
vbox.addLayout(buttons)
d.exec_()
def show_lightning_invoice(self, invoice: LNInvoice):
lnaddr = lndecode(invoice.invoice, expected_hrp=constants.net.SEGWIT_HRP)
d = WindowModalDialog(self, _("Lightning Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Node ID") + ':'), 0, 0)
grid.addWidget(QLabel(lnaddr.pubkey.serialize().hex()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
amount_str = self.format_amount(invoice.get_amount_sat()) + ' ' + self.base_unit()
grid.addWidget(QLabel(amount_str), 1, 1)
grid.addWidget(QLabel(_("Description") + ':'), 2, 0)
grid.addWidget(QLabel(invoice.message), 2, 1)
grid.addWidget(QLabel(_("Hash") + ':'), 3, 0)
payhash_e = ButtonsLineEdit(lnaddr.paymenthash.hex())
payhash_e.addCopyButton(self.app)
payhash_e.setReadOnly(True)
vbox.addWidget(payhash_e)
grid.addWidget(payhash_e, 3, 1)
if invoice.exp:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(invoice.time + invoice.exp)), 4, 1)
vbox.addLayout(grid)
invoice_e = ShowQRTextEdit(config=self.config)
invoice_e.addCopyButton(self.app)
invoice_e.setText(invoice.invoice)
vbox.addWidget(invoice_e)
vbox.addLayout(Buttons(CloseButton(d),))
d.exec_()
def create_console_tab(self):
from .console import Console
self.console = console = Console()
return console
def update_console(self):
console = self.console
console.history = self.wallet.db.get("qt-console-history", [])
console.history_index = len(console.history)
console.updateNamespace({
'wallet': self.wallet,
'network': self.network,
'plugins': self.gui_object.plugins,
'window': self,
'config': self.config,
'electrum': electrum,
'daemon': self.gui_object.daemon,
'util': util,
'bitcoin': bitcoin,
'lnutil': lnutil,
})
c = commands.Commands(
config=self.config,
daemon=self.gui_object.daemon,
network=self.network,
callback=lambda: self.console.set_json(True))
methods = {}
def mkfunc(f, method):
return lambda *args, **kwargs: f(method,
args,
self.password_dialog,
**{**kwargs, 'wallet': self.wallet})
for m in dir(c):
if m[0]=='_' or m in ['network','wallet','config']: continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
def create_status_bar(self):
sb = QStatusBar()
sb.setFixedHeight(35)
self.balance_label = QLabel("Loading wallet...")
self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
sb.addWidget(self.balance_label)
self.search_box = QLineEdit()
self.search_box.textChanged.connect(self.do_search)
self.search_box.hide()
sb.addPermanentWidget(self.search_box)
self.update_check_button = QPushButton("")
self.update_check_button.setFlat(True)
self.update_check_button.setCursor(QCursor(Qt.PointingHandCursor))
self.update_check_button.setIcon(read_QIcon("update.png"))
self.update_check_button.hide()
sb.addPermanentWidget(self.update_check_button)
self.password_button = StatusBarButton(QIcon(), _("Password"), self.change_password_dialog )
sb.addPermanentWidget(self.password_button)
sb.addPermanentWidget(StatusBarButton(read_QIcon("preferences.png"), _("Preferences"), self.settings_dialog ) )
self.seed_button = StatusBarButton(read_QIcon("seed.png"), _("Seed"), self.show_seed_dialog )
sb.addPermanentWidget(self.seed_button)
self.lightning_button = None
if self.wallet.has_lightning() and self.network:
self.lightning_button = StatusBarButton(read_QIcon("lightning_disconnected.png"), _("Lightning Network"), self.gui_object.show_lightning_dialog)
sb.addPermanentWidget(self.lightning_button)
self.update_lightning_icon()
self.status_button = None
if self.network:
self.status_button = StatusBarButton(read_QIcon("status_disconnected.png"), _("Network"), self.gui_object.show_network_dialog)
sb.addPermanentWidget(self.status_button)
run_hook('create_status_bar', sb)
self.setStatusBar(sb)
def create_coincontrol_statusbar(self):
self.coincontrol_sb = sb = QStatusBar()
sb.setSizeGripEnabled(False)
#sb.setFixedHeight(3 * char_width_in_lineedit())
sb.setStyleSheet('QStatusBar::item {border: None;} '
+ ColorScheme.GREEN.as_stylesheet(True))
self.coincontrol_label = QLabel()
self.coincontrol_label.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
self.coincontrol_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
sb.addWidget(self.coincontrol_label)
clear_cc_button = EnterButton(_('Reset'), lambda: self.utxo_list.set_spend_list(None))
clear_cc_button.setStyleSheet("margin-right: 5px;")
sb.addPermanentWidget(clear_cc_button)
sb.setVisible(False)
return sb
def set_coincontrol_msg(self, msg: Optional[str]) -> None:
if not msg:
self.coincontrol_label.setText("")
self.coincontrol_sb.setVisible(False)
return
self.coincontrol_label.setText(msg)
self.coincontrol_sb.setVisible(True)
def update_lightning_icon(self):
if self.lightning_button is None:
return
if self.network.lngossip is None:
return
# display colorful lightning icon to signal connection
self.lightning_button.setIcon(read_QIcon("lightning.png"))
cur, total, progress_percent = self.network.lngossip.get_sync_progress_estimate()
# self.logger.debug(f"updating lngossip sync progress estimate: cur={cur}, total={total}")
progress_str = "??%"
if progress_percent is not None:
progress_str = f"{progress_percent}%"
if progress_percent and progress_percent >= 100:
self.lightning_button.setMaximumWidth(25)
self.lightning_button.setText('')
self.lightning_button.setToolTip(_("The Lightning Network graph is fully synced."))
else:
self.lightning_button.setMaximumWidth(25 + 5 * char_width_in_lineedit())
self.lightning_button.setText(progress_str)
self.lightning_button.setToolTip(_("The Lightning Network graph is syncing...\n"
"Payments are more likely to succeed with a more complete graph."))
def update_lock_icon(self):
icon = read_QIcon("lock.png") if self.wallet.has_password() else read_QIcon("unlock.png")
self.password_button.setIcon(icon)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.may_have_password())
def change_password_dialog(self):
from electrum.storage import StorageEncryptionVersion
if self.wallet.get_available_storage_encryption_version() == StorageEncryptionVersion.XPUB_PASSWORD:
from .password_dialog import ChangePasswordDialogForHW
d = ChangePasswordDialogForHW(self, self.wallet)
ok, encrypt_file = d.run()
if not ok:
return
try:
hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption()
except UserCancelled:
return
except BaseException as e:
self.logger.exception('')
self.show_error(repr(e))
return
old_password = hw_dev_pw if self.wallet.has_password() else None
new_password = hw_dev_pw if encrypt_file else None
else:
from .password_dialog import ChangePasswordDialogForSW
d = ChangePasswordDialogForSW(self, self.wallet)
ok, old_password, new_password, encrypt_file = d.run()
if not ok:
return
try:
self.wallet.update_password(old_password, new_password, encrypt_storage=encrypt_file)
except InvalidPassword as e:
self.show_error(str(e))
return
except BaseException:
self.logger.exception('Failed to update password')
self.show_error(_('Failed to update password'))
return
msg = _('Password was updated successfully') if self.wallet.has_password() else _('Password is disabled, this wallet is not protected')
self.show_message(msg, title=_("Success"))
self.update_lock_icon()
def toggle_search(self):
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.search_box.setFocus(1)
else:
self.do_search('')
def do_search(self, t):
tab = self.tabs.currentWidget()
if hasattr(tab, 'searchable_list'):
tab.searchable_list.filter(t)
def new_contact_dialog(self):
d = WindowModalDialog(self, _("New Contact"))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('New Contact') + ':'))
grid = QGridLayout()
line1 = QLineEdit()
line1.setFixedWidth(32 * char_width_in_lineedit())
line2 = QLineEdit()
line2.setFixedWidth(32 * char_width_in_lineedit())
grid.addWidget(QLabel(_("Address")), 1, 0)
grid.addWidget(line1, 1, 1)
grid.addWidget(QLabel(_("Name")), 2, 0)
grid.addWidget(line2, 2, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if d.exec_():
self.set_contact(line2.text(), line1.text())
def show_wallet_info(self):
dialog = WindowModalDialog(self, _("Wallet Information"))
dialog.setMinimumSize(500, 100)
vbox = QVBoxLayout()
wallet_type = self.wallet.db.get('wallet_type', '')
if self.wallet.is_watching_only():
wallet_type += ' [{}]'.format(_('watching-only'))
seed_available = _('True') if self.wallet.has_seed() else _('False')
keystore_types = [k.get_type_text() for k in self.wallet.get_keystores()]
grid = QGridLayout()
basename = os.path.basename(self.wallet.storage.path)
grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
grid.addWidget(QLabel(basename), 0, 1)
grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
grid.addWidget(QLabel(wallet_type), 1, 1)
grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
grid.addWidget(QLabel(_("Seed available") + ':'), 3, 0)
grid.addWidget(QLabel(str(seed_available)), 3, 1)
if len(keystore_types) <= 1:
grid.addWidget(QLabel(_("Keystore type") + ':'), 4, 0)
ks_type = str(keystore_types[0]) if keystore_types else _('No keystore')
grid.addWidget(QLabel(ks_type), 4, 1)
# lightning
grid.addWidget(QLabel(_('Lightning') + ':'), 5, 0)
if self.wallet.can_have_lightning():
grid.addWidget(QLabel(_('Enabled')), 5, 1)
local_nodeid = QLabel(self.wallet.lnworker.node_keypair.pubkey.hex())
local_nodeid.setTextInteractionFlags(Qt.TextSelectableByMouse)
grid.addWidget(QLabel(_('Lightning Node ID:')), 6, 0)
grid.addWidget(local_nodeid, 6, 1, 1, 3)
else:
grid.addWidget(QLabel(_("Not available for this wallet.")), 5, 1)
grid.addWidget(HelpButton(_("Lightning is currently restricted to HD wallets with p2wpkh addresses.")), 5, 2)
vbox.addLayout(grid)
labels_clayout = None
if self.wallet.is_deterministic():
keystores = self.wallet.get_keystores()
ks_stack = QStackedWidget()
def select_ks(index):
ks_stack.setCurrentIndex(index)
# only show the combobox in case multiple accounts are available
if len(keystores) > 1:
def label(idx, ks):
if isinstance(self.wallet, Multisig_Wallet) and hasattr(ks, 'label'):
return _("cosigner") + f' {idx+1}: {ks.get_type_text()} {ks.label}'
else:
return _("keystore") + f' {idx+1}'
labels = [label(idx, ks) for idx, ks in enumerate(self.wallet.get_keystores())]
on_click = lambda clayout: select_ks(clayout.selected_index())
labels_clayout = ChoicesLayout(_("Select keystore"), labels, on_click)
vbox.addLayout(labels_clayout.layout())
for ks in keystores:
ks_w = QWidget()
ks_vbox = QVBoxLayout()
ks_vbox.setContentsMargins(0, 0, 0, 0)
ks_w.setLayout(ks_vbox)
mpk_text = ShowQRTextEdit(ks.get_master_public_key(), config=self.config)
mpk_text.setMaximumHeight(150)
mpk_text.addCopyButton(self.app)
run_hook('show_xpub_button', mpk_text, ks)
der_path_hbox = QHBoxLayout()
der_path_hbox.setContentsMargins(0, 0, 0, 0)
der_path_hbox.addWidget(QLabel(_("Derivation path") + ':'))
der_path_text = QLabel(ks.get_derivation_prefix() or _("unknown"))
der_path_text.setTextInteractionFlags(Qt.TextSelectableByMouse)
der_path_hbox.addWidget(der_path_text)
der_path_hbox.addStretch()
ks_vbox.addWidget(QLabel(_("Master Public Key")))
ks_vbox.addWidget(mpk_text)
ks_vbox.addLayout(der_path_hbox)
ks_stack.addWidget(ks_w)
select_ks(0)
vbox.addWidget(ks_stack)
vbox.addStretch(1)
btn_export_info = run_hook('wallet_info_buttons', self, dialog)
btn_close = CloseButton(dialog)
btns = Buttons(btn_export_info, btn_close)
vbox.addLayout(btns)
dialog.setLayout(vbox)
dialog.exec_()
def remove_wallet(self):
if self.question('\n'.join([
_('Delete wallet file?'),
"%s"%self.wallet.storage.path,
_('If your wallet contains funds, make sure you have saved its seed.')])):
self._delete_wallet()
@protected
def _delete_wallet(self, password):
wallet_path = self.wallet.storage.path
basename = os.path.basename(wallet_path)
r = self.gui_object.daemon.delete_wallet(wallet_path)
self.close()
if r:
self.show_error(_("Wallet removed: {}").format(basename))
else:
self.show_error(_("Wallet file not found: {}").format(basename))
@protected
def show_seed_dialog(self, password):
if not self.wallet.has_seed():
self.show_message(_('This wallet has no seed'))
return
keystore = self.wallet.get_keystore()
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except BaseException as e:
self.show_error(repr(e))
return
from .seed_dialog import SeedDialog
d = SeedDialog(self, seed, passphrase, config=self.config)
d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None, *,
help_text=None, show_copy_text_btn=False):
if not data:
return
d = QRDialog(
data=data,
parent=parent or self,
title=title,
help_text=help_text,
show_copy_text_btn=show_copy_text_btn,
config=self.config,
)
d.exec_()
@protected
def show_private_key(self, address, password):
if not address:
return
try:
pk = self.wallet.export_private_key(address, password)
except Exception as e:
self.logger.exception('')
self.show_message(repr(e))
return
xtype = bitcoin.deserialize_privkey(pk)[0]
d = WindowModalDialog(self, _("Private key"))
d.setMinimumSize(600, 150)
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_("Address") + ': ' + address))
vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
vbox.addWidget(QLabel(_("Private key") + ':'))
keys_e = ShowQRTextEdit(text=pk, config=self.config)
keys_e.addCopyButton(self.app)
vbox.addWidget(keys_e)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
msg_sign = _("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.") + '\n\n' + \
_('The operation is undefined. Not just in Electrum, but in general.')
@protected
def do_sign(self, address, message, signature, password):
address = address.text().strip()
message = message.toPlainText().strip()
if not bitcoin.is_address(address):
self.show_message(_('Invalid Htmlcoin address.'))
return
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
if not self.wallet.is_mine(address):
self.show_message(_('Address not in wallet.'))
return
txin_type = self.wallet.get_txin_type(address)
if txin_type not in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']:
self.show_message(_('Cannot sign messages with this type of address:') + \
' ' + txin_type + '\n\n' + self.msg_sign)
return
task = partial(self.wallet.sign_message, address, message, password)
def show_signed_message(sig):
try:
signature.setText(base64.b64encode(sig).decode('ascii'))
except RuntimeError:
# (signature) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
address = address.text().strip()
message = message.toPlainText().strip().encode('utf-8')
if not bitcoin.is_address(address):
self.show_message(_('Invalid Htmlcoin address.'))
return
try:
# This can throw on invalid base64
sig = base64.b64decode(str(signature.toPlainText()))
verified = ecc.verify_message_with_address(address, sig, message)
except Exception as e:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=''):
d = WindowModalDialog(self, _('Sign/verify Message'))
d.setMinimumSize(610, 290)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
address_e = QLineEdit()
address_e.setText(address)
layout.addWidget(QLabel(_('Address')), 2, 0)
layout.addWidget(address_e, 2, 1)
signature_e = QTextEdit()
signature_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Signature')), 3, 0)
layout.addWidget(signature_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Verify"))
b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def sign_pod(self, address=''):
d = WindowModalDialog(self, _('Sign POD'))
d.setMinimumSize(450, 300)
layout = QGridLayout(d)
address_e = QLineEdit()
address_e.setText(address)
layout.addWidget(QLabel(_('Address')), 1, 0)
layout.addWidget(address_e, 1, 1)
staker_e = QLineEdit()
layout.addWidget(QLabel(_('Staker')), 2, 0)
layout.addWidget(staker_e, 2, 1)
pod_e = QTextEdit()
pod_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('POD')), 3, 0)
layout.addWidget(pod_e, 3, 1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
b.clicked.connect(lambda: self.do_sign_pod(address_e, staker_e, pod_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def do_sign_pod(self, address_e, staker_e, pod_e):
staker = staker_e.text().strip()
if not is_hash160(staker):
try:
addr_type, staker = b58_address_to_hash160(staker)
except BaseException:
raise Exception('invalid staker address')
if addr_type != constants.net.ADDRTYPE_P2PKH:
raise Exception('invalid staker address')
staker = staker.hex()
message_e = QTextEdit()
message_e.setText(staker)
self.do_sign(address_e, message_e, pod_e)
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
cyphertext = encrypted_e.toPlainText()
task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
def setText(text):
try:
message_e.setText(text.decode('utf-8'))
except RuntimeError:
# (message_e) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=setText)
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = message_e.toPlainText()
message = message.encode('utf-8')
try:
public_key = ecc.ECPubkey(bfh(pubkey_e.text()))
except BaseException as e:
self.logger.exception('Invalid Public key')
self.show_warning(_('Invalid Public key'))
return
encrypted = public_key.encrypt_message(message)
encrypted_e.setText(encrypted.decode('ascii'))
def encrypt_message(self, address=''):
d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
d.setMinimumSize(610, 490)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
pubkey_e = QLineEdit()
if address:
pubkey = self.wallet.get_public_key(address)
pubkey_e.setText(pubkey)
layout.addWidget(QLabel(_('Public key')), 2, 0)
layout.addWidget(pubkey_e, 2, 1)
encrypted_e = QTextEdit()
encrypted_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Encrypted')), 3, 0)
layout.addWidget(encrypted_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Encrypt"))
b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Decrypt"))
b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def password_dialog(self, msg=None, parent=None):
from .password_dialog import PasswordDialog
parent = parent or self
d = PasswordDialog(parent, msg)
return d.run()
def tx_from_text(self, data: Union[str, bytes]) -> Union[None, 'PartialTransaction', 'Transaction']:
from electrum.transaction import tx_from_any
try:
return tx_from_any(data)
except BaseException as e:
self.show_critical(_("Electrum was unable to parse your transaction") + ":\n" + repr(e))
return
def import_channel_backup(self, encrypted: str):
if not self.question('Import channel backup?'):
return
try:
self.wallet.lnbackups.import_channel_backup(encrypted)
except Exception as e:
self.show_error("failed to import backup" + '\n' + str(e))
return
def read_tx_from_qrcode(self):
from electrum import qrscanner
try:
data = qrscanner.scan_barcode(self.config.get_video_device())
except BaseException as e:
self.show_error(repr(e))
return
if not data:
return
# if the user scanned a bitcoin URI
if str(data).startswith("htmlcoin:"):
self.pay_to_URI(data)
return
if data.startswith('channel_backup:'):
self.import_channel_backup(data)
return
# else if the user scanned an offline signed tx
tx = self.tx_from_text(data)
if not tx:
return
self.show_transaction(tx)
def read_tx_from_file(self) -> Optional[Transaction]:
fileName = getOpenFileName(
parent=self,
title=_("Select your transaction file"),
filter=TRANSACTION_FILE_EXTENSION_FILTER_ANY,
config=self.config,
)
if not fileName:
return
try:
with open(fileName, "rb") as f:
file_content = f.read() # type: Union[str, bytes]
except (ValueError, IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason),
title=_("Unable to read file or no transaction found"))
return
return self.tx_from_text(file_content)
def do_process_from_text(self):
text = text_dialog(
parent=self,
title=_('Input raw transaction'),
header_layout=_("Transaction:"),
ok_label=_("Load transaction"),
config=self.config,
)
if not text:
return
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
def do_process_from_text_channel_backup(self):
text = text_dialog(
parent=self,
title=_('Input channel backup'),
header_layout=_("Channel Backup:"),
ok_label=_("Load backup"),
config=self.config,
)
if not text:
return
if text.startswith('channel_backup:'):
self.import_channel_backup(text)
def do_process_from_file(self):
tx = self.read_tx_from_file()
if tx:
self.show_transaction(tx)
def do_process_from_txid(self):
from electrum import transaction
txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
txid = str(txid).strip()
try:
raw_tx = self.network.run_from_another_thread(
self.network.get_transaction(txid, timeout=10))
except UntrustedServerReturnedError as e:
self.logger.info(f"Error getting transaction from network: {repr(e)}")
self.show_message(_("Error getting transaction from network") + ":\n" + e.get_message_for_gui())
return
except Exception as e:
self.show_message(_("Error getting transaction from network") + ":\n" + repr(e))
return
else:
tx = transaction.Transaction(raw_tx)
self.show_transaction(tx)
@protected
def export_privkeys_dialog(self, password):
if self.wallet.is_watching_only():
self.show_message(_("This is a watching-only wallet"))
return
if isinstance(self.wallet, Multisig_Wallet):
self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
_('It cannot be "backed up" by simply exporting these private keys.'))
d = WindowModalDialog(self, _('Private keys'))
d.setMinimumSize(980, 300)
vbox = QVBoxLayout(d)
msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
_("Exposing a single private key can compromise your entire wallet!"),
_("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
vbox.addWidget(QLabel(msg))
e = QTextEdit()
e.setReadOnly(True)
vbox.addWidget(e)
defaultname = 'electrum-private-keys.csv'
select_msg = _('Select file to export your private keys to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
b = OkButton(d, _('Export'))
b.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(d), b))
private_keys = {}
addresses = self.wallet.get_addresses()
done = False
cancelled = False
def privkeys_thread():
for addr in addresses:
time.sleep(0.1)
if done or cancelled:
break
privkey = self.wallet.export_private_key(addr, password)
private_keys[addr] = privkey
self.computing_privkeys_signal.emit()
if not cancelled:
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.emit()
def show_privkeys():
s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items()))
e.setText(s)
b.setEnabled(True)
self.show_privkeys_signal.disconnect()
nonlocal done
done = True
def on_dialog_closed(*args):
nonlocal done
nonlocal cancelled
if not done:
cancelled = True
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.disconnect()
self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
self.show_privkeys_signal.connect(show_privkeys)
d.finished.connect(on_dialog_closed)
threading.Thread(target=privkeys_thread).start()
if not d.exec_():
done = True
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
except (IOError, os.error) as reason:
txt = "\n".join([
_("Electrum was unable to produce a private key-export."),
str(reason)
])
self.show_critical(txt, title=_("Unable to create csv"))
except Exception as e:
self.show_message(repr(e))
return
self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+") as f:
os.chmod(fileName, 0o600)
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["address", "private_key"])
for addr, pk in pklist.items():
transaction.writerow(["%34s"%addr,pk])
else:
f.write(json.dumps(pklist, indent = 4))
def do_import_labels(self):
def on_import():
self.need_update.set()
import_meta_gui(self, _('labels'), self.wallet.import_labels, on_import)
def do_export_labels(self):
export_meta_gui(self, _('labels'), self.wallet.export_labels)
def import_invoices(self):
import_meta_gui(self, _('invoices'), self.wallet.import_invoices, self.invoice_list.update)
def export_invoices(self):
export_meta_gui(self, _('invoices'), self.wallet.export_invoices)
def import_requests(self):
import_meta_gui(self, _('requests'), self.wallet.import_requests, self.request_list.update)
def export_requests(self):
export_meta_gui(self, _('requests'), self.wallet.export_requests)
def import_contacts(self):
import_meta_gui(self, _('contacts'), self.contacts.import_file, self.contact_list.update)
def export_contacts(self):
export_meta_gui(self, _('contacts'), self.contacts.export_file)
def sweep_key_dialog(self):
d = WindowModalDialog(self, title=_('Sweep private keys'))
d.setMinimumSize(600, 300)
vbox = QVBoxLayout(d)
hbox_top = QHBoxLayout()
hbox_top.addWidget(QLabel(_("Enter private keys:")))
hbox_top.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
vbox.addLayout(hbox_top)
keys_e = ScanQRTextEdit(allow_multi=True, config=self.config)
keys_e.setTabChangesFocus(True)
vbox.addWidget(keys_e)
addresses = self.wallet.get_unused_addresses()
if not addresses:
try:
addresses = self.wallet.get_receiving_addresses()
except AttributeError:
addresses = self.wallet.get_addresses()
h, address_e = address_field(addresses)
vbox.addLayout(h)
vbox.addStretch(1)
button = OkButton(d, _('Sweep'))
vbox.addLayout(Buttons(CancelButton(d), button))
button.setEnabled(False)
def get_address():
addr = str(address_e.text()).strip()
if bitcoin.is_address(addr):
return addr
def get_pk(*, raise_on_error=False):
text = str(keys_e.toPlainText())
return keystore.get_private_keys(text, raise_on_error=raise_on_error)
def on_edit():
valid_privkeys = False
try:
valid_privkeys = get_pk(raise_on_error=True) is not None
except Exception as e:
button.setToolTip(f'{_("Error")}: {repr(e)}')
else:
button.setToolTip('')
button.setEnabled(get_address() is not None and valid_privkeys)
on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
keys_e.textChanged.connect(on_edit)
address_e.textChanged.connect(on_edit)
address_e.textChanged.connect(on_address)
on_address(str(address_e.text()))
if not d.exec_():
return
# user pressed "sweep"
addr = get_address()
try:
self.wallet.check_address_for_corruption(addr)
except InternalAddressCorruption as e:
self.show_error(str(e))
raise
privkeys = get_pk()
def on_success(result):
coins, keypairs = result
outputs = [PartialTxOutput.from_address_and_value(addr, value='!')]
self.warn_if_watching_only()
self.pay_onchain_dialog(coins, outputs, external_keypairs=keypairs)
def on_failure(exc_info):
self.on_error(exc_info)
msg = _('Preparing sweep transaction...')
task = lambda: self.network.run_from_another_thread(
sweep_preparations(privkeys, self.network))
WaitingDialog(self, msg, task, on_success, on_failure)
def _do_import(self, title, header_layout, func):
text = text_dialog(
parent=self,
title=title,
header_layout=header_layout,
ok_label=_('Import'),
allow_multi=True,
config=self.config,
)
if not text:
return
keys = str(text).split()
good_inputs, bad_inputs = func(keys)
if good_inputs:
msg = '\n'.join(good_inputs[:10])
if len(good_inputs) > 10: msg += '\n...'
self.show_message(_("The following addresses were added")
+ f' ({len(good_inputs)}):\n' + msg)
if bad_inputs:
msg = "\n".join(f"{key[:10]}... ({msg})" for key, msg in bad_inputs[:10])
if len(bad_inputs) > 10: msg += '\n...'
self.show_error(_("The following inputs could not be imported")
+ f' ({len(bad_inputs)}):\n' + msg)
self.address_list.update()
self.history_list.update()
def import_addresses(self):
if not self.wallet.can_import_address():
return
title, msg = _('Import addresses'), _("Enter addresses")+':'
self._do_import(title, msg, self.wallet.import_addresses)
@protected
def do_import_privkey(self, password):
if not self.wallet.can_import_privkey():
return
title = _('Import private keys')
header_layout = QHBoxLayout()
header_layout.addWidget(QLabel(_("Enter private keys")+':'))
header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
self._do_import(title, header_layout, lambda x: self.wallet.import_private_keys(x, password))
def update_fiat(self):
b = self.fx and self.fx.is_enabled()
self.fiat_send_e.setVisible(b)
self.fiat_receive_e.setVisible(b)
self.history_list.update()
self.address_list.refresh_headers()
self.address_list.update()
self.update_status()
def settings_dialog(self):
from .settings_dialog import SettingsDialog
d = SettingsDialog(self, self.config)
self.alias_received_signal.connect(d.set_alias_color)
d.exec_()
self.alias_received_signal.disconnect(d.set_alias_color)
if self.fx:
self.fx.trigger_update()
run_hook('close_settings_dialog')
if d.need_restart:
self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
# note that closeEvent is NOT called if the user quits with Ctrl-C
self.clean_up()
event.accept()
def clean_up(self):
if self._cleaned_up:
return
self._cleaned_up = True
if self.wallet.thread:
self.wallet.thread.stop()
self.wallet.thread = None
util.unregister_callback(self.on_network)
self.config.set_key("is_maximized", self.isMaximized())
if not self.isMaximized():
g = self.geometry()
self.wallet.db.put("winpos-qt", [g.left(),g.top(),
g.width(),g.height()])
self.wallet.db.put("qt-console-history", self.console.history[-50:])
if self.qr_window:
self.qr_window.close()
self.close_wallet()
if self._update_check_thread:
self._update_check_thread.exit()
self._update_check_thread.wait()
if self.tray:
self.tray = None
self.gui_object.timer.timeout.disconnect(self.timer_actions)
self.gui_object.close_window(self)
def plugins_dialog(self):
self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins'))
plugins = self.gui_object.plugins
vbox = QVBoxLayout(d)
# plugins
scroll = QScrollArea()
scroll.setEnabled(True)
scroll.setWidgetResizable(True)
scroll.setMinimumSize(400,250)
vbox.addWidget(scroll)
w = QWidget()
scroll.setWidget(w)
w.setMinimumHeight(plugins.count() * 35)
grid = QGridLayout()
grid.setColumnStretch(0,1)
w.setLayout(grid)
settings_widgets = {}
def enable_settings_widget(p: Optional['BasePlugin'], name: str, i: int):
widget = settings_widgets.get(name) # type: Optional[QWidget]
if widget and not p:
# plugin got disabled, rm widget
grid.removeWidget(widget)
widget.setParent(None)
settings_widgets.pop(name)
elif widget is None and p and p.requires_settings() and p.is_enabled():
# plugin got enabled, add widget
widget = settings_widgets[name] = p.settings_widget(d)
grid.addWidget(widget, i, 1)
def do_toggle(cb, name, i):
p = plugins.toggle(name)
cb.setChecked(bool(p))
enable_settings_widget(p, name, i)
# note: all enabled plugins will receive this hook:
run_hook('init_qt', self.gui_object)
for i, descr in enumerate(plugins.descriptions.values()):
full_name = descr['__name__']
prefix, _separator, name = full_name.rpartition('.')
p = plugins.get(name)
if descr.get('registers_keystore'):
continue
try:
cb = QCheckBox(descr['fullname'])
plugin_is_loaded = p is not None
cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
or plugin_is_loaded and p.can_user_disable())
cb.setEnabled(cb_enabled)
cb.setChecked(plugin_is_loaded and p.is_enabled())
grid.addWidget(cb, i, 0)
enable_settings_widget(p, name, i)
cb.clicked.connect(partial(do_toggle, cb, name, i))
msg = descr['description']
if descr.get('requires'):
msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
grid.addWidget(HelpButton(msg), i, 2)
except Exception:
self.logger.exception(f"cannot display plugin {name}")
grid.setRowStretch(len(plugins.descriptions.values()), 1)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
def cpfp(self, parent_tx: Transaction, new_tx: PartialTransaction) -> None:
total_size = parent_tx.estimated_size() + new_tx.estimated_size()
parent_txid = parent_tx.txid()
assert parent_txid
parent_fee = self.wallet.get_tx_fee(parent_txid)
if parent_fee is None:
self.show_error(_("Can't CPFP: unknown fee for parent transaction."))
return
d = WindowModalDialog(self, _('Child Pays for Parent'))
vbox = QVBoxLayout(d)
msg = (
"A CPFP is a transaction that sends an unconfirmed output back to "
"yourself, with a high fee. The goal is to have miners confirm "
"the parent transaction in order to get the fee attached to the "
"child transaction.")
vbox.addWidget(WWLabel(_(msg)))
msg2 = ("The proposed fee is computed using your "
"fee/kB settings, applied to the total size of both child and "
"parent transactions. After you broadcast a CPFP transaction, "
"it is normal to see a new unconfirmed transaction in your history.")
vbox.addWidget(WWLabel(_(msg2)))
grid = QGridLayout()
grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
grid.addWidget(QLabel('%d bytes'% total_size), 0, 1)
max_fee = new_tx.output_value()
grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
output_amount = QLabel('')
grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
grid.addWidget(output_amount, 2, 1)
fee_e = BTCAmountEdit(self.get_decimal_point)
# FIXME with dyn fees, without estimates, there are all kinds of crashes here
combined_fee = QLabel('')
combined_feerate = QLabel('')
def on_fee_edit(x):
fee_for_child = fee_e.get_amount()
if fee_for_child is None:
return
out_amt = max_fee - fee_for_child
out_amt_str = (self.format_amount(out_amt) + ' ' + self.base_unit()) if out_amt else ''
output_amount.setText(out_amt_str)
comb_fee = parent_fee + fee_for_child
comb_fee_str = (self.format_amount(comb_fee) + ' ' + self.base_unit()) if comb_fee else ''
combined_fee.setText(comb_fee_str)
comb_feerate = comb_fee / total_size * 1000
comb_feerate_str = self.format_fee_rate(comb_feerate) if comb_feerate else ''
combined_feerate.setText(comb_feerate_str)
fee_e.textChanged.connect(on_fee_edit)
def get_child_fee_from_total_feerate(fee_per_kb):
fee = fee_per_kb * total_size / 1000 - parent_fee
fee = min(max_fee, fee)
fee = max(total_size, fee) # pay at least 1 sat/byte for combined size
return fee
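# Worked example (illustrative figures, not taken from the wallet): if the parent
# tx is 200 bytes with a 20,000 sat fee and the child is 150 bytes (total_size = 350),
# then targeting a combined feerate of 100,000 sat/kB gives a child fee of
# 100000 * 350 / 1000 - 20000 = 15,000 sat, clamped to [total_size, max_fee] so the
# pair still pays at least 1 sat/byte overall.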
suggested_feerate = self.config.fee_per_kb()
if suggested_feerate is None:
self.show_error(f'''{_("Can't CPFP")}: {_('Dynamic fee estimates not available')}''')
return
fee = get_child_fee_from_total_feerate(suggested_feerate)
fee_e.setAmount(fee)
grid.addWidget(QLabel(_('Fee for child') + ':'), 3, 0)
grid.addWidget(fee_e, 3, 1)
def on_rate(dyn, pos, fee_rate):
fee = get_child_fee_from_total_feerate(fee_rate)
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
fee_combo = FeeComboBox(fee_slider)
fee_slider.update()
grid.addWidget(fee_slider, 4, 1)
grid.addWidget(fee_combo, 4, 2)
grid.addWidget(QLabel(_('Total fee') + ':'), 5, 0)
grid.addWidget(combined_fee, 5, 1)
grid.addWidget(QLabel(_('Total feerate') + ':'), 6, 0)
grid.addWidget(combined_feerate, 6, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
fee = fee_e.get_amount()
if fee is None:
return # fee left empty, treat it as "cancel"
if fee > max_fee:
self.show_error(_('Max fee exceeded'))
return
new_tx = self.wallet.cpfp(parent_tx, fee)
new_tx.set_rbf(True)
self.show_transaction(new_tx)
def bump_fee_dialog(self, tx: Transaction):
txid = tx.txid()
assert txid
fee = self.wallet.get_tx_fee(txid)
if fee is None:
self.show_error(_("Can't bump fee: unknown fee for original transaction."))
return
tx_label = self.wallet.get_label_for_txid(txid)
tx_size = tx.estimated_size()
old_fee_rate = fee / tx_size # sat/vbyte
d = WindowModalDialog(self, _('Bump Fee'))
vbox = QVBoxLayout(d)
vbox.addWidget(WWLabel(_("Increase your transaction's fee to improve its position in mempool.")))
grid = QGridLayout()
grid.addWidget(QLabel(_('Current Fee') + ':'), 0, 0)
grid.addWidget(QLabel(self.format_amount(fee) + ' ' + self.base_unit()), 0, 1)
grid.addWidget(QLabel(_('Current Fee rate') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_fee_rate(1000 * old_fee_rate)), 1, 1)
grid.addWidget(QLabel(_('New Fee rate') + ':'), 2, 0)
def on_textedit_rate():
fee_slider.deactivate()
feerate_e = FeerateEdit(lambda: 0)
feerate_e.setAmount(max(old_fee_rate * 1.5, old_fee_rate + 1))
feerate_e.textEdited.connect(on_textedit_rate)
grid.addWidget(feerate_e, 2, 1)
def on_slider_rate(dyn, pos, fee_rate):
fee_slider.activate()
if fee_rate is not None:
feerate_e.setAmount(fee_rate / 1000)
fee_slider = FeeSlider(self, self.config, on_slider_rate)
fee_combo = FeeComboBox(fee_slider)
fee_slider.deactivate()
grid.addWidget(fee_slider, 3, 1)
grid.addWidget(fee_combo, 3, 2)
vbox.addLayout(grid)
cb = QCheckBox(_('Final'))
vbox.addWidget(cb)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
is_final = cb.isChecked()
new_fee_rate = feerate_e.get_amount()
try:
new_tx = self.wallet.bump_fee(tx=tx, new_fee_rate=new_fee_rate, coins=self.get_coins())
except CannotBumpFee as e:
self.show_error(str(e))
return
if is_final:
new_tx.set_rbf(False)
self.show_transaction(new_tx, tx_desc=tx_label)
def dscancel_dialog(self, tx: Transaction):
txid = tx.txid()
assert txid
fee = self.wallet.get_tx_fee(txid)
if fee is None:
self.show_error(_('Cannot cancel transaction') + ': ' + _('unknown fee for original transaction'))
return
tx_size = tx.estimated_size()
old_fee_rate = fee / tx_size # sat/vbyte
d = WindowModalDialog(self, _('Cancel transaction'))
vbox = QVBoxLayout(d)
vbox.addWidget(WWLabel(_("Cancel an unconfirmed RBF transaction by double-spending "
"its inputs back to your wallet with a higher fee.")))
grid = QGridLayout()
grid.addWidget(QLabel(_('Current Fee') + ':'), 0, 0)
grid.addWidget(QLabel(self.format_amount(fee) + ' ' + self.base_unit()), 0, 1)
grid.addWidget(QLabel(_('Current Fee rate') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_fee_rate(1000 * old_fee_rate)), 1, 1)
grid.addWidget(QLabel(_('New Fee rate') + ':'), 2, 0)
def on_textedit_rate():
fee_slider.deactivate()
feerate_e = FeerateEdit(lambda: 0)
feerate_e.setAmount(max(old_fee_rate * 1.5, old_fee_rate + 1))
feerate_e.textEdited.connect(on_textedit_rate)
grid.addWidget(feerate_e, 2, 1)
def on_slider_rate(dyn, pos, fee_rate):
fee_slider.activate()
if fee_rate is not None:
feerate_e.setAmount(fee_rate / 1000)
fee_slider = FeeSlider(self, self.config, on_slider_rate)
fee_combo = FeeComboBox(fee_slider)
fee_slider.deactivate()
grid.addWidget(fee_slider, 3, 1)
grid.addWidget(fee_combo, 3, 2)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
new_fee_rate = feerate_e.get_amount()
try:
new_tx = self.wallet.dscancel(tx=tx, new_fee_rate=new_fee_rate)
except CannotDoubleSpendTx as e:
self.show_error(str(e))
return
self.show_transaction(new_tx)
def save_transaction_into_wallet(self, tx: Transaction):
win = self.top_level_window()
try:
if not self.wallet.add_transaction(tx):
win.show_error(_("Transaction could not be saved.") + "\n" +
_("It conflicts with current history."))
return False
except AddTransactionException as e:
win.show_error(e)
return False
else:
self.wallet.save_db()
# need to update at least: history_list, utxo_list, address_list
self.need_update.set()
msg = (_("Transaction added to wallet history.") + '\n\n' +
_("Note: this is an offline transaction, if you want the network "
"to see it, you need to broadcast it."))
win.msg_box(QPixmap(icon_path("offline_tx.png")), None, _('Success'), msg)
return True
def show_cert_mismatch_error(self):
if self.showing_cert_mismatch_error:
return
self.showing_cert_mismatch_error = True
self.show_critical(title=_("Certificate mismatch"),
msg=_("The SSL certificate provided by the main server did not match the fingerprint passed in with the --serverfingerprint option.") + "\n\n" +
_("Electrum will now exit."))
self.showing_cert_mismatch_error = False
self.close()
def disable_opsender(self) -> bool:
return self.config.get("disable_opsender", True) or \
self.network.get_server_height() <= constants.net.QIP5_FORK_HEIGHT
def set_token(self, token: 'Token'):
self.wallet.add_token(token)
self.token_balance_list.update()
self.token_hist_list.update()
self.token_hist_model.refresh('set_token')
def delete_token(self, key: str):
token_name = self.wallet.db.get_token(key).name
if not self.question(_("Remove {} from your token list ?")
.format(token_name)):
return
self.wallet.delete_token(key)
self.token_balance_list.update()
self.token_hist_model.refresh('delete_token')
def create_tokens_tab(self):
from .token_list import TokenBalanceList, TokenHistoryModel, TokenHistoryList
self.token_balance_list = tbl = TokenBalanceList(self)
self.token_hist_model = TokenHistoryModel(self)
self.token_hist_list = thl = TokenHistoryList(self, self.token_hist_model)
self.token_hist_model.set_view(self.token_hist_list)
splitter = QSplitter(self)
splitter.addWidget(tbl)
splitter.addWidget(thl)
splitter.setOrientation(Qt.Vertical)
return splitter
def token_add_dialog(self):
if isinstance(self.wallet.keystore, TrezorKeyStore):
self.show_message('Trezor does not support HRC20 Token for now')
return
d = TokenAddDialog(self)
d.show()
def token_info_dialog(self, token: 'Token'):
d = TokenInfoDialog(self, token)
d.show()
def token_send_dialog(self, token: 'Token'):
d = TokenSendDialog(self, token)
d.show()
def do_token_pay(self, token: 'Token', pay_to: str, amount: int, gas_limit: int, gas_price: int, dialog, preview=False):
try:
datahex = 'a9059cbb{}{:064x}'.format(pay_to.zfill(64), amount)
op_sender = None if self.disable_opsender() else token.bind_addr
script = contract_script(gas_limit, gas_price, datahex, token.contract_addr, opcodes.OP_CALL, op_sender)
outputs = [PartialTxOutput(scriptpubkey=script, value=0)]
tx_desc = _('Pay out {} {}').format(amount / (10 ** token.decimals), token.symbol)
self._smart_contract_broadcast(outputs, tx_desc, gas_limit * gas_price,
token.bind_addr, dialog, None, preview)
except (BaseException,) as e:
traceback.print_exc(file=sys.stderr)
dialog.show_message(str(e))
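# Illustrative breakdown of the calldata assembled above (assumed values, not from the
# wallet): sending 1 unit of a token with 8 decimals (amount = 10**8) builds
#   'a9059cbb'                               -> 4-byte selector of transfer(address,uint256)
# + pay_to.zfill(64)                         -> recipient hash160, left-padded to 32 bytes
# + '{:064x}'.format(10**8)                  -> '0000...0000000005f5e100' (32-byte amount)
# which is then wrapped into an OP_CALL contract script against token.contract_addr.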
def set_delegation(self, dele: 'Delegation'):
self.wallet.add_delegation(dele)
self.delegation_list.update()
def delete_delegation(self, addr: str):
self.wallet.delete_delegation(addr)
self.delegation_list.update()
def call_add_delegation(self, addr: str, staker: str, fee: int, gas_limit: int, gas_price: int, dialog, pod: Optional[bytes]):
"""
:param staker: hash160 str
"""
password = None
if self.wallet.has_keystore_encryption():
password = self.password_dialog(_("Enter your password to proceed"))
if not password: return
if not pod:
pod = self.wallet.sign_message(addr, staker, password)
if len(pod) != 65:
raise Exception("incorrect POD length")
args = [staker.lower(), fee, pod]
self.sendto_smart_contract(DELEGATION_CONTRACT, DELEGATE_ABI[1], args,
gas_limit, gas_price, 0, addr, dialog, False, tx_desc="update delegation")
def call_remove_delegation(self, addr: str, gas_limit: int, gas_price: int, dialog):
self.sendto_smart_contract(DELEGATION_CONTRACT, DELEGATE_ABI[0], [],
gas_limit, gas_price, 0, addr, dialog, False, tx_desc="remove delegation")
def create_delegations_tab(self):
from .delegation_list import DelegationList
self.delegation_list = l = DelegationList(self)
return self.create_list_tab(l)
def delegation_dialog(self, dele: 'Delegation' = None, mode='add'):
if isinstance(self.wallet.keystore, TrezorKeyStore):
self.show_message('Trezor does not support staking delegation for now')
return
if self.network.get_server_height() < constants.net.OFFLINE_STAKE_HEIGHT:
self.show_message('Offline staking not activated')
return
d = DelegationDialog(self, dele, mode)
d.show()
def _smart_contract_broadcast(self, outputs: list, desc: str, gas_fee: int, sender: str, dialog,
broadcast_done=None, preview=False):
addr_type, __ = b58_address_to_hash160(sender)
if not addr_type == constants.net.ADDRTYPE_P2PKH:
dialog.show_message(_('only P2PKH address can call contract'))
return
coins = self.get_coins()
make_tx = lambda fee_est: self.wallet.make_unsigned_transaction(coins=coins,
outputs=outputs,
fee=fee_est,
change_addr=sender,
gas_fee=gas_fee,
sender=sender,
is_sweep=False)
output_values = [x.value for x in outputs]
if output_values.count('!') > 1:
self.show_error(_("More than one output set to spend max"))
return
output_value = '!' if '!' in output_values else sum(output_values)
try:
d = ConfirmTxDialog(window=self, make_tx=make_tx, output_value=output_value, is_sweep=False, gas_fee=gas_fee)
except SenderNoUTXOException as e:
self.show_error(str(e))
return
if d.not_enough_funds:
# Check if we had enough funds excluding fees,
# if so, still provide opportunity to set lower fees.
if not d.have_enough_funds_assuming_zero_fees():
self.show_message(_('Not Enough Funds'))
return
# shortcut to advanced preview (after "enough funds" check!)
if preview or self.config.get('advanced_preview'):
self.preview_tx_dialog(make_tx=make_tx)
return
cancelled, is_send, password, tx = d.run()
if cancelled:
return
if tx is None:
self.show_message(_('transaction is None'))
return
if is_send:
def sign_done(success):
if success:
self.broadcast_or_show(tx, broadcast_done=broadcast_done)
if desc is not None:
self.wallet.set_label(tx.txid(), desc)
self.sign_tx_with_password(tx, callback=sign_done, password=password)
else:
self.preview_tx_dialog(make_tx=make_tx)
def create_smart_contract_tab(self):
from .smart_contract_list import SmartContractList
self.smart_contract_list = l = SmartContractList(self)
return self.create_list_tab(l)
def set_smart_contract(self, name: str, address: str, interface: list) -> bool:
if not is_hash160(address):
self.show_error(_('Invalid Address'))
self.smart_contract_list.update()
return False
self.wallet.db.smart_contracts[address] = (name, interface)
self.smart_contract_list.update()
return True
def delete_samart_contact(self, address: str) -> bool:
if not self.question(_("Remove {} from your list of smart contracts?".format(
self.wallet.db.smart_contracts[address][0]))):
return False
self.wallet.db.smart_contracts.pop(address)
self.smart_contract_list.update()
return True
def call_smart_contract(self, address: str, func: dict, args: list, sender: str, dialog):
data = eth_abi_encode(func, args)
try:
result = self.network.run_from_another_thread(self.network.call_contract(address, data, sender))
except BaseException as e:
self.logger.exception('')
dialog.show_message(str(e))
return
types = list([x['type'] for x in func.get('outputs', [])])
try:
if isinstance(result, dict):
except_msg = result.get('executionResult', {}).get('exceptedMessage', '')
if len(except_msg) > 1:
dialog.show_message(f"exceptedMessage: {except_msg}")
return
output = eth_abi.decode_abi(types, binascii.a2b_hex(result['executionResult']['output']))
else:
output = eth_abi.decode_abi(types, binascii.a2b_hex(result))
def decode_x(x):
if isinstance(x, bytes):
try:
return x.decode()
except UnicodeDecodeError:
return str(x)
return str(x)
output = ','.join([decode_x(x) for x in output])
dialog.show_message(output)
except (BaseException,) as e:
self.logger.exception('')
dialog.show_message(f'{e} {result}')
def sendto_smart_contract(self, address: str, func: dict, args: list,
gas_limit: int, gas_price: int, amount: int, sender: str,
dialog, preview, tx_desc=None):
try:
abi_encoded = eth_abi_encode(func, args)
op_sender = None if self.disable_opsender() else sender
script = contract_script(gas_limit, gas_price, abi_encoded, address, opcodes.OP_CALL, op_sender)
outputs = [PartialTxOutput(scriptpubkey=script, value=amount)]
if tx_desc is None:
tx_desc = 'contract sendto {}'.format(self.wallet.db.smart_contracts.get(address, [address, ])[0])
self._smart_contract_broadcast(outputs, tx_desc, gas_limit * gas_price, sender, dialog, None, preview)
except (BaseException,) as e:
self.logger.exception('')
dialog.show_message(str(e))
def create_smart_contract(self, name: str, bytecode: str, abi: list, constructor: dict,
args: list, gas_limit: int, gas_price: int, sender: str, dialog, preview):
def broadcast_done(tx):
s = tx.outputs()[0].scriptpubkey
if decode_opcreate_script(s) or decode_opsender_script(s):
reversed_txid = binascii.a2b_hex(tx.txid())[::-1]
output_index = b'\x00\x00\x00\x00'
contract_addr = hash_160(reversed_txid + output_index).hex()
self.set_smart_contract(name, contract_addr, abi)
else:
self.logger.debug("the smart contract created seems to be invalid")
try:
abi_encoded = ''
if constructor:
abi_encoded = eth_abi_encode(constructor, args)
op_sender = None if self.disable_opsender() else sender
script = contract_script(gas_limit, gas_price, bytecode + abi_encoded, None, opcodes.OP_CREATE, op_sender)
outputs = [PartialTxOutput(scriptpubkey=script, value=0)]
self._smart_contract_broadcast(outputs, 'create contract {}'.format(name), gas_limit * gas_price,
sender, dialog, broadcast_done, preview)
except (BaseException,) as e:
self.logger.exception('')
dialog.show_message(str(e))
def contract_create_dialog(self):
d = ContractCreateDialog(self)
d.show()
def contract_add_dialog(self):
d = ContractEditDialog(self)
d.show()
def contract_edit_dialog(self, address: str):
name, interface = self.wallet.db.smart_contracts[address]
contract = {
'name': name,
'interface': interface,
'address': address
}
d = ContractEditDialog(self, contract)
d.show()
def contract_func_dialog(self, address: str):
name, interface = self.wallet.db.smart_contracts[address]
contract = {
'name': name,
'interface': interface,
'address': address
}
d = ContractFuncDialog(self, contract)
d.show()
|
tello.py
|
import socket
import threading
import time
import numpy as np
import libh264decoder
class Tello:
"""Wrapper class to interact with the Tello drone."""
def __init__(self, local_ip, local_port, imperial=False, command_timeout=.3, tello_ip='192.168.10.1',
tello_port=8889):
"""
Binds to the local IP/port and puts the Tello into command mode.
:param local_ip (str): Local IP address to bind.
:param local_port (int): Local port to bind.
:param imperial (bool): If True, speed is MPH and distance is feet.
If False, speed is KPH and distance is meters.
:param command_timeout (int|float): Number of seconds to wait for a response to a command.
:param tello_ip (str): Tello IP.
:param tello_port (int): Tello port.
"""
self.abort_flag = False
self.decoder = libh264decoder.H264Decoder()
self.command_timeout = command_timeout
self.imperial = imperial
self.response = None
self.frame = None # numpy array BGR -- current camera output frame
self.is_freeze = False # freeze current camera output
self.last_frame = None
self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # socket for sending cmd
self.socket_video = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # socket for receiving video stream
self.tello_address = (tello_ip, tello_port)
self.local_video_port = 11111 # port for receiving video stream
self.last_height = 0
self.socket.bind((local_ip, local_port))
# thread for receiving cmd ack
self.receive_thread = threading.Thread(target=self._receive_thread)
self.receive_thread.daemon = True
self.receive_thread.start()
# to receive video -- send cmd: command, streamon
self.socket.sendto(b'command', self.tello_address)
print ('sent: command')
self.socket.sendto(b'streamon', self.tello_address)
print ('sent: streamon')
self.socket_video.bind((local_ip, self.local_video_port))
# thread for receiving video
self.receive_video_thread = threading.Thread(target=self._receive_video_thread)
self.receive_video_thread.daemon = True
self.receive_video_thread.start()
def __del__(self):
"""Closes the local socket."""
self.socket.close()
self.socket_video.close()
def read(self):
"""Return the last frame from camera."""
if self.is_freeze:
return self.last_frame
else:
return self.frame
def video_freeze(self, is_freeze=True):
"""Pause video output -- set is_freeze to True"""
self.is_freeze = is_freeze
if is_freeze:
self.last_frame = self.frame
def _receive_thread(self):
"""Listen to responses from the Tello.
Runs as a thread, sets self.response to whatever the Tello last returned.
"""
while True:
try:
self.response, ip = self.socket.recvfrom(3000)
#print(self.response)
except socket.error as exc:
print ("Caught exception socket.error : %s" % exc)
def _receive_video_thread(self):
"""
Listens for video streaming (raw h264) from the Tello.
Runs as a thread, sets self.frame to the most recent frame Tello captured.
"""
packet_data = ""
while True:
try:
res_string, ip = self.socket_video.recvfrom(2048)
packet_data += res_string
# end of frame
if len(res_string) != 1460:
for frame in self._h264_decode(packet_data):
self.frame = frame
packet_data = ""
except socket.error as exc:
print ("Caught exception socket.error : %s" % exc)
def _h264_decode(self, packet_data):
"""
decode raw h264 format data from Tello
:param packet_data: raw h264 data array
:return: a list of decoded frame
"""
res_frame_list = []
frames = self.decoder.decode(packet_data)
for framedata in frames:
(frame, w, h, ls) = framedata
if frame is not None:
# print 'frame size %i bytes, w %i, h %i, linesize %i' % (len(frame), w, h, ls)
frame = np.fromstring(frame, dtype=np.ubyte, count=len(frame), sep='')
frame = frame.reshape((h, ls // 3, 3))  # integer division so the shape stays integral on Python 3
frame = frame[:, :w, :]
res_frame_list.append(frame)
return res_frame_list
def send_command(self, command):
"""
Send a command to the Tello and wait for a response.
:param command: Command to send.
:return (str): Response from Tello.
"""
print (">> send cmd: {}".format(command))
self.abort_flag = False
timer = threading.Timer(self.command_timeout, self.set_abort_flag)
self.socket.sendto(command.encode('utf-8'), self.tello_address)
timer.start()
while self.response is None:
if self.abort_flag is True:
break
timer.cancel()
if self.response is None:
response = 'none_response'
else:
response = self.response.decode('utf-8')
self.response = None
return response
def set_abort_flag(self):
"""
Sets self.abort_flag to True.
Used by the timer in Tello.send_command() to indicate that a response
timeout has occurred.
"""
self.abort_flag = True
def takeoff(self):
"""
Initiates take-off.
Returns:
str: Response from Tello, 'OK' or 'FALSE'.
"""
return self.send_command('takeoff')
def set_speed(self, speed):
"""
Sets speed.
This method expects KPH or MPH. The Tello API expects speeds from
1 to 100 centimeters/second.
Metric: .1 to 3.6 KPH
Imperial: .1 to 2.2 MPH
Args:
speed (int|float): Speed.
Returns:
str: Response from Tello, 'OK' or 'FALSE'.
"""
speed = float(speed)
if self.imperial is True:
speed = int(round(speed * 44.704))
else:
speed = int(round(speed * 27.7778))
return self.send_command('speed %s' % speed)
def rotate_cw(self, degrees):
"""
Rotates clockwise.
Args:
degrees (int): Degrees to rotate, 1 to 360.
Returns:
str: Response from Tello, 'OK' or 'FALSE'.
"""
return self.send_command('cw %s' % degrees)
def rotate_ccw(self, degrees):
"""
Rotates counter-clockwise.
Args:
degrees (int): Degrees to rotate, 1 to 360.
Returns:
str: Response from Tello, 'OK' or 'FALSE'.
"""
return self.send_command('ccw %s' % degrees)
def flip(self, direction):
"""
Flips.
Args:
direction (str): Direction to flip, 'l', 'r', 'f', 'b'.
Returns:
str: Response from Tello, 'OK' or 'FALSE'.
"""
return self.send_command('flip %s' % direction)
def get_response(self):
"""
Returns the last raw response received from the Tello.
Returns:
str|bytes: Last response from Tello, or None if nothing has been received yet.
"""
response = self.response
return response
def get_height(self):
"""Returns height(dm) of tello.
Returns:
int: Height(dm) of tello.
"""
height = self.send_command('height?')
height = str(height)
height = ''.join(filter(str.isdigit, height))  # join so int() below also works on Python 3
try:
height = int(height)
self.last_height = height
except:
height = self.last_height
pass
return height
def get_battery(self):
"""Returns percent battery life remaining.
Returns:
int: Percent battery life remaining.
"""
battery = self.send_command('battery?')
try:
battery = int(battery)
except:
pass
return battery
def get_flight_time(self):
"""Returns the number of seconds elapsed during flight.
Returns:
int: Seconds elapsed during flight.
"""
flight_time = self.send_command('time?')
try:
flight_time = int(flight_time)
except:
pass
return flight_time
def get_speed(self):
"""Returns the current speed.
Returns:
int: Current speed in KPH or MPH.
"""
speed = self.send_command('speed?')
try:
speed = float(speed)
if self.imperial is True:
speed = round((speed / 44.704), 1)
else:
speed = round((speed / 27.7778), 1)
except:
pass
return speed
def land(self):
"""Initiates landing.
Returns:
str: Response from Tello, 'OK' or 'FALSE'.
"""
return self.send_command('land')
def move(self, direction, distance):
"""Moves in a direction for a distance.
This method expects meters or feet. The Tello API expects distances
from 20 to 500 centimeters.
Metric: .2 to 5 meters
Imperial: .7 to 16.4 feet
Args:
direction (str): Direction to move, 'forward', 'back', 'right' or 'left'.
distance (int|float): Distance to move.
Returns:
str: Response from Tello, 'OK' or 'FALSE'.
"""
distance = float(distance)
if self.imperial is True:
distance = int(round(distance * 30.48))
else:
distance = int(round(distance * 100))
return self.send_command('%s %s' % (direction, distance))
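# Quick sanity check (assumed inputs): in metric mode move('forward', 1.5) sends
# 'forward 150' (1.5 m * 100 = 150 cm); in imperial mode the same call sends
# 'forward 46' (1.5 ft * 30.48 = 45.72 cm, rounded to 46).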
def move_backward(self, distance):
"""Moves backward for a distance.
See comments for Tello.move().
Args:
distance (int): Distance to move.
Returns:
str: Response from Tello, 'OK' or 'FALSE'.
"""
return self.move('back', distance)
def move_down(self, distance):
"""Moves down for a distance.
See comments for Tello.move().
Args:
distance (int): Distance to move.
Returns:
str: Response from Tello, 'OK' or 'FALSE'.
"""
return self.move('down', distance)
def move_forward(self, distance):
"""Moves forward for a distance.
See comments for Tello.move().
Args:
distance (int): Distance to move.
Returns:
str: Response from Tello, 'OK' or 'FALSE'.
"""
return self.move('forward', distance)
def move_left(self, distance):
"""Moves left for a distance.
See comments for Tello.move().
Args:
distance (int): Distance to move.
Returns:
str: Response from Tello, 'OK' or 'FALSE'.
"""
return self.move('left', distance)
def move_right(self, distance):
"""Moves right for a distance.
See comments for Tello.move().
Args:
distance (int): Distance to move.
"""
return self.move('right', distance)
def move_up(self, distance):
"""Moves up for a distance.
See comments for Tello.move().
Args:
distance (int): Distance to move.
Returns:
str: Response from Tello, 'OK' or 'FALSE'.
"""
return self.move('up', distance)
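# Minimal usage sketch (an assumption, not part of the original module): it expects a
# Tello reachable at the default 192.168.10.1 and free local ports 8889/11111.
if __name__ == '__main__':
    drone = Tello('', 8889)        # bind all local interfaces on port 8889
    print(drone.get_battery())     # e.g. 87
    drone.takeoff()
    time.sleep(5)                  # give the drone time to stabilise
    drone.move_forward(1)          # 1 meter in metric mode
    drone.land()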
|
test_mp.py
|
import torch
import torch.multiprocessing as mp
import time
class warpper():
def __init__(self, t):
self.t = t
def fn(t):
# it is not safe: "+=" on the shared tensor is a non-atomic read-modify-write across processes
'''
PS F:\Dota2BotStepByStep> python .\d2bot\test\test_mp.py
tensor([ 3.5505e+05])
tensor([ 3.8177e+05])
tensor([ 4.7613e+05])
tensor([ 5.4674e+05])
tensor([ 4.7676e+05])
tensor([ 5.9204e+05])
tensor([ 6.4995e+05])
tensor([ 6.8313e+05])
tensor([ 7.0968e+05])
tensor([ 7.2394e+05])
tensor([ 7.2394e+05])
'''
for _ in range(100000):
t += 1
print(t)
def main():
t = torch.tensor([1])
t.share_memory_()
processes = []
for _ in range(10):
proc = mp.Process(target=fn, args=(t,))
proc.start()
processes.append(proc)
for p in processes:
p.join()
print(t)
if __name__ == '__main__':
main()
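# Hedged sketch of a race-free variant (not part of the original test): guard the shared
# update with a multiprocessing.Lock so the read-modify-write in fn() becomes atomic:
#     lock = mp.Lock()
#     def fn(t, lock):
#         for _ in range(100000):
#             with lock:
#                 t += 1
#     proc = mp.Process(target=fn, args=(t, lock))
# With the lock the final value is 10 * 100000 + 1; without it the increments race,
# which is why the printout in the docstring never reaches 1,000,001.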
|
multithread_http_server.py
|
#!/usr/bin/env python
"""
MIT License
Copyright (c) 2018 Ortis (cao.ortis.org@gmail.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import socket
import threading
import time
from http.server import HTTPServer
import logging
class MultiThreadHttpServer:
def __init__(self, host, parallelism, http_handler_class, request_callback=None, log=None):
"""
:param host: address tuple to bind, e.g. ('127.0.0.1', 80)
:param parallelism: number of thread listener and backlog
:param http_handler_class: the handler class extending BaseHTTPRequestHandler
:param request_callback: callback invoked on each incoming request. It can be accessed from the HTTPHandler instance.
Example: self.server.request_callback(
'GET', # specify http method
self # pass the HTTPHandler instance
)
"""
self.host = host
self.parallelism = parallelism
self.http_handler_class = http_handler_class
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.request_callback = request_callback
self.connection_handlers = []
self.stop_requested = False
self.log = log
def start(self, background=False):
self.socket.bind(self.host)
self.socket.listen(self.parallelism)
if self.log is not None:
self.log.debug("Creating "+str(self.parallelism)+" connection handler")
for i in range(self.parallelism):
ch = ConnectionHandler(self.socket, self.http_handler_class, self.request_callback)
ch.start()
self.connection_handlers.append(ch)
if background:
if self.log is not None:
self.log.debug("Serving (background thread)")
threading.Thread(target=self.__serve).start()
else:
if self.log is not None:
self.log.debug("Serving (current thread)")
self.__serve()
def stop(self):
self.stop_requested = True
for ch in self.connection_handlers:
ch.stop()
def __serve(self):
"""
Serve until stop() is called. Blocking method
:return:
"""
while not self.stop_requested:
time.sleep(1)
class ConnectionHandler(threading.Thread, HTTPServer):
def __init__(self, sock, http_handler_class, request_callback=None):
HTTPServer.__init__(self, sock.getsockname(), http_handler_class, False)
self.socket = sock
self.server_bind = self.server_close = lambda self: None
self.HTTPHandler = http_handler_class
self.request_callback = request_callback
threading.Thread.__init__(self)
self.daemon = True
self.stop_requested = False
def stop(self):
self.stop_requested = True
def run(self):
""" Each thread process request forever"""
self.serve_forever()
def serve_forever(self):
""" Handle requests until stopped """
while not self.stop_requested:
self.handle_request()
print("Finish" + str(threading.current_thread()))
|
test_dist_graph_store.py
|
import os
os.environ['OMP_NUM_THREADS'] = '1'
import dgl
import sys
import numpy as np
import time
import socket
from scipy import sparse as spsp
from numpy.testing import assert_array_equal
from multiprocessing import Process, Manager, Condition, Value
import multiprocessing as mp
from dgl.graph_index import create_graph_index
from dgl.data.utils import load_graphs, save_graphs
from dgl.distributed import DistGraphServer, DistGraph
from dgl.distributed import partition_graph, load_partition, load_partition_book, node_split, edge_split
from dgl.distributed import SparseAdagrad, SparseNodeEmbedding
from numpy.testing import assert_almost_equal
import backend as F
import math
import unittest
import pickle
if os.name != 'nt':
import fcntl
import struct
def get_local_usable_addr():
"""Get local usable IP and port
Returns
-------
str
IP address, e.g., '192.168.8.12:50051'
"""
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
# doesn't even have to be reachable
sock.connect(('10.255.255.255', 1))
ip_addr = sock.getsockname()[0]
except ValueError:
ip_addr = '127.0.0.1'
finally:
sock.close()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(("", 0))
sock.listen(1)
port = sock.getsockname()[1]
sock.close()
return ip_addr + ' ' + str(port)
def create_random_graph(n):
arr = (spsp.random(n, n, density=0.001, format='coo') != 0).astype(np.int64)
ig = create_graph_index(arr, readonly=True)
return dgl.DGLGraph(ig)
def run_server(graph_name, server_id, num_clients, shared_mem):
g = DistGraphServer(server_id, "kv_ip_config.txt", num_clients,
'/tmp/dist_graph/{}.json'.format(graph_name),
disable_shared_mem=not shared_mem)
print('start server', server_id)
g.start()
def emb_init(shape, dtype):
return F.zeros(shape, dtype, F.cpu())
def run_client(graph_name, part_id, num_nodes, num_edges):
time.sleep(5)
gpb, graph_name = load_partition_book('/tmp/dist_graph/{}.json'.format(graph_name),
part_id, None)
g = DistGraph("kv_ip_config.txt", graph_name, gpb=gpb)
check_dist_graph(g, num_nodes, num_edges)
def check_dist_graph(g, num_nodes, num_edges):
# Test API
assert g.number_of_nodes() == num_nodes
assert g.number_of_edges() == num_edges
# Test reading node data
nids = F.arange(0, int(g.number_of_nodes() / 2))
feats1 = g.ndata['features'][nids]
feats = F.squeeze(feats1, 1)
assert np.all(F.asnumpy(feats == nids))
# Test reading edge data
eids = F.arange(0, int(g.number_of_edges() / 2))
feats1 = g.edata['features'][eids]
feats = F.squeeze(feats1, 1)
assert np.all(F.asnumpy(feats == eids))
# Test init node data
new_shape = (g.number_of_nodes(), 2)
g.init_ndata('test1', new_shape, F.int32)
feats = g.ndata['test1'][nids]
assert np.all(F.asnumpy(feats) == 0)
# Test init edge data
new_shape = (g.number_of_edges(), 2)
g.init_edata('test1', new_shape, F.int32)
feats = g.edata['test1'][eids]
assert np.all(F.asnumpy(feats) == 0)
# Test sparse emb
try:
new_shape = (g.number_of_nodes(), 1)
emb = SparseNodeEmbedding(g, 'emb1', new_shape, emb_init)
lr = 0.001
optimizer = SparseAdagrad([emb], lr=lr)
with F.record_grad():
feats = emb(nids)
assert np.all(F.asnumpy(feats) == np.zeros((len(nids), 1)))
loss = F.sum(feats + 1, 0)
loss.backward()
optimizer.step()
feats = emb(nids)
assert_almost_equal(F.asnumpy(feats), np.ones((len(nids), 1)) * -lr)
rest = np.setdiff1d(np.arange(g.number_of_nodes()), F.asnumpy(nids))
feats1 = emb(rest)
assert np.all(F.asnumpy(feats1) == np.zeros((len(rest), 1)))
policy = dgl.distributed.PartitionPolicy('node', g.get_partition_book())
grad_sum = dgl.distributed.DistTensor(g, 'node:emb1_sum', policy)
assert np.all(F.asnumpy(grad_sum[nids]) == np.ones((len(nids), 1)))
assert np.all(F.asnumpy(grad_sum[rest]) == np.zeros((len(rest), 1)))
emb = SparseNodeEmbedding(g, 'emb2', new_shape, emb_init)
optimizer = SparseAdagrad([emb], lr=lr)
with F.record_grad():
feats1 = emb(nids)
feats2 = emb(nids)
feats = F.cat([feats1, feats2], 0)
assert np.all(F.asnumpy(feats) == np.zeros((len(nids) * 2, 1)))
loss = F.sum(feats + 1, 0)
loss.backward()
optimizer.step()
feats = emb(nids)
assert_almost_equal(F.asnumpy(feats), np.ones((len(nids), 1)) * math.sqrt(2) * -lr)
rest = np.setdiff1d(np.arange(g.number_of_nodes()), F.asnumpy(nids))
feats1 = emb(rest)
assert np.all(F.asnumpy(feats1) == np.zeros((len(rest), 1)))
except NotImplementedError as e:
pass
# Test write data
new_feats = F.ones((len(nids), 2), F.int32, F.cpu())
g.ndata['test1'][nids] = new_feats
feats = g.ndata['test1'][nids]
assert np.all(F.asnumpy(feats) == 1)
# Test metadata operations.
assert len(g.ndata['features']) == g.number_of_nodes()
assert g.ndata['features'].shape == (g.number_of_nodes(), 1)
assert g.ndata['features'].dtype == F.int64
assert g.node_attr_schemes()['features'].dtype == F.int64
assert g.node_attr_schemes()['test1'].dtype == F.int32
assert g.node_attr_schemes()['features'].shape == (1,)
selected_nodes = np.random.randint(0, 100, size=g.number_of_nodes()) > 30
# Test node split
nodes = node_split(selected_nodes, g.get_partition_book())
nodes = F.asnumpy(nodes)
# We only have one partition, so the local nodes are basically all nodes in the graph.
local_nids = np.arange(g.number_of_nodes())
for n in nodes:
assert n in local_nids
# clean up
if os.environ['DGL_DIST_MODE'] == 'distributed':
dgl.distributed.shutdown_servers()
dgl.distributed.finalize_client()
print('end')
def check_server_client(shared_mem):
prepare_dist()
g = create_random_graph(10000)
# Partition the graph
num_parts = 1
graph_name = 'dist_graph_test_2'
g.ndata['features'] = F.unsqueeze(F.arange(0, g.number_of_nodes()), 1)
g.edata['features'] = F.unsqueeze(F.arange(0, g.number_of_edges()), 1)
partition_graph(g, graph_name, num_parts, '/tmp/dist_graph')
# let's just test on one partition for now.
# We cannot run multiple servers and clients on the same machine.
serv_ps = []
ctx = mp.get_context('spawn')
for serv_id in range(1):
p = ctx.Process(target=run_server, args=(graph_name, serv_id, 1, shared_mem))
serv_ps.append(p)
p.start()
cli_ps = []
for cli_id in range(1):
print('start client', cli_id)
p = ctx.Process(target=run_client, args=(graph_name, cli_id, g.number_of_nodes(),
g.number_of_edges()))
p.start()
cli_ps.append(p)
for p in cli_ps:
p.join()
for p in serv_ps:
p.join()
print('clients have terminated')
@unittest.skipIf(dgl.backend.backend_name == "tensorflow", reason="TF doesn't support some of operations in DistGraph")
def test_server_client():
os.environ['DGL_DIST_MODE'] = 'distributed'
check_server_client(True)
check_server_client(False)
@unittest.skipIf(dgl.backend.backend_name == "tensorflow", reason="TF doesn't support some of operations in DistGraph")
def test_standalone():
os.environ['DGL_DIST_MODE'] = 'standalone'
g = create_random_graph(10000)
# Partition the graph
num_parts = 1
graph_name = 'dist_graph_test_3'
g.ndata['features'] = F.unsqueeze(F.arange(0, g.number_of_nodes()), 1)
g.edata['features'] = F.unsqueeze(F.arange(0, g.number_of_edges()), 1)
partition_graph(g, graph_name, num_parts, '/tmp/dist_graph')
dist_g = DistGraph("kv_ip_config.txt", graph_name,
conf_file='/tmp/dist_graph/{}.json'.format(graph_name))
check_dist_graph(dist_g, g.number_of_nodes(), g.number_of_edges())
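# Verify that node_split/edge_split hand each client exactly the masked nodes/edges
# that live in its own partition, both with num_parts clients and with two clients
# per partition.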
def test_split():
prepare_dist()
g = create_random_graph(10000)
num_parts = 4
num_hops = 2
partition_graph(g, 'dist_graph_test', num_parts, '/tmp/dist_graph', num_hops=num_hops, part_method='metis')
node_mask = np.random.randint(0, 100, size=g.number_of_nodes()) > 30
edge_mask = np.random.randint(0, 100, size=g.number_of_edges()) > 30
selected_nodes = np.nonzero(node_mask)[0]
selected_edges = np.nonzero(edge_mask)[0]
for i in range(num_parts):
dgl.distributed.set_num_client(num_parts)
part_g, node_feats, edge_feats, gpb, _ = load_partition('/tmp/dist_graph/dist_graph_test.json', i)
local_nids = F.nonzero_1d(part_g.ndata['inner_node'])
local_nids = F.gather_row(part_g.ndata[dgl.NID], local_nids)
nodes1 = np.intersect1d(selected_nodes, F.asnumpy(local_nids))
nodes2 = node_split(node_mask, gpb, i)
assert np.all(np.sort(nodes1) == np.sort(F.asnumpy(nodes2)))
local_nids = F.asnumpy(local_nids)
for n in nodes1:
assert n in local_nids
dgl.distributed.set_num_client(num_parts * 2)
nodes3 = node_split(node_mask, gpb, i * 2)
nodes4 = node_split(node_mask, gpb, i * 2 + 1)
nodes5 = F.cat([nodes3, nodes4], 0)
assert np.all(np.sort(nodes1) == np.sort(F.asnumpy(nodes5)))
dgl.distributed.set_num_client(num_parts)
local_eids = F.nonzero_1d(part_g.edata['inner_edge'])
local_eids = F.gather_row(part_g.edata[dgl.EID], local_eids)
edges1 = np.intersect1d(selected_edges, F.asnumpy(local_eids))
edges2 = edge_split(edge_mask, gpb, i)
assert np.all(np.sort(edges1) == np.sort(F.asnumpy(edges2)))
local_eids = F.asnumpy(local_eids)
for e in edges1:
assert e in local_eids
dgl.distributed.set_num_client(num_parts * 2)
edges3 = edge_split(edge_mask, gpb, i * 2)
edges4 = edge_split(edge_mask, gpb, i * 2 + 1)
edges5 = F.cat([edges3, edges4], 0)
assert np.all(np.sort(edges1) == np.sort(F.asnumpy(edges5)))
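# With force_even=True the split balances work across clients instead of following
# partition boundaries, so this test only checks that the union of all per-client
# splits covers every masked node/edge exactly once.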
def test_split_even():
prepare_dist()
g = create_random_graph(10000)
num_parts = 4
num_hops = 2
partition_graph(g, 'dist_graph_test', num_parts, '/tmp/dist_graph', num_hops=num_hops, part_method='metis')
node_mask = np.random.randint(0, 100, size=g.number_of_nodes()) > 30
edge_mask = np.random.randint(0, 100, size=g.number_of_edges()) > 30
selected_nodes = np.nonzero(node_mask)[0]
selected_edges = np.nonzero(edge_mask)[0]
all_nodes1 = []
all_nodes2 = []
all_edges1 = []
all_edges2 = []
for i in range(num_parts):
dgl.distributed.set_num_client(num_parts)
part_g, node_feats, edge_feats, gpb, _ = load_partition('/tmp/dist_graph/dist_graph_test.json', i)
local_nids = F.nonzero_1d(part_g.ndata['inner_node'])
local_nids = F.gather_row(part_g.ndata[dgl.NID], local_nids)
nodes = node_split(node_mask, gpb, i, force_even=True)
all_nodes1.append(nodes)
subset = np.intersect1d(F.asnumpy(nodes), F.asnumpy(local_nids))
print('part {} get {} nodes and {} are in the partition'.format(i, len(nodes), len(subset)))
dgl.distributed.set_num_client(num_parts * 2)
nodes1 = node_split(node_mask, gpb, i * 2, force_even=True)
nodes2 = node_split(node_mask, gpb, i * 2 + 1, force_even=True)
nodes3 = F.cat([nodes1, nodes2], 0)
all_nodes2.append(nodes3)
subset = np.intersect1d(F.asnumpy(nodes), F.asnumpy(nodes3))
print('intersection has', len(subset))
dgl.distributed.set_num_client(num_parts)
local_eids = F.nonzero_1d(part_g.edata['inner_edge'])
local_eids = F.gather_row(part_g.edata[dgl.EID], local_eids)
edges = edge_split(edge_mask, gpb, i, force_even=True)
all_edges1.append(edges)
subset = np.intersect1d(F.asnumpy(edges), F.asnumpy(local_eids))
print('part {} get {} edges and {} are in the partition'.format(i, len(edges), len(subset)))
dgl.distributed.set_num_client(num_parts * 2)
edges1 = edge_split(edge_mask, gpb, i * 2, force_even=True)
edges2 = edge_split(edge_mask, gpb, i * 2 + 1, force_even=True)
edges3 = F.cat([edges1, edges2], 0)
all_edges2.append(edges3)
subset = np.intersect1d(F.asnumpy(edges), F.asnumpy(edges3))
print('intersection has', len(subset))
all_nodes1 = F.cat(all_nodes1, 0)
all_edges1 = F.cat(all_edges1, 0)
all_nodes2 = F.cat(all_nodes2, 0)
all_edges2 = F.cat(all_edges2, 0)
all_nodes = np.nonzero(node_mask)[0]
all_edges = np.nonzero(edge_mask)[0]
assert np.all(all_nodes == F.asnumpy(all_nodes1))
assert np.all(all_edges == F.asnumpy(all_edges1))
assert np.all(all_nodes == F.asnumpy(all_nodes2))
assert np.all(all_edges == F.asnumpy(all_edges2))
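# Write a one-line IP config file ("<addr> 1") that the distributed server/KVStore
# setup reads to locate the single server used by these tests.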
def prepare_dist():
ip_config = open("kv_ip_config.txt", "w")
ip_addr = get_local_usable_addr()
ip_config.write('%s 1\n' % ip_addr)
ip_config.close()
if __name__ == '__main__':
os.makedirs('/tmp/dist_graph', exist_ok=True)
test_split()
test_split_even()
test_server_client()
test_standalone()
|
server.py
|
#########
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import traceback
import tempfile
import re
import collections
import json
import threading
import socket
from Queue import Queue
from StringIO import StringIO
from wsgiref.simple_server import WSGIServer, WSGIRequestHandler
from wsgiref.simple_server import make_server as make_wsgi_server
import bottle
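# NOTE: this module targets Python 2 (Queue/StringIO imports, "except X, e" syntax,
# basestring checks).
# CtxProxy is the transport-agnostic base: process() decodes a JSON request of the
# form {"args": [...]}, walks the wrapped ctx object via process_ctx_request() and
# returns a JSON "result" or "error" payload.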
class CtxProxy(object):
def __init__(self, ctx, socket_url):
self.ctx = ctx
self.socket_url = socket_url
def process(self, request):
try:
typed_request = json.loads(request)
args = typed_request['args']
payload = process_ctx_request(self.ctx, args)
result = json.dumps({
'type': 'result',
'payload': payload
})
except Exception, e:
tb = StringIO()
traceback.print_exc(file=tb)
payload = {
'type': type(e).__name__,
'message': str(e),
'traceback': tb.getvalue()
}
result = json.dumps({
'type': 'error',
'payload': payload
})
return result
def close(self):
pass
class HTTPCtxProxy(CtxProxy):
def __init__(self, ctx, port=None):
port = port or get_unused_port()
socket_url = 'http://localhost:{0}'.format(port)
super(HTTPCtxProxy, self).__init__(ctx, socket_url)
self.port = port
self._started = Queue(1)
self.thread = self._start_server()
self._started.get(timeout=5)
def _start_server(self):
proxy = self
class BottleServerAdapter(bottle.ServerAdapter):
def run(self, app):
class Server(WSGIServer):
allow_reuse_address = True
def handle_error(self, request, client_address):
pass
class Handler(WSGIRequestHandler):
def address_string(self):
return self.client_address[0]
def log_request(*args, **kwargs):
if not self.quiet:
return WSGIRequestHandler.log_request(
*args, **kwargs)
self.srv = make_wsgi_server(
self.host,
self.port,
app,
Server,
Handler)
proxy.server = self.srv
self.port = self.srv.server_port
proxy._started.put(True)
self.srv.serve_forever(poll_interval=0.1)
bottle.post('/', callback=self._request_handler)
def serve():
bottle.run(
host='localhost',
port=self.port,
quiet=True,
server=BottleServerAdapter)
thread = threading.Thread(target=serve)
thread.daemon = True
thread.start()
return thread
def close(self):
self.server.shutdown()
self.server.server_close()
def _request_handler(self):
request = bottle.request.body.read()
response = self.process(request)
return bottle.LocalResponse(
body=response,
status=200,
headers={'content-type': 'application/json'})
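# ZeroMQ-based proxies: a REP socket is bound to the given URL and poll_and_process()
# answers at most one pending request per call (returns False on poll timeout).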
class ZMQCtxProxy(CtxProxy):
def __init__(self, ctx, socket_url):
super(ZMQCtxProxy, self).__init__(ctx, socket_url)
import zmq
self.z_context = zmq.Context(io_threads=1)
self.sock = self.z_context.socket(zmq.REP)
self.sock.bind(self.socket_url)
self.poller = zmq.Poller()
self.poller.register(self.sock, zmq.POLLIN)
def poll_and_process(self, timeout=1):
import zmq
state = dict(self.poller.poll(1000*timeout)).get(self.sock)
if not state == zmq.POLLIN:
return False
request = self.sock.recv()
response = self.process(request)
self.sock.send(response)
return True
def close(self):
self.sock.close()
self.z_context.term()
class UnixCtxProxy(ZMQCtxProxy):
def __init__(self, ctx, socket_path=None):
if not socket_path:
socket_path = tempfile.mktemp(prefix='ctx-', suffix='.socket')
socket_url = 'ipc://{0}'.format(socket_path)
super(UnixCtxProxy, self).__init__(ctx, socket_url)
class TCPCtxProxy(ZMQCtxProxy):
def __init__(self, ctx, ip='127.0.0.1', port=None):
port = port or get_unused_port()
socket_url = 'tcp://{0}:{1}'.format(ip, port)
super(TCPCtxProxy, self).__init__(ctx, socket_url)
class StubCtxProxy(object):
socket_url = ''
def close(self):
pass
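# Resolve a chain of arguments against the ctx object: plain attributes (with "-"
# desugared to "_"), dict access through PathDictAccess (one trailing arg = get,
# two = set), or a call when the current object is callable (a trailing dict becomes
# kwargs). A rough usage sketch (attribute/key names are illustrative only):
#   process_ctx_request(ctx, ['instance', 'runtime-properties', 'port'])        # get
#   process_ctx_request(ctx, ['instance', 'runtime-properties', 'port', 8080])  # set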
def process_ctx_request(ctx, args):
current = ctx
num_args = len(args)
index = 0
while index < num_args:
arg = args[index]
desugared_attr = _desugar_attr(current, arg)
if desugared_attr:
current = getattr(current, desugared_attr)
elif isinstance(current, collections.MutableMapping):
key = arg
path_dict = PathDictAccess(current)
if index + 1 == num_args:
# read dict prop by path
value = path_dict.get(key)
current = value
elif index + 2 == num_args:
# set dict prop by path
value = args[index+1]
current = path_dict.set(key, value)
else:
raise RuntimeError('Illegal argument while accessing dict')
break
elif callable(current):
kwargs = {}
remaining_args = args[index:]
if isinstance(remaining_args[-1], collections.MutableMapping):
kwargs = remaining_args[-1]
remaining_args = remaining_args[:-1]
current = current(*remaining_args, **kwargs)
break
else:
raise RuntimeError('{0} cannot be processed in {1}'
.format(arg, args))
index += 1
if callable(current):
current = current()
return current
def _desugar_attr(obj, attr):
if not isinstance(attr, basestring):
return None
if hasattr(obj, attr):
return attr
attr = attr.replace('-', '_')
if hasattr(obj, attr):
return attr
return None
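# Dotted-path accessor for nested dicts, e.g. "a.b[0].c"; "[n]" segments index into
# lists, and missing intermediate keys are created as empty dicts while traversing.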
class PathDictAccess(object):
pattern = re.compile("(.+)\[(\d+)\]")
def __init__(self, obj):
self.obj = obj
def set(self, prop_path, value):
obj, prop_name = self._get_parent_obj_prop_name_by_path(prop_path)
obj[prop_name] = value
def get(self, prop_path):
value = self._get_object_by_path(prop_path)
return value
def _get_object_by_path(self, prop_path):
current = self.obj
for prop_segment in prop_path.split('.'):
match = self.pattern.match(prop_segment)
if match:
index = int(match.group(2))
property_name = match.group(1)
if property_name not in current:
self._raise_illegal(prop_path)
if type(current[property_name]) != list:
self._raise_illegal(prop_path)
current = current[property_name][index]
else:
if prop_segment not in current:
current[prop_segment] = {}
current = current[prop_segment]
return current
def _get_parent_obj_prop_name_by_path(self, prop_path):
split = prop_path.split('.')
if len(split) == 1:
return self.obj, prop_path
parent_path = '.'.join(split[:-1])
parent_obj = self._get_object_by_path(parent_path)
prop_name = split[-1]
return parent_obj, prop_name
@staticmethod
def _raise_illegal(prop_path):
raise RuntimeError('illegal path: {0}'.format(prop_path))
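# Bind a throwaway socket to port 0 so the OS picks a free port, then release it.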
def get_unused_port():
sock = socket.socket()
sock.bind(('127.0.0.1', 0))
_, port = sock.getsockname()
sock.close()
return port
|
multiprocessing_logging_1.py
|
import logging
import logging.handlers
import multiprocessing
from random import choice, random
import time
import utils
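# Multiprocessing logging pattern: worker processes push LogRecords into a shared
# Queue via QueueHandler; a dedicated listener process pops them and writes to a
# RotatingFileHandler, so only one process ever touches the log file.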
def listener_configurer():
root = logging.getLogger()
log_file_path = f"{utils.get_logs_directory()}/multiprocessing_logging_1.log"
h = logging.handlers.RotatingFileHandler(log_file_path, "a", 300, 10)
f = logging.Formatter(
"%(asctime)s %(processName)-10s %(name)s %(levelname)-8s %(message)s"
)
h.setFormatter(f)
root.addHandler(h)
def listener_process(queue, configurer):
configurer()
while True:
try:
record = queue.get()
if record is None:
break
logger = logging.getLogger(record.name)
logger.handle(record)
except Exception:
import sys, traceback
print("Whoops! Problem:", file=sys.stderr)
traceback.print_exc(file=sys.stderr)
LEVELS = [logging.DEBUG, logging.INFO, logging.WARNING, logging.ERROR, logging.CRITICAL]
LOGGERS = ["a.b.c", "d.e.f"]
MESSAGES = ["Random message #1", "Random message #2", "Random message #3"]
def worker_configurer(queue):
h = logging.handlers.QueueHandler(queue)
root = logging.getLogger()
root.addHandler(h)
root.setLevel(logging.DEBUG)
def worker_process(queue, configurer):
configurer(queue)
name = multiprocessing.current_process().name
print("Worker started: %s" % name)
for i in range(10):
time.sleep(random())
logger = logging.getLogger(choice(LOGGERS))
level = choice(LEVELS)
message = choice(MESSAGES)
logger.log(level, message)
print("Worker finished: %s" % name)
def main():
queue = multiprocessing.Queue(-1)
listener = multiprocessing.Process(
target=listener_process, args=(queue, listener_configurer)
)
listener.start()
workers = []
for i in range(10):
worker = multiprocessing.Process(
target=worker_process, args=(queue, worker_configurer)
)
workers.append(worker)
worker.start()
for w in workers:
w.join()
queue.put_nowait(None)
listener.join()
if __name__ == "__main__":
main()
|
api.py
|
import hashlib
import os
import random
import sqlite3
import string
import sys
import threading
import time
from typing import List, Dict, Tuple
import cv2
import numpy as np
# import pd2image_patched
from pd2image_patched import convert_from_path
from pytesseract import pytesseract, Output
import api_interface
class IndexJob(api_interface.IndexJob):
def __init__(self, path, db_factory: api_interface.DbFactory, app_data_path, poppler_path=None, tesseract_exe=None):
self.path = path
self.db_factory = db_factory
self.app_data_path = app_data_path
self.poppler_path = poppler_path
self.tesseract_exe = tesseract_exe
self._stop = False
self.curr_file_idx = None
self.num_files = None
self.status = None # type: str
self.finished = False
self.__messages_mutex = threading.Lock()
self.__messages = None # type: List[str]
def start(self):
# init vars
self._stop = False
self.curr_file_idx = None
self.num_files = None
self.__messages = [] # type: List[str]
self.finished = False
# start thread
thread = threading.Thread(target=self.run, args=())
thread.daemon = True # Daemonize thread
thread.start() # Start the execution
def stop(self):
self._stop = True
def get_path(self) -> str:
return self.path
def get_curr_file_index(self) -> int:
return self.curr_file_idx
def get_num_files(self) -> int:
return self.num_files
def get_messages(self) -> List[str]:
messages = []
with self.__messages_mutex:
for message in self.__messages:
messages.append(message)
self.__messages.clear()
return messages
def is_finished(self) -> bool:
return self.finished
def __add_message(self, msg):
with self.__messages_mutex:
self.__messages.append(msg)
def __get_files(self):
scan_files = []
for root, dirs, files in os.walk(self.path):
for basename in files:
if self._stop:
return []
file_name, ext = os.path.splitext(basename)
ext = ext[1:].lower()
if ext not in ["jpg", "jpeg", "png", "bmp", "pdf"]:
continue
path = os.path.join(root, basename).replace("\\", "/")
scan_files.append(path)
return scan_files
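# OCR one image: grayscale -> Gaussian blur -> adaptive threshold, run Tesseract's
# image_to_data, then store the image row plus one 'texts' row per recognized word
# (with its bounding box); PDF pages are additionally linked to their document row.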
def __process_image_file(self, c: sqlite3.Cursor, path, dir_id, doc_path, page):
if c.execute("select id from images where path = ?", (path,)).fetchone() is not None:
self.__add_message("Skipping already indexed file {}.".format(path.replace(self.path + "/", "")))
return
img = cv2.imread(path)
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(img_gray, (9, 9), 0)
img = cv2.adaptiveThreshold(blur, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
# img = 255 - thresh
# cv2.imwrite(path.replace(self.path, "C:/Users/mueller/Desktop/output"), img)
# img = remove_noise.process_image_for_ocr(path)
if self.tesseract_exe:
pytesseract.tesseract_cmd = self.tesseract_exe
d = pytesseract.image_to_data(img, output_type=Output.DICT)
c.execute("insert into 'images' (path, directory_id) values (?, ?)", (path, dir_id))
image_id = c.lastrowid
for j in range(len(d["text"])):
if not d['text'][j].strip():
continue
c.execute(
"insert into 'texts' (text, left, top, width, height, image_id) values (?, ?, ?, ?, ?, ?)",
(d['text'][j], d['left'][j], d['top'][j], d['width'][j], d['height'][j], image_id))
if doc_path is not None and page is not None:
doc_id = c.execute("select id from documents where path = ?", (doc_path,)).fetchone()
if doc_id is None:
c.execute("insert into documents (path, directory_id) values (?, ?)", (doc_path, dir_id))
doc_id = c.lastrowid
else:
doc_id = doc_id[0]
c.execute("update images set document_id = ?, doc_page = ? where id = ?", (doc_id, page, image_id))
def run(self):
db = None
try:
# get dir id
db = self.db_factory.create()
c = db.cursor()
res = c.execute("select id from directories where path = ?", (self.path,)).fetchone()
if res is not None:
dir_id = res[0]
else:
c.execute("insert into 'directories' (path) values (?)", (self.path,))
dir_id = c.lastrowid
db.commit()
# collect files
self.__add_message("Scanning files in {}".format(self.path))
scan_files = self.__get_files()
self.num_files = len(scan_files)
self.__add_message("Scanning files finished. Found {} files for indexing.".format(self.num_files))
# process files
for i in range(self.num_files):
if self._stop:
break
self.curr_file_idx = i
path = scan_files[i]
rel_path = path.replace(self.path + "/", "")
self.__add_message(
"File {} of {}: Analyzing {}.".format(i + 1, self.num_files, rel_path))
try:
_, ext = os.path.splitext(path)
image_paths = []
if ext.lower() == ".pdf":
if c.execute("select id from documents where path = ?", (path,)).fetchone() is not None:
self.__add_message(
"Skipping already indexed file {}.".format(path.replace(self.path + "/", "")))
continue
self.__add_message(
"Converting {} to single image files.".format(rel_path))
if self.poppler_path:
images = convert_from_path(path, 300, poppler_path=self.poppler_path)
else:
images = convert_from_path(path, 300)
page = 0
for image in images:
page = page + 1
# while True:
# img_path = self.app_data_path + "/" + self.random_string() + ".png"
# if not os.path.exists(img_path):
# break
img_path = self.app_data_path + "/" + hashlib.md5(
path.encode('utf-8')).hexdigest() + "_page" + str(
page) + ".jpg"
self.__add_message(
"Writing page {} of {} as image {}.".format(page, len(images), img_path))
# cv2.imwrite(img_path, image)
if not os.path.exists(img_path):
image.save(img_path, 'JPEG')
image_paths.append((img_path, path, page))
else:
image_paths = [(path, None, None)]
for img_path in image_paths:
self.__add_message(
"Extracting text from {}.".format(
img_path[0].replace(self.path + "/", "")))
try:
self.__process_image_file(c, img_path[0], dir_id, img_path[1], img_path[2])
except AttributeError as e:
self.__add_message("Attribute error occured while converting {}: {}".format(
img_path[0].replace(self.path + "/", ""), str(e.__str__())))
except Exception as e:
self.__add_message("Exception occured while converting {}: {}".format(
img_path[0].replace(self.path + "/", ""), str(e.__str__())))
except:
self.__add_message("An unknown error occured while converting {}: {}".format(
img_path[0].replace(self.path + "/", ""), str(sys.exc_info()[0])))
db.commit()
except Exception as e:
self.__add_message("An Exception occured while processing {}: {}".format(
rel_path, str(e)))
except:
self.__add_message("An unknown error occured while processing {}: {}".format(
rel_path, sys.exc_info()[0]))
# commit or rollback
if self._stop:
self.__add_message("Indexing stopped")
db.rollback()
else:
self.__add_message("Indexing successfully finished")
db.commit()
self.finished = True
except:
self._stop = True
e = sys.exc_info()[0]
self.__add_message("An unknown error occured: " + str(e))
if db is not None:
db.rollback()
def random_string(self, stringLength=5):
letters = string.ascii_lowercase
return ''.join(random.choice(letters) for i in range(stringLength))
class TextMatch(api_interface.TextMatch):
def __init__(self, text, top, left, width, height):
self.text = text
self.top = top
self.left = left
self.width = width
self.height = height
def get_text(self) -> str:
return self.text
class Result(api_interface.Result):
def __init__(self, path, page, doc_path, text_matches: List[TextMatch], image_id):
self.path = path
self.page = page
self.doc_path = doc_path
self.text_matches = text_matches
self.image_id = image_id
def get_path(self) -> str:
return self.doc_path if self.doc_path is not None else self.path
def get_text_matches(self) -> List[TextMatch]:
return self.text_matches
def get_page(self) -> int:
return self.page
def get_preview_image(self) -> np.ndarray:
preview_image = None
if not os.path.exists(self.path):
return preview_image
image = cv2.imread(self.path)
overlay = image.copy()
preview_image = image
color_per_text = {}
for text_match in self.text_matches:
x, y, w, h = text_match.left, text_match.top, text_match.width, text_match.height # Rectangle parameters
text = text_match.get_text()
if text not in color_per_text.keys():
color_per_text[text] = tuple(list(np.random.choice(range(128, 255), size=3)))
color = color_per_text[text]
cv2.rectangle(overlay, (x, y), (x + w, y + h), (0, 0, 255), 5)  # Draw a red outline around the match
alpha = 0.65 # Transparency factor.
# Following line overlays transparent rectangle over the image
preview_image = cv2.addWeighted(overlay, alpha, preview_image, 1 - alpha, 0)
return preview_image
class WheresTheFckReceipt(api_interface.WheresTheFckReceipt):
def __init__(self, app_data_dir, db_factory: api_interface.DbFactory,
index_job_factory: api_interface.IndexJobFactory):
self.app_data_dir = app_data_dir
self.db_factory = db_factory
self.index_job_factory = index_job_factory
self.db = None
def get_last_directory(self) -> str:
self.assert_db()
c = self.db.cursor()
c.execute("select path from directories order by id desc limit 1")
row = c.fetchone()
return row[0] if row and os.path.exists(row[0]) else None
def assert_db(self):
if not self.db:
self.db = self.db_factory.create()
def get_directories(self) -> List[str]:
self.assert_db()
c = self.db.cursor()
c.execute("select path from directories")
rows = c.fetchall()
return [i[0] for i in rows]
def add_directory(self, directory) -> IndexJob:
poppler_path = self.get_setting("poppler_path")
tesseract_exe = self.get_setting("tesseract_exe")
return self.index_job_factory.create(directory, self.db_factory, self.app_data_dir, poppler_path, tesseract_exe)
def remove_directory(self, directory, progress_updater: api_interface.ProgressUpdater):
self.assert_db()
c = self.db.cursor()
own_images = c.execute(
"select images.path as path, images.id as id from images, directories where directories.path = ? and images.document_id IS NOT NULL and directories.id = images.directory_id", (directory,))
own_images_list = own_images.fetchall()
progress_updater.set_range(0, len(own_images_list))
for idx, own_image in enumerate(own_images_list):
#time.sleep(1)
if progress_updater.canceled():
break
path = own_image[0] # type: str
if path.startswith(self.app_data_dir) and os.path.exists(path):
os.remove(path)
c.execute("delete from images where id = ?", (own_image[1],))
progress_updater.set_value(idx + 1)
if progress_updater.canceled() is False:
c.execute("delete from directories where path = ?", (directory,))
self.db.commit()
def update_directory(self, directory):
return self.add_directory(directory)
def reindex_directory(self, directory) -> IndexJob:
self.remove_directory(directory)
return self.add_directory(directory)
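# Multi-word search: each whitespace-separated query part is matched with LIKE
# against the OCR'd words; from the second part on, candidates are restricted to
# image ids already matched by earlier parts, and only matches of the last part make
# it into the final result list (grouped per image with all text hits).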
def search(self, query: str, limit: int = None, case_sensitive: bool = False) -> List[Result]:
if query is None or query == "":
return []
self.assert_db()
c = self.db.cursor()
if case_sensitive:
text_where_clause = "texts.text like ?"
else:
text_where_clause = "lower(texts.text) like ?"
query_parts = query.split()
result_list = []
final_result_list = []
previous_ids = []
for query_part_idx, query_part in enumerate(query_parts):
args = []
sql = "select images.path as path, texts.text as text, images.doc_page as page, " \
"images.document_id as doc_id, texts.top as top, texts.left as left, texts.width as width, " \
"texts.height as height, images.id as image_id"
sql = sql + " from images, texts"
sql = sql + " where texts.image_id = images.id and " + text_where_clause
if case_sensitive:
query_part = "%" + query_part.lower() + "%"
else:
query_part = "%" + query_part + "%"
args.append(query_part)
if len(previous_ids) > 0:
sql = sql + " and ("
for idx, previous_id in enumerate(previous_ids):
sql = sql + " images.id = ?"
args.append(previous_id)
if idx + 1 < len(previous_ids):
sql = sql + " or"
sql = sql + " )"
if limit:
sql = sql + " limit ?"
args.append(limit)
c.execute(sql, tuple(args))
rows = c.fetchall()
for row in rows:
image_path = row[0]
text = row[1]
page = row[2]
doc_id = row[3]
top = row[4]
left = row[5]
width = row[6]
height = row[7]
image_id = row[8]
if image_id not in previous_ids:
previous_ids.append(image_id)
doc_path = c.execute("select path from documents where id = ?", (doc_id,)).fetchone()[
0] if doc_id else None
text_match = TextMatch(text, top, left, width, height)
result = None
for curr_result in result_list:
if image_id == curr_result.image_id:
result = curr_result
break
if result is None:
text_matches = [text_match]
result = Result(image_path, page, doc_path, text_matches, image_id)
result_list.append(result)
else:
result.text_matches.append(text_match)
if query_part_idx + 1 == len(query_parts):
final_result_exists = False
for curr_result in final_result_list:
if image_id == curr_result.image_id:
final_result_exists = True
break
if not final_result_exists:
final_result_list.append(result)
return final_result_list
def get_setting(self, key):
settings = self.get_settings()
if key in settings.keys():
value_ = settings[key][0]
type_ = settings[key][2]
if type_ == "int":
value_ = int(value_)
elif value_ == "":
return None
return value_
return None
def get_settings(self) -> Dict[str, Tuple[str, str, str]]:
self.assert_db()
c = self.db.cursor()
c.execute("select key, value, help, type from settings where hidden != 1")
rows = c.fetchall()
settings = {}
settings_value = {}
for row in rows:
settings[row[0]] = (row[1], row[2], row[3])
settings_value[row[0]] = row[1]
return settings
def set_settings(self, settings: Dict[str, str]):
self.assert_db()
c = self.db.cursor()
existing_settings = self.get_settings().keys()
for key, value in settings.items():
if key in existing_settings:
c.execute("update settings set value=? where key = ?", (value, key))
else:
c.execute("insert into settings (key, value) values(?, ?)", (key, value))
self.db.commit()
class IndexJobFactory(api_interface.IndexJobFactory):
def create(self, path, db_factory: api_interface.DbFactory, app_data_dir, poppler_path=None,
tesseract_exe=None) -> IndexJob:
return IndexJob(path, db_factory, app_data_dir, poppler_path, tesseract_exe)
class DbFactory(api_interface.DbFactory):
def __init__(self, app_data_dir: str, delete_db=False):
self.app_data_dir = app_data_dir
self.db_path = app_data_dir + "/db.sqlite3"
self.delete_db = delete_db
def create(self) -> sqlite3.Connection:
if self.delete_db and os.path.exists(self.db_path):
os.remove(self.db_path)
# database
db_path = self.db_path
if not os.path.exists(os.path.dirname(db_path)):
os.makedirs(os.path.dirname(db_path))
create_database = not os.path.exists(db_path)
db = sqlite3.connect(db_path)
c = db.cursor()
c.execute("PRAGMA foreign_keys = ON")
if create_database:
c.execute(
"create table settings (key text primary key, value text, help text, type text not null, hidden integer not null)")
self.update_schema(c)
db.commit()
return db
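# Incremental schema migrations driven by the 'current_schema_version' setting: a
# fresh database (no version row) gets the base tables first, then each
# "current_schema_version <= N" block applies one migration step, and the stored
# version is finally bumped to 6.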
def update_schema(self, c: sqlite3.Cursor):
current_schema_version = None
rows = c.execute("select value from settings where key = 'current_schema_version'").fetchone()
if rows:
current_schema_version = int(rows[0])
if current_schema_version is None:
c.execute("CREATE TABLE directories ( id INTEGER PRIMARY KEY AUTOINCREMENT, path TEXT UNIQUE )")
c.execute(
"CREATE TABLE documents ( id INTEGER PRIMARY KEY AUTOINCREMENT, path TEXT UNIQUE NOT NULL, directory_id INTEGER NOT NULL, FOREIGN KEY(directory_id) REFERENCES directories(id) ON DELETE CASCADE )")
c.execute(
"CREATE TABLE images ( id INTEGER PRIMARY KEY AUTOINCREMENT, path TEXT UNIQUE NOT NULL, directory_id INTEGER NOT NULL, document_id INTEGER, doc_page INTEGER, FOREIGN KEY(directory_id) REFERENCES directories(id) ON DELETE CASCADE, FOREIGN KEY(document_id) REFERENCES documents(id) )")
c.execute(
"CREATE TABLE texts ( id INTEGER PRIMARY KEY AUTOINCREMENT, text TEXT NOT NULL, left INTEGER NOT NULL, top INTEGER NOT NULL, width INTEGER NOT NULL, height INTEGER NOT NULL, image_id INTEGER NOT NULL, FOREIGN KEY(image_id) REFERENCES images(id) ON DELETE CASCADE )")
c.execute(
"insert into settings (key, value, help, type, hidden) values('current_schema_version', '1', 'Current Schema Version', 'int', 1)")
c.execute(
"insert into settings (key, value, help, type, hidden) values('tesseract_exe', null, 'Path to Tesseract Exe', 'file', 0)")
c.execute(
"insert into settings (key, value, help, type, hidden) values('pdftopmm_exe', null, 'Path to Poppler Exe', 'file', 0)")
current_schema_version = 0 if current_schema_version is None else current_schema_version
if current_schema_version <= 1:
c.execute(
"insert into settings (key, value, help, type, hidden) values('default_limit', 0, 'The default limit for the search', 'int', 0)")
if current_schema_version <= 2:
c.execute(
"insert into settings (key, value, help, type, hidden) values('poppler_path', 0, 'The path for poppler', 'dir', 0)")
c.execute("delete from settings where key = 'pdftopmm_exe'")
if current_schema_version <= 3:
c.execute("update settings set value=NULL where key = 'poppler_path'")
if current_schema_version <= 4:
c.execute(
"update settings set help='Path to the poppler bin directory. Leave this empty if the path is in your PATH variable. Poppler for Windows can be downloaded from https://blog.alivate.com.au/poppler-windows/' where key = 'poppler_path'")
c.execute(
"update settings set help='Path to Tesseract executable. Leave this empty if the executable is in your PATH variable. Tesseract for Windows can be downloaded from https://github.com/UB-Mannheim/tesseract/wiki' where key = 'tesseract_exe'")
if current_schema_version <= 5:
c.execute(
"update settings set help='Path to the poppler bin directory. Leave this empty if the path is in your PATH variable. Poppler for Windows can be downloaded from <a href=\"https://blog.alivate.com.au/poppler-windows/\">https://blog.alivate.com.au/poppler-windows/</a>' where key = 'poppler_path'")
c.execute(
"update settings set help='Path to Tesseract executable. Leave this empty if the executable is in your PATH variable. Tesseract for Windows can be downloaded from <a href=\"https://github.com/UB-Mannheim/tesseract/wiki\">https://github.com/UB-Mannheim/tesseract/wiki</a>' where key = 'tesseract_exe'")
if current_schema_version <= 6:
c.execute("delete from settings where key = 'default_limit'")
c.execute("update settings set value=? where key = 'current_schema_version'", (6,))
|
scdlbot.py
|
# -*- coding: utf-8 -*-
"""Main module."""
import gc
import pathlib
import random
import shelve
import shutil
from datetime import datetime
from multiprocessing import Process, Queue
from queue import Empty
from subprocess import PIPE, TimeoutExpired # skipcq: BAN-B404
from urllib.parse import urljoin
from uuid import uuid4
import ffmpeg
from boltons.urlutils import find_all_links, URL
from mutagen.id3 import ID3
from mutagen.mp3 import EasyMP3 as MP3
from prometheus_client import Summary
from telegram import (Message, Chat, ChatMember, MessageEntity, ChatAction, InlineKeyboardMarkup,
InlineKeyboardButton, InlineQueryResultAudio, Update)
from telegram.error import (TelegramError, Unauthorized, BadRequest,
TimedOut, ChatMigrated, NetworkError)
from telegram.ext import (Updater, CommandHandler, MessageHandler, Filters, InlineQueryHandler,
CallbackQueryHandler, CallbackContext)
from telegram.ext.dispatcher import run_async
from scdlbot.utils import *
logger = logging.getLogger(__name__)
REQUEST_TIME = Summary('request_processing_seconds', 'Time spent processing request')
class ScdlBot:
def __init__(self, tg_bot_token, proxies=None,
store_chat_id=None, no_flood_chat_ids=None, alert_chat_ids=None,
dl_dir="/tmp/scdlbot", dl_timeout=300, max_convert_file_size=80_000_000,
chat_storage_file="/tmp/scdlbotdata", app_url=None,
serve_audio=False, cookies_file=None, source_ips=None):
self.SERVE_AUDIO = serve_audio
if self.SERVE_AUDIO:
self.MAX_TG_FILE_SIZE = 19_000_000
else:
self.MAX_TG_FILE_SIZE = 45_000_000
self.SITES = {
"sc": "soundcloud",
"scapi": "api.soundcloud",
"bc": "bandcamp",
"yt": "youtu",
}
self.APP_URL = app_url
self.DL_TIMEOUT = dl_timeout
self.MAX_CONVERT_FILE_SIZE = max_convert_file_size
self.HELP_TEXT = get_response_text('help.tg.md')
self.SETTINGS_TEXT = get_response_text('settings.tg.md')
self.DL_TIMEOUT_TEXT = get_response_text('dl_timeout.txt').format(self.DL_TIMEOUT // 60)
self.WAIT_BIT_TEXT = [get_response_text('wait_bit.txt'), get_response_text('wait_beat.txt'),
get_response_text('wait_beet.txt')]
self.NO_AUDIO_TEXT = get_response_text('no_audio.txt')
self.NO_URLS_TEXT = get_response_text('no_urls.txt')
self.OLD_MSG_TEXT = get_response_text('old_msg.txt')
self.REGION_RESTRICTION_TEXT = get_response_text('region_restriction.txt')
self.DIRECT_RESTRICTION_TEXT = get_response_text('direct_restriction.txt')
self.LIVE_RESTRICTION_TEXT = get_response_text('live_restriction.txt')
# self.chat_storage = {}
self.chat_storage = shelve.open(chat_storage_file, writeback=True)
for chat_id in (no_flood_chat_ids or []):
self.init_chat(chat_id=chat_id, chat_type=Chat.PRIVATE if chat_id > 0 else Chat.SUPERGROUP, flood="no")
self.ALERT_CHAT_IDS = set(alert_chat_ids) if alert_chat_ids else set()
self.STORE_CHAT_ID = store_chat_id
self.DL_DIR = dl_dir
self.COOKIES_DOWNLOAD_FILE = "/tmp/scdlbot_cookies.txt"
self.proxies = proxies
self.source_ips = source_ips
# https://yandex.com/support/music-app-ios/search-and-listen/listening-abroad.html
self.cookies_file = cookies_file
# if sc_auth_token:
# config = configparser.ConfigParser()
# config['scdl'] = {}
# config['scdl']['path'] = self.DL_DIR
# config['scdl']['auth_token'] = sc_auth_token
# config_dir = os.path.join(os.path.expanduser('~'), '.config', 'scdl')
# config_path = os.path.join(config_dir, 'scdl.cfg')
# os.makedirs(config_dir, exist_ok=True)
# with open(config_path, 'w') as config_file:
# config.write(config_file)
self.updater = Updater(token=tg_bot_token, use_context=True)
dispatcher = self.updater.dispatcher
start_command_handler = CommandHandler('start', self.help_command_callback)
dispatcher.add_handler(start_command_handler)
help_command_handler = CommandHandler('help', self.help_command_callback)
dispatcher.add_handler(help_command_handler)
settings_command_handler = CommandHandler('settings', self.settings_command_callback)
dispatcher.add_handler(settings_command_handler)
dl_command_handler = CommandHandler('dl', self.common_command_callback,
filters=~Filters.update.edited_message & ~Filters.forwarded)
dispatcher.add_handler(dl_command_handler)
link_command_handler = CommandHandler('link', self.common_command_callback,
filters=~Filters.update.edited_message & ~Filters.forwarded)
dispatcher.add_handler(link_command_handler)
message_with_links_handler = MessageHandler(~Filters.update.edited_message &
((Filters.text & (Filters.entity(MessageEntity.URL) |
Filters.entity(MessageEntity.TEXT_LINK))) |
(Filters.caption & (Filters.caption_entity(MessageEntity.URL) |
Filters.caption_entity(
MessageEntity.TEXT_LINK)))),
self.common_command_callback)
dispatcher.add_handler(message_with_links_handler)
button_query_handler = CallbackQueryHandler(self.button_query_callback)
dispatcher.add_handler(button_query_handler)
inline_query_handler = InlineQueryHandler(self.inline_query_callback)
dispatcher.add_handler(inline_query_handler)
unknown_handler = MessageHandler(Filters.command, self.unknown_command_callback)
dispatcher.add_handler(unknown_handler)
dispatcher.add_error_handler(self.error_callback)
self.bot_username = self.updater.bot.get_me().username
self.RANT_TEXT_PRIVATE = "Read /help to learn how to use me"
self.RANT_TEXT_PUBLIC = "[Start me in PM to read help and learn how to use me](t.me/{}?start=1)".format(
self.bot_username)
def start(self, use_webhook=False, webhook_host="127.0.0.1", webhook_port=None, cert_file=None, cert_key_file=None,
url_path="scdlbot"):
if use_webhook:
self.updater.start_webhook(listen=webhook_host,
port=webhook_port,
url_path=url_path)
# cert=cert_file if cert_file else None,
# key=cert_key_file if cert_key_file else None,
# webhook_url=urljoin(app_url, url_path))
self.updater.bot.set_webhook(url=urljoin(self.APP_URL, url_path),
certificate=open(cert_file, 'rb') if cert_file else None)
else:
self.updater.start_polling()
logger.warning("Bot started")
self.updater.idle()
def unknown_command_callback(self, update: Update, context: CallbackContext):
pass
# bot.send_message(chat_id=update.message.chat_id, text="Unknown command")
def error_callback(self, update: Update, context: CallbackContext): # skipcq: PYL-R0201
try:
raise context.error
except Unauthorized:
# remove update.message.chat_id from conversation list
logger.debug('Update {} caused Unauthorized error: {}'.format(update, context.error))
except BadRequest:
# handle malformed requests - read more below!
logger.debug('Update {} caused BadRequest error: {}'.format(update, context.error))
except TimedOut:
# handle slow connection problems
logger.debug('Update {} caused TimedOut error: {}'.format(update, context.error))
except NetworkError:
# handle other connection problems
logger.debug('Update {} caused NetworkError: {}'.format(update, context.error))
except ChatMigrated as e:
# the chat_id of a group has changed, use e.new_chat_id instead
logger.debug('Update {} caused ChatMigrated error: {}'.format(update, context.error))
except TelegramError:
# handle all other telegram related errors
logger.debug('Update {} caused TelegramError: {}'.format(update, context.error))
def init_chat(self, message=None, chat_id=None, chat_type=None, flood="yes"):
if message:
chat_id = str(message.chat_id)
chat_type = message.chat.type
else:
chat_id = str(chat_id)
if chat_id not in self.chat_storage:
self.chat_storage[chat_id] = {}
if "settings" not in self.chat_storage[chat_id]:
self.chat_storage[chat_id]["settings"] = {}
if "mode" not in self.chat_storage[chat_id]["settings"]:
if chat_type == Chat.PRIVATE:
self.chat_storage[chat_id]["settings"]["mode"] = "dl"
else:
self.chat_storage[chat_id]["settings"]["mode"] = "ask"
if "flood" not in self.chat_storage[chat_id]["settings"]:
self.chat_storage[chat_id]["settings"]["flood"] = flood
if "rant_msg_ids" not in self.chat_storage[chat_id]["settings"]:
self.chat_storage[chat_id]["settings"]["rant_msg_ids"] = []
self.chat_storage.sync()
# logger.debug("Current chat_storage: %r", self.chat_storage)
def cleanup_chat(self, chat_id):
chat_msgs = self.chat_storage[str(chat_id)].copy()
for msg_id in chat_msgs:
if msg_id != "settings":
timedelta = datetime.now() - self.chat_storage[str(chat_id)][msg_id]["message"].date
if timedelta.days > 0:
self.chat_storage[str(chat_id)].pop(msg_id)
self.chat_storage.sync()
def rant_and_cleanup(self, bot, chat_id, rant_text, reply_to_message_id=None):
rant_msg = bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id,
text=rant_text, parse_mode='Markdown', disable_web_page_preview=True)
flood = self.chat_storage[str(chat_id)]["settings"]["flood"]
if flood == "no":
rant_msgs = self.chat_storage[str(chat_id)]["settings"]["rant_msg_ids"].copy()
for rant_msg_id in rant_msgs:
try:
bot.delete_message(chat_id=chat_id, message_id=rant_msg_id)
except:
pass
self.chat_storage[str(chat_id)]["settings"]["rant_msg_ids"].remove(rant_msg_id)
self.chat_storage[str(chat_id)]["settings"]["rant_msg_ids"].append(rant_msg.message_id)
self.chat_storage.sync()
def help_command_callback(self, update: Update, context: CallbackContext):
self.init_chat(update.message)
event_name = "help"
entities = update.message.parse_entities(types=[MessageEntity.BOT_COMMAND])
for entity_value in entities.values():
event_name = entity_value.replace("/", "").replace("@{}".format(self.bot_username), "")
break
log_and_track(event_name, update.message)
chat_id = update.message.chat_id
chat_type = update.message.chat.type
reply_to_message_id = update.message.message_id
flood = self.chat_storage[str(chat_id)]["settings"]["flood"]
if chat_type != Chat.PRIVATE and flood == "no":
self.rant_and_cleanup(context.bot, chat_id, self.RANT_TEXT_PUBLIC, reply_to_message_id=reply_to_message_id)
else:
context.bot.send_message(chat_id=chat_id, text=self.HELP_TEXT,
parse_mode='Markdown', disable_web_page_preview=True)
def get_wait_text(self):
return random.choice(self.WAIT_BIT_TEXT)
def get_settings_inline_keyboard(self, chat_id):
mode = self.chat_storage[str(chat_id)]["settings"]["mode"]
flood = self.chat_storage[str(chat_id)]["settings"]["flood"]
emoji_yes = "✅"
emoji_no = "❌"
button_dl = InlineKeyboardButton(text=" ".join([emoji_yes if mode == "dl" else emoji_no, "Download"]),
callback_data=" ".join(["settings", "dl"]))
button_link = InlineKeyboardButton(text=" ".join([emoji_yes if mode == "link" else emoji_no, "Links"]),
callback_data=" ".join(["settings", "link"]))
button_ask = InlineKeyboardButton(text=" ".join([emoji_yes if mode == "ask" else emoji_no, "Ask"]),
callback_data=" ".join(["settings", "ask"]))
button_flood = InlineKeyboardButton(text=" ".join([emoji_yes if flood == "yes" else emoji_no, "Captions"]),
callback_data=" ".join(["settings", "flood"]))
button_close = InlineKeyboardButton(text=" ".join([emoji_no, "Close settings"]),
callback_data=" ".join(["settings", "close"]))
inline_keyboard = InlineKeyboardMarkup([[button_dl, button_link, button_ask], [button_flood, button_close]])
return inline_keyboard
def settings_command_callback(self, update: Update, context: CallbackContext):
self.init_chat(update.message)
log_and_track("settings")
chat_id = update.message.chat_id
context.bot.send_message(chat_id=chat_id, parse_mode='Markdown',
reply_markup=self.get_settings_inline_keyboard(chat_id),
text=self.SETTINGS_TEXT)
def common_command_callback(self, update: Update, context: CallbackContext):
self.init_chat(update.message)
chat_id = update.message.chat_id
chat_type = update.message.chat.type
reply_to_message_id = update.message.message_id
command_entities = update.message.parse_entities(types=[MessageEntity.BOT_COMMAND])
if not command_entities:
command_passed = False
# if no command then it is just a message and use default mode
mode = self.chat_storage[str(chat_id)]["settings"]["mode"]
else:
command_passed = True
# try to determine mode from command
mode = None
for entity_value in command_entities.values():
mode = entity_value.replace("/", "").replace("@{}".format(self.bot_username), "")
break
if not mode:
mode = "dl"
if command_passed and not context.args:
rant_text = self.RANT_TEXT_PRIVATE if chat_type == Chat.PRIVATE else self.RANT_TEXT_PUBLIC
rant_text += "\nYou can simply send message with links (to download) OR command as `/{} <links>`.".format(
mode)
self.rant_and_cleanup(context.bot, chat_id, rant_text, reply_to_message_id=reply_to_message_id)
return
# apologize and send TYPING: always in PM and only when it's command in non-PM
apologize = chat_type == Chat.PRIVATE or command_passed
if apologize:
context.bot.send_chat_action(chat_id=chat_id, action=ChatAction.TYPING)
source_ip = None
proxy = None
if self.source_ips:
source_ip = random.choice(self.source_ips)
if self.proxies:
proxy = random.choice(self.proxies)
# TODO find working IP?
urls = self.prepare_urls(msg_or_text=update.message,
direct_urls=(mode == "link"),
source_ip=source_ip, proxy=proxy)
logger.debug(urls)
if not urls:
if apologize:
context.bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id,
text=self.NO_URLS_TEXT, parse_mode='Markdown')
else:
event_name = ("{}_cmd".format(mode)) if command_passed else ("{}_msg".format(mode))
log_and_track(event_name, update.message)
if mode == "dl":
wait_message = context.bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id,
parse_mode='Markdown', text=get_italic(self.get_wait_text()))
for url in urls:
self.download_url_and_send(context.bot, url, urls[url], chat_id=chat_id,
reply_to_message_id=reply_to_message_id,
wait_message_id=wait_message.message_id,
source_ip=source_ip, proxy=proxy)
elif mode == "link":
wait_message = context.bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id,
parse_mode='Markdown', text=get_italic(self.get_wait_text()))
link_text = get_link_text(urls)
context.bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id,
parse_mode='Markdown', disable_web_page_preview=True,
text=link_text if link_text else self.NO_URLS_TEXT)
context.bot.delete_message(chat_id=chat_id, message_id=wait_message.message_id)
elif mode == "ask":
# ask: always in PM and only if good urls exist in non-PM
if chat_type == Chat.PRIVATE or "http" in " ".join(urls.values()):
orig_msg_id = str(reply_to_message_id)
self.chat_storage[str(chat_id)][orig_msg_id] = {"message": update.message, "urls": urls,
"source_ip": source_ip, "proxy": proxy}
question = "🎶 links found, what to do?"
button_dl = InlineKeyboardButton(text="✅ Download", callback_data=" ".join([orig_msg_id, "dl"]))
button_link = InlineKeyboardButton(text="❇️ Links",
callback_data=" ".join([orig_msg_id, "link"]))
button_cancel = InlineKeyboardButton(text="❎", callback_data=" ".join([orig_msg_id, "nodl"]))
inline_keyboard = InlineKeyboardMarkup([[button_dl, button_link, button_cancel]])
context.bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id,
reply_markup=inline_keyboard, text=question)
self.cleanup_chat(chat_id)
def button_query_callback(self, update: Update, context: CallbackContext):
btn_msg = update.callback_query.message
self.init_chat(btn_msg)
user_id = update.callback_query.from_user.id
btn_msg_id = btn_msg.message_id
chat = btn_msg.chat
chat_id = chat.id
chat_type = chat.type
orig_msg_id, action = update.callback_query.data.split()
if orig_msg_id == "settings":
if chat_type != Chat.PRIVATE:
chat_member_status = chat.get_member(user_id).status
if chat_member_status not in [ChatMember.ADMINISTRATOR,
ChatMember.CREATOR] and user_id not in self.ALERT_CHAT_IDS:
log_and_track("settings_fail")
update.callback_query.answer(text="You're not chat admin")
return
log_and_track("settings_{}".format(action), btn_msg)
if action == "close":
context.bot.delete_message(chat_id, btn_msg_id)
else:
setting_changed = False
if action in ["dl", "link", "ask"]:
current_setting = self.chat_storage[str(chat_id)]["settings"]["mode"]
if action != current_setting:
setting_changed = True
self.chat_storage[str(chat_id)]["settings"]["mode"] = action
elif action in ["flood"]:
current_setting = self.chat_storage[str(chat_id)]["settings"]["flood"]
setting_changed = True
self.chat_storage[str(chat_id)]["settings"][action] = "no" if current_setting == "yes" else "yes"
if setting_changed:
self.chat_storage.sync()
update.callback_query.answer(text="Settings changed")
update.callback_query.edit_message_reply_markup(parse_mode='Markdown',
reply_markup=self.get_settings_inline_keyboard(
chat_id))
else:
update.callback_query.answer(text="Settings not changed")
elif orig_msg_id in self.chat_storage[str(chat_id)]:
msg_from_storage = self.chat_storage[str(chat_id)].pop(orig_msg_id)
orig_msg = msg_from_storage["message"]
urls = msg_from_storage["urls"]
source_ip = msg_from_storage["source_ip"]
proxy = msg_from_storage["proxy"]
log_and_track("{}_msg".format(action), orig_msg)
if action == "dl":
update.callback_query.answer(text=self.get_wait_text())
wait_message = update.callback_query.edit_message_text(parse_mode='Markdown',
text=get_italic(self.get_wait_text()))
for url in urls:
self.download_url_and_send(context.bot, url, urls[url], chat_id=chat_id,
reply_to_message_id=orig_msg_id,
wait_message_id=wait_message.message_id,
source_ip=source_ip, proxy=proxy)
elif action == "link":
update.callback_query.answer(text=self.get_wait_text())
wait_message = update.callback_query.edit_message_text(parse_mode='Markdown',
text=get_italic(self.get_wait_text()))
urls = self.prepare_urls(urls.keys(), direct_urls=True, source_ip=source_ip, proxy=proxy)
link_text = get_link_text(urls)
context.bot.send_message(chat_id=chat_id, reply_to_message_id=orig_msg_id,
parse_mode='Markdown', disable_web_page_preview=True,
text=link_text if link_text else self.NO_URLS_TEXT)
context.bot.delete_message(chat_id=chat_id, message_id=wait_message.message_id)
elif action == "nodl":
context.bot.delete_message(chat_id=chat_id, message_id=btn_msg_id)
else:
update.callback_query.answer(text=self.OLD_MSG_TEXT)
context.bot.delete_message(chat_id=chat_id, message_id=btn_msg_id)
def inline_query_callback(self, update: Update, context: CallbackContext):
log_and_track("link_inline")
inline_query_id = update.inline_query.id
text = update.inline_query.query
results = []
urls = self.prepare_urls(msg_or_text=text, direct_urls=True)
for url in urls:
for direct_url in urls[url].splitlines(): # TODO: fix non-mp3 and allow only sc/bc
logger.debug(direct_url)
results.append(
InlineQueryResultAudio(id=str(uuid4()), audio_url=direct_url, title="FAST_INLINE_DOWNLOAD"))
try:
context.bot.answer_inline_query(inline_query_id, results)
except:
pass
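# Normalize whatever the user sent into {url_text: direct_urls_or_status}: URLs are
# pulled from Telegram entities (or plain text), m.soundcloud.com is rewritten, and
# each link is classified as SoundCloud/Bandcamp/YouTube or generic; direct URLs are
# resolved (presumably through youtube-dl in get_direct_urls) when requested, or
# always for YouTube and generic hosts.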
def prepare_urls(self, msg_or_text, direct_urls=False, source_ip=None, proxy=None):
if isinstance(msg_or_text, Message):
urls = []
url_entities = msg_or_text.parse_entities(types=[MessageEntity.URL])
url_caption_entities = msg_or_text.parse_caption_entities(types=[MessageEntity.URL])
url_entities.update(url_caption_entities)
for entity in url_entities:
url_str = url_entities[entity]
logger.debug("Entity URL Parsed: %s", url_str)
if "://" not in url_str:
url_str = "http://{}".format(url_str)
urls.append(URL(url_str))
text_link_entities = msg_or_text.parse_entities(types=[MessageEntity.TEXT_LINK])
text_link_caption_entities = msg_or_text.parse_caption_entities(types=[MessageEntity.TEXT_LINK])
text_link_entities.update(text_link_caption_entities)
for entity in text_link_entities:
url_str = entity.url
logger.debug("Entity Text Link Parsed: %s", url_str)
urls.append(URL(url_str))
else:
urls = find_all_links(msg_or_text, default_scheme="http")
urls_dict = {}
for url in urls:
url_text = url.to_text(True)
#FIXME crutch:
url_text = url_text.replace("m.soundcloud.com", "soundcloud.com")
url_parts_num = len([part for part in url.path_parts if part])
try:
if (
# SoundCloud: tracks, sets and widget pages, no /you/ pages
(self.SITES["sc"] in url.host and (2 <= url_parts_num <= 3 or self.SITES["scapi"] in url_text) and (
not "you" in url.path_parts)) or
# Bandcamp: tracks and albums
(self.SITES["bc"] in url.host and (2 <= url_parts_num <= 2)) or
# YouTube: videos and playlists
(self.SITES["yt"] in url.host and (
"youtu.be" in url.host or "watch" in url.path or "playlist" in url.path))
):
if direct_urls or self.SITES["yt"] in url.host:
urls_dict[url_text] = get_direct_urls(url_text, self.cookies_file, self.COOKIES_DOWNLOAD_FILE,
source_ip, proxy)
else:
urls_dict[url_text] = "http"
elif not any((site in url.host for site in self.SITES.values())):
urls_dict[url_text] = get_direct_urls(url_text, self.cookies_file, self.COOKIES_DOWNLOAD_FILE,
source_ip, proxy)
except ProcessExecutionError:
logger.debug("youtube-dl get-url failed: %s", url_text)
except URLError as exc:
urls_dict[url_text] = exc.status
return urls_dict
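# Download worker (runs async per URL): try scdl for SoundCloud and bandcamp-dl for
# Bandcamp first, fall back to youtube-dl in a separate process with a timeout, then
# convert/split the results and send them as audio, mapping negative status codes to
# the matching apology message.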
@REQUEST_TIME.time()
@run_async
def download_url_and_send(self, bot, url, direct_urls, chat_id, reply_to_message_id=None,
wait_message_id=None, source_ip=None, proxy=None):
bot.send_chat_action(chat_id=chat_id, action=ChatAction.RECORD_AUDIO)
download_dir = os.path.join(self.DL_DIR, str(uuid4()))
shutil.rmtree(download_dir, ignore_errors=True)
os.makedirs(download_dir)
status = 0
if direct_urls == "direct":
status = -3
elif direct_urls == "country":
status = -4
elif direct_urls == "live":
status = -5
else:
if (self.SITES["sc"] in url and self.SITES["scapi"] not in url) or (self.SITES["bc"] in url):
cmd_name = "scdl"
cmd_args = []
cmd = None
cmd_input = None
if self.SITES["sc"] in url and self.SITES["scapi"] not in url:
cmd_name = "scdl"
cmd_args = (
"-l", url, # URL of track/playlist/user
"-c", # Continue if a music already exist
"--path", download_dir, # Download the music to a custom path
"--onlymp3", # Download only the mp3 file even if the track is Downloadable
"--addtofile", # Add the artist name to the filename if it isn't in the filename already
"--addtimestamp",
# Adds the timestamp of the creation of the track to the title (useful to sort chronologically)
"--no-playlist-folder",
# Download playlist tracks into directory, instead of making a playlist subfolder
"--extract-artist", # Set artist tag from title instead of username
)
cmd = scdl_bin
cmd_input = None
elif self.SITES["bc"] in url:
cmd_name = "bandcamp-dl"
cmd_args = (
"--base-dir", download_dir, # Base location of which all files are downloaded
"--template", "%{track} - %{artist} - %{title} [%{album}]", # Output filename template
"--overwrite", # Overwrite tracks that already exist
"--group", # Use album/track Label as iTunes grouping
"--embed-art", # Embed album art (if available)
"--no-slugify", # Disable slugification of track, album, and artist names
url, # URL of album/track
)
cmd = bandcamp_dl_bin
cmd_input = "yes"
logger.info("%s starts: %s", cmd_name, url)
cmd_proc = cmd[cmd_args].popen(stdin=PIPE, stdout=PIPE, stderr=PIPE, universal_newlines=True)
try:
cmd_stdout, cmd_stderr = cmd_proc.communicate(input=cmd_input, timeout=self.DL_TIMEOUT)
cmd_retcode = cmd_proc.returncode
# TODO listed are common scdl problems for one track with 0 retcode, all its output is always in stderr:
if cmd_retcode or (any(err in cmd_stderr for err in ["Error resolving url", "is not streamable",
"Failed to get item"]) and ".mp3" not in cmd_stderr):
raise ProcessExecutionError(cmd_args, cmd_retcode, cmd_stdout, cmd_stderr)
logger.info("%s succeeded: %s", cmd_name, url)
status = 1
except TimeoutExpired:
cmd_proc.kill()
logger.info("%s took too much time and dropped: %s", cmd_name, url)
status = -1
except ProcessExecutionError:
logger.exception("%s failed: %s", cmd_name, url)
if status == 0:
cmd_name = "youtube-dl"
cmd = youtube_dl_func
# TODO: set different ydl_opts for different sites
ydl_opts = {
'format': 'bestaudio/best',
'outtmpl': os.path.join(download_dir, '%(title)s.%(ext)s'),
# default: %(autonumber)s - %(title)s-%(id)s.%(ext)s
'postprocessors': [
{
'key': 'FFmpegExtractAudio',
'preferredcodec': 'mp3',
'preferredquality': '128',
},
# {'key': 'EmbedThumbnail',}, {'key': 'FFmpegMetadata',},
],
}
if proxy:
ydl_opts['proxy'] = proxy
if source_ip:
ydl_opts['source_address'] = source_ip
# https://github.com/ytdl-org/youtube-dl/blob/master/youtube_dl/YoutubeDL.py#L210
if self.cookies_file:
if "http" in self.cookies_file:
ydl_opts['cookiefile'] = self.COOKIES_DOWNLOAD_FILE
else:
ydl_opts['cookiefile'] = self.cookies_file
queue = Queue()
cmd_args = (url, ydl_opts, queue,)
logger.info("%s starts: %s", cmd_name, url)
cmd_proc = Process(target=cmd, args=cmd_args)
cmd_proc.start()
try:
cmd_retcode, cmd_stderr = queue.get(block=True, timeout=self.DL_TIMEOUT)
cmd_stdout = ""
cmd_proc.join()
if cmd_retcode:
raise ProcessExecutionError(cmd_args, cmd_retcode, cmd_stdout, cmd_stderr)
# raise cmd_status #TODO: pass and re-raise original Exception?
logger.info("%s succeeded: %s", cmd_name, url)
status = 1
except Empty:
cmd_proc.join(1)
if cmd_proc.is_alive():
cmd_proc.terminate()
logger.info("%s took too much time and dropped: %s", cmd_name, url)
status = -1
except ProcessExecutionError:
logger.exception("%s failed: %s", cmd_name, url)
status = -2
gc.collect()
if status == -1:
bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id,
text=self.DL_TIMEOUT_TEXT, parse_mode='Markdown')
elif status == -2:
bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id,
text=self.NO_AUDIO_TEXT, parse_mode='Markdown')
elif status == -3:
bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id,
text=self.DIRECT_RESTRICTION_TEXT, parse_mode='Markdown')
elif status == -4:
bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id,
text=self.REGION_RESTRICTION_TEXT, parse_mode='Markdown')
elif status == -5:
bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id,
text=self.LIVE_RESTRICTION_TEXT, parse_mode='Markdown')
elif status == 1:
file_list = []
for d, dirs, files in os.walk(download_dir):
for file in files:
file_list.append(os.path.join(d, file))
if not file_list:
logger.info("No files in dir: %s", download_dir)
bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id,
text="*Sorry*, I couldn't download any files from provided links",
parse_mode='Markdown')
else:
for file in sorted(file_list):
file_name = os.path.split(file)[-1]
file_parts = []
try:
file_parts = self.convert_and_split_audio_file(file)
except FileNotSupportedError as exc:
if exc.file_format not in ["m3u", "jpg", "jpeg", "png", "finished", "tmp"]:
logger.warning("Unsupported file format: %s", file_name)
bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id,
text="*Sorry*, downloaded file `{}` is in format I could not yet convert or send".format(
file_name),
parse_mode='Markdown')
except FileTooLargeError as exc:
logger.info("Large file for convert: %s", file_name)
bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id,
text="*Sorry*, downloaded file `{}` is `{}` MB and it is larger than I could convert (`{} MB`)".format(
file_name, exc.file_size // 1000000,
self.MAX_CONVERT_FILE_SIZE // 1000000),
parse_mode='Markdown')
except FileSplittedPartiallyError as exc:
file_parts = exc.file_parts
logger.exception("Splitting failed: %s", file_name)
bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id,
text="*Sorry*, not enough memory to convert file `{}`..".format(
file_name),
parse_mode='Markdown')
except FileNotConvertedError as exc:
logger.exception("Splitting failed: %s", file_name)
bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id,
text="*Sorry*, not enough memory to convert file `{}`..".format(
file_name),
parse_mode='Markdown')
try:
caption = None
flood = self.chat_storage[str(chat_id)]["settings"]["flood"]
if flood == "yes":
addition = ""
url_obj = URL(url)
if self.SITES["yt"] in url_obj.host:
source = "YouTube"
file_root, file_ext = os.path.splitext(file_name)
file_title = file_root.replace(file_ext, "")
addition = ": " + file_title
elif self.SITES["sc"] in url_obj.host:
source = "SoundCloud"
elif self.SITES["bc"] in url_obj.host:
source = "Bandcamp"
else:
source = url_obj.host.replace(".com", "").replace("www.", "").replace("m.", "")
# if "youtu.be" in url_obj.host:
# url = url.replace("http://", "").replace("https://", "")
# else:
# url = shorten_url(url)
caption = "@{} _got it from_ [{}]({}){}".format(self.bot_username.replace("_", "\_"),
source, url, addition.replace("_", "\_"))
# logger.info(caption)
sent_audio_ids = self.send_audio_file_parts(bot, chat_id, file_parts,
reply_to_message_id if flood == "yes" else None,
caption)
except FileSentPartiallyError as exc:
sent_audio_ids = exc.sent_audio_ids
bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id,
text="*Sorry*, could not send file `{}` or some of it's parts..".format(
file_name),
parse_mode='Markdown')
logger.warning("Sending some parts failed: %s", file_name)
if not self.SERVE_AUDIO:
shutil.rmtree(download_dir, ignore_errors=True)
if wait_message_id: # TODO: delete only once
try:
bot.delete_message(chat_id=chat_id, message_id=wait_message_id)
except:
pass
def convert_and_split_audio_file(self, file=""):
file_root, file_ext = os.path.splitext(file)
file_format = file_ext.replace(".", "").lower()
file_size = os.path.getsize(file)
if file_format not in ["mp3", "m4a", "mp4"]:
raise FileNotSupportedError(file_format)
if file_size > self.MAX_CONVERT_FILE_SIZE:
raise FileTooLargeError(file_size)
if file_format != "mp3":
logger.info("Converting: %s", file)
try:
file_converted = file.replace(file_ext, ".mp3")
ffinput = ffmpeg.input(file)
ffmpeg.output(ffinput, file_converted, audio_bitrate="128k", vn=None).run()
file = file_converted
file_root, file_ext = os.path.splitext(file)
file_format = file_ext.replace(".", "").lower()
file_size = os.path.getsize(file)
except Exception:
# TODO exceptions
raise FileNotConvertedError
file_parts = []
if file_size <= self.MAX_TG_FILE_SIZE:
file_parts.append(file)
else:
logger.info("Splitting: %s", file)
id3 = None
try:
id3 = ID3(file, translate=False)
except:
pass
parts_number = file_size // self.MAX_TG_FILE_SIZE + 1
# https://github.com/c0decracker/video-splitter
# https://superuser.com/a/1354956/464797
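# Split by approximate byte size: ffmpeg's fs option caps each part at roughly part_size bytes,
# and probing each finished part's duration gives the seek offset (ss) for the next part.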
try:
# file_duration = float(ffmpeg.probe(file)['format']['duration'])
part_size = file_size // parts_number
cur_position = 0
for i in range(parts_number):
file_part = file.replace(file_ext, ".part{}{}".format(str(i + 1), file_ext))
ffinput = ffmpeg.input(file)
if i == (parts_number - 1):
ffmpeg.output(ffinput, file_part, codec="copy", vn=None, ss=cur_position).run()
else:
ffmpeg.output(ffinput, file_part, codec="copy", vn=None, ss=cur_position, fs=part_size).run()
part_duration = float(ffmpeg.probe(file_part)['format']['duration'])
cur_position += part_duration
if id3:
try:
id3.save(file_part, v1=2, v2_version=4)
except:
pass
file_parts.append(file_part)
except Exception:
# TODO exceptions
raise FileSplittedPartiallyError(file_parts)
return file_parts
def send_audio_file_parts(self, bot, chat_id, file_parts, reply_to_message_id=None, caption=None):
sent_audio_ids = []
for index, file_part in enumerate(file_parts):
path = pathlib.Path(file_part)
file_name = os.path.split(file_part)[-1]
# file_name = translit(file_name, 'ru', reversed=True)
logger.info("Sending: %s", file_name)
bot.send_chat_action(chat_id=chat_id, action=ChatAction.UPLOAD_AUDIO)
caption_part = None
if len(file_parts) > 1:
caption_part = "Part {} of {}".format(str(index + 1), str(len(file_parts)))
if caption:
if caption_part:
caption_full = caption_part + " | " + caption
else:
caption_full = caption
else:
if caption_part:
caption_full = caption_part
else:
caption_full = ""
# caption_full = textwrap.shorten(caption_full, width=190, placeholder="..")
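# Try sending up to three times; a TelegramError on the final attempt is logged below.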
for i in range(3):
try:
mp3 = MP3(file_part)
duration = round(mp3.info.length)
performer = None
title = None
try:
performer = ", ".join(mp3['artist'])
title = ", ".join(mp3['title'])
except:
pass
if self.SERVE_AUDIO:
audio = str(urljoin(self.APP_URL, str(path.relative_to(self.DL_DIR))))
logger.debug(audio)
else:
audio = open(file_part, 'rb')
if i > 0:
# a retry may fail with "Reply message not found", so drop the reply reference
reply_to_message_id = None
audio_msg = bot.send_audio(chat_id=chat_id,
reply_to_message_id=reply_to_message_id,
audio=audio,
duration=duration,
performer=performer,
title=title,
caption=caption_full,
parse_mode='Markdown')
sent_audio_ids.append(audio_msg.audio.file_id)
logger.info("Sending succeeded: %s", file_name)
break
except TelegramError:
if i == 2:
logger.exception("Sending failed because of TelegramError: %s", file_name)
if len(sent_audio_ids) != len(file_parts):
raise FileSentPartiallyError(sent_audio_ids)
return sent_audio_ids
|
state_machine.py
|
#!/usr/bin/env python
from pysm import StateMachine, State, Event
import drill_machine
import melt_machine
import threading
import rospy
from std_msgs.msg import Int32
from std_msgs.msg import Bool
from std_msgs.msg import Empty
from std_msgs.msg import String
from nuice_msgs.srv import FloatCommand, FloatCommandResponse
class Carosel(StateMachine):
def __init__(self, name):
super(Carosel,self).__init__(name)
self.worker_thread = threading.Thread(target=self.run)
self.goal = 0
drill_motion_pub = rospy.Publisher("/central_board/drill_stp/set_abs_pos", Int32, queue_size = 10)
drill_rel_motion_pub = rospy.Publisher("/central_board/drill_stp/set_rel_pos", Int32, queue_size = 10)
drill_stop_pub = rospy.Publisher("/central_board/drill_stp/quick_stop", Empty, queue_size = 10)
drill_pub = rospy.Publisher("/central_board/drill_relay/set_state", Bool, queue_size = 10)
melt_motion_pub = rospy.Publisher("/central_board/melt_stp/set_abs_pos", Int32, queue_size = 10)
melt_rel_motion_pub = rospy.Publisher("/central_board/melt_stp/set_rel_pos", Int32, queue_size = 10)
melt_stop_pub = rospy.Publisher("/central_board/drill_stp/quick_stop", Empty, queue_size = 10)
rospy.wait_for_service('set_probe1')
probe_1_service = rospy.ServiceProxy('set_probe1', FloatCommand)
rospy.wait_for_service('set_probe2')
probe_2_service = rospy.ServiceProxy('set_probe2', FloatCommand)
# Children state machines
self.drill = drill_machine.Drill("drilling", drill_motion_pub, drill_rel_motion_pub, drill_stop_pub, drill_pub)
self.melt = melt_machine.Melt("melting", melt_motion_pub, melt_rel_motion_pub, melt_stop_pub, probe_1_service, probe_2_service)
rospy.Subscriber('/central_board/drill_stp/current_position', Int32, self.drill.drill_position_callback)
rospy.Subscriber('/central_board/drill_limit/current_state', Bool, self.drill.drill_limit_callback)
rospy.Subscriber('/central_board/melt_stp/current_position', Int32, self.melt.melt_position_callback)
rospy.Subscriber('/central_board/melt_limit/current_state', Bool, self.melt.melt_limit_callback)
rospy.Subscriber('ac/goal', Int32, self.goal_callback)
rospy.Subscriber('ac/events', String, lambda event_data: self.dispatch(Event(event_data.data, self.goal)))
# Main states
#init = State("init")
#manual = State("manual")
#repos = State("repos")
steady = State("steady")
# Sub-States
#self.add_state(init, initial=True)
#self.add_state(manual)
#self.add_state(repos)
self.add_state(steady, initial=True)
# Sub-state transitions
#self.add_transition(self.init, self.manual, event='manual')
#self.add_transition(self.repos, self.manual, event='manual')
#self.add_transition(self.steady, self.manual, event='manual')
#self.add_transition(self.manual, self.repos, event='reposition')
#self.add_transition(self.steady, self.repos, event='reposition')
#self.add_transition(self.init, self.repos, event='initialized')
#self.add_transition(self.repos, self.steady, event='steady')
#self.init.handlers = {'enter': self.initOnEnter}
#self.repos.handlers = {'turn': self.turn,
# 'exit': self.reposExit}
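# In the steady state, drill_*/melt_* events are forwarded to the corresponding child state machine.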
steady.handlers = {#'exit': self.exitSteady,
'drill_idle': lambda state, event: self.drill.dispatch(Event('idle')),
'drill_drill': lambda state, event: self.drill.dispatch(Event('drill', event.input)),
'drill_stop': lambda state, event: self.drill.dispatch(Event('stop')),
'melt_idle': lambda state, event: self.melt.dispatch(Event('idle')),
'melt_melt': lambda state, event: self.melt.dispatch(Event('melt', event.input)),
'melt_stop': lambda state, event: self.melt.dispatch(Event('stop')),
'melt_probe_1' : lambda state, event: self.melt.dispatch(Event('probe1', event.input)),
'melt_probe_2' : lambda state, event: self.melt.dispatch(Event('probe2', event.input))
}
self.rate = rospy.Rate(20)
self.worker_thread.start()
def goal_callback(self, goal_data):
self.goal = goal_data.data
#def turn(self, state, event):
#def reposExit(self, state, event):
#def exitSteady(self, state, event):
def run(self):
while not rospy.is_shutdown():
self.rate.sleep()
if __name__ == '__main__':
rospy.init_node('autonomy_core')
state_machine = Carosel("carosel")
while not rospy.is_shutdown():
rospy.spin()
|
email.py
|
from threading import Thread
from flask import current_app
from flask_mail import Message
from app import mail
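# send_email builds the message and either sends it synchronously or hands it to a background
# thread; current_app._get_current_object() passes the real app object (not the proxy) so the
# worker thread can push its own application context before calling mail.send().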
def send_async_email(app, msg):
with app.app_context():
mail.send(msg)
def send_email(subject, sender, recipients, text_body, html_body, attachments=None, sync=False):
msg = Message(subject, sender=sender, recipients=recipients)
msg.body = text_body
msg.html = html_body
if attachments:
for attachment in attachments:
msg.attach(*attachment)
if sync:
mail.send(msg)
else:
Thread(target=send_async_email, args=(current_app._get_current_object(), msg)).start()
|
app.py
|
import flask
from functools import partial
import logging
import queue
import threading
import time
from .service import Agent, Channel
from .service.slack import load_config, validate_token, get_service
from .dispatch import QueuingDispatch, MuxDispatch
from .game import GeneralWerewolf, SpecificWerewolf
from .persist import load
from .rules import load_games
LOG = logging.getLogger(__name__)
app = flask.Flask(__name__)
load_config()
load_games()
# BOT_MESSAGE = 'bot_message'
BASE_PATH = '/slack/werewolf'
QUEUE = queue.Queue()
DISPATCHER = QueuingDispatch(queue=QUEUE)
RUNNER = load('mux',
default=partial(MuxDispatch, queue=QUEUE, default=GeneralWerewolf()),
factory=partial(MuxDispatch.load, queue=QUEUE,
default_type=GeneralWerewolf,
default_factory=GeneralWerewolf.load,
target_factory=SpecificWerewolf.load))
RUNNER.start()
# Set up logging
logging.basicConfig(level=logging.DEBUG)
logging.getLogger('werewolf.text').setLevel(logging.INFO)
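# Request flow: Slack POSTs event callbacks to BASE_PATH; after token and team validation each
# message event is wrapped into Agent/Channel objects and handed to DISPATCHER (a QueuingDispatch
# that shares QUEUE with the MuxDispatch RUNNER started above).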
@app.route(BASE_PATH, methods=['POST'])
def root(*args, **kwargs):
args = flask.request.json
token = args.get('token')
if not validate_token(token):
return flask.Response('', status=401)
if args.get('challenge') is not None:
LOG.debug("request is %s", args)
return flask.Response(args['challenge'], 200)
team = args.get('team_id')
if team is None:
return flask.Response('', status=404)
service = get_service(team)
if service is None:
return flask.Response('', status=404)
if service.token != token:
LOG.warning("Mismatch of token to team: %s", args)
return flask.Response('', status=400)
if args.get('type') == 'event_callback':
event = args.get('event', {})
if event.get('type') == 'app_mention':
pass
elif event.get('type') == 'message' and event.get('subtype') is None:
LOG.debug('args are %s', {k: args[k] for k in args if k != 'token'})
text = event.get('text', '')
sender = Agent(id=event.get('user'))
channel = Channel(id=event.get('channel'))
ts = event.get('ts')
if event.get('channel_type') == 'group':
channel = channel.replace(is_private=True)
elif event.get('channel_type') == 'im':
channel = channel.replace(is_im=True)
receivers = [Agent(id=rcv, is_bot=True) for rcv in args.get('authed_users', [])]
DISPATCHER.raw_message(srv=service, sender=sender, channel=channel, receivers=receivers,
text=text, message_id=ts)
else:
LOG.debug('args are %s', {k: args[k] for k in args if k != 'token'})
return flask.Response('', 200)
@app.route(BASE_PATH + "/oauth/<team>", methods=['GET'])
def oauth(team):
args = flask.request.args
service = get_service(team)
if service is not None:
DISPATCHER.oauth_callback(srv=service, code=args['code'], state=args['state'])
return flask.Response('', 200)
@app.before_first_request
def activate_timer():
def tick():
while True:
LOG.debug("Time marches on")
DISPATCHER.tick(srv_lookup=get_service)
time.sleep(60)
threading.Thread(target=tick).start()
def main():
app.run(host='0.0.0.0', port=5002)
|
ecr.py
|
"""
Date 04.2021
@author: Chair of Functional Materials - Saarland University - Saarbrücken, Bruno Alderete
@version 1.2
"""
__author__ = 'Bruno Alderete'
#######################################################################################################################
#
# The MIT License (MIT)
#
# Copyright (c) 2021 Bruno Alderete
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
#######################################################################################################################
#########################################################################
#########################################################################
#########################################################################
######### THIS PROGRAM INPUTS A LIST OF APPLIED NORMAL LOADS ###########
##### CARRIES OUT THE ECR MEASUREMENTS AND MOVES TO THE NEXT LOAD #######
############ THE CYCLE CONCLUDES AFTER MEASURING ECR UNDER ##############
############################# ALL LOADS #################################
#########################################################################
#########################################################################
from gsv8 import gsv8 # import for force sensor
from GSV_BasicMeasurement import BasicMeasurement # import for force Sensor
from pipython import GCSDevice, pitools # import for stages
import time # import for time
import datetime # import for time
import threading
import pandas as pd
import pyvisa
########################
##### INSTRUMENTS #####
########################
### STAGES ###
STAGES = ('L-511.20SD00', 'L-511.20SD00')
REFMODE = ('FNL') # Fast reference move to negative limit
motorZ = GCSDevice('C-663.12')
motorX = GCSDevice('C-663.12')
### FORCE SENSOR ###
force_sensor = gsv8("COM16", 115200)
### ELECTRICAL EQUIPMENT ###
rm = pyvisa.ResourceManager('@py')
k2400 = rm.open_resource('GPIB::24::INSTR')
k2182 = rm.open_resource('GPIB::7::INSTR')
###############################
##### VARIABLES AND LISTS #####
###############################
is_done = False
### ELECTRIC PARAMETERS ###
i_meas_list = []
v_meas_list = []
resistance = []
current = 100E-3 # 100 mA (user input)
sleep_time = 2 # delay between measurements (fixed value)
meas = 5 # amount of measurements per force (user input)
delay = 1 # fixed value
j = 0 # index into the force list
f_list = [] # empty list where forces are saved (for data output)
### RESULTS ###
df_results = pd.DataFrame(columns=['Current', 'Voltage', 'Resistance', 'Force'])
###############################
##### FUNCTION DEFINITION #####
###############################
### TAKE FORCE ALL AXIS ###
def take_force_all():
measurement0 = force_sensor.ReadValue()
x_load0 = "%.4f" % float('{}'.format(measurement0.getChannel1()))
y_load0 = "%.4f" % float('{}'.format(measurement0.getChannel2()))
z_load0 = "%.4f" % float('{}'.format(measurement0.getChannel3()))
print('X load is: ', x_load0, ' N')
print('Y load is: ', y_load0, ' N')
print('Z load is: ', z_load0, ' N')
take_force_all() # TAKES INITIAL FORCE
### TAKE NORMAL LOAD ONLY ###
def take_force_normal():
measurementZ = force_sensor.ReadValue()
z_load = "%.4f" % float('{}'.format(measurementZ.getChannel3()))
print('Z load is ', z_load, ' N')
return z_load
### START, REFERENCE AND MOVE STAGES TO CENTER POSITION ###
def start_stages(targetZ=10,targetX=26,vel_Z=15,vel_X=15,wait=5):
motorZ.OpenUSBDaisyChain(description='019550102')
daisychainid = motorZ.dcid
motorZ.ConnectDaisyChainDevice(1, daisychainid)
motorX.ConnectDaisyChainDevice(2, daisychainid)
pitools.startup(motorZ, stages=STAGES, refmode=REFMODE)
print('Z-axis current position: ', float(str(motorZ.qPOS(motorZ.axes))[18:-3]))
pitools.startup(motorX, stages=STAGES, refmode=REFMODE)
print('X-axis current position: ', float(str(motorX.qPOS(motorX.axes))[18:-3]))
time.sleep(wait)
motorX.VEL(motorX.axes, vel_X)
motorX.MOV(motorX.axes, targetX)
pitools.waitontarget(motorX)
print('Position X: ',float(str(motorX.qPOS(motorX.axes))[18:-3]))
motorZ.VEL(motorZ.axes, vel_Z)
motorZ.MOV(motorZ.axes, targetZ)
pitools.waitontarget(motorZ)
print('Position Z: ',float(str(motorZ.qPOS(motorZ.axes))[18:-3]))
time.sleep(wait)
start_stages(targetZ=10,targetX=26,vel_Z=15,vel_X=15,wait=5) # STARTS STAGES
### START ELECTRIC INSTRUMENTS ###
def start_instr(curr_app='100E-3',curr_prot='150E-3'):
# Start and reset Keithley 2400
k2400.write('*RST')
k2400.timeout = 60000 # 60 seconds timeout
k2400.write(':ROUT:TERM REAR') # rear output
k2400.write(':SENS:FUNC:CONC OFF')
k2400.write(':SOUR:FUNC CURR') # source current
k2400.write(f':SOUR:CURR {curr_app}') # Applied current in A
k2400.write(":SENS:FUNC 'CURR:DC'")
k2400.write(f':SENS:CURR:PROT {curr_prot}') # set protection current
# Start and reset Keithley 2182
k2182.write('*RST')
### TAKE i MEASUREMENT ###
def take_i_meas(sour_del=delay):
k2400.write('TRIG:COUN 1') # Amount of measurements (set to one, will loop)
k2400.write(f'SOUR:DEL {sour_del}') # Delay in seconds (between 0, 60).
k2400.write(':FORM:ELEM CURR')
k2400.write(':OUTP ON') # turns on the source output
i_meas_list.append(k2400.query_ascii_values(':READ?')) # saves value to list
### TAKE V MEASUREMENT ###
def take_v_meas(v_range='0.100'):
k2182.write('*RST') # resets all stored values
k2182.write(":SENS:FUNC 'VOLT'") # measuremes voltage
k2182.write(':SENS:CHAN 1') # channel out measurement
k2182.write(f':SENS:VOLT:CHAN1:RANG {v_range}') # sets voltmeter range # k2182.write(':SENS:VOLT:CHAN1:RANG:AUTO ON') for auto range
v_meas_list.append(k2182.query_ascii_values(':READ?')) # saves value to list
### TAKE FULL MEASUREMENT ###
def take_measurement(meas=5, trigs=0, curr_app='100E-3', curr_prot='150E-3', sour_del=delay, v_range='0.100', sleep_time=1):
start_instr(curr_app, curr_prot) # initiates instruments
# loops the measurements
while trigs < meas:
take_i_meas(sour_del) # calls smu function
take_v_meas(v_range) # calls nanovoltmeter function
trigs += 1 # counter
k2400.write(':OUTP OFF') # turns off smu between measurements
### ECR ###
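# Alternates the polarity of the applied current on successive readings (even/odd count);
# such current reversal is commonly used to cancel thermoelectric voltage offsets.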
def ecr(meas=meas):
trigs = 0 # set counter to zero
while trigs < meas:
if trigs % 2 == 0:
take_measurement(meas=1, trigs=0, curr_app=str(current), curr_prot='150E-3', sour_del=delay, v_range=str(v_range))
time.sleep(sleep_time)
else:
take_measurement(meas=1, trigs=0, curr_app='-' + str(current), curr_prot='150E-3', sour_del=delay, v_range=str(v_range))
time.sleep(sleep_time)
trigs += 1
trigs = 0
### COARSE APPROACH X-AXIS ###
def approach_X_stage(approach_X=1):
print('Position X: ',float(str(motorX.qPOS(motorX.axes))[18:-3]))
while approach_X == 1:
print('\n##############################################################')
moveX = float(input('Enter how many mm the X-axis should move: '))
print('You chose to move: ', moveX, ' mm\n')
motorX.MVR(motorX.axes, moveX) # MVR moves relative, MOV moves absolute
pitools.waitontarget(motorX)
print('Position X: ',float(str(motorX.qPOS(motorX.axes))[18:-3]))
approach_X = int(input('Do you want to keep moving the X stage? (enter 1 to continue approaching, enter 0 if you DO NOT want to continue approaching): '))
if approach_X == 1:
print('You chose to continue approaching the X stage.\n') # MAKE SURE YOU INPUT CORRECTLY
else:
print('You are done approaching the X stage.\n')
approach_X_stage(approach_X=1) # STARTS X APPROACH
### COARSE APPROACH Z-AXIS ###
def approach_Z_stage(approach_Z=1):
print('Position Z: ',float(str(motorZ.qPOS(motorZ.axes))[18:-3]))
while approach_Z == 1:
print('\n##############################################################')
moveZ = float(input('Enter how many mm the Z-axis should move: '))
print('You chose to move: ', moveZ, ' mm\n')
motorZ.MVR(motorZ.axes, moveZ)
pitools.waitontarget(motorZ)
print('Position Z: ',float(str(motorZ.qPOS(motorZ.axes))[18:-3]))
approach_Z = int(input('Do you want to keep moving the Z stage? (enter 1 to continue approaching, enter 0 if you DO NOT want to continue approaching): '))
if approach_Z == 1 :
print('You chose to continue approaching the Z stage.\n') # MAKE SURE YOU INPUT CORRECTLY
else:
print('You are done approaching the Z stage.\n')
approach_Z_stage(approach_Z=1) # STARTS Z APPROACH
approach_X_stage(approach_X=1) # REPEATS X APPROACH (in case you want to modify the X position after the Z approach)
#############################
###### PARAMETER INPUT ######
#############################
forces = input('Enter the force sequence separated by a space (for decimals use "."): ') # force input
forces_list1 = list(forces.split(' ')) # string to list
forces_list = [float(i) for i in forces_list1]
meas = int(input('Input measurements per force: '))
current = str(input('Enter current (in A): '))
v_range = str(input('Enter nanovoltmeter range (0.010, 0.100, 1, 10, 100 V): '))
start = int(input('Do you want to start the measurements? Enter 1 to start, enter 0 to exit. '))
time.sleep(1)
#################################################
##### INITIAL FINE APPROACH TO FORCE SENSOR #####
#################################################
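# Fine approach: the Z step shrinks as the measured load nears the target (0.01 mm, then 0.001 mm, then 0.0001 mm per move).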
def fine_approach(target_load=forces_list[j]):
z_load_i = take_force_normal()
print('Initial normal load measurement: ', z_load_i, ' N')
if start == 1:
print ('\nStarting fine approach')
print('Current force: ', float(z_load_i), ' N') # first measurement from force sensor (NULLER)
current_z_load = take_force_normal() # current sensor value
print('Current load: ',current_z_load, ' N')
while (float(current_z_load) < float(target_load)):
position = float(str(motorZ.qPOS(motorZ.axes))[18:-3]) # get position
print('Current motor position: ', position)
print('\nApproaching...') # starts moving
if float(current_z_load) < (target_load * 0.4):
ztarget = position + 0.01
elif float(current_z_load) >= (target_load * 0.4) and float(current_z_load) <= (target_load * 0.90):
ztarget = position + 0.001
else:
ztarget = position + 0.0001
print('Target: ', ztarget)
motorZ.MOV(motorZ.axes, ztarget)
pitools.waitontarget(motorZ)
position = float(str(motorZ.qPOS(motorZ.axes))[18:-3])
print('New motor position: ', position)
time.sleep(0.5) # wait 0.5 s before taking a new force measurement
current_z_load = take_force_normal()
print('current load: ',current_z_load, ' N')
print('Target force reached: {}'.format(current_z_load))
time.sleep(1)
fine_approach(target_load=forces_list[0]) # start fine approach to the first load
### NEXT LOAD ###
def next_load(next_l=forces_list[j]): #j+1
z_load_i = take_force_normal()
print('Current load is: {} N'.format(z_load_i))
print('Moving to next load {} N'.format(forces_list[j])) #j+1
current_z_load = float(take_force_normal())
while (float(current_z_load)) < float(next_l):
position = float(str(motorZ.qPOS(motorZ.axes))[18:-3]) # get position
print('Current motor position: ', position)
if float(current_z_load) < (next_l * 0.40):
ztarget = position + 0.01
elif float(current_z_load) >= (next_l * 0.40) and float(current_z_load) <= (next_l * 0.90):
ztarget = position + 0.001
else:
ztarget = position + 0.0001
print('Target: ', ztarget)
motorZ.MOV(motorZ.axes, ztarget)
pitools.waitontarget(motorZ)
position = float(str(motorZ.qPOS(motorZ.axes))[18:-3])
print('New motor position: ', position)
time.sleep(0.5) # wait 0.5 s before taking a new force measurement
current_z_load = float(take_force_normal())
print('current load: ',current_z_load, ' N')
while (float(current_z_load)) > float(next_l):
position = float(str(motorZ.qPOS(motorZ.axes))[18:-3]) # get position
print('Current motor position: ', position)
if float(current_z_load) > float((next_l * 0.90) + next_l):
ztarget = position - 0.01
elif float(current_z_load) <= float((next_l * 0.90) + next_l) and float(current_z_load) >= float((next_l * 0.4) + next_l):
ztarget = position - 0.001
else:
ztarget = position - 0.0001
print('Target: ', ztarget)
motorZ.MOV(motorZ.axes, ztarget)
pitools.waitontarget(motorZ)
position = float(str(motorZ.qPOS(motorZ.axes))[18:-3])
print('New motor position: ', position)
time.sleep(0.5) # wait 0.5 s before taking a new force measurement
current_z_load = float(take_force_normal())
print('current load: ',current_z_load, ' N')
print('\n Target force reached: {}'.format(current_z_load))
### CONTROL NORMAL LOAD ###
def control_normal_load(target_load=forces_list[j]):
while not is_done:
normal_load = take_force_normal()
if float(normal_load) < target_load:
position = float(str(motorZ.qPOS(motorZ.axes))[18:-3])
ztarget = position + 0.0001
motorZ.MOV(motorZ.axes, ztarget)
normal_load = take_force_normal()
elif float(normal_load) > target_load:
position = float(str(motorZ.qPOS(motorZ.axes))[18:-3])
ztarget = position - 0.0001
motorZ.MOV(motorZ.axes, ztarget)
normal_load = take_force_normal()
elif float(normal_load) == target_load:
time.sleep(0.1)
### DATA TO CSV ###
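# Collects the current/voltage readings, computes resistance as |V/I|, tags every row with the force
# applied during that measurement block, and writes everything to a timestamped CSV file.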
def save_data():
# Electric parameters #
for x in range(len(i_meas_list[:])):
#for x in range(len(i_meas_list[0:])):
df_results.loc[x, 'Current'] = (i_meas_list[x-1][0])
df_results.loc[x, 'Voltage'] = (v_meas_list[x-1][0])
resist = v_meas_list[x-1][0] / i_meas_list[x-1][0]
resistance.append(abs(resist))
for x in range(len(forces_list[:])):
for y in range(meas):
f_list.append(forces_list[x])
df_results['Resistance'] = resistance
df_results['Force'] = f_list
print(df_results)
current_date = datetime.datetime.now()
filename = (str(current_date.year)+'.'+str(current_date.month)+'.'+str(current_date.day)+'-'+str(current_date.hour)+'-'+str(current_date.minute)+'-'+str(current_date.second)+'.csv')
df_results.to_csv(r'C:\Users\Labor\Desktop\{}'.format(filename), index=False)
##########################
### THREADED FUNCTIONS ###
##########################
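# Per target force: t3 (next_load) moves the Z stage to the new setpoint, t2 (control_normal_load)
# holds the load within range, and t1 (ecr) runs the electrical measurements; is_done stops the
# control loop once the measurements for that force finish.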
def start_threading():
global is_done
is_done = False
global j
j = 0
#for j in range(len(forces_list)):
while j < (len(forces_list)):
time.sleep(0.5)
t1 = threading.Thread(target=ecr, args=(meas,))
t2 = threading.Thread(target=control_normal_load, args=(forces_list[j],))
if j > 0:
t3 = threading.Thread(target=next_load, args=(forces_list[j],))
t3.start()
t3.join()
time.sleep(0.5)
t2.start() # start controlling normal load
time.sleep(0.5)
normal_load = float(take_force_normal()) # take normal load measurement
while normal_load < float((forces_list[j] * 0.99)) or normal_load > float((forces_list[j] * 1.01)): # wait until the load is within 99-101% of the target
time.sleep(0.5) # if it isn't yet, wait 0.5 s
normal_load = float(take_force_normal()) # take measurement again
# once normal load is between 99% and 101%, start ECR measurements
time.sleep(0.5)
t1.start()
t1.join()
time.sleep(0.5)
#time.sleep(1)
if not t1.is_alive(): # when ECR is done, stop controlling normal load
is_done = True
print(t1.is_alive())
j += 1 # next force
time.sleep(1)
is_done = False
start_threading()
save_data()
### Z Stage retreats ###
def retreat_stages(targetZ=10,targetX=26,vel_Z=15,vel_X=15,wait=2):
time.sleep(2)
retreat = int(input('Return stages to starting position? Enter 1 for yes, enter 0 for no: '))
if retreat == 1:
targetZ0 = -5
motorZ.MVR(motorZ.axes, targetZ0)
pitools.waitontarget(motorZ)
time.sleep(1)
motorX.VEL(motorX.axes, vel_X)
motorX.MOV(motorX.axes, targetX)
pitools.waitontarget(motorX)
print('Position X: ',float(str(motorX.qPOS(motorX.axes))[18:-3]))
motorZ.VEL(motorZ.axes, vel_Z)
motorZ.MOV(motorZ.axes, targetZ)
pitools.waitontarget(motorZ)
print('Position Z: ',float(str(motorZ.qPOS(motorZ.axes))[18:-3]))
time.sleep(wait)
retreat_stages(targetZ=10,targetX=26,vel_Z=15,vel_X=15,wait=5)
print('Closing program...')
elapsed_time = time.perf_counter() / 60
print('Elapsed time: {} min'.format(elapsed_time))
#########################################
##### CLOSES CONNECTION WITH MOTORS #####
#########################################
motorZ.CloseDaisyChain()
|
GameSpaceBot.py
|
from threading import Thread
from source.console.Preview import Preview
from source.main.Main import Main
from source.vkapi.CallBackAPI import m_thread
from source.vkapi.VkAPP.GSB import app
# Preview
Preview.preview()
# Messages Handling
messages = Thread(target=Main.routine)
messages.start()
# VkApp
vk_app = Thread(target=app.run, args=('localhost', 7999, False,))
vk_app.start()
# Web Server
m_thread.run(host='localhost', port=8001, debug=False)
|
filelockscheduler.py
|
import multiprocessing
import threading
import atexit
import time
from multiprocessing import shared_memory
from filelock import Timeout, FileLock
CPUS = multiprocessing.cpu_count()
global cpu_list
'''
The CPU bits (0 for unused, 1 for occupied) in the shared list are designed
to pre-empt load off the lockfiles and filesystem.
'''
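# The ShareableList is a fast, advisory hint of which CPUs look free; the per-CPU FileLock is still
# the authoritative guard that must be acquired before a slot is actually taken.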
try:
cpu_list = shared_memory.ShareableList([0] * 4, name="cpus")
except FileExistsError: # the shared list already exists, so attach to it instead
cpu_list = shared_memory.ShareableList(name="cpus")
def exit_handler():
global cpu_list
print("Closing sharedmemory")
cpu_list.shm.close()
cpu_list.shm.unlink()
del cpu_list
atexit.register(exit_handler)
def grab_cpu():
while True:
cpus = shared_memory.ShareableList(name="cpus")
for i in range(0, len(cpus)):
cpu = cpus[i]
print("Checking CPU{}: {}".format(i, cpu))
print(cpus)
time.sleep(4)
if cpu == 0:
try:
lock = FileLock("/tmp/cpu{}.lock".format(i))
with lock.acquire(timeout=1):
print('===Occupy CPU{}: {}'.format(
i, threading.current_thread().name))
# Set the CPU BIT
cpus[i] = 1
print(cpus)
# DO SOME WORK HERE
time.sleep(13)
# Clear the CPU bit
cpus[i] = 0
print('===Release CPU{}: {}'.format(
i, threading.current_thread().name))
except Timeout:
print("Thread {}: CPU {} Lock was held already, looking for another CPU.".format(
threading.current_thread().name, i))
finally:
lock.release()
time.sleep(4)
else:
print("Taking a break")
time.sleep(4)
def grab_cpus():
x = threading.Thread(target=grab_cpu)
x.start()
if __name__ == '__main__':
print("Starting thread...")
grab_cpus()
grab_cpus()
|