hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f766e4596509da0d3219da0e404d0b0ef34bf56a | 4,621 | py | Python | tests/unit/test_partition.py | ganik/DeepSpeed | 788e1c40e83beacfc4901e7daa1e097d2efb82bb | [
"MIT"
] | 1 | 2022-02-12T06:27:26.000Z | 2022-02-12T06:27:26.000Z | tests/unit/test_partition.py | ganik/DeepSpeed | 788e1c40e83beacfc4901e7daa1e097d2efb82bb | [
"MIT"
] | null | null | null | tests/unit/test_partition.py | ganik/DeepSpeed | 788e1c40e83beacfc4901e7daa1e097d2efb82bb | [
"MIT"
] | null | null | null | import pytest
import torch
import torch.distributed as dist
from deepspeed.runtime.utils import partition_uniform
from deepspeed.runtime.utils import partition_balanced
from deepspeed.runtime.utils import prefix_sum_inc
from deepspeed.runtime.utils import PartitionedTensor
from .common import distributed_test
@distributed_test(world_size=4)
def test_partitioned_tensor():
    """Round-trip a broadcast tensor through PartitionedTensor and verify
    that each rank holds a flat shard and the full tensor reconstructs."""
    world_size = dist.get_world_size()
    rank = dist.get_rank()
    comm_group = dist.new_group(ranks=list(range(world_size)))

    num_rows, num_cols = world_size * 4, 3
    source = torch.rand(num_rows, num_cols).cuda()
    dist.broadcast(source, src=0, group=comm_group)

    part = PartitionedTensor(source, group=comm_group)

    # Each rank's shard is 1D, and the shards together cover every element.
    assert len(part.local_size()) == 1
    assert part.local_size()[0] * world_size == source.numel()

    rebuilt = part.full()
    assert torch.equal(source, rebuilt)
@distributed_test(world_size=4)
def test_partitioned_tensor_meta():
    """Rebuild a PartitionedTensor from its meta description plus the local
    shard, and verify the reconstruction matches the original tensor."""
    world_size = dist.get_world_size()
    rank = dist.get_rank()
    comm_group = dist.new_group(ranks=list(range(world_size)))

    # Row count deliberately not divisible evenly into nice shards (7 per rank).
    source = torch.rand(world_size * 7, 3).cuda()
    dist.broadcast(source, src=0, group=comm_group)

    part = PartitionedTensor(source, group=comm_group)
    rebuilt = PartitionedTensor.from_meta(part.to_meta(), part.local_data, comm_group)

    assert torch.equal(source, rebuilt.full())
def assert_valid_partition(weights, parts, P):
    """Assert that ``parts`` is a valid boundary list splitting ``weights``
    into ``P`` contiguous (possibly empty) pieces."""
    num_items = len(weights)
    # P pieces are described by P+1 boundary indices.
    assert len(parts) == P + 1
    assert parts[0] == 0
    assert parts[P] == num_items
    # Boundaries must be non-decreasing; empty pieces are permitted.
    assert all(parts[i] <= parts[i + 1] for i in range(P))
def get_partition_weights(weights, parts):
    """ Return the amount of weight in each partition. """
    # parts holds P+1 boundaries; slice weights between consecutive boundaries.
    return [
        sum(weights[parts[p]:parts[p + 1]])
        for p in range(len(parts) - 1)
    ]
def test_prefix_sum():
    """Inclusive prefix sums of [3, 4, 5] must be [3, 7, 12]."""
    values = [3, 4, 5]
    running = prefix_sum_inc(values)
    assert running == [3, 7, 12]
def test_valid_partition():
    """A single-partition split of uniform weights must still be valid."""
    num_items, num_parts = 10, 1
    weights = [1] * num_items
    boundaries = partition_balanced(weights, num_parts)
    assert_valid_partition(weights, boundaries, num_parts)
def test_short_partition_uniform():
    """partition_uniform must cope with more partitions than items."""
    num_parts = 4
    weights = [1] * 2  # fewer items than partitions -> some parts empty
    boundaries = partition_uniform(len(weights), num_parts)
    assert_valid_partition(weights, boundaries, num_parts)
def test_short_partition():
    """partition_balanced must cope with more partitions than items."""
    num_parts = 4
    weights = [1] * 2  # fewer items than partitions -> some parts empty
    boundaries = partition_balanced(weights, num_parts)
    assert_valid_partition(weights, boundaries, num_parts)
def test_easy_balance_uniform():
    """8 equal weights into 4 uniform parts: exactly 2 units per part."""
    num_parts = 4
    weights = [1] * 8
    boundaries = partition_uniform(len(weights), num_parts)
    assert_valid_partition(weights, boundaries, num_parts)

    part_totals = get_partition_weights(weights, boundaries)
    assert all(total == 2 for total in part_totals)
def test_easy_balance_balanced():
    """8 equal weights into 4 balanced parts: exactly 2 units per part."""
    num_parts = 4
    weights = [1] * 8
    boundaries = partition_balanced(weights, num_parts)
    assert_valid_partition(weights, boundaries, num_parts)

    part_totals = get_partition_weights(weights, boundaries)
    # On failure, show the offending per-part totals.
    assert all(total == 2 for total in part_totals), part_totals
def test_int_balanced():
    """Integer weights summing to 12 over 4 parts: each part carries 3."""
    weights = [0, 1, 2, 3, 3, 3]
    num_parts = 4
    boundaries = partition_balanced(weights, num_parts)
    # First three light items group together; each heavy item stands alone.
    assert boundaries == [0, 3, 4, 5, 6]
    assert_valid_partition(weights, boundaries, num_parts)

    part_totals = get_partition_weights(weights, boundaries)
    assert all(total == 3 for total in part_totals)
def test_float_balanced():
    """Float weights: light leading items share one part, heavies split."""
    weights = [0., 1.1, 1.9, 3., 3., 3.]
    num_parts = 4
    boundaries = partition_balanced(weights, num_parts)
    assert_valid_partition(weights, boundaries, num_parts)
    assert boundaries == [0, 3, 4, 5, 6]
@pytest.mark.skip(reason="Variance-minimizing partitioning returns different result.")
def test_float_lastheavy():
    """A dominant trailing weight should occupy its own partition."""
    weights = [0., 1.1, 1.9, 3., 30.]
    num_parts = 2
    boundaries = partition_balanced(weights, num_parts)
    assert_valid_partition(weights, boundaries, num_parts)
    assert boundaries == [0, 4, 5]
def test_float_midheavy():
    """A dominant middle weight should occupy its own partition."""
    weights = [0., 1.1, 30, 3.]
    num_parts = 3
    boundaries = partition_balanced(weights, num_parts)
    assert_valid_partition(weights, boundaries, num_parts)
    assert boundaries == [0, 2, 3, 4]
def test_balance_bert():
    # Parameters per layer for a transformer model with 24 transformers and hidden dim 1024
    embedding_params = 52559872
    layer_params = 12596224
    # embeddings, 24 identical transformer layers, a zero-cost layer, tied output embeddings
    weights = [embedding_params] + [layer_params] * 24 + [0, embedding_params]

    num_parts = 8
    boundaries = partition_balanced(weights, num_parts)
    assert_valid_partition(weights, boundaries, num_parts)
| 24.193717 | 91 | 0.637524 |
e497c1941ade6a069527180fb574fcdb41fcc079 | 8,135 | py | Python | jc/parsers/ls.py | lyterk/jc | 681176e4c958157ef1f2151b3e57963a6ba48e09 | [
"MIT"
] | null | null | null | jc/parsers/ls.py | lyterk/jc | 681176e4c958157ef1f2151b3e57963a6ba48e09 | [
"MIT"
] | null | null | null | jc/parsers/ls.py | lyterk/jc | 681176e4c958157ef1f2151b3e57963a6ba48e09 | [
"MIT"
] | null | null | null | """jc - JSON CLI output utility `ls` and `vdir` command output parser
Options supported:
- `lbaR1`
- `--time-style=full-iso`
Note: The `-1`, `-l`, or `-b` option of `ls` should be used to correctly
parse filenames that include newline characters. Since `ls` does not encode
newlines in filenames when outputting to a pipe it will cause `jc` to see
multiple files instead of a single file if `-1`, `-l`, or `-b` is not used.
Alternatively, `vdir` can be used, which is the same as running `ls -lb`.
The `epoch` calculated timestamp field is naive. (i.e. based on the local
time of the system the parser is run on)
The `epoch_utc` calculated timestamp field is timezone-aware and is only
available if the timezone field is UTC.
Usage (cli):
$ ls | jc --ls
or
$ jc ls
Usage (module):
import jc
result = jc.parse('ls', ls_command_output)
or
import jc.parsers.ls
result = jc.parsers.ls.parse(ls_command_output)
Schema:
[
{
"filename": string,
"flags": string,
"links": integer,
"parent": string,
"owner": string,
"group": string,
"size": integer,
"date": string,
"epoch": integer, # [0]
"epoch_utc": integer # [1]
}
]
[0] naive timestamp if date field exists and can be converted.
[1] timezone aware timestamp if date field is in UTC and can
be converted.
Examples:
$ ls /usr/bin | jc --ls -p
[
{
"filename": "apropos"
},
{
"filename": "arch"
},
...
]
$ ls -l /usr/bin | jc --ls -p
[
{
"filename": "apropos",
"link_to": "whatis",
"flags": "lrwxrwxrwx.",
"links": 1,
"owner": "root",
"group": "root",
"size": 6,
"date": "Aug 15 10:53"
},
{
"filename": "ar",
"flags": "-rwxr-xr-x.",
"links": 1,
"owner": "root",
"group": "root",
"size": 62744,
"date": "Aug 8 16:14"
},
...
]
$ ls -l /usr/bin | jc --ls -p -r
[
{
"filename": "apropos",
"link_to": "whatis",
"flags": "lrwxrwxrwx.",
"links": "1",
"owner": "root",
"group": "root",
"size": "6",
"date": "Aug 15 10:53"
},
{
"filename": "arch",
"flags": "-rwxr-xr-x.",
"links": "1",
"owner": "root",
"group": "root",
"size": "33080",
"date": "Aug 19 23:25"
},
...
]
"""
import re
import jc.utils
class info():
    """Provides parser metadata (version, author, etc.)"""
    version = '1.10'
    description = '`ls` command parser'
    author = 'Kelly Brazil'
    author_email = 'kellyjonbrazil@gmail.com'

    # Platforms this parser is known to work on.
    compatible = ['linux', 'darwin', 'cygwin', 'aix', 'freebsd']
    # Commands whose output jc will route to this parser automatically.
    magic_commands = ['ls', 'vdir']


# Expose the parser version at module level, as jc convention requires.
__version__ = info.version
def _process(proc_data):
    """
    Final processing to conform to the schema.

    Parameters:

        proc_data:   (List of Dictionaries) raw structured data to process

    Returns:

        List of Dictionaries. Structured data to conform to the schema.
    """
    for entry in proc_data:
        # convert numeric string fields to integers
        for int_key in ('links', 'size'):
            if int_key in entry:
                entry[int_key] = jc.utils.convert_to_int(entry[int_key])

        if 'date' in entry:
            # to speed up processing only try to convert the date if it's not the default format
            is_default_fmt = re.match(
                r'[a-zA-Z]{3}\s{1,2}\d{1,2}\s{1,2}[0-9:]{4,5}', entry['date'])
            if not is_default_fmt:
                ts = jc.utils.timestamp(entry['date'])
                entry['epoch'] = ts.naive
                entry['epoch_utc'] = ts.utc

    return proc_data
def parse(data, raw=False, quiet=False):
    """
    Main text parsing function

    Parameters:

        data:        (string)  text data to parse
        raw:         (boolean) unprocessed output if True
        quiet:       (boolean) suppress warning messages if True

    Returns:

        List of Dictionaries. Raw or processed structured data.
    """
    jc.utils.compatibility(__name__, info.compatible, quiet)
    jc.utils.input_type_check(data)

    raw_output = []
    warned = False
    parent = ''
    next_is_parent = False
    new_section = False

    linedata = data.splitlines()

    if jc.utils.has_data(data):

        # Delete first line if it starts with 'total 1234'
        if re.match(r'total [0-9]+', linedata[0]):
            linedata.pop(0)

        # Look for parent line if glob or -R is used
        # (the permission-flags regex matches an `ls -l` style entry line)
        if not re.match(r'[-dclpsbDCMnP?]([-r][-w][-xsS]){2}([-r][-w][-xtT])[+]?', linedata[0]) \
           and linedata[0].endswith(':'):
            parent = linedata.pop(0)[:-1]

            # Pop following total line if it exists
            if re.match(r'total [0-9]+', linedata[0]):
                linedata.pop(0)

        # Check if -l was used to parse extra data
        if re.match(r'[-dclpsbDCMnP?]([-r][-w][-xsS]){2}([-r][-w][-xtT])[+]?', linedata[0]):
            for entry in linedata:
                output_line = {}

                parsed_line = entry.split(maxsplit=8)

                # a non-entry line ending in ':' starts a new directory section
                if not re.match(r'[-dclpsbDCMnP?]([-r][-w][-xsS]){2}([-r][-w][-xtT])[+]?', entry) \
                   and entry.endswith(':'):
                    parent = entry[:-1]
                    new_section = True

                    # fixup to remove trailing \n in previous entry
                    raw_output[-1]['filename'] = raw_output[-1]['filename'][:-1]
                    continue

                if re.match(r'total [0-9]+', entry):
                    new_section = False
                    continue

                # fix for OSX - doesn't print 'total xx' line if empty directory
                if new_section and entry == '':
                    new_section = False
                    continue

                # fixup for filenames with newlines: a non-entry line outside a
                # section header is a continuation of the previous filename
                if not new_section \
                   and not re.match(r'[-dclpsbDCMnP?]([-r][-w][-xsS]){2}([-r][-w][-xtT])[+]?', entry):
                    raw_output[-1]['filename'] = raw_output[-1]['filename'] + '\n' + entry
                    continue

                # split filenames and links
                if len(parsed_line) == 9:
                    filename_field = parsed_line[8].split(' -> ')
                else:
                    # in case of filenames starting with a newline character
                    filename_field = ['']

                # create list of dictionaries
                output_line['filename'] = filename_field[0]

                if len(filename_field) > 1:
                    output_line['link_to'] = filename_field[1]

                if parent:
                    output_line['parent'] = parent

                output_line['flags'] = parsed_line[0]
                output_line['links'] = parsed_line[1]
                output_line['owner'] = parsed_line[2]
                output_line['group'] = parsed_line[3]
                output_line['size'] = parsed_line[4]
                output_line['date'] = ' '.join(parsed_line[5:8])
                raw_output.append(output_line)
        else:
            # simple (non -l) listing: one filename per line, with optional
            # blank-line + 'dir:' section headers when globs or -R are used
            for entry in linedata:
                output_line = {}

                if entry == '':
                    next_is_parent = True
                    continue

                if next_is_parent and entry.endswith(':'):
                    parent = entry[:-1]
                    next_is_parent = False
                    continue

                if not quiet and next_is_parent and not entry.endswith(':') and not warned:
                    jc.utils.warning_message(['Newline characters detected. Filenames probably corrupted. Use ls -l or -b instead.'])
                    warned = True

                output_line['filename'] = entry

                if parent:
                    output_line['parent'] = parent

                raw_output.append(output_line)

    if raw:
        return raw_output
    else:
        return _process(raw_output)
| 28.54386 | 133 | 0.506085 |
644cdf1b590530f42cc03f56add383668ef1c186 | 12,007 | py | Python | YOLO/YOLOv3/Utils/utils.py | rahulchamoli916/RDD2020 | 86da9ada662fa0c055f44a6f88595e792c576eea | [
"MIT"
] | null | null | null | YOLO/YOLOv3/Utils/utils.py | rahulchamoli916/RDD2020 | 86da9ada662fa0c055f44a6f88595e792c576eea | [
"MIT"
] | null | null | null | YOLO/YOLOv3/Utils/utils.py | rahulchamoli916/RDD2020 | 86da9ada662fa0c055f44a6f88595e792c576eea | [
"MIT"
] | 1 | 2021-06-18T06:33:01.000Z | 2021-06-18T06:33:01.000Z | import colorsys
import cv2
import h5py
from keras import Model
import numpy as np
import os
from matplotlib.colors import rgb_to_hsv, hsv_to_rgb
from PIL import Image, ImageFont, ImageDraw
from timeit import default_timer as timer
# import readline
# readline.parse_and_bind("tab: complete")
# Minimum (height, width) in pixels for a detected logo crop to be considered.
min_logo_size = (10, 10)
def detect_object(yolo, img_path, save_img, save_img_path="./", postfix=""):
    """
    Call YOLO logo detector on input image, optionally save resulting image.

    Args:
      yolo: keras-yolo3 initialized YOLO instance
      img_path: path to image file
      save_img: bool to save annotated image
      save_img_path: path to directory where to save image
      postfix: string to add to filenames
    Returns:
      prediction: list of bounding boxes in format (xmin,ymin,xmax,ymax,class_id,confidence)
      image: unaltered input image as (H,W,C) array
      (both are None when the image cannot be opened)
    """
    try:
        image = Image.open(img_path)
        if image.mode != "RGB":
            image = image.convert("RGB")
        image_array = np.array(image)
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # still propagate; any open/decode failure returns the None pair.
        print("File Open Error! Try again!")
        return None, None

    prediction, new_image = yolo.detect_image(image)

    # insert postfix before the extension, e.g. foo.jpg -> foo_det.jpg
    img_out = postfix.join(os.path.splitext(os.path.basename(img_path)))
    if save_img:
        new_image.save(os.path.join(save_img_path, img_out))

    return prediction, image_array
def parse_input():
    """
    Ask user for input images interactively: individual image paths are
    accumulated; a directory path selects all .jpg/.png files inside it.
    Loops until 'q'/'quit' or a directory is given.
    """
    selected = []
    while True:
        user_path = input("Enter path (q to quit):").strip()
        if user_path in ("q", "quit"):
            break
        if not os.path.exists(user_path):
            print("Error: file not found!")
            continue
        if os.path.isdir(user_path):
            # take every image file directly inside the directory, then stop
            selected = [
                os.path.abspath(os.path.join(user_path, name))
                for name in os.listdir(user_path)
                if name.endswith((".jpg", ".png"))
            ]
            break
        if user_path.endswith((".jpg", ".png")):
            selected.append(os.path.abspath(user_path))
    print(selected)
    return selected
def load_extractor_model(model_name="InceptionV3", flavor=1):
    """Load variant of InceptionV3 or VGG16 model specified.

    Args:
      model_name: string, either InceptionV3 or VGG16
      flavor: int specifying the model variant and input_shape.
        For InceptionV3, the map is {0: default, 1: 200*200, truncate last Inception block,
        2: 200*200, truncate last 2 blocks, 3: 200*200, truncate last 3 blocks, 4: 200*200}
        For VGG16, it only changes the input size, {0: 224 (default), 1: 128, 2: 64}.

    Returns:
      (model_out, preprocess_input, input_shape) — the decapitated keras model,
      its matching preprocessing function, and the expected (H, W, C) input shape.

    NOTE(review): any model_name other than the two handled raises NameError
    (model_out/preprocess_input never bound) — confirm callers only pass these.
    """
    start = timer()
    if model_name == "InceptionV3":
        from keras.applications.inception_v3 import InceptionV3
        from keras.applications.inception_v3 import preprocess_input

        model = InceptionV3(weights="imagenet", include_top=False)
        # layer index at which to truncate, per flavor (-1 keeps the full model)
        trunc_layer = [-1, 279, 248, 228, -1]
        i_layer = flavor
        model_out = Model(
            inputs=model.inputs, outputs=model.layers[trunc_layer[i_layer]].output
        )
        input_shape = (299, 299, 3) if flavor == 0 else (200, 200, 3)
    elif model_name == "VGG16":
        from keras.applications.vgg16 import VGG16
        from keras.applications.vgg16 import preprocess_input

        model_out = VGG16(weights="imagenet", include_top=False)
        input_length = [224, 128, 64][flavor]
        input_shape = (input_length, input_length, 3)
    end = timer()
    print("Loaded {} feature extractor in {:.2f}sec".format(model_name, end - start))
    return model_out, preprocess_input, input_shape
def chunks(l, n, preprocessing_function=None):
    """Yield successive n-sized chunks from l.

    General purpose function modified for Keras: loops over the input forever,
    applies an optional per-item preprocessing function, and yields np.array
    batches instead of lists.

    Args:
      l: iterable
      n: number of items to take for each chunk
      preprocessing_function: function that processes image (3D array)
    Returns:
      generator with n-sized np.array preprocessed chunks of the input
    """
    if preprocessing_function is None:
        transform = lambda item: item
    else:
        transform = preprocessing_function

    # in predict_generator, the `steps` argument controls how many batches
    # are actually drawn from this endless generator
    while True:
        for start in range(0, len(l), n):
            batch = [transform(item) for item in l[start:start + n]]
            yield np.array(batch)
def load_features(filename):
    """
    Load pre-saved HDF5 features for all logos in the LogosInTheWild database

    Args:
      filename: path to HDF5 file written by save_features()
    Returns:
      (features, brand_map, input_shape) — feature matrix as np.array,
      per-feature brand indices as a list, and the extractor input shape.
    """
    start = timer()
    # get database features
    with h5py.File(filename, "r") as hf:
        brand_map = list(hf.get("brand_map"))
        input_shape = list(hf.get("input_shape"))
        features = hf.get("features")
        # materialize the dataset before the file is closed
        features = np.array(features)
    end = timer()
    print(
        "Loaded {} features from {} in {:.2f}sec".format(
            features.shape, filename, end - start
        )
    )
    return features, brand_map, input_shape
def save_features(filename, features, brand_map, input_shape):
    """
    Save features to compressed HDF5 file for later use

    Args:
      filename: destination HDF5 path (overwritten if it exists)
      features: 2D feature array
      brand_map: per-feature brand indices
      input_shape: extractor input shape to record alongside the features
    Returns:
      None
    """
    print("Saving {} features into {}... ".format(features.shape, filename), end="")
    # reduce file size by saving as float16
    features = features.astype(np.float16)
    start = timer()
    with h5py.File(filename, "w") as hf:
        hf.create_dataset("features", data=features, compression="lzf")
        hf.create_dataset("brand_map", data=brand_map)
        hf.create_dataset("input_shape", data=input_shape)
    end = timer()
    print("done in {:.2f}sec".format(end - start))
    return None
def features_from_image(img_array, model, preprocess, batch_size=100):
    """
    Extract features from image array given a decapitated keras model.
    Use a generator to avoid running out of memory for large inputs.

    Args:
      img_array: (N, H, W, C) list/array of input images
      model: keras model producing per-image feature maps
      preprocess: per-image preprocessing function
      batch_size: images per prediction batch
    Returns:
      features: (N, F) array of 1D features
    """
    if len(img_array) == 0:
        return np.array([])

    n_images = len(img_array)
    n_steps = n_images // batch_size + 1
    batch_gen = chunks(img_array, batch_size, preprocessing_function=preprocess)
    features = model.predict_generator(batch_gen, steps=n_steps)

    # the generator loops forever, so drop any wrapped-around extra rows
    features = features[:n_images]

    # flatten the per-sample feature map (last three dims) into one axis
    flat_dim = np.prod(features.shape[1:])
    return features.reshape(features.shape[0], flat_dim)
##################################################
# image processing and bounding box functions
##################################################
def pad_image(img, shape, mode="constant_mean"):
    """
    Resize and pad image to given size.

    Args:
      img: (H, W, C) input numpy array
      shape: (H', W') destination size
      mode: filling mode for new padded pixels. Default = 'constant_mean' returns
        grayscale padding with pixel intensity equal to mean of the array. Other
        options include np.pad() options, such as 'edge', 'mean' (by row/column)...
    Returns:
      new_im: (H', W', C) padded numpy array
    """
    if mode == "constant_mean":
        pad_kwargs = {"mode": "constant", "constant_values": np.mean(img)}
    else:
        pad_kwargs = {"mode": mode}

    src_h, src_w = img.shape[:2]
    dst_h, dst_w = shape[:2]

    # rescale so the image fits inside the target, preserving aspect ratio
    scale = min(dst_w / src_w, dst_h / src_h)
    new_w, new_h = int(src_w * scale), int(src_h * scale)
    img = cv2.resize(img, (new_w, new_h))

    # split leftover space evenly on each side; odd pixel goes to the far edge
    pad_x = dst_w - new_w
    pad_y = dst_h - new_h
    xpad = (pad_x // 2, pad_x // 2 + pad_x % 2)
    ypad = (pad_y // 2, pad_y // 2 + pad_y % 2)

    return np.pad(img, pad_width=(ypad, xpad, (0, 0)), **pad_kwargs)
def bbox_colors(n):
    """
    Define n distinct bounding box colors

    Args:
      n: number of colors
    Returns:
      colors: (n, 3) np.array with RGB integer values in [0-255] range
    """
    # evenly spaced hues at full saturation/value, converted to RGB
    hsv_points = [(idx / n, 1.0, 1.0) for idx in range(n)]
    rgb = 255 * np.array([colorsys.hsv_to_rgb(*hsv) for hsv in hsv_points])

    np.random.seed(10101)   # Fixed seed for consistent colors across runs.
    np.random.shuffle(rgb)  # Shuffle colors to decorrelate adjacent classes.
    np.random.seed(None)    # Reset seed to default.

    return rgb.astype(int)
def contents_of_bbox(img, bbox_list, expand=1.0):
    """
    Extract portions of image inside bounding boxes list.

    Args:
      img: (H,W,C) image array
      bbox_list: list of bounding box specifications, with first 4 elements
        specifying box corners in (xmin, ymin, xmax, ymax) format.
      expand: divisor applied to box coordinates before cropping.
    Returns:
      candidates: list of 3D image arrays
      i_candidates_too_small: list of indices of small candidates dropped
    """
    candidates = []
    i_candidates_too_small = []
    for i, (xmin, ymin, xmax, ymax, *_) in enumerate(bbox_list):
        # for very low confidence sometimes logos found outside of the image
        if ymin > img.shape[0] or xmin > img.shape[1]:
            continue
        # NOTE(review): `//` floor-divides by the float `expand`; for
        # non-integer expand values this differs from `/` then int() —
        # confirm floor division is intended here.
        xmin, ymin = int(xmin // expand), int(ymin // expand)
        xmax, ymax = int(np.round(xmax // expand)), int(np.round(ymax // expand))
        # do not even consider tiny logos (module-level min_logo_size)
        if xmax - xmin > min_logo_size[1] and ymax - ymin > min_logo_size[0]:
            candidates.append(img[ymin:ymax, xmin:xmax])
        else:
            i_candidates_too_small.append(i)

    return candidates, i_candidates_too_small
def draw_annotated_box(image, box_list_list, label_list, color_list):
    """
    Draw box and overhead label on image.

    Args:
      image: PIL image object (modified in place and also returned)
      box_list_list: list of lists of bounding boxes, one for each label, each box in
        (xmin, ymin, xmax, ymax [, score]) format (where score is an optional float)
      label_list: list of string to go above box
      color_list: list of RGB tuples
    Returns:
      image: annotated PIL image object
    """
    font_path = os.path.join(
        os.path.dirname(__file__), "keras_yolo3/font/FiraMono-Medium.otf"
    )
    # scale font and line thickness with the image size
    font = ImageFont.truetype(
        font=font_path, size=np.floor(3e-2 * image.size[1] + 0.5).astype("int32")
    )
    thickness = (image.size[0] + image.size[1]) // 300

    draw = ImageDraw.Draw(image)
    for box_list, label, color in zip(box_list_list, label_list, color_list):
        # ImageDraw requires color as a tuple, not a list/array
        if not isinstance(color, tuple):
            color = tuple(color)
        for box in box_list:
            # deal with empty predictions
            if len(box) < 4:
                continue

            # if score is also passed, append to label
            thelabel = "{}".format(label)
            if len(box) > 4:
                thelabel += " {:.2f}".format(box[-1])
            label_size = draw.textsize(thelabel, font)

            # round corners to integers and clamp to the image bounds
            xmin, ymin, xmax, ymax = box[:4]
            ymin = max(0, np.floor(ymin + 0.5).astype("int32"))
            xmin = max(0, np.floor(xmin + 0.5).astype("int32"))
            ymax = min(image.size[1], np.floor(ymax + 0.5).astype("int32"))
            xmax = min(image.size[0], np.floor(xmax + 0.5).astype("int32"))

            # place the label above the box if it fits, otherwise below
            if ymin - label_size[1] >= 0:
                text_origin = np.array([xmin, ymin - label_size[1]])
            else:
                text_origin = np.array([xmin, ymax])

            # emulate a thick outline by drawing nested 1px rectangles
            for i in range(thickness):
                draw.rectangle([xmin + i, ymin + i, xmax - i, ymax - i], outline=color)
            draw.rectangle(
                [tuple(text_origin), tuple(text_origin + label_size)], fill=color
            )
            draw.text(text_origin, thelabel, fill=(0, 0, 0), font=font)

    del draw
    return image
d6eeaaaabf5b3f72e80bcf5f87b100be8cd2f38a | 17,432 | py | Python | examples/language_model/bert/run_pretrain.py | chenyu-2020/PaddleNLP | 805c7894b735e73de5d15cc4bd06ab26d2149e9f | [
"Apache-2.0"
] | 6 | 2021-06-08T13:19:35.000Z | 2021-06-24T15:08:54.000Z | examples/language_model/bert/run_pretrain.py | xihuanafeng/PaddleNLP | 14c3209118b2cadcce9a8f66b760c9cddb3a02ad | [
"Apache-2.0"
] | null | null | null | examples/language_model/bert/run_pretrain.py | xihuanafeng/PaddleNLP | 14c3209118b2cadcce9a8f66b760c9cddb3a02ad | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import collections
import itertools
import logging
import os
import random
import time
import h5py
import distutils.util
from functools import partial
from concurrent.futures import ThreadPoolExecutor
import numpy as np
import paddle
import paddle.distributed as dist
from paddle.io import DataLoader, Dataset
from paddlenlp.data import Stack, Tuple, Pad
from paddlenlp.transformers import BertForPretraining, BertModel, BertPretrainingCriterion
from paddlenlp.transformers import ErnieForPretraining, ErnieModel, ErniePretrainingCriterion
from paddlenlp.transformers import BertTokenizer, ErnieTokenizer
from paddlenlp.transformers import LinearDecayWithWarmup
FORMAT = '%(asctime)s-%(levelname)s: %(message)s'
logging.basicConfig(level=logging.INFO, format=FORMAT)
logger = logging.getLogger(__name__)
# Maps the --model_type CLI choice to its
# (base model, pretraining model, pretraining criterion, tokenizer) classes.
MODEL_CLASSES = {
    "bert":
    (BertModel, BertForPretraining, BertPretrainingCriterion, BertTokenizer),
    "ernie":
    (ErnieModel, ErnieForPretraining, ErniePretrainingCriterion, ErnieTokenizer)
}
def parse_args():
    """Build and parse the command-line arguments for BERT/ERNIE pretraining.

    Returns:
        argparse.Namespace with all training hyperparameters and paths.
    """
    parser = argparse.ArgumentParser()
    # --- model selection ---
    parser.add_argument(
        "--model_type",
        default=None,
        type=str,
        required=True,
        help="Model type selected in the list: " +
        ", ".join(MODEL_CLASSES.keys()), )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pre-trained model or shortcut name selected in the list: "
        + ", ".join(
            sum([
                # classes[-1] is the tokenizer class for each model type
                list(classes[-1].pretrained_init_configuration.keys())
                for classes in MODEL_CLASSES.values()
            ], [])), )
    # --- data and output locations ---
    parser.add_argument(
        "--input_dir",
        default=None,
        type=str,
        required=True,
        help="The input directory where the data will be read from.", )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    # --- optimization hyperparameters ---
    parser.add_argument(
        "--max_predictions_per_seq",
        default=80,
        type=int,
        help="The maximum total of masked tokens in input sequence")
    parser.add_argument(
        "--batch_size",
        default=8,
        type=int,
        help="Batch size per GPU/CPU for training.", )
    parser.add_argument(
        "--learning_rate",
        default=5e-5,
        type=float,
        help="The initial learning rate for Adam.")
    parser.add_argument(
        "--weight_decay",
        default=0.0,
        type=float,
        help="Weight decay if we apply some.")
    parser.add_argument(
        "--adam_epsilon",
        default=1e-8,
        type=float,
        help="Epsilon for Adam optimizer.")
    parser.add_argument(
        "--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
    # --- schedule ---
    parser.add_argument(
        "--num_train_epochs",
        default=3,
        type=int,
        help="Total number of training epochs to perform.", )
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
    )
    parser.add_argument(
        "--warmup_steps",
        default=0,
        type=int,
        help="Linear warmup over warmup_steps.")
    # --- logging / checkpointing ---
    parser.add_argument(
        "--logging_steps",
        type=int,
        default=500,
        help="Log every X updates steps.")
    parser.add_argument(
        "--save_steps",
        type=int,
        default=500,
        help="Save checkpoint every X updates steps.")
    # --- environment and precision ---
    parser.add_argument(
        "--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--device",
        type=str,
        default="gpu",
        choices=["cpu", "gpu", "xpu"],
        help="Device for selecting for the training.")
    parser.add_argument(
        "--use_amp",
        type=distutils.util.strtobool,
        default=False,
        help="Enable mixed precision training.")
    parser.add_argument(
        "--scale_loss",
        type=float,
        default=2**15,
        help="The value of scale_loss for fp16.")
    args = parser.parse_args()
    return args
def set_seed(args):
    """Seed the python, numpy and paddle RNGs, offset by the distributed
    rank so every worker draws a distinct-but-reproducible stream."""
    rank_seed = args.seed + paddle.distributed.get_rank()
    random.seed(rank_seed)
    np.random.seed(rank_seed)
    paddle.seed(rank_seed)
class WorkerInitObj(object):
    """Picklable callable that seeds numpy and python RNGs per DataLoader
    worker, offsetting the base seed by the worker id."""

    def __init__(self, seed):
        self.seed = seed

    def __call__(self, id):
        worker_seed = self.seed + id
        np.random.seed(seed=worker_seed)
        random.seed(worker_seed)
def create_pretraining_dataset(input_file, max_pred_length, shared_list, args,
                               worker_init):
    """Build a shuffled DataLoader over one pre-sharded HDF5 pretraining file.

    Returns (train_data_loader, input_file).
    NOTE(review): `shared_list` is accepted but never used here — confirm it
    is required by the caller's prefetching interface.
    """
    train_data = PretrainingDataset(
        input_file=input_file, max_pred_length=max_pred_length)
    # files have been sharded, no need to dispatch again
    train_batch_sampler = paddle.io.BatchSampler(
        train_data, batch_size=args.batch_size, shuffle=True)

    # DataLoader cannot be pickled because of its place.
    # If it can be pickled, use global function instead of lambda and use
    # ProcessPoolExecutor instead of ThreadPoolExecutor to prefetch.
    def _collate_data(data, stack_fn=Stack()):
        # Collate per-sample fields into batch tensors, flattening the
        # masked-LM positions/labels into padded 1D gather indices.
        num_fields = len(data[0])
        out = [None] * num_fields
        # input_ids, segment_ids, input_mask, masked_lm_positions,
        # masked_lm_labels, next_sentence_labels, mask_token_num
        for i in (0, 1, 2, 5):
            out[i] = stack_fn([x[i] for x in data])
        batch_size, seq_length = out[0].shape
        size = num_mask = sum(len(x[3]) for x in data)
        # Padding for divisibility by 8 for fp16 or int8 usage
        if size % 8 != 0:
            size += 8 - (size % 8)
        # masked_lm_positions
        # Organize as a 1D tensor for gather or use gather_nd
        out[3] = np.full(size, 0, dtype=np.int32)
        # masked_lm_labels
        out[4] = np.full([size, 1], -1, dtype=np.int64)
        mask_token_num = 0
        for i, x in enumerate(data):
            for j, pos in enumerate(x[3]):
                # flatten (sample, position) into a single gather index
                out[3][mask_token_num] = i * seq_length + pos
                out[4][mask_token_num] = x[4][j]
                mask_token_num += 1
        # mask_token_num
        out.append(np.asarray([mask_token_num], dtype=np.float32))
        return out

    train_data_loader = DataLoader(
        dataset=train_data,
        batch_sampler=train_batch_sampler,
        collate_fn=_collate_data,
        num_workers=0,
        worker_init_fn=worker_init,
        return_list=True)
    return train_data_loader, input_file
class PretrainingDataset(Dataset):
    """Map-style dataset over one pre-generated HDF5 pretraining shard.

    The whole shard is loaded into memory as numpy arrays at construction.
    """

    def __init__(self, input_file, max_pred_length):
        self.input_file = input_file
        self.max_pred_length = max_pred_length
        f = h5py.File(input_file, "r")
        keys = [
            'input_ids', 'input_mask', 'segment_ids', 'masked_lm_positions',
            'masked_lm_ids', 'next_sentence_labels'
        ]
        # materialize all datasets so the file handle can be closed immediately
        self.inputs = [np.asarray(f[key][:]) for key in keys]
        f.close()

    def __len__(self):
        'Denotes the total number of samples'
        return len(self.inputs[0])

    def __getitem__(self, index):
        # unpack all six fields for this sample, cast to int64
        [
            input_ids, input_mask, segment_ids, masked_lm_positions,
            masked_lm_ids, next_sentence_labels
        ] = [
            input[index].astype(np.int64)
            if indice < 5 else np.asarray(input[index].astype(np.int64))
            for indice, input in enumerate(self.inputs)
        ]
        # TODO: whether to use reversed mask by changing 1s and 0s to be
        # consistent with nv bert
        # convert 0/1 mask to additive attention mask: 0 keeps, -1e9 masks
        input_mask = (1 - np.reshape(
            input_mask.astype(np.float32), [1, 1, input_mask.shape[0]])) * -1e9

        index = self.max_pred_length
        # store number of masked tokens in index
        # outputs of torch.nonzero diff with that of numpy.nonzero by zip
        padded_mask_indices = (masked_lm_positions == 0).nonzero()[0]
        if len(padded_mask_indices) != 0:
            # first zero position marks the end of the real masked tokens
            index = padded_mask_indices[0].item()
            mask_token_num = index
        else:
            index = self.max_pred_length
            mask_token_num = self.max_pred_length
        # masked_lm_labels = np.full(input_ids.shape, -1, dtype=np.int64)
        # masked_lm_labels[masked_lm_positions[:index]] = masked_lm_ids[:index]
        masked_lm_labels = masked_lm_ids[:index]
        masked_lm_positions = masked_lm_positions[:index]
        # softmax_with_cross_entropy enforce last dim size equal 1
        masked_lm_labels = np.expand_dims(masked_lm_labels, axis=-1)
        next_sentence_labels = np.expand_dims(next_sentence_labels, axis=-1)

        return [
            input_ids, segment_ids, input_mask, masked_lm_positions,
            masked_lm_labels, next_sentence_labels
        ]
def do_train(args):
    """Run BERT pretraining: build model/optimizer, then loop over epochs,
    shard files and batches, logging and checkpointing along the way.

    Shard files are rotated across distributed ranks; the next shard's
    DataLoader is prefetched on a background thread while the current one
    is consumed.
    """
    paddle.set_device(args.device)
    if paddle.distributed.get_world_size() > 1:
        paddle.distributed.init_parallel_env()
    set_seed(args)
    # Per-worker RNG seeding for the DataLoader workers.
    worker_init = WorkerInitObj(args.seed + paddle.distributed.get_rank())
    args.model_type = args.model_type.lower()
    base_class, model_class, criterion_class, tokenizer_class = MODEL_CLASSES[
        args.model_type]
    tokenizer = tokenizer_class.from_pretrained(args.model_name_or_path)
    pretrained_models_list = list(
        model_class.pretrained_init_configuration.keys())
    # Known architecture name -> build from config (random init);
    # otherwise treat the argument as a checkpoint path/name to load.
    if args.model_name_or_path in pretrained_models_list:
        model = model_class(
            base_class(**model_class.pretrained_init_configuration[
                args.model_name_or_path]))
    else:
        model = model_class.from_pretrained(args.model_name_or_path)
    criterion = criterion_class(
        getattr(model, model_class.base_model_prefix).config["vocab_size"])
    if paddle.distributed.get_world_size() > 1:
        model = paddle.DataParallel(model)
    # If use defalut last_epoch, lr of the first iteration is 0.
    # Use `last_epoch = 0` to be consistent with nv bert.
    # NOTE(review): when args.max_steps <= 0 this references
    # train_data_loader before it is ever assigned (it is only created
    # inside the epoch loop below) and would raise NameError -- confirm
    # max_steps > 0 is always passed, or compute this after loading.
    num_training_steps = args.max_steps if args.max_steps > 0 else len(
        train_data_loader) * args.num_train_epochs
    lr_scheduler = LinearDecayWithWarmup(
        args.learning_rate, num_training_steps, args.warmup_steps, last_epoch=0)
    # Generate parameter names needed to perform weight decay.
    # All bias and LayerNorm parameters are excluded.
    decay_params = [
        p.name for n, p in model.named_parameters()
        if not any(nd in n for nd in ["bias", "norm"])
    ]
    optimizer = paddle.optimizer.AdamW(
        learning_rate=lr_scheduler,
        epsilon=args.adam_epsilon,
        parameters=model.parameters(),
        weight_decay=args.weight_decay,
        apply_decay_param_fun=lambda x: x in decay_params)
    if args.use_amp:
        scaler = paddle.amp.GradScaler(init_loss_scaling=args.scale_loss)
    # Single background worker used to prefetch the next shard's loader.
    pool = ThreadPoolExecutor(1)
    global_step = 0
    tic_train = time.time()
    for epoch in range(args.num_train_epochs):
        # Collect this epoch's training shard files and shuffle them with
        # an epoch-dependent (but rank-independent) seed.
        files = [
            os.path.join(args.input_dir, f) for f in os.listdir(args.input_dir)
            if os.path.isfile(os.path.join(args.input_dir, f)) and "train" in f
        ]
        files.sort()
        num_files = len(files)
        random.Random(args.seed + epoch).shuffle(files)
        f_start_id = 0
        shared_file_list = {}
        # Pick the first shard for this rank; the `remainder` offset
        # spreads ranks when there are more ranks than files.
        if paddle.distributed.get_world_size() > num_files:
            remainder = paddle.distributed.get_world_size() % num_files
            data_file = files[(
                f_start_id * paddle.distributed.get_world_size() +
                paddle.distributed.get_rank() + remainder * f_start_id) %
                              num_files]
        else:
            data_file = files[(f_start_id * paddle.distributed.get_world_size()
                               + paddle.distributed.get_rank()) % num_files]
        previous_file = data_file
        train_data_loader, _ = create_pretraining_dataset(
            data_file, args.max_predictions_per_seq, shared_file_list, args,
            worker_init)
        # TODO(guosheng): better way to process single file
        single_file = True if f_start_id + 1 == len(files) else False
        for f_id in range(f_start_id, len(files)):
            # With more than one file, the first iteration only kicks off
            # the prefetch of the next shard (the first shard was loaded
            # synchronously above).
            if not single_file and f_id == f_start_id:
                continue
            if paddle.distributed.get_world_size() > num_files:
                data_file = files[(
                    f_id * paddle.distributed.get_world_size() +
                    paddle.distributed.get_rank() + remainder * f_id) %
                                  num_files]
            else:
                data_file = files[(f_id * paddle.distributed.get_world_size() +
                                   paddle.distributed.get_rank()) % num_files]
            previous_file = data_file
            # Prefetch the next shard's DataLoader on the worker thread.
            dataset_future = pool.submit(create_pretraining_dataset, data_file,
                                         args.max_predictions_per_seq,
                                         shared_file_list, args, worker_init)
            train_reader_cost = 0.0
            train_run_cost = 0.0
            total_samples = 0
            reader_start = time.time()
            for step, batch in enumerate(train_data_loader):
                train_reader_cost += time.time() - reader_start
                train_start = time.time()
                global_step += 1
                (input_ids, segment_ids, input_mask, masked_lm_positions,
                 masked_lm_labels, next_sentence_labels,
                 masked_lm_scale) = batch
                # Forward + loss under (optional) AMP autocast.
                with paddle.amp.auto_cast(
                        args.use_amp,
                        custom_white_list=["layer_norm", "softmax", "gelu"]):
                    prediction_scores, seq_relationship_score = model(
                        input_ids=input_ids,
                        token_type_ids=segment_ids,
                        attention_mask=input_mask,
                        masked_positions=masked_lm_positions)
                    loss = criterion(prediction_scores, seq_relationship_score,
                                     masked_lm_labels, next_sentence_labels,
                                     masked_lm_scale)
                if args.use_amp:
                    scaler.scale(loss).backward()
                    scaler.minimize(optimizer, loss)
                else:
                    loss.backward()
                    optimizer.step()
                lr_scheduler.step()
                optimizer.clear_grad()
                train_run_cost += time.time() - train_start
                total_samples += args.batch_size
                # Periodic logging (rank 0 only); cost counters reset on
                # every rank so averages stay aligned with logging_steps.
                if global_step % args.logging_steps == 0:
                    if paddle.distributed.get_rank() == 0:
                        logger.info(
                            "global step: %d, epoch: %d, batch: %d, loss: %f, "
                            "avg_reader_cost: %.5f sec, avg_batch_cost: %.5f sec, avg_samples: %.5f, ips: %.5f sequences/sec"
                            % (global_step, epoch, step, loss,
                               train_reader_cost / args.logging_steps,
                               (train_reader_cost + train_run_cost) /
                               args.logging_steps, total_samples /
                               args.logging_steps, total_samples /
                               (train_reader_cost + train_run_cost)))
                    train_reader_cost = 0.0
                    train_run_cost = 0.0
                    total_samples = 0
                # Periodic checkpointing (rank 0 only).
                if global_step % args.save_steps == 0:
                    if paddle.distributed.get_rank() == 0:
                        output_dir = os.path.join(args.output_dir,
                                                  "model_%d" % global_step)
                        if not os.path.exists(output_dir):
                            os.makedirs(output_dir)
                        # need better way to get inner model of DataParallel
                        model_to_save = model._layers if isinstance(
                            model, paddle.DataParallel) else model
                        model_to_save.save_pretrained(output_dir)
                        tokenizer.save_pretrained(output_dir)
                        paddle.save(
                            optimizer.state_dict(),
                            os.path.join(output_dir, "model_state.pdopt"))
                if global_step >= args.max_steps:
                    del train_data_loader
                    return
                reader_start = time.time()
            # Swap in the prefetched loader for the next shard.
            del train_data_loader
            train_data_loader, data_file = dataset_future.result(timeout=None)
if __name__ == "__main__":
    # Script entry point: parse CLI arguments and launch pretraining.
    do_train(parse_args())
| 38.566372 | 125 | 0.60584 |
d7c6bb2753042bc511de1f269e69c01f3c04c3a8 | 3,934 | py | Python | research/PromptKGC/models/albert/__init__.py | zjunlp/PromptKG | 791bf82390eeadc30876d9f95e8dd26cd05de3dc | [
"MIT"
] | 11 | 2022-02-04T12:32:37.000Z | 2022-03-25T11:49:48.000Z | research/PromptKGC/models/albert/__init__.py | zjunlp/PromptKG | 791bf82390eeadc30876d9f95e8dd26cd05de3dc | [
"MIT"
] | null | null | null | research/PromptKGC/models/albert/__init__.py | zjunlp/PromptKG | 791bf82390eeadc30876d9f95e8dd26cd05de3dc | [
"MIT"
] | 4 | 2022-02-04T05:08:23.000Z | 2022-03-16T02:07:52.000Z | # flake8: noqa
# There's no way to ignore "F401 '...' imported but unused" warnings in this
# module, but to preserve other warnings. So, don't check this module at all.
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from transformers.file_utils import (
_BaseLazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import map: module name -> public names it provides. Only the
# config module is unconditional; tokenizer/model entries are added when
# their optional backends (sentencepiece, tokenizers, torch, TF) exist.
_import_structure = {
    "configuration_albert": ["ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "AlbertConfig"],
}
if is_sentencepiece_available():
    _import_structure["tokenization_albert"] = ["AlbertTokenizer"]
if is_tokenizers_available():
    _import_structure["tokenization_albert_fast"] = ["AlbertTokenizerFast"]
if is_torch_available():
    # PyTorch implementations of ALBERT.
    _import_structure["modeling_albert"] = [
        "ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AlbertForMaskedLM",
        "AlbertForMultipleChoice",
        "AlbertForPreTraining",
        "AlbertForQuestionAnswering",
        "AlbertForSequenceClassification",
        "AlbertForTokenClassification",
        "AlbertModel",
        "AlbertPreTrainedModel",
        "load_tf_weights_in_albert",
    ]
if is_tf_available():
    # TensorFlow implementations of ALBERT.
    _import_structure["modeling_tf_albert"] = [
        "TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFAlbertForMaskedLM",
        "TFAlbertForMultipleChoice",
        "TFAlbertForPreTraining",
        "TFAlbertForQuestionAnswering",
        "TFAlbertForSequenceClassification",
        "TFAlbertForTokenClassification",
        "TFAlbertMainLayer",
        "TFAlbertModel",
        "TFAlbertPreTrainedModel",
    ]
# Static type checkers see real imports; at runtime the module is replaced
# by a lazy proxy that imports submodules only on first attribute access.
if TYPE_CHECKING:
    from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig
    if is_sentencepiece_available():
        from .tokenization_albert import AlbertTokenizer
    if is_tokenizers_available():
        from .tokenization_albert_fast import AlbertTokenizerFast
    if is_torch_available():
        from .modeling_albert import (
            ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForPreTraining,
            AlbertForQuestionAnswering,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertModel,
            AlbertPreTrainedModel,
            load_tf_weights_in_albert,
        )
    if is_tf_available():
        from .modeling_tf_albert import (
            TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFAlbertForMaskedLM,
            TFAlbertForMultipleChoice,
            TFAlbertForPreTraining,
            TFAlbertForQuestionAnswering,
            TFAlbertForSequenceClassification,
            TFAlbertForTokenClassification,
            TFAlbertMainLayer,
            TFAlbertModel,
            TFAlbertPreTrainedModel,
        )
else:
    import importlib
    import os
    import sys
    class _LazyModule(_BaseLazyModule):
        """
        Module class that surfaces all objects but only performs associated imports when the objects are requested.
        """
        __file__ = globals()["__file__"]
        __path__ = [os.path.dirname(__file__)]
        def _get_module(self, module_name: str):
            # Resolve a submodule relative to this package on demand.
            return importlib.import_module("." + module_name, self.__name__)
    # Replace this module object in sys.modules with the lazy proxy.
    sys.modules[__name__] = _LazyModule(__name__, _import_structure)
| 31.98374 | 115 | 0.699288 |
4257ee0e79813b9e8a18b59195f5eec44f2343e8 | 2,363 | py | Python | sympy/stats/__init__.py | shipci/sympy | 4b59927bed992b980c9b3faac01becb36feef26b | [
"BSD-3-Clause"
] | 1 | 2015-01-14T22:55:45.000Z | 2015-01-14T22:55:45.000Z | sympy/stats/__init__.py | curzel-it/KiPyCalc | 909c783d5e6967ea58ca93f875106d8a8e3ca5db | [
"MIT"
] | null | null | null | sympy/stats/__init__.py | curzel-it/KiPyCalc | 909c783d5e6967ea58ca93f875106d8a8e3ca5db | [
"MIT"
] | null | null | null | """
SymPy statistics module
Introduces a random variable type into the SymPy language.
Random variables may be declared using prebuilt functions such as
Normal, Exponential, Coin, Die, etc... or built with functions like FiniteRV.
Queries on random expressions can be made using the functions
========================= =============================
Expression Meaning
------------------------- -----------------------------
``P(condition)`` Probability
``E(expression)`` Expected value
``variance(expression)`` Variance
``density(expression)`` Probability Density Function
``sample(expression)`` Produce a realization
``where(condition)`` Where the condition is true
========================= =============================
Examples
========
>>> from sympy.stats import P, E, variance, Die, Normal
>>> from sympy import Eq, simplify
>>> X, Y = Die('X', 6), Die('Y', 6) # Define two six sided dice
>>> Z = Normal('Z', 0, 1) # Declare a Normal random variable with mean 0, std 1
>>> P(X>3) # Probability X is greater than 3
1/2
>>> E(X+Y) # Expectation of the sum of two dice
7
>>> variance(X+Y) # Variance of the sum of two dice
35/6
>>> simplify(P(Z>1)) # Probability of Z being greater than 1
-erf(sqrt(2)/2)/2 + 1/2
"""
# Build the public API incrementally: each submodule is imported and its
# own __all__ is appended, so sympy.stats re-exports everything below.
__all__ = []
from . import rv_interface
from .rv_interface import (
    cdf, covariance, density, dependent, E, given, independent, P, pspace,
    random_symbols, sample, sample_iter, skewness, std, variance, where,
    correlation, moment, cmoment, smoment, sampling_density,
)
__all__.extend(rv_interface.__all__)
# Finite (discrete, finitely-supported) random variable types.
from . import frv_types
from .frv_types import (
    Bernoulli, Binomial, Coin, Die, DiscreteUniform, FiniteRV, Hypergeometric,
    Rademacher,
)
__all__.extend(frv_types.__all__)
# Continuous random variable types.
from . import crv_types
from .crv_types import (
    ContinuousRV,
    Arcsin, Benini, Beta, BetaPrime, Cauchy, Chi, ChiNoncentral, ChiSquared,
    Dagum, Erlang, Exponential, FDistribution, FisherZ, Frechet, Gamma,
    GammaInverse, Kumaraswamy, Laplace, Logistic, LogNormal, Maxwell,
    Nakagami, Normal, Pareto, QuadraticU, RaisedCosine, Rayleigh,
    StudentT, Triangular, Uniform, UniformSum, VonMises, Weibull,
    WignerSemicircle
)
__all__.extend(crv_types.__all__)
# Discrete random variable types with countably infinite support.
from . import drv_types
from .drv_types import (Geometric, Poisson)
__all__.extend(drv_types.__all__)
| 33.28169 | 79 | 0.66441 |
755f9e6f94d2babd94d80b4e70efab8a0f2bbe8d | 761 | py | Python | mobetta/icu/migrations/0001_initial.py | maykinmedia/mobetta | 7c6ce4d9ccb41371e9a6171f35002730b841cc5c | [
"BSD-3-Clause"
] | 5 | 2017-10-26T18:40:48.000Z | 2019-04-09T21:06:33.000Z | mobetta/icu/migrations/0001_initial.py | maykinmedia/mobetta | 7c6ce4d9ccb41371e9a6171f35002730b841cc5c | [
"BSD-3-Clause"
] | 23 | 2017-02-10T16:23:35.000Z | 2019-05-02T11:54:28.000Z | mobetta/icu/migrations/0001_initial.py | maykinmedia/mobetta | 7c6ce4d9ccb41371e9a6171f35002730b841cc5c | [
"BSD-3-Clause"
] | 1 | 2017-03-10T15:05:24.000Z | 2017-03-10T15:05:24.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-04-11 09:02
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the ICUTranslationFile
    # table tracking on-disk ICU translation files and their validity.
    # Do not hand-edit the operations of an applied migration.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='ICUTranslationFile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=512)),
                ('filepath', models.CharField(max_length=1024)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('is_valid', models.BooleanField(default=True)),
            ],
        ),
    ]
| 28.185185 | 114 | 0.595269 |
3f0440a332e725d1be2b9f4d8bf41ca99082b5e6 | 5,580 | py | Python | parse_doc.py | nprapps/idp-georgia | 316eba6195b7f410567a7e11eb4811ff7cba54cc | [
"Unlicense"
] | 1 | 2017-04-15T01:48:27.000Z | 2017-04-15T01:48:27.000Z | parse_doc.py | nprapps/idp-georgia | 316eba6195b7f410567a7e11eb4811ff7cba54cc | [
"Unlicense"
] | 153 | 2017-04-14T18:06:26.000Z | 2017-06-02T13:08:09.000Z | parse_doc.py | nprapps/idp-georgia | 316eba6195b7f410567a7e11eb4811ff7cba54cc | [
"Unlicense"
] | 1 | 2021-02-18T11:15:52.000Z | 2021-02-18T11:15:52.000Z | # _*_ coding:utf-8 _*_
import logging
import re
import app_config
from bs4 import BeautifulSoup
from shortcode import process_shortcode
# Module-level logger configured from the app-wide settings.
logging.basicConfig(format=app_config.LOG_FORMAT)
logger = logging.getLogger(__name__)
logger.setLevel(app_config.LOG_LEVEL)
# NOTE: the ur'' prefixes below are Python 2 syntax (this module targets
# Python 2); under Python 3 they would be a SyntaxError.
# Matches a line containing only "END" (any capitalization).
end_doc_regex = re.compile(ur'^\s*[Ee][Nn][Dd]\s*$',
                           re.UNICODE)
# A run of 50+ '+' characters opens a new section.
new_section_marker_regex = re.compile(ur'^\s*\+{50,}\s*$',
                                      re.UNICODE)
# A run of 50+ '-' characters closes a section.
section_end_marker_regex = re.compile(ur'^\s*-{50,}\s*$',
                                      re.UNICODE)
# A bare '---' line delimits the frontmatter (metadata) area.
frontmatter_marker_regex = re.compile(ur'^\s*-{3}\s*$',
                                      re.UNICODE)
# Splits "key: value" metadata lines into the two capture groups.
extract_metadata_regex = re.compile(ur'^(.*?):(.*)$',
                                    re.UNICODE)
# Matches a "[% ... %]" shortcode line.
shortcode_regex = re.compile(ur'^\s*\[%\s*.*\s*%\]\s*$', re.UNICODE)
def is_section_marker(tag):
    """
    Checks for the beginning of a new section.

    Returns True when the tag's text is a section-start marker
    (a line of 50+ '+' characters, see ``new_section_marker_regex``).
    """
    # Pattern.match returns a Match object or None; bool() collapses the
    # original verbose if/else into a single expression.
    return bool(new_section_marker_regex.match(tag.get_text()))
def is_section_end_marker(tag):
    """
    Checks for the end of a section.

    (The original docstring said "beginning of a new section" -- a
    copy/paste error; this matches the '-' end marker, see
    ``section_end_marker_regex``.)
    """
    # Pattern.match returns a Match object or None; bool() collapses the
    # original verbose if/else into a single expression.
    return bool(section_end_marker_regex.match(tag.get_text()))
def process_headline(contents):
    """Extract a section's headline: the text of the last <h2> tag found.

    Non-<h2> tags are ignored with a warning; returns None (after logging
    an error) when no <h2> is present.
    """
    logger.debug('--process_headline start--')
    headline = None
    for element in contents:
        if element.name != "h2":
            logger.warning('unexpected tag found: Ignore %s' % element.get_text())
            continue
        headline = element.get_text()
    if not headline:
        logger.error('Did not find headline on post. Contents: %s' % contents)
    return headline
def process_metadata(contents):
    """Parse frontmatter tags into a dict of lowercased key/value pairs.

    Each tag's text is matched against ``extract_metadata_regex``
    ("key: value"); unparseable lines are logged and skipped.
    """
    logger.debug('--process_metadata start--')
    metadata = {}
    for tag in contents:
        text = tag.get_text()
        match = extract_metadata_regex.match(text)
        if not match:
            logger.error('Could not parse metadata. Text: %s' % text)
            continue
        # Both key and value are normalized to lowercase.
        metadata[match.group(1).strip().lower()] = match.group(2).strip().lower()
    logger.debug("metadata: %s" % metadata)
    return metadata
def process_section_contents(contents):
    """
    Process episode copy content
    In particular parse and generate HTML from shortcodes
    """
    logger.debug('--process_post_contents start--')
    rendered = []
    for tag in contents:
        # Shortcode lines ("[% ... %]") are expanded to HTML; everything
        # else is serialized as-is.
        if shortcode_regex.match(tag.get_text()):
            rendered.append(process_shortcode(tag))
        else:
            rendered.append(unicode(tag))
    return ''.join(rendered)
def parse_raw_sections(raw_sections):
    """
    parse raw episodes into an array of section objects

    Each raw section is divided into three sub-parts by '---' frontmatter
    markers: headline (before the first marker), metadata (between the
    two markers) and contents (after the second marker).
    """
    sections = []
    for raw_section in raw_sections:
        # Three buckets: 0 = headline tags, 1 = metadata tags, 2 = contents.
        buckets = ([], [], [])
        markers_seen = 0
        for tag in raw_section:
            if frontmatter_marker_regex.match(tag.get_text()):
                markers_seen += 1
            else:
                # Anything after the second marker stays in bucket 2.
                buckets[min(markers_seen, 2)].append(tag)
        section = {u'headline': process_headline(buckets[0])}
        metadata = process_metadata(buckets[1])
        for k, v in metadata.iteritems():
            section[k] = v
        section[u'contents'] = process_section_contents(buckets[2])
        sections.append(section)
    return sections
def split_sections(doc):
    """
    split the raw document into an array of raw sections

    A '+' marker line opens a section and a '-' marker line closes it;
    tags outside any open section (orphan text) are discarded.
    """
    logger.debug('--split_sections start--')
    raw_sections = []
    current = []
    in_section = False
    for child in doc.soup.body.children:
        if is_section_marker(child):
            # Section opened: stop ignoring tags.
            in_section = True
        elif not in_section:
            # Orphan text outside any section: skip it.
            continue
        elif is_section_end_marker(child):
            # Section closed: flush the collected tags.
            in_section = False
            raw_sections.append(current)
            current = []
        else:
            current.append(child)
    return raw_sections
def find_section_id(sections, id):
    """
    Find the section with a given id

    Returns the index of the first section whose 'id' key equals *id*,
    or None when no section matches (sections without an 'id' are skipped).
    """
    for position, candidate in enumerate(sections):
        if 'id' in candidate and candidate['id'] == id:
            return position
    return None
def process_extracted_contents(inline_intro):
    """
    Return the 'contents' value of an extracted section dict, as-is.

    NOTE(review): the original docstring claimed "Remove html markup",
    but no stripping happens here -- the value is returned unchanged.
    """
    return inline_intro['contents']
def parse(doc):
    """
    parse google doc files and extract markup

    Splits the document into sections and returns
    {'sections': [...]}; start/end are always logged, even on error.
    """
    result = {}
    try:
        logger.info('-------------start------------')
        section_list = parse_raw_sections(split_sections(doc))
        logger.info('Number of sections: %s' % len(section_list))
        result['sections'] = section_list
    finally:
        logger.info('-------------end------------')
    return result
| 28.040201 | 78 | 0.58405 |
385072a57b53a2ec7c0f28ec1c5c4e2accc2985d | 386 | py | Python | work/wsgi.py | lestrato/badgepack | 7432c0ead1d5f63dd509620a0bb06bd76828b590 | [
"MS-PL"
] | 3 | 2016-10-21T01:35:46.000Z | 2020-11-07T01:20:05.000Z | work/wsgi.py | lestrato/badgepack | 7432c0ead1d5f63dd509620a0bb06bd76828b590 | [
"MS-PL"
] | 31 | 2016-10-31T19:28:53.000Z | 2017-01-19T16:55:49.000Z | work/wsgi.py | lestrato/badgepack | 7432c0ead1d5f63dd509620a0bb06bd76828b590 | [
"MS-PL"
] | 1 | 2020-11-07T01:20:07.000Z | 2020-11-07T01:20:07.000Z | """
WSGI config for work project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Default to this project's settings module unless already set in the env.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "work.settings")
# Module-level WSGI callable that servers (gunicorn, uwsgi, ...) look up.
application = get_wsgi_application()
| 22.705882 | 78 | 0.782383 |
4bdf158d1c0c65daa9ba753b0ee4626b4ee7fbb0 | 403 | py | Python | add_windows/AddRule.py | rahulchaurasiya1/cautious-waddle | 8906aa99a30c0261622680f4b9668bb880fbdd93 | [
"MIT"
] | 2 | 2020-06-19T19:33:06.000Z | 2020-12-09T17:49:32.000Z | add_windows/AddRule.py | rahulchaurasiya1/cautious-waddle | 8906aa99a30c0261622680f4b9668bb880fbdd93 | [
"MIT"
] | null | null | null | add_windows/AddRule.py | rahulchaurasiya1/cautious-waddle | 8906aa99a30c0261622680f4b9668bb880fbdd93 | [
"MIT"
] | null | null | null | from Database import Database
from add_windows.AddMainWindow import AddMainWindow
class AddRule(AddMainWindow):
    """Dialog for entering a new traffic rule and its fine."""

    def __init__(self, parent=None):
        # Let the shared base class load this dialog's Qt layout file.
        super().__init__(parent, "UI/AddRule.ui")

    def addToDatabase(self):
        """Persist the entered rule/fine pair, then close the dialog."""
        rule_text = str(self.rule.text())
        fine_text = str(self.fine.text())
        db = Database.get_instance()
        db.insert_into_rules(rule_text, fine_text)
        self.destroy()
12673ce925985e23b2de80d40142781f2f51bd5b | 144 | py | Python | torchglyph/pipe/__init__.py | speedcell4/torchglyph | 8abc8a35cc0b7a3512c069489c59e4cc1b2588df | [
"MIT"
] | 11 | 2020-03-12T05:58:18.000Z | 2021-12-22T11:46:43.000Z | torchglyph/pipe/__init__.py | speedcell4/torchglyph | 8abc8a35cc0b7a3512c069489c59e4cc1b2588df | [
"MIT"
] | 6 | 2020-08-18T18:56:33.000Z | 2021-11-06T04:22:23.000Z | torchglyph/pipe/__init__.py | speedcell4/torchglyph | 8abc8a35cc0b7a3512c069489c59e4cc1b2588df | [
"MIT"
] | null | null | null | from torchglyph.pipe.abc import *
from torchglyph.pipe.ctx import *
from torchglyph.pipe.packing import *
from torchglyph.pipe.padding import *
| 28.8 | 37 | 0.805556 |
a9e5d5f64bec1a1c1b128d2e69a3509570857e82 | 1,560 | py | Python | plastid_genes/match_clusters.py | maxemil/picozoa-scripts | 89a4e961e9a9bdc298a7d73cc3f03122c4df4229 | [
"MIT"
] | null | null | null | plastid_genes/match_clusters.py | maxemil/picozoa-scripts | 89a4e961e9a9bdc298a7d73cc3f03122c4df4229 | [
"MIT"
] | null | null | null | plastid_genes/match_clusters.py | maxemil/picozoa-scripts | 89a4e961e9a9bdc298a7d73cc3f03122c4df4229 | [
"MIT"
] | null | null | null | from Bio import SeqIO
import glob
import os
import shutil
rec2genome = {}
for rec in SeqIO.parse('Rhodelphis_plastid.faa', 'fasta'):
rec2genome[rec.id] = []
for line in open('Rlimneticus_vs_Rhodelphis_plastid.out'):
line = line.strip('\n').split('\t')
if float(line[2]) > 99:
rec2genome[line[0]].append(line[1])
for line in open('Rmarinus_vs_Rhodelphis_plastid.out'):
line = line.strip('\n').split('\t')
if float(line[2]) > 99:
rec2genome[line[0]].append(line[1])
Rhodelphis_plastid = [v[0] for v in rec2genome.values() if v]
ep2annot = {}
for k,v in rec2genome.items():
for ep in v:
ep2annot[ep] = k
count = 0
for k,v in rec2genome.items():
if len(v) != 1:
pass
else:
count += 1
clsts = set()
for f in glob.glob('../20_endosymbiotic_origin_genes/Orthogroup_Sequences/*'):
for rec in SeqIO.parse(f, 'fasta'):
if rec.id in Rhodelphis_plastid:
clsts.add(os.path.basename(f).replace('.fa', ''))
for c in clsts:
if len(glob.glob('../20_endosymbiotic_origin_genes/Orthogroup_LGT_trees/trees/{}.treefile'.format(c))) > 0:
shutil.copyfile('../20_endosymbiotic_origin_genes/Orthogroup_LGT_trees/trees/{}.treefile'.format(c), 'Orthogroup_trees/{}.treefile'.format(c))
elif len(glob.glob('../20_endosymbiotic_origin_genes/Orthogroup_alignments_trees/trees/{}.treefile'.format(c))) > 0:
shutil.copyfile('../20_endosymbiotic_origin_genes/Orthogroup_alignments_trees/trees/{}.treefile'.format(c), 'Orthogroup_trees/{}.treefile'.format(c))
| 34.666667 | 157 | 0.673077 |
6c7490b792d76d5492b0c25fc41215ac0c6a768f | 5,980 | py | Python | modules/rats/jrat.py | nidsche/viper | d4e3b572fe772efc06e6879f7403b1c49e37f46f | [
"BSD-3-Clause"
] | 97 | 2017-12-18T15:19:28.000Z | 2022-03-25T07:10:00.000Z | modules/reversing/viper/rats/jrat.py | robertdigital/CIRTKit | 58b8793ada69320ffdbdd4ecdc04a3bb2fa83c37 | [
"MIT"
] | 1 | 2019-01-29T16:29:27.000Z | 2019-01-29T16:29:27.000Z | modules/reversing/viper/rats/jrat.py | robertdigital/CIRTKit | 58b8793ada69320ffdbdd4ecdc04a3bb2fa83c37 | [
"MIT"
] | 21 | 2018-04-04T18:12:13.000Z | 2021-06-12T09:40:58.000Z | # Originally written by Kevin Breen (@KevTheHermit):
# https://github.com/kevthehermit/RATDecoders/blob/master/ClientMesh.py
#Standard Imports Go Here
import os
import sys
from base64 import b64decode
import string
from zipfile import ZipFile
from cStringIO import StringIO
from Crypto.Cipher import AES, DES3
#Helper Functions Go Here
# This extracts the Encryption Key and Config File from the Jar and or Dropper
def get_parts(data):
    """
    Extract the encryption key and encrypted config from a jRAT jar blob.

    Reads ``key.dat`` (key), ``config.dat`` (encrypted config) and
    ``enc.dat`` (encrypted inner jar, installer builds) from the zip.
    Returns ``(key, config)``, recursing through get_dropper() for
    installer jars, or ``(None, None)`` when the blob is not usable.
    """
    new_zip = StringIO(data)
    enckey = None
    dropper = None
    conf = None
    try:
        with ZipFile(new_zip, 'r') as jar:  # renamed from `zip` (builtin)
            for name in jar.namelist():  # get all the file names
                if name == "key.dat":  # this file contains the encryption key
                    enckey = jar.read(name)
                if name == "enc.dat":  # if this exists, jRAT has an installer / dropper
                    dropper = jar.read(name)
                if name == "config.dat":  # this is the encrypted config file
                    conf = jar.read(name)
    except Exception:
        # BUGFIX: this is a module-level function, so the original
        # ``self.log(...)`` raised a NameError whenever a non-zip blob
        # came in (and ``.encode('hex')`` is Python-2-only). Report the
        # problem and bail out instead.
        print("Dropped File is not Jar File starts with Hex Chars: {0}".format(repr(data[:5])))
        return None, None
    if enckey and conf:
        return enckey, conf
    elif enckey and dropper:
        # Installer jar: decrypt the embedded jar and recurse into it.
        newkey, conf = get_dropper(enckey, dropper)
        return newkey, conf
    else:
        return None, None
# This extracts the Encryption Key and New conf from a 'Dropper' jar
def get_dropper(enckey, dropper):
try:
split = enckey.split('\x2c')
key = split[0][:16]
for x in split: # grab each line of the config and decode it.
try:
drop = b64decode(x).decode('hex')
except:
drop = b64decode(x[16:]).decode('hex')
new_zipdata = decrypt_aes(key, dropper)
new_key, conf = get_parts(new_zipdata)
return new_key, conf
except:
return None, None
def string_print(line):
    """Strip *line* down to printable ASCII, excluding space (0x21-0x7e)."""
    printable = [ch for ch in line if 32 < ord(ch) < 127]
    return ''.join(printable)
def messy_split(long_line):
    """
    Split a '='-joined base64 blob back into padded base64 chunks.

    Splitting on '=' destroys the base64 padding, so each non-empty
    piece gets its '=' padding restored with modulo arithmetic.
    """
    pieces = long_line.split('=')
    return [piece + "=" * ((4 - len(piece) % 4) % 4)
            for piece in pieces if len(piece) != 0]
# AES Decrypt
def decrypt_aes(enckey, data):
    # NOTE(review): PyCrypto's AES.new(key) with no mode argument
    # defaults to ECB -- confirm this matches the jRAT format.
    cipher = AES.new(enckey)  # set the cipher
    return cipher.decrypt(data)  # decrypt the data
# DES Decrypt
def decrypt_des(enckey, data):
    # Triple-DES, same default-mode caveat as above.
    cipher = DES3.new(enckey)  # set the cipher
    return cipher.decrypt(data)  # decrypt the data
def old_aes(conf, enckey):
    """Decrypt a v3.2.2-4.2 AES config blob into 'SPLIT'-separated fields."""
    plaintext = string_print(decrypt_aes(enckey, conf))
    return plaintext.split('SPLIT')
def new_aes(conf, enckey):
    """Decrypt a v4.2+ base64-chunked AES config into 'SPLIT'-separated fields."""
    chunks = [decrypt_aes(enckey, b64decode(section))
              for section in messy_split(conf)]
    decoded_config = ''.join(chunks)
    return string_print(decoded_config).split('SPLIT')
def old_des(conf, enckey):
    """Decrypt a pre-3.2.2 3DES config blob into 'SPLIT'-separated fields."""
    plaintext = string_print(decrypt_des(enckey, conf))
    return plaintext.split('SPLIT')
def parse_config(raw_config, enckey):
    """
    Map raw ``key=value`` jRAT config fields onto friendly names.

    ``raw_config`` is the list of decrypted 'SPLIT'-separated fields;
    ``enckey`` is the raw encryption key, stored hex-encoded under
    'EncryptionKey'. Unknown keys are ignored.
    """
    import binascii  # local import to keep the module's imports unchanged

    # Straightforward one-to-one field renames ('tiemout' is the RAT's
    # actual (sic) key spelling -- do not "fix" it).
    simple_fields = {
        'ip': 'Domain', 'port': 'Port', 'os': 'OS', 'mport': 'MPort',
        'perms': 'Perms', 'error': 'Error', 'reconsec': 'RetryInterval',
        'ti': 'TI', 'pass': 'Password', 'id': 'CampaignID', 'mutex': 'Mutex',
        'toms': 'TimeOut', 'per': 'Persistance', 'name': 'InstallName',
        'tiemout': 'TimeOutFlag', 'debugmsg': 'DebugMsg',
    }
    config_dict = {}
    for kv in raw_config:
        if kv == '':
            continue
        # maxsplit=1: values may themselves contain '=' (base64 padding);
        # the original unguarded split crashed on such fields.
        key, value = string_print(kv).split('=', 1)
        if key == 'addresses':
            dom_count = 0
            for dom in value.split(','):
                if dom == '':
                    continue
                # BUGFIX: split each individual "host:port" entry. The
                # original split the whole comma-separated `value`, so
                # every entry was recorded as the first host/port pair.
                host, _, port = dom.partition(':')
                config_dict['Domain {0}'.format(dom_count)] = host
                config_dict['Port {0}'.format(dom_count)] = port
                dom_count += 1
        elif key in simple_fields:
            config_dict[simple_fields[key]] = value
    # binascii.hexlify matches the py2-only ``.encode('hex')`` output and
    # also works on Python 3.
    config_dict["EncryptionKey"] = binascii.hexlify(enckey)
    return config_dict
def config(data):
    """
    Top-level extractor: pull key + config out of a jRAT jar blob and
    decode it into a dict of friendly fields.

    Key length selects the cipher: 16 bytes -> AES (base64-chunked for
    4.2+, raw for older), 24/32 bytes -> 3DES. Returns None when no
    key/config could be extracted or the key length is unrecognised.
    """
    enckey, conf = get_parts(data)
    if enckey is None:  # idiom fix: identity test for None (was ``== None``)
        return None
    raw_config = None
    if len(enckey) == 16:
        # Newer versions use a base64 encoded config.dat
        if '==' in conf:  # this is not a great test but should work 99% of the time
            raw_config = new_aes(conf, enckey)
        else:
            raw_config = old_aes(conf, enckey)
    elif len(enckey) in (24, 32):
        raw_config = old_des(conf, enckey)
    if raw_config is None:
        # ROBUSTNESS: unknown key length previously fell through to a
        # NameError on ``raw_config``; report "no config" instead.
        return None
    config_dict = parse_config(raw_config, enckey)
    return config_dict
ab0318eb07992d87053496a7262d0a0db7ecaf91 | 3,983 | py | Python | Tests/test_dis.py | amaeckelberghe/Pyjion | cdf8fbd3f3808d398a71fca085420f71c7dff106 | [
"MIT"
] | null | null | null | Tests/test_dis.py | amaeckelberghe/Pyjion | cdf8fbd3f3808d398a71fca085420f71c7dff106 | [
"MIT"
] | null | null | null | Tests/test_dis.py | amaeckelberghe/Pyjion | cdf8fbd3f3808d398a71fca085420f71c7dff106 | [
"MIT"
] | null | null | null | from pyjion.dis import print_il, dis
import pyjion
import unittest
import io
import contextlib
class DisassemblerModuleTestCase(unittest.TestCase):
    """Tests for pyjion.dis: disassembling JIT-compiled method bodies.

    The bytearray literals below are captured CIL method bodies (fat and
    thin formats) and must stay byte-identical; each test only checks
    that the printed IL contains a known opcode ("ldarg.1").
    """
    def setUp(self) -> None:
        # Enable the pyjion JIT so test functions get compiled.
        pyjion.enable()
    def tearDown(self) -> None:
        pyjion.disable()
    def test_fat(self):
        # Disassemble a freshly JIT-compiled Python function via dis().
        def test_f():
            a = 1
            b = 2
            c = 3
            d = 4
            return a+b+c+d
        self.assertTrue(test_f() == 10)
        f = io.StringIO()
        with contextlib.redirect_stdout(f):
            dis(test_f)
        self.assertIn("ldarg.1", f.getvalue())
    def test_fat_static(self):
        # Disassemble a pre-captured fat-format CIL body via print_il().
        test_method = bytearray(b'\x03 h\x00\x00\x00\xd3X\n\x03(A\x00\x00\x00\x16\r!0\x19Rc\xd1\x7f\x00\x00\xd3% \x00\x00\x00\x00\xd3X%J\x17XT\x06\x18T\x13\n\x03 h\x01\x00\x00\xd3XM\x03 h\x01\x00\x00\xd3X\x11\n\xdf(\x10\x00\x00\x00!P\x19Rc\xd1\x7f\x00\x00\xd3% \x00\x00\x00\x00\xd3X%J\x17XT\x06\x1cT\x13\n\x03 p\x01\x00\x00\xd3XM\x03 p\x01\x00\x00\xd3X\x11\n\xdf(\x10\x00\x00\x00!p\x19Rc\xd1\x7f\x00\x00\xd3% \x00\x00\x00\x00\xd3X%J\x17XT\x06\x1f\nT\x13\n\x03 x\x01\x00\x00\xd3XM\x03 x\x01\x00\x00\xd3X\x11\n\xdf(\x10\x00\x00\x00!\x90\x19Rc\xd1\x7f\x00\x00\xd3% \x00\x00\x00\x00\xd3X%J\x17XT\x06\x1f\x0eT\x13\n\x03 \x80\x01\x00\x00\xd3XM\x03 \x80\x01\x00\x00\xd3X\x11\n\xdf(\x10\x00\x00\x00\x06\x1f\x10T\x03 h\x01\x00\x00\xd3XM%\x0c\x16\xd3@\x1a\x00\x00\x00!0 nc\xd1\x7f\x00\x00\xd3(:\x00\x00\x00\x03(8\x00\x00\x008G\x01\x00\x00\x08% \x00\x00\x00\x00\xd3X%J\x17XT\x06\x1f\x12T\x03 p\x01\x00\x00\xd3XM%\x0c\x16\xd3@\x1c\x00\x00\x00!\xf0\xbeac\xd1\x7f\x00\x00\xd3(:\x00\x00\x00\x03(8\x00\x00\x00\x13\x0b8\x07\x01\x00\x00\x08% \x00\x00\x00\x00\xd3X%J\x17XT\x06\x1f\x14T(\x00\x00\x00\x00%\x0c\x16\xd3@\x0b\x00\x00\x00\x03(8\x00\x00\x008\xdc\x00\x00\x00\x08\x06\x1f\x16T\x03 x\x01\x00\x00\xd3XM%\x0c\x16\xd3@\x1c\x00\x00\x00!\xb0\x8c]c\xd1\x7f\x00\x00\xd3(:\x00\x00\x00\x03(8\x00\x00\x00\x13\x0b8\xa9\x00\x00\x00\x08% \x00\x00\x00\x00\xd3X%J\x17XT\x06\x1f\x18T(\x00\x00\x00\x00%\x0c\x16\xd3@\x0b\x00\x00\x00\x03(8\x00\x00\x008~\x00\x00\x00\x08\x06\x1f\x1aT\x03 \x80\x01\x00\x00\xd3XM%\x0c\x16\xd3@\x1c\x00\x00\x00!\xb0\x8b]c\xd1\x7f\x00\x00\xd3(:\x00\x00\x00\x03(8\x00\x00\x00\x13\x0b8K\x00\x00\x00\x08% \x00\x00\x00\x00\xd3X%J\x17XT\x06\x1f\x1cT(\x00\x00\x00\x00%\x0c\x16\xd3@\x0b\x00\x00\x00\x03(8\x00\x00\x008 \x00\x00\x00\x08\x06\x1f\x1eT\x0b8\x1c\x00\x00\x00\t\x16>\t\x00\x00\x00&&&\t\x19\xda\r+\xf08\x00\x00\x00\x00\x16\xd38\x01\x00\x00\x00\x07\x03(B\x00\x00\x00*')
        f = io.StringIO()
        with contextlib.redirect_stdout(f):
            print_il(test_method)
        self.assertIn("ldarg.1", f.getvalue())
    def test_thin(self):
        # Disassemble a pre-captured thin-format CIL body via print_il().
        test_method = bytearray(b'\x03 h\x00\x00\x00\xd3X\n\x03(A\x00\x00\x00\x16\r\x06 '
                                b'\x00\x00\x00\x00\xd3T\x03!\xb0\xc6V)\x91\x7f\x00\x00\xd3('
                                b'\x00\x00\x03\x00%\x0c\x16\xd3@\x0b\x00\x00\x00\x03('
                                b'8\x00\x00\x008\x91\x00\x00\x00\x08\x06 '
                                b'\x02\x00\x00\x00\xd3T!\xf0\xc3\x13*\x91\x7f\x00\x00\xd3% '
                                b'\x00\x00\x00\x00\xd3X%J\x17XT\x06 \x04\x00\x00\x00\xd3T('
                                b'\x01\x00\x01\x00%\x0c\x16\xd3@\x0b\x00\x00\x00\x03('
                                b'8\x00\x00\x008P\x00\x00\x00\x08\x06 \x06\x00\x00\x00\xd3T(\x10\x00\x00\x00\x06 '
                                b'\x08\x00\x00\x00\xd3T!\xe0\x1e\xda\x02\x01\x00\x00\x00\xd3% '
                                b'\x00\x00\x00\x00\xd3X%J\x17XT\x06 '
                                b'\n\x00\x00\x00\xd3T\x0b\xdd\x1c\x00\x00\x00\t\x16>\t\x00\x00\x00&&&\x19\tY\r+\xf08'
                                b'\x00\x00\x00\x00\x16\xd38\x01\x00\x00\x00\x07\x03(B\x00\x00\x00*')
        f = io.StringIO()
        with contextlib.redirect_stdout(f):
            print_il(test_method)
        self.assertIn("ldarg.1", f.getvalue())
if __name__ == "__main__":
    # Allow running this test module directly (outside a test runner).
    unittest.main()
| 68.672414 | 1,866 | 0.611599 |
39547dee2a532c091ff4c9aa1c72df1aaa93faec | 1,893 | py | Python | CodeStomp/AmyCare/fit/migrations/0012_auto_20201124_1637.py | mayank712jindal/Code-Innovation-Series-ChitkaraUniversity | 43adf0b75a076d3d6821b20c103c8c079655b77e | [
"MIT"
] | null | null | null | CodeStomp/AmyCare/fit/migrations/0012_auto_20201124_1637.py | mayank712jindal/Code-Innovation-Series-ChitkaraUniversity | 43adf0b75a076d3d6821b20c103c8c079655b77e | [
"MIT"
] | null | null | null | CodeStomp/AmyCare/fit/migrations/0012_auto_20201124_1637.py | mayank712jindal/Code-Innovation-Series-ChitkaraUniversity | 43adf0b75a076d3d6821b20c103c8c079655b77e | [
"MIT"
] | null | null | null | # Generated by Django 3.1.3 on 2020-11-24 11:07
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration for the `fit` app.

    Relaxes a set of Pharmacy fields to be nullable (null=True); the
    text/image fields additionally get an empty-string default.
    """

    dependencies = [
        ('fit', '0011_auto_20201124_1629'),
    ]

    operations = [
        # Image uploads are stored under MEDIA_ROOT/fit/pharmacy.
        migrations.AlterField(
            model_name='pharmacy',
            name='phar_StoreImage',
            field=models.ImageField(default='', null=True, upload_to='fit/pharmacy'),
        ),
        migrations.AlterField(
            model_name='pharmacy',
            name='phar_address',
            field=models.CharField(default='', max_length=500, null=True),
        ),
        migrations.AlterField(
            model_name='pharmacy',
            name='phar_email',
            field=models.CharField(default='', max_length=100, null=True),
        ),
        migrations.AlterField(
            model_name='pharmacy',
            name='phar_idProof',
            field=models.ImageField(default='', null=True, upload_to='fit/pharmacy'),
        ),
        migrations.AlterField(
            model_name='pharmacy',
            name='phar_name',
            field=models.CharField(default='', max_length=100, null=True),
        ),
        migrations.AlterField(
            model_name='pharmacy',
            name='phar_ownerName',
            field=models.CharField(default='', max_length=150, null=True),
        ),
        migrations.AlterField(
            model_name='pharmacy',
            name='phar_phone',
            field=models.IntegerField(null=True),
        ),
        migrations.AlterField(
            model_name='pharmacy',
            name='phar_username',
            field=models.CharField(default='', max_length=50, null=True),
        ),
        # NOTE(review): "pharmay_location" looks misspelled, but it matches
        # the model field name as-is; do not "fix" it here.
        migrations.AlterField(
            model_name='pharmacy',
            name='pharmay_location',
            field=models.CharField(default='', max_length=100, null=True),
        ),
    ]
| 32.084746 | 85 | 0.564184 |
81d6133e341d5b84e5270fc356e06f82a8f037bb | 639 | py | Python | src/figcli/test/cli/data/login.py | figtools/figgy-cli | 88f4ccb8221ef9734f95b2637acfacc6e00983e7 | [
"Apache-2.0"
] | 36 | 2020-07-21T21:22:02.000Z | 2021-10-20T06:55:47.000Z | src/figcli/test/cli/data/login.py | figtools/figgy-cli | 88f4ccb8221ef9734f95b2637acfacc6e00983e7 | [
"Apache-2.0"
] | 2 | 2020-10-29T12:49:15.000Z | 2021-04-29T01:12:05.000Z | src/figcli/test/cli/data/login.py | figtools/figgy-cli | 88f4ccb8221ef9734f95b2637acfacc6e00983e7 | [
"Apache-2.0"
] | null | null | null | import pexpect
from figcli.test.cli.config import *
from figcli.test.cli.figgy import FiggyTest
from figcli.config import *
from figcli.utils.utils import *
import os
class DataLogin(FiggyTest):
    """Scripted `figgy login sandbox` session for the 'data' role test user."""

    def __init__(self, extra_args=""):
        # Spawn the CLI under pexpect so the interactive prompts can be driven.
        super().__init__(pexpect.spawn(f'{CLI_NAME} login sandbox', timeout=20, encoding='utf-8'), extra_args=extra_args)

    def run(self):
        # Answer the interactive login prompts in order; each expect() uses a
        # regex matched against the CLI's output.
        self.expect('.*user name:.*')
        self.sendline('FiggyDataTester')
        # Decline the prompt matching ".*weirdness.*" (exact wording comes
        # from the CLI; presumably an opt-in question — confirm against CLI).
        self.expect('.*weirdness.*')
        self.sendline('n')
        # Pick the 'data' role from the offered options.
        self.expect('.*Options.*devops.*')
        self.sendline('data')
        self.expect('.*Login successful.*')
| 29.045455 | 121 | 0.658842 |
1ca976884177fe67326e99d0d378a567a9d051a2 | 2,842 | py | Python | Pacman-ish_v1.0/vector.py | Naxaes/Pacman | 907ca82b981ff5d750d59c6f5cf8f748a38daded | [
"MIT"
] | null | null | null | Pacman-ish_v1.0/vector.py | Naxaes/Pacman | 907ca82b981ff5d750d59c6f5cf8f748a38daded | [
"MIT"
] | null | null | null | Pacman-ish_v1.0/vector.py | Naxaes/Pacman | 907ca82b981ff5d750d59c6f5cf8f748a38daded | [
"MIT"
] | null | null | null | import math
class Vector2D(tuple):
    """An immutable 2-dimensional vector.

    Inherits from ``tuple``, so instances compare, hash, unpack and
    pickle exactly like an ``(x, y)`` pair.
    """

    def __new__(cls, x, y):
        return tuple.__new__(cls, (x, y))

    def __init__(self, x, y):
        # Components were already stored by __new__; this only keeps the
        # two-argument constructor signature explicit.
        super(Vector2D, self).__init__()

    def __add__(self, other):
        """Component-wise addition with any indexable 2-sequence."""
        return Vector2D(self[0] + other[0], self[1] + other[1])

    __radd__ = __add__

    def __sub__(self, other):
        """Component-wise subtraction."""
        return Vector2D(self[0] - other[0], self[1] - other[1])

    def __mul__(self, other):
        """Dot product for Vector2D operands, scalar multiplication otherwise."""
        if isinstance(other, Vector2D):
            return self[0] * other[0] + self[1] * other[1]
        return Vector2D(self[0] * other, self[1] * other)

    def __rmul__(self, other):
        """Scalar multiplication with the scalar on the left (e.g. ``2 * v``)."""
        if isinstance(other, (int, float)):
            return Vector2D(self[0] * other, self[1] * other)
        # Fix: defer to Python's operator machinery instead of raising a bare
        # ValueError, so unsupported operand types produce the standard
        # TypeError with a helpful message.
        return NotImplemented

    def __neg__(self):
        return Vector2D(-self[0], -self[1])

    def __abs__(self):
        """Return the Euclidean length of the vector."""
        return (self[0] ** 2 + self[1] ** 2) ** 0.5

    def __bool__(self):
        # Only the zero vector is falsy.
        return abs(self) != 0

    def __repr__(self):
        return "(%.2f, %.2f)" % self

    def __getnewargs__(self):
        # Needed so copy/pickle reconstruct through the two-argument __new__.
        return self[0], self[1]

    def rotate(self, angle, radians=True):
        """Rotate self counterclockwise by *angle* (degrees if radians=False)."""
        perpendicular = -self[1], self[0]
        if not radians:
            angle = angle * math.pi / 180.0
        c, s = math.cos(angle), math.sin(angle)
        return Vector2D(self[0] * c + perpendicular[0] * s, self[1] * c + perpendicular[1] * s)

    def normalize(self):
        """Return a vector in the same direction of length 1 (zero stays zero)."""
        length = abs(self)
        if length == 0:
            return Vector2D(0, 0)
        return Vector2D(self[0] / length, self[1] / length)

    def projection(self, other):
        """Return the projection of self onto *other* (zero if either is zero)."""
        if abs(self) == 0 or abs(other) == 0:
            return Vector2D(0, 0)
        return ((self * other) / abs(other) ** 2) * other

    def reflection(self, other):
        """Return self reflected across the line spanned by *other*."""
        return 2 * self.projection(other) - self

    @staticmethod
    def heading(angle, radians=True):
        """Return the unit vector pointing at *angle* (degrees if radians=False)."""
        if radians:
            return Vector2D(math.cos(angle), math.sin(angle))
        return Vector2D(math.cos(math.radians(angle)), math.sin(math.radians(angle)))

    def get_angle(self, other, radians=True):
        """Return the signed angle from self to *other* (0 if either is zero)."""
        if abs(self) == 0 or abs(other) == 0:
            return 0
        if radians:
            return math.atan2(other[1], other[0]) - math.atan2(self[1], self[0])
        return (360 / (2 * math.pi)) * (math.atan2(other[1], other[0]) - math.atan2(self[1], self[0]))
| 31.577778 | 106 | 0.561928 |
16458841f44885df8f6cc2fd9537bc420eaa32d2 | 6,069 | py | Python | python/paddle/fluid/tests/custom_op/test_custom_relu_op_jit.py | zmxdream/Paddle | 04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c | [
"Apache-2.0"
] | 2 | 2021-11-12T11:31:12.000Z | 2021-12-05T10:30:28.000Z | python/paddle/fluid/tests/custom_op/test_custom_relu_op_jit.py | zmxdream/Paddle | 04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c | [
"Apache-2.0"
] | null | null | null | python/paddle/fluid/tests/custom_op/test_custom_relu_op_jit.py | zmxdream/Paddle | 04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c | [
"Apache-2.0"
] | 1 | 2021-08-21T06:57:20.000Z | 2021-08-21T06:57:20.000Z | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
import paddle
import numpy as np
from paddle.utils.cpp_extension import load, get_build_directory
from paddle.utils.cpp_extension.extension_utils import run_cmd
from utils import paddle_includes, extra_cc_args, extra_nvcc_args, IS_WINDOWS, IS_MAC
from test_custom_relu_op_setup import custom_relu_dynamic, custom_relu_static
# Because Windows don't use docker, the shared lib already exists in the
# cache dir, it will not be compiled again unless the shared lib is removed.
file = '{}\\custom_relu_module_jit\\custom_relu_module_jit.pyd'.format(
    get_build_directory())
if os.name == 'nt' and os.path.isfile(file):
    cmd = 'del {}'.format(file)
    run_cmd(cmd, True)

# Compile and load custom op Just-In-Time.
# custom_relu_op_dup.cc is only used for multi ops test,
# not a new op, if you want to test only one op, remove this
# source file
sources = ['custom_relu_op.cc', 'custom_relu_op_dup.cc']
if not IS_MAC:
    # The CUDA source is skipped on macOS (no CUDA toolchain there).
    sources.append('custom_relu_op.cu')
custom_module = load(
    name='custom_relu_module_jit',
    sources=sources,
    extra_include_paths=paddle_includes,  # add for Coverage CI
    extra_cxx_cflags=extra_cc_args,  # test for cc flags
    extra_cuda_cflags=extra_nvcc_args,  # test for nvcc flags
    verbose=True)
class TestJITLoad(unittest.TestCase):
    """Exercises the JIT-compiled custom relu ops against the native paddle API."""

    def setUp(self):
        # Three variants of the same relu op, all from the JIT-built module.
        self.custom_ops = [
            custom_module.custom_relu, custom_module.custom_relu_dup,
            custom_module.custom_relu_no_x_in_backward
        ]
        self.dtypes = ['float32', 'float64']
        if paddle.is_compiled_with_cuda():
            self.dtypes.append('float16')
        self.devices = ['cpu']
        if paddle.is_compiled_with_cuda():
            self.devices.append('gpu')

    def test_static(self):
        """Static-graph outputs must match the reference paddle implementation."""
        for device in self.devices:
            for dtype in self.dtypes:
                # float16 is only exercised on GPU.
                if device == 'cpu' and dtype == 'float16':
                    continue
                x = np.random.uniform(-1, 1, [4, 8]).astype(dtype)
                for custom_op in self.custom_ops:
                    out = custom_relu_static(custom_op, device, dtype, x)
                    pd_out = custom_relu_static(custom_op, device, dtype, x,
                                                False)
                    self.assertTrue(
                        np.array_equal(out, pd_out),
                        "custom op out: {},\n paddle api out: {}".format(
                            out, pd_out))

    def test_dynamic(self):
        """Dynamic-graph outputs and x-gradients must match the reference."""
        for device in self.devices:
            for dtype in self.dtypes:
                if device == 'cpu' and dtype == 'float16':
                    continue
                x = np.random.uniform(-1, 1, [4, 8]).astype(dtype)
                for custom_op in self.custom_ops:
                    out, x_grad = custom_relu_dynamic(custom_op, device, dtype,
                                                      x)
                    pd_out, pd_x_grad = custom_relu_dynamic(custom_op, device,
                                                            dtype, x, False)
                    self.assertTrue(
                        np.array_equal(out, pd_out),
                        "custom op out: {},\n paddle api out: {}".format(
                            out, pd_out))
                    self.assertTrue(
                        np.array_equal(x_grad, pd_x_grad),
                        "custom op x grad: {},\n paddle api x grad: {}".format(
                            x_grad, pd_x_grad))

    def test_exception(self):
        """Unsupported dtypes must raise OSError naming the missing kernel."""
        caught_exception = False
        try:
            x = np.random.uniform(-1, 1, [4, 8]).astype('int32')
            custom_relu_dynamic(custom_module.custom_relu, 'cpu', 'int32', x)
        except OSError as e:
            caught_exception = True
            self.assertTrue(
                "function \"relu_cpu_forward\" is not implemented for data type `int32`"
                in str(e))
            # The message embeds the source path; separators differ per OS.
            if IS_WINDOWS:
                self.assertTrue(
                    r"python\paddle\fluid\tests\custom_op\custom_relu_op.cc" in
                    str(e))
            else:
                self.assertTrue(
                    "python/paddle/fluid/tests/custom_op/custom_relu_op.cc" in
                    str(e))
        self.assertTrue(caught_exception)

        caught_exception = False
        # MAC-CI don't support GPU
        if IS_MAC:
            return
        try:
            x = np.random.uniform(-1, 1, [4, 8]).astype('int32')
            custom_relu_dynamic(custom_module.custom_relu, 'gpu', 'int32', x)
        except OSError as e:
            caught_exception = True
            self.assertTrue(
                "function \"relu_cuda_forward_kernel\" is not implemented for data type `int32`"
                in str(e))
            self.assertTrue(
                "python/paddle/fluid/tests/custom_op/custom_relu_op.cu" in
                str(e))
        self.assertTrue(caught_exception)

    def test_load_multiple_module(self):
        """A second, unrelated custom op can be JIT-loaded in the same process."""
        custom_module = load(
            name='custom_conj_jit',
            sources=['custom_conj_op.cc'],
            extra_include_paths=paddle_includes,  # add for Coverage CI
            extra_cxx_cflags=extra_cc_args,  # test for cc flags
            extra_cuda_cflags=extra_nvcc_args,  # test for nvcc flags
            verbose=True)
        custom_conj = custom_module.custom_conj
        self.assertIsNotNone(custom_conj)
# Run the JIT custom-op tests when executed directly.
if __name__ == '__main__':
    unittest.main()
| 41.006757 | 96 | 0.593508 |
9063d7d1535e8aa8047be814aaf0db850185d48e | 729 | py | Python | chainer/functions/math/logarithm_1p.py | takeratta/chainer | 02686e98cd6dc8f20979a1f3a79130f076cbfc6c | [
"MIT"
] | 7 | 2017-05-08T07:02:40.000Z | 2018-12-02T18:35:39.000Z | chainer/functions/math/logarithm_1p.py | takeratta/chainer | 02686e98cd6dc8f20979a1f3a79130f076cbfc6c | [
"MIT"
] | null | null | null | chainer/functions/math/logarithm_1p.py | takeratta/chainer | 02686e98cd6dc8f20979a1f3a79130f076cbfc6c | [
"MIT"
] | 1 | 2021-05-27T16:52:11.000Z | 2021-05-27T16:52:11.000Z | import numpy
from chainer import cuda
from chainer import function
from chainer import utils
from chainer.utils import type_check
class Log1p(function.Function):
    """Elementwise log1p (natural logarithm of 1 + x) function node."""

    @property
    def label(self):
        # Name shown when the computational graph is rendered.
        return 'log1p'

    def check_type_forward(self, in_types):
        # Exactly one input, and it must be a floating-point array.
        type_check.expect(in_types.size() == 1)
        type_check.expect(in_types[0].dtype.kind == 'f')

    def forward_cpu(self, x):
        return utils.force_array(numpy.log1p(x[0])),

    def forward_gpu(self, x):
        return cuda.cupy.log1p(x[0]),

    def backward(self, x, gy):
        # d/dx log1p(x) = 1 / (1 + x); the dtype.type cast keeps the input dtype.
        return utils.force_array(gy[0] / (x[0] + x[0].dtype.type(1.0))),
def log1p(x):
    """Elementwise natural logarithm plus one function.

    Computes ``log(1 + x)`` for each element of the input.

    Args:
        x: Input variable (floating-point array).

    Returns:
        Output variable holding ``log1p`` of each element.
    """
    return Log1p()(x)
| 22.78125 | 72 | 0.657064 |
c841d6d52bc03c9e2ed88b6d1f535867d24398d4 | 3,092 | py | Python | demo.py | krrish94/commExplore | 6f30129e6fb5bcdcd86e7faaf25183e44e018f51 | [
"MIT"
] | null | null | null | demo.py | krrish94/commExplore | 6f30129e6fb5bcdcd86e7faaf25183e44e018f51 | [
"MIT"
] | null | null | null | demo.py | krrish94/commExplore | 6f30129e6fb5bcdcd86e7faaf25183e44e018f51 | [
"MIT"
] | 1 | 2021-12-08T11:59:24.000Z | 2021-12-08T11:59:24.000Z | # The MIT License (MIT)
# Copyright (c) 2014 INSPIRE Lab, BITS Pilani
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Provides a demo of the Communicative Exploration algorithm for a fixed base station.
"""
from math import floor
from time import sleep
from Tkinter import Tk, Canvas, Frame, BOTH
import AStar
import CommExplore
import ConfigFileReader
import GridUI
def main():
cfgReader = ConfigFileReader.ConfigFileReader("barmaze.config")
ret, height, width, numRobots, R, baseX, baseY, initLocs, obstacles = cfgReader.readCfg()
if ret == -1:
print 'readCfg() Unsuccessful!'
sys.exit(-1)
print 'height', height
print 'width', width
print 'numRobots', numRobots
print 'R', R
print 'baseX', baseX
print 'baseY', baseY
print 'initLocs', initLocs
print 'obstacles', obstacles
k = 10
T = 1000
algo = CommExplore.CommExplore(height, width, obstacles, numRobots, initLocs, R, k)
algo.printGrid()
print ''
print ''
cfgc = algo.generateCfgcPopulation()
# for j in range(T):
# algo.runOneIter()
# timeTaken = algo.printVisitedStatus()
# if timeTaken == 0:
# return T
# return timeTaken
if height <= 10:
xoffset = 300
else:
xoffset = 100
if width <= 10:
yoffset = 300
else:
yoffset = 100
maxScreenHeight = 700
cellSize = int(floor(maxScreenHeight / (height + 2)))
root = Tk()
# ex = Example(root)
# root.geometry('400x100+500+500')
# root.mainloop()
gui = GridUI.GridUI(root, height, width, cellSize, algo.gridworld, algo.robots, algo.frontier)
guiHeight = str((height + 2) * cellSize)
guiWidth = str((width + 2) * cellSize)
xOffset = str(xoffset)
yOffset = str(yoffset)
geometryParam = guiWidth + 'x' + guiHeight + '+' + xOffset + '+' + yOffset
root.geometry(geometryParam)
def run():
algo.runOneIter()
gui.redraw(height, width, cellSize, algo.gridworld, algo.robots, algo.frontier)
root.after(50, run)
root.after(50, run)
root.mainloop()
if __name__ == '__main__':
    # Earlier averaging/benchmark harness, kept for reference:
    # numTrials = 10
    # average = 0
    # for i in range(numTrials):
    #     average += main()
    # average /= numTrials
    # print average
    main()
09e020a26251c57d6bd2b4dcf78cf282b9a5074f | 853 | py | Python | desktop/core/ext-py/cryptography-1.3.1/src/_cffi_src/build_commoncrypto.py | vinaymundada27/Hue | 7bffb33bbe7cfa34d340241c4ba3b19476211b2a | [
"Apache-2.0"
] | 2 | 2020-02-05T04:57:55.000Z | 2021-03-03T23:29:30.000Z | desktop/core/ext-py/cryptography-1.3.1/src/_cffi_src/build_commoncrypto.py | vinaymundada27/Hue | 7bffb33bbe7cfa34d340241c4ba3b19476211b2a | [
"Apache-2.0"
] | null | null | null | desktop/core/ext-py/cryptography-1.3.1/src/_cffi_src/build_commoncrypto.py | vinaymundada27/Hue | 7bffb33bbe7cfa34d340241c4ba3b19476211b2a | [
"Apache-2.0"
] | 2 | 2019-06-17T11:51:56.000Z | 2020-07-25T08:29:56.000Z | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
from _cffi_src.utils import build_ffi_for_binding
# cffi bindings for Apple's CommonCrypto / Security APIs, assembled from the
# per-module cdef/source fragments under _cffi_src/commoncrypto/.
ffi = build_ffi_for_binding(
    module_name="_commoncrypto",
    module_prefix="_cffi_src.commoncrypto.",
    modules=[
        "cf",
        "common_digest",
        "common_hmac",
        "common_key_derivation",
        "common_cryptor",
        "common_symmetric_key_wrap",
        "seccertificate",
        "secimport",
        "secitem",
        "seckey",
        "seckeychain",
        "secpolicy",
        "sectransform",
        "sectrust",
    ],
    # Link against the macOS frameworks these APIs live in.
    extra_link_args=[
        "-framework", "Security", "-framework", "CoreFoundation"
    ],
)
| 25.848485 | 79 | 0.640094 |
46385d7dd6c50bf5f36dc3bcd08d23a0e5f69b5e | 348 | py | Python | lcdblib/__init__.py | acgtcoder/lcdblib | a3e5c81b841f0a06e63641f1cbcc24fc207f40f0 | [
"MIT"
] | 1 | 2018-08-27T11:42:33.000Z | 2018-08-27T11:42:33.000Z | lcdblib/__init__.py | acgtcoder/lcdblib | a3e5c81b841f0a06e63641f1cbcc24fc207f40f0 | [
"MIT"
] | 15 | 2016-11-23T18:50:33.000Z | 2018-04-10T20:07:16.000Z | lcdblib/__init__.py | acgtcoder/lcdblib | a3e5c81b841f0a06e63641f1cbcc24fc207f40f0 | [
"MIT"
] | 1 | 2019-04-08T19:01:17.000Z | 2019-04-08T19:01:17.000Z | from ._version import get_versions
# Version string comes from versioneer's generated _version module.
__version__ = get_versions()['version']
del get_versions

# The following assumes versioneer was configured with the "pep440-pre" style
# in setup.cfg
toks = __version__.split('.dev')
if len(toks) == 2:
    # e.g. "1.2.3.dev4" -> conda version "1.2.3", conda build "4"
    __conda_version__, __conda_build__ = toks
else:
    # No ".dev" suffix: treat as a release and use build number 0.
    __conda_build__ = '0'
    __conda_version__ = toks[0]
| 26.769231 | 77 | 0.744253 |
7504108485f7853a2bf93bc27ae9b89ef9b54c8d | 1,057 | py | Python | zospy/functions/nce.py | MREYE-LUMC/ZOSPy | 0c9d3c0ecd63b8c6999ee7a5ee6f742adc43e465 | [
"CC0-1.0"
] | 1 | 2022-02-11T11:52:04.000Z | 2022-02-11T11:52:04.000Z | zospy/functions/nce.py | MREYE-LUMC/ZOSPy | 0c9d3c0ecd63b8c6999ee7a5ee6f742adc43e465 | [
"CC0-1.0"
] | null | null | null | zospy/functions/nce.py | MREYE-LUMC/ZOSPy | 0c9d3c0ecd63b8c6999ee7a5ee6f742adc43e465 | [
"CC0-1.0"
] | 1 | 2022-03-13T14:36:58.000Z | 2022-03-13T14:36:58.000Z | from zospy.api import constants
from zospy.utils.zputils import proc_constant
def object_change_type(obj, newtype):
    """Change the type of an object (row) in the Non-sequential Component Editor.

    Parameters
    ----------
    obj: INCERow
        The Row/Object whose type is to be changed.
    newtype: str or int
        The new object type, either as string (e.g. 'StandardLens') or as int.
        An integer is treated as if obtained from
        zp.constants.Editors.NCE.ObjectType.

    Returns
    -------
    None

    Examples
    --------
    >>> import zospy as zp
    >>> zos = zp.ZOS()
    >>> zos.connect_as_extension()
    >>> oss = zos.get_primary_system()
    >>> oss.System.MakeNonSequential()
    >>> newobj = oss.NCE.InsertNewObjectAt(0)
    >>> object_change_type(newobj, 'StandardLens')
    """
    # Resolve a string name to its integer constant; integers pass through.
    type_constant = proc_constant(constants.Editors.NCE.ObjectType, newtype)
    # Fetch the settings object for the target type and apply it to the row.
    type_settings = obj.GetObjectTypeSettings(type_constant)
    obj.ChangeType(type_settings)
| 28.567568 | 120 | 0.667928 |
bd2947bd3daf6ba253a4438ab06b8acb14a66119 | 13,009 | py | Python | src/dependency_tracking.py | hw-sw-contracts/revizor | 6aa7694d55d62c554917fc539b1775949508ed4b | [
"MIT"
] | 29 | 2021-05-24T12:45:19.000Z | 2022-03-16T16:53:10.000Z | src/dependency_tracking.py | hw-sw-contracts/revizor | 6aa7694d55d62c554917fc539b1775949508ed4b | [
"MIT"
] | 3 | 2021-11-23T15:37:25.000Z | 2022-01-24T12:01:44.000Z | src/dependency_tracking.py | hw-sw-contracts/revizor | 6aa7694d55d62c554917fc539b1775949508ed4b | [
"MIT"
] | 3 | 2021-06-17T09:00:27.000Z | 2022-03-28T03:42:01.000Z | from iced_x86 import *
from typing import Dict, Sequence
from types import ModuleType
import copy
def create_enum_dict(module: ModuleType) -> Dict[int, str]:
    """Map every integer attribute of *module* to its attribute name.

    Used to build reverse-lookup tables for iced_x86's enum modules, whose
    members are plain module-level ints.
    """
    mapping: Dict[int, str] = {}
    for name, value in vars(module).items():
        if isinstance(value, int):
            mapping[value] = name
    return mapping
# Reverse lookup tables: iced_x86 enum value -> enum member name.
REGISTER_TO_STRING: Dict[int, str] = create_enum_dict(Register)
OP_ACCESS_TO_STRING: Dict[int, str] = create_enum_dict(OpAccess)
FLOW_CONTROL_TO_STRING: Dict[int, str] = create_enum_dict(FlowControl)
MEMORY_SIZE_TO_STRING: Dict[int, str] = create_enum_dict(MemorySize)
def register_to_string(value: int) -> str:
    """Name of a Register enum value, or a diagnostic fallback string."""
    try:
        return REGISTER_TO_STRING[value]
    except KeyError:
        return str(value) + " /*Register enum*/"
def op_access_to_string(value: int) -> str:
    """Name of an OpAccess enum value, or a diagnostic fallback string."""
    try:
        return OP_ACCESS_TO_STRING[value]
    except KeyError:
        return str(value) + " /*OpAccess enum*/"
def flow_control_to_string(value: int) -> str:
    """Name of a FlowControl enum value, or a diagnostic fallback string."""
    try:
        return FLOW_CONTROL_TO_STRING[value]
    except KeyError:
        return str(value) + " /*FlowControl enum*/"
def memory_size_to_string(value: int) -> str:
    """Name of a MemorySize enum value, or a diagnostic fallback string."""
    try:
        return MEMORY_SIZE_TO_STRING[value]
    except KeyError:
        return str(value) + " /*MemorySize enum*/"
def used_reg_to_string(reg_info: UsedRegister) -> str:
    """Format a used register as "REGISTER:ACCESS"."""
    return f"{register_to_string(reg_info.register)}:{op_access_to_string(reg_info.access)}"
def used_mem_to_string(mem_info: UsedMemory) -> str:
    """Render a memory operand as "[SEG:base+index*scale+0xDISP;SIZE;ACCESS]".

    Terms that are absent (no base, no index, zero displacement) are omitted,
    except that a lone zero displacement is still printed so the address part
    is never empty.
    """
    text = "[" + register_to_string(mem_info.segment) + ":"
    has_term = mem_info.base != Register.NONE
    if has_term:
        text += register_to_string(mem_info.base)
    if mem_info.index != Register.NONE:
        if has_term:
            text += "+"
        has_term = True
        text += register_to_string(mem_info.index)
        if mem_info.scale != 1:
            text += "*" + str(mem_info.scale)
    # Print the displacement when non-zero, or when nothing else was printed.
    if mem_info.displacement != 0 or not has_term:
        if has_term:
            text += "+"
        text += f"0x{mem_info.displacement:X}"
    text += ";" + memory_size_to_string(mem_info.memory_size)
    text += ";" + op_access_to_string(mem_info.access) + "]"
    return text
def decode_rflags_bits(rf: int) -> list:
    """Return the names of the RFLAGS bits set in mask *rf*, in a fixed order."""
    named_bits = (
        (RflagsBits.OF, "OF"),
        (RflagsBits.SF, "SF"),
        (RflagsBits.ZF, "ZF"),
        (RflagsBits.AF, "AF"),
        (RflagsBits.CF, "CF"),
        (RflagsBits.PF, "PF"),
        (RflagsBits.DF, "DF"),
        (RflagsBits.IF, "IF"),
        (RflagsBits.AC, "AC"),
        (RflagsBits.UIF, "UIF"),
    )
    return [name for mask, name in named_bits if rf & mask]
def get_register_label(reg_tracking, register_name: str) -> set:
    """Resolve the taint label of a register.

    An untracked register is its own label.  Otherwise the label is the
    union over all aliasing (sub)registers: tracked ones contribute their
    stored labels, untracked ones contribute their own name.
    """
    if register_name not in reg_tracking:
        return {register_name}
    label = set()
    for dep in register_deps(register_name):
        if dep in reg_tracking:
            label |= set(reg_tracking[dep])
        else:
            label.add(dep)
    return label
def get_flag_label(flag_tracking, flag_name: str) -> set:
    """Return the taint label of a flag as a set.

    An untracked flag is its own label.  Tracked entries are stored as
    *lists* (see finalize_tracking), so they are normalized to a set here —
    the original returned the raw list, violating the declared return type.
    Callers only union() the result, so the change is backward compatible.
    """
    if flag_name not in flag_tracking:
        return {flag_name}
    return set(flag_tracking[flag_name])
def get_mem_label(mem_tracking, address: int) -> set:
    """Return the taint label of a memory byte as a set.

    An untracked address is its own label.  Tracked entries are stored as
    *lists* (see finalize_tracking), so they are normalized to a set here —
    the original returned the raw list, violating the declared return type.
    Callers only union() the result, so the change is backward compatible.
    """
    if address not in mem_tracking:
        return {address}
    return set(mem_tracking[address])
def register_deps(reg: str) -> set:
    """Expand a register name into the set of all aliasing (sub)registers.

    E.g. "RAX" -> {"AL", "AH", "AX", "EAX", "RAX"}.  Prints a message and
    exits the process for names it does not recognize.
    """
    if reg == "PC":
        return {reg}
    # Legacy GP registers with separate high/low byte halves (A/B/C/D families).
    for letter in ("A", "B", "C", "D"):
        low, high, word = letter + "L", letter + "H", letter + "X"
        if reg == low:
            return {low}
        if reg == high:
            return {high}
        if reg == word:
            return {low, high, word}
        if reg == "E" + word:
            return {low, high, word, "E" + word}
        if reg == "R" + word:
            return {low, high, word, "E" + word, "R" + word}
    # Pointer/index registers: a low byte alias but no high-byte alias.
    for base in ("BP", "SI", "DI", "SP", "IP"):
        if reg == base + "L":
            return {base + "L"}
        if reg == base:
            return {base + "L", base}
        if reg == "E" + base:
            return {base + "L", base, "E" + base}
        if reg == "R" + base:
            return {base + "L", base, "E" + base, "R" + base}
    # Numbered 64-bit registers R8..R15 with B/W/D sub-views.
    for num in range(8, 16):
        name = "R" + str(num)
        if reg == name + "B":
            return {name + "B"}
        if reg == name + "W":
            return {name + "B", name + "W"}
        if reg == name + "D":
            return {name + "B", name + "W", name + "D"}
        if reg == name:
            return {name + "B", name + "W", name + "D", name}
    print(f"Unsupported register {reg}")
    exit(1)
class DependencyTracker:
    """Taint tracker over decoded x86 instructions.

    Maintains label maps for registers, flags, and memory bytes; for each
    tracked instruction, the union of all source labels is propagated to
    every target.  Observations accumulate the labels of the observed state
    into ``observed_labels``.  Supports checkpoint/rollback of the whole
    tracking state.
    """
    # TODO:
    # 1) When we observe an instruction operands,
    # right now we do not distinguish between 1st and 2nd operand. Fix that!!

    def __init__(self, code_biteness, initial_observations=None):
        # Avoid the mutable-default pitfall for initial_observations.
        if initial_observations is None:
            initial_observations = []
        # Label maps: register name / flag name / byte address -> list of labels.
        self.flag_tracking = {}
        self.reg_tracking = {}
        self.mem_tracking = {}
        # Bitness handed to the iced_x86 Decoder (e.g. 64).
        self.code_biteness = code_biteness
        # Source/target sets of the instruction currently being tracked.
        self.src_regs = set()
        self.src_flags = set()
        self.src_mems = set()
        self.trg_regs = set()
        self.trg_flags = set()
        self.trg_mems = set()
        self.debug = False
        self.initial_observations = initial_observations
        self.observed_labels = set(self.initial_observations)
        # When True, flags left undefined by an instruction also count as sources.
        self.strict_undefined = True
        self.checkpoints = []

    def reset(self):
        """Clear all tracking state; keeps configuration and initial observations."""
        self.flag_tracking = {}
        self.reg_tracking = {}
        self.mem_tracking = {}
        self.observed_labels = set(self.initial_observations)
        self.src_regs = set()
        self.src_flags = set()
        self.src_mems = set()
        self.trg_regs = set()
        self.trg_flags = set()
        self.trg_mems = set()
        self.checkpoints = []

    def initialize(self, instruction):
        """Decode *instruction* bytes and collect source/target registers and flags.

        Memory sources/targets are reported separately via track_memory_access.
        The buffer is expected to hold at most one instruction (asserted below).
        """
        # TODO: this function is extremely slow, has to get optimized
        # Collect source and target registers/flags
        self.src_regs = set()
        self.src_flags = set()
        self.src_mems = set()
        self.trg_regs = set()
        self.trg_flags = set()
        self.trg_mems = set()

        decoder = Decoder(self.code_biteness, instruction)
        # NOTE(review): formatter appears unused in this method.
        formatter = FastFormatter(FormatterSyntax.NASM)  # Formatter(FormatterSyntax.NASM)
        info_factory = InstructionInfoFactory()
        index = 0

        for instr in decoder:
            info = info_factory.info(instr)

            if self.debug:
                print(f"{instr}")
                for reg_info in info.used_registers():
                    print(f" Used reg: {used_reg_to_string(reg_info)}")
                for mem_info in info.used_memory():
                    print(f" Used mem: {used_mem_to_string(mem_info)}")
                if instr.rflags_read != RflagsBits.NONE:
                    print(f" RFLAGS Read: {decode_rflags_bits(instr.rflags_read)}")
                if instr.rflags_written != RflagsBits.NONE:
                    print(f" RFLAGS Written: {decode_rflags_bits(instr.rflags_written)}")
                if instr.rflags_cleared != RflagsBits.NONE:
                    print(f" RFLAGS Cleared: {decode_rflags_bits(instr.rflags_cleared)}")
                if instr.rflags_set != RflagsBits.NONE:
                    print(f" RFLAGS Set: {decode_rflags_bits(instr.rflags_set)}")
                if instr.rflags_undefined != RflagsBits.NONE:
                    print(f" RFLAGS Undefined: {decode_rflags_bits(instr.rflags_undefined)}")
                if instr.rflags_modified != RflagsBits.NONE:
                    print(f" RFLAGS Modified: {decode_rflags_bits(instr.rflags_modified)}")
                print(f" FlowControl: {flow_control_to_string(instr.flow_control)}")

            # Classify each used register as source and/or target by access mode.
            for reg_info in info.used_registers():
                if op_access_to_string(reg_info.access) in ["READ", "READ_WRITE", "COND_READ"]:
                    self.src_regs.add(register_to_string(reg_info.register))
                if op_access_to_string(reg_info.access) in ["WRITE", "READ_WRITE", "COND_WRITE"]:
                    self.trg_regs.add(register_to_string(reg_info.register))

            # Any non-fallthrough instruction writes the program counter.
            if flow_control_to_string(instr.flow_control) != "NEXT":
                self.trg_regs.add("PC")

            self.src_flags = set(decode_rflags_bits(instr.rflags_read))
            if self.strict_undefined:
                # Conservatively treat undefined flags as read.
                self.src_flags = self.src_flags.union(
                    set(decode_rflags_bits(instr.rflags_undefined)))
            self.trg_flags = set(decode_rflags_bits(instr.rflags_modified))

            if self.debug:
                print(f" Source Registers: {self.src_regs}")
                print(f" Target Registers: {self.trg_regs}")
                print(f" Source Flags: {self.src_flags}")
                print(f" Target Flags: {self.trg_flags}")

            index = index + 1

        assert (index <= 1)

    def track_memory_access(self, address, size, mode):
        """Record a concrete *size*-byte access at *address*; mode is 'READ' or 'WRITE'."""
        if self.debug:
            print(f"Track Memory Access {address} {size} {mode}")
        # Tracking concrete memory accesses
        if mode == "READ":
            for i in range(0, size):
                self.src_mems.add(address + i)
        elif mode == "WRITE":
            for i in range(0, size):
                self.trg_mems.add(address + i)
        else:
            print(f"Unsupported mode {mode}")
            exit(1)

    def finalize_tracking(self):
        """Propagate the union of all source labels to every target reg/flag/byte."""
        # Compute the new dependency maps
        # Compute source label
        src_label = set()
        for reg in self.src_regs:
            src_label = src_label.union(get_register_label(self.reg_tracking, reg))
        for flag in self.src_flags:
            src_label = src_label.union(get_flag_label(self.flag_tracking, flag))
        for addr in self.src_mems:
            src_label = src_label.union(get_mem_label(self.mem_tracking, addr))

        # Propagate label to all targets
        for reg in self.trg_regs:
            self.reg_tracking[reg] = list(src_label)
        for flg in self.trg_flags:
            self.flag_tracking[flg] = list(src_label)
        for mem in self.trg_mems:
            self.mem_tracking[mem] = list(src_label)

        if self.debug:
            print("Tracking information")
            print(f"Source label: {src_label}")
            print(f"Registers: {self.reg_tracking}")
            print(f"Flags: {self.flag_tracking}")
            print(f"Memory: {self.mem_tracking}")

    def observe_instruction(self, mode):
        """Accumulate labels: mode 'PC' observes the program counter,
        mode 'OPS' observes all source registers of the current instruction."""
        if self.debug:
            print(f"ObservedLabels: {self.observed_labels}")
        if mode == "PC":
            # Add regLabel(PC) to the set of observed labels
            self.observed_labels = \
                self.observed_labels.union(get_register_label(self.reg_tracking, "PC"))
        elif mode == "OPS":
            # For all registers r in the instruction operands
            # (i.e., all source registers), Add regLabel(r) to the set of observed labels
            for reg in self.src_regs:
                self.observed_labels = \
                    self.observed_labels.union(get_register_label(self.reg_tracking, reg))
        else:
            print(f"Invalid mode {mode}")
            exit(1)
        if self.debug:
            print(f"ObserveInstruction {mode} : {self.observed_labels}")

    def observe_memory_address(self, address: int, size: int):
        """Accumulate the labels of *size* bytes starting at *address*."""
        # Add memLabel(address) to the set of observed labels
        if self.debug:
            print(f"ObservedLabels: {self.observed_labels}")
        for i in range(0, size):
            self.observed_labels = \
                self.observed_labels.union(get_mem_label(self.mem_tracking, address + i))
        if self.debug:
            print(f"ObserveMemoryAddress {address} {size} : {self.observed_labels}")

    def save_state(self):
        """Return a deep copy of (flag, reg, mem, observed) tracking state."""
        # return a copy of the tracker state!
        return copy.deepcopy(self.flag_tracking), \
               copy.deepcopy(self.reg_tracking), \
               copy.deepcopy(self.mem_tracking), \
               copy.deepcopy(self.observed_labels)

    def restore_state(self, flag_tracking, reg_tracking, mem_tracking, observed_labels):
        """Replace the tracking state with deep copies of the given snapshot."""
        self.flag_tracking = copy.deepcopy(flag_tracking)
        self.reg_tracking = copy.deepcopy(reg_tracking)
        self.mem_tracking = copy.deepcopy(mem_tracking)
        self.observed_labels = copy.deepcopy(observed_labels)

    def checkpoint(self):
        """Push a snapshot of the current tracking state."""
        t = self.save_state()
        self.checkpoints.append(t)

    def rollback(self):
        """Restore the most recent checkpoint; exits if none exist."""
        if len(self.checkpoints) > 0:
            t = self.checkpoints.pop()
            self.restore_state(*t)
        else:
            print("There are no more checkpoints")
            exit(1)

    def get_observed_dependencies(self):
        """Return a deep copy of the accumulated observed labels."""
        return copy.deepcopy(self.observed_labels)
| 36.036011 | 97 | 0.580906 |
9e6361b14c7c2e1b38c152891bc19775f6db2631 | 7,719 | py | Python | DeepLearning/PyTorch/book_repo/p2ch13/model.py | dSalazar10/Course-Exploring_Deep_Learning | e79cbc7c4802c9b2d62d7fc419eb77b4d2fed355 | [
"MIT"
] | null | null | null | DeepLearning/PyTorch/book_repo/p2ch13/model.py | dSalazar10/Course-Exploring_Deep_Learning | e79cbc7c4802c9b2d62d7fc419eb77b4d2fed355 | [
"MIT"
] | null | null | null | DeepLearning/PyTorch/book_repo/p2ch13/model.py | dSalazar10/Course-Exploring_Deep_Learning | e79cbc7c4802c9b2d62d7fc419eb77b4d2fed355 | [
"MIT"
] | null | null | null | import math
import random
from collections import namedtuple
import torch
from torch import nn as nn
import torch.nn.functional as F
from util.logconf import logging
from util.unet import UNet
log = logging.getLogger(__name__)
# log.setLevel(logging.WARN)
# log.setLevel(logging.INFO)
log.setLevel(logging.DEBUG)
class UNetWrapper(nn.Module):
    """UNet preceded by an input batch-norm and followed by a sigmoid head."""

    def __init__(self, **kwargs):
        super().__init__()

        # Normalize the raw input channels before they reach the UNet.
        self.input_batchnorm = nn.BatchNorm2d(kwargs['in_channels'])
        self.unet = UNet(**kwargs)
        # Squash the UNet output into (0, 1).
        self.final = nn.Sigmoid()

        self._init_weights()

    def _init_weights(self):
        # Kaiming-initialize every conv/linear layer.  Exact type match only:
        # subclasses of these layer types are not matched by `type(m) in ...`.
        init_set = {
            nn.Conv2d,
            nn.Conv3d,
            nn.ConvTranspose2d,
            nn.ConvTranspose3d,
            nn.Linear,
        }
        for m in self.modules():
            if type(m) in init_set:
                nn.init.kaiming_normal_(m.weight.data, mode='fan_out', nonlinearity='relu', a=0)
                if m.bias is not None:
                    # Bias range mirrors the fan-out-based weight scale.
                    fan_in, fan_out = nn.init._calculate_fan_in_and_fan_out(m.weight.data)
                    bound = 1 / math.sqrt(fan_out)
                    nn.init.normal_(m.bias, -bound, bound)

        # nn.init.constant_(self.unet.last.bias, -4)
        # nn.init.constant_(self.unet.last.bias, 4)

    def forward(self, input_batch):
        # batchnorm -> UNet -> sigmoid
        bn_output = self.input_batchnorm(input_batch)
        un_output = self.unet(bn_output)
        fn_output = self.final(un_output)
        return fn_output
class SegmentationAugmentation(nn.Module):
    """Applies one random 2D affine augmentation (plus optional noise) to an
    image batch and its segmentation mask, keeping the two aligned.

    Each constructor argument enables one augmentation; None/0 disables it.
    """

    def __init__(self, flip=None, offset=None, scale=None, rotate=None, noise=None):
        super().__init__()
        self.flip = flip        # truthy: random per-axis mirroring
        self.offset = offset    # max translation per axis (affine-grid units)
        self.scale = scale      # max fractional scale change per axis
        self.rotate = rotate    # truthy: rotate by a uniform random angle
        self.noise = noise      # scale factor for additive unit-normal noise

    def forward(self, input_g, label_g):
        # One random transform per call, broadcast across the whole batch.
        transform_t = self._build2dTransformMatrix()
        # log.debug([input_g.shape, label_g.shape])
        transform_t = transform_t.expand(input_g.shape[0], -1, -1)
        transform_t = transform_t.to(input_g.device, torch.float32)
        # affine_grid takes the 2x3 top of the 3x3 homogeneous matrix.
        affine_t = F.affine_grid(
            transform_t[:,:2],
            input_g.size(),
            align_corners=False,
        )

        augmented_input_g = F.grid_sample(
            input_g,
            affine_t,
            padding_mode='border',
            align_corners=False,
        )
        # Resample the mask with the same grid so it stays aligned with the image.
        augmented_label_g = F.grid_sample(
            label_g.to(torch.float32),
            affine_t,
            padding_mode='border',
            align_corners=False,
        )

        # Noise is applied to the image only, never to the mask.
        if self.noise:
            noise_t = torch.randn_like(augmented_input_g)
            noise_t *= self.noise

            augmented_input_g += noise_t

        # Re-binarize the interpolated mask.
        return augmented_input_g, augmented_label_g > 0.5

    def _build2dTransformMatrix(self):
        # Start from the identity (float64) and compose the enabled effects.
        transform_t = torch.eye(3).to(torch.float64)

        for i in range(2):
            if self.flip:
                # 50% chance to mirror each axis independently.
                if random.random() > 0.5:
                    transform_t[i,i] *= -1

            if self.offset:
                # Translation drawn uniformly from [-offset, offset].
                offset_float = self.offset
                random_float = (random.random() * 2 - 1)
                transform_t[2,i] = offset_float * random_float

            if self.scale:
                # Scale factor drawn uniformly from [1-scale, 1+scale].
                scale_float = self.scale
                random_float = (random.random() * 2 - 1)
                transform_t[i,i] *= 1.0 + scale_float * random_float

        if self.rotate:
            # Uniform random rotation over the full circle.
            angle_rad = random.random() * math.pi * 2
            s = math.sin(angle_rad)
            c = math.cos(angle_rad)

            rotation_t = torch.tensor([
                [c, -s, 0],
                [s, c, 0],
                [0, 0, 1],
            ], dtype=torch.float64)

            transform_t @= rotation_t

        return transform_t
# MaskTuple = namedtuple('MaskTuple', 'raw_dense_mask, dense_mask, body_mask, air_mask, raw_candidate_mask, candidate_mask, lung_mask, neg_mask, pos_mask')
#
# class SegmentationMask(nn.Module):
# def __init__(self):
# super().__init__()
#
# self.conv_list = nn.ModuleList([
# self._make_circle_conv(radius) for radius in range(1, 8)
# ])
#
# def _make_circle_conv(self, radius):
# diameter = 1 + radius * 2
#
# a = torch.linspace(-1, 1, steps=diameter)**2
# b = (a[None] + a[:, None])**0.5
#
# circle_weights = (b <= 1.0).to(torch.float32)
#
# conv = nn.Conv2d(1, 1, kernel_size=diameter, padding=radius, bias=False)
# conv.weight.data.fill_(1)
# conv.weight.data *= circle_weights / circle_weights.sum()
#
# return conv
#
#
# def erode(self, input_mask, radius, threshold=1):
# conv = self.conv_list[radius - 1]
# input_float = input_mask.to(torch.float32)
# result = conv(input_float)
#
# # log.debug(['erode in ', radius, threshold, input_float.min().item(), input_float.mean().item(), input_float.max().item()])
# # log.debug(['erode out', radius, threshold, result.min().item(), result.mean().item(), result.max().item()])
#
# return result >= threshold
#
# def deposit(self, input_mask, radius, threshold=0):
# conv = self.conv_list[radius - 1]
# input_float = input_mask.to(torch.float32)
# result = conv(input_float)
#
# # log.debug(['deposit in ', radius, threshold, input_float.min().item(), input_float.mean().item(), input_float.max().item()])
# # log.debug(['deposit out', radius, threshold, result.min().item(), result.mean().item(), result.max().item()])
#
# return result > threshold
#
# def fill_cavity(self, input_mask):
# cumsum = input_mask.cumsum(-1)
# filled_mask = (cumsum > 0)
# filled_mask &= (cumsum < cumsum[..., -1:])
# cumsum = input_mask.cumsum(-2)
# filled_mask &= (cumsum > 0)
# filled_mask &= (cumsum < cumsum[..., -1:, :])
#
# return filled_mask
#
#
# def forward(self, input_g, raw_pos_g):
# gcc_g = input_g + 1
#
# with torch.no_grad():
# # log.info(['gcc_g', gcc_g.min(), gcc_g.mean(), gcc_g.max()])
#
# raw_dense_mask = gcc_g > 0.7
# dense_mask = self.deposit(raw_dense_mask, 2)
# dense_mask = self.erode(dense_mask, 6)
# dense_mask = self.deposit(dense_mask, 4)
#
# body_mask = self.fill_cavity(dense_mask)
# air_mask = self.deposit(body_mask & ~dense_mask, 5)
# air_mask = self.erode(air_mask, 6)
#
# lung_mask = self.deposit(air_mask, 5)
#
# raw_candidate_mask = gcc_g > 0.4
# raw_candidate_mask &= air_mask
# candidate_mask = self.erode(raw_candidate_mask, 1)
# candidate_mask = self.deposit(candidate_mask, 1)
#
# pos_mask = self.deposit((raw_pos_g > 0.5) & lung_mask, 2)
#
# neg_mask = self.deposit(candidate_mask, 1)
# neg_mask &= ~pos_mask
# neg_mask &= lung_mask
#
# # label_g = (neg_mask | pos_mask).to(torch.float32)
# label_g = (pos_mask).to(torch.float32)
# neg_g = neg_mask.to(torch.float32)
# pos_g = pos_mask.to(torch.float32)
#
# mask_dict = {
# 'raw_dense_mask': raw_dense_mask,
# 'dense_mask': dense_mask,
# 'body_mask': body_mask,
# 'air_mask': air_mask,
# 'raw_candidate_mask': raw_candidate_mask,
# 'candidate_mask': candidate_mask,
# 'lung_mask': lung_mask,
# 'neg_mask': neg_mask,
# 'pos_mask': pos_mask,
# }
#
# return label_g, neg_g, pos_g, lung_mask, mask_dict
| 32.987179 | 155 | 0.563026 |
d8fd4a97c25487cc14c941ca84499676a12093f9 | 11,809 | py | Python | objetto/_changes.py | brunonicko/objetto | 50540bc14c5767e3f8fa09bf59ba322cb043a353 | [
"MIT"
] | 8 | 2020-12-10T21:04:06.000Z | 2022-01-22T07:43:07.000Z | objetto/_changes.py | brunonicko/objetto | 50540bc14c5767e3f8fa09bf59ba322cb043a353 | [
"MIT"
] | null | null | null | objetto/_changes.py | brunonicko/objetto | 50540bc14c5767e3f8fa09bf59ba322cb043a353 | [
"MIT"
] | 1 | 2022-01-20T21:39:48.000Z | 2022-01-20T21:39:48.000Z | # -*- coding: utf-8 -*-
"""Object changes."""
from typing import TYPE_CHECKING
from ._bases import final
from ._constants import INTEGER_TYPES, STRING_TYPES
from ._states import BaseState
from .data import (
Data,
data_attribute,
data_constant_attribute,
data_protected_dict_attribute,
data_protected_list_attribute,
data_protected_set_attribute,
)
if TYPE_CHECKING:
from typing import Any, Callable, Final, Optional
from ._data import DictData, ListData, SetData
from ._history import HistoryObject
from ._objects import BaseObject
__all__ = [
"BaseChange",
"BaseAtomicChange",
"Batch",
"Update",
"DictUpdate",
"ListInsert",
"ListDelete",
"ListUpdate",
"ListMove",
"SetUpdate",
"SetRemove",
]
class BaseChange(Data):
    """
    Base change.
    Inherits from:
      - :class:`objetto.data.Data`
    Inherited By:
      - :class:`objetto.bases.BaseAtomicChange`
      - :class:`objetto.changes.Batch`
    """
    # NOTE: the bare strings after each attribute are attribute docstrings
    # consumed by the documentation build; the trailing `# type:` comments
    # are Python 2-compatible type annotations.
    name = data_attribute(STRING_TYPES, checked=False, abstracted=True)  # type: str
    """
    Name describing the change.
    :type: str
    """
    # The ".._objects|BaseObject" string appears to be a lazy relative import
    # path resolved by the attribute machinery; the real class is only
    # imported under TYPE_CHECKING above, avoiding a circular import.
    obj = data_attribute(
        ".._objects|BaseObject", subtypes=True, checked=False, finalized=True
    )  # type: Final[BaseObject]
    """
    Object being changed.
    :type: objetto.bases.BaseObject
    """
    is_atomic = data_constant_attribute(False, abstracted=True)  # type: bool
    """
    Whether change is atomic or not.
    :type: bool
    """
class BaseAtomicChange(BaseChange):
    """
    Base atomic change.
    Inherits from:
      - :class:`objetto.bases.BaseChange`
    Inherited By:
      - :class:`objetto.changes.Update`
      - :class:`objetto.changes.DictUpdate`
      - :class:`objetto.changes.ListInsert`
      - :class:`objetto.changes.ListDelete`
      - :class:`objetto.changes.ListUpdate`
      - :class:`objetto.changes.ListMove`
      - :class:`objetto.changes.SetUpdate`
      - :class:`objetto.changes.SetRemove`
    """
    # Redo/undo delegates are runtime-only callables, so they are excluded
    # from comparison, serialization and representation.
    __redo__ = data_attribute(
        finalized=True, compared=False, serialized=False, represented=False
    )  # type: Final[Callable]
    """Redo delegate."""
    __undo__ = data_attribute(
        finalized=True, compared=False, serialized=False, represented=False
    )  # type: Final[Callable]
    """Undo delegate."""
    old_state = data_attribute(
        BaseState, subtypes=True, checked=False, finalized=True
    )  # type: Final[BaseState]
    """
    Object state before the change.
    :type: objetto.bases.BaseState
    """
    new_state = data_attribute(
        BaseState, subtypes=True, checked=False, finalized=True
    )  # type: Final[BaseState]
    """
    Object state after the change.
    :type: objetto.bases.BaseState
    """
    old_children = data_protected_set_attribute(
        ".._objects|BaseObject",
        subtypes=True,
        checked=False,
        finalized=True,
    )  # type: Final[SetData[BaseObject]]
    """
    Children objects being released.
    :type: objetto.data.SetData[objetto.bases.BaseObject]
    """
    new_children = data_protected_set_attribute(
        ".._objects|BaseObject",
        subtypes=True,
        checked=False,
        finalized=True,
    )  # type: Final[SetData[BaseObject]]
    """
    Children objects being adopted.
    :type: objetto.data.SetData[objetto.bases.BaseObject]
    """
    history_adopters = data_protected_set_attribute(
        ".._objects|BaseObject",
        subtypes=True,
        checked=False,
        finalized=True,
    )  # type: Final[SetData[BaseObject]]
    """
    Objects adopting the history from the object being changed.
    :type: objetto.data.SetData[objetto.bases.BaseObject]
    """
    history = data_attribute(
        (".._history|HistoryObject", None),
        subtypes=False,
        checked=False,
        finalized=True,
        default=None,
    )  # type: Final[Optional[HistoryObject]]
    """
    History where this change originated from (result of a redo/undo operation).
    :type: objetto.history.HistoryObject or None
    """
    is_atomic = data_constant_attribute(True, finalized=True)  # type: bool
    """
    Whether change is atomic or not.
    :type: bool
    """
@final
class Batch(BaseChange):
    """
    Batch change.
    Inherits from:
      - :class:`objetto.bases.BaseChange`
    """
    name = data_attribute(STRING_TYPES, checked=False)  # type: str
    """
    Name describing the change.
    :type: str
    """
    metadata = data_protected_dict_attribute(
        key_types=STRING_TYPES, checked=False
    )  # type: DictData[str, Any]
    """
    Metadata.
    :type: objetto.data.DictData[str, Any]
    """
    # Overrides the abstract flag: a batch is explicitly non-atomic.
    is_atomic = data_constant_attribute(False, finalized=True)  # type: bool
    """
    Whether change is atomic or not.
    :type: bool
    """
@final
class Update(BaseAtomicChange):
    """
    Object's attributes have been updated.
    Inherits from:
      - :class:`objetto.bases.BaseAtomicChange`
    """
    name = data_attribute(
        STRING_TYPES, checked=False, default="Update Attributes"
    )  # type: str
    """
    Name describing the change.
    :type: str
    """
    # Both mappings are keyed by attribute name (strings).
    old_values = data_protected_dict_attribute(
        checked=False, key_types=STRING_TYPES
    )  # type: DictData[str, Any]
    """
    Old attribute values.
    :type: objetto.data.DictData[str, Any]
    """
    new_values = data_protected_dict_attribute(
        checked=False, key_types=STRING_TYPES
    )  # type: DictData[str, Any]
    """
    New attribute values.
    :type: objetto.data.DictData[str, Any]
    """
@final
class DictUpdate(BaseAtomicChange):
    """
    Dictionary values have been updated.
    Inherits from:
      - :class:`objetto.bases.BaseAtomicChange`
    """
    name = data_attribute(
        STRING_TYPES, checked=False, default="Update Values"
    )  # type: str
    """
    Name describing the change.
    :type: str
    """
    # Unlike Update, keys are not restricted to strings here.
    old_values = data_protected_dict_attribute(
        checked=False
    )  # type: DictData[Any, Any]
    """
    Old values.
    :type: objetto.data.DictData[collections.abc.Hashable, Any]
    """
    new_values = data_protected_dict_attribute(
        checked=False
    )  # type: DictData[Any, Any]
    """
    New values.
    :type: objetto.data.DictData[collections.abc.Hashable, Any]
    """
@final
class ListInsert(BaseAtomicChange):
    """
    Values have been inserted into the list.
    Inherits from:
      - :class:`objetto.bases.BaseAtomicChange`
    """
    name = data_attribute(
        STRING_TYPES, checked=False, default="Insert Values"
    )  # type: str
    """
    Name describing the change.
    :type: str
    """
    index = data_attribute(INTEGER_TYPES, checked=False)  # type: int
    """
    Insertion index.
    :type: int
    """
    last_index = data_attribute(INTEGER_TYPES, checked=False)  # type: int
    """
    Last inserted value index.
    :type: int
    """
    stop = data_attribute(INTEGER_TYPES, checked=False)  # type: int
    """
    Stop index.
    :type: int
    """
    new_values = data_protected_list_attribute(
        checked=False,
    )  # type: ListData[Any]
    """
    New values.
    :type: objetto.data.ListData[Any]
    """
@final
class ListDelete(BaseAtomicChange):
    """
    Values have been removed from the list.
    Inherits from:
      - :class:`objetto.bases.BaseAtomicChange`
    """
    name = data_attribute(
        STRING_TYPES, checked=False, default="Remove Values"
    )  # type: str
    """
    Name describing the change.
    :type: str
    """
    index = data_attribute(INTEGER_TYPES, checked=False)  # type: int
    """
    First removed value index.
    :type: int
    """
    last_index = data_attribute(INTEGER_TYPES, checked=False)  # type: int
    """
    Last removed value index.
    :type: int
    """
    stop = data_attribute(INTEGER_TYPES, checked=False)  # type: int
    """
    Stop index.
    :type: int
    """
    old_values = data_protected_list_attribute(
        checked=False,
    )  # type: ListData[Any]
    """
    Old values.
    :type: objetto.data.ListData[Any]
    """
@final
class ListUpdate(BaseAtomicChange):
    """
    List values have been updated.
    Inherits from:
      - :class:`objetto.bases.BaseAtomicChange`
    """
    name = data_attribute(
        STRING_TYPES, checked=False, default="Update values"
    )  # type: str
    """
    Name describing the change.
    :type: str
    """
    index = data_attribute(INTEGER_TYPES, checked=False)  # type: int
    """
    First updated value index.
    :type: int
    """
    last_index = data_attribute(INTEGER_TYPES, checked=False)  # type: int
    """
    Last updated value index.
    :type: int
    """
    stop = data_attribute(INTEGER_TYPES, checked=False)  # type: int
    """
    Stop index.
    :type: int
    """
    old_values = data_protected_list_attribute(
        checked=False,
    )  # type: ListData[Any]
    """
    Old values.
    :type: objetto.data.ListData[Any]
    """
    new_values = data_protected_list_attribute(
        checked=False,
    )  # type: ListData[Any]
    """
    New values.
    :type: objetto.data.ListData[Any]
    """
@final
class ListMove(BaseAtomicChange):
    """
    List values have been moved internally.
    Inherits from:
      - :class:`objetto.bases.BaseAtomicChange`
    """
    name = data_attribute(
        STRING_TYPES, checked=False, default="Move values"
    )  # type: str
    """
    Name describing the change.
    :type: str
    """
    # Plain index/last_index/stop describe the moved span before the move;
    # the post_* attributes describe the same span after the move.
    index = data_attribute(INTEGER_TYPES, checked=False)  # type: int
    """
    First moved value index.
    :type: int
    """
    last_index = data_attribute(INTEGER_TYPES, checked=False)  # type: int
    """
    Last moved value index.
    :type: int
    """
    stop = data_attribute(INTEGER_TYPES, checked=False)  # type: int
    """
    Stop index.
    :type: int
    """
    target_index = data_attribute(INTEGER_TYPES, checked=False)  # type: int
    """
    Index where values are being moved to.
    :type: int
    """
    post_index = data_attribute(INTEGER_TYPES, checked=False)  # type: int
    """
    First moved value index after the move.
    :type: int
    """
    post_last_index = data_attribute(INTEGER_TYPES, checked=False)  # type: int
    """
    Last moved value index after the move.
    :type: int
    """
    post_stop = data_attribute(INTEGER_TYPES, checked=False)  # type: int
    """
    Stop index after the move.
    :type: int
    """
    values = data_protected_list_attribute(
        checked=False,
    )  # type: ListData[Any]
    """
    Values being moved.
    :type: objetto.data.ListData[Any]
    """
@final
class SetUpdate(BaseAtomicChange):
    """
    Values have been added to the set.
    Inherits from:
      - :class:`objetto.bases.BaseAtomicChange`
    """
    name = data_attribute(
        STRING_TYPES, checked=False, default="Add values"
    )  # type: str
    """
    Name describing the change.
    :type: str
    """
    new_values = data_protected_set_attribute(checked=False)  # type: SetData[Any]
    """
    Values being added to the set.
    :type: objetto.data.SetData[collections.abc.Hashable]
    """
@final
class SetRemove(BaseAtomicChange):
    """
    Values have been removed from the set.
    Inherits from:
      - :class:`objetto.bases.BaseAtomicChange`
    """
    name = data_attribute(
        STRING_TYPES, checked=False, default="Remove values"
    )  # type: str
    """
    Name describing the change.
    :type: str
    """
    old_values = data_protected_set_attribute(checked=False)  # type: SetData[Any]
    """
    Values being removed from the set.
    :type: objetto.data.SetData[collections.abc.Hashable]
    """
| 20.82716 | 84 | 0.621983 |
888d0f6b6960d945a32761a12a5cf8772c533f9b | 511 | py | Python | deploys/setup-infra/install_dashboard.py | kimcu-on-thenet/coolstore-microservices | d2524bf56fc0219496fd1e499a663f3e63b42818 | [
"MIT"
] | 1 | 2019-08-03T17:42:39.000Z | 2019-08-03T17:42:39.000Z | deploys/setup-infra/install_dashboard.py | kimcu-on-thenet/coolstore-microservices | d2524bf56fc0219496fd1e499a663f3e63b42818 | [
"MIT"
] | null | null | null | deploys/setup-infra/install_dashboard.py | kimcu-on-thenet/coolstore-microservices | d2524bf56fc0219496fd1e499a663f3e63b42818 | [
"MIT"
] | null | null | null | import os, subprocess
import json
# Load deployment settings once at import time; run() below reads the AKS
# cluster name from config_data.
with open("configs/config.json") as config_file:
    config_data = json.load(config_file)
def run():
    """Point kubectl at the configured AKS cluster and grant the Kubernetes
    dashboard service account cluster-admin rights.

    Raises:
        subprocess.CalledProcessError: if either kubectl invocation exits
            non-zero. (Bug fix: subprocess.call previously discarded the
            exit status, so failures were silently ignored.)
    """
    # Select the kubectl context for the target cluster.
    subprocess.check_call(
        ['kubectl', 'config', 'use-context', config_data["aks"]["name"]])
    # Bind the dashboard service account to the cluster-admin role.
    subprocess.check_call(
        ['kubectl', 'create',
         'clusterrolebinding',
         'kubernetes-dashboard',
         '-n', 'kube-system',
         '--clusterrole', 'cluster-admin',
         '--serviceaccount', 'kube-system:kubernetes-dashboard'])


if __name__ == "__main__":
    run()
2e1a5b478fae8c0e6082ea56f4aadaa0f47ad061 | 6,987 | py | Python | tests/differentiation/test_generate_steps.py | PaulBehler/estimagic | c14f743986262d508e55738c90737cb504fe987b | [
"MIT"
] | 83 | 2019-09-26T04:44:03.000Z | 2022-03-17T20:24:02.000Z | tests/differentiation/test_generate_steps.py | PaulBehler/estimagic | c14f743986262d508e55738c90737cb504fe987b | [
"MIT"
] | 243 | 2019-06-25T18:15:53.000Z | 2022-03-26T09:17:44.000Z | tests/differentiation/test_generate_steps.py | PaulBehler/estimagic | c14f743986262d508e55738c90737cb504fe987b | [
"MIT"
] | 23 | 2019-07-03T11:16:55.000Z | 2022-03-07T00:57:38.000Z | import numpy as np
import pytest
from estimagic.differentiation.generate_steps import _calculate_or_validate_base_steps
from estimagic.differentiation.generate_steps import _fillna
from estimagic.differentiation.generate_steps import _rescale_to_accomodate_bounds
from estimagic.differentiation.generate_steps import _set_unused_side_to_nan
from estimagic.differentiation.generate_steps import generate_steps
from numpy.testing import assert_array_almost_equal as aaae
def test_scalars_as_base_steps():
    """A scalar base_steps must behave like the equivalent full array."""
    steps_scalar = _calculate_or_validate_base_steps(
        0.1, np.ones(3), "first_derivative", None, scaling_factor=1
    )
    steps_array = _calculate_or_validate_base_steps(
        np.full(3, 0.1), np.ones(3), "first_derivative", None, scaling_factor=1
    )
    aaae(steps_scalar, steps_array)
def test_scalars_as_min_steps():
    """A scalar min_steps must behave like the equivalent full array."""
    steps_scalar = _calculate_or_validate_base_steps(
        0.1, np.ones(3), "first_derivative", 0.12, scaling_factor=1.5
    )
    steps_array = _calculate_or_validate_base_steps(
        np.full(3, 0.1),
        np.ones(3),
        "first_derivative",
        np.full(3, 0.12),
        scaling_factor=1.5,
    )
    aaae(steps_scalar, steps_array)
def test_calculate_or_validate_base_steps_invalid_too_small():
    """User-provided base_steps below min_steps must be rejected."""
    base_steps = np.array([1e-10, 0.01, 0.01])
    min_steps = np.full(3, 1e-8)
    with pytest.raises(ValueError):
        _calculate_or_validate_base_steps(
            base_steps, np.ones(3), "first_derivative", min_steps, scaling_factor=1
        )
def test_calculate_or_validate_base_steps_wrong_shape():
    """base_steps whose shape differs from x must be rejected."""
    base_steps = np.array([0.01, 0.01, 0.01])
    min_steps = np.full(3, 1e-8)
    with pytest.raises(ValueError):
        _calculate_or_validate_base_steps(
            base_steps, np.ones(2), "first_derivative", min_steps, scaling_factor=1
        )
def test_calculate_or_validate_base_steps_jacobian():
    """Default first-derivative steps scale with max(|x|, 0.1) * sqrt(eps)."""
    x = np.array([0.05, 1, -5])
    expected = np.array([0.1, 1, 5]) * np.sqrt(np.finfo(float).eps)
    calculated = _calculate_or_validate_base_steps(
        None, x, "first_derivative", 0, scaling_factor=1.0
    )
    aaae(calculated, expected, decimal=12)
def test_calculate_or_validate_base_steps_jacobian_with_scaling_factor():
    """The scaling factor multiplies the automatically chosen steps."""
    x = np.array([0.05, 1, -5])
    expected = np.array([0.1, 1, 5]) * np.sqrt(np.finfo(float).eps) * 2
    calculated = _calculate_or_validate_base_steps(
        None, x, "first_derivative", 0, scaling_factor=2.0
    )
    aaae(calculated, expected, decimal=12)
def test_calculate_or_validate_base_steps_binding_min_step():
    """Automatically chosen steps are clipped from below at min_steps."""
    x = np.array([0.05, 1, -5])
    expected = np.array([0.1, 1, 5]) * np.sqrt(np.finfo(float).eps)
    # The first entry would be smaller than 1e-8, so min_steps binds there.
    expected[0] = 1e-8
    calculated = _calculate_or_validate_base_steps(
        None, x, "first_derivative", 1e-8, scaling_factor=1.0
    )
    aaae(calculated, expected, decimal=12)
def test_calculate_or_validate_base_steps_hessian():
    """Default second-derivative steps scale with eps ** (1 / 3)."""
    x = np.array([0.05, 1, -5])
    expected = np.array([0.1, 1, 5]) * np.finfo(float).eps ** (1 / 3)
    calculated = _calculate_or_validate_base_steps(
        None, x, "second_derivative", 0, scaling_factor=1.0
    )
    aaae(calculated, expected, decimal=12)
def test_set_unused_side_to_nan_forward():
    """Forward differences drop the negative side, except where the upper
    bound is too tight and the function must switch to the other side."""
    pos = np.ones((3, 2))
    neg = -np.ones((3, 2))
    method = "forward"
    x = np.zeros(3)
    upper_bounds = np.array([0.5, 2, 3])
    lower_bounds = np.array([-2, -0.1, -0.1])
    expected_pos = np.array([[np.nan, np.nan], [1, 1], [1, 1]])
    expected_neg = np.array([[-1, -1], [np.nan, np.nan], [np.nan, np.nan]])
    calculated_pos, calculated_neg = _set_unused_side_to_nan(
        x, pos, neg, method, lower_bounds, upper_bounds
    )
    assert np.allclose(calculated_pos, expected_pos, equal_nan=True)
    assert np.allclose(calculated_neg, expected_neg, equal_nan=True)
def test_set_unused_side_to_nan_backward():
    """Backward differences drop the positive side, except where the lower
    bound is too tight and the function must switch to the other side."""
    pos = np.ones((3, 2))
    neg = -np.ones((3, 2))
    method = "backward"
    x = np.zeros(3)
    upper_bounds = np.array([0.5, 2, 3])
    lower_bounds = np.array([-2, -0.1, -2])
    expected_pos = np.array([[np.nan, np.nan], [1, 1], [np.nan, np.nan]])
    expected_neg = np.array([[-1, -1], [np.nan, np.nan], [-1, -1]])
    calculated_pos, calculated_neg = _set_unused_side_to_nan(
        x, pos, neg, method, lower_bounds, upper_bounds
    )
    assert np.allclose(calculated_pos, expected_pos, equal_nan=True)
    assert np.allclose(calculated_neg, expected_neg, equal_nan=True)
def test_fillna():
    """NaNs are replaced by the fill value; other entries are untouched."""
    a = np.array([np.nan, 3, 4])
    assert np.allclose(_fillna(a, 0), np.array([0, 3, 4.0]))
def test_rescale_to_accomodate_bounds():
    """Steps that would cross a bound are rescaled proportionally."""
    pos = np.array([[1, 2], [1.5, 3], [1, 2], [3, np.nan]])
    neg = -pos
    base_steps = np.array([1, 1.5, 2, 3])
    min_step = 0.1
    lower_bounds = -4 * np.ones(4)
    upper_bounds = np.ones(4) * 2.5
    expected_pos = np.array([[1, 2], [1.25, 2.5], [1, 2], [2.5, np.nan]])
    expected_neg = -expected_pos
    calculated_pos, calculated_neg = _rescale_to_accomodate_bounds(
        base_steps, pos, neg, lower_bounds, upper_bounds, min_step
    )
    # Bug fix: the np.allclose results were previously discarded, so this
    # test could never fail. Assert them (NaNs must match positionally).
    assert np.allclose(calculated_pos, expected_pos, equal_nan=True)
    assert np.allclose(calculated_neg, expected_neg, equal_nan=True)
def test_rescale_to_accomodate_bounds_binding_min_step():
    """Rescaling never shrinks steps below the per-parameter min_step."""
    pos = np.array([[1, 2], [1.5, 3], [1, 2]])
    neg = -pos
    base_steps = np.array([1, 1.5, 2])
    min_step = np.array([0, 1.4, 0])
    lower_bounds = -4 * np.ones(3)
    upper_bounds = np.ones(3) * 2.5
    expected_pos = np.array([[1, 2], [1.4, 2.8], [1, 2]])
    expected_neg = -expected_pos
    calculated_pos, calculated_neg = _rescale_to_accomodate_bounds(
        base_steps, pos, neg, lower_bounds, upper_bounds, min_step
    )
    aaae(calculated_pos, expected_pos)
    aaae(calculated_neg, expected_neg)
def test_generate_steps_binding_min_step():
    """With a small min_steps, bounded entries are rescaled (not NaN-ed)."""
    calculated_steps = generate_steps(
        x=np.arange(3),
        method="central",
        n_steps=2,
        target="first_derivative",
        base_steps=np.array([0.1, 0.2, 0.3]),
        lower_bounds=np.full(3, -np.inf),
        upper_bounds=np.full(3, 2.5),
        step_ratio=2.0,
        min_steps=np.full(3, 1e-8),
        scaling_factor=1.0,
    )
    expected_pos = np.array([[0.1, 0.2], [0.2, 0.4], [0.25, 0.5]]).T
    expected_neg = -expected_pos
    aaae(calculated_steps.pos, expected_pos)
    aaae(calculated_steps.neg, expected_neg)
def test_generate_steps_min_step_equals_base_step():
    """With min_steps=None (i.e. equal to base_steps), steps that would
    violate a bound become NaN on the constrained side."""
    calculated_steps = generate_steps(
        x=np.arange(3),
        method="central",
        n_steps=2,
        target="first_derivative",
        base_steps=np.array([0.1, 0.2, 0.3]),
        lower_bounds=np.full(3, -np.inf),
        upper_bounds=np.full(3, 2.5),
        step_ratio=2.0,
        min_steps=None,
        scaling_factor=1.0,
    )
    expected_pos = np.array([[0.1, 0.2], [0.2, 0.4], [0.3, np.nan]]).T
    expected_neg = np.array([[-0.1, -0.2], [-0.2, -0.4], [-0.3, -0.6]]).T
    aaae(calculated_steps.pos, expected_pos)
    aaae(calculated_steps.neg, expected_neg)
| 32.649533 | 86 | 0.661514 |
3f4e85a80935ec021f5d4e6ee704cb915848facd | 11,194 | py | Python | asterisk/ami/client.py | nchizhov/python-ami | d1953f4129452cbc015d7cb3d1e2f360dcaee45c | [
"BSD-3-Clause"
] | null | null | null | asterisk/ami/client.py | nchizhov/python-ami | d1953f4129452cbc015d7cb3d1e2f360dcaee45c | [
"BSD-3-Clause"
] | null | null | null | asterisk/ami/client.py | nchizhov/python-ami | d1953f4129452cbc015d7cb3d1e2f360dcaee45c | [
"BSD-3-Clause"
] | null | null | null | import re
import socket
import errno
import threading
from functools import partial
from .action import Action, LoginAction, LogoffAction, SimpleAction
from .event import Event, EventListener
from .response import Response, FutureResponse
from .utils import str, unicode
def NOOP(*args, **kwargs):
    """Do-nothing callback used to fill unhandled listener slots.

    Defined with ``def`` instead of an assigned lambda (PEP 8 E731) so it
    has a useful ``__name__`` in tracebacks; accepts and ignores anything.
    """
    return None


# Default callback set for ad-hoc listeners: every hook is a no-op until
# the user overrides it via keyword arguments (see AMIClient.add_listener).
NOOP_LISTENER = dict(
    on_action=NOOP,
    on_response=NOOP,
    on_event=NOOP,
    on_connect=NOOP,
    on_disconnect=NOOP,
    on_unknown=NOOP,
)
class AMIClientListener(object):
    """Hook container for AMIClient callbacks.

    Subclass and override the ``on_*`` methods, or build one ad hoc by
    passing callables as keyword arguments (one per name in ``methods``).
    """
    methods = ['on_action', 'on_response', 'on_event', 'on_connect', 'on_disconnect', 'on_unknown']

    def __init__(self, **kwargs):
        for name, handler in kwargs.items():
            # Reject anything that is not one of the known hook names.
            if name not in self.methods:
                raise TypeError("'%s' is an invalid keyword argument for this function" % name)
            setattr(self, name, handler)

    def on_action(self, source, action):
        raise NotImplementedError()

    def on_response(self, source, response):
        raise NotImplementedError()

    def on_event(self, source, event):
        raise NotImplementedError()

    def on_connect(self, source):
        raise NotImplementedError()

    def on_disconnect(self, source, error=None):
        raise NotImplementedError()

    def on_unknown(self, source, pack):
        raise NotImplementedError()
class AMIClient(object):
    """Threaded client for the Asterisk Manager Interface (AMI).

    Opens a TCP connection to the AMI port, reads the protocol banner, then
    runs a daemon reader thread that splits the byte stream into packs
    (blocks separated by CRLF CRLF) and dispatches them to listeners and to
    the FutureResponse objects of pending actions.
    """
    # Banner sent by Asterisk on connect, e.g. "Asterisk Call Manager/5.0".
    asterisk_start_regex = re.compile('^Asterisk *Call *Manager/(?P<version>([0-9]+\.)*[0-9]+)', re.IGNORECASE)
    # Wire-protocol separators (bytes): single line vs. end of a pack.
    asterisk_line_regex = re.compile(b'\r\n', re.IGNORECASE | re.MULTILINE)
    asterisk_pack_regex = re.compile(b'\r\n\r\n', re.IGNORECASE | re.MULTILINE)

    def __init__(self, address='127.0.0.1', port=5038,
                 encoding='utf-8', encoding_errors='replace',
                 timeout=3, buffer_size=2 ** 10,
                 **kwargs):
        # Monotonically increasing counter used to build unique ActionIDs.
        self._action_counter = 0
        # ActionID -> FutureResponse for actions still awaiting a response.
        self._futures = {}
        self._listeners = []
        self._event_listeners = []
        self._address = address
        self._buffer_size = buffer_size
        self._port = port
        self._socket = None
        self._thread = None
        # threading.Event; set when the reader loop should stop (or stopped).
        self.finished = None
        self._ami_version = None
        self._timeout = timeout
        self.encoding = encoding
        self.encoding_errors = encoding_errors
        # Convenience: extra keyword arguments become an ad-hoc listener.
        if len(kwargs) > 0:
            self.add_listener(**kwargs)

    def next_action_id(self):
        """Return a fresh, unique ActionID as a string."""
        id = self._action_counter
        self._action_counter += 1
        return str(id)

    def connect(self):
        """Open the TCP socket and start the background reader thread."""
        self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self._socket.settimeout(self._timeout)
        self._socket.connect((self._address, self._port))
        self.finished = threading.Event()
        self._thread = threading.Thread(target=self.listen)
        self._thread.daemon = True
        self._thread.start()

    # --- listener fan-out helpers ------------------------------------
    def _fire_on_connect(self, **kwargs):
        for listener in self._listeners:
            listener.on_connect(source=self, **kwargs)

    def _fire_on_disconnect(self, **kwargs):
        for listener in self._listeners:
            listener.on_disconnect(source=self, **kwargs)

    def _fire_on_response(self, **kwargs):
        for listener in self._listeners:
            listener.on_response(source=self, **kwargs)

    def _fire_on_action(self, **kwargs):
        for listener in self._listeners:
            listener.on_action(source=self, **kwargs)

    def _fire_on_event(self, **kwargs):
        for listener in self._listeners:
            listener.on_event(source=self, **kwargs)

    def _fire_on_unknown(self, **kwargs):
        for listener in self._listeners:
            listener.on_unknown(source=self, **kwargs)

    def disconnect(self):
        """Stop the reader loop and close the socket (best effort)."""
        self.finished.set()
        try:
            self._socket.close()
            self._thread.join(self._timeout)
        except:
            # Best-effort teardown: ignore errors from an already-closed
            # socket or a dead thread.
            pass

    def login(self, username, secret, callback=None):
        """Connect if necessary and send a Login action.

        Returns the FutureResponse for the login action.
        """
        if self.finished is None or self.finished.is_set():
            self.connect()
        return self.send_action(LoginAction(username, secret), callback)

    def logoff(self, callback=None):
        """Send a Logoff action if connected; returns None when not connected."""
        if self.finished is None or self.finished.is_set():
            return
        return self.send_action(LogoffAction(), callback)

    def send_action(self, action, callback=None):
        """Send *action* and register a FutureResponse keyed by its ActionID."""
        if 'ActionID' not in action.keys:
            action_id = self.next_action_id()
            action.keys['ActionID'] = action_id
        else:
            action_id = action.keys['ActionID']
        future = FutureResponse(callback, self._timeout)
        self._futures[action_id] = future
        self._fire_on_action(action=action)
        self.send(action)
        return future

    def send(self, pack):
        # Serialize the pack and terminate it with CRLF as the protocol expects.
        self._socket.send(bytearray(unicode(pack) + '\r\n', self.encoding))

    def _decode_pack(self, pack):
        """Decode raw bytes using the configured encoding/error policy."""
        return pack.decode(self.encoding, errors=self.encoding_errors)

    def _next_pack(self):
        """Generator yielding decoded packs read from the socket.

        The first yielded item is the single banner line (CRLF-terminated);
        afterwards packs are blocks separated by a blank line (CRLF CRLF).
        An empty recv() means the peer closed the connection.
        """
        data = b''
        while not self.finished.is_set():
            recv = self._socket.recv(self._buffer_size)
            if recv == b'':
                self.finished.set()
                continue
            data += recv
            if self.asterisk_line_regex.search(data):
                (pack, data) = self.asterisk_line_regex.split(data, 1)
                yield self._decode_pack(pack)
                break
        while not self.finished.is_set():
            # Drain every complete pack already buffered before reading more.
            while self.asterisk_pack_regex.search(data):
                (pack, data) = self.asterisk_pack_regex.split(data, 1)
                yield self._decode_pack(pack)
            recv = self._socket.recv(self._buffer_size)
            if recv == b'':
                self.finished.set()
                continue
            data += recv
        self._socket.close()

    def listen(self):
        """Reader-thread main loop: validate the banner, then dispatch packs."""
        pack_generator = self._next_pack()
        asterisk_start = next(pack_generator)
        match = AMIClient.asterisk_start_regex.match(asterisk_start)
        if not match:
            raise Exception()
        self._ami_version = match.group('version')
        self._fire_on_connect()
        try:
            while not self.finished.is_set():
                pack = next(pack_generator)
                self.fire_recv_pack(pack)
            self._fire_on_disconnect(error=None)
        except Exception as ex:
            self._fire_on_disconnect(error=ex)

    def fire_recv_reponse(self, response):
        """Dispatch a Response pack (note: historical typo in the name is
        kept because external callers may rely on it)."""
        self._fire_on_response(response=response)
        # A 'Goodbye' status means the server is closing the session.
        if response.status.lower() == 'goodbye':
            self.finished.set()
        if 'ActionID' not in response.keys:
            return
        action_id = response.keys['ActionID']
        if action_id not in self._futures:
            return
        # Resolve (and forget) the pending future for this action.
        future = self._futures.pop(action_id)
        future.response = response

    def fire_recv_event(self, event):
        """Dispatch an Event pack to client listeners and event listeners."""
        self._fire_on_event(event=event)
        for listener in self._event_listeners:
            listener(event=event, source=self)

    def fire_recv_pack(self, pack):
        """Classify a raw pack as Response, Event or unknown and dispatch it."""
        if Response.match(pack):
            response = Response.read(pack)
            self.fire_recv_reponse(response)
            return
        if Event.match(pack):
            event = Event.read(pack)
            self.fire_recv_event(event)
            return
        self._fire_on_unknown(pack=pack)

    def add_listener(self, listener=None, **kwargs):
        """Register a listener; kwargs build one on top of NOOP defaults."""
        if not listener:
            default = NOOP_LISTENER.copy()
            default.update(kwargs)
            listener = AMIClientListener(**default)
        self._listeners.append(listener)
        return listener

    def remove_listener(self, listener):
        """Unregister a previously added listener and return it."""
        self._listeners.remove(listener)
        return listener

    def add_event_listener(self, on_event=None, **kwargs):
        """Register an event listener, wrapping kwargs in an EventListener
        unless a ready-made one is given."""
        if len(kwargs) > 0 and not isinstance(on_event, EventListener):
            event_listener = EventListener(on_event=on_event, **kwargs)
        else:
            event_listener = on_event
        self._event_listeners.append(event_listener)
        return event_listener

    def remove_event_listener(self, event_listener):
        """Unregister a previously added event listener."""
        self._event_listeners.remove(event_listener)
class AMIClientAdapter(object):
    """Syntactic-sugar wrapper around an AMIClient.

    Attribute access is turned into an AMI action of the same name, e.g.
    ``adapter.Ping()`` sends a ``Ping`` action; keyword arguments become
    the action's keys.
    """

    def __init__(self, ami_client):
        self._ami_client = ami_client

    def _action(self, name, _callback=None, variables=None, **kwargs):
        """Build an Action named *name* and send it through the client."""
        action = Action(name, kwargs)
        # Bug fix: the original ``variables={}`` default was a mutable
        # default argument shared across every call; use None and create a
        # fresh dict per call instead.
        action.variables = {} if variables is None else variables
        return self._ami_client.send_action(action, _callback)

    def __getattr__(self, item):
        # Unknown attributes resolve to a partially-applied action sender.
        return partial(self._action, item)
class AutoReconnect(threading.Thread):
    """Watchdog thread that pings an AMIClient and logs in again on failure.

    It monkey-patches the client's login/logoff: a successful login captures
    the credentials and starts this thread; logoff stops the thread and
    restores the original methods.
    """

    def __init__(self, ami_client, delay=0.5,
                 on_disconnect=lambda *args: None, on_reconnect=lambda *args: None):
        super(AutoReconnect, self).__init__()
        self.on_reconnect = on_reconnect
        self.on_disconnect = on_disconnect
        # Seconds between keep-alive pings.
        self.delay = delay
        self.finished = None
        self._ami_client = ami_client
        # (args, kwargs) of the last successful login, replayed on reconnect.
        self._login_args = None
        # Original (unwrapped) client methods, saved by _prepare_client().
        self._login = None
        self._logoff = None
        self._prepare_client()

    def _prepare_client(self):
        # Replace the client's login/logoff with our wrappers.
        self._login = self._ami_client.login
        self._logoff = self._ami_client.logoff
        self._ami_client.login = self._login_wrapper
        self._ami_client.logoff = self._logoff_wrapper

    def _rollback_client(self):
        # Restore the original (unwrapped) client methods.
        self._ami_client.login = self._login
        self._ami_client.logoff = self._logoff

    def _login_wrapper(self, *args, **kwargs):
        callback = kwargs.pop('callback', None) or (lambda *a, **k: None)

        def on_login(response, *a, **k):
            if not response.is_error():
                # First successful login starts the watchdog thread.
                if self._login_args is None:
                    self.finished = threading.Event()
                    self.start()
                self._login_args = (args, kwargs)
            callback(response, *a, **k)

        kwargs['callback'] = on_login
        return self._login(*args, **kwargs)

    def _logoff_wrapper(self, *args, **kwargs):
        # Explicit logoff: stop the watchdog and un-patch the client.
        self.finished.set()
        self._rollback_client()
        return self._logoff(*args, **kwargs)

    def ping(self):
        """Send a Ping action; True when a non-error response arrives."""
        try:
            f = self._ami_client.send_action(Action('Ping'))
            response = f.response
            if response is not None and not response.is_error():
                return True
            self.on_disconnect(self._ami_client, response)
        except Exception as ex:
            self.on_disconnect(self._ami_client, ex)
        return False

    def try_reconnect(self):
        """Attempt to log in again with the stored credentials."""
        try:
            f = self._login(*self._login_args[0], **self._login_args[1])
            response = f.response
            if response is not None and not response.is_error():
                self.on_reconnect(self._ami_client, response)
                return True
        except socket.error as e:
            if e.errno != errno.EPIPE:
                pass
            # Socket error: drop the dead connection and retry recursively.
            self._ami_client.disconnect()
            self.try_reconnect()
        except:
            # Any other failure is treated as "still disconnected".
            pass
        return False

    def run(self):
        # Watchdog loop: wait, ping, reconnect on failure, repeat.
        self.finished.wait(self.delay)
        while not self.finished.is_set():
            if not self.ping():
                self.try_reconnect()
            self.finished.wait(self.delay)

    def __del__(self):
        self._rollback_client()
| 33.414925 | 111 | 0.614168 |
094ed60c477af4df05ae8996c0dcc09df1ea5251 | 2,475 | py | Python | tests/gold_tests/runroot/runroot_init.test.py | dio/trafficserver | 20e83321f5120c71c7ec220ec54567b160bca43a | [
"Apache-2.0"
] | null | null | null | tests/gold_tests/runroot/runroot_init.test.py | dio/trafficserver | 20e83321f5120c71c7ec220ec54567b160bca43a | [
"Apache-2.0"
] | 2 | 2017-03-14T02:29:31.000Z | 2017-09-22T22:11:35.000Z | tests/gold_tests/runroot/runroot_init.test.py | persiaAziz/trafficserver | 0b323a6aac08469d6a6fbfcf93e4d7c2b2602682 | [
"Apache-2.0"
] | 1 | 2021-02-15T08:09:17.000Z | 2021-02-15T08:09:17.000Z | '''
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import time
Test.Summary = '''
Test for init of runroot from traffic_layout.
'''
# NOTE(review): `Test` / `Testers` are globals injected by the AuTest
# framework that runs this file — confirm against the harness docs.
Test.ContinueOnFail = True

p = Test.MakeATSProcess("ts")
ts_root = p.Env['TS_ROOT']

# Case 1: init from an absolute path passed via --path.
path1 = os.path.join(ts_root, "runroot1")
tr = Test.AddTestRun("Test traffic_layout init #1")
tr.Processes.Default.Command = "$ATS_BIN/traffic_layout init --path " + path1
tr.Processes.Default.ReturnCode = 0
# A runroot_path.yml marker file proves the runroot was created.
f = tr.Disk.File(os.path.join(path1, "runroot_path.yml"))
f.Exists = True

# Case 2: init using a path relative to the current directory.
path2 = os.path.join(ts_root, "runroot2")
tr = Test.AddTestRun("Test traffic_layout init #2")
tr.Processes.Default.Command = "cd " + ts_root + ";$ATS_BIN/traffic_layout init --path runroot2"
tr.Processes.Default.ReturnCode = 0
f = tr.Disk.File(os.path.join(path2, "runroot_path.yml"))
f.Exists = True

# Case 3: init with no --path, i.e. into the current working directory.
path3 = os.path.join(ts_root, "runroot3")
tr = Test.AddTestRun("Test traffic_layout init #3")
tr.Processes.Default.Command = "mkdir " + path3 + ";cd " + path3 + ";$ATS_BIN/traffic_layout init"
tr.Processes.Default.ReturnCode = 0
f = tr.Disk.File(os.path.join(path3, "runroot_path.yml"))
f.Exists = True

# Case 4: --force init into a non-empty directory must succeed and warn.
path4 = os.path.join(ts_root, "runroot4")
tr = Test.AddTestRun("Test traffic_layout init #4")
randomfile = os.path.join(path4, "foo")
tr.Processes.Default.Command = "mkdir " + path4 + ";touch " + randomfile + ";$ATS_BIN/traffic_layout init --force --path " + path4
tr.Processes.Default.ReturnCode = 0
f = tr.Disk.File(os.path.join(path4, "runroot_path.yml"))
f.Exists = True
tr.Processes.Default.Streams.All = Testers.ContainsExpression("Forcing creating runroot", "force message")
| 38.671875 | 130 | 0.737374 |
894135269d5a69613108ec5ceb32cd4bb48b18d9 | 2,150 | py | Python | screenpy_requests/abilities/make_api_requests.py | ScreenPyHQ/screenpy_requests | 8f34b52b149ff2000992d1207765ff7855e219cd | [
"MIT"
] | 39 | 2019-03-22T15:18:23.000Z | 2022-02-23T17:32:03.000Z | screenpy_requests/abilities/make_api_requests.py | ScreenPyHQ/screenpy_requests | 8f34b52b149ff2000992d1207765ff7855e219cd | [
"MIT"
] | 63 | 2019-07-17T06:25:19.000Z | 2022-01-13T07:03:53.000Z | screenpy_requests/abilities/make_api_requests.py | ScreenPyHQ/screenpy_requests | 8f34b52b149ff2000992d1207765ff7855e219cd | [
"MIT"
] | 15 | 2019-07-09T11:02:56.000Z | 2021-12-24T07:43:56.000Z | """
Enable the Actor to make API requests and store the responses.
"""
from typing import Any, Callable, Dict, List, Optional
from requests import Response, Session
from ..exceptions import RequestError
class MakeAPIRequests:
    """Enable an Actor to send HTTP requests through a Requests session.

    Examples::

        Perry = AnActor.named("Perry").who_can(MakeAPIRequests())

        Perry = AnActor.named("Perry").who_can(
            MakeAPIRequests.using(session_instance)
        )
    """

    @staticmethod
    def using(session: Session) -> "MakeAPIRequests":
        """Construct the Ability around an existing |Requests| session."""
        return MakeAPIRequests(session=session)

    def to_send(self, method: str, url: str, **kwargs: Any) -> None:
        """Send a request and record the response on this Ability.

        Thin pass-through to the session method that matches *method*;
        keyword arguments are forwarded unchanged (same parameter
        signature as the session's ``request``).

        Args:
            method: the HTTP method of the request - GET, POST, etc.
            url: the URL to which to send the request.
            kwargs: additional keyword arguments to pass through to |request|.
        """
        verb = method.upper()
        valid_verbs = {"DELETE", "GET", "HEAD", "OPTIONS", "PATCH", "POST", "PUT"}
        if verb not in valid_verbs:
            raise RequestError(f'"{verb}" is not a valid HTTP method.')
        handler: Callable = getattr(self.session, verb.lower())
        self.responses.append(handler(url, **kwargs))

    send = to_send

    def forget(self) -> None:
        """Clean up the Session instance stored in this Ability."""
        self.session.close()

    def __repr__(self) -> str:
        return "Make API Requests"

    __str__ = __repr__

    def __init__(self, session: Optional[Session] = None) -> None:
        self.responses: List[Response] = []
        self.session = session if session is not None else Session()
| 29.861111 | 78 | 0.610698 |
5adedc94e6dc1e1ed95a312b644cc258e6bf9ec7 | 10,628 | py | Python | evennia/utils/picklefield.py | dineshsonachalam/evennia | 24c5ddecabb84d8a7f62fb65a11c8bbcc879c49e | [
"BSD-3-Clause"
] | 9 | 2017-07-10T04:27:31.000Z | 2020-07-31T08:54:08.000Z | evennia/utils/picklefield.py | dineshsonachalam/evennia | 24c5ddecabb84d8a7f62fb65a11c8bbcc879c49e | [
"BSD-3-Clause"
] | null | null | null | evennia/utils/picklefield.py | dineshsonachalam/evennia | 24c5ddecabb84d8a7f62fb65a11c8bbcc879c49e | [
"BSD-3-Clause"
] | 4 | 2017-09-11T02:26:21.000Z | 2021-12-31T05:20:34.000Z | #
# Copyright (c) 2009-2010 Gintautas Miliauskas
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
"""
Pickle field implementation for Django.
Modified for Evennia by Griatch.
"""
from builtins import object
from ast import literal_eval
from copy import deepcopy
from base64 import b64encode, b64decode
from zlib import compress, decompress
#import six # this is actually a pypy component, not in default syslib
import django
from django.core.exceptions import ValidationError
from django.db import models
# django 1.5 introduces force_text instead of force_unicode
from django.forms import CharField, Textarea
from django.forms.utils import flatatt
from django.utils.html import format_html
from evennia.utils.dbserialize import from_pickle, to_pickle
from future.utils import with_metaclass
try:
from django.utils.encoding import force_text
except ImportError:
from django.utils.encoding import force_unicode as force_text
# python 3.x does not have cPickle module
try:
from cPickle import loads, dumps # cpython 2.x
except ImportError:
from pickle import loads, dumps # cpython 3.x, other interpreters
DEFAULT_PROTOCOL = 2
class PickledObject(str):
    """
    A subclass of string so it can be told whether a string is a pickled
    object or not (if the object is an instance of this class then it must
    [well, should] be a pickled one).

    Only really useful for passing pre-encoded values to ``default``
    with ``dbsafe_encode``, not that doing so is necessary. If you
    remove PickledObject and its references, you won't be able to pass
    in pre-encoded values anymore, but you can always just pass in the
    python objects themselves.
    """
    # Marker type only: carries no behaviour beyond plain str.
class _ObjectWrapper(object):
    """
    A class used to wrap object that have properties that may clash with the
    ORM internals.

    For example, objects with the `prepare_database_save` property such as
    `django.db.Model` subclasses won't work under certain conditions and the
    same apply for trying to retrieve any `callable` object.
    """
    # __slots__ keeps the wrapper minimal: a single slot for the payload,
    # no instance __dict__ that could expose conflicting attributes.
    __slots__ = ('_obj',)

    def __init__(self, obj):
        self._obj = obj
def wrap_conflictual_object(obj):
    """Hide *obj* behind _ObjectWrapper when it could confuse the ORM.

    Model-like objects (anything exposing ``prepare_database_save``) and
    callables are wrapped; every other value passes through unchanged.
    """
    needs_wrapping = callable(obj) or hasattr(obj, 'prepare_database_save')
    return _ObjectWrapper(obj) if needs_wrapping else obj
def dbsafe_encode(value, compress_object=False, pickle_protocol=DEFAULT_PROTOCOL):
    """Pickle *value* and b64-encode it, optionally zlib-compressing first.

    A deepcopy is taken before pickling because cPickle may emit different
    byte streams for the same value depending on how it is referenced; all
    lookups are plain string comparisons of the stored encoding, so the
    bytes must be reproducible.  See tests.py for more information.
    """
    pickled = dumps(deepcopy(value), protocol=pickle_protocol)
    if compress_object:
        pickled = compress(pickled)
    # decode() turns the base64 bytes into str for database storage.
    return PickledObject(b64encode(pickled).decode())
def dbsafe_decode(value, compress_object=False):
    """Reverse dbsafe_encode: b64-decode, optionally decompress, unpickle."""
    raw = b64decode(value.encode())
    if compress_object:
        raw = decompress(raw)
    return loads(raw)
class PickledWidget(Textarea):
    """Textarea widget that shows the repr() of the stored Python value."""

    def render(self, name, value, attrs=None):
        # Display the value as a Python literal so it can round-trip
        # through literal_eval on clean().
        value = repr(value)
        try:
            literal_eval(value)
        except ValueError:
            # NOTE(review): returns the bare repr string instead of widget
            # HTML when the repr is not a valid literal — looks intentional
            # as a fallback, but confirm this is the desired output.
            return value
        final_attrs = self.build_attrs(attrs, name=name)
        return format_html('<textarea{0}>\r\n{1}</textarea>',
                           flatatt(final_attrs),
                           value)
class PickledFormField(CharField):
    """Form field that parses its text input as a Python literal."""

    widget = PickledWidget
    # Copy the parent's messages so mutating 'invalid' below does not
    # alter CharField's shared default_error_messages dict.
    default_error_messages = dict(CharField.default_error_messages)
    default_error_messages['invalid'] = (
        "This is not a Python Literal. You can store things like strings, "
        "integers, or floats, but you must do it by typing them as you would "
        "type them in the Python Interpreter. For instance, strings must be "
        "surrounded by quote marks. We have converted it to a string for your "
        "convenience. If it is acceptable, please hit save again.")

    def __init__(self, *args, **kwargs):
        # This needs to fall through to literal_eval.
        kwargs['required'] = False
        super(PickledFormField, self).__init__(*args, **kwargs)

    def clean(self, value):
        """Return the Python object parsed from the submitted text.

        Blank (or non-string) input becomes None.  If the input is not a
        valid literal, it is retried as its repr() — effectively treating
        the raw text as a string — before raising ValidationError.
        """
        try:
            if not value.strip():
                # Field was left blank. Make this None.
                value = 'None'
        except AttributeError:
            value = 'None'
        try:
            return literal_eval(value)
        except (ValueError, SyntaxError):
            try:
                # Second chance: interpret the raw input as a string literal.
                value = repr(value)
                return literal_eval(value)
            except (ValueError, SyntaxError):
                raise ValidationError(self.error_messages['invalid'])
class PickledObjectField(models.Field):
    """
    A field that will accept *any* python object and store it in the
    database. PickledObjectField will optionally compress its values if
    declared with the keyword argument ``compress=True``.

    Does not actually encode and compress ``None`` objects (although you
    can still do lookups using None). This way, it is still possible to
    use the ``isnull`` lookup type correctly.
    """

    def __init__(self, *args, **kwargs):
        # Field-specific options are popped before handing the rest to
        # models.Field, which would reject unknown kwargs.
        self.compress = kwargs.pop('compress', False)
        self.protocol = kwargs.pop('protocol', DEFAULT_PROTOCOL)
        super(PickledObjectField, self).__init__(*args, **kwargs)

    def get_default(self):
        """
        Returns the default value for this field.

        The default implementation on models.Field calls force_unicode
        on the default, which means you can't set arbitrary Python
        objects as the default. To fix this, we just return the value
        without calling force_unicode on it. Note that if you set a
        callable as a default, the field will still call it. It will
        *not* try to pickle and encode it.
        """
        if self.has_default():
            if callable(self.default):
                return self.default()
            return self.default
        # If the field doesn't have a default, then we punt to models.Field.
        return super(PickledObjectField, self).get_default()

    #def to_python(self, value):
    def from_db_value(self, value, *args):
        """
        B64decode and unpickle the object, optionally decompressing it.

        If an error is raised in de-pickling and we're sure the value is
        a definite pickle, the error is allowed to propagate. If we
        aren't sure if the value is a pickle or not, then we catch the
        error and return the original value instead.
        """
        if value is not None:
            try:
                value = dbsafe_decode(value, self.compress)
            except Exception:
                # If the value is a definite pickle; and an error is raised in
                # de-pickling it should be allowed to propogate.
                if isinstance(value, PickledObject):
                    raise
            else:
                # Unwrap values that were wrapped by pre_save to dodge
                # ORM attribute clashes.
                if isinstance(value, _ObjectWrapper):
                    return value._obj
        return value

    def formfield(self, **kwargs):
        return PickledFormField(**kwargs)

    def pre_save(self, model_instance, add):
        value = super(PickledObjectField, self).pre_save(model_instance, add)
        return wrap_conflictual_object(value)

    def get_db_prep_value(self, value, connection=None, prepared=False):
        """
        Pickle and b64encode the object, optionally compressing it.

        The pickling protocol is specified explicitly (by default 2),
        rather than as -1 or HIGHEST_PROTOCOL, because we don't want the
        protocol to change over time. If it did, ``exact`` and ``in``
        lookups would likely fail, since pickle would now be generating
        a different string.
        """
        if value is not None and not isinstance(value, PickledObject):
            # We call force_text here explicitly, so that the encoded string
            # isn't rejected by the postgresql_psycopg2 backend. Alternatively,
            # we could have just registered PickledObject with the psycopg
            # marshaller (telling it to store it like it would a string), but
            # since both of these methods result in the same value being stored,
            # doing things this way is much easier.
            value = force_text(dbsafe_encode(value, self.compress, self.protocol))
        return value

    def value_to_string(self, obj):
        # Used by serializers: store the encoded (string) form.
        value = self._get_val_from_obj(obj)
        return self.get_db_prep_value(value)

    def get_internal_type(self):
        # Stored as TEXT in the database.
        return 'TextField'

    def get_db_prep_lookup(self, lookup_type, value, connection=None, prepared=False):
        # Only exact-match style lookups make sense on a pickled blob.
        if lookup_type not in ['exact', 'in', 'isnull']:
            raise TypeError('Lookup type %s is not supported.' % lookup_type)
        # The Field model already calls get_db_prep_value before doing the
        # actual lookup, so all we need to do is limit the lookup types.
        return super(PickledObjectField, self).get_db_prep_lookup(
            lookup_type, value, connection=connection, prepared=prepared)
# South support; see http://south.aeracode.org/docs/tutorial/part4.html#simple-inheritance
# South is an optional (legacy) migrations dependency: register the field
# with its model inspector only when the package is importable.
try:
    from south.modelsinspector import add_introspection_rules
except ImportError:
    pass
else:
    add_introspection_rules([], [r"^evennia\.utils\.picklefield\.PickledObjectField"])
| 38.507246 | 90 | 0.682819 |
e8855b4422c6b0a55ad51b7200dc726b8910ad3e | 5,073 | py | Python | Minesweeper.py | MonotonicCode/Minesweeper | d5a849d5ec0bc12dc2da421997d3ff2971a82e7d | [
"MIT"
] | null | null | null | Minesweeper.py | MonotonicCode/Minesweeper | d5a849d5ec0bc12dc2da421997d3ff2971a82e7d | [
"MIT"
] | null | null | null | Minesweeper.py | MonotonicCode/Minesweeper | d5a849d5ec0bc12dc2da421997d3ff2971a82e7d | [
"MIT"
] | null | null | null | import random
import pygame
pygame.init()
screenSize = 400 # Size of the Window
cellSize = 20 # Size of individual Cell
mines = 50 # Number of Mines in the Game
grid = [] # A 2D Array to Store all the Cells
over = False # Is the Game Over
firstclick = True # Did the user click more than once
class Cell():
    """One square of the minesweeper board (pygame rendering + flood reveal)."""

    def __init__(self, i, j):
        # (i,j) --> index of the cell in the grid
        # (x,y) --> coordinates of the cell on the board
        self.i = i
        self.j = j
        self.x = i*cellSize
        self.y = j*cellSize
        self.mine = False
        self.revealed = False
        self.nmines = 0  # number of neighbouring mines (set by calcMines)

    def show(self, win):  # Show the cell on the Screen
        if self.revealed:
            # Revealed cells are light grey; mines draw a dot, safe cells
            # draw their neighbour count (blank when zero).
            pygame.draw.rect(win, (200, 200, 200), pygame.Rect(
                self.x, self.y, cellSize-2, cellSize-2))
            font = pygame.font.Font('freesansbold.ttf', cellSize//2)
            text = font.render(str(self.nmines), True,
                               (0, 0, 0), (200, 200, 200))
            textRect = text.get_rect()
            textRect.center = (self.x + (cellSize*0.5),
                               self.y + (cellSize*0.5))
            if self.mine:
                pygame.draw.circle(
                    win, (0, 0, 0), (self.x + int(cellSize*0.5), self.y + int(cellSize*0.5)), int(cellSize*0.25))
            else:
                if self.nmines != 0:
                    win.blit(text, textRect)
        else:
            # Unrevealed cells are dark grey.
            pygame.draw.rect(win, (100, 100, 100), pygame.Rect(
                self.x, self.y, cellSize-2, cellSize-2))

    def reveal(self):  # Reveal whats inside of a cell
        global over
        self.revealed = True
        if self.mine:
            # Hitting a mine ends the game and exposes the whole board.
            over = True
            print("You Lose")
            for rows in grid:
                for cell in rows:
                    cell.revealed = True
        # Flood fill: a zero-neighbour cell recursively reveals all its
        # in-bounds, not-yet-revealed neighbours.
        for k in range(-1, 2):
            for l in range(-1, 2):
                if (self.i+k == -1) or (self.j+l == -1) or (self.i+k >= screenSize//cellSize) or (self.j+l >= screenSize//cellSize):
                    pass
                else:
                    if (grid[self.i+k][self.j+l].revealed == False) and self.nmines == 0:
                        grid[self.i+k][self.j+l].reveal()
def checkWin():
    """Flag the game as won once every non-mine cell has been revealed."""
    global over
    revealed_count = sum(cell.revealed for row in grid for cell in row)
    if revealed_count == (screenSize // cellSize) ** 2 - mines:
        print("You Win")
        over = True
def createGrid():
    """Populate the global grid with a size x size matrix of Cells."""
    size = screenSize // cellSize
    for column in range(size):
        grid.append([Cell(column, row) for row in range(size)])
def plantMines(a, b):
    """Randomly place the mines, never on the first-clicked cell (a, b).

    Called once, on the player's first click, so the first reveal is
    always safe.
    """
    global mines
    size = screenSize // cellSize
    total_cells = size * size
    # Bug fix: the first-clicked cell is excluded from placement, so at
    # most total_cells - 1 squares can hold a mine.  The previous cap of
    # total_cells allowed a configuration where the rejection loop below
    # could never find a free square and would spin forever.
    if mines > total_cells - 1:
        mines = total_cells - 1
    taken = [[a, b]]  # positions that must stay mine-free
    for _ in range(mines):
        x = random.randint(0, size - 1)
        y = random.randint(0, size - 1)
        # Re-roll until we land on an unused square.
        while [x, y] in taken:
            x = random.randint(0, size - 1)
            y = random.randint(0, size - 1)
        taken.append([x, y])
        grid[x][y].mine = True
def calcMines():
    """Count the mines adjacent to each cell and store it in cell.nmines."""
    size = screenSize // cellSize
    for i in range(size):
        for j in range(size):
            neighbours = 0
            for di in range(-1, 2):
                for dj in range(-1, 2):
                    ni, nj = i + di, j + dj
                    if 0 <= ni < size and 0 <= nj < size and grid[ni][nj].mine:
                        neighbours += 1
            # NOTE: a mine cell counts itself here (di == dj == 0), matching
            # the original behaviour; mine cells never display the number.
            grid[i][j].nmines += neighbours
def main():  # Driver Code
    """Open the pygame window and run the event/draw loop until game over."""
    global over, firstclick
    win = pygame.display.set_mode((screenSize, screenSize))
    pygame.display.set_caption("Minesweeper")
    loop = True
    createGrid()
    # Game Loop
    while loop:
        # Redraw every cell each frame.
        for i in range(screenSize//cellSize):
            for j in range(screenSize//cellSize):
                grid[i][j].show(win)
        checkWin()
        if over:
            loop = False
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                loop = False
            # If player clicks a cell, the cell's content is revealed
            if event.type == pygame.MOUSEBUTTONDOWN:
                # Convert pixel coordinates to grid indices.
                x, y = pygame.mouse.get_pos()
                x = x//(screenSize//(screenSize//cellSize))
                y = y//(screenSize//(screenSize//cellSize))
                if firstclick:
                    # Mines are only planted after the first click so the
                    # first revealed cell can never be a mine.
                    plantMines(x,y)
                    calcMines()
                    firstclick = False
                grid[x][y].reveal()
        pygame.display.flip()


main()
| 35.725352 | 133 | 0.48216 |
5d0e15940df0848bb3ee7e5495d34a47b69ca378 | 6,816 | py | Python | GadgetBox.py | CrazyRobMiles/FreeCADSimulator | b9654c1442525d6ea21045e691062bf3e121b15e | [
"Apache-2.0"
] | 63 | 2020-11-15T10:17:19.000Z | 2021-09-17T16:36:25.000Z | GadgetBox.py | CrazyRobMiles/FreeCADSimulator | b9654c1442525d6ea21045e691062bf3e121b15e | [
"Apache-2.0"
] | null | null | null | GadgetBox.py | CrazyRobMiles/FreeCADSimulator | b9654c1442525d6ea21045e691062bf3e121b15e | [
"Apache-2.0"
] | 3 | 2020-11-16T09:37:06.000Z | 2021-05-15T05:58:59.000Z | from tkinter import *
class TKDisplay(object):
def __init__(self, width, height):
self.message = ""
self.drawObjects = []
self.root = Tk()
self.root.title("FakeFreeCAD - Rob Miles")
self.xOffset = 20
self.yOffset = 20
self.width = width
self.height = height
self.canvas = Canvas(self.root, width=width, height=height)
self.canvas.grid(row=0, column=0)
self.output_Text = Text(self.root, height=5)
self.output_Text.grid(row=1, column=0, padx=5, pady=5, sticky='nsew')
output_Scrollbar = Scrollbar(self.root, command=self.output_Text.yview)
output_Scrollbar.grid(row=1, column=1, sticky='nsew')
self.output_Text['yscrollcommand'] = output_Scrollbar.set
self.root.update()
def zoomIn(self,amount):
self.canvas.scale(ALL, 0, 0, amount, amount)
def mainloop(self):
self.root.mainloop()
def addMessageLine(self, text):
text = text + "\n"
self.output_Text.insert(END,text)
self.output_Text.see(END)
def addDrawelement(self,item):
self.drawObjects.append(item)
def drawRectangle(self,x1,y1,x2,y2,fill, outline):
x1 = x1+self.xOffset
x2 = x2+self.xOffset
y1 = self.height-(y1+self.yOffset)
y2 = self.height-(y2+self.yOffset)
print("draw rectangle: ", fill)
self.canvas.create_rectangle(x1,y1,x2,y2,fill=fill, outline=outline)
def drawCircle(self,x,y,r,fill, outline):
x = x+self.xOffset
y = self.height-(y+self.yOffset)
print("draw circle: ", fill)
self.canvas.create_oval(x-r, y-r, x+r, y+r,fill=fill, outline=outline)
class Display(object):
    """Static facade over an optional TKDisplay-style canvas.

    All methods are safe to call before a canvas is attached: messages
    still go to stdout, drawing calls become no-ops.
    """

    message = ""
    drawObjects = []
    imageCanvas = None  # set via setCanvas(); None means headless mode

    @staticmethod
    def addMessageLine(text):
        # Always echo to stdout; mirror to the canvas log when attached.
        print(text)
        if Display.imageCanvas!=None:
            Display.imageCanvas.addMessageLine(text)

    @staticmethod
    def addDrawelement(item):
        if Display.imageCanvas!=None:
            Display.imageCanvas.addDrawelement(item)

    scalefactor = 1  # NOTE(review): apparently unused here — confirm callers

    @staticmethod
    def setCanvas(c):
        # Attach the concrete canvas all subsequent calls delegate to.
        Display.imageCanvas = c

    @staticmethod
    def drawRectangle(x1,y1,x2,y2,fill, outline):
        if Display.imageCanvas!=None:
            Display.imageCanvas.drawRectangle(x1,y1,x2,y2,fill,outline)

    @staticmethod
    def drawCircle(x,y,r,fill, outline):
        if Display.imageCanvas!=None:
            Display.imageCanvas.drawCircle(x,y,r,fill,outline)
class FreeCadView(object):
    """Stub of FreeCAD's view object; logs instead of changing the view."""

    @staticmethod
    def viewAxometric():
        Display.addMessageLine("Freecad Axiometric view selected")
class FreeCadDocument(object):
    """Stub of a FreeCAD document; hands out a stub view object."""

    @staticmethod
    def activeView():
        return FreeCadView()
class Gui(object):
    """Stub of FreeCAD's ``Gui`` module namespace."""

    @staticmethod
    def SendMsgToActiveView(message):
        # In real FreeCAD this dispatches a GUI command; here it just logs.
        Display.addMessageLine(message)

    @staticmethod
    def activeDocument():
        return FreeCadDocument()
class FreeCAD(object):
    """Stub of FreeCAD's ``FreeCAD`` module namespace."""

    @staticmethod
    def newDocument():
        # The real API returns a document object; the stub returns a string.
        print("New Document")
        return "New Document"
class Base(object):
    """Stand-in for FreeCAD's ``Base`` module namespace."""

    class Vector:
        """Minimal 3-component vector holding x, y and z."""

        # Class-level defaults, kept for compatibility with code that
        # reads Vector.x/y/z without an instance.
        x = 0
        y = 0
        z = 0

        def __init__(self, x, y, z):
            self.x, self.y, self.z = x, y, z
drawAction = "none"
componentList = []
position=Base.Vector(0,0,0)
def __init__(self, position):
self.position = position
def fuse(self, component):
selfCopy = self.copy()
fuseCopy = component.copy()
fuseCopy.drawAction="fuse"
selfCopy.componentList.append(fuseCopy)
return selfCopy
def cut(self, component):
selfCopy = self.copy()
fuseCopy = component.copy()
fuseCopy.drawAction="cut"
selfCopy.componentList.append(fuseCopy)
return selfCopy
def copy(self):
result = Component(self.position)
result.componentList = list(self.componentList)
result.drawAction = self.drawAction
return result
def translate(self,vector):
pass
def show(self):
message = "Component "+self.drawAction+" at (" + str(self.position.x) + ","+str(self.position.y)+","+str(self.position.y) + ")"
Display.addMessageLine(message)
for c in self.componentList:
c.show()
def drawColour(self):
colour = "cyan"
if self.drawAction == "fuse":
colour="blue"
else:
if self.drawAction == "cut":
colour="red"
else:
colour = "yellow"
return colour
class Box(Component):
width=0
height=0
depth=0
def __init__(self, width,height,depth,position):
super(Box,self).__init__(position)
self.width=width
self.height=height
self.depth=depth
def copy(self):
result = Box(self.width, self.height, self.depth,self.position)
result.componentList = list(self.componentList)
return result
def show(self):
message = "Box "+self.drawAction+" at (" + str(self.position.x) + ","+str(self.position.y)+","+str(self.position.y) + ") W:"+str(self.width) + " H:"+str(self.height)+" D:"+str(self.depth)
Display.addMessageLine(message)
x1=self.position.x
y1=self.position.y
x2=x1+self.width
y2=y1+self.height
colour = self.drawColour()
Display.drawRectangle(x1,y1,x2,y2,colour,colour)
for c in self.componentList:
c.show()
class Cylinder(Component):
radius=0
height=0
dir = Base.Vector(0,0,1)
def __init__(self, radius,height,position, dir=Base.Vector(0,0,1)):
super(Cylinder,self).__init__(position)
self.radius=radius
self.height=height
self.dir = dir
def copy(self):
result = Cylinder(self.radius, self.height, self.position, self.dir)
result.componentList = list(self.componentList)
return result
def show(self):
message = "Cylinder "+self.drawAction+" at (" + str(self.position.x) + ","+str(self.position.y)+","+str(self.position.y) + ")"
Display.addMessageLine(message)
colour = self.drawColour()
Display.drawCircle(self.position.x, self.position.y, self.radius, colour, colour)
for c in self.componentList:
c.show()
def rotate(self,origin, axis, amount):
pass
class Part(object):
    """Stand-in for FreeCAD's ``Part`` module: primitive factories."""

    @staticmethod
    def makeBox(width, height, depth, position=Base.Vector(0, 0, 0)):
        """Create a Box primitive (at the origin by default)."""
        return Box(width, height, depth, position)

    @staticmethod
    def makeCylinder(radius, height, position=Base.Vector(0, 0, 0), dir=Base.Vector(0, 0, 1)):
        """Create a Cylinder primitive.

        Bug fix: ``dir`` was previously accepted but never forwarded, so
        every cylinder silently ended up with the default axis.
        """
        return Cylinder(radius, height, position, dir)

    @staticmethod
    def show(component):
        """Render *component* (and its children) via the Display."""
        component.show()
| 28.049383 | 195 | 0.612676 |
4bb3ad6d17652d916bbaa0d6463dbf5a40796067 | 1,748 | py | Python | digi/utils.py | HotStew/digihel | 3a53f6542b41c64c818fa34cc0709cdbfb8055db | [
"MIT"
] | 21 | 2016-08-22T10:15:24.000Z | 2022-03-23T08:10:48.000Z | digi/utils.py | HotStew/digihel | 3a53f6542b41c64c818fa34cc0709cdbfb8055db | [
"MIT"
] | 101 | 2016-08-08T07:52:44.000Z | 2021-06-17T20:18:59.000Z | digi/utils.py | HotStew/digihel | 3a53f6542b41c64c818fa34cc0709cdbfb8055db | [
"MIT"
] | 16 | 2016-08-02T11:45:26.000Z | 2021-02-18T11:27:34.000Z | import logging
import time
from django.conf import settings
from django.core.cache import cache
log = logging.getLogger(__name__)
def get_cached_with_mtime(cache_key, getter, max_mtime=60, default=None, expiry=86400):
    """
    Get something with a maximum modification time.

    I.e. if the data stored in the cache is older than max_mtime seconds (or does not
    exist), it attempts to call getter() for a new value.  If the refresh
    fails, the stale cached value (or ``default``) is served instead —
    best-effort semantics, except under DEBUG where errors propagate.

    :param cache_key: Cache key string
    :type cache_key: str
    :param getter: Getter function
    :type getter: function
    :param max_mtime: Maximum modification time, in seconds
    :type max_mtime: int
    :param default: Default value, if nothing is in the cache
    :type default: object
    :param expiry: Maximum expiry for the cache entity, in seconds
    :type expiry: int
    :return: data, from the getter or the cache
    :rtype: object
    """
    cached_data = cache.get(cache_key)
    # Refresh when there is no cached entry or it is older than max_mtime.
    if cached_data is None or (time.time() - cached_data['mtime']) > max_mtime:
        try:
            cached_data = {
                'mtime': time.time(),
                'data': getter(),
            }
            cache.set(cache_key, cached_data, expiry)
        except Exception:
            # In DEBUG, surface the failure; in production, log and fall
            # back to whatever we had before.
            if settings.DEBUG:
                raise
            log.warn('error fetching in get_cached_with_mtime(%s)', cache_key, exc_info=True)
            if cached_data is None:
                # If we didn't have anything cached to begin with,
                # at least cache something for a while to avoid hammering the original `getter()`
                cached_data = {'mtime': time.time(), 'data': default}
                cache.set(cache_key, cached_data, max_mtime)
    return cached_data['data']
| 34.96 | 97 | 0.639588 |
2fda57353abac7baba062d0a6eda19604ccbf9e7 | 11,961 | py | Python | calliope/cli.py | guidogz/Calliope_guido | 148ee39c3671e55ad3a1a2da216ee23112d16abf | [
"Apache-2.0"
] | null | null | null | calliope/cli.py | guidogz/Calliope_guido | 148ee39c3671e55ad3a1a2da216ee23112d16abf | [
"Apache-2.0"
] | null | null | null | calliope/cli.py | guidogz/Calliope_guido | 148ee39c3671e55ad3a1a2da216ee23112d16abf | [
"Apache-2.0"
] | null | null | null | """
Copyright (C) 2013-2019 Calliope contributors listed in AUTHORS.
Licensed under the Apache 2.0 License (see LICENSE file).
cli.py
~~~~~~
Command-line interface.
"""
import contextlib
import datetime
import itertools
import logging
import os
import pstats
import shutil
import sys
import traceback
import click
from calliope import AttrDict, Model, examples, read_netcdf
from calliope._version import __version__
from calliope.core.util.generate_runs import generate
from calliope.core.util.logging import set_log_verbosity
from calliope.exceptions import BackendError
_time_format = '%Y-%m-%d %H:%M:%S'
_debug = click.option(
'--debug', is_flag=True, default=False,
help='Print debug information when encountering errors.'
)
_quiet = click.option(
'--quiet', is_flag=True, default=False,
help='Be less verbose about what is happening, including hiding '
'solver output.'
)
_pdb = click.option(
'--pdb', is_flag=True, default=False,
help='If used together with --debug, drop into interactive '
'debugger on encountering errors.'
)
_profile = click.option(
'--profile', is_flag=True, default=False,
help='Run through cProfile.'
)
_profile_filename = click.option(
'--profile_filename', type=str,
help='Filename to save profile to if enabled --profile.'
)
_fail_when_infeasible = click.option(
'--fail_when_infeasible/--no_fail_when_infeasible', is_flag=True, default=True,
help='Return fail on command line when problem is infeasible (default True).'
)
@contextlib.contextmanager
def format_exceptions(
        debug=False, pdb=False, profile=False,
        profile_filename=None, start_time=None):
    """Context manager wrapping CLI work with error formatting and profiling.

    On success with ``profile`` set, dumps or prints cProfile stats.  On
    any exception: with ``debug`` prints the full traceback (and drops
    into ipdb when ``pdb`` is also set); otherwise prints a short,
    red-coloured summary of the last Calliope frame.  Always exits the
    process with status 1 after an error.
    """
    try:
        if profile:
            # Note: `profile` is rebound from the bool flag to the
            # cProfile.Profile instance here.
            import cProfile
            profile = cProfile.Profile()
            profile.enable()
        yield
        if profile:
            profile.disable()
            if profile_filename:
                dump_path = os.path.expanduser(profile_filename)
                click.secho('\nSaving cProfile output to: {}'.format(dump_path))
                profile.dump_stats(dump_path)
            else:
                click.secho('\n\n----PROFILE OUTPUT----\n\n')
                stats = pstats.Stats(profile).sort_stats('cumulative')
                stats.print_stats(20)  # Print first 20 lines
    except Exception as e:
        if debug:
            traceback.print_exc()
            if pdb:
                import ipdb
                ipdb.post_mortem(e.__traceback__)
        else:
            stack = traceback.extract_tb(e.__traceback__)
            # Get last stack trace entry still in Calliope
            last = [i for i in stack if 'calliope' in i[0]][-1]
            err_string = '\nError in {}, {}:{}'.format(last[2], last[0], last[1])
            click.secho(err_string, fg='red')
            click.secho(str(e), fg='red')
        if start_time:
            print_end_time(start_time, msg='aborted due to an error')
        sys.exit(1)
def print_end_time(start_time, msg='complete'):
    """Report elapsed wall-clock time since *start_time* to the terminal."""
    now = datetime.datetime.now()
    elapsed_secs = round((now - start_time).total_seconds(), 1)
    click.secho(
        '\nCalliope run {}. '
        'Elapsed: {} seconds (time at exit: {})'.format(
            msg, elapsed_secs, now.strftime(_time_format)))
def _get_version():
    """Return the version string printed for --version and debug output."""
    return 'Version {}'.format(__version__)
def _cli_start(debug, quiet):
    """
    Initial setup for CLI commands.

    Configures log verbosity from the --debug/--quiet flags and records
    the command start time.

    Returns ``start_time`` (datetime timestamp)
    """
    if debug:
        click.secho(_get_version())

    # --debug wins over --quiet; debug implies solver output.
    if debug:
        verbosity = 'debug'
        log_solver = True
    else:
        if quiet:
            verbosity = 'warning'
            log_solver = False
        else:  # Default option
            verbosity = 'info'
            log_solver = True
    set_log_verbosity(
        verbosity, include_solver_output=log_solver,
        capture_warnings=True
    )

    start_time = datetime.datetime.now()

    return start_time
# Root command group: running bare `calliope` (no subcommand, no --version)
# prints the help text.
@click.group(invoke_without_command=True)
@click.pass_context
@click.option('--version', is_flag=True, default=False,
              help='Display version.')
def cli(ctx, version):
    """Calliope: a multi-scale energy systems modelling framework"""
    if ctx.invoked_subcommand is None and not version:
        click.secho(ctx.get_help())
    if version:
        click.secho(_get_version())
@cli.command(name='new', short_help='Create a new model based on a built-in example.')
@click.argument('path')
@click.option('--template', type=str, default=None)
@_debug
def new(path, template, debug):
    """
    Create new model at the given ``path``, based on one of the built-in
    example models. The target path must not yet exist. Intermediate
    directories will be created automatically.
    """
    _cli_start(debug, quiet=False)
    with format_exceptions(debug):
        # Default to the national_scale example when no template is given.
        if template is None:
            template = 'national_scale'
        source_path = examples._PATHS[template]
        click.echo('Copying {} template to target directory: {}'.format(template, path))
        # copytree creates intermediate directories and fails if `path` exists.
        shutil.copytree(source_path, path)
def _run_setup_model(
model_file, scenario, model_format, override_dict):
"""
Build model in CLI commands. Returns ``model``, a ready-to-run
calliope.Model instance.
"""
# Try to determine model file type if not given explicitly
if model_format is None:
if model_file.split('.')[-1] in ['yaml', 'yml']:
model_format = 'yaml'
elif model_file.split('.')[-1] in ['nc', 'nc4', 'netcdf']:
model_format = 'netcdf'
else:
raise ValueError(
'Cannot determine model file format based on file '
'extension for "{}". Set format explicitly with '
'--model_format.'.format(model_file)
)
if model_format == 'yaml':
model = Model(
model_file, scenario=scenario, override_dict=override_dict
)
elif model_format == 'netcdf':
if scenario is not None or override_dict is not None:
raise ValueError(
'When loading a pre-built model from NetCDF, the '
'--scenario and --override_dict options are not available.'
)
model = read_netcdf(model_file)
else:
raise ValueError('Invalid model format: {}'.format(model_format))
return model
@cli.command(name='run', short_help='Build and run a model.')
@click.argument('model_file')
@click.option('--scenario')
@click.option('--model_format')
@click.option('--override_dict')
@click.option('--save_netcdf')
@click.option('--save_csv')
@click.option('--save_plots')
@click.option('--save_logs')
@click.option(
    '--save_lp', help='Build and save model to the given LP file. '
    'When this is set, the model is not sent to a solver, and all other save options are ignored.')
@_debug
@_quiet
@_pdb
@_profile
@_profile_filename
@_fail_when_infeasible
def run(model_file, scenario, model_format, override_dict,
        save_netcdf, save_csv, save_plots, save_logs, save_lp,
        debug, quiet, pdb, profile, profile_filename, fail_when_infeasible):
    """
    Execute the given model. Tries to guess from the file extension whether
    ``model_file`` is a YAML file or a pre-built model saved to NetCDF.
    This can also explicitly be set with the --model_format=yaml or
    --model_format=netcdf option.
    """
    start_time = _cli_start(debug, quiet)
    click.secho(
        'Calliope {} starting at {}\n'.format(
            __version__,
            start_time.strftime(_time_format)
        ))
    with format_exceptions(debug, pdb, profile, profile_filename, start_time):
        model = _run_setup_model(model_file, scenario, model_format, override_dict)
        click.secho(model.info() + '\n')
        # Only save LP file
        if save_lp:  # Only save LP file without solving model
            click.secho('Saving model to LP file...')
            # All other save options require a solved model, so warn that
            # they will be ignored in LP-only mode.
            if save_csv is not None or save_netcdf is not None or save_plots is not None:
                click.secho(
                    'WARNING: Model will not be solved - ignoring other save options!',
                    fg='red', bold=True)
            model.to_lp(save_lp)
            print_end_time(start_time)
        # Else run the model, then save outputs
        else:
            click.secho('Starting model run...')
            # Forward the solver log directory into the model's run config.
            if save_logs:
                model.run_config['save_logs'] = save_logs
            if save_csv is None and save_netcdf is None:
                click.secho(
                    '\n!!!\nWARNING: No options to save results have been '
                    'specified.\nModel will run without saving results!\n!!!\n',
                    fg='red', bold=True
                )
            model.run()
            # Solver termination condition drives plot saving and the
            # optional hard failure below.
            termination = model._model_data.attrs.get('termination_condition', 'unknown')
            if save_csv:
                click.secho('Saving CSV results to directory: {}'.format(save_csv))
                model.to_csv(save_csv)
            if save_netcdf:
                click.secho('Saving NetCDF results to file: {}'.format(save_netcdf))
                model.to_netcdf(save_netcdf)
            if save_plots:
                # Plots are only meaningful for an optimal solution.
                if termination == 'optimal':
                    click.secho('Saving HTML file with plots to: {}'.format(save_plots))
                    model.plot.summary(to_file=save_plots)
                else:
                    click.secho(
                        'Model termination condition non-optimal. Not saving plots',
                        fg='red', bold=True
                    )
            print_end_time(start_time)
            # Turn a non-optimal solve into a hard error when requested.
            if fail_when_infeasible and termination != 'optimal':
                raise BackendError("Problem is infeasible.")
@cli.command(
    name='generate_runs',
    short_help='Generate a script to run multiple models.'
)
@click.argument('model_file')
@click.argument('out_file')
@click.option('--kind', help='One of: "bash", "bsub", "sbatch", or "windows".')
@click.option('--scenarios')
@click.option('--cluster_threads', default=1)
@click.option('--cluster_mem')
@click.option('--cluster_time')
@click.option(
    '--additional_args', default='',
    help='Any additional arguments to pass directly on to `calliope run`.')
@click.option('--override_dict')
@_debug
@_quiet
@_pdb
def generate_runs(
        model_file, out_file, kind, scenarios,
        additional_args, override_dict,
        cluster_threads, cluster_mem, cluster_time,
        debug, quiet, pdb):
    _cli_start(debug, quiet)
    with format_exceptions(debug, pdb):
        # Everything except ``kind`` is forwarded as keyword arguments to
        # the script generator.
        generate(
            kind,
            model_file=model_file,
            out_file=out_file,
            scenarios=scenarios,
            additional_args=additional_args,
            override_dict=override_dict,
            cluster_mem=cluster_mem,
            cluster_time=cluster_time,
            cluster_threads=cluster_threads,
        )
@cli.command(
    name='generate_scenarios',
    short_help='Generate scenario definitions from given combinations of overrides.'
)
@click.argument('model_file')
@click.argument('out_file')
@click.argument('overrides', nargs=-1)
@click.option('--scenario_name_prefix')
@_debug
@_quiet
@_pdb
def generate_scenarios(
        model_file, out_file, overrides, scenario_name_prefix,
        debug, quiet, pdb):
    _cli_start(debug, quiet)
    with format_exceptions(debug, pdb):
        # Cartesian product over the ';'-separated options of each override.
        option_groups = [i.split(';') for i in overrides]
        combinations = list(itertools.product(*option_groups))

        prefix = scenario_name_prefix or 'scenario_'
        # Zero-pad scenario numbers to the width of the largest index
        # (len(str(x)) gives the number of digits in x).
        pad_width = len(str(len(combinations)))
        name_template = '{}{:0>' + str(pad_width) + 'd}'
        scenarios = {'scenarios': {
            name_template.format(prefix, index + 1): combo
            for index, combo in enumerate(combinations)}}
        AttrDict(scenarios).to_yaml(out_file)
| 31.311518 | 99 | 0.627707 |
3d047414c272df91991c962e24b252721561a852 | 2,780 | py | Python | tests/system/action/personal_note/test_delete.py | OpenSlides/openslides-backend | 57f58a4ca0e5ca113ff104efa9db3e2c66e3aeab | [
"MIT"
] | 5 | 2020-01-20T13:57:15.000Z | 2021-03-27T14:14:44.000Z | tests/system/action/personal_note/test_delete.py | OpenSlides/openslides-backend | 57f58a4ca0e5ca113ff104efa9db3e2c66e3aeab | [
"MIT"
] | 859 | 2020-01-11T22:58:37.000Z | 2022-03-30T14:54:06.000Z | tests/system/action/personal_note/test_delete.py | OpenSlides/openslides-backend | 57f58a4ca0e5ca113ff104efa9db3e2c66e3aeab | [
"MIT"
] | 16 | 2020-01-04T20:28:57.000Z | 2022-02-10T12:06:54.000Z | from typing import Any, Dict
from tests.system.action.base import BaseActionTestCase
class PersonalNoteDeleteActionTest(BaseActionTestCase):
    """System tests for the ``personal_note.delete`` action.

    Covers the owner happy path plus the three permission failures:
    deleting someone else's note, acting outside the meeting, and
    anonymous access.
    """

    def setUp(self) -> None:
        super().setUp()
        # Baseline fixture: user/1 owns personal_note/1 in meeting/111.
        self.test_models: Dict[str, Dict[str, Any]] = {
            "meeting/111": {"personal_note_ids": [1]},
            "user/1": {
                "personal_note_$111_ids": [1],
                "personal_note_$_ids": ["111"],
                "meeting_ids": [111],
            },
            "personal_note/1": {
                "star": True,
                "note": "blablabla",
                "user_id": 1,
                "meeting_id": 111,
            },
        }

    def test_delete_correct(self) -> None:
        # checks permissions too.
        self.set_models(self.test_models)
        response = self.request("personal_note.delete", {"id": 1})
        self.assert_status_code(response, 200)
        self.assert_model_deleted("personal_note/1")
        # The owner's back-references must be cleared as well.
        user = self.get_model("user/1")
        assert user.get("personal_note_$111_ids") == []
        assert user.get("personal_note_$_ids") == []

    def test_delete_wrong_user_id(self) -> None:
        # Note is owned by user/2, but the request is made as user/1.
        self.set_models(
            {
                "meeting/111": {"personal_note_ids": [1]},
                "user/2": {
                    "personal_note_$111_ids": [1],
                    "personal_note_$_ids": ["111"],
                },
                "personal_note/1": {
                    "star": True,
                    "note": "blablabla",
                    "user_id": 2,
                    "meeting_id": 111,
                },
                "user/1": {"meeting_ids": [111]},
            }
        )
        response = self.request("personal_note.delete", {"id": 1})
        self.assert_status_code(response, 403)
        self.assertIn(
            "Cannot delete not owned personal note.", response.json["message"]
        )
        # The note must survive the rejected request.
        self.assert_model_exists("personal_note/1")

    def test_delete_no_permission_user_not_in_meeting(self) -> None:
        # Remove the requesting user from the meeting before deleting.
        self.test_models["user/1"]["meeting_ids"] = []
        self.set_models(self.test_models)
        response = self.request("personal_note.delete", {"id": 1})
        self.assert_status_code(response, 403)
        assert "User not associated with meeting." in response.json["message"]

    def test_delete_no_permission_anon_user(self) -> None:
        # Anonymous requests may never delete personal notes.
        self.set_models(self.test_models)
        self.set_anonymous(meeting_id=111)
        response = self.request(
            "personal_note.delete",
            {"id": 1},
            anonymous=True,
        )
        self.assert_status_code(response, 403)
        assert (
            "Anonymous is not allowed to execute personal_note.delete"
            in response.json["message"]
        )
| 35.641026 | 78 | 0.53705 |
0d1c945b85f15cf1300ebfe5a825c04962f78977 | 1,749 | py | Python | single_table.py | baluyotraf/mysql-sample-generator | 348734c5b711280af72aef5207ced0834df2cee1 | [
"MIT"
] | null | null | null | single_table.py | baluyotraf/mysql-sample-generator | 348734c5b711280af72aef5207ced0834df2cee1 | [
"MIT"
] | null | null | null | single_table.py | baluyotraf/mysql-sample-generator | 348734c5b711280af72aef5207ced0834df2cee1 | [
"MIT"
] | null | null | null | from mysql import *
from generators import *
import os
import random
import string
# Output directory: the directory containing this script.
DIR_NAME = os.path.dirname(os.path.abspath(__file__))

# Schema of the single generated table.
TABLE_NAME = 'employees'
TABLE_COLUMNS = [
    Column('id', Column.INT, Column.AUTO_INCREMENT, Column.PRIMARY_KEY),
    Column('first_name', Column.STRING(), Column.NOT_NULL),
    Column('last_name', Column.STRING(), Column.NOT_NULL),
    Column('middle_name', Column.STRING(), Column.NOT_NULL),
    Column('age', Column.INT, Column.NOT_NULL),
    Column('mobile_no', Column.STRING(), Column.NOT_NULL),
    Column('salary', Column.INT, Column.NOT_NULL),
    Column('level', Column.INT, Column.NOT_NULL),
]

# Columns populated by INSERT (everything except the auto-increment id).
INSERT_COLUMN = [c.name for c in TABLE_COLUMNS if c.name != 'id']
# Number of sample rows to generate.
DATA_COUNT = 1000
def gint():
    # Random integer via the generators helper with bounds 0 and 100
    # (inclusivity is defined by generate_integer — TODO confirm).
    return generate_integer(0, 100)
def gstr():
    # Random 5-character alphanumeric string via the generators helper.
    return generate_alphanum_string(5)
def values_generator(count):
    """Yield ``count`` rows of random values for the non-id table columns."""
    data_columns = [col for col in TABLE_COLUMNS if col.name != 'id']
    for _ in range(count):
        row = []
        for col in data_columns:
            # Integer columns get a random int, everything else a random string.
            row.append(gint() if col.dtype == Column.INT else gstr())
        yield row
def write_line(file, string):
    """Write ``string`` to ``file`` followed by a newline."""
    file.write('{}\n'.format(string))
if __name__ == '__main__':
    # Seed the RNG from user input so a given seed reproduces the same SQL.
    seed = input('Enter a seed: ')
    seed = seed.lower()
    random.seed(seed)
    # Derive and display two sorted random ints and two sorted random letters
    # (presumably used as exercise parameters for the generated dataset —
    # TODO confirm intended use).
    a, b = sorted([gint(), gint()])
    x, y = sorted(random.choices(string.ascii_lowercase, k=2))
    print('A:', a, 'B:', b)
    print('X:', x, 'Y:', y)
    db = Database(seed)
    table = Table(TABLE_NAME, TABLE_COLUMNS)
    # Write CREATE DATABASE/USE/CREATE TABLE/INSERT statements to <seed>.sql.
    output_path = os.path.join(DIR_NAME, seed + '.sql')
    with open(output_path, 'w') as file:
        write_line(file, db.create())
        write_line(file, db.use())
        write_line(file, table.create())
        write_line(file, table.insert(INSERT_COLUMN, values_generator(DATA_COUNT)))
    # Keep the console window open until the user presses Enter.
    input()
| 26.907692 | 83 | 0.652945 |
65b49db63e83751c3ae4c66f7dccc6f97720d063 | 2,459 | py | Python | research/cv/dcgan/preprocess.py | leelige/mindspore | 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | [
"Apache-2.0"
] | 77 | 2021-10-15T08:32:37.000Z | 2022-03-30T13:09:11.000Z | research/cv/dcgan/preprocess.py | leelige/mindspore | 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | [
"Apache-2.0"
] | 3 | 2021-10-30T14:44:57.000Z | 2022-02-14T06:57:57.000Z | research/cv/dcgan/preprocess.py | leelige/mindspore | 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | [
"Apache-2.0"
] | 24 | 2021-10-15T08:32:45.000Z | 2022-03-24T18:45:20.000Z | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""preprocess"""
import os
import argparse
import numpy as np
from src.config import dcgan_cifar10_cfg
from src.dataset import create_dataset_cifar10
# Command-line arguments for the preprocessing step.
parser = argparse.ArgumentParser('preprocess')
parser.add_argument('--dataset_name', type=str, default="cifar10")
parser.add_argument('--data_path', type=str, default='', help='eval data dir')
args = parser.parse_args()

if __name__ == "__main__":
    # Export the CIFAR-10 train split as per-batch .bin files plus a numpy
    # array of the corresponding labels.
    dataset_train = create_dataset_cifar10(args.data_path, num_parallel_workers=2, usage='train')
    img_path_train = os.path.join('./preprocess_Result/', "train_data")
    os.makedirs(img_path_train)
    label_list = []
    for idx, data in enumerate(dataset_train.create_dict_iterator(output_numpy=True)):
        file_name = "dcgan_data_bs" + str(dcgan_cifar10_cfg.batch_size) + "_" + str(idx) + ".bin"
        file_path = os.path.join(img_path_train, file_name)
        data["image"].tofile(file_path)
        label_list.append(data["label"])
    np.save(os.path.join('./preprocess_Result/', "cifar10_label_ids_train.npy"), label_list)
    print("=" * 20, "export bin files finished", "=" * 20)

    # Same export for the test split.
    dataset_test = create_dataset_cifar10(args.data_path, num_parallel_workers=2, usage='test')
    img_path_test = os.path.join('./preprocess_Result/', "test_data")
    os.makedirs(img_path_test)
    label_list = []
    for idx, data in enumerate(dataset_test.create_dict_iterator(output_numpy=True)):
        file_name = "dcgan_data_bs" + str(dcgan_cifar10_cfg.batch_size) + "_" + str(idx) + ".bin"
        file_path = os.path.join(img_path_test, file_name)
        data["image"].tofile(file_path)
        label_list.append(data["label"])
    np.save(os.path.join('./preprocess_Result/', "cifar10_label_ids_test.npy"), label_list)
    print("=" * 20, "export bin files finished", "=" * 20)
| 48.215686 | 97 | 0.701911 |
f8c8bb3d306b3bf61fdd76c5c33e08c1f2e2e565 | 4,904 | py | Python | decap/train/keras_ctc_cm.py | jkmiao/captcha | 902dfd960a43ad5036544c8b299b3472f01004ec | [
"MIT"
] | 2 | 2018-11-01T06:19:00.000Z | 2021-04-02T10:11:29.000Z | decap/train/keras_ctc_cm.py | jkmiao/captcha | 902dfd960a43ad5036544c8b299b3472f01004ec | [
"MIT"
] | null | null | null | decap/train/keras_ctc_cm.py | jkmiao/captcha | 902dfd960a43ad5036544c8b299b3472f01004ec | [
"MIT"
] | 3 | 2017-10-23T04:50:23.000Z | 2018-09-22T15:10:52.000Z | #!/usr/bin/env python
# coding: utf-8
from keras.models import Model, Input, load_model
from keras.layers import *
from keras import backend as K
import numpy as np
from PIL import Image
import random
import os
# Basic parameter configuration
charsets = '0123456789abcs'  # recognizable characters (digits plus a/b/c/s)
rnn_size = 128
# Image height/width, maximum label length, and number of output classes.
height, width, max_len, n_class = 30, 200, 5, len(charsets)
char_dict = dict((c, i) for i, c in enumerate(charsets))   # char -> index
indic_dict = dict((i, c) for i, c in enumerate(charsets))  # index -> char
TIMESTEP = 23
def load_data(path='img/origin_cm'):
    """Load captcha images and labels from ``path`` for CTC training.

    Returns ``[data, input_label, input_len, label_len], oriLabel`` where the
    first four arrays are the inputs expected by the CTC loss layer and
    ``oriLabel`` is the list of plain-text labels. Labels are taken from the
    part of the filename before the first underscore.
    """
    fnames = [os.path.join(path, fname) for fname in os.listdir(path) if fname.endswith('jpg')]
    random.shuffle(fnames)
    data = np.zeros((len(fnames), width, height, 3), dtype=np.uint8)  # data type
    input_label = np.zeros((len(fnames), max_len), dtype=np.uint8)
    input_len = np.ones(len(fnames), dtype=np.uint8)*21  # 23-2: timesteps after reshape minus the 2 dropped steps
    label_len = np.zeros(len(fnames), dtype=np.uint8)
    oriLabel = []
    for idx, fname in enumerate(fnames):
        # Images are stored width-first (transposed) to match the model input.
        img = Image.open(fname).convert('RGB').resize((width, height), Image.ANTIALIAS)
        data[idx] = np.array(img).transpose(1, 0, 2)
        imgLabel = (fname.split('/')[-1].split('_')[0])
        tmp_vec = np.zeros(max_len, dtype=np.uint8)  # defaults to 0
        for i, c in enumerate(imgLabel):
            tmp_vec[i] = char_dict[c]  # fill in the correct class indices
        input_label[idx] = tmp_vec
        label_len[idx] = len(imgLabel)
        oriLabel.append(imgLabel)
    return [data, input_label, input_len, label_len], oriLabel
def ctc_lambda_func(args):
    """Keras Lambda wrapper computing the CTC batch cost."""
    y_pred, labels, input_len, label_len = args
    # Drop the first two time steps before CTC; this matches input_len being
    # set to timesteps - 2 (21 = 23 - 2) in load_data.
    y_pred = y_pred[:, 2:, :]
    return K.ctc_batch_cost(labels, y_pred, input_len, label_len)
DEBUG = True

if DEBUG:
    # Fresh model definition
    input_tensor = Input((width, height, 3), name='input_tensor')
    x = input_tensor
    # Three conv blocks: two 3x3 convolutions, max-pool, batch-norm.
    for i in range(3):
        x = Conv2D(128, (3, 3), padding='same', activation='relu')(x)
        x = Conv2D(128, (3, 3), activation='relu')(x)
        x = MaxPooling2D((2,2))(x)
        x = BatchNormalization()(x)
    # Collapse the spatial map into a (timesteps, features) sequence for the RNN.
    conv_shape = x.get_shape()
    x = Reshape(target_shape=(int(conv_shape[1]), int(conv_shape[2]*conv_shape[3])))(x)
    x = Bidirectional(GRU(rnn_size, return_sequences=True), name='BiGRU1', merge_mode='sum')(x)
    x = Dropout(0.25)(x)
    x = Dense(n_class, activation='softmax')(x)
    # base_model: image -> per-timestep class probabilities (used for inference).
    base_model = Model(inputs=input_tensor, outputs=x)
    # Extra inputs needed by the CTC loss layer during training.
    labels = Input(name='the_labels', shape=[max_len], dtype='float32')
    input_len = Input(name='input_len', shape=[1], dtype='int64')
    label_len = Input(name='label_len', shape=[1], dtype='int64')
    loss_out = Lambda(ctc_lambda_func, output_shape=(1,), name='ctc_loss')([x, labels, input_len, label_len])
    model = Model(inputs=[input_tensor, labels, input_len, label_len], outputs=[loss_out])
    # Compile: the Lambda layer already outputs the loss, so the loss
    # function simply forwards y_pred.
    model.compile(loss={'ctc_loss':lambda y_true, y_pred: y_pred}, optimizer='adam')
    print model.summary()
    print 'conv_shape', conv_shape
    print 'x_shape', x.get_shape()

base_model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

# Reload previously saved weights to continue training
model.load_weights('model/tgcode_ctc_weights.h5')

# Load the training data
[X_data, labels, input_len, label_len], oriLabel = load_data()
print 'X_data', X_data.shape
print 'labels', labels.shape, labels[:5]
print 'input_len', input_len.shape, input_len[:5]
print 'label_len', label_len.shape, label_len[10:100:5]

# Input/output dicts keyed by the layer names defined above.
inputs = {
    'input_tensor': X_data,
    'the_labels': labels,
    'input_len': input_len,
    'label_len': label_len
}
outputs = {'ctc_loss': np.zeros([len(labels)])}
def test(path='img/origin_cm'):
    """Decode a small sample of images and print per-image accuracy."""
    # Sample every 5th file from indices 1..19.
    fnames = [os.path.join(path, fname) for fname in os.listdir(path)][1:20:5]
    cnt = 0
    for fname in fnames:
        img = Image.open(fname).convert('RGB').resize((width, height), Image.ANTIALIAS)
        img = np.array(img, np.uint8).transpose(1, 0, 2)
        img = np.expand_dims(img, 0)
        y_pred = base_model.predict(img)
        # Drop the first two time steps, mirroring training.
        y_pred = y_pred[:, 2:, :]
        ctc_decode = (K.ctc_decode(y_pred, input_length=np.ones(y_pred.shape[0])*y_pred.shape[1])[0][0])
        y_out = K.get_value(ctc_decode)[:, :5]
        y_out = ''.join([charsets[x] for x in y_out[0]])
        # Ground-truth label is the filename prefix before the underscore.
        y_true = fname.split('/')[-1].split('_')[0]
        print y_out, y_true
        if y_out==y_true:
            cnt += 1
    print '--'*20
    print float(cnt)/len(fnames)
# test()
# for i in range(10):
#     hist = model.fit(inputs, outputs, batch_size=32, epochs=2)
#     print hist.history['loss']
#     print '=='*10, i

# Evaluate on sample images, then spot-check decoding on two training samples.
test()
y_pred = base_model.predict(X_data[:2])
y_pred = y_pred[:, 2:, :]
print 'y_pred.shape', y_pred.shape
ctc_decode = K.ctc_decode(y_pred, input_length=np.ones(y_pred.shape[0])*y_pred.shape[1])[0][0]
y_out = K.get_value(ctc_decode)[:, :4]
print y_out
out = ''.join([charsets[x] for x in y_out[0]])
print out
print oriLabel[0]

# Persist the inference model and the full training weights.
base_model.save('model/tgcode_ctc_cm.h5')
model.save_weights('model/tgcode_ctc_weights.h5')
| 34.055556 | 109 | 0.64947 |
f7678123ea5340826c6562c5fba3502068a8ddd4 | 3,676 | py | Python | tests/test_dataset/test_ocr_seg_dataset.py | hongxuenong/mmocr | e8e3a059f8f2e4fca96af37751c33563fc48e2ba | [
"Apache-2.0"
] | 2,261 | 2021-04-08T03:45:41.000Z | 2022-03-31T23:37:46.000Z | tests/test_dataset/test_ocr_seg_dataset.py | hongxuenong/mmocr | e8e3a059f8f2e4fca96af37751c33563fc48e2ba | [
"Apache-2.0"
] | 789 | 2021-04-08T05:40:13.000Z | 2022-03-31T09:42:39.000Z | tests/test_dataset/test_ocr_seg_dataset.py | hongxuenong/mmocr | e8e3a059f8f2e4fca96af37751c33563fc48e2ba | [
"Apache-2.0"
] | 432 | 2021-04-08T03:56:16.000Z | 2022-03-30T18:44:43.000Z | # Copyright (c) OpenMMLab. All rights reserved.
import json
import math
import os.path as osp
import tempfile
import pytest
from mmocr.datasets.ocr_seg_dataset import OCRSegDataset
def _create_dummy_ann_file(ann_file):
ann_info1 = {
'file_name':
'sample1.png',
'annotations': [{
'char_text':
'F',
'char_box': [11.0, 0.0, 22.0, 0.0, 12.0, 12.0, 0.0, 12.0]
}, {
'char_text':
'r',
'char_box': [23.0, 2.0, 31.0, 1.0, 24.0, 11.0, 16.0, 11.0]
}, {
'char_text':
'o',
'char_box': [33.0, 2.0, 43.0, 2.0, 36.0, 12.0, 25.0, 12.0]
}, {
'char_text':
'm',
'char_box': [46.0, 2.0, 61.0, 2.0, 53.0, 12.0, 39.0, 12.0]
}, {
'char_text':
':',
'char_box': [61.0, 2.0, 69.0, 2.0, 63.0, 12.0, 55.0, 12.0]
}],
'text':
'From:'
}
ann_info2 = {
'file_name':
'sample2.png',
'annotations': [{
'char_text': 'o',
'char_box': [0.0, 5.0, 7.0, 5.0, 9.0, 15.0, 2.0, 15.0]
}, {
'char_text':
'u',
'char_box': [7.0, 4.0, 14.0, 4.0, 18.0, 18.0, 11.0, 18.0]
}, {
'char_text':
't',
'char_box': [13.0, 1.0, 19.0, 2.0, 24.0, 18.0, 17.0, 18.0]
}],
'text':
'out'
}
with open(ann_file, 'w') as fw:
for ann_info in [ann_info1, ann_info2]:
fw.write(json.dumps(ann_info) + '\n')
return ann_info1, ann_info2
def _create_dummy_loader():
loader = dict(
type='HardDiskLoader',
repeat=1,
parser=dict(
type='LineJsonParser', keys=['file_name', 'text', 'annotations']))
return loader
def test_ocr_seg_dataset():
    """End-to-end test of OCRSegDataset: init, pipelines, parsing, eval."""
    tmp_dir = tempfile.TemporaryDirectory()
    # create dummy data
    ann_file = osp.join(tmp_dir.name, 'fake_data.txt')
    ann_info1, ann_info2 = _create_dummy_ann_file(ann_file)

    # test initialization
    loader = _create_dummy_loader()
    dataset = OCRSegDataset(ann_file, loader, pipeline=[])
    # Annotation file has been loaded, so the temp dir can go away.
    tmp_dir.cleanup()

    # test pre_pipeline
    img_info = dataset.data_infos[0]
    results = dict(img_info=img_info)
    dataset.pre_pipeline(results)
    assert results['img_prefix'] == dataset.img_prefix

    # test _parse_anno_info: it must reject non-list input and entries
    # missing either key or with a malformed char_box.
    annos = ann_info1['annotations']
    with pytest.raises(AssertionError):
        dataset._parse_anno_info(annos[0])
    annos2 = ann_info2['annotations']
    with pytest.raises(AssertionError):
        dataset._parse_anno_info([{'char_text': 'i'}])
    with pytest.raises(AssertionError):
        dataset._parse_anno_info([{'char_box': [1, 2, 3, 4, 5, 6, 7, 8]}])
    annos2[0]['char_box'] = [1, 2, 3]  # wrong box length
    with pytest.raises(AssertionError):
        dataset._parse_anno_info(annos2)

    return_anno = dataset._parse_anno_info(annos)
    assert return_anno['chars'] == ['F', 'r', 'o', 'm', ':']
    assert len(return_anno['char_rects']) == 5

    # test prepare_train_img
    expect_results = {
        'img_info': {
            'filename': 'sample1.png'
        },
        'img_prefix': '',
        'ann_info': return_anno
    }
    data = dataset.prepare_train_img(0)
    assert data == expect_results

    # test evluation: one fully correct word, one with a dropped char.
    metric = 'acc'
    results = [{'text': 'From:'}, {'text': 'ou'}]
    eval_res = dataset.evaluate(results, metric)

    assert math.isclose(eval_res['word_acc'], 0.5, abs_tol=1e-4)
    assert math.isclose(eval_res['char_precision'], 1.0, abs_tol=1e-4)
    assert math.isclose(eval_res['char_recall'], 0.857, abs_tol=1e-4)
| 28.496124 | 78 | 0.547062 |
2ee539c5d6aaf99437f45cc30824a45a37a8e689 | 993 | py | Python | edi_835_parser/segments/outpatient_adjudication.py | shalini1017/edi-835-parser | 5c7c9549621a71cea893b37998e4bdea94822c5c | [
"MIT"
] | null | null | null | edi_835_parser/segments/outpatient_adjudication.py | shalini1017/edi-835-parser | 5c7c9549621a71cea893b37998e4bdea94822c5c | [
"MIT"
] | null | null | null | edi_835_parser/segments/outpatient_adjudication.py | shalini1017/edi-835-parser | 5c7c9549621a71cea893b37998e4bdea94822c5c | [
"MIT"
] | null | null | null | from edi_835_parser.elements.identifier import Identifier
from edi_835_parser.segments.utilities import split_segment, get_element
class OutpatientAdjudication:
    """Represents an MOA (outpatient adjudication information) segment
    of an 835 remittance file, split into named attributes.
    """

    identification = 'MOA'

    identifier = Identifier()

    def __init__(self, segment: str):
        # The text before the first ':' is the segment's index within the file;
        # the remainder is the raw MOA segment.
        self.index = segment.split(':', 1)[0]
        segment = segment.split(':', 1)[1]
        self.segment = segment
        segment = split_segment(segment)

        self.identifier = segment[0]
        # Bug fix: a stray trailing comma previously made this attribute a
        # one-element tuple instead of a scalar like its siblings.
        self.reimbursement_rate = segment[1]
        self.claim_hcpcs_payment_amount = segment[2]
        # Elements 3-9 are optional; get_element handles missing positions.
        self.remark_code1 = get_element(segment, 3)
        self.remark_code2 = get_element(segment, 4)
        self.remark_code3 = get_element(segment, 5)
        self.remark_code4 = get_element(segment, 6)
        self.remark_code5 = get_element(segment, 7)
        self.claim_esrd_payment_amount = get_element(segment, 8)
        self.non_payable_professional_component_amount = get_element(segment, 9)

    def __repr__(self):
        return '\n'.join(str(item) for item in self.__dict__.items())
if __name__ == '__main__':
    # Module is import-only; nothing to run standalone.
    pass
| 29.205882 | 74 | 0.758308 |
339bbd51d145912f21a5856d770f573e2081d6fe | 4,007 | py | Python | src/azure-cli-core/tests/test_arm.py | henrypan/azure-cli | 8de0ab5216ed3dc700546ae9a3c485710322376b | [
"MIT"
] | null | null | null | src/azure-cli-core/tests/test_arm.py | henrypan/azure-cli | 8de0ab5216ed3dc700546ae9a3c485710322376b | [
"MIT"
] | 2 | 2021-03-25T21:38:56.000Z | 2021-11-15T17:46:45.000Z | src/azure-cli-core/tests/test_arm.py | Visual-Studio-China/azure-cli-int | 48c7c7f371a0ecc4ebfd4dcfdc72764beddf5c31 | [
"MIT"
] | null | null | null | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import unittest
from azure.cli.core.commands.arm import parse_resource_id
class TestARM(unittest.TestCase):
    """Unit tests for parse_resource_id resource-ID decomposition."""

    def test_resource_parse(self):
        """Each test case pairs an ARM resource ID with its expected parts."""
        tests = [{
            # Child resource addressed through an explicit child provider.
            'resource_id': '/subscriptions/fakesub/resourcegroups/testgroup/providers'
                           '/Microsoft.Storage/storageAccounts/foo/providers'
                           '/Microsoft.Authorization/locks/bar',
            'expected': {
                'name': 'foo',
                'type': 'storageAccounts',
                'namespace': 'Microsoft.Storage',
                'child_name': 'bar',
                'child_namespace': 'Microsoft.Authorization',
                'child_type': 'locks',
                'resource_group': 'testgroup',
                'subscription': 'fakesub',
            }
        }, {
            # Child resource without a child provider namespace.
            'resource_id': '/subscriptions/fakesub/resourcegroups/testgroup/providers'
                           '/Microsoft.Storage/storageAccounts/foo'
                           '/locks/bar',
            'expected': {
                'name': 'foo',
                'type': 'storageAccounts',
                'namespace': 'Microsoft.Storage',
                'child_name': 'bar',
                'child_type': 'locks',
                'resource_group': 'testgroup',
                'subscription': 'fakesub',
            }
        }, {
            # Plain top-level resource.
            'resource_id': '/subscriptions/fakesub/resourcegroups/testgroup/providers'
                           '/Microsoft.Storage/storageAccounts/foo',
            'expected': {
                'name': 'foo',
                'type': 'storageAccounts',
                'namespace': 'Microsoft.Storage',
                'resource_group': 'testgroup',
                'subscription': 'fakesub',
            }
        }, {
            # Subscription-level resource (no resource group).
            'resource_id': '/subscriptions/fakesub/providers/Microsoft.Authorization'
                           '/locks/foo',
            'expected': {
                'name': 'foo',
                'type': 'locks',
                'namespace': 'Microsoft.Authorization',
                'subscription': 'fakesub',
            }
        }, {
            # Grandchild resource with an explicit child provider.
            'resource_id': '/subscriptions/fakesub/resourcegroups/testgroup/providers'
                           '/Microsoft.Storage/storageAccounts/foo/providers'
                           '/Microsoft.Authorization/locks/bar/nets/gc',
            'expected': {
                'name': 'foo',
                'type': 'storageAccounts',
                'namespace': 'Microsoft.Storage',
                'child_name': 'bar',
                'child_namespace': 'Microsoft.Authorization',
                'child_type': 'locks',
                'grandchild_name': 'gc',
                'grandchild_type': 'nets',
                'resource_group': 'testgroup',
                'subscription': 'fakesub',
            }
        }, {
            # Grandchild resource without a child provider namespace.
            'resource_id': '/subscriptions/fakesub/resourcegroups/testgroup/providers'
                           '/Microsoft.Storage/storageAccounts/foo'
                           '/locks/bar/nets/gc',
            'expected': {
                'name': 'foo',
                'type': 'storageAccounts',
                'namespace': 'Microsoft.Storage',
                'child_name': 'bar',
                'child_type': 'locks',
                'grandchild_name': 'gc',
                'grandchild_type': 'nets',
                'resource_group': 'testgroup',
                'subscription': 'fakesub',
            }
        }]

        for test in tests:
            resource = parse_resource_id(test['resource_id'])
            self.assertDictEqual(resource, test['expected'])
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
| 40.474747 | 94 | 0.465935 |
f8b44bad857597cb7ad7087ffa81f85df7f473a7 | 1,350 | py | Python | examples/shouldi/tests/test_cli_use.py | Yash-Varshney/dffml | 70a76e45c3120b1a984fdfaa5638c594ef386da7 | [
"MIT"
] | 2 | 2020-02-21T00:03:54.000Z | 2020-04-08T17:38:47.000Z | examples/shouldi/tests/test_cli_use.py | Yash-Varshney/dffml | 70a76e45c3120b1a984fdfaa5638c594ef386da7 | [
"MIT"
] | null | null | null | examples/shouldi/tests/test_cli_use.py | Yash-Varshney/dffml | 70a76e45c3120b1a984fdfaa5638c594ef386da7 | [
"MIT"
] | null | null | null | import io
import pathlib
from unittest.mock import patch
from dffml import prepend_to_path, AsyncTestCase
from shouldi.cli import ShouldI
from .binaries import cached_node, cached_target_javascript_algorithms
class TestCLIUse(AsyncTestCase):
    """System tests for ``shouldi use`` against Python and JavaScript trees."""

    async def test_use_python(self):
        # Issue is B322, use of input in Python 2 is unsafe (eval). DFFML is
        # Python 3.7+ only, so doesn't effect us (it's the 1 high that will be
        # found).
        dffml_source_root = list(pathlib.Path(__file__).parents)[3]
        # Capture stdout to inspect the summary line printed by the CLI.
        with patch("sys.stdout", new_callable=io.StringIO) as stdout:
            await ShouldI.cli("use", str(dffml_source_root))
            output = stdout.getvalue()
        self.assertIn("high=1", output)

    @cached_node
    @cached_target_javascript_algorithms
    async def test_use_javascript(self, node, javascript_algo):
        # Put the cached node binary on PATH so the JS scanners can run.
        with prepend_to_path(node / "node-v14.2.0-linux-x64" / "bin",):
            with patch("sys.stdout", new_callable=io.StringIO) as stdout:
                await ShouldI.cli(
                    "use",
                    str(
                        javascript_algo
                        / "javascript-algorithms-ba2d8dc4a8e27659c1420fe52390cb7981df4a94"
                    ),
                )
                output = stdout.getvalue()
        self.assertIn("high=2941", output)
| 36.486486 | 90 | 0.626667 |
7e0826648cd7686cdb179d4a70e767367001cd8e | 3,474 | py | Python | great_expectations/data_context/store/store.py | joshuataylor/great_expectations | 19dcead43aef9a833b3aa894a1226714a80ab840 | [
"Apache-2.0"
] | 1 | 2021-05-04T17:26:22.000Z | 2021-05-04T17:26:22.000Z | great_expectations/data_context/store/store.py | joshuataylor/great_expectations | 19dcead43aef9a833b3aa894a1226714a80ab840 | [
"Apache-2.0"
] | 47 | 2020-07-15T06:32:50.000Z | 2022-03-29T12:03:23.000Z | great_expectations/data_context/store/store.py | joshuataylor/great_expectations | 19dcead43aef9a833b3aa894a1226714a80ab840 | [
"Apache-2.0"
] | null | null | null | import logging
from great_expectations.core.data_context_key import DataContextKey
from great_expectations.data_context.store.store_backend import StoreBackend
from great_expectations.data_context.util import instantiate_class_from_config
from great_expectations.exceptions import ClassInstantiationError, DataContextError
logger = logging.getLogger(__name__)
class Store(object):
    """A store is responsible for reading and writing Great Expectations objects
    to appropriate backends. It provides a generic API that the DataContext can
    use independently of any particular ORM and backend.

    An implementation of a store will generally need to define the following:
      - serialize
      - deserialize
      - _key_class (class of expected key type)

    All keys must have a to_tuple() method.
    """

    # Expected key type; subclasses override with a more specific key class.
    _key_class = DataContextKey

    def __init__(self, store_backend=None, runtime_environment=None):
        """Runtime environment may be necessary to instantiate store backend elements."""
        if store_backend is None:
            store_backend = {"class_name": "InMemoryStoreBackend"}
        logger.debug("Building store_backend.")
        module_name = "great_expectations.data_context.store"
        # Build the backend from its config dict via the plugin loader.
        self._store_backend = instantiate_class_from_config(
            config=store_backend,
            runtime_environment=runtime_environment or {},
            config_defaults={"module_name": module_name},
        )
        if not self._store_backend:
            raise ClassInstantiationError(
                module_name=module_name, package_name=None, class_name=store_backend
            )
        if not isinstance(self._store_backend, StoreBackend):
            raise DataContextError(
                "Invalid StoreBackend configuration: expected a StoreBackend instance."
            )
        # Backends with fixed-length keys use the fixed-length tuple protocol.
        self._use_fixed_length_key = self._store_backend.fixed_length_key

    def _validate_key(self, key):
        # Reject any key that is not an instance of this store's key class.
        if not isinstance(key, self._key_class):
            raise TypeError(
                "key must be an instance of %s, not %s"
                % (self._key_class.__name__, type(key))
            )

    @property
    def store_backend(self):
        return self._store_backend

    # noinspection PyMethodMayBeStatic
    def serialize(self, key, value):
        # Identity by default; subclasses override for real serialization.
        return value

    # noinspection PyMethodMayBeStatic
    def key_to_tuple(self, key):
        if self._use_fixed_length_key:
            return key.to_fixed_length_tuple()
        return key.to_tuple()

    def tuple_to_key(self, tuple_):
        if self._use_fixed_length_key:
            return self._key_class.from_fixed_length_tuple(tuple_)
        return self._key_class.from_tuple(tuple_)

    # noinspection PyMethodMayBeStatic
    def deserialize(self, key, value):
        # Identity by default; subclasses override for real deserialization.
        return value

    def get(self, key):
        self._validate_key(key)
        value = self._store_backend.get(self.key_to_tuple(key))
        # NOTE(review): falsy stored values (e.g. "" or {}) fall through and
        # return None here — presumably intentional; confirm before changing.
        if value:
            return self.deserialize(key, value)

    def set(self, key, value):
        self._validate_key(key)
        return self._store_backend.set(
            self.key_to_tuple(key), self.serialize(key, value)
        )

    def list_keys(self):
        # Convert backend tuples back into key objects.
        return [self.tuple_to_key(key) for key in self._store_backend.list_keys()]

    def has_key(self, key):
        if self._use_fixed_length_key:
            return self._store_backend.has_key(key.to_fixed_length_tuple())
        return self._store_backend.has_key(key.to_tuple())
| 36.1875 | 89 | 0.688256 |
a099e17fd221ca779326356ff8d0fef405c1981c | 7,122 | py | Python | fastclass/fc_download.py | mpoisot/fastclass | 1aedf1d862a4d96e7c9a175a7abc680028602e7f | [
"Apache-2.0"
] | null | null | null | fastclass/fc_download.py | mpoisot/fastclass | 1aedf1d862a4d96e7c9a175a7abc680028602e7f | [
"Apache-2.0"
] | null | null | null | fastclass/fc_download.py | mpoisot/fastclass | 1aedf1d862a4d96e7c9a175a7abc680028602e7f | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
# fastclass - fc_download.py
#
# Christian Werner, 2018-10-23
#
# TODO:
# - print report (images per class etc)
# - check if we need grace periods to avoid blocking
import click
import glob
from icrawler import ImageDownloader
from icrawler.builtin import (
GoogleImageCrawler,
BingImageCrawler,
BaiduImageCrawler,
FlickrImageCrawler,
)
import logging
import os
import shutil
import tempfile
from typing import List, Dict
from .deduplicate import remove_dups
from .imageprocessing import resize
from .misc import sanitize_searchstring
# Help epilog shown by click below the option list; the \r sequences keep
# explicit line breaks in click's rewrapped help output.
EPILOG = """::: FastClass fcd :::\r
\r
...an easy way to crawl the net for images when building a\r
dataset for deep learning.\r
\r
Example: fcd -c GOOGLE -c BING -s 224 example/guitars.csv
"""
class ImageLog:
    """Dummy class to attach registry of source urls to ImageDownloader"""
    # Maps downloaded filename -> source URL; a class attribute so it is
    # shared by every crawler instance and readable after crawling finishes.
    registry = {}
class CustomDownloader(ImageDownloader, ImageLog):
    """ImageDownloader that records each file's source URL in ImageLog.registry."""
    def process_meta(self, task):
        # icrawler calls this per download task; keep filename -> file_url.
        ImageLog.registry[task["filename"]] = task["file_url"]
def crawl(
    folder: str,
    search: str,
    maxnum: int,
    min_size: tuple,
    crawlers: List[str] = ["GOOGLE", "BING", "BAIDU", "FLICKR"],
) -> Dict[str, str]:
    """Crawl image search engines and download results into *folder*.

    Fixes: the ``crawlers`` annotation was the invalid ``[List[str]]`` (a
    list literal, not a type) and ``min_size`` was annotated ``int`` even
    though callers pass a ``(width, height)`` tuple.

    Parameters
    ----------
    folder : destination directory (created if missing)
    search : search term passed to each engine
    maxnum : maximum images per crawler (clamped to 1000)
    min_size : (width, height) minimum accepted image size
    crawlers : engines to use; FLICKR needs the FLICKR_API_KEY env var

    Returns
    -------
    Mapping of downloaded filename -> source URL.
    """
    print("(1) Crawling ...")
    # prepare folders
    os.makedirs(folder, exist_ok=True)
    if maxnum > 1000:
        print("Max num limited to 1000")
        maxnum = 1000
    crawler_init_defaults = {
        "downloader_cls": CustomDownloader,
        "log_level": logging.CRITICAL,
        "feeder_threads": 1,
        "parser_threads": 1,
        "downloader_threads": 4,
        "storage": {"root_dir": folder},
    }
    crawling_defaults = {
        "offset": 0,
        "max_num": maxnum,
        "min_size": min_size,
        "max_size": None,
        "file_idx_offset": "auto",  # continue numbering across engines
    }
    for c in crawlers:
        print(f" -> {c}")
        if c == "GOOGLE":
            google_crawler = GoogleImageCrawler(**crawler_init_defaults)
            google_crawler.crawl(keyword=search, **crawling_defaults)
        if c == "BING":
            bing_crawler = BingImageCrawler(**crawler_init_defaults)
            bing_crawler.crawl(keyword=search, filters=None, **crawling_defaults)
        if c == "BAIDU":
            baidu_crawler = BaiduImageCrawler(**crawler_init_defaults)
            baidu_crawler.crawl(keyword=search, **crawling_defaults)
        if c == "FLICKR":
            flickr_api_key = os.environ.get("FLICKR_API_KEY")
            if not flickr_api_key:
                print(
                    "Error: Flickr crawler requires FLICKR_API_KEY environment variable to be set with your non-secret API key."
                )
                exit(-1)
            flickr_crawler = FlickrImageCrawler(flickr_api_key, **crawler_init_defaults)
            flickr_crawler.crawl(text=search, **crawling_defaults)
    # None keys can appear for failed downloads; drop them.
    return {k: v for k, v in CustomDownloader.registry.items() if k is not None}
def main(
    infile, size: int, crawler: List[str], keep: bool, maxnum: int,
    outpath: str, padding: bool
):
    """Read search terms from *infile*, crawl, deduplicate and resize images.

    BUGFIX: the overwrite prompt previously looped on
    ``not (choice == "y" or "n")``, which is always False because the
    literal ``"n"`` is truthy, so invalid answers were never re-asked.
    Also strips line endings with ``rstrip`` so a final line without a
    trailing newline is not truncated.

    *infile* is an open text file handle (click.File), one search term per
    line after a header row, optionally ``term,remove_string``.
    """
    final_size = (size, size)
    classes = []
    if "ALL" in crawler:
        crawler = ["GOOGLE", "BING"]
    if os.path.isdir(outpath):
        print(
            f'Directory "{outpath}" exists. Would you like to overwrite the directory? [y/n]'
        )
        choice = input().lower()
        # re-prompt until a valid answer is given (fixed condition)
        while choice not in ("y", "n"):
            print("Please reply with 'y' or 'n'")
            choice = input().lower()
        if choice == "y":
            shutil.rmtree(outpath)
            if os.path.isdir(outpath + ".raw"):
                shutil.rmtree(outpath + ".raw")
        else:
            exit(-1)
    os.makedirs(outpath)
    print(f"INFO: final dataset will be located in {outpath}")
    with tempfile.TemporaryDirectory() as tmp:
        for lcnt, line in enumerate(infile):
            if lcnt > 0:  # skip the header row
                row = line.rstrip("\n")
                if "," in row:
                    # split once so the remove string may itself be complex
                    search_term, remove_terms = row.split(",", 1)
                else:
                    search_term = row
                    remove_terms = None
                classes.append((search_term, remove_terms))
        for i, (search_term, remove_terms) in enumerate(classes):
            print(f"[{i+1}/{len(classes)}] Searching: >> {search_term} <<")
            out_name = sanitize_searchstring(search_term, rstring=remove_terms)
            raw_folder = os.path.join(tmp, out_name)
            source_urls = crawl(
                folder=raw_folder,
                search=search_term,
                maxnum=maxnum,
                crawlers=crawler,
                min_size=final_size,
            )
            # NOTE: deduplication is per search term only; cross-class
            # duplicates are kept (see original author's remark).
            remove_dups(raw_folder)
            # resize into the final dataset folder
            out_resized = os.path.join(outpath, out_name)
            os.makedirs(out_resized, exist_ok=True)
            files = sorted(glob.glob(raw_folder + "/*"))
            source_urls = resize(
                files, outpath=out_resized, size=final_size, urls=source_urls,
                padding=padding
            )
            # write report file mapping each image to its source URL
            with open(out_resized + ".log", "w", encoding="utf-8") as log:
                log.write("image,source\n")
                for item in source_urls:
                    log.write(",".join([item, source_urls[item]]) + "\n")
        if keep:
            shutil.copytree(tmp, outpath + ".raw")
# -h and --help both trigger help; patching get_usage makes click print the
# full help text (not just the usage line) on argument errors.
CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"])
click.Context.get_usage = click.Context.get_help
@click.command(context_settings=CONTEXT_SETTINGS, epilog=EPILOG)
@click.option(
    "-c",
    "--crawler",
    default=["ALL"],
    type=click.Choice(["ALL", "GOOGLE", "BING", "BAIDU", "FLICKR"]),
    show_default=True,
    multiple=True,
    help="selection of crawler (multiple invocations supported)",
)
@click.option(
    "-k",
    "--keep",
    default=False,
    is_flag=True,
    show_default=True,
    help="keep original results of crawlers (copy them to .raw folder",
)
@click.option(
    "-m",
    "--maxnum",
    default=1000,
    show_default=True,
    type=int,
    help="maximum number of images per crawler (lower is faster, 1000 is max)",
)
@click.option(
    "-s",
    "--size",
    default=299,
    show_default=True,
    type=int,
    help="image size for rescaling. Set to 0 to keep original size.",
)
@click.option(
    "-o",
    "--outpath",
    default="dataset",
    show_default=True,
    help="name of output directory",
)
@click.option(
    "-p",
    "--padding",
    default=True,
    is_flag=True,
    show_default=True,
    help="add white padding to make rectangular images square. Only applicable when resizing.",
)
@click.argument("infile", type=click.File("r"), required=True)
def cli(infile, size, crawler, keep, maxnum, outpath, padding):
    """Command-line entry point: parse options and delegate to main()."""
    main(infile, size, crawler, keep, maxnum, outpath, padding)
# Allow running this module directly as a script.
if __name__ == "__main__":
    cli()
| 28.95122 | 201 | 0.600674 |
fb24e9bf671324730a051f50fd0670b5dd473435 | 5,858 | py | Python | code/train.py | alaakh42/Taxi-driver-Arrival-Time-prediction | 19e45c745d032fd76eea794b12337422053310c8 | [
"MIT"
] | 1 | 2018-09-17T19:42:41.000Z | 2018-09-17T19:42:41.000Z | code/train.py | alaakh42/Taxi-driver-Arrival-Time-prediction | 19e45c745d032fd76eea794b12337422053310c8 | [
"MIT"
] | null | null | null | code/train.py | alaakh42/Taxi-driver-Arrival-Time-prediction | 19e45c745d032fd76eea794b12337422053310c8 | [
"MIT"
] | null | null | null | """
Original Data fields
id - a unique identifier for each trip
vendor_id - a code indicating the provider associated with the trip record
pickup_datetime - date and time when the meter was engaged
dropoff_datetime - date and time when the meter was disengaged
passenger_count - the number of passengers in the vehicle (driver entered value)
pickup_longitude - the longitude where the meter was engaged
pickup_latitude - the latitude where the meter was engaged
dropoff_longitude - the longitude where the meter was disengaged
dropoff_latitude - the latitude where the meter was disengaged
store_and_fwd_flag - This flag indicates whether the trip record was held in vehicle memory before sending to the vendor because the vehicle did not have a connection to the server - Y=store and forward; N=not a store and forward trip
trip_duration - duration of the trip in seconds
Disclaimer: The decision was made to not remove dropoff coordinates from the dataset order to provide an expanded set of variables to use in Kernels.
Extracted Features is in features list below
"""
import gc
import pickle

import numpy as np
import pandas as pd
import xgboost
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import ShuffleSplit, cross_val_score
from sklearn.neighbors import KNeighborsRegressor

from feature_extraction import merge_data
# Load data + extracted features produced by feature_extraction.merge_data().
wd_train_fastest, test = merge_data()
# Feature matrix used to fit every model below.
features = wd_train_fastest[['vendor_id', 'passenger_count', 'pickup_longitude', 'pickup_latitude',
    'dropoff_longitude', 'dropoff_latitude','pickup_hour','pickup_day', 'pickup_weekday', 'pickup_minute',
    'pickup_hour_weekofyear', 'pickup_weekday_hour','pickup_month', 'distance', 'precipitation', 'snow fall', 'snow depth',
    'total_distance', 'total_travel_time','jfk','lga']]
# Regression target: trip duration in seconds.
target = wd_train_fastest['trip_duration']
# Same feature columns for the held-out test set.
tfeatures = test[['vendor_id', 'passenger_count','pickup_longitude', 'pickup_latitude', 'dropoff_longitude',
    'dropoff_latitude', 'pickup_hour','pickup_day', 'pickup_weekday', 'pickup_minute',
    'pickup_hour_weekofyear', 'pickup_weekday_hour', 'pickup_month', 'distance',
    'total_distance', 'total_travel_time', 'precipitation',
    'snow fall', 'snow depth','jfk','lga']]
def rmsle(evaluator, X, real):
    """Root Mean Squared Logarithmic Error scorer for cross_val_score.

    Improvements: no longer shadows the builtin ``sum`` and the per-element
    Python loop is replaced by an equivalent vectorized computation.

    Parameters
    ----------
    evaluator : fitted estimator exposing ``predict``
    X : array-like of features passed to ``evaluator.predict``
    real : array-like of true target values

    Returns
    -------
    float : RMSLE between the (clipped) predictions and ``real``.
    """
    predicted = np.asarray(evaluator.predict(X), dtype=float)
    print("Number predicted less than 0: {}".format(np.where(predicted < 0)[0].shape))
    # Negative durations are impossible; clip so log stays defined.
    predicted[predicted < 0] = 0
    log_diff = np.log(predicted + 1) - np.log(np.asarray(real, dtype=float) + 1)
    return float(np.sqrt(np.mean(log_diff ** 2)))
# >>>> I WILL MAKE THE ASSUMTION THAT total_travel_time from fastest_test == trip_duration
# then check the rmse value then
# NOTE(review): `fastest_test` is not defined anywhere in this script --
# presumably it should be the `test` frame returned by merge_data(); confirm.
benchmark = fastest_test[['id','total_travel_time']]
benchmark = benchmark.rename(index=str, columns={"total_travel_time": "trip_duration"})
benchmark['trip_duration'].isnull().values.any()
print("The score for benchmark:{}".format(mean_squared_error(target, fastest_test.trip_duration)))
benchmark.to_csv('results/benchmark.csv', index=False)
# >>>> TRY LINEAR REGRESSION AS A BENCHMARK
# NOTE(review): ShuffleSplit and cross_val_score are used below but are not
# imported in this file -- confirm they come from sklearn.model_selection.
reg = LinearRegression()
cv = ShuffleSplit(n_splits=4, test_size=0.1, random_state=0)
print("The Cross-validation rmse score for Linear Regression:{}".format(cross_val_score(reg, features, np.ravel(target), cv=cv, scoring=rmsle, n_jobs=-1)))
reg.fit(features, target)
pred = reg.predict(tfeatures)
print(np.where(pred < 0)[0].shape)
# Linear models can predict negative durations; clamp them to zero.
pred[pred < 0]=0
test['trip_duration']=pred.astype(int)
out = test[['id','trip_duration']]
# NOTE(review): path is "../results" here but "results" for the benchmark
# above -- confirm which directory is intended.
out.to_csv("../results/LinearRegression_preds.csv", index=False)
# >>>> TRY KNeighbors REGRESSION
neigh = KNeighborsRegressor(n_neighbors=10)
cv = ShuffleSplit(n_splits=4, test_size=0.1, random_state=0)
print("The Cross-validation rmse score for KNeighbors Regressor:{}".format(cross_val_score(neigh, features, np.ravel(target), cv=cv,scoring=rmsle, n_jobs=-1)))
neigh.fit(features, target)
pred = neigh.predict(tfeatures)
print("KNeighborsRegressor Predictions shape: {}".format(np.where(pred < 0)[0].shape))
# Durations are written as whole seconds.
test['trip_duration']=pred.astype(int)
out = test[['id','trip_duration']]
out.to_csv("../results/KNeighbors_preds.csv", index=False)
# >>>> TRY RANDOM FOREST REGRESSOR
rf = RandomForestRegressor()
cv = ShuffleSplit(n_splits=4, test_size=0.1, random_state=0)
print("The Cross-validation rmse score for RandomForest Regressor:{}".format(cross_val_score(rf, features, np.ravel(target), cv=cv,scoring=rmsle, n_jobs=-1)))
rf = rf.fit(features,np.ravel(target))
pred = rf.predict(tfeatures)
print("RandomForestRegressor Predictions shape: {}".format(np.where(pred < 0)[0].shape))
test['trip_duration']=pred.astype(int)
out = test[['id','trip_duration']]
out.to_csv("../results/RandomForest_preds.csv", index=False)
# >>>> FINALLY TRY XGBOOST
xgb = xgboost.XGBRegressor(n_estimators=100, learning_rate=0.08, gamma=0, subsample=0.75,
                           colsample_bytree=1, max_depth=7)
cv = ShuffleSplit(n_splits=4, test_size=0.1, random_state=0)
print("The Cross-validation rmse score for XGBOOST Regressor:{}".format(cross_val_score(xgb, features, np.ravel(target), cv=cv,scoring=rmsle, n_jobs=-2)))
xgb.fit(features, target)
pred = xgb.predict(tfeatures)
# Clamp impossible negative predictions before the diagnostic print.
pred[pred < 0] = 0
print("XGBRegressor Predictions shape: {}".format(np.where(pred < 0)[0].shape))
test['trip_duration']=pred.astype(int)
out = test[['id','trip_duration']]
out.to_csv("../results/XGBRegressor_preds.csv", index=False)
# Dump the highest score model
# protocol=2 keeps the pickle loadable from Python 2 environments.
pickle.dump(xgb, open('../models/xgb_model.sav','wb'), protocol=2)
# plot Gradient Boosting tree
# from xgboost import plot_tree
# plot_tree(xgb)
| 48.413223 | 234 | 0.738819 |
3050574f11ea5ef3c6dcf6ab70d20d6aba0b32e6 | 24,463 | py | Python | imagecodecs/_imagecodecs.py | sparkingdark/imagecodecs | 4ea4e82abbcdee650ddb3327df6a449e4898193f | [
"BSD-3-Clause"
] | null | null | null | imagecodecs/_imagecodecs.py | sparkingdark/imagecodecs | 4ea4e82abbcdee650ddb3327df6a449e4898193f | [
"BSD-3-Clause"
] | null | null | null | imagecodecs/_imagecodecs.py | sparkingdark/imagecodecs | 4ea4e82abbcdee650ddb3327df6a449e4898193f | [
"BSD-3-Clause"
] | 2 | 2021-05-26T04:10:19.000Z | 2022-01-27T12:48:18.000Z | # _imagecodecs.py
# Copyright (c) 2008-2020, Christoph Gohlke
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Python implementations of image codecs.
This module implements a limited set of image and compression codecs using
pure Python and 3rd party Python packages.
The module is intended for testing and reference, not production code.
:Author:
`Christoph Gohlke <https://www.lfd.uci.edu/~gohlke/>`_
:Organization:
Laboratory for Fluorescence Dynamics. University of California, Irvine
:License: BSD 3-Clause
:Version: 2020.5.30
"""
__version__ = '2020.5.30'
import sys
import struct
import functools
import io
import zlib
import lzma
import bz2
import numpy
try:
import tifffile
except Exception:
tifffile = None
try:
import czifile
except Exception:
czifile = None
try:
import zstd
except ImportError:
zstd = None
try:
import lz4
import lz4.block
except ImportError:
lz4 = None
try:
import lzf
except ImportError:
lzf = None
try:
import zfpy as zfp
except ImportError:
zfp = None
try:
import blosc
except ImportError:
blosc = None
try:
import brotli
except ImportError:
brotli = None
try:
import bitshuffle
except ImportError:
bitshuffle = None
try:
import snappy
except ImportError:
snappy = None
try:
import zopfli
except ImportError:
zopfli = None
try:
import PIL as pillow
except ImportError:
pillow = None
def version(astype=None, _versions_=[]):
    """Return detailed version information about test dependencies.

    ``astype`` selects the return form: ``str`` (default), ``dict``, or
    anything else for a tuple of (name, version) pairs. Results are cached
    in the mutable default argument across calls (intentional memoization).
    """
    if not _versions_:
        pairs = (
            ('imagecodecs.py', __version__),
            ('numpy', numpy.__version__),
            ('zlib', zlib.ZLIB_VERSION),
            ('bz2', 'stdlib'),
            ('lzma', getattr(lzma, '__version__', 'stdlib')),
            ('blosc', blosc.__version__ if blosc else 'n/a'),
            ('zstd', zstd.version() if zstd else 'n/a'),
            ('lz4', lz4.VERSION if lz4 else 'n/a'),
            ('lzf', 'unknown' if lzf else 'n/a'),
            ('snappy', 'unknown' if snappy else 'n/a'),
            ('zopflipy', zopfli.__version__ if zopfli else 'n/a'),
            ('zfpy', zfp.__version__ if zfp else 'n/a'),
            ('bitshuffle', bitshuffle.__version__ if bitshuffle else 'n/a'),
            ('pillow', pillow.__version__ if pillow else 'n/a'),
            ('tifffile', tifffile.__version__ if tifffile else 'n/a'),
            ('czifile', czifile.__version__ if czifile else 'n/a'),
        )
        _versions_.extend(pairs)
    if astype is str or astype is None:
        return ', '.join(f'{name}-{ver}' for name, ver in _versions_)
    if astype is dict:
        return dict(_versions_)
    return tuple(_versions_)
def notimplemented(arg=False):
    """Return a decorator that raises NotImplementedError unless *arg* is truthy.

    May be applied directly (``@notimplemented``, always raises) or called
    with a flag (``@notimplemented(module)``): a truthy flag leaves the
    decorated function untouched.

    >>> @notimplemented
    ... def test(): pass
    >>> test()
    Traceback (most recent call last):
    ...
    NotImplementedError: test not implemented

    >>> @notimplemented(True)
    ... def test(): pass
    >>> test()

    """
    def raise_decorator(func):
        @functools.wraps(func)
        def stub(*args, **kwargs):
            raise NotImplementedError(f'{func.__name__} not implemented')
        return stub

    if callable(arg):
        # used directly as @notimplemented on a function
        return raise_decorator(arg)
    if arg:
        # dependency is available: keep the function as-is
        return lambda func: func
    return raise_decorator
def none_decode(data, *args, **kwargs):
    """Decode NOP: return the input unchanged (extra arguments ignored)."""
    return data
def none_encode(data, *args, **kwargs):
    """Encode NOP: return the input unchanged (extra arguments ignored)."""
    return data
def numpy_decode(data, index=0, out=None, **kwargs):
    """Decode NPY and NPZ byte strings to an array.

    For NPZ archives, *index* selects the member (position or name).
    """
    with io.BytesIO(data) as fh:
        loaded = numpy.load(fh, **kwargs)
        if hasattr(loaded, 'files'):
            # NPZ archive: resolve a positional index to the member name.
            try:
                index = loaded.files[index]
            except Exception:
                pass
            loaded = loaded[index]
    return loaded
def numpy_encode(data, level=None, out=None, **kwargs):
    """Encode an array as NPY (falsy *level*) or compressed NPZ bytes."""
    buffer = io.BytesIO()
    if level:
        numpy.savez_compressed(buffer, data, **kwargs)
    else:
        numpy.save(buffer, data, **kwargs)
    return buffer.getvalue()
def delta_encode(data, axis=-1, out=None):
    r"""Encode Delta: first element followed by successive differences.

    >>> delta_encode(b'0123456789')
    b'0\x01\x01\x01\x01\x01\x01\x01\x01\x01'
    """
    if isinstance(data, (bytes, bytearray)):
        values = numpy.frombuffer(data, dtype='u1')
        encoded = numpy.insert(numpy.diff(values, axis=0), 0, values[0])
        return encoded.tobytes()
    dtype = data.dtype
    floating = dtype.kind == 'f'
    if floating:
        # difference the raw bit patterns, not the float values
        data = data.view(f'u{dtype.itemsize}')
    first = [slice(None)] * data.ndim
    first[axis] = 0
    encoded = numpy.insert(
        numpy.diff(data, axis=axis), 0, data[tuple(first)], axis=axis
    )
    return encoded.view(dtype) if floating else encoded
def delta_decode(data, axis=-1, out=None):
    r"""Decode Delta via cumulative summation.

    >>> delta_decode(b'0\x01\x01\x01\x01\x01\x01\x01\x01\x01')
    b'0123456789'
    """
    if out is not None and not out.flags.writeable:
        out = None
    if isinstance(data, (bytes, bytearray)):
        deltas = numpy.frombuffer(data, dtype='u1')
        return numpy.cumsum(deltas, axis=0, dtype='u1', out=out).tobytes()
    if data.dtype.kind == 'f':
        # accumulate the raw bit patterns, then reinterpret as float
        bits = data.view(f'u{data.dtype.itemsize}')
        return numpy.cumsum(bits, axis=axis, dtype=bits.dtype).view(data.dtype)
    return numpy.cumsum(data, axis=axis, dtype=data.dtype, out=out)
def xor_encode(data, axis=-1, out=None):
    r"""Encode XOR delta: first element, then XOR of consecutive elements.

    >>> xor_encode(b'0123456789')
    b'0\x01\x03\x01\x07\x01\x03\x01\x0f\x01'
    """
    if isinstance(data, (bytes, bytearray)):
        values = numpy.frombuffer(data, dtype='u1')
        encoded = numpy.insert(
            numpy.bitwise_xor(values[1:], values[:-1]), 0, values[0]
        )
        return encoded.tobytes()
    dtype = data.dtype
    floating = dtype.kind == 'f'
    if floating:
        # XOR operates on the raw bit patterns of the floats
        data = data.view(f'u{dtype.itemsize}')
    first = [slice(None)] * data.ndim
    first[axis] = 0
    upper = [slice(None)] * data.ndim
    upper[axis] = slice(1, None, None)
    lower = [slice(None)] * data.ndim
    lower[axis] = slice(0, -1, None)
    encoded = numpy.bitwise_xor(data[tuple(upper)], data[tuple(lower)])
    encoded = numpy.insert(encoded, 0, data[tuple(first)], axis=axis)
    return encoded.view(dtype) if floating else encoded
def xor_decode(data, axis=-1, out=None):
    r"""Decode XOR delta via a cumulative XOR (inverse of xor_encode).

    Improvements: the byte path is vectorized, and numpy arrays are now
    supported (previously raised NotImplementedError) using
    ``numpy.bitwise_xor.accumulate`` along *axis*.

    >>> xor_decode(b'0\x01\x03\x01\x07\x01\x03\x01\x0f\x01')
    b'0123456789'
    """
    if isinstance(data, (bytes, bytearray)):
        deltas = numpy.frombuffer(data, dtype='u1')
        return numpy.bitwise_xor.accumulate(deltas, axis=0, dtype='u1').tobytes()
    dtype = data.dtype
    if dtype.kind == 'f':
        # accumulate the raw bit patterns, then reinterpret as float
        view = data.view(f'u{dtype.itemsize}')
        return numpy.bitwise_xor.accumulate(
            view, axis=axis, dtype=view.dtype
        ).view(dtype)
    return numpy.bitwise_xor.accumulate(data, axis=axis, dtype=data.dtype)
def floatpred_decode(data, axis=-2, out=None):
    """Decode floating point horizontal differencing.

    The TIFF predictor type 3 reorders the bytes of the image values and
    applies horizontal byte differencing to improve compression of floating
    point images. The ordering of interleaved color channels is preserved.

    Parameters
    ----------
    data : numpy.ndarray
        The image to be decoded. The dtype must be a floating point.
        The shape must include the number of contiguous samples per pixel
        even if 1.

    """
    if axis != -2:
        raise NotImplementedError(f'axis {axis!r} != -2')
    shape = data.shape
    dtype = data.dtype
    if len(shape) < 3:
        raise ValueError('invalid data shape')
    if dtype.char not in 'dfe':
        raise ValueError('not a floating point image')
    littleendian = data.dtype.byteorder == '<' or (
        sys.byteorder == 'little' and data.dtype.byteorder == '=')
    # undo horizontal byte differencing: view as raw bytes and prefix-sum
    data = data.view('uint8')
    data.shape = shape[:-2] + (-1,) + shape[-1:]
    numpy.cumsum(data, axis=-2, dtype='uint8', out=data)
    # reorder bytes (predictor stores them in big-endian byte planes)
    if littleendian:
        data.shape = shape[:-2] + (-1,) + shape[-2:]
    data = numpy.swapaxes(data, -3, -2)
    data = numpy.swapaxes(data, -2, -1)
    data = data[..., ::-1]
    # back to float: make contiguous and reinterpret as the original dtype
    data = numpy.ascontiguousarray(data)
    data = data.view(dtype)
    data.shape = shape
    return data
@notimplemented
def floatpred_encode(data, axis=-1, out=None):
    """Encode Floating Point Predictor (not implemented; always raises)."""
def bitorder_decode(data, out=None, _bitorder=[]):
    """Reverse bits in each byte of byte string or numpy array.

    Decode data where pixels with lower column values are stored in the
    lower-order bits of the bytes (TIFF FillOrder is LSB2MSB).

    Parameters
    ----------
    data : byte string or ndarray
        The data to be bit reversed. If byte string, a new bit-reversed byte
        string is returned. Numpy arrays are bit-reversed in-place.

    Examples
    --------
    >>> bitorder_decode(b'\\x01\\x64')
    b'\\x80&'
    >>> data = numpy.array([1, 666], dtype='uint16')
    >>> bitorder_decode(data)
    array([  128, 16473], dtype=uint16)
    >>> data
    array([  128, 16473], dtype=uint16)

    """
    # The 256-entry bit-reversal table is built once and cached in the
    # mutable default argument: as bytes (for str.translate) and as a
    # uint8 array (for numpy.take).
    if not _bitorder:
        _bitorder.append(
            b'\x00\x80@\xc0 \xa0`\xe0\x10\x90P\xd00\xb0p\xf0\x08\x88H\xc8('
            b'\xa8h\xe8\x18\x98X\xd88\xb8x\xf8\x04\x84D\xc4$\xa4d\xe4\x14'
            b'\x94T\xd44\xb4t\xf4\x0c\x8cL\xcc,\xacl\xec\x1c\x9c\\\xdc<\xbc|'
            b'\xfc\x02\x82B\xc2"\xa2b\xe2\x12\x92R\xd22\xb2r\xf2\n\x8aJ\xca*'
            b'\xaaj\xea\x1a\x9aZ\xda:\xbaz\xfa\x06\x86F\xc6&\xa6f\xe6\x16'
            b'\x96V\xd66\xb6v\xf6\x0e\x8eN\xce.\xaen\xee\x1e\x9e^\xde>\xbe~'
            b'\xfe\x01\x81A\xc1!\xa1a\xe1\x11\x91Q\xd11\xb1q\xf1\t\x89I\xc9)'
            b'\xa9i\xe9\x19\x99Y\xd99\xb9y\xf9\x05\x85E\xc5%\xa5e\xe5\x15'
            b'\x95U\xd55\xb5u\xf5\r\x8dM\xcd-\xadm\xed\x1d\x9d]\xdd=\xbd}'
            b'\xfd\x03\x83C\xc3#\xa3c\xe3\x13\x93S\xd33\xb3s\xf3\x0b\x8bK'
            b'\xcb+\xabk\xeb\x1b\x9b[\xdb;\xbb{\xfb\x07\x87G\xc7\'\xa7g\xe7'
            b'\x17\x97W\xd77\xb7w\xf7\x0f\x8fO\xcf/\xafo\xef\x1f\x9f_'
            b'\xdf?\xbf\x7f\xff')
        _bitorder.append(numpy.frombuffer(_bitorder[0], dtype='uint8'))
    try:
        # ndarray: remap every byte in-place through the lookup table
        view = data.view('uint8')
        numpy.take(_bitorder[1], view, out=view)
        return data
    except AttributeError:
        # bytes/bytearray: translate returns a new bit-reversed byte string
        return data.translate(_bitorder[0])
    except ValueError:
        raise NotImplementedError('slices of arrays not supported')
    return None
bitorder_encode = bitorder_decode  # bit reversal is its own inverse
def packbits_decode(encoded, out=None):
    r"""Decompress PackBits encoded byte string.

    >>> packbits_decode(b'\x80\x80')  # NOP
    b''
    >>> packbits_decode(b'\x02123')
    b'123'
    """
    result = bytearray()
    i = 0
    length = len(encoded)
    while i < length:
        n = encoded[i] + 1
        i += 1
        if n > 129:
            # run: repeat the next byte (258 - n) times
            result.extend(encoded[i:i + 1] * (258 - n))
            i += 1
        elif n < 129:
            # literal: copy the next n bytes verbatim
            result.extend(encoded[i:i + n])
            i += n
        # n == 129 (marker byte 0x80) is a no-op
    return bytes(result)
def lzw_decode(encoded, buffersize=0, out=None):
    r"""Decompress LZW (Lempel-Ziv-Welch) encoded TIFF strip (byte string).

    The strip must begin with a CLEAR code and end with an EOI code.

    This implementation of the LZW decoding algorithm is described in TIFF v6
    and is not compatible with old style LZW compressed files like
    quad-lzw.tif.

    >>> lzw_decode(b'\x80\x1c\xcc\'\x91\x01\xa0\xc2m6\x99NB\x03\xc9\xbe\x0b'
    ...            b'\x07\x84\xc2\xcd\xa68|"\x14 3\xc3\xa0\xd1c\x94\x02\x02')
    b'say hammer yo hammer mc hammer go hammer'
    """
    len_encoded = len(encoded)
    bitcount_max = len_encoded * 8
    unpack = struct.unpack
    # initial table: the 256 single-byte strings plus two reserved slots
    # for the CLEAR (256) and EOI (257) codes
    newtable = [bytes([i]) for i in range(256)]
    newtable.extend((0, 0))
    def next_code():
        # return integer of 'bitw' bits at 'bitcount' position in encoded
        start = bitcount // 8
        s = encoded[start:start+4]
        try:
            code = unpack('>I', s)[0]
        except Exception:
            # near the end of the strip, pad to 4 bytes before unpacking
            code = unpack('>I', s + b'\x00'*(4-len(s)))[0]
        code <<= bitcount % 8
        code &= mask
        return code >> shr
    switchbits = {  # code: bit-width, shr-bits, bit-mask
        255: (9, 23, int(9*'1'+'0'*23, 2)),
        511: (10, 22, int(10*'1'+'0'*22, 2)),
        1023: (11, 21, int(11*'1'+'0'*21, 2)),
        2047: (12, 20, int(12*'1'+'0'*20, 2)), }
    bitw, shr, mask = switchbits[255]
    bitcount = 0
    if len_encoded < 4:
        raise ValueError('strip must be at least 4 characters long')
    if next_code() != 256:
        raise ValueError('strip must begin with CLEAR code')
    code = 0
    oldcode = 0
    result = []
    result_append = result.append
    while True:
        code = next_code()  # ~5% faster when inlining this function
        bitcount += bitw
        if code == 257 or bitcount >= bitcount_max:  # EOI
            break
        if code == 256:  # CLEAR
            # reset the string table and code width per TIFF v6
            table = newtable[:]
            table_append = table.append
            lentable = 258
            bitw, shr, mask = switchbits[255]
            code = next_code()
            bitcount += bitw
            if code == 257:  # EOI
                break
            result_append(table[code])
        else:
            if code < lentable:
                decoded = table[code]
                newcode = table[oldcode] + decoded[:1]
            else:
                # KwKwK case: code not yet in table
                newcode = table[oldcode]
                newcode += newcode[:1]
                decoded = newcode
            result_append(decoded)
            table_append(newcode)
            lentable += 1
        oldcode = code
        if lentable in switchbits:
            # grow the code width when the table reaches a threshold size
            bitw, shr, mask = switchbits[lentable]
    if code != 257:
        # logging.warning(f'unexpected end of LZW stream (code {code!r})')
        pass
    return b''.join(result)
def packints_decode(data, dtype, numbits, runlen=0, out=None):
    """Decompress byte string to array of integers of any bit size <= 32.

    This Python implementation is slow and only handles itemsizes 1, 2, 4, 8,
    16, 32, and 64.

    Parameters
    ----------
    data : byte str
        Data to decompress.
    dtype : numpy.dtype or str
        A numpy boolean or integer type.
    numbits : int
        Number of bits per integer.
    runlen : int
        Number of consecutive integers, after which to start at next byte.

    Examples
    --------
    >>> packints_decode(b'a', 'B', 1)
    array([0, 1, 1, 0, 0, 0, 0, 1], dtype=uint8)
    >>> packints_decode(b'ab', 'B', 2)
    array([1, 2, 0, 1, 1, 2, 0, 2], dtype=uint8)

    """
    if numbits == 1:  # bitarray
        data = numpy.frombuffer(data, '|B')
        data = numpy.unpackbits(data)
        if runlen % 8:
            # drop the per-run padding bits that align rows to byte boundaries
            data = data.reshape(-1, runlen + (8 - runlen % 8))
            data = data[:, :runlen].reshape(-1)
        return data.astype(dtype)
    dtype = numpy.dtype(dtype)
    if numbits in (8, 16, 32, 64):
        # byte-aligned sizes can be read directly
        return numpy.frombuffer(data, dtype)
    if numbits not in (1, 2, 4, 8, 16, 32):
        raise ValueError(f'itemsize not supported: {numbits}')
    if dtype.kind not in 'biu':
        raise ValueError('invalid dtype')
    # smallest power-of-two item size that can hold numbits
    itembytes = next(i for i in (1, 2, 4, 8) if 8 * i >= numbits)
    if itembytes != dtype.itemsize:
        raise ValueError('dtype.itemsize too small')
    if runlen == 0:
        runlen = (8 * len(data)) // numbits
    # padding bits at the end of each run
    skipbits = runlen * numbits % 8
    if skipbits:
        skipbits = 8 - skipbits
    shrbits = itembytes*8 - numbits
    bitmask = int(numbits*'1'+'0'*shrbits, 2)
    dtypestr = '>' + dtype.char  # dtype always big-endian?
    unpack = struct.unpack
    size = runlen * (len(data)*8 // (runlen*numbits + skipbits))
    result = numpy.empty((size,), dtype)
    bitcount = 0
    for i in range(size):
        start = bitcount // 8
        s = data[start:start+itembytes]
        try:
            code = unpack(dtypestr, s)[0]
        except Exception:
            # pad short reads at the end of the buffer
            code = unpack(dtypestr, s + b'\x00'*(itembytes-len(s)))[0]
        code <<= bitcount % 8
        code &= bitmask
        result[i] = code >> shrbits
        bitcount += numbits
        if (i + 1) % runlen == 0:
            bitcount += skipbits
    return result
@notimplemented(bitshuffle)
def bitshuffle_encode(data, level=1, itemsize=1, blocksize=0, out=None):
    """Bitshuffle an ndarray, or a byte string viewed as uint of *itemsize*."""
    if isinstance(data, numpy.ndarray):
        return bitshuffle.bitshuffle(data, blocksize)
    view = numpy.frombuffer(data, dtype=f'uint{itemsize * 8}')
    return bitshuffle.bitshuffle(view, blocksize).tobytes()
@notimplemented(bitshuffle)
def bitshuffle_decode(data, itemsize=1, blocksize=0, out=None):
    """Undo a bitshuffle on an ndarray or a byte string of uint *itemsize*."""
    if isinstance(data, numpy.ndarray):
        return bitshuffle.bitunshuffle(data, blocksize)
    view = numpy.frombuffer(data, dtype=f'uint{itemsize * 8}')
    return bitshuffle.bitunshuffle(view, blocksize).tobytes()
def zlib_encode(data, level=6, out=None):
    """Compress with Zlib at the given compression level."""
    compressed = zlib.compress(data, level)
    return compressed
def zlib_decode(data, out=None):
    """Decompress a Zlib stream."""
    decompressed = zlib.decompress(data)
    return decompressed
def bz2_encode(data, level=9, out=None):
    """Compress with BZ2 at the given compression level."""
    compressed = bz2.compress(data, level)
    return compressed
def bz2_decode(data, out=None):
    """Decompress a BZ2 stream."""
    decompressed = bz2.decompress(data)
    return decompressed
@notimplemented(blosc)
def blosc_encode(data, level=None, compressor='blosclz', numthreads=1,
                 typesize=8, blocksize=0, shuffle=None, out=None):
    """Compress Blosc (level defaults to 9, shuffle to blosc.SHUFFLE)."""
    clevel = 9 if level is None else level
    shuffle_mode = blosc.SHUFFLE if shuffle is None else shuffle
    return blosc.compress(data, typesize=typesize, clevel=clevel,
                          shuffle=shuffle_mode, cname=compressor)
@notimplemented(blosc)
def blosc_decode(data, out=None):
    """Decompress a Blosc buffer."""
    decompressed = blosc.decompress(data)
    return decompressed
def lzma_encode(data, level=None, out=None):
    """Compress LZMA/XZ (the *level* argument is accepted but unused)."""
    return lzma.compress(data)
def lzma_decode(data, out=None):
    """Decompress an LZMA/XZ stream."""
    decompressed = lzma.decompress(data)
    return decompressed
@notimplemented(zstd)
def zstd_encode(data, level=5, out=None):
    """Compress with ZStandard at the given level."""
    compressed = zstd.compress(data, level)
    return compressed
@notimplemented(zstd)
def zstd_decode(data, out=None):
    """Decompress a ZStandard frame."""
    decompressed = zstd.decompress(data)
    return decompressed
@notimplemented(brotli)
def brotli_encode(data, level=11, mode=0, lgwin=22, out=None):
    """Compress Brotli with the given quality, mode, and window size."""
    return brotli.compress(data, mode=mode, quality=level, lgwin=lgwin)
@notimplemented(brotli)
def brotli_decode(data, out=None):
    """Decompress a Brotli stream."""
    decompressed = brotli.decompress(data)
    return decompressed
@notimplemented(snappy)
def snappy_encode(data, level=None, out=None):
    """Compress Snappy (the *level* argument is accepted but unused)."""
    compressed = snappy.compress(data)
    return compressed
@notimplemented(snappy)
def snappy_decode(data, out=None):
    """Decompress a Snappy buffer."""
    decompressed = snappy.decompress(data)
    return decompressed
@notimplemented(zopfli)
def zopfli_encode(data, level=None, out=None):
    """Compress with Zopfli using the zlib container format."""
    compressor = zopfli.ZopfliCompressor(zopfli.ZOPFLI_FORMAT_ZLIB)
    return compressor.compress(data) + compressor.flush()
@notimplemented(zopfli)
def zopfli_decode(data, out=None):
    """Decompress a Zopfli zlib-format stream."""
    decompressor = zopfli.ZopfliDecompressor(zopfli.ZOPFLI_FORMAT_ZLIB)
    return decompressor.decompress(data) + decompressor.flush()
@notimplemented(lzf)
def lzf_encode(data, level=None, header=False, out=None):
    """Compress LZF (*level* and *header* are accepted but unused)."""
    compressed = lzf.compress(data)
    return compressed
@notimplemented(lzf)
def lzf_decode(data, header=False, out=None):
    """Decompress an LZF buffer (the *header* argument is unused)."""
    decompressed = lzf.decompress(data)
    return decompressed
@notimplemented(zfp)
def zfp_encode(data, level=None, mode=None, execution=None, header=True,
               out=None):
    """Compress ZFP.

    Improvements: the function previously had no docstring and silently
    ignored unrecognized *mode* values; those now raise ValueError.

    *mode* selects the compression mode (reversible by default, fixed
    precision, fixed rate, fixed accuracy, or expert — not implemented);
    *level* is interpreted according to the selected mode.
    """
    kwargs = {'write_header': header}
    if mode in (None, zfp.mode_null, 'R', 'reversible'):  # zfp.mode_reversible
        pass  # reversible mode needs no extra parameters
    elif mode in (zfp.mode_fixed_precision, 'p', 'precision'):
        kwargs['precision'] = -1 if level is None else level
    elif mode in (zfp.mode_fixed_rate, 'r', 'rate'):
        kwargs['rate'] = -1 if level is None else level
    elif mode in (zfp.mode_fixed_accuracy, 'a', 'accuracy'):
        kwargs['tolerance'] = -1 if level is None else level
    elif mode in (zfp.mode_expert, 'c', 'expert'):
        minbits, maxbits, maxprec, minexp = level
        raise NotImplementedError()
    else:
        # previously unknown modes were silently treated as reversible
        raise ValueError(f'invalid zfp mode {mode!r}')
    return zfp.compress_numpy(data, **kwargs)
@notimplemented(zfp)
def zfp_decode(data, shape=None, dtype=None, out=None):
    """Decompress ZFP.

    `shape`, `dtype`, and `out` are accepted for API symmetry but ignored;
    presumably `decompress_numpy` recovers them from the stream header —
    confirm with the zfp package documentation.
    """
    return zfp.decompress_numpy(data)
@notimplemented(bitshuffle)
def bitshuffle_lz4_encode(data, level=1, blocksize=0, out=None):
    """Compress LZ4 with Bitshuffle.

    `level` and `out` are accepted for API symmetry but ignored.
    """
    return bitshuffle.compress_lz4(data, blocksize)
@notimplemented(bitshuffle)
def bitshuffle_lz4_decode(data, shape, dtype, blocksize=0, out=None):
    """Decompress LZ4 with Bitshuffle.

    `shape` and `dtype` describe the decoded array; `out` is ignored.
    """
    return bitshuffle.decompress_lz4(data, shape, dtype, blocksize)
@notimplemented(lz4)
def lz4_encode(data, level=1, header=False, out=None):
    """Compress LZ4.

    `header` controls whether the uncompressed size is stored in the block
    (`store_size`); `level` and `out` are accepted but ignored.
    """
    return lz4.block.compress(data, store_size=header)
@notimplemented(lz4)
def lz4_decode(data, header=False, out=None):
    """Decompress LZ4.

    If `header` is true, the uncompressed size is stored in the stream.
    Otherwise, if `out` is an int it is used as the uncompressed size;
    failing that, a generous upper bound is guessed from the input length.
    """
    if header:
        return lz4.block.decompress(data)
    if isinstance(out, int):
        return lz4.block.decompress(data, uncompressed_size=out)
    # Heuristic upper bound on the decoded size; presumably derived from the
    # LZ4 block format's worst-case expansion — TODO confirm against the LZ4
    # block format specification.
    outsize = max(24, 24 + 255 * (len(data) - 10))  # ugh
    return lz4.block.decompress(data, uncompressed_size=outsize)
@notimplemented(tifffile)
def tiff_decode(data, key=None, **kwargs):
    """Decode TIFF.

    `key` and extra keyword arguments are forwarded to `tifffile.imread`.
    """
    with io.BytesIO(data) as fh:
        out = tifffile.imread(fh, key=key, **kwargs)
    return out
@notimplemented(tifffile)
def tiff_encode(data, level=1, **kwargs):
    """Encode TIFF.

    `level` is accepted for API symmetry but ignored; extra keyword
    arguments are forwarded to `tifffile.imwrite`.
    """
    buffer = io.BytesIO()
    with buffer:
        tifffile.imwrite(buffer, data, **kwargs)
        encoded = buffer.getvalue()
    return encoded
@notimplemented(pillow)
def pil_decode(data, out=None):
    """Decode image data using Pillow.

    Returns the decoded image as a numpy array; `out` is ignored.
    """
    return numpy.asarray(pillow.Image.open(io.BytesIO(data)))
@notimplemented(pillow)
def jpeg8_decode(data, tables=None, colorspace=None, outcolorspace=None,
                 out=None):
    """Decode JPEG 8-bit.

    `tables`, `colorspace`, `outcolorspace`, and `out` are accepted for API
    symmetry but ignored by this Pillow-based fallback.
    """
    return pil_decode(data)
@notimplemented(pillow)
def jpeg2k_decode(data, verbose=0, out=None):
    """Decode JPEG 2000.

    `verbose` and `out` are accepted for API symmetry but ignored by this
    Pillow-based fallback.
    """
    return pil_decode(data)
@notimplemented(pillow)
def webp_decode(data, out=None):
    """Decode WebP via Pillow.

    `out` is accepted for API symmetry but ignored.
    """
    return pil_decode(data)
@notimplemented(pillow)
def png_decode(data, out=None):
    """Decode PNG via Pillow.

    `out` is accepted for API symmetry but ignored.
    """
    return pil_decode(data)
if __name__ == '__main__':
    # Run the module's doctests when executed as a script.
    import doctest
    print(version())  # version() is defined elsewhere in this module
    # Stable float formatting so doctest output is reproducible.
    numpy.set_printoptions(suppress=True, precision=2)
    doctest.testmod()
| 28.881936 | 79 | 0.622859 |
02f5cb070439d68925f7a47908e1770d1a8b3816 | 79 | py | Python | examples/pluggable/subcomponents/site/__init__.py | pauleveritt/viewdom_wired | 40d8f2f190a12bbd07ff957654626001a9f3a778 | [
"MIT"
] | null | null | null | examples/pluggable/subcomponents/site/__init__.py | pauleveritt/viewdom_wired | 40d8f2f190a12bbd07ff957654626001a9f3a778 | [
"MIT"
] | null | null | null | examples/pluggable/subcomponents/site/__init__.py | pauleveritt/viewdom_wired | 40d8f2f190a12bbd07ff957654626001a9f3a778 | [
"MIT"
] | null | null | null | from ..plugins import greeting, punctuation
plugins = (greeting, punctuation)
| 19.75 | 43 | 0.78481 |
5088b540af306f1c663dcaddb613937c553961da | 92,869 | py | Python | Lib/test/test_itertools.py | Taywee/isolated-python | cb6aab1248c4aec4dd578bea717854505a6fb55d | [
"PSF-2.0"
] | 4 | 2016-04-02T00:01:50.000Z | 2017-07-13T02:11:04.000Z | Lib/test/test_itertools.py | vic/pysano | bcfd0522711efaaacf68821b831674b0ff48b6a1 | [
"PSF-2.0"
] | null | null | null | Lib/test/test_itertools.py | vic/pysano | bcfd0522711efaaacf68821b831674b0ff48b6a1 | [
"PSF-2.0"
] | null | null | null | import unittest
from test import support
from itertools import *
import weakref
from decimal import Decimal
from fractions import Fraction
import sys
import operator
import random
import copy
import pickle
from functools import reduce
import sys
import struct
# Platform-dependent Py_ssize_t bounds, used by the overflow tests below.
maxsize = support.MAX_Py_ssize_t
minsize = -maxsize-1
def lzip(*args):
    """Eagerly zip the arguments into a list of tuples (list(zip(...)))."""
    return [pair for pair in zip(*args)]
def onearg(x):
    """Test function of one argument: returns its argument doubled."""
    doubled = 2 * x
    return doubled
def errfunc(*args):
    """Test function that raises ValueError regardless of its arguments."""
    raise ValueError()
def gen3():
    """Non-restartable source sequence yielding 0, 1, 2."""
    yield from (0, 1, 2)
def isEven(x):
    """Test predicate: True when x is an even integer."""
    return not x % 2
def isOdd(x):
    """Test predicate: True when x is an odd integer."""
    return bool(x % 2)
def tupleize(*args):
    """Collect all positional arguments into a tuple."""
    return tuple(args)
def irange(n):
    """Generator yielding 0 .. n-1, like iter(range(n))."""
    yield from range(n)
class StopNow:
    """Iterator that is exhausted from the start (emulates an empty iterable)."""

    def __iter__(self):
        # Already an iterator; return self unchanged.
        return self

    def __next__(self):
        # There is never anything to produce.
        raise StopIteration
def take(n, seq):
    """Convenience function for partially consuming a long or infinite iterable."""
    return [item for item in islice(seq, n)]
def prod(iterable):
    """Multiply all items of iterable together (1 for an empty iterable)."""
    result = 1
    for value in iterable:
        result = result * value
    return result
def fact(n):
    """Factorial of n (0! == 1); inlines the prod() helper via reduce."""
    return reduce(operator.mul, range(1, n + 1), 1)
# root level methods for pickling ability
def testR(r):
return r[0]
def testR2(r):
return r[2]
def underten(x):
    """Module-level predicate (picklable): True when x is less than 10."""
    return x < 10
# One round-trip copier (pickle dumps/loads) per supported pickle protocol;
# used throughout the tests to verify itertools objects survive pickling.
# `proto=proto` binds each protocol at lambda-definition time.
picklecopiers = [lambda s, proto=proto: pickle.loads(pickle.dumps(s, proto))
                 for proto in range(pickle.HIGHEST_PROTOCOL + 1)]
class TestBasicOps(unittest.TestCase):
    def pickletest(self, protocol, it, stop=4, take=1, compare=None):
        """Test that an iterator is the same after pickling, also when part-consumed.

        protocol: pickle protocol number to use.
        it: the iterator under test.
        stop: maximum number of items expanded per nesting level.
        take: how many items to consume before re-pickling.
        compare: optional expected contents to check the copies against.
        """
        def expand(it, i=0):
            # Recursively expand iterables, within sensible bounds
            if i > 10:
                raise RuntimeError("infinite recursion encountered")
            if isinstance(it, str):
                return it
            try:
                l = list(islice(it, stop))
            except TypeError:
                return it # can't expand it
            return [expand(e, i+1) for e in l]

        # Test the initial copy against the original
        dump = pickle.dumps(it, protocol)
        i2 = pickle.loads(dump)
        self.assertEqual(type(it), type(i2))
        a, b = expand(it), expand(i2)
        self.assertEqual(a, b)
        if compare:
            c = expand(compare)
            self.assertEqual(a, c)

        # Take from the copy, and create another copy and compare them.
        i3 = pickle.loads(dump)
        took = 0
        try:
            for i in range(take):
                next(i3)
                took += 1
        except StopIteration:
            pass #in case there is less data than 'take'
        dump = pickle.dumps(i3, protocol)
        i4 = pickle.loads(dump)
        a, b = expand(i3), expand(i4)
        self.assertEqual(a, b)
        if compare:
            c = expand(compare[took:])
            self.assertEqual(a, c);
    def test_accumulate(self):
        """accumulate(): positional/keyword args, multiple numeric types,
        edge cases, custom binary functions, error cases, and pickling."""
        self.assertEqual(list(accumulate(range(10))),               # one positional arg
                         [0, 1, 3, 6, 10, 15, 21, 28, 36, 45])
        self.assertEqual(list(accumulate(iterable=range(10))),      # kw arg
                         [0, 1, 3, 6, 10, 15, 21, 28, 36, 45])
        for typ in int, complex, Decimal, Fraction:                 # multiple types
            self.assertEqual(
                list(accumulate(map(typ, range(10)))),
                list(map(typ, [0, 1, 3, 6, 10, 15, 21, 28, 36, 45])))
        self.assertEqual(list(accumulate('abc')), ['a', 'ab', 'abc'])  # works with non-numeric
        self.assertEqual(list(accumulate([])), [])                  # empty iterable
        self.assertEqual(list(accumulate([7])), [7])                # iterable of length one
        self.assertRaises(TypeError, accumulate, range(10), 5, 6)   # too many args
        self.assertRaises(TypeError, accumulate)                    # too few args
        self.assertRaises(TypeError, accumulate, x=range(10))       # unexpected kwd arg
        self.assertRaises(TypeError, list, accumulate([1, []]))     # args that don't add

        s = [2, 8, 9, 5, 7, 0, 3, 4, 1, 6]
        self.assertEqual(list(accumulate(s, min)),
                         [2, 2, 2, 2, 2, 0, 0, 0, 0, 0])
        self.assertEqual(list(accumulate(s, max)),
                         [2, 8, 9, 9, 9, 9, 9, 9, 9, 9])
        self.assertEqual(list(accumulate(s, operator.mul)),
                         [2, 16, 144, 720, 5040, 0, 0, 0, 0, 0])
        with self.assertRaises(TypeError):
            list(accumulate(s, chr))                                # unary-operation
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            self.pickletest(proto, accumulate(range(10)))           # test pickling
    def test_chain(self):
        """chain(): compare the C implementation against a pure-python
        generator on the same inputs."""
        def chain2(*iterables):
            'Pure python version in the docs'
            for it in iterables:
                for element in it:
                    yield element

        for c in (chain, chain2):
            self.assertEqual(list(c('abc', 'def')), list('abcdef'))
            self.assertEqual(list(c('abc')), list('abc'))
            self.assertEqual(list(c('')), [])
            self.assertEqual(take(4, c('abc', 'def')), list('abcd'))
            self.assertRaises(TypeError, list,c(2, 3))
    def test_chain_from_iterable(self):
        """chain.from_iterable(): basic behavior, laziness, error handling."""
        self.assertEqual(list(chain.from_iterable(['abc', 'def'])), list('abcdef'))
        self.assertEqual(list(chain.from_iterable(['abc'])), list('abc'))
        self.assertEqual(list(chain.from_iterable([''])), [])
        self.assertEqual(take(4, chain.from_iterable(['abc', 'def'])), list('abcd'))
        self.assertRaises(TypeError, list, chain.from_iterable([2, 3]))
    def test_chain_reducible(self):
        """chain(): deepcopy and pickling, including mid-iteration state."""
        for oper in [copy.deepcopy] + picklecopiers:
            it = chain('abc', 'def')
            self.assertEqual(list(oper(it)), list('abcdef'))
            self.assertEqual(next(it), 'a')
            # A copy made after partial consumption resumes from 'b'.
            self.assertEqual(list(oper(it)), list('bcdef'))

            self.assertEqual(list(oper(chain(''))), [])
            self.assertEqual(take(4, oper(chain('abc', 'def'))), list('abcd'))
            self.assertRaises(TypeError, list, oper(chain(2, 3)))
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            self.pickletest(proto, chain('abc', 'def'), compare=list('abcdef'))
def test_combinations(self):
self.assertRaises(TypeError, combinations, 'abc') # missing r argument
self.assertRaises(TypeError, combinations, 'abc', 2, 1) # too many arguments
self.assertRaises(TypeError, combinations, None) # pool is not iterable
self.assertRaises(ValueError, combinations, 'abc', -2) # r is negative
for op in [lambda a:a] + picklecopiers:
self.assertEqual(list(op(combinations('abc', 32))), []) # r > n
self.assertEqual(list(op(combinations('ABCD', 2))),
[('A','B'), ('A','C'), ('A','D'), ('B','C'), ('B','D'), ('C','D')])
testIntermediate = combinations('ABCD', 2)
next(testIntermediate)
self.assertEqual(list(op(testIntermediate)),
[('A','C'), ('A','D'), ('B','C'), ('B','D'), ('C','D')])
self.assertEqual(list(op(combinations(range(4), 3))),
[(0,1,2), (0,1,3), (0,2,3), (1,2,3)])
testIntermediate = combinations(range(4), 3)
next(testIntermediate)
self.assertEqual(list(op(testIntermediate)),
[(0,1,3), (0,2,3), (1,2,3)])
def combinations1(iterable, r):
'Pure python version shown in the docs'
pool = tuple(iterable)
n = len(pool)
if r > n:
return
indices = list(range(r))
yield tuple(pool[i] for i in indices)
while 1:
for i in reversed(range(r)):
if indices[i] != i + n - r:
break
else:
return
indices[i] += 1
for j in range(i+1, r):
indices[j] = indices[j-1] + 1
yield tuple(pool[i] for i in indices)
def combinations2(iterable, r):
'Pure python version shown in the docs'
pool = tuple(iterable)
n = len(pool)
for indices in permutations(range(n), r):
if sorted(indices) == list(indices):
yield tuple(pool[i] for i in indices)
def combinations3(iterable, r):
'Pure python version from cwr()'
pool = tuple(iterable)
n = len(pool)
for indices in combinations_with_replacement(range(n), r):
if len(set(indices)) == r:
yield tuple(pool[i] for i in indices)
for n in range(7):
values = [5*x-12 for x in range(n)]
for r in range(n+2):
result = list(combinations(values, r))
self.assertEqual(len(result), 0 if r>n else fact(n) / fact(r) / fact(n-r)) # right number of combs
self.assertEqual(len(result), len(set(result))) # no repeats
self.assertEqual(result, sorted(result)) # lexicographic order
for c in result:
self.assertEqual(len(c), r) # r-length combinations
self.assertEqual(len(set(c)), r) # no duplicate elements
self.assertEqual(list(c), sorted(c)) # keep original ordering
self.assertTrue(all(e in values for e in c)) # elements taken from input iterable
self.assertEqual(list(c),
[e for e in values if e in c]) # comb is a subsequence of the input iterable
self.assertEqual(result, list(combinations1(values, r))) # matches first pure python version
self.assertEqual(result, list(combinations2(values, r))) # matches second pure python version
self.assertEqual(result, list(combinations3(values, r))) # matches second pure python version
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, combinations(values, r)) # test pickling
    @support.bigaddrspacetest
    def test_combinations_overflow(self):
        # A huge r must raise cleanly rather than crash the interpreter.
        with self.assertRaises((OverflowError, MemoryError)):
            combinations("AA", 2**29)
    # Test implementation detail:  tuple re-use
    @support.impl_detail("tuple reuse is specific to CPython")
    def test_combinations_tuple_reuse(self):
        # The live iterator reuses one result tuple (all ids identical);
        # materializing with list() must yield distinct tuples.
        self.assertEqual(len(set(map(id, combinations('abcde', 3)))), 1)
        self.assertNotEqual(len(set(map(id, list(combinations('abcde', 3))))), 1)
def test_combinations_with_replacement(self):
cwr = combinations_with_replacement
self.assertRaises(TypeError, cwr, 'abc') # missing r argument
self.assertRaises(TypeError, cwr, 'abc', 2, 1) # too many arguments
self.assertRaises(TypeError, cwr, None) # pool is not iterable
self.assertRaises(ValueError, cwr, 'abc', -2) # r is negative
for op in [lambda a:a] + picklecopiers:
self.assertEqual(list(op(cwr('ABC', 2))),
[('A','A'), ('A','B'), ('A','C'), ('B','B'), ('B','C'), ('C','C')])
testIntermediate = cwr('ABC', 2)
next(testIntermediate)
self.assertEqual(list(op(testIntermediate)),
[('A','B'), ('A','C'), ('B','B'), ('B','C'), ('C','C')])
def cwr1(iterable, r):
'Pure python version shown in the docs'
# number items returned: (n+r-1)! / r! / (n-1)! when n>0
pool = tuple(iterable)
n = len(pool)
if not n and r:
return
indices = [0] * r
yield tuple(pool[i] for i in indices)
while 1:
for i in reversed(range(r)):
if indices[i] != n - 1:
break
else:
return
indices[i:] = [indices[i] + 1] * (r - i)
yield tuple(pool[i] for i in indices)
def cwr2(iterable, r):
'Pure python version shown in the docs'
pool = tuple(iterable)
n = len(pool)
for indices in product(range(n), repeat=r):
if sorted(indices) == list(indices):
yield tuple(pool[i] for i in indices)
def numcombs(n, r):
if not n:
return 0 if r else 1
return fact(n+r-1) / fact(r)/ fact(n-1)
for n in range(7):
values = [5*x-12 for x in range(n)]
for r in range(n+2):
result = list(cwr(values, r))
self.assertEqual(len(result), numcombs(n, r)) # right number of combs
self.assertEqual(len(result), len(set(result))) # no repeats
self.assertEqual(result, sorted(result)) # lexicographic order
regular_combs = list(combinations(values, r)) # compare to combs without replacement
if n == 0 or r <= 1:
self.assertEqual(result, regular_combs) # cases that should be identical
else:
self.assertTrue(set(result) >= set(regular_combs)) # rest should be supersets of regular combs
for c in result:
self.assertEqual(len(c), r) # r-length combinations
noruns = [k for k,v in groupby(c)] # combo without consecutive repeats
self.assertEqual(len(noruns), len(set(noruns))) # no repeats other than consecutive
self.assertEqual(list(c), sorted(c)) # keep original ordering
self.assertTrue(all(e in values for e in c)) # elements taken from input iterable
self.assertEqual(noruns,
[e for e in values if e in c]) # comb is a subsequence of the input iterable
self.assertEqual(result, list(cwr1(values, r))) # matches first pure python version
self.assertEqual(result, list(cwr2(values, r))) # matches second pure python version
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, cwr(values,r)) # test pickling
    @support.bigaddrspacetest
    def test_combinations_with_replacement_overflow(self):
        # A huge r must raise cleanly rather than crash the interpreter.
        with self.assertRaises((OverflowError, MemoryError)):
            combinations_with_replacement("AA", 2**30)
    # Test implementation detail:  tuple re-use
    @support.impl_detail("tuple reuse is specific to CPython")
    def test_combinations_with_replacement_tuple_reuse(self):
        # The live iterator reuses one result tuple (all ids identical);
        # materializing with list() must yield distinct tuples.
        cwr = combinations_with_replacement
        self.assertEqual(len(set(map(id, cwr('abcde', 3)))), 1)
        self.assertNotEqual(len(set(map(id, list(cwr('abcde', 3))))), 1)
def test_permutations(self):
self.assertRaises(TypeError, permutations) # too few arguments
self.assertRaises(TypeError, permutations, 'abc', 2, 1) # too many arguments
self.assertRaises(TypeError, permutations, None) # pool is not iterable
self.assertRaises(ValueError, permutations, 'abc', -2) # r is negative
self.assertEqual(list(permutations('abc', 32)), []) # r > n
self.assertRaises(TypeError, permutations, 'abc', 's') # r is not an int or None
self.assertEqual(list(permutations(range(3), 2)),
[(0,1), (0,2), (1,0), (1,2), (2,0), (2,1)])
def permutations1(iterable, r=None):
'Pure python version shown in the docs'
pool = tuple(iterable)
n = len(pool)
r = n if r is None else r
if r > n:
return
indices = list(range(n))
cycles = list(range(n-r+1, n+1))[::-1]
yield tuple(pool[i] for i in indices[:r])
while n:
for i in reversed(range(r)):
cycles[i] -= 1
if cycles[i] == 0:
indices[i:] = indices[i+1:] + indices[i:i+1]
cycles[i] = n - i
else:
j = cycles[i]
indices[i], indices[-j] = indices[-j], indices[i]
yield tuple(pool[i] for i in indices[:r])
break
else:
return
def permutations2(iterable, r=None):
'Pure python version shown in the docs'
pool = tuple(iterable)
n = len(pool)
r = n if r is None else r
for indices in product(range(n), repeat=r):
if len(set(indices)) == r:
yield tuple(pool[i] for i in indices)
for n in range(7):
values = [5*x-12 for x in range(n)]
for r in range(n+2):
result = list(permutations(values, r))
self.assertEqual(len(result), 0 if r>n else fact(n) / fact(n-r)) # right number of perms
self.assertEqual(len(result), len(set(result))) # no repeats
self.assertEqual(result, sorted(result)) # lexicographic order
for p in result:
self.assertEqual(len(p), r) # r-length permutations
self.assertEqual(len(set(p)), r) # no duplicate elements
self.assertTrue(all(e in values for e in p)) # elements taken from input iterable
self.assertEqual(result, list(permutations1(values, r))) # matches first pure python version
self.assertEqual(result, list(permutations2(values, r))) # matches second pure python version
if r == n:
self.assertEqual(result, list(permutations(values, None))) # test r as None
self.assertEqual(result, list(permutations(values))) # test default r
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, permutations(values, r)) # test pickling
    @support.bigaddrspacetest
    def test_permutations_overflow(self):
        # A huge r must raise cleanly rather than crash the interpreter.
        with self.assertRaises((OverflowError, MemoryError)):
            permutations("A", 2**30)
    @support.impl_detail("tuple reuse is specific to CPython")
    def test_permutations_tuple_reuse(self):
        # The live iterator reuses one result tuple (all ids identical);
        # materializing with list() must yield distinct tuples.
        self.assertEqual(len(set(map(id, permutations('abcde', 3)))), 1)
        self.assertNotEqual(len(set(map(id, list(permutations('abcde', 3))))), 1)
    def test_combinatorics(self):
        # Test relationships between product(), permutations(),
        # combinations() and combinations_with_replacement().
        # NOTE(review): the local name `prod` shadows the module-level
        # prod() helper within this method.
        for n in range(6):
            s = 'ABCDEFG'[:n]
            for r in range(8):
                prod = list(product(s, repeat=r))
                cwr = list(combinations_with_replacement(s, r))
                perm = list(permutations(s, r))
                comb = list(combinations(s, r))

                # Check size
                self.assertEqual(len(prod), n**r)
                self.assertEqual(len(cwr), (fact(n+r-1) / fact(r)/ fact(n-1)) if n else (not r))
                self.assertEqual(len(perm), 0 if r>n else fact(n) / fact(n-r))
                self.assertEqual(len(comb), 0 if r>n else fact(n) / fact(r) / fact(n-r))

                # Check lexicographic order without repeated tuples
                self.assertEqual(prod, sorted(set(prod)))
                self.assertEqual(cwr, sorted(set(cwr)))
                self.assertEqual(perm, sorted(set(perm)))
                self.assertEqual(comb, sorted(set(comb)))

                # Check interrelationships
                self.assertEqual(cwr, [t for t in prod if sorted(t)==list(t)])     # cwr: prods which are sorted
                self.assertEqual(perm, [t for t in prod if len(set(t))==r])        # perm: prods with no dups
                self.assertEqual(comb, [t for t in perm if sorted(t)==list(t)])    # comb: perms that are sorted
                self.assertEqual(comb, [t for t in cwr if len(set(t))==r])         # comb: cwrs without dups
                self.assertEqual(comb, list(filter(set(cwr).__contains__, perm)))  # comb: perm that is a cwr
                self.assertEqual(comb, list(filter(set(perm).__contains__, cwr)))  # comb: cwr that is a perm
                self.assertEqual(comb, sorted(set(cwr) & set(perm)))               # comb: both a cwr and a perm
    def test_compress(self):
        """compress(): keyword/positional args, selector lengths, laziness,
        error cases, and copy/deepcopy/pickle round-trips."""
        self.assertEqual(list(compress(data='ABCDEF', selectors=[1,0,1,0,1,1])), list('ACEF'))
        self.assertEqual(list(compress('ABCDEF', [1,0,1,0,1,1])), list('ACEF'))
        self.assertEqual(list(compress('ABCDEF', [0,0,0,0,0,0])), list(''))
        self.assertEqual(list(compress('ABCDEF', [1,1,1,1,1,1])), list('ABCDEF'))
        self.assertEqual(list(compress('ABCDEF', [1,0,1])), list('AC'))
        self.assertEqual(list(compress('ABC', [0,1,1,1,1,1])), list('BC'))
        n = 10000
        data = chain.from_iterable(repeat(range(6), n))
        selectors = chain.from_iterable(repeat((0, 1)))
        self.assertEqual(list(compress(data, selectors)), [1,3,5] * n)
        self.assertRaises(TypeError, compress, None, range(6))      # 1st arg not iterable
        self.assertRaises(TypeError, compress, range(6), None)      # 2nd arg not iterable
        self.assertRaises(TypeError, compress, range(6))            # too few args
        # NOTE(review): the next line duplicates the "2nd arg not iterable"
        # case above; the TypeError comes from None, not from an extra arg.
        self.assertRaises(TypeError, compress, range(6), None)      # too many args

        # check copy, deepcopy, pickle
        for op in [lambda a:copy.copy(a), lambda a:copy.deepcopy(a)] + picklecopiers:
            for data, selectors, result1, result2 in [
                ('ABCDEF', [1,0,1,0,1,1], 'ACEF', 'CEF'),
                ('ABCDEF', [0,0,0,0,0,0], '', ''),
                ('ABCDEF', [1,1,1,1,1,1], 'ABCDEF', 'BCDEF'),
                ('ABCDEF', [1,0,1], 'AC', 'C'),
                ('ABC', [0,1,1,1,1,1], 'BC', 'C'),
                ]:

                self.assertEqual(list(op(compress(data=data, selectors=selectors))), list(result1))
                self.assertEqual(list(op(compress(data, selectors))), list(result1))
                testIntermediate = compress(data, selectors)
                if result1:
                    next(testIntermediate)
                    self.assertEqual(list(op(testIntermediate)), list(result2))
def test_count(self):
self.assertEqual(lzip('abc',count()), [('a', 0), ('b', 1), ('c', 2)])
self.assertEqual(lzip('abc',count(3)), [('a', 3), ('b', 4), ('c', 5)])
self.assertEqual(take(2, lzip('abc',count(3))), [('a', 3), ('b', 4)])
self.assertEqual(take(2, zip('abc',count(-1))), [('a', -1), ('b', 0)])
self.assertEqual(take(2, zip('abc',count(-3))), [('a', -3), ('b', -2)])
self.assertRaises(TypeError, count, 2, 3, 4)
self.assertRaises(TypeError, count, 'a')
self.assertEqual(list(islice(count(maxsize-5), 10)),
list(range(maxsize-5, maxsize+5)))
self.assertEqual(list(islice(count(-maxsize-5), 10)),
list(range(-maxsize-5, -maxsize+5)))
self.assertEqual(list(islice(count(10, maxsize+5), 3)),
list(range(10, 10+3*(maxsize+5), maxsize+5)))
c = count(3)
self.assertEqual(repr(c), 'count(3)')
next(c)
self.assertEqual(repr(c), 'count(4)')
c = count(-9)
self.assertEqual(repr(c), 'count(-9)')
next(c)
self.assertEqual(repr(count(10.25)), 'count(10.25)')
self.assertEqual(next(c), -8)
for i in (-sys.maxsize-5, -sys.maxsize+5 ,-10, -1, 0, 10, sys.maxsize-5, sys.maxsize+5):
# Test repr
r1 = repr(count(i))
r2 = 'count(%r)'.__mod__(i)
self.assertEqual(r1, r2)
# check copy, deepcopy, pickle
for value in -3, 3, maxsize-5, maxsize+5:
c = count(value)
self.assertEqual(next(copy.copy(c)), value)
self.assertEqual(next(copy.deepcopy(c)), value)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, count(value))
#check proper internal error handling for large "step' sizes
count(1, maxsize+5); sys.exc_info()
def test_count_with_stride(self):
self.assertEqual(lzip('abc',count(2,3)), [('a', 2), ('b', 5), ('c', 8)])
self.assertEqual(lzip('abc',count(start=2,step=3)),
[('a', 2), ('b', 5), ('c', 8)])
self.assertEqual(lzip('abc',count(step=-1)),
[('a', 0), ('b', -1), ('c', -2)])
self.assertEqual(lzip('abc',count(2,0)), [('a', 2), ('b', 2), ('c', 2)])
self.assertEqual(lzip('abc',count(2,1)), [('a', 2), ('b', 3), ('c', 4)])
self.assertEqual(lzip('abc',count(2,3)), [('a', 2), ('b', 5), ('c', 8)])
self.assertEqual(take(20, count(maxsize-15, 3)), take(20, range(maxsize-15, maxsize+100, 3)))
self.assertEqual(take(20, count(-maxsize-15, 3)), take(20, range(-maxsize-15,-maxsize+100, 3)))
self.assertEqual(take(3, count(2, 3.25-4j)), [2, 5.25-4j, 8.5-8j])
self.assertEqual(take(3, count(Decimal('1.1'), Decimal('.1'))),
[Decimal('1.1'), Decimal('1.2'), Decimal('1.3')])
self.assertEqual(take(3, count(Fraction(2,3), Fraction(1,7))),
[Fraction(2,3), Fraction(17,21), Fraction(20,21)])
self.assertEqual(repr(take(3, count(10, 2.5))), repr([10, 12.5, 15.0]))
c = count(3, 5)
self.assertEqual(repr(c), 'count(3, 5)')
next(c)
self.assertEqual(repr(c), 'count(8, 5)')
c = count(-9, 0)
self.assertEqual(repr(c), 'count(-9, 0)')
next(c)
self.assertEqual(repr(c), 'count(-9, 0)')
c = count(-9, -3)
self.assertEqual(repr(c), 'count(-9, -3)')
next(c)
self.assertEqual(repr(c), 'count(-12, -3)')
self.assertEqual(repr(c), 'count(-12, -3)')
self.assertEqual(repr(count(10.5, 1.25)), 'count(10.5, 1.25)')
self.assertEqual(repr(count(10.5, 1)), 'count(10.5)') # suppress step=1 when it's an int
self.assertEqual(repr(count(10.5, 1.00)), 'count(10.5, 1.0)') # do show float values lilke 1.0
for i in (-sys.maxsize-5, -sys.maxsize+5 ,-10, -1, 0, 10, sys.maxsize-5, sys.maxsize+5):
for j in (-sys.maxsize-5, -sys.maxsize+5 ,-10, -1, 0, 1, 10, sys.maxsize-5, sys.maxsize+5):
# Test repr
r1 = repr(count(i, j))
if j == 1:
r2 = ('count(%r)' % i)
else:
r2 = ('count(%r, %r)' % (i, j))
self.assertEqual(r1, r2)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, count(i, j))
def test_cycle(self):
self.assertEqual(take(10, cycle('abc')), list('abcabcabca'))
self.assertEqual(list(cycle('')), [])
self.assertRaises(TypeError, cycle)
self.assertRaises(TypeError, cycle, 5)
self.assertEqual(list(islice(cycle(gen3()),10)), [0,1,2,0,1,2,0,1,2,0])
# check copy, deepcopy, pickle
c = cycle('abc')
self.assertEqual(next(c), 'a')
#simple copy currently not supported, because __reduce__ returns
#an internal iterator
#self.assertEqual(take(10, copy.copy(c)), list('bcabcabcab'))
self.assertEqual(take(10, copy.deepcopy(c)), list('bcabcabcab'))
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.assertEqual(take(10, pickle.loads(pickle.dumps(c, proto))),
list('bcabcabcab'))
next(c)
self.assertEqual(take(10, pickle.loads(pickle.dumps(c, proto))),
list('cabcabcabc'))
next(c)
next(c)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, cycle('abc'))
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
# test with partial consumed input iterable
it = iter('abcde')
c = cycle(it)
_ = [next(c) for i in range(2)] # consume 2 of 5 inputs
p = pickle.dumps(c, proto)
d = pickle.loads(p) # rebuild the cycle object
self.assertEqual(take(20, d), list('cdeabcdeabcdeabcdeab'))
# test with completely consumed input iterable
it = iter('abcde')
c = cycle(it)
_ = [next(c) for i in range(7)] # consume 7 of 5 inputs
p = pickle.dumps(c, proto)
d = pickle.loads(p) # rebuild the cycle object
self.assertEqual(take(20, d), list('cdeabcdeabcdeabcdeab'))
    def test_cycle_setstate(self):
        # Verify both modes for restoring state

        # Mode 0 is efficient.  It uses an incompletely consumed input
        # iterator to build a cycle object and then passes in state with
        # a list of previously consumed values.  There is no data
        # overlap between the two.
        c = cycle('defg')
        c.__setstate__((list('abc'), 0))
        self.assertEqual(take(20, c), list('defgabcdefgabcdefgab'))

        # Mode 1 is inefficient.  It starts with a cycle object built
        # from an iterator over the remaining elements in a partial
        # cycle and then passes in state with all of the previously
        # seen values (this overlaps values included in the iterator).
        c = cycle('defg')
        c.__setstate__((list('abcdefg'), 1))
        self.assertEqual(take(20, c), list('defgabcdefgabcdefgab'))

        # The first argument to setstate needs to be a tuple
        with self.assertRaises(SystemError):
            cycle('defg').__setstate__([list('abcdefg'), 0])

        # The first argument in the setstate tuple must be a list
        with self.assertRaises(TypeError):
            c = cycle('defg')
            c.__setstate__((dict.fromkeys('defg'), 0))
            take(20, c)

        # The second argument in the setstate tuple must be an int
        with self.assertRaises(TypeError):
            cycle('defg').__setstate__((list('abcdefg'), 'x'))
def test_groupby(self):
# Check whether it accepts arguments correctly
self.assertEqual([], list(groupby([])))
self.assertEqual([], list(groupby([], key=id)))
self.assertRaises(TypeError, list, groupby('abc', []))
self.assertRaises(TypeError, groupby, None)
self.assertRaises(TypeError, groupby, 'abc', lambda x:x, 10)
# Check normal input
s = [(0, 10, 20), (0, 11,21), (0,12,21), (1,13,21), (1,14,22),
(2,15,22), (3,16,23), (3,17,23)]
dup = []
for k, g in groupby(s, lambda r:r[0]):
for elem in g:
self.assertEqual(k, elem[0])
dup.append(elem)
self.assertEqual(s, dup)
# Check normal pickled
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
dup = []
for k, g in pickle.loads(pickle.dumps(groupby(s, testR), proto)):
for elem in g:
self.assertEqual(k, elem[0])
dup.append(elem)
self.assertEqual(s, dup)
# Check nested case
dup = []
for k, g in groupby(s, testR):
for ik, ig in groupby(g, testR2):
for elem in ig:
self.assertEqual(k, elem[0])
self.assertEqual(ik, elem[2])
dup.append(elem)
self.assertEqual(s, dup)
# Check nested and pickled
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
dup = []
for k, g in pickle.loads(pickle.dumps(groupby(s, testR), proto)):
for ik, ig in pickle.loads(pickle.dumps(groupby(g, testR2), proto)):
for elem in ig:
self.assertEqual(k, elem[0])
self.assertEqual(ik, elem[2])
dup.append(elem)
self.assertEqual(s, dup)
# Check case where inner iterator is not used
keys = [k for k, g in groupby(s, testR)]
expectedkeys = set([r[0] for r in s])
self.assertEqual(set(keys), expectedkeys)
self.assertEqual(len(keys), len(expectedkeys))
# Exercise pipes and filters style
s = 'abracadabra'
# sort s | uniq
r = [k for k, g in groupby(sorted(s))]
self.assertEqual(r, ['a', 'b', 'c', 'd', 'r'])
# sort s | uniq -d
r = [k for k, g in groupby(sorted(s)) if list(islice(g,1,2))]
self.assertEqual(r, ['a', 'b', 'r'])
# sort s | uniq -c
r = [(len(list(g)), k) for k, g in groupby(sorted(s))]
self.assertEqual(r, [(5, 'a'), (2, 'b'), (1, 'c'), (1, 'd'), (2, 'r')])
# sort s | uniq -c | sort -rn | head -3
r = sorted([(len(list(g)) , k) for k, g in groupby(sorted(s))], reverse=True)[:3]
self.assertEqual(r, [(5, 'a'), (2, 'r'), (2, 'b')])
# iter.__next__ failure
class ExpectedError(Exception):
pass
def delayed_raise(n=0):
for i in range(n):
yield 'yo'
raise ExpectedError
def gulp(iterable, keyp=None, func=list):
return [func(g) for k, g in groupby(iterable, keyp)]
# iter.__next__ failure on outer object
self.assertRaises(ExpectedError, gulp, delayed_raise(0))
# iter.__next__ failure on inner object
self.assertRaises(ExpectedError, gulp, delayed_raise(1))
# __eq__ failure
class DummyCmp:
def __eq__(self, dst):
raise ExpectedError
s = [DummyCmp(), DummyCmp(), None]
# __eq__ failure on outer object
self.assertRaises(ExpectedError, gulp, s, func=id)
# __eq__ failure on inner object
self.assertRaises(ExpectedError, gulp, s)
# keyfunc failure
def keyfunc(obj):
if keyfunc.skip > 0:
keyfunc.skip -= 1
return obj
else:
raise ExpectedError
# keyfunc failure on outer object
keyfunc.skip = 0
self.assertRaises(ExpectedError, gulp, [None], keyfunc)
keyfunc.skip = 1
self.assertRaises(ExpectedError, gulp, [None, None], keyfunc)
    def test_filter(self):
        """filter(): predicates (incl. None/bool), infinite input, error
        cases, and copy/deepcopy/pickle round-trips."""
        self.assertEqual(list(filter(isEven, range(6))), [0,2,4])
        self.assertEqual(list(filter(None, [0,1,0,2,0])), [1,2])
        self.assertEqual(list(filter(bool, [0,1,0,2,0])), [1,2])
        self.assertEqual(take(4, filter(isEven, count())), [0,2,4,6])
        self.assertRaises(TypeError, filter)
        self.assertRaises(TypeError, filter, lambda x:x)
        self.assertRaises(TypeError, filter, lambda x:x, range(6), 7)
        self.assertRaises(TypeError, filter, isEven, 3)
        self.assertRaises(TypeError, next, filter(range(6), range(6)))

        # check copy, deepcopy, pickle
        ans = [0,2,4]

        c = filter(isEven, range(6))
        self.assertEqual(list(copy.copy(c)), ans)
        c = filter(isEven, range(6))
        self.assertEqual(list(copy.deepcopy(c)), ans)
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            c = filter(isEven, range(6))
            self.assertEqual(list(pickle.loads(pickle.dumps(c, proto))), ans)
            next(c)
            # A pickle taken after partial consumption resumes mid-stream.
            self.assertEqual(list(pickle.loads(pickle.dumps(c, proto))), ans[1:])
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            c = filter(isEven, range(6))
            self.pickletest(proto, c)
    def test_filterfalse(self):
        """filterfalse(): complement of filter(), error cases, pickling."""
        self.assertEqual(list(filterfalse(isEven, range(6))), [1,3,5])
        self.assertEqual(list(filterfalse(None, [0,1,0,2,0])), [0,0,0])
        self.assertEqual(list(filterfalse(bool, [0,1,0,2,0])), [0,0,0])
        self.assertEqual(take(4, filterfalse(isEven, count())), [1,3,5,7])
        self.assertRaises(TypeError, filterfalse)
        self.assertRaises(TypeError, filterfalse, lambda x:x)
        self.assertRaises(TypeError, filterfalse, lambda x:x, range(6), 7)
        self.assertRaises(TypeError, filterfalse, isEven, 3)
        self.assertRaises(TypeError, next, filterfalse(range(6), range(6)))
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            self.pickletest(proto, filterfalse(isEven, range(6)))
    def test_zip(self):
        # zip(): truncation to the shortest input, zero/one-argument forms,
        # and bad-argument errors; lzip() is the eager reference helper.
        # XXX This is rather silly now that builtin zip() calls zip()...
        ans = [(x,y) for x, y in zip('abc',count())]
        self.assertEqual(ans, [('a', 0), ('b', 1), ('c', 2)])
        self.assertEqual(list(zip('abc', range(6))), lzip('abc', range(6)))
        self.assertEqual(list(zip('abcdef', range(3))), lzip('abcdef', range(3)))
        self.assertEqual(take(3,zip('abcdef', count())), lzip('abcdef', range(3)))
        self.assertEqual(list(zip('abcdef')), lzip('abcdef'))
        self.assertEqual(list(zip()), lzip())
        self.assertRaises(TypeError, zip, 3)
        self.assertRaises(TypeError, zip, range(3), 3)
        self.assertEqual([tuple(list(pair)) for pair in zip('abc', 'def')],
                         lzip('abc', 'def'))
        self.assertEqual([pair for pair in zip('abc', 'def')],
                         lzip('abc', 'def'))
    @support.impl_detail("tuple reuse is specific to CPython")
    def test_zip_tuple_reuse(self):
        # CPython reuses the result tuple while iterating lazily (all ids equal),
        # but not once the results are materialized in a list (all ids distinct).
        ids = list(map(id, zip('abc', 'def')))
        self.assertEqual(min(ids), max(ids))
        ids = list(map(id, list(zip('abc', 'def'))))
        self.assertEqual(len(dict.fromkeys(ids)), len(ids))
        # check copy, deepcopy, pickle
        ans = [(x,y) for x, y in copy.copy(zip('abc',count()))]
        self.assertEqual(ans, [('a', 0), ('b', 1), ('c', 2)])
        ans = [(x,y) for x, y in copy.deepcopy(zip('abc',count()))]
        self.assertEqual(ans, [('a', 0), ('b', 1), ('c', 2)])
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            ans = [(x,y) for x, y in pickle.loads(pickle.dumps(zip('abc',count()), proto))]
            self.assertEqual(ans, [('a', 0), ('b', 1), ('c', 2)])
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            testIntermediate = zip('abc',count())
            next(testIntermediate)  # pickle a partially-consumed zip
            ans = [(x,y) for x, y in pickle.loads(pickle.dumps(testIntermediate, proto))]
            self.assertEqual(ans, [('b', 1), ('c', 2)])
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            self.pickletest(proto, zip('abc', count()))
def test_ziplongest(self):
for args in [
['abc', range(6)],
[range(6), 'abc'],
[range(1000), range(2000,2100), range(3000,3050)],
[range(1000), range(0), range(3000,3050), range(1200), range(1500)],
[range(1000), range(0), range(3000,3050), range(1200), range(1500), range(0)],
]:
target = [tuple([arg[i] if i < len(arg) else None for arg in args])
for i in range(max(map(len, args)))]
self.assertEqual(list(zip_longest(*args)), target)
self.assertEqual(list(zip_longest(*args, **{})), target)
target = [tuple((e is None and 'X' or e) for e in t) for t in target] # Replace None fills with 'X'
self.assertEqual(list(zip_longest(*args, **dict(fillvalue='X'))), target)
self.assertEqual(take(3,zip_longest('abcdef', count())), list(zip('abcdef', range(3)))) # take 3 from infinite input
self.assertEqual(list(zip_longest()), list(zip()))
self.assertEqual(list(zip_longest([])), list(zip([])))
self.assertEqual(list(zip_longest('abcdef')), list(zip('abcdef')))
self.assertEqual(list(zip_longest('abc', 'defg', **{})),
list(zip(list('abc')+[None], 'defg'))) # empty keyword dict
self.assertRaises(TypeError, zip_longest, 3)
self.assertRaises(TypeError, zip_longest, range(3), 3)
for stmt in [
"zip_longest('abc', fv=1)",
"zip_longest('abc', fillvalue=1, bogus_keyword=None)",
]:
try:
eval(stmt, globals(), locals())
except TypeError:
pass
else:
self.fail('Did not raise Type in: ' + stmt)
self.assertEqual([tuple(list(pair)) for pair in zip_longest('abc', 'def')],
list(zip('abc', 'def')))
self.assertEqual([pair for pair in zip_longest('abc', 'def')],
list(zip('abc', 'def')))
    @support.impl_detail("tuple reuse is specific to CPython")
    def test_zip_longest_tuple_reuse(self):
        # Same tuple-reuse implementation detail as test_zip_tuple_reuse,
        # but for zip_longest().
        ids = list(map(id, zip_longest('abc', 'def')))
        self.assertEqual(min(ids), max(ids))
        ids = list(map(id, list(zip_longest('abc', 'def'))))
        self.assertEqual(len(dict.fromkeys(ids)), len(ids))
def test_zip_longest_pickling(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, zip_longest("abc", "def"))
self.pickletest(proto, zip_longest("abc", "defgh"))
self.pickletest(proto, zip_longest("abc", "defgh", fillvalue=1))
self.pickletest(proto, zip_longest("", "defgh"))
    def test_bug_7244(self):
        # Regression test for bpo-7244: zip_longest() must propagate a
        # non-StopIteration exception raised by an input instead of losing it.
        class Repeater:
            # this class is similar to itertools.repeat
            def __init__(self, o, t, e):
                self.o = o  # object to yield
                self.t = int(t)  # number of times to yield it
                self.e = e  # exception class to raise when exhausted
            def __iter__(self): # its iterator is itself
                return self
            def __next__(self):
                if self.t > 0:
                    self.t -= 1
                    return self.o
                else:
                    raise self.e
        # Formerly this code would fail in debug mode with an
        # "Undetected Error" and a StopIteration.
        r1 = Repeater(1, 3, StopIteration)
        r2 = Repeater(2, 4, StopIteration)
        def run(r1, r2):
            result = []
            for i, j in zip_longest(r1, r2, fillvalue=0):
                with support.captured_output('stdout'):
                    print((i, j))
                result.append((i, j))
            return result
        self.assertEqual(run(r1, r2), [(1,2), (1,2), (1,2), (0,2)])
        # Formerly, the RuntimeError would be lost
        # and StopIteration would stop as expected
        r1 = Repeater(1, 3, RuntimeError)
        r2 = Repeater(2, 4, StopIteration)
        it = zip_longest(r1, r2, fillvalue=0)
        self.assertEqual(next(it), (1, 2))
        self.assertEqual(next(it), (1, 2))
        self.assertEqual(next(it), (1, 2))
        self.assertRaises(RuntimeError, next, it)
    def test_product(self):
        # product(): Cartesian product against hand-written expectations, the
        # repeat keyword, and two independent pure-Python reference
        # implementations cross-checked on random argument mixes.
        for args, result in [
            ([], [()]),                     # zero iterables
            (['ab'], [('a',), ('b',)]),     # one iterable
            ([range(2), range(3)], [(0,0), (0,1), (0,2), (1,0), (1,1), (1,2)]),     # two iterables
            ([range(0), range(2), range(3)], []),           # first iterable with zero length
            ([range(2), range(0), range(3)], []),           # middle iterable with zero length
            ([range(2), range(3), range(0)], []),           # last iterable with zero length
            ]:
            self.assertEqual(list(product(*args)), result)
            for r in range(4):
                self.assertEqual(list(product(*(args*r))),
                                 list(product(*args, **dict(repeat=r))))
        self.assertEqual(len(list(product(*[range(7)]*6))), 7**6)
        self.assertRaises(TypeError, product, range(6), None)
        def product1(*args, **kwds):
            # Reference implementation: odometer-style index counting.
            pools = list(map(tuple, args)) * kwds.get('repeat', 1)
            n = len(pools)
            if n == 0:
                yield ()
                return
            if any(len(pool) == 0 for pool in pools):
                return
            indices = [0] * n
            yield tuple(pool[i] for pool, i in zip(pools, indices))
            while 1:
                for i in reversed(range(n)):  # right to left
                    if indices[i] == len(pools[i]) - 1:
                        continue
                    indices[i] += 1
                    for j in range(i+1, n):
                        indices[j] = 0
                    yield tuple(pool[i] for pool, i in zip(pools, indices))
                    break
                else:
                    return
        def product2(*args, **kwds):
            'Pure python version used in docs'
            pools = list(map(tuple, args)) * kwds.get('repeat', 1)
            result = [[]]
            for pool in pools:
                result = [x+[y] for x in result for y in pool]
            for prod in result:
                yield tuple(prod)
        argtypes = ['', 'abc', '', range(0), range(4), dict(a=1, b=2, c=3),
                    set('abcdefg'), range(11), tuple(range(13))]
        for i in range(100):
            args = [random.choice(argtypes) for j in range(random.randrange(5))]
            expected_len = prod(map(len, args))
            self.assertEqual(len(list(product(*args))), expected_len)
            self.assertEqual(list(product(*args)), list(product1(*args)))
            self.assertEqual(list(product(*args)), list(product2(*args)))
            args = map(iter, args)  # product() must also accept plain iterators
            self.assertEqual(len(list(product(*args))), expected_len)
    @support.bigaddrspacetest
    def test_product_overflow(self):
        # An astronomically large repeat count must fail cleanly rather
        # than crash while sizing the result.
        with self.assertRaises((OverflowError, MemoryError)):
            product(*(['ab']*2**5), repeat=2**25)
    @support.impl_detail("tuple reuse is specific to CPython")
    def test_product_tuple_reuse(self):
        # Lazy iteration reuses one result tuple; materializing defeats reuse.
        self.assertEqual(len(set(map(id, product('abc', 'def')))), 1)
        self.assertNotEqual(len(set(map(id, list(product('abc', 'def'))))), 1)
    def test_product_pickling(self):
        # copy/deepcopy/pickle round-trips over the same edge cases that
        # test_product checks directly.
        # check copy, deepcopy, pickle
        for args, result in [
            ([], [()]),                     # zero iterables
            (['ab'], [('a',), ('b',)]),     # one iterable
            ([range(2), range(3)], [(0,0), (0,1), (0,2), (1,0), (1,1), (1,2)]),     # two iterables
            ([range(0), range(2), range(3)], []),           # first iterable with zero length
            ([range(2), range(0), range(3)], []),           # middle iterable with zero length
            ([range(2), range(3), range(0)], []),           # last iterable with zero length
            ]:
            self.assertEqual(list(copy.copy(product(*args))), result)
            self.assertEqual(list(copy.deepcopy(product(*args))), result)
            for proto in range(pickle.HIGHEST_PROTOCOL + 1):
                self.pickletest(proto, product(*args))
    def test_product_issue_25021(self):
        # bpo-25021: __setstate__ must clamp out-of-range indices instead of
        # reading past the end of an input tuple.
        # test that indices are properly clamped to the length of the tuples
        p = product((1, 2),(3,))
        p.__setstate__((0, 0x1000))  # will access tuple element 1 if not clamped
        self.assertEqual(next(p), (2, 3))
        # test that empty tuple in the list will result in an immediate StopIteration
        p = product((1, 2), (), (3,))
        p.__setstate__((0, 0, 0x1000))  # will access tuple element 1 if not clamped
        self.assertRaises(StopIteration, next, p)
    def test_repeat(self):
        # repeat(): keyword/positional forms, finite and infinite counts,
        # the live repr countdown, and copy/deepcopy/pickle round-trips.
        self.assertEqual(list(repeat(object='a', times=3)), ['a', 'a', 'a'])
        self.assertEqual(lzip(range(3),repeat('a')),
                         [(0, 'a'), (1, 'a'), (2, 'a')])
        self.assertEqual(list(repeat('a', 3)), ['a', 'a', 'a'])
        self.assertEqual(take(3, repeat('a')), ['a', 'a', 'a'])
        self.assertEqual(list(repeat('a', 0)), [])
        self.assertEqual(list(repeat('a', -3)), [])
        self.assertRaises(TypeError, repeat)
        self.assertRaises(TypeError, repeat, None, 3, 4)
        self.assertRaises(TypeError, repeat, None, 'a')
        r = repeat(1+0j)
        self.assertEqual(repr(r), 'repeat((1+0j))')
        r = repeat(1+0j, 5)
        self.assertEqual(repr(r), 'repeat((1+0j), 5)')
        list(r)  # exhaust it; the repr reflects the remaining count
        self.assertEqual(repr(r), 'repeat((1+0j), 0)')
        # check copy, deepcopy, pickle
        c = repeat(object='a', times=10)
        self.assertEqual(next(c), 'a')
        self.assertEqual(take(2, copy.copy(c)), list('a' * 2))
        self.assertEqual(take(2, copy.deepcopy(c)), list('a' * 2))
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            self.pickletest(proto, repeat(object='a', times=10))
def test_repeat_with_negative_times(self):
self.assertEqual(repr(repeat('a', -1)), "repeat('a', 0)")
self.assertEqual(repr(repeat('a', -2)), "repeat('a', 0)")
self.assertEqual(repr(repeat('a', times=-1)), "repeat('a', 0)")
self.assertEqual(repr(repeat('a', times=-2)), "repeat('a', 0)")
    def test_map(self):
        # map(): multi-argument mapping (stops at the shortest input),
        # argument errors, callable-error propagation, and
        # copy/deepcopy/pickle round-trips.
        self.assertEqual(list(map(operator.pow, range(3), range(1,7))),
                         [0**1, 1**2, 2**3])
        self.assertEqual(list(map(tupleize, 'abc', range(5))),
                         [('a',0),('b',1),('c',2)])
        self.assertEqual(list(map(tupleize, 'abc', count())),
                         [('a',0),('b',1),('c',2)])
        self.assertEqual(take(2,map(tupleize, 'abc', count())),
                         [('a',0),('b',1)])
        self.assertEqual(list(map(operator.pow, [])), [])
        self.assertRaises(TypeError, map)
        self.assertRaises(TypeError, list, map(None, range(3), range(3)))
        self.assertRaises(TypeError, map, operator.neg)
        self.assertRaises(TypeError, next, map(10, range(5)))
        self.assertRaises(ValueError, next, map(errfunc, [4], [5]))
        self.assertRaises(TypeError, next, map(onearg, [4], [5]))
        # check copy, deepcopy, pickle
        ans = [('a',0),('b',1),('c',2)]
        c = map(tupleize, 'abc', count())
        self.assertEqual(list(copy.copy(c)), ans)
        c = map(tupleize, 'abc', count())
        self.assertEqual(list(copy.deepcopy(c)), ans)
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            c = map(tupleize, 'abc', count())
            self.pickletest(proto, c)
    def test_starmap(self):
        # starmap(): unpacks each element as the call arguments; checks
        # argument errors, error propagation, and copy/deepcopy/pickle.
        self.assertEqual(list(starmap(operator.pow, zip(range(3), range(1,7)))),
                         [0**1, 1**2, 2**3])
        self.assertEqual(take(3, starmap(operator.pow, zip(count(), count(1)))),
                         [0**1, 1**2, 2**3])
        self.assertEqual(list(starmap(operator.pow, [])), [])
        self.assertEqual(list(starmap(operator.pow, [iter([4,5])])), [4**5])
        self.assertRaises(TypeError, list, starmap(operator.pow, [None]))
        self.assertRaises(TypeError, starmap)
        self.assertRaises(TypeError, starmap, operator.pow, [(4,5)], 'extra')
        self.assertRaises(TypeError, next, starmap(10, [(4,5)]))
        self.assertRaises(ValueError, next, starmap(errfunc, [(4,5)]))
        self.assertRaises(TypeError, next, starmap(onearg, [(4,5)]))
        # check copy, deepcopy, pickle
        ans = [0**1, 1**2, 2**3]
        c = starmap(operator.pow, zip(range(3), range(1,7)))
        self.assertEqual(list(copy.copy(c)), ans)
        c = starmap(operator.pow, zip(range(3), range(1,7)))
        self.assertEqual(list(copy.deepcopy(c)), ans)
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            c = starmap(operator.pow, zip(range(3), range(1,7)))
            self.pickletest(proto, c)
    def test_islice(self):
        # islice(): agreement with range() slicing, stop=None forms, exact
        # consumption of the underlying iterator, invalid arguments, and
        # copy/deepcopy/pickle; finally, that an exhausted islice drops its
        # reference to the source iterator.
        for args in [          # islice(args) should agree with range(args)
                (10, 20, 3),
                (10, 3, 20),
                (10, 20),
                (10, 3),
                (20,)
                ]:
            self.assertEqual(list(islice(range(100), *args)),
                             list(range(*args)))
        for args, tgtargs in [  # Stop when seqn is exhausted
                ((10, 110, 3), ((10, 100, 3))),
                ((10, 110), ((10, 100))),
                ((110,), (100,))
                ]:
            self.assertEqual(list(islice(range(100), *args)),
                             list(range(*tgtargs)))
        # Test stop=None
        self.assertEqual(list(islice(range(10), None)), list(range(10)))
        self.assertEqual(list(islice(range(10), None, None)), list(range(10)))
        self.assertEqual(list(islice(range(10), None, None, None)), list(range(10)))
        self.assertEqual(list(islice(range(10), 2, None)), list(range(2, 10)))
        self.assertEqual(list(islice(range(10), 1, None, 2)), list(range(1, 10, 2)))
        # Test number of items consumed     SF #1171417
        it = iter(range(10))
        self.assertEqual(list(islice(it, 3)), list(range(3)))
        self.assertEqual(list(it), list(range(3, 10)))
        # Test invalid arguments
        ra = range(10)
        self.assertRaises(TypeError, islice, ra)
        self.assertRaises(TypeError, islice, ra, 1, 2, 3, 4)
        self.assertRaises(ValueError, islice, ra, -5, 10, 1)
        self.assertRaises(ValueError, islice, ra, 1, -5, -1)
        self.assertRaises(ValueError, islice, ra, 1, 10, -1)
        self.assertRaises(ValueError, islice, ra, 1, 10, 0)
        self.assertRaises(ValueError, islice, ra, 'a')
        self.assertRaises(ValueError, islice, ra, 'a', 1)
        self.assertRaises(ValueError, islice, ra, 1, 'a')
        self.assertRaises(ValueError, islice, ra, 'a', 1, 1)
        self.assertRaises(ValueError, islice, ra, 1, 'a', 1)
        self.assertEqual(len(list(islice(count(), 1, 10, maxsize))), 1)
        # Issue #10323:  leave the source iterator in a predictable state
        # after islice() stops early.
        c = count()
        self.assertEqual(list(islice(c, 1, 3, 50)), [1])
        self.assertEqual(next(c), 3)
        # check copy, deepcopy, pickle
        for args in [          # islice(args) should agree with range(args)
                (10, 20, 3),
                (10, 3, 20),
                (10, 20),
                (10, 3),
                (20,)
                ]:
            self.assertEqual(list(copy.copy(islice(range(100), *args))),
                             list(range(*args)))
            self.assertEqual(list(copy.deepcopy(islice(range(100), *args))),
                             list(range(*args)))
            for proto in range(pickle.HIGHEST_PROTOCOL + 1):
                self.pickletest(proto, islice(range(100), *args))
        # Issue #21321: check source iterator is not referenced
        # from islice() after the latter has been exhausted
        it = (x for x in (1, 2))
        wr = weakref.ref(it)
        it = islice(it, 1)
        self.assertIsNotNone(wr())
        list(it) # exhaust the iterator
        support.gc_collect()
        self.assertIsNone(wr())
    def test_takewhile(self):
        # takewhile(): stops at the first failing element, argument errors,
        # explicit StopIteration after exhaustion, and copy/deepcopy/pickle.
        data = [1, 3, 5, 20, 2, 4, 6, 8]
        self.assertEqual(list(takewhile(underten, data)), [1, 3, 5])
        self.assertEqual(list(takewhile(underten, [])), [])
        self.assertRaises(TypeError, takewhile)
        self.assertRaises(TypeError, takewhile, operator.pow)
        self.assertRaises(TypeError, takewhile, operator.pow, [(4,5)], 'extra')
        self.assertRaises(TypeError, next, takewhile(10, [(4,5)]))
        self.assertRaises(ValueError, next, takewhile(errfunc, [(4,5)]))
        t = takewhile(bool, [1, 1, 1, 0, 0, 0])
        self.assertEqual(list(t), [1, 1, 1])
        self.assertRaises(StopIteration, next, t)
        # check copy, deepcopy, pickle
        self.assertEqual(list(copy.copy(takewhile(underten, data))), [1, 3, 5])
        self.assertEqual(list(copy.deepcopy(takewhile(underten, data))),
                        [1, 3, 5])
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            self.pickletest(proto, takewhile(underten, data))
    def test_dropwhile(self):
        # dropwhile(): skips the leading run that satisfies the predicate,
        # then yields everything; argument errors and copy/deepcopy/pickle.
        data = [1, 3, 5, 20, 2, 4, 6, 8]
        self.assertEqual(list(dropwhile(underten, data)), [20, 2, 4, 6, 8])
        self.assertEqual(list(dropwhile(underten, [])), [])
        self.assertRaises(TypeError, dropwhile)
        self.assertRaises(TypeError, dropwhile, operator.pow)
        self.assertRaises(TypeError, dropwhile, operator.pow, [(4,5)], 'extra')
        self.assertRaises(TypeError, next, dropwhile(10, [(4,5)]))
        self.assertRaises(ValueError, next, dropwhile(errfunc, [(4,5)]))
        # check copy, deepcopy, pickle
        self.assertEqual(list(copy.copy(dropwhile(underten, data))), [20, 2, 4, 6, 8])
        self.assertEqual(list(copy.deepcopy(dropwhile(underten, data))),
                        [20, 2, 4, 6, 8])
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            self.pickletest(proto, dropwhile(underten, data))
    def test_tee(self):
        # tee(): independent branch iterators over one source — interleaving
        # patterns, branch deallocation, n-way splits, weak references, and
        # copy/deepcopy/pickle of fresh and partially-consumed branches.
        n = 200
        a, b = tee([])        # test empty iterator
        self.assertEqual(list(a), [])
        self.assertEqual(list(b), [])
        a, b = tee(irange(n)) # test 100% interleaved
        self.assertEqual(lzip(a,b), lzip(range(n), range(n)))
        a, b = tee(irange(n)) # test 0% interleaved
        self.assertEqual(list(a), list(range(n)))
        self.assertEqual(list(b), list(range(n)))
        a, b = tee(irange(n)) # test dealloc of leading iterator
        for i in range(100):
            self.assertEqual(next(a), i)
        del a
        self.assertEqual(list(b), list(range(n)))
        a, b = tee(irange(n)) # test dealloc of trailing iterator
        for i in range(100):
            self.assertEqual(next(a), i)
        del b
        self.assertEqual(list(a), list(range(100, n)))
        for j in range(5):   # test randomly interleaved
            order = [0]*n + [1]*n
            random.shuffle(order)
            lists = ([], [])
            its = tee(irange(n))
            for i in order:
                value = next(its[i])
                lists[i].append(value)
            self.assertEqual(lists[0], list(range(n)))
            self.assertEqual(lists[1], list(range(n)))
        # test argument format checking
        self.assertRaises(TypeError, tee)
        self.assertRaises(TypeError, tee, 3)
        self.assertRaises(TypeError, tee, [1,2], 'x')
        self.assertRaises(TypeError, tee, [1,2], 3, 'x')
        # tee object should be instantiable
        a, b = tee('abc')
        c = type(a)('def')
        self.assertEqual(list(c), list('def'))
        # test long-lagged and multi-way split
        a, b, c = tee(range(2000), 3)
        for i in range(100):
            self.assertEqual(next(a), i)
        self.assertEqual(list(b), list(range(2000)))
        self.assertEqual([next(c), next(c)], list(range(2)))
        self.assertEqual(list(a), list(range(100,2000)))
        self.assertEqual(list(c), list(range(2,2000)))
        # test values of n
        self.assertRaises(TypeError, tee, 'abc', 'invalid')
        self.assertRaises(ValueError, tee, [], -1)
        for n in range(5):
            result = tee('abc', n)
            self.assertEqual(type(result), tuple)
            self.assertEqual(len(result), n)
            self.assertEqual([list(x) for x in result], [list('abc')]*n)
        # tee pass-through to copyable iterator: tee() of a tee branch
        # returns the branch itself rather than wrapping it again
        a, b = tee('abc')
        c, d = tee(a)
        self.assertTrue(a is c)
        # test tee_new
        t1, t2 = tee('abc')
        tnew = type(t1)
        self.assertRaises(TypeError, tnew)
        self.assertRaises(TypeError, tnew, 10)
        t3 = tnew(t1)
        self.assertTrue(list(t1) == list(t2) == list(t3) == list('abc'))
        # test that tee objects are weak referencable
        a, b = tee(range(10))
        p = weakref.proxy(a)
        self.assertEqual(getattr(p, '__class__'), type(b))
        del a
        self.assertRaises(ReferenceError, getattr, p, '__class__')
        ans = list('abc')
        long_ans = list(range(10000))
        # check copy
        a, b = tee('abc')
        self.assertEqual(list(copy.copy(a)), ans)
        self.assertEqual(list(copy.copy(b)), ans)
        a, b = tee(list(range(10000)))
        self.assertEqual(list(copy.copy(a)), long_ans)
        self.assertEqual(list(copy.copy(b)), long_ans)
        # check partially consumed copy
        a, b = tee('abc')
        take(2, a)
        take(1, b)
        self.assertEqual(list(copy.copy(a)), ans[2:])
        self.assertEqual(list(copy.copy(b)), ans[1:])
        self.assertEqual(list(a), ans[2:])
        self.assertEqual(list(b), ans[1:])
        a, b = tee(range(10000))
        take(100, a)
        take(60, b)
        self.assertEqual(list(copy.copy(a)), long_ans[100:])
        self.assertEqual(list(copy.copy(b)), long_ans[60:])
        self.assertEqual(list(a), long_ans[100:])
        self.assertEqual(list(b), long_ans[60:])
        # check deepcopy
        a, b = tee('abc')
        self.assertEqual(list(copy.deepcopy(a)), ans)
        self.assertEqual(list(copy.deepcopy(b)), ans)
        self.assertEqual(list(a), ans)
        self.assertEqual(list(b), ans)
        a, b = tee(range(10000))
        self.assertEqual(list(copy.deepcopy(a)), long_ans)
        self.assertEqual(list(copy.deepcopy(b)), long_ans)
        self.assertEqual(list(a), long_ans)
        self.assertEqual(list(b), long_ans)
        # check partially consumed deepcopy
        a, b = tee('abc')
        take(2, a)
        take(1, b)
        self.assertEqual(list(copy.deepcopy(a)), ans[2:])
        self.assertEqual(list(copy.deepcopy(b)), ans[1:])
        self.assertEqual(list(a), ans[2:])
        self.assertEqual(list(b), ans[1:])
        a, b = tee(range(10000))
        take(100, a)
        take(60, b)
        self.assertEqual(list(copy.deepcopy(a)), long_ans[100:])
        self.assertEqual(list(copy.deepcopy(b)), long_ans[60:])
        self.assertEqual(list(a), long_ans[100:])
        self.assertEqual(list(b), long_ans[60:])
        # check pickle
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            self.pickletest(proto, iter(tee('abc')))
            a, b = tee('abc')
            self.pickletest(proto, a, compare=ans)
            self.pickletest(proto, b, compare=ans)
# Issue 13454: Crash when deleting backward iterator from tee()
    def test_tee_del_backward(self):
        # Issue 13454: deleting the lagging tee branch after the leading one
        # has run far ahead must not crash while freeing the queued data.
        forward, backward = tee(repeat(None, 20000000))
        try:
            any(forward)  # exhaust the iterator
            del backward
        except:
            del forward, backward
            raise
    def test_StopIteration(self):
        # Every itertools iterator must raise StopIteration cleanly on an
        # empty input and on an input that stops immediately (StopNow).
        self.assertRaises(StopIteration, next, zip())
        for f in (chain, cycle, zip, groupby):
            self.assertRaises(StopIteration, next, f([]))
            self.assertRaises(StopIteration, next, f(StopNow()))
        self.assertRaises(StopIteration, next, islice([], None))
        self.assertRaises(StopIteration, next, islice(StopNow(), None))
        p, q = tee([])
        self.assertRaises(StopIteration, next, p)
        self.assertRaises(StopIteration, next, q)
        p, q = tee(StopNow())
        self.assertRaises(StopIteration, next, p)
        self.assertRaises(StopIteration, next, q)
        self.assertRaises(StopIteration, next, repeat(None, 0))
        for f in (filter, filterfalse, map, takewhile, dropwhile, starmap):
            self.assertRaises(StopIteration, next, f(lambda x:x, []))
            self.assertRaises(StopIteration, next, f(lambda x:x, StopNow()))
class TestExamples(unittest.TestCase):
    'One smoke test per itertools tool, mirroring the examples in the docs.'
    def test_accumulate(self):
        self.assertEqual(list(accumulate([1,2,3,4,5])), [1, 3, 6, 10, 15])
    def test_accumulate_reducible(self):
        # check copy, deepcopy, pickle
        data = [1, 2, 3, 4, 5]
        accumulated = [1, 3, 6, 10, 15]
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            it = accumulate(data)
            self.assertEqual(list(pickle.loads(pickle.dumps(it, proto))), accumulated[:])
            self.assertEqual(next(it), 1)
            self.assertEqual(list(pickle.loads(pickle.dumps(it, proto))), accumulated[1:])
        it = accumulate(data)
        self.assertEqual(next(it), 1)
        self.assertEqual(list(copy.deepcopy(it)), accumulated[1:])
        self.assertEqual(list(copy.copy(it)), accumulated[1:])
    def test_accumulate_reducible_none(self):
        # Issue #25718: total is None
        it = accumulate([None, None, None], operator.is_)
        self.assertEqual(next(it), None)
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            it_copy = pickle.loads(pickle.dumps(it, proto))
            self.assertEqual(list(it_copy), [True, False])
        self.assertEqual(list(copy.deepcopy(it)), [True, False])
        self.assertEqual(list(copy.copy(it)), [True, False])
    def test_chain(self):
        self.assertEqual(''.join(chain('ABC', 'DEF')), 'ABCDEF')
    def test_chain_from_iterable(self):
        self.assertEqual(''.join(chain.from_iterable(['ABC', 'DEF'])), 'ABCDEF')
    def test_combinations(self):
        self.assertEqual(list(combinations('ABCD', 2)),
                         [('A','B'), ('A','C'), ('A','D'), ('B','C'), ('B','D'), ('C','D')])
        self.assertEqual(list(combinations(range(4), 3)),
                         [(0,1,2), (0,1,3), (0,2,3), (1,2,3)])
    def test_combinations_with_replacement(self):
        self.assertEqual(list(combinations_with_replacement('ABC', 2)),
                         [('A','A'), ('A','B'), ('A','C'), ('B','B'), ('B','C'), ('C','C')])
    def test_compress(self):
        self.assertEqual(list(compress('ABCDEF', [1,0,1,0,1,1])), list('ACEF'))
    def test_count(self):
        self.assertEqual(list(islice(count(10), 5)), [10, 11, 12, 13, 14])
    def test_cycle(self):
        self.assertEqual(list(islice(cycle('ABCD'), 12)), list('ABCDABCDABCD'))
    def test_dropwhile(self):
        self.assertEqual(list(dropwhile(lambda x: x<5, [1,4,6,4,1])), [6,4,1])
    def test_groupby(self):
        self.assertEqual([k for k, g in groupby('AAAABBBCCDAABBB')],
                         list('ABCDAB'))
        self.assertEqual([(list(g)) for k, g in groupby('AAAABBBCCD')],
                         [list('AAAA'), list('BBB'), list('CC'), list('D')])
    def test_filter(self):
        self.assertEqual(list(filter(lambda x: x%2, range(10))), [1,3,5,7,9])
    def test_filterfalse(self):
        self.assertEqual(list(filterfalse(lambda x: x%2, range(10))), [0,2,4,6,8])
    def test_map(self):
        self.assertEqual(list(map(pow, (2,3,10), (5,2,3))), [32, 9, 1000])
    def test_islice(self):
        self.assertEqual(list(islice('ABCDEFG', 2)), list('AB'))
        self.assertEqual(list(islice('ABCDEFG', 2, 4)), list('CD'))
        self.assertEqual(list(islice('ABCDEFG', 2, None)), list('CDEFG'))
        self.assertEqual(list(islice('ABCDEFG', 0, None, 2)), list('ACEG'))
    def test_zip(self):
        self.assertEqual(list(zip('ABCD', 'xy')), [('A', 'x'), ('B', 'y')])
    def test_zip_longest(self):
        self.assertEqual(list(zip_longest('ABCD', 'xy', fillvalue='-')),
                         [('A', 'x'), ('B', 'y'), ('C', '-'), ('D', '-')])
    def test_permutations(self):
        self.assertEqual(list(permutations('ABCD', 2)),
                         list(map(tuple, 'AB AC AD BA BC BD CA CB CD DA DB DC'.split())))
        self.assertEqual(list(permutations(range(3))),
                         [(0,1,2), (0,2,1), (1,0,2), (1,2,0), (2,0,1), (2,1,0)])
    def test_product(self):
        self.assertEqual(list(product('ABCD', 'xy')),
                         list(map(tuple, 'Ax Ay Bx By Cx Cy Dx Dy'.split())))
        self.assertEqual(list(product(range(2), repeat=3)),
                         [(0,0,0), (0,0,1), (0,1,0), (0,1,1),
                          (1,0,0), (1,0,1), (1,1,0), (1,1,1)])
    def test_repeat(self):
        self.assertEqual(list(repeat(10, 3)), [10, 10, 10])
    def test_starmap(self):
        # renamed from the misspelled 'test_stapmap'
        self.assertEqual(list(starmap(pow, [(2,5), (3,2), (10,3)])),
                         [32, 9, 1000])
    def test_takewhile(self):
        self.assertEqual(list(takewhile(lambda x: x<5, [1,4,6,4,1])), [1,4])
class TestGC(unittest.TestCase):
    'Each itertools object must participate in cyclic garbage collection.'
    def makecycle(self, iterator, container):
        # Build a reference cycle iterator -> container -> iterator, start the
        # iterator, then drop both local references; only the cycle collector
        # can reclaim them afterwards.
        container.append(iterator)
        next(iterator)
        del container, iterator
    def test_accumulate(self):
        a = []
        self.makecycle(accumulate([1,2,a,3]), a)
    def test_chain(self):
        a = []
        self.makecycle(chain(a), a)
    def test_chain_from_iterable(self):
        a = []
        self.makecycle(chain.from_iterable([a]), a)
    def test_combinations(self):
        a = []
        self.makecycle(combinations([1,2,a,3], 3), a)
    def test_combinations_with_replacement(self):
        a = []
        self.makecycle(combinations_with_replacement([1,2,a,3], 3), a)
    def test_compress(self):
        a = []
        self.makecycle(compress('ABCDEF', [1,0,1,0,1,0]), a)
    def test_count(self):
        a = []
        # the int subclass carries the container so count() joins the cycle
        Int = type('Int', (int,), dict(x=a))
        self.makecycle(count(Int(0), Int(1)), a)
    def test_cycle(self):
        a = []
        self.makecycle(cycle([a]*2), a)
    def test_dropwhile(self):
        a = []
        self.makecycle(dropwhile(bool, [0, a, a]), a)
    def test_groupby(self):
        a = []
        self.makecycle(groupby([a]*2, lambda x:x), a)
    def test_issue2246(self):
        # Issue 2246 -- the _grouper iterator was not included in GC
        n = 10
        keyfunc = lambda x: x
        for i, j in groupby(range(n), key=keyfunc):
            keyfunc.__dict__.setdefault('x',[]).append(j)
    def test_filter(self):
        a = []
        self.makecycle(filter(lambda x:True, [a]*2), a)
    def test_filterfalse(self):
        a = []
        self.makecycle(filterfalse(lambda x:False, a), a)
    def test_zip(self):
        a = []
        self.makecycle(zip([a]*2, [a]*3), a)
    def test_zip_longest(self):
        a = []
        self.makecycle(zip_longest([a]*2, [a]*3), a)
        b = [a, None]
        self.makecycle(zip_longest([a]*2, [a]*3, fillvalue=b), a)
    def test_map(self):
        a = []
        self.makecycle(map(lambda x:x, [a]*2), a)
    def test_islice(self):
        a = []
        self.makecycle(islice([a]*2, None), a)
    def test_permutations(self):
        a = []
        self.makecycle(permutations([1,2,a,3], 3), a)
    def test_product(self):
        a = []
        self.makecycle(product([1,2,a,3], repeat=3), a)
    def test_repeat(self):
        a = []
        self.makecycle(repeat(a), a)
    def test_starmap(self):
        a = []
        self.makecycle(starmap(lambda *t: t, [(a,a)]*2), a)
    def test_takewhile(self):
        a = []
        self.makecycle(takewhile(bool, [1, 0, a, a]), a)
def R(seqn):
    'Plain generator that yields every element of seqn'
    yield from seqn
class G:
    'Old-style sequence: iterable only through __getitem__'
    def __init__(self, seqn):
        self.seqn = seqn
    def __getitem__(self, index):
        return self.seqn[index]
class I:
    'Iterator protocol done by hand: __iter__ returns self, __next__ walks an index'
    def __init__(self, seqn):
        self.seqn = seqn
        self.i = 0
    def __iter__(self):
        return self
    def __next__(self):
        if self.i >= len(self.seqn):
            raise StopIteration
        item = self.seqn[self.i]
        self.i += 1
        return item
class Ig:
    'Iterable whose __iter__ is a generator function'
    def __init__(self, seqn):
        self.seqn = seqn
        self.i = 0  # unused; kept for parity with the sibling fixture classes
    def __iter__(self):
        yield from self.seqn
class X:
    'Has __next__ but neither __getitem__ nor __iter__, so it is not iterable'
    def __init__(self, seqn):
        self.seqn = seqn
        self.i = 0
    def __next__(self):
        if self.i >= len(self.seqn):
            raise StopIteration
        item = self.seqn[self.i]
        self.i += 1
        return item
class N:
'Iterator missing __next__()'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
class E:
    'Iterator whose __next__ always fails, to test exception propagation'
    def __init__(self, seqn):
        self.seqn = seqn
        self.i = 0
    def __iter__(self):
        return self
    def __next__(self):
        3 // 0  # deliberate ZeroDivisionError on every step
class S:
    'Iterator that is exhausted from the start (ignores its input)'
    def __init__(self, seqn):
        pass
    def __iter__(self):
        return self
    def __next__(self):
        raise StopIteration
def L(seqn):
    'Stack several iterator layers (chain/map/generator/class-based) over seqn'
    layered = R(Ig(G(seqn)))
    return chain(map(lambda item: item, layered))
class TestVariousIteratorArgs(unittest.TestCase):
def test_accumulate(self):
s = [1,2,3,4,5]
r = [1,3,6,10,15]
n = len(s)
for g in (G, I, Ig, L, R):
self.assertEqual(list(accumulate(g(s))), r)
self.assertEqual(list(accumulate(S(s))), [])
self.assertRaises(TypeError, accumulate, X(s))
self.assertRaises(TypeError, accumulate, N(s))
self.assertRaises(ZeroDivisionError, list, accumulate(E(s)))
    def test_chain(self):
        # chain() over every fixture iterator type and input shape.
        for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
            for g in (G, I, Ig, S, L, R):
                self.assertEqual(list(chain(g(s))), list(g(s)))
                self.assertEqual(list(chain(g(s), g(s))), list(g(s))+list(g(s)))
            self.assertRaises(TypeError, list, chain(X(s)))
            self.assertRaises(TypeError, list, chain(N(s)))
            self.assertRaises(ZeroDivisionError, list, chain(E(s)))
def test_compress(self):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
n = len(s)
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(compress(g(s), repeat(1))), list(g(s)))
self.assertRaises(TypeError, compress, X(s), repeat(1))
self.assertRaises(TypeError, compress, N(s), repeat(1))
self.assertRaises(ZeroDivisionError, list, compress(E(s), repeat(1)))
    def test_product(self):
        # product() consumes its inputs eagerly at construction, so bad
        # inputs raise immediately — no list() wrapper needed.
        for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
            self.assertRaises(TypeError, product, X(s))
            self.assertRaises(TypeError, product, N(s))
            self.assertRaises(ZeroDivisionError, product, E(s))
    def test_cycle(self):
        # cycle(): three full passes over every fixture iterator type, taken
        # via islice since cycle() is infinite on non-empty input.
        for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
            for g in (G, I, Ig, S, L, R):
                tgtlen = len(s) * 3
                expected = list(g(s))*3
                actual = list(islice(cycle(g(s)), tgtlen))
                self.assertEqual(actual, expected)
            self.assertRaises(TypeError, cycle, X(s))
            self.assertRaises(TypeError, cycle, N(s))
            self.assertRaises(ZeroDivisionError, list, cycle(E(s)))
    def test_groupby(self):
        # groupby() with the default key: on all-distinct input the group
        # keys reproduce the input, for every fixture iterator type.
        for s in (range(10), range(0), range(1000), (7,11), range(2000,2200,5)):
            for g in (G, I, Ig, S, L, R):
                self.assertEqual([k for k, sb in groupby(g(s))], list(g(s)))
            self.assertRaises(TypeError, groupby, X(s))
            self.assertRaises(TypeError, groupby, N(s))
            self.assertRaises(ZeroDivisionError, list, groupby(E(s)))
    def test_filter(self):
        # filter() against a list-comprehension reference, for every fixture
        # iterator type.
        for s in (range(10), range(0), range(1000), (7,11), range(2000,2200,5)):
            for g in (G, I, Ig, S, L, R):
                self.assertEqual(list(filter(isEven, g(s))),
                                 [x for x in g(s) if isEven(x)])
            self.assertRaises(TypeError, filter, isEven, X(s))
            self.assertRaises(TypeError, filter, isEven, N(s))
            self.assertRaises(ZeroDivisionError, list, filter(isEven, E(s)))
    def test_filterfalse(self):
        # filterfalse(isEven) must equal keeping the odd elements, for every
        # fixture iterator type.
        for s in (range(10), range(0), range(1000), (7,11), range(2000,2200,5)):
            for g in (G, I, Ig, S, L, R):
                self.assertEqual(list(filterfalse(isEven, g(s))),
                                 [x for x in g(s) if isOdd(x)])
            self.assertRaises(TypeError, filterfalse, isEven, X(s))
            self.assertRaises(TypeError, filterfalse, isEven, N(s))
            self.assertRaises(ZeroDivisionError, list, filterfalse(isEven, E(s)))
    def test_zip(self):
        # zip() against the eager lzip() reference, for every fixture
        # iterator type.
        for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
            for g in (G, I, Ig, S, L, R):
                self.assertEqual(list(zip(g(s))), lzip(g(s)))
                self.assertEqual(list(zip(g(s), g(s))), lzip(g(s), g(s)))
            self.assertRaises(TypeError, zip, X(s))
            self.assertRaises(TypeError, zip, N(s))
            self.assertRaises(ZeroDivisionError, list, zip(E(s)))
    def test_ziplongest(self):
        # With equal-length inputs zip_longest() matches zip(), for every
        # fixture iterator type.
        for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
            for g in (G, I, Ig, S, L, R):
                self.assertEqual(list(zip_longest(g(s))), list(zip(g(s))))
                self.assertEqual(list(zip_longest(g(s), g(s))), list(zip(g(s), g(s))))
            self.assertRaises(TypeError, zip_longest, X(s))
            self.assertRaises(TypeError, zip_longest, N(s))
            self.assertRaises(ZeroDivisionError, list, zip_longest(E(s)))
def test_map(self):
for s in (range(10), range(0), range(100), (7,11), range(20,50,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(map(onearg, g(s))),
[onearg(x) for x in g(s)])
self.assertEqual(list(map(operator.pow, g(s), g(s))),
[x**x for x in g(s)])
self.assertRaises(TypeError, map, onearg, X(s))
self.assertRaises(TypeError, map, onearg, N(s))
self.assertRaises(ZeroDivisionError, list, map(onearg, E(s)))
def test_islice(self):
for s in ("12345", "", range(1000), ('do', 1.2), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(islice(g(s),1,None,2)), list(g(s))[1::2])
self.assertRaises(TypeError, islice, X(s), 10)
self.assertRaises(TypeError, islice, N(s), 10)
self.assertRaises(ZeroDivisionError, list, islice(E(s), 10))
def test_starmap(self):
for s in (range(10), range(0), range(100), (7,11), range(20,50,5)):
for g in (G, I, Ig, S, L, R):
ss = lzip(s, s)
self.assertEqual(list(starmap(operator.pow, g(ss))),
[x**x for x in g(s)])
self.assertRaises(TypeError, starmap, operator.pow, X(ss))
self.assertRaises(TypeError, starmap, operator.pow, N(ss))
self.assertRaises(ZeroDivisionError, list, starmap(operator.pow, E(ss)))
def test_takewhile(self):
for s in (range(10), range(0), range(1000), (7,11), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
tgt = []
for elem in g(s):
if not isEven(elem): break
tgt.append(elem)
self.assertEqual(list(takewhile(isEven, g(s))), tgt)
self.assertRaises(TypeError, takewhile, isEven, X(s))
self.assertRaises(TypeError, takewhile, isEven, N(s))
self.assertRaises(ZeroDivisionError, list, takewhile(isEven, E(s)))
def test_dropwhile(self):
for s in (range(10), range(0), range(1000), (7,11), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
tgt = []
for elem in g(s):
if not tgt and isOdd(elem): continue
tgt.append(elem)
self.assertEqual(list(dropwhile(isOdd, g(s))), tgt)
self.assertRaises(TypeError, dropwhile, isOdd, X(s))
self.assertRaises(TypeError, dropwhile, isOdd, N(s))
self.assertRaises(ZeroDivisionError, list, dropwhile(isOdd, E(s)))
def test_tee(self):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
it1, it2 = tee(g(s))
self.assertEqual(list(it1), list(g(s)))
self.assertEqual(list(it2), list(g(s)))
self.assertRaises(TypeError, tee, X(s))
self.assertRaises(TypeError, tee, N(s))
self.assertRaises(ZeroDivisionError, list, tee(E(s))[0])
class LengthTransparency(unittest.TestCase):
    """operator.length_hint must see through repeat()'s remaining count."""

    def test_repeat(self):
        self.assertEqual(operator.length_hint(repeat(None, 50)), 50)
        self.assertEqual(operator.length_hint(repeat(None, 0)), 0)
        # An unbounded repeat has no hint; the supplied default is returned.
        self.assertEqual(operator.length_hint(repeat(None), 12), 12)

    def test_repeat_with_negative_times(self):
        # Negative repeat counts clamp the hint to zero, positionally or by keyword.
        for times in (-1, -2):
            self.assertEqual(operator.length_hint(repeat(None, times)), 0)
            self.assertEqual(operator.length_hint(repeat(None, times=times)), 0)
class RegressionTests(unittest.TestCase):
    """Regression tests for historical itertools crashes (SourceForge bugs)."""

    def test_sf_793826(self):
        # Fix Armin Rigo's successful efforts to wreak havoc

        def mutatingtuple(tuple1, f, tuple2):
            # this builds a tuple t which is a copy of tuple1,
            # then calls f(t), then mutates t to be equal to tuple2
            # (needs len(tuple1) == len(tuple2)).
            def g(value, first=[1]):
                # `first` is a deliberately mutable default: it makes the
                # callback fire exactly once, on the first mapped element.
                if first:
                    del first[:]
                    f(next(z))
                return value
            items = list(tuple2)
            items[1:1] = list(tuple1)
            gen = map(g, items)
            # zip over len(tuple1) references to the SAME generator, so each
            # output tuple consumes len(tuple1) consecutive items.
            z = zip(*[gen]*len(tuple1))
            next(z)

        def f(t):
            # Capture the tuple handed out mid-iteration; it must not be
            # mutated under the caller's feet.
            global T
            T = t
            first[:] = list(T)

        first = []
        mutatingtuple((1,2,3), f, (4,5,6))
        second = list(T)
        self.assertEqual(first, second)

    def test_sf_950057(self):
        #  Make sure that chain() and cycle() catch exceptions immediately
        #  rather than when shifting between input sources
        def gen1():
            hist.append(0)
            yield 1
            hist.append(1)
            raise AssertionError
            hist.append(2)  # unreachable by design: the raise must propagate
        def gen2(x):
            hist.append(3)
            yield 2
            hist.append(4)
        # In every case the error must surface before gen2 is ever started,
        # i.e. hist stops at [0, 1].
        hist = []
        self.assertRaises(AssertionError, list, chain(gen1(), gen2(False)))
        self.assertEqual(hist, [0,1])
        hist = []
        self.assertRaises(AssertionError, list, chain(gen1(), gen2(True)))
        self.assertEqual(hist, [0,1])
        hist = []
        self.assertRaises(AssertionError, list, cycle(gen1()))
        self.assertEqual(hist, [0,1])
class SubclassWithKwargsTest(unittest.TestCase):
    """Subclasses adding keyword parameters must not trip the C base types."""

    def test_keywords_in_subclass(self):
        # count is not subclassable...
        bases = (repeat, zip, filter, filterfalse, chain, map,
                 starmap, islice, takewhile, dropwhile, cycle, compress)
        for base in bases:
            class Subclass(base):
                def __init__(self, newarg=None, *args):
                    base.__init__(self, *args)
            try:
                Subclass(newarg=1)
            except TypeError as err:
                # we expect type errors because of wrong argument count;
                # what must NOT appear is a blanket kwargs rejection.
                self.assertNotIn("does not take keyword arguments", err.args[0])
@support.cpython_only
class SizeofTest(unittest.TestCase):
    """Verify sys.getsizeof() for itertools objects (CPython layout specific)."""

    def setUp(self):
        # Width of one Py_ssize_t on this platform; the combinatoric objects
        # store one per "slot" below their fixed-size header.
        self.ssize_t = struct.calcsize('n')

    check_sizeof = support.check_sizeof

    def test_product_sizeof(self):
        # Header is 3 pointers + 1 int, then one slot per input pool.
        basesize = support.calcobjsize('3Pi')
        check = self.check_sizeof
        check(product('ab', '12'), basesize + 2 * self.ssize_t)
        check(product(*(('abc',) * 10)), basesize + 10 * self.ssize_t)

    def test_combinations_sizeof(self):
        # Header adds one Py_ssize_t ('n') for r; one slot per selected index.
        basesize = support.calcobjsize('3Pni')
        check = self.check_sizeof
        check(combinations('abcd', 3), basesize + 3 * self.ssize_t)
        check(combinations(range(10), 4), basesize + 4 * self.ssize_t)

    def test_combinations_with_replacement_sizeof(self):
        cwr = combinations_with_replacement
        basesize = support.calcobjsize('3Pni')
        check = self.check_sizeof
        check(cwr('abcd', 3), basesize + 3 * self.ssize_t)
        check(cwr(range(10), 4), basesize + 4 * self.ssize_t)

    def test_permutations_sizeof(self):
        # permutations stores both the full index array (n slots) and the
        # cycle counters (r slots).
        basesize = support.calcobjsize('4Pni')
        check = self.check_sizeof
        check(permutations('abcd'),
              basesize + 4 * self.ssize_t + 4 * self.ssize_t)
        check(permutations('abcd', 3),
              basesize + 4 * self.ssize_t + 3 * self.ssize_t)
        check(permutations('abcde', 3),
              basesize + 5 * self.ssize_t + 3 * self.ssize_t)
        check(permutations(range(10), 4),
              basesize + 10 * self.ssize_t + 4 * self.ssize_t)
libreftest = """ Doctest for examples in the library reference: libitertools.tex
>>> amounts = [120.15, 764.05, 823.14]
>>> for checknum, amount in zip(count(1200), amounts):
... print('Check %d is for $%.2f' % (checknum, amount))
...
Check 1200 is for $120.15
Check 1201 is for $764.05
Check 1202 is for $823.14
>>> import operator
>>> for cube in map(operator.pow, range(1,4), repeat(3)):
... print(cube)
...
1
8
27
>>> reportlines = ['EuroPython', 'Roster', '', 'alex', '', 'laura', '', 'martin', '', 'walter', '', 'samuele']
>>> for name in islice(reportlines, 3, None, 2):
... print(name.title())
...
Alex
Laura
Martin
Walter
Samuele
>>> from operator import itemgetter
>>> d = dict(a=1, b=2, c=1, d=2, e=1, f=2, g=3)
>>> di = sorted(sorted(d.items()), key=itemgetter(1))
>>> for k, g in groupby(di, itemgetter(1)):
... print(k, list(map(itemgetter(0), g)))
...
1 ['a', 'c', 'e']
2 ['b', 'd', 'f']
3 ['g']
# Find runs of consecutive numbers using groupby. The key to the solution
# is differencing with a range so that consecutive numbers all appear in
# same group.
>>> data = [ 1, 4,5,6, 10, 15,16,17,18, 22, 25,26,27,28]
>>> for k, g in groupby(enumerate(data), lambda t:t[0]-t[1]):
... print(list(map(operator.itemgetter(1), g)))
...
[1]
[4, 5, 6]
[10]
[15, 16, 17, 18]
[22]
[25, 26, 27, 28]
>>> def take(n, iterable):
... "Return first n items of the iterable as a list"
... return list(islice(iterable, n))
>>> def enumerate(iterable, start=0):
... return zip(count(start), iterable)
>>> def tabulate(function, start=0):
... "Return function(0), function(1), ..."
... return map(function, count(start))
>>> def nth(iterable, n, default=None):
... "Returns the nth item or a default value"
... return next(islice(iterable, n, None), default)
>>> def all_equal(iterable):
... "Returns True if all the elements are equal to each other"
... g = groupby(iterable)
... return next(g, True) and not next(g, False)
>>> def quantify(iterable, pred=bool):
... "Count how many times the predicate is true"
... return sum(map(pred, iterable))
>>> def padnone(iterable):
... "Returns the sequence elements and then returns None indefinitely"
... return chain(iterable, repeat(None))
>>> def ncycles(iterable, n):
... "Returns the sequence elements n times"
... return chain(*repeat(iterable, n))
>>> def dotproduct(vec1, vec2):
... return sum(map(operator.mul, vec1, vec2))
>>> def flatten(listOfLists):
... return list(chain.from_iterable(listOfLists))
>>> def repeatfunc(func, times=None, *args):
... "Repeat calls to func with specified arguments."
... " Example: repeatfunc(random.random)"
... if times is None:
... return starmap(func, repeat(args))
... else:
... return starmap(func, repeat(args, times))
>>> def pairwise(iterable):
... "s -> (s0,s1), (s1,s2), (s2, s3), ..."
... a, b = tee(iterable)
... try:
... next(b)
... except StopIteration:
... pass
... return zip(a, b)
>>> def grouper(n, iterable, fillvalue=None):
... "grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"
... args = [iter(iterable)] * n
... return zip_longest(*args, fillvalue=fillvalue)
>>> def roundrobin(*iterables):
... "roundrobin('ABC', 'D', 'EF') --> A D E B F C"
... # Recipe credited to George Sakkis
... pending = len(iterables)
... nexts = cycle(iter(it).__next__ for it in iterables)
... while pending:
... try:
... for next in nexts:
... yield next()
... except StopIteration:
... pending -= 1
... nexts = cycle(islice(nexts, pending))
>>> def powerset(iterable):
... "powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)"
... s = list(iterable)
... return chain.from_iterable(combinations(s, r) for r in range(len(s)+1))
>>> def unique_everseen(iterable, key=None):
... "List unique elements, preserving order. Remember all elements ever seen."
... # unique_everseen('AAAABBBCCDAABBB') --> A B C D
... # unique_everseen('ABBCcAD', str.lower) --> A B C D
... seen = set()
... seen_add = seen.add
... if key is None:
... for element in iterable:
... if element not in seen:
... seen_add(element)
... yield element
... else:
... for element in iterable:
... k = key(element)
... if k not in seen:
... seen_add(k)
... yield element
>>> def unique_justseen(iterable, key=None):
... "List unique elements, preserving order. Remember only the element just seen."
... # unique_justseen('AAAABBBCCDAABBB') --> A B C D A B
... # unique_justseen('ABBCcAD', str.lower) --> A B C A D
... return map(next, map(itemgetter(1), groupby(iterable, key)))
>>> def first_true(iterable, default=False, pred=None):
... '''Returns the first true value in the iterable.
...
... If no true value is found, returns *default*
...
... If *pred* is not None, returns the first item
... for which pred(item) is true.
...
... '''
... # first_true([a,b,c], x) --> a or b or c or x
... # first_true([a,b], x, f) --> a if f(a) else b if f(b) else x
... return next(filter(pred, iterable), default)
This is not part of the examples but it tests to make sure the definitions
perform as purported.
>>> take(10, count())
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> list(enumerate('abc'))
[(0, 'a'), (1, 'b'), (2, 'c')]
>>> list(islice(tabulate(lambda x: 2*x), 4))
[0, 2, 4, 6]
>>> nth('abcde', 3)
'd'
>>> nth('abcde', 9) is None
True
>>> [all_equal(s) for s in ('', 'A', 'AAAA', 'AAAB', 'AAABA')]
[True, True, True, False, False]
>>> quantify(range(99), lambda x: x%2==0)
50
>>> a = [[1, 2, 3], [4, 5, 6]]
>>> flatten(a)
[1, 2, 3, 4, 5, 6]
>>> list(repeatfunc(pow, 5, 2, 3))
[8, 8, 8, 8, 8]
>>> import random
>>> take(5, map(int, repeatfunc(random.random)))
[0, 0, 0, 0, 0]
>>> list(pairwise('abcd'))
[('a', 'b'), ('b', 'c'), ('c', 'd')]
>>> list(pairwise([]))
[]
>>> list(pairwise('a'))
[]
>>> list(islice(padnone('abc'), 0, 6))
['a', 'b', 'c', None, None, None]
>>> list(ncycles('abc', 3))
['a', 'b', 'c', 'a', 'b', 'c', 'a', 'b', 'c']
>>> dotproduct([1,2,3], [4,5,6])
32
>>> list(grouper(3, 'abcdefg', 'x'))
[('a', 'b', 'c'), ('d', 'e', 'f'), ('g', 'x', 'x')]
>>> list(roundrobin('abc', 'd', 'ef'))
['a', 'd', 'e', 'b', 'f', 'c']
>>> list(powerset([1,2,3]))
[(), (1,), (2,), (3,), (1, 2), (1, 3), (2, 3), (1, 2, 3)]
>>> all(len(list(powerset(range(n)))) == 2**n for n in range(18))
True
>>> list(powerset('abcde')) == sorted(sorted(set(powerset('abcde'))), key=len)
True
>>> list(unique_everseen('AAAABBBCCDAABBB'))
['A', 'B', 'C', 'D']
>>> list(unique_everseen('ABBCcAD', str.lower))
['A', 'B', 'C', 'D']
>>> list(unique_justseen('AAAABBBCCDAABBB'))
['A', 'B', 'C', 'D', 'A', 'B']
>>> list(unique_justseen('ABBCcAD', str.lower))
['A', 'B', 'C', 'A', 'D']
>>> first_true('ABC0DEF1', '9', str.isdigit)
'0'
"""
__test__ = {'libreftest' : libreftest}
def test_main(verbose=None):
    """Run the itertools test suite.

    With ``verbose`` set on a debug (refcount-tracking) build, the suite is
    re-run several times while watching sys.gettotalrefcount() for leaks,
    and the libreftest doctest examples are executed as well.
    """
    test_classes = (TestBasicOps, TestVariousIteratorArgs, TestGC,
                    RegressionTests, LengthTransparency,
                    SubclassWithKwargsTest, TestExamples,
                    SizeofTest)
    support.run_unittest(*test_classes)

    # verify reference counting
    if verbose and hasattr(sys, "gettotalrefcount"):
        import gc
        counts = [None] * 5
        for i in range(len(counts)):
            support.run_unittest(*test_classes)
            gc.collect()
            counts[i] = sys.gettotalrefcount()
        # A steadily growing series here indicates a reference leak.
        print(counts)

    # doctest the examples in the library reference
    support.run_doctest(sys.modules[__name__], verbose)

if __name__ == "__main__":
    test_main(verbose=True)
| 40.803603 | 124 | 0.545769 |
ecfa518e599a266ce843bd03ca710315d6bc0d24 | 1,805 | py | Python | example-project/find_duplicate_items.py | alexdevmotion/scrapy-elastic-web-crawling | ad09698bf597ed67a56a0c9e6b0dfad0f7798653 | [
"BSD-3-Clause"
] | null | null | null | example-project/find_duplicate_items.py | alexdevmotion/scrapy-elastic-web-crawling | ad09698bf597ed67a56a0c9e6b0dfad0f7798653 | [
"BSD-3-Clause"
] | null | null | null | example-project/find_duplicate_items.py | alexdevmotion/scrapy-elastic-web-crawling | ad09698bf597ed67a56a0c9e6b0dfad0f7798653 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import redis
import sys
import time
start_time = time.time()
def main(id, no_processes):
    """Scan this process's shard of the ``dmoz:items`` Redis list and push
    every item whose ``link`` was already seen onto ``dmoz:blacklist``.

    :param id: 0-based shard index of this process.
    :param no_processes: total number of cooperating processes.
    """
    # NOTE(review): host is hard-coded — presumably a dev instance; consider
    # reading it from configuration.
    r = redis.Redis(host='54.201.225.114')
    no_items = r.llen("dmoz:items")
    main_interval = getMainInterval(no_items, no_processes, id)
    all_items = r.lrange("dmoz:items", 0, no_items - 1)
    # Intervals are inclusive [start, end]; the slice end therefore needs +1.
    # (The original used [start:end] and silently skipped the last item of
    # every shard.)
    main_items = all_items[main_interval[0]:main_interval[1] + 1]

    seen_links = set()
    for raw_item in main_items:
        item = json.loads(raw_item)
        if item["link"] not in seen_links:
            seen_links.add(item["link"])
        else:
            # BUG FIX: push the raw JSON string — redis-py cannot serialize
            # the parsed dict that the original passed to lpush.
            r.lpush("dmoz:blacklist", raw_item)

    # Re-scan every other shard for links duplicating ones seen in ours.
    for interval in getRemainingIntervals(no_items, no_processes, id):
        for raw_item in all_items[interval[0]:interval[1] + 1]:
            if json.loads(raw_item)["link"] in seen_links:
                r.lpush("dmoz:blacklist", raw_item)

    print("--- %i duplicates found in %s seconds ---"
          % (r.llen("dmoz:blacklist"), time.time() - start_time))
def getMainInterval(len, no_processes, id):
    """Return the inclusive [start, end] item-index interval owned by shard ``id``.

    Each of ``no_processes`` shards gets ceil(len / no_processes) consecutive
    items; the last interval is clamped to the final index.  (The parameter
    name ``len`` shadows the builtin but is kept for interface compatibility.)
    """
    total = len  # alias so the shadowed builtin isn't used below
    # BUG FIX: the original computed total / no_processes with true division,
    # which yields a float in Python 3 and made the returned bounds unusable
    # as slice indices.  Use integer ceiling division instead.
    chunk_len = -(-total // no_processes)
    start = id * chunk_len
    end = min((id + 1) * chunk_len - 1, total - 1)
    return [start, end]
def getRemainingIntervals(len, no_processes, id):
    """Return the index intervals of every shard except shard ``id``."""
    return [getMainInterval(len, no_processes, other)
            for other in range(no_processes)
            if other != id]
if __name__ == '__main__':
    # Usage: find_duplicate_items.py <shard_id> <total_processes>
    shard_index = int(sys.argv[1])
    process_count = int(sys.argv[2])
    main(shard_index, process_count)
| 34.711538 | 113 | 0.651524 |
d0ca05fd3771f129f591032df0b9cf0943a23bab | 7,407 | py | Python | plot_metrics.py | crepuscularlight/arcnet | 6c05eb0dcd7445f6efd59311bc81dd9a39db0506 | [
"MIT"
] | 1 | 2021-09-03T06:57:46.000Z | 2021-09-03T06:57:46.000Z | plot_metrics.py | crepuscularlight/arcnet | 6c05eb0dcd7445f6efd59311bc81dd9a39db0506 | [
"MIT"
] | null | null | null | plot_metrics.py | crepuscularlight/arcnet | 6c05eb0dcd7445f6efd59311bc81dd9a39db0506 | [
"MIT"
] | null | null | null | import json
import matplotlib.pyplot as plt
import numpy as np
import argparse
def load_json_arr(json_path):
lines = []
with open(json_path, 'r') as f:
for line in f:
lines.append(json.loads(line))
return lines
# Parsing global arguments
parser = argparse.ArgumentParser(description='Custom implementation of Detectron2 using the TACO dataset.')
parser.add_argument('--data_path', required=True, default='./output/metrics.json', metavar="/path/file.json", help='Data to Metrics')
parser.add_argument('--metrics_left', required=True, default='[metrics]', metavar="metrics to plot", nargs='+', help='metrics to plot as an array')
parser.add_argument('--metrics_right', required=False, default='[metrics]', metavar="metrics to plot", nargs='+', help='metrics to plot as an array')
args = parser.parse_args()

# # # # Configurations # # #
color_base = 'black'
color_APsml = 'tab:orange'
color_AP = 'tab:blue'
lr = "lr 0.01"
freeze = "freeze at 2"
experiment_folder = './output'  # kept for parity with the original script (unused)
experiment_metrics = load_json_arr(args.data_path)
# # # # # # # # # # # # # #


def _series(metric):
    """Return (iterations, values) for every metrics record containing `metric`."""
    xs = [rec['iteration'] for rec in experiment_metrics if metric in rec]
    ys = [rec[metric] for rec in experiment_metrics if metric in rec]
    return xs, ys


# Generic dual-axis plot of the user-selected metrics: --metrics_left on the
# left y-axis (solid), --metrics_right on the right y-axis (dashed).
fig, ax1 = plt.subplots()
ax1.set_xlabel('Iteration')
for metric in args.metrics_left:
    ax1.plot(*_series(metric), color=color_base, label=metric)
ax1.tick_params(axis='y')
plt.legend(loc='best')

ax2 = ax1.twinx()
# NOTE(review): with the default string value '[metrics]' this labels the axis
# with its first character; harmless, but worth fixing to a proper default.
ax2.set_ylabel(args.metrics_right[0])
if args.metrics_right is not None:  # always true given the argparse default
    for metric in args.metrics_right:
        ax2.plot(*_series(metric), color=color_base, label=metric, linestyle="dashed")
ax2.tick_params(axis='y')
plt.legend(loc='upper right')
plt.title(f"MRCNN Metrics - TACO at {lr}")
plt.show()

# ---------------------------------------------------------------------------
# Legacy report panels.
#
# BUG FIX: in the original these panels followed a bare `exit()` and were
# unreachable dead code.  They are preserved here as functions behind an
# explicit flag so they can be re-enabled without editing control flow.
# The near-identical bbox/segm panels are consolidated; the original segm
# panel filtered records on 'bbox/*' keys while plotting 'segm/*' values,
# and the bbox APm series was filtered on 'bbox/APs' — both fixed by using
# one consistent key per series.
# ---------------------------------------------------------------------------
PLOT_LEGACY_PANELS = False


def _plot_total_and_validation_loss():
    """Panel: training total loss together with validation loss."""
    plt.plot(*_series('total_loss'))
    plt.plot(*_series('total_val_loss'))
    plt.legend(['total loss', 'validation loss'], loc='upper right')
    plt.title(f"Total loss and Validation Loss for MRCNN Trained on TACO - {lr}")
    plt.xlabel("Iteration")
    plt.ylabel("Total Loss")
    plt.show()


def _plot_ap_panel(prefix, label):
    """Panel: total loss (left axis) plus COCO AP metrics (right axis).

    `prefix` selects the metric family ('bbox' or 'segm'); `label` is the
    human-readable series prefix ('BBox' or 'Segmentation').
    """
    fig, ax1 = plt.subplots()
    ax1.set_xlabel('Iteration')
    ax1.set_ylabel('Loss')
    ax1.plot(*_series('total_loss'), color=color_base, label="Total Loss")
    ax1.tick_params(axis='y')
    plt.legend(loc='upper left')

    ax2 = ax1.twinx()
    ax2.set_ylabel('AP')
    ax2.plot(*_series(f'{prefix}/AP'), color=color_AP, label=f"{label} AP", linestyle="dashed")
    ax2.plot(*_series(f'{prefix}/AP50'), color=color_AP, label=f"{label} AP@.50")
    ax2.plot(*_series(f'{prefix}/AP75'), color=color_AP, label=f"{label} AP@.75", linestyle="-.")
    # Size-dependent metrics (small / medium / large objects).
    ax2.plot(*_series(f'{prefix}/APs'), color=color_APsml, label=f"{label} APs")
    ax2.plot(*_series(f'{prefix}/APm'), color=color_APsml, label=f"{label} APm", linestyle="-.")
    ax2.plot(*_series(f'{prefix}/APl'), color=color_APsml, label=f"{label} APl", linestyle="dotted")
    ax2.tick_params(axis='y')
    plt.legend(loc='upper right')
    plt.title(f"MRCNN Metrics - {label} AP - TACO at {lr}")
    plt.show()


def _plot_mask_rcnn_rates():
    """Panel: Mask R-CNN accuracy, false-positive and false-negative rates."""
    fig, ax1 = plt.subplots()
    ax1.set_xlabel('Iteration')
    ax1.set_ylabel('Loss')
    ax1.plot(*_series('total_loss'), color=color_base, label="Total Loss")
    ax1.tick_params(axis='y')
    plt.legend(loc='upper left')

    ax2 = ax1.twinx()
    ax2.set_ylabel('Percent')
    ax2.plot(*_series('mask_rcnn/accuracy'), color=color_AP, label="Mask R-CNN Accuracy", linestyle="dashed")
    ax2.plot(*_series('mask_rcnn/false_negative'), color=color_APsml, label="Mask R-CNN False Negative")
    ax2.plot(*_series('mask_rcnn/false_positive'), color="tab:red", label="Mask R-CNN False Positive", linestyle="-.")
    ax2.tick_params(axis='y')
    plt.legend(loc='best')
    plt.title(f"MRCNN Performance Metrics - {lr} - {freeze}")
    plt.show()


if PLOT_LEGACY_PANELS:
    _plot_total_and_validation_loss()
    _plot_ap_panel('bbox', 'BBox')
    _plot_ap_panel('segm', 'Segmentation')
    _plot_mask_rcnn_rates()
e6ea4dbc080cae69ae5783031f3f24ffd424d231 | 2,058 | py | Python | pincer/objects/intents.py | WhyDoWeLiveWithoutMeaning/Pincer | d130d8e92eba259fa662a77e3b23549c5d0ef0ff | [
"MIT"
] | null | null | null | pincer/objects/intents.py | WhyDoWeLiveWithoutMeaning/Pincer | d130d8e92eba259fa662a77e3b23549c5d0ef0ff | [
"MIT"
] | null | null | null | pincer/objects/intents.py | WhyDoWeLiveWithoutMeaning/Pincer | d130d8e92eba259fa662a77e3b23549c5d0ef0ff | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2021 Pincer
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import annotations
from enum import Enum
class Intents(Enum):
    """
    Discord client intents.

    These give your client more permissions.

    NOTE: The given Intents must also be enabled for your client on
    the discord dashboard.
    """
    GUILDS = 1 << 0
    GUILD_MEMBERS = 1 << 1
    GUILD_BANS = 1 << 2
    GUILD_EMOJIS_AND_STICKERS = 1 << 3
    GUILD_INTEGRATIONS = 1 << 4
    GUILD_WEBHOOKS = 1 << 5
    GUILD_INVITES = 1 << 6
    GUILD_VOICE_STATES = 1 << 7
    GUILD_PRESENCES = 1 << 8
    GUILD_MESSAGES = 1 << 9
    GUILD_MESSAGE_REACTIONS = 1 << 10
    GUILD_MESSAGE_TYPING = 1 << 11
    DIRECT_MESSAGES = 1 << 12
    DIRECT_MESSAGE_REACTIONS = 1 << 13
    DIRECT_MESSAGE_TYPING = 1 << 14

    @staticmethod
    def all() -> int:
        """Return the bitwise OR of every defined intent flag."""
        # Iterate the members directly instead of materializing a list of
        # values through map(lambda ...) as the original did.
        res = 0
        for intent in Intents:
            res |= intent.value
        return res
| 32.666667 | 72 | 0.700194 |
5dae7237b8d494822df2a02eae0d6130538f391a | 762 | py | Python | heat/db/sqlalchemy/migrate_repo/versions/070_placeholder.py | grebennikov/heat1 | 6a11bd0b5984c8f880d1a24ed324620020032b5a | [
"Apache-2.0"
] | 1 | 2015-12-18T21:46:55.000Z | 2015-12-18T21:46:55.000Z | heat/db/sqlalchemy/migrate_repo/versions/070_placeholder.py | grebennikov/heat1 | 6a11bd0b5984c8f880d1a24ed324620020032b5a | [
"Apache-2.0"
] | null | null | null | heat/db/sqlalchemy/migrate_repo/versions/070_placeholder.py | grebennikov/heat1 | 6a11bd0b5984c8f880d1a24ed324620020032b5a | [
"Apache-2.0"
] | 1 | 2021-03-21T11:37:03.000Z | 2021-03-21T11:37:03.000Z | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This is a placeholder for Liberty backports.
# Do not use this number for new Mitaka work. New Mitaka work starts after
# all the placeholders.
def upgrade(migrate_engine):
    """Intentionally empty placeholder migration.

    This slot is reserved for Liberty backports (see the module comment);
    it performs no schema change.
    """
    pass
| 36.285714 | 78 | 0.730971 |
abd453051bf4fd964dce2154a3116370e0157839 | 13,822 | py | Python | modules/data/RandAugment.py | bomtorazek/SupContrast | 943d1157d38136f9df55418e0b44fbc60744b142 | [
"BSD-2-Clause"
] | null | null | null | modules/data/RandAugment.py | bomtorazek/SupContrast | 943d1157d38136f9df55418e0b44fbc60744b142 | [
"BSD-2-Clause"
] | null | null | null | modules/data/RandAugment.py | bomtorazek/SupContrast | 943d1157d38136f9df55418e0b44fbc60744b142 | [
"BSD-2-Clause"
] | 1 | 2021-06-18T14:50:23.000Z | 2021-06-18T14:50:23.000Z | """ AutoAugment and RandAugment
Implementation adapted from:
https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/autoaugment.py
Papers: https://arxiv.org/abs/1805.09501, https://arxiv.org/abs/1906.11172, and https://arxiv.org/abs/1909.13719
Hacked together by Ross Wightman
"""
import random
import math
import re
from PIL import Image, ImageOps, ImageEnhance
import PIL
import numpy as np
_PIL_VER = tuple([int(x) for x in PIL.__version__.split('.')[:2]])
_FILL = (128, 128, 128)
# This signifies the max integer that the controller RNN could predict for the
# augmentation scheme.
_MAX_LEVEL = 10.
_HPARAMS_DEFAULT = dict(
translate_const=250,
img_mean=_FILL,
)
_RANDOM_INTERPOLATION = (Image.BILINEAR, Image.BICUBIC)
def _interpolation(kwargs):
    """Pop 'resample' from `kwargs`, picking randomly when a choice is given.

    Defaults to Image.BILINEAR when the key is absent.
    """
    resample = kwargs.pop('resample', Image.BILINEAR)
    if isinstance(resample, (list, tuple)):
        resample = random.choice(resample)
    return resample


def _check_args_tf(kwargs):
    """Normalize transform kwargs in place for the installed Pillow version.

    Drops 'fillcolor' on Pillow < 5.0 (unsupported there) and resolves
    'resample' to a single concrete mode.
    """
    if 'fillcolor' in kwargs and _PIL_VER < (5, 0):
        kwargs.pop('fillcolor')
    kwargs['resample'] = _interpolation(kwargs)
def shear_x(img, factor, **kwargs):
    """Shear horizontally: x' = x + factor * y."""
    _check_args_tf(kwargs)
    matrix = (1, factor, 0, 0, 1, 0)
    return img.transform(img.size, Image.AFFINE, matrix, **kwargs)


def shear_y(img, factor, **kwargs):
    """Shear vertically: y' = y + factor * x."""
    _check_args_tf(kwargs)
    matrix = (1, 0, 0, factor, 1, 0)
    return img.transform(img.size, Image.AFFINE, matrix, **kwargs)


def translate_x_rel(img, pct, **kwargs):
    """Translate horizontally by `pct` of the image width."""
    offset = pct * img.size[0]
    _check_args_tf(kwargs)
    return img.transform(img.size, Image.AFFINE, (1, 0, offset, 0, 1, 0), **kwargs)


def translate_y_rel(img, pct, **kwargs):
    """Translate vertically by `pct` of the image height."""
    offset = pct * img.size[1]
    _check_args_tf(kwargs)
    return img.transform(img.size, Image.AFFINE, (1, 0, 0, 0, 1, offset), **kwargs)


def translate_x_abs(img, pixels, **kwargs):
    """Translate horizontally by an absolute number of pixels."""
    _check_args_tf(kwargs)
    return img.transform(img.size, Image.AFFINE, (1, 0, pixels, 0, 1, 0), **kwargs)


def translate_y_abs(img, pixels, **kwargs):
    """Translate vertically by an absolute number of pixels."""
    _check_args_tf(kwargs)
    return img.transform(img.size, Image.AFFINE, (1, 0, 0, 0, 1, pixels), **kwargs)
def rotate(img, degrees, **kwargs):
    """Rotate `img` by `degrees`, dispatching on the installed Pillow version."""
    _check_args_tf(kwargs)
    if _PIL_VER >= (5, 2):
        # Modern Pillow rotates about the center and accepts our kwargs directly.
        return img.rotate(degrees, **kwargs)
    elif _PIL_VER >= (5, 0):
        # Pillow 5.0/5.1: emulate center rotation with an explicit affine matrix.
        w, h = img.size
        post_trans = (0, 0)
        rotn_center = (w / 2.0, h / 2.0)
        # Negative because PIL's transform maps output -> input coordinates.
        angle = -math.radians(degrees)
        matrix = [
            round(math.cos(angle), 15),
            round(math.sin(angle), 15),
            0.0,
            round(-math.sin(angle), 15),
            round(math.cos(angle), 15),
            0.0,
        ]

        def transform(x, y, matrix):
            # Apply the affine matrix (a, b, c, d, e, f) to the point (x, y).
            (a, b, c, d, e, f) = matrix
            return a * x + b * y + c, d * x + e * y + f

        # Shift the translation terms so rotation happens about the center.
        matrix[2], matrix[5] = transform(
            -rotn_center[0] - post_trans[0], -rotn_center[1] - post_trans[1], matrix
        )
        matrix[2] += rotn_center[0]
        matrix[5] += rotn_center[1]
        return img.transform(img.size, Image.AFFINE, matrix, **kwargs)
    else:
        # Very old Pillow: no fillcolor support; rotate with resample only.
        return img.rotate(degrees, resample=kwargs['resample'])
def auto_contrast(img, **__):
    """Remap the histogram so the darkest/lightest pixels reach 0/255."""
    return ImageOps.autocontrast(img)


def invert(img, **__):
    """Invert every pixel value."""
    return ImageOps.invert(img)


def identity(img, **__):
    """No-op augmentation: return the image unchanged."""
    return img


def equalize(img, **__):
    """Histogram-equalize the image."""
    return ImageOps.equalize(img)


def solarize(img, thresh, **__):
    """Invert all pixel values above `thresh`."""
    return ImageOps.solarize(img, thresh)


def solarize_add(img, add, thresh=128, **__):
    """Add `add` (clamped at 255) to every pixel value below `thresh`.

    Only 'L' and 'RGB' images are modified; other modes are returned as-is.
    """
    lut = [min(255, value + add) if value < thresh else value
           for value in range(256)]
    if img.mode not in ("L", "RGB"):
        return img
    if img.mode == "RGB" and len(lut) == 256:
        lut = lut * 3  # replicate the table for the three channels
    return img.point(lut)


def posterize(img, bits_to_keep, **__):
    """Keep only the `bits_to_keep` most significant bits of each channel."""
    if bits_to_keep >= 8:
        return img
    return ImageOps.posterize(img, bits_to_keep)


def contrast(img, factor, **__):
    """Scale contrast by `factor` (1.0 leaves the image unchanged)."""
    enhancer = ImageEnhance.Contrast(img)
    return enhancer.enhance(factor)


def color(img, factor, **__):
    """Scale color saturation by `factor`."""
    enhancer = ImageEnhance.Color(img)
    return enhancer.enhance(factor)


def brightness(img, factor, **__):
    """Scale brightness by `factor`."""
    enhancer = ImageEnhance.Brightness(img)
    return enhancer.enhance(factor)


def sharpness(img, factor, **__):
    """Scale sharpness by `factor`."""
    enhancer = ImageEnhance.Sharpness(img)
    return enhancer.enhance(factor)
def _randomly_negate(v):
"""With 50% prob, negate the value"""
return -v if random.random() > 0.5 else v
def _rotate_level_to_arg(level, _hparams):
# range [-30, 30]
level = (level / _MAX_LEVEL) * 30.
level = _randomly_negate(level)
return level,
def _enhance_level_to_arg(level, _hparams):
# range [0.1, 1.9]
return (level / _MAX_LEVEL) * 1.8 + 0.1,
def _shear_level_to_arg(level, _hparams):
# range [-0.3, 0.3]
level = (level / _MAX_LEVEL) * 0.3
level = _randomly_negate(level)
return level,
def _translate_abs_level_to_arg(level, hparams):
translate_const = hparams['translate_const']
level = (level / _MAX_LEVEL) * float(translate_const)
level = _randomly_negate(level)
return level,
def _translate_rel_level_to_arg(level, _hparams):
# range [-0.45, 0.45]
level = (level / _MAX_LEVEL) * 0.45
level = _randomly_negate(level)
return level,
def _posterize_original_level_to_arg(level, _hparams):
# As per original AutoAugment paper description
# range [4, 8], 'keep 4 up to 8 MSB of image'
return int((level / _MAX_LEVEL) * 4) + 4,
def _posterize_research_level_to_arg(level, _hparams):
    """TF models-research/UDA posterize scaling: keep 4 down to 0 MSBs."""
    dropped_bits = int((level / _MAX_LEVEL) * 4)
    return (4 - dropped_bits,)
def _posterize_tpu_level_to_arg(level, _hparams):
    """TF TPU EfficientNet posterize scaling: keep 0 up to 4 MSBs."""
    kept_bits = int((level / _MAX_LEVEL) * 4)
    return (kept_bits,)
def _solarize_level_to_arg(level, _hparams):
    """Map level to a solarize threshold in [0, 256]."""
    threshold = int((level / _MAX_LEVEL) * 256)
    return (threshold,)
def _solarize_add_level_to_arg(level, _hparams):
    """Map level to a solarize-add amount in [0, 110]."""
    amount = int((level / _MAX_LEVEL) * 110)
    return (amount,)
# Maps op name -> converter turning an integer magnitude "level" into the op's
# positional argument tuple; None means the op takes no magnitude argument.
LEVEL_TO_ARG = {
    'AutoContrast': None,
    'Equalize': None,
    'Invert': None,
    'Identity': None,
    'Rotate': _rotate_level_to_arg,
    # There are several variations of the posterize level scaling in various Tensorflow/Google repositories/papers
    'PosterizeOriginal': _posterize_original_level_to_arg,
    'PosterizeResearch': _posterize_research_level_to_arg,
    'PosterizeTpu': _posterize_tpu_level_to_arg,
    'Solarize': _solarize_level_to_arg,
    'SolarizeAdd': _solarize_add_level_to_arg,
    'Color': _enhance_level_to_arg,
    'Contrast': _enhance_level_to_arg,
    'Brightness': _enhance_level_to_arg,
    'Sharpness': _enhance_level_to_arg,
    'ShearX': _shear_level_to_arg,
    'ShearY': _shear_level_to_arg,
    'TranslateX': _translate_abs_level_to_arg,
    'TranslateY': _translate_abs_level_to_arg,
    'TranslateXRel': _translate_rel_level_to_arg,
    'TranslateYRel': _translate_rel_level_to_arg,
}
# Maps op name -> the PIL-based transform callable. All three Posterize
# variants share one implementation and differ only in their LEVEL_TO_ARG
# scaling above.
NAME_TO_OP = {
    'AutoContrast': auto_contrast,
    'Equalize': equalize,
    'Invert': invert,
    'Identity': identity,
    'Rotate': rotate,
    'PosterizeOriginal': posterize,
    'PosterizeResearch': posterize,
    'PosterizeTpu': posterize,
    'Solarize': solarize,
    'SolarizeAdd': solarize_add,
    'Color': color,
    'Contrast': contrast,
    'Brightness': brightness,
    'Sharpness': sharpness,
    'ShearX': shear_x,
    'ShearY': shear_y,
    'TranslateX': translate_x_abs,
    'TranslateY': translate_y_abs,
    'TranslateXRel': translate_x_rel,
    'TranslateYRel': translate_y_rel,
}
class AutoAugmentOp:
    """One named augmentation applied to an image with probability ``prob``.

    The transform callable is looked up in NAME_TO_OP and the magnitude-to-
    argument converter in LEVEL_TO_ARG, both keyed by ``name``.
    """

    def __init__(self, name, prob=0.5, magnitude=10, hparams=None):
        hparams = hparams or _HPARAMS_DEFAULT
        self.aug_fn = NAME_TO_OP[name]
        self.level_fn = LEVEL_TO_ARG[name]
        self.prob = prob
        self.magnitude = magnitude
        self.hparams = hparams.copy()
        # fillcolor/resample are passed to every op; ops that don't need them
        # discard them via their **__ catch-all (presumably the geometric ops
        # defined earlier in this module consume them — TODO confirm).
        self.kwargs = dict(
            fillcolor=hparams['img_mean'] if 'img_mean' in hparams else _FILL,
            resample=hparams['interpolation'] if 'interpolation' in hparams else _RANDOM_INTERPOLATION,
        )
        # If magnitude_std is > 0, we introduce some randomness
        # in the usually fixed policy and sample magnitude from a normal distribution
        # with mean `magnitude` and std-dev of `magnitude_std`.
        # NOTE This is my own hack, being tested, not in papers or reference impls.
        self.magnitude_std = self.hparams.get('magnitude_std', 0)

    def __call__(self, img):
        """Apply the op to ``img``, or return it unchanged with prob 1-prob."""
        if random.random() > self.prob:
            return img
        magnitude = self.magnitude
        if self.magnitude_std and self.magnitude_std > 0:
            magnitude = random.gauss(magnitude, self.magnitude_std)
        magnitude = min(_MAX_LEVEL, max(0, magnitude))  # clip to valid range
        level_args = self.level_fn(magnitude, self.hparams) if self.level_fn is not None else tuple()
        return self.aug_fn(img, *level_args, **self.kwargs)
# Default op pool that RandAugment samples from for RGB images.
_RAND_TRANSFORMS = [
    'AutoContrast',
    'Equalize',
    'Invert',
    'Rotate',
    'PosterizeTpu',
    'Solarize',
    'SolarizeAdd',
    'Color',
    'Contrast',
    'Brightness',
    'Sharpness',
    'ShearX',
    'ShearY',
    'TranslateXRel',
    'TranslateYRel',
    #'Cutout'  # FIXME I implement this as random erasing separately
]
# Reduced op pool used for CMC images: the color-altering ops are removed
# (see rand_augment_ops_cmc below).
_RAND_TRANSFORMS_CMC = [
    'AutoContrast',
    'Identity',
    'Rotate',
    'Sharpness',
    'ShearX',
    'ShearY',
    'TranslateXRel',
    'TranslateYRel',
    #'Cutout'  # FIXME I implement this as random erasing separately
]
# These experimental weights are based loosely on the relative improvements mentioned in paper.
# They may not result in increased performance, but could likely be tuned to so.
# Raw (unnormalized) per-op sampling weights; normalized to probabilities in
# _select_rand_weights below.
_RAND_CHOICE_WEIGHTS_0 = {
    'Rotate': 0.3,
    'ShearX': 0.2,
    'ShearY': 0.2,
    'TranslateXRel': 0.1,
    'TranslateYRel': 0.1,
    'Color': .025,
    'Sharpness': 0.025,
    'AutoContrast': 0.025,
    'Solarize': .005,
    'SolarizeAdd': .005,
    'Contrast': .005,
    'Brightness': .005,
    'Equalize': .005,
    'PosterizeTpu': 0,
    'Invert': 0,
}
def _select_rand_weights(weight_idx=0, transforms=None):
    """Return normalized sampling probabilities for ``transforms``.

    :param weight_idx: Index of the weight set; only set 0 exists currently.
    :param transforms: Op names to weight; defaults to _RAND_TRANSFORMS.
    :return: numpy.ndarray of probabilities summing to 1, aligned with
        ``transforms`` (suitable as ``p=`` for numpy.random.choice).
    """
    transforms = transforms or _RAND_TRANSFORMS
    assert weight_idx == 0  # only one set of weights currently
    rand_weights = _RAND_CHOICE_WEIGHTS_0
    # Convert explicitly: the previous code divided a Python list by
    # np.sum(...) and only produced an ndarray via numpy operator coercion,
    # which silently changed the return type and was easy to misread.
    probs = np.asarray([rand_weights[k] for k in transforms], dtype=np.float64)
    probs /= probs.sum()
    return probs
def rand_augment_ops(magnitude=10, hparams=None, transforms=None):
    """Build the RandAugment op list for RGB images at the given magnitude."""
    hparams = hparams or _HPARAMS_DEFAULT
    transforms = transforms or _RAND_TRANSFORMS
    ops = []
    for name in transforms:
        ops.append(AutoAugmentOp(name, prob=0.5, magnitude=magnitude, hparams=hparams))
    return ops
def rand_augment_ops_cmc(magnitude=10, hparams=None, transforms=None):
    """Build the RandAugment op list for CMC images (color ops removed)."""
    hparams = hparams or _HPARAMS_DEFAULT
    transforms = transforms or _RAND_TRANSFORMS_CMC
    ops = []
    for name in transforms:
        ops.append(AutoAugmentOp(name, prob=0.5, magnitude=magnitude, hparams=hparams))
    return ops
class RandAugment:
    """Apply ``num_layers`` ops sampled from ``ops`` to each input image.

    With ``choice_weights`` set, ops are drawn without replacement according
    to those probabilities; otherwise uniformly with replacement.
    """

    def __init__(self, ops, num_layers=2, choice_weights=None):
        self.ops = ops
        self.num_layers = num_layers
        self.choice_weights = choice_weights

    def __call__(self, img):
        # No replacement when using weighted choice.
        uniform = self.choice_weights is None
        chosen = np.random.choice(
            self.ops, self.num_layers, replace=uniform, p=self.choice_weights)
        for transform in chosen:
            img = transform(img)
        return img
def rand_augment_transform(config_str, hparams, use_cmc=False):
    """
    Create a RandAugment transform.

    :param config_str: String defining the configuration of random
        augmentation. Consists of multiple sections separated by dashes ('-').
        The first section defines the specific variant of rand augment
        (currently only 'rand'). The remaining sections, in no particular
        order, determine
            'm' - integer magnitude of rand augment
            'n' - integer num layers (number of transform ops selected per image)
            'w' - integer probability weight index (index of a set of weights
                  to influence choice of op)
            'mstd' - float std deviation of magnitude noise applied
        Ex 'rand-m9-n3-mstd0.5' results in RandAugment with magnitude 9,
        num_layers 3, magnitude_std 0.5.
        'rand-mstd1-w0' results in magnitude_std 1.0, weights 0, default
        magnitude of 10 and num_layers 2.
    :param hparams: Other hparams (kwargs) for the RandAugmentation scheme;
        mutated in place when 'mstd' is given (magnitude_std is injected).
    :param use_cmc: Flag indicating removal of color-altering ops.
    :return: A PyTorch compatible Transform
    """
    magnitude = _MAX_LEVEL  # default to _MAX_LEVEL for magnitude (currently 10)
    num_layers = 2  # default to 2 ops per image
    weight_idx = None  # default to no probability weights for op choice
    config = config_str.split('-')
    assert config[0] == 'rand'
    config = config[1:]
    for c in config:
        # Split at the first digit: 'm9' -> ['m', '9', ''], 'mstd0.5' ->
        # ['mstd', '0.5', '']. Sections without any digit are silently skipped.
        cs = re.split(r'(\d.*)', c)
        if len(cs) < 2:
            continue
        key, val = cs[:2]
        if key == 'mstd':
            # noise param injected via hparams for now
            hparams.setdefault('magnitude_std', float(val))
        elif key == 'm':
            magnitude = int(val)
        elif key == 'n':
            num_layers = int(val)
        elif key == 'w':
            weight_idx = int(val)
        else:
            assert False, 'Unknown RandAugment config section'
    if use_cmc:
        ra_ops = rand_augment_ops_cmc(magnitude=magnitude, hparams=hparams)
    else:
        ra_ops = rand_augment_ops(magnitude=magnitude, hparams=hparams)
    # Weighted sampling only when a 'w' section requested a weight set.
    choice_weights = None if weight_idx is None else _select_rand_weights(weight_idx)
    return RandAugment(ra_ops, num_layers, choice_weights=choice_weights)
63b56df93aaf94718f3657a0a02219277964686a | 439 | py | Python | studybuddyfinder/migrations/0021_auto_20201110_0042.py | SindhuMente/CS3240-StudyBuddyFinder | c3c2f2b80b8351df9255e44194bce6503f984183 | [
"MIT"
] | 2 | 2020-12-10T02:39:00.000Z | 2021-03-16T23:32:46.000Z | studybuddyfinder/migrations/0021_auto_20201110_0042.py | SindhuMente/CS3240-StudyBuddyFinder | c3c2f2b80b8351df9255e44194bce6503f984183 | [
"MIT"
] | null | null | null | studybuddyfinder/migrations/0021_auto_20201110_0042.py | SindhuMente/CS3240-StudyBuddyFinder | c3c2f2b80b8351df9255e44194bce6503f984183 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.1 on 2020-11-10 05:42
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration.

    Alters ``Announcement.date`` to a nullable DateTimeField that is stamped
    automatically on row creation (``auto_now_add=True``).
    """

    dependencies = [
        ('studybuddyfinder', '0020_auto_20201110_0039'),
    ]
    operations = [
        migrations.AlterField(
            model_name='announcement',
            name='date',
            field=models.DateTimeField(auto_now_add=True, null=True, verbose_name='Date'),
        ),
    ]
9343495a05a45f42e67686c4466293fd00721be3 | 74,096 | py | Python | Tests/feaLib/parser_test.py | twardoch/fonttools-py27 | 75b852d3f59fc0d03c6e78581530597d4c6368a1 | [
"MIT",
"BSD-3-Clause"
] | 240 | 2021-01-11T14:49:24.000Z | 2022-03-29T22:33:49.000Z | Tests/feaLib/parser_test.py | twardoch/fonttools-py27 | 75b852d3f59fc0d03c6e78581530597d4c6368a1 | [
"MIT",
"BSD-3-Clause"
] | 77 | 2021-01-12T20:23:30.000Z | 2022-03-28T12:14:34.000Z | Tests/feaLib/parser_test.py | twardoch/fonttools-py27 | 75b852d3f59fc0d03c6e78581530597d4c6368a1 | [
"MIT",
"BSD-3-Clause"
] | 28 | 2021-01-17T05:44:11.000Z | 2022-01-11T19:58:46.000Z | # -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
from __future__ import unicode_literals
from fontTools.feaLib.error import FeatureLibError
from fontTools.feaLib.parser import Parser, SymbolTable
from fontTools.misc.py23 import *
import warnings
import fontTools.feaLib.ast as ast
import os
import unittest
def glyphstr(glyphs):
    """Render a sequence of glyph containers as a feature-file-style string.

    Single glyphs print bare; multi-glyph sets print sorted inside brackets.
    """
    def fmt(glyph_set):
        names = list(glyph_set)
        if len(names) == 1:
            return names[0]
        return '[%s]' % ' '.join(sorted(names))
    return ' '.join(fmt(g.glyphSet()) for g in glyphs)
def mapping(s):
    """Flatten a substitution statement into an input->output glyph dict.

    A single replacement glyph is broadcast across all input glyphs.
    """
    inputs = [g for container in s.glyphs for g in container.glyphSet()]
    outputs = [g for container in s.replacements for g in container.glyphSet()]
    if len(outputs) == 1:
        outputs = outputs * len(inputs)
    return dict(zip(inputs, outputs))
GLYPHNAMES = ("""
.notdef space A B C D E F G H I J K L M N O P Q R S T U V W X Y Z
A.sc B.sc C.sc D.sc E.sc F.sc G.sc H.sc I.sc J.sc K.sc L.sc M.sc
N.sc O.sc P.sc Q.sc R.sc S.sc T.sc U.sc V.sc W.sc X.sc Y.sc Z.sc
A.swash B.swash X.swash Y.swash Z.swash
a b c d e f g h i j k l m n o p q r s t u v w x y z
a.sc b.sc c.sc d.sc e.sc f.sc g.sc h.sc i.sc j.sc k.sc l.sc m.sc
n.sc o.sc p.sc q.sc r.sc s.sc t.sc u.sc v.sc w.sc x.sc y.sc z.sc
a.swash b.swash x.swash y.swash z.swash
foobar foo.09 foo.1234 foo.9876
""").split() + ["foo.%d" % i for i in range(1, 200)]
class ParserTest(unittest.TestCase):
def __init__(self, methodName):
unittest.TestCase.__init__(self, methodName)
# Python 3 renamed assertRaisesRegexp to assertRaisesRegex,
# and fires deprecation warnings if a program uses the old name.
if not hasattr(self, "assertRaisesRegex"):
self.assertRaisesRegex = self.assertRaisesRegexp
def test_glyphMap_deprecated(self):
glyphMap = {'a': 0, 'b': 1, 'c': 2}
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
parser = Parser(UnicodeIO(), glyphMap=glyphMap)
self.assertEqual(len(w), 1)
self.assertEqual(w[-1].category, UserWarning)
self.assertIn("deprecated", str(w[-1].message))
self.assertEqual(parser.glyphNames_, {'a', 'b', 'c'})
self.assertRaisesRegex(
TypeError, "mutually exclusive",
Parser, UnicodeIO(), ("a",), glyphMap={"a": 0})
self.assertRaisesRegex(
TypeError, "unsupported keyword argument",
Parser, UnicodeIO(), foo="bar")
def test_comments(self):
doc = self.parse(
""" # Initial
feature test {
sub A by B; # simple
} test;""")
c1 = doc.statements[0]
c2 = doc.statements[1].statements[1]
self.assertEqual(type(c1), ast.Comment)
self.assertEqual(c1.text, "# Initial")
self.assertEqual(str(c1), "# Initial")
self.assertEqual(type(c2), ast.Comment)
self.assertEqual(c2.text, "# simple")
self.assertEqual(doc.statements[1].name, "test")
def test_only_comments(self):
doc = self.parse("""\
# Initial
""")
c1 = doc.statements[0]
self.assertEqual(type(c1), ast.Comment)
self.assertEqual(c1.text, "# Initial")
self.assertEqual(str(c1), "# Initial")
def test_anchor_format_a(self):
doc = self.parse(
"feature test {"
" pos cursive A <anchor 120 -20> <anchor NULL>;"
"} test;")
anchor = doc.statements[0].statements[0].entryAnchor
self.assertEqual(type(anchor), ast.Anchor)
self.assertEqual(anchor.x, 120)
self.assertEqual(anchor.y, -20)
self.assertIsNone(anchor.contourpoint)
self.assertIsNone(anchor.xDeviceTable)
self.assertIsNone(anchor.yDeviceTable)
def test_anchor_format_b(self):
doc = self.parse(
"feature test {"
" pos cursive A <anchor 120 -20 contourpoint 5> <anchor NULL>;"
"} test;")
anchor = doc.statements[0].statements[0].entryAnchor
self.assertEqual(type(anchor), ast.Anchor)
self.assertEqual(anchor.x, 120)
self.assertEqual(anchor.y, -20)
self.assertEqual(anchor.contourpoint, 5)
self.assertIsNone(anchor.xDeviceTable)
self.assertIsNone(anchor.yDeviceTable)
def test_anchor_format_c(self):
doc = self.parse(
"feature test {"
" pos cursive A "
" <anchor 120 -20 <device 11 111, 12 112> <device NULL>>"
" <anchor NULL>;"
"} test;")
anchor = doc.statements[0].statements[0].entryAnchor
self.assertEqual(type(anchor), ast.Anchor)
self.assertEqual(anchor.x, 120)
self.assertEqual(anchor.y, -20)
self.assertIsNone(anchor.contourpoint)
self.assertEqual(anchor.xDeviceTable, ((11, 111), (12, 112)))
self.assertIsNone(anchor.yDeviceTable)
def test_anchor_format_d(self):
doc = self.parse(
"feature test {"
" pos cursive A <anchor 120 -20> <anchor NULL>;"
"} test;")
anchor = doc.statements[0].statements[0].exitAnchor
self.assertIsNone(anchor)
def test_anchor_format_e(self):
doc = self.parse(
"feature test {"
" anchorDef 120 -20 contourpoint 7 Foo;"
" pos cursive A <anchor Foo> <anchor NULL>;"
"} test;")
anchor = doc.statements[0].statements[1].entryAnchor
self.assertEqual(type(anchor), ast.Anchor)
self.assertEqual(anchor.x, 120)
self.assertEqual(anchor.y, -20)
self.assertEqual(anchor.contourpoint, 7)
self.assertIsNone(anchor.xDeviceTable)
self.assertIsNone(anchor.yDeviceTable)
def test_anchor_format_e_undefined(self):
self.assertRaisesRegex(
FeatureLibError, 'Unknown anchor "UnknownName"', self.parse,
"feature test {"
" position cursive A <anchor UnknownName> <anchor NULL>;"
"} test;")
def test_anchordef(self):
[foo] = self.parse("anchorDef 123 456 foo;").statements
self.assertEqual(type(foo), ast.AnchorDefinition)
self.assertEqual(foo.name, "foo")
self.assertEqual(foo.x, 123)
self.assertEqual(foo.y, 456)
self.assertEqual(foo.contourpoint, None)
def test_anchordef_contourpoint(self):
[foo] = self.parse("anchorDef 123 456 contourpoint 5 foo;").statements
self.assertEqual(type(foo), ast.AnchorDefinition)
self.assertEqual(foo.name, "foo")
self.assertEqual(foo.x, 123)
self.assertEqual(foo.y, 456)
self.assertEqual(foo.contourpoint, 5)
def test_anon(self):
anon = self.parse("anon TEST { # a\nfoo\n } TEST; # qux").statements[0]
self.assertIsInstance(anon, ast.AnonymousBlock)
self.assertEqual(anon.tag, "TEST")
self.assertEqual(anon.content, "foo\n ")
def test_anonymous(self):
anon = self.parse("anonymous TEST {\nbar\n} TEST;").statements[0]
self.assertIsInstance(anon, ast.AnonymousBlock)
self.assertEqual(anon.tag, "TEST")
# feature file spec requires passing the final end-of-line
self.assertEqual(anon.content, "bar\n")
def test_anon_missingBrace(self):
self.assertRaisesRegex(
FeatureLibError, "Expected '} TEST;' to terminate anonymous block",
self.parse, "anon TEST { \n no end in sight")
def test_attach(self):
doc = self.parse("table GDEF {Attach [a e] 2;} GDEF;")
s = doc.statements[0].statements[0]
self.assertIsInstance(s, ast.AttachStatement)
self.assertEqual(glyphstr([s.glyphs]), "[a e]")
self.assertEqual(s.contourPoints, {2})
def test_feature_block(self):
[liga] = self.parse("feature liga {} liga;").statements
self.assertEqual(liga.name, "liga")
self.assertFalse(liga.use_extension)
def test_feature_block_useExtension(self):
[liga] = self.parse("feature liga useExtension {} liga;").statements
self.assertEqual(liga.name, "liga")
self.assertTrue(liga.use_extension)
self.assertEqual(liga.asFea(),
"feature liga useExtension {\n \n} liga;\n")
def test_feature_comment(self):
[liga] = self.parse("feature liga { # Comment\n } liga;").statements
[comment] = liga.statements
self.assertIsInstance(comment, ast.Comment)
self.assertEqual(comment.text, "# Comment")
def test_feature_reference(self):
doc = self.parse("feature aalt { feature salt; } aalt;")
ref = doc.statements[0].statements[0]
self.assertIsInstance(ref, ast.FeatureReferenceStatement)
self.assertEqual(ref.featureName, "salt")
def test_FeatureNames_bad(self):
self.assertRaisesRegex(
FeatureLibError, 'Expected "name"',
self.parse, "feature ss01 { featureNames { feature test; } ss01;")
def test_FeatureNames_comment(self):
[feature] = self.parse(
"feature ss01 { featureNames { # Comment\n }; } ss01;").statements
[featureNames] = feature.statements
self.assertIsInstance(featureNames, ast.NestedBlock)
[comment] = featureNames.statements
self.assertIsInstance(comment, ast.Comment)
self.assertEqual(comment.text, "# Comment")
def test_FeatureNames_emptyStatements(self):
[feature] = self.parse(
"feature ss01 { featureNames { ;;; }; } ss01;").statements
[featureNames] = feature.statements
self.assertIsInstance(featureNames, ast.NestedBlock)
self.assertEqual(featureNames.statements, [])
def test_FontRevision(self):
doc = self.parse("table head {FontRevision 2.5;} head;")
s = doc.statements[0].statements[0]
self.assertIsInstance(s, ast.FontRevisionStatement)
self.assertEqual(s.revision, 2.5)
def test_FontRevision_negative(self):
self.assertRaisesRegex(
FeatureLibError, "Font revision numbers must be positive",
self.parse, "table head {FontRevision -17.2;} head;")
def test_glyphclass(self):
[gc] = self.parse("@dash = [endash emdash figuredash];").statements
self.assertEqual(gc.name, "dash")
self.assertEqual(gc.glyphSet(), ("endash", "emdash", "figuredash"))
def test_glyphclass_glyphNameTooLong(self):
self.assertRaisesRegex(
FeatureLibError, "must not be longer than 63 characters",
self.parse, "@GlyphClass = [%s];" % ("G" * 64))
def test_glyphclass_bad(self):
self.assertRaisesRegex(
FeatureLibError,
"Expected glyph name, glyph range, or glyph class reference",
self.parse, "@bad = [a 123];")
def test_glyphclass_duplicate(self):
# makeotf accepts this, so we should too
ab, xy = self.parse("@dup = [a b]; @dup = [x y];").statements
self.assertEqual(glyphstr([ab]), "[a b]")
self.assertEqual(glyphstr([xy]), "[x y]")
def test_glyphclass_empty(self):
[gc] = self.parse("@empty_set = [];").statements
self.assertEqual(gc.name, "empty_set")
self.assertEqual(gc.glyphSet(), tuple())
def test_glyphclass_equality(self):
[foo, bar] = self.parse("@foo = [a b]; @bar = @foo;").statements
self.assertEqual(foo.glyphSet(), ("a", "b"))
self.assertEqual(bar.glyphSet(), ("a", "b"))
def test_glyphclass_from_markClass(self):
doc = self.parse(
"markClass [acute grave] <anchor 500 800> @TOP_MARKS;"
"markClass cedilla <anchor 500 -100> @BOTTOM_MARKS;"
"@MARKS = [@TOP_MARKS @BOTTOM_MARKS ogonek];"
"@ALL = @MARKS;")
self.assertEqual(doc.statements[-1].glyphSet(),
("acute", "grave", "cedilla", "ogonek"))
def test_glyphclass_range_cid(self):
[gc] = self.parse(r"@GlyphClass = [\999-\1001];").statements
self.assertEqual(gc.name, "GlyphClass")
self.assertEqual(gc.glyphSet(), ("cid00999", "cid01000", "cid01001"))
def test_glyphclass_range_cid_bad(self):
self.assertRaisesRegex(
FeatureLibError,
"Bad range: start should be less than limit",
self.parse, r"@bad = [\998-\995];")
def test_glyphclass_range_uppercase(self):
[gc] = self.parse("@swashes = [X.swash-Z.swash];").statements
self.assertEqual(gc.name, "swashes")
self.assertEqual(gc.glyphSet(), ("X.swash", "Y.swash", "Z.swash"))
def test_glyphclass_range_lowercase(self):
[gc] = self.parse("@defg.sc = [d.sc-g.sc];").statements
self.assertEqual(gc.name, "defg.sc")
self.assertEqual(gc.glyphSet(), ("d.sc", "e.sc", "f.sc", "g.sc"))
def test_glyphclass_range_dash(self):
glyphNames = "A-foo.sc B-foo.sc C-foo.sc".split()
[gc] = self.parse("@range = [A-foo.sc-C-foo.sc];", glyphNames).statements
self.assertEqual(gc.glyphSet(), ("A-foo.sc", "B-foo.sc", "C-foo.sc"))
def test_glyphclass_range_dash_with_space(self):
gn = "A-foo.sc B-foo.sc C-foo.sc".split()
[gc] = self.parse("@range = [A-foo.sc - C-foo.sc];", gn).statements
self.assertEqual(gc.glyphSet(), ("A-foo.sc", "B-foo.sc", "C-foo.sc"))
def test_glyphclass_glyph_name_should_win_over_range(self):
# The OpenType Feature File Specification v1.20 makes it clear
# that if a dashed name could be interpreted either as a glyph name
# or as a range, then the semantics should be the single dashed name.
glyphNames = (
"A-foo.sc-C-foo.sc A-foo.sc B-foo.sc C-foo.sc".split())
[gc] = self.parse("@range = [A-foo.sc-C-foo.sc];", glyphNames).statements
self.assertEqual(gc.glyphSet(), ("A-foo.sc-C-foo.sc",))
def test_glyphclass_range_dash_ambiguous(self):
glyphNames = "A B C A-B B-C".split()
self.assertRaisesRegex(
FeatureLibError,
'Ambiguous glyph range "A-B-C"; '
'please use "A - B-C" or "A-B - C" to clarify what you mean',
self.parse, r"@bad = [A-B-C];", glyphNames)
def test_glyphclass_range_digit1(self):
[gc] = self.parse("@range = [foo.2-foo.5];").statements
self.assertEqual(gc.glyphSet(), ("foo.2", "foo.3", "foo.4", "foo.5"))
def test_glyphclass_range_digit2(self):
[gc] = self.parse("@range = [foo.09-foo.11];").statements
self.assertEqual(gc.glyphSet(), ("foo.09", "foo.10", "foo.11"))
def test_glyphclass_range_digit3(self):
[gc] = self.parse("@range = [foo.123-foo.125];").statements
self.assertEqual(gc.glyphSet(), ("foo.123", "foo.124", "foo.125"))
def test_glyphclass_range_bad(self):
self.assertRaisesRegex(
FeatureLibError,
"Bad range: \"a\" and \"foobar\" should have the same length",
self.parse, "@bad = [a-foobar];")
self.assertRaisesRegex(
FeatureLibError, "Bad range: \"A.swash-z.swash\"",
self.parse, "@bad = [A.swash-z.swash];")
self.assertRaisesRegex(
FeatureLibError, "Start of range must be smaller than its end",
self.parse, "@bad = [B.swash-A.swash];")
self.assertRaisesRegex(
FeatureLibError, "Bad range: \"foo.1234-foo.9876\"",
self.parse, "@bad = [foo.1234-foo.9876];")
def test_glyphclass_range_mixed(self):
[gc] = self.parse("@range = [a foo.09-foo.11 X.sc-Z.sc];").statements
self.assertEqual(gc.glyphSet(), (
"a", "foo.09", "foo.10", "foo.11", "X.sc", "Y.sc", "Z.sc"
))
def test_glyphclass_reference(self):
[vowels_lc, vowels_uc, vowels] = self.parse(
"@Vowels.lc = [a e i o u]; @Vowels.uc = [A E I O U];"
"@Vowels = [@Vowels.lc @Vowels.uc y Y];").statements
self.assertEqual(vowels_lc.glyphSet(), tuple("aeiou"))
self.assertEqual(vowels_uc.glyphSet(), tuple("AEIOU"))
self.assertEqual(vowels.glyphSet(), tuple("aeiouAEIOUyY"))
self.assertEqual(vowels.asFea(),
"@Vowels = [@Vowels.lc @Vowels.uc y Y];")
self.assertRaisesRegex(
FeatureLibError, "Unknown glyph class @unknown",
self.parse, "@bad = [@unknown];")
def test_glyphclass_scoping(self):
[foo, liga, smcp] = self.parse(
"@foo = [a b];"
"feature liga { @bar = [@foo l]; } liga;"
"feature smcp { @bar = [@foo s]; } smcp;"
).statements
self.assertEqual(foo.glyphSet(), ("a", "b"))
self.assertEqual(liga.statements[0].glyphSet(), ("a", "b", "l"))
self.assertEqual(smcp.statements[0].glyphSet(), ("a", "b", "s"))
def test_glyphclass_scoping_bug496(self):
# https://github.com/fonttools/fonttools/issues/496
f1, f2 = self.parse(
"feature F1 { lookup L { @GLYPHCLASS = [A B C];} L; } F1;"
"feature F2 { sub @GLYPHCLASS by D; } F2;"
).statements
self.assertEqual(list(f2.statements[0].glyphs[0].glyphSet()),
["A", "B", "C"])
def test_GlyphClassDef(self):
doc = self.parse("table GDEF {GlyphClassDef [b],[l],[m],[C c];} GDEF;")
s = doc.statements[0].statements[0]
self.assertIsInstance(s, ast.GlyphClassDefStatement)
self.assertEqual(glyphstr([s.baseGlyphs]), "b")
self.assertEqual(glyphstr([s.ligatureGlyphs]), "l")
self.assertEqual(glyphstr([s.markGlyphs]), "m")
self.assertEqual(glyphstr([s.componentGlyphs]), "[C c]")
def test_GlyphClassDef_noCLassesSpecified(self):
doc = self.parse("table GDEF {GlyphClassDef ,,,;} GDEF;")
s = doc.statements[0].statements[0]
self.assertIsNone(s.baseGlyphs)
self.assertIsNone(s.ligatureGlyphs)
self.assertIsNone(s.markGlyphs)
self.assertIsNone(s.componentGlyphs)
def test_ignore_pos(self):
doc = self.parse("feature test {ignore pos e t' c, q u' u' x;} test;")
sub = doc.statements[0].statements[0]
self.assertIsInstance(sub, ast.IgnorePosStatement)
[(pref1, glyphs1, suff1), (pref2, glyphs2, suff2)] = sub.chainContexts
self.assertEqual(glyphstr(pref1), "e")
self.assertEqual(glyphstr(glyphs1), "t")
self.assertEqual(glyphstr(suff1), "c")
self.assertEqual(glyphstr(pref2), "q")
self.assertEqual(glyphstr(glyphs2), "u u")
self.assertEqual(glyphstr(suff2), "x")
def test_ignore_position(self):
doc = self.parse(
"feature test {"
" ignore position f [a e] d' [a u]' [e y];"
"} test;")
sub = doc.statements[0].statements[0]
self.assertIsInstance(sub, ast.IgnorePosStatement)
[(prefix, glyphs, suffix)] = sub.chainContexts
self.assertEqual(glyphstr(prefix), "f [a e]")
self.assertEqual(glyphstr(glyphs), "d [a u]")
self.assertEqual(glyphstr(suffix), "[e y]")
def test_ignore_position_with_lookup(self):
self.assertRaisesRegex(
FeatureLibError,
'No lookups can be specified for "ignore pos"',
self.parse,
"lookup L { pos [A A.sc] -100; } L;"
"feature test { ignore pos f' i', A' lookup L; } test;")
def test_ignore_sub(self):
doc = self.parse("feature test {ignore sub e t' c, q u' u' x;} test;")
sub = doc.statements[0].statements[0]
self.assertIsInstance(sub, ast.IgnoreSubstStatement)
[(pref1, glyphs1, suff1), (pref2, glyphs2, suff2)] = sub.chainContexts
self.assertEqual(glyphstr(pref1), "e")
self.assertEqual(glyphstr(glyphs1), "t")
self.assertEqual(glyphstr(suff1), "c")
self.assertEqual(glyphstr(pref2), "q")
self.assertEqual(glyphstr(glyphs2), "u u")
self.assertEqual(glyphstr(suff2), "x")
def test_ignore_substitute(self):
doc = self.parse(
"feature test {"
" ignore substitute f [a e] d' [a u]' [e y];"
"} test;")
sub = doc.statements[0].statements[0]
self.assertIsInstance(sub, ast.IgnoreSubstStatement)
[(prefix, glyphs, suffix)] = sub.chainContexts
self.assertEqual(glyphstr(prefix), "f [a e]")
self.assertEqual(glyphstr(glyphs), "d [a u]")
self.assertEqual(glyphstr(suffix), "[e y]")
def test_ignore_substitute_with_lookup(self):
self.assertRaisesRegex(
FeatureLibError,
'No lookups can be specified for "ignore sub"',
self.parse,
"lookup L { sub [A A.sc] by a; } L;"
"feature test { ignore sub f' i', A' lookup L; } test;")
def test_include_statement(self):
doc = self.parse("""\
include(../family.fea);
include # Comment
(foo)
;
""", followIncludes=False)
s1, s2, s3 = doc.statements
self.assertEqual(type(s1), ast.IncludeStatement)
self.assertEqual(s1.filename, "../family.fea")
self.assertEqual(s1.asFea(), "include(../family.fea);")
self.assertEqual(type(s2), ast.IncludeStatement)
self.assertEqual(s2.filename, "foo")
self.assertEqual(s2.asFea(), "include(foo);")
self.assertEqual(type(s3), ast.Comment)
self.assertEqual(s3.text, "# Comment")
def test_include_statement_no_semicolon(self):
doc = self.parse("""\
include(../family.fea)
""", followIncludes=False)
s1 = doc.statements[0]
self.assertEqual(type(s1), ast.IncludeStatement)
self.assertEqual(s1.filename, "../family.fea")
self.assertEqual(s1.asFea(), "include(../family.fea);")
def test_language(self):
doc = self.parse("feature test {language DEU;} test;")
s = doc.statements[0].statements[0]
self.assertEqual(type(s), ast.LanguageStatement)
self.assertEqual(s.language, "DEU ")
self.assertTrue(s.include_default)
self.assertFalse(s.required)
def test_language_exclude_dflt(self):
doc = self.parse("feature test {language DEU exclude_dflt;} test;")
s = doc.statements[0].statements[0]
self.assertEqual(type(s), ast.LanguageStatement)
self.assertEqual(s.language, "DEU ")
self.assertFalse(s.include_default)
self.assertFalse(s.required)
def test_language_exclude_dflt_required(self):
doc = self.parse("feature test {"
" language DEU exclude_dflt required;"
"} test;")
s = doc.statements[0].statements[0]
self.assertEqual(type(s), ast.LanguageStatement)
self.assertEqual(s.language, "DEU ")
self.assertFalse(s.include_default)
self.assertTrue(s.required)
def test_language_include_dflt(self):
doc = self.parse("feature test {language DEU include_dflt;} test;")
s = doc.statements[0].statements[0]
self.assertEqual(type(s), ast.LanguageStatement)
self.assertEqual(s.language, "DEU ")
self.assertTrue(s.include_default)
self.assertFalse(s.required)
def test_language_include_dflt_required(self):
doc = self.parse("feature test {"
" language DEU include_dflt required;"
"} test;")
s = doc.statements[0].statements[0]
self.assertEqual(type(s), ast.LanguageStatement)
self.assertEqual(s.language, "DEU ")
self.assertTrue(s.include_default)
self.assertTrue(s.required)
def test_language_DFLT(self):
self.assertRaisesRegex(
FeatureLibError,
'"DFLT" is not a valid language tag; use "dflt" instead',
self.parse, "feature test { language DFLT; } test;")
def test_ligatureCaretByIndex_glyphClass(self):
doc = self.parse("table GDEF{LigatureCaretByIndex [c_t f_i] 2;}GDEF;")
s = doc.statements[0].statements[0]
self.assertIsInstance(s, ast.LigatureCaretByIndexStatement)
self.assertEqual(glyphstr([s.glyphs]), "[c_t f_i]")
self.assertEqual(s.carets, [2])
def test_ligatureCaretByIndex_singleGlyph(self):
doc = self.parse("table GDEF{LigatureCaretByIndex f_f_i 3 7;}GDEF;")
s = doc.statements[0].statements[0]
self.assertIsInstance(s, ast.LigatureCaretByIndexStatement)
self.assertEqual(glyphstr([s.glyphs]), "f_f_i")
self.assertEqual(s.carets, [3, 7])
def test_ligatureCaretByPos_glyphClass(self):
doc = self.parse("table GDEF {LigatureCaretByPos [c_t f_i] 7;} GDEF;")
s = doc.statements[0].statements[0]
self.assertIsInstance(s, ast.LigatureCaretByPosStatement)
self.assertEqual(glyphstr([s.glyphs]), "[c_t f_i]")
self.assertEqual(s.carets, [7])
def test_ligatureCaretByPos_singleGlyph(self):
doc = self.parse("table GDEF {LigatureCaretByPos f_i 400 380;} GDEF;")
s = doc.statements[0].statements[0]
self.assertIsInstance(s, ast.LigatureCaretByPosStatement)
self.assertEqual(glyphstr([s.glyphs]), "f_i")
self.assertEqual(s.carets, [400, 380])
def test_lookup_block(self):
[lookup] = self.parse("lookup Ligatures {} Ligatures;").statements
self.assertEqual(lookup.name, "Ligatures")
self.assertFalse(lookup.use_extension)
def test_lookup_block_useExtension(self):
[lookup] = self.parse("lookup Foo useExtension {} Foo;").statements
self.assertEqual(lookup.name, "Foo")
self.assertTrue(lookup.use_extension)
self.assertEqual(lookup.asFea(),
"lookup Foo useExtension {\n \n} Foo;\n")
def test_lookup_block_name_mismatch(self):
self.assertRaisesRegex(
FeatureLibError, 'Expected "Foo"',
self.parse, "lookup Foo {} Bar;")
def test_lookup_block_with_horizontal_valueRecordDef(self):
doc = self.parse("feature liga {"
" lookup look {"
" valueRecordDef 123 foo;"
" } look;"
"} liga;")
[liga] = doc.statements
[look] = liga.statements
[foo] = look.statements
self.assertEqual(foo.value.xAdvance, 123)
self.assertIsNone(foo.value.yAdvance)
def test_lookup_block_with_vertical_valueRecordDef(self):
doc = self.parse("feature vkrn {"
" lookup look {"
" valueRecordDef 123 foo;"
" } look;"
"} vkrn;")
[vkrn] = doc.statements
[look] = vkrn.statements
[foo] = look.statements
self.assertIsNone(foo.value.xAdvance)
self.assertEqual(foo.value.yAdvance, 123)
def test_lookup_comment(self):
[lookup] = self.parse("lookup L { # Comment\n } L;").statements
[comment] = lookup.statements
self.assertIsInstance(comment, ast.Comment)
self.assertEqual(comment.text, "# Comment")
def test_lookup_reference(self):
[foo, bar] = self.parse("lookup Foo {} Foo;"
"feature Bar {lookup Foo;} Bar;").statements
[ref] = bar.statements
self.assertEqual(type(ref), ast.LookupReferenceStatement)
self.assertEqual(ref.lookup, foo)
def test_lookup_reference_to_lookup_inside_feature(self):
[qux, bar] = self.parse("feature Qux {lookup Foo {} Foo;} Qux;"
"feature Bar {lookup Foo;} Bar;").statements
[foo] = qux.statements
[ref] = bar.statements
self.assertIsInstance(ref, ast.LookupReferenceStatement)
self.assertEqual(ref.lookup, foo)
def test_lookup_reference_unknown(self):
self.assertRaisesRegex(
FeatureLibError, 'Unknown lookup "Huh"',
self.parse, "feature liga {lookup Huh;} liga;")
def parse_lookupflag_(self, s):
return self.parse("lookup L {%s} L;" % s).statements[0].statements[-1]
    def test_lookupflag_format_A(self):
        """Named lookupflags combine into the numeric flag value (RTL|IgnoreMarks = 9)."""
        flag = self.parse_lookupflag_("lookupflag RightToLeft IgnoreMarks;")
        self.assertIsInstance(flag, ast.LookupFlagStatement)
        self.assertEqual(flag.value, 9)
        self.assertIsNone(flag.markAttachment)
        self.assertIsNone(flag.markFilteringSet)
        self.assertEqual(flag.asFea(), "lookupflag RightToLeft IgnoreMarks;")
    def test_lookupflag_format_A_MarkAttachmentType(self):
        """MarkAttachmentType @class is stored separately from the numeric flag."""
        flag = self.parse_lookupflag_(
            "@TOP_MARKS = [acute grave macron];"
            "lookupflag RightToLeft MarkAttachmentType @TOP_MARKS;")
        self.assertIsInstance(flag, ast.LookupFlagStatement)
        self.assertEqual(flag.value, 1)
        self.assertIsInstance(flag.markAttachment, ast.GlyphClassName)
        self.assertEqual(flag.markAttachment.glyphSet(),
                         ("acute", "grave", "macron"))
        self.assertIsNone(flag.markFilteringSet)
        self.assertEqual(flag.asFea(),
                         "lookupflag RightToLeft MarkAttachmentType @TOP_MARKS;")
    def test_lookupflag_format_A_UseMarkFilteringSet(self):
        """UseMarkFilteringSet @class is stored separately; asFea() reorders flags."""
        flag = self.parse_lookupflag_(
            "@BOTTOM_MARKS = [cedilla ogonek];"
            "lookupflag UseMarkFilteringSet @BOTTOM_MARKS IgnoreLigatures;")
        self.assertIsInstance(flag, ast.LookupFlagStatement)
        self.assertEqual(flag.value, 4)
        self.assertIsNone(flag.markAttachment)
        self.assertIsInstance(flag.markFilteringSet, ast.GlyphClassName)
        self.assertEqual(flag.markFilteringSet.glyphSet(),
                         ("cedilla", "ogonek"))
        # Note: output order is canonicalized (flags first, filtering set last).
        self.assertEqual(flag.asFea(),
                         "lookupflag IgnoreLigatures UseMarkFilteringSet @BOTTOM_MARKS;")
    def test_lookupflag_format_B(self):
        """Numeric lookupflag (format B) is accepted; asFea() expands 7 into names."""
        flag = self.parse_lookupflag_("lookupflag 7;")
        self.assertIsInstance(flag, ast.LookupFlagStatement)
        self.assertEqual(flag.value, 7)
        self.assertIsNone(flag.markAttachment)
        self.assertIsNone(flag.markFilteringSet)
        # 7 == RightToLeft(1) | IgnoreBaseGlyphs(2) | IgnoreLigatures(4)
        self.assertEqual(flag.asFea(),
                         "lookupflag RightToLeft IgnoreBaseGlyphs IgnoreLigatures;")
    def test_lookupflag_format_B_zero(self):
        """'lookupflag 0;' is valid and round-trips literally (no names to expand)."""
        flag = self.parse_lookupflag_("lookupflag 0;")
        self.assertIsInstance(flag, ast.LookupFlagStatement)
        self.assertEqual(flag.value, 0)
        self.assertIsNone(flag.markAttachment)
        self.assertIsNone(flag.markFilteringSet)
        self.assertEqual(flag.asFea(), "lookupflag 0;")
def test_lookupflag_no_value(self):
self.assertRaisesRegex(
FeatureLibError,
'lookupflag must have a value',
self.parse,
"feature test {lookupflag;} test;")
def test_lookupflag_repeated(self):
self.assertRaisesRegex(
FeatureLibError,
'RightToLeft can be specified only once',
self.parse,
"feature test {lookupflag RightToLeft RightToLeft;} test;")
def test_lookupflag_unrecognized(self):
self.assertRaisesRegex(
FeatureLibError,
'"IgnoreCookies" is not a recognized lookupflag',
self.parse, "feature test {lookupflag IgnoreCookies;} test;")
    def test_gpos_type_1_glyph(self):
        """GPOS type 1: single glyph with a format-B value record."""
        doc = self.parse("feature kern {pos one <1 2 3 4>;} kern;")
        pos = doc.statements[0].statements[0]
        self.assertIsInstance(pos, ast.SinglePosStatement)
        [(glyphs, value)] = pos.pos
        self.assertEqual(glyphstr([glyphs]), "one")
        self.assertEqual(value.asFea(), "<1 2 3 4>")
    def test_gpos_type_1_glyphclass_horizontal(self):
        """GPOS type 1: glyph class with a bare number (horizontal context)."""
        doc = self.parse("feature kern {pos [one two] -300;} kern;")
        pos = doc.statements[0].statements[0]
        self.assertIsInstance(pos, ast.SinglePosStatement)
        [(glyphs, value)] = pos.pos
        self.assertEqual(glyphstr([glyphs]), "[one two]")
        self.assertEqual(value.asFea(), "-300")
    def test_gpos_type_1_glyphclass_vertical(self):
        """GPOS type 1: glyph class with a bare number (vertical context, vkrn)."""
        doc = self.parse("feature vkrn {pos [one two] -300;} vkrn;")
        pos = doc.statements[0].statements[0]
        self.assertIsInstance(pos, ast.SinglePosStatement)
        [(glyphs, value)] = pos.pos
        self.assertEqual(glyphstr([glyphs]), "[one two]")
        self.assertEqual(value.asFea(), "-300")
    def test_gpos_type_1_multiple(self):
        """GPOS type 1: several marked glyphs, each carrying its own value."""
        doc = self.parse("feature f {pos one'1 two'2 [five six]'56;} f;")
        pos = doc.statements[0].statements[0]
        self.assertIsInstance(pos, ast.SinglePosStatement)
        [(glyphs1, val1), (glyphs2, val2), (glyphs3, val3)] = pos.pos
        self.assertEqual(glyphstr([glyphs1]), "one")
        self.assertEqual(val1.asFea(), "1")
        self.assertEqual(glyphstr([glyphs2]), "two")
        self.assertEqual(val2.asFea(), "2")
        self.assertEqual(glyphstr([glyphs3]), "[five six]")
        self.assertEqual(val3.asFea(), "56")
        self.assertEqual(pos.prefix, [])
        self.assertEqual(pos.suffix, [])
def test_gpos_type_1_enumerated(self):
self.assertRaisesRegex(
FeatureLibError,
'"enumerate" is only allowed with pair positionings',
self.parse, "feature test {enum pos T 100;} test;")
self.assertRaisesRegex(
FeatureLibError,
'"enumerate" is only allowed with pair positionings',
self.parse, "feature test {enumerate pos T 100;} test;")
    def test_gpos_type_1_chained(self):
        """Chain-context single pos: prefix/suffix captured around the marked class."""
        doc = self.parse("feature kern {pos [A B] [T Y]' 20 comma;} kern;")
        pos = doc.statements[0].statements[0]
        self.assertIsInstance(pos, ast.SinglePosStatement)
        [(glyphs, value)] = pos.pos
        self.assertEqual(glyphstr([glyphs]), "[T Y]")
        self.assertEqual(value.asFea(), "20")
        self.assertEqual(glyphstr(pos.prefix), "[A B]")
        self.assertEqual(glyphstr(pos.suffix), "comma")
    def test_gpos_type_1_chained_special_kern_format_valuerecord_format_a(self):
        """Trailing number after the suffix ('special kern format') binds to the marked glyph."""
        doc = self.parse("feature kern {pos [A B] [T Y]' comma 20;} kern;")
        pos = doc.statements[0].statements[0]
        self.assertIsInstance(pos, ast.SinglePosStatement)
        [(glyphs, value)] = pos.pos
        self.assertEqual(glyphstr([glyphs]), "[T Y]")
        self.assertEqual(value.asFea(), "20")
        self.assertEqual(glyphstr(pos.prefix), "[A B]")
        self.assertEqual(glyphstr(pos.suffix), "comma")
    def test_gpos_type_1_chained_special_kern_format_valuerecord_format_b(self):
        """Same special kern format, with a format-B value record after the suffix."""
        doc = self.parse("feature kern {pos [A B] [T Y]' comma <0 0 0 0>;} kern;")
        pos = doc.statements[0].statements[0]
        self.assertIsInstance(pos, ast.SinglePosStatement)
        [(glyphs, value)] = pos.pos
        self.assertEqual(glyphstr([glyphs]), "[T Y]")
        self.assertEqual(value.asFea(), "<0 0 0 0>")
        self.assertEqual(glyphstr(pos.prefix), "[A B]")
        self.assertEqual(glyphstr(pos.suffix), "comma")
    def test_gpos_type_2_format_a(self):
        """GPOS type 2, format A: each side of the pair has its own value record."""
        doc = self.parse("feature kern {"
                         " pos [T V] -60 [a b c] <1 2 3 4>;"
                         "} kern;")
        pos = doc.statements[0].statements[0]
        self.assertEqual(type(pos), ast.PairPosStatement)
        self.assertFalse(pos.enumerated)
        self.assertEqual(glyphstr([pos.glyphs1]), "[T V]")
        self.assertEqual(pos.valuerecord1.asFea(), "-60")
        self.assertEqual(glyphstr([pos.glyphs2]), "[a b c]")
        self.assertEqual(pos.valuerecord2.asFea(), "<1 2 3 4>")
    def test_gpos_type_2_format_a_enumerated(self):
        """'enum' before a pair pos sets the enumerated flag on the statement."""
        doc = self.parse("feature kern {"
                         " enum pos [T V] -60 [a b c] <1 2 3 4>;"
                         "} kern;")
        pos = doc.statements[0].statements[0]
        self.assertEqual(type(pos), ast.PairPosStatement)
        self.assertTrue(pos.enumerated)
        self.assertEqual(glyphstr([pos.glyphs1]), "[T V]")
        self.assertEqual(pos.valuerecord1.asFea(), "-60")
        self.assertEqual(glyphstr([pos.glyphs2]), "[a b c]")
        self.assertEqual(pos.valuerecord2.asFea(), "<1 2 3 4>")
    def test_gpos_type_2_format_a_with_null_first(self):
        """<NULL> as the first value record: falsy value, but kept in asFea() output."""
        doc = self.parse("feature kern {"
                         " pos [T V] <NULL> [a b c] <1 2 3 4>;"
                         "} kern;")
        pos = doc.statements[0].statements[0]
        self.assertEqual(type(pos), ast.PairPosStatement)
        self.assertFalse(pos.enumerated)
        self.assertEqual(glyphstr([pos.glyphs1]), "[T V]")
        # A NULL value record is falsy yet still renders as "<NULL>".
        self.assertFalse(pos.valuerecord1)
        self.assertEqual(pos.valuerecord1.asFea(), "<NULL>")
        self.assertEqual(glyphstr([pos.glyphs2]), "[a b c]")
        self.assertEqual(pos.valuerecord2.asFea(), "<1 2 3 4>")
        self.assertEqual(pos.asFea(), "pos [T V] <NULL> [a b c] <1 2 3 4>;")
    def test_gpos_type_2_format_a_with_null_second(self):
        """<NULL> as the second value record collapses asFea() to format B."""
        doc = self.parse("feature kern {"
                         " pos [T V] <1 2 3 4> [a b c] <NULL>;"
                         "} kern;")
        pos = doc.statements[0].statements[0]
        self.assertEqual(type(pos), ast.PairPosStatement)
        self.assertFalse(pos.enumerated)
        self.assertEqual(glyphstr([pos.glyphs1]), "[T V]")
        self.assertEqual(pos.valuerecord1.asFea(), "<1 2 3 4>")
        self.assertEqual(glyphstr([pos.glyphs2]), "[a b c]")
        self.assertFalse(pos.valuerecord2)
        # Output drops the trailing NULL and moves the value to the end.
        self.assertEqual(pos.asFea(), "pos [T V] [a b c] <1 2 3 4>;")
    def test_gpos_type_2_format_b(self):
        """GPOS type 2, format B: single trailing value; valuerecord2 stays None."""
        doc = self.parse("feature kern {"
                         " pos [T V] [a b c] <1 2 3 4>;"
                         "} kern;")
        pos = doc.statements[0].statements[0]
        self.assertEqual(type(pos), ast.PairPosStatement)
        self.assertFalse(pos.enumerated)
        self.assertEqual(glyphstr([pos.glyphs1]), "[T V]")
        self.assertEqual(pos.valuerecord1.asFea(), "<1 2 3 4>")
        self.assertEqual(glyphstr([pos.glyphs2]), "[a b c]")
        self.assertIsNone(pos.valuerecord2)
    def test_gpos_type_2_format_b_enumerated(self):
        """The long keywords 'enumerate position' work the same as 'enum pos'."""
        doc = self.parse("feature kern {"
                         " enumerate position [T V] [a b c] <1 2 3 4>;"
                         "} kern;")
        pos = doc.statements[0].statements[0]
        self.assertEqual(type(pos), ast.PairPosStatement)
        self.assertTrue(pos.enumerated)
        self.assertEqual(glyphstr([pos.glyphs1]), "[T V]")
        self.assertEqual(pos.valuerecord1.asFea(), "<1 2 3 4>")
        self.assertEqual(glyphstr([pos.glyphs2]), "[a b c]")
        self.assertIsNone(pos.valuerecord2)
    def test_gpos_type_3(self):
        """GPOS type 3 (cursive): entry and exit anchors are parsed in order."""
        doc = self.parse("feature kern {"
                         " position cursive A <anchor 12 -2> <anchor 2 3>;"
                         "} kern;")
        pos = doc.statements[0].statements[0]
        self.assertEqual(type(pos), ast.CursivePosStatement)
        self.assertEqual(pos.glyphclass.glyphSet(), ("A",))
        self.assertEqual((pos.entryAnchor.x, pos.entryAnchor.y), (12, -2))
        self.assertEqual((pos.exitAnchor.x, pos.exitAnchor.y), (2, 3))
def test_gpos_type_3_enumerated(self):
self.assertRaisesRegex(
FeatureLibError,
'"enumerate" is not allowed with cursive attachment positioning',
self.parse,
"feature kern {"
" enumerate position cursive A <anchor 12 -2> <anchor 2 3>;"
"} kern;")
    def test_gpos_type_4(self):
        """GPOS type 4 (mark-to-base): anchors pair up with their mark classes."""
        doc = self.parse(
            "markClass [acute grave] <anchor 150 -10> @TOP_MARKS;"
            "markClass [dieresis umlaut] <anchor 300 -10> @TOP_MARKS;"
            "markClass [cedilla] <anchor 300 600> @BOTTOM_MARKS;"
            "feature test {"
            " position base [a e o u] "
            " <anchor 250 450> mark @TOP_MARKS "
            " <anchor 210 -10> mark @BOTTOM_MARKS;"
            "} test;")
        pos = doc.statements[-1].statements[0]
        self.assertEqual(type(pos), ast.MarkBasePosStatement)
        self.assertEqual(pos.base.glyphSet(), ("a", "e", "o", "u"))
        (a1, m1), (a2, m2) = pos.marks
        self.assertEqual((a1.x, a1.y, m1.name), (250, 450, "TOP_MARKS"))
        self.assertEqual((a2.x, a2.y, m2.name), (210, -10, "BOTTOM_MARKS"))
def test_gpos_type_4_enumerated(self):
self.assertRaisesRegex(
FeatureLibError,
'"enumerate" is not allowed with '
'mark-to-base attachment positioning',
self.parse,
"feature kern {"
" markClass cedilla <anchor 300 600> @BOTTOM_MARKS;"
" enumerate position base A <anchor 12 -2> mark @BOTTOM_MARKS;"
"} kern;")
def test_gpos_type_4_not_markClass(self):
self.assertRaisesRegex(
FeatureLibError, "@MARKS is not a markClass", self.parse,
"@MARKS = [acute grave];"
"feature test {"
" position base [a e o u] <anchor 250 450> mark @MARKS;"
"} test;")
    def test_gpos_type_5(self):
        """GPOS type 5 (mark-to-ligature): one anchor group per ligature component;
        <anchor NULL> yields an empty group for that component."""
        doc = self.parse(
            "markClass [grave acute] <anchor 150 500> @TOP_MARKS;"
            "markClass [cedilla] <anchor 300 -100> @BOTTOM_MARKS;"
            "feature test {"
            " position "
            " ligature [a_f_f_i o_f_f_i] "
            " <anchor 50 600> mark @TOP_MARKS "
            " <anchor 50 -10> mark @BOTTOM_MARKS "
            " ligComponent "
            " <anchor 30 800> mark @TOP_MARKS "
            " ligComponent "
            " <anchor NULL> "
            " ligComponent "
            " <anchor 30 -10> mark @BOTTOM_MARKS;"
            "} test;")
        pos = doc.statements[-1].statements[0]
        self.assertEqual(type(pos), ast.MarkLigPosStatement)
        self.assertEqual(pos.ligatures.glyphSet(), ("a_f_f_i", "o_f_f_i"))
        # Third component was <anchor NULL>, hence the empty list.
        [(a11, m11), (a12, m12)], [(a2, m2)], [], [(a4, m4)] = pos.marks
        self.assertEqual((a11.x, a11.y, m11.name), (50, 600, "TOP_MARKS"))
        self.assertEqual((a12.x, a12.y, m12.name), (50, -10, "BOTTOM_MARKS"))
        self.assertEqual((a2.x, a2.y, m2.name), (30, 800, "TOP_MARKS"))
        self.assertEqual((a4.x, a4.y, m4.name), (30, -10, "BOTTOM_MARKS"))
def test_gpos_type_5_enumerated(self):
self.assertRaisesRegex(
FeatureLibError,
'"enumerate" is not allowed with '
'mark-to-ligature attachment positioning',
self.parse,
"feature test {"
" markClass cedilla <anchor 300 600> @MARKS;"
" enumerate position "
" ligature f_i <anchor 100 0> mark @MARKS"
" ligComponent <anchor NULL>;"
"} test;")
def test_gpos_type_5_not_markClass(self):
self.assertRaisesRegex(
FeatureLibError, "@MARKS is not a markClass", self.parse,
"@MARKS = [acute grave];"
"feature test {"
" position ligature f_i <anchor 250 450> mark @MARKS;"
"} test;")
    def test_gpos_type_6(self):
        """GPOS type 6 (mark-to-mark): base marks plus (anchor, markclass) pairs."""
        doc = self.parse(
            "markClass damma <anchor 189 -103> @MARK_CLASS_1;"
            "feature test {"
            " position mark hamza <anchor 221 301> mark @MARK_CLASS_1;"
            "} test;")
        pos = doc.statements[-1].statements[0]
        self.assertEqual(type(pos), ast.MarkMarkPosStatement)
        self.assertEqual(pos.baseMarks.glyphSet(), ("hamza",))
        [(a1, m1)] = pos.marks
        self.assertEqual((a1.x, a1.y, m1.name), (221, 301, "MARK_CLASS_1"))
def test_gpos_type_6_enumerated(self):
self.assertRaisesRegex(
FeatureLibError,
'"enumerate" is not allowed with '
'mark-to-mark attachment positioning',
self.parse,
"markClass damma <anchor 189 -103> @MARK_CLASS_1;"
"feature test {"
" enum pos mark hamza <anchor 221 301> mark @MARK_CLASS_1;"
"} test;")
def test_gpos_type_6_not_markClass(self):
self.assertRaisesRegex(
FeatureLibError, "@MARKS is not a markClass", self.parse,
"@MARKS = [acute grave];"
"feature test {"
" position mark cedilla <anchor 250 450> mark @MARKS;"
"} test;")
    def test_gpos_type_8(self):
        """GPOS type 8 (chain context): per-position lookup refs; None where absent."""
        doc = self.parse(
            "lookup L1 {pos one 100;} L1; lookup L2 {pos two 200;} L2;"
            "feature test {"
            " pos [A a] [B b] I' lookup L1 [N n]' lookup L2 P' [Y y] [Z z];"
            "} test;")
        lookup1, lookup2 = doc.statements[0:2]
        pos = doc.statements[-1].statements[0]
        self.assertEqual(type(pos), ast.ChainContextPosStatement)
        self.assertEqual(glyphstr(pos.prefix), "[A a] [B b]")
        self.assertEqual(glyphstr(pos.glyphs), "I [N n] P")
        self.assertEqual(glyphstr(pos.suffix), "[Y y] [Z z]")
        # P' has no lookup attached, hence the trailing None.
        self.assertEqual(pos.lookups, [lookup1, lookup2, None])
def test_gpos_type_8_lookup_with_values(self):
self.assertRaisesRegex(
FeatureLibError,
'If "lookup" is present, no values must be specified',
self.parse,
"lookup L1 {pos one 100;} L1;"
"feature test {"
" pos A' lookup L1 B' 20;"
"} test;")
    def test_markClass(self):
        """Top-level markClass definitions expose name, glyph set and anchor."""
        doc = self.parse("markClass [acute grave] <anchor 350 3> @MARKS;")
        mc = doc.statements[0]
        self.assertIsInstance(mc, ast.MarkClassDefinition)
        self.assertEqual(mc.markClass.name, "MARKS")
        self.assertEqual(mc.glyphSet(), ("acute", "grave"))
        self.assertEqual((mc.anchor.x, mc.anchor.y), (350, 3))
    def test_nameid_windows_utf16(self):
        """nameid defaults to Windows/Unicode (3,1,0x409); \\XXXX escapes are UTF-16."""
        doc = self.parse(
            r'table name { nameid 9 "M\00fcller-Lanc\00e9"; } name;')
        name = doc.statements[0].statements[0]
        self.assertIsInstance(name, ast.NameRecord)
        self.assertEqual(name.nameID, 9)
        self.assertEqual(name.platformID, 3)
        self.assertEqual(name.platEncID, 1)
        self.assertEqual(name.langID, 0x0409)
        self.assertEqual(name.string, "Müller-Lancé")
        self.assertEqual(name.asFea(), r'nameid 9 "M\00fcller-Lanc\00e9";')
    def test_nameid_windows_utf16_backslash(self):
        """Escaped backslash (\\005c) decodes to a literal backslash and re-escapes."""
        doc = self.parse(r'table name { nameid 9 "Back\005cslash"; } name;')
        name = doc.statements[0].statements[0]
        self.assertEqual(name.string, r"Back\slash")
        self.assertEqual(name.asFea(), r'nameid 9 "Back\005cslash";')
    def test_nameid_windows_utf16_quotation_mark(self):
        """Escaped quotation marks (\\0022) decode to '"' and re-escape on output."""
        doc = self.parse(
            r'table name { nameid 9 "Quotation \0022Mark\0022"; } name;')
        name = doc.statements[0].statements[0]
        self.assertEqual(name.string, 'Quotation "Mark"')
        self.assertEqual(name.asFea(), r'nameid 9 "Quotation \0022Mark\0022";')
def test_nameid_windows_utf16_surroates(self):
doc = self.parse(r'table name { nameid 9 "Carrot \D83E\DD55"; } name;')
name = doc.statements[0].statements[0]
self.assertEqual(name.string, r"Carrot 🥕")
self.assertEqual(name.asFea(), r'nameid 9 "Carrot \d83e\dd55";')
    def test_nameid_mac_roman(self):
        """Platform 1 defaults to Mac Roman encoding; \\XX escapes are single bytes."""
        doc = self.parse(
            r'table name { nameid 9 1 "Joachim M\9fller-Lanc\8e"; } name;')
        name = doc.statements[0].statements[0]
        self.assertIsInstance(name, ast.NameRecord)
        self.assertEqual(name.nameID, 9)
        self.assertEqual(name.platformID, 1)
        self.assertEqual(name.platEncID, 0)
        self.assertEqual(name.langID, 0)
        self.assertEqual(name.string, "Joachim Müller-Lancé")
        self.assertEqual(name.asFea(),
                         r'nameid 9 1 "Joachim M\9fller-Lanc\8e";')
    def test_nameid_mac_croatian(self):
        """Explicit Mac language ID 18 (Croatian) selects the matching byte decoding."""
        doc = self.parse(
            r'table name { nameid 9 1 0 18 "Jovica Veljovi\e6"; } name;')
        name = doc.statements[0].statements[0]
        self.assertEqual(name.nameID, 9)
        self.assertEqual(name.platformID, 1)
        self.assertEqual(name.platEncID, 0)
        self.assertEqual(name.langID, 18)
        self.assertEqual(name.string, "Jovica Veljović")
        self.assertEqual(name.asFea(), r'nameid 9 1 0 18 "Jovica Veljovi\e6";')
def test_nameid_unsupported_platform(self):
self.assertRaisesRegex(
FeatureLibError, "Expected platform id 1 or 3",
self.parse, 'table name { nameid 9 666 "Foo"; } name;')
    def test_rsub_format_a(self):
        """Reverse chaining sub, format A: single glyph replaced within context.
        Note glyph classes render sorted ("[b B]" in, "[B b]" out)."""
        doc = self.parse("feature test {rsub a [b B] c' d [e E] by C;} test;")
        rsub = doc.statements[0].statements[0]
        self.assertEqual(type(rsub), ast.ReverseChainSingleSubstStatement)
        self.assertEqual(glyphstr(rsub.old_prefix), "a [B b]")
        self.assertEqual(rsub.glyphs[0].glyphSet(), ("c",))
        self.assertEqual(rsub.replacements[0].glyphSet(), ("C",))
        self.assertEqual(glyphstr(rsub.old_suffix), "d [E e]")
    def test_rsub_format_a_cid(self):
        """Format A with CIDs: \\N references expand to zero-padded cidNNNNN names."""
        doc = self.parse(r"feature test {rsub \1 [\2 \3] \4' \5 by \6;} test;")
        rsub = doc.statements[0].statements[0]
        self.assertEqual(type(rsub), ast.ReverseChainSingleSubstStatement)
        self.assertEqual(glyphstr(rsub.old_prefix),
                         "cid00001 [cid00002 cid00003]")
        self.assertEqual(rsub.glyphs[0].glyphSet(), ("cid00004",))
        self.assertEqual(rsub.replacements[0].glyphSet(), ("cid00006",))
        self.assertEqual(glyphstr(rsub.old_suffix), "cid00005")
    def test_rsub_format_b(self):
        """Format B: a marked glyph class maps many-to-one onto the replacement."""
        doc = self.parse(
            "feature smcp {"
            " reversesub A B [one.fitted one.oldstyle]' C [d D] by one;"
            "} smcp;")
        rsub = doc.statements[0].statements[0]
        self.assertEqual(type(rsub), ast.ReverseChainSingleSubstStatement)
        self.assertEqual(glyphstr(rsub.old_prefix), "A B")
        self.assertEqual(glyphstr(rsub.old_suffix), "C [D d]")
        self.assertEqual(mapping(rsub), {
            "one.fitted": "one",
            "one.oldstyle": "one"
        })
    def test_rsub_format_c(self):
        """Format C: glyph ranges pair up element-wise ([a-d] -> [A.sc-D.sc])."""
        doc = self.parse(
            "feature test {"
            " reversesub BACK TRACK [a-d]' LOOK AHEAD by [A.sc-D.sc];"
            "} test;")
        rsub = doc.statements[0].statements[0]
        self.assertEqual(type(rsub), ast.ReverseChainSingleSubstStatement)
        self.assertEqual(glyphstr(rsub.old_prefix), "BACK TRACK")
        self.assertEqual(glyphstr(rsub.old_suffix), "LOOK AHEAD")
        self.assertEqual(mapping(rsub), {
            "a": "A.sc",
            "b": "B.sc",
            "c": "C.sc",
            "d": "D.sc"
        })
def test_rsub_from(self):
self.assertRaisesRegex(
FeatureLibError,
'Reverse chaining substitutions do not support "from"',
self.parse, "feature test {rsub a from [a.1 a.2 a.3];} test;")
def test_rsub_nonsingle(self):
self.assertRaisesRegex(
FeatureLibError,
"In reverse chaining single substitutions, only a single glyph "
"or glyph class can be replaced",
self.parse, "feature test {rsub c d by c_d;} test;")
def test_rsub_multiple_replacement_glyphs(self):
self.assertRaisesRegex(
FeatureLibError,
'In reverse chaining single substitutions, the replacement '
r'\(after "by"\) must be a single glyph or glyph class',
self.parse, "feature test {rsub f_i by f i;} test;")
    def test_script(self):
        """A 'script' statement inside a feature records its four-letter tag."""
        doc = self.parse("feature test {script cyrl;} test;")
        s = doc.statements[0].statements[0]
        self.assertEqual(type(s), ast.ScriptStatement)
        self.assertEqual(s.script, "cyrl")
def test_script_dflt(self):
self.assertRaisesRegex(
FeatureLibError,
'"dflt" is not a valid script tag; use "DFLT" instead',
self.parse, "feature test {script dflt;} test;")
    def test_sub_single_format_a(self):  # GSUB LookupType 1
        """Single substitution, format A: one glyph to one glyph."""
        doc = self.parse("feature smcp {substitute a by a.sc;} smcp;")
        sub = doc.statements[0].statements[0]
        self.assertIsInstance(sub, ast.SingleSubstStatement)
        self.assertEqual(glyphstr(sub.prefix), "")
        self.assertEqual(mapping(sub), {"a": "a.sc"})
        self.assertEqual(glyphstr(sub.suffix), "")
    def test_sub_single_format_a_chained(self):  # chain to GSUB LookupType 1
        """Chained single substitution captures prefix/suffix context."""
        doc = self.parse("feature test {sub [A a] d' [C] by d.alt;} test;")
        sub = doc.statements[0].statements[0]
        self.assertIsInstance(sub, ast.SingleSubstStatement)
        self.assertEqual(mapping(sub), {"d": "d.alt"})
        self.assertEqual(glyphstr(sub.prefix), "[A a]")
        self.assertEqual(glyphstr(sub.suffix), "C")
    def test_sub_single_format_a_cid(self):  # GSUB LookupType 1
        """Single substitution with CID references (\\N -> cidNNNNN names)."""
        doc = self.parse(r"feature smcp {substitute \12345 by \78987;} smcp;")
        sub = doc.statements[0].statements[0]
        self.assertIsInstance(sub, ast.SingleSubstStatement)
        self.assertEqual(glyphstr(sub.prefix), "")
        self.assertEqual(mapping(sub), {"cid12345": "cid78987"})
        self.assertEqual(glyphstr(sub.suffix), "")
    def test_sub_single_format_b(self):  # GSUB LookupType 1
        """Single substitution, format B: class-to-one maps every member."""
        doc = self.parse(
            "feature smcp {"
            " substitute [one.fitted one.oldstyle] by one;"
            "} smcp;")
        sub = doc.statements[0].statements[0]
        self.assertIsInstance(sub, ast.SingleSubstStatement)
        self.assertEqual(mapping(sub), {
            "one.fitted": "one",
            "one.oldstyle": "one"
        })
        self.assertEqual(glyphstr(sub.prefix), "")
        self.assertEqual(glyphstr(sub.suffix), "")
    def test_sub_single_format_b_chained(self):  # chain to GSUB LookupType 1
        """Format B with chaining context on both sides of the marked class."""
        doc = self.parse(
            "feature smcp {"
            " substitute PRE FIX [one.fitted one.oldstyle]' SUF FIX by one;"
            "} smcp;")
        sub = doc.statements[0].statements[0]
        self.assertIsInstance(sub, ast.SingleSubstStatement)
        self.assertEqual(mapping(sub), {
            "one.fitted": "one",
            "one.oldstyle": "one"
        })
        self.assertEqual(glyphstr(sub.prefix), "PRE FIX")
        self.assertEqual(glyphstr(sub.suffix), "SUF FIX")
    def test_sub_single_format_c(self):  # GSUB LookupType 1
        """Format C: glyph ranges pair up element-wise ([a-d] -> [A.sc-D.sc])."""
        doc = self.parse(
            "feature smcp {"
            " substitute [a-d] by [A.sc-D.sc];"
            "} smcp;")
        sub = doc.statements[0].statements[0]
        self.assertIsInstance(sub, ast.SingleSubstStatement)
        self.assertEqual(mapping(sub), {
            "a": "A.sc",
            "b": "B.sc",
            "c": "C.sc",
            "d": "D.sc"
        })
        self.assertEqual(glyphstr(sub.prefix), "")
        self.assertEqual(glyphstr(sub.suffix), "")
    def test_sub_single_format_c_chained(self):  # chain to GSUB LookupType 1
        """Format C with trailing context only (no prefix)."""
        doc = self.parse(
            "feature smcp {"
            " substitute [a-d]' X Y [Z z] by [A.sc-D.sc];"
            "} smcp;")
        sub = doc.statements[0].statements[0]
        self.assertIsInstance(sub, ast.SingleSubstStatement)
        self.assertEqual(mapping(sub), {
            "a": "A.sc",
            "b": "B.sc",
            "c": "C.sc",
            "d": "D.sc"
        })
        self.assertEqual(glyphstr(sub.prefix), "")
        self.assertEqual(glyphstr(sub.suffix), "X Y [Z z]")
def test_sub_single_format_c_different_num_elements(self):
self.assertRaisesRegex(
FeatureLibError,
'Expected a glyph class with 4 elements after "by", '
'but found a glyph class with 26 elements',
self.parse, "feature smcp {sub [a-d] by [A.sc-Z.sc];} smcp;")
def test_sub_with_values(self):
self.assertRaisesRegex(
FeatureLibError,
"Substitution statements cannot contain values",
self.parse, "feature smcp {sub A' 20 by A.sc;} smcp;")
    def test_substitute_multiple(self):  # GSUB LookupType 2
        """Multiple substitution: one glyph replaced by a sequence."""
        doc = self.parse("lookup Look {substitute f_f_i by f f i;} Look;")
        sub = doc.statements[0].statements[0]
        self.assertIsInstance(sub, ast.MultipleSubstStatement)
        self.assertEqual(sub.glyph, "f_f_i")
        self.assertEqual(sub.replacement, ("f", "f", "i"))
    def test_substitute_multiple_chained(self):  # chain to GSUB LookupType 2
        """Chained multiple substitution parses with context around f_f_i'."""
        doc = self.parse("lookup L {sub [A-C] f_f_i' [X-Z] by f f i;} L;")
        sub = doc.statements[0].statements[0]
        self.assertIsInstance(sub, ast.MultipleSubstStatement)
        self.assertEqual(sub.glyph, "f_f_i")
        self.assertEqual(sub.replacement, ("f", "f", "i"))
    def test_substitute_multiple_force_chained(self):
        """A lone marked glyph forces the chained form; asFea() keeps the mark."""
        doc = self.parse("lookup L {sub f_f_i' by f f i;} L;")
        sub = doc.statements[0].statements[0]
        self.assertIsInstance(sub, ast.MultipleSubstStatement)
        self.assertEqual(sub.glyph, "f_f_i")
        self.assertEqual(sub.replacement, ("f", "f", "i"))
        self.assertEqual(sub.asFea(), "sub f_f_i' by f f i;")
def test_substitute_multiple_by_mutliple(self):
self.assertRaisesRegex(
FeatureLibError,
"Direct substitution of multiple glyphs by multiple glyphs "
"is not supported",
self.parse,
"lookup MxM {sub a b c by d e f;} MxM;")
def test_split_marked_glyphs_runs(self):
self.assertRaisesRegex(
FeatureLibError,
"Unsupported contextual target sequence",
self.parse, "feature test{"
" ignore pos a' x x A';"
"} test;")
self.assertRaisesRegex(
FeatureLibError,
"Unsupported contextual target sequence",
self.parse, "lookup shift {"
" pos a <0 -10 0 0>;"
" pos A <0 10 0 0>;"
"} shift;"
"feature test {"
" sub a' lookup shift x x A' lookup shift;"
"} test;")
self.assertRaisesRegex(
FeatureLibError,
"Unsupported contextual target sequence",
self.parse, "feature test {"
" ignore sub a' x x A';"
"} test;")
self.assertRaisesRegex(
FeatureLibError,
"Unsupported contextual target sequence",
self.parse, "lookup upper {"
" sub a by A;"
"} upper;"
"lookup lower {"
" sub A by a;"
"} lower;"
"feature test {"
" sub a' lookup upper x x A' lookup lower;"
"} test;")
    def test_substitute_mix_single_multiple(self):
        """1:1 subs in a lookup that also has 1:N subs are promoted to MultipleSubst."""
        doc = self.parse("lookup Look {"
                         " sub f_f by f f;"
                         " sub f by f;"
                         " sub f_f_i by f f i;"
                         "} Look;")
        statements = doc.statements[0].statements
        for sub in statements:
            self.assertIsInstance(sub, ast.MultipleSubstStatement)
        # The 1:1 rule is represented as a one-element replacement list.
        self.assertEqual(statements[1].glyph, "f")
        self.assertEqual(statements[1].replacement, ["f"])
    def test_substitute_from(self):  # GSUB LookupType 3
        """Alternate substitution: 'from' lists the alternates class."""
        doc = self.parse("feature test {"
                         " substitute a from [a.1 a.2 a.3];"
                         "} test;")
        sub = doc.statements[0].statements[0]
        self.assertIsInstance(sub, ast.AlternateSubstStatement)
        self.assertEqual(glyphstr(sub.prefix), "")
        self.assertEqual(glyphstr([sub.glyph]), "a")
        self.assertEqual(glyphstr(sub.suffix), "")
        self.assertEqual(glyphstr([sub.replacement]), "[a.1 a.2 a.3]")
    def test_substitute_from_chained(self):  # chain to GSUB LookupType 3
        """Chained alternate substitution records context around the marked glyph."""
        doc = self.parse("feature test {"
                         " substitute A B a' [Y y] Z from [a.1 a.2 a.3];"
                         "} test;")
        sub = doc.statements[0].statements[0]
        self.assertIsInstance(sub, ast.AlternateSubstStatement)
        self.assertEqual(glyphstr(sub.prefix), "A B")
        self.assertEqual(glyphstr([sub.glyph]), "a")
        self.assertEqual(glyphstr(sub.suffix), "[Y y] Z")
        self.assertEqual(glyphstr([sub.replacement]), "[a.1 a.2 a.3]")
    def test_substitute_from_cid(self):  # GSUB LookupType 3
        """Alternate substitution with CID glyph references."""
        doc = self.parse(r"feature test {"
                         r" substitute \7 from [\111 \222];"
                         r"} test;")
        sub = doc.statements[0].statements[0]
        self.assertIsInstance(sub, ast.AlternateSubstStatement)
        self.assertEqual(glyphstr(sub.prefix), "")
        self.assertEqual(glyphstr([sub.glyph]), "cid00007")
        self.assertEqual(glyphstr(sub.suffix), "")
        self.assertEqual(glyphstr([sub.replacement]), "[cid00111 cid00222]")
    def test_substitute_from_glyphclass(self):  # GSUB LookupType 3
        """Alternate substitution where 'from' references a named glyph class."""
        doc = self.parse("feature test {"
                         " @Ampersands = [ampersand.1 ampersand.2];"
                         " substitute ampersand from @Ampersands;"
                         "} test;")
        [glyphclass, sub] = doc.statements[0].statements
        self.assertIsInstance(sub, ast.AlternateSubstStatement)
        self.assertEqual(glyphstr(sub.prefix), "")
        self.assertEqual(glyphstr([sub.glyph]), "ampersand")
        self.assertEqual(glyphstr(sub.suffix), "")
        self.assertEqual(glyphstr([sub.replacement]),
                         "[ampersand.1 ampersand.2]")
    def test_substitute_ligature(self):  # GSUB LookupType 4
        """Ligature substitution: a glyph sequence replaced by one glyph."""
        doc = self.parse("feature liga {substitute f f i by f_f_i;} liga;")
        sub = doc.statements[0].statements[0]
        self.assertIsInstance(sub, ast.LigatureSubstStatement)
        self.assertEqual(glyphstr(sub.glyphs), "f f i")
        self.assertEqual(sub.replacement, "f_f_i")
        self.assertEqual(glyphstr(sub.prefix), "")
        self.assertEqual(glyphstr(sub.suffix), "")
    def test_substitute_ligature_chained(self):  # chain to GSUB LookupType 4
        """Chained ligature substitution separates marked glyphs from context."""
        doc = self.parse("feature F {substitute A B f' i' Z by f_i;} F;")
        sub = doc.statements[0].statements[0]
        self.assertIsInstance(sub, ast.LigatureSubstStatement)
        self.assertEqual(glyphstr(sub.glyphs), "f i")
        self.assertEqual(sub.replacement, "f_i")
        self.assertEqual(glyphstr(sub.prefix), "A B")
        self.assertEqual(glyphstr(sub.suffix), "Z")
    def test_substitute_lookups(self):  # GSUB LookupType 6
        """Chain-context sub with explicit lookup references, parsed from a fixture file."""
        doc = Parser(self.getpath("spec5fi1.fea"), GLYPHNAMES).parse()
        [_, _, _, langsys, ligs, sub, feature] = doc.statements
        self.assertEqual(feature.statements[0].lookups, [ligs, None, sub])
        self.assertEqual(feature.statements[1].lookups, [ligs, None, sub])
def test_substitute_missing_by(self):
self.assertRaisesRegex(
FeatureLibError,
'Expected "by", "from" or explicit lookup references',
self.parse, "feature liga {substitute f f i;} liga;")
    def test_subtable(self):
        """The bare 'subtable;' statement is parsed as a SubtableStatement."""
        doc = self.parse("feature test {subtable;} test;")
        s = doc.statements[0].statements[0]
        self.assertIsInstance(s, ast.SubtableStatement)
def test_table_badEnd(self):
self.assertRaisesRegex(
FeatureLibError, 'Expected "GDEF"', self.parse,
"table GDEF {LigatureCaretByPos f_i 400;} ABCD;")
    def test_table_comment(self):
        """Comments are preserved inside every supported table block."""
        for table in "BASE GDEF OS/2 head hhea name vhea".split():
            doc = self.parse("table %s { # Comment\n } %s;" % (table, table))
            comment = doc.statements[0].statements[0]
            self.assertIsInstance(comment, ast.Comment)
            self.assertEqual(comment.text, "# Comment")
def test_table_unsupported(self):
self.assertRaisesRegex(
FeatureLibError, '"table Foo" is not supported', self.parse,
"table Foo {LigatureCaretByPos f_i 400;} Foo;")
    def test_valuerecord_format_a_horizontal(self):
        """Format A in a horizontal feature: the bare number becomes xAdvance."""
        doc = self.parse("feature liga {valueRecordDef 123 foo;} liga;")
        valuedef = doc.statements[0].statements[0]
        value = valuedef.value
        self.assertIsNone(value.xPlacement)
        self.assertIsNone(value.yPlacement)
        self.assertEqual(value.xAdvance, 123)
        self.assertIsNone(value.yAdvance)
        self.assertIsNone(value.xPlaDevice)
        self.assertIsNone(value.yPlaDevice)
        self.assertIsNone(value.xAdvDevice)
        self.assertIsNone(value.yAdvDevice)
        self.assertEqual(valuedef.asFea(), "valueRecordDef 123 foo;")
        self.assertEqual(value.asFea(), "123")
    def test_valuerecord_format_a_vertical(self):
        """Format A in a vertical feature (vkrn): the bare number becomes yAdvance."""
        doc = self.parse("feature vkrn {valueRecordDef 123 foo;} vkrn;")
        valuedef = doc.statements[0].statements[0]
        value = valuedef.value
        self.assertIsNone(value.xPlacement)
        self.assertIsNone(value.yPlacement)
        self.assertIsNone(value.xAdvance)
        self.assertEqual(value.yAdvance, 123)
        self.assertIsNone(value.xPlaDevice)
        self.assertIsNone(value.yPlaDevice)
        self.assertIsNone(value.xAdvDevice)
        self.assertIsNone(value.yAdvDevice)
        self.assertEqual(valuedef.asFea(), "valueRecordDef 123 foo;")
        self.assertEqual(value.asFea(), "123")
    def test_valuerecord_format_a_zero_horizontal(self):
        """A zero format-A value still sets xAdvance (to 0) in horizontal context."""
        doc = self.parse("feature liga {valueRecordDef 0 foo;} liga;")
        valuedef = doc.statements[0].statements[0]
        value = valuedef.value
        self.assertIsNone(value.xPlacement)
        self.assertIsNone(value.yPlacement)
        self.assertEqual(value.xAdvance, 0)
        self.assertIsNone(value.yAdvance)
        self.assertIsNone(value.xPlaDevice)
        self.assertIsNone(value.yPlaDevice)
        self.assertIsNone(value.xAdvDevice)
        self.assertIsNone(value.yAdvDevice)
        self.assertEqual(valuedef.asFea(), "valueRecordDef 0 foo;")
        self.assertEqual(value.asFea(), "0")
    def test_valuerecord_format_a_zero_vertical(self):
        """A zero format-A value still sets yAdvance (to 0) in vertical context."""
        doc = self.parse("feature vkrn {valueRecordDef 0 foo;} vkrn;")
        valuedef = doc.statements[0].statements[0]
        value = valuedef.value
        self.assertIsNone(value.xPlacement)
        self.assertIsNone(value.yPlacement)
        self.assertIsNone(value.xAdvance)
        self.assertEqual(value.yAdvance, 0)
        self.assertIsNone(value.xPlaDevice)
        self.assertIsNone(value.yPlaDevice)
        self.assertIsNone(value.xAdvDevice)
        self.assertIsNone(value.yAdvDevice)
        self.assertEqual(valuedef.asFea(), "valueRecordDef 0 foo;")
        self.assertEqual(value.asFea(), "0")
    def test_valuerecord_format_a_vertical_contexts_(self):
        """All four vertical feature tags must trigger the yAdvance interpretation."""
        for tag in "vkrn vpal vhal valt".split():
            doc = self.parse(
                "feature %s {valueRecordDef 77 foo;} %s;" % (tag, tag))
            value = doc.statements[0].statements[0].value
            if value.yAdvance != 77:
                self.fail(msg="feature %s should be a vertical context "
                          "for ValueRecord format A" % tag)
    def test_valuerecord_format_b(self):
        """Format B: four numbers map to xPlacement/yPlacement/xAdvance/yAdvance."""
        doc = self.parse("feature liga {valueRecordDef <1 2 3 4> foo;} liga;")
        valuedef = doc.statements[0].statements[0]
        value = valuedef.value
        self.assertEqual(value.xPlacement, 1)
        self.assertEqual(value.yPlacement, 2)
        self.assertEqual(value.xAdvance, 3)
        self.assertEqual(value.yAdvance, 4)
        self.assertIsNone(value.xPlaDevice)
        self.assertIsNone(value.yPlaDevice)
        self.assertIsNone(value.xAdvDevice)
        self.assertIsNone(value.yAdvDevice)
        self.assertEqual(valuedef.asFea(), "valueRecordDef <1 2 3 4> foo;")
        self.assertEqual(value.asFea(), "<1 2 3 4>")
def test_valuerecord_format_b_zero(self):
doc = self.parse("feature liga {valueRecordDef <0 0 0 0> foo;} liga;")
valuedef = doc.statements[0].statements[0]
value = valuedef.value
self.assertEqual(value.xPlacement, 0)
self.assertEqual(value.yPlacement, 0)
self.assertEqual(value.xAdvance, 0)
self.assertEqual(value.yAdvance, 0)
self.assertIsNone(value.xPlaDevice)
self.assertIsNone(value.yPlaDevice)
self.assertIsNone(value.xAdvDevice)
self.assertIsNone(value.yAdvDevice)
self.assertEqual(valuedef.asFea(), "valueRecordDef <0 0 0 0> foo;")
self.assertEqual(value.asFea(), "<0 0 0 0>")
def test_valuerecord_format_c(self):
doc = self.parse(
"feature liga {"
" valueRecordDef <"
" 1 2 3 4"
" <device 8 88>"
" <device 11 111, 12 112>"
" <device NULL>"
" <device 33 -113, 44 -114, 55 115>"
" > foo;"
"} liga;")
value = doc.statements[0].statements[0].value
self.assertEqual(value.xPlacement, 1)
self.assertEqual(value.yPlacement, 2)
self.assertEqual(value.xAdvance, 3)
self.assertEqual(value.yAdvance, 4)
self.assertEqual(value.xPlaDevice, ((8, 88),))
self.assertEqual(value.yPlaDevice, ((11, 111), (12, 112)))
self.assertIsNone(value.xAdvDevice)
self.assertEqual(value.yAdvDevice, ((33, -113), (44, -114), (55, 115)))
self.assertEqual(value.asFea(),
"<1 2 3 4 <device 8 88> <device 11 111, 12 112>"
" <device NULL> <device 33 -113, 44 -114, 55 115>>")
def test_valuerecord_format_d(self):
doc = self.parse("feature test {valueRecordDef <NULL> foo;} test;")
value = doc.statements[0].statements[0].value
self.assertFalse(value)
self.assertEqual(value.asFea(), "<NULL>")
def test_valuerecord_named(self):
doc = self.parse("valueRecordDef <1 2 3 4> foo;"
"feature liga {valueRecordDef <foo> bar;} liga;")
value = doc.statements[1].statements[0].value
self.assertEqual(value.xPlacement, 1)
self.assertEqual(value.yPlacement, 2)
self.assertEqual(value.xAdvance, 3)
self.assertEqual(value.yAdvance, 4)
def test_valuerecord_named_unknown(self):
self.assertRaisesRegex(
FeatureLibError, "Unknown valueRecordDef \"unknown\"",
self.parse, "valueRecordDef <unknown> foo;")
def test_valuerecord_scoping(self):
[foo, liga, smcp] = self.parse(
"valueRecordDef 789 foo;"
"feature liga {valueRecordDef <foo> bar;} liga;"
"feature smcp {valueRecordDef <foo> bar;} smcp;"
).statements
self.assertEqual(foo.value.xAdvance, 789)
self.assertEqual(liga.statements[0].value.xAdvance, 789)
self.assertEqual(smcp.statements[0].value.xAdvance, 789)
def test_valuerecord_device_value_out_of_range(self):
self.assertRaisesRegex(
FeatureLibError, r"Device value out of valid range \(-128..127\)",
self.parse,
"valueRecordDef <1 2 3 4 <device NULL> <device NULL> "
"<device NULL> <device 11 128>> foo;")
def test_languagesystem(self):
[langsys] = self.parse("languagesystem latn DEU;").statements
self.assertEqual(langsys.script, "latn")
self.assertEqual(langsys.language, "DEU ")
[langsys] = self.parse("languagesystem DFLT DEU;").statements
self.assertEqual(langsys.script, "DFLT")
self.assertEqual(langsys.language, "DEU ")
self.assertRaisesRegex(
FeatureLibError,
'"dflt" is not a valid script tag; use "DFLT" instead',
self.parse, "languagesystem dflt dflt;")
self.assertRaisesRegex(
FeatureLibError,
'"DFLT" is not a valid language tag; use "dflt" instead',
self.parse, "languagesystem latn DFLT;")
self.assertRaisesRegex(
FeatureLibError, "Expected ';'",
self.parse, "languagesystem latn DEU")
self.assertRaisesRegex(
FeatureLibError, "longer than 4 characters",
self.parse, "languagesystem foobar DEU;")
self.assertRaisesRegex(
FeatureLibError, "longer than 4 characters",
self.parse, "languagesystem latn FOOBAR;")
def test_empty_statement_ignored(self):
doc = self.parse("feature test {;} test;")
self.assertFalse(doc.statements[0].statements)
doc = self.parse(";;;")
self.assertFalse(doc.statements)
for table in "BASE GDEF OS/2 head hhea name vhea".split():
doc = self.parse("table %s { ;;; } %s;" % (table, table))
self.assertEqual(doc.statements[0].statements, [])
def parse(self, text, glyphNames=GLYPHNAMES, followIncludes=True):
featurefile = UnicodeIO(text)
p = Parser(featurefile, glyphNames, followIncludes=followIncludes)
return p.parse()
@staticmethod
def getpath(testfile):
path, _ = os.path.split(__file__)
return os.path.join(path, "data", testfile)
class SymbolTableTest(unittest.TestCase):
    def test_scopes(self):
        """Inner scopes shadow outer definitions until the scope is left."""
        table = SymbolTable()
        table.define("foo", 23)
        self.assertEqual(table.resolve("foo"), 23)
        table.enter_scope()
        self.assertEqual(table.resolve("foo"), 23)  # outer value still visible
        table.define("foo", 42)                     # shadow it
        self.assertEqual(table.resolve("foo"), 42)
        table.exit_scope()
        self.assertEqual(table.resolve("foo"), 23)  # shadow discarded

    def test_resolve_undefined(self):
        """Unknown names resolve to None rather than raising."""
        self.assertIsNone(SymbolTable().resolve("abc"))
if __name__ == "__main__":
    # Run the test suite and propagate its exit status to the shell.
    import sys
    sys.exit(unittest.main())
| 43.585882 | 82 | 0.598899 |
89d31863ac78a56f2b97d384efc6835af761feaf | 1,415 | py | Python | python/az/aro/azext_aro/_rbac.py | jim-minter/rp | e78a02c9c89036f1c82080b4b8c2166a32ccedf0 | [
"Apache-2.0"
] | 1 | 2019-11-05T17:12:50.000Z | 2019-11-05T17:12:50.000Z | python/az/aro/azext_aro/_rbac.py | jim-minter/rp | e78a02c9c89036f1c82080b4b8c2166a32ccedf0 | [
"Apache-2.0"
] | 41 | 2019-11-19T18:38:52.000Z | 2019-12-24T15:13:15.000Z | python/az/aro/azext_aro/_rbac.py | jim-minter/rp | e78a02c9c89036f1c82080b4b8c2166a32ccedf0 | [
"Apache-2.0"
] | 4 | 2019-11-08T15:40:56.000Z | 2019-11-27T05:07:08.000Z | import uuid
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.cli.core.commands.client_factory import get_subscription_id
from azure.cli.core.profiles import get_sdk
from azure.cli.core.profiles import ResourceType
from msrestazure.tools import resource_id
CONTRIBUTOR = "b24988ac-6180-42a0-ab88-20f7382dd24c"
def assign_contributor_to_vnet(cli_ctx, vnet, object_id):
    """Grant the built-in Contributor role on *vnet* to a service principal.

    The operation is idempotent: if an assignment with the same role and
    principal already exists at the vnet scope, nothing new is created.
    """
    auth_client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_AUTHORIZATION)

    RoleAssignmentCreateParameters = get_sdk(
        cli_ctx, ResourceType.MGMT_AUTHORIZATION,
        'RoleAssignmentCreateParameters', mod='models',
        operation_group='role_assignments')

    role_definition_id = resource_id(
        subscription=get_subscription_id(cli_ctx),
        namespace='Microsoft.Authorization',
        type='roleDefinitions',
        name=CONTRIBUTOR,
    )

    # Skip creation when an equivalent assignment is already in place.
    existing = auth_client.role_assignments.list_for_scope(vnet)
    if any(a.role_definition_id.lower() == role_definition_id.lower() and
           a.principal_id.lower() == object_id.lower() for a in existing):
        return

    auth_client.role_assignments.create(vnet, uuid.uuid4(), RoleAssignmentCreateParameters(
        role_definition_id=role_definition_id,
        principal_id=object_id,
        principal_type="ServicePrincipal",
    ))
7d45b76000a84877e068154cc8f74844fe270c16 | 6,214 | py | Python | Supervised/video_classification/utils/videos_to_frame_folders.py | Kebniss/AutoDetect | 44ca4d6930ef5fbf044ebeed5c9fd925f04bc1a8 | [
"MIT"
] | 1 | 2019-07-25T02:16:32.000Z | 2019-07-25T02:16:32.000Z | Supervised/video_classification/utils/videos_to_frame_folders.py | Kebniss/AutoDetect | 44ca4d6930ef5fbf044ebeed5c9fd925f04bc1a8 | [
"MIT"
] | null | null | null | Supervised/video_classification/utils/videos_to_frame_folders.py | Kebniss/AutoDetect | 44ca4d6930ef5fbf044ebeed5c9fd925f04bc1a8 | [
"MIT"
] | null | null | null | """
Given a folder of mp4, saves each of them in the FrameFolder format.
If this video's name was "abcde", this would be the structure:
root/abcde/data.csv
root/abcde/images/1.png
root/abcde/images/2.png
where data.csv contains all the columns beyond image (ie the label).
"""
import cv2
import PIL
from PIL import Image
from pathlib import Path
from torchvision.transforms import CenterCrop, Compose
from IPython.display import Image as IPythonImage
import os
from tqdm import tqdm
import json
import pandas as pd
class ToPIL(object):
    """
    Convert a clip (sequence of frames) to PIL images.

    Frames that are already ``PIL.Image.Image`` instances are returned
    unchanged; otherwise each frame (e.g. a numpy array) is converted
    with ``PIL.Image.fromarray``.
    """
    def __init__(self):
        pass

    def __call__(self, clip):
        # Bug fix: the original returned None when the clip was already
        # made of PIL images; pass the clip through unchanged instead.
        if isinstance(clip[0], PIL.Image.Image):
            return clip
        return [PIL.Image.fromarray(img) for img in clip]
class CropOnCenter(object):
    """Center-crop an image to a fixed size, converting to PIL first."""

    def __init__(self, size):
        self.size = size
        self.crop = CenterCrop(size)

    def __call__(self, img):
        # Non-PIL inputs (e.g. numpy arrays) are converted before cropping.
        if not isinstance(img, PIL.Image.Image):
            img = PIL.Image.fromarray(img)
        return self.crop(img)
class ScaleWidth(object):
    """Resize an image to a target width, keeping its aspect ratio."""

    def __init__(self, target_width, method=Image.BICUBIC):
        self.target_width = target_width
        self.method = method

    def __call__(self, img):
        return self.scale_width(img)

    def scale_width(self, img):
        # Non-PIL inputs (e.g. numpy arrays) are converted first.
        if not isinstance(img, PIL.Image.Image):
            img = PIL.Image.fromarray(img)
        width, height = img.size
        if width == self.target_width:
            return img
        new_height = int(self.target_width * height / width)
        return img.resize((self.target_width, new_height), self.method)
class VideoProcessPipeline:
    """Read a video, resample it to a lower frame rate, preprocess each
    kept frame (center crop + width rescale), and attach a
    positive/negative label per frame from annotated anomaly intervals.

    Videos with fps < 29 or without an entry in ``videos_labels`` are
    skipped: ``self.labeled_frames`` stays empty, which makes
    ``save_to_folder`` a no-op for them.
    """

    def __init__(self,
                 video_path,
                 videos_labels=None,
                 new_fps=10,
                 scale_to_width=640):
        # Stays empty unless the video is usable AND has labels.
        self.labeled_frames = []
        self.video_path = video_path
        cv2_video = cv2.VideoCapture(str(video_path))
        self.video = cv2_video
        self.video_fps = cv2_video.get(cv2.CAP_PROP_FPS)
        self.video_name = self.video_path.stem
        self.preprocess = Compose([
            CropOnCenter((768, 1024)),
            ScaleWidth(scale_to_width).scale_width
        ])
        if self.video_fps < 29:  # Some videos have super low fps, we just filter them out
            return
        # Keep one frame out of every `save_every` to approximate new_fps.
        self.save_every = self.video_fps // new_fps
        self.frames = []
        if self.video_name not in videos_labels:  # No label, no point in processing
            return
        # List of (start_frame, end_frame) anomaly intervals for this video.
        self.frame_labels = videos_labels[self.video_name]
        # A frame is "positive" if its id falls inside any anomaly interval.
        self.labeled_frames = [{
            'frame_id': frame_id,
            'frame': frame,
            'label': ("positive" if any(start <= frame_id <= end
                                        for (start, end) in self.frame_labels)
                      else "negative")
        } for (frame, frame_id) in self.read_video_frames()]

    def read_video_frames(self):
        """Decode the video, keeping every ``save_every``-th frame.

        Returns a list of ``(preprocessed frame, 1-based frame index)``
        tuples; frames are converted BGR -> RGB before preprocessing.
        """
        frames = []
        success, image = self.video.read()
        i = 1
        while success:
            if i % self.save_every == 0:
                image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
                image = self.preprocess(image)
                frames.append((image, i))
            success, image = self.video.read()
            i += 1
        print(f'Processed {i // self.save_every} frames')
        return frames

    def save_to_folder(self, root):
        """
        Save this video as a FrameFolder inside of root.
        If this video's name was "abcde", this would be the structure:
        root/abcde/data.csv
        root/abcde/images/1.png
        root/abcde/images/2.png
        etc
        """
        if not self.labeled_frames:
            return
        root = Path(root)
        video_root = root / self.video_name
        os.makedirs(video_root, exist_ok=True)
        os.makedirs(video_root / "images", exist_ok=True)
        for frame_dict in self.labeled_frames:
            img = frame_dict['frame']
            frame_dict['image_path'] = f"images/{frame_dict['frame_id']}.png"
            img.save(video_root / frame_dict['image_path'], "PNG")
            # Drop the image object so only serializable metadata remains.
            frame_dict.pop('frame', None)
        df = pd.DataFrame(self.labeled_frames)
        df[['frame_id', 'image_path', 'label']].to_csv(
            video_root / "data.csv", index=None)

    def as_gif(self, duration=100):
        """
        Helpful to visualize it in a notebook
        """
        self.labeled_frames[0]['frame'].save(
            "/tmp/out.gif",
            save_all=True,
            append_images=[f['frame'] for f in self.labeled_frames[1:]],
            duration=duration,
            loop=0,
        )
        return IPythonImage(filename="/tmp/out.gif")
# Source (raw mp4) and target (FrameFolder output) directory layout.
SOURCE_ROOT = Path("/Users/ludovica/Documents/Insight/data/source_data/")
SOURCE_TRAIN_ROOT = SOURCE_ROOT / "train"
SOURCE_VALID_ROOT = SOURCE_ROOT / "validation"
TARGET_ROOT = Path("/Users/ludovica/Documents/Insight/data/frame_data/")
TARGET_TRAIN_ROOT = TARGET_ROOT / "train"
TARGET_VALID_ROOT = TARGET_ROOT / "validation"

# labels.json holds per-video anomaly annotations; reshape them into
# {video stem: [(start_frame, end_frame), ...]}.
with open(SOURCE_ROOT / "labels.json", 'r') as fin:
    labels = json.load(fin)
labels = {
    Path(f['filename']).stem: [(t['start_frame_count'],
                                t['end_frame_count'])
                               for t in f['anomalies']]
    for f in labels
}

train_video_paths = [p for p in SOURCE_TRAIN_ROOT.glob("**/*.mp4")]
validation_video_paths = [p for p in SOURCE_VALID_ROOT.glob("**/*.mp4")]

# Process each split, skipping videos whose output folder already exists
# so the script can be re-run after an interruption.
for video_path in tqdm(train_video_paths):
    if os.path.isdir(TARGET_TRAIN_ROOT / video_path.stem):
        continue  # already done, skip
    vv = VideoProcessPipeline(
        video_path, videos_labels=labels, new_fps=10, scale_to_width=640)
    vv.save_to_folder(TARGET_TRAIN_ROOT)

for video_path in tqdm(validation_video_paths):
    if os.path.isdir(TARGET_VALID_ROOT / video_path.stem):
        continue  # already done
    vv = VideoProcessPipeline(
        video_path, videos_labels=labels, new_fps=10, scale_to_width=640)
    vv.save_to_folder(TARGET_VALID_ROOT)
ce57980cf0331dc9c6de18f9bcc0778359be4de7 | 5,399 | py | Python | Python/biopsy/gapped_pssms/hmm.py | JohnReid/biopsy | 1eeb714ba5b53f2ecf776d865d32e2078cbc0338 | [
"MIT"
] | null | null | null | Python/biopsy/gapped_pssms/hmm.py | JohnReid/biopsy | 1eeb714ba5b53f2ecf776d865d32e2078cbc0338 | [
"MIT"
] | null | null | null | Python/biopsy/gapped_pssms/hmm.py | JohnReid/biopsy | 1eeb714ba5b53f2ecf776d865d32e2078cbc0338 | [
"MIT"
] | null | null | null | #
# Copyright John Reid 2007
#
import _gapped_pssms_hmm as hmm
def weblogo_data_from_dist( dist ):
    """Data for weblogo from pssm distribution"""
    import weblogolib
    import corebio.seq
    # Scale frequencies up so they can be treated as counts.
    counts = dist * 100
    return weblogolib.LogoData.from_counts(
        corebio.seq.unambiguous_dna_alphabet, counts )
def weblogo_from_dist( dist, filename = 'logo.eps' ):
    """Generate a weblogo EPS file from a pssm distribution.

    Args:
        dist: matrix of base frequencies, one row per position.
        filename: path of the EPS file to write.
    """
    import weblogolib
    data = weblogo_data_from_dist( dist )
    options = weblogolib.LogoOptions()
    options.size = weblogolib.LogoSize(
        stack_width = 5.4*12,
        stack_height = 5.4*12*5
    )
    options.color_scheme = weblogolib.std_color_schemes[ "classic" ]
    format = weblogolib.LogoFormat( data, options )
    # Bug fix: the original leaked the output file handle; make sure it
    # is closed even if the formatter raises.
    out = open( filename, 'w' )
    try:
        weblogolib.eps_formatter( data, format, out )
    finally:
        out.close()
def convert_format( source, dest, convert_args = '' ):
    """Converts an image from one format to another
    Uses imagemagick convert program which must be installed
    Args:
        source: input image file
        dest: output image file
    """
    import os
    # NOTE: paths are interpolated into a shell command unquoted beyond
    # double quotes, so they must not contain shell metacharacters.
    cmd = 'convert.exe %s "%s" "%s"' % ( convert_args, source, dest )
    exit_code = os.system( cmd )
    if exit_code:
        raise RuntimeError(
            'Could not convert "%s" to "%s".\nCommand: %s\nStatus: %d'
            % (
                source,
                dest,
                cmd,
                exit_code
            )
        )
def format_weblogo_from_dist( dist, basename, ext, convert_args = '' ):
    """Generate a weblogo from a pssm distribution in format defined by ext"""
    import os.path
    # Make sure the target directory exists before writing into it.
    parent = os.path.dirname( basename )
    if parent != '' and not os.path.exists( parent ):
        os.makedirs( parent )
    eps_file = basename + '.eps'
    converted_file = '%s.%s' % ( basename, ext )
    if eps_file == converted_file:
        raise RuntimeError( 'Extension should not be same as eps' )
    # Write EPS first, convert it, then drop the intermediate file.
    weblogo_from_dist( dist, eps_file )
    convert_format( eps_file, converted_file, convert_args )
    os.remove( eps_file )
def write_model_svg(
        model,
        name = 'model',
        dir = '.',
        show_rev_comp = False ,
        show_dists = True ,
        edge_lengths = 1.5
):
    """Render the HMM's state graph as an SVG via graphviz neato.

    One vertex per HMM state (gap states filled gray, reverse-complement
    states as diamonds, the background state as a square), one edge per
    predecessor transition labelled with its probability.  When
    show_dists is true a weblogo PNG of each state's emission
    distribution is attached to its vertex.

    NOTE(review): the edge_lengths parameter is unused; edge lengths are
    hard-coded to 1.5 below.
    """
    print 'Writing model as svg: %s' % name
    p_r_given_pre = model.p_r_given_predecessor
    import boost.graph as bgl
    import os.path, os, numpy
    state_map = hmm.StateMap( model.data.K )
    pre = state_map.predecessor_states()
    g = bgl.Digraph()
    # Graphviz vertex attributes, stored as boost.graph property maps.
    state = g.add_vertex_property( 's', 'integer' )
    label = g.add_vertex_property( 'label', 'string' )
    shapes = g.add_vertex_property( 'shape', 'string' )
    if show_dists: shapefile = g.add_vertex_property( 'shapefile', 'string' )
    style = g.add_vertex_property( 'style', 'string' )
    fillcolor = g.add_vertex_property( 'fillcolor', 'string' )
    # vertices[s] is the vertex for state s, or None when it is a
    # reverse-complement state that we chose not to draw.
    vertices = list()
    for s in xrange( state_map.S ):
        # check we want to add this vertex - i.e. check its rev comp attr
        if not show_rev_comp and state_map.c( s ):
            vertices.append( None )
            continue
        # add vertex
        v = g.add_vertex()
        vertices.append( v )
        state[ v ] = s
        label[ v ] = 'k=%d\\ns=%s' % (state_map.k( s ), s)
        if state_map.b( s ): shapes[ v ] = 'rect'
        elif state_map.c( s ): shapes[ v ] = 'diamond'
        else: shapes[ v ] = 'ellipse'
        if state_map.g( s ):
            style[ v ] = 'filled'
            fillcolor[ v ] = 'lightgray'
        # make an image of the distribution for this node
        if show_dists:
            basename = os.path.join( name, 'emission_%d' % s )
            m = state_map.m( s )
            dist = model.var_dist.eta[ m:m+1 ]
            # is it a rev_comp base?
            if state_map.c( s ):
                # Reverse-complement: flip the A/C/G/T columns.
                dist = numpy.array( [ [
                    dist[0][3],
                    dist[0][2],
                    dist[0][1],
                    dist[0][0] ] ] )
            format_weblogo_from_dist( dist, os.path.join( dir, basename ), 'png' )
            shapefile[ v ] = basename + '.png'
    # Add one edge per (predecessor, state) pair, labelled with the
    # transition probability.
    edge_label = g.add_edge_property( 'label', 'string' )
    edge_len = g.add_edge_property( 'len', 'float' )
    for s in xrange( state_map.S ):
        if not show_rev_comp and state_map.c( s ): continue
        for p in pre[s]:
            if not show_rev_comp and state_map.c( int( p ) ): continue
            e = g.add_edge( vertices[p], vertices[s] )
            edge_label[ e ] = '%.3f' % p_r_given_pre[p,s]
            edge_len[ e ] = 1.5
            assert state[ vertices[ p ] ] == p
            assert state[ vertices[ s ] ] == s
    # write as SVG
    dot_filename = '%s.dot' % name
    dot_file = os.path.join( dir, dot_filename )
    svg_filename = '%s.svg' % name
    g.write_graphviz( dot_file )
    label = "\\n".join(
        (
            "Gaps are gray.",
            "Reverse complement are diamonds.",
            "Background state is square",
        )
    )
    # neato resolves relative shapefile paths against the cwd, so run it
    # from `dir` and restore the working directory afterwards.
    wd = os.getcwd()
    os.chdir( dir )
    try:
        os.system(
            "neato %s -Tsvg -o%s -Goverlap=scale \"-Glabel=%s\"" % (
                dot_filename,
                svg_filename,
                label
            )
        )
    finally:
        os.chdir( wd )
| 32.920732 | 82 | 0.543064 |
95b577500e44f28d5875895108619d3d2a40b31e | 916 | py | Python | prism/cmds/image/pfp.py | ii-Python/Prism-v3 | 15a43161b41117529c915726e6270259f05d187d | [
"MIT"
] | 3 | 2021-11-26T22:08:11.000Z | 2021-12-23T21:42:22.000Z | prism/cmds/image/pfp.py | wannurhadi/Prism-v3 | 514f8d17072bf208c42e68391bce471c7d608269 | [
"MIT"
] | 1 | 2021-07-07T22:37:10.000Z | 2021-07-07T22:40:11.000Z | prism/cmds/image/pfp.py | wannurhadi/Prism-v3 | 514f8d17072bf208c42e68391bce471c7d608269 | [
"MIT"
] | 1 | 2021-12-23T21:42:24.000Z | 2021-12-23T21:42:24.000Z | # Copyright 2021-xx iiPython
# Modules
import discord
from discord.ext import commands
from discord.commands import Option
# Command class
class Avatar(commands.Cog):
    """Cog exposing an /avatar slash command showing a user's profile picture."""

    def __init__(self, bot) -> None:
        self.bot = bot
        self.core = bot.core

    @commands.slash_command(description = "Shows you somebodies profile picture.")
    async def avatar(self, ctx, user: Option(discord.Member, "The user with the profile picture", required = False)) -> any:
        # Bug fix: the Option kwarg was misspelled "reuqired", so the
        # option was not actually marked optional.
        # Default to the command invoker when no user is given.
        user = user or ctx.author

        # Construct embed
        embed = self.core.embed(
            title = str(user),
            description = f"[[Raw Image]({user.avatar.url})] [[{user.name}'s Profile](https://discord.com/users/{user.id})]",
            footer = ctx
        )
        embed.set_image(url = user.avatar.url)
        return await ctx.respond(embed = embed)
# Link
def setup(bot) -> None:
    """Extension entry point: register the Avatar cog with the bot."""
    return bot.add_cog(Avatar(bot))
| 30.533333 | 125 | 0.64083 |
99bf5fb6da914cabf0b1c48d6e7449aee24fc517 | 3,080 | py | Python | app/app/settings.py | manishnymble19/recipe-app-api | 5461442ca114fc29356dc68c7fd8dc030190be51 | [
"MIT"
] | null | null | null | app/app/settings.py | manishnymble19/recipe-app-api | 5461442ca114fc29356dc68c7fd8dc030190be51 | [
"MIT"
] | null | null | null | app/app/settings.py | manishnymble19/recipe-app-api | 5461442ca114fc29356dc68c7fd8dc030190be51 | [
"MIT"
] | null | null | null | """
Django settings for app project.
Generated by 'django-admin startproject' using Django 2.1.15.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 't@_6^^3o92*#yw46d0mwn_58dk6#dp4r7ppht3ec@4mxyw7z$2'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
| 25.454545 | 91 | 0.696429 |
a058b31493ed3189d6d7092a39940517a7171b5f | 8,682 | py | Python | yandex/cloud/vpc/v1/network_service_pb2_grpc.py | kbespalov/python-sdk | e86563ee850e46a35b4c84053ecd4affdf66a963 | [
"MIT"
] | null | null | null | yandex/cloud/vpc/v1/network_service_pb2_grpc.py | kbespalov/python-sdk | e86563ee850e46a35b4c84053ecd4affdf66a963 | [
"MIT"
] | null | null | null | yandex/cloud/vpc/v1/network_service_pb2_grpc.py | kbespalov/python-sdk | e86563ee850e46a35b4c84053ecd4affdf66a963 | [
"MIT"
] | null | null | null | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from yandex.cloud.operation import operation_pb2 as yandex_dot_cloud_dot_operation_dot_operation__pb2
from yandex.cloud.vpc.v1 import network_pb2 as yandex_dot_cloud_dot_vpc_dot_v1_dot_network__pb2
from yandex.cloud.vpc.v1 import network_service_pb2 as yandex_dot_cloud_dot_vpc_dot_v1_dot_network__service__pb2
class NetworkServiceStub(object):
    """A set of methods for managing Network resources.
    """

    # Generated gRPC client stub: do not hand-edit beyond comments.
    def __init__(self, channel):
        """Constructor.
        Args:
            channel: A grpc.Channel.
        """
        # One unary-unary callable per RPC method; request/response
        # (de)serializers come from the generated pb2 modules.
        self.Get = channel.unary_unary(
            '/yandex.cloud.vpc.v1.NetworkService/Get',
            request_serializer=yandex_dot_cloud_dot_vpc_dot_v1_dot_network__service__pb2.GetNetworkRequest.SerializeToString,
            response_deserializer=yandex_dot_cloud_dot_vpc_dot_v1_dot_network__pb2.Network.FromString,
            )
        self.List = channel.unary_unary(
            '/yandex.cloud.vpc.v1.NetworkService/List',
            request_serializer=yandex_dot_cloud_dot_vpc_dot_v1_dot_network__service__pb2.ListNetworksRequest.SerializeToString,
            response_deserializer=yandex_dot_cloud_dot_vpc_dot_v1_dot_network__service__pb2.ListNetworksResponse.FromString,
            )
        self.Create = channel.unary_unary(
            '/yandex.cloud.vpc.v1.NetworkService/Create',
            request_serializer=yandex_dot_cloud_dot_vpc_dot_v1_dot_network__service__pb2.CreateNetworkRequest.SerializeToString,
            response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
            )
        self.Update = channel.unary_unary(
            '/yandex.cloud.vpc.v1.NetworkService/Update',
            request_serializer=yandex_dot_cloud_dot_vpc_dot_v1_dot_network__service__pb2.UpdateNetworkRequest.SerializeToString,
            response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
            )
        self.Delete = channel.unary_unary(
            '/yandex.cloud.vpc.v1.NetworkService/Delete',
            request_serializer=yandex_dot_cloud_dot_vpc_dot_v1_dot_network__service__pb2.DeleteNetworkRequest.SerializeToString,
            response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
            )
        self.ListSubnets = channel.unary_unary(
            '/yandex.cloud.vpc.v1.NetworkService/ListSubnets',
            request_serializer=yandex_dot_cloud_dot_vpc_dot_v1_dot_network__service__pb2.ListNetworkSubnetsRequest.SerializeToString,
            response_deserializer=yandex_dot_cloud_dot_vpc_dot_v1_dot_network__service__pb2.ListNetworkSubnetsResponse.FromString,
            )
        self.ListOperations = channel.unary_unary(
            '/yandex.cloud.vpc.v1.NetworkService/ListOperations',
            request_serializer=yandex_dot_cloud_dot_vpc_dot_v1_dot_network__service__pb2.ListNetworkOperationsRequest.SerializeToString,
            response_deserializer=yandex_dot_cloud_dot_vpc_dot_v1_dot_network__service__pb2.ListNetworkOperationsResponse.FromString,
            )
        self.Move = channel.unary_unary(
            '/yandex.cloud.vpc.v1.NetworkService/Move',
            request_serializer=yandex_dot_cloud_dot_vpc_dot_v1_dot_network__service__pb2.MoveNetworkRequest.SerializeToString,
            response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
            )
class NetworkServiceServicer(object):
    """A set of methods for managing Network resources.
    """

    # Generated gRPC server base class: subclasses override the methods
    # below; unimplemented ones answer with status UNIMPLEMENTED.
    def Get(self, request, context):
        """Returns the specified Network resource.
        Get the list of available Network resources by making a [List] request.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def List(self, request, context):
        """Retrieves the list of Network resources in the specified folder.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def Create(self, request, context):
        """Creates a network in the specified folder using the data specified in the request.
        Method starts an asynchronous operation that can be cancelled while it is in progress.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def Update(self, request, context):
        """Updates the specified network.
        Method starts an asynchronous operation that can be cancelled while it is in progress.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def Delete(self, request, context):
        """Deletes the specified network.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def ListSubnets(self, request, context):
        """Lists subnets from the specified network.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def ListOperations(self, request, context):
        """Lists operations for the specified network.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def Move(self, request, context):
        """Move network to another folder.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_NetworkServiceServicer_to_server(servicer, server):
    """Register *servicer*'s RPC handlers with a grpc *server* (generated)."""
    rpc_method_handlers = {
        'Get': grpc.unary_unary_rpc_method_handler(
            servicer.Get,
            request_deserializer=yandex_dot_cloud_dot_vpc_dot_v1_dot_network__service__pb2.GetNetworkRequest.FromString,
            response_serializer=yandex_dot_cloud_dot_vpc_dot_v1_dot_network__pb2.Network.SerializeToString,
            ),
        'List': grpc.unary_unary_rpc_method_handler(
            servicer.List,
            request_deserializer=yandex_dot_cloud_dot_vpc_dot_v1_dot_network__service__pb2.ListNetworksRequest.FromString,
            response_serializer=yandex_dot_cloud_dot_vpc_dot_v1_dot_network__service__pb2.ListNetworksResponse.SerializeToString,
            ),
        'Create': grpc.unary_unary_rpc_method_handler(
            servicer.Create,
            request_deserializer=yandex_dot_cloud_dot_vpc_dot_v1_dot_network__service__pb2.CreateNetworkRequest.FromString,
            response_serializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.SerializeToString,
            ),
        'Update': grpc.unary_unary_rpc_method_handler(
            servicer.Update,
            request_deserializer=yandex_dot_cloud_dot_vpc_dot_v1_dot_network__service__pb2.UpdateNetworkRequest.FromString,
            response_serializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.SerializeToString,
            ),
        'Delete': grpc.unary_unary_rpc_method_handler(
            servicer.Delete,
            request_deserializer=yandex_dot_cloud_dot_vpc_dot_v1_dot_network__service__pb2.DeleteNetworkRequest.FromString,
            response_serializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.SerializeToString,
            ),
        'ListSubnets': grpc.unary_unary_rpc_method_handler(
            servicer.ListSubnets,
            request_deserializer=yandex_dot_cloud_dot_vpc_dot_v1_dot_network__service__pb2.ListNetworkSubnetsRequest.FromString,
            response_serializer=yandex_dot_cloud_dot_vpc_dot_v1_dot_network__service__pb2.ListNetworkSubnetsResponse.SerializeToString,
            ),
        'ListOperations': grpc.unary_unary_rpc_method_handler(
            servicer.ListOperations,
            request_deserializer=yandex_dot_cloud_dot_vpc_dot_v1_dot_network__service__pb2.ListNetworkOperationsRequest.FromString,
            response_serializer=yandex_dot_cloud_dot_vpc_dot_v1_dot_network__service__pb2.ListNetworkOperationsResponse.SerializeToString,
            ),
        'Move': grpc.unary_unary_rpc_method_handler(
            servicer.Move,
            request_deserializer=yandex_dot_cloud_dot_vpc_dot_v1_dot_network__service__pb2.MoveNetworkRequest.FromString,
            response_serializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.SerializeToString,
            ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
        'yandex.cloud.vpc.v1.NetworkService', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
| 50.476744 | 136 | 0.785648 |
8c5ff9aae97d611cdd357635b334ed8a5699602f | 3,374 | py | Python | Networks/EfficientNet_yy.py | XLPRUtils/XLPR_Classification | c8a3a574013858e23d4722dd03c1d9fa59a41c0e | [
"MIT"
] | 2 | 2020-11-17T08:57:01.000Z | 2021-06-30T00:42:53.000Z | Networks/EfficientNet_yy.py | XLPRUtils/XLPR_Classification | c8a3a574013858e23d4722dd03c1d9fa59a41c0e | [
"MIT"
] | null | null | null | Networks/EfficientNet_yy.py | XLPRUtils/XLPR_Classification | c8a3a574013858e23d4722dd03c1d9fa59a41c0e | [
"MIT"
] | null | null | null | '''EfficientNet in PyTorch.
Paper: "EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks".
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class Block(nn.Module):
    '''expand + depthwise + pointwise + squeeze-excitation'''

    def __init__(self, in_planes, out_planes, expansion, stride):
        super(Block, self).__init__()
        self.stride = stride
        hidden = expansion * in_planes
        # 1x1 expansion convolution
        self.conv1 = nn.Conv2d(in_planes, hidden, kernel_size=1,
                               stride=1, padding=0, bias=False)
        self.bn1 = nn.BatchNorm2d(hidden)
        # 3x3 depthwise convolution (groups == channels)
        self.conv2 = nn.Conv2d(hidden, hidden, kernel_size=3, stride=stride,
                               padding=1, groups=hidden, bias=False)
        self.bn2 = nn.BatchNorm2d(hidden)
        # 1x1 projection convolution
        self.conv3 = nn.Conv2d(hidden, out_planes, kernel_size=1,
                               stride=1, padding=0, bias=False)
        self.bn3 = nn.BatchNorm2d(out_planes)

        # Identity shortcut, projected when the channel count changes.
        if stride == 1 and in_planes != out_planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, out_planes, kernel_size=1,
                          stride=1, padding=0, bias=False),
                nn.BatchNorm2d(out_planes),
            )
        else:
            self.shortcut = nn.Sequential()

        # Squeeze-excitation layers
        self.fc1 = nn.Conv2d(out_planes, out_planes // 16, kernel_size=1)
        self.fc2 = nn.Conv2d(out_planes // 16, out_planes, kernel_size=1)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        identity = self.shortcut(x) if self.stride == 1 else out
        # Squeeze-excitation: global pool -> bottleneck -> sigmoid gate
        gate = F.avg_pool2d(out, out.size(2))
        gate = F.relu(self.fc1(gate))
        gate = self.fc2(gate).sigmoid()
        return out * gate + identity
class EfficientNet(nn.Module):
    """EfficientNet backbone assembled from MBConv-style ``Block``s.

    ``cfg`` rows are ``(expansion, out_planes, num_blocks, stride)``.
    """

    def __init__(self, cfg, num_classes=10):
        super(EfficientNet, self).__init__()
        self.cfg = cfg
        # Stem: 3x3 convolution that keeps the spatial size.
        self.conv1 = nn.Conv2d(3, 32, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(32)
        self.layers = self._make_layers(in_planes=32)
        self.linear = nn.Linear(cfg[-1][1] * 7 * 7, num_classes)

    def _make_layers(self, in_planes):
        blocks = []
        for expansion, out_planes, num_blocks, stride in self.cfg:
            # Only the first block of each stage may downsample.
            for block_stride in [stride] + [1] * (num_blocks - 1):
                blocks.append(
                    Block(in_planes, out_planes, expansion, block_stride))
                in_planes = out_planes
        return nn.Sequential(*blocks)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layers(out)
        out = out.view(out.size(0), -1)
        return self.linear(out)
def EfficientNetB0(num_classes=10):
    """Build an EfficientNet-B0 style model.

    Args:
        num_classes: size of the classifier output layer. Defaults to 10
            (matching EfficientNet's own default) so the module-level
            smoke test can construct a model without arguments; existing
            callers that pass a value are unaffected.

    Returns:
        EfficientNet: the configured network.
    """
    # Each tuple is one stage: (expansion, out_planes, num_blocks, stride).
    cfg = [(1, 16, 1, 2),
           (6, 24, 2, 1),
           (6, 40, 2, 2),
           (6, 80, 3, 2),
           (6, 112, 3, 1),
           (6, 192, 4, 2),
           (6, 320, 1, 2)]
    return EfficientNet(cfg, num_classes=num_classes)
def test():
    """Smoke test: run one random batch through EfficientNetB0.

    Fixes two bugs in the original: EfficientNetB0 was called without
    its required num_classes argument, and the 32x32 input was
    incompatible with the classifier — the net downsamples by 32x in
    total, and ``Linear(cfg[-1][1] * 7 * 7, ...)`` expects a 7x7 final
    feature map, i.e. a 224x224 input.
    """
    net = EfficientNetB0(num_classes=10)
    x = torch.randn(2, 3, 224, 224)
    y = net(x)
    print(y.shape)
# test()
| 33.74 | 84 | 0.557795 |
e33f698fc3fafa2d384ca68126b54a912601fb22 | 2,740 | py | Python | modules/generate_thumbnails.py | zamanianlab/wrmXpress | a40b3e7d66c3ca4e319ad268fd5c0bf0de036d16 | [
"MIT"
] | null | null | null | modules/generate_thumbnails.py | zamanianlab/wrmXpress | a40b3e7d66c3ca4e319ad268fd5c0bf0de036d16 | [
"MIT"
] | 1 | 2022-01-24T17:31:03.000Z | 2022-01-24T17:31:03.000Z | modules/generate_thumbnails.py | zamanianlab/wrmXpress | a40b3e7d66c3ca4e319ad268fd5c0bf0de036d16 | [
"MIT"
] | null | null | null | import cv2
import numpy as np
from skimage.transform import rescale
from PIL import Image, ImageDraw
from matplotlib import cm
def generate_thumbnails(g, type):
    """Stitch per-well images of a plate into one thumbnail montage.

    For every well in ``g.wells`` the corresponding PNG is read,
    downscaled, min-max normalized to 8-bit, and pasted into a grid
    image (one 256x256 tile per well) with gridlines drawn between
    wells. The montage is saved under ``g.output/thumbs``.

    Args:
        g: run-configuration object; this function reads g.wells, g.work,
            g.plate, g.species, g.rows, g.columns and g.output.
            NOTE(review): g.work/g.output appear to be pathlib.Path-like
            (joinpath) — confirm against the caller.
        type: image-variant suffix; '' selects the plain per-well image,
            'motility' selects the optical-flow image and an inferno
            colormap for the montage.
    """
    thumb_dict = {}
    for well in g.wells:
        # Per-well image path; non-empty `type` adds a suffix before .png.
        if type == '':
            path = g.work.joinpath(g.plate, well, 'img', g.plate + '_' + well + '.png')
        else:
            path = g.work.joinpath(g.plate, well, 'img', g.plate + '_' + well + '_' + type + '.png')
        image = cv2.imread(str(path), cv2.IMREAD_ANYDEPTH)

        # rescale the image with anti-aliasing
        # NOTE(review): 0.25 for Sma produces 512px tiles, but the paste
        # grid below assumes 256px tiles — confirm this is intended.
        if g.species == 'Sma':
            rescale_value = 0.25
        else:
            rescale_value = 0.125
        rescaled = rescale(image, rescale_value, anti_aliasing=True, clip=False)

        # normalize to 0-255
        # One corner pixel is overwritten before NORM_MINMAX, pinning the
        # value range — presumably to keep brightness comparable across
        # wells; TODO confirm the 1 / 0.05 reference values.
        if type == 'motility':
            rescaled[0, 0] = 1
        else:
            rescaled[0, 0] = 0.05
        rescaled_norm = cv2.normalize(src=rescaled, dst=None, alpha=0,
                                      beta=255, norm_type=cv2.NORM_MINMAX,
                                      dtype=-1)
        thumb_dict[well] = rescaled_norm

    # write out the stitched image
    # 0.125 of the 4X ImageXpress image is 256 x 256 pixels
    height = int(g.rows) * 256
    width = int(g.columns) * 256

    # new blank image with gridlines
    new_im = Image.new('L', (width, height))
    for well, thumb in thumb_dict.items():
        # row letters can be converted to integers with ord()
        # and then rescaled by subtracting a constant
        row = int(ord(well[:1]) - 64)
        col = int(well[1:].strip())
        new_im.paste(Image.fromarray(thumb),
                     ((col - 1) * 256, (row - 1) * 256))
    if type == 'motility':
        # apply a colormap if it's a flow image
        new_im = np.asarray(new_im) / 255
        new_im = Image.fromarray(np.uint8(cm.inferno(new_im) * 255))
        # White gridlines between wells (every 256 px).
        draw = ImageDraw.Draw(new_im)
        for col_line in range(0, width + 256, 256):
            draw.line((col_line, 0, col_line, height), fill=255, width=10)
        for row_line in range(0, height + 256, 256):
            draw.line((0, row_line, width, row_line), fill=255, width=10)
    else:
        # Grey gridlines for the plain (non-colormapped) montage.
        draw = ImageDraw.Draw(new_im)
        for col_line in range(0, width + 256, 256):
            draw.line((col_line, 0, col_line, height), fill=64, width=10)
        for row_line in range(0, height + 256, 256):
            draw.line((0, row_line, width, row_line), fill=64, width=10)

    g.output.joinpath('thumbs').mkdir(
        parents=True, exist_ok=True)
    if type == '':
        outfile = g.output.joinpath('thumbs', g.plate + ".png")
    else:
        outfile = g.output.joinpath('thumbs', g.plate + '_' + type + ".png")
    new_im.save(outfile)
| 37.027027 | 100 | 0.569708 |
f6c291967e236ec2765f842c43fa3e0d83729f92 | 67,302 | py | Python | core/domain/skill_services_test.py | Tim810306/oppia | 6f90044d12dbe0979c999265cbe46f267c4c592d | [
"Apache-2.0"
] | 2 | 2021-05-24T10:23:32.000Z | 2021-08-22T18:50:14.000Z | core/domain/skill_services_test.py | Tim810306/oppia | 6f90044d12dbe0979c999265cbe46f267c4c592d | [
"Apache-2.0"
] | 11 | 2021-03-03T07:21:27.000Z | 2022-03-12T01:03:44.000Z | core/domain/skill_services_test.py | Tim810306/oppia | 6f90044d12dbe0979c999265cbe46f267c4c592d | [
"Apache-2.0"
] | 1 | 2020-12-09T21:33:49.000Z | 2020-12-09T21:33:49.000Z | # Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests the methods defined in skill services."""
from __future__ import absolute_import
from __future__ import unicode_literals
import logging
from constants import constants
from core.domain import config_services
from core.domain import question_domain
from core.domain import skill_domain
from core.domain import skill_fetchers
from core.domain import skill_services
from core.domain import state_domain
from core.domain import suggestion_services
from core.domain import topic_domain
from core.domain import topic_fetchers
from core.domain import user_services
from core.platform import models
from core.tests import test_utils
import feconf
import python_utils
(skill_models, suggestion_models) = models.Registry.import_models(
[models.NAMES.skill, models.NAMES.suggestion])
class SkillServicesUnitTests(test_utils.GenericTestBase):
"""Test the skill services module."""
SKILL_ID = None
USER_ID = 'user'
MISCONCEPTION_ID_1 = 1
MISCONCEPTION_ID_2 = 2
    def setUp(self):
        """Create admin/regular users and one skill (with a misconception,
        a worked example and two prerequisite skills) for the tests.
        """
        super(SkillServicesUnitTests, self).setUp()
        # One worked example (question + explanation) for the skill contents.
        example_1 = skill_domain.WorkedExample(
            state_domain.SubtitledHtml('2', '<p>Example Question 1</p>'),
            state_domain.SubtitledHtml('3', '<p>Example Explanation 1</p>')
        )
        # Content ids '1'-'3' get empty voiceover/translation mappings.
        skill_contents = skill_domain.SkillContents(
            state_domain.SubtitledHtml('1', '<p>Explanation</p>'), [example_1],
            state_domain.RecordedVoiceovers.from_dict({
                'voiceovers_mapping': {
                    '1': {}, '2': {}, '3': {}
                }
            }),
            state_domain.WrittenTranslations.from_dict({
                'translations_mapping': {
                    '1': {}, '2': {}, '3': {}
                }
            })
        )
        # A single misconception that must be addressed.
        misconceptions = [skill_domain.Misconception(
            self.MISCONCEPTION_ID_1, 'name', '<p>description</p>',
            '<p>default_feedback</p>', True)]
        self.num_queries_to_fetch = 10
        # Only SKILL_ID is persisted below; SKILL_ID2/3 are reserved ids
        # individual tests save skills under as needed.
        self.SKILL_ID = skill_services.get_new_skill_id()
        self.SKILL_ID2 = skill_services.get_new_skill_id()
        self.SKILL_ID3 = skill_services.get_new_skill_id()
        # One regular user plus two curriculum admins.
        self.signup('a@example.com', 'A')
        self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME)
        self.signup('admin2@example.com', 'adm2')

        self.user_id_a = self.get_user_id_from_email('a@example.com')
        self.user_id_admin = (
            self.get_user_id_from_email(self.CURRICULUM_ADMIN_EMAIL))
        self.user_id_admin_2 = self.get_user_id_from_email('admin2@example.com')
        self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME, 'adm2'])
        self.user_a = user_services.get_user_actions_info(self.user_id_a)
        self.user_admin = user_services.get_user_actions_info(
            self.user_id_admin)
        self.user_admin_2 = user_services.get_user_actions_info(
            self.user_id_admin_2)

        # The canonical fixture skill most tests operate on.
        self.skill = self.save_new_skill(
            self.SKILL_ID, self.USER_ID, description='Description',
            misconceptions=misconceptions,
            skill_contents=skill_contents,
            prerequisite_skill_ids=['skill_id_1', 'skill_id_2'])
def test_apply_change_list_with_invalid_property_name(self):
class MockSkillChange(python_utils.OBJECT):
def __init__(self, cmd, property_name):
self.cmd = cmd
self.property_name = property_name
invalid_skill_change_list = [MockSkillChange(
skill_domain.CMD_UPDATE_SKILL_MISCONCEPTIONS_PROPERTY,
'invalid_property_name')]
with self.assertRaisesRegexp(Exception, 'Invalid change dict.'):
skill_services.apply_change_list(
self.SKILL_ID, invalid_skill_change_list, self.user_id_a)
def test_compute_summary(self):
skill_summary = skill_services.compute_summary_of_skill(self.skill)
self.assertEqual(skill_summary.id, self.SKILL_ID)
self.assertEqual(skill_summary.description, 'Description')
self.assertEqual(skill_summary.misconception_count, 1)
self.assertEqual(skill_summary.worked_examples_count, 1)
def test_get_image_filenames_from_skill(self):
explanation_html = (
'Explanation with image: <oppia-noninteractive-image '
'filepath-with-value=""img.svg"" caption-with-value='
'"""" alt-with-value=""Image"">'
'</oppia-noninteractive-image>'
)
example_explanation_html = (
'Explanation with image: <oppia-noninteractive-image '
'filepath-with-value=""img2.svg"" caption-with-value='
'"""" alt-with-value=""Image"">'
'</oppia-noninteractive-image>'
)
example_1 = skill_domain.WorkedExample(
state_domain.SubtitledHtml('2', '<p>Example Question 1</p>'),
state_domain.SubtitledHtml('3', example_explanation_html)
)
self.skill.skill_contents = skill_domain.SkillContents(
state_domain.SubtitledHtml('1', explanation_html), [example_1],
state_domain.RecordedVoiceovers.from_dict({
'voiceovers_mapping': {
'1': {}, '2': {}, '3': {}
}
}),
state_domain.WrittenTranslations.from_dict({
'translations_mapping': {
'1': {}, '2': {}, '3': {}
}
})
)
filenames = skill_services.get_image_filenames_from_skill(self.skill)
self.assertItemsEqual(filenames, ['img.svg', 'img2.svg'])
def test_get_new_skill_id(self):
new_skill_id = skill_services.get_new_skill_id()
self.assertEqual(len(new_skill_id), 12)
self.assertEqual(skill_models.SkillModel.get_by_id(new_skill_id), None)
def test_get_descriptions_of_skills(self):
example_1 = skill_domain.WorkedExample(
state_domain.SubtitledHtml('2', '<p>Example Question 1</p>'),
state_domain.SubtitledHtml('3', '<p>Example Explanation 1</p>')
)
self.save_new_skill(
'skill_id_1', self.user_id_admin, description='Description 1',
misconceptions=[],
skill_contents=skill_domain.SkillContents(
state_domain.SubtitledHtml('1', '<p>Explanation</p>'),
[example_1],
state_domain.RecordedVoiceovers.from_dict({
'voiceovers_mapping': {
'1': {}, '2': {}, '3': {}
}
}),
state_domain.WrittenTranslations.from_dict({
'translations_mapping': {
'1': {}, '2': {}, '3': {}
}
})
)
)
self.save_new_skill(
'skill_id_2', self.user_id_admin, description='Description 2',
misconceptions=[],
skill_contents=skill_domain.SkillContents(
state_domain.SubtitledHtml('1', '<p>Explanation</p>'),
[example_1],
state_domain.RecordedVoiceovers.from_dict({
'voiceovers_mapping': {
'1': {}, '2': {}, '3': {}
}
}),
state_domain.WrittenTranslations.from_dict({
'translations_mapping': {
'1': {}, '2': {}, '3': {}
}
})
)
)
skill_services.delete_skill(self.user_id_admin, 'skill_id_2')
skill_descriptions, deleted_skill_ids = (
skill_services.get_descriptions_of_skills(
['skill_id_1', 'skill_id_2']))
self.assertEqual(deleted_skill_ids, ['skill_id_2'])
self.assertEqual(
skill_descriptions, {
'skill_id_1': 'Description 1',
'skill_id_2': None
}
)
def test_get_rubrics_of_linked_skills(self):
example_1 = skill_domain.WorkedExample(
state_domain.SubtitledHtml('2', '<p>Example Question 1</p>'),
state_domain.SubtitledHtml('3', '<p>Example Explanation 1</p>')
)
self.save_new_skill(
'skill_id_1', self.user_id_admin, description='Description 1',
misconceptions=[],
skill_contents=skill_domain.SkillContents(
state_domain.SubtitledHtml('1', '<p>Explanation</p>'),
[example_1],
state_domain.RecordedVoiceovers.from_dict({
'voiceovers_mapping': {
'1': {}, '2': {}, '3': {}
}
}),
state_domain.WrittenTranslations.from_dict({
'translations_mapping': {
'1': {}, '2': {}, '3': {}
}
})
)
)
self.save_new_skill(
'skill_id_2', self.user_id_admin, description='Description 2',
misconceptions=[],
skill_contents=skill_domain.SkillContents(
state_domain.SubtitledHtml('1', '<p>Explanation</p>'),
[example_1],
state_domain.RecordedVoiceovers.from_dict({
'voiceovers_mapping': {
'1': {}, '2': {}, '3': {}
}
}),
state_domain.WrittenTranslations.from_dict({
'translations_mapping': {
'1': {}, '2': {}, '3': {}
}
})
)
)
skill_services.delete_skill(self.user_id_admin, 'skill_id_2')
skill_rubrics, deleted_skill_ids = (
skill_services.get_rubrics_of_skills(
['skill_id_1', 'skill_id_2']))
self.assertEqual(deleted_skill_ids, ['skill_id_2'])
self.assertEqual(
skill_rubrics, {
'skill_id_1': [
skill_domain.Rubric(
constants.SKILL_DIFFICULTIES[0], ['Explanation 1']
).to_dict(),
skill_domain.Rubric(
constants.SKILL_DIFFICULTIES[1], ['Explanation 2']
).to_dict(),
skill_domain.Rubric(
constants.SKILL_DIFFICULTIES[2], ['Explanation 3']
).to_dict()],
'skill_id_2': None
}
)
def test_get_skill_from_model(self):
skill_model = skill_models.SkillModel.get(self.SKILL_ID)
skill = skill_fetchers.get_skill_from_model(skill_model)
self.assertEqual(skill.to_dict(), self.skill.to_dict())
def test_get_skill_summary_from_model(self):
skill_summary_model = skill_models.SkillSummaryModel.get(self.SKILL_ID)
skill_summary = skill_services.get_skill_summary_from_model(
skill_summary_model)
self.assertEqual(skill_summary.id, self.SKILL_ID)
self.assertEqual(skill_summary.description, 'Description')
self.assertEqual(skill_summary.misconception_count, 1)
self.assertEqual(skill_summary.worked_examples_count, 1)
def test_get_all_skill_summaries(self):
skill_summaries = skill_services.get_all_skill_summaries()
self.assertEqual(len(skill_summaries), 1)
self.assertEqual(skill_summaries[0].id, self.SKILL_ID)
self.assertEqual(skill_summaries[0].description, 'Description')
self.assertEqual(skill_summaries[0].misconception_count, 1)
self.assertEqual(skill_summaries[0].worked_examples_count, 1)
def test_commit_log_entry(self):
skill_commit_log_entry = (
skill_models.SkillCommitLogEntryModel.get_commit(self.SKILL_ID, 1)
)
self.assertEqual(skill_commit_log_entry.commit_type, 'create')
self.assertEqual(skill_commit_log_entry.skill_id, self.SKILL_ID)
self.assertEqual(skill_commit_log_entry.user_id, self.USER_ID)
def test_get_skill_summary_by_id(self):
skill_summary = skill_services.get_skill_summary_by_id(self.SKILL_ID)
self.assertEqual(skill_summary.id, self.SKILL_ID)
self.assertEqual(skill_summary.description, 'Description')
self.assertEqual(skill_summary.misconception_count, 1)
def test_get_filtered_skill_summaries(self):
self.save_new_skill(
self.SKILL_ID2, self.USER_ID, description='Description2',
prerequisite_skill_ids=['skill_id_1', 'skill_id_2'])
augmented_skill_summaries, next_cursor, more = (
skill_services.get_filtered_skill_summaries(
self.num_queries_to_fetch, None, None, None, None, None))
self.assertEqual(next_cursor, None)
self.assertFalse(more)
self.assertEqual(len(augmented_skill_summaries), 2)
self.assertEqual(augmented_skill_summaries[0].id, self.SKILL_ID2)
self.assertEqual(augmented_skill_summaries[1].id, self.SKILL_ID)
augmented_skill_summaries, next_cursor, more = (
skill_services.get_filtered_skill_summaries(
1, None, 'english', None, None, None))
self.assertEqual(len(augmented_skill_summaries), 0)
augmented_skill_summaries, next_cursor, more = (
skill_services.get_filtered_skill_summaries(
self.num_queries_to_fetch, None, None, None,
'Oldest Created', None))
self.assertEqual(len(augmented_skill_summaries), 2)
self.assertEqual(augmented_skill_summaries[0].id, self.SKILL_ID)
self.assertEqual(augmented_skill_summaries[1].id, self.SKILL_ID2)
augmented_skill_summaries, next_cursor, more = (
skill_services.get_filtered_skill_summaries(
self.num_queries_to_fetch, None, None, None,
'Most Recently Updated', None))
self.assertEqual(len(augmented_skill_summaries), 2)
self.assertEqual(augmented_skill_summaries[0].id, self.SKILL_ID2)
self.assertEqual(augmented_skill_summaries[1].id, self.SKILL_ID)
augmented_skill_summaries, next_cursor, more = (
skill_services.get_filtered_skill_summaries(
self.num_queries_to_fetch, None, None, None,
'Least Recently Updated', None))
self.assertEqual(len(augmented_skill_summaries), 2)
self.assertEqual(augmented_skill_summaries[0].id, self.SKILL_ID)
self.assertEqual(augmented_skill_summaries[1].id, self.SKILL_ID2)
def test_cursor_behaves_correctly_when_fetching_skills_in_batches(self):
self.save_new_skill(
self.SKILL_ID2, self.USER_ID, description='Description2',
prerequisite_skill_ids=[])
self.save_new_skill(
self.SKILL_ID3, self.USER_ID, description='Description3',
prerequisite_skill_ids=[])
augmented_skill_summaries, next_cursor, more = (
skill_services.get_filtered_skill_summaries(
1, None, None, None, None, None))
self.assertEqual(len(augmented_skill_summaries), 2)
self.assertIsInstance(next_cursor, str)
self.assertTrue(more)
augmented_skill_summaries, next_cursor, more = (
skill_services.get_filtered_skill_summaries(
self.num_queries_to_fetch, None, None, None, None, next_cursor))
self.assertEqual(len(augmented_skill_summaries), 1)
self.assertIsNone(next_cursor)
self.assertFalse(more)
def test_filter_skills_by_status_all(self):
self.save_new_skill(
self.SKILL_ID2, self.USER_ID, description='Description2',
prerequisite_skill_ids=['skill_id_1', 'skill_id_2'])
augmented_skill_summaries, next_cursor, more = (
skill_services.get_filtered_skill_summaries(
self.num_queries_to_fetch, None, None, None,
None, None))
self.assertEqual(len(augmented_skill_summaries), 2)
self.assertEqual(next_cursor, None)
self.assertFalse(more)
augmented_skill_summaries, next_cursor, more = (
skill_services.get_filtered_skill_summaries(
self.num_queries_to_fetch, 'All', None, None,
None, None))
self.assertEqual(len(augmented_skill_summaries), 2)
self.assertEqual(next_cursor, None)
self.assertFalse(more)
def test_filter_skills_by_status_assigned(self):
self.save_new_skill(
self.SKILL_ID2, self.USER_ID, description='Description2',
prerequisite_skill_ids=['skill_id_1', 'skill_id_2'])
augmented_skill_summaries, next_cursor, more = (
skill_services.get_filtered_skill_summaries(
self.num_queries_to_fetch, 'Assigned', None, None, None, None))
self.assertEqual(len(augmented_skill_summaries), 0)
self.assertEqual(next_cursor, None)
self.assertFalse(more)
topic_id = topic_fetchers.get_new_topic_id()
self.save_new_topic(
topic_id, self.USER_ID, name='topic1',
abbreviated_name='topic-one', url_fragment='topic-one',
description='Description',
canonical_story_ids=[],
additional_story_ids=[],
uncategorized_skill_ids=[self.SKILL_ID2],
subtopics=[], next_subtopic_id=1)
augmented_skill_summaries, next_cursor, more = (
skill_services.get_filtered_skill_summaries(
self.num_queries_to_fetch, 'Assigned', None,
None, None, None))
self.assertEqual(augmented_skill_summaries[0].topic_names, ['topic1'])
self.assertEqual(augmented_skill_summaries[0].id, self.SKILL_ID2)
self.assertEqual(next_cursor, None)
self.assertFalse(more)
def test_filter_skills_by_status_unassigned(self):
self.save_new_skill(
self.SKILL_ID2, self.USER_ID, description='Description2',
prerequisite_skill_ids=['skill_id_1', 'skill_id_2'])
augmented_skill_summaries, next_cursor, more = (
skill_services.get_filtered_skill_summaries(
self.num_queries_to_fetch, 'Unassigned', None, None,
None, None))
self.assertEqual(len(augmented_skill_summaries), 2)
self.assertEqual(next_cursor, None)
self.assertFalse(more)
def test_filter_skills_by_classroom_name(self):
augmented_skill_summaries, next_cursor, more = (
skill_services.get_filtered_skill_summaries(
self.num_queries_to_fetch, None, 'english', None, None, None))
self.assertEqual(len(augmented_skill_summaries), 0)
self.assertEqual(next_cursor, None)
self.assertFalse(more)
self.save_new_skill(
self.SKILL_ID2, self.USER_ID, description='Description2',
prerequisite_skill_ids=['skill_id_1', 'skill_id_2'])
topic_id = topic_fetchers.get_new_topic_id()
self.save_new_topic(
topic_id, self.USER_ID, name='topic1',
abbreviated_name='topic-two', url_fragment='topic-two',
description='Description',
canonical_story_ids=[],
additional_story_ids=[],
uncategorized_skill_ids=[self.SKILL_ID2],
subtopics=[], next_subtopic_id=1)
config_services.set_property(
self.user_id_admin, 'classroom_pages_data', [{
'url_fragment': 'math',
'name': 'math',
'topic_ids': [topic_id],
'topic_list_intro': 'Topics Covered',
'course_details': 'Course Details'
}]
)
augmented_skill_summaries, next_cursor, more = (
skill_services.get_filtered_skill_summaries(
self.num_queries_to_fetch, None, 'math', None,
None, None))
self.assertEqual(augmented_skill_summaries[0].topic_names, ['topic1'])
self.assertEqual(augmented_skill_summaries[0].id, self.SKILL_ID2)
self.assertEqual(
augmented_skill_summaries[0].classroom_names, ['math'])
self.assertEqual(next_cursor, None)
self.assertFalse(more)
def test_filter_skills_by_keywords(self):
self.save_new_skill(
self.SKILL_ID2, self.USER_ID, description='Alpha',
misconceptions=None,
skill_contents=None,
prerequisite_skill_ids=[])
self.save_new_skill(
self.SKILL_ID3, self.USER_ID, description='Beta',
misconceptions=None,
skill_contents=None,
prerequisite_skill_ids=[])
augmented_skill_summaries, next_cursor, more = (
skill_services.get_filtered_skill_summaries(
self.num_queries_to_fetch, None, None, None, None, None))
self.assertEqual(len(augmented_skill_summaries), 3)
self.assertEqual(next_cursor, None)
self.assertFalse(more)
augmented_skill_summaries, next_cursor, more = (
skill_services.get_filtered_skill_summaries(
1, None, None, ['Non_existent'],
'Least Recently Updated', None))
self.assertEqual(len(augmented_skill_summaries), 0)
augmented_skill_summaries, next_cursor, more = (
skill_services.get_filtered_skill_summaries(
self.num_queries_to_fetch, None, None, [], None, None))
self.assertEqual(len(augmented_skill_summaries), 3)
self.assertEqual(next_cursor, None)
self.assertFalse(more)
augmented_skill_summaries, next_cursor, more = (
skill_services.get_filtered_skill_summaries(
self.num_queries_to_fetch, None, None, ['descr'], None, None))
self.assertEqual(len(augmented_skill_summaries), 1)
self.assertEqual(augmented_skill_summaries[0].id, self.SKILL_ID)
self.assertEqual(next_cursor, None)
self.assertFalse(more)
augmented_skill_summaries, next_cursor, more = (
skill_services.get_filtered_skill_summaries(
self.num_queries_to_fetch, None, None, ['alph'], None, None))
self.assertEqual(len(augmented_skill_summaries), 1)
self.assertEqual(augmented_skill_summaries[0].id, self.SKILL_ID2)
self.assertEqual(next_cursor, None)
self.assertFalse(more)
augmented_skill_summaries, next_cursor, more = (
skill_services.get_filtered_skill_summaries(
self.num_queries_to_fetch, None, None, ['bet'], None, None))
self.assertEqual(len(augmented_skill_summaries), 1)
self.assertEqual(augmented_skill_summaries[0].id, self.SKILL_ID3)
self.assertEqual(next_cursor, None)
self.assertFalse(more)
augmented_skill_summaries, next_cursor, more = (
skill_services.get_filtered_skill_summaries(
self.num_queries_to_fetch, None, None, ['alp', 'bet'],
None, None))
self.assertEqual(len(augmented_skill_summaries), 2)
self.assertEqual(next_cursor, None)
self.assertFalse(more)
def test_get_all_topic_assignments_for_skill(self):
topic_id = topic_fetchers.get_new_topic_id()
topic_id_1 = topic_fetchers.get_new_topic_id()
self.save_new_topic(
topic_id, self.USER_ID, name='Topic1',
abbreviated_name='topic-three', url_fragment='topic-three',
description='Description',
canonical_story_ids=[],
additional_story_ids=[],
uncategorized_skill_ids=[self.SKILL_ID],
subtopics=[], next_subtopic_id=1)
subtopic = topic_domain.Subtopic.from_dict({
'id': 1,
'title': 'subtopic1',
'skill_ids': [self.SKILL_ID],
'thumbnail_filename': None,
'thumbnail_bg_color': None,
'thumbnail_size_in_bytes': None,
'url_fragment': 'subtopic-one'
})
self.save_new_topic(
topic_id_1, self.USER_ID, name='Topic2',
abbreviated_name='topic-four', url_fragment='topic-four',
description='Description2', canonical_story_ids=[],
additional_story_ids=[],
uncategorized_skill_ids=[],
subtopics=[subtopic], next_subtopic_id=2)
topic_assignments = (
skill_services.get_all_topic_assignments_for_skill(self.SKILL_ID))
topic_assignments = sorted(
topic_assignments, key=lambda i: i.topic_name)
self.assertEqual(len(topic_assignments), 2)
self.assertEqual(topic_assignments[0].topic_name, 'Topic1')
self.assertEqual(topic_assignments[0].topic_id, topic_id)
self.assertEqual(topic_assignments[0].topic_version, 1)
self.assertIsNone(topic_assignments[0].subtopic_id)
self.assertEqual(topic_assignments[1].topic_name, 'Topic2')
self.assertEqual(topic_assignments[1].topic_id, topic_id_1)
self.assertEqual(topic_assignments[1].topic_version, 1)
self.assertEqual(topic_assignments[1].subtopic_id, 1)
def test_remove_skill_from_all_topics(self):
topic_id = topic_fetchers.get_new_topic_id()
topic_id_1 = topic_fetchers.get_new_topic_id()
self.save_new_topic(
topic_id, self.USER_ID, name='Topic1',
abbreviated_name='topic-five', url_fragment='topic-five',
description='Description',
canonical_story_ids=[],
additional_story_ids=[],
uncategorized_skill_ids=[self.SKILL_ID],
subtopics=[], next_subtopic_id=1)
subtopic = topic_domain.Subtopic.from_dict({
'id': 1,
'title': 'subtopic1',
'skill_ids': [self.SKILL_ID],
'thumbnail_filename': None,
'thumbnail_bg_color': None,
'thumbnail_size_in_bytes': None,
'url_fragment': 'subtopic-one'
})
self.save_new_topic(
topic_id_1, self.USER_ID, name='Topic2',
abbreviated_name='topic-six', url_fragment='topic-six',
description='Description2', canonical_story_ids=[],
additional_story_ids=[],
uncategorized_skill_ids=[],
subtopics=[subtopic], next_subtopic_id=2)
skill_services.remove_skill_from_all_topics(self.USER_ID, self.SKILL_ID)
topic_assignments_dict = (
skill_services.get_all_topic_assignments_for_skill(self.SKILL_ID))
self.assertEqual(len(topic_assignments_dict), 0)
def test_successfully_replace_skill_id_in_all_topics(self):
topic_id = topic_fetchers.get_new_topic_id()
topic_id_1 = topic_fetchers.get_new_topic_id()
self.save_new_topic(
topic_id, self.USER_ID, name='Topic1',
abbreviated_name='topic-five', url_fragment='topic-five',
description='Description',
canonical_story_ids=[],
additional_story_ids=[],
uncategorized_skill_ids=[self.SKILL_ID],
subtopics=[], next_subtopic_id=1)
subtopic = topic_domain.Subtopic.from_dict({
'id': 1,
'title': 'subtopic1',
'skill_ids': [self.SKILL_ID],
'thumbnail_filename': None,
'thumbnail_bg_color': None,
'thumbnail_size_in_bytes': None,
'url_fragment': 'subtopic-one'
})
self.save_new_topic(
topic_id_1, self.USER_ID, name='Topic2',
abbreviated_name='topic-six', url_fragment='topic-six',
description='Description2', canonical_story_ids=[],
additional_story_ids=[],
uncategorized_skill_ids=[],
subtopics=[subtopic], next_subtopic_id=2)
topic_assignments_dict = (
skill_services.get_all_topic_assignments_for_skill('new_skill_id'))
self.assertEqual(len(topic_assignments_dict), 0)
skill_services.replace_skill_id_in_all_topics(
self.USER_ID, self.SKILL_ID, 'new_skill_id')
topic_assignments_dict = (
skill_services.get_all_topic_assignments_for_skill('new_skill_id'))
self.assertEqual(len(topic_assignments_dict), 2)
def test_failure_replace_skill_id_in_all_topics(self):
topic_id = topic_fetchers.get_new_topic_id()
self.save_new_topic(
topic_id, self.USER_ID, name='Topic1',
abbreviated_name='topic-five', url_fragment='topic-five',
description='Description',
canonical_story_ids=[],
additional_story_ids=[],
uncategorized_skill_ids=[self.SKILL_ID, 'new_skill_id'],
subtopics=[], next_subtopic_id=1)
error_message = (
'Found topic \'Topic1\' contains the two skills to be merged. '
'Please unassign one of these skills from topic '
'and retry this operation.')
with self.assertRaisesRegexp(Exception, error_message):
skill_services.replace_skill_id_in_all_topics(
self.USER_ID, self.SKILL_ID, 'new_skill_id')
def test_update_skill(self):
changelist = [
skill_domain.SkillChange({
'cmd': skill_domain.CMD_ADD_SKILL_MISCONCEPTION,
'new_misconception_dict': {
'id': self.skill.next_misconception_id,
'name': 'test name',
'notes': '<p>test notes</p>',
'feedback': '<p>test feedback</p>',
'must_be_addressed': True
}
}),
skill_domain.SkillChange({
'cmd': skill_domain.CMD_UPDATE_SKILL_MISCONCEPTIONS_PROPERTY,
'property_name': (
skill_domain.SKILL_MISCONCEPTIONS_PROPERTY_NAME),
'misconception_id': self.skill.next_misconception_id,
'old_value': 'test name',
'new_value': 'Name'
}),
skill_domain.SkillChange({
'cmd': skill_domain.CMD_UPDATE_SKILL_MISCONCEPTIONS_PROPERTY,
'property_name': (
skill_domain.SKILL_MISCONCEPTIONS_PROPERTY_MUST_BE_ADDRESSED
),
'misconception_id': self.skill.next_misconception_id,
'old_value': True,
'new_value': False
}),
skill_domain.SkillChange({
'cmd': skill_domain.CMD_ADD_PREREQUISITE_SKILL,
'skill_id': 'skill_id_3'
}),
skill_domain.SkillChange({
'cmd': skill_domain.CMD_DELETE_PREREQUISITE_SKILL,
'skill_id': 'skill_id_1'
}),
skill_domain.SkillChange({
'cmd': skill_domain.CMD_UPDATE_RUBRICS,
'difficulty': constants.SKILL_DIFFICULTIES[0],
'explanations': [
'<p>New Explanation 1</p>', '<p>New Explanation 2</p>']
}),
skill_domain.SkillChange({
'cmd': skill_domain.CMD_UPDATE_RUBRICS,
'difficulty': constants.SKILL_DIFFICULTIES[1],
'explanations': ['<p>Explanation</p>']
})
]
skill_services.update_skill(
self.USER_ID, self.SKILL_ID, changelist,
'Updated misconception name.')
skill = skill_fetchers.get_skill_by_id(self.SKILL_ID)
skill_summary = skill_services.get_skill_summary_by_id(self.SKILL_ID)
self.assertEqual(skill_summary.misconception_count, 2)
self.assertEqual(skill_summary.version, 2)
self.assertEqual(skill.version, 2)
self.assertEqual(
skill.prerequisite_skill_ids, ['skill_id_2', 'skill_id_3'])
self.assertEqual(skill.misconceptions[1].name, 'Name')
self.assertEqual(skill.misconceptions[1].must_be_addressed, False)
self.assertEqual(
skill.rubrics[0].explanations, [
'<p>New Explanation 1</p>', '<p>New Explanation 2</p>'])
self.assertEqual(skill.rubrics[1].explanations, ['<p>Explanation</p>'])
def test_merge_skill(self):
changelist = [
skill_domain.SkillChange({
'cmd': skill_domain.CMD_UPDATE_SKILL_PROPERTY,
'property_name': (
skill_domain.SKILL_PROPERTY_SUPERSEDING_SKILL_ID),
'old_value': '',
'new_value': 'TestSkillId'
}),
skill_domain.SkillChange({
'cmd': skill_domain.CMD_UPDATE_SKILL_PROPERTY,
'property_name': (
skill_domain.SKILL_PROPERTY_ALL_QUESTIONS_MERGED),
'old_value': None,
'new_value': False
})
]
skill_services.update_skill(
self.USER_ID, self.SKILL_ID, changelist,
'Merging skill.')
skill = skill_fetchers.get_skill_by_id(self.SKILL_ID)
self.assertEqual(skill.version, 2)
self.assertEqual(skill.superseding_skill_id, 'TestSkillId')
self.assertEqual(skill.all_questions_merged, False)
def test_set_merge_complete_for_skill(self):
changelist = [
skill_domain.SkillChange({
'cmd': skill_domain.CMD_UPDATE_SKILL_PROPERTY,
'property_name': (
skill_domain.SKILL_PROPERTY_SUPERSEDING_SKILL_ID),
'old_value': None,
'new_value': self.SKILL_ID
}),
skill_domain.SkillChange({
'cmd': skill_domain.CMD_UPDATE_SKILL_PROPERTY,
'property_name': (
skill_domain.SKILL_PROPERTY_ALL_QUESTIONS_MERGED),
'old_value': False,
'new_value': True
})
]
skill_services.update_skill(
self.USER_ID, self.SKILL_ID, changelist,
'Setting merge complete for skill.')
skill = skill_fetchers.get_skill_by_id(self.SKILL_ID)
self.assertEqual(skill.version, 2)
self.assertEqual(skill.all_questions_merged, True)
def test_get_merged_skill_ids(self):
skill_ids = skill_services.get_merged_skill_ids()
self.assertEqual(len(skill_ids), 0)
changelist = [
skill_domain.SkillChange({
'cmd': skill_domain.CMD_UPDATE_SKILL_PROPERTY,
'property_name': (
skill_domain.SKILL_PROPERTY_SUPERSEDING_SKILL_ID),
'old_value': '',
'new_value': 'TestSkillId'
})
]
skill_services.update_skill(
self.USER_ID, self.SKILL_ID, changelist,
'Merging skill.')
skill_ids = skill_services.get_merged_skill_ids()
self.assertEqual(len(skill_ids), 1)
self.assertEqual(skill_ids[0], self.SKILL_ID)
def test_delete_skill(self):
skill_services.delete_skill(self.USER_ID, self.SKILL_ID)
self.assertEqual(
skill_fetchers.get_skill_by_id(self.SKILL_ID, strict=False), None)
self.assertEqual(
skill_services.get_skill_summary_by_id(
self.SKILL_ID, strict=False), None)
def test_delete_skill_marked_deleted(self):
skill_models.SkillModel.delete_multi(
[self.SKILL_ID], self.USER_ID, '', force_deletion=False)
skill_model = skill_models.SkillModel.get_by_id(self.SKILL_ID)
self.assertTrue(skill_model.deleted)
skill_services.delete_skill(
self.USER_ID, self.SKILL_ID, force_deletion=True)
skill_model = skill_models.SkillModel.get_by_id(self.SKILL_ID)
self.assertEqual(skill_model, None)
self.assertEqual(
skill_services.get_skill_summary_by_id(
self.SKILL_ID, strict=False), None)
    def test_delete_skill_model_with_deleted_summary_model(self):
        """Force deletion succeeds even if the summary model is already
        gone, and leaves no skill model behind.
        """
        skill_summary_model = (
            skill_models.SkillSummaryModel.get(self.SKILL_ID))
        skill_summary_model.delete()
        skill_summary_model = (
            skill_models.SkillSummaryModel.get(self.SKILL_ID, False))
        self.assertIsNone(skill_summary_model)
        skill_services.delete_skill(
            self.USER_ID, self.SKILL_ID, force_deletion=True)
        skill_model = skill_models.SkillModel.get_by_id(self.SKILL_ID)
        self.assertEqual(skill_model, None)
        self.assertEqual(
            skill_services.get_skill_summary_by_id(
                self.SKILL_ID, strict=False), None)
    def test_delete_skill_model_with_linked_suggestion(self):
        """Deleting a skill auto-rejects its pending question suggestions."""
        suggestion_change = {
            'cmd': (
                question_domain
                .CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION),
            'question_dict': {
                'question_state_data': self._create_valid_question_data(
                    'default_state').to_dict(),
                'language_code': 'en',
                'question_state_data_schema_version': (
                    feconf.CURRENT_STATE_SCHEMA_VERSION),
                'linked_skill_ids': ['skill_1'],
                'inapplicable_skill_misconception_ids': ['skillid12345-1']
            },
            'skill_id': self.SKILL_ID,
            'skill_difficulty': 0.3
        }
        suggestion = suggestion_services.create_suggestion(
            feconf.SUGGESTION_TYPE_ADD_QUESTION,
            feconf.ENTITY_TYPE_SKILL, self.SKILL_ID, 1,
            self.user_id_a, suggestion_change, 'test description'
        )
        skill_services.delete_skill(
            self.user_id_a, self.SKILL_ID, force_deletion=True)
        skill_model = skill_models.SkillModel.get_by_id(self.SKILL_ID)
        self.assertEqual(skill_model, None)
        # The suggestion was already rejected as part of the deletion, so a
        # second auto-reject attempt for the same skill raises.
        with self.assertRaisesRegexp(
            Exception, 'The suggestion with id %s has already been accepted/'
            'rejected.' % suggestion.suggestion_id):
            suggestion_services.auto_reject_question_suggestions_for_skill_id(
                self.SKILL_ID)
def test_cannot_update_skill_with_no_commit_message(self):
changelist = [
skill_domain.SkillChange({
'cmd': skill_domain.CMD_UPDATE_SKILL_PROPERTY,
'property_name': skill_domain.SKILL_PROPERTY_LANGUAGE_CODE,
'old_value': 'en',
'new_value': 'bn'
})
]
with self.assertRaisesRegexp(
Exception, 'Expected a commit message, received none.'):
skill_services.update_skill(
self.USER_ID, self.SKILL_ID, changelist, '')
    def test_cannot_update_skill_with_empty_changelist(self):
        """An empty change list is rejected before anything is committed."""
        with self.assertRaisesRegexp(
            Exception,
            'Unexpected error: received an invalid change list when trying to '
            'save skill'):
            skill_services.update_skill(
                self.USER_ID, self.SKILL_ID, [], 'No changes made.')
    def test_mismatch_of_skill_versions(self):
        """update_skill rejects both stale and ahead-of-storage versions."""
        changelist = [
            skill_domain.SkillChange({
                'cmd': skill_domain.CMD_UPDATE_SKILL_PROPERTY,
                'property_name': skill_domain.SKILL_PROPERTY_LANGUAGE_CODE,
                'old_value': 'en',
                'new_value': 'bn'
            })
        ]
        skill_model = skill_models.SkillModel.get(self.SKILL_ID)
        # Model version lower than the stored skill's version (1).
        skill_model.version = 0
        with self.assertRaisesRegexp(
            Exception,
            'Unexpected error: trying to update version 0 of skill '
            'from version 1. Please reload the page and try again.'):
            skill_services.update_skill(
                self.USER_ID, self.SKILL_ID, changelist,
                'Change language code.')
        # Model version higher than the stored skill's version (1).
        skill_model.version = 2
        with self.assertRaisesRegexp(
            Exception,
            'Trying to update version 2 of skill from version 1, which is too '
            'old. Please reload the page and try again.'):
            skill_services.update_skill(
                self.USER_ID, self.SKILL_ID, changelist,
                'Change language code.')
    def test_normal_user_cannot_update_skill_property(self):
        """Editing the skill description requires elevated rights, which
        user_a does not have.
        """
        changelist = [
            skill_domain.SkillChange({
                'cmd': skill_domain.CMD_UPDATE_SKILL_PROPERTY,
                'property_name': skill_domain.SKILL_PROPERTY_DESCRIPTION,
                'old_value': 'Description',
                'new_value': 'New description'
            })
        ]
        with self.assertRaisesRegexp(
            Exception,
            'The user does not have enough rights to edit the '
            'skill description.'):
            skill_services.update_skill(
                self.user_id_a, self.SKILL_ID, changelist,
                'Change description.')
def test_update_skill_explanation(self):
skill = skill_fetchers.get_skill_by_id(self.SKILL_ID)
old_explanation = {'content_id': '1', 'html': '<p>Explanation</p>'}
new_explanation = {'content_id': '1', 'html': '<p>New explanation</p>'}
self.assertEqual(
skill.skill_contents.explanation.to_dict(), old_explanation)
changelist = [
skill_domain.SkillChange({
'cmd': skill_domain.CMD_UPDATE_SKILL_CONTENTS_PROPERTY,
'property_name': (
skill_domain.SKILL_CONTENTS_PROPERTY_EXPLANATION),
'old_value': old_explanation,
'new_value': new_explanation
})
]
skill_services.update_skill(
self.USER_ID, self.SKILL_ID, changelist, 'Change explanation.')
skill = skill_fetchers.get_skill_by_id(self.SKILL_ID)
self.assertEqual(
skill.skill_contents.explanation.to_dict(), new_explanation)
    def test_update_skill_worked_examples(self):
        """The worked-examples list can be replaced wholesale via a change."""
        skill = skill_fetchers.get_skill_by_id(self.SKILL_ID)
        old_worked_example = skill_domain.WorkedExample(
            state_domain.SubtitledHtml('2', '<p>Example Question 1</p>'),
            state_domain.SubtitledHtml('3', '<p>Example Explanation 1</p>')
        ).to_dict()
        new_worked_example = skill_domain.WorkedExample(
            state_domain.SubtitledHtml('2', '<p>Example Question 1 new</p>'),
            state_domain.SubtitledHtml('3', '<p>Example Explanation 1 new</p>')
        ).to_dict()
        self.assertEqual(len(skill.skill_contents.worked_examples), 1)
        self.assertEqual(
            skill.skill_contents.worked_examples[0].to_dict(),
            old_worked_example)
        changelist = [
            skill_domain.SkillChange({
                'cmd': skill_domain.CMD_UPDATE_SKILL_CONTENTS_PROPERTY,
                'property_name': (
                    skill_domain.SKILL_CONTENTS_PROPERTY_WORKED_EXAMPLES),
                'old_value': [old_worked_example],
                'new_value': [new_worked_example]
            })
        ]
        skill_services.update_skill(
            self.USER_ID, self.SKILL_ID, changelist, 'Change worked examples.')
        skill = skill_fetchers.get_skill_by_id(self.SKILL_ID)
        self.assertEqual(len(skill.skill_contents.worked_examples), 1)
        self.assertEqual(
            skill.skill_contents.worked_examples[0].to_dict(),
            new_worked_example)
    def test_delete_skill_misconception(self):
        """A DELETE_SKILL_MISCONCEPTION change removes the misconception."""
        skill = skill_fetchers.get_skill_by_id(self.SKILL_ID)
        self.assertEqual(len(skill.misconceptions), 1)
        self.assertEqual(skill.misconceptions[0].id, self.MISCONCEPTION_ID_1)
        changelist = [
            skill_domain.SkillChange({
                'cmd': skill_domain.CMD_DELETE_SKILL_MISCONCEPTION,
                'misconception_id': self.MISCONCEPTION_ID_1,
            })
        ]
        skill_services.update_skill(
            self.USER_ID, self.SKILL_ID, changelist, 'Delete misconception.')
        skill = skill_fetchers.get_skill_by_id(self.SKILL_ID)
        self.assertEqual(skill.misconceptions, [])
    def test_update_skill_misconception_notes(self):
        """A misconception's notes can be edited via its id."""
        skill = skill_fetchers.get_skill_by_id(self.SKILL_ID)
        self.assertEqual(len(skill.misconceptions), 1)
        self.assertEqual(skill.misconceptions[0].id, self.MISCONCEPTION_ID_1)
        self.assertEqual(skill.misconceptions[0].notes, '<p>description</p>')
        changelist = [
            skill_domain.SkillChange({
                'cmd': skill_domain.CMD_UPDATE_SKILL_MISCONCEPTIONS_PROPERTY,
                'property_name': (
                    skill_domain.SKILL_MISCONCEPTIONS_PROPERTY_NOTES),
                'misconception_id': self.MISCONCEPTION_ID_1,
                'old_value': '<p>description</p>',
                'new_value': '<p>new description</p>'
            })
        ]
        skill_services.update_skill(
            self.USER_ID, self.SKILL_ID, changelist,
            'Update misconception notes.')
        skill = skill_fetchers.get_skill_by_id(self.SKILL_ID)
        self.assertEqual(len(skill.misconceptions), 1)
        self.assertEqual(skill.misconceptions[0].id, self.MISCONCEPTION_ID_1)
        self.assertEqual(
            skill.misconceptions[0].notes, '<p>new description</p>')
    def test_update_skill_misconception_feedback(self):
        """A misconception's feedback can be edited via its id."""
        skill = skill_fetchers.get_skill_by_id(self.SKILL_ID)
        self.assertEqual(len(skill.misconceptions), 1)
        self.assertEqual(skill.misconceptions[0].id, self.MISCONCEPTION_ID_1)
        self.assertEqual(
            skill.misconceptions[0].feedback, '<p>default_feedback</p>')
        changelist = [
            skill_domain.SkillChange({
                'cmd': skill_domain.CMD_UPDATE_SKILL_MISCONCEPTIONS_PROPERTY,
                'property_name': (
                    skill_domain.SKILL_MISCONCEPTIONS_PROPERTY_FEEDBACK),
                'misconception_id': self.MISCONCEPTION_ID_1,
                'old_value': '<p>default_feedback</p>',
                'new_value': '<p>new feedback</p>'
            })
        ]
        skill_services.update_skill(
            self.USER_ID, self.SKILL_ID, changelist,
            'Update misconception feedback.')
        skill = skill_fetchers.get_skill_by_id(self.SKILL_ID)
        self.assertEqual(len(skill.misconceptions), 1)
        self.assertEqual(skill.misconceptions[0].id, self.MISCONCEPTION_ID_1)
        self.assertEqual(
            skill.misconceptions[0].feedback, '<p>new feedback</p>')
    def test_update_skill_schema(self):
        """A rubric schema-migration commit bumps only the version and
        leaves all other skill content untouched.
        """
        orig_skill_dict = (
            skill_fetchers.get_skill_by_id(self.SKILL_ID).to_dict())
        changelist = [
            skill_domain.SkillChange({
                'cmd': (
                    skill_domain.CMD_MIGRATE_RUBRICS_SCHEMA_TO_LATEST_VERSION),
                'from_version': 1,
                'to_version': 2,
            })
        ]
        skill_services.update_skill(
            self.USER_ID, self.SKILL_ID, changelist, 'Update schema.')
        new_skill_dict = skill_fetchers.get_skill_by_id(self.SKILL_ID).to_dict()
        # Check version is updated.
        self.assertEqual(new_skill_dict['version'], 2)
        # Delete version and check that the two dicts are the same.
        del orig_skill_dict['version']
        del new_skill_dict['version']
        self.assertEqual(orig_skill_dict, new_skill_dict)
    def test_cannot_update_skill_with_invalid_change_list(self):
        """A malformed change list both raises and logs an error message."""
        observed_log_messages = []
        def _mock_logging_function(msg, *args):
            """Mocks logging.error()."""
            observed_log_messages.append(msg % args)
        logging_swap = self.swap(logging, 'error', _mock_logging_function)
        assert_raises_context_manager = self.assertRaisesRegexp(
            Exception, '\'str\' object has no attribute \'cmd\'')
        with logging_swap, assert_raises_context_manager:
            skill_services.update_skill(
                self.USER_ID, self.SKILL_ID, 'invalid_change_list',
                'commit message')
        self.assertEqual(len(observed_log_messages), 1)
        self.assertRegexpMatches(
            observed_log_messages[0], 'object has no'
            ' attribute \'cmd\' %s invalid_change_list' % self.SKILL_ID)
def test_cannot_update_misconception_name_with_invalid_id(self):
changelist = [skill_domain.SkillChange({
'cmd': skill_domain.CMD_UPDATE_SKILL_MISCONCEPTIONS_PROPERTY,
'property_name': (
skill_domain.SKILL_MISCONCEPTIONS_PROPERTY_NAME),
'misconception_id': 'invalid_id',
'old_value': 'test name',
'new_value': 'Name'
})]
with self.assertRaisesRegexp(
Exception, 'There is no misconception with the given id.'):
skill_services.update_skill(
self.USER_ID, self.SKILL_ID, changelist,
'Updated misconception name.')
    def test_cannot_update_misconception_must_be_addressed_with_invalid_id(
            self):
        """Updating must_be_addressed with an unknown misconception id
        fails.
        """
        changelist = [skill_domain.SkillChange({
            'cmd': skill_domain.CMD_UPDATE_SKILL_MISCONCEPTIONS_PROPERTY,
            'property_name': (
                skill_domain.SKILL_MISCONCEPTIONS_PROPERTY_MUST_BE_ADDRESSED),
            'misconception_id': 'invalid_id',
            'old_value': False,
            'new_value': True
        })]
        with self.assertRaisesRegexp(
            Exception, 'There is no misconception with the given id.'):
            skill_services.update_skill(
                self.USER_ID, self.SKILL_ID, changelist,
                'Updated misconception must_be_addressed.')
    def test_cannot_add_already_existing_prerequisite_skill(self):
        """Adding a duplicate prerequisite skill id fails."""
        changelist = [skill_domain.SkillChange({
            'cmd': skill_domain.CMD_ADD_PREREQUISITE_SKILL,
            'skill_id': 'skill_id_1'
        })]
        with self.assertRaisesRegexp(
            Exception, 'The skill is already a prerequisite skill.'):
            skill_services.update_skill(
                self.USER_ID, self.SKILL_ID, changelist,
                'Added prereq skill.')
    def test_cannot_delete_non_existent_prerequisite_skill(self):
        """Removing a skill id that is not a prerequisite fails."""
        changelist = [skill_domain.SkillChange({
            'cmd': skill_domain.CMD_DELETE_PREREQUISITE_SKILL,
            'skill_id': 'skill_id_5'
        })]
        with self.assertRaisesRegexp(
            Exception, 'The skill to remove is not a prerequisite skill.'):
            skill_services.update_skill(
                self.USER_ID, self.SKILL_ID, changelist,
                'Removed prereq skill.')
    def test_cannot_add_rubric_with_invalid_difficulty(self):
        """A rubric update with an unknown difficulty label fails."""
        changelist = [skill_domain.SkillChange({
            'cmd': skill_domain.CMD_UPDATE_RUBRICS,
            'difficulty': 'invalid_difficulty',
            'explanations': ['<p>Explanation</p>']
        })]
        with self.assertRaisesRegexp(
            Exception, 'There is no rubric for the given difficulty.'):
            skill_services.update_skill(
                self.USER_ID, self.SKILL_ID, changelist,
                'Added rubric.')
    def test_cannot_delete_misconception_with_invalid_id(self):
        """Deleting a misconception with an unknown id fails."""
        changelist = [skill_domain.SkillChange({
            'cmd': skill_domain.CMD_DELETE_SKILL_MISCONCEPTION,
            'misconception_id': 'invalid_id'
        })]
        with self.assertRaisesRegexp(
            Exception, 'There is no misconception with the given id.'):
            skill_services.update_skill(
                self.USER_ID, self.SKILL_ID, changelist, 'Delete misconception')
    def test_cannot_update_misconception_notes_with_invalid_id(self):
        """Updating notes for an unknown misconception id fails."""
        changelist = [skill_domain.SkillChange({
            'cmd': skill_domain.CMD_UPDATE_SKILL_MISCONCEPTIONS_PROPERTY,
            'property_name': (
                skill_domain.SKILL_MISCONCEPTIONS_PROPERTY_NOTES),
            'misconception_id': 'invalid_id',
            'old_value': 'description',
            'new_value': 'new description'
        })]
        with self.assertRaisesRegexp(
            Exception, 'There is no misconception with the given id.'):
            skill_services.update_skill(
                self.USER_ID, self.SKILL_ID, changelist,
                'Updated misconception notes.')
    def test_cannot_update_misconception_feedback_with_invalid_id(self):
        """Updating feedback for an unknown misconception id fails."""
        changelist = [skill_domain.SkillChange({
            'cmd': skill_domain.CMD_UPDATE_SKILL_MISCONCEPTIONS_PROPERTY,
            'property_name': (
                skill_domain.SKILL_MISCONCEPTIONS_PROPERTY_FEEDBACK),
            'misconception_id': 'invalid_id',
            'old_value': 'default_feedback',
            'new_value': 'new feedback'
        })]
        with self.assertRaisesRegexp(
            Exception, 'There is no misconception with the given id.'):
            skill_services.update_skill(
                self.USER_ID, self.SKILL_ID, changelist,
                'Updated misconception feedback.')
class SkillMasteryServicesUnitTests(test_utils.GenericTestBase):
    """Test the skill mastery services module."""
    # Replaced in setUp() with three freshly generated skill ids.
    SKILL_IDS = []
    USER_ID = 'user'
    # Mastery degrees assigned to the first two skills in setUp(); the
    # third skill intentionally gets no mastery record.
    DEGREE_OF_MASTERY_1 = 0.0
    DEGREE_OF_MASTERY_2 = 0.5
    def setUp(self):
        """Creates three skill ids and mastery records for the first two."""
        super(SkillMasteryServicesUnitTests, self).setUp()
        self.SKILL_ID_1 = skill_services.get_new_skill_id()
        self.SKILL_ID_2 = skill_services.get_new_skill_id()
        self.SKILL_ID_3 = skill_services.get_new_skill_id()
        self.SKILL_IDS = [self.SKILL_ID_1, self.SKILL_ID_2, self.SKILL_ID_3]
        skill_services.create_user_skill_mastery(
            self.USER_ID, self.SKILL_ID_1, self.DEGREE_OF_MASTERY_1)
        skill_services.create_user_skill_mastery(
            self.USER_ID, self.SKILL_ID_2, self.DEGREE_OF_MASTERY_2)
    def test_get_user_skill_mastery(self):
        """Returns the stored degree, or None when no record exists."""
        degree_of_mastery = skill_services.get_user_skill_mastery(
            self.USER_ID, self.SKILL_ID_1)
        self.assertEqual(degree_of_mastery, self.DEGREE_OF_MASTERY_1)
        degree_of_mastery = skill_services.get_user_skill_mastery(
            self.USER_ID, self.SKILL_ID_3)
        self.assertEqual(degree_of_mastery, None)
    def test_get_multi_user_skill_mastery(self):
        """Bulk lookup maps each skill id to its degree (None if absent)."""
        degree_of_mastery = skill_services.get_multi_user_skill_mastery(
            self.USER_ID, self.SKILL_IDS)
        self.assertEqual(
            degree_of_mastery, {
                self.SKILL_ID_1: self.DEGREE_OF_MASTERY_1,
                self.SKILL_ID_2: self.DEGREE_OF_MASTERY_2,
                self.SKILL_ID_3: None
            })
    def test_create_multi_user_skill_mastery(self):
        """Bulk creation stores each (skill id, degree) pair."""
        skill_id_4 = skill_services.get_new_skill_id()
        skill_id_5 = skill_services.get_new_skill_id()
        skill_services.create_multi_user_skill_mastery(
            self.USER_ID, {skill_id_4: 0.3, skill_id_5: 0.5})
        degrees_of_mastery = skill_services.get_multi_user_skill_mastery(
            self.USER_ID, [skill_id_4, skill_id_5])
        self.assertEqual(
            degrees_of_mastery, {skill_id_4: 0.3, skill_id_5: 0.5})
    def test_get_sorted_skill_ids(self):
        """Skills with no mastery come first, then ascending mastery,
        truncated to feconf.MAX_NUMBER_OF_SKILL_IDS.
        """
        degrees_of_masteries = skill_services.get_multi_user_skill_mastery(
            self.USER_ID, self.SKILL_IDS)
        with self.swap(feconf, 'MAX_NUMBER_OF_SKILL_IDS', 2):
            sorted_skill_ids = skill_services.get_sorted_skill_ids(
                degrees_of_masteries)
        expected_sorted_skill_ids = [self.SKILL_ID_3, self.SKILL_ID_1]
        self.assertEqual(len(sorted_skill_ids), 2)
        self.assertEqual(sorted_skill_ids, expected_sorted_skill_ids)
        with self.swap(feconf, 'MAX_NUMBER_OF_SKILL_IDS', 3):
            sorted_skill_ids = skill_services.get_sorted_skill_ids(
                degrees_of_masteries)
        expected_sorted_skill_ids = [
            self.SKILL_ID_3, self.SKILL_ID_1, self.SKILL_ID_2]
        self.assertEqual(sorted_skill_ids, expected_sorted_skill_ids)
    def test_filter_skills_by_mastery(self):
        """Filtering keeps at most MAX_NUMBER_OF_SKILL_IDS skills while
        preserving the original input ordering.
        """
        with self.swap(feconf, 'MAX_NUMBER_OF_SKILL_IDS', 2):
            arranged_filtered_skill_ids = (
                skill_services.filter_skills_by_mastery(
                    self.USER_ID, self.SKILL_IDS))
        self.assertEqual(len(arranged_filtered_skill_ids), 2)
        expected_skill_ids = [self.SKILL_ID_1, self.SKILL_ID_3]
        self.assertEqual(arranged_filtered_skill_ids, expected_skill_ids)
        with self.swap(feconf, 'MAX_NUMBER_OF_SKILL_IDS', len(self.SKILL_IDS)):
            arranged_filtered_skill_ids = (
                skill_services.filter_skills_by_mastery(
                    self.USER_ID, self.SKILL_IDS))
        self.assertEqual(arranged_filtered_skill_ids, self.SKILL_IDS)
class SkillMigrationTests(test_utils.GenericTestBase):
    """Tests that loading an old SkillModel migrates its sub-structures
    (skill contents, misconceptions, rubrics) to the latest schema version,
    including the math-tag HTML conversion performed by the migrations.
    """
    def test_migrate_skill_contents_to_latest_schema(self):
        """Skill contents at schema v1 are migrated to the version set in
        feconf, converting legacy math tags in the explanation, worked
        examples and written translations.
        """
        commit_cmd = skill_domain.SkillChange({
            'cmd': skill_domain.CMD_CREATE_NEW
        })
        explanation_content_id = feconf.DEFAULT_SKILL_EXPLANATION_CONTENT_ID
        # Legacy math-tag markup (pre-migration) and its expected
        # post-migration form.
        html_content = (
            '<p>Value</p><oppia-noninteractive-math raw_latex-with-value="&a'
            'mp;quot;+,-,-,+&quot;"></oppia-noninteractive-math>')
        expected_html_content = (
            '<p>Value</p><oppia-noninteractive-math math_content-with-value='
            '"{&quot;raw_latex&quot;: &quot;+,-,-,+&quot;, &'
            'amp;quot;svg_filename&quot;: &quot;&quot;}"></oppia'
            '-noninteractive-math>')
        written_translations_dict = {
            'translations_mapping': {
                'content1': {
                    'en': {
                        'data_format': 'html',
                        'translation': '',
                        'needs_update': True
                    },
                    'hi': {
                        'data_format': 'html',
                        'translation': 'Hey!',
                        'needs_update': False
                    }
                }
            }
        }
        written_translations_dict_math = {
            'translations_mapping': {
                'content1': {
                    'en': {
                        'data_format': 'html',
                        'translation': expected_html_content,
                        'needs_update': True
                    },
                    'hi': {
                        'data_format': 'html',
                        'translation': 'Hey!',
                        'needs_update': False
                    }
                }
            }
        }
        worked_example_dict = {
            'question': {
                'content_id': 'question1',
                'html': ''
            },
            'explanation': {
                'content_id': 'explanation1',
                'html': ''
            }
        }
        worked_example_dict_math = {
            'question': {
                'content_id': 'question1',
                'html': expected_html_content
            },
            'explanation': {
                'content_id': 'explanation1',
                'html': expected_html_content
            }
        }
        skill_contents = skill_domain.SkillContents(
            state_domain.SubtitledHtml(
                explanation_content_id, ''),
            [skill_domain.WorkedExample.from_dict(worked_example_dict)],
            state_domain.RecordedVoiceovers.from_dict({
                'voiceovers_mapping': {
                    explanation_content_id: {}
                }
            }),
            state_domain.WrittenTranslations.from_dict(
                written_translations_dict))
        # Inject the legacy math markup directly into the serialized dict,
        # bypassing domain-object validation.
        skill_contents_dict = skill_contents.to_dict()
        skill_contents_dict['explanation']['html'] = html_content
        skill_contents_dict['written_translations']['translations_mapping'][
            'content1']['en']['translation'] = html_content
        skill_contents_dict['worked_examples'][0]['question']['html'] = (
            html_content)
        skill_contents_dict['worked_examples'][0]['explanation']['html'] = (
            html_content)
        model = skill_models.SkillModel(
            id='skill_id',
            description='description',
            language_code='en',
            misconceptions=[],
            rubrics=[],
            skill_contents=skill_contents_dict,
            next_misconception_id=1,
            misconceptions_schema_version=1,
            rubric_schema_version=1,
            skill_contents_schema_version=1,
            all_questions_merged=False
        )
        commit_cmd_dicts = [commit_cmd.to_dict()]
        model.commit(
            'user_id_admin', 'skill model created', commit_cmd_dicts)
        current_schema_version_swap = self.swap(
            feconf, 'CURRENT_SKILL_CONTENTS_SCHEMA_VERSION', 3)
        with current_schema_version_swap:
            skill = skill_fetchers.get_skill_from_model(model)
        self.assertEqual(skill.skill_contents_schema_version, 3)
        self.assertEqual(
            skill.skill_contents.explanation.html,
            expected_html_content)
        self.assertEqual(
            skill.skill_contents.written_translations.to_dict(),
            written_translations_dict_math)
        self.assertEqual(
            skill.skill_contents.worked_examples[0].to_dict(),
            worked_example_dict_math)
    def test_migrate_misconceptions_to_latest_schema(self):
        """Misconceptions at schema v1 gain must_be_addressed and have
        their notes/feedback HTML converted to the new math-tag format.
        """
        commit_cmd = skill_domain.SkillChange({
            'cmd': skill_domain.CMD_CREATE_NEW
        })
        explanation_content_id = feconf.DEFAULT_SKILL_EXPLANATION_CONTENT_ID
        html_content = (
            '<p>Value</p><oppia-noninteractive-math raw_latex-with-value="&a'
            'mp;quot;+,-,-,+&quot;"></oppia-noninteractive-math>')
        expected_html_content = (
            '<p>Value</p><oppia-noninteractive-math math_content-with-value='
            '"{&quot;raw_latex&quot;: &quot;+,-,-,+&quot;, &'
            'amp;quot;svg_filename&quot;: &quot;&quot;}"></oppia'
            '-noninteractive-math>')
        skill_contents = skill_domain.SkillContents(
            state_domain.SubtitledHtml(
                explanation_content_id, feconf.DEFAULT_SKILL_EXPLANATION), [],
            state_domain.RecordedVoiceovers.from_dict({
                'voiceovers_mapping': {
                    explanation_content_id: {}
                }
            }),
            state_domain.WrittenTranslations.from_dict({
                'translations_mapping': {
                    explanation_content_id: {}
                }
            }))
        model = skill_models.SkillModel(
            id='skill_id',
            description='description',
            language_code='en',
            misconceptions=[{
                'id': 1,
                'name': 'name',
                'notes': html_content,
                'feedback': html_content
            }],
            rubrics=[],
            skill_contents=skill_contents.to_dict(),
            next_misconception_id=2,
            misconceptions_schema_version=1,
            rubric_schema_version=1,
            skill_contents_schema_version=1,
            all_questions_merged=False
        )
        commit_cmd_dicts = [commit_cmd.to_dict()]
        model.commit(
            'user_id_admin', 'skill model created', commit_cmd_dicts)
        current_schema_version_swap = self.swap(
            feconf, 'CURRENT_MISCONCEPTIONS_SCHEMA_VERSION', 4)
        with current_schema_version_swap:
            skill = skill_fetchers.get_skill_from_model(model)
        self.assertEqual(skill.misconceptions_schema_version, 4)
        self.assertEqual(skill.misconceptions[0].must_be_addressed, True)
        self.assertEqual(skill.misconceptions[0].notes, expected_html_content)
        self.assertEqual(
            skill.misconceptions[0].feedback, expected_html_content)
    def test_migrate_rubrics_to_latest_schema(self):
        """Rubrics at schema v2 are migrated, converting math-tag HTML
        inside the rubric explanations.
        """
        commit_cmd = skill_domain.SkillChange({
            'cmd': skill_domain.CMD_CREATE_NEW
        })
        explanation_content_id = feconf.DEFAULT_SKILL_EXPLANATION_CONTENT_ID
        html_content = (
            '<p>Value</p><oppia-noninteractive-math raw_latex-with-value="&a'
            'mp;quot;+,-,-,+&quot;"></oppia-noninteractive-math>')
        expected_html_content = (
            '<p>Value</p><oppia-noninteractive-math math_content-with-value='
            '"{&quot;raw_latex&quot;: &quot;+,-,-,+&quot;, &'
            'amp;quot;svg_filename&quot;: &quot;&quot;}"></oppia'
            '-noninteractive-math>')
        skill_contents = skill_domain.SkillContents(
            state_domain.SubtitledHtml(
                explanation_content_id, feconf.DEFAULT_SKILL_EXPLANATION), [],
            state_domain.RecordedVoiceovers.from_dict({
                'voiceovers_mapping': {
                    explanation_content_id: {}
                }
            }),
            state_domain.WrittenTranslations.from_dict({
                'translations_mapping': {
                    explanation_content_id: {}
                }
            }))
        model = skill_models.SkillModel(
            id='skill_id',
            description='description',
            language_code='en',
            misconceptions=[],
            rubrics=[{
                'difficulty': 'Easy',
                'explanations': ['Easy explanation']
            }, {
                'difficulty': 'Medium',
                'explanations': ['Medium explanation']
            }, {
                'difficulty': 'Hard',
                'explanations': ['Hard explanation', html_content]
            }],
            skill_contents=skill_contents.to_dict(),
            next_misconception_id=1,
            misconceptions_schema_version=1,
            rubric_schema_version=2,
            skill_contents_schema_version=2,
            all_questions_merged=False
        )
        commit_cmd_dicts = [commit_cmd.to_dict()]
        model.commit(
            'user_id_admin', 'skill model created', commit_cmd_dicts)
        current_schema_version_swap = self.swap(
            feconf, 'CURRENT_RUBRIC_SCHEMA_VERSION', 4)
        with current_schema_version_swap:
            skill = skill_fetchers.get_skill_from_model(model)
        self.assertEqual(skill.rubric_schema_version, 4)
        self.assertEqual(skill.rubrics[0].difficulty, 'Easy')
        self.assertEqual(skill.rubrics[0].explanations, ['Easy explanation'])
        self.assertEqual(skill.rubrics[1].difficulty, 'Medium')
        self.assertEqual(skill.rubrics[1].explanations, ['Medium explanation'])
        self.assertEqual(skill.rubrics[2].difficulty, 'Hard')
        self.assertEqual(
            skill.rubrics[2].explanations,
            ['Hard explanation', expected_html_content])
| 41.854478 | 80 | 0.61646 |
aae725245c5cc511d7adbf0e9f052d3db18994e3 | 9,277 | py | Python | shade/tests/unit/test_rebuild_server.py | noironetworks/shade | e46878bae44e7daebf32c0aeaeffea0011542525 | [
"Apache-2.0"
] | 96 | 2015-01-29T20:12:08.000Z | 2019-01-28T22:17:13.000Z | shade/tests/unit/test_rebuild_server.py | noironetworks/shade | e46878bae44e7daebf32c0aeaeffea0011542525 | [
"Apache-2.0"
] | 7 | 2015-08-14T18:47:28.000Z | 2019-02-18T16:32:36.000Z | shade/tests/unit/test_rebuild_server.py | noironetworks/shade | e46878bae44e7daebf32c0aeaeffea0011542525 | [
"Apache-2.0"
] | 88 | 2015-05-11T17:20:52.000Z | 2019-04-04T03:23:30.000Z | # -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
test_rebuild_server
----------------------------------
Tests for the `rebuild_server` command.
"""
import uuid
from shade import exc
from shade.tests import fakes
from shade.tests.unit import base
class TestRebuildServer(base.RequestsMockTestCase):
    """Tests for rebuild_server: error propagation, wait/timeout handling
    and admin password pass-through, using mocked compute API responses.
    """
    def setUp(self):
        """Builds fake server payloads in default, REBUILD and ERROR states
        that the individual tests register as mock API responses.
        """
        super(TestRebuildServer, self).setUp()
        self.server_id = str(uuid.uuid4())
        self.server_name = self.getUniqueString('name')
        self.fake_server = fakes.make_fake_server(
            self.server_id, self.server_name)
        self.rebuild_server = fakes.make_fake_server(
            self.server_id, self.server_name, 'REBUILD')
        self.error_server = fakes.make_fake_server(
            self.server_id, self.server_name, 'ERROR')
    def test_rebuild_server_rebuild_exception(self):
        """
        Test that an exception in the rebuild raises an exception in
        rebuild_server.
        """
        self.register_uris([
            dict(method='POST',
                 uri=self.get_mock_url(
                     'compute', 'public',
                     append=['servers', self.server_id, 'action']),
                 status_code=400,
                 validate=dict(
                     json={
                         'rebuild': {
                             'imageRef': 'a',
                             'adminPass': 'b'}})),
        ])
        self.assertRaises(
            exc.OpenStackCloudException,
            self.cloud.rebuild_server,
            self.fake_server['id'], "a", "b")
        self.assert_calls()
    def test_rebuild_server_server_error(self):
        """
        Test that a server error while waiting for the server to rebuild
        raises an exception in rebuild_server.
        """
        self.register_uris([
            dict(method='POST',
                 uri=self.get_mock_url(
                     'compute', 'public',
                     append=['servers', self.server_id, 'action']),
                 json={'server': self.rebuild_server},
                 validate=dict(
                     json={
                         'rebuild': {
                             'imageRef': 'a'}})),
            dict(method='GET',
                 uri=self.get_mock_url(
                     'compute', 'public', append=['servers', 'detail']),
                 json={'servers': [self.error_server]}),
        ])
        self.assertRaises(
            exc.OpenStackCloudException,
            self.cloud.rebuild_server, self.fake_server['id'], "a", wait=True)
        self.assert_calls()
    def test_rebuild_server_timeout(self):
        """
        Test that a timeout while waiting for the server to rebuild raises an
        exception in rebuild_server.
        """
        self.register_uris([
            dict(method='POST',
                 uri=self.get_mock_url(
                     'compute', 'public',
                     append=['servers', self.server_id, 'action']),
                 json={'server': self.rebuild_server},
                 validate=dict(
                     json={
                         'rebuild': {
                             'imageRef': 'a'}})),
            dict(method='GET',
                 uri=self.get_mock_url(
                     'compute', 'public', append=['servers', 'detail']),
                 json={'servers': [self.rebuild_server]}),
        ])
        self.assertRaises(
            exc.OpenStackCloudTimeout,
            self.cloud.rebuild_server,
            self.fake_server['id'], "a", wait=True, timeout=0.001)
        # The number of status polls before the timeout fires is
        # non-deterministic, so don't pin the call count.
        self.assert_calls(do_count=False)
    def test_rebuild_server_no_wait(self):
        """
        Test that rebuild_server with no wait and no exception in the
        rebuild call returns the server instance.
        """
        self.register_uris([
            dict(method='POST',
                 uri=self.get_mock_url(
                     'compute', 'public',
                     append=['servers', self.server_id, 'action']),
                 json={'server': self.rebuild_server},
                 validate=dict(
                     json={
                         'rebuild': {
                             'imageRef': 'a'}})),
            dict(method='GET',
                 uri=self.get_mock_url(
                     'network', 'public', append=['v2.0', 'networks.json']),
                 json={'networks': []}),
        ])
        self.assertEqual(
            self.rebuild_server['status'],
            self.cloud.rebuild_server(self.fake_server['id'], "a")['status'])
        self.assert_calls()
    def test_rebuild_server_with_admin_pass_no_wait(self):
        """
        Test that a server with an admin_pass passed returns the password
        """
        password = self.getUniqueString('password')
        rebuild_server = self.rebuild_server.copy()
        rebuild_server['adminPass'] = password
        self.register_uris([
            dict(method='POST',
                 uri=self.get_mock_url(
                     'compute', 'public',
                     append=['servers', self.server_id, 'action']),
                 json={'server': rebuild_server},
                 validate=dict(
                     json={
                         'rebuild': {
                             'imageRef': 'a',
                             'adminPass': password}})),
            dict(method='GET',
                 uri=self.get_mock_url(
                     'network', 'public', append=['v2.0', 'networks.json']),
                 json={'networks': []}),
        ])
        self.assertEqual(
            password,
            self.cloud.rebuild_server(
                self.fake_server['id'], 'a',
                admin_pass=password)['adminPass'])
        self.assert_calls()
    def test_rebuild_server_with_admin_pass_wait(self):
        """
        Test that a server with an admin_pass passed returns the password
        """
        password = self.getUniqueString('password')
        rebuild_server = self.rebuild_server.copy()
        rebuild_server['adminPass'] = password
        self.register_uris([
            dict(method='POST',
                 uri=self.get_mock_url(
                     'compute', 'public',
                     append=['servers', self.server_id, 'action']),
                 json={'server': rebuild_server},
                 validate=dict(
                     json={
                         'rebuild': {
                             'imageRef': 'a',
                             'adminPass': password}})),
            dict(method='GET',
                 uri=self.get_mock_url(
                     'compute', 'public', append=['servers', 'detail']),
                 json={'servers': [self.rebuild_server]}),
            dict(method='GET',
                 uri=self.get_mock_url(
                     'compute', 'public', append=['servers', 'detail']),
                 json={'servers': [self.fake_server]}),
            dict(method='GET',
                 uri=self.get_mock_url(
                     'network', 'public', append=['v2.0', 'networks.json']),
                 json={'networks': []}),
        ])
        self.assertEqual(
            password,
            self.cloud.rebuild_server(
                self.fake_server['id'], 'a',
                admin_pass=password, wait=True)['adminPass'])
        self.assert_calls()
    def test_rebuild_server_wait(self):
        """
        Test that rebuild_server with a wait returns the server instance when
        its status changes to "ACTIVE".
        """
        self.register_uris([
            dict(method='POST',
                 uri=self.get_mock_url(
                     'compute', 'public',
                     append=['servers', self.server_id, 'action']),
                 json={'server': self.rebuild_server},
                 validate=dict(
                     json={
                         'rebuild': {
                             'imageRef': 'a'}})),
            dict(method='GET',
                 uri=self.get_mock_url(
                     'compute', 'public', append=['servers', 'detail']),
                 json={'servers': [self.rebuild_server]}),
            dict(method='GET',
                 uri=self.get_mock_url(
                     'compute', 'public', append=['servers', 'detail']),
                 json={'servers': [self.fake_server]}),
            dict(method='GET',
                 uri=self.get_mock_url(
                     'network', 'public', append=['v2.0', 'networks.json']),
                 json={'networks': []}),
        ])
        self.assertEqual(
            'ACTIVE',
            self.cloud.rebuild_server(
                self.fake_server['id'], 'a', wait=True)['status'])
        self.assert_calls()
| 36.667984 | 78 | 0.499407 |
c09eaf5936de0391f9d4a4bfcfebc737244638e6 | 3,166 | py | Python | PhysicsTools/HeppyCore/python/framework/analyzer.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | PhysicsTools/HeppyCore/python/framework/analyzer.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | PhysicsTools/HeppyCore/python/framework/analyzer.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z | from __future__ import print_function
# Copyright (C) 2014 Colin Bernet
# https://github.com/cbernet/heppy/blob/master/LICENSE
import os
import logging
from PhysicsTools.HeppyCore.statistics.counter import Counters
from PhysicsTools.HeppyCore.statistics.average import Averages
class Analyzer(object):
    """Base Analyzer class. Used in Looper.

    Your custom analyzers should inherit from this class.
    """

    def __init__(self, cfg_ana, cfg_comp, looperName):
        """Create an analyzer.

        Parameters (also stored as attributes for later use):
          cfg_ana: configuration parameters for this analyzer
            (e.g. a pt cut)
          cfg_comp: configuration parameters for the data or MC component
            (e.g. DYJets)
          looperName: name of the Looper which runs this analyzer.

        Attributes:
          dirName: analyzer directory, where you can write anything you want
        """
        self.class_object = cfg_ana.class_object
        self.instance_label = cfg_ana.instance_label
        self.name = cfg_ana.name
        self.verbose = cfg_ana.verbose
        self.cfg_ana = cfg_ana
        self.cfg_comp = cfg_comp
        self.looperName = looperName
        # Each analyzer normally gets its own subdirectory under the looper
        # directory; the optional cfg_ana.nosubdir flag opts out of that.
        if getattr(cfg_ana, 'nosubdir', False):
            self.dirName = self.looperName
        else:
            self.dirName = '/'.join([self.looperName, self.name])
            os.mkdir(self.dirName)
        # This is the main logger corresponding to the looper.
        # Each analyzer could also declare its own logger.
        self.mainLogger = logging.getLogger(looperName)
        self.beginLoopCalled = False

    def beginLoop(self, setup):
        """Automatically called by Looper, for all analyzers."""
        self.counters = Counters()
        self.averages = Averages()
        self.mainLogger.info('beginLoop ' + self.cfg_ana.name)
        self.beginLoopCalled = True

    def endLoop(self, setup):
        """Automatically called by Looper, for all analyzers."""
        self.mainLogger.info('')
        self.mainLogger.info(str(self))
        self.mainLogger.info('')

    def process(self, event):
        """Automatically called by Looper, for all analyzers.

        Each analyzer in the sequence will be passed the same event instance.
        Each analyzer can access, modify, and store event information, of any
        type. This base implementation just prints the analyzer name;
        subclasses are expected to override it.
        """
        print(self.cfg_ana.name)

    def write(self, setup):
        """Called by Looper.write, for all analyzers.

        Just overload it if you have histograms to write."""
        self.counters.write(self.dirName)
        self.averages.write(self.dirName)

    def __str__(self):
        """A multipurpose printout. Should do the job for most analyzers."""
        ana = str(self.cfg_ana)
        count = ''
        ave = ''
        # counters/averages only exist once beginLoop() has run.
        if hasattr(self, 'counters') and len(self.counters.counters) > 0:
            count = '\n'.join(map(str, self.counters.counters))
        if hasattr(self, 'averages') and len(self.averages) > 0:
            ave = '\n'.join(map(str, self.averages))
        return '\n'.join([ana, count, ave])
| 37.247059 | 86 | 0.646241 |
a41f5288c0490cb0e2d0ab30950ec449f315447e | 294 | py | Python | django_tex/response.py | bhaettasch/django-tex | 66cc6567acde4db2ac971b7707652067e664392c | [
"MIT"
] | null | null | null | django_tex/response.py | bhaettasch/django-tex | 66cc6567acde4db2ac971b7707652067e664392c | [
"MIT"
] | null | null | null | django_tex/response.py | bhaettasch/django-tex | 66cc6567acde4db2ac971b7707652067e664392c | [
"MIT"
] | 1 | 2021-05-08T16:29:06.000Z | 2021-05-08T16:29:06.000Z | from django.http import HttpResponse
class PDFResponse(HttpResponse):
    """An ``HttpResponse`` serving a rendered PDF document.

    The response advertises the ``application/pdf`` content type and
    suggests a client-side download name via ``Content-Disposition``.
    """

    def __init__(self, content, filename=None):
        """Build the response from raw PDF bytes.

        content: the PDF payload to send back to the client.
        filename: suggested name for the downloaded file (may be None).
        """
        super(PDFResponse, self).__init__(content_type='application/pdf')
        disposition = 'filename="{}"'.format(filename)
        self['Content-Disposition'] = disposition
        self.write(content)
| 29.4 | 73 | 0.707483 |
66fe53de1c5faeef357590aadb172a260c6a210b | 14,772 | py | Python | tensor2tensor/models/research/moe_experiments.py | arturbeg/tensor2tensor | 43b70752311d3b8dc5f11f63d0dea3efdf8ee25b | [
"Apache-2.0"
] | null | null | null | tensor2tensor/models/research/moe_experiments.py | arturbeg/tensor2tensor | 43b70752311d3b8dc5f11f63d0dea3efdf8ee25b | [
"Apache-2.0"
] | null | null | null | tensor2tensor/models/research/moe_experiments.py | arturbeg/tensor2tensor | 43b70752311d3b8dc5f11f63d0dea3efdf8ee25b | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2019 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Language modeling experiments in mtf."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensor2tensor.models import mtf_transformer
from tensor2tensor.models import mtf_transformer2
from tensor2tensor.models.research import moe
from tensor2tensor.utils import registry
@registry.register_hparams
def xmoe_tr_dense_2k():
"""Series of architectural experiments on Translation.
# run on 8-core setup
119M params, einsum=0.95e13
Returns:
a hparams
"""
hparams = mtf_transformer2.mtf_bitransformer_base()
hparams.encoder_layers = ["self_att", "drd"] * 4
hparams.decoder_layers = ["self_att", "enc_att", "drd"] * 4
hparams.batch_size = 64
hparams.shared_embedding_and_softmax_weights = True
hparams.mesh_shape = "batch:8"
return hparams
@registry.register_hparams
def xmoe_tr_dense_32k():
"""Bigger d_ff.
623M params, einsum=3.42e13
Returns:
a hparams
"""
hparams = xmoe_tr_dense_2k()
hparams.d_ff = 32768
return hparams
@registry.register_hparams
def xmoe_tr_1d():
"""Mixture of experts (16 experts).
623M Params, einsum=1.09e13
Returns:
a hparams
"""
hparams = xmoe_tr_dense_2k()
hparams.encoder_layers = ["self_att", "moe_1d"] * 4
hparams.decoder_layers = ["self_att", "enc_att", "moe_1d"] * 4
hparams.layout = "batch:batch;experts:batch"
hparams.moe_hidden_size = 2048
hparams.moe_num_experts = 16
return hparams
@registry.register_hparams
def xmoe_tr_2d():
"""Mixture of experts (16 experts).
623M Params, einsum=1.09e13
Returns:
a hparams
"""
hparams = xmoe_tr_dense_2k()
hparams.mesh_shape = "b0:2;b1:4"
hparams.outer_batch_size = 4
hparams.layout = "outer_batch:b0;inner_batch:b1,expert_x:b1,expert_y:b0"
hparams.encoder_layers = ["self_att", "moe_2d"] * 4
hparams.decoder_layers = ["self_att", "enc_att", "moe_2d"] * 4
hparams.moe_hidden_size = 2048
hparams.moe_experts_x = 4
hparams.moe_experts_y = 4
return hparams
@registry.register_hparams
def xmoe_dense_4k():
"""Series of architectural experiments on cheap language models.
For all of these architectures, we run on languagemodel_lm1b8k_packed
for 32000 steps.
All log-perplexities are per-token - multiply by 1.298 for per-word
Results:
model params(M) einsum alltoall mxu-util log-ppl
xmoe_dense_4k 30 3.0e12 0 45% 3.31
xmoe_dense_8k 46 4.7e12 0 49% 3.24
xmoe_dense_64k 282 2.8e13 0 3.06
xmoe_top_2 282 4.0e12 3.4e8 36% 3.07
xmoe_top_2_c15 282 4.5e12 4.0e8 38% 3.07
xmoe_2d 282 5.3e12 7.6e8 34% 3.06
Trained at 4x the batch size:
xmoe_2d_88 1090 2.1e13 3.0e9 24% 3.07
Note: configurations and code are likely to change without notice.
Returns:
a hparams
"""
hparams = mtf_transformer.mtf_transformer_base_lm()
hparams.attention_dropout = 0.0
hparams.relu_dropout = 0.0
hparams.layer_prepostprocess_dropout = 0.0
# The following hparams are constant across all these experiments.
hparams.batch_size = 128
hparams.d_model = 512
hparams.d_kv = 128
hparams.num_heads = 4
hparams.decoder_layers = ["att", "drd"] * 4
hparams.shared_embedding_and_softmax_weights = False
hparams.learning_rate_schedule = "rsqrt_decay"
# We will vary the following parameters related to the ffn/moe layers.
hparams.d_ff = 4096
hparams.layout = "batch:batch;vocab:model;d_ff:model;heads:model"
hparams.mesh_shape = "batch:8"
return hparams
@registry.register_hparams
def xmoe_dense_8k():
  """Variant of xmoe_dense_4k with a doubled feed-forward width (d_ff=8192)."""
  config = xmoe_dense_4k()
  config.d_ff = 8192
  return config
@registry.register_hparams
def xmoe_dense_64k():
"""Very wide layer- run on 4x4."""
hparams = xmoe_dense_4k()
hparams.d_ff = 65536
hparams.mesh_shape = "model:4,batch:8"
return hparams
@registry.register_hparams
def xmoe_top_2():
"""Mixture of experts (16 experts)."""
hparams = xmoe_dense_4k()
moe.set_default_moe_hparams(hparams)
hparams.mesh_shape = "all:8"
hparams.layout = "batch:all;experts:all"
return hparams
@registry.register_hparams
def xmoe_top_2_c15():
"""Mixture of experts."""
hparams = xmoe_top_2()
hparams.moe_capacity_factor_train = 1.5
return hparams
@registry.register_hparams
def xmoe_2d():
"""Two-dimensional hierarchical mixture of 16 experts."""
hparams = xmoe_top_2()
hparams.decoder_layers = ["att", "hmoe"] * 4
hparams.mesh_shape = "b0:2;b1:4"
hparams.outer_batch_size = 4
hparams.layout = "outer_batch:b0;inner_batch:b1,expert_x:b1,expert_y:b0"
hparams.moe_num_experts = [4, 4]
return hparams
@registry.register_hparams
def xmoe_2d_debug():
"""For debugging.
Running this model on TPU without the hack of casting to bfloat16 for
alltoall results in nan on the first step.
TODO(noam): debug
Returns:
a hparams
"""
hparams = xmoe_2d()
hparams.decoder_layers = ["hmoe"] * 1
hparams.activation_dtype = "float32"
return hparams
@registry.register_hparams
def xmoe_2d_c15():
"""Mixture of experts."""
hparams = xmoe_2d()
hparams.moe_capacity_factor_train = 1.5
return hparams
@registry.register_hparams
def xmoe_2d_x64():
"""Two-dimensional hierarchical mixture of 64 experts."""
hparams = xmoe_2d()
# hparams.mesh_shape = "b0:4;b1:8"
hparams.outer_batch_size = 4
hparams.moe_num_experts = [8, 8]
return hparams
@registry.register_hparams
def xmoe2_dense(sz):
"""Series of architectural experiments on language modeling.
Larger models than the ones above.
All models are trained on sequences of 1024 tokens.
We assume infinite training data, so no dropout necessary.
We process 2^36 tokens in training = 524288 steps at batch size 128
TODO(noam): find a large enough dataset for these experiments.
You can use languagemodel_wiki_noref_v32k_l1k, but this is too small,
(1 epoch = ~46000 steps) so training will cover about 11 epochs.
Note: configurations and code are likely to change without notice.
Run on TPU 4x4 for 524288 steps unless otherwise indicated.
Args:
sz: an integer
Returns:
a hparams
"""
hparams = mtf_transformer.mtf_transformer_paper_lm(sz)
hparams.attention_dropout = 0.0
hparams.relu_dropout = 0.0
hparams.layer_prepostprocess_dropout = 0.0
hparams.max_length = 1024
hparams.batch_size = 128
hparams.learning_rate_schedule = "rsqrt_decay*linear_decay"
hparams.learning_rate_decay_steps = 65536
hparams.layout = "batch:batch;vocab:model;d_ff:model;heads:model"
hparams.mesh_shape = "batch:32"
return hparams
@registry.register_hparams
def xmoe2_dense_0():
return xmoe2_dense(0)
@registry.register_hparams
def xmoe2_dense_1():
return xmoe2_dense(1)
@registry.register_hparams
def xmoe2_dense_2():
return xmoe2_dense(2)
@registry.register_hparams
def xmoe2_dense_3():
return xmoe2_dense(3)
@registry.register_hparams
def xmoe2_v1():
"""Model incorporating mixture-of-experts and local-attention.
~6B parameters
32 experts in 3 hierarchichal moe layers.
Returns:
a hparams
"""
hparams = xmoe2_dense(0)
moe.set_default_moe_hparams(hparams)
hparams.decoder_layers = (
["local_att", "local_att", "drd",
"att", "drd", "local_att", "local_att", "hmoe"] * 4)[:-1]
hparams.d_ff = 2048
hparams.d_kv = 128
hparams.moe_hidden_size = 32768
hparams.mesh_shape = "b0:4;b1:8"
hparams.layout = "outer_batch:b0;inner_batch:b1,expert_x:b1,expert_y:b0"
hparams.outer_batch_size = 4
hparams.moe_num_experts = [8, 4]
hparams.num_heads = 4
return hparams
@registry.register_hparams
def xmoe2_v1_x128():
"""128 experts, ~25B params - Train for 131072 steps on 8x8."""
hparams = xmoe2_v1()
hparams.moe_num_experts = [16, 8]
hparams.outer_batch_size = 8
hparams.mesh_shape = "b0:8;b1:16"
hparams.batch_size = 512
hparams.learning_rate_decay_steps = 16384
return hparams
@registry.register_hparams
def xmoe2_tiny():
"""Test on local cpu."""
hparams = xmoe2_v1()
hparams.decoder_layers = [
"local_att", "att", "compressed_att", "drd", "hmoe"]
hparams.d_model = 128
hparams.moe_hidden_size = 512
hparams.outer_batch_size = 0
hparams.batch_size = 2
hparams.mesh_shape = ""
hparams.activation_dtype = "float32"
return hparams
@registry.register_hparams
def xmoe2_v1_l4k():
"""With sequence length 4096."""
hparams = xmoe2_v1()
hparams.batch_size = 32
hparams.max_length = 4096
hparams.split_to_length = 4096
hparams.reshape_logits_hack = True
return hparams
@registry.register_hparams
def xmoe2_v1_l4k_local_only():
"""With sequence length 4096."""
hparams = xmoe2_v1_l4k()
hparams.decoder_layers = [
"local_att" if l == "att" else l for l in hparams.decoder_layers]
return hparams
@registry.register_hparams
def xmoe2_v1_l4k_global_only():
"""With sequence length 4096."""
hparams = xmoe2_v1_l4k()
hparams.decoder_layers = [
"att" if l == "local_att" else l for l in hparams.decoder_layers]
return hparams
@registry.register_hparams
def xmoe2_v1_l4k_compressed_c4():
"""With compressed attention."""
hparams = xmoe2_v1_l4k()
hparams.decoder_layers = [
"compressed_att" if l == "att" else l for l in hparams.decoder_layers]
hparams.compression_factor = 4
return hparams
@registry.register_hparams
def xmoe2_v1_l4k_compressed_c8():
"""With compressed attention."""
hparams = xmoe2_v1_l4k_compressed_c4()
hparams.compression_factor = 8
return hparams
@registry.register_hparams
def wiki_2x2_base():
"""Set of architectural experiments - language model on wikipedia on a 2x2.
1 epoch = ~180k steps at batch size 32 - we may never finish an epoch!
Returns:
a hparams
"""
hparams = mtf_transformer.mtf_transformer_base_lm()
hparams.shared_embedding_and_softmax_weights = False
# no dropout - dataset is big enough to avoid overfitting.
hparams.attention_dropout = 0.0
hparams.relu_dropout = 0.0
hparams.layer_prepostprocess_dropout = 0.0
hparams.max_length = 1024
# 4 sequences per core
hparams.batch_size = 32
# We don't use linear decay in these experiments, since we don't want
# a sharp jump in quality at the end of the training schedule.
# You can insert this once you find the right architecture.
hparams.learning_rate_schedule = "rsqrt_decay"
hparams.mesh_shape = "all:8"
hparams.layout = "batch:all;experts:all"
# parameters for mixture-of-experts
moe.set_default_moe_hparams(hparams)
hparams.moe_num_experts = 16
hparams.moe_hidden_size = 8192
hparams.decoder_layers = ["att", "drd"] * 6
hparams.d_model = 1024
hparams.d_ff = 2048
hparams.d_kv = 128
hparams.num_heads = 4
return hparams
@registry.register_hparams
def wiki_2x2_v1():
hparams = wiki_2x2_base()
hparams.decoder_layers = (
["local_att", "local_att", "drd",
"att", "drd", "local_att", "local_att", "moe"] * 4)[:-1]
return hparams
@registry.register_hparams
def wiki_2x2_local():
hparams = wiki_2x2_base()
hparams.decoder_layers = ["local_att", "drd"] * 6
return hparams
@registry.register_hparams
def denoise_m15():
"""Denoising experiment."""
hparams = xmoe2_dense_0()
hparams.decoder_type = "denoising"
hparams.noising_spec_train = {"type": "mask", "prob": 0.15}
return hparams
@registry.register_hparams
def denoise_m30():
"""More masking during training."""
hparams = xmoe2_dense_0()
hparams.decoder_type = "denoising"
hparams.noising_spec_train = {"type": "mask", "prob": 0.3}
return hparams
@registry.register_hparams
def denoise_dense_2_m30():
"""More masking during training."""
hparams = xmoe2_dense_2()
hparams.decoder_type = "denoising"
hparams.noising_spec_train = {"type": "mask", "prob": 0.3}
return hparams
@registry.register_hparams
def denoise_z15():
"""Replace tokens instead of masking."""
hparams = xmoe2_dense_0()
hparams.decoder_type = "denoising"
hparams.noising_spec_train = {"type": "random_zipfian", "prob": 0.15}
hparams.noising_use_eval_during_train = 0.25
return hparams
@registry.register_hparams
def denoise_t15():
"""Noise up with dropout and a little transformer."""
hparams = xmoe2_dense_0()
hparams.decoder_type = "denoising"
hparams.noising_spec_train = {
"type": "transformer",
"overrides": {
"noising_spec_train": {"type": "mask", "prob": 0.15},
"noising_use_eval_during_train": 0.0,
"decoder_layers": ["att", "drd"] * 4,
"num_heads": 4,
"d_model": 512,
"d_ff": 2048,
}
}
return hparams
@registry.register_hparams
def denoise_v1_m15():
"""Denoising experiment."""
hparams = xmoe2_v1()
# no local attention
# TODO(noam): non-masked version of local-attention
hparams.decoder_layers = [
"att" if l == "local_att" else l for l in hparams.decoder_layers]
hparams.decoder_type = "denoising"
hparams.noising_spec_train = {"type": "mask", "prob": 0.15}
return hparams
@registry.register_hparams
def denoise_v1_m30():
"""More masking during training."""
hparams = denoise_v1_m15()
hparams.noising_spec_train = {"type": "mask", "prob": 0.3}
return hparams
@registry.register_hparams
def denoise_v1_m50():
"""More masking during training."""
hparams = denoise_v1_m15()
hparams.noising_spec_train = {"type": "mask", "prob": 0.5}
return hparams
@registry.register_hparams
def denoise_v1_z15():
"""Replace tokens instead of masking."""
hparams = denoise_v1_m15()
hparams.noising_spec_train = {"type": "random_zipfian", "prob": 0.15}
return hparams
@registry.register_hparams
def denoise_v1_t15():
"""Noise up with dropout and a little transformer."""
hparams = denoise_v1_m15()
hparams.noising_spec_train = {
"type": "transformer",
"overrides": {
"noising_spec_train": {"type": "mask", "prob": 0.15},
"noising_use_eval_during_train": 0.0,
"decoder_layers": ["att", "drd"] * 4,
"num_heads": 4,
"d_model": 512,
"d_ff": 2048,
}
}
return hparams
| 26.66426 | 77 | 0.712429 |
6f93641122a51039c615a8f2cd677f98fcc2bc48 | 337 | py | Python | photos/views.py | newtonkiragu/mars-rover | 722355a6435571ec509cbf4f5cbfddb1a0f6c235 | [
"MIT"
] | null | null | null | photos/views.py | newtonkiragu/mars-rover | 722355a6435571ec509cbf4f5cbfddb1a0f6c235 | [
"MIT"
] | null | null | null | photos/views.py | newtonkiragu/mars-rover | 722355a6435571ec509cbf4f5cbfddb1a0f6c235 | [
"MIT"
] | null | null | null | from django.shortcuts import render
# Create your views here.
from django.views.generic import TemplateView
# Create your views here.
class ClassView(TemplateView):
    """Home page view, rendering the photo index template."""

    # Template rendered by TemplateView for the site's landing page.
    template_name = 'photos/index.html'
class RoverView(TemplateView):
    """Rover page view, rendering the rover photo template."""

    # Template rendered by TemplateView for the rover page.
    template_name = 'photos/rover.html'
| 16.85 | 45 | 0.718101 |
8d35058a070198e0bcd39e9e12cfd4aadbd33f6d | 10,512 | py | Python | stix2arango/postgresql.py | 1lca/stix2arango | 8bbd8db42710179ba5bebfcc4abe27d993588a0b | [
"MIT"
] | null | null | null | stix2arango/postgresql.py | 1lca/stix2arango | 8bbd8db42710179ba5bebfcc4abe27d993588a0b | [
"MIT"
] | null | null | null | stix2arango/postgresql.py | 1lca/stix2arango | 8bbd8db42710179ba5bebfcc4abe27d993588a0b | [
"MIT"
] | null | null | null | import ipaddress
import copy
from nis import match
from typing import Dict
from stix2arango.exceptions import InvalidObjectForOptimizer
from stix2arango.utils import deep_dict_update
import psycopg2
from psycopg2.errors import DuplicateTable
import uuid
# Python type name -> PostgreSQL column type.
type_map = {
    'str': 'TEXT',
    'int': 'INT',
}

# stix2arango comparison operator -> SQL operator.
operator_map = {
    '>': '>',
    '<': '<',
    '=': '=',
    '!=': '!=',
    '<=': '<=',
    '>=': '>=',
    'like': 'LIKE',
}


def convert_type(type_, value_):
    """Map between python type/postgresql type

    Args:
        type_ (str): python field type
        value_ (str): python field value

    Raises:
        RuntimeError: When a type can't be converted

    Returns:
        str: the postgresql associed type
    """
    try:
        return type_map[type_]
    except KeyError:
        raise RuntimeError("%s type not found" % (type_))
class PGResult(dict):
    # NOTE(review): this class looks unfinished -- __call__ only tests
    # self.field0_value and then does nothing. Documented as-is.

    def __init__(
        self,
        arango_conn,
        arango_id=None,
        field0_value=None,
        optimizer=None
    ):
        """Dict-like holder for one result row coming back from postgres.

        arango_conn: ArangoDB connection, presumably used to resolve the
            full document -- TODO confirm, nothing here uses it yet.
        arango_id: key of the matching ArangoDB document (if any).
        field0_value: value of the optimized field for this row.
        optimizer: the PostgresOptimizer that produced this result.
        """
        self.arango_conn = arango_conn
        self.arango_id = arango_id
        self.field0_value = field0_value
        self.optimizer = optimizer
        super().__init__()

    def __call__(self):
        # Stub: currently a no-op regardless of field0_value.
        if self.field0_value:
            pass
class PostgresOptimizer:
postgres_conn = None
count = 0
db_name = None
db_host = None
db_user = None
db_pass = None
db_port = None
def __init__(self, field):
    """Create an optimizer for one STIX field (e.g. 'ipv4-addr:x_ip').

    Args:
        field: colon-separated STIX field path whose values will be
            stored in a dedicated postgres table.

    Raises:
        RuntimeError: when PostgresOptimizer.postgres_conn is not set.
    """
    # The uuid is embedded in postgres table names, where '-' is not
    # a valid identifier character, hence the replacement.
    self.uuid = str(uuid.uuid4()).replace('-','_')
    self.table_name = None
    self.field = field
    self.table_created = False
    if not(PostgresOptimizer.postgres_conn):
        raise RuntimeError('PostgresOptimizer.postgres_conn is not set')
def insert_stix_obj(self, stix_object, arango_id, feed):
    """Store the optimized field of `stix_object` in postgres.

    Args:
        stix_object: the STIX object being inserted.
        arango_id: ArangoDB document id ('collection/key'); only the key
            part is kept.
        feed: the feed the object belongs to (determines the table name).

    Raises:
        InvalidObjectForOptimizer: when the object's type does not match
            the optimized field.

    Returns:
        the (unmodified) stix_object.
    """
    # Lazily create the backing table on first insert.
    if not self.table_created:
        self.__create_table(feed, stix_object)
    # Keep only the document key from 'collection/key'.
    arango_id = int(arango_id.split('/')[-1])
    if self.field != 'ipv4-addr:x_ip':
        value = self.__extract_field_value(self.field, stix_object)
    elif stix_object['type'] == 'ipv4-addr':
        # x_ip pseudo-field: the address lives in 'value'.
        value = stix_object['value']
    else:
        raise InvalidObjectForOptimizer(stix_object['type'])
    sql = "INSERT INTO " + self.table_name + " values (%s, %s, %s);"
    with PostgresOptimizer.postgres_conn.cursor() as cursor:
        cursor.execute(sql, [value, arango_id, stix_object['id']])
    # Commit every 1000 inserts (and on the very first one, count == 0).
    if self.count % 1000 == 0:
        PostgresOptimizer.postgres_conn.commit()
    self.count += 1
    return stix_object
def craft_obj_from_request(self, stix_id, field0):
    """Rebuild a partial STIX object from one postgres row.

    Only the optimized field (and id/type) is reconstructed; nested
    dictionaries are recreated along the field path.
    """
    parts = self.field.split(':')
    obj_type = parts[0]
    # Special case: the x_ip pseudo-field maps straight onto 'value'.
    if self.field == 'ipv4-addr:x_ip':
        return {'id': stix_id, 'type': obj_type, 'value': field0}
    result = {'id': stix_id, 'type': obj_type}
    cursor = result
    # Recreate one nested dict per intermediate path component.
    for step in parts[1:-1]:
        cursor[step] = {}
        cursor = cursor[step]
    # The final component holds the stored value.
    cursor[parts[-1]] = field0
    return result
def present_results(self, results):
dict_results = {}
for arango_id, stix_id, field0 in results:
if not str(arango_id) in dict_results:
dict_results[str(arango_id)] = []
dict_results[str(arango_id)] += [self.craft_obj_from_request(stix_id, field0)]
return dict_results
def query(self, operator, value, feed):
    """Query postgres for rows whose optimized field matches `value`.

    Args:
        operator: comparison operator from the stix2arango query language
            (must be a key of `operator_map`).
        value: literal to compare against; stix2arango wraps string
            literals in double quotes.
        feed: the feed being queried; used to derive the table name.

    Returns:
        dict mapping arango_id -> list of partially rebuilt STIX objects
        (see present_results).
    """
    # Strip the stix2arango double quotes. The value is bound as a
    # driver-level parameter below, so no SQL quoting is needed here.
    # The length guard also avoids an IndexError on empty input.
    if len(value) > 1 and value[0] == '"' and value[-1] == '"':
        value = value[1:-1]
    self.table_name = feed.storage_paradigm.get_collection_name(feed) + self.uuid
    # The table name is built from internal data (collection name + uuid),
    # so interpolating it is acceptable; the user-controlled `value` is
    # passed via psycopg2 placeholders to prevent SQL injection.
    if self.field == 'ipv4-addr:x_ip':
        # Match addresses contained in a CIDR range (>>) or exact matches.
        middle_sql = 'field0 >> %s OR field0 = %s'
        params = [value, value]
    else:
        middle_sql = 'field0 ' + operator_map[operator] + ' %s'
        params = [value]
    sql = 'select arango_id, stix_id, field0 from ' + self.table_name + ' where ' + middle_sql + ';'
    with PostgresOptimizer.postgres_conn.cursor() as cursor:
        cursor.execute(sql, params)
        results = cursor.fetchall()
    return self.present_results(results)
def query_from_arango_results(self, col_name, results, arango_conn):
# ! BUG : 2 times the same results
self.table_name = col_name + self.uuid
pg_results = []
for r in results:
try:
r = r.getStore()
except:
pass
if r['type'] == self.field.split(':')[0] or\
(self.field == 'ipv4-addr:x_ip' and r['type'] == 'ipv4-addr'):
sql = 'select arango_id, stix_id, field0 from ' + self.table_name + ' where arango_id = \'' + r['_key'] + '\''
cursor = PostgresOptimizer.postgres_conn.cursor()
cursor.execute(sql)
pg_results += cursor.fetchall()
pg_results = self.present_results(pg_results)
cross = []
for r in results:
if r['_key'] in pg_results:
for x in pg_results[r['_key']]:
r = copy.deepcopy(r)
cross.append(deep_dict_update(r, x))
return cross
def crosses_results_with_arango(self, results, arango_conn, col_name) -> list:
aql2 = 'for el in %s filter el._key in %s return el' % (col_name, str(list(results.keys())))
aql_results = [result.getStore() for result in
arango_conn.AQLQuery(aql2, raw_results=True)]
matched_results = []
for m in aql_results:
obj = copy.deepcopy(m)
for pg_obj in results[m['_key']]:
deep_dict_update(obj, pg_obj)
matched_results.append(obj)
return matched_results
def __del__(self):
    # Create an index on the optimized column once inserts are finished,
    # then flush any pending transaction.
    # NOTE(review): doing database work in __del__ is fragile -- at
    # interpreter shutdown the connection may already be gone, which is
    # presumably why all errors are swallowed below. The bare `except`
    # also hides real SQL errors; consider an explicit close() method.
    if self.table_created and PostgresOptimizer.postgres_conn:
        idx_name = 'idx_' + self.uuid
        sql = 'CREATE INDEX ' + idx_name + ' ON ' + self.table_name + '(field0)'
        try:
            with PostgresOptimizer.postgres_conn.cursor() as cursor:
                cursor.execute(sql)
        except:
            pass
    if PostgresOptimizer.postgres_conn:
        PostgresOptimizer.postgres_conn.commit()
def __dict__(self):
return {
'class': str(self.__class__.__name__).lower(),
'field' : self.field,
'uuid' : self.uuid
}
def __extract_field_type(self, field, stix_object):
object = copy.deepcopy(stix_object)
if field.split(':')[0] == object['type']:
for f in field.split(':')[1:]:
try:
object = object[f]
except (TypeError, KeyError):
raise InvalidObjectForOptimizer(stix_object['type'])
return type(object).__name__
else:
raise InvalidObjectForOptimizer(stix_object['type'])
def __extract_field_value(self, field, stix_object):
object = copy.deepcopy(stix_object)
if field.split(':')[0] == object['type']:
for f in field.split(':')[1:]:
try:
object = object[f]
except (TypeError, KeyError):
raise InvalidObjectForOptimizer(stix_object['type'])
return object
else:
raise InvalidObjectForOptimizer(stix_object['type'])
def __create_table(self, feed, stix_object):
try:
if self.field != 'ipv4-addr:x_ip':
type_ = self.__extract_field_type(self.field, stix_object)
value = self.__extract_field_value(self.field, stix_object)
type_ = convert_type(type_, value)
else:
type_ = 'inet'
content = 'field0 ' + type_ + ', arango_id int, stix_id text'
self.table_name = feed.storage_paradigm.get_collection_name(feed) + self.uuid
cursor = PostgresOptimizer.postgres_conn.cursor()
base_query = 'create table ' + self.table_name + ' (%s);'
query = base_query % (content)
cursor.execute(query)
cursor.close()
PostgresOptimizer.postgres_conn.commit()
except DuplicateTable:
pass
self.table_created = True
def list_all_table(self):
s = "SELECT"
s += " table_schema"
s += ", table_name"
s += " FROM information_schema.tables"
s += " WHERE"
s += " ("
s += " table_schema = 'public'"
s += " )"
s += " ORDER BY table_schema, table_name;"
with PostgresOptimizer.postgres_conn.cursor() as cursor:
cursor.execute(s)
results = cursor.fetchall()
return [list(r)[1] for r in results]
def drop_table(self, feed_name) -> bool:
try:
for table_name in self.list_all_table():
if table_name.startswith(feed_name):
sql = 'drop table ' + table_name
with PostgresOptimizer.postgres_conn.cursor() as cursor :
cursor.execute(sql)
PostgresOptimizer.postgres_conn.commit()
self.table_created = False
return True
except Exception:
return False
def delete_fields_in_object(self, object):
    """Return a deep copy of `object` with the optimized field removed.

    The field's value lives in postgres, so it is stripped from the copy
    that goes to ArangoDB. For the 'ipv4-addr:x_ip' pseudo-field, the
    'value' and 'id' keys are removed as well.
    """
    object = copy.deepcopy(object)
    object_type = self.field.split(':')[0]
    field_path = self.field.split(':')[1:-1]
    last_field = self.field.split(':')[-1]
    if object['type'] == object_type:
        dict_to_remove = object
        # Walk down to the dict holding the optimized field; stop early
        # if an intermediate key is absent.
        # NOTE(review): after an early break, `last_field` is still
        # looked up on the intermediate dict -- an unrelated key with
        # the same name would be deleted. Verify this is intended.
        for f in field_path:
            if f in dict_to_remove:
                dict_to_remove = dict_to_remove[f]
            else:
                break
        if last_field in dict_to_remove:
            del dict_to_remove[last_field]
    if self.field == 'ipv4-addr:x_ip':
        if 'value' in object:
            del object['value']
        if 'id' in object:
            del object['id']
    return object
@staticmethod
def connect_db():
auth = "dbname='%s' user='%s' host='%s' password='%s' port='%s'"
auth = auth % (
PostgresOptimizer.db_name,
PostgresOptimizer.db_user,
PostgresOptimizer.db_host,
PostgresOptimizer.db_pass,
PostgresOptimizer.db_port
)
PostgresOptimizer.postgres_conn = psycopg2.connect(auth) | 35.04 | 126 | 0.560027 |
80e51ec84b4c9e436a2fd25c63608640e10f3c20 | 241,531 | py | Python | tensorflow/python/framework/ops.py | kmh4321/tensorflow | 2c5df0c3a4a9c5aa7be26098aba0020dfdfac38e | [
"Apache-2.0"
] | null | null | null | tensorflow/python/framework/ops.py | kmh4321/tensorflow | 2c5df0c3a4a9c5aa7be26098aba0020dfdfac38e | [
"Apache-2.0"
] | null | null | null | tensorflow/python/framework/ops.py | kmh4321/tensorflow | 2c5df0c3a4a9c5aa7be26098aba0020dfdfac38e | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes and functions used to construct graphs."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import re
import sys
import threading
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import function_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.core.framework import op_def_pb2
from tensorflow.core.framework import versions_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import pywrap_tensorflow as c_api
from tensorflow.python import tf2
from tensorflow.python.eager import context
from tensorflow.python.eager import core
from tensorflow.python.eager import monitoring
from tensorflow.python.eager import tape
from tensorflow.python.framework import c_api_util
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import op_def_registry
from tensorflow.python.framework import registry
from tensorflow.python.framework import tensor_conversion_registry
from tensorflow.python.framework import tensor_like
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import traceable_stack
from tensorflow.python.framework import versions
from tensorflow.python.ops import control_flow_util
from tensorflow.python.platform import app
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.python.util import decorator_utils
from tensorflow.python.util import deprecation
from tensorflow.python.util import function_utils
from tensorflow.python.util import lock_util
from tensorflow.python.util import memory
from tensorflow.python.util import object_identity
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_stack
from tensorflow.python.util.compat import collections_abc
from tensorflow.python.util.deprecation import deprecated_args
from tensorflow.python.util.lazy_loader import LazyLoader
from tensorflow.python.util.tf_export import tf_export
ag_ctx = LazyLoader(
"ag_ctx", globals(),
"tensorflow.python.autograph.core.ag_ctx")
# Temporary global switches determining if we should enable the work-in-progress
# calls to the C API. These will be removed once all functionality is supported.
_USE_C_API = True
_USE_C_SHAPES = True
_api_usage_gauge = monitoring.BoolGauge(
"/tensorflow/api/ops_eager_execution",
"Whether ops.enable_eager_execution() is called.")
# pylint: disable=protected-access
_TensorLike = tensor_like._TensorLike
_DTYPES_INTERN_TABLE = dtypes._INTERN_TABLE
# pylint: enable=protected-access
def tensor_id(tensor):
  """Returns a unique identifier for this Tensor."""
  identifier = tensor._id  # pylint: disable=protected-access
  return identifier
class _UserDeviceSpec(object):
  """Store user-specified device and provide computation of merged device."""

  def __init__(self, device_name_or_function):
    """Accepts a MergeDevice, a callable, None, or a device string.

    The four branches below set `is_null_merge`, and possibly replace
    `function`/`display_name`, depending on which form was supplied.
    """
    self._device_name_or_function = device_name_or_function
    self.display_name = str(self._device_name_or_function)
    self.function = device_name_or_function
    self.raw_string = None

    if isinstance(device_name_or_function, pydev.MergeDevice):
      # Already a merge object: inherit its null-merge property.
      self.is_null_merge = device_name_or_function.is_null_merge

    elif callable(device_name_or_function):
      self.is_null_merge = False
      # Build a human-readable display name from the function's metadata.
      dev_func = self._device_name_or_function
      func_name = function_utils.get_func_name(dev_func)
      func_code = function_utils.get_func_code(dev_func)
      if func_code:
        fname = func_code.co_filename
        lineno = func_code.co_firstlineno
      else:
        fname = "unknown"
        lineno = -1
      self.display_name = "%s<%s, %d>" % (func_name, fname, lineno)

    elif device_name_or_function is None:
      # NOTE(taylorrobie): This MUST be False. None signals a break in the
      # device stack, so `is_null_merge` must be False for such a case to
      # allow callers to safely skip over null merges without missing a None.
      self.is_null_merge = False

    else:
      # Plain device string: convert it to a MergeDevice.
      self.raw_string = device_name_or_function
      self.function = pydev.merge_device(device_name_or_function)
      self.is_null_merge = self.function.is_null_merge

    # We perform this check in __init__ because it is of non-trivial cost,
    # and self.string_merge is typically called many times.
    self.fast_string_merge = isinstance(self.function, pydev.MergeDevice)

  def string_merge(self, node_def):
    # Fast path: MergeDevice can merge directly against the node's device
    # string without building intermediate objects.
    if self.fast_string_merge:
      return self.function.shortcut_string_merge(node_def)

    return compat.as_str(_device_string(self.function(node_def)))
class NullContextmanager(object):
  """A no-op context manager that accepts (and ignores) any arguments."""

  def __init__(self, *args, **kwargs):
    del args, kwargs  # Unused.

  def __enter__(self):
    return None

  def __exit__(self, type_arg, value_arg, traceback_arg):
    # Returning False lets exceptions raised in the body propagate.
    return False
def _override_helper(clazz_object, operator, func):
"""Overrides (string) operator on Tensors to call func.
Args:
clazz_object: the class to override for; either Tensor or SparseTensor.
operator: the string name of the operator to override.
func: the function that replaces the overridden operator.
Raises:
ValueError: If operator has already been overwritten,
or if operator is not allowed to be overwritten.
"""
existing = getattr(clazz_object, operator, None)
if existing is not None:
# Check to see if this is a default method-wrapper or slot wrapper which
# will be true for the comparison operators.
if not isinstance(existing, type(object.__lt__)):
raise ValueError("operator %s cannot be overwritten again on class %s." %
(operator, clazz_object))
if operator not in Tensor.OVERLOADABLE_OPERATORS:
raise ValueError("Overriding %s is disallowed" % operator)
setattr(clazz_object, operator, func)
def _as_graph_element(obj):
"""Convert `obj` to a graph element if possible, otherwise return `None`.
Args:
obj: Object to convert.
Returns:
The result of `obj._as_graph_element()` if that method is available;
otherwise `None`.
"""
conv_fn = getattr(obj, "_as_graph_element", None)
if conv_fn and callable(conv_fn):
return conv_fn()
return None
# Registry of types accepted by `is_dense_tensor_like`. Kept as a tuple so it
# can be passed straight to `isinstance`.
_TENSOR_LIKE_TYPES = tuple()


def is_dense_tensor_like(t):
  """EXPERIMENTAL: Returns true if `t` implements the tensor interface.

  See `register_dense_tensor_like_type()` for the current definition of a
  "tensor-like type".

  Args:
    t: An object.

  Returns:
    True iff `t` is an instance of one of the registered "tensor-like" types.
  """
  return isinstance(t, _TENSOR_LIKE_TYPES)


def register_dense_tensor_like_type(tensor_type):
  """EXPERIMENTAL: Registers `tensor_type` as implementing the tensor interface.

  A "tensor-like type" can represent a single dense tensor, and implements
  the `name` and `dtype` properties.

  Args:
    tensor_type: A type implementing the tensor interface.

  Raises:
    TypeError: If `tensor_type` does not implement the tensor interface.
  """
  # Both attributes must exist on the class AND be declared as properties;
  # a missing attribute and a non-property attribute produce the same error.
  if not isinstance(getattr(tensor_type, "name", None), property):
    raise TypeError("Type %s does not define a `name` property" %
                    tensor_type.__name__)
  if not isinstance(getattr(tensor_type, "dtype", None), property):
    raise TypeError("Type %s does not define a `dtype` property" %
                    tensor_type.__name__)
  # We expect this list to be small, so choose quadratic complexity
  # for registration, so that we have a tuple that can be used for
  # more efficient `isinstance` checks later.
  global _TENSOR_LIKE_TYPES
  _TENSOR_LIKE_TYPES = _TENSOR_LIKE_TYPES + (tensor_type,)
def uid():
  """A unique (within this program execution) integer."""
  # Delegates to the eager C API's id generator.
  return c_api.TFE_Py_UID()
def numpy_text(tensor, is_repr=False):
  """Human readable representation of a tensor's numpy value."""
  if not tensor.dtype.is_numpy_compatible:
    return "<unprintable>"
  # pylint: disable=protected-access
  value = tensor._numpy()
  # pylint: enable=protected-access
  text = repr(value) if is_repr else str(value)
  # Multi-line values start on a fresh line so they align nicely when embedded
  # in a larger message.
  return "\n" + text if "\n" in text else text
def enable_tensor_equality():
  """Compare Tensors with element-wise comparison and thus be unhashable.

  Comparing tensors with element-wise allows comparisons such as
  tf.Variable(1.0) == 1.0. Element-wise equality implies that tensors are
  unhashable. Thus tensors can no longer be directly used in sets or as a key in
  a dictionary.
  """
  setattr(Tensor, "_USE_EQUALITY", True)  # pylint: disable=protected-access
def disable_tensor_equality():
  """Compare Tensors by their id and be hashable.

  This is a legacy behaviour of TensorFlow and is highly discouraged.
  """
  setattr(Tensor, "_USE_EQUALITY", False)  # pylint: disable=protected-access
@tf_export("Tensor")
class Tensor(_TensorLike):
"""Represents one of the outputs of an `Operation`.
A `Tensor` is a symbolic handle to one of the outputs of an
`Operation`. It does not hold the values of that operation's output,
but instead provides a means of computing those values in a
TensorFlow `tf.compat.v1.Session`.
This class has two primary purposes:
1. A `Tensor` can be passed as an input to another `Operation`.
This builds a dataflow connection between operations, which
enables TensorFlow to execute an entire `Graph` that represents a
large, multi-step computation.
2. After the graph has been launched in a session, the value of the
`Tensor` can be computed by passing it to
`tf.Session.run`.
`t.eval()` is a shortcut for calling
`tf.compat.v1.get_default_session().run(t)`.
In the following example, `c`, `d`, and `e` are symbolic `Tensor`
objects, whereas `result` is a numpy array that stores a concrete
value:
```python
# Build a dataflow graph.
c = tf.constant([[1.0, 2.0], [3.0, 4.0]])
d = tf.constant([[1.0, 1.0], [0.0, 1.0]])
e = tf.matmul(c, d)
# Construct a `Session` to execute the graph.
sess = tf.compat.v1.Session()
# Execute the graph and store the value that `e` represents in `result`.
result = sess.run(e)
```
"""
# List of Python operators that we allow to override.
OVERLOADABLE_OPERATORS = {
# Binary.
"__add__",
"__radd__",
"__sub__",
"__rsub__",
"__mul__",
"__rmul__",
"__div__",
"__rdiv__",
"__truediv__",
"__rtruediv__",
"__floordiv__",
"__rfloordiv__",
"__mod__",
"__rmod__",
"__lt__",
"__le__",
"__gt__",
"__ge__",
"__ne__",
"__eq__",
"__and__",
"__rand__",
"__or__",
"__ror__",
"__xor__",
"__rxor__",
"__getitem__",
"__pow__",
"__rpow__",
# Unary.
"__invert__",
"__neg__",
"__abs__",
"__matmul__",
"__rmatmul__"
}
# Whether to allow hashing or numpy-style equality
_USE_EQUALITY = False
def __init__(self, op, value_index, dtype):
"""Creates a new `Tensor`.
Args:
op: An `Operation`. `Operation` that computes this tensor.
value_index: An `int`. Index of the operation's endpoint that produces
this tensor.
dtype: A `DType`. Type of elements stored in this tensor.
Raises:
TypeError: If the op is not an `Operation`.
"""
if not isinstance(op, Operation):
raise TypeError("op needs to be an Operation: %s" % op)
self._op = op
self._value_index = value_index
self._dtype = dtypes.as_dtype(dtype)
# This will be set by self._as_tf_output().
self._tf_output = None
# This will be set by self.shape().
self._shape_val = None
# List of operations that use this Tensor as input. We maintain this list
# to easily navigate a computation graph.
self._consumers = []
self._id = uid()
self._name = None
@property
def op(self):
"""The `Operation` that produces this tensor as an output."""
return self._op
@property
def dtype(self):
"""The `DType` of elements in this tensor."""
return self._dtype
@property
def graph(self):
"""The `Graph` that contains this tensor."""
return self._op.graph
@property
def name(self):
"""The string name of this tensor."""
if self._name is None:
if not self._op.name:
raise ValueError("Operation was not named: %s" % self._op)
self._name = "%s:%d" % (self._op.name, self._value_index)
return self._name
@property
def device(self):
"""The name of the device on which this tensor will be produced, or None."""
return self._op.device
@property
def shape(self):
"""Returns the `TensorShape` that represents the shape of this tensor.
The shape is computed using shape inference functions that are
registered in the Op for each `Operation`. See
`tf.TensorShape`
for more details of what a shape represents.
The inferred shape of a tensor is used to provide shape
information without having to launch the graph in a session. This
can be used for debugging, and providing early error messages. For
example:
```python
c = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
print(c.shape)
==> TensorShape([Dimension(2), Dimension(3)])
d = tf.constant([[1.0, 0.0], [0.0, 1.0], [1.0, 0.0], [0.0, 1.0]])
print(d.shape)
==> TensorShape([Dimension(4), Dimension(2)])
# Raises a ValueError, because `c` and `d` do not have compatible
# inner dimensions.
e = tf.matmul(c, d)
f = tf.matmul(c, d, transpose_a=True, transpose_b=True)
print(f.shape)
==> TensorShape([Dimension(3), Dimension(4)])
```
In some cases, the inferred shape may have unknown dimensions. If
the caller has additional information about the values of these
dimensions, `Tensor.set_shape()` can be used to augment the
inferred shape.
Returns:
A `TensorShape` representing the shape of this tensor.
"""
if self._shape_val is None:
self._shape_val = self._c_api_shape()
return self._shape_val
def _get_input_ops_without_shapes(self, target_op):
"""Returns ops needing shape inference to compute target_op's shape."""
result = []
stack = [self._op]
visited = set()
while stack:
op = stack.pop()
if op in visited:
continue
result.append(op)
stack.extend(t.op for t in op.inputs if t._shape_val is None)
visited.add(op)
return result
def _c_api_shape(self):
"""Returns the TensorShape of this tensor according to the C API."""
c_graph = self._op._graph._c_graph # pylint: disable=protected-access
shape_vector, unknown_shape = c_api.TF_GraphGetTensorShapeHelper(
c_graph, self._as_tf_output())
if unknown_shape:
return tensor_shape.unknown_shape()
else:
shape_vector = [None if d == -1 else d for d in shape_vector]
return tensor_shape.TensorShape(shape_vector)
@property
def _shape(self):
logging.warning("Tensor._shape is private, use Tensor.shape "
"instead. Tensor._shape will eventually be removed.")
return self.shape
@_shape.setter
def _shape(self, value):
raise ValueError(
"Tensor._shape cannot be assigned, use Tensor.set_shape instead.")
def _disallow_when_autograph_disabled(self, task):
raise errors.OperatorNotAllowedInGraphError(
"{} is not allowed: AutoGraph is disabled in this function."
" Try decorating it directly with @tf.function.".format(task))
def _disallow_when_autograph_enabled(self, task):
raise errors.OperatorNotAllowedInGraphError(
"{} is not allowed: AutoGraph did not convert this function. Try"
" decorating it directly with @tf.function.".format(task))
def _disallow_in_graph_mode(self, task):
raise errors.OperatorNotAllowedInGraphError(
"{} is not allowed in Graph execution. Use Eager execution or decorate"
" this function with @tf.function.".format(task))
def _disallow_bool_casting(self):
if ag_ctx.control_status_ctx().status == ag_ctx.Status.DISABLED:
self._disallow_when_autograph_disabled(
"using a `tf.Tensor` as a Python `bool`")
elif ag_ctx.control_status_ctx().status == ag_ctx.Status.ENABLED:
self._disallow_when_autograph_enabled(
"using a `tf.Tensor` as a Python `bool`")
else:
# Default: V1-style Graph execution.
self._disallow_in_graph_mode("using a `tf.Tensor` as a Python `bool`")
def _disallow_iteration(self):
if ag_ctx.control_status_ctx().status == ag_ctx.Status.DISABLED:
self._disallow_when_autograph_disabled("iterating over `tf.Tensor`")
elif ag_ctx.control_status_ctx().status == ag_ctx.Status.ENABLED:
self._disallow_when_autograph_enabled("iterating over `tf.Tensor`")
else:
# Default: V1-style Graph execution.
self._disallow_in_graph_mode("iterating over `tf.Tensor`")
def __iter__(self):
if not context.executing_eagerly():
self._disallow_iteration()
shape = self._shape_tuple()
if shape is None:
raise TypeError("Cannot iterate over a tensor with unknown shape.")
if not shape:
raise TypeError("Cannot iterate over a scalar tensor.")
if shape[0] is None:
raise TypeError(
"Cannot iterate over a tensor with unknown first dimension.")
for i in xrange(shape[0]):
yield self[i]
def _shape_as_list(self):
if self.shape.ndims is not None:
return [dim.value for dim in self.shape.dims]
else:
return None
def _shape_tuple(self):
shape = self._shape_as_list()
if shape is None:
return None
return tuple(shape)
def _rank(self):
"""Integer rank of this Tensor, if known, else None.
Returns:
Integer rank or None
"""
return self.shape.ndims
def get_shape(self):
"""Alias of Tensor.shape."""
return self.shape
def set_shape(self, shape):
"""Updates the shape of this tensor.
This method can be called multiple times, and will merge the given
`shape` with the current shape of this tensor. It can be used to
provide additional information about the shape of this tensor that
cannot be inferred from the graph alone. For example, this can be used
to provide additional information about the shapes of images:
```python
_, image_data = tf.compat.v1.TFRecordReader(...).read(...)
image = tf.image.decode_png(image_data, channels=3)
# The height and width dimensions of `image` are data dependent, and
# cannot be computed without executing the op.
print(image.shape)
==> TensorShape([Dimension(None), Dimension(None), Dimension(3)])
# We know that each image in this dataset is 28 x 28 pixels.
image.set_shape([28, 28, 3])
print(image.shape)
==> TensorShape([Dimension(28), Dimension(28), Dimension(3)])
```
NOTE: This shape is not enforced at runtime. Setting incorrect shapes can
result in inconsistencies between the statically-known graph and the runtime
value of tensors. For runtime validation of the shape, use `tf.ensure_shape`
instead.
Args:
shape: A `TensorShape` representing the shape of this tensor, a
`TensorShapeProto`, a list, a tuple, or None.
Raises:
ValueError: If `shape` is not compatible with the current shape of
this tensor.
"""
# Reset cached shape.
self._shape_val = None
# We want set_shape to be reflected in the C API graph for when we run it.
if not isinstance(shape, tensor_shape.TensorShape):
shape = tensor_shape.TensorShape(shape)
dim_list = []
if shape.dims is None:
unknown_shape = True
else:
unknown_shape = False
for dim in shape.dims:
if dim.value is None:
dim_list.append(-1)
else:
dim_list.append(dim.value)
try:
c_api.TF_GraphSetTensorShape_wrapper(
self._op._graph._c_graph, # pylint: disable=protected-access
self._as_tf_output(),
dim_list,
unknown_shape)
except errors.InvalidArgumentError as e:
# Convert to ValueError for backwards compatibility.
raise ValueError(str(e))
@property
def value_index(self):
"""The index of this tensor in the outputs of its `Operation`."""
return self._value_index
def consumers(self):
"""Returns a list of `Operation`s that consume this tensor.
Returns:
A list of `Operation`s.
"""
consumer_names = c_api.TF_OperationOutputConsumers_wrapper(
self._as_tf_output())
# pylint: disable=protected-access
return [
self.graph._get_operation_by_name_unsafe(name)
for name in consumer_names
]
# pylint: enable=protected-access
def _as_node_def_input(self):
"""Return a value to use for the NodeDef "input" attribute.
The returned string can be used in a NodeDef "input" attribute
to indicate that the NodeDef uses this Tensor as input.
Raises:
ValueError: if this Tensor's Operation does not have a name.
Returns:
a string.
"""
if not self._op.name:
raise ValueError("Operation was not named: %s" % self._op)
if self._value_index == 0:
return self._op.name
else:
return "%s:%d" % (self._op.name, self._value_index)
def _as_tf_output(self):
# pylint: disable=protected-access
# NOTE: Beyond preventing unnecessary (re-)allocation, the cached object
# also guarantees that a dictionary of tf_output objects will retain a
# deterministic (yet unsorted) order which prevents memory blowup in the
# cache of executor(s) stored for every session.
if self._tf_output is None:
self._tf_output = c_api_util.tf_output(self.op._c_op, self.value_index)
return self._tf_output
# pylint: enable=protected-access
def __str__(self):
return "Tensor(\"%s\"%s%s%s)" % (
self.name,
(", shape=%s" %
self.get_shape()) if self.get_shape().ndims is not None else "",
(", dtype=%s" % self._dtype.name) if self._dtype else "",
(", device=%s" % self.device) if self.device else "")
def __repr__(self):
return "<tf.Tensor '%s' shape=%s dtype=%s>" % (self.name, self.get_shape(),
self._dtype.name)
def __hash__(self):
if Tensor._USE_EQUALITY and executing_eagerly_outside_functions():
raise TypeError("Tensor is unhashable if Tensor equality is enabled.")
else:
return id(self)
def __copy__(self):
# TODO(b/77597810): get rid of Tensor copies.
cls = self.__class__
result = cls.__new__(cls)
result.__dict__.update(self.__dict__)
return result
# NOTE(mrry): This enables the Tensor's overloaded "right" binary
# operators to run when the left operand is an ndarray, because it
# accords the Tensor class higher priority than an ndarray, or a
# numpy matrix.
# TODO(mrry): Convert this to using numpy's __numpy_ufunc__
# mechanism, which allows more control over how Tensors interact
# with ndarrays.
__array_priority__ = 100
def __array__(self):
raise NotImplementedError("Cannot convert a symbolic Tensor ({}) to a numpy"
" array.".format(self.name))
def __len__(self):
raise TypeError("len is not well defined for symbolic Tensors. ({}) "
"Please call `x.shape` rather than `len(x)` for "
"shape information.".format(self.name))
@staticmethod
def _override_operator(operator, func):
_override_helper(Tensor, operator, func)
def __bool__(self):
"""Dummy method to prevent a tensor from being used as a Python `bool`.
This overload raises a `TypeError` when the user inadvertently
treats a `Tensor` as a boolean (most commonly in an `if` or `while`
statement), in code that was not converted by AutoGraph. For example:
```python
if tf.constant(True): # Will raise.
# ...
if tf.constant(5) < tf.constant(7): # Will raise.
# ...
```
Raises:
`TypeError`.
"""
self._disallow_bool_casting()
def __nonzero__(self):
"""Dummy method to prevent a tensor from being used as a Python `bool`.
This is the Python 2.x counterpart to `__bool__()` above.
Raises:
`TypeError`.
"""
self._disallow_bool_casting()
def eval(self, feed_dict=None, session=None):
"""Evaluates this tensor in a `Session`.
Calling this method will execute all preceding operations that
produce the inputs needed for the operation that produces this
tensor.
*N.B.* Before invoking `Tensor.eval()`, its graph must have been
launched in a session, and either a default session must be
available, or `session` must be specified explicitly.
Args:
feed_dict: A dictionary that maps `Tensor` objects to feed values. See
`tf.Session.run` for a description of the valid feed values.
session: (Optional.) The `Session` to be used to evaluate this tensor. If
none, the default session will be used.
Returns:
A numpy array corresponding to the value of this tensor.
"""
return _eval_using_default_session(self, feed_dict, self.graph, session)
def experimental_ref(self):
# tf.Variable also has the same experimental_ref() API. If you update the
# documenation here, please update tf.Variable.experimental_ref() as well.
"""Returns a hashable reference object to this Tensor.
Warning: Experimental API that could be changed or removed.
The primary usecase for this API is to put tensors in a set/dictionary.
We can't put tensors in a set/dictionary as `tensor.__hash__()` is no longer
available starting Tensorflow 2.0.
```python
import tensorflow as tf
x = tf.constant(5)
y = tf.constant(10)
z = tf.constant(10)
# The followings will raise an exception starting 2.0
# TypeError: Tensor is unhashable if Tensor equality is enabled.
tensor_set = {x, y, z}
tensor_dict = {x: 'five', y: 'ten', z: 'ten'}
```
Instead, we can use `tensor.experimental_ref()`.
```python
tensor_set = {x.experimental_ref(),
y.experimental_ref(),
z.experimental_ref()}
print(x.experimental_ref() in tensor_set)
==> True
tensor_dict = {x.experimental_ref(): 'five',
y.experimental_ref(): 'ten',
z.experimental_ref(): 'ten'}
print(tensor_dict[y.experimental_ref()])
==> ten
```
Also, the reference object provides `.deref()` function that returns the
original Tensor.
```python
x = tf.constant(5)
print(x.experimental_ref().deref())
==> tf.Tensor(5, shape=(), dtype=int32)
```
"""
return object_identity.Reference(self)
# TODO(agarwal): consider getting rid of this.
class _EagerTensorBase(Tensor):
  """Base class for EagerTensor."""

  # __int__, __float__ and __index__ may copy the tensor to CPU and
  # only work for scalars; values are cast as per numpy.
  def __int__(self):
    return int(self._numpy())

  def __long__(self):
    # Python 2 only; `long` is undefined on Python 3.
    return long(self._numpy())

  def __float__(self):
    return float(self._numpy())

  def __index__(self):
    maybe_arr = self._numpy()
    if isinstance(maybe_arr, np.ndarray):
      return maybe_arr.__index__()
    return int(maybe_arr)  # Must be a NumPy scalar.

  def __bool__(self):
    return bool(self._numpy())

  __nonzero__ = __bool__

  def __format__(self, format_spec):
    return self._numpy().__format__(format_spec)

  def __reduce__(self):
    # Pickling materializes the value; unpickling rebuilds the tensor via
    # convert_to_tensor.
    return convert_to_tensor, (self._numpy(),)

  def __copy__(self):
    # Eager Tensors are immutable so it's safe to return themselves as a copy.
    return self

  def __deepcopy__(self, memo):
    # Eager Tensors are immutable so it's safe to return themselves as a copy.
    del memo
    return self

  def __str__(self):
    return "tf.Tensor(%s, shape=%s, dtype=%s)" % (numpy_text(self), self.shape,
                                                  self.dtype.name)

  def __repr__(self):
    return "<tf.Tensor: id=%s, shape=%s, dtype=%s, numpy=%s>" % (
        self._id, self.shape, self.dtype.name, numpy_text(self, is_repr=True))

  def __len__(self):
    """Returns the length of the first dimension in the Tensor."""
    if not self.shape.ndims:
      raise TypeError("Scalar tensor has no `len()`")
    return self._shape_tuple()[0]

  def _numpy(self):
    # Implemented in C by the concrete EagerTensor subclass.
    raise NotImplementedError()

  @property
  def dtype(self):
    # Note: using the intern table directly here as this is
    # performance-sensitive in some models.
    return dtypes._INTERN_TABLE[self._datatype_enum()]  # pylint: disable=protected-access

  def numpy(self):
    """Returns a numpy array or a scalar with the same contents as the Tensor.

    TODO(ashankar,agarwal): Perhaps this should NOT reference the underlying
    buffer but instead always explicitly copy? Note that currently it may or may
    not copy based on whether the numpy data is properly aligned or not.

    Returns:
      A numpy array or a scalar. Numpy array may share memory with the
      Tensor object. Any changes to one may be reflected in the other. A scalar
      value is returned when self has rank 0.

    Raises:
      ValueError: if the type of this Tensor is not representable in numpy.
    """
    maybe_arr = self._numpy()  # pylint: disable=protected-access
    return maybe_arr.copy() if isinstance(maybe_arr, np.ndarray) else maybe_arr

  @property
  def backing_device(self):
    """Returns the name of the device holding this tensor's memory.

    `.backing_device` is usually the same as `.device`, which returns
    the device on which the kernel of the operation that produced this tensor
    ran. However, some operations can produce tensors on a different device
    (e.g., an operation that executes on the GPU but produces output tensors
    in host memory).
    """
    raise NotImplementedError()

  def _datatype_enum(self):
    # Implemented in C by the concrete EagerTensor subclass.
    raise NotImplementedError()

  def _shape_tuple(self):
    """The shape of this Tensor, as a tuple.

    This is more performant than tuple(shape().as_list()) as it avoids
    two list and one object creation. Marked private for now as from an API
    perspective, it would be better to have a single performant way of
    getting a shape rather than exposing shape() and shape_tuple()
    (and heaven forbid, shape_list() etc. as well!). Punting on that for now,
    but ideally one would work things out and remove the need for this method.

    Returns:
      tuple with the shape.
    """
    raise NotImplementedError()

  def _rank(self):
    """Integer rank of this Tensor.

    Unlike regular Tensors, the rank is always known for EagerTensors.

    This is more performant than len(self._shape_tuple())

    Returns:
      Integer rank
    """
    raise NotImplementedError()

  def _num_elements(self):
    """Number of elements of this Tensor.

    Unlike regular Tensors, the number of elements is always known for
    EagerTensors.

    This is more performant than tensor.shape.num_elements

    Returns:
      Long - num elements in the tensor
    """
    raise NotImplementedError()

  def _copy_to_device(self, device_name):  # pylint: disable=redefined-outer-name
    raise NotImplementedError()

  @staticmethod
  def _override_operator(name, func):
    """Registers `func` as the implementation of `name` on eager tensors."""
    setattr(_EagerTensorBase, name, func)

  def _copy_nograd(self, ctx=None, device_name=None):
    """Copies tensor to dest device, but doesn't record the operation."""
    # Creates a new tensor on the dest device.
    if ctx is None:
      ctx = context.context()
    if device_name is None:
      device_name = ctx.device_name
    # pylint: disable=protected-access
    try:
      ctx.ensure_initialized()
      new_tensor = self._copy_to_device(device_name)
    except core._NotOkStatusException as e:
      six.raise_from(core._status_to_exception(e.code, e.message), None)
    return new_tensor

  def _copy(self, ctx=None, device_name=None):
    """Copies tensor to dest device."""
    new_tensor = self._copy_nograd(ctx, device_name)
    # Record the copy on tape and define backprop copy as well.
    if context.executing_eagerly():
      self_device = self.device

      def grad_fun(dresult):
        # The gradient of a copy is a copy back to the source device.
        return [
            dresult._copy(device_name=self_device)
            if hasattr(dresult, "_copy") else dresult
        ]

      tape.record_operation("_copy", [new_tensor], [self], grad_fun)
    return new_tensor
    # pylint: enable=protected-access

  @property
  def shape(self):
    if self._tensor_shape is None:  # pylint: disable=access-member-before-definition
      # `_tensor_shape` is declared and defined in the definition of
      # `EagerTensor`, in C.
      self._tensor_shape = tensor_shape.TensorShape(self._shape_tuple())
    return self._tensor_shape

  def get_shape(self):
    """Alias of Tensor.shape."""
    return self.shape

  def _shape_as_list(self):
    """The shape of the tensor as a list."""
    return list(self._shape_tuple())

  @property
  def ndim(self):
    """Returns the number of Tensor dimensions."""
    return self.shape.ndims

  @deprecation.deprecated(None, "Use tf.identity instead.")
  def cpu(self):
    """A copy of this Tensor with contents backed by host memory."""
    return self._copy(context.context(), "CPU:0")

  @deprecation.deprecated(None, "Use tf.identity instead.")
  def gpu(self, gpu_index=0):
    """A copy of this Tensor with contents backed by memory on the GPU.

    Arguments:
      gpu_index: Identifies which GPU to place the contents on the returned
        Tensor in.

    Returns:
      A GPU-memory backed Tensor object initialized with the same contents
      as this Tensor.
    """
    return self._copy(context.context(), "GPU:" + str(gpu_index))

  def set_shape(self, shape):
    # Eager shapes are fully known; only validate compatibility.
    if not self.shape.is_compatible_with(shape):
      raise ValueError(
          "Tensor's shape %s is not compatible with supplied shape %s" %
          (self.shape, shape))

  # Methods not supported / implemented for Eager Tensors.
  @property
  def op(self):
    raise AttributeError(
        "Tensor.op is meaningless when eager execution is enabled.")

  @property
  def graph(self):
    raise AttributeError(
        "Tensor.graph is meaningless when eager execution is enabled.")

  @property
  def name(self):
    raise AttributeError(
        "Tensor.name is meaningless when eager execution is enabled.")

  @property
  def value_index(self):
    raise AttributeError(
        "Tensor.value_index is meaningless when eager execution is enabled.")

  def consumers(self):
    raise NotImplementedError(
        "Tensor.consumers is meaningless when eager execution is enabled.")

  def _add_consumer(self, consumer):
    raise NotImplementedError(
        "_add_consumer not supported when eager execution is enabled.")

  def _as_node_def_input(self):
    raise NotImplementedError(
        "_as_node_def_input not supported when eager execution is enabled.")

  def _as_tf_output(self):
    raise NotImplementedError(
        "_as_tf_output not supported when eager execution is enabled.")

  def eval(self, feed_dict=None, session=None):
    raise NotImplementedError(
        "eval is not supported when eager execution is enabled, "
        "is .numpy() what you're looking for?")
# This call creates an EagerTensor class, as a subclass of _EagerTensorBase, and
# registers it with the current module.
EagerTensor = c_api.TFE_Py_InitEagerTensor(_EagerTensorBase)

# Graph Tensors expose `name`/`dtype` properties, so they satisfy the
# dense-tensor-like interface checked by `is_dense_tensor_like`.
register_dense_tensor_like_type(Tensor)
@tf_export(v1=["convert_to_tensor"])
def convert_to_tensor(value,
dtype=None,
name=None,
preferred_dtype=None,
dtype_hint=None):
"""Converts the given `value` to a `Tensor`.
This function converts Python objects of various types to `Tensor`
objects. It accepts `Tensor` objects, numpy arrays, Python lists,
and Python scalars. For example:
```python
import numpy as np
def my_func(arg):
arg = tf.convert_to_tensor(arg, dtype=tf.float32)
return tf.matmul(arg, arg) + arg
# The following calls are equivalent.
value_1 = my_func(tf.constant([[1.0, 2.0], [3.0, 4.0]]))
value_2 = my_func([[1.0, 2.0], [3.0, 4.0]])
value_3 = my_func(np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32))
```
This function can be useful when composing a new operation in Python
(such as `my_func` in the example above). All standard Python op
constructors apply this function to each of their Tensor-valued
inputs, which allows those ops to accept numpy arrays, Python lists,
and scalars in addition to `Tensor` objects.
Note: This function diverges from default Numpy behavior for `float` and
`string` types when `None` is present in a Python list or scalar. Rather
than silently converting `None` values, an error will be thrown.
Args:
value: An object whose type has a registered `Tensor` conversion function.
dtype: Optional element type for the returned tensor. If missing, the type
is inferred from the type of `value`.
name: Optional name to use if a new `Tensor` is created.
preferred_dtype: Optional element type for the returned tensor, used when
dtype is None. In some cases, a caller may not have a dtype in mind when
converting to a tensor, so preferred_dtype can be used as a soft
preference. If the conversion to `preferred_dtype` is not possible, this
argument has no effect.
dtype_hint: same meaning as preferred_dtype, and overrides it.
Returns:
A `Tensor` based on `value`.
Raises:
TypeError: If no conversion function is registered for `value` to `dtype`.
RuntimeError: If a registered conversion function returns an invalid value.
ValueError: If the `value` is a tensor not of given `dtype` in graph mode.
"""
preferred_dtype = deprecation.deprecated_argument_lookup(
"dtype_hint", dtype_hint, "preferred_dtype", preferred_dtype)
return convert_to_tensor_v2(value, dtype, preferred_dtype, name)
@tf_export("convert_to_tensor", v1=[])
def convert_to_tensor_v2(value, dtype=None, dtype_hint=None, name=None):
"""Converts the given `value` to a `Tensor`.
This function converts Python objects of various types to `Tensor`
objects. It accepts `Tensor` objects, numpy arrays, Python lists,
and Python scalars. For example:
```python
import numpy as np
def my_func(arg):
arg = tf.convert_to_tensor(arg, dtype=tf.float32)
return tf.matmul(arg, arg) + arg
# The following calls are equivalent.
value_1 = my_func(tf.constant([[1.0, 2.0], [3.0, 4.0]]))
value_2 = my_func([[1.0, 2.0], [3.0, 4.0]])
value_3 = my_func(np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32))
```
This function can be useful when composing a new operation in Python
(such as `my_func` in the example above). All standard Python op
constructors apply this function to each of their Tensor-valued
inputs, which allows those ops to accept numpy arrays, Python lists,
and scalars in addition to `Tensor` objects.
Note: This function diverges from default Numpy behavior for `float` and
`string` types when `None` is present in a Python list or scalar. Rather
than silently converting `None` values, an error will be thrown.
Args:
value: An object whose type has a registered `Tensor` conversion function.
dtype: Optional element type for the returned tensor. If missing, the type
is inferred from the type of `value`.
dtype_hint: Optional element type for the returned tensor, used when dtype
is None. In some cases, a caller may not have a dtype in mind when
converting to a tensor, so dtype_hint can be used as a soft preference.
If the conversion to `dtype_hint` is not possible, this argument has no
effect.
name: Optional name to use if a new `Tensor` is created.
Returns:
A `Tensor` based on `value`.
Raises:
TypeError: If no conversion function is registered for `value` to `dtype`.
RuntimeError: If a registered conversion function returns an invalid value.
ValueError: If the `value` is a tensor not of given `dtype` in graph mode.
"""
return internal_convert_to_tensor(
value=value,
dtype=dtype,
name=name,
preferred_dtype=dtype_hint,
as_ref=False)
def _error_prefix(name):
return "" if name is None else "%s: " % name
def internal_convert_to_tensor(value,
                               dtype=None,
                               name=None,
                               as_ref=False,
                               preferred_dtype=None,
                               ctx=None,
                               accept_composite_tensors=False):
  """Implementation of the public convert_to_tensor.

  Args:
    value: The object to convert; dispatched through the tensor conversion
      registry keyed on `type(value)`.
    dtype: Optional element type; if given, the result must be compatible.
    name: Optional name used if a new `Tensor` is created.
    as_ref: True if the caller wants the result as a ref tensor.
    preferred_dtype: Soft dtype preference tried first when `dtype` is None.
    ctx: Optional eager context; defaults to `context.context()`.
    accept_composite_tensors: If True, a registered conversion function may
      also return a `CompositeTensor`.

  Returns:
    A `Tensor` (or `CompositeTensor` when `accept_composite_tensors`).

  Raises:
    TypeError: If no registered conversion function accepts `value`, or a
      preferred-dtype conversion produced the wrong dtype.
    ValueError: If `value` is a `Tensor` incompatible with `dtype`.
    RuntimeError: If a conversion function misbehaves, or an `EagerTensor`
      is captured outside of function building.
  """
  if dtype is not None:
    dtype = dtypes.as_dtype(dtype)
  if ctx is None:
    ctx = context.context()
  # An EagerTensor encountered while building a graph must be captured by a
  # function graph; top-level graphs cannot consume eager values.
  if isinstance(value, EagerTensor) and not ctx.executing_eagerly():
    graph = get_default_graph()
    if not graph.building_function:
      raise RuntimeError("Attempting to capture an EagerTensor without "
                         "building a function.")
    return graph.capture(value, name=name)
  elif isinstance(value, Tensor):
    # Fast path: already a Tensor; only validate dtype compatibility.
    if dtype is not None and not dtype.is_compatible_with(value.dtype):
      raise ValueError(
          "Tensor conversion requested dtype %s for Tensor with dtype %s: %r" %
          (dtype.name, value.dtype.name, value))
    return value
  if preferred_dtype is not None:
    preferred_dtype = dtypes.as_dtype(preferred_dtype)
  # Try each registered conversion function in turn; a function signals
  # "not my type" by returning NotImplemented.
  for base_type, conversion_func in tensor_conversion_registry.get(type(value)):
    # If dtype is None but preferred_dtype is not None, we try to
    # cast to preferred_dtype first.
    ret = None
    if dtype is None and preferred_dtype is not None:
      try:
        ret = conversion_func(
            value, dtype=preferred_dtype, name=name, as_ref=as_ref)
      except (TypeError, ValueError):
        # Could not coerce the conversion to use the preferred dtype.
        # Fall through to an unconstrained conversion below.
        pass
      else:
        if (ret is not NotImplemented and
            ret.dtype.base_dtype != preferred_dtype.base_dtype):
          raise TypeError("convert_to_tensor did not convert to "
                          "the preferred dtype: %s vs %s " %
                          (ret.dtype.base_dtype, preferred_dtype.base_dtype))
    if ret is None:
      ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
    if ret is NotImplemented:
      continue
    is_acceptable_type = (
        isinstance(ret, Tensor) or
        (accept_composite_tensors and
         isinstance(ret, composite_tensor.CompositeTensor)))
    if not is_acceptable_type:
      raise RuntimeError(
          "%sConversion function %r for type %s returned non-Tensor: %r" %
          (_error_prefix(name), conversion_func, base_type, ret))
    if dtype and not dtype.is_compatible_with(ret.dtype):
      raise RuntimeError(
          "%sConversion function %r for type %s returned incompatible "
          "dtype: requested = %s, actual = %s" %
          (_error_prefix(name), conversion_func, base_type, dtype.name,
           ret.dtype.name))
    return ret
  raise TypeError("%sCannot convert %r with type %s to Tensor: "
                  "no conversion function registered." %
                  (_error_prefix(name), value, type(value)))
def internal_convert_n_to_tensor(values,
                                 dtype=None,
                                 name=None,
                                 as_ref=False,
                                 preferred_dtype=None,
                                 ctx=None):
  """Converts each element of `values` to a `Tensor`.

  Args:
    values: A sequence of objects convertible via `tf.convert_to_tensor()`.
    dtype: (Optional.) The required `DType` of the returned `Tensor` objects.
    name: (Optional.) Name prefix; element `i` is named `name + '_' + i`.
    as_ref: True if the caller wants the results as ref tensors.
    preferred_dtype: Soft dtype preference used when `dtype` is None; ignored
      if the conversion to `preferred_dtype` is not possible.
    ctx: The value of context.context().

  Returns:
    A list of `Tensor` and/or `IndexedSlices` objects.

  Raises:
    TypeError: If `values` is not a sequence, or no conversion function is
      registered for an element.
    RuntimeError: If a registered conversion function returns an invalid
      value.
  """
  if not isinstance(values, collections_abc.Sequence):
    raise TypeError("values must be a sequence.")
  if ctx is None:
    ctx = context.context()
  converted = []
  for index, value in enumerate(values):
    element_name = None if name is None else "%s_%d" % (name, index)
    converted.append(
        internal_convert_to_tensor(
            value,
            dtype=dtype,
            name=element_name,
            as_ref=as_ref,
            preferred_dtype=preferred_dtype,
            ctx=ctx))
  return converted
def convert_n_to_tensor(values, dtype=None, name=None, preferred_dtype=None):
  """Converts `values` to a list of `Tensor` objects.

  Thin public wrapper over `internal_convert_n_to_tensor` with `as_ref`
  pinned to False.

  Args:
    values: A list of objects that can be consumed by `tf.convert_to_tensor()`.
    dtype: (Optional.) The required `DType` of the returned `Tensor` objects.
    name: (Optional.) Name prefix; element `i` is named `name + '_' + i`.
    preferred_dtype: Optional soft dtype preference used when `dtype` is
      None; has no effect if the conversion to it is not possible.

  Returns:
    A list of `Tensor` and/or `IndexedSlices` objects.

  Raises:
    TypeError: If no conversion function is registered for an element in
      `values`.
    RuntimeError: If a registered conversion function returns an invalid
      value.
  """
  return internal_convert_n_to_tensor(values=values,
                                      dtype=dtype,
                                      name=name,
                                      preferred_dtype=preferred_dtype,
                                      as_ref=False)
def convert_to_tensor_or_composite(value, dtype=None, name=None):
  """Converts the given object to a `Tensor` or `CompositeTensor`.

  A `CompositeTensor` passes through unmodified; anything else goes through
  `convert_to_tensor()`.

  Args:
    value: A `CompositeTensor` or an object that can be consumed by
      `convert_to_tensor()`.
    dtype: (Optional.) The required `DType` of the returned `Tensor` or
      `CompositeTensor`.
    name: (Optional.) A name to use if a new `Tensor` is created.

  Returns:
    A `Tensor` or `CompositeTensor`, based on `value`.

  Raises:
    ValueError: If `dtype` does not match the element type of `value`.
  """
  return internal_convert_to_tensor_or_composite(value=value,
                                                 dtype=dtype,
                                                 name=name,
                                                 as_ref=False)
def internal_convert_to_tensor_or_composite(value,
                                            dtype=None,
                                            name=None,
                                            as_ref=False):
  """Converts the given object to a `Tensor` or `CompositeTensor`.

  If `value` is a `CompositeTensor` it is returned unmodified. Otherwise, it
  is converted to a `Tensor` using `convert_to_tensor()`.

  Args:
    value: A `CompositeTensor`, or an object that can be consumed by
      `convert_to_tensor()`.
    dtype: (Optional.) The required `DType` of the returned `Tensor` or
      `CompositeTensor`.
    name: (Optional.) A name to use if a new `Tensor` is created.
    as_ref: True if the caller wants the results as ref tensors.

  Returns:
    A `Tensor` or `CompositeTensor`, based on `value`.

  Raises:
    ValueError: If `dtype` does not match the element type of `value`.
  """
  if isinstance(value, composite_tensor.CompositeTensor):
    value_dtype = getattr(value, "dtype", None)
    # Use the getattr result consistently: a CompositeTensor without a
    # `dtype` attribute must produce the documented ValueError here rather
    # than an incidental TypeError from `as_dtype(None)` (or an
    # AttributeError when formatting the message).
    if dtype and (value_dtype is None or
                  not dtypes.as_dtype(dtype).is_compatible_with(value_dtype)):
      raise ValueError(
          "Tensor conversion requested dtype %s for Tensor with dtype %s: %r" %
          (dtypes.as_dtype(dtype).name,
           getattr(value_dtype, "name", value_dtype), str(value)))
    return value
  else:
    return internal_convert_to_tensor(
        value,
        dtype=dtype,
        name=name,
        as_ref=as_ref,
        accept_composite_tensors=True)
def internal_convert_n_to_tensor_or_composite(values,
                                              dtype=None,
                                              name=None,
                                              as_ref=False):
  """Converts `values` to a list of `Tensor` or `CompositeTensor` objects.

  `None` entries and `CompositeTensor` objects pass through unmodified.

  Args:
    values: A list of `None`, `CompositeTensor`, or objects that can be
      consumed by `convert_to_tensor()`.
    dtype: (Optional.) The required `DType` of the returned `Tensor`s or
      `CompositeTensor`s.
    name: (Optional.) Name prefix; element `i` is named `name + '_' + i`.
    as_ref: True if the caller wants the results as ref tensors.

  Returns:
    A list of `Tensor`, `CompositeTensor`, and/or `None` objects.

  Raises:
    TypeError: If `values` is not a sequence, or no conversion function is
      registered for an element.
    RuntimeError: If a registered conversion function returns an invalid
      value.
  """
  if not isinstance(values, collections_abc.Sequence):
    raise TypeError("values must be a sequence.")
  converted = []
  for index, value in enumerate(values):
    if value is None:
      converted.append(None)
      continue
    element_name = None if name is None else "%s_%d" % (name, index)
    converted.append(
        internal_convert_to_tensor_or_composite(
            value, dtype=dtype, name=element_name, as_ref=as_ref))
  return converted
def convert_n_to_tensor_or_composite(values, dtype=None, name=None):
  """Converts `values` to a list of `Output` or `CompositeTensor` objects.

  Any `CompositeTensor` objects in `values` are returned unmodified.

  Args:
    values: A list of `None`, `CompositeTensor``, or objects that can be
      consumed by `convert_to_tensor()`.
    dtype: (Optional.) The required `DType` of the returned `Tensor`s or
      `CompositeTensor`s.
    name: (Optional.) Name prefix; element `i` is named `name + '_' + i`.

  Returns:
    A list of `Tensor` and/or `CompositeTensor` objects.

  Raises:
    TypeError: If no conversion function is registered for an element in
      `values`.
    RuntimeError: If a registered conversion function returns an invalid
      value.
  """
  return internal_convert_n_to_tensor_or_composite(values=values,
                                                   dtype=dtype,
                                                   name=name,
                                                   as_ref=False)
def _device_string(dev_spec):
  """Return the string form of a device spec; plain strings pass through."""
  if pydev.is_device_spec(dev_spec):
    return dev_spec.to_string()
  return dev_spec
def _NodeDef(op_type, name, device=None, attrs=None):  # pylint: disable=redefined-outer-name
  """Create a NodeDef proto.

  Args:
    op_type: Value for the "op" attribute of the NodeDef proto.
    name: Value for the "name" attribute of the NodeDef proto.
    device: string, device, or function from NodeDef to string. Value for the
      "device" attribute of the NodeDef proto.
    attrs: Optional dictionary mapping attribute name (a string) to the
      corresponding "attr" AttrValue proto.

  Returns:
    A node_def_pb2.NodeDef protocol buffer.
  """
  node_def = node_def_pb2.NodeDef()
  node_def.op = compat.as_bytes(op_type)
  node_def.name = compat.as_bytes(name)
  for attr_name, attr_proto in six.iteritems(attrs or {}):
    node_def.attr[attr_name].CopyFrom(attr_proto)
  if device is not None:
    # A callable device policy is evaluated against the NodeDef built so far.
    node_def.device = (device(node_def) if callable(device)
                       else _device_string(device))
  return node_def
# Copied from core/framework/node_def_util.cc
# TODO(mrry,josh11b): Consolidate this validation in C++ code.
# Op names must begin with an alphanumeric or '.', then any mix of
# alphanumerics, '_', '.', '-', '/'. Scope names allow the same character
# set but may be empty and may start with any of those characters.
_VALID_OP_NAME_REGEX = re.compile("^[A-Za-z0-9.][A-Za-z0-9_.\\-/]*$")
_VALID_SCOPE_NAME_REGEX = re.compile("^[A-Za-z0-9_.\\-/]*$")
def _create_c_op(graph, node_def, inputs, control_inputs):
  """Creates a TF_Operation.

  Translates a Python NodeDef plus resolved inputs into a C-level operation:
  device, data inputs, control inputs, and serialized attrs are attached to
  a TF_OperationDescription, which is then finalized.

  Args:
    graph: a `Graph`.
    node_def: `node_def_pb2.NodeDef` for the operation to create.
    inputs: A list of `Tensor`s (corresponding to scalar inputs) and lists of
      `Tensor`s (corresponding to sequence inputs, e.g. "int64 * N",
      "list(int64)"). The length of the list should be equal to the number of
      inputs specified by this operation's op def.
    control_inputs: A list of `Operation`s to set as control dependencies.

  Returns:
    A wrapped TF_Operation*.

  Raises:
    ValueError: if TF_FinishOperation rejects the op (converted from the
      C API's InvalidArgumentError).
  """
  # pylint: disable=protected-access
  op_desc = c_api.TF_NewOperation(graph._c_graph, compat.as_str(node_def.op),
                                  compat.as_str(node_def.name))
  if node_def.device:
    c_api.TF_SetDevice(op_desc, compat.as_str(node_def.device))
  # Add inputs
  for op_input in inputs:
    # Sequence inputs are added as an input *list*; scalars individually.
    if isinstance(op_input, (list, tuple)):
      c_api.TF_AddInputList(op_desc, [t._as_tf_output() for t in op_input])
    else:
      c_api.TF_AddInput(op_desc, op_input._as_tf_output())
  # Add control inputs
  for control_input in control_inputs:
    c_api.TF_AddControlInput(op_desc, control_input._c_op)
  # pylint: enable=protected-access
  # Add attrs (passed to C as serialized AttrValue protos).
  for name, attr_value in node_def.attr.items():
    serialized = attr_value.SerializeToString()
    # TODO(skyewm): this creates and deletes a new TF_Status for every attr.
    # It might be worth creating a convenient way to re-use the same status.
    c_api.TF_SetAttrValueProto(op_desc, compat.as_str(name), serialized)
  try:
    c_op = c_api.TF_FinishOperation(op_desc)
  except errors.InvalidArgumentError as e:
    # Convert to ValueError for backwards compatibility.
    raise ValueError(str(e))
  return c_op
@tf_export("Operation")
class Operation(object):
"""Represents a graph node that performs computation on tensors.
An `Operation` is a node in a TensorFlow `Graph` that takes zero or
more `Tensor` objects as input, and produces zero or more `Tensor`
objects as output. Objects of type `Operation` are created by
calling a Python op constructor (such as
`tf.matmul`)
or `tf.Graph.create_op`.
For example `c = tf.matmul(a, b)` creates an `Operation` of type
"MatMul" that takes tensors `a` and `b` as input, and produces `c`
as output.
After the graph has been launched in a session, an `Operation` can
be executed by passing it to
`tf.Session.run`.
`op.run()` is a shortcut for calling
`tf.compat.v1.get_default_session().run(op)`.
"""
  def __init__(self,
               node_def,
               g,
               inputs=None,
               output_types=None,
               control_inputs=None,
               input_types=None,
               original_op=None,
               op_def=None):
    r"""Creates an `Operation`.

    NOTE: This constructor validates the name of the `Operation` (passed
    as `node_def.name`). Valid `Operation` names match the following
    regular expression:

        [A-Za-z0-9.][A-Za-z0-9_.\\-/]*

    Args:
      node_def: `node_def_pb2.NodeDef`.  `NodeDef` for the `Operation`. Used
        for attributes of `node_def_pb2.NodeDef`, typically `name`, `op`, and
        `device`.  The `input` attribute is irrelevant here as it will be
        computed when generating the model.
      g: `Graph`. The parent graph.
      inputs: list of `Tensor` objects. The inputs to this `Operation`.
      output_types: list of `DType` objects.  List of the types of the
        `Tensors` computed by this operation.  The length of this list
        indicates the number of output endpoints of the `Operation`.
      control_inputs: list of operations or tensors from which to have a
        control dependency.
      input_types: List of `DType` objects representing the types of the
        tensors accepted by the `Operation`. By default uses
        `[x.dtype.base_dtype for x in inputs]`.  Operations that expect
        reference-typed inputs must specify these explicitly.
      original_op: Optional. Used to associate the new `Operation` with an
        existing `Operation` (for example, a replica with the op that was
        replicated).
      op_def: Optional. The `op_def_pb2.OpDef` proto that describes the op
        type that this `Operation` represents.

    Raises:
      TypeError: if control inputs are not Operations or Tensors,
        or if `node_def` is not a `NodeDef`,
        or if `g` is not a `Graph`,
        or if `inputs` are not tensors,
        or if `inputs` and `input_types` are incompatible.
      ValueError: if the `node_def` name is not valid.
    """
    # For internal use only: `node_def` can be set to a TF_Operation to create
    # an Operation for that op. This is useful for creating Operations for ops
    # indirectly created by C API methods, e.g. the ops created by
    # TF_ImportGraphDef. When `node_def` is a TF_Operation, all optional fields
    # should be None.
    if isinstance(node_def, node_def_pb2.NodeDef):
      # NOTE(review): the `< 0` guard presumably catches ByteSize() overflow
      # for huge protos — confirm against the protobuf implementation.
      if node_def.ByteSize() >= (1 << 31) or node_def.ByteSize() < 0:
        raise ValueError(
            "Cannot create a tensor proto whose content is larger than 2GB.")
      if not _VALID_OP_NAME_REGEX.match(node_def.name):
        raise ValueError("'%s' is not a valid node name" % node_def.name)
      c_op = None
    elif type(node_def).__name__ == "SwigPyObject":
      # A pre-existing TF_Operation wrapped by SWIG; all Python-side
      # construction arguments must be absent.
      assert inputs is None
      assert output_types is None
      assert control_inputs is None
      assert input_types is None
      assert original_op is None
      assert op_def is None
      c_op = node_def
    else:
      raise TypeError("node_def needs to be a NodeDef: %s" % node_def)
    if not isinstance(g, Graph):
      raise TypeError("g needs to be a Graph: %s" % g)
    self._graph = g
    if inputs is None:
      inputs = []
    elif not isinstance(inputs, list):
      raise TypeError("inputs needs to be a list of Tensors: %s" % inputs)
    for a in inputs:
      if not isinstance(a, Tensor):
        raise TypeError("input needs to be a Tensor: %s" % a)
    if input_types is None:
      input_types = [i.dtype.base_dtype for i in inputs]
    else:
      if not all(
          x.is_compatible_with(i.dtype) for i, x in zip(inputs, input_types)):
        raise TypeError("In op '%s', input types (%s) are not compatible "
                        "with expected types (%s)" %
                        (node_def.name, [i.dtype for i in inputs], input_types))
    # Build the list of control inputs, normalizing tensors to their
    # producing Operation.
    control_input_ops = []
    if control_inputs:
      for c in control_inputs:
        control_op = None
        if isinstance(c, Operation):
          control_op = c
        elif isinstance(c, (Tensor, IndexedSlices)):
          control_op = c.op
        else:
          raise TypeError("Control input must be an Operation, "
                          "a Tensor, or IndexedSlices: %s" % c)
        control_input_ops.append(control_op)
    # This will be set by self.inputs.
    self._inputs_val = None
    # pylint: disable=protected-access
    self._id_value = self._graph._next_id()
    self._original_op = original_op
    self._traceback = tf_stack.extract_stack()
    # List of _UserDevSpecs holding code location of device context manager
    # invocations and the users original argument to them.
    self._device_code_locations = None
    # Dict mapping op name to file and line information for op colocation
    # context managers.
    self._colocation_code_locations = None
    self._control_flow_context = self.graph._get_control_flow_context()
    # Initialize self._c_op.
    if c_op:
      self._c_op = c_op
      op_def = g._get_op_def(c_api.TF_OperationOpType(c_op))
    else:
      if op_def is None:
        op_def = self._graph._get_op_def(node_def.op)
      # TODO(skyewm): op_def_library.apply_op() flattens the incoming inputs.
      # Refactor so we don't have to do this here.
      grouped_inputs = self._reconstruct_sequence_inputs(
          op_def, inputs, node_def.attr)
      self._c_op = _create_c_op(self._graph, node_def, grouped_inputs,
                                control_input_ops)
    # pylint: enable=protected-access
    self._is_stateful = op_def.is_stateful
    # Initialize self._outputs. Note: output dtypes are queried back from the
    # finished C op, not taken from the `output_types` argument.
    num_outputs = c_api.TF_OperationNumOutputs(self._c_op)
    output_types = [
        c_api.TF_OperationOutputType(c_api_util.tf_output(self._c_op, i))
        for i in range(num_outputs)
    ]
    self._outputs = [
        Tensor(self, i, output_type)
        for i, output_type in enumerate(output_types)
    ]
    self._graph._add_op(self)  # pylint: disable=protected-access
    if not c_op:
      self._control_flow_post_processing()
def _control_flow_post_processing(self):
"""Add this op to its control flow context.
This may add new ops and change this op's inputs. self.inputs must be
available before calling this method.
"""
for input_tensor in self.inputs:
control_flow_util.CheckInputFromValidContext(self, input_tensor.op)
if self._control_flow_context is not None:
self._control_flow_context.AddOp(self)
def _reconstruct_sequence_inputs(self, op_def, inputs, attrs):
"""Regroups a flat list of input tensors into scalar and sequence inputs.
Args:
op_def: The `op_def_pb2.OpDef` (for knowing the input types)
inputs: a list of input `Tensor`s to the op.
attrs: mapping from attr name to `attr_value_pb2.AttrValue` (these define
how long each sequence is)
Returns:
A list of `Tensor`s (corresponding to scalar inputs) and lists of
`Tensor`s (corresponding to sequence inputs).
"""
grouped_inputs = []
i = 0
for input_arg in op_def.input_arg:
if input_arg.number_attr:
input_len = attrs[input_arg.number_attr].i
is_sequence = True
elif input_arg.type_list_attr:
input_len = len(attrs[input_arg.type_list_attr].list.type)
is_sequence = True
else:
input_len = 1
is_sequence = False
if is_sequence:
grouped_inputs.append(inputs[i:i + input_len])
else:
grouped_inputs.append(inputs[i])
i += input_len
assert i == len(inputs)
return grouped_inputs
def colocation_groups(self):
"""Returns the list of colocation groups of the op."""
default_colocation_group = [compat.as_bytes("loc:@%s" % self.name)]
try:
class_attr = self.get_attr("_class")
except ValueError:
# This op has no explicit colocation group, so it is itself its
# own root of a colocation group.
return default_colocation_group
attr_groups = [
class_name for class_name in class_attr
if class_name.startswith(b"loc:@")
]
# If there are no colocation groups in the explicit _class field,
# return the default colocation group.
return attr_groups if attr_groups else default_colocation_group
def values(self):
"""DEPRECATED: Use outputs."""
return tuple(self.outputs)
  def _get_control_flow_context(self):
    """Returns the control flow context of this op.

    Returns:
      A context object, or `None` when the op is not inside a control flow
      construct (callers test for `None`; see
      `_control_flow_post_processing`).
    """
    return self._control_flow_context

  def _set_control_flow_context(self, ctx):
    """Sets the current control flow context of this op.

    Args:
      ctx: a context object.
    """
    self._control_flow_context = ctx
  @property
  def name(self):
    """The full name of this operation."""
    # Not cached: the authoritative name lives in the C operation.
    return c_api.TF_OperationName(self._c_op)

  @property
  def _id(self):
    """The unique integer id of this operation."""
    # Assigned once from graph._next_id() in __init__.
    return self._id_value

  @property
  def device(self):
    """The name of the device to which this op has been assigned, if any.

    Returns:
      The string name of the device to which this op has been
      assigned, or an empty string if it has not been assigned to a
      device.
    """
    return c_api.TF_OperationDevice(self._c_op)
  @property
  def _device_assignments(self):
    """Code locations for device context managers active at op creation.

    This property will return a list of traceable_stack.TraceableObject
    instances where .obj is a string representing the assigned device
    (or information about the function that would be applied to this op
    to compute the desired device) and the filename and lineno members
    record the location of the relevant device context manager.

    For example, suppose file_a contained these lines:

      file_a.py:
        15: with tf.device('/gpu:0'):
        16:   node_b = tf.constant(4, name='NODE_B')

    Then a TraceableObject t_obj representing the device context manager
    would have these member values:

      t_obj.obj -> '/gpu:0'
      t_obj.filename = 'file_a.py'
      t_obj.lineno = 15

    and node_b.op._device_assignments would return the list [t_obj].

    Returns:
      [str: traceable_stack.TraceableObject, ...] as per this method's
      description, above.
    """
    # _device_code_locations stays None until a device context manager
    # records a location; normalize that to an empty list here.
    return self._device_code_locations or []

  @property
  def _colocation_dict(self):
    """Code locations for colocation context managers active at op creation.

    This property will return a dictionary for which the keys are nodes with
    which this Operation is colocated, and for which the values are
    traceable_stack.TraceableObject instances. The TraceableObject instances
    record the location of the relevant colocation context manager but have
    the "obj" field set to None to prevent leaking private data.

    For example, suppose file_a contained these lines:

      file_a.py:
        14: node_a = tf.constant(3, name='NODE_A')
        15: with tf.compat.v1.colocate_with(node_a):
        16:   node_b = tf.constant(4, name='NODE_B')

    Then a TraceableObject t_obj representing the colocation context manager
    would have these member values:

      t_obj.obj -> None
      t_obj.filename = 'file_a.py'
      t_obj.lineno = 15

    and node_b.op._colocation_dict would return the dictionary

      { 'NODE_A': t_obj }

    Returns:
      {str: traceable_stack.TraceableObject} as per this method's
      description, above.
    """
    # Return a copy so callers cannot mutate the op's internal record.
    locations_dict = self._colocation_code_locations or {}
    return locations_dict.copy()
  @property
  def _output_types(self):
    """List this operation's output types.

    Returns:
      List of the types of the Tensors computed by this operation.
      Each element in the list is an integer whose value is one of
      the TF_DataType enums defined in c_api.h
      The length of this list indicates the number of output endpoints
      of the operation.
    """
    # Queried from the C op on each access (not cached).
    num_outputs = c_api.TF_OperationNumOutputs(self._c_op)
    output_types = [
        c_api.TF_OperationOutputType(self._tf_output(i))
        for i in xrange(num_outputs)
    ]
    # In all the tests we have output_types that are passed into
    # Operation.__init__ are a list of ints (which is illegal according
    # to the docstring), but input_types are instances of DType.
    # This extra assert is to catch if we ever use DType for output_types.
    if output_types:
      assert isinstance(output_types[0], int)
    return output_types
  def _tf_output(self, output_idx):
    """Create and return a new TF_Output for output_idx'th output of this op."""
    # A TF_Output is an (operation, index) pair identifying one output
    # endpoint in the C API.
    tf_output = c_api.TF_Output()
    tf_output.oper = self._c_op
    tf_output.index = output_idx
    return tf_output

  def _tf_input(self, input_idx):
    """Create and return a new TF_Input for input_idx'th input of this op."""
    # Mirror of _tf_output for input endpoints.
    tf_input = c_api.TF_Input()
    tf_input.oper = self._c_op
    tf_input.index = input_idx
    return tf_input
  def _set_device(self, device):  # pylint: disable=redefined-outer-name
    """Set the device of this operation.

    Args:
      device: string or device.  The device to set.
    """
    # Normalize to a string, then take the fast string-only path below.
    self._set_device_from_string(compat.as_str(_device_string(device)))

  def _set_device_from_string(self, device_str):
    """Fast path to set device if the type is known to be a string.

    This function is called frequently enough during graph construction that
    there are non-trivial performance gains if the caller can guarantee that
    the specified device is already a string.

    Args:
      device_str: A string specifying where to place this op.
    """
    c_api.SetRequestedDevice(
        self._graph._c_graph,  # pylint: disable=protected-access
        self._c_op,  # pylint: disable=protected-access
        device_str)

  def _update_input(self, index, tensor):
    """Update the input to this operation at the given index.

    NOTE: This is for TF internal use only. Please don't use it.

    Args:
      index: the index of the input to update.
      tensor: the Tensor to be used as the input at the given index.

    Raises:
      TypeError: if tensor is not a Tensor,
        or if input tensor type is not convertible to dtype.
      ValueError: if the Tensor is from a different graph.
    """
    if not isinstance(tensor, Tensor):
      raise TypeError("tensor must be a Tensor: %s" % tensor)
    _assert_same_graph(self, tensor)
    # Reset cached inputs (invalidates the memoized `inputs` property).
    self._inputs_val = None
    c_api.UpdateEdge(
        self._graph._c_graph,  # pylint: disable=protected-access
        tensor._as_tf_output(),  # pylint: disable=protected-access
        self._tf_input(index))
  def _add_while_inputs(self, tensors):
    """See AddWhileInputHack in python_api.h.

    NOTE: This is for TF internal use only. Please don't use it.

    Args:
      tensors: list of Tensors

    Raises:
      TypeError: if tensor is not a Tensor,
        or if input tensor type is not convertible to dtype.
      ValueError: if the Tensor is from a different graph.
    """
    for tensor in tensors:
      if not isinstance(tensor, Tensor):
        raise TypeError("tensor must be a Tensor: %s" % tensor)
      _assert_same_graph(self, tensor)
      # Reset cached inputs (invalidates the memoized `inputs` property).
      self._inputs_val = None
      c_api.AddWhileInputHack(
          self._graph._c_graph,  # pylint: disable=protected-access
          tensor._as_tf_output(),  # pylint: disable=protected-access
          self._c_op)
def _add_control_inputs(self, ops):
"""Add a list of new control inputs to this operation.
Args:
ops: the list of Operations to add as control input.
Raises:
TypeError: if ops is not a list of Operations.
ValueError: if any op in ops is from a different graph.
"""
for op in ops:
if not isinstance(op, Operation):
raise TypeError("op must be an Operation: %s" % op)
c_api.AddControlInput(self._graph._c_graph, self._c_op, op._c_op) # pylint: disable=protected-access
  def _add_control_input(self, op):
    """Add a new control input to this operation.

    Args:
      op: the Operation to add as control input.

    Raises:
      TypeError: if op is not an Operation.
      ValueError: if op is from a different graph.
    """
    if not isinstance(op, Operation):
      raise TypeError("op must be an Operation: %s" % op)
    # NOTE(review): no Python-side graph check here; the cross-graph
    # ValueError documented above presumably surfaces from the C API call —
    # confirm.
    c_api.AddControlInput(self._graph._c_graph, self._c_op, op._c_op)  # pylint: disable=protected-access

  def _remove_all_control_inputs(self):
    """Removes any control inputs to this operation."""
    c_api.RemoveAllControlInputs(self._graph._c_graph, self._c_op)  # pylint: disable=protected-access
def _add_outputs(self, types, shapes):
"""Adds new Tensors to self.outputs.
Note: this is generally unsafe to use. This is used in certain situations in
conjunction with _set_type_list_attr.
Arguments:
types: list of DTypes
shapes: list of TensorShapes
"""
assert len(types) == len(shapes)
orig_num_outputs = len(self.outputs)
for i in range(len(types)):
t = Tensor(self, orig_num_outputs + i, types[i])
self._outputs.append(t)
t.set_shape(shapes[i])
  def __str__(self):
    # Render as the full NodeDef proto text.
    return str(self.node_def)

  def __repr__(self):
    return "<tf.Operation '%s' type=%s>" % (self.name, self.type)

  @property
  def outputs(self):
    """The list of `Tensor` objects representing the outputs of this op."""
    return self._outputs
class _InputList(object):
"""Immutable input list wrapper."""
def __init__(self, inputs):
self._inputs = inputs
def __iter__(self):
return iter(self._inputs)
def __len__(self):
return len(self._inputs)
def __bool__(self):
return bool(self._inputs)
# Python 3 wants __bool__, Python 2.7 wants __nonzero__
__nonzero__ = __bool__
def __getitem__(self, i):
return self._inputs[i]
  @property
  def inputs(self):
    """The list of `Tensor` objects representing the data inputs of this op."""
    # Memoized in _inputs_val; mutators such as _update_input and
    # _add_while_inputs reset it to None to force a re-query.
    if self._inputs_val is None:
      tf_outputs = c_api.GetOperationInputs(self._c_op)
      # pylint: disable=protected-access
      retval = [
          self.graph._get_tensor_by_tf_output(tf_output)
          for tf_output in tf_outputs
      ]
      # pylint: enable=protected-access
      self._inputs_val = Operation._InputList(retval)
    return self._inputs_val

  @property
  def _inputs(self):
    # Deprecated alias kept for backward compatibility; warns on use.
    logging.warning("Operation._inputs is private, use Operation.inputs "
                    "instead. Operation._inputs will eventually be removed.")
    return self.inputs

  @_inputs.setter
  def _inputs(self, value):
    raise ValueError("Cannot assign _inputs")
  @property
  def _input_types(self):
    """List of `DType`s, one per data input of this op."""
    # Queried from the C op on every access (not cached, unlike `inputs`).
    num_inputs = c_api.TF_OperationNumInputs(self._c_op)
    input_types = [
        dtypes.as_dtype(c_api.TF_OperationInputType(self._tf_input(i)))
        for i in xrange(num_inputs)
    ]
    return input_types

  @_input_types.setter
  def _input_types(self, value):
    raise ValueError("Cannot assign _input_types")
  @property
  def control_inputs(self):
    """The `Operation` objects on which this op has a control dependency.

    Before this op is executed, TensorFlow will ensure that the
    operations in `self.control_inputs` have finished executing. This
    mechanism can be used to run ops sequentially for performance
    reasons, or to ensure that the side effects of an op are observed
    in the correct order.

    Returns:
      A list of `Operation` objects.
    """
    control_c_ops = c_api.TF_OperationGetControlInputs_wrapper(self._c_op)
    # Map each C op back to its Python wrapper via a name lookup.
    # pylint: disable=protected-access
    return [
        self.graph._get_operation_by_name_unsafe(c_api.TF_OperationName(c_op))
        for c_op in control_c_ops
    ]
    # pylint: enable=protected-access
  @property
  def _control_outputs(self):
    """The `Operation` objects which have a control dependency on this op.

    Before any of the ops in self._control_outputs can execute tensorflow
    will ensure self has finished executing.

    Returns:
      A list of `Operation` objects.
    """
    control_c_ops = c_api.TF_OperationGetControlOutputs_wrapper(self._c_op)
    # Map each C op back to its Python wrapper via a name lookup.
    # pylint: disable=protected-access
    return [
        self.graph._get_operation_by_name_unsafe(c_api.TF_OperationName(c_op))
        for c_op in control_c_ops
    ]
    # pylint: enable=protected-access

  @property
  def _control_inputs(self):
    # Deprecated alias kept for backward compatibility; warns on use.
    logging.warning("Operation._control_inputs is private, use "
                    "Operation.control_inputs instead. "
                    "Operation._control_inputs will eventually be removed.")
    return self.control_inputs

  @_control_inputs.setter
  def _control_inputs(self, value):
    logging.warning("Operation._control_inputs is private, use "
                    "Operation.control_inputs instead. "
                    "Operation._control_inputs will eventually be removed.")
    # Copy value because it may be self._control_inputs_val (in particular if
    # this is called from self._control_inputs += ...), and we don't want to
    # clear value below.
    value = copy.copy(value)
    self._remove_all_control_inputs()
    self._add_control_inputs(value)
  @property
  def type(self):
    """The type of the op (e.g. `"MatMul"`)."""
    # Queried from the C op; not cached.
    return c_api.TF_OperationOpType(self._c_op)

  @property
  def graph(self):
    """The `Graph` that contains this operation."""
    return self._graph
  @property
  def node_def(self):
    # pylint: disable=line-too-long
    """Returns the `NodeDef` representation of this operation.

    Returns:
      A
      [`NodeDef`](https://www.tensorflow.org/code/tensorflow/core/framework/node_def.proto)
      protocol buffer.
    """
    # pylint: enable=line-too-long
    # Serialize out of the C graph and parse into a fresh Python proto on
    # every access.
    with c_api_util.tf_buffer() as buf:
      c_api.TF_OperationToNodeDef(self._c_op, buf)
      data = c_api.TF_GetBuffer(buf)
    node_def = node_def_pb2.NodeDef()
    node_def.ParseFromString(compat.as_bytes(data))
    return node_def

  @property
  def _node_def(self):
    # Deprecated alias kept for backward compatibility; warns on use.
    logging.warning("Operation._node_def is private, use Operation.node_def "
                    "instead. Operation._node_def will eventually be removed.")
    return self.node_def

  @property
  def op_def(self):
    # pylint: disable=line-too-long
    """Returns the `OpDef` proto that represents the type of this op.

    Returns:
      An
      [`OpDef`](https://www.tensorflow.org/code/tensorflow/core/framework/op_def.proto)
      protocol buffer.
    """
    # pylint: enable=line-too-long
    return self._graph._get_op_def(self.type)

  @property
  def _op_def(self):
    # Deprecated alias kept for backward compatibility; warns on use.
    logging.warning("Operation._op_def is private, use Operation.op_def "
                    "instead. Operation._op_def will eventually be removed.")
    return self.op_def
@property
def traceback(self):
"""Returns the call stack from when this operation was constructed."""
return tf_stack.convert_stack(self._traceback)
@property
def traceback_with_start_lines(self):
"""Same as traceback but includes start line of function definition.
Returns:
A list of 5-tuples (filename, lineno, name, code, func_start_lineno).
"""
return tf_stack.convert_stack(
self._traceback, include_func_start_lineno=True)
def _set_attr(self, attr_name, attr_value):
"""Private method used to set an attribute in the node_def."""
buf = c_api.TF_NewBufferFromString(
compat.as_bytes(attr_value.SerializeToString()))
try:
# pylint: disable=protected-access
c_api.SetAttr(self._graph._c_graph, self._c_op, attr_name, buf)
# pylint: enable=protected-access
finally:
c_api.TF_DeleteBuffer(buf)
def _set_func_attr(self, attr_name, func_name):
"""Private method used to set a function attribute in the node_def."""
func = attr_value_pb2.NameAttrList(name=func_name)
self._set_attr(attr_name, attr_value_pb2.AttrValue(func=func))
def _set_func_list_attr(self, attr_name, func_names):
"""Private method used to set a list(function) attribute in the node_def."""
funcs = [attr_value_pb2.NameAttrList(name=func_name)
for func_name in func_names]
funcs_list = attr_value_pb2.AttrValue.ListValue(func=funcs)
self._set_attr(attr_name, attr_value_pb2.AttrValue(list=funcs_list))
def _set_type_list_attr(self, attr_name, types):
"""Private method used to set a list(type) attribute in the node_def."""
if not types:
return
if isinstance(types[0], dtypes.DType):
types = [dt.as_datatype_enum for dt in types]
types_list = attr_value_pb2.AttrValue.ListValue(type=types)
self._set_attr(attr_name, attr_value_pb2.AttrValue(list=types_list))
def _set_shape_list_attr(self, attr_name, shapes):
"""Private method used to set a list(shape) attribute in the node_def."""
shapes = [s.as_proto() for s in shapes]
shapes_list = attr_value_pb2.AttrValue.ListValue(shape=shapes)
self._set_attr(attr_name, attr_value_pb2.AttrValue(list=shapes_list))
def _clear_attr(self, attr_name):
"""Private method used to clear an attribute in the node_def."""
# pylint: disable=protected-access
c_api.ClearAttr(self._graph._c_graph, self._c_op, attr_name)
# pylint: enable=protected-access
def get_attr(self, name):
"""Returns the value of the attr of this op with the given `name`.
Args:
name: The name of the attr to fetch.
Returns:
The value of the attr, as a Python object.
Raises:
ValueError: If this op does not have an attr with the given `name`.
"""
fields = ("s", "i", "f", "b", "type", "shape", "tensor", "func")
try:
with c_api_util.tf_buffer() as buf:
c_api.TF_OperationGetAttrValueProto(self._c_op, name, buf)
data = c_api.TF_GetBuffer(buf)
except errors.InvalidArgumentError as e:
# Convert to ValueError for backwards compatibility.
raise ValueError(str(e))
x = attr_value_pb2.AttrValue()
x.ParseFromString(data)
oneof_value = x.WhichOneof("value")
if oneof_value is None:
return []
if oneof_value == "list":
for f in fields:
if getattr(x.list, f):
if f == "type":
return [dtypes.as_dtype(t) for t in x.list.type]
else:
return list(getattr(x.list, f))
return []
if oneof_value == "type":
return dtypes.as_dtype(x.type)
assert oneof_value in fields, "Unsupported field type in " + str(x)
return getattr(x, oneof_value)
def _get_attr_type(self, name):
"""Returns the value of the attr of this op with the given `name`.
Args:
name: The name of the attr to fetch.
Returns:
The value of the attr, as a Python object.
Raises:
ValueError: If this op does not have an attr with the given `name`.
"""
try:
dtype_enum = c_api.TF_OperationGetAttrType(self._c_op, name)
return _DTYPES_INTERN_TABLE[dtype_enum]
except errors.InvalidArgumentError as e:
# Convert to ValueError for backwards compatibility.
raise ValueError(str(e))
def run(self, feed_dict=None, session=None):
"""Runs this operation in a `Session`.
Calling this method will execute all preceding operations that
produce the inputs needed for this operation.
*N.B.* Before invoking `Operation.run()`, its graph must have been
launched in a session, and either a default session must be
available, or `session` must be specified explicitly.
Args:
feed_dict: A dictionary that maps `Tensor` objects to feed values. See
`tf.Session.run` for a description of the valid feed values.
session: (Optional.) The `Session` to be used to run to this operation. If
none, the default session will be used.
"""
_run_using_default_session(self, feed_dict, self.graph, session)
_gradient_registry = registry.Registry("gradient")
@tf_export("RegisterGradient")
class RegisterGradient(object):
"""A decorator for registering the gradient function for an op type.
This decorator is only used when defining a new op type. For an op
with `m` inputs and `n` outputs, the gradient function is a function
that takes the original `Operation` and `n` `Tensor` objects
(representing the gradients with respect to each output of the op),
and returns `m` `Tensor` objects (representing the partial gradients
with respect to each input of the op).
For example, assuming that operations of type `"Sub"` take two
inputs `x` and `y`, and return a single output `x - y`, the
following gradient function would be registered:
```python
@tf.RegisterGradient("Sub")
def _sub_grad(unused_op, grad):
return grad, tf.negative(grad)
```
The decorator argument `op_type` is the string type of an
operation. This corresponds to the `OpDef.name` field for the proto
that defines the operation.
"""
def __init__(self, op_type):
"""Creates a new decorator with `op_type` as the Operation type.
Args:
op_type: The string type of an operation. This corresponds to the
`OpDef.name` field for the proto that defines the operation.
Raises:
TypeError: If `op_type` is not string.
"""
if not isinstance(op_type, six.string_types):
raise TypeError("op_type must be a string")
self._op_type = op_type
def __call__(self, f):
"""Registers the function `f` as gradient function for `op_type`."""
_gradient_registry.register(f, self._op_type)
return f
@deprecation.deprecated_endpoints("NotDifferentiable", "NoGradient")
@tf_export("no_gradient", v1=["no_gradient", "NotDifferentiable", "NoGradient"])
def no_gradient(op_type):
"""Specifies that ops of type `op_type` is not differentiable.
This function should *not* be used for operations that have a
well-defined gradient that is not yet implemented.
This function is only used when defining a new op type. It may be
used for ops such as `tf.size()` that are not differentiable. For
example:
```python
tf.no_gradient("Size")
```
The gradient computed for 'op_type' will then propagate zeros.
For ops that have a well-defined gradient but are not yet implemented,
no declaration should be made, and an error *must* be thrown if
an attempt to request its gradient is made.
Args:
op_type: The string type of an operation. This corresponds to the
`OpDef.name` field for the proto that defines the operation.
Raises:
TypeError: If `op_type` is not a string.
"""
if not isinstance(op_type, six.string_types):
raise TypeError("op_type must be a string")
_gradient_registry.register(None, op_type)
# Aliases for the old names, will be eventually removed.
NoGradient = no_gradient
NotDifferentiable = no_gradient
def get_gradient_function(op):
"""Returns the function that computes gradients for "op"."""
if not op.inputs:
return None
try:
op_type = op.get_attr("_gradient_op_type")
except ValueError:
op_type = op.type
return _gradient_registry.lookup(op_type)
_shape_registry = registry.Registry("shape functions")
_default_shape_function_registry = registry.Registry("default shape functions")
# These are set to common_shapes.call_cpp_shape_fn by op generated code
# (generated by python_op_gen.cc).
# It is set outside ops.py to avoid a circular dependency.
_call_cpp_shape_fn = None
_call_cpp_shape_fn_and_require_op = None
def _set_call_cpp_shape_fn(call_cpp_shape_fn):
"""Sets default shape fns from passed common_shapes.call_cpp_shape_fn."""
global _call_cpp_shape_fn, _call_cpp_shape_fn_and_require_op
if _call_cpp_shape_fn:
return # already registered
def call_without_requiring(op):
return call_cpp_shape_fn(op, require_shape_fn=False)
_call_cpp_shape_fn = call_without_requiring
def call_with_requiring(op):
return call_cpp_shape_fn(op, require_shape_fn=True)
_call_cpp_shape_fn_and_require_op = call_with_requiring
class RegisterShape(object):
"""No longer used.
Was: A decorator for registering a shape function.
Shape functions must now be registered via the SetShapeFn on the
original Op specification in C++.
"""
def __init__(self, op_type):
"""Saves the `op_type` as the `Operation` type."""
if not isinstance(op_type, six.string_types):
raise TypeError("op_type must be a string")
self._op_type = op_type
def __call__(self, f):
"""Registers "f" as the shape function for "op_type"."""
if f is None:
assert _call_cpp_shape_fn
# None is a special "weak" value that provides a default shape function,
# and can be overridden by a non-None registration.
try:
_default_shape_function_registry.register(_call_cpp_shape_fn,
self._op_type)
except KeyError:
# Ignore duplicate registrations of the weak value. This can
# occur if the op library input to wrapper generation
# inadvertently links in one or more of the standard op
# libraries.
pass
else:
_shape_registry.register(f, self._op_type)
return f
def set_shape_and_handle_data_for_outputs(_):
"""No op. TODO(b/74620627): Remove this."""
pass
class OpStats(object):
"""A holder for statistics about an operator.
This class holds information about the resource requirements for an op,
including the size of its weight parameters on-disk and how many FLOPS it
requires to execute forward inference.
If you define a new operation, you can create a function that will return a
set of information about its usage of the CPU and disk space when serialized.
The function itself takes a Graph object that's been set up so you can call
methods like get_tensor_by_name to help calculate the results, and a NodeDef
argument.
"""
def __init__(self, statistic_type, value=None):
"""Sets up the initial placeholders for the statistics."""
self.statistic_type = statistic_type
self.value = value
@property
def statistic_type(self):
return self._statistic_type
@statistic_type.setter
def statistic_type(self, statistic_type):
self._statistic_type = statistic_type
@property
def value(self):
return self._value
@value.setter
def value(self, value):
self._value = value
def __iadd__(self, other):
if other.statistic_type != self.statistic_type:
raise ValueError("Can't add an OpStat of type %s to one of %s." %
(self.statistic_type, other.statistic_type))
if self.value is None:
self.value = other.value
elif other.value is not None:
self._value += other.value
return self
_stats_registry = registry.Registry("statistical functions")
class RegisterStatistics(object):
"""A decorator for registering the statistics function for an op type.
This decorator can be defined for an op type so that it gives a
report on the resources used by an instance of an operator, in the
form of an OpStats object.
Well-known types of statistics include these so far:
- flops: When running a graph, the bulk of the computation happens doing
numerical calculations like matrix multiplications. This type allows a node
to return how many floating-point operations it takes to complete. The
total number of FLOPs for a graph is a good guide to its expected latency.
You can add your own statistics just by picking a new type string, registering
functions for the ops you care about, and then calling get_stats_for_node_def.
If a statistic for an op is registered multiple times, a KeyError will be
raised.
Since the statistics is counted on a per-op basis. It is not suitable for
model parameters (capacity), which is expected to be counted only once, even
if it is shared by multiple ops. (e.g. RNN)
For example, you can define a new metric called doohickey for a Foo operation
by placing this in your code:
```python
@ops.RegisterStatistics("Foo", "doohickey")
def _calc_foo_bojangles(unused_graph, unused_node_def):
return ops.OpStats("doohickey", 20)
```
Then in client code you can retrieve the value by making this call:
```python
doohickey = ops.get_stats_for_node_def(graph, node_def, "doohickey")
```
If the NodeDef is for an op with a registered doohickey function, you'll get
back the calculated amount in doohickey.value, or None if it's not defined.
"""
def __init__(self, op_type, statistic_type):
"""Saves the `op_type` as the `Operation` type."""
if not isinstance(op_type, six.string_types):
raise TypeError("op_type must be a string.")
if "," in op_type:
raise TypeError("op_type must not contain a comma.")
self._op_type = op_type
if not isinstance(statistic_type, six.string_types):
raise TypeError("statistic_type must be a string.")
if "," in statistic_type:
raise TypeError("statistic_type must not contain a comma.")
self._statistic_type = statistic_type
def __call__(self, f):
"""Registers "f" as the statistics function for "op_type"."""
_stats_registry.register(f, self._op_type + "," + self._statistic_type)
return f
def get_stats_for_node_def(graph, node, statistic_type):
"""Looks up the node's statistics function in the registry and calls it.
This function takes a Graph object and a NodeDef from a GraphDef, and if
there's an associated statistics method, calls it and returns a result. If no
function has been registered for the particular node type, it returns an empty
statistics object.
Args:
graph: A Graph object that's been set up with the node's graph.
node: A NodeDef describing the operator.
statistic_type: A string identifying the statistic we're interested in.
Returns:
An OpStats object containing information about resource usage.
"""
try:
stats_func = _stats_registry.lookup(node.op + "," + statistic_type)
result = stats_func(graph, node)
except LookupError:
result = OpStats(statistic_type)
return result
def name_from_scope_name(name):
"""Returns the name of an op given the name of its scope.
Args:
name: the name of the scope.
Returns:
the name of the op (equal to scope name minus any trailing slash).
"""
return name[:-1] if (name and name[-1] == "/") else name
_MUTATION_LOCK_GROUP = 0
_SESSION_RUN_LOCK_GROUP = 1
@tf_export("Graph")
class Graph(object):
"""A TensorFlow computation, represented as a dataflow graph.
A `Graph` contains a set of
`tf.Operation` objects,
which represent units of computation; and
`tf.Tensor` objects, which represent
the units of data that flow between operations.
A default `Graph` is always registered, and accessible by calling
`tf.compat.v1.get_default_graph`.
To add an operation to the default graph, simply call one of the functions
that defines a new `Operation`:
```python
c = tf.constant(4.0)
assert c.graph is tf.compat.v1.get_default_graph()
```
Another typical usage involves the
`tf.Graph.as_default`
context manager, which overrides the current default graph for the
lifetime of the context:
```python
g = tf.Graph()
with g.as_default():
# Define operations and tensors in `g`.
c = tf.constant(30.0)
assert c.graph is g
```
Important note: This class *is not* thread-safe for graph construction. All
operations should be created from a single thread, or external
synchronization must be provided. Unless otherwise specified, all methods
are not thread-safe.
A `Graph` instance supports an arbitrary number of "collections"
that are identified by name. For convenience when building a large
graph, collections can store groups of related objects: for
example, the `tf.Variable` uses a collection (named
`tf.GraphKeys.GLOBAL_VARIABLES`) for
all variables that are created during the construction of a graph. The caller
may define additional collections by specifying a new name.
"""
def __init__(self):
"""Creates a new, empty Graph."""
# Protects core state that can be returned via public accessors.
# Thread-safety is provided on a best-effort basis to support buggy
# programs, and is not guaranteed by the public `tf.Graph` API.
#
# NOTE(mrry): This does not protect the various stacks. A warning will
# be reported if these are used from multiple threads
self._lock = threading.RLock()
# The group lock synchronizes Session.run calls with methods that create
# and mutate ops (e.g. Graph.create_op()). This synchronization is
# necessary because it's illegal to modify an operation after it's been run.
# The group lock allows any number of threads to mutate ops at the same time
# but if any modification is going on, all Session.run calls have to wait.
# Similarly, if one or more Session.run calls are going on, all mutate ops
# have to wait until all Session.run calls have finished.
self._group_lock = lock_util.GroupLock(num_groups=2)
self._nodes_by_id = {} # GUARDED_BY(self._lock)
self._next_id_counter = 0 # GUARDED_BY(self._lock)
self._nodes_by_name = {} # GUARDED_BY(self._lock)
self._version = 0 # GUARDED_BY(self._lock)
# Maps a name used in the graph to the next id to use for that name.
self._names_in_use = {}
self._stack_state_is_thread_local = False
self._thread_local = threading.local()
# Functions that will be applied to choose a device if none is specified.
# In TF2.x or after switch_to_thread_local(),
# self._thread_local._device_function_stack is used instead.
self._graph_device_function_stack = traceable_stack.TraceableStack()
# Default original_op applied to new ops.
self._default_original_op = None
# Current control flow context. It could be either CondContext or
# WhileContext defined in ops/control_flow_ops.py
self._control_flow_context = None
# A new node will depend of the union of all of the nodes in the stack.
# In TF2.x or after switch_to_thread_local(),
# self._thread_local._control_dependencies_stack is used instead.
self._graph_control_dependencies_stack = []
# Arbitrary collections of objects.
self._collections = {}
# The graph-level random seed
self._seed = None
# A dictionary of attributes that should be applied to all ops.
self._attr_scope_map = {}
# A map from op type to the kernel label that should be used.
self._op_to_kernel_label_map = {}
# A map from op type to an alternative op type that should be used when
# computing gradients.
self._gradient_override_map = {}
# True if the graph is considered "finalized". In that case no
# new operations can be added.
self._finalized = False
# Functions defined in the graph
self._functions = collections.OrderedDict()
# Default GraphDef versions
self._graph_def_versions = versions_pb2.VersionDef(
producer=versions.GRAPH_DEF_VERSION,
min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER)
self._building_function = False
# Stack of colocate_with ops. In TF2.x or after switch_to_thread_local(),
# self._thread_local._colocation_stack is used instead.
self._graph_colocation_stack = traceable_stack.TraceableStack()
# Set of tensors that are dangerous to feed!
self._unfeedable_tensors = object_identity.ObjectIdentitySet()
# Set of operations that are dangerous to fetch!
self._unfetchable_ops = set()
# A map of tensor handle placeholder to tensor dtype.
self._handle_feeders = {}
# A map from tensor handle to its read op.
self._handle_readers = {}
# A map from tensor handle to its move op.
self._handle_movers = {}
# A map from tensor handle to its delete op.
self._handle_deleters = {}
# Allow optimizers and other objects to pseudo-uniquely key graphs (this key
# will be shared when defining function graphs, for example, so optimizers
# being called inside function definitions behave as if they were seeing the
# actual outside graph).
self._graph_key = "grap-key-%d/" % (uid(),)
# A string with the last reduction method passed to
# losses.compute_weighted_loss(), or None. This is required only for
# backward compatibility with Estimator and optimizer V1 use cases.
self._last_loss_reduction = None
# Flag that is used to indicate whether loss has been scaled by optimizer.
# If this flag has been set, then estimator uses it to scale losss back
# before reporting. This is required only for backward compatibility with
# Estimator and optimizer V1 use cases.
self._is_loss_scaled_by_optimizer = False
self._container = ""
self._registered_ops = op_def_registry.get_registered_ops()
# Set to True if this graph is being built in an
# AutomaticControlDependencies context.
self._add_control_dependencies = False
# Cache for OpDef protobufs retrieved via the C API.
self._op_def_cache = {}
# Cache for constant results of `broadcast_gradient_args()`. The keys are
# tuples of fully-defined shapes: (x_shape_tuple, y_shape_tuple), and the
# values are tuples of reduction indices: (rx, ry).
self._bcast_grad_args_cache = {}
# Cache for constant results of `reduced_shape()`. The keys are pairs of
# tuples: (input_shape_tuple, reduction_indices_tuple), and the values
# are pairs of tuples: (output_shape_kept_dims, tile_scaling).
self._reduced_shape_cache = {}
# TODO(skyewm): fold as much of the above as possible into the C
# implementation
self._scoped_c_graph = c_api_util.ScopedTFGraph()
# The C API requires all ops to have shape functions. Disable this
# requirement (many custom ops do not have shape functions, and we don't
# want to break these existing cases).
c_api.SetRequireShapeInferenceFns(self._c_graph, False)
if tf2.enabled():
self.switch_to_thread_local()
# Note: this method is private because the API of tf.Graph() is public and
# frozen, and this functionality is still not ready for public visibility.
@tf_contextlib.contextmanager
def _variable_creator_scope(self, creator, priority=100):
"""Scope which defines a variable creation function.
Args:
creator: A callable taking `next_creator` and `kwargs`. See the
`tf.variable_creator_scope` docstring.
priority: Creators with a higher `priority` are called first. Within the
same priority, creators are called inner-to-outer.
Yields:
`_variable_creator_scope` is a context manager with a side effect, but
doesn't return a value.
Raises:
RuntimeError: If variable creator scopes are not properly nested.
"""
# This step keeps a reference to the existing stack, and it also initializes
# self._thread_local._variable_creator_stack if it doesn't exist yet.
old = self._variable_creator_stack
new = list(old)
new.append((priority, creator))
# Sorting is stable, so we'll put higher-priority creators later in the list
# but otherwise maintain registration order.
new.sort(key=lambda item: item[0])
self._thread_local._variable_creator_stack = new # pylint: disable=protected-access
try:
yield
finally:
if self._thread_local._variable_creator_stack is not new: # pylint: disable=protected-access
raise RuntimeError(
"Exiting variable_creator_scope without proper nesting.")
self._thread_local._variable_creator_stack = old # pylint: disable=protected-access
# Note: this method is private because the API of tf.Graph() is public and
# frozen, and this functionality is still not ready for public visibility.
@property
def _variable_creator_stack(self):
if not hasattr(self._thread_local, "_variable_creator_stack"):
self._thread_local._variable_creator_stack = [] # pylint: disable=protected-access
# This previously returned a copy of the stack instead of the stack itself,
# to guard against accidental mutation. Consider, however, code that wants
# to save and restore the variable creator stack:
# def f():
# original_stack = graph._variable_creator_stack
# graph._variable_creator_stack = new_stack
# ... # Some code
# graph._variable_creator_stack = original_stack
#
# And lets say you have some code that calls this function with some
# variable_creator:
# def g():
# with variable_scope.variable_creator_scope(creator):
# f()
# When exiting the variable creator scope, it would see a different stack
# object than it expected leading to a "Exiting variable_creator_scope
# without proper nesting" error.
return self._thread_local._variable_creator_stack # pylint: disable=protected-access
@_variable_creator_stack.setter
def _variable_creator_stack(self, variable_creator_stack):
self._thread_local._variable_creator_stack = variable_creator_stack # pylint: disable=protected-access
def _check_not_finalized(self):
"""Check if the graph is finalized.
Raises:
RuntimeError: If the graph finalized.
"""
if self._finalized:
raise RuntimeError("Graph is finalized and cannot be modified.")
def _add_op(self, op):
"""Adds 'op' to the graph.
Args:
op: the Operator or Tensor to add.
Raises:
TypeError: if op is not an Operation or Tensor.
ValueError: if the op.name or op._id are already used.
"""
self._check_not_finalized()
if not isinstance(op, (Tensor, Operation)):
raise TypeError("op must be a Tensor or Operation: %s" % op)
with self._lock:
# pylint: disable=protected-access
if op._id in self._nodes_by_id:
raise ValueError("cannot add an op with id %d as it already "
"exists in the graph" % op._id)
if op.name in self._nodes_by_name:
raise ValueError("cannot add op with name %s as that name "
"is already used" % op.name)
self._nodes_by_id[op._id] = op
self._nodes_by_name[op.name] = op
self._version = max(self._version, op._id)
# pylint: enable=protected-access
@property
def _c_graph(self):
if self._scoped_c_graph:
return self._scoped_c_graph.graph
return None
@property
def version(self):
"""Returns a version number that increases as ops are added to the graph.
Note that this is unrelated to the
`tf.Graph.graph_def_versions`.
Returns:
An integer version that increases as ops are added to the graph.
"""
if self._finalized:
return self._version
with self._lock:
return self._version
@property
def graph_def_versions(self):
# pylint: disable=line-too-long
"""The GraphDef version information of this graph.
For details on the meaning of each version, see
[`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto).
Returns:
A `VersionDef`.
"""
# pylint: enable=line-too-long
with c_api_util.tf_buffer() as buf:
c_api.TF_GraphVersions(self._c_graph, buf)
data = c_api.TF_GetBuffer(buf)
version_def = versions_pb2.VersionDef()
version_def.ParseFromString(compat.as_bytes(data))
return version_def
@property
def seed(self):
"""The graph-level random seed of this graph."""
return self._seed
@seed.setter
def seed(self, seed):
self._seed = seed
@property
def finalized(self):
"""True if this graph has been finalized."""
return self._finalized
def finalize(self):
"""Finalizes this graph, making it read-only.
After calling `g.finalize()`, no new operations can be added to
`g`. This method is used to ensure that no operations are added
to a graph when it is shared between multiple threads, for example
when using a `tf.compat.v1.train.QueueRunner`.
"""
self._finalized = True
def _unsafe_unfinalize(self):
"""Opposite of `finalize`.
Internal interface.
NOTE: Unfinalizing a graph could have negative impact on performance,
especially in a multi-threaded environment. Unfinalizing a graph
when it is in use by a Session may lead to undefined behavior. Ensure
that all sessions using a graph are closed before calling this method.
"""
self._finalized = False
def _get_control_flow_context(self):
"""Returns the current control flow context.
Returns:
A context object.
"""
return self._control_flow_context
def _set_control_flow_context(self, ctx):
"""Sets the current control flow context.
Args:
ctx: a context object.
"""
self._control_flow_context = ctx
def _copy_functions_to_graph_def(self, graph_def, starting_bytesize):
"""If this graph contains functions, copy them to `graph_def`."""
bytesize = starting_bytesize
for f in self._functions.values():
bytesize += f.definition.ByteSize()
if bytesize >= (1 << 31) or bytesize < 0:
raise ValueError("GraphDef cannot be larger than 2GB.")
graph_def.library.function.extend([f.definition])
if f.grad_func_name:
grad_def = function_pb2.GradientDef()
grad_def.function_name = f.name
grad_def.gradient_func = f.grad_func_name
graph_def.library.gradient.extend([grad_def])
def _as_graph_def(self, from_version=None, add_shapes=False):
# pylint: disable=line-too-long
"""Returns a serialized `GraphDef` representation of this graph.
The serialized `GraphDef` can be imported into another `Graph`
(using `tf.import_graph_def`) or used with the
[C++ Session API](../../../../api_docs/cc/index.md).
This method is thread-safe.
Args:
from_version: Optional. If this is set, returns a `GraphDef` containing
only the nodes that were added to this graph since its `version`
property had the given value.
add_shapes: If true, adds an "_output_shapes" list attr to each node with
the inferred shapes of each of its outputs.
Returns:
A tuple containing a
[`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto)
protocol buffer, and the version of the graph to which that
`GraphDef` corresponds.
Raises:
ValueError: If the `graph_def` would be too large.
"""
# pylint: enable=line-too-long
with self._lock:
with c_api_util.tf_buffer() as buf:
c_api.TF_GraphToGraphDef(self._c_graph, buf)
data = c_api.TF_GetBuffer(buf)
graph = graph_pb2.GraphDef()
graph.ParseFromString(compat.as_bytes(data))
# Strip the experimental library field iff it's empty.
if not graph.library.function:
graph.ClearField("library")
if add_shapes:
for node in graph.node:
op = self._nodes_by_name[node.name]
if op.outputs:
node.attr["_output_shapes"].list.shape.extend(
[output.get_shape().as_proto() for output in op.outputs])
for function_def in graph.library.function:
defined_function = self._functions[function_def.signature.name]
try:
func_graph = defined_function.graph
except AttributeError:
# _DefinedFunction doesn't have a graph, _EagerDefinedFunction
# does. Both rely on ops.py, so we can't really isinstance check
# them.
continue
input_shapes = function_def.attr["_input_shapes"]
try:
func_graph_inputs = func_graph.inputs
except AttributeError:
continue
for input_tensor in func_graph_inputs:
if input_tensor.dtype == dtypes.resource:
# TODO(allenl): Save and restore handle data, then save the
# resource placeholder's shape. Right now some shape functions get
# confused if we set the shape of the resource placeholder (to a
# scalar of course) and there isn't any handle data.
input_shapes.list.shape.add().CopyFrom(
tensor_shape.TensorShape(None).as_proto())
else:
input_shapes.list.shape.add().CopyFrom(
input_tensor.get_shape().as_proto())
for node in function_def.node_def:
try:
op = func_graph.get_operation_by_name(node.name)
except KeyError:
continue
node.attr["_output_shapes"].list.shape.extend(
[output.get_shape().as_proto() for output in op.outputs])
return graph, self._version
def as_graph_def(self, from_version=None, add_shapes=False):
# pylint: disable=line-too-long
"""Returns a serialized `GraphDef` representation of this graph.
The serialized `GraphDef` can be imported into another `Graph`
(using `tf.import_graph_def`) or used with the
[C++ Session API](../../api_docs/cc/index.md).
This method is thread-safe.
Args:
from_version: Optional. If this is set, returns a `GraphDef` containing
only the nodes that were added to this graph since its `version`
property had the given value.
add_shapes: If true, adds an "_output_shapes" list attr to each node with
the inferred shapes of each of its outputs.
Returns:
A
[`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto)
protocol buffer.
Raises:
ValueError: If the `graph_def` would be too large.
"""
# pylint: enable=line-too-long
result, _ = self._as_graph_def(from_version, add_shapes)
return result
def _is_function(self, name):
"""Tests whether 'name' is registered in this graph's function library.
Args:
name: string op name.
Returns:
bool indicating whether or not 'name' is registered in function library.
"""
return compat.as_str(name) in self._functions
def _get_function(self, name):
"""Returns the function definition for 'name'.
Args:
name: string function name.
Returns:
The function def proto.
"""
return self._functions.get(compat.as_str(name), None)
  def _add_function(self, function):
    """Adds a function to the graph.

    After the function has been added, you can call to the function by
    passing the function name in place of an op name to
    `Graph.create_op()`.

    Args:
      function: A `_DefinedFunction` object.

    Raises:
      ValueError: if another function is defined with the same name.
    """
    name = function.name
    # Sanity checks on gradient definition. A function may define its gradient
    # either by name (grad_func_name) or as a Python function, but not both.
    if (function.grad_func_name is not None) and (function.python_grad_func is
                                                  not None):
      raise ValueError("Gradient defined twice for function %s" % name)
    # Add function to graph
    # pylint: disable=protected-access
    # Pass the C gradient function to the C API when one is defined, so the
    # gradient is registered alongside the function in the underlying TF_Graph.
    gradient = (
        function._grad_func._c_func.func if function._grad_func else None)
    c_api.TF_GraphCopyFunction(self._c_graph, function._c_func.func, gradient)
    # pylint: enable=protected-access
    # Mirror the registration in the Python-side library for fast lookup.
    self._functions[compat.as_str(name)] = function
    # Need a new-enough consumer to support the functions we add to the graph.
    if self._graph_def_versions.min_consumer < 12:
      self._graph_def_versions.min_consumer = 12
  @property
  def building_function(self):
    """Returns True iff this graph represents a function."""
    # NOTE(review): _building_function is initialized outside this view —
    # presumably in the constructor; confirm before relying on its default.
    return self._building_function
# Helper functions to create operations.
@deprecated_args(None,
"Shapes are always computed; don't use the compute_shapes "
"as it has no effect.", "compute_shapes")
def create_op(
self,
op_type,
inputs,
dtypes=None, # pylint: disable=redefined-outer-name
input_types=None,
name=None,
attrs=None,
op_def=None,
compute_shapes=True,
compute_device=True):
"""Creates an `Operation` in this graph.
This is a low-level interface for creating an `Operation`. Most
programs will not call this method directly, and instead use the
Python op constructors, such as `tf.constant()`, which add ops to
the default graph.
Args:
op_type: The `Operation` type to create. This corresponds to the
`OpDef.name` field for the proto that defines the operation.
inputs: A list of `Tensor` objects that will be inputs to the `Operation`.
dtypes: (Optional) A list of `DType` objects that will be the types of the
tensors that the operation produces.
input_types: (Optional.) A list of `DType`s that will be the types of the
tensors that the operation consumes. By default, uses the base `DType`
of each input in `inputs`. Operations that expect reference-typed inputs
must specify `input_types` explicitly.
name: (Optional.) A string name for the operation. If not specified, a
name is generated based on `op_type`.
attrs: (Optional.) A dictionary where the key is the attribute name (a
string) and the value is the respective `attr` attribute of the
`NodeDef` proto that will represent the operation (an `AttrValue`
proto).
op_def: (Optional.) The `OpDef` proto that describes the `op_type` that
the operation will have.
compute_shapes: (Optional.) Deprecated. Has no effect (shapes are always
computed).
compute_device: (Optional.) If True, device functions will be executed to
compute the device property of the Operation.
Raises:
TypeError: if any of the inputs is not a `Tensor`.
ValueError: if colocation conflicts with existing device assignment.
Returns:
An `Operation` object.
"""
del compute_shapes
for idx, a in enumerate(inputs):
if not isinstance(a, Tensor):
raise TypeError("Input #%d is not a tensor: %s" % (idx, a))
return self._create_op_internal(op_type, inputs, dtypes, input_types, name,
attrs, op_def, compute_device)
  def _create_op_internal(
      self,
      op_type,
      inputs,
      dtypes=None,  # pylint: disable=redefined-outer-name
      input_types=None,
      name=None,
      attrs=None,
      op_def=None,
      compute_device=True):
    """Creates an `Operation` in this graph.

    Implements `Graph.create_op()` without the overhead of the deprecation
    wrapper.

    Args:
      op_type: The `Operation` type to create. This corresponds to the
        `OpDef.name` field for the proto that defines the operation.
      inputs: A list of `Tensor` objects that will be inputs to the `Operation`.
      dtypes: (Optional) A list of `DType` objects that will be the types of the
        tensors that the operation produces.
      input_types: (Optional.) A list of `DType`s that will be the types of the
        tensors that the operation consumes. By default, uses the base `DType`
        of each input in `inputs`. Operations that expect reference-typed inputs
        must specify `input_types` explicitly.
      name: (Optional.) A string name for the operation. If not specified, a
        name is generated based on `op_type`.
      attrs: (Optional.) A dictionary where the key is the attribute name (a
        string) and the value is the respective `attr` attribute of the
        `NodeDef` proto that will represent the operation (an `AttrValue`
        proto).
      op_def: (Optional.) The `OpDef` proto that describes the `op_type` that
        the operation will have.
      compute_device: (Optional.) If True, device functions will be executed to
        compute the device property of the Operation.

    Raises:
      ValueError: if colocation conflicts with existing device assignment.

    Returns:
      An `Operation` object.
    """
    self._check_not_finalized()
    # Default the name to the op type when the caller supplies none.
    if name is None:
      name = op_type
    # If a name ends with a '/' it is a "name scope" and we use it as-is,
    # after removing the trailing '/'. Otherwise the name is uniquified
    # within this graph.
    if name and name[-1] == "/":
      name = name_from_scope_name(name)
    else:
      name = self.unique_name(name)
    node_def = _NodeDef(op_type, name, device=None, attrs=attrs)
    # Control dependencies are computed from the producing ops of the inputs.
    input_ops = set([t.op for t in inputs])
    control_inputs = self._control_dependencies_for_inputs(input_ops)
    # _create_op_helper mutates the new Operation. `_mutation_lock` ensures a
    # Session.run call cannot occur between creating and mutating the op.
    with self._mutation_lock():
      ret = Operation(
          node_def,
          self,
          inputs=inputs,
          output_types=dtypes,
          control_inputs=control_inputs,
          input_types=input_types,
          original_op=self._default_original_op,
          op_def=op_def)
      self._create_op_helper(ret, compute_device=compute_device)
    return ret
  def _create_op_from_tf_operation(self, c_op, compute_device=True):
    """Creates an `Operation` in this graph from the supplied TF_Operation.

    This method is like create_op() except the new Operation is constructed
    using `c_op`. The returned Operation will have `c_op` as its _c_op
    field. This is used to create Operation objects around TF_Operations created
    indirectly by the C API (e.g. by TF_ImportGraphDef, TF_FinishWhile).

    This function does not call Operation._control_flow_post_processing or
    Graph._control_dependencies_for_inputs (since the inputs may not be
    available yet). The caller is responsible for calling these methods.

    Args:
      c_op: a wrapped TF_Operation
      compute_device: (Optional.) If True, device functions will be executed to
        compute the device property of the Operation.

    Returns:
      An `Operation` object.
    """
    self._check_not_finalized()
    ret = Operation(c_op, self)
    # If a name_scope was created with ret.name but no nodes were created in it,
    # the name will still appear in _names_in_use even though the name hasn't
    # been used. This is ok, just leave _names_in_use as-is in this case.
    # TODO(skyewm): make the C API guarantee no name conflicts.
    name_key = ret.name.lower()
    if name_key not in self._names_in_use:
      # Reserve the (case-insensitive) name so unique_name() never reuses it.
      self._names_in_use[name_key] = 1
    self._create_op_helper(ret, compute_device=compute_device)
    return ret
  def _create_op_helper(self, op, compute_device=True):
    """Common logic for creating an op in this graph.

    Applies scoped attributes, kernel labels, gradient overrides, device
    functions, colocation constraints, and the container attribute to `op`.

    Args:
      op: The newly created `Operation` to finish configuring.
      compute_device: If True, run the graph's device functions on `op`.
    """
    # Apply any additional attributes requested. Do not overwrite any existing
    # attributes.
    for key, value in self._attr_scope_map.items():
      try:
        # A ValueError from get_attr means the attr is not yet set, so the
        # scoped value may be applied without clobbering anything.
        op.get_attr(key)
      except ValueError:
        if callable(value):
          value = value(op.node_def)
          if not isinstance(value, (type(None), attr_value_pb2.AttrValue)):
            raise TypeError(
                "Callable for scope map key '%s' must return either None or "
                "an AttrValue protocol buffer; but it returned: %s" %
                (key, value))
        if value:
          op._set_attr(key, value)  # pylint: disable=protected-access
    # Apply a kernel label if one has been specified for this op type.
    try:
      kernel_label = self._op_to_kernel_label_map[op.type]
      op._set_attr("_kernel",  # pylint: disable=protected-access
                   attr_value_pb2.AttrValue(s=compat.as_bytes(kernel_label)))
    except KeyError:
      # No kernel label registered for this op type.
      pass
    # Apply the overriding op type for gradients if one has been specified for
    # this op type.
    try:
      mapped_op_type = self._gradient_override_map[op.type]
      op._set_attr("_gradient_op_type",  # pylint: disable=protected-access
                   attr_value_pb2.AttrValue(s=compat.as_bytes(mapped_op_type)))
    except KeyError:
      # No gradient override registered for this op type.
      pass
    # Make active control-dependency scopes aware of the new op.
    self._record_op_seen_by_control_dependencies(op)
    if compute_device:
      self._apply_device_functions(op)
    # Snapshot the colocation stack metadata before we might generate error
    # messages using it. Note that this snapshot depends on the actual stack
    # and is independent of the op's _class attribute.
    # pylint: disable=protected-access
    op._colocation_code_locations = self._snapshot_colocation_stack_metadata()
    # pylint: enable=protected-access
    if self._colocation_stack:
      all_colocation_groups = []
      for colocation_op in self._colocation_stack.peek_objs():
        all_colocation_groups.extend(colocation_op.colocation_groups())
        if colocation_op.device:
          # Colocation overrides any previously assigned device.
          # pylint: disable=protected-access
          op._set_device(colocation_op.device)
          # pylint: enable=protected-access
      all_colocation_groups = sorted(set(all_colocation_groups))
      # pylint: disable=protected-access
      op._set_attr(
          "_class",
          attr_value_pb2.AttrValue(
              list=attr_value_pb2.AttrValue.ListValue(s=all_colocation_groups)))
      # pylint: enable=protected-access
    # Sets "container" attribute if
    # (1) self._container is not None
    # (2) "is_stateful" is set in OpDef
    # (3) "container" attribute is in OpDef
    # (4) "container" attribute is None
    if self._container and op._is_stateful:  # pylint: disable=protected-access
      try:
        container_attr = op.get_attr("container")
      except ValueError:
        # "container" attribute is not in OpDef
        pass
      else:
        if not container_attr:
          op._set_attr("container", attr_value_pb2.AttrValue(  # pylint: disable=protected-access
              s=compat.as_bytes(self._container)))
def _add_new_tf_operations(self, compute_devices=True):
"""Creates `Operations` in this graph for any new TF_Operations.
This is useful for when TF_Operations are indirectly created by the C API
outside of the Operation constructor (e.g. by TF_ImportGraphDef,
TF_FinishWhile). This ensures there are corresponding Operations for all
TF_Operations in the underlying TF_Graph.
Args:
compute_devices: (Optional.) If True, device functions will be executed to
compute the device properties of each new Operation.
Returns:
A list of the new `Operation` objects.
"""
# Create all Operation objects before accessing their inputs since an op may
# be created before its inputs.
new_ops = [
self._create_op_from_tf_operation(c_op, compute_device=compute_devices)
for c_op in c_api_util.new_tf_operations(self)
]
# pylint: disable=protected-access
for op in new_ops:
new_control_inputs = self._control_dependencies_for_inputs(op.inputs)
op._add_control_inputs(new_control_inputs)
op._control_flow_post_processing()
# pylint: enable=protected-access
return new_ops
def as_graph_element(self, obj, allow_tensor=True, allow_operation=True):
"""Returns the object referred to by `obj`, as an `Operation` or `Tensor`.
This function validates that `obj` represents an element of this
graph, and gives an informative error message if it is not.
This function is the canonical way to get/validate an object of
one of the allowed types from an external argument reference in the
Session API.
This method may be called concurrently from multiple threads.
Args:
obj: A `Tensor`, an `Operation`, or the name of a tensor or operation. Can
also be any object with an `_as_graph_element()` method that returns a
value of one of these types. Note: `_as_graph_element` will be called
inside the graph's lock and so may not modify the graph.
allow_tensor: If true, `obj` may refer to a `Tensor`.
allow_operation: If true, `obj` may refer to an `Operation`.
Returns:
The `Tensor` or `Operation` in the Graph corresponding to `obj`.
Raises:
TypeError: If `obj` is not a type we support attempting to convert
to types.
ValueError: If `obj` is of an appropriate type but invalid. For
example, an invalid string.
KeyError: If `obj` is not an object in the graph.
"""
if self._finalized:
return self._as_graph_element_locked(obj, allow_tensor, allow_operation)
with self._lock:
return self._as_graph_element_locked(obj, allow_tensor, allow_operation)
def _as_graph_element_locked(self, obj, allow_tensor, allow_operation):
"""See `Graph.as_graph_element()` for details."""
# The vast majority of this function is figuring
# out what an API user might be doing wrong, so
# that we can give helpful error messages.
#
# Ideally, it would be nice to split it up, but we
# need context to generate nice error messages.
if allow_tensor and allow_operation:
types_str = "Tensor or Operation"
elif allow_tensor:
types_str = "Tensor"
elif allow_operation:
types_str = "Operation"
else:
raise ValueError("allow_tensor and allow_operation can't both be False.")
temp_obj = _as_graph_element(obj)
if temp_obj is not None:
obj = temp_obj
# If obj appears to be a name...
if isinstance(obj, compat.bytes_or_text_types):
name = compat.as_str(obj)
if ":" in name and allow_tensor:
# Looks like a Tensor name and can be a Tensor.
try:
op_name, out_n = name.split(":")
out_n = int(out_n)
except:
raise ValueError("The name %s looks a like a Tensor name, but is "
"not a valid one. Tensor names must be of the "
"form \"<op_name>:<output_index>\"." % repr(name))
if op_name in self._nodes_by_name:
op = self._nodes_by_name[op_name]
else:
raise KeyError("The name %s refers to a Tensor which does not "
"exist. The operation, %s, does not exist in the "
"graph." % (repr(name), repr(op_name)))
try:
return op.outputs[out_n]
except:
raise KeyError("The name %s refers to a Tensor which does not "
"exist. The operation, %s, exists but only has "
"%s outputs." %
(repr(name), repr(op_name), len(op.outputs)))
elif ":" in name and not allow_tensor:
# Looks like a Tensor name but can't be a Tensor.
raise ValueError("Name %s appears to refer to a Tensor, not a %s." %
(repr(name), types_str))
elif ":" not in name and allow_operation:
# Looks like an Operation name and can be an Operation.
if name not in self._nodes_by_name:
raise KeyError("The name %s refers to an Operation not in the "
"graph." % repr(name))
return self._nodes_by_name[name]
elif ":" not in name and not allow_operation:
# Looks like an Operation name but can't be an Operation.
if name in self._nodes_by_name:
# Yep, it's an Operation name
err_msg = ("The name %s refers to an Operation, not a %s." %
(repr(name), types_str))
else:
err_msg = ("The name %s looks like an (invalid) Operation name, "
"not a %s." % (repr(name), types_str))
err_msg += (" Tensor names must be of the form "
"\"<op_name>:<output_index>\".")
raise ValueError(err_msg)
elif isinstance(obj, Tensor) and allow_tensor:
# Actually obj is just the object it's referring to.
if obj.graph is not self:
raise ValueError("Tensor %s is not an element of this graph." % obj)
return obj
elif isinstance(obj, Operation) and allow_operation:
# Actually obj is just the object it's referring to.
if obj.graph is not self:
raise ValueError("Operation %s is not an element of this graph." % obj)
return obj
else:
# We give up!
raise TypeError("Can not convert a %s into a %s." %
(type(obj).__name__, types_str))
def get_operations(self):
"""Return the list of operations in the graph.
You can modify the operations in place, but modifications
to the list such as inserts/delete have no effect on the
list of operations known to the graph.
This method may be called concurrently from multiple threads.
Returns:
A list of Operations.
"""
if self._finalized:
return list(self._nodes_by_id.values())
with self._lock:
return list(self._nodes_by_id.values())
def get_operation_by_name(self, name):
"""Returns the `Operation` with the given `name`.
This method may be called concurrently from multiple threads.
Args:
name: The name of the `Operation` to return.
Returns:
The `Operation` with the given `name`.
Raises:
TypeError: If `name` is not a string.
KeyError: If `name` does not correspond to an operation in this graph.
"""
if not isinstance(name, six.string_types):
raise TypeError("Operation names are strings (or similar), not %s." %
type(name).__name__)
return self.as_graph_element(name, allow_tensor=False, allow_operation=True)
def _get_operation_by_name_unsafe(self, name):
"""Returns the `Operation` with the given `name`.
This is a internal unsafe version of get_operation_by_name. It skips many
checks and does not have user friedly error messages but runs considerably
faster. This method may be called concurrently from multiple threads.
Args:
name: The name of the `Operation` to return.
Returns:
The `Operation` with the given `name`.
Raises:
KeyError: If `name` does not correspond to an operation in this graph.
"""
if self._finalized:
return self._nodes_by_name[name]
with self._lock:
return self._nodes_by_name[name]
def _get_operation_by_tf_operation(self, tf_oper):
op_name = c_api.TF_OperationName(tf_oper)
return self._get_operation_by_name_unsafe(op_name)
def get_tensor_by_name(self, name):
"""Returns the `Tensor` with the given `name`.
This method may be called concurrently from multiple threads.
Args:
name: The name of the `Tensor` to return.
Returns:
The `Tensor` with the given `name`.
Raises:
TypeError: If `name` is not a string.
KeyError: If `name` does not correspond to a tensor in this graph.
"""
# Names should be strings.
if not isinstance(name, six.string_types):
raise TypeError("Tensor names are strings (or similar), not %s." %
type(name).__name__)
return self.as_graph_element(name, allow_tensor=True, allow_operation=False)
def _get_tensor_by_tf_output(self, tf_output):
"""Returns the `Tensor` representing `tf_output`.
Note that there is only one such `Tensor`, i.e. multiple calls to this
function with the same TF_Output value will always return the same `Tensor`
object.
Args:
tf_output: A wrapped `TF_Output` (the C API equivalent of `Tensor`).
Returns:
The `Tensor` that represents `tf_output`.
"""
op = self._get_operation_by_tf_operation(tf_output.oper)
return op.outputs[tf_output.index]
def _next_id(self):
"""Id for next Operation instance. Also increments the internal id."""
self._check_not_finalized()
with self._lock:
self._next_id_counter += 1
return self._next_id_counter
  @property
  def _last_id(self):
    """The id most recently issued by `_next_id`."""
    return self._next_id_counter
  def _get_op_def(self, type):  # pylint: disable=redefined-builtin
    """Returns the `OpDef` proto for `type`. `type` is a string."""
    # NOTE: No locking is required because the lookup and insertion operations
    # on Python dictionaries are atomic.
    try:
      # Fast path: return the cached proto if we have seen this type before.
      return self._op_def_cache[type]
    except KeyError:
      # Slow path: fetch the serialized OpDef from the C API and cache it.
      with c_api_util.tf_buffer() as buf:
        # pylint: disable=protected-access
        c_api.TF_GraphGetOpDef(self._c_graph, compat.as_bytes(type), buf)
        # pylint: enable=protected-access
        data = c_api.TF_GetBuffer(buf)
      op_def = op_def_pb2.OpDef()
      op_def.ParseFromString(compat.as_bytes(data))
      self._op_def_cache[type] = op_def
      return op_def
  def as_default(self):
    """Returns a context manager that makes this `Graph` the default graph.

    This method should be used if you want to create multiple graphs
    in the same process. For convenience, a global default graph is
    provided, and all ops will be added to this graph if you do not
    create a new graph explicitly.

    Use this method with the `with` keyword to specify that ops created within
    the scope of a block should be added to this graph. In this case, once
    the scope of the `with` is exited, the previous default graph is set again
    as default. There is a stack, so it's ok to have multiple nested levels
    of `as_default` calls.

    The default graph is a property of the current thread. If you
    create a new thread, and wish to use the default graph in that
    thread, you must explicitly add a `with g.as_default():` in that
    thread's function.

    The following code examples are equivalent:

    ```python
    # 1. Using Graph.as_default():
    g = tf.Graph()
    with g.as_default():
      c = tf.constant(5.0)
      assert c.graph is g

    # 2. Constructing and making default:
    with tf.Graph().as_default() as g:
      c = tf.constant(5.0)
      assert c.graph is g
    ```

    If eager execution is enabled ops created under this context manager will be
    added to the graph instead of executed eagerly.

    Returns:
      A context manager for using this graph as the default graph.
    """
    # The returned controller pushes this graph onto the default-graph stack
    # on entry and restores the previous default on exit.
    return _default_graph_stack.get_controller(self)
  @property
  def collections(self):
    """Returns the names of the collections known to this graph."""
    # Returns a new list; mutating it does not affect the graph's collections.
    return list(self._collections)
def add_to_collection(self, name, value):
"""Stores `value` in the collection with the given `name`.
Note that collections are not sets, so it is possible to add a value to
a collection several times.
Args:
name: The key for the collection. The `GraphKeys` class contains many
standard names for collections.
value: The value to add to the collection.
""" # pylint: disable=g-doc-exception
self._check_not_finalized()
with self._lock:
if name not in self._collections:
self._collections[name] = [value]
else:
self._collections[name].append(value)
def add_to_collections(self, names, value):
"""Stores `value` in the collections given by `names`.
Note that collections are not sets, so it is possible to add a value to
a collection several times. This function makes sure that duplicates in
`names` are ignored, but it will not check for pre-existing membership of
`value` in any of the collections in `names`.
`names` can be any iterable, but if `names` is a string, it is treated as a
single collection name.
Args:
names: The keys for the collections to add to. The `GraphKeys` class
contains many standard names for collections.
value: The value to add to the collections.
"""
# Make sure names are unique, but treat strings as a single collection name
names = (names,) if isinstance(names, six.string_types) else set(names)
for name in names:
self.add_to_collection(name, value)
def get_collection_ref(self, name):
"""Returns a list of values in the collection with the given `name`.
If the collection exists, this returns the list itself, which can
be modified in place to change the collection. If the collection does
not exist, it is created as an empty list and the list is returned.
This is different from `get_collection()` which always returns a copy of
the collection list if it exists and never creates an empty collection.
Args:
name: The key for the collection. For example, the `GraphKeys` class
contains many standard names for collections.
Returns:
The list of values in the collection with the given `name`, or an empty
list if no value has been added to that collection.
""" # pylint: disable=g-doc-exception
with self._lock:
coll_list = self._collections.get(name, None)
if coll_list is None:
coll_list = []
self._collections[name] = coll_list
return coll_list
def get_collection(self, name, scope=None):
"""Returns a list of values in the collection with the given `name`.
This is different from `get_collection_ref()` which always returns the
actual collection list if it exists in that it returns a new list each time
it is called.
Args:
name: The key for the collection. For example, the `GraphKeys` class
contains many standard names for collections.
scope: (Optional.) A string. If supplied, the resulting list is filtered
to include only items whose `name` attribute matches `scope` using
`re.match`. Items without a `name` attribute are never returned if a
scope is supplied. The choice of `re.match` means that a `scope` without
special tokens filters by prefix.
Returns:
The list of values in the collection with the given `name`, or
an empty list if no value has been added to that collection. The
list contains the values in the order under which they were
collected.
""" # pylint: disable=g-doc-exception
with self._lock:
collection = self._collections.get(name, None)
if collection is None:
return []
if scope is None:
return list(collection)
else:
c = []
regex = re.compile(scope)
for item in collection:
if hasattr(item, "name") and regex.match(item.name):
c.append(item)
return c
def get_all_collection_keys(self):
"""Returns a list of collections used in this graph."""
with self._lock:
return [x for x in self._collections if isinstance(x, six.string_types)]
def clear_collection(self, name):
"""Clears all values in a collection.
Args:
name: The key for the collection. The `GraphKeys` class contains many
standard names for collections.
"""
self._check_not_finalized()
with self._lock:
if name in self._collections:
del self._collections[name]
@tf_contextlib.contextmanager
def _original_op(self, op):
"""Python 'with' handler to help annotate ops with their originator.
An op may have an 'original_op' property that indicates the op on which
it was based. For example a replica op is based on the op that was
replicated and a gradient op is based on the op that was differentiated.
All ops created in the scope of this 'with' handler will have
the given 'op' as their original op.
Args:
op: The Operation that all ops created in this scope will have as their
original op.
Yields:
Nothing.
"""
old_original_op = self._default_original_op
self._default_original_op = op
try:
yield
finally:
self._default_original_op = old_original_op
  @property
  def _name_stack(self):
    """Current name-scope prefix for this thread (e.g. "outer/inner")."""
    # This may be called from a thread where name_stack doesn't yet exist,
    # so lazily initialize the thread-local slot to the empty (root) scope.
    if not hasattr(self._thread_local, "_name_stack"):
      self._thread_local._name_stack = ""
    return self._thread_local._name_stack

  @_name_stack.setter
  def _name_stack(self, name_stack):
    # Each thread keeps its own name stack; see name_scope().
    self._thread_local._name_stack = name_stack
  # pylint: disable=g-doc-return-or-yield,line-too-long
  @tf_contextlib.contextmanager
  def name_scope(self, name):
    """Returns a context manager that creates hierarchical names for operations.

    A graph maintains a stack of name scopes. A `with name_scope(...):`
    statement pushes a new name onto the stack for the lifetime of the context.

    The `name` argument will be interpreted as follows:

    * A string (not ending with '/') will create a new name scope, in which
      `name` is appended to the prefix of all operations created in the
      context. If `name` has been used before, it will be made unique by
      calling `self.unique_name(name)`.
    * A scope previously captured from a `with g.name_scope(...) as
      scope:` statement will be treated as an "absolute" name scope, which
      makes it possible to re-enter existing scopes.
    * A value of `None` or the empty string will reset the current name scope
      to the top-level (empty) name scope.

    For example:

    ```python
    with tf.Graph().as_default() as g:
      c = tf.constant(5.0, name="c")
      assert c.op.name == "c"
      c_1 = tf.constant(6.0, name="c")
      assert c_1.op.name == "c_1"

      # Creates a scope called "nested"
      with g.name_scope("nested") as scope:
        nested_c = tf.constant(10.0, name="c")
        assert nested_c.op.name == "nested/c"

        # Creates a nested scope called "inner".
        with g.name_scope("inner"):
          nested_inner_c = tf.constant(20.0, name="c")
          assert nested_inner_c.op.name == "nested/inner/c"

        # Create a nested scope called "inner_1".
        with g.name_scope("inner"):
          nested_inner_1_c = tf.constant(30.0, name="c")
          assert nested_inner_1_c.op.name == "nested/inner_1/c"

          # Treats `scope` as an absolute name scope, and
          # switches to the "nested/" scope.
          with g.name_scope(scope):
            nested_d = tf.constant(40.0, name="d")
            assert nested_d.op.name == "nested/d"

            with g.name_scope(""):
              e = tf.constant(50.0, name="e")
              assert e.op.name == "e"
    ```

    The name of the scope itself can be captured by `with
    g.name_scope(...) as scope:`, which stores the name of the scope
    in the variable `scope`. This value can be used to name an
    operation that represents the overall result of executing the ops
    in a scope. For example:

    ```python
    inputs = tf.constant(...)
    with g.name_scope('my_layer') as scope:
      weights = tf.Variable(..., name="weights")
      biases = tf.Variable(..., name="biases")
      affine = tf.matmul(inputs, weights) + biases
      output = tf.nn.relu(affine, name=scope)
    ```

    NOTE: This constructor validates the given `name`. Valid scope
    names match one of the following regular expressions:

        [A-Za-z0-9.][A-Za-z0-9_.\\-/]* (for scopes at the root)
        [A-Za-z0-9_.\\-/]* (for other scopes)

    Args:
      name: A name for the scope.

    Returns:
      A context manager that installs `name` as a new name scope.

    Raises:
      ValueError: If `name` is not a valid scope name, according to the rules
        above.
    """
    if name:
      if isinstance(name, compat.bytes_or_text_types):
        name = compat.as_str(name)
      # Validate against the appropriate regex for the current nesting level.
      if self._name_stack:
        # Scopes created in a nested scope may have initial characters
        # that are illegal as the initial character of an op name
        # (viz. '-', '\', '/', and '_').
        if not _VALID_SCOPE_NAME_REGEX.match(name):
          raise ValueError("'%s' is not a valid scope name" % name)
      else:
        # Scopes created in the root must match the more restrictive
        # op name regex, which constrains the initial character.
        if not _VALID_OP_NAME_REGEX.match(name):
          raise ValueError("'%s' is not a valid scope name" % name)
    old_stack = self._name_stack
    if not name:  # Both for name=None and name="" we re-set to empty scope.
      new_stack = None
    elif name[-1] == "/":
      # A trailing '/' marks an already-unique "absolute" scope; re-enter it.
      new_stack = name_from_scope_name(name)
    else:
      new_stack = self.unique_name(name)
    self._name_stack = new_stack
    try:
      # Yield the scope string with a trailing '/' so it round-trips as an
      # absolute scope, per the rules above.
      yield "" if new_stack is None else new_stack + "/"
    finally:
      # Restore the enclosing scope even if the body raised.
      self._name_stack = old_stack
  # pylint: enable=g-doc-return-or-yield,line-too-long
def unique_name(self, name, mark_as_used=True):
"""Return a unique operation name for `name`.
Note: You rarely need to call `unique_name()` directly. Most of
the time you just need to create `with g.name_scope()` blocks to
generate structured names.
`unique_name` is used to generate structured names, separated by
`"/"`, to help identify operations when debugging a graph.
Operation names are displayed in error messages reported by the
TensorFlow runtime, and in various visualization tools such as
TensorBoard.
If `mark_as_used` is set to `True`, which is the default, a new
unique name is created and marked as in use. If it's set to `False`,
the unique name is returned without actually being marked as used.
This is useful when the caller simply wants to know what the name
to be created will be.
Args:
name: The name for an operation.
mark_as_used: Whether to mark this name as being used.
Returns:
A string to be passed to `create_op()` that will be used
to name the operation being created.
"""
if self._name_stack:
name = self._name_stack + "/" + name
# For the sake of checking for names in use, we treat names as case
# insensitive (e.g. foo = Foo).
name_key = name.lower()
i = self._names_in_use.get(name_key, 0)
# Increment the number for "name_key".
if mark_as_used:
self._names_in_use[name_key] = i + 1
if i > 0:
base_name_key = name_key
# Make sure the composed name key is not already used.
while name_key in self._names_in_use:
name_key = "%s_%d" % (base_name_key, i)
i += 1
# Mark the composed name_key as used in case someone wants
# to call unique_name("name_1").
if mark_as_used:
self._names_in_use[name_key] = 1
# Return the new name with the original capitalization of the given name.
name = "%s_%d" % (name, i - 1)
return name
  def get_name_scope(self):
    """Returns the current name scope.

    For example:

    ```python
    with tf.name_scope('scope1'):
      with tf.name_scope('scope2'):
        print(tf.compat.v1.get_default_graph().get_name_scope())
    ```
    would print the string `scope1/scope2`.

    Returns:
      A string representing the current name scope.
    """
    # _name_stack is thread-local, so this reflects the calling thread's scope.
    return self._name_stack
  @tf_contextlib.contextmanager
  def _colocate_with_for_gradient(self, op, gradient_uid,
                                  ignore_existing=False):
    """Context manager that colocates created ops with `op` during gradients.

    Wraps `colocate_with(op, ignore_existing)`. When a `gradient_uid` is
    supplied and a control flow context is active, the context is additionally
    notified via EnterGradientColocation/ExitGradientColocation around the
    body.

    Args:
      op: The op to colocate newly created ops with.
      gradient_uid: Identifier passed to the control flow context, or None to
        skip the gradient-colocation notification.
      ignore_existing: Forwarded to `colocate_with`.

    Yields:
      Nothing.
    """
    with self.colocate_with(op, ignore_existing):
      if gradient_uid is not None and self._control_flow_context is not None:
        self._control_flow_context.EnterGradientColocation(op, gradient_uid)
        try:
          yield
        finally:
          # Always exit the gradient colocation, even if the body raised.
          self._control_flow_context.ExitGradientColocation(op, gradient_uid)
      else:
        yield
@tf_contextlib.contextmanager
def colocate_with(self, op, ignore_existing=False):
  """Returns a context manager that specifies an op to colocate with.

  Note: this function is not for public use, only for internal libraries.

  For example:

  ```python
  a = tf.Variable([1.0])
  with g.colocate_with(a):
    b = tf.constant(1.0)
    c = tf.add(a, b)
  ```

  `b` and `c` will always be colocated with `a`, no matter where `a`
  is eventually placed.

  **NOTE** Using a colocation scope resets any existing device constraints.

  If `op` is `None` then `ignore_existing` must be `True` and the new
  scope resets all colocation and device constraints.

  Args:
    op: The op to colocate all created ops with, or `None`.
    ignore_existing: If true, only applies colocation of this op within the
      context, rather than applying all colocation properties on the stack.
      If `op` is `None`, this value must be `True`.

  Raises:
    ValueError: if op is None but ignore_existing is False.

  Yields:
    A context manager that specifies the op with which to colocate
    newly created ops.
  """
  if op is None and not ignore_existing:
    raise ValueError("Trying to reset colocation (op is None) but "
                     "ignore_existing is not True")
  op = _op_to_colocate_with(op, self)

  # By default, colocate_with resets the device function stack,
  # since colocate_with is typically used in specific internal
  # library functions where colocation is intended to be "stronger"
  # than device functions.
  #
  # In the future, a caller may specify that device_functions win
  # over colocation, in which case we can add support.
  device_fn_tmp = self._device_function_stack
  self._device_function_stack = traceable_stack.TraceableStack()

  if ignore_existing:
    # Swap in a fresh colocation stack; the old one is restored on exit.
    current_stack = self._colocation_stack
    self._colocation_stack = traceable_stack.TraceableStack()

  if op is not None:
    # offset refers to the stack frame used for storing code location.
    # We use 4, the sum of 1 to use our caller's stack frame and 3
    # to jump over layers of context managers above us.
    self._colocation_stack.push_obj(op, offset=4)

  try:
    yield
  finally:
    # Restore device function stack
    self._device_function_stack = device_fn_tmp
    if op is not None:
      self._colocation_stack.pop_obj()

    # Reset the colocation stack if requested.
    if ignore_existing:
      self._colocation_stack = current_stack
def _add_device_to_stack(self, device_name_or_function, offset=0):
  """Push a device spec onto the stack without using a context manager.

  Args:
    device_name_or_function: A device name string, a device function, or
      None (see `device` for the semantics of each).
    offset: Extra stack frames to skip when recording the caller's code
      location.

  Returns:
    The `_UserDeviceSpec` that was pushed.
  """
  device_spec = _UserDeviceSpec(device_name_or_function)
  # +1 skips this frame so the recorded location is the caller's.
  self._device_function_stack.push_obj(device_spec, offset=offset + 1)
  return device_spec
@tf_contextlib.contextmanager
def device(self, device_name_or_function):
  # pylint: disable=line-too-long
  """Returns a context manager that specifies the default device to use.

  The `device_name_or_function` argument may either be a device name
  string, a device function, or None:

  * If it is a device name string, all operations constructed in
    this context will be assigned to the device with that name, unless
    overridden by a nested `device()` context.
  * If it is a function, it will be treated as a function from
    Operation objects to device name strings, and invoked each time
    a new Operation is created. The Operation will be assigned to
    the device with the returned name.
  * If it is None, all `device()` invocations from the enclosing context
    will be ignored.

  For information about the valid syntax of device name strings, see
  the documentation in
  [`DeviceNameUtils`](https://www.tensorflow.org/code/tensorflow/core/util/device_name_utils.h).

  For example:

  ```python
  with g.device('/device:GPU:0'):
    # All operations constructed in this context will be placed
    # on GPU 0.
    with g.device(None):
      # All operations constructed in this context will have no
      # assigned device.

  # Defines a function from `Operation` to device string.
  def matmul_on_gpu(n):
    if n.type == "MatMul":
      return "/device:GPU:0"
    else:
      return "/cpu:0"

  with g.device(matmul_on_gpu):
    # All operations of type "MatMul" constructed in this context
    # will be placed on GPU 0; all other operations will be placed
    # on CPU 0.
  ```

  **N.B.** The device scope may be overridden by op wrappers or
  other library code. For example, a variable assignment op
  `v.assign()` must be colocated with the `tf.Variable` `v`, and
  incompatible device scopes will be ignored.

  Args:
    device_name_or_function: The device name or function to use in the
      context.

  Yields:
    A context manager that specifies the default device to use for newly
    created ops.

  Raises:
    RuntimeError: If device scopes are not properly nested.
  """
  # offset=2 skips this frame plus the contextmanager wrapper so the recorded
  # code location points at the user's `with g.device(...)` statement.
  self._add_device_to_stack(device_name_or_function, offset=2)
  old_top_of_stack = self._device_function_stack.peek_top_obj()
  try:
    yield
  finally:
    # If the top of the stack is no longer the spec we pushed, an inner
    # scope leaked (exited out of order) — fail loudly rather than pop the
    # wrong entry.
    new_top_of_stack = self._device_function_stack.peek_top_obj()
    if old_top_of_stack is not new_top_of_stack:
      raise RuntimeError("Exiting device scope without proper scope nesting.")
    self._device_function_stack.pop_obj()
def _apply_device_functions(self, op):
  """Applies the current device function stack to the given operation.

  Args:
    op: The newly created Operation whose device will be set in place.
  """
  # Apply any device functions in LIFO order, so that the most recently
  # pushed function has the first chance to apply a device to the op.
  # We apply here because the result can depend on the Operation's
  # signature, which is computed in the Operation constructor.
  # pylint: disable=protected-access
  prior_device_string = None
  for device_spec in self._device_function_stack.peek_objs():
    if device_spec.is_null_merge:
      # Presumably a merge that cannot change the result — skipped as an
      # optimization (see _UserDeviceSpec for the exact criterion).
      continue

    if device_spec.function is None:
      # A `with tf.device(None):` entry: ignore everything pushed before it.
      break

    device_string = device_spec.string_merge(op)

    # Take advantage of the fact that None is a singleton and Python interns
    # strings, since identity checks are faster than equality checks.
    if device_string is not prior_device_string:
      op._set_device_from_string(device_string)
      prior_device_string = device_string
  op._device_code_locations = self._snapshot_device_function_stack_metadata()
  # pylint: enable=protected-access
# pylint: disable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def container(self, container_name):
  """Returns a context manager that specifies the resource container to use.

  Stateful operations, such as variables and queues, can maintain their
  states on devices so that they can be shared by multiple processes. A
  resource container is a string name under which these stateful operations
  are tracked; the resources in a container can be released or cleared with
  `tf.Session.reset()`. Nested `container()` scopes replace the name for
  their duration, and the empty string selects the default container:

  ```python
  with g.container('experiment0'):
    v1 = tf.Variable([1.0])  # tracked in "experiment0"
    with g.container('experiment1'):
      v3 = tf.Variable([3.0])  # tracked in "experiment1"
    v4 = tf.Variable([4.0])  # tracked in "experiment0" again
    with g.container(''):
      v5 = tf.Variable([5.0])  # tracked in the default container
  # tf.Session.reset(target, ["experiment0"]) invalidates v1 and v4.
  ```

  Args:
    container_name: container name string.

  Returns:
    A context manager for defining resource containers for stateful ops,
    yields the container name.
  """
  previous_container = self._container
  self._container = container_name
  try:
    yield self._container
  finally:
    # Always restore the enclosing scope's container, even on error.
    self._container = previous_container
# pylint: enable=g-doc-return-or-yield
class _ControlDependenciesController(object):
  """Context manager for `control_dependencies()`."""

  def __init__(self, graph, control_inputs):
    """Create a new `_ControlDependenciesController`.

    A `_ControlDependenciesController` is the context manager for
    `with tf.control_dependencies()` blocks.  These normally nest,
    as described in the documentation for `control_dependencies()`.

    The `control_inputs` argument list control dependencies that must be
    added to the current set of control dependencies.  Because of
    uniquification the set can be empty even if the caller passed a list of
    ops.  The special value `None` indicates that we want to start a new
    empty set of control dependencies instead of extending the current set.
    In that case we also clear the current control flow context, which is an
    additional mechanism to add control dependencies.

    Args:
      graph: The graph that this controller is managing.
      control_inputs: List of ops to use as control inputs in addition to
        the current control dependencies.  None to indicate that the
        dependencies should be cleared.
    """
    self._graph = graph
    if control_inputs is None:
      # "Clear" mode: contribute no inputs and swap in a fresh stack.
      self._control_inputs_val = []
      self._new_stack = True
    else:
      self._control_inputs_val = control_inputs
      self._new_stack = False
    # Ops created under this controller; used for dominance pruning in
    # Graph._control_dependencies_for_inputs.
    self._seen_nodes = set()
    self._old_stack = None
    self._old_control_flow_context = None

  # pylint: disable=protected-access

  def __enter__(self):
    if self._new_stack:
      # Clear the control_dependencies graph.
      self._old_stack = self._graph._control_dependencies_stack
      self._graph._control_dependencies_stack = []
      # Clear the control_flow_context too.
      self._old_control_flow_context = self._graph._get_control_flow_context()
      self._graph._set_control_flow_context(None)
    self._graph._push_control_dependencies_controller(self)

  def __exit__(self, unused_type, unused_value, unused_traceback):
    # Mirror of __enter__: pop first, then restore any cleared state.
    self._graph._pop_control_dependencies_controller(self)
    if self._new_stack:
      self._graph._control_dependencies_stack = self._old_stack
      self._graph._set_control_flow_context(self._old_control_flow_context)

  # pylint: enable=protected-access

  @property
  def control_inputs(self):
    # The (possibly empty) list of ops this controller contributes.
    return self._control_inputs_val

  def add_op(self, op):
    # Record an op created under this controller.
    self._seen_nodes.add(op)

  def op_in_group(self, op):
    # True iff `op` was created under this controller.
    return op in self._seen_nodes
def _push_control_dependencies_controller(self, controller):
self._control_dependencies_stack.append(controller)
def _pop_control_dependencies_controller(self, controller):
assert self._control_dependencies_stack[-1] is controller
self._control_dependencies_stack.pop()
def _current_control_dependencies(self):
ret = set()
for controller in self._control_dependencies_stack:
for op in controller.control_inputs:
ret.add(op)
return ret
def _control_dependencies_for_inputs(self, input_ops):
"""For an op that takes `input_ops` as inputs, compute control inputs.
The returned control dependencies should yield an execution that
is equivalent to adding all control inputs in
self._control_dependencies_stack to a newly created op. However,
this function attempts to prune the returned control dependencies
by observing that nodes created within the same `with
control_dependencies(...):` block may have data dependencies that make
the explicit approach redundant.
Args:
input_ops: The data input ops for an op to be created.
Returns:
A list of control inputs for the op to be created.
"""
ret = []
for controller in self._control_dependencies_stack:
# If any of the input_ops already depends on the inputs from controller,
# we say that the new op is dominated (by that input), and we therefore
# do not need to add control dependencies for this controller's inputs.
dominated = False
for op in input_ops:
if controller.op_in_group(op):
dominated = True
break
if not dominated:
# Don't add a control input if we already have a data dependency on i.
# NOTE(mrry): We do not currently track transitive data dependencies,
# so we may add redundant control inputs.
ret.extend([c for c in controller.control_inputs if c not in input_ops])
return ret
def _record_op_seen_by_control_dependencies(self, op):
"""Record that the given op depends on all registered control dependencies.
Args:
op: An Operation.
"""
for controller in self._control_dependencies_stack:
controller.add_op(op)
def control_dependencies(self, control_inputs):
  """Returns a context manager that specifies control dependencies.

  Use with the `with` keyword to specify that all operations constructed
  within the context should have control dependencies on
  `control_inputs`. For example:

  ```python
  with g.control_dependencies([a, b, c]):
    # `d` and `e` will only run after `a`, `b`, and `c` have executed.
    d = ...
    e = ...
  ```

  Multiple calls to `control_dependencies()` can be nested, and in
  that case a new `Operation` will have control dependencies on the union
  of `control_inputs` from all active contexts.

  ```python
  with g.control_dependencies([a, b]):
    # Ops constructed here run after `a` and `b`.
    with g.control_dependencies([c, d]):
      # Ops constructed here run after `a`, `b`, `c`, and `d`.
  ```

  You can pass None to clear the control dependencies:

  ```python
  with g.control_dependencies([a, b]):
    # Ops constructed here run after `a` and `b`.
    with g.control_dependencies(None):
      # Ops constructed here run normally, not waiting for either `a` or `b`.
      with g.control_dependencies([c, d]):
        # Ops constructed here run after `c` and `d`, also not waiting
        # for either `a` or `b`.
  ```

  *N.B.* The control dependencies context applies *only* to ops that
  are constructed within the context. Merely using an op or tensor
  in the context does not add a control dependency. The following
  example illustrates this point:

  ```python
  # WRONG
  def my_func(pred, tensor):
    t = tf.matmul(tensor, tensor)
    with tf.control_dependencies([pred]):
      # The matmul op is created outside the context, so no control
      # dependency will be added.
      return t

  # RIGHT
  def my_func(pred, tensor):
    with tf.control_dependencies([pred]):
      # The matmul op is created in the context, so a control dependency
      # will be added.
      return tf.matmul(tensor, tensor)
  ```

  Also note that though execution of ops created under this scope will
  trigger execution of the dependencies, the ops created under this scope
  might still be pruned from a normal tensorflow graph. For example, in the
  following snippet of code the dependencies are never executed:

  ```python
  loss = model.loss()
  with tf.control_dependencies(dependencies):
    loss = loss + tf.constant(1)  # note: dependencies ignored in the
                                  # backward pass
  return tf.gradients(loss, model.variables)
  ```

  This is because evaluating the gradient graph does not require evaluating
  the constant(1) op created in the forward pass.

  Args:
    control_inputs: A list of `Operation` or `Tensor` objects which must be
      executed or computed before running the operations defined in the
      context. Can also be `None` to clear the control dependencies.

  Returns:
    A context manager that specifies control dependencies for all
    operations constructed within the context.

  Raises:
    TypeError: If `control_inputs` is not a list of `Operation` or
      `Tensor` objects.
  """
  if control_inputs is None:
    # `None` starts a fresh, empty control-dependency scope.
    return self._ControlDependenciesController(self, None)
  # First convert the inputs to ops, and deduplicate them.
  # NOTE(mrry): Other than deduplication, we do not currently track direct
  # or indirect dependencies between control_inputs, which may result in
  # redundant control inputs.
  control_ops = []
  current = self._current_control_dependencies()
  for c in control_inputs:
    # The hasattr(handle) is designed to match ResourceVariables. This is so
    # control dependencies on a variable or on an unread variable don't
    # trigger reads.
    if (isinstance(c, IndexedSlices) or
        (hasattr(c, "_handle") and hasattr(c, "op"))):
      c = c.op
    c = self.as_graph_element(c)
    if isinstance(c, Tensor):
      c = c.op
    elif not isinstance(c, Operation):
      raise TypeError("Control input must be Operation or Tensor: %s" % c)
    # Skip anything already implied by an enclosing scope.
    if c not in current:
      control_ops.append(c)
      current.add(c)
  return self._ControlDependenciesController(self, control_ops)
# pylint: disable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def _attr_scope(self, attr_map):
  """EXPERIMENTAL: A context manager for setting attributes on operators.

  This context manager can be used to add additional
  attributes to operators within the scope of the context.

  For example:

     with ops.Graph().as_default() as g:
       f_1 = Foo()  # No extra attributes
       with g._attr_scope({"_a": tf.attr_value_pb2.AttrValue(b=False)}):
         f_2 = Foo()  # Additional attribute _a=False
         with g._attr_scope({"_a": tf.attr_value_pb2.AttrValue(b=True)}):
           f_3 = Foo()  # Additional attribute _a=True
           with g._attr_scope({"_a": None}):
             f_4 = Foo()  # No additional attributes.

  Args:
    attr_map: A dictionary mapping attr name strings to AttrValue protocol
      buffers or None.

  Returns:
    A context manager that sets the additional attributes to be applied to
    ops created in that context.

  Raises:
    TypeError: If attr_map is not a dictionary mapping
      strings to AttrValue protobufs.
  """
  if not isinstance(attr_map, dict):
    raise TypeError("attr_map must be a dictionary mapping "
                    "strings to AttrValue protocol buffers")
  # The saved_attrs dictionary stores any currently-set labels that
  # will be overridden by this context manager.
  saved_attrs = {}
  # Install the given attribute
  for name, attr in attr_map.items():
    if not (isinstance(name, six.string_types) and
            (isinstance(attr, (type(None), attr_value_pb2.AttrValue)) or
             callable(attr))):
      raise TypeError("attr_map must be a dictionary mapping "
                      "strings to AttrValue protocol buffers or "
                      "callables that emit AttrValue protocol buffers")
    try:
      saved_attrs[name] = self._attr_scope_map[name]
    except KeyError:
      pass
    if attr is None:
      # NOTE(review): raises KeyError if `name` is not currently set —
      # callers appear to pass None only to mask an enclosing scope's value.
      del self._attr_scope_map[name]
    else:
      self._attr_scope_map[name] = attr
  try:
    yield  # The code within the context runs here.
  finally:
    # Remove the attributes set for this context, and restore any saved
    # attributes.
    for name, attr in attr_map.items():
      try:
        self._attr_scope_map[name] = saved_attrs[name]
      except KeyError:
        del self._attr_scope_map[name]
# pylint: enable=g-doc-return-or-yield
# pylint: disable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def _kernel_label_map(self, op_to_kernel_label_map):
  """EXPERIMENTAL: A context manager for setting kernel labels.

  This context manager can be used to select particular
  implementations of kernels within the scope of the context.

  For example:

      with ops.Graph().as_default() as g:
        f_1 = Foo()  # Uses the default registered kernel for the Foo op.
        with g.kernel_label_map({"Foo": "v_2"}):
          f_2 = Foo()  # Uses the registered kernel with label "v_2"
                       # for the Foo op.
          with g.kernel_label_map({"Foo": "v_3"}):
            f_3 = Foo()  # Uses the registered kernel with label "v_3"
                         # for the Foo op.
            with g.kernel_label_map({"Foo": ""}):
              f_4 = Foo()  # Uses the default registered kernel
                           # for the Foo op.

  Args:
    op_to_kernel_label_map: A dictionary mapping op type strings to kernel
      label strings.

  Returns:
    A context manager that sets the kernel label to be used for one or more
    ops created in that context.

  Raises:
    TypeError: If op_to_kernel_label_map is not a dictionary mapping
      strings to strings.
  """
  if not isinstance(op_to_kernel_label_map, dict):
    raise TypeError("op_to_kernel_label_map must be a dictionary mapping "
                    "strings to strings")
  # The saved_labels dictionary stores any currently-set labels that
  # will be overridden by this context manager.
  saved_labels = {}
  # Install the given label
  for op_type, label in op_to_kernel_label_map.items():
    if not (isinstance(op_type, six.string_types) and
            isinstance(label, six.string_types)):
      raise TypeError("op_to_kernel_label_map must be a dictionary mapping "
                      "strings to strings")
    try:
      # Remember any label this context shadows so it can be restored.
      saved_labels[op_type] = self._op_to_kernel_label_map[op_type]
    except KeyError:
      pass
    self._op_to_kernel_label_map[op_type] = label
  try:
    yield  # The code within the context runs here.
  finally:
    # Remove the labels set for this context, and restore any saved labels.
    for op_type, label in op_to_kernel_label_map.items():
      try:
        self._op_to_kernel_label_map[op_type] = saved_labels[op_type]
      except KeyError:
        del self._op_to_kernel_label_map[op_type]
# pylint: enable=g-doc-return-or-yield
# pylint: disable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def gradient_override_map(self, op_type_map):
  """EXPERIMENTAL: A context manager for overriding gradient functions.

  This context manager can be used to override the gradient function
  that will be used for ops within the scope of the context.

  For example:

  ```python
  @tf.RegisterGradient("CustomSquare")
  def _custom_square_grad(op, grad):
    # ...

  with tf.Graph().as_default() as g:
    c = tf.constant(5.0)
    s_1 = tf.square(c)  # Uses the default gradient for tf.square.
    with g.gradient_override_map({"Square": "CustomSquare"}):
      s_2 = tf.square(c)  # Uses _custom_square_grad to compute the
                          # gradient of s_2.
  ```

  Args:
    op_type_map: A dictionary mapping op type strings to alternative op type
      strings.

  Returns:
    A context manager that sets the alternative op type to be used for one
    or more ops created in that context.

  Raises:
    TypeError: If `op_type_map` is not a dictionary mapping strings to
      strings.
  """
  if not isinstance(op_type_map, dict):
    raise TypeError("op_type_map must be a dictionary mapping "
                    "strings to strings")
  # The saved_mappings dictionary stores any currently-set mappings that
  # will be overridden by this context manager.
  saved_mappings = {}
  # Install the given label
  for op_type, mapped_op_type in op_type_map.items():
    if not (isinstance(op_type, six.string_types) and
            isinstance(mapped_op_type, six.string_types)):
      raise TypeError("op_type_map must be a dictionary mapping "
                      "strings to strings")
    try:
      # Remember any mapping this context shadows so it can be restored.
      saved_mappings[op_type] = self._gradient_override_map[op_type]
    except KeyError:
      pass
    self._gradient_override_map[op_type] = mapped_op_type
  try:
    yield  # The code within the context runs here.
  finally:
    # Remove the labels set for this context, and restore any saved labels.
    for op_type, mapped_op_type in op_type_map.items():
      try:
        self._gradient_override_map[op_type] = saved_mappings[op_type]
      except KeyError:
        del self._gradient_override_map[op_type]
# pylint: enable=g-doc-return-or-yield
def prevent_feeding(self, tensor):
  """Mark `tensor` as unfeedable in this graph."""
  self._unfeedable_tensors.add(tensor)
def is_feedable(self, tensor):
  """Return True if and only if `tensor` is feedable."""
  return tensor not in self._unfeedable_tensors
def prevent_fetching(self, op):
  """Mark `op` as unfetchable in this graph."""
  self._unfetchable_ops.add(op)
def is_fetchable(self, tensor_or_op):
  """Return True if and only if `tensor_or_op` is fetchable."""
  # A Tensor is fetchable exactly when its producing op is.
  target = tensor_or_op.op if isinstance(tensor_or_op, Tensor) else tensor_or_op
  return target not in self._unfetchable_ops
def switch_to_thread_local(self):
  """Make device, colocation and dependencies stacks thread-local.

  Device, colocation and dependencies stacks are not thread-local by
  default: if multiple threads access them, the state is shared and one
  thread may affect the behavior of another.

  After this method is called, the stacks become thread-local. Each thread
  then uses its own value, seeded from the current graph-wide state the
  first time that thread touches a stack (see the `_*_stack` properties).
  """
  # Idempotent: once thread-local, a graph stays thread-local.
  self._stack_state_is_thread_local = True
@property
def _device_function_stack(self):
if self._stack_state_is_thread_local:
# This may be called from a thread where device_function_stack doesn't yet
# exist.
# pylint: disable=protected-access
if not hasattr(self._thread_local, "_device_function_stack"):
stack_copy_for_this_thread = self._graph_device_function_stack.copy()
self._thread_local._device_function_stack = stack_copy_for_this_thread
return self._thread_local._device_function_stack
# pylint: enable=protected-access
else:
return self._graph_device_function_stack
@property
def _device_functions_outer_to_inner(self):
user_device_specs = self._device_function_stack.peek_objs()
device_functions = [spec.function for spec in user_device_specs]
device_functions_outer_to_inner = list(reversed(device_functions))
return device_functions_outer_to_inner
def _snapshot_device_function_stack_metadata(self):
"""Return device function stack as a list of TraceableObjects.
Returns:
[traceable_stack.TraceableObject, ...] where each TraceableObject's .obj
member is a displayable name for the user's argument to Graph.device, and
the filename and lineno members point to the code location where
Graph.device was called directly or indirectly by the user.
"""
snapshot = []
for obj in self._device_function_stack.peek_traceable_objs():
obj_copy = obj.copy_metadata()
obj_copy.obj = obj.obj.display_name
snapshot.append(obj_copy)
return snapshot
@_device_function_stack.setter
def _device_function_stack(self, device_function_stack):
  """Replace the device function stack for this thread (or the graph)."""
  if not self._stack_state_is_thread_local:
    self._graph_device_function_stack = device_function_stack
  else:
    # pylint: disable=protected-access
    self._thread_local._device_function_stack = device_function_stack
    # pylint: enable=protected-access
@property
def _colocation_stack(self):
"""Return thread-local copy of colocation stack."""
if self._stack_state_is_thread_local:
# This may be called from a thread where colocation_stack doesn't yet
# exist.
# pylint: disable=protected-access
if not hasattr(self._thread_local, "_colocation_stack"):
stack_copy_for_this_thread = self._graph_colocation_stack.copy()
self._thread_local._colocation_stack = stack_copy_for_this_thread
return self._thread_local._colocation_stack
# pylint: enable=protected-access
else:
return self._graph_colocation_stack
def _snapshot_colocation_stack_metadata(self):
"""Return colocation stack metadata as a dictionary."""
return {
traceable_obj.obj.name: traceable_obj.copy_metadata()
for traceable_obj in self._colocation_stack.peek_traceable_objs()
}
@_colocation_stack.setter
def _colocation_stack(self, colocation_stack):
  """Replace the colocation stack for this thread (or the graph)."""
  if not self._stack_state_is_thread_local:
    self._graph_colocation_stack = colocation_stack
  else:
    # pylint: disable=protected-access
    self._thread_local._colocation_stack = colocation_stack
    # pylint: enable=protected-access
@property
def _control_dependencies_stack(self):
if self._stack_state_is_thread_local:
# This may be called from a thread where control_dependencies_stack
# doesn't yet exist.
if not hasattr(self._thread_local, "_control_dependencies_stack"):
self._thread_local._control_dependencies_stack = (
self._graph_control_dependencies_stack[:])
return self._thread_local._control_dependencies_stack
else:
return self._graph_control_dependencies_stack
@_control_dependencies_stack.setter
def _control_dependencies_stack(self, control_dependencies):
  """Replace the control-dependencies stack for this thread (or the graph)."""
  if not self._stack_state_is_thread_local:
    self._graph_control_dependencies_stack = control_dependencies
  else:
    self._thread_local._control_dependencies_stack = control_dependencies
@property
def _distribution_strategy_stack(self):
"""A stack to maintain distribution strategy context for each thread."""
if not hasattr(self._thread_local, "_distribution_strategy_stack"):
self._thread_local._distribution_strategy_stack = [] # pylint: disable=protected-access
return self._thread_local._distribution_strategy_stack # pylint: disable=protected-access
@_distribution_strategy_stack.setter
def _distribution_strategy_stack(self, _distribution_strategy_stack):
  """Replace this thread's distribution strategy stack."""
  # pylint: disable=protected-access
  self._thread_local._distribution_strategy_stack = (
      _distribution_strategy_stack)
  # pylint: enable=protected-access
@property
def _global_distribute_strategy_scope(self):
"""For implementing `tf.distribute.set_strategy()`."""
if not hasattr(self._thread_local, "distribute_strategy_scope"):
self._thread_local.distribute_strategy_scope = None
return self._thread_local.distribute_strategy_scope
@_global_distribute_strategy_scope.setter
def _global_distribute_strategy_scope(self, distribute_strategy_scope):
  """Set this thread's global distribute-strategy scope."""
  self._thread_local.distribute_strategy_scope = distribute_strategy_scope
@property
def _auto_cast_variable_read_dtype(self):
"""The dtype that instances of `AutoCastVariable` will be casted to.
This is None if `AutoCastVariables` should not be casted.
See `AutoCastVariable` for more information.
Returns:
The dtype that instances of `AutoCastVariable` will be casted to.
"""
if not hasattr(self._thread_local, "_auto_cast_variable_read_dtype"):
self._thread_local._auto_cast_variable_read_dtype = None # pylint: disable=protected-access
return self._thread_local._auto_cast_variable_read_dtype # pylint: disable=protected-access
@_auto_cast_variable_read_dtype.setter
def _auto_cast_variable_read_dtype(self, dtype):
  """Set the read dtype, canonicalizing truthy values via `dtypes.as_dtype`."""
  if dtype:
    dtype = dtypes.as_dtype(dtype)
  # pylint: disable=protected-access
  self._thread_local._auto_cast_variable_read_dtype = dtype
  # pylint: enable=protected-access
@tf_contextlib.contextmanager
def _enable_auto_casting_variables(self, dtype):
  """Context manager under which AutoCastVariables read as `dtype`.

  If an AutoCastVariable `var` is used under this context manager, it will
  be casted to `dtype` before being used. See `AutoCastVariable` for more
  information.

  Args:
    dtype: The dtype that AutoCastVariables should be casted to.

  Yields:
    Nothing.
  """
  saved_dtype = self._auto_cast_variable_read_dtype
  try:
    self._auto_cast_variable_read_dtype = dtype
    yield
  finally:
    # Restore the enclosing scope's dtype even on error.
    self._auto_cast_variable_read_dtype = saved_dtype
def _mutation_lock(self):
  """Return a lock guarding code that creates and mutates ops.

  See the comment for self._group_lock for more info.
  """
  return self._group_lock.group(_MUTATION_LOCK_GROUP)
def _session_run_lock(self):
  """Return a lock guarding code for Session.run.

  See the comment for self._group_lock for more info.
  """
  return self._group_lock.group(_SESSION_RUN_LOCK_GROUP)
# TODO(agarwal): currently device directives in an outer eager scope will not
# apply to inner graph mode code. Fix that.
@tf_export(v1=["device"])
def device(device_name_or_function):
  """Wrapper for `Graph.device()` using the default graph.

  See `tf.Graph.device` for more details.

  Args:
    device_name_or_function: The device name or function to use in the
      context.

  Returns:
    A context manager that specifies the default device to use for newly
    created ops.

  Raises:
    RuntimeError: If eager execution is enabled and a function is passed in.
  """
  if context.executing_eagerly():
    # Pure eager mode: only device-name strings are supported.
    if callable(device_name_or_function):
      raise RuntimeError(
          "tf.device does not support functions when eager execution "
          "is enabled.")
    return context.device(device_name_or_function)
  elif executing_eagerly_outside_functions():
    # Graph mode inside an eager program (e.g. tf.function tracing): apply
    # the scope to both the default graph and, for plain name strings, the
    # eager context (a device function cannot be applied eagerly).

    @tf_contextlib.contextmanager
    def combined(device_name_or_function):
      with get_default_graph().device(device_name_or_function):
        if not callable(device_name_or_function):
          with context.device(device_name_or_function):
            yield
        else:
          yield

    return combined(device_name_or_function)
  else:
    # Plain graph mode.
    return get_default_graph().device(device_name_or_function)
@tf_export("device", v1=[])
def device_v2(device_name):
  """Specifies the device for ops created/executed in this context.

  `device_name` can be fully specified, as in
  "/job:worker/task:1/device:cpu:0", or partially specified, containing only
  a subset of the "/"-separated fields. Any fields which are specified
  override device annotations from outer scopes. For example:

  ```python
  with tf.device('/job:foo'):
    # ops created here have devices with /job:foo
    with tf.device('/job:bar/task:0/device:gpu:2'):
      # ops created here have the fully specified device above
    with tf.device('/device:gpu:1'):
      # ops created here have the device '/job:foo/device:gpu:1'
  ```

  Args:
    device_name: The device name to use in the context.

  Returns:
    A context manager that specifies the default device to use for newly
    created ops.

  Raises:
    RuntimeError: If a function is passed in.
  """
  # Unlike the v1 wrapper, device functions are never accepted here.
  if callable(device_name):
    raise RuntimeError("tf.device does not support functions.")
  return device(device_name)
@tf_export(v1=["container"])
def container(container_name):
  """Wrapper for `Graph.container()` using the default graph.

  Args:
    container_name: The container string to use in the context.

  Returns:
    A context manager that specifies the default container to use for newly
    created stateful ops.
  """
  return get_default_graph().container(container_name)
def _colocate_with_for_gradient(op, gradient_uid, ignore_existing=False):
  """Returns a context manager that colocates newly created ops with `op`.

  Used by the gradient machinery so that gradient ops are placed with the ops
  they differentiate.

  Args:
    op: The op (or a value convertible to a tensor) to colocate with; may be
      None, in which case a no-op context manager is returned in eager mode.
    gradient_uid: Identifier of the gradient computation being built; forwarded
      to the graph's internal colocation handler.
    ignore_existing: If True, existing colocation constraints are ignored.
  """
  if context.executing_eagerly():
    # Eager mode has no graph-level colocation constraints; the best we can do
    # is pin new ops to the same device as `op`.
    if op is not None:
      if not hasattr(op, "device"):
        op = internal_convert_to_tensor_or_indexed_slices(op)
      return device(op.device)
    else:
      return NullContextmanager()
  else:
    default_graph = get_default_graph()
    if isinstance(op, EagerTensor):
      # An eager tensor can only show up here while tracing a function.
      if default_graph.building_function:
        return default_graph.device(op.device)
      else:
        raise ValueError("Encountered an Eager-defined Tensor during graph "
                         "construction, but a function was not being built.")
    return default_graph._colocate_with_for_gradient(
        op, gradient_uid=gradient_uid, ignore_existing=ignore_existing)
# Internal interface to colocate_with. colocate_with has been deprecated from
# public API. There are still a few internal uses of colocate_with. Add internal
# only API for those uses to avoid deprecation warning.
def colocate_with(op, ignore_existing=False):
  """Internal-only colocation helper; delegates to the gradient variant."""
  return _colocate_with_for_gradient(
      op, gradient_uid=None, ignore_existing=ignore_existing)
@deprecation.deprecated(
    date=None, instructions="Colocations handled automatically by placer.")
@tf_export(v1=["colocate_with"])
def _colocate_with(op, ignore_existing=False):
  """Deprecated public endpoint; forwards to the internal `colocate_with`."""
  return colocate_with(op, ignore_existing=ignore_existing)
@tf_export("control_dependencies")
def control_dependencies(control_inputs):
  """Wrapper for `Graph.control_dependencies()` using the default graph.

  See `tf.Graph.control_dependencies`
  for more details.

  When eager execution is enabled, any callable object in the `control_inputs`
  list will be called.

  Args:
    control_inputs: A list of `Operation` or `Tensor` objects which must be
      executed or computed before running the operations defined in the context.
      Can also be `None` to clear the control dependencies. If eager execution
      is enabled, any callable object in the `control_inputs` list will be
      called.

  Returns:
    A context manager that specifies control dependencies for all
    operations constructed within the context.
  """
  if context.executing_eagerly():
    if control_inputs:
      # Execute any pending callables.
      for control in control_inputs:
        if callable(control):
          control()
    # Eager execution runs each op as it is created, so there is no ordering
    # to enforce: return a no-op context manager.
    return NullContextmanager()
  else:
    return get_default_graph().control_dependencies(control_inputs)
class _DefaultStack(threading.local):
  """A thread-local stack of objects for providing implicit defaults."""

  def __init__(self):
    super(_DefaultStack, self).__init__()
    # When True, get_controller asserts strict LIFO nesting on exit.
    self._enforce_nesting = True
    self.stack = []

  def get_default(self):
    """Returns the innermost (topmost) default, or None if the stack is empty."""
    return self.stack[-1] if len(self.stack) >= 1 else None

  def reset(self):
    """Discards every installed default."""
    self.stack = []

  def is_cleared(self):
    """Returns True if no default is currently installed."""
    return not self.stack

  @property
  def enforce_nesting(self):
    """Whether exiting get_controller requires strict LIFO nesting."""
    return self._enforce_nesting

  @enforce_nesting.setter
  def enforce_nesting(self, value):
    self._enforce_nesting = value

  @tf_contextlib.contextmanager
  def get_controller(self, default):
    """A context manager for manipulating a default stack."""
    self.stack.append(default)
    try:
      yield default
    finally:
      # stack may be empty if reset() was called
      if self.stack:
        if self._enforce_nesting:
          if self.stack[-1] is not default:
            raise AssertionError(
                "Nesting violated for default stack of %s objects" %
                type(default))
          self.stack.pop()
        else:
          # Non-nested mode: remove this default wherever it sits.
          self.stack.remove(default)
# Thread-local stack of sessions installed via `default_session()`; read back
# by `get_default_session()`.
_default_session_stack = _DefaultStack()  # pylint: disable=protected-access
def default_session(session):
  """Python "with" handler for defining a default session.

  This function provides a means of registering a session for handling
  Tensor.eval() and Operation.run() calls. It is primarily intended for use
  by session.Session, but can be used with any object that implements
  the Session.run() interface.

  Use with the "with" keyword to specify that Tensor.eval() and Operation.run()
  invocations within the scope of a block should be executed by a particular
  session.

  The default session applies to the current thread only, so it is always
  possible to inspect the call stack and determine the scope of a default
  session. If you create a new thread, and wish to use the default session
  in that thread, you must explicitly add a "with ops.default_session(sess):"
  block in that thread's function.

  Example:
    The following code examples are equivalent:

    # 1. Using the Session object directly:
    sess = ...
    c = tf.constant(5.0)
    sess.run(c)

    # 2. Using default_session():
    sess = ...
    with ops.default_session(sess):
      c = tf.constant(5.0)
      result = c.eval()

    # 3. Overriding default_session():
    sess = ...
    with ops.default_session(sess):
      c = tf.constant(5.0)
      with ops.default_session(...):
        c.eval(session=sess)

  Args:
    session: The session to be installed as the default session.

  Returns:
    A context manager for the default session.
  """
  return _default_session_stack.get_controller(session)
@tf_export(v1=["get_default_session"])
def get_default_session():
  """Returns the default session for the current thread.

  The returned `Session` will be the innermost session on which a
  `Session` or `Session.as_default()` context has been entered.

  NOTE: The default session is a property of the current thread. If you
  create a new thread, and wish to use the default session in that
  thread, you must explicitly add a `with sess.as_default():` in that
  thread's function.

  Returns:
    The default `Session` being used in the current thread.
  """
  return _default_session_stack.get_default()
def _eval_using_default_session(tensors, feed_dict, graph, session=None):
  """Uses the default session to evaluate one or more tensors.

  Args:
    tensors: A single Tensor, or a list of Tensor objects.
    feed_dict: A dictionary that maps Tensor objects (or tensor names) to lists,
      numpy ndarrays, TensorProtos, or strings.
    graph: The graph in which the tensors are defined.
    session: (Optional) A different session to use to evaluate "tensors".

  Returns:
    Either a single numpy ndarray if "tensors" is a single tensor; or a list
    of numpy ndarrays that each correspond to the respective element in
    "tensors".

  Raises:
    ValueError: If no default session is available; the default session
      does not have "graph" as its graph; or if "session" is specified,
      and it does not have "graph" as its graph.
  """
  if session is None:
    # Fall back to the thread's default session; the error messages below are
    # tailored to this implicit-session case.
    session = get_default_session()
    if session is None:
      raise ValueError("Cannot evaluate tensor using `eval()`: No default "
                       "session is registered. Use `with "
                       "sess.as_default()` or pass an explicit session to "
                       "`eval(session=sess)`")
    if session.graph is not graph:
      raise ValueError("Cannot use the default session to evaluate tensor: "
                       "the tensor's graph is different from the session's "
                       "graph. Pass an explicit session to "
                       "`eval(session=sess)`.")
  else:
    # An explicit session was supplied; it must still own the tensor's graph.
    if session.graph is not graph:
      raise ValueError("Cannot use the given session to evaluate tensor: "
                       "the tensor's graph is different from the session's "
                       "graph.")
  return session.run(tensors, feed_dict)
def _run_using_default_session(operation, feed_dict, graph, session=None):
  """Uses the default session to run "operation".

  Args:
    operation: The Operation to be run.
    feed_dict: A dictionary that maps Tensor objects (or tensor names) to lists,
      numpy ndarrays, TensorProtos, or strings.
    graph: The graph in which "operation" is defined.
    session: (Optional) A different session to use to run "operation".

  Raises:
    ValueError: If no default session is available; the default session
      does not have "graph" as its graph; or if "session" is specified,
      and it does not have "graph" as its graph.
  """
  if session is None:
    # Fall back to the thread's default session; the error messages below are
    # tailored to this implicit-session case.
    session = get_default_session()
    if session is None:
      raise ValueError("Cannot execute operation using `run()`: No default "
                       "session is registered. Use `with "
                       "sess.as_default():` or pass an explicit session to "
                       "`run(session=sess)`")
    if session.graph is not graph:
      raise ValueError("Cannot use the default session to execute operation: "
                       "the operation's graph is different from the "
                       "session's graph. Pass an explicit session to "
                       "run(session=sess).")
  else:
    # An explicit session was supplied; it must still own the operation's graph.
    if session.graph is not graph:
      raise ValueError("Cannot use the given session to execute operation: "
                       "the operation's graph is different from the session's "
                       "graph.")
  # Note: unlike _eval_using_default_session, the result is discarded.
  session.run(operation, feed_dict)
class _DefaultGraphStack(_DefaultStack):  # pylint: disable=protected-access
  """A thread-local stack of objects for providing an implicit default graph."""

  def __init__(self):
    super(_DefaultGraphStack, self).__init__()
    # Lazily created, process-wide fallback graph used when the stack is empty.
    self._global_default_graph = None

  def get_default(self):
    """Override that returns a global default if the stack is empty."""
    ret = super(_DefaultGraphStack, self).get_default()
    if ret is None:
      ret = self._GetGlobalDefaultGraph()
    return ret

  def _GetGlobalDefaultGraph(self):
    if self._global_default_graph is None:
      # TODO(mrry): Perhaps log that the default graph is being used, or
      # provide some other feedback to prevent confusion when a mixture of
      # the global default graph and an explicit graph are combined in the
      # same process.
      self._global_default_graph = Graph()
    return self._global_default_graph

  def reset(self):
    """Clears the stack and discards the lazily created global default graph."""
    super(_DefaultGraphStack, self).reset()
    self._global_default_graph = None

  @tf_contextlib.contextmanager
  def get_controller(self, default):
    # Record the switch in the eager context so helpers such as
    # _get_outer_context_and_inner_device_stack can later locate the outermost
    # non-function-building context.
    context.context().context_switches.push(default.building_function,
                                            default.as_default,
                                            default._device_function_stack)
    try:
      with super(_DefaultGraphStack,
                 self).get_controller(default) as g, context.graph_mode():
        yield g
    finally:
      # If an exception is raised here it may be hiding a related exception in
      # the try-block (just above).
      context.context().context_switches.pop()
# Thread-local stack of default graphs; the process-wide global default graph
# is created lazily by _DefaultGraphStack._GetGlobalDefaultGraph().
_default_graph_stack = _DefaultGraphStack()
# Shared helper used in init_scope and executing_eagerly_outside_functions
# to obtain the outermost context that is not building a function, and the
# innermost non empty device stack.
def _get_outer_context_and_inner_device_stack():
  """Get the outermost context not building a function.

  Returns:
    A tuple `(outer_context, innermost_nonempty_device_stack)` where
    `outer_context` is a callable that enters the outermost context that is
    not building a function, and `innermost_nonempty_device_stack` is the
    first non-empty device function stack found while walking the recorded
    context switches from innermost to outermost.
  """
  default_graph = get_default_graph()
  outer_context = None
  innermost_nonempty_device_stack = default_graph._device_function_stack  # pylint: disable=protected-access

  if not _default_graph_stack.stack:
    # If the default graph stack is empty, then we cannot be building a
    # function. Install the global graph (which, in this case, is also the
    # default graph) as the outer context.
    if default_graph.building_function:
      raise RuntimeError("The global graph is building a function.")
    outer_context = default_graph.as_default
  else:
    # Find a context that is not building a function.
    for stack_entry in reversed(context.context().context_switches.stack):
      if not innermost_nonempty_device_stack:
        innermost_nonempty_device_stack = stack_entry.device_stack
      if not stack_entry.is_building_function:
        outer_context = stack_entry.enter_context_fn
        break

    if outer_context is None:
      # As a last resort, obtain the global default graph; this graph doesn't
      # necessarily live on the graph stack (and hence it doesn't necessarily
      # live on the context stack), but it is stored in the graph stack's
      # encapsulating object.
      outer_context = _default_graph_stack._GetGlobalDefaultGraph().as_default  # pylint: disable=protected-access

  if outer_context is None:
    # Sanity check; this shouldn't be triggered.
    raise RuntimeError("All graphs are building functions, and no "
                       "eager context was previously active.")

  return outer_context, innermost_nonempty_device_stack
# pylint: disable=g-doc-return-or-yield,line-too-long
@tf_export("init_scope")
@tf_contextlib.contextmanager
def init_scope():
  """A context manager that lifts ops out of control-flow scopes and function-building graphs.

  There is often a need to lift variable initialization ops out of control-flow
  scopes, function-building graphs, and gradient tapes. Entering an
  `init_scope` is a mechanism for satisfying these desiderata. In particular,
  entering an `init_scope` has three effects:

    (1) All control dependencies are cleared the moment the scope is entered;
        this is equivalent to entering the context manager returned from
        `control_dependencies(None)`, which has the side-effect of exiting
        control-flow scopes like `tf.cond` and `tf.while_loop`.

    (2) All operations that are created while the scope is active are lifted
        into the lowest context on the `context_stack` that is not building a
        graph function. Here, a context is defined as either a graph or an eager
        context. Every context switch, i.e., every installation of a graph as
        the default graph and every switch into eager mode, is logged in a
        thread-local stack called `context_switches`; the log entry for a
        context switch is popped from the stack when the context is exited.
        Entering an `init_scope` is equivalent to crawling up
        `context_switches`, finding the first context that is not building a
        graph function, and entering it. A caveat is that if graph mode is
        enabled but the default graph stack is empty, then entering an
        `init_scope` will simply install a fresh graph as the default one.

    (3) The gradient tape is paused while the scope is active.

  When eager execution is enabled, code inside an init_scope block runs with
  eager execution enabled even when defining graph functions via
  tf.contrib.eager.defun. For example:

  ```python
  tf.compat.v1.enable_eager_execution()

  @tf.contrib.eager.defun
  def func():
    # A defun-decorated function constructs TensorFlow graphs,
    # it does not execute eagerly.
    assert not tf.executing_eagerly()
    with tf.init_scope():
      # Initialization runs with eager execution enabled
      assert tf.executing_eagerly()
  ```

  Raises:
    RuntimeError: if graph state is incompatible with this initialization.
  """
  # pylint: enable=g-doc-return-or-yield,line-too-long

  if context.executing_eagerly():
    # Fastpath.
    with tape.stop_recording():
      yield
  else:
    # Retrieve the active name scope: entering an `init_scope` preserves
    # the name scope of the current context.
    scope = get_default_graph().get_name_scope()
    if scope and scope[-1] != "/":
      # Names that end with trailing slashes are treated by `name_scope` as
      # absolute.
      scope = scope + "/"

    outer_context, innermost_nonempty_device_stack = (
        _get_outer_context_and_inner_device_stack())

    outer_graph = None
    outer_device_stack = None
    try:
      with outer_context(), name_scope(scope), control_dependencies(
          None), tape.stop_recording():
        context_manager = NullContextmanager
        context_manager_input = None
        if not context.executing_eagerly():
          # The device stack is preserved when lifting into a graph. Eager
          # execution doesn't implement device stacks and in particular it
          # doesn't support device functions, so in general it's not possible
          # to do the same when lifting into the eager context.
          outer_graph = get_default_graph()
          outer_device_stack = outer_graph._device_function_stack  # pylint: disable=protected-access
          outer_graph._device_function_stack = innermost_nonempty_device_stack  # pylint: disable=protected-access
        elif innermost_nonempty_device_stack is not None:
          for device_spec in innermost_nonempty_device_stack.peek_objs():
            if device_spec.function is None:
              break
            if device_spec.raw_string:
              context_manager = context.device
              context_manager_input = device_spec.raw_string
              break
            # It is currently not possible to have a device function in V2,
            # but in V1 we are unable to apply device functions in eager mode.
            # This means that we will silently skip some of the entries on the
            # device stack in V1 + eager mode.

        with context_manager(context_manager_input):
          yield
    finally:
      # If an exception is raised here it may be hiding a related exception in
      # try-block (just above).
      if outer_graph is not None:
        # Restore the device stack we clobbered above, even on error.
        outer_graph._device_function_stack = outer_device_stack  # pylint: disable=protected-access
def executing_eagerly_outside_functions():
  """Returns True if executing eagerly, even if inside a graph function."""
  # Already eager at the top level: nothing more to check.
  if context.executing_eagerly():
    return True
  # Otherwise, temporarily enter the outermost non-function-building context
  # and ask again from there.
  outer_context, _ = _get_outer_context_and_inner_device_stack()
  with outer_context():
    return context.executing_eagerly()
def inside_function():
  """Returns the default graph's `building_function` flag."""
  default_graph = get_default_graph()
  return default_graph.building_function
@tf_export(v1=["enable_eager_execution"])
def enable_eager_execution(config=None, device_policy=None,
                           execution_mode=None):
  """Enables eager execution for the lifetime of this program.

  Eager execution provides an imperative interface to TensorFlow. With eager
  execution enabled, TensorFlow functions execute operations immediately (as
  opposed to adding to a graph to be executed later in a `tf.compat.v1.Session`)
  and
  return concrete values (as opposed to symbolic references to a node in a
  computational graph).

  For example:

  ```python
  tf.compat.v1.enable_eager_execution()

  # After eager execution is enabled, operations are executed as they are
  # defined and Tensor objects hold concrete values, which can be accessed as
  # numpy.ndarray`s through the numpy() method.
  assert tf.multiply(6, 7).numpy() == 42
  ```

  Eager execution cannot be enabled after TensorFlow APIs have been used to
  create or execute graphs. It is typically recommended to invoke this function
  at program startup and not in a library (as most libraries should be usable
  both with and without eager execution).

  Args:
    config: (Optional.) A `tf.compat.v1.ConfigProto` to use to configure the
      environment in which operations are executed. Note that
      `tf.compat.v1.ConfigProto` is also used to configure graph execution (via
      `tf.compat.v1.Session`) and many options within `tf.compat.v1.ConfigProto`
      are not implemented (or are irrelevant) when eager execution is enabled.
    device_policy: (Optional.) Policy controlling how operations requiring
      inputs on a specific device (e.g., a GPU 0) handle inputs on a different
      device (e.g. GPU 1 or CPU). When set to None, an appropriate value will
      be picked automatically. The value picked may change between TensorFlow
      releases.
      Valid values:
      - tf.contrib.eager.DEVICE_PLACEMENT_EXPLICIT: raises an error if the
        placement is not correct.
      - tf.contrib.eager.DEVICE_PLACEMENT_WARN: copies the tensors which are not
        on the right device but logs a warning.
      - tf.contrib.eager.DEVICE_PLACEMENT_SILENT: silently copies the tensors.
        Note that this may hide performance problems as there is no notification
        provided when operations are blocked on the tensor being copied between
        devices.
      - tf.contrib.eager.DEVICE_PLACEMENT_SILENT_FOR_INT32: silently copies
        int32 tensors, raising errors on the other ones.
    execution_mode: (Optional.) Policy controlling how operations dispatched are
      actually executed. When set to None, an appropriate value will be picked
      automatically. The value picked may change between TensorFlow releases.
      Valid values:
      - tf.contrib.eager.SYNC: executes each operation synchronously.
      - tf.contrib.eager.ASYNC: executes each operation asynchronously. These
        operations may return "non-ready" handles.

  Raises:
    ValueError: If eager execution is enabled after creating/executing a
      TensorFlow graph, or if options provided conflict with a previous call
      to this function.
  """
  # Record API usage unconditionally, even if eager mode is already on.
  _api_usage_gauge.get_cell().set(True)
  if context.default_execution_mode != context.EAGER_MODE:
    return enable_eager_execution_internal(
        config=config,
        device_policy=device_policy,
        execution_mode=execution_mode,
        server_def=None)
@tf_export(v1=["disable_eager_execution"])
def disable_eager_execution():
  """Disables eager execution.

  This function can only be called before any Graphs, Ops, or Tensors have been
  created. It can be used at the beginning of the program for complex migration
  projects from TensorFlow 1.x to 2.x.
  """
  _api_usage_gauge.get_cell().set(False)
  context.default_execution_mode = context.GRAPH_MODE
  ctx = context.context_safe()
  if ctx is None:
    return
  # A context already exists; flip its thread-local eager flag off as well.
  ctx._thread_local_data.is_eager = False  # pylint: disable=protected-access
def enable_eager_execution_internal(config=None,
                                    device_policy=None,
                                    execution_mode=None,
                                    server_def=None):
  """Enables eager execution for the lifetime of this program.

  Most of the doc string for enable_eager_execution is relevant here as well.

  Args:
    config: See enable_eager_execution doc string
    device_policy: See enable_eager_execution doc string
    execution_mode: See enable_eager_execution doc string
    server_def: (Optional.) A tensorflow::ServerDef proto. Enables execution on
      remote devices. GrpcServers need to be started by creating an identical
      server_def to this, and setting the appropriate task_indexes, so that the
      servers can communicate. It will then be possible to execute operations on
      remote devices.

  Raises:
    ValueError
  """
  # Validate every argument before mutating any global state.
  if config is not None and not isinstance(config, config_pb2.ConfigProto):
    raise TypeError("config must be a tf.ConfigProto, but got %s" %
                    type(config))
  if device_policy not in (None, context.DEVICE_PLACEMENT_EXPLICIT,
                           context.DEVICE_PLACEMENT_WARN,
                           context.DEVICE_PLACEMENT_SILENT,
                           context.DEVICE_PLACEMENT_SILENT_FOR_INT32):
    raise ValueError(
        "device_policy must be one of None, tf.contrib.eager.DEVICE_PLACEMENT_*"
    )
  if execution_mode not in (None, context.SYNC, context.ASYNC):
    raise ValueError(
        "execution_mode must be one of None, tf.contrib.eager.SYNC, "
        "tf.contrib.eager.ASYNC")
  if context.default_execution_mode == context.GRAPH_MODE:
    # Enabling eager after graph mode has actually been used (i.e. the global
    # default graph was instantiated) is not supported.
    graph_mode_has_been_used = (
        _default_graph_stack._global_default_graph is not None)  # pylint: disable=protected-access
    if graph_mode_has_been_used:
      raise ValueError(
          "tf.enable_eager_execution must be called at program startup.")
  context.default_execution_mode = context.EAGER_MODE
  # pylint: disable=protected-access
  with context._context_lock:
    if context._context is None:
      context._set_context_locked(context.Context(
          config=config,
          device_policy=device_policy,
          execution_mode=execution_mode,
          server_def=server_def))
    elif ((config is not None and config is not context._context._config) or
          (device_policy is not None and
           device_policy is not context._context._device_policy) or
          (execution_mode is not None and
           execution_mode is not context._context._execution_mode)):
      raise ValueError(
          "Trying to change the options of an active eager"
          " execution. Context config: %s, specified config:"
          " %s. Context device policy: %s, specified device"
          " policy: %s. Context execution mode: %s, "
          " specified execution mode %s." %
          (context._context._config, config, context._context._device_policy,
           device_policy, context._context._execution_mode, execution_mode))
    else:
      # We already created everything, so update the thread local data.
      context._context._thread_local_data.is_eager = True

  # Monkey patch to get rid of an unnecessary conditional since the context is
  # now initialized.
  context.context = context.context_safe
def eager_run(main=None, argv=None):
  """Runs the program with an optional main function and argv list.

  The program will run with eager execution enabled.

  Example:
  ```python
  import tensorflow as tf
  # Import subject to future changes:
  from tensorflow.contrib.eager.python import tfe

  def main(_):
    u = tf.constant(6.0)
    v = tf.constant(7.0)
    print(u * v)

  if __name__ == "__main__":
    tfe.run()
  ```

  Args:
    main: the main function to run.
    argv: the arguments to pass to it.
  """
  # Eager mode must be switched on before the app framework invokes `main`.
  enable_eager_execution()
  app.run(main, argv)
@tf_export(v1=["reset_default_graph"])
def reset_default_graph():
  """Clears the default graph stack and resets the global default graph.

  NOTE: The default graph is a property of the current thread. This
  function applies only to the current thread. Calling this function while
  a `tf.compat.v1.Session` or `tf.compat.v1.InteractiveSession` is active will
  result in undefined
  behavior. Using any previously created `tf.Operation` or `tf.Tensor` objects
  after calling this function will result in undefined behavior.

  Raises:
    AssertionError: If this function is called within a nested graph.
  """
  if _default_graph_stack.is_cleared():
    _default_graph_stack.reset()
  else:
    # Refuse to reset while inside a `with g.as_default():` block.
    raise AssertionError("Do not use tf.reset_default_graph() to clear "
                         "nested graphs. If you need a cleared graph, "
                         "exit the nesting and create a new graph.")
@tf_export(v1=["get_default_graph"])
def get_default_graph():
  """Returns the default graph for the current thread.

  The returned graph will be the innermost graph on which a
  `Graph.as_default()` context has been entered, or a global default
  graph if none has been explicitly created.

  NOTE: The default graph is a property of the current thread. If you
  create a new thread, and wish to use the default graph in that
  thread, you must explicitly add a `with g.as_default():` in that
  thread's function.

  Returns:
    The default `Graph` being used in the current thread.
  """
  graph_stack = _default_graph_stack
  return graph_stack.get_default()
def has_default_graph():
  """Returns True if there is a default graph."""
  # Non-empty thread-local stack <=> a default graph has been pushed.
  return bool(_default_graph_stack.stack)
def get_name_scope():
  """Returns the current name scope in the default_graph.

  For example:

  ```python
  with tf.name_scope('scope1'):
    with tf.name_scope('scope2'):
      print(tf.get_name_scope())
  ```
  would print the string `scope1/scope2`.

  Returns:
    A string representing the current name scope.
  """
  if not context.executing_eagerly():
    return get_default_graph().get_name_scope()
  # Eager scope names carry a trailing slash; strip it for display.
  return context.context().scope_name.rstrip("/")
def _assert_same_graph(original_item, item):
"""Fail if the 2 items are from different graphs.
Args:
original_item: Original item to check against.
item: Item to check.
Raises:
ValueError: if graphs do not match.
"""
if original_item.graph is not item.graph:
raise ValueError("%s must be from the same graph as %s." %
(item, original_item))
def _get_graph_from_inputs(op_input_list, graph=None):
  """Returns the appropriate graph to use for the given inputs.

  This library method provides a consistent algorithm for choosing the graph
  in which an Operation should be constructed:

  1. If the default graph is being used to construct a function, we
     use the default graph.
  2. If the "graph" is specified explicitly, we validate that all of the inputs
     in "op_input_list" are compatible with that graph.
  3. Otherwise, we attempt to select a graph from the first Operation-
     or Tensor-valued input in "op_input_list", and validate that all other
     such inputs are in the same graph.
  4. If the graph was not specified and it could not be inferred from
     "op_input_list", we attempt to use the default graph.

  Args:
    op_input_list: A list of inputs to an operation, which may include `Tensor`,
      `Operation`, and other objects that may be converted to a graph element.
    graph: (Optional) The explicit graph to use.

  Raises:
    TypeError: If op_input_list is not a list or tuple, or if graph is not a
      Graph.
    ValueError: If a graph is explicitly passed and not all inputs are from it,
      or if the inputs are from multiple graphs, or we could not find a graph
      and there was no default graph.

  Returns:
    The appropriate graph to use for the given inputs.
  """
  current_default_graph = get_default_graph()
  if current_default_graph.building_function:
    return current_default_graph

  op_input_list = tuple(op_input_list)  # Handle generators correctly
  if graph and not isinstance(graph, Graph):
    raise TypeError("Input graph needs to be a Graph: %s" % graph)

  # 1. We validate that all of the inputs are from the same graph. This is
  #    either the supplied graph parameter, or the first one selected from one
  #    the graph-element-valued inputs. In the latter case, we hold onto
  #    that input in original_graph_element so we can provide a more
  #    informative error if a mismatch is found.
  original_graph_element = None
  for op_input in op_input_list:
    # Determine if this is a valid graph_element.
    # TODO(josh11b): Note that we exclude subclasses of Tensor. Need to clean this
    # up.
    graph_element = None
    if (isinstance(op_input, (Operation, _TensorLike)) and
        ((not isinstance(op_input, Tensor)) or type(op_input) == Tensor)):  # pylint: disable=unidiomatic-typecheck
      graph_element = op_input
    else:
      graph_element = _as_graph_element(op_input)

    if graph_element is not None:
      if not graph:
        # First graph-valued input encountered: adopt its graph.
        original_graph_element = graph_element
        graph = graph_element.graph
      elif original_graph_element is not None:
        _assert_same_graph(original_graph_element, graph_element)
      elif graph_element.graph is not graph:
        raise ValueError("%s is not from the passed-in graph." % graph_element)

  # 2. If all else fails, we use the default graph, which is always there.
  return graph or current_default_graph
@tf_export(v1=["GraphKeys"])
class GraphKeys(object):
"""Standard names to use for graph collections.
The standard library uses various well-known names to collect and
retrieve values associated with a graph. For example, the
`tf.Optimizer` subclasses default to optimizing the variables
collected under `tf.GraphKeys.TRAINABLE_VARIABLES` if none is
specified, but it is also possible to pass an explicit list of
variables.
The following standard keys are defined:
* `GLOBAL_VARIABLES`: the default collection of `Variable` objects, shared
across distributed environment (model variables are subset of these). See
`tf.compat.v1.global_variables`
for more details.
Commonly, all `TRAINABLE_VARIABLES` variables will be in `MODEL_VARIABLES`,
and all `MODEL_VARIABLES` variables will be in `GLOBAL_VARIABLES`.
* `LOCAL_VARIABLES`: the subset of `Variable` objects that are local to each
machine. Usually used for temporarily variables, like counters.
Note: use `tf.contrib.framework.local_variable` to add to this collection.
* `MODEL_VARIABLES`: the subset of `Variable` objects that are used in the
model for inference (feed forward). Note: use
`tf.contrib.framework.model_variable` to add to this collection.
* `TRAINABLE_VARIABLES`: the subset of `Variable` objects that will
be trained by an optimizer. See
`tf.compat.v1.trainable_variables`
for more details.
* `SUMMARIES`: the summary `Tensor` objects that have been created in the
graph. See
`tf.compat.v1.summary.merge_all`
for more details.
* `QUEUE_RUNNERS`: the `QueueRunner` objects that are used to
produce input for a computation. See
`tf.compat.v1.train.start_queue_runners`
for more details.
* `MOVING_AVERAGE_VARIABLES`: the subset of `Variable` objects that will also
keep moving averages. See
`tf.compat.v1.moving_average_variables`
for more details.
* `REGULARIZATION_LOSSES`: regularization losses collected during graph
construction.
The following standard keys are _defined_, but their collections are **not**
automatically populated as many of the others are:
* `WEIGHTS`
* `BIASES`
* `ACTIVATIONS`
"""
# Key to collect Variable objects that are global (shared across machines).
# Default collection for all variables, except local ones.
GLOBAL_VARIABLES = "variables"
# Key to collect local variables that are local to the machine and are not
# saved/restored.
LOCAL_VARIABLES = "local_variables"
# Key to collect local variables which are used to accumulate interal state
# to be used in tf.metrics.*.
METRIC_VARIABLES = "metric_variables"
# Key to collect model variables defined by layers.
MODEL_VARIABLES = "model_variables"
# Key to collect Variable objects that will be trained by the
# optimizers.
TRAINABLE_VARIABLES = "trainable_variables"
# Key to collect summaries.
SUMMARIES = "summaries"
# Key to collect QueueRunners.
QUEUE_RUNNERS = "queue_runners"
# Key to collect table initializers.
TABLE_INITIALIZERS = "table_initializer"
# Key to collect asset filepaths. An asset represents an external resource
# like a vocabulary file.
ASSET_FILEPATHS = "asset_filepaths"
# Key to collect Variable objects that keep moving averages.
MOVING_AVERAGE_VARIABLES = "moving_average_variables"
# Key to collect regularization losses at graph construction.
REGULARIZATION_LOSSES = "regularization_losses"
# Key to collect concatenated sharded variables.
CONCATENATED_VARIABLES = "concatenated_variables"
# Key to collect savers.
SAVERS = "savers"
# Key to collect weights
WEIGHTS = "weights"
# Key to collect biases
BIASES = "biases"
# Key to collect activations
ACTIVATIONS = "activations"
# Key to collect update_ops
UPDATE_OPS = "update_ops"
# Key to collect losses
LOSSES = "losses"
# Key to collect BaseSaverBuilder.SaveableObject instances for checkpointing.
SAVEABLE_OBJECTS = "saveable_objects"
# Key to collect all shared resources used by the graph which need to be
# initialized once per cluster.
RESOURCES = "resources"
# Key to collect all shared resources used in this graph which need to be
# initialized once per session.
LOCAL_RESOURCES = "local_resources"
# Trainable resource-style variables.
TRAINABLE_RESOURCE_VARIABLES = "trainable_resource_variables"
# Key to indicate various ops.
INIT_OP = "init_op"
LOCAL_INIT_OP = "local_init_op"
READY_OP = "ready_op"
READY_FOR_LOCAL_INIT_OP = "ready_for_local_init_op"
SUMMARY_OP = "summary_op"
GLOBAL_STEP = "global_step"
# Used to count the number of evaluations performed during a single evaluation
# run.
EVAL_STEP = "eval_step"
TRAIN_OP = "train_op"
# Key for control flow context.
COND_CONTEXT = "cond_context"
WHILE_CONTEXT = "while_context"
# Used to store v2 summary names.
_SUMMARY_COLLECTION = "_SUMMARY_V2"
# List of all collections that keep track of variables.
_VARIABLE_COLLECTIONS = [
GLOBAL_VARIABLES,
LOCAL_VARIABLES,
METRIC_VARIABLES,
MODEL_VARIABLES,
TRAINABLE_VARIABLES,
MOVING_AVERAGE_VARIABLES,
CONCATENATED_VARIABLES,
TRAINABLE_RESOURCE_VARIABLES,
]
# Key for streaming model ports.
# NOTE(yuanbyu): internal and experimental.
_STREAMING_MODEL_PORTS = "streaming_model_ports"
@decorator_utils.classproperty
@deprecation.deprecated(None, "Use `tf.GraphKeys.GLOBAL_VARIABLES` instead.")
def VARIABLES(cls):  # pylint: disable=no-self-argument
  # Deprecated alias kept for backwards compatibility with pre-1.0 code.
  return cls.GLOBAL_VARIABLES
def dismantle_graph(graph):
  """Breaks reference cycles inside a `Graph` so the GC need not collect it.

  Helpful for making sure the garbage collector doesn't need to run after a
  temporary `Graph` is no longer needed.

  Args:
    graph: A `Graph` object to destroy. Neither it nor any of its ops are
      usable after this function runs.
  """
  # The function map participates in cycles with the graph; drop it first.
  memory.dismantle_ordered_dict(graph._functions)  # pylint: disable=protected-access

  # Break the remaining Operation<->Graph reference cycles by wiping every
  # attribute dict — first on each op, then on the graph itself.
  for operation in graph.get_operations():
    operation.__dict__ = {}
  graph.__dict__ = {}
@tf_export(v1=["add_to_collection"])
def add_to_collection(name, value):
  """Wrapper for `Graph.add_to_collection()` using the default graph.

  See `tf.Graph.add_to_collection` for more details.

  Args:
    name: The key for the collection. For example, the `GraphKeys` class
      contains many standard names for collections.
    value: The value to add to the collection. @compatibility(eager)
      Collections are only supported in eager when variables are created inside
      an EagerVariableStore (e.g. as part of a layer or template).
      @end_compatibility
  """
  # Thin convenience wrapper: resolves the default graph and delegates.
  get_default_graph().add_to_collection(name, value)
@tf_export(v1=["add_to_collections"])
def add_to_collections(names, value):
  """Wrapper for `Graph.add_to_collections()` using the default graph.

  See `tf.Graph.add_to_collections` for more details.

  Args:
    names: The keys for the collections. The `GraphKeys` class contains many
      standard names for collections.
    value: The value to add to the collections. @compatibility(eager)
      Collections are only supported in eager when variables are created inside
      an EagerVariableStore (e.g. as part of a layer or template).
      @end_compatibility
  """
  # Thin convenience wrapper: resolves the default graph and delegates.
  get_default_graph().add_to_collections(names, value)
@tf_export(v1=["get_collection_ref"])
def get_collection_ref(key):
  """Wrapper for `Graph.get_collection_ref()` using the default graph.

  See `tf.Graph.get_collection_ref` for more details.

  Args:
    key: The key for the collection. For example, the `GraphKeys` class contains
      many standard names for collections.

  Returns:
    The list of values in the collection with the given `name`, or an empty
    list if no value has been added to that collection. Note that this returns
    the collection list itself, which can be modified in place to change the
    collection.

  @compatibility(eager)
  Collections are not supported when eager execution is enabled.
  @end_compatibility
  """
  # Unlike get_collection(), this returns the live list (no copy, no filter).
  return get_default_graph().get_collection_ref(key)
@tf_export(v1=["get_collection"])
def get_collection(key, scope=None):
  """Wrapper for `Graph.get_collection()` using the default graph.

  See `tf.Graph.get_collection` for more details.

  Args:
    key: The key for the collection. For example, the `GraphKeys` class contains
      many standard names for collections.
    scope: (Optional.) If supplied, the resulting list is filtered to include
      only items whose `name` attribute matches using `re.match`. Items without
      a `name` attribute are never returned if a scope is supplied and the
      choice or `re.match` means that a `scope` without special tokens filters
      by prefix.

  Returns:
    The list of values in the collection with the given `name`, or
    an empty list if no value has been added to that collection. The
    list contains the values in the order under which they were
    collected.

  @compatibility(eager)
  Collections are not supported when eager execution is enabled.
  @end_compatibility
  """
  # Returns a (possibly filtered) copy; use get_collection_ref for the live list.
  return get_default_graph().get_collection(key, scope)
def get_all_collection_keys():
  """Returns a list of collections used in the default graph."""
  # Delegates straight to the default graph's collection registry.
  return get_default_graph().get_all_collection_keys()
# Named like a function for backwards compatibility with the
# @tf_contextlib.contextmanager version, which was switched to a class to avoid
# some object creation overhead.
@tf_export(v1=["name_scope"])
class name_scope(object):  # pylint: disable=invalid-name
  """A context manager for use when defining a Python op.

  This context manager validates that the given `values` are from the
  same graph, makes that graph the default graph, and pushes a
  name scope in that graph (see
  `tf.Graph.name_scope`
  for more details on that).

  For example, to define a new Python op called `my_op`:

  ```python
  def my_op(a, b, c, name=None):
    with tf.name_scope(name, "MyOp", [a, b, c]) as scope:
      a = tf.convert_to_tensor(a, name="a")
      b = tf.convert_to_tensor(b, name="b")
      c = tf.convert_to_tensor(c, name="c")
      # Define some computation that uses `a`, `b`, and `c`.
      return foo_op(..., name=scope)
  ```
  """

  @property
  def name(self):
    # The caller-supplied name, or default_name when no name was given.
    return self._name

  def __init__(self, name, default_name=None, values=None):
    """Initialize the context manager.

    Args:
      name: The name argument that is passed to the op function.
      default_name: The default name to use if the `name` argument is `None`.
      values: The list of `Tensor` arguments that are passed to the op function.

    Raises:
      TypeError: if `default_name` is passed in but not a string.
    """
    if not (default_name is None or isinstance(default_name, six.string_types)):
      raise TypeError(
          "`default_name` type (%s) is not a string type. You likely meant to "
          "pass this into the `values` kwarg." % type(default_name))
    self._name = default_name if name is None else name
    self._default_name = default_name
    self._values = values
    self._ctx = context.context()
    self._in_eager_mode = self._ctx.executing_eagerly()
    self._has_symbolic_input_in_eager = False
    if self._values and self._in_eager_mode:
      # The presence of a graph tensor in `self._values` overrides the context.
      # In that case the graph tensor's own graph supplies the name scope.
      for value in self._values:
        if hasattr(value, "graph"):
          self._has_symbolic_input_in_eager = True
          self._name_scope = value.graph.name_scope(self._name)

  def __enter__(self):
    """Start the scope block.

    Returns:
      The scope name.

    Raises:
      ValueError: if neither `name` nor `default_name` is provided
        but `values` are.
    """
    # Three entry paths: symbolic-input-in-eager, pure eager, and graph mode.
    if self._has_symbolic_input_in_eager:
      return self._name_scope.__enter__()

    if self._in_eager_mode:
      scope_name, self._old_name = enter_eager_name_scope(self._ctx, self._name)
      return scope_name
    else:
      if self._name is None and self._values is not None:
        # We only raise an error if values is not None (provided) because
        # currently tf.name_scope(None) (values=None then) is sometimes used as
        # an idiom to reset to top scope.
        raise ValueError(
            "At least one of name (%s) and default_name (%s) must be provided."
            % (self._name, self._default_name))

      g = get_default_graph()
      if self._values and not g.building_function:
        # Specialize based on the knowledge that `_get_graph_from_inputs()`
        # ignores `inputs` when building a function.
        g_from_inputs = _get_graph_from_inputs(self._values)
        if g_from_inputs is not g:
          # The inputs live on a different graph: make that graph the default
          # for the duration of the scope.
          g = g_from_inputs
          self._g_manager = g.as_default()
          self._g_manager.__enter__()
        else:
          self._g_manager = None
      else:
        self._g_manager = None

      try:
        self._name_scope = g.name_scope(self._name)
        return self._name_scope.__enter__()
      except:
        # If entering the name scope fails, unwind the graph context we may
        # have pushed above before propagating the original exception.
        if self._g_manager is not None:
          self._g_manager.__exit__(*sys.exc_info())
        raise

  def __exit__(self, type_arg, value_arg, traceback_arg):
    # Mirror __enter__: pop whichever context(s) were pushed.
    if self._has_symbolic_input_in_eager:
      self._name_scope.__exit__(type_arg, value_arg, traceback_arg)
    elif self._in_eager_mode:
      self._ctx.scope_name = self._old_name
    else:
      self._name_scope.__exit__(type_arg, value_arg, traceback_arg)
      if self._g_manager is not None:
        self._g_manager.__exit__(type_arg, value_arg, traceback_arg)
    return False  # False values do not suppress exceptions
def enter_eager_name_scope(ctx, name):
  """Updates the eager context to enter the given name scope.

  Args:
    ctx: The eager context whose `scope_name` is updated in place.
    name: The scope to enter. A falsy name resets to the top scope. A trailing
      slash marks a fully specified scope name (not nested under the current
      scope), for compatibility with Graph.name_scope.

  Returns:
    A `(scope_name, old_scope_name)` tuple; `scope_name` is the new fully
    qualified scope (ending in "/" unless empty).
  """
  previous = ctx.scope_name
  if not name:
    new_scope = ""
  elif name.endswith("/"):
    # A trailing slash breaks out of nested name scopes, indicating a
    # fully specified scope name, for compatibility with Graph.name_scope.
    new_scope = name
  else:
    new_scope = name + "/"
    if previous:
      new_scope = previous + new_scope
  ctx.scope_name = new_scope
  return new_scope, previous
@tf_export("name_scope", v1=[])
class name_scope_v2(name_scope):
  """A context manager for use when defining a Python op.

  This context manager pushes a name scope, which will make the name of all
  operations added within it have a prefix.

  For example, to define a new Python op called `my_op`:

  ```python
  def my_op(a, b, c, name=None):
    with tf.name_scope("MyOp") as scope:
      a = tf.convert_to_tensor(a, name="a")
      b = tf.convert_to_tensor(b, name="b")
      c = tf.convert_to_tensor(c, name="c")
      # Define some computation that uses `a`, `b`, and `c`.
      return foo_op(..., name=scope)
  ```

  When executed, the Tensors `a`, `b`, `c`, will have names `MyOp/a`, `MyOp/b`,
  and `MyOp/c`.

  If the scope name already exists, the name will be made unique by appending
  `_n`. For example, calling `my_op` the second time will generate `MyOp_1/a`,
  etc.
  """

  def __init__(self, name):
    """Initialize the context manager.

    Args:
      name: The prefix to use on all names created within the name scope.

    Raises:
      ValueError: If name is None, or not a string.
    """
    # Note: deliberately does NOT call super().__init__ — the v1 machinery
    # (default_name/values handling) is not used here.
    if name is None or not isinstance(name, six.string_types):
      raise ValueError("name for name_scope must be a string.")
    self._name = name
    # Stack of exit callbacks; supports re-entering the same manager.
    self._exit_fns = []

  @property
  def name(self):
    return self._name

  def __enter__(self):
    """Start the scope block.

    Returns:
      The scope name.

    Raises:
      ValueError: if neither `name` nor `default_name` is provided
        but `values` are.
    """
    ctx = context.context()
    if ctx.executing_eagerly():
      scope_name, old_scope_name = enter_eager_name_scope(ctx, self._name)
      # Exiting simply restores the previous eager scope name.
      self._exit_fns.append(
          lambda *a: setattr(ctx, "scope_name", old_scope_name))
    else:
      scope = get_default_graph().name_scope(self._name)
      scope_name = scope.__enter__()
      self._exit_fns.append(scope.__exit__)
    return scope_name

  def __exit__(self, type_arg, value_arg, traceback_arg):
    # Pop and run the exit callback matching the most recent __enter__.
    exit_fn = self._exit_fns.pop()
    exit_fn(type_arg, value_arg, traceback_arg)
    return False  # False values do not suppress exceptions
def strip_name_scope(name, export_scope):
  """Removes name scope from a name.

  Args:
    name: A `string` name.
    export_scope: Optional `string`. Name scope to remove.

  Returns:
    Name with name scope removed, or the original name if export_scope
    is falsy or `name` is of a type that cannot be processed.
  """
  if not export_scope:
    return name

  # Normalize a single trailing slash away so the pattern below matches.
  if export_scope.endswith("/"):
    export_scope = export_scope[:-1]

  try:
    # Strips export_scope/, export_scope///,
    # ^export_scope/, loc:@export_scope/.
    pattern = r"([\^]|loc:@|^)" + export_scope + r"[\/]+(.*)"
    return re.sub(pattern, r"\1\2", compat.as_str(name), count=1)
  except TypeError as e:
    # If the name is not of a type we can process, simply return it.
    logging.warning(e)
    return name
def prepend_name_scope(name, import_scope):
  """Prepends name scope to a name.

  Args:
    name: A `string` name.
    import_scope: Optional `string`. Name scope to add.

  Returns:
    Name with name scope added, or the original name if import_scope
    is falsy or `name` is of a type that cannot be processed.
  """
  if not import_scope:
    return name

  # Normalize a single trailing slash away before splicing the scope in.
  if import_scope.endswith("/"):
    import_scope = import_scope[:-1]

  try:
    # Insert the scope after any leading "^" (control input) or "loc:@" marker.
    return re.sub(r"([\^]|loc:@|^)(.*)",
                  r"\1" + import_scope + r"/\2",
                  compat.as_str(name))
  except TypeError as e:
    # If the name is not of a type we can process, simply return it.
    logging.warning(e)
    return name
# pylint: disable=g-doc-return-or-yield
# pylint: disable=not-context-manager
@tf_export(v1=["op_scope"])
@tf_contextlib.contextmanager
def op_scope(values, name, default_name=None):
  """DEPRECATED. Same as name_scope above, just different argument order."""
  # Kept only for backwards compatibility; warns and delegates to name_scope.
  logging.warn("tf.op_scope(values, name, default_name) is deprecated,"
               " use tf.name_scope(name, default_name, values)")
  with name_scope(name, default_name=default_name, values=values) as scope:
    yield scope
# Registry mapping a collection name to its (proto_type, to_proto, from_proto).
_proto_function_registry = registry.Registry("proto functions")


def register_proto_function(collection_name,
                            proto_type=None,
                            to_proto=None,
                            from_proto=None):
  """Registers `to_proto` and `from_proto` functions for collection_name.

  `to_proto` function converts a Python object to the corresponding protocol
  buffer, and returns the protocol buffer.

  `from_proto` function converts protocol buffer into a Python object, and
  returns the object.

  Args:
    collection_name: Name of the collection.
    proto_type: Protobuf type, such as `saver_pb2.SaverDef`,
      `variable_pb2.VariableDef`, `queue_runner_pb2.QueueRunnerDef`.
    to_proto: Function that implements Python object to protobuf conversion.
    from_proto: Function that implements protobuf to Python object conversion.

  Raises:
    TypeError: if a supplied converter is not callable.
  """
  for arg_name, converter in (("to_proto", to_proto),
                              ("from_proto", from_proto)):
    if converter and not callable(converter):
      raise TypeError("%s must be callable." % arg_name)
  _proto_function_registry.register((proto_type, to_proto, from_proto),
                                    collection_name)
def get_collection_proto_type(collection_name):
  """Returns the proto_type registered for `collection_name`, or None."""
  try:
    proto_type, _, _ = _proto_function_registry.lookup(collection_name)
  except LookupError:
    return None
  return proto_type
def get_to_proto_function(collection_name):
  """Returns the to_proto function registered for `collection_name`, or None."""
  try:
    _, to_proto, _ = _proto_function_registry.lookup(collection_name)
  except LookupError:
    return None
  return to_proto
def get_from_proto_function(collection_name):
  """Returns the from_proto function registered for `collection_name`, or None."""
  try:
    _, _, from_proto = _proto_function_registry.lookup(collection_name)
  except LookupError:
    return None
  return from_proto
def _operation_conversion_error(op, dtype=None, name=None, as_ref=False):
"""Produce a nice error if someone converts an Operation to a Tensor."""
raise TypeError(("Can't convert Operation '%s' to Tensor "
"(target dtype=%r, name=%r, as_ref=%r)") %
(op.name, dtype, name, as_ref))
def _op_to_colocate_with(v, graph):
  """Operation object corresponding to v to use for colocation constraints.

  Args:
    v: None, an `Operation`, a resource-variable-like object exposing
      `v.handle.op`, or anything convertible to a tensor/IndexedSlices.
    graph: The graph in which the colocation constraint will be used; when it
      is building a function, the variable handle must be captured first.

  Returns:
    None when `v` is None, otherwise the `Operation` to colocate with.
  """
  if v is None:
    return None
  if isinstance(v, Operation):
    return v
  # We always want to colocate with the reference op.
  # When 'v' is a ResourceVariable, the reference op is the handle creating op.
  #
  # What this should be is:
  #   if isinstance(v, ResourceVariable):
  #     return v.handle.op
  # However, that would require a circular import dependency.
  # As of October 2018, there were attempts underway to remove
  # colocation constraints altogether. Assuming that will
  # happen soon, perhaps this hack to work around the circular
  # import dependency is acceptable.
  if hasattr(v, "handle") and hasattr(v.handle, "op") and isinstance(
      v.handle.op, Operation):
    if graph.building_function:
      # Inside a function body the handle must be captured into the function's
      # graph before its op can be referenced.
      return graph.capture(v.handle).op
    else:
      return v.handle.op
  # Fallback: convert to a tensor (as a reference) and use its producing op.
  return internal_convert_to_tensor_or_indexed_slices(v, as_ref=True).op
def _is_keras_symbolic_tensor(x):
return hasattr(x, "graph") and getattr(x.graph, "name", None) == "keras_graph"
# Make attempting to convert an Operation to a Tensor raise a helpful error.
tensor_conversion_registry.register_tensor_conversion_function(
    Operation, _operation_conversion_error)


# These symbols were originally defined in this module; import them for
# backwards compatibility until all references have been updated to access
# them from the indexed_slices.py module.
IndexedSlices = indexed_slices.IndexedSlices
IndexedSlicesValue = indexed_slices.IndexedSlicesValue
convert_to_tensor_or_indexed_slices = \
    indexed_slices.convert_to_tensor_or_indexed_slices
convert_n_to_tensor_or_indexed_slices = \
    indexed_slices.convert_n_to_tensor_or_indexed_slices
internal_convert_to_tensor_or_indexed_slices = \
    indexed_slices.internal_convert_to_tensor_or_indexed_slices
internal_convert_n_to_tensor_or_indexed_slices = \
    indexed_slices.internal_convert_n_to_tensor_or_indexed_slices
register_tensor_conversion_function = \
    tensor_conversion_registry.register_tensor_conversion_function
| 36.441008 | 115 | 0.692027 |
3b9e6e24f8e4ab98d540fb5e1788f68716019b33 | 11,450 | py | Python | accuracy.py | gpmarques/unsup_action_seg_st_pe_embed | b6d6c95099399b667ad41339ac1259c631c9ec54 | [
"MIT"
] | null | null | null | accuracy.py | gpmarques/unsup_action_seg_st_pe_embed | b6d6c95099399b667ad41339ac1259c631c9ec54 | [
"MIT"
] | null | null | null | accuracy.py | gpmarques/unsup_action_seg_st_pe_embed | b6d6c95099399b667ad41339ac1259c631c9ec54 | [
"MIT"
] | null | null | null | """Module for computing evaluation metrics"""
__author__ = 'Anna Kukleva'
__date__ = 'August 2018'
import numpy as np
from collections import defaultdict, Counter
from scipy.optimize import linear_sum_assignment
class Accuracy(object):
""" Implementation of evaluation metrics for unsupervised learning.
Since it's unsupervised learning relations between ground truth labels
and output segmentation should be found.
Hence the Hungarian method was used and labeling which gives us
the best score is used as a result.
"""
def __init__(self, n_frames=1):
"""
Args:
n_frames: frequency of sampling,
in case of it's equal to 1 => dense sampling
"""
self._n_frames = n_frames
self._reset()
self._predicted_labels = None
self._gt_labels_subset = None
self._gt_labels = None
self._boundaries = None
# all frames used for alg without any subsampling technique
self._indices = None
self._frames_overall = 0
self._frames_true_pr = 0
self._average_score = 0
self._processed_number = 0
self._classes_MoF = {}
self._classes_IoU = {}
# keys - gt, values - pr
self.exclude = {}
self._return = {}
def _reset(self):
self._n_clusters = 0
self._gt_label2index = {}
self._gt_index2label = {}
self._pr_label2index = {}
self._pr_index2label = {}
self._voting_table = []
self._gt2cluster = defaultdict(list)
self._acc_per_gt_class = {}
self.exclude = {}
@property
def predicted_labels(self):
return self._predicted_labels
@predicted_labels.setter
def predicted_labels(self, labels):
self._predicted_labels = np.array(labels)
self._reset()
@property
def gt_labels(self):
return self._gt_labels_subset
@gt_labels.setter
def gt_labels(self, labels):
self._gt_labels = np.array(labels)
self._gt_labels_subset = self._gt_labels[:]
self._indices = list(range(len(self._gt_labels)))
@property
def params(self):
"""
boundaries: if frames samples from segments we need to know boundaries
of these segments to fulfill them after
indices: frames extracted for whatever and indeed evaluation
"""
return self._boundaries, self._indices
@params.setter
def params(self, params):
self._boundaries = params[0]
self._indices = params[1]
self._gt_labels_subset = self._gt_labels[self._indices]
def _create_voting_table(self):
"""Filling table with assignment scores.
Create table which represents paired label assignments, i.e. each
cell comprises score for corresponding label assignment"""
size = max(len(np.unique(self._gt_labels_subset)),
len(np.unique(self._predicted_labels)))
self._voting_table = np.zeros((size, size))
for idx_gt, gt_label in enumerate(np.unique(self._gt_labels_subset)):
self._gt_label2index[gt_label] = idx_gt
self._gt_index2label[idx_gt] = gt_label
if len(self._gt_label2index) < size:
for idx_gt in range(len(np.unique(self._gt_labels_subset)), size):
gt_label = idx_gt
while gt_label in self._gt_label2index:
gt_label += 1
self._gt_label2index[gt_label] = idx_gt
self._gt_index2label[idx_gt] = gt_label
for idx_pr, pr_label in enumerate(np.unique(self._predicted_labels)):
self._pr_label2index[pr_label] = idx_pr
self._pr_index2label[idx_pr] = pr_label
if len(self._pr_label2index) < size:
for idx_pr in range(len(np.unique(self._predicted_labels)), size):
pr_label = idx_pr
while pr_label in self._pr_label2index:
pr_label += 1
self._pr_label2index[pr_label] = idx_pr
self._pr_index2label[idx_pr] = pr_label
for idx_gt, gt_label in enumerate(np.unique(self._gt_labels_subset)):
if gt_label in list(self.exclude.keys()):
continue
gt_mask = self._gt_labels_subset == gt_label
for idx_pr, pr_label in enumerate(np.unique(self._predicted_labels)):
if pr_label in list(self.exclude.values()):
continue
self._voting_table[idx_gt, idx_pr] = \
np.sum(self._predicted_labels[gt_mask] == pr_label, dtype=float)
for key, val in self.exclude.items():
# works only if one pair in exclude
assert len(self.exclude) == 1
try:
self._voting_table[self._gt_label2index[key], self._pr_label2index[val[0]]] = size * np.max(self._voting_table)
except KeyError:
self._voting_table[self._gt_label2index[key], -1] = size * np.max(self._voting_table)
self._pr_index2label[size - 1] = val[0]
self._pr_label2index[val[0]] = size - 1
def _create_correspondences(self, method='hungarian', optimization='max'):
""" Find output labels which correspond to ground truth labels.
Hungarian method finds one-to-one mapping: if there is squared matrix
given, then for each output label -> gt label. If not, some labels will
be without correspondences.
Args:
method: hungarian or max
optimization: for hungarian method usually min problem but here
is max, hence convert to min
where: if some actions are not in the video collection anymore
"""
if method == 'hungarian':
try:
assert self._voting_table.shape[0] == self._voting_table.shape[1]
except AssertionError:
raise AssertionError('bum tss')
if optimization == 'max':
# convert max problem to minimization problem
self._voting_table *= -1
x, y = linear_sum_assignment(self._voting_table)
for idx_gt, idx_pr in zip(x, y):
self._gt2cluster[self._gt_index2label[idx_gt]] = [self._pr_index2label[idx_pr]]
if method == 'max':
# maximum voting, won't create exactly one-to-one mapping
max_responses = np.argmax(self._voting_table, axis=0)
for idx, c in enumerate(max_responses):
# c is index of gt label
# idx is predicted cluster label
self._gt2cluster[self._gt_index2label[c]].append(idx)
def _fulfill_segments(self):
"""If was used frame sampling then anyway we need to get assignment
for each frame"""
self._full_predicted_labels = []
for idx, slice in enumerate(range(0, len(self._predicted_labels), self._n_frames)):
start, end = self._boundaries[idx]
label_counter = Counter(self._predicted_labels[slice: slice + self._n_frames])
win_label = label_counter.most_common(1)[0][0]
self._full_predicted_labels += [win_label] * (end - start + 1)
self._full_predicted_labels = np.asarray(self._full_predicted_labels)
def mof(self, with_segments=False, old_gt2label=None, optimization='max'):
""" Compute mean over frames (MoF) for current labeling.
Args:
with_segments: if frame sampling was used
old_gt2label: MoF for given gt <-> output labels correspondences
optimization: inside hungarian method
where: see _create_correspondences method
Returns:
"""
self._n_clusters = len(np.unique(self._predicted_labels))
self._create_voting_table()
self._create_correspondences(optimization=optimization)
if with_segments:
self._fulfill_segments()
else:
self._full_predicted_labels = self._predicted_labels
old_frames_true = 0
self._classes_MoF = {}
self._classes_IoU = {}
excluded_total = 0
for gt_label in np.unique(self._gt_labels):
true_defined_frame_n = 0.
union = 0
gt_mask = self._gt_labels == gt_label
# no need the loop since only one label should be here
# i.e. one-to-one mapping, but i'm lazy
for cluster in self._gt2cluster[gt_label]:
true_defined_frame_n += np.sum(self._full_predicted_labels[gt_mask] == cluster,
dtype=float)
pr_mask = self._full_predicted_labels == cluster
union += np.sum(gt_mask | pr_mask)
if old_gt2label is not None:
old_true_defined_frame_n = 0.
for cluster in old_gt2label[gt_label]:
old_true_defined_frame_n += np.sum(self._full_predicted_labels[gt_mask] == cluster,
dtype=float)
old_frames_true += old_true_defined_frame_n
self._classes_MoF[gt_label] = [true_defined_frame_n, np.sum(gt_mask)]
self._classes_IoU[gt_label] = [true_defined_frame_n, union]
if gt_label in self.exclude:
excluded_total += np.sum(gt_mask)
else:
self._frames_true_pr += true_defined_frame_n
self._frames_overall = len(self._gt_labels)
self._frames_overall -= excluded_total
return old_frames_true, self._frames_overall
def mof_classes(self):
average_class_mof = 0
total_true = 0
total = 0
for key, val in self._classes_MoF.items():
true_frames, all_frames = val
average_class_mof += true_frames / all_frames
total_true += true_frames
total += all_frames
average_class_mof /= len(self._classes_MoF)
self._return['mof'] = [self._frames_true_pr, self._frames_overall]
self._return['mof_bg'] = [total_true, total]
def iou_classes(self):
average_class_iou = 0
excluded_iou = 0
for key, val in self._classes_IoU.items():
true_frames, union = val
if key not in self.exclude:
average_class_iou += true_frames / union
else:
excluded_iou += true_frames / union
average_iou_without_exc = average_class_iou / \
(len(self._classes_IoU) - len(self.exclude))
average_iou_with_exc = (average_class_iou + excluded_iou) / \
len(self._classes_IoU)
self._return['iou'] = [average_class_iou,
len(self._classes_IoU) - len(self.exclude)]
self._return['iou_bg'] = [average_class_iou + excluded_iou,
len(self._classes_IoU) - len(self.exclude)]
def mof_val(self):
return float(self._frames_true_pr) / self._frames_overall
def frames(self):
return self._frames_true_pr
def stat(self):
return self._return
def get_gt2labels_mapping(self):
self._n_clusters = len(np.unique(self._predicted_labels))
self._create_voting_table()
self._create_correspondences(optimization='max')
return self._gt2cluster | 39.756944 | 127 | 0.61083 |
ba4b226aff30e92ad6c3d7261ff4946066a0b4f3 | 13,315 | py | Python | app/books/serializers.py | MateuszZalewski/STX_Next | 8ee14761bd064799629f652d82f687fbbd7953ee | [
"MIT"
] | null | null | null | app/books/serializers.py | MateuszZalewski/STX_Next | 8ee14761bd064799629f652d82f687fbbd7953ee | [
"MIT"
] | null | null | null | app/books/serializers.py | MateuszZalewski/STX_Next | 8ee14761bd064799629f652d82f687fbbd7953ee | [
"MIT"
] | null | null | null | from collections import OrderedDict
from rest_framework import serializers
from .models import Book, SearchInfo
from .models.access_info import AccessInfo, Epub, Pdf, DownloadAccess
from .models.volume_info import VolumeInfo, Author, Category, IndustryIdentifier, Dimensions, \
ReadingModes, PanelizationSummary, ImageLinks
from .models.sale_info import SaleInfo, ListPrice, RetailPrice, Offer
class NonNullModelSerializer(serializers.ModelSerializer):
    """ModelSerializer base that omits null-valued fields from its output."""

    def to_representation(self, instance):
        """Build the default representation, then drop every None value."""
        representation = super().to_representation(instance)
        return OrderedDict(
            (field, value)
            for field, value in representation.items()
            if value is not None
        )
class OfferSerializer(NonNullModelSerializer):
    """Flat Offer payload; internal FK, surrogate key and nested prices hidden."""
    class Meta:
        model = Offer
        exclude = ('saleInfo', 'id', 'listPrice', 'retailPrice')
class RetailPriceSerializer(NonNullModelSerializer):
    """Flat RetailPrice payload; internal FK and surrogate key hidden."""
    class Meta:
        # BUG FIX: this serializer previously pointed at the ListPrice model
        # (copy-paste from ListPriceSerializer), so retail prices were
        # validated/introspected against the wrong table. SaleInfoSerializer
        # creates/updates RetailPrice rows through this serializer.
        model = RetailPrice
        exclude = ('saleInfo', 'id')
class ListPriceSerializer(NonNullModelSerializer):
    """Flat ListPrice payload; internal FK and surrogate key hidden."""
    class Meta:
        model = ListPrice
        exclude = ('saleInfo', 'id')
class SaleInfoSerializer(NonNullModelSerializer):
    """Serializer for a book's SaleInfo with nested price and offer records."""
    listPrice = ListPriceSerializer(required=False)
    retailPrice = RetailPriceSerializer(required=False)
    offer = OfferSerializer(required=False)

    class Meta:
        model = SaleInfo
        exclude = ('book', 'id')

    def create(self, validated_data):
        # Pop every nested payload first so the remaining keys map directly
        # onto SaleInfo's own columns.
        list_price_data = validated_data.pop('listPrice', None)
        retail_price_data = validated_data.pop('retailPrice', None)
        offer_data = validated_data.pop('offer', None)
        book = validated_data.pop('book', None)
        sale_info = SaleInfo.objects.create(book=book, **validated_data)
        if list_price_data:
            ListPrice.objects.create(saleInfo=sale_info, **list_price_data)
        if retail_price_data:
            RetailPrice.objects.create(saleInfo=sale_info, **retail_price_data)
        if offer_data:
            Offer.objects.create(saleInfo=sale_info, **offer_data)
        return sale_info

    def update(self, instance, validated_data):
        # Delegate each nested payload to its declared sub-serializer, then
        # let ModelSerializer.update handle the remaining flat fields.
        retail_price_data = validated_data.pop('retailPrice', {})
        if retail_price_data:
            retail_price_serializer = self.fields['retailPrice']
            retail_price_instance = instance.retailPrice
            retail_price_serializer.update(retail_price_instance, retail_price_data)

        list_price_data = validated_data.pop('listPrice', {})
        if list_price_data:
            list_price_serializer = self.fields['listPrice']
            list_price_instance = instance.listPrice
            list_price_serializer.update(list_price_instance, list_price_data)

        offer_data = validated_data.pop('offer', {})
        if offer_data:
            offer_serializer = self.fields['offer']
            offer_instance = instance.offer
            offer_serializer.update(offer_instance, offer_data)

        return super().update(instance, validated_data)
class ImageLinksSummarySerializer(NonNullModelSerializer):
    """Flat ImageLinks payload; internal FK and surrogate key hidden.

    NOTE(review): the "Summary" suffix looks like a copy-paste from the
    panelization serializer — the model is ImageLinks.
    """
    class Meta:
        model = ImageLinks
        exclude = ('volumeInfo', 'id')
class PanelizationSummarySerializer(NonNullModelSerializer):
    """Flat PanelizationSummary payload; internal FK and surrogate key hidden."""
    class Meta:
        model = PanelizationSummary
        exclude = ('volumeInfo', 'id')
class ReadingModesSerializer(NonNullModelSerializer):
    """Flat ReadingModes payload; internal FK and surrogate key hidden."""
    class Meta:
        model = ReadingModes
        exclude = ('volumeInfo', 'id')
class DimensionsSerializer(NonNullModelSerializer):
    """Flat Dimensions payload; internal FK and surrogate key hidden."""
    class Meta:
        model = Dimensions
        exclude = ('volumeInfo', 'id')
class IndustryIdentifierSerializer(NonNullModelSerializer):
    """Flat IndustryIdentifier payload; internal FK and surrogate key hidden."""
    class Meta:
        model = IndustryIdentifier
        exclude = ('volumeInfo', 'id')
class AuthorSerializer(NonNullModelSerializer):
    """Exposes only an author's display name."""
    class Meta:
        model = Author
        fields = ('fullName', )
class CategorySerializer(NonNullModelSerializer):
    """Exposes only a category's name."""
    class Meta:
        model = Category
        fields = ('name', )
class VolumeInfoSerializer(NonNullModelSerializer):
    """Serializer for a book's VolumeInfo together with its nested records.

    `categories` and `authors` are flattened to lists of plain strings; the
    remaining nested objects use their own serializers.
    NOTE(review): `categories`/`authors` are only handled in `create`, not in
    `update` — confirm whether updates are expected to modify them.
    """
    categories = serializers.ListSerializer(child=serializers.CharField(max_length=100), required=False)
    authors = serializers.ListSerializer(child=serializers.CharField(max_length=100), required=False)
    industryIdentifier = IndustryIdentifierSerializer(required=False)
    dimensions = DimensionsSerializer(required=False)
    readingModes = ReadingModesSerializer(required=False)
    panelizationSummary = PanelizationSummarySerializer(required=False)
    imageLinks = ImageLinksSummarySerializer(required=False)

    class Meta:
        model = VolumeInfo
        exclude = ('book', 'id')

    def update(self, instance, validated_data):
        # Delegate each nested payload to its declared sub-serializer, then
        # let ModelSerializer.update handle the remaining flat fields.
        industry_identifier_data = validated_data.pop('industryIdentifier', {})
        if industry_identifier_data:
            self.fields['industryIdentifier'].update(
                instance.industryIdentifier, industry_identifier_data)

        dimensions_data = validated_data.pop('dimensions', {})
        if dimensions_data:
            self.fields['dimensions'].update(instance.dimensions, dimensions_data)

        reading_modes_data = validated_data.pop('readingModes', {})
        if reading_modes_data:
            self.fields['readingModes'].update(instance.readingModes, reading_modes_data)

        panelization_summary_data = validated_data.pop('panelizationSummary', {})
        if panelization_summary_data:
            self.fields['panelizationSummary'].update(
                instance.panelizationSummary, panelization_summary_data)

        image_links_data = validated_data.pop('imageLinks', {})
        if image_links_data:
            self.fields['imageLinks'].update(instance.imageLinks, image_links_data)

        return super().update(instance, validated_data)

    def create(self, validated_data):
        """Create a VolumeInfo plus all nested objects supplied in the payload."""
        categories_data = validated_data.pop('categories', None)
        authors_data = validated_data.pop('authors', None)
        # BUG FIX: this used to pop 'industryIdentifiers' (plural), a key that
        # can never appear because the declared field is the singular
        # 'industryIdentifier'; the leftover key then reached
        # VolumeInfo.objects.create(**validated_data) as an unexpected kwarg.
        industry_identifier_data = validated_data.pop('industryIdentifier', None)
        dimensions_data = validated_data.pop('dimensions', None)
        reading_modes_data = validated_data.pop('readingModes', None)
        panelization_summary_data = validated_data.pop('panelizationSummary', None)
        image_links_data = validated_data.pop('imageLinks', None)
        book = validated_data.pop('book', None)

        volume_info = VolumeInfo.objects.create(book=book, **validated_data)

        if categories_data:
            for category_name in categories_data:
                volume_info.categories.add(Category.objects.create(name=category_name))
        if authors_data:
            for author_name in authors_data:
                volume_info.authors.add(Author.objects.create(fullName=author_name))
        if industry_identifier_data:
            IndustryIdentifier.objects.create(volumeInfo=volume_info, **industry_identifier_data)
        if dimensions_data:
            Dimensions.objects.create(volumeInfo=volume_info, **dimensions_data)
        if reading_modes_data:
            ReadingModes.objects.create(volumeInfo=volume_info, **reading_modes_data)
        if panelization_summary_data:
            PanelizationSummary.objects.create(volumeInfo=volume_info, **panelization_summary_data)
        if image_links_data:
            ImageLinks.objects.create(volumeInfo=volume_info, **image_links_data)

        volume_info.save()
        return volume_info
class DownloadAccessSerializer(NonNullModelSerializer):
    """Serializer for DownloadAccess; nested under AccessInfo, so the
    back-reference and surrogate id are excluded from the payload."""
    class Meta:
        model = DownloadAccess
        exclude = ('accessInfo', 'id')
class PdfSerializer(NonNullModelSerializer):
    """Serializer for Pdf; nested under AccessInfo, so the back-reference
    and surrogate id are excluded from the payload."""
    class Meta:
        model = Pdf
        exclude = ('accessInfo', 'id')
class EpubSerializer(NonNullModelSerializer):
    """Serializer for Epub; nested under AccessInfo, so the back-reference
    and surrogate id are excluded from the payload."""
    class Meta:
        model = Epub
        exclude = ('accessInfo', 'id')
class AccessInfoSerializer(NonNullModelSerializer):
    """Writable nested serializer for AccessInfo and its optional
    one-to-one children (downloadAccess, pdf, epub)."""
    downloadAccess = DownloadAccessSerializer(required=False)
    pdf = PdfSerializer(required=False)
    epub = EpubSerializer(required=False)

    class Meta:
        model = AccessInfo
        exclude = ('book', 'id')

    def update(self, instance, validated_data):
        """Update the AccessInfo row, cascading into each nested child
        present in the payload before updating the flat fields.

        Bug fix: the original passed ``pdf_data`` to both the epub and the
        downloadAccess child updates (copy-paste error); each child now
        receives its own payload.
        """
        pdf_data = validated_data.pop('pdf', {})
        if pdf_data:
            pdf_serializer = self.fields['pdf']
            pdf_instance = instance.pdf
            pdf_serializer.update(pdf_instance, pdf_data)
        epub_data = validated_data.pop('epub', {})
        if epub_data:
            epub_serializer = self.fields['epub']
            epub_instance = instance.epub
            # Was `pdf_data` before the fix.
            epub_serializer.update(epub_instance, epub_data)
        download_access_data = validated_data.pop('downloadAccess', {})
        if download_access_data:
            download_access_serializer = self.fields['downloadAccess']
            download_access_instance = instance.downloadAccess
            # Was `pdf_data` before the fix.
            download_access_serializer.update(
                download_access_instance, download_access_data)
        return super().update(instance, validated_data)

    def create(self, validated_data):
        """Create an AccessInfo row plus any nested children supplied."""
        pdf_data = validated_data.pop('pdf', None)
        epub_data = validated_data.pop('epub', None)
        download_access_data = validated_data.pop('downloadAccess', None)
        book = validated_data.pop('book', None)
        access_info = AccessInfo.objects.create(book=book, **validated_data)
        if pdf_data:
            Pdf.objects.create(accessInfo=access_info, **pdf_data)
        if epub_data:
            Epub.objects.create(accessInfo=access_info, **epub_data)
        if download_access_data:
            DownloadAccess.objects.create(
                accessInfo=access_info, **download_access_data)
        return access_info
class SearchInfoSerializer(NonNullModelSerializer):
    """Serializer for SearchInfo; nested under Book, so the back-reference
    and surrogate id are excluded from the payload."""
    class Meta:
        model = SearchInfo
        exclude = ('book', 'id')
class BookSerializer(NonNullModelSerializer):
    """Top-level serializer composing the four nested Book sections:
    searchInfo, accessInfo, volumeInfo and saleInfo."""
    searchInfo = SearchInfoSerializer(required=False)
    accessInfo = AccessInfoSerializer(required=False)
    volumeInfo = VolumeInfoSerializer(required=False)
    saleInfo = SaleInfoSerializer(required=False)

    class Meta:
        model = Book
        fields = '__all__'

    def update(self, instance, validated_data):
        """Delegate each nested section present in the payload to its own
        serializer, then let the parent update the flat Book fields."""
        search_info_data = validated_data.pop('searchInfo', {})
        if search_info_data:
            # NOTE: a fresh serializer instance is used here instead of
            # self.fields['searchInfo'] (kept as in the original).
            SearchInfoSerializer().update(instance.searchInfo, search_info_data)
        for field_name in ('volumeInfo', 'accessInfo', 'saleInfo'):
            section = validated_data.pop(field_name, {})
            if section:
                self.fields[field_name].update(
                    getattr(instance, field_name), section)
        return super().update(instance, validated_data)

    def create(self, validated_data):
        """Create the Book row first, then create each nested section with a
        back-reference to the newly created book."""
        sections = {
            name: validated_data.pop(name, None)
            for name in ('searchInfo', 'volumeInfo', 'accessInfo', 'saleInfo')
        }
        book = Book.objects.create(**validated_data)
        # Same creation order as the original implementation.
        for name in ('searchInfo', 'accessInfo', 'volumeInfo', 'saleInfo'):
            section = sections[name]
            if section:
                section['book'] = book
                self.fields[name].create(section)
        return book
| 38.594203 | 108 | 0.696583 |
24bd42737fc70e26da0686014b66d2095fc8f8e3 | 1,232 | py | Python | fhirtordf/rdfsupport/uriutils.py | janik-martin/fhirtordf | 05b23ba1df9f322c148b7f20ebbd6d58cb92cefc | [
"CC0-1.0"
] | 10 | 2017-11-20T07:18:36.000Z | 2022-02-03T12:59:26.000Z | fhirtordf/rdfsupport/uriutils.py | janik-martin/fhirtordf | 05b23ba1df9f322c148b7f20ebbd6d58cb92cefc | [
"CC0-1.0"
] | 22 | 2017-09-12T22:15:11.000Z | 2021-05-14T13:48:48.000Z | fhirtordf/rdfsupport/uriutils.py | janik-martin/fhirtordf | 05b23ba1df9f322c148b7f20ebbd6d58cb92cefc | [
"CC0-1.0"
] | 9 | 2018-05-09T07:51:51.000Z | 2022-03-09T16:03:19.000Z | from typing import Tuple, NamedTuple, Union, Optional
from rdflib import URIRef
from fhirtordf.rdfsupport.fhirresourcere import FHIR_RESOURCE_RE, FHIR_RE_ID, FHIR_RE_BASE, FHIR_RE_RESOURCE
from fhirtordf.rdfsupport.namespaces import FHIR
class FHIR_RESOURCE(NamedTuple):
    """Parsed components of a (possibly FHIR-format) resource URI."""
    # Base namespace of the URI; None when it could not be determined.
    namespace: Optional[URIRef]
    # FHIR resource-type URI (e.g. fhir:Patient); None for non-FHIR URIs.
    resource_type: Optional[URIRef]
    # Resource id / local-name portion of the URI.
    resource: str
def parse_fhir_resource_uri(uri: Union[URIRef, str]) -> FHIR_RESOURCE:
    """
    Use the FHIR Regular Expression for Resource URI's to determine the namespace and type
    of a given URI.  As an example, "http://hl7.org/fhir/Patient/p123" maps to the tuple
    ``('Patient', 'http://hl7.org/fhir')``

    :param uri: URI to parse
    :return: FHIR_RESOURCE (namespace, type, resource)
    """
    uri_str = str(uri)
    m = FHIR_RESOURCE_RE.match(uri_str)
    if m:
        return FHIR_RESOURCE(URIRef(m.group(FHIR_RE_BASE)), FHIR[m.group(FHIR_RE_RESOURCE)], m.group(FHIR_RE_ID))
    else:
        # Not in the FHIR format - we can only do namespace and name
        namespace, name = uri_str.rsplit('#', 1) if '#' in uri_str \
            else uri_str.rsplit('/', 1) if '/' in uri_str else (None, uri_str)
        # Bug fix: previously URIRef(None) produced the literal URI "None"
        # when the string contained neither '#' nor '/'.  Return None
        # instead, matching the Optional[URIRef] annotation on
        # FHIR_RESOURCE.namespace.
        return FHIR_RESOURCE(
            URIRef(namespace) if namespace is not None else None, None, name)
| 36.235294 | 113 | 0.702922 |
8de60812630f043b5be192bf4b9bf18272c064bc | 4,454 | py | Python | service_pro/service_pro/report/ar_salesman_summary/ar_salesman_summary.py | teambackoffice/service-pro | fa07dee3b0e349df5ee906112aee02d3eb0ea7a5 | [
"MIT"
] | 1 | 2021-08-18T06:56:15.000Z | 2021-08-18T06:56:15.000Z | service_pro/service_pro/report/ar_salesman_summary/ar_salesman_summary.py | teambackoffice/service-pro | fa07dee3b0e349df5ee906112aee02d3eb0ea7a5 | [
"MIT"
] | null | null | null | service_pro/service_pro/report/ar_salesman_summary/ar_salesman_summary.py | teambackoffice/service-pro | fa07dee3b0e349df5ee906112aee02d3eb0ea7a5 | [
"MIT"
] | 3 | 2020-12-01T14:17:37.000Z | 2022-02-02T17:12:29.000Z | # Copyright (c) 2013, jan and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _, scrub
from frappe.utils import flt, cint
from erpnext.accounts.party import get_partywise_advanced_payment_amount
from erpnext.accounts.report.accounts_receivable.accounts_receivable import ReceivablePayableReport
from six import iteritems
def execute(filters=None):
args = {
"party_type": "Customer",
"naming_by": ["Selling Settings", "cust_master_name"],
}
return AccountsReceivableSummary(filters).run(args)
class AccountsReceivableSummary(ReceivablePayableReport):
def run(self, args):
self.party_type = args.get('party_type')
self.party_naming_by = frappe.db.get_value(args.get("naming_by")[0], None, args.get("naming_by")[1])
self.get_columns()
self.get_data(args)
return self.columns, self.data
def get_data(self, args):
self.data = []
self.receivables = ReceivablePayableReport(self.filters).run(args)[1]
self.get_party_total(args)
party_advance_amount = get_partywise_advanced_payment_amount(self.party_type,
self.filters.report_date, self.filters.show_future_payments, self.filters.company) or {}
for party, party_dict in iteritems(self.party_total):
print("================================")
print(party_dict)
if party_dict.outstanding == 0:
continue
row = frappe._dict()
row.party = party
if self.party_naming_by == "Naming Series":
row.party_name = frappe.get_cached_value(self.party_type, party, scrub(self.party_type) + "_name")
row.update(party_dict)
# Advance against party
row.advance = party_advance_amount.get(party, 0)
# In AR/AP, advance shown in paid columns,
# but in summary report advance shown in separate column
row.paid -= row.advance
self.data.append(row)
def get_party_total(self, args):
self.party_total = frappe._dict()
print(self.receivables)
for d in self.receivables:
d.sales_man_name = frappe.get_value("Sales Invoice", d.voucher_no, "sales_man_name")
self.init_party_total(d)
# Add all amount columns
for k in list(self.party_total[d.party]):
if k not in ["currency", "sales_person"]:
if d.get(k):
self.party_total[d.party][k] += d.get(k, 0.0)
# set territory, customer_group, sales person etc
self.set_party_details(d)
def init_party_total(self, row):
self.party_total.setdefault(row.party, frappe._dict({
"invoiced": 0.0,
"paid": 0.0,
"credit_note": 0.0,
"outstanding": 0.0,
"range1": 0.0,
"range2": 0.0,
"range3": 0.0,
"range4": 0.0,
"range5": 0.0,
"sales_person": []
}))
def set_party_details(self, row):
print("ROOOOOOOOOOOOOOOOOOOOOOOOOOOOOOW")
print(row)
self.party_total[row.party].currency = row.currency
for key in ('territory', 'customer_group', 'supplier_group', "sales_man_name"):
if row.get(key):
self.party_total[row.party][key] = row.get(key)
if row.sales_person:
self.party_total[row.party].sales_person.append(row.sales_person)
def get_columns(self):
self.columns = []
self.add_column(_('Salesman'), fieldname='sales_man_name',fieldtype='Data')
self.add_column(label=_(self.party_type), fieldname='party',
fieldtype='Link', options=self.party_type, width=180)
if self.party_naming_by == "Naming Series":
self.add_column(_('{0} Name').format(self.party_type),
fieldname = 'party_name', fieldtype='Data')
credit_debit_label = "Credit Note" if self.party_type == 'Customer' else "Debit Note"
self.add_column(_('Advance Amount'), fieldname='advance')
self.add_column(_('Invoiced Amount'), fieldname='invoiced')
self.add_column(_('Paid Amount'), fieldname='paid')
self.add_column(_(credit_debit_label), fieldname='credit_note')
self.add_column(_('Outstanding Amount'), fieldname='outstanding')
self.setup_ageing_columns()
def setup_ageing_columns(self):
for i, label in enumerate(["0-{range1}".format(range1=self.filters["range1"]),
"{range1}-{range2}".format(range1=cint(self.filters["range1"])+ 1, range2=self.filters["range2"]),
"{range2}-{range3}".format(range2=cint(self.filters["range2"])+ 1, range3=self.filters["range3"]),
"{range3}-{range4}".format(range3=cint(self.filters["range3"])+ 1, range4=self.filters["range4"]),
"{range4}-{above}".format(range4=cint(self.filters["range4"])+ 1, above=_("Above"))]):
self.add_column(label=label, fieldname='range' + str(i+1)) | 34.261538 | 102 | 0.715537 |
6759334fe7a4bfa2dbc5f670694978ff79fa732d | 386 | py | Python | proxypool/setting.py | nicajonh/ProxyPool | 565432ff269928e4bce31c3712fc3c1700b9236f | [
"Apache-2.0"
] | null | null | null | proxypool/setting.py | nicajonh/ProxyPool | 565432ff269928e4bce31c3712fc3c1700b9236f | [
"Apache-2.0"
] | null | null | null | proxypool/setting.py | nicajonh/ProxyPool | 565432ff269928e4bce31c3712fc3c1700b9236f | [
"Apache-2.0"
] | 1 | 2019-06-19T08:06:44.000Z | 2019-06-19T08:06:44.000Z | """
-------------------------------------------------
File Name: setting.py
Description: 设置模块,包含了一些常量。
Author: Liu
Date: 2016/12/9
-------------------------------------------------
"""
# Redis Host
HOST = 'localhost'
# Redis PORT
PORT = 6379
POOL_LOWER_THRESHOLD = 10
POOL_UPPER_THRESHOLD = 40
VAILD_CHECK_CYCLE = 600
POOL_LEN_CHECK_CYCLE = 20
| 18.380952 | 49 | 0.476684 |
9b1efab9f94a6e759b10f4a31dc0c322db3c3929 | 3,646 | py | Python | cogs/error.py | Tarikazana/Verifier | 59a7f8e3012b8a39c33db918fe5bb83d66c36b47 | [
"MIT"
] | 2 | 2021-11-18T03:32:34.000Z | 2022-02-13T13:19:46.000Z | cogs/error.py | Tarikazana/Verifier | 59a7f8e3012b8a39c33db918fe5bb83d66c36b47 | [
"MIT"
] | null | null | null | cogs/error.py | Tarikazana/Verifier | 59a7f8e3012b8a39c33db918fe5bb83d66c36b47 | [
"MIT"
] | 1 | 2021-11-18T03:30:07.000Z | 2021-11-18T03:30:07.000Z | """
> error.py
> Author: Tari Kazana
> cooldown and stuff
"""
import discord
import traceback
import sys
import math
import asyncio
from discord.ext import commands
class err(commands.Cog):
    """Global error-handler cog: converts common command errors into
    friendly replies instead of unhandled tracebacks."""

    def __init__(self, bot):
        self.bot = bot

    @staticmethod
    def _plural(value, unit):
        """Return e.g. '1 minute' / '2 minutes'."""
        return "{} {}{}".format(value, unit, "" if value == 1 else "s")

    @staticmethod
    def _format_cooldown(seconds):
        """Format a cooldown (in whole seconds) as a human-readable string.

        Bug fix: the original replied with a bare number for cooldowns
        under a minute (e.g. "Please wait 45 before using this again"),
        with no unit; sub-minute values now read "45 seconds".
        """
        hours, rem = divmod(seconds, 3600)
        minutes, secs = divmod(rem, 60)
        parts = []
        if hours:
            parts.append(err._plural(hours, "hour"))
        if minutes:
            parts.append(err._plural(minutes, "minute"))
        # Always show seconds when nothing else is shown (covers 0 too).
        if secs or not parts:
            parts.append(err._plural(secs, "second"))
        if len(parts) == 1:
            return parts[0]
        return "{} and {}".format(", ".join(parts[:-1]), parts[-1])

    @staticmethod
    def _format_missing_perms(missing_perms):
        """Humanize a list of permission flag names for display."""
        missing = [perm.replace('_', ' ').replace('guild', 'server').title()
                   for perm in missing_perms]
        if len(missing) > 2:
            return '{}, and {}'.format("**, **".join(missing[:-1]), missing[-1])
        return ' and '.join(missing)

    @commands.Cog.listener()
    async def on_command_error(self, ctx, error):
        """Central handler invoked for errors raised by any command."""
        # if command has local error handler, return
        if hasattr(ctx.command, 'on_error'):
            return
        # get the original exception
        error = getattr(error, 'original', error)
        if isinstance(error, commands.CommandNotFound):
            return
        if isinstance(error, commands.BotMissingPermissions):
            fmt = self._format_missing_perms(error.missing_perms)
            _message = 'I need the **{}** permission(s) to run this command.'.format(fmt)
            await ctx.reply(_message)
            return
        if isinstance(error, commands.DisabledCommand):
            await ctx.reply('This command has been disabled.')
            return
        if isinstance(error, commands.CommandOnCooldown):
            time = self._format_cooldown(math.ceil(error.retry_after))
            await ctx.reply("Please wait {} before using this again".format(time))
            return
        if isinstance(error, commands.MissingPermissions):
            fmt = self._format_missing_perms(error.missing_perms)
            _message = 'You need the **{}** permission(s) to use this command.'.format(fmt)
            await ctx.reply(_message)
            return
        if isinstance(error, commands.UserInputError):
            await ctx.reply("Invalid input.")
            return
        if isinstance(error, commands.NoPrivateMessage):
            try:
                await ctx.author.send('This command cannot be used in direct messages.')
            except discord.Forbidden:
                pass
            return
        # Anything unrecognized: log the traceback and surface it in chat.
        print('Ignoring exception in command {}:'.format(ctx.command), file=sys.stderr)
        traceback.print_exception(type(error), error, error.__traceback__, file=sys.stderr)
        await ctx.reply(f'```Ignoring exception in command {ctx.command}:\nerror: {error}```')
def setup(bot):
    """Entry point used by discord.py's extension loader to register the cog."""
    bot.add_cog(err(bot))
4c969c981b2fbf3a541e205d497a1c0d87a9e36d | 18,752 | py | Python | koku/masu/test/util/test_common.py | rubik-ai/koku | 3255d1c217b7b6685cb2e130bf4e025946e76fac | [
"Apache-2.0"
] | 157 | 2018-04-30T16:27:53.000Z | 2022-03-31T08:17:21.000Z | koku/masu/test/util/test_common.py | rubik-ai/koku | 3255d1c217b7b6685cb2e130bf4e025946e76fac | [
"Apache-2.0"
] | 3,250 | 2018-04-26T14:14:25.000Z | 2022-03-31T23:49:15.000Z | koku/masu/test/util/test_common.py | rubik-ai/koku | 3255d1c217b7b6685cb2e130bf4e025946e76fac | [
"Apache-2.0"
] | 65 | 2018-05-10T14:11:50.000Z | 2022-03-18T19:22:58.000Z | #
# Copyright 2021 Red Hat Inc.
# SPDX-License-Identifier: Apache-2.0
#
"""Test the common util functions."""
import gzip
import json
import types
from datetime import date
from datetime import datetime
from datetime import timedelta
from decimal import Decimal
from os.path import exists
from dateutil import parser
from django.test import TestCase
from tenant_schemas.utils import schema_context
import masu.util.common as common_utils
from api.models import Provider
from api.utils import DateHelper
from masu.config import Config
from masu.external import LISTEN_INGEST
from masu.external import POLL_INGEST
from masu.test import MasuTestCase
from reporting.provider.aws.models import AWSCostEntryBill
from reporting.provider.aws.models import AWSEnabledTagKeys
class CommonUtilTests(MasuTestCase):
    """Test Common Masu functions."""
    def test_extract_uuids_from_string(self):
        """Test that a uuid is extracted from a string."""
        assembly_id = "882083b7-ea62-4aab-aa6a-f0d08d65ee2b"
        cur_key = f"/koku/20180701-20180801/{assembly_id}/koku-1.csv.gz"
        uuids = common_utils.extract_uuids_from_string(cur_key)
        self.assertEqual(len(uuids), 1)
        self.assertEqual(uuids.pop(), assembly_id)
    def test_extract_uuids_from_string_capitals(self):
        """Test that a uuid is extracted from a string with capital letters."""
        assembly_id = "882083B7-EA62-4AAB-aA6a-f0d08d65Ee2b"
        cur_key = f"/koku/20180701-20180801/{assembly_id}/koku-1.csv.gz"
        uuids = common_utils.extract_uuids_from_string(cur_key)
        self.assertEqual(len(uuids), 1)
        self.assertEqual(uuids.pop(), assembly_id)
    def test_stringify_json_data_list(self):
        """Test that each element of JSON is returned as a string."""
        # datetime/Decimal are not JSON serializable, so dumps must fail first.
        data = [{"datetime": datetime.utcnow(), "float": 1.2, "int": 1, "str": "string"}, {"Decimal": Decimal("1.2")}]
        with self.assertRaises(TypeError):
            json.dumps(data)
        result = common_utils.stringify_json_data(data)
        self.assertIsInstance(result[0]["datetime"], str)
        self.assertIsInstance(result[0]["float"], str)
        self.assertIsInstance(result[0]["int"], str)
        self.assertIsInstance(result[0]["str"], str)
        self.assertIsInstance(result[1]["Decimal"], str)
    def test_stringify_json_data_dict(self):
        """Test that the dict block is covered."""
        data = {"datetime": datetime.utcnow(), "float": 1.2, "int": 1, "str": "string", "Decimal": Decimal("1.2")}
        with self.assertRaises(TypeError):
            json.dumps(data)
        result = common_utils.stringify_json_data(data)
        self.assertIsInstance(result["datetime"], str)
        self.assertIsInstance(result["float"], str)
        self.assertIsInstance(result["int"], str)
        self.assertIsInstance(result["str"], str)
        self.assertIsInstance(result["Decimal"], str)
    def test_ingest_method_type(self):
        """Test that the correct ingest method is returned for provider type."""
        test_matrix = [
            {"provider_type": Provider.PROVIDER_AWS, "expected_ingest": POLL_INGEST},
            {"provider_type": Provider.PROVIDER_AWS_LOCAL, "expected_ingest": POLL_INGEST},
            {"provider_type": Provider.PROVIDER_OCP, "expected_ingest": LISTEN_INGEST},
            {"provider_type": Provider.PROVIDER_AZURE_LOCAL, "expected_ingest": POLL_INGEST},
            {"provider_type": "NEW_TYPE", "expected_ingest": None},
        ]
        for test in test_matrix:
            ingest_method = common_utils.ingest_method_for_provider(test.get("provider_type"))
            self.assertEqual(ingest_method, test.get("expected_ingest"))
    def test_month_date_range_tuple(self):
        """Test month_date_range_tuple returns first of the month and first of next month."""
        test_date = datetime(year=2018, month=12, day=15)
        expected_start_month = datetime(year=2018, month=12, day=1)
        expected_start_next_month = datetime(year=2019, month=1, day=1)
        start_month, first_next_month = common_utils.month_date_range_tuple(test_date)
        self.assertEquals(start_month, expected_start_month)
        self.assertEquals(first_next_month, expected_start_next_month)
    def test_date_range(self):
        """Test that a date range generator is returned."""
        start_date = "2020-01-01"
        end_date = "2020-02-29"
        date_generator = common_utils.date_range(start_date, end_date)
        start_date = parser.parse(start_date)
        end_date = parser.parse(end_date)
        self.assertIsInstance(date_generator, types.GeneratorType)
        first_date = next(date_generator)
        self.assertEqual(first_date, start_date.date())
        for day in date_generator:
            self.assertIsInstance(day, date)
            self.assertGreater(day, start_date.date())
            self.assertLessEqual(day, end_date.date())
        # After the loop, `day` holds the last yielded value.
        self.assertEqual(day, end_date.date())
    def test_date_range_pair_date_args(self):
        """Test that start and end dates are returned by this generator with date args passed instead of str."""
        start_date = date(2020, 1, 1)
        end_date = date(2020, 2, 29)
        step = 3
        date_generator = common_utils.date_range_pair(start_date, end_date, step=step)
        start_date = datetime(start_date.year, start_date.month, start_date.day)
        end_date = datetime(end_date.year, end_date.month, end_date.day)
        self.assertIsInstance(date_generator, types.GeneratorType)
        first_start, first_end = next(date_generator)
        self.assertEqual(first_start, start_date.date())
        self.assertEqual(first_end, start_date.date() + timedelta(days=step))
        for start, end in date_generator:
            self.assertIsInstance(start, date)
            self.assertIsInstance(end, date)
            self.assertGreater(start, start_date.date())
            self.assertLessEqual(end, end_date.date())
        self.assertEqual(end, end_date.date())
    def test_date_range_pair(self):
        """Test that start and end dates are returned by this generator."""
        start_date = "2020-01-01"
        end_date = "2020-02-29"
        step = 3
        date_generator = common_utils.date_range_pair(start_date, end_date, step=step)
        start_date = parser.parse(start_date)
        end_date = parser.parse(end_date)
        self.assertIsInstance(date_generator, types.GeneratorType)
        first_start, first_end = next(date_generator)
        self.assertEqual(first_start, start_date.date())
        self.assertEqual(first_end, start_date.date() + timedelta(days=step))
        for start, end in date_generator:
            self.assertIsInstance(start, date)
            self.assertIsInstance(end, date)
            self.assertGreater(start, start_date.date())
            self.assertLessEqual(end, end_date.date())
        self.assertEqual(end, end_date.date())
    def test_date_range_pair_one_day(self):
        """Test that generator works for a single day."""
        start_date = "2020-01-01"
        end_date = start_date
        step = 3
        date_generator = common_utils.date_range_pair(start_date, end_date, step=step)
        start_date = parser.parse(start_date)
        end_date = parser.parse(end_date)
        self.assertIsInstance(date_generator, types.GeneratorType)
        first_start, first_end = next(date_generator)
        self.assertEqual(first_start, start_date.date())
        self.assertEqual(first_end, end_date.date())
        # The single-day range should yield exactly one pair.
        with self.assertRaises(StopIteration):
            next(date_generator)
    def test_safe_float(self):
        """Test the safe_float method handles good and bad inputs."""
        out = common_utils.safe_float("foo")
        self.assertEqual(out, float(0))
        out = common_utils.safe_float("1.1")
        self.assertEqual(out, float("1.1"))
    def test_safe_dict(self):
        """Test the safe_dict method handles good and bad inputs."""
        out = common_utils.safe_dict(1)
        self.assertEqual(out, "{}")
        expected = '{"a": "b", "c": "d"}'
        out = common_utils.safe_dict(expected)
        self.assertEqual(out, expected)
    def test_get_path_prefix(self):
        """Test that path prefix is returned."""
        account = "10001"
        provider_type = Provider.PROVIDER_AWS
        provider_uuid = self.aws_provider_uuid
        start_date = datetime.utcnow().date()
        year = start_date.strftime("%Y")
        month = start_date.strftime("%m")
        expected_path_prefix = f"{Config.WAREHOUSE_PATH}/{Config.PARQUET_DATA_TYPE}"
        expected_path = (
            f"{expected_path_prefix}/{account}/{provider_type}/" f"source={provider_uuid}/year={year}/month={month}"
        )
        path = common_utils.get_path_prefix(account, provider_type, provider_uuid, start_date, "parquet")
        self.assertEqual(path, expected_path)
        # daily=True inserts a "daily" segment after the prefix.
        expected_path = (
            f"{expected_path_prefix}/daily/{account}/{provider_type}/"
            f"source={provider_uuid}/year={year}/month={month}"
        )
        path = common_utils.get_path_prefix(account, provider_type, provider_uuid, start_date, "parquet", daily=True)
        self.assertEqual(path, expected_path)
        # Test with report_type
        report_type = "pod_report"
        expected_path = (
            f"{expected_path_prefix}/{account}/{provider_type}/{report_type}/"
            f"source={provider_uuid}/year={year}/month={month}"
        )
        path = common_utils.get_path_prefix(
            account, provider_type, provider_uuid, start_date, "parquet", report_type=report_type
        )
        self.assertEqual(path, expected_path)
    def test_get_hive_table_path(self):
        """Test that we resolve the path for a Hive table."""
        account = "10001"
        provider_type = Provider.PROVIDER_AWS
        expected_path_prefix = f"{Config.WAREHOUSE_PATH}/{Config.PARQUET_DATA_TYPE}"
        expected_path = f"{expected_path_prefix}/{account}/{provider_type}"
        path = common_utils.get_hive_table_path(account, provider_type)
        self.assertEqual(path, expected_path)
        expected_path = f"{expected_path_prefix}/daily/{account}/{provider_type}/raw"
        path = common_utils.get_hive_table_path(account, provider_type, daily=True)
        self.assertEqual(path, expected_path)
        # Test with report_type
        report_type = "pod_report"
        expected_path = f"{expected_path_prefix}/{account}/{provider_type}/{report_type}"
        path = common_utils.get_hive_table_path(account, provider_type, report_type=report_type)
        self.assertEqual(path, expected_path)
    def test_determine_if_full_summary_update_needed(self):
        """Test that we process full month under the correct conditions."""
        dh = DateHelper()
        with schema_context(self.schema):
            bills = AWSCostEntryBill.objects.all()
            current_month_bill = bills.filter(billing_period_start=dh.this_month_start).first()
            last_month_bill = bills.filter(billing_period_start=dh.last_month_start).first()
            # Current month, previously summarized
            self.assertFalse(common_utils.determine_if_full_summary_update_needed(current_month_bill))
            # Previous month
            self.assertTrue(common_utils.determine_if_full_summary_update_needed(last_month_bill))
            current_month_bill.summary_data_creation_datetime = None
            current_month_bill.save()
            # Current month, has not been summarized before
            self.assertTrue(common_utils.determine_if_full_summary_update_needed(current_month_bill))
    def test_split_alphanumeric_string(self):
        """Test the alpha-numeric split function."""
        s = "4 GiB"
        expected = ["4 ", "GiB"]
        result = list(common_utils.split_alphanumeric_string(s))
        self.assertEqual(result, expected)
    def test_batch(self):
        """Test batch function with default kwargs"""
        max_val = 101
        vals = list(range(max_val))
        res = list(common_utils.batch(vals))
        self.assertEqual(len(res), max_val)
        self.assertTrue(all(len(e) == 1 for e in res))
    def test_batch_set(self):
        """Test batch function using set as iterable"""
        max_val = 101
        vals = list(range(max_val))
        res = list(common_utils.batch(set(vals), _slice=10))
        self.assertEqual(len(res), 11)
    def test_batch_negative_index(self):
        """Test batch function with negative val for stop/start index"""
        max_val = 101
        vals = list(range(max_val))
        res = list(common_utils.batch(vals, stop=-1, _slice=10))
        self.assertEqual(len(res), (max_val // len(res[0])))
        res = list(common_utils.batch(vals, start=-1, _slice=10))
        self.assertEqual(len(res), 1)
    def test_batch_start_index(self):
        """Test batch function with positive start index"""
        max_val = 101
        vals = list(range(max_val))
        res = list(common_utils.batch(vals, start=10))
        self.assertEqual(len(res), max_val - 10)
    def test_batch_start_none(self):
        """Test batch function with None start index"""
        max_val = 101
        vals = list(range(max_val))
        res = list(common_utils.batch(vals, start=None))
        self.assertEqual(len(res), 101)
    def test_batch_empty(self):
        """Test batch function with empty iterable"""
        res = list(common_utils.batch([]))
        self.assertEqual(res, [])
    def test_batch_stop_gt_len(self):
        """Test batch function with stop index > len(iterable)"""
        max_val = 101
        vals = list(range(max_val))
        res = list(common_utils.batch(vals, stop=10000, _slice=10))
        self.assertEqual(len(res), 11)
    def test_batch_stop_lt_start(self):
        """Test batch function with stop_ix < start_ix"""
        max_val = 101
        vals = list(range(max_val))
        res = list(common_utils.batch(vals, start=10, stop=9))
        self.assertEqual(res, [])
    def test_batch_str_index(self):
        """Test batch function with number strings for indexes"""
        max_val = 101
        vals = list(range(max_val))
        res = list(common_utils.batch(vals, start="0", stop="10"))
        self.assertEqual(len(res), 10)
    def test_batch_value_error(self):
        """Test batch function with bad strings for index"""
        max_val = 101
        vals = list(range(max_val))
        with self.assertRaises(ValueError):
            _ = list(common_utils.batch(vals, start="eek"))
    def test_create_enabled_keys(self):
        # Snapshot the existing rows so they can be restored afterwards,
        # then seed a known set of enabled/disabled keys.
        with schema_context(self.schema):
            orig_keys = [{"key": e.key, "enabled": e.enabled} for e in AWSEnabledTagKeys.objects.all()]
            AWSEnabledTagKeys.objects.all().delete()
            for key in ("masu", "database", "processor", "common"):
                AWSEnabledTagKeys.objects.create(key=key, enabled=(key != "masu"))
            all_keys = list(AWSEnabledTagKeys.objects.all())
            orig_disabled = {e.key for e in all_keys if not e.enabled}
            orig_enabled = {e.key for e in all_keys if e.enabled}
        enabled = orig_enabled.union({"ek_test1", "ek_test2"})
        common_utils.create_enabled_keys(self.schema, AWSEnabledTagKeys, enabled)
        # Read the resulting state, then restore the original rows.
        with schema_context(self.schema):
            all_keys = list(AWSEnabledTagKeys.objects.all())
            AWSEnabledTagKeys.objects.all().delete()
            AWSEnabledTagKeys.objects.bulk_create([AWSEnabledTagKeys(**rec) for rec in orig_keys])
        check_disabled = {d.key for d in all_keys if not d.enabled}
        check_enabled = {e.key for e in all_keys if e.enabled}
        self.assertEqual(enabled, check_enabled)
        self.assertEqual(orig_disabled, check_disabled)
    def test_update_enabled_keys(self):
        # Snapshot the existing rows so they can be restored afterwards,
        # then seed a known set of enabled/disabled keys.
        with schema_context(self.schema):
            orig_keys = [{"key": e.key, "enabled": e.enabled} for e in AWSEnabledTagKeys.objects.all()]
            AWSEnabledTagKeys.objects.all().delete()
            for key in ("masu", "database", "processor", "common"):
                AWSEnabledTagKeys.objects.create(key=key, enabled=(key != "masu"))
            all_keys = list(AWSEnabledTagKeys.objects.all())
            orig_disabled = {e.key for e in all_keys if not e.enabled}
            orig_enabled = {e.key for e in all_keys if e.enabled}
        # Drop one previously-enabled key so the update disables it.
        disabled = None
        enabled = set()
        for i, k in enumerate(orig_enabled):
            if i == 0:
                disabled = k
            else:
                enabled.add(k)
        new_keys = {"ek_test1", "ek_test2"}
        enabled.update(new_keys)
        common_utils.update_enabled_keys(self.schema, AWSEnabledTagKeys, enabled)
        # Read the resulting state, then restore the original rows.
        with schema_context(self.schema):
            all_keys = list(AWSEnabledTagKeys.objects.all())
            AWSEnabledTagKeys.objects.all().delete()
            AWSEnabledTagKeys.objects.bulk_create([AWSEnabledTagKeys(**rec) for rec in orig_keys])
        all_keys_set = {k.key for k in all_keys}
        check_disabled = {d.key for d in all_keys if not d.enabled}
        check_enabled = {e.key for e in all_keys if e.enabled}
        self.assertTrue(new_keys.isdisjoint(all_keys))
        self.assertTrue(disabled in all_keys_set)
        self.assertTrue(disabled in check_disabled)
        self.assertEqual(orig_disabled.intersection(check_disabled), orig_disabled)
        self.assertNotEqual(orig_disabled, check_disabled)
        self.assertNotEqual(orig_enabled, check_enabled)
        self.assertEqual((enabled - new_keys), check_enabled)
    def test_strip_characters_from_column_name(self):
        """Test that column names are converted properly."""
        bad_str = r"column\one:two-three four,five/six_seven"
        expected = "column_one_two_three_four_five_six_seven"
        result = common_utils.strip_characters_from_column_name(bad_str)
        self.assertEqual(result, expected)
class NamedTemporaryGZipTests(TestCase):
    """Tests for NamedTemporaryGZip."""

    def test_temp_gzip_is_removed(self):
        """Test that the gzip file is removed when the context exits."""
        with common_utils.NamedTemporaryGZip() as temp_gzip:
            file_name = temp_gzip.name
            self.assertTrue(exists(file_name))
        self.assertFalse(exists(file_name))

    def test_gzip_is_readable(self):
        """Test that the written gzip file is readable."""
        test_data = "Test Read Gzip"
        with common_utils.NamedTemporaryGZip() as temp_gzip:
            temp_gzip.write(test_data)
            temp_gzip.close()
            with gzip.open(temp_gzip.name, "rt") as f:
                read_data = f.read()
        # assertEqual: assertEquals is a deprecated alias (removed in Python 3.12).
        self.assertEqual(test_data, read_data)
| 40.854031 | 118 | 0.664782 |
6715fb73480288f8b6ca2b2acbe0595ff5784679 | 48,813 | py | Python | core/domain/user_jobs_one_off.py | TheoLipeles/oppia | cd0bb873e08fa716014f3d1480fbbfee95b89121 | [
"Apache-2.0"
] | 2 | 2021-03-07T18:39:15.000Z | 2021-03-29T20:09:11.000Z | core/domain/user_jobs_one_off.py | TheoLipeles/oppia | cd0bb873e08fa716014f3d1480fbbfee95b89121 | [
"Apache-2.0"
] | null | null | null | core/domain/user_jobs_one_off.py | TheoLipeles/oppia | cd0bb873e08fa716014f3d1480fbbfee95b89121 | [
"Apache-2.0"
] | null | null | null | # Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Jobs for queries personalized to individual users."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import ast
import copy
import datetime
import imghdr
from constants import constants
from core import jobs
from core.domain import exp_fetchers
from core.domain import exp_services
from core.domain import image_services
from core.domain import rights_manager
from core.domain import subscription_services
from core.domain import user_services
from core.platform import models
import feconf
import python_utils
import utils
(exp_models, collection_models, feedback_models, user_models) = (
models.Registry.import_models([
models.NAMES.exploration, models.NAMES.collection,
models.NAMES.feedback, models.NAMES.user]))
datastore_services = models.Registry.import_datastore_services()
_LANGUAGES_TO_RESET = ['hu', 'mk', 'sv', 'tr', 'de', 'fr', 'nl', 'pt']
class UserContributionsOneOffJob(jobs.BaseMapReduceOneOffJobManager):
    """One-off job for creating and populating UserContributionsModels for
    all registered users that have contributed.
    """

    @classmethod
    def entity_classes_to_map_over(cls):
        """Return a list of datastore class references to map over."""
        return [exp_models.ExplorationSnapshotMetadataModel]

    @staticmethod
    def map(item):
        """Emits (committer_id, edit-info dict) for each exploration commit.

        The dict records which exploration was touched and at which version,
        so the reducer can distinguish creations (version 1) from edits.
        """
        yield (
            item.committer_id, {
                'exploration_id': item.get_unversioned_instance_id(),
                'version_string': item.get_version_string(),
            })

    @staticmethod
    def reduce(key, version_and_exp_ids):
        """Creates or updates the UserContributionsModel for user `key`.

        An exploration counts as 'created' by the user if they committed
        its version 1, and as 'edited' for any commit at all.
        """
        created_ids = set()
        edited_ids = set()

        for serialized_edit in version_and_exp_ids:
            edit = ast.literal_eval(serialized_edit)
            exp_id = edit['exploration_id']
            edited_ids.add(exp_id)
            if edit['version_string'] == '1':
                created_ids.add(exp_id)

        existing_contributions = user_services.get_user_contributions(
            key, strict=False)
        if existing_contributions is None:
            user_services.create_user_contributions(
                key, list(created_ids), list(edited_ids))
        else:
            user_services.update_user_contributions(
                key, list(created_ids), list(edited_ids))
class UsernameLengthDistributionOneOffJob(jobs.BaseMapReduceOneOffJobManager):
    """One-off job for calculating the distribution of username lengths."""

    @classmethod
    def entity_classes_to_map_over(cls):
        """Return a list of datastore class references to map over."""
        return [user_models.UserSettingsModel]

    @staticmethod
    def map(item):
        """Emits (username length, 1) for each user that has a username."""
        if item.username is None:
            return
        yield (len(item.username), 1)

    @staticmethod
    def reduce(key, stringified_username_counter):
        """Emits (length, number of usernames with that length)."""
        parsed_counts = [
            ast.literal_eval(v) for v in stringified_username_counter]
        yield (key, len(parsed_counts))
class UsernameLengthAuditOneOffJob(jobs.BaseMapReduceOneOffJobManager):
    """Job that audits and validates username lengths."""

    @classmethod
    def entity_classes_to_map_over(cls):
        return [user_models.UserSettingsModel]

    @staticmethod
    def map(model_instance):
        """Yields (length, username) for usernames longer than 20 characters.

        Users who have not yet chosen a username have username=None (other
        jobs in this file guard for this); skip them, since len(None) would
        raise a TypeError and fail the whole shard.
        """
        if model_instance.username is None:
            return
        if len(model_instance.username) > 20:
            yield (len(model_instance.username), model_instance.username)

    @staticmethod
    def reduce(key, values):
        """Reports all overly-long usernames, grouped and sorted by length."""
        yield ('Length: %s' % key, 'Usernames: %s' % sorted(values))
class LongUserBiosOneOffJob(jobs.BaseMapReduceOneOffJobManager):
    """One-off job for calculating the length of user_bios."""

    @classmethod
    def entity_classes_to_map_over(cls):
        """Return a list of datastore class references to map over."""
        return [user_models.UserSettingsModel]

    @staticmethod
    def map(item):
        """Emits (bio length, username); a missing bio counts as length 0."""
        bio = item.user_bio
        yield (0 if bio is None else len(bio), item.username)

    @staticmethod
    def reduce(userbio_length, stringified_usernames):
        """Reports the usernames whose bios exceed 500 characters."""
        if int(userbio_length) > 500:
            yield (userbio_length, stringified_usernames)
class DashboardSubscriptionsOneOffJob(jobs.BaseMapReduceOneOffJobManager):
    """One-off job for subscribing users to explorations, collections, and
    feedback threads.
    """

    @classmethod
    def entity_classes_to_map_over(cls):
        """Return a list of datastore class references to map over."""
        return [
            exp_models.ExplorationRightsModel,
            collection_models.CollectionRightsModel,
            feedback_models.GeneralFeedbackMessageModel]

    @staticmethod
    def map(item):
        """Implements the map function for this job.

        Emits (user_id, {'type': ..., 'id': ...}) pairs describing what each
        user should be subscribed to, depending on which model class `item`
        is an instance of. Duplicate emissions are harmless: the reduce step
        calls subscription services that the reducer applies per pair.
        """
        if isinstance(item, feedback_models.GeneralFeedbackMessageModel):
            # Subscribe the author of a feedback message to its thread.
            if item.author_id:
                yield (
                    item.author_id, {
                        'type': 'feedback',
                        'id': item.thread_id
                    })
        elif isinstance(item, exp_models.ExplorationRightsModel):
            if item.deleted:
                return

            if not item.community_owned:
                # Subscribe all current owners and editors.
                for owner_id in item.owner_ids:
                    yield (
                        owner_id, {
                            'type': 'exploration',
                            'id': item.id
                        })
                for editor_id in item.editor_ids:
                    yield (
                        editor_id, {
                            'type': 'exploration',
                            'id': item.id
                        })
            else:
                # A community-owned exploration has no current owners/editors,
                # so go through the rights history and subscribe everyone who
                # was an owner or editor at any past version.
                current_version = item.version
                for version in python_utils.RANGE(1, current_version + 1):
                    model = exp_models.ExplorationRightsModel.get_version(
                        item.id, version)

                    if not model.community_owned:
                        for owner_id in model.owner_ids:
                            yield (
                                owner_id, {
                                    'type': 'exploration',
                                    'id': item.id
                                })
                        for editor_id in model.editor_ids:
                            yield (
                                editor_id, {
                                    'type': 'exploration',
                                    'id': item.id
                                })
        elif isinstance(item, collection_models.CollectionRightsModel):
            # NOTE TO DEVELOPERS: Although the code handling subscribing to
            # collections is very similar to the code above for explorations,
            # it is not abstracted out due to the majority of the coding being
            # yield statements. These must happen inside the generator method
            # (which is this method) and, as a result, there is little common
            # code between the two code blocks which can be effectively
            # abstracted.
            if item.deleted:
                return

            if not item.community_owned:
                # Subscribe all current owners and editors.
                for owner_id in item.owner_ids:
                    yield (
                        owner_id, {
                            'type': 'collection',
                            'id': item.id
                        })
                for editor_id in item.editor_ids:
                    yield (
                        editor_id, {
                            'type': 'collection',
                            'id': item.id
                        })
            else:
                # Go through the rights history, as for explorations above.
                current_version = item.version
                for version in python_utils.RANGE(1, current_version + 1):
                    model = (
                        collection_models.CollectionRightsModel.get_version(
                            item.id, version))

                    if not model.community_owned:
                        for owner_id in model.owner_ids:
                            yield (
                                owner_id, {
                                    'type': 'collection',
                                    'id': item.id
                                })
                        for editor_id in model.editor_ids:
                            yield (
                                editor_id, {
                                    'type': 'collection',
                                    'id': item.id
                                })

    @staticmethod
    def reduce(key, stringified_values):
        """Implements the reduce function for this job.

        Applies the actual subscription for each pair emitted by the map
        step. `key` is the user id; each stringified value is a dict with
        'type' and 'id' keys.
        """
        values = [ast.literal_eval(v) for v in stringified_values]
        for item in values:
            if item['type'] == 'feedback':
                subscription_services.subscribe_to_thread(key, item['id'])
            elif item['type'] == 'exploration':
                subscription_services.subscribe_to_exploration(key, item['id'])
            elif item['type'] == 'collection':
                subscription_services.subscribe_to_collection(key, item['id'])
class DashboardStatsOneOffJob(jobs.BaseMapReduceOneOffJobManager):
    """One-off job for populating weekly dashboard stats for all registered
    users who have a non-None value of UserStatsModel.
    """

    @classmethod
    def entity_classes_to_map_over(cls):
        """Return a list of datastore class references to map over."""
        return [user_models.UserSettingsModel]

    @staticmethod
    def map(item):
        """Implements the map function for this job.

        Delegates to user_services to append the user's current dashboard
        stats to their stats log. Nothing is emitted, so this job has no
        reduce step.
        """
        user_services.update_dashboard_stats_log(item.id)
class UserFirstContributionMsecOneOffJob(jobs.BaseMapReduceOneOffJobManager):
    """One-off job that updates first contribution time in milliseconds for
    current users. This job makes the assumption that once an exploration is
    published, it remains published. This job is not completely precise in that
    (1) we ignore explorations that have been published in the past but are now
    unpublished, and (2) commits that were made during an interim unpublished
    period are counted against the first publication date instead of the second
    publication date.
    """

    @classmethod
    def entity_classes_to_map_over(cls):
        """Return a list of datastore class references to map over."""
        return [exp_models.ExplorationRightsSnapshotMetadataModel]

    @staticmethod
    def map(item):
        """Implements the map function for this job.

        For each rights-snapshot commit on a currently-published exploration,
        emits (committer_id, candidate first-contribution timestamp in msec).
        """
        exp_id = item.get_unversioned_instance_id()

        exp_rights = rights_manager.get_exploration_rights(
            exp_id, strict=False)
        if exp_rights is None:
            # The exploration no longer exists; nothing to attribute.
            return

        exp_first_published_msec = exp_rights.first_published_msec
        # First contribution time in msec is only set from contributions to
        # explorations that are currently published.
        if not rights_manager.is_exploration_private(exp_id):
            created_on_msec = utils.get_time_in_millisecs(item.created_on)
            # Commits made before first publication are counted against the
            # publication time, hence the max().
            yield (
                item.committer_id,
                max(exp_first_published_msec, created_on_msec)
            )

    @staticmethod
    def reduce(user_id, stringified_commit_times_msec):
        """Implements the reduce function for this job.

        Takes the earliest candidate timestamp across all of the user's
        commits and records it (only if not already set).
        """
        commit_times_msec = [
            ast.literal_eval(commit_time_string) for
            commit_time_string in stringified_commit_times_msec]
        first_contribution_msec = min(commit_times_msec)
        user_services.update_first_contribution_msec_if_not_set(
            user_id, first_contribution_msec)
class UserLastExplorationActivityOneOffJob(jobs.BaseMapReduceOneOffJobManager):
    """One-off job that adds fields to record last exploration created and last
    edited times.
    """

    @classmethod
    def entity_classes_to_map_over(cls):
        """Return a list of datastore class references to map over."""
        return [user_models.UserSettingsModel]

    @staticmethod
    def map(user_model):
        """Implements the map function for this job.

        Backfills two fields on the UserSettingsModel in place:
        - last_created_an_exploration: creation time of the newest
          exploration among the user's created contributions.
        - last_edited_an_exploration: time of the user's most recent
          exploration commit.
        """
        user_id = user_model.id
        contributions = user_models.UserContributionsModel.get(user_id)

        created_explorations = exp_fetchers.get_multiple_explorations_by_id(
            contributions.created_exploration_ids)
        if created_explorations:
            user_model.last_created_an_exploration = max(
                [model.created_on for model in created_explorations.values()])

        # Fetch only the single most recent commit log entry by this user.
        user_commits = (
            exp_models.ExplorationCommitLogEntryModel.query(
                exp_models.ExplorationCommitLogEntryModel.user_id == user_id).
            order(-exp_models.ExplorationCommitLogEntryModel.created_on).
            fetch(1))

        if user_commits:
            user_model.last_edited_an_exploration = user_commits[0].created_on

        user_model.update_timestamps()
        user_model.put()
class CleanupExplorationIdsFromUserSubscriptionsModelOneOffJob(
        jobs.BaseMapReduceOneOffJobManager):
    """One off job that removes nonexisting exploration ids from
    UserSubscriptionsModel.
    """

    @classmethod
    def entity_classes_to_map_over(cls):
        """Return a list of datastore class references to map over."""
        return [user_models.UserSubscriptionsModel]

    @staticmethod
    def map(model_instance):
        """Removes ids of deleted or missing explorations from the model.

        Emits one success message per cleaned model, listing the removed ids.
        """
        if not model_instance.deleted:
            fetched_exploration_model_instances = (
                datastore_services.fetch_multiple_entities_by_ids_and_models(
                    [('ExplorationModel', model_instance.exploration_ids)]))[0]

            exp_ids_removed = []
            # The ZIP is materialized into a list up front, so removing ids
            # from exploration_ids inside the loop is safe.
            for exp_id, exp_instance in list(python_utils.ZIP(
                    model_instance.exploration_ids,
                    fetched_exploration_model_instances)):
                if exp_instance is None or exp_instance.deleted:
                    exp_ids_removed.append(exp_id)
                    model_instance.exploration_ids.remove(exp_id)
            if exp_ids_removed:
                model_instance.update_timestamps()
                model_instance.put()
                yield (
                    'Successfully cleaned up UserSubscriptionsModel %s and '
                    'removed explorations %s' % (
                        model_instance.id,
                        ', '.join(exp_ids_removed)),
                    1)

    @staticmethod
    def reduce(key, values):
        """Counts how many models each cleanup message applied to."""
        yield (key, len(values))
class RemoveActivityIDsOneOffJob(jobs.BaseMapReduceOneOffJobManager):
    """Job that deletes the activity_ids from the UserSubscriptionsModel.

    NOTE TO DEVELOPERS: This job can be deleted after it is run in Februrary
    2021 release.
    """

    @classmethod
    def enqueue(cls, job_id, additional_job_params=None):
        # Only one entity class is mapped over, so a high shard count is safe.
        super(RemoveActivityIDsOneOffJob, cls).enqueue(
            job_id, shard_count=64)

    @classmethod
    def entity_classes_to_map_over(cls):
        return [user_models.UserSubscriptionsModel]

    @staticmethod
    def map(user_subscriptions_model):
        """Strips the obsolete 'activity_ids' field from the model, if set."""
        # This is the only way to remove the field from the model,
        # see https://stackoverflow.com/a/15116016/3688189 and
        # https://stackoverflow.com/a/12701172/3688189.
        if 'activity_ids' in user_subscriptions_model._properties:  # pylint: disable=protected-access
            del user_subscriptions_model._properties['activity_ids']  # pylint: disable=protected-access
            if 'activity_ids' in user_subscriptions_model._values:  # pylint: disable=protected-access
                del user_subscriptions_model._values['activity_ids']  # pylint: disable=protected-access
            # last_updated is deliberately left unchanged: this is a purely
            # mechanical schema cleanup, not a user-visible edit.
            user_subscriptions_model.update_timestamps(
                update_last_updated_time=False)
            user_subscriptions_model.put()
            yield (
                'SUCCESS_REMOVED - UserSubscriptionsModel',
                user_subscriptions_model.id)
        else:
            yield (
                'SUCCESS_ALREADY_REMOVED - UserSubscriptionsModel',
                user_subscriptions_model.id)

    @staticmethod
    def reduce(key, values):
        """Implements the reduce function for this job."""
        yield (key, len(values))
class RemoveFeedbackThreadIDsOneOffJob(jobs.BaseMapReduceOneOffJobManager):
    """Job that deletes the feedback_thread_ids from the UserSubscriptionsModel.

    NOTE TO DEVELOPERS: This job can be deleted after it is run in January
    2021 release.
    """

    @classmethod
    def enqueue(cls, job_id, additional_job_params=None):
        # Only one entity class is mapped over, so a high shard count is safe.
        super(RemoveFeedbackThreadIDsOneOffJob, cls).enqueue(
            job_id, shard_count=64)

    @classmethod
    def entity_classes_to_map_over(cls):
        return [user_models.UserSubscriptionsModel]

    @staticmethod
    def map(user_subscriptions_model):
        """Strips the obsolete 'feedback_thread_ids' field, if present."""
        # This is the only way to remove the field from the model,
        # see https://stackoverflow.com/a/15116016/3688189 and
        # https://stackoverflow.com/a/12701172/3688189.
        if 'feedback_thread_ids' in user_subscriptions_model._properties:  # pylint: disable=protected-access
            del user_subscriptions_model._properties['feedback_thread_ids']  # pylint: disable=protected-access
            if 'feedback_thread_ids' in user_subscriptions_model._values:  # pylint: disable=protected-access
                del user_subscriptions_model._values['feedback_thread_ids']  # pylint: disable=protected-access
            # last_updated is deliberately left unchanged: this is a purely
            # mechanical schema cleanup, not a user-visible edit.
            user_subscriptions_model.update_timestamps(
                update_last_updated_time=False)
            user_subscriptions_model.put()
            yield (
                'SUCCESS_REMOVED - UserSubscriptionsModel',
                user_subscriptions_model.id)
        else:
            yield (
                'SUCCESS_ALREADY_REMOVED - UserSubscriptionsModel',
                user_subscriptions_model.id)

    @staticmethod
    def reduce(key, values):
        """Implements the reduce function for this job."""
        yield (key, len(values))
class FixUserSettingsCreatedOnOneOffJob(jobs.BaseMapReduceOneOffJobManager):
    """Job that fixes the invalid values of created_on attribute in the
    UserSettingsModel.

    It is a one-off job and can be removed from the codebase after we resolve
    this issue by running the job once in the January 2021 release.
    """

    @classmethod
    def enqueue(cls, job_id, additional_job_params=None):
        # Only one entity class is mapped over, so a high shard count is safe.
        super(FixUserSettingsCreatedOnOneOffJob, cls).enqueue(
            job_id, shard_count=64)

    @classmethod
    def entity_classes_to_map_over(cls):
        return [user_models.UserSettingsModel]

    @staticmethod
    def map(user_settings_model):
        """Implements the map function for this job.

        Strategy: gather every known timestamp attached to this user across
        UserSettingsModel and related user models. The earliest of these is
        a lower bound for when the account really existed, so if created_on
        is more than 5 minutes later than that minimum, created_on is reset
        to the minimum.
        """
        user_id = user_settings_model.id
        # (label, datetime) pairs; labels identify which attribute supplied
        # the winning minimum, for reporting in the job output.
        user_dates_list = [
            (
                'UserSettingsModel_last_updated',
                user_settings_model.last_updated
            ),
            (
                'UserSettingsModel_last_agreed_to_terms',
                user_settings_model.last_agreed_to_terms
            ),
            (
                'UserSettingsModel_last_started_state_editor_tutorial',
                user_settings_model.last_started_state_editor_tutorial
            ),
            (
                'UserSettingsModel_last_started_state_translation_tutorial',
                user_settings_model.last_started_state_translation_tutorial
            ),
            (
                'UserSettingsModel_last_logged_in',
                user_settings_model.last_logged_in
            ),
            (
                'UserSettingsModel_last_edited_an_exploration',
                user_settings_model.last_edited_an_exploration
            ),
            (
                'UserSettingsModel_last_created_an_exploration',
                user_settings_model.last_created_an_exploration
            ),
        ]
        if user_settings_model.first_contribution_msec is not None:
            # first_contribution_msec is stored in milliseconds; convert to
            # a datetime so it is comparable with the other entries.
            user_dates_list.append(
                (
                    'UserSettingsModel_first_contribution_msec',
                    datetime.datetime.fromtimestamp(
                        python_utils.divide(
                            user_settings_model.first_contribution_msec, 1000.0)
                    )
                )
            )

        # Models in user storage module having user_id as an attribute.
        exploration_user_data_model = (
            user_models.ExplorationUserDataModel.query(
                user_models.ExplorationUserDataModel.user_id == user_id).get()
        )
        all_models_linked_with_user_settings_model = [
            ('ExplorationUserDataModel', exploration_user_data_model)
        ]

        # Models in user storage module keyed by user_id itself.
        model_names_and_ids_to_be_fetched_in_batch = [
            ('UserContributionsModel', [user_id]),
            ('UserEmailPreferencesModel', [user_id]),
            ('UserStatsModel', [user_id]),
            ('UserSubscriptionsModel', [user_id]),
        ]
        fetched_batch_models = (
            datastore_services.fetch_multiple_entities_by_ids_and_models(
                model_names_and_ids_to_be_fetched_in_batch)
        )
        for model_name_tuple, model_list in list(python_utils.ZIP(
                model_names_and_ids_to_be_fetched_in_batch,
                fetched_batch_models)):
            model_name = model_name_tuple[0]
            actual_model = model_list[0]
            all_models_linked_with_user_settings_model.append(
                (model_name, actual_model)
            )

        # Collect created_on / last_updated (plus model-specific timestamps)
        # from every related model that exists.
        for model_name, model in all_models_linked_with_user_settings_model:
            if model is not None:
                user_dates_list.append(
                    (
                        model_name + python_utils.UNICODE('_last_updated'),
                        model.last_updated
                    )
                )
                user_dates_list.append(
                    (
                        model_name + python_utils.UNICODE('_created_on'),
                        model.created_on
                    )
                )
                if model_name == 'UserSubscriptionsModel':
                    user_dates_list.append(
                        (
                            'UserSubscriptionsModel_last_checked',
                            model.last_checked
                        )
                    )
                if model_name == 'ExplorationUserDataModel':
                    user_dates_list.append(
                        (
                            'ExplorationUserDataModel_rated_on',
                            model.rated_on
                        )
                    )
                    user_dates_list.append(
                        (
                            'ExplorationUserDataModel_draft_change_list_last_'
                            'updated',
                            model.draft_change_list_last_updated
                        )
                    )

        # Drop unset (None) timestamps before taking the minimum.
        filtered_user_dates_list = [
            (attribute_name, date) for attribute_name, date in user_dates_list
            if date is not None
        ]
        model_name, min_date = min(filtered_user_dates_list, key=lambda x: x[1])
        # Tolerance: discrepancies of up to 5 minutes are considered normal.
        time_delta_for_update = datetime.timedelta(minutes=5)
        # This method for converting date_time_string to datettime object has
        # also been used here:
        # https://github.com/oppia/oppia/blob/d394b6a186acc74b5ec9c3fecc20cc3f1954f441/utils.py#L479
        correction_cutoff_timestamp = datetime.datetime.strptime(
            'Jul 1 2020', '%b %d %Y')
        if user_settings_model.created_on - min_date > time_delta_for_update:
            user_settings_model.update_timestamps(
                update_last_updated_time=False)
            user_settings_model.created_on = min_date
            user_settings_model.put()
            yield (
                'SUCCESS_UPDATED_USING_' + python_utils.UNICODE(model_name), 1)

            # Yield an additional error key for user_models created after
            # cutoff date July 1, 2020 and having a discrepancy in their
            # created_on.
            if min_date >= correction_cutoff_timestamp:
                yield ('ERROR_NOT_UP_TO_DATE_USER', user_id)
        else:
            yield ('SUCCESS_ALREADY_UP_TO_DATE', 1)

    @staticmethod
    def reduce(key, values):
        """Implements the reduce function for this job.

        Error keys report the affected user ids; success keys report counts.
        """
        if key == 'ERROR_NOT_UP_TO_DATE_USER':
            yield (key, values)
        else:
            yield (key, len(values))
class UserSettingsCreatedOnAuditOneOffJob(jobs.BaseMapReduceOneOffJobManager):
    """Job that audits the value of created_on attribute in the
    UserSettingsModel. This one-off job can be removed after we have verified
    that all UserSettingsModels have their created_on set correctly.
    """

    @classmethod
    def enqueue(cls, job_id, additional_job_params=None):
        # Only one entity class is mapped over, so a high shard count is safe.
        super(UserSettingsCreatedOnAuditOneOffJob, cls).enqueue(
            job_id, shard_count=64)

    @classmethod
    def entity_classes_to_map_over(cls):
        return [user_models.UserSettingsModel]

    @staticmethod
    def map(user_settings_model):
        """Implements the map function for this job.

        Read-only counterpart of FixUserSettingsCreatedOnOneOffJob: gathers
        every known timestamp across the user's models, and reports an error
        (without writing anything) if created_on is more than 5 minutes later
        than the earliest one.
        """
        user_id = user_settings_model.id
        # (label, datetime) pairs; labels identify which attribute supplied
        # the winning minimum, for reporting in the job output.
        user_dates_list = [
            (
                'UserSettingsModel_created_on',
                user_settings_model.created_on
            ),
            (
                'UserSettingsModel_last_updated',
                user_settings_model.last_updated
            ),
            (
                'UserSettingsModel_last_agreed_to_terms',
                user_settings_model.last_agreed_to_terms
            ),
            (
                'UserSettingsModel_last_started_state_editor_tutorial',
                user_settings_model.last_started_state_editor_tutorial
            ),
            (
                'UserSettingsModel_last_started_state_translation_tutorial',
                user_settings_model.last_started_state_translation_tutorial
            ),
            (
                'UserSettingsModel_last_logged_in',
                user_settings_model.last_logged_in
            ),
            (
                'UserSettingsModel_last_edited_an_exploration',
                user_settings_model.last_edited_an_exploration
            ),
            (
                'UserSettingsModel_last_created_an_exploration',
                user_settings_model.last_created_an_exploration
            ),
        ]
        if user_settings_model.first_contribution_msec is not None:
            # first_contribution_msec is stored in milliseconds; convert to
            # a datetime so it is comparable with the other entries.
            user_dates_list.append(
                (
                    'UserSettingsModel_first_contribution_msec',
                    datetime.datetime.fromtimestamp(
                        python_utils.divide(
                            user_settings_model.first_contribution_msec, 1000.0)
                    )
                )
            )

        # Models in user storage module having user_id as an attribute.
        exploration_user_data_model = (
            user_models.ExplorationUserDataModel.query(
                user_models.ExplorationUserDataModel.user_id == user_id).get()
        )
        all_models_linked_with_user_settings_model = [
            ('ExplorationUserDataModel', exploration_user_data_model)
        ]

        # Models in user storage module keyed by user_id.
        model_names_and_ids_to_be_fetched_in_batch = [
            ('UserContributionsModel', [user_id]),
            ('UserEmailPreferencesModel', [user_id]),
            ('UserStatsModel', [user_id]),
            ('UserSubscriptionsModel', [user_id]),
        ]
        fetched_batch_models = (
            datastore_services.fetch_multiple_entities_by_ids_and_models(
                model_names_and_ids_to_be_fetched_in_batch)
        )
        for model_name_tuple, model_list in list(python_utils.ZIP(
                model_names_and_ids_to_be_fetched_in_batch,
                fetched_batch_models)):
            model_name = model_name_tuple[0]
            actual_model = model_list[0]
            all_models_linked_with_user_settings_model.append(
                (model_name, actual_model)
            )

        # Collect created_on / last_updated (plus model-specific timestamps)
        # from every related model that exists.
        for model_name, model in all_models_linked_with_user_settings_model:
            if model is not None:
                user_dates_list.append(
                    (
                        model_name + python_utils.UNICODE('_last_updated'),
                        model.last_updated
                    )
                )
                user_dates_list.append(
                    (
                        model_name + python_utils.UNICODE('_created_on'),
                        model.created_on
                    )
                )
                if model_name == 'UserSubscriptionsModel':
                    user_dates_list.append(
                        (
                            'UserSubscriptionsModel_last_checked',
                            model.last_checked
                        )
                    )
                if model_name == 'ExplorationUserDataModel':
                    user_dates_list.append(
                        (
                            'ExplorationUserDataModel_rated_on',
                            model.rated_on
                        )
                    )
                    user_dates_list.append(
                        (
                            'ExplorationUserDataModel_draft_change_list_last_'
                            'updated',
                            model.draft_change_list_last_updated
                        )
                    )

        # Drop unset (None) timestamps before taking the minimum.
        filtered_user_dates_list = [
            (attribute_name, date) for attribute_name, date in user_dates_list
            if date is not None
        ]
        model_name, min_date = min(filtered_user_dates_list, key=lambda x: x[1])
        # Tolerance: discrepancies of up to 5 minutes are considered normal.
        time_delta_for_correctness = datetime.timedelta(minutes=5)
        if user_settings_model.created_on - min_date > (
                time_delta_for_correctness):
            yield (
                'ERROR_NEED_TO_UPDATE_USING_' + python_utils.UNICODE(
                    model_name), user_id)
        else:
            yield ('SUCCESS_ALREADY_UP_TO_DATE', 1)

    @staticmethod
    def reduce(key, values):
        """Implements the reduce function for this job.

        Error keys report the affected user ids; success keys report counts.
        """
        if key.startswith('ERROR_NEED_TO_UPDATE_USING'):
            yield (key, values)
        else:
            yield (key, len(values))
class CleanUpUserSubscribersModelOneOffJob(jobs.BaseMapReduceOneOffJobManager):
    """Job that cleans up UserSubscribersModel by removing user id if it is
    present in subscriber ids.

    NOTE TO DEVELOPERS: This job can be deleted after it is run in October
    2020 release.
    """

    @classmethod
    def entity_classes_to_map_over(cls):
        return [user_models.UserSubscribersModel]

    @staticmethod
    def map(item):
        """Drops a user's own id from their subscriber list, if present."""
        if item.deleted:
            return
        if item.id not in item.subscriber_ids:
            return
        item.subscriber_ids.remove(item.id)
        item.update_timestamps()
        item.put()
        yield ('Removed user from their own subscribers list', item.id)

    @staticmethod
    def reduce(key, values):
        """Reports the sorted ids of all models that were fixed."""
        values.sort()
        yield (key, values)
class CleanUpCollectionProgressModelOneOffJob(
        jobs.BaseMapReduceOneOffJobManager):
    """Job that cleans up CollectionProgressModel.

    This is done by:
        1. Removing exploration ids which are not a part of the collection.
        2. Creating CompletedActivitiesModel for completed explorations if
        it is missing.
        3. Adding missing exploration ids for completed explorations.

    NOTE TO DEVELOPERS: Do not delete this job until issue #10809 is fixed.
    """

    @classmethod
    def entity_classes_to_map_over(cls):
        return [user_models.CollectionProgressModel]

    @staticmethod
    def map(item):
        """Implements the map function for this job.

        Emits one message per repair action actually performed.
        """
        if item.deleted:
            return

        # Step 2/3 from the class docstring: make sure a
        # CompletedActivitiesModel exists and contains every exploration
        # this progress model says was completed.
        completed_activities_model = (
            user_models.CompletedActivitiesModel.get_by_id(item.user_id))
        if completed_activities_model is None:
            completed_activities_model = (
                user_models.CompletedActivitiesModel(id=item.user_id))
            completed_activities_model.exploration_ids = (
                item.completed_explorations)
            completed_activities_model.update_timestamps()
            completed_activities_model.put()
            yield ('Regenerated Missing CompletedActivitiesModel', item.id)
        else:
            missing_exp_ids = [
                exp_id
                for exp_id in item.completed_explorations if exp_id not in (
                    completed_activities_model.exploration_ids)]
            if missing_exp_ids:
                completed_activities_model.exploration_ids.extend(
                    missing_exp_ids)
                completed_activities_model.update_timestamps()
                completed_activities_model.put()
                yield (
                    'Added missing exp ids in CompletedActivitiesModel',
                    item.id)

        # Step 1: drop completed explorations that are not actually nodes
        # of this collection.
        col_model = collection_models.CollectionModel.get_by_id(
            item.collection_id)
        collection_node_ids = [
            node['exploration_id'] for node in (
                col_model.collection_contents['nodes'])]
        exp_ids_to_remove = [
            exp_id
            for exp_id in item.completed_explorations if exp_id not in (
                collection_node_ids)]

        if exp_ids_to_remove:
            item.completed_explorations = [
                exp_id for exp_id in item.completed_explorations
                if exp_id not in exp_ids_to_remove]
            item.update_timestamps()
            item.put()
            yield (
                'Invalid Exploration IDs cleaned from '
                'CollectionProgressModel',
                'Model id: %s, Collection id: %s, Removed exploration '
                'ids: %s' % (
                    item.id, item.collection_id, exp_ids_to_remove))

    @staticmethod
    def reduce(key, values):
        """Reports the sorted details of all repairs, per repair kind."""
        values.sort()
        yield (key, values)
class CleanUpUserContributionsModelOneOffJob(
        jobs.BaseMapReduceOneOffJobManager):
    """Job that cleans up UserContributionsModel by removing deleted
    explorations from user contribution.

    NOTE TO DEVELOPERS: Do not delete this job until issue #10809 is fixed.
    """

    @classmethod
    def entity_classes_to_map_over(cls):
        return [user_models.UserContributionsModel]

    @staticmethod
    def map(item):
        """Removes ids of missing/deleted explorations from both the
        created and edited exploration id lists, and reports what was
        removed.
        """
        if item.deleted:
            return
        fetched_created_exploration_model_instances = (
            datastore_services.fetch_multiple_entities_by_ids_and_models(
                [('ExplorationModel', item.created_exploration_ids)]))[0]
        exp_ids_removed = []
        # Iterate over a deep copy so that removing from the live list while
        # looping is safe.
        for exp_id, exp_instance in list(python_utils.ZIP(
                copy.deepcopy(item.created_exploration_ids),
                fetched_created_exploration_model_instances)):
            if exp_instance is None or exp_instance.deleted:
                exp_ids_removed.append(exp_id)
                item.created_exploration_ids.remove(exp_id)

        fetched_edited_exploration_model_instances = (
            datastore_services.fetch_multiple_entities_by_ids_and_models(
                [('ExplorationModel', item.edited_exploration_ids)]))[0]
        for exp_id, exp_instance in list(python_utils.ZIP(
                copy.deepcopy(item.edited_exploration_ids),
                fetched_edited_exploration_model_instances)):
            if exp_instance is None or exp_instance.deleted:
                exp_ids_removed.append(exp_id)
                item.edited_exploration_ids.remove(exp_id)

        if exp_ids_removed:
            item.update_timestamps()
            item.put()
            yield (
                'Removed deleted exp ids from UserContributionsModel',
                'Model id: %s, Removed exploration ids: %s' % (
                    item.id, exp_ids_removed))

    @staticmethod
    def reduce(key, values):
        """Reports the sorted details of all cleanups performed."""
        values.sort()
        yield (key, values)
class ProfilePictureAuditOneOffJob(jobs.BaseMapReduceOneOffJobManager):
    """Job that verifies various aspects of profile_picture_data_url in the
    UserSettingsModel.
    """

    @classmethod
    def enqueue(cls, job_id, additional_job_params=None):
        # We can raise the number of shards for this job, since it goes only
        # over one type of entity class.
        super(ProfilePictureAuditOneOffJob, cls).enqueue(job_id, shard_count=32)

    @classmethod
    def entity_classes_to_map_over(cls):
        return [user_models.UserSettingsModel]

    @staticmethod
    def map(model):  # pylint: disable=too-many-return-statements
        """Audits one user's profile picture.

        Each user yields exactly one (status, username) pair; FAILURE keys
        identify the first check that the picture failed.
        """
        if model.deleted:
            yield ('SUCCESS - DELETED', model.username)
            return

        if model.username is None:
            # The user has not finished registration; no picture expected.
            yield ('SUCCESS - NOT REGISTERED', model.username)
            return

        if model.profile_picture_data_url is None:
            yield ('FAILURE - MISSING PROFILE PICTURE', model.username)
            return

        try:
            profile_picture_binary = utils.convert_png_data_url_to_binary(
                model.profile_picture_data_url)
        except Exception:
            yield ('FAILURE - INVALID PROFILE PICTURE DATA URL', model.username)
            return

        # Verify the decoded bytes really are a PNG image.
        if imghdr.what(None, h=profile_picture_binary) != 'png':
            yield ('FAILURE - PROFILE PICTURE NOT PNG', model.username)
            return

        try:
            # Load the image to retrieve dimensions for later verification.
            height, width = image_services.get_image_dimensions(
                profile_picture_binary)
        except Exception:
            yield ('FAILURE - CANNOT LOAD PROFILE PICTURE', model.username)
            return

        if (
                height != user_services.GRAVATAR_SIZE_PX or
                width != user_services.GRAVATAR_SIZE_PX
        ):
            yield (
                'FAILURE - PROFILE PICTURE NON STANDARD DIMENSIONS - %s,%s' % (
                    height, width
                ),
                model.username
            )
            return

        yield ('SUCCESS', model.username)

    @staticmethod
    def reduce(key, values):
        """Success keys report counts; failure keys report the usernames."""
        if key.startswith('SUCCESS'):
            yield (key, len(values))
        else:
            yield (key, values)
class UniqueHashedNormalizedUsernameAuditJob(
        jobs.BaseMapReduceOneOffJobManager):
    """Job that checks that the hashed normalized usernames are unique."""

    @classmethod
    def enqueue(cls, job_id, additional_job_params=None):
        # We can raise the number of shards for this job, since it goes only
        # over one type of entity class.
        super(UniqueHashedNormalizedUsernameAuditJob, cls).enqueue(
            job_id, shard_count=32)

    @classmethod
    def entity_classes_to_map_over(cls):
        return [user_models.UserSettingsModel]

    @staticmethod
    def map(model):
        """Keys every registered user by the hash of their normalized
        username; users without a username are counted separately.
        """
        username = model.normalized_username
        if username is None:
            yield ('SUCCESS USERNAME NONE', 1)
            return
        hashed_username = utils.convert_to_hash(
            username, user_models.DeletedUsernameModel.ID_LENGTH)
        yield (hashed_username, username)

    @staticmethod
    def reduce(key, values):
        """Flags any hash that is shared by more than one username."""
        if key == 'SUCCESS USERNAME NONE':
            yield (key, len(values))
            return
        if len(values) != 1:
            yield ('FAILURE', values)
class DiscardOldDraftsOneOffJob(jobs.BaseMapReduceOneOffJobManager):
    """Job that discards any drafts that were last updated in 2019 or prior.

    This is done to avoid issues arising from old schema version
    incompatibility. It is unlikely that such drafts are being used or relied
    on anyway, since they have been abandoned for over a year.
    """

    @classmethod
    def enqueue(cls, job_id, additional_job_params=None):
        # Only one entity class is mapped over, so a high shard count is safe.
        super(DiscardOldDraftsOneOffJob, cls).enqueue(job_id, shard_count=64)

    @classmethod
    def entity_classes_to_map_over(cls):
        return [user_models.ExplorationUserDataModel]

    @staticmethod
    def map(model):
        """Discards a draft if its exploration is gone or the draft is old.

        A draft kept for any other reason is left untouched.
        """
        if model.draft_change_list is None:
            return

        exploration = exp_fetchers.get_exploration_by_id(
            model.exploration_id, strict=False)

        if exploration is None:
            yield ('DISCARDED - Exploration is missing', model.id)
        elif model.draft_change_list_last_updated.timetuple().tm_year <= 2019:
            yield ('DISCARDED - Draft is old', model.id)
        else:
            # Draft is recent and its exploration exists; keep it.
            return

        # Discard the draft.
        model.draft_change_list = None
        model.draft_change_list_last_updated = None
        model.draft_change_list_exp_version = None
        model.update_timestamps()
        model.put()

        yield ('SUCCESS - Discarded draft', 1)

    @staticmethod
    def reduce(key, values):
        """Implements the reduce function for this job.

        Success keys report counts; discard keys report the model ids.
        """
        if key.startswith('SUCCESS'):
            yield (key, len(values))
        else:
            yield (key, values)
class DeleteNonExistentExpsFromUserModelsOneOffJob(
        jobs.BaseMapReduceOneOffJobManager):
    """Job that removes explorations that do not exist or that are private from
    completed and incomplete activities models and from user
    subscriptions model.

    This job will only be used in the April 2021 release to fix the errors on
    the prod server. The errors exist because the activity models were not
    properly updated when the explorations were deleted.
    """

    @classmethod
    def enqueue(cls, job_id, additional_job_params=None):
        super(
            DeleteNonExistentExpsFromUserModelsOneOffJob, cls
        ).enqueue(job_id, shard_count=16)

    @classmethod
    def entity_classes_to_map_over(cls):
        return [
            user_models.CompletedActivitiesModel,
            user_models.IncompleteActivitiesModel,
            user_models.UserSubscriptionsModel
        ]

    @staticmethod
    def map(model):
        """Implements the map function for this job.

        Filters the model's exploration_ids down to explorations that still
        exist; for the two activities models, additionally to explorations
        that are public. Writes the model back only if something changed.
        """
        class_name = model.__class__.__name__
        exploration_ids = model.exploration_ids
        exp_rights_models = exp_models.ExplorationRightsModel.get_multi(
            model.exploration_ids)
        existing_exploration_ids = [
            exp_id for exp_id, exp_rights_model
            in python_utils.ZIP(exploration_ids, exp_rights_models)
            if exp_rights_model is not None
        ]
        changed = False
        if len(existing_exploration_ids) < len(exploration_ids):
            changed = True
            yield ('REMOVED_DELETED_EXPS - %s' % class_name, 1)

        if isinstance(model, (
                user_models.CompletedActivitiesModel,
                user_models.IncompleteActivitiesModel
        )):
            # Activities models should only reference public explorations;
            # subscriptions may keep private ones.
            public_exploration_ids = [
                exp_id for exp_id, exp_rights_model
                in python_utils.ZIP(exploration_ids, exp_rights_models)
                if exp_rights_model is not None and
                exp_rights_model.status == constants.ACTIVITY_STATUS_PUBLIC
            ]
            if len(public_exploration_ids) < len(existing_exploration_ids):
                changed = True
                yield ('REMOVED_PRIVATE_EXPS - %s' % class_name, 1)
        else:
            public_exploration_ids = existing_exploration_ids

        if changed:
            model.exploration_ids = public_exploration_ids
            # This cleanup is mechanical, so last_updated stays untouched.
            model.update_timestamps(update_last_updated_time=False)
            model.put()
            yield ('SUCCESS - %s' % class_name, 1)

    @staticmethod
    def reduce(key, values):
        """Implements the reduce function for this job."""
        yield (key, len(values))
class DeleteNonExistentExpUserDataOneOffJob(jobs.BaseMapReduceOneOffJobManager):
    """Job that deletes exploration user data models that do not have
    existing explorations.
    This job will only be used in the April 2021 release to fix the errors on
    the prod server. The errors exist because the exploration user data models
    were not properly deleted when the explorations were deleted.
    """
    @classmethod
    def enqueue(cls, job_id, additional_job_params=None):
        """Enqueue this one-off job with a fixed shard count of 32."""
        super(
            DeleteNonExistentExpUserDataOneOffJob, cls
        ).enqueue(job_id, shard_count=32)
    @classmethod
    def entity_classes_to_map_over(cls):
        """Return the datastore model classes this job maps over."""
        return [user_models.ExplorationUserDataModel]
    @staticmethod
    def map(model):
        """Force-delete all leftovers of the model's exploration if the
        exploration itself no longer exists.

        Yields:
            2-tuples (status message, 1) counted by reduce().
        """
        exp_model = exp_models.ExplorationModel.get(
            model.exploration_id, strict=False)
        if exp_model is None:
            # By using delete_exploration we combine a few things, we delete
            # the ExplorationUserDataModels, also we delete any other models
            # that still exist for the exploration ID, and finally we verify
            # that the delete_exploration works correctly.
            exp_services.delete_exploration(
                feconf.SYSTEM_COMMITTER_ID,
                model.exploration_id,
                force_deletion=True
            )
            yield ('SUCCESS_DELETED_EXPLORATION', 1)
        else:
            yield ('SUCCESS_KEPT', 1)
    @staticmethod
    def reduce(key, values):
        """Implements the reduce function for this job."""
        yield (key, len(values))
class DeleteNonExistentExpUserContributionsOneOffJob(
        jobs.BaseMapReduceOneOffJobManager):
    """Job that removes deleted explorations from UserContributionsModels.
    This job will only be used in the April 2021 release to fix the errors on
    the prod server. The errors exist because the contributions models were not
    properly updated when the explorations were deleted.
    """
    @classmethod
    def enqueue(cls, job_id, additional_job_params=None):
        """Enqueue this one-off job with a fixed shard count of 32."""
        super(
            DeleteNonExistentExpUserContributionsOneOffJob, cls
        ).enqueue(job_id, shard_count=32)
    @classmethod
    def entity_classes_to_map_over(cls):
        """Return the datastore model classes this job maps over."""
        return [user_models.UserContributionsModel]
    @staticmethod
    def map(model):
        """Drop ids of deleted explorations from both the created and edited
        lists, persisting the model only when something changed.

        Yields:
            2-tuples (status message, 1) counted by reduce().
        """
        # A missing ExplorationModel indicates the exploration was deleted.
        created_exp_models = exp_models.ExplorationModel.get_multi(
            model.created_exploration_ids)
        existing_created_exp_ids = [
            exp_id for exp_id, exp_model
            in python_utils.ZIP(
                model.created_exploration_ids, created_exp_models
            ) if exp_model is not None
        ]
        changed = False
        if len(existing_created_exp_ids) < len(model.created_exploration_ids):
            changed = True
            yield ('REMOVED_CREATED_DELETED_EXPS', 1)
        edited_exp_models = exp_models.ExplorationModel.get_multi(
            model.edited_exploration_ids)
        existing_edited_exp_ids = [
            exp_id for exp_id, exp_model
            in python_utils.ZIP(
                model.edited_exploration_ids, edited_exp_models
            ) if exp_model is not None
        ]
        if len(existing_edited_exp_ids) < len(model.edited_exploration_ids):
            changed = True
            yield ('REMOVED_EDITED_DELETED_EXPS', 1)
        if changed:
            model.created_exploration_ids = existing_created_exp_ids
            model.edited_exploration_ids = existing_edited_exp_ids
            # Keep last_updated untouched: this is a data repair, not an edit.
            model.update_timestamps(update_last_updated_time=False)
            model.put()
            yield ('SUCCESS', 1)
    @staticmethod
    def reduce(key, values):
        """Implements the reduce function for this job."""
        yield (key, len(values))
| 37.404598 | 111 | 0.614426 |
a9a34e3c015b355aa136be674f165f8ccc7aa52e | 1,172 | py | Python | study_sample/Animal.py | dantefung/Python-Codebase | 7e154100a1016ad79ec5d6adc7c11f096ec1966b | [
"MIT"
] | null | null | null | study_sample/Animal.py | dantefung/Python-Codebase | 7e154100a1016ad79ec5d6adc7c11f096ec1966b | [
"MIT"
] | 5 | 2021-04-30T21:18:36.000Z | 2022-03-12T00:55:18.000Z | study_sample/Animal.py | dantefung/Python-Codebase | 7e154100a1016ad79ec5d6adc7c11f096ec1966b | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding:UTF-8 -*-
'super class animal'
# 全局变量
__Author__ = 'DANTE FUNG'
# 类定义
class Animal(object):
    """Base class: every animal knows how to run."""

    def run(self):
        """Print a generic running message."""
        message = 'Animal is running ...'
        print(message)
class Dog(Animal):
    """A dog: overrides run() and adds its own eat() behaviour."""

    def run(self):
        text = 'Dog is running ...'
        print(text)

    def eat(self):
        text = 'Dog is Eating meat...'
        print(text)
class Cat(Animal):
    """A cat: inherits run() from Animal unchanged."""
# Exercise the classes: Dog overrides run(); Cat inherits Animal.run().
animal = Animal()
animal.run()
dog = Dog()
dog.run()
dog.eat()
cat = Cat()
cat.run()
# isinstance() is True for the object's own class and for its superclasses.
print('animal is Animal?', isinstance(animal,Animal))
print('dog is Animal?', isinstance(dog,Animal))
print('dog is Dog?', isinstance(dog,Dog))
# The literal below is program output (it means "polymorphism test").
print('----多态测试-------')
def run_twice(animal):
    """Invoke run() on *animal* twice.

    Duck typing: any object exposing a run() method works, not only
    Animal subclasses.
    """
    for _ in range(2):
        animal.run()
# Duck typing in action: run_twice() accepts any object with a run() method.
run_twice(Animal())
run_twice(Dog())
r'''
静态语言 vs 动态语言
对于静态语言(例如Java)来说,如果需要传入Animal类型,则传入的对象必须是Animal类型或者它的子类,否则,将无法调用run()方法。
对于Python这样的动态语言来说,则不一定需要传入Animal类型。我们只需要保证传入的对象有一个run()方法就可以了:
class Timer(object):
def run(self):
print('Start...')
这就是动态语言的“鸭子类型”,它并不要求严格的继承体系,一个对象只要“看起来像鸭子,走起路来像鸭子”,那它就可以被看做是鸭子。
Python的“file-like object“就是一种鸭子类型。对真正的文件对象,它有一个read()方法,返回其内容。
但是,许多对象,只要有read()方法,都被视为“file-like object“。
许多函数接收的参数就是“file-like object“,你不一定要传入真正的文件对象,完全可以传入任何实现了read()方法的对象。
''' | 18.603175 | 72 | 0.676621 |
61313860cd0f75bfbfe233c8405ccb82a259a3e1 | 6,323 | py | Python | twbot/twtrbot/twtrbot.py | EmilyBarbour/twbot | 549818229553b18b6ac8371a096eeeade489f0e5 | [
"MIT"
] | null | null | null | twbot/twtrbot/twtrbot.py | EmilyBarbour/twbot | 549818229553b18b6ac8371a096eeeade489f0e5 | [
"MIT"
] | null | null | null | twbot/twtrbot/twtrbot.py | EmilyBarbour/twbot | 549818229553b18b6ac8371a096eeeade489f0e5 | [
"MIT"
] | null | null | null | # coding: utf-8
import ast
import contextlib
from datetime import datetime, timedelta
import json
import logging
import logging.handlers
import requests
from requests_oauthlib import OAuth1
import twitter_funcs
import time
BOT_NUM=20
#logging.addLevelName(BOT_NUM, "BOTLOG")
# Add the log message handler to the logger
#LOG_FILENAME = "/home/ec2-user/log/twtrbot.log"
#handler = logging.handlers.TimedRotatingFileHandler(LOG_FILENAME, when="d", interval=1, backupCount=14)
#LOGGER = logging.getLogger(__name__)
#LOGGER.addHandler(handler)
#LOGGER.setLevel(logging.DEBUG)
class StreamDisconnectError(IOError):
    """Raised when the (supposedly endless) Twitter stream ends unexpectedly."""
class TwitterParseError(Exception):
    """Raised when a tweet payload from the stream cannot be parsed."""
class TwitterStream():
    """Maintains an OAuth1 requests session and mirrors users' tweets.

    Credentials, source users and the posting account are read from a simple
    KEY=VALUE config file; per-tweet work is delegated to twitter_funcs.
    """
    def __init__(self, logger, config_file='config.txt', daterange=None, tweet_json=None):
        # NOTE: self.daterange / self.tweet_json only exist when the
        # corresponding arguments are truthy; get_missing_tweets() and
        # post_tweet_json() rely on the caller having supplied them.
        self.HEADERS = {'Accept': 'application/json'}
        self.LOGGER = logger
        self._config(config_file)
        self.last_tweet_time = None
        if daterange:
            self.daterange = daterange
        if tweet_json:
            self.tweet_json = tweet_json
    def _config(self, config_file):
        """ reads in config file for params """
        config = {}
        try:
            # Re-binds the name `config_file` from the path to the file object.
            with open(config_file, 'r') as config_file:
                for line in config_file:
                    param, value = line.split('=')
                    config[param.strip()] = value.strip()
        except Exception as e:
            self.LOGGER.error('Config File Loading Error: {}', e)
            raise Exception('INVALID CONFIG FILE ({})'.format(e))
        req_params = ['CONSUMER_KEY', 'CONSUMER_SECRET',
                      'TOKEN', 'TOKEN_SECRET',
                      'USER', 'OAUTH_USER']
        # Fail fast if any required key is absent from the config file.
        if list(set(req_params) - set(config)):
            self.LOGGER.error('Config Params missing: {}'.format(', '.join(list(set(req_params) - set(config)))))
            raise Exception('MISSING PARAMS ({})'.format(', '.join(list(set(req_params) - set(config)))))
        self._auth(config)
        # NOTE(review): on Python 3, map() returns a one-shot iterator, yet
        # self.user is iterated in start_stream() and again in
        # get_missing_tweets() -- confirm this targets Python 2 or
        # materialize a list here.
        self.user = map(str, ast.literal_eval(config['USER']))
        self.oauth_user = config['OAUTH_USER']
    def api_session(self, retries=15, retry_statuses=[420, 429, 500, 502, 503, 504]):
        """ Get a customized requests Session object. Handle Twitter exceptions """
        # NOTE(review): mutable default argument for retry_statuses;
        # retry_config below is built but never used, and the adapter
        # hard-codes max_retries=3, ignoring the `retries` parameter --
        # confirm whether a custom retry adapter was intended here.
        session = requests.Session()
        session.headers.update(self.HEADERS)
        retry_config = {
            'logger': self.LOGGER,
            'num_retries': retries,
            'retry_statuses': set(tuple(retry_statuses))
        }
        adapter = requests.adapters.HTTPAdapter(max_retries=3)
        session.mount('http://', adapter)
        session.mount('https://', adapter)
        return session
    def _auth(self, config_dict):
        """ Create a two-legged OAuth1 session object. """
        session = self.api_session()
        session.auth = OAuth1(config_dict['CONSUMER_KEY'], config_dict['CONSUMER_SECRET'],
                              config_dict['TOKEN'], config_dict['TOKEN_SECRET'])
        session.headers.update(self.HEADERS)
        self.session = session
    def start_stream(self):
        """ Start listening to Streaming endpoint. """
        url = "https://stream.twitter.com/1.1/statuses/filter.json"
        params = {'follow': ",".join(self.user)}
        twstream = self.session.post(url, params=params, stream=True)
        self.LOGGER.log(BOT_NUM, 'Connected to Twitter Stream!')
        with contextlib.closing(twstream) as resp:
            for line in resp.iter_lines():
                self.process_stream(line)
            # for/else: runs when the line iterator is exhausted, i.e. the
            # stream ended -- which should never happen, so escalate.
            else:
                raise StreamDisconnectError(line)
    def process_stream(self, line):
        """ does stuff w stream output """
        # Empty keep-alive lines from the stream are skipped.
        if line:
            # NOTE(review): `max` shadows the builtin within this method.
            tries=0; max=2
            while tries < max:
                try:
                    line = json.loads(line)
                    self.last_tweet_time = twitter_funcs.parse_tweet(self.LOGGER, self.session, self.oauth_user, self.user, self.last_tweet_time, line)
                    break
                except Exception as e:
                    self.LOGGER.error("PARSING ERROR")
                    err = str(e)
                    # On the final attempt: stash the payload, try a manual
                    # post, then surface the parse failure to the caller.
                    if tries == max-1:
                        self.LOGGER.log(BOT_NUM, line)
                        self.tweet_json = line
                        self.post_tweet_json()
                        raise TwitterParseError(err)
                    tries+=1
    def post_tweet_json(self):
        """ can manually post a json blob """
        self.LOGGER.log(BOT_NUM, "Manually posting json blob")
        twitter_funcs.parse_tweet(self.LOGGER, self.session, self.oauth_user, self.user, self.tweet_json)
        self.LOGGER.log(BOT_NUM, "Manual post successful")
    def get_missing_tweets(self):
        """ gets tweets from user for daterange and compares with tweets posted on timeline user's feed """
        for user in self.user:
            self.LOGGER.log(BOT_NUM, 'Looking for user, {}, missing tweets! Wish me luck!'.format(user))
            tweets = twitter_funcs.retrieve_historical_tweets(self.session, user, self.daterange)
            if tweets:
                self.LOGGER.log(BOT_NUM, 'There are {} tweets between {} and {}...'.format(len(tweets), self.daterange[0],
                                                                                           self.daterange[1]))
                self.post_missing_tweets(user, tweets)
            else:
                self.LOGGER.log(BOT_NUM, 'No missing Tweets. Yay?!')
    def post_missing_tweets(self, user, tweet_list):
        """ posts missing tweets """
        # i counts only tweets that were actually re-posted.
        i=0
        for tweet in reversed(tweet_list): # post in chrono order
            if not twitter_funcs.retrieve_tweet(self.session, self.oauth_user, tweet['id_str']):
                # NOTE: mutates the tweet dict in place before posting.
                tweet['text'] = tweet['text'].encode('utf-8')
                self.LOGGER.log(BOT_NUM, 'Posting missing tweet id: {}, {}'.format(tweet['id_str'], tweet['text']))
                twitter_funcs.parse_tweet(self.LOGGER, self.session, self.oauth_user, user, tweet)
                time.sleep(20) # don't post too fast as to trigger rate limiting
                i+=1
        self.LOGGER.log(BOT_NUM, '{} tweets were missing and posted'.format(i))
| 39.030864 | 151 | 0.599083 |
c98c3a08ebea71275d73b74242c2023e822ad7d2 | 598 | py | Python | CorelationCovariance.py | samuelmaina/Data_Science | c9377565ae4ba60bf2d17b592122df2056b6ba96 | [
"MIT"
] | null | null | null | CorelationCovariance.py | samuelmaina/Data_Science | c9377565ae4ba60bf2d17b592122df2056b6ba96 | [
"MIT"
] | null | null | null | CorelationCovariance.py | samuelmaina/Data_Science | c9377565ae4ba60bf2d17b592122df2056b6ba96 | [
"MIT"
] | null | null | null | from numpy import mean,dot
import matplotlib.pyplot as plt
import numpy as np
def de_mean(x):
    """Return x shifted to zero mean, as a list of deviations."""
    mu = np.mean(x)
    return [value - mu for value in x]
def coveriance(x, y):
    """Sample covariance of x and y (denominator n - 1)."""
    return np.dot(de_mean(x), de_mean(y)) / (len(x) - 1)
# Synthetic data: page speed ~ N(3, 1), purchase amount ~ N(50, 10),
# drawn independently, so the expected covariance is near zero.
pageSpeeds=np.random.normal(3.0, 1.0, 1000)
purchaseAmount= np.random.normal(50.0, 10.0, 1000)
plt.scatter(pageSpeeds,purchaseAmount)
# Blocks until the plot window is closed.
plt.show()
print(coveriance(pageSpeeds, purchaseAmount))
def correlation(x, y):
    """Sample Pearson correlation coefficient of x and y.

    Bug fix: the original divided the *sample* covariance (n - 1
    denominator, as computed by coveriance()) by *population* standard
    deviations (numpy's default ddof=0), so e.g. perfectly correlated data
    yielded n/(n-1) instead of 1.0.  Both statistics now use ddof=1.

    Accepts any 1-D array-likes of equal length (previously ndarray only,
    since .std() was called on the arguments directly).
    """
    x = np.asarray(x, dtype=float)
    y = np.asarray(y, dtype=float)
    n = len(x)
    # Sample covariance, consistent with coveriance() above.
    cov = np.dot(x - x.mean(), y - y.mean()) / (n - 1)
    return cov / (x.std(ddof=1) * y.std(ddof=1))
print(correlation(pageSpeeds, purchaseAmount))
| 16.611111 | 50 | 0.697324 |
d2c2674fe38e427addfa01e8f998b5afae60f66a | 958 | py | Python | masonite/commands/MakeMigrationCommand.py | w3x10e8/core | d8f0ca29c2bd5e86d199391fa916ce2f5c9b0f49 | [
"MIT"
] | null | null | null | masonite/commands/MakeMigrationCommand.py | w3x10e8/core | d8f0ca29c2bd5e86d199391fa916ce2f5c9b0f49 | [
"MIT"
] | null | null | null | masonite/commands/MakeMigrationCommand.py | w3x10e8/core | d8f0ca29c2bd5e86d199391fa916ce2f5c9b0f49 | [
"MIT"
] | null | null | null | """ New Migration Command """
import subprocess
from cleo import Command
class MakeMigrationCommand(Command):
    """
    Makes a new migration
    migration
    {name : Name of your migration}
    {--t|--table=False : Table you are migrating for}
    {--c|--create=False : Table you want to create with this migration}
    """

    def handle(self):
        # Cleo delivers unset options as the string 'False' (see the
        # docstring spec above), hence the string comparisons.
        name = self.argument('name')
        base_cmd = ['orator', 'make:migration', name,
                    '-p', 'databases/migrations']
        create_table = self.option('create')
        migrate_table = self.option('table')
        if create_table != 'False':
            subprocess.call(base_cmd + ['--table', create_table, '--create'])
        elif migrate_table != 'False':
            subprocess.call(base_cmd + ['--table', migrate_table])
        else:
            subprocess.call(base_cmd)
be83fb416646912490d10c10afc5c7f45b410545 | 5,378 | py | Python | benchmarks/f3_wrong_hints_permutations/scaling_software_termination/8-2Nested_false-termination_4.py | EnricoMagnago/F3 | c863215c318d7d5f258eb9be38c6962cf6863b52 | [
"MIT"
] | 3 | 2021-04-23T23:29:26.000Z | 2022-03-23T10:00:30.000Z | benchmarks/f3_wrong_hints_permutations/scaling_software_termination/8-2Nested_false-termination_4.py | EnricoMagnago/F3 | c863215c318d7d5f258eb9be38c6962cf6863b52 | [
"MIT"
] | null | null | null | benchmarks/f3_wrong_hints_permutations/scaling_software_termination/8-2Nested_false-termination_4.py | EnricoMagnago/F3 | c863215c318d7d5f258eb9be38c6962cf6863b52 | [
"MIT"
] | 1 | 2021-11-17T22:02:56.000Z | 2021-11-17T22:02:56.000Z | from typing import Tuple, FrozenSet
from pysmt.environment import Environment as PysmtEnv
from pysmt.fnode import FNode
import pysmt.typing as types
from utils import symb_to_next
from hint import Hint, Location
def transition_system(env: PysmtEnv) -> Tuple[FrozenSet[FNode], FNode, FNode,
                                              FNode]:
    """Build the symbolic transition system for this termination benchmark.

    Returns:
        (symbols, init, trans, fairness) as pysmt formulas over the integer
        variables pc, x, y and their primed (next-state) copies.
    """
    assert isinstance(env, PysmtEnv)
    mgr = env.formula_manager
    pc = mgr.Symbol("pc", types.INT)
    x = mgr.Symbol("x", types.INT)
    y = mgr.Symbol("y", types.INT)
    # Primed copies representing the next-state value of each symbol.
    x_pc = symb_to_next(mgr, pc)
    x_x = symb_to_next(mgr, x)
    x_y = symb_to_next(mgr, y)
    symbols = frozenset([pc, x, y])
    m_1 = mgr.Int(-1)
    n_locs = 3
    max_int = n_locs
    ints = []
    pcs = []
    x_pcs = []
    # Integer constants plus (pc == i) / (pc' == i) predicates per location.
    for idx in range(n_locs):
        num = mgr.Int(idx)
        ints.append(num)
        pcs.append(mgr.Equals(pc, num))
        x_pcs.append(mgr.Equals(x_pc, num))
    # NOTE(review): empty loop since max_int == n_locs (likely generated code).
    for idx in range(n_locs, max_int):
        num = mgr.Int(idx)
        ints.append(num)
    # pc == -1 encodes the terminated "end" location.
    pcend = mgr.Equals(pc, m_1)
    x_pcend = mgr.Equals(x_pc, m_1)
    init = pcs[0]
    cfg = []
    # pc = 0 & (x >= 0) -> pc' = 1
    cond = mgr.GE(x, ints[0])
    cfg.append(mgr.Implies(mgr.And(pcs[0], cond), x_pcs[1]))
    # pc = 0 & !(x >= 0) -> pc' = -1
    cfg.append(mgr.Implies(mgr.And(pcs[0], mgr.Not(cond)), x_pcend))
    # pc = 1 -> pc' = 2
    cfg.append(mgr.Implies(pcs[1], x_pcs[2]))
    # pc = 2 -> pc' = 0
    cfg.append(mgr.Implies(pcs[2], x_pcs[0]))
    # pc = -1 -> pc' = -1
    cfg.append(mgr.Implies(pcend, x_pcend))
    trans = []
    same_x = mgr.Equals(x_x, x)
    same_y = mgr.Equals(x_y, y)
    same = mgr.And(same_x, same_y)
    # pc = 0 -> same
    trans.append(mgr.Implies(pcs[0], same))
    # pc = 1 -> x' = x + y & same_y
    trans.append(mgr.Implies(pcs[1],
                             mgr.And(mgr.Equals(x_x, mgr.Plus(x, y)),
                                     same_y)))
    # pc = 2 -> same_x & y' = y + 1
    trans.append(mgr.Implies(pcs[2],
                             mgr.And(same_x,
                                     mgr.Equals(x_y, mgr.Plus(y, ints[1])))))
    # pc = end -> same
    trans.append(mgr.Implies(pcend, same))
    trans = mgr.And(*cfg, *trans)
    # Fairness: the run never reaches the end location (non-termination).
    fairness = mgr.Not(mgr.Equals(pc, m_1))
    return symbols, init, trans, fairness
def hints(env: PysmtEnv) -> FrozenSet[Hint]:
    """Return a set of candidate hints for the model checker.

    Each Hint is a small automaton of Locations over a subset of {pc, x, y};
    a Location pairs a region invariant with a progress condition into the
    next location.  NOTE: this benchmark family deliberately contains wrong
    hints (see its path), so do not assume every hint is a valid certificate.
    """
    assert isinstance(env, PysmtEnv)
    mgr = env.formula_manager
    pc = mgr.Symbol("pc", types.INT)
    x = mgr.Symbol("x", types.INT)
    y = mgr.Symbol("y", types.INT)
    symbs = frozenset([pc, x, y])
    m_100 = mgr.Int(-100)
    m_1 = mgr.Int(-1)
    i_0 = mgr.Int(0)
    i_1 = mgr.Int(1)
    i_2 = mgr.Int(2)
    # NOTE(review): i_4 and x_pc below are never used in this function.
    i_4 = mgr.Int(4)
    i_20 = mgr.Int(20)
    x_pc = symb_to_next(mgr, pc)
    x_x = symb_to_next(mgr, x)
    x_y = symb_to_next(mgr, y)
    res = []
    # h_x5: two locations alternating on the sign of x.
    loc0 = Location(env, mgr.LE(x, i_0))
    loc0.set_progress(1, mgr.Equals(x_x, mgr.Times(x, x)))
    loc1 = Location(env, mgr.GE(x, i_0))
    loc1.set_progress(0, mgr.LT(x_x, mgr.Times(m_1, x, x)))
    h_x = Hint("h_x5", env, frozenset([x]), symbs)
    h_x.set_locs([loc0, loc1])
    res.append(h_x)
    # h_x1: single self-looping location, x grows by y (may stutter).
    stutter = mgr.Equals(x_x, x)
    loc = Location(env, mgr.GE(x, i_1), mgr.GE(y, i_1), stutterT=stutter)
    loc.set_progress(0, mgr.Equals(x_x, mgr.Plus(x, y)))
    h_x = Hint("h_x1", env, frozenset([x]), symbs)
    h_x.set_locs([loc])
    res.append(h_x)
    # h_x2: two locations, x' = x + y then x' = y.
    loc0 = Location(env, mgr.GE(x, i_1), mgr.GE(y, i_1))
    loc0.set_progress(1, mgr.Equals(x_x, mgr.Plus(x, y)))
    loc1 = Location(env, mgr.GE(x, i_2), mgr.GE(y, i_1))
    loc1.set_progress(0, mgr.Equals(x_x, y))
    h_x = Hint("h_x2", env, frozenset([x]), symbs)
    h_x.set_locs([loc0, loc1])
    res.append(h_x)
    # h_x4: three locations with inequality (non-deterministic) progress.
    loc0 = Location(env, mgr.GE(x, i_1), mgr.GE(y, i_1))
    loc0.set_progress(1, mgr.Equals(x_x, mgr.Times(x, y)))
    loc1 = Location(env, mgr.GE(x, i_1), mgr.GE(y, i_1))
    loc1.set_progress(2, mgr.GT(x_x, y))
    loc2 = Location(env, mgr.GE(x, i_2))
    loc2.set_progress(0, mgr.GE(x_x, i_20))
    h_x = Hint("h_x4", env, frozenset([x]), symbs)
    h_x.set_locs([loc0, loc1, loc2])
    res.append(h_x)
    # h_y5: single location, y' = y * y.
    loc0 = Location(env, mgr.GE(y, m_100))
    loc0.set_progress(0, mgr.Equals(x_y, mgr.Times(y, y)))
    h_y = Hint("h_y5", env, frozenset([y]), symbs)
    h_y.set_locs([loc0])
    res.append(h_y)
    # h_y4: three locations over y, regions also constrain x.
    loc0 = Location(env, mgr.GE(y, m_100), mgr.LE(x, i_20))
    loc0.set_progress(1, mgr.Equals(x_y, mgr.Times(x, y)))
    loc1 = Location(env, mgr.TRUE(), mgr.GE(x, m_100))
    loc1.set_progress(2, mgr.GE(x_y, i_20))
    loc2 = Location(env, mgr.TRUE())
    loc2.set_progress(0, mgr.And(mgr.GE(x_y, m_100), mgr.LE(x_y, i_0)))
    h_y = Hint("h_y4", env, frozenset([y]), symbs)
    h_y.set_locs([loc0, loc1, loc2])
    res.append(h_y)
    # h_x0: like h_x1 but requires x >= 20.
    stutter = mgr.Equals(x_x, x)
    loc = Location(env, mgr.GE(x, i_20), mgr.GE(y, i_1), stutterT=stutter)
    loc.set_progress(0, mgr.Equals(x_x, mgr.Plus(x, y)))
    h_x = Hint("h_x0", env, frozenset([x]), symbs)
    h_x.set_locs([loc])
    res.append(h_x)
    # h_x3: two locations, x' = x * y then x' = y.
    loc0 = Location(env, mgr.GE(x, i_1), mgr.GE(y, i_1))
    loc0.set_progress(1, mgr.Equals(x_x, mgr.Times(x, y)))
    loc1 = Location(env, mgr.GE(x, i_1), mgr.GE(y, i_1))
    loc1.set_progress(0, mgr.Equals(x_x, y))
    h_x = Hint("h_x3", env, frozenset([x]), symbs)
    h_x.set_locs([loc0, loc1])
    res.append(h_x)
    return frozenset(res)
| 30.213483 | 77 | 0.576237 |
58cc1fa3fc63108e0d898a9479824cef2857b0b4 | 2,497 | py | Python | Scripts/pdf_geneator.py | yogeshwaran01/Mini-Projects | c1a8790079d904405d49c71d6903ca4daaa77b38 | [
"MIT"
] | 4 | 2020-09-30T17:18:13.000Z | 2021-06-11T21:02:10.000Z | Scripts/pdf_geneator.py | yogeshwaran01/Mini-Projects | c1a8790079d904405d49c71d6903ca4daaa77b38 | [
"MIT"
] | null | null | null | Scripts/pdf_geneator.py | yogeshwaran01/Mini-Projects | c1a8790079d904405d49c71d6903ca4daaa77b38 | [
"MIT"
] | 1 | 2021-04-02T14:51:00.000Z | 2021-04-02T14:51:00.000Z | from reportlab.platypus import SimpleDocTemplate
from reportlab.platypus import Paragraph, Table, Image, ListItem
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.lib import colors
class PdfGenerator:
    """
    Thin wrapper around reportlab's platypus API: each helper returns a
    flowable which is finally assembled into a PDF by done().
    """
    def __init__(self, filename: str):
        # NOTE(review): the method names add_italict_text / set_paragragh
        # contain typos but are public API, so they are kept as-is.
        self.filename = filename
        self.report = SimpleDocTemplate(self.filename)
        # Style keys below ('Title', 'Italic', ...) come from reportlab's
        # sample stylesheet.
        self.styles = getSampleStyleSheet()
    def set_title(self, heading_text):
        """Return a title-styled paragraph flowable."""
        return Paragraph(heading_text, self.styles['Title'])
    def add_italict_text(self, text):
        """Return an italic-styled paragraph flowable."""
        return Paragraph(text, self.styles['Italic'])
    def set_definition(self, text):
        """Return a definition-styled paragraph flowable."""
        return Paragraph(text, self.styles['Definition'])
    def insert_code(self, text):
        """Return a code-styled paragraph flowable."""
        return Paragraph(text, self.styles['Code'])
    def set_heading(self, heading_text: str, tag=None):
        """Return a heading flowable; *tag* picks the style (default 'h1')."""
        if tag:
            report = Paragraph(heading_text, self.styles[tag])
        else:
            report = Paragraph(heading_text, self.styles["h1"])
        return report
    def set_paragragh(self, paragraph: str):
        """Return a body-text paragraph flowable."""
        report = Paragraph(paragraph, self.styles["BodyText"])
        return report
    def insert_table(self, table_data: list):
        """Return a centered, grid-ruled table; first row styled as header."""
        report = Table(
            data=table_data,
            style=[
                ("GRID", (0, 0), (-1, -1), 1, colors.black),
                ("FONTNAME", (0, 0), (-1, 0), "Helvetica-Bold"),
                ("ALIGN", (0, 0), (-1, -1), "CENTER"),
            ],
            hAlign="CENTER",
        )
        return report
    def insert_image(self, path, width=None, height=None, hAlign="CENTER"):
        """Return an image flowable for the file at *path*."""
        report = Image(path, width=width, height=height, hAlign=hAlign)
        return report
    def done(self, *kwgs):
        """Build the PDF from the given flowables, in order."""
        return self.report.build(list(kwgs))
if __name__ == "__main__":
pdf = PdfGenerator("python.pdf")
a = pdf.set_title("Python")
b = pdf.set_paragragh("Python is Super")
c = pdf.set_heading("Uses of Python", "h3")
table_data = [
["Modules", "Uses"],
["Requests", "Talk to http protocol"],
["Flask", "Web framework"],
["Pygame", "Build Game for life"],
["Bs4", "Scarpe the Website"],
["Matplotlib", "Plot the data"],
["Pandas", "Frame the Data"],
]
d = pdf.insert_table(table_data)
e = pdf.set_heading("Some sample code", "h3")
f = pdf.set_definition("Python is a Vera Level;")
pdf.done(a, b, c, d, e, f)
| 29.034884 | 75 | 0.592311 |
7b21699334c6eb15b1eebafe871f5b4b538d91da | 2,301 | py | Python | metrics/state.py | sspbft/BFTList | d73aee5bd0ab05995509f0fcfaf3c0a5944e617a | [
"MIT"
] | 6 | 2019-11-12T01:45:55.000Z | 2022-03-18T10:57:21.000Z | metrics/state.py | practicalbft/BFTList | d73aee5bd0ab05995509f0fcfaf3c0a5944e617a | [
"MIT"
] | 4 | 2019-02-14T10:57:09.000Z | 2019-03-21T15:22:08.000Z | metrics/state.py | sspbft/BFTList | d73aee5bd0ab05995509f0fcfaf3c0a5944e617a | [
"MIT"
] | 1 | 2019-04-04T15:09:33.000Z | 2019-04-04T15:09:33.000Z | """Metrics related to state and requests."""
# standard
import logging
import time
from prometheus_client import Counter, Gauge
# local
from modules.replication.models.client_request import ClientRequest
logger = logging.getLogger(__name__)
PEND = "pend"
START_TIME = "start_time"
# metrics
state_length = Counter("state_length",
"Length of the RSM state")
client_req_exec_time = Gauge("client_req_exec_time",
"Execution time of client_request",
["client_id", "timestamp", "state_length",
"pend_length"])
# dict to keep track of all client_requests and when they arrived in pending
client_reqs = {}
def client_req_added_to_pending(client_req: ClientRequest,
                                start_pend_length):
    """Called whenever a client request is added to pending requests
    The request is stored along with the current timestamp in client_reqs
    until client_req_executed is called for the same request. This enables
    the tracking of client request execution time.
    """
    # Duplicate registrations are logged and ignored rather than
    # overwriting the original start time.
    if client_req in client_reqs:
        logger.error(f"ClientRequest {client_req} already tracked")
        return
    logger.info(f"Started tracking {client_req}")
    # start_pend_length is the pending-queue length at arrival; it is
    # averaged with the length at execution time in client_req_executed.
    client_reqs[client_req] = {START_TIME: time.time(),
                               PEND: start_pend_length}
def client_req_executed(client_req: ClientRequest,
                        state_length,
                        pend_length):
    """Called whenever a client request is fully executed, i.e. committed
    The total execution time is calculated and emitted to the gauge tracking
    the client request execution time.
    """
    # Untracked requests are a no-op (e.g. executed before tracking started).
    if client_req not in client_reqs:
        logger.debug(f"ClientRequest {client_req} not tracked")
        return
    exec_time = time.time() - client_reqs[client_req][START_TIME]
    logger.info(f"req execed in {exec_time} s")
    # Mean of the pending-queue length at arrival and at execution.
    avg_pend_length = (pend_length + client_reqs[client_req][PEND]) / 2
    # emit execution time for this client_req
    client_req_exec_time.labels(
        client_req.get_client_id(),
        client_req.get_timestamp(),
        state_length,
        avg_pend_length
    ).set(exec_time)
    # stop tracking client_req
    del client_reqs[client_req]
1b893bdcbad6d934ebd79101f2b06bdf36aba8fb | 138 | py | Python | kafka_consumer/datadog_checks/kafka_consumer/__about__.py | vbarbaresi/integrations-core | ab26ab1cd6c28a97c1ad1177093a93659658c7aa | [
"BSD-3-Clause"
] | null | null | null | kafka_consumer/datadog_checks/kafka_consumer/__about__.py | vbarbaresi/integrations-core | ab26ab1cd6c28a97c1ad1177093a93659658c7aa | [
"BSD-3-Clause"
] | null | null | null | kafka_consumer/datadog_checks/kafka_consumer/__about__.py | vbarbaresi/integrations-core | ab26ab1cd6c28a97c1ad1177093a93659658c7aa | [
"BSD-3-Clause"
] | null | null | null | # (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
__version__ = "2.8.6"
| 23 | 59 | 0.717391 |
06bca580dbca157ceb25a4dd8baf3b64682d1f46 | 3,707 | py | Python | test/generic/test_id_provider.py | Rishav1/PySyft | f620ee12727b52b19a317f263789830b57ee2539 | [
"Apache-2.0"
] | 3 | 2020-09-23T14:09:09.000Z | 2020-09-23T19:26:28.000Z | test/generic/test_id_provider.py | mukira/PySyft | 94595008e8326d3111406ae143099b311fc3f2e6 | [
"Apache-2.0"
] | 3 | 2019-05-24T01:16:56.000Z | 2019-09-18T13:02:30.000Z | test/generic/test_id_provider.py | mukira/PySyft | 94595008e8326d3111406ae143099b311fc3f2e6 | [
"Apache-2.0"
] | 1 | 2022-03-12T08:04:34.000Z | 2022-03-12T08:04:34.000Z | import unittest.mock as mock
import pytest
from syft import exceptions
from syft.generic import id_provider
def test_pop_no_given_ids(hook):
    """pop() returns fresh random ids, skipping values already handed out.

    Fix: the module-global create_random_id was restored only on the happy
    path; a failing assertion left it patched and poisoned later tests.
    The restore now happens in a finally block.
    """
    provider = id_provider.IdProvider()
    values = [10, 4, 15, 4, 2, 0]
    orig_func = id_provider.create_random_id
    mocked_random_numbers = mock.Mock()
    mocked_random_numbers.side_effect = values
    id_provider.create_random_id = mocked_random_numbers
    try:
        val = provider.pop()
        assert val == values[0]
        val = provider.pop()
        assert val == values[1]
        val = provider.pop()
        assert val == values[2]
        # values[3] is skipped, as value already used.
        val = provider.pop()
        assert val == values[4]
        val = provider.pop()
        assert val == values[5]
    finally:
        # Restore the patched module global even if an assertion failed.
        id_provider.create_random_id = orig_func
def test_pop_with_given_ids(hook):
    """pop() consumes given_ids (LIFO) before falling back to random ids.

    Fix: the module-global create_random_id was restored only on the happy
    path; a failing assertion left it patched and poisoned later tests.
    The restore now happens in a finally block.
    """
    given_ids = [4, 15, 2]
    provider = id_provider.IdProvider(given_ids=given_ids.copy())
    values = [10, 4, 15, 4, 2, 0]
    orig_func = id_provider.create_random_id
    mocked_random_numbers = mock.Mock()
    mocked_random_numbers.side_effect = values
    id_provider.create_random_id = mocked_random_numbers
    try:
        val = provider.pop()
        assert val == given_ids[-1]
        val = provider.pop()
        assert val == given_ids[-2]
        val = provider.pop()
        assert val == given_ids[-3]
        val = provider.pop()
        assert val == values[0]
        # values[1, 2, 3, 4] are skipped, as value already used.
        val = provider.pop()
        assert val == values[5]
    finally:
        # Restore the patched module global even if an assertion failed.
        id_provider.create_random_id = orig_func
def test_given_ids_side_effect(hook):
    """IdProvider consumes the caller's given_ids list in place, one per pop()."""
    given_ids = [4, 15, 2]
    provider = id_provider.IdProvider(given_ids=given_ids)
    assert len(given_ids) == 3
    provider.pop()
    assert len(given_ids) == 2
    provider.pop()
    assert len(given_ids) == 1
    provider.pop()
    assert len(given_ids) == 0
def test_set_next_ids(hook):
    """Ids from set_next_ids take priority over the initial given_ids;
    both stacks are consumed LIFO."""
    initial_given_ids = [2, 3]
    provider = id_provider.IdProvider(given_ids=initial_given_ids.copy())
    next_ids = [4, 5]
    provider.set_next_ids(next_ids.copy())
    val = provider.pop()
    assert val == next_ids[-1]
    val = provider.pop()
    assert val == next_ids[-2]
    val = provider.pop()
    assert val == initial_given_ids[-1]
    val = provider.pop()
    assert val == initial_given_ids[-2]
def test_set_next_ids_with_id_checking(hook):
    """With check_ids=True, set_next_ids raises IdNotUniqueError listing
    exactly the ids that were already handed out."""
    initial_given_ids = [2, 3]
    provider = id_provider.IdProvider()
    provider.set_next_ids(initial_given_ids.copy(), check_ids=False)
    # generated the initial 3 ids
    provider.pop()
    provider.pop()
    provider.pop()
    next_ids = [1, 2, 5]
    with pytest.raises(exceptions.IdNotUniqueError, match=r"\{2\}"):
        provider.set_next_ids(next_ids.copy(), check_ids=True)
    next_ids = [2, 3, 5]
    with pytest.raises(exceptions.IdNotUniqueError, match=r"\{2, 3\}"):
        provider.set_next_ids(next_ids.copy(), check_ids=True)
def test_start_recording_ids():
    """Only ids popped after start_recording_ids() appear in the recording."""
    initial_given_ids = [2, 3]
    provider = id_provider.IdProvider(given_ids=initial_given_ids.copy())
    provider.pop()
    provider.start_recording_ids()
    provider.pop()
    ids = provider.get_recorded_ids()
    assert len(ids) == 1
    assert ids[0] == initial_given_ids[-2]
def test_get_recorded_ids():
    """get_recorded_ids(continue_recording=True) keeps recording; the
    default call returns everything recorded since start_recording_ids()."""
    initial_given_ids = [2, 3, 4]
    provider = id_provider.IdProvider(given_ids=initial_given_ids.copy())
    provider.pop()
    provider.start_recording_ids()
    provider.pop()
    ids = provider.get_recorded_ids(continue_recording=True)
    assert len(ids) == 1
    assert ids[0] == initial_given_ids[-2]
    provider.pop()
    ids = provider.get_recorded_ids()
    assert len(ids) == 2
    assert ids[0] == initial_given_ids[-2]
    assert ids[1] == initial_given_ids[-3]
| 24.879195 | 73 | 0.6744 |
e2a596c3cfb6154545012a293ee87efffec577d5 | 2,020 | py | Python | caeproject/simulation_result/13/project3.py | cosailer/trivial_code | 80167dd669c22ceb5139bda9379cca5a55bb3680 | [
"MIT"
] | null | null | null | caeproject/simulation_result/13/project3.py | cosailer/trivial_code | 80167dd669c22ceb5139bda9379cca5a55bb3680 | [
"MIT"
] | null | null | null | caeproject/simulation_result/13/project3.py | cosailer/trivial_code | 80167dd669c22ceb5139bda9379cca5a55bb3680 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
-------------------------------------
N A C S P Y T H O N S C R I P T
-------------------------------------
NACS version: 2.0.2745 - pre3
NACS architecture: CENTOS 5.11 (X86_64)
File generated at Tue Jan 20 16:55:05 2015
On host 'lse86' by 'cae42'
"""
from __future__ import division
try:
    from nacs.scripting import *
except ImportError:
    # Fix: a bare "except:" also swallowed SystemExit/KeyboardInterrupt;
    # only an import failure means we are outside the NACS interpreter.
    raise Exception("File is only executable in the NACS python interpreter!")
# =================
#  NACS SIMULATION
# =================
simulation = NacsSimulation()
simulation.setGrid(u'project3.nmf', 'plane')
# Output writers: native NACS, plain text (kept for the observer-point
# results below) and GiD post-processing format.
simulation.addOutput(Output.Nacs())
text = Output.Text()
simulation.addOutput(text)
simulation.addOutput(Output.GiD())
# =====================
#  MATERIAL DEFINITION
# =====================
# Densities presumably in kg/m^3 and moduli in Pa (SI) -- confirm against
# the NACS material documentation.
copper = Material('Copper')
copper.density(8940.0)
copper.lossTangensDelta([1000],[0.002])
copper.stiffness.isotropic.byENu(1.15e+11, 0.35)
steel = Material('Steel')
# NOTE(review): 10205 is unusually high for steel (~7850 kg/m^3) -- confirm.
steel.density(10205)
steel.lossTangensDelta([1000],[0.0003])
steel.stiffness.isotropic.byENu(1.95e+11, 0.28)
silicon = Material('Silicon')
silicon.density(2300.0)
silicon.stiffness.isotropic.byENu(67500000000.0, 0.1)
# Assign materials to the named mesh regions from project3.nmf.
simulation.setMat('exc_f_r', copper)
simulation.setMat('rec_f_r', copper)
simulation.setMat('sen_coat_r', steel)
simulation.setMat('silicon_r', silicon)
# ===============
#  ANALYSIS STEP
# ===============
# Transient analysis: time step 3.6417e-11, 300 steps.
trans1 = Analysis.Transient()
trans1.set(3.6417e-11, None, 300, False)
mech1 = Physic.Mechanic('planeStrain')
mech1.addRegions(['exc_f_r', 'sen_coat_r', 'silicon_r', 'rec_f_r'])
# Excitation: y-force sine burst on the excitation region; bottom boundary
# fixed in both directions.
mech1.addBc(mech1.BC.Force.expr('exc_f_r', 'y', "-1000*sinBurst(1.3730e+09, 5, 1, 1 ,t)"))
mech1.addBc(mech1.BC.Fix('outerbounds_bot', ['x', 'y']))
mech1.addResult(mech1.Result.Displacement(['exc_f_r', 'rec_f_r', 'sen_coat_r', 'silicon_r']))
# Observer-point displacements additionally go to the text writer.
mech1.addResult(mech1.Result.Displacement(['observer_point_1', 'observer_point_2', 'observer_point_3', 'observer_point_4', 'observer_point_e4'], 'amplPhase', 'mesh', [text]))
trans1.addPhysic(mech1)
simulation.addAnalysis(trans1)
1e603769c91973e195e593fe76cd31c3727bb542 | 3,565 | py | Python | vgif/main.py | 0xflotus/video2gif | fb7d4be2aa0da24e6dc87ca71e88220782564915 | [
"MIT"
] | 19 | 2021-01-05T02:38:40.000Z | 2021-09-01T06:47:10.000Z | vgif/main.py | 0xflotus/video2gif | fb7d4be2aa0da24e6dc87ca71e88220782564915 | [
"MIT"
] | null | null | null | vgif/main.py | 0xflotus/video2gif | fb7d4be2aa0da24e6dc87ca71e88220782564915 | [
"MIT"
] | 4 | 2021-01-05T12:37:29.000Z | 2022-01-13T06:51:49.000Z | #!/usr/bin/env python
import PySimpleGUI as sg
from PIL import Image
import cv2
import io
import imageio
def loadVideo():
    """Ask the user to pick a video file via a PySimpleGUI popup.

    Returns whatever sg.popup_get_file returns -- the chosen path, or
    presumably None when the dialog is cancelled (the caller wraps the
    result in a try/except) -- confirm against PySimpleGUI docs.
    """
    filename = sg.popup_get_file('Filename to gif')
    return filename
def main():
    """Interactively convert a video file to an animated GIF.

    Opens the video chosen via :func:`loadVideo` with OpenCV, shows a
    preview window with sliders for the frame sample rate, the output GIF
    frame rate and the resize factor, and on 'Start' writes every
    ``skipRate``-th (resized) frame into a GIF next to the input file
    (3-character extension replaced by ``gif``).
    """
    skipRate = 3          # sample every Nth source frame
    gifFps = 10           # playback rate of the generated GIF
    videoFile = loadVideo()
    try:
        # Assumes a 3-character extension (e.g. ".mp4" -> ".gif").
        outFile = videoFile[:-3] + 'gif'
        cap = cv2.VideoCapture(videoFile)
        videoWidth = cap.get(3)    # CAP_PROP_FRAME_WIDTH (currently unused)
        videoHeight = cap.get(4)   # CAP_PROP_FRAME_HEIGHT (currently unused)
        videoFps = cap.get(5)      # CAP_PROP_FPS (currently unused)
        frameTotal = cap.get(7)    # CAP_PROP_FRAME_COUNT, for ETA/length estimates
    except Exception:
        # Was a bare except; a cancelled file dialog (videoFile is None)
        # raises TypeError on the slice above.
        print("cannot play")
        return

    sg.theme('Black')
    # ---===--- define the window layout --- #
    layout = [[sg.Text('OpenCV Demo', size=(40, 1), font='Helvetica 20', key='-text-')],
              [sg.Image(filename='', key='-image-')],
              [sg.Text('Video Sample Rate'),
               sg.Slider(range=(skipRate, 300), size=(60, 10), orientation='h', key='-slider-')],
              [sg.Text('Output Gif FPS'),
               sg.Slider(range=(10, 60), size=(60, 10), orientation='h', key='-slider2-')],
              [sg.Text('Video Resize Rate'),
               sg.Slider(range=(2, 10), size=(60, 10), orientation='h', key='-slider3-')],
              [sg.Button('Start', size=(7, 1), pad=((600, 0), 3), font='Helvetica 14')],
              [sg.Button('Restart', size=(7, 1), pad=((600, 0), 3), font='Helvetica 14')],
              [sg.Button('Exit', size=(7, 1), pad=((600, 0), 3), font='Helvetica 14')]]
    window = sg.Window('Demo Application - video2gif', layout, no_titlebar=False, location=(0, 0))
    # Cache the widgets updated on every loop pass (single lookup each).
    image_elem = window['-image-']
    text_elem = window['-text-']
    slider_elem = window['-slider-']
    gifSliderElem = window['-slider2-']
    sizeSliderElem = window['-slider3-']

    startProcess = False
    ret, firstFrame = cap.read()       # first frame used for the idle preview
    framerCounter = 0                  # next source-frame index to grab
    scale = 2                          # resize divisor; overwritten from the slider
    while True:
        event, values = window.read(timeout=0)
        if event in ('Start', None):
            startProcess = True
        if event in ('Exit', None):
            break
        if not startProcess:
            # Idle: mirror the sliders, rewind, and show preview + estimate.
            skipRate = int(values['-slider-'])
            gifFps = int(values['-slider2-'])
            scale = int(values['-slider3-'])
            slider_elem.update(skipRate)
            gifSliderElem.update(gifFps)
            sizeSliderElem.update(scale)
            firstFrameDisplay = cv2.resize(firstFrame, (0, 0), fx=1 / scale, fy=1 / scale)
            framerCounter = 0
            cap.set(cv2.CAP_PROP_POS_FRAMES, framerCounter)
            gifEstLenght = "EST out put GIF length:" + str(round((frameTotal / skipRate) / gifFps, 2)) + " s"
            text_elem.update(gifEstLenght)
            imgbytes = cv2.imencode('.png', firstFrameDisplay)[1].tobytes()
            image_elem.update(data=imgbytes)
        else:
            eta = "strating...."
            text_elem.update(eta)
            # One GIF frame lasts 1/gifFps seconds.
            with imageio.get_writer(outFile, duration=1 / gifFps, mode='I') as writer:
                while True:
                    event, values = window.read(timeout=0)
                    if event in ('Restart', None):
                        break
                    # Seek to the next sampled frame instead of decoding all.
                    cap.set(cv2.CAP_PROP_POS_FRAMES, framerCounter)
                    ret, frame = cap.read()
                    if not ret:  # out of frames -> done
                        break
                    framerCounter += skipRate
                    frame = cv2.resize(frame, (0, 0), fx=1 / scale, fy=1 / scale)
                    imgbytes = cv2.imencode('.png', frame)[1].tobytes()
                    image_elem.update(data=imgbytes)
                    # OpenCV delivers BGR; imageio expects RGB.
                    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                    writer.append_data(frame)
                    eta = "ETA-" + str(round((framerCounter / frameTotal) * 100, 2)) + "%"
                    text_elem.update(eta)
                    slider_elem.update(skipRate)
                    gifSliderElem.update(gifFps)
                    if cv2.waitKey(1) == ord('q'):
                        cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
                        startProcess = False
                        break
            startProcess = False
    cap.release()
# Launch the GUI only when run as a script; importing this module for
# reuse or testing no longer opens a window.
if __name__ == '__main__':
    main()
367ec6406ceec3174ed6af56c447e96b04fd2fe2 | 45,109 | py | Python | pysnmp-with-texts/EQLDISK-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 8 | 2019-05-09T17:04:00.000Z | 2021-06-09T06:50:51.000Z | pysnmp-with-texts/EQLDISK-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 4 | 2019-05-31T16:42:59.000Z | 2020-01-31T21:57:17.000Z | pysnmp-with-texts/EQLDISK-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module EQLDISK-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/EQLDISK-MIB
# Produced by pysmi-0.3.4 at Wed May 1 13:05:18 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, SingleValueConstraint, ValueSizeConstraint, ConstraintsUnion, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "SingleValueConstraint", "ValueSizeConstraint", "ConstraintsUnion", "ConstraintsIntersection")
eqlGroupId, = mibBuilder.importSymbols("EQLGROUP-MIB", "eqlGroupId")
eqlMemberIndex, = mibBuilder.importSymbols("EQLMEMBER-MIB", "eqlMemberIndex")
equalLogic, = mibBuilder.importSymbols("EQUALLOGIC-SMI", "equalLogic")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
iso, ObjectIdentity, Counter32, Gauge32, MibIdentifier, MibScalar, MibTable, MibTableRow, MibTableColumn, Unsigned32, IpAddress, NotificationType, Bits, enterprises, Integer32, ModuleIdentity, TimeTicks, Counter64 = mibBuilder.importSymbols("SNMPv2-SMI", "iso", "ObjectIdentity", "Counter32", "Gauge32", "MibIdentifier", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Unsigned32", "IpAddress", "NotificationType", "Bits", "enterprises", "Integer32", "ModuleIdentity", "TimeTicks", "Counter64")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
# EQLDISK-MIB module identity and top-level subtree identifiers.
# Generated by pysmi from the EQLDISK-MIB ASN.1 source; prefer editing the
# MIB, not this file.
eqldiskModule = ModuleIdentity((1, 3, 6, 1, 4, 1, 12740, 3))
eqldiskModule.setRevisions(('2002-09-06 00:00',))
# pysmi guards setRevisionsDescriptions behind a pysnmp version check; the
# call must be nested inside the 'if' (the flattened form called it
# unconditionally on older pysnmp, which lacks the API).
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    if mibBuilder.loadTexts: eqldiskModule.setRevisionsDescriptions(('Initial revision',))
if mibBuilder.loadTexts: eqldiskModule.setLastUpdated('201403121459Z')
if mibBuilder.loadTexts: eqldiskModule.setOrganization('EqualLogic Inc.')
if mibBuilder.loadTexts: eqldiskModule.setContactInfo('Contact: Customer Support Postal: Dell Inc 300 Innovative Way, Suite 301, Nashua, NH 03062 Tel: +1 603-579-9762 E-mail: US-NH-CS-TechnicalSupport@dell.com WEB: www.equallogic.com')
if mibBuilder.loadTexts: eqldiskModule.setDescription('Equallogic Inc Peer Storage Array disk table information Copyright (c) 2002-2009 by Dell, Inc. All rights reserved. This software may not be copied, disclosed, transferred, or used except in accordance with a license granted by Dell, Inc. This software embodies proprietary information and trade secrets of Dell, Inc. ')
# Subtrees for objects, notifications and conformance statements.
eqldiskObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 12740, 3, 1))
eqldiskNotifications = MibIdentifier((1, 3, 6, 1, 4, 1, 12740, 3, 2))
eqldiskConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 12740, 3, 3))
# ------------------------------------------------------------------
# eqlDiskTable (12740.3.1.1): per-disk identity, configuration and
# state, indexed by (eqlGroupId, eqlMemberIndex, eqlDiskIndex).
# pysmi-generated from EQLDISK-MIB; the setDescription() strings carry
# the authoritative MIB documentation for each column.
# ------------------------------------------------------------------
eqlDiskTable = MibTable((1, 3, 6, 1, 4, 1, 12740, 3, 1, 1), )
if mibBuilder.loadTexts: eqlDiskTable.setStatus('current')
if mibBuilder.loadTexts: eqlDiskTable.setDescription('EqualLogic-Dynamic Member Disk Table. This table contains disk status information. One table entry per disk. It is indexed by disk slot number. The number of entries is equal to the number of disks that are present in the system.')
# Row definition; other tables below augment this row via
# registerAugmentions and reuse its index.
eqlDiskEntry = MibTableRow((1, 3, 6, 1, 4, 1, 12740, 3, 1, 1, 1), ).setIndexNames((0, "EQLGROUP-MIB", "eqlGroupId"), (0, "EQLMEMBER-MIB", "eqlMemberIndex"), (0, "EQLDISK-MIB", "eqlDiskIndex"))
if mibBuilder.loadTexts: eqlDiskEntry.setStatus('current')
if mibBuilder.loadTexts: eqlDiskEntry.setDescription('An entry (row) containing a list of disk status parameters.')
# Index column: slot number + 1 (not-accessible index in the MIB).
eqlDiskIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 12740, 3, 1, 1, 1, 1), Integer32())
if mibBuilder.loadTexts: eqlDiskIndex.setStatus('current')
if mibBuilder.loadTexts: eqlDiskIndex.setDescription('The index value that uniquely identifies the disk. It is equal to the disk slot number plus one.')
# Identity strings; .clone(...) supplies the default shown when the
# value is unknown.
eqlDiskType = MibTableColumn((1, 3, 6, 1, 4, 1, 12740, 3, 1, 1, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 64)).clone('unknown disk type')).setMaxAccess("readonly")
if mibBuilder.loadTexts: eqlDiskType.setStatus('current')
if mibBuilder.loadTexts: eqlDiskType.setDescription('This variable specifies the disk type.')
eqlDiskModelNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 12740, 3, 1, 1, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40)).clone('unknown disk model')).setMaxAccess("readonly")
if mibBuilder.loadTexts: eqlDiskModelNumber.setStatus('current')
if mibBuilder.loadTexts: eqlDiskModelNumber.setDescription('This variable specifies the disk model.')
eqlDiskRevisionNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 12740, 3, 1, 1, 1, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 8)).clone('?firmrev')).setMaxAccess("readonly")
if mibBuilder.loadTexts: eqlDiskRevisionNumber.setStatus('current')
if mibBuilder.loadTexts: eqlDiskRevisionNumber.setDescription('This variable specifies the disk revision number.')
eqlDiskSerialNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 12740, 3, 1, 1, 1, 5), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 20)).clone('unknown serial#')).setMaxAccess("readonly")
if mibBuilder.loadTexts: eqlDiskSerialNumber.setStatus('current')
if mibBuilder.loadTexts: eqlDiskSerialNumber.setDescription('This variable specifies the disk serial number.')
eqlDiskSize = MibTableColumn((1, 3, 6, 1, 4, 1, 12740, 3, 1, 1, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eqlDiskSize.setStatus('current')
if mibBuilder.loadTexts: eqlDiskSize.setDescription('This variable specifies the disk size in MB.')
# The only read-write column in this table: operator command to bring
# the disk online/offline or mark it spare.
eqlDiskAdminStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 12740, 3, 1, 1, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("set-disk-on-line", 1), ("set-disk-off-line", 2), ("set-disk-spare", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: eqlDiskAdminStatus.setStatus('current')
if mibBuilder.loadTexts: eqlDiskAdminStatus.setDescription('Command to take the disk off line or put it on line. This command is used in manual operation by the operator.')
eqlDiskStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 12740, 3, 1, 1, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13))).clone(namedValues=NamedValues(("on-line", 1), ("spare", 2), ("failed", 3), ("off-line", 4), ("alt-sig", 5), ("too-small", 6), ("history-of-failures", 7), ("unsupported-version", 8), ("unhealthy", 9), ("replacement", 10), ("encrypted", 11), ("notApproved", 12), ("preempt-failed", 13)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: eqlDiskStatus.setStatus('current')
if mibBuilder.loadTexts: eqlDiskStatus.setDescription("Disk status will be on-line when the disk is being used in the raid set. Disk status will be spare when the disk is not currently being used by raid configuration but may be used when status of a currently on-line disk changes. Disk status will be failed when either it is faulted or when there is no power. Disk status will be offline when the disk doesn't fall into any of these other categories. Disk status will be alt-sig (foreign) when the disk belongs to a RAIDset from another EqualLogic system. The disk can be converted to a spare. Disk status will be too-small if drive is too small to be used. The drive cannot be converted to spare. Disk status will be history-of-failures if drive has a history of failures. The drive can be converted to spare. Disk status will be unsupported-version if drive label is from a later release. The drive can be converted to spare. Disk status will be unhealthy if drive is unhealthy. Disk status will be replacement if drive is a replacement drive. Disk status will be encrypted if the drive is encrypted and cannot be decrypted. Disk status will be notApproved if the drive is not DELL approved. Disk status will be preempt-failed if the drive is off-line due to excessive errors. ")
eqlDiskErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 12740, 3, 1, 1, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eqlDiskErrors.setStatus('current')
if mibBuilder.loadTexts: eqlDiskErrors.setDescription('This variable specifies the number of disk errors since drive power on.')
eqlDiskId = MibTableColumn((1, 3, 6, 1, 4, 1, 12740, 3, 1, 1, 1, 10), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eqlDiskId.setStatus('current')
if mibBuilder.loadTexts: eqlDiskId.setDescription('This variable specifies the disk id as specified by the subsystem.')
# Physical slot, 0-based (range 0..13 => up to 14 bays).
eqlDiskSlot = MibTableColumn((1, 3, 6, 1, 4, 1, 12740, 3, 1, 1, 1, 11), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 13))).setMaxAccess("readonly")
if mibBuilder.loadTexts: eqlDiskSlot.setStatus('current')
if mibBuilder.loadTexts: eqlDiskSlot.setDescription('The zero-based slot number for the physical location of this disk, as marked on the back panel.')
eqlDiskTypeEnum = MibTableColumn((1, 3, 6, 1, 4, 1, 12740, 3, 1, 1, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("unknown", 0), ("sata", 1), ("sas", 2), ("sata-ssd", 3), ("sas-ssd", 4), ("sas-sed-hdd", 5), ("sas-sed-ssd", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: eqlDiskTypeEnum.setStatus('current')
if mibBuilder.loadTexts: eqlDiskTypeEnum.setDescription('Type of this disk.')
eqlDiskRPM = MibTableColumn((1, 3, 6, 1, 4, 1, 12740, 3, 1, 1, 1, 13), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eqlDiskRPM.setStatus('current')
if mibBuilder.loadTexts: eqlDiskRPM.setDescription('This variable specifies the disk RPM(Revolutions per Minute) as specified by the manufacturer not measured. if 0, disk RPM is unknown, if 1, disk is non-rotating media - SSD(Solid State Disk) otherwise this is the disk RPM.')
eqlDiskSectorSize = MibTableColumn((1, 3, 6, 1, 4, 1, 12740, 3, 1, 1, 1, 14), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("sector-size-512-bytes", 0), ("sector-size-4096-bytes", 1), ("sector-size-unknown", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: eqlDiskSectorSize.setStatus('current')
if mibBuilder.loadTexts: eqlDiskSectorSize.setDescription('This variable specifies the disk sector size as specified by the manufacturer. if 0, disk sector size is 512 bytes, if 1, disk sector size is 4096 bytes')
eqlDiskManufacturingInfo = MibTableColumn((1, 3, 6, 1, 4, 1, 12740, 3, 1, 1, 1, 15), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 20)).clone('mfginfo?')).setMaxAccess("readonly")
if mibBuilder.loadTexts: eqlDiskManufacturingInfo.setStatus('current')
if mibBuilder.loadTexts: eqlDiskManufacturingInfo.setDescription('This variable specifies manufacturing data for the drive.')
eqlDiskPI = MibTableColumn((1, 3, 6, 1, 4, 1, 12740, 3, 1, 1, 1, 16), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("pi-disabled", 0), ("pi-enabled", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: eqlDiskPI.setStatus('current')
if mibBuilder.loadTexts: eqlDiskPI.setDescription('This variable specifies whether Protection Information (PI) is enabled on the drive.')
eqlDiskHealth = MibTableColumn((1, 3, 6, 1, 4, 1, 12740, 3, 1, 1, 1, 17), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("smart-status-not-available", 0), ("smart-ok", 1), ("smart-tripped", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: eqlDiskHealth.setStatus('current')
if mibBuilder.loadTexts: eqlDiskHealth.setDescription('Disk status will be smart-status-not-available when the disk is offline or failed. Disk status will be smart-ok when the disk does not report itself to have exceeded its internal SMART threshold for errors. Disk status will be smart-tripped when the disk reports itself to have exceeded its internal SMART threshold for errors.')
# ------------------------------------------------------------------
# eqlDiskStatusTable (12740.3.1.2): per-disk I/O statistics.  The row
# augments eqlDiskEntry (registerAugmentions + reused index names)
# rather than declaring its own index.
# ------------------------------------------------------------------
eqlDiskStatusTable = MibTable((1, 3, 6, 1, 4, 1, 12740, 3, 1, 2), )
if mibBuilder.loadTexts: eqlDiskStatusTable.setStatus('current')
if mibBuilder.loadTexts: eqlDiskStatusTable.setDescription('EqualLogic-Dynamic Disk Status Table. This table contains disk status information. One table entry per disk. It is indexed by disk slot number. The number of entries is equal to the number of disks that are present in the system.')
eqlDiskStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 12740, 3, 1, 2, 1), )
eqlDiskEntry.registerAugmentions(("EQLDISK-MIB", "eqlDiskStatusEntry"))
eqlDiskStatusEntry.setIndexNames(*eqlDiskEntry.getIndexNames())
if mibBuilder.loadTexts: eqlDiskStatusEntry.setStatus('current')
if mibBuilder.loadTexts: eqlDiskStatusEntry.setDescription('An entry (row) containing a list of disk status parameters.')
# 64-bit counters for transfer/byte/busy-time totals.
eqlDiskStatusXfers = MibTableColumn((1, 3, 6, 1, 4, 1, 12740, 3, 1, 2, 1, 1), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eqlDiskStatusXfers.setStatus('current')
if mibBuilder.loadTexts: eqlDiskStatusXfers.setDescription('Total number of succesfull transfers')
eqlDiskStatusBytesRead = MibTableColumn((1, 3, 6, 1, 4, 1, 12740, 3, 1, 2, 1, 2), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eqlDiskStatusBytesRead.setStatus('current')
if mibBuilder.loadTexts: eqlDiskStatusBytesRead.setDescription('This variable specifies total number of Mbytes read.')
eqlDiskStatusBytesWritten = MibTableColumn((1, 3, 6, 1, 4, 1, 12740, 3, 1, 2, 1, 3), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eqlDiskStatusBytesWritten.setStatus('current')
if mibBuilder.loadTexts: eqlDiskStatusBytesWritten.setDescription('This variable specifies total number of Mbytes written.')
eqlDiskStatusBusyTime = MibTableColumn((1, 3, 6, 1, 4, 1, 12740, 3, 1, 2, 1, 4), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eqlDiskStatusBusyTime.setStatus('current')
if mibBuilder.loadTexts: eqlDiskStatusBusyTime.setDescription('This variable specifies total time in seconds spent busy.')
# 32-bit gaugelike/error counters.
eqlDiskStatusNumIOs = MibTableColumn((1, 3, 6, 1, 4, 1, 12740, 3, 1, 2, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eqlDiskStatusNumIOs.setStatus('current')
if mibBuilder.loadTexts: eqlDiskStatusNumIOs.setDescription('This variable specifies number of IO operations currently outstanding.')
eqlDiskStatusFailXfers = MibTableColumn((1, 3, 6, 1, 4, 1, 12740, 3, 1, 2, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eqlDiskStatusFailXfers.setStatus('current')
if mibBuilder.loadTexts: eqlDiskStatusFailXfers.setDescription('This variable specifies number of failed transfers.')
eqlDiskStatusNumResets = MibTableColumn((1, 3, 6, 1, 4, 1, 12740, 3, 1, 2, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eqlDiskStatusNumResets.setStatus('current')
if mibBuilder.loadTexts: eqlDiskStatusNumResets.setDescription('This variable specifies total SATA reset count.')
eqlDiskStatusTotalQD = MibTableColumn((1, 3, 6, 1, 4, 1, 12740, 3, 1, 2, 1, 8), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eqlDiskStatusTotalQD.setStatus('current')
if mibBuilder.loadTexts: eqlDiskStatusTotalQD.setDescription('This variable serves as a continuous counter for disk queue depth.')
eqlDiskStatusLifetime = MibTableColumn((1, 3, 6, 1, 4, 1, 12740, 3, 1, 2, 1, 9), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eqlDiskStatusLifetime.setStatus('current')
if mibBuilder.loadTexts: eqlDiskStatusLifetime.setDescription('This variable specifies remaining life on SSD drives which report it.')
# ------------------------------------------------------------------
# eqlDiskErrorTable (12740.3.1.3): per-disk SATA PHY / link error
# counters.  The row augments eqlDiskEntry, reusing its index.
# ------------------------------------------------------------------
eqlDiskErrorTable = MibTable((1, 3, 6, 1, 4, 1, 12740, 3, 1, 3), )
if mibBuilder.loadTexts: eqlDiskErrorTable.setStatus('current')
if mibBuilder.loadTexts: eqlDiskErrorTable.setDescription('EqualLogic-Dynamic Disk Error Table. This table contains disk error information. One table entry per disk. It is indexed by disk slot number. The number of entries is equal to the number of disks that are present in the system.')
eqlDiskErrorEntry = MibTableRow((1, 3, 6, 1, 4, 1, 12740, 3, 1, 3, 1), )
eqlDiskEntry.registerAugmentions(("EQLDISK-MIB", "eqlDiskErrorEntry"))
eqlDiskErrorEntry.setIndexNames(*eqlDiskEntry.getIndexNames())
if mibBuilder.loadTexts: eqlDiskErrorEntry.setStatus('current')
if mibBuilder.loadTexts: eqlDiskErrorEntry.setDescription('An entry (row) containing a list of disk error parameters.')
# All columns below are read-only Counter32 error tallies.
eqlDiskErrorPhyReady = MibTableColumn((1, 3, 6, 1, 4, 1, 12740, 3, 1, 3, 1, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eqlDiskErrorPhyReady.setStatus('current')
if mibBuilder.loadTexts: eqlDiskErrorPhyReady.setDescription('PhyREady changed count.')
eqlDiskErrorPhyInternal = MibTableColumn((1, 3, 6, 1, 4, 1, 12740, 3, 1, 3, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eqlDiskErrorPhyInternal.setStatus('current')
if mibBuilder.loadTexts: eqlDiskErrorPhyInternal.setDescription('This variable specifies total number of Phy internal errors.')
eqlDiskErrorCommWake = MibTableColumn((1, 3, 6, 1, 4, 1, 12740, 3, 1, 3, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eqlDiskErrorCommWake.setStatus('current')
if mibBuilder.loadTexts: eqlDiskErrorCommWake.setDescription('This variable specifies Comm wake count.')
eqlDiskErrorDecode10b8b = MibTableColumn((1, 3, 6, 1, 4, 1, 12740, 3, 1, 3, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eqlDiskErrorDecode10b8b.setStatus('current')
if mibBuilder.loadTexts: eqlDiskErrorDecode10b8b.setDescription('This variable specifies 10b to 8b decode error count.')
eqlDiskErrorDisparity = MibTableColumn((1, 3, 6, 1, 4, 1, 12740, 3, 1, 3, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eqlDiskErrorDisparity.setStatus('current')
if mibBuilder.loadTexts: eqlDiskErrorDisparity.setDescription('This variable specifies disparity error count.')
eqlDiskErrorCRC = MibTableColumn((1, 3, 6, 1, 4, 1, 12740, 3, 1, 3, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eqlDiskErrorCRC.setStatus('current')
if mibBuilder.loadTexts: eqlDiskErrorCRC.setDescription('This variable specifies CRC error count.')
eqlDiskErrorHandShake = MibTableColumn((1, 3, 6, 1, 4, 1, 12740, 3, 1, 3, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eqlDiskErrorHandShake.setStatus('current')
if mibBuilder.loadTexts: eqlDiskErrorHandShake.setDescription('This variable specifies handshake error count.')
eqlDiskErrorLinkSeq = MibTableColumn((1, 3, 6, 1, 4, 1, 12740, 3, 1, 3, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eqlDiskErrorLinkSeq.setStatus('current')
if mibBuilder.loadTexts: eqlDiskErrorLinkSeq.setDescription('This variable specifies Link Sequence error count.')
eqlDiskErrorTransportState = MibTableColumn((1, 3, 6, 1, 4, 1, 12740, 3, 1, 3, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eqlDiskErrorTransportState.setStatus('current')
if mibBuilder.loadTexts: eqlDiskErrorTransportState.setDescription('This variable specifies Transport State Transitions error count.')
eqlDiskErrorUnrecFIS = MibTableColumn((1, 3, 6, 1, 4, 1, 12740, 3, 1, 3, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eqlDiskErrorUnrecFIS.setStatus('current')
if mibBuilder.loadTexts: eqlDiskErrorUnrecFIS.setDescription('This variable specifies Unrecognised FIS type error count.')
# ------------------------------------------------------------------
# eqlDiskSmartInfoTable (12740.3.1.4): per-disk SMART attribute
# readings.  The row augments eqlDiskEntry, reusing its index.  Every
# attribute is exposed as a (current, worst) pair of read-only
# Integer32 columns at consecutive sub-identifiers, so the 34 columns
# are generated from a table of attribute base names below.
# ------------------------------------------------------------------
eqlDiskSmartInfoTable = MibTable((1, 3, 6, 1, 4, 1, 12740, 3, 1, 4), )
if mibBuilder.loadTexts: eqlDiskSmartInfoTable.setStatus('current')
if mibBuilder.loadTexts: eqlDiskSmartInfoTable.setDescription('EqualLogic-Dynamic Disk SMART Info Table. This table contains the SMART info for each disk. It is indexed by disk slot number. The number of entries is equal to the number of disks that are present in the system.')
eqlDiskSmartInfoEntry = MibTableRow((1, 3, 6, 1, 4, 1, 12740, 3, 1, 4, 1), )
eqlDiskEntry.registerAugmentions(("EQLDISK-MIB", "eqlDiskSmartInfoEntry"))
eqlDiskSmartInfoEntry.setIndexNames(*eqlDiskEntry.getIndexNames())
if mibBuilder.loadTexts: eqlDiskSmartInfoEntry.setStatus('current')
if mibBuilder.loadTexts: eqlDiskSmartInfoEntry.setDescription('An entry (row) containing a list of disk SMART info parameters.')
# Attribute base names in column order: base N occupies sub-ids
# (2N+1, 2N+2) for its current and 'Worst' columns respectively.
_eqlDiskSmartAttrBases = (
    'RawReadErrorRate', 'ThroughputPerformance', 'SpinUpTime',
    'StartStopCount', 'ReallocatedSectorCount', 'ReadChannelMargin',
    'SeekErrorRate', 'SeekPerformance', 'PowerOnHours', 'SpinupRetries',
    'DriveRecalibRetryCount', 'PowerCycleCount', 'ReadSoftErrorRate',
    'EmergencyRetractCycles', 'LoadUnloadCycles', 'HDDTemp',
    'OnTheFlyErrorRate',
)
for _attrPos, _attrBase in enumerate(_eqlDiskSmartAttrBases):
    for _subIdOffset, _suffix in ((1, ''), (2, 'Worst')):
        _column = MibTableColumn(
            (1, 3, 6, 1, 4, 1, 12740, 3, 1, 4, 1, 2 * _attrPos + _subIdOffset),
            Integer32()).setMaxAccess("readonly")
        if mibBuilder.loadTexts: _column.setStatus('current')
        # The MIB ships these columns with empty DESCRIPTION clauses.
        if mibBuilder.loadTexts: _column.setDescription('')
        # Bind the module-level name (e.g. eqlDiskSmartInfoHDDTempWorst)
        # exactly as the unrolled generated code did.
        globals()['eqlDiskSmartInfo' + _attrBase + _suffix] = _column
eqlDiskSmartInfoSelfTestReallocSectors = MibTableColumn((1, 3, 6, 1, 4, 1, 12740, 3, 1, 4, 1, 35), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eqlDiskSmartInfoSelfTestReallocSectors.setStatus('current')
if mibBuilder.loadTexts: eqlDiskSmartInfoSelfTestReallocSectors.setDescription('')
eqlDiskSmartInfoSelfTestReallocSectorsWorst = MibTableColumn((1, 3, 6, 1, 4, 1, 12740, 3, 1, 4, 1, 36), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eqlDiskSmartInfoSelfTestReallocSectorsWorst.setStatus('current')
if mibBuilder.loadTexts: eqlDiskSmartInfoSelfTestReallocSectorsWorst.setDescription('')
eqlDiskSmartInfoPendingDefects = MibTableColumn((1, 3, 6, 1, 4, 1, 12740, 3, 1, 4, 1, 37), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eqlDiskSmartInfoPendingDefects.setStatus('current')
if mibBuilder.loadTexts: eqlDiskSmartInfoPendingDefects.setDescription('')
eqlDiskSmartInfoPendingDefectsWorst = MibTableColumn((1, 3, 6, 1, 4, 1, 12740, 3, 1, 4, 1, 38), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eqlDiskSmartInfoPendingDefectsWorst.setStatus('current')
if mibBuilder.loadTexts: eqlDiskSmartInfoPendingDefectsWorst.setDescription('')
eqlDiskSmartInfoOfflineSurfaceScan = MibTableColumn((1, 3, 6, 1, 4, 1, 12740, 3, 1, 4, 1, 39), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eqlDiskSmartInfoOfflineSurfaceScan.setStatus('current')
if mibBuilder.loadTexts: eqlDiskSmartInfoOfflineSurfaceScan.setDescription('')
eqlDiskSmartInfoOfflineSurfaceScanWorst = MibTableColumn((1, 3, 6, 1, 4, 1, 12740, 3, 1, 4, 1, 40), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eqlDiskSmartInfoOfflineSurfaceScanWorst.setStatus('current')
if mibBuilder.loadTexts: eqlDiskSmartInfoOfflineSurfaceScanWorst.setDescription('')
eqlDiskSmartInfoUltraDMACRCErrorRate = MibTableColumn((1, 3, 6, 1, 4, 1, 12740, 3, 1, 4, 1, 41), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eqlDiskSmartInfoUltraDMACRCErrorRate.setStatus('current')
if mibBuilder.loadTexts: eqlDiskSmartInfoUltraDMACRCErrorRate.setDescription('')
eqlDiskSmartInfoUltraDMACRCErrorRateWorst = MibTableColumn((1, 3, 6, 1, 4, 1, 12740, 3, 1, 4, 1, 42), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eqlDiskSmartInfoUltraDMACRCErrorRateWorst.setStatus('current')
if mibBuilder.loadTexts: eqlDiskSmartInfoUltraDMACRCErrorRateWorst.setDescription('')
eqlDiskSmartInfoWritePreampErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 12740, 3, 1, 4, 1, 43), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eqlDiskSmartInfoWritePreampErrors.setStatus('current')
if mibBuilder.loadTexts: eqlDiskSmartInfoWritePreampErrors.setDescription('')
eqlDiskSmartInfoWritePreampErrorsWorst = MibTableColumn((1, 3, 6, 1, 4, 1, 12740, 3, 1, 4, 1, 44), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eqlDiskSmartInfoWritePreampErrorsWorst.setStatus('current')
if mibBuilder.loadTexts: eqlDiskSmartInfoWritePreampErrorsWorst.setDescription('')
eqlDiskSmartInfoOffTrackErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 12740, 3, 1, 4, 1, 45), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eqlDiskSmartInfoOffTrackErrors.setStatus('current')
if mibBuilder.loadTexts: eqlDiskSmartInfoOffTrackErrors.setDescription('')
eqlDiskSmartInfoOffTrackErrorsWorst = MibTableColumn((1, 3, 6, 1, 4, 1, 12740, 3, 1, 4, 1, 46), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eqlDiskSmartInfoOffTrackErrorsWorst.setStatus('current')
if mibBuilder.loadTexts: eqlDiskSmartInfoOffTrackErrorsWorst.setDescription('')
eqlDiskSmartInfoDAMErrorRate = MibTableColumn((1, 3, 6, 1, 4, 1, 12740, 3, 1, 4, 1, 47), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eqlDiskSmartInfoDAMErrorRate.setStatus('current')
if mibBuilder.loadTexts: eqlDiskSmartInfoDAMErrorRate.setDescription('')
eqlDiskSmartInfoDAMErrorRateWorst = MibTableColumn((1, 3, 6, 1, 4, 1, 12740, 3, 1, 4, 1, 48), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eqlDiskSmartInfoDAMErrorRateWorst.setStatus('current')
if mibBuilder.loadTexts: eqlDiskSmartInfoDAMErrorRateWorst.setDescription('')
eqlDiskSmartInfoECCErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 12740, 3, 1, 4, 1, 49), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eqlDiskSmartInfoECCErrors.setStatus('current')
if mibBuilder.loadTexts: eqlDiskSmartInfoECCErrors.setDescription('')
eqlDiskSmartInfoECCErrorsWorst = MibTableColumn((1, 3, 6, 1, 4, 1, 12740, 3, 1, 4, 1, 50), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eqlDiskSmartInfoECCErrorsWorst.setStatus('current')
if mibBuilder.loadTexts: eqlDiskSmartInfoECCErrorsWorst.setDescription('')
eqlDiskSmartInfoSoftECCCorrection = MibTableColumn((1, 3, 6, 1, 4, 1, 12740, 3, 1, 4, 1, 51), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eqlDiskSmartInfoSoftECCCorrection.setStatus('current')
if mibBuilder.loadTexts: eqlDiskSmartInfoSoftECCCorrection.setDescription('')
eqlDiskSmartInfoSoftECCCorrectionWorst = MibTableColumn((1, 3, 6, 1, 4, 1, 12740, 3, 1, 4, 1, 52), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eqlDiskSmartInfoSoftECCCorrectionWorst.setStatus('current')
if mibBuilder.loadTexts: eqlDiskSmartInfoSoftECCCorrectionWorst.setDescription('')
eqlDiskSmartInfoThermalAsperityRate = MibTableColumn((1, 3, 6, 1, 4, 1, 12740, 3, 1, 4, 1, 53), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eqlDiskSmartInfoThermalAsperityRate.setStatus('current')
if mibBuilder.loadTexts: eqlDiskSmartInfoThermalAsperityRate.setDescription('')
eqlDiskSmartInfoThermalAsperityRateWorst = MibTableColumn((1, 3, 6, 1, 4, 1, 12740, 3, 1, 4, 1, 54), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eqlDiskSmartInfoThermalAsperityRateWorst.setStatus('current')
if mibBuilder.loadTexts: eqlDiskSmartInfoThermalAsperityRateWorst.setDescription('')
eqlDiskSmartInfoSpinHighCount = MibTableColumn((1, 3, 6, 1, 4, 1, 12740, 3, 1, 4, 1, 55), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eqlDiskSmartInfoSpinHighCount.setStatus('current')
if mibBuilder.loadTexts: eqlDiskSmartInfoSpinHighCount.setDescription('')
eqlDiskSmartInfoSpinHighCountWorst = MibTableColumn((1, 3, 6, 1, 4, 1, 12740, 3, 1, 4, 1, 56), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eqlDiskSmartInfoSpinHighCountWorst.setStatus('current')
if mibBuilder.loadTexts: eqlDiskSmartInfoSpinHighCountWorst.setDescription('')
eqlDiskSmartInfoSpinBuzz = MibTableColumn((1, 3, 6, 1, 4, 1, 12740, 3, 1, 4, 1, 57), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eqlDiskSmartInfoSpinBuzz.setStatus('current')
if mibBuilder.loadTexts: eqlDiskSmartInfoSpinBuzz.setDescription('')
eqlDiskSmartInfoSpinBuzzWorst = MibTableColumn((1, 3, 6, 1, 4, 1, 12740, 3, 1, 4, 1, 58), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eqlDiskSmartInfoSpinBuzzWorst.setStatus('current')
if mibBuilder.loadTexts: eqlDiskSmartInfoSpinBuzzWorst.setDescription('')
eqlDiskSmartInfoOfflineSeekPerformance = MibTableColumn((1, 3, 6, 1, 4, 1, 12740, 3, 1, 4, 1, 59), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eqlDiskSmartInfoOfflineSeekPerformance.setStatus('current')
if mibBuilder.loadTexts: eqlDiskSmartInfoOfflineSeekPerformance.setDescription('')
eqlDiskSmartInfoOfflineSeekPerformanceWorst = MibTableColumn((1, 3, 6, 1, 4, 1, 12740, 3, 1, 4, 1, 60), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eqlDiskSmartInfoOfflineSeekPerformanceWorst.setStatus('current')
if mibBuilder.loadTexts: eqlDiskSmartInfoOfflineSeekPerformanceWorst.setDescription('')
eqlDiskSmartInfoThresholdExceeded = MibTableColumn((1, 3, 6, 1, 4, 1, 12740, 3, 1, 4, 1, 61), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eqlDiskSmartInfoThresholdExceeded.setStatus('current')
if mibBuilder.loadTexts: eqlDiskSmartInfoThresholdExceeded.setDescription('')
eqldiskMgmtNotifications = MibIdentifier((1, 3, 6, 1, 4, 1, 12740, 3, 2, 1))
eqlDiskStatusChange = NotificationType((1, 3, 6, 1, 4, 1, 12740, 3, 2, 1, 1)).setObjects(("EQLDISK-MIB", "eqlDiskStatus"), ("EQLDISK-MIB", "eqlDiskSlot"))
if mibBuilder.loadTexts: eqlDiskStatusChange.setStatus('current')
if mibBuilder.loadTexts: eqlDiskStatusChange.setDescription('Sent when eqlDiskStatus changes from one state to another state')
# Export every EQLDISK-MIB symbol so other compiled MIB modules can import
# them by name through the shared mibBuilder (generated call; order is not
# significant).
mibBuilder.exportSymbols("EQLDISK-MIB", eqlDiskIndex=eqlDiskIndex, eqlDiskSmartInfoPendingDefects=eqlDiskSmartInfoPendingDefects, eqlDiskSmartInfoReallocatedSectorCountWorst=eqlDiskSmartInfoReallocatedSectorCountWorst, eqlDiskSectorSize=eqlDiskSectorSize, eqlDiskStatusFailXfers=eqlDiskStatusFailXfers, eqlDiskSmartInfoWritePreampErrorsWorst=eqlDiskSmartInfoWritePreampErrorsWorst, eqlDiskStatusBytesRead=eqlDiskStatusBytesRead, eqldiskNotifications=eqldiskNotifications, eqlDiskSmartInfoPendingDefectsWorst=eqlDiskSmartInfoPendingDefectsWorst, eqldiskObjects=eqldiskObjects, eqlDiskSmartInfoSpinHighCount=eqlDiskSmartInfoSpinHighCount, eqlDiskSmartInfoOfflineSeekPerformance=eqlDiskSmartInfoOfflineSeekPerformance, eqlDiskErrorPhyInternal=eqlDiskErrorPhyInternal, eqlDiskSmartInfoUltraDMACRCErrorRate=eqlDiskSmartInfoUltraDMACRCErrorRate, eqlDiskStatusChange=eqlDiskStatusChange, eqlDiskStatusBusyTime=eqlDiskStatusBusyTime, eqlDiskSmartInfoSpinupRetriesWorst=eqlDiskSmartInfoSpinupRetriesWorst, eqlDiskErrorTable=eqlDiskErrorTable, eqlDiskStatusXfers=eqlDiskStatusXfers, eqlDiskSmartInfoSelfTestReallocSectorsWorst=eqlDiskSmartInfoSelfTestReallocSectorsWorst, eqlDiskErrorLinkSeq=eqlDiskErrorLinkSeq, eqlDiskSmartInfoSoftECCCorrectionWorst=eqlDiskSmartInfoSoftECCCorrectionWorst, eqlDiskPI=eqlDiskPI, eqlDiskRPM=eqlDiskRPM, eqlDiskSmartInfoSpinUpTimeWorst=eqlDiskSmartInfoSpinUpTimeWorst, eqlDiskSmartInfoThroughputPerformance=eqlDiskSmartInfoThroughputPerformance, eqlDiskStatusTotalQD=eqlDiskStatusTotalQD, eqlDiskSmartInfoRawReadErrorRateWorst=eqlDiskSmartInfoRawReadErrorRateWorst, eqlDiskSmartInfoPowerCycleCount=eqlDiskSmartInfoPowerCycleCount, eqlDiskSmartInfoSeekPerformanceWorst=eqlDiskSmartInfoSeekPerformanceWorst, eqlDiskSmartInfoDriveRecalibRetryCountWorst=eqlDiskSmartInfoDriveRecalibRetryCountWorst, eqlDiskErrorEntry=eqlDiskErrorEntry, eqlDiskSmartInfoThresholdExceeded=eqlDiskSmartInfoThresholdExceeded, eqlDiskSmartInfoHDDTemp=eqlDiskSmartInfoHDDTemp, 
eqlDiskRevisionNumber=eqlDiskRevisionNumber, eqldiskMgmtNotifications=eqldiskMgmtNotifications, eqlDiskAdminStatus=eqlDiskAdminStatus, eqlDiskEntry=eqlDiskEntry, eqlDiskSmartInfoSeekErrorRateWorst=eqlDiskSmartInfoSeekErrorRateWorst, eqlDiskSmartInfoUltraDMACRCErrorRateWorst=eqlDiskSmartInfoUltraDMACRCErrorRateWorst, eqlDiskSmartInfoSoftECCCorrection=eqlDiskSmartInfoSoftECCCorrection, eqlDiskSmartInfoStartStopCountWorst=eqlDiskSmartInfoStartStopCountWorst, eqlDiskSmartInfoPowerCycleCountWorst=eqlDiskSmartInfoPowerCycleCountWorst, eqlDiskId=eqlDiskId, eqlDiskStatus=eqlDiskStatus, eqlDiskSmartInfoOffTrackErrors=eqlDiskSmartInfoOffTrackErrors, eqlDiskErrorHandShake=eqlDiskErrorHandShake, eqlDiskErrorCRC=eqlDiskErrorCRC, eqlDiskSmartInfoOfflineSurfaceScan=eqlDiskSmartInfoOfflineSurfaceScan, eqlDiskSmartInfoEmergencyRetractCyclesWorst=eqlDiskSmartInfoEmergencyRetractCyclesWorst, eqlDiskSmartInfoSeekErrorRate=eqlDiskSmartInfoSeekErrorRate, eqlDiskSmartInfoPowerOnHours=eqlDiskSmartInfoPowerOnHours, eqlDiskSmartInfoSpinBuzz=eqlDiskSmartInfoSpinBuzz, eqlDiskErrorDecode10b8b=eqlDiskErrorDecode10b8b, eqlDiskErrorCommWake=eqlDiskErrorCommWake, PYSNMP_MODULE_ID=eqldiskModule, eqlDiskSmartInfoThermalAsperityRateWorst=eqlDiskSmartInfoThermalAsperityRateWorst, eqlDiskSmartInfoEmergencyRetractCycles=eqlDiskSmartInfoEmergencyRetractCycles, eqlDiskSerialNumber=eqlDiskSerialNumber, eqlDiskSlot=eqlDiskSlot, eqlDiskSmartInfoDAMErrorRateWorst=eqlDiskSmartInfoDAMErrorRateWorst, eqlDiskSmartInfoECCErrorsWorst=eqlDiskSmartInfoECCErrorsWorst, eqlDiskSmartInfoOfflineSeekPerformanceWorst=eqlDiskSmartInfoOfflineSeekPerformanceWorst, eqlDiskStatusBytesWritten=eqlDiskStatusBytesWritten, eqlDiskSmartInfoReadChannelMargin=eqlDiskSmartInfoReadChannelMargin, eqlDiskSmartInfoHDDTempWorst=eqlDiskSmartInfoHDDTempWorst, eqlDiskErrorPhyReady=eqlDiskErrorPhyReady, eqlDiskSmartInfoSpinUpTime=eqlDiskSmartInfoSpinUpTime, eqlDiskStatusNumResets=eqlDiskStatusNumResets, 
eqlDiskSmartInfoOfflineSurfaceScanWorst=eqlDiskSmartInfoOfflineSurfaceScanWorst, eqldiskModule=eqldiskModule, eqlDiskTable=eqlDiskTable, eqlDiskSmartInfoReadSoftErrorRateWorst=eqlDiskSmartInfoReadSoftErrorRateWorst, eqlDiskSmartInfoReadSoftErrorRate=eqlDiskSmartInfoReadSoftErrorRate, eqlDiskSmartInfoWritePreampErrors=eqlDiskSmartInfoWritePreampErrors, eqlDiskSmartInfoReadChannelMarginWorst=eqlDiskSmartInfoReadChannelMarginWorst, eqlDiskHealth=eqlDiskHealth, eqlDiskTypeEnum=eqlDiskTypeEnum, eqlDiskSmartInfoLoadUnloadCycles=eqlDiskSmartInfoLoadUnloadCycles, eqlDiskStatusEntry=eqlDiskStatusEntry, eqlDiskSmartInfoThroughputPerformanceWorst=eqlDiskSmartInfoThroughputPerformanceWorst, eqlDiskSmartInfoSpinupRetries=eqlDiskSmartInfoSpinupRetries, eqlDiskSmartInfoSpinBuzzWorst=eqlDiskSmartInfoSpinBuzzWorst, eqlDiskStatusNumIOs=eqlDiskStatusNumIOs, eqlDiskModelNumber=eqlDiskModelNumber, eqlDiskSmartInfoDriveRecalibRetryCount=eqlDiskSmartInfoDriveRecalibRetryCount, eqlDiskStatusTable=eqlDiskStatusTable, eqlDiskSmartInfoStartStopCount=eqlDiskSmartInfoStartStopCount, eqlDiskSmartInfoSelfTestReallocSectors=eqlDiskSmartInfoSelfTestReallocSectors, eqlDiskSmartInfoDAMErrorRate=eqlDiskSmartInfoDAMErrorRate, eqlDiskSmartInfoECCErrors=eqlDiskSmartInfoECCErrors, eqldiskConformance=eqldiskConformance, eqlDiskSmartInfoOffTrackErrorsWorst=eqlDiskSmartInfoOffTrackErrorsWorst, eqlDiskSmartInfoRawReadErrorRate=eqlDiskSmartInfoRawReadErrorRate, eqlDiskSmartInfoOnTheFlyErrorRateWorst=eqlDiskSmartInfoOnTheFlyErrorRateWorst, eqlDiskStatusLifetime=eqlDiskStatusLifetime, eqlDiskSmartInfoReallocatedSectorCount=eqlDiskSmartInfoReallocatedSectorCount, eqlDiskSmartInfoPowerOnHoursWorst=eqlDiskSmartInfoPowerOnHoursWorst, eqlDiskManufacturingInfo=eqlDiskManufacturingInfo, eqlDiskErrorDisparity=eqlDiskErrorDisparity, eqlDiskSmartInfoEntry=eqlDiskSmartInfoEntry, eqlDiskSize=eqlDiskSize, eqlDiskSmartInfoThermalAsperityRate=eqlDiskSmartInfoThermalAsperityRate, 
eqlDiskSmartInfoSpinHighCountWorst=eqlDiskSmartInfoSpinHighCountWorst, eqlDiskSmartInfoSeekPerformance=eqlDiskSmartInfoSeekPerformance, eqlDiskErrorTransportState=eqlDiskErrorTransportState, eqlDiskErrors=eqlDiskErrors, eqlDiskType=eqlDiskType, eqlDiskErrorUnrecFIS=eqlDiskErrorUnrecFIS, eqlDiskSmartInfoOnTheFlyErrorRate=eqlDiskSmartInfoOnTheFlyErrorRate, eqlDiskSmartInfoTable=eqlDiskSmartInfoTable, eqlDiskSmartInfoLoadUnloadCyclesWorst=eqlDiskSmartInfoLoadUnloadCyclesWorst)
| 127.067606 | 6,358 | 0.806979 |
227119ff41ac91db8af42efffd832717fa0c2cef | 2,260 | py | Python | src/clusto/test/drivers/DatacenterTests.py | rongoro/clusto | d6425433e5132e8778feeb9db4b8dd80b933b030 | [
"BSD-3-Clause"
] | 5 | 2015-07-19T08:28:01.000Z | 2021-07-08T14:49:27.000Z | src/clusto/test/drivers/DatacenterTests.py | wt/clusto | c114ce7c42dcfa33c1e79f4d3b49313115fea06b | [
"BSD-3-Clause"
] | null | null | null | src/clusto/test/drivers/DatacenterTests.py | wt/clusto | c114ce7c42dcfa33c1e79f4d3b49313115fea06b | [
"BSD-3-Clause"
] | 5 | 2015-01-06T07:57:07.000Z | 2021-11-10T18:01:33.000Z |
import clusto
from clusto.test import testbase
from clusto.drivers.Base import Thing
from clusto.drivers.Servers import Server
from clusto.drivers.Datacenter import Rack, RackU, Datacenter, Colo, Cage
from clusto.exceptions import *
class RackTests(testbase.ClustoTestBase):
    """Exercise the Rack driver: adding Things, listing contents, RackU args."""

    def testAddToRack(self):
        """A Thing placed in a rack reports that rack as a connection."""
        rack_name = 'ashrack101'
        rack = Rack(rack_name)
        occupant = Thing('foo1')
        rack.addToRack(occupant, [23, 24])
        clusto.flush()

        fetched = clusto.get_by_name('foo1')
        connected_racks = fetched.get_connectedByType(Rack)
        self.assert_(connected_racks[0].name == rack_name)

    def testRackContents(self):
        """getRackContents maps every occupied RU number to its occupant."""
        rack_name = 'ashrack101'
        rack = Rack(rack_name)
        thing_a = Thing('t1')
        thing_b = Thing('t2')
        thing_c = Thing('t3')

        rack.addToRack(thing_c, [1, 2])
        rack.addToRack(thing_b, [32])
        rack.addToRack(thing_a, [23, 24, 25])
        clusto.flush()

        contents = rack.getRackContents()
        self.assert_(contents[1].name == 't3')
        self.assert_(contents[2].name == 't3')
        self.assert_(contents[32].name == 't2')
        for rack_unit in (23, 24, 25):
            self.assert_(contents[rack_unit].name == 't1')

        # Deleting an occupant should free its three rack units.
        thing_a.delete()
        clusto.flush()
        rack = clusto.get_by_name(rack_name)
        contents = rack.getRackContents()
        clusto.flush()
        self.assertEqual(len(contents), 3)

    def testRackUMissingArg(self):
        """RackU requires the RU number as a second constructor argument."""
        # correct
        RackU('foo2', 3)
        # missing RU number
        self.assertRaises(TypeError, RackU, 'foo')
class Datacentertest(testbase.ClustoTestBase):
    """
    Test Datacenter Driver
    """

    def testLocationRequirement(self):
        """The mandatory location argument is stored as an attribute."""
        datacenter = Datacenter('d1', 'san francisco')
        clusto.flush()

        fetched = clusto.get_by_name('d1')
        self.assert_(fetched.getAttr('location') == 'san francisco')

    def testDatacenterThingStack(self):
        """Datacenter > Colo > Cage > Rack nest; servers can't attach to a
        datacenter directly."""
        datacenter = Datacenter('d1', 'footown')
        colo = Colo('colo1')
        cage = Cage('cage1')
        rack = Rack('rack1')
        server = Server('s1')

        datacenter.connect(colo)
        colo.connect(cage)
        cage.connect(rack)
        clusto.flush()

        # can't connect a server to a datacenter
        self.assertRaises(ConnectionException, datacenter.connect, server)
39fea51c829189bda7905fd934c816a76d2bc8be | 106,984 | py | Python | test/onnx/test_pytorch_onnx_caffe2.py | meganhumhrey/pytorch | 41054f2ab5bb39d28a3eb8497f1a65b42385a996 | [
"Intel"
] | 7 | 2021-05-29T16:31:51.000Z | 2022-02-21T18:52:25.000Z | test/onnx/test_pytorch_onnx_caffe2.py | meganhumhrey/pytorch | 41054f2ab5bb39d28a3eb8497f1a65b42385a996 | [
"Intel"
] | 1 | 2021-05-31T02:20:29.000Z | 2021-05-31T02:20:29.000Z | test/onnx/test_pytorch_onnx_caffe2.py | meganhumhrey/pytorch | 41054f2ab5bb39d28a3eb8497f1a65b42385a996 | [
"Intel"
] | null | null | null | from typing import Tuple
import functools
import io
import itertools
import sys
import unittest
import numpy as np
from debug_embed_params import run_embed_params
from torch import nn
from torch.autograd import Variable, function
from torch.nn.utils import rnn as rnn_utils
from torch.onnx import ExportTypes
import torch.onnx
import torch.onnx.operators
import torch.utils.model_zoo as model_zoo
# Import various models for testing
from torchvision.models.alexnet import alexnet
from torchvision.models.densenet import densenet121
from torchvision.models.inception import inception_v3
from torchvision.models.resnet import resnet50
from torchvision.models.vgg import vgg16, vgg16_bn, vgg19, vgg19_bn
from model_defs.squeezenet import SqueezeNet
from model_defs.super_resolution import SuperResolutionNet
from model_defs.srresnet import SRResNet
import model_defs.dcgan as dcgan
import model_defs.word_language_model as word_language_model
from model_defs.mnist import MNIST
from model_defs.lstm_flattening_result import LstmFlatteningResult
from model_defs.rnn_model_with_packed_sequence import RnnModelWithPackedSequence
from caffe2.python.operator_test.torch_integration_test import (generate_rois_rotated,
                                                                create_bbox_transform_inputs)
import onnx
import caffe2.python.onnx.backend as c2
from test_pytorch_common import skipIfTravis, skipIfNoLapack, skipIfNoCuda
from test_pytorch_common import BATCH_SIZE, RNN_BATCH_SIZE, RNN_SEQUENCE_LENGTH, RNN_INPUT_SIZE, RNN_HIDDEN_SIZE
from test_pytorch_common import skipIfUnsupportedOpsetVersion, skipIfUnsupportedMinOpsetVersion
import verify
skip = unittest.skip
def skipIfEmbed(func):
    """Decorator: skip a test when the suite runs with embedded parameters.

    The wrapped function must be an instance method of a test class that
    exposes a boolean ``embed_params`` attribute.  Uses ``functools.wraps``
    so the wrapped test keeps its original name/docstring in reports (the
    previous version reported every skipped test as ``wrapper``).
    """
    @functools.wraps(func)
    def wrapper(self):
        if self.embed_params:
            raise unittest.SkipTest("Skip embed_params verify test")
        return func(self)
    return wrapper
def skipIfNoEmbed(func):
    """Decorator: skip a test unless the suite runs with embedded parameters.

    Mirror image of ``skipIfEmbed``; the wrapped function must be an instance
    method of a test class with a boolean ``embed_params`` attribute.  Uses
    ``functools.wraps`` so the skipped test keeps its original name in
    reports.
    """
    @functools.wraps(func)
    def wrapper(self):
        if not self.embed_params:
            raise unittest.SkipTest("Skip debug embed_params test")
        return func(self)
    return wrapper
# def import_model(proto, input, workspace=None, use_gpu=True):
# model_def = onnx.ModelProto.FromString(proto)
# onnx.checker.check_model(model_def)
#
# if workspace is None:
# workspace = {}
# if isinstance(input, tuple):
# for i in range(len(input)):
# workspace[model_def.graph.input[i]] = input[i]
# else:
# workspace[model_def.graph.input[0]] = input
#
# caffe2_out_workspace = c2.run_model(
# init_graph=None,
# predict_graph=graph_def,
# inputs=workspace,
# use_gpu=use_gpu)
# caffe2_out = caffe2_out_workspace[0]
# return caffe2_out
def do_export(model, inputs, *args, **kwargs):
    """Export ``model`` to ONNX in memory.

    Returns a tuple ``(serialized ONNX bytes, reference output)``.  For
    ScriptModules the exporter's output is discarded and the module is re-run
    eagerly so the reference matches a direct invocation.
    """
    buf = io.BytesIO()
    out = torch.onnx._export(model, inputs, buf, *args, **kwargs)
    if isinstance(model, torch.jit.ScriptModule):
        # Common case: a single Tensor was passed instead of a tuple.
        call_args = (inputs,) if isinstance(inputs, torch.Tensor) else inputs
        out = model(*call_args)
    return buf.getvalue(), out
# NOTE(review): runs at import time and changes the process-wide default
# tensor type for every test in this module.
torch.set_default_tensor_type("torch.FloatTensor")
# NOTE(review): torch is already imported (and used on the line above), so
# this guard can never trigger here; kept as-is for historical reasons.
try:
    import torch
except ImportError:
    print("Cannot import torch, hence caffe2-torch test will not run.")
    sys.exit(0)
# Pre-trained weights for the torchvision/model_defs models exercised below,
# fetched on demand via torch.utils.model_zoo.
model_urls = {
    "alexnet": "https://s3.amazonaws.com/download.caffe2.ai/test_data/alexnet-owt-4df8aa71.pth",
    "dcgan_b": "https://s3.amazonaws.com/pytorch/test_data/export/netG_bedroom_epoch_1-0649e76b.pth",
    "dcgan_f": "https://s3.amazonaws.com/pytorch/test_data/export/netG_faces_epoch_49-d86035a6.pth",
    "densenet121": "https://s3.amazonaws.com/download.caffe2.ai/test_data/densenet121-d66d3027.pth",
    "inception_v3_google": "https://s3.amazonaws.com/download.caffe2.ai/test_data/inception_v3_google-1a9a5a14.pth",
    "resnet50": "https://s3.amazonaws.com/download.caffe2.ai/test_data/resnet50-19c8e357.pth",
    "srresNet": "https://s3.amazonaws.com/pytorch/demos/srresnet-e10b2039.pth",
    "super_resolution": "https://s3.amazonaws.com/pytorch/test_data/export/superres_epoch100-44c6958e.pth",
    "squeezenet1_0": "https://s3.amazonaws.com/download.caffe2.ai/test_data/squeezenet1_0-a815701f.pth",
    "squeezenet1_1": "https://s3.amazonaws.com/download.caffe2.ai/test_data/squeezenet1_1-f364aa15.pth",
    "vgg16": "https://s3.amazonaws.com/download.caffe2.ai/test_data/vgg16-397923af.pth",
    "vgg19": "https://s3.amazonaws.com/download.caffe2.ai/test_data/vgg19-dcbb9e9d.pth",
}
class TestCaffe2Backend_opset9(unittest.TestCase):
from torch.onnx.symbolic_helper import _export_onnx_opset_version
opset_version = _export_onnx_opset_version
embed_params = False
def setUp(self):
torch.manual_seed(0)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0)
np.random.seed(seed=0)
def convert_cuda(self, model, input):
cuda_model = model.cuda()
# input might be nested - we want to move everything to GPU
cuda_input = function._nested_map(
lambda o: isinstance(o, Variable) or isinstance(o, torch.Tensor),
lambda o: o.cuda())(input)
return cuda_model, cuda_input
def run_debug_test(self, model, train, batch_size, state_dict=None,
input=None, use_gpu=True, example_outputs=None,
operator_export_type=torch.onnx.OperatorExportTypes.ONNX):
"""
# TODO: remove this from the final release version
This test is for our debugging only for the case where
embed_params=False
"""
if not isinstance(model, torch.jit.ScriptModule):
model.train(train)
if state_dict is not None:
model.load_state_dict(state_dict)
# Either user specified input or random (deterministic) input
if input is None:
input = torch.randn(batch_size, 3, 224, 224, requires_grad=True)
if use_gpu:
model, input = self.convert_cuda(model, input)
onnxir, torch_out = do_export(model, input, export_params=self.embed_params, verbose=False,
example_outputs=example_outputs,
do_constant_folding=False,
opset_version=self.opset_version,
keep_initializers_as_inputs=True,
add_node_names=False,
operator_export_type=operator_export_type)
if isinstance(torch_out, torch.autograd.Variable):
torch_out = (torch_out,)
caffe2_out = run_embed_params(onnxir, model, input, state_dict, use_gpu)
for _, (x, y) in enumerate(zip(torch_out, caffe2_out)):
np.testing.assert_almost_equal(x.data.cpu().numpy(), y, decimal=3)
    def run_actual_test(self, model, train, batch_size, state_dict=None,
                        input=None, use_gpu=True, rtol=0.001, atol=1e-7,
                        example_outputs=None, do_constant_folding=True,
                        operator_export_type=torch.onnx.OperatorExportTypes.ONNX,
                        input_names=None, dynamic_axes=None,
                        remained_onnx_input_idx=None):
        """Export with embedded parameters and verify against Caffe2.

        This is what the user facing version will look like: ``verify.verify``
        exports the model (constant folding on by default, since parameters
        are embedded) and checks that the Caffe2 backend reproduces the
        PyTorch outputs within ``rtol``/``atol``.
        """
        # set the training/test mode for the model
        if not isinstance(model, torch.jit.ScriptModule):
            model.train(train)
        # use the pre-trained model params if available
        if state_dict is not None:
            model.load_state_dict(state_dict)

        # Either user specified input or random (deterministic) input
        if input is None:
            input = torch.randn(batch_size, 3, 224, 224, requires_grad=True)
        # GPU-ize the model, if requested
        if use_gpu:
            model, input = self.convert_cuda(model, input)

        # Verify the model runs the same in Caffe2
        verify.verify(model, input, c2, rtol=rtol, atol=atol,
                      example_outputs=example_outputs,
                      do_constant_folding=do_constant_folding,
                      opset_version=self.opset_version,
                      keep_initializers_as_inputs=True,
                      operator_export_type=operator_export_type,
                      input_names=input_names,
                      dynamic_axes=dynamic_axes,
                      remained_onnx_input_idx=remained_onnx_input_idx)
    def run_model_test(self, model, train, batch_size, state_dict=None,
                       input=None, use_gpu=True, rtol=0.001, atol=1e-7,
                       example_outputs=None, do_constant_folding=True,
                       operator_export_type=torch.onnx.OperatorExportTypes.ONNX,
                       input_names=None, dynamic_axes=None,
                       remained_onnx_input_idx=None):
        """Common test entry point: dispatch to the embedded-params
        verification path or the debug (external-params) path depending on
        ``self.embed_params``.  All keyword arguments are forwarded.
        """
        # Only use the GPU when both requested and actually available.
        use_gpu_ = torch.cuda.is_available() and use_gpu
        # NOTE: do_constant_folding is turned on only when model has
        # parameters embedded (which are needed for constant folding),
        # i.e. for self.embed_params=True case. self.embed_params=True
        # for the TestCaffe2BackendEmbed class defined at the bottom.
        if self.embed_params:
            self.run_actual_test(model, train, batch_size, state_dict, input,
                                 use_gpu=use_gpu_, rtol=rtol, atol=atol,
                                 example_outputs=example_outputs,
                                 do_constant_folding=do_constant_folding,
                                 operator_export_type=operator_export_type,
                                 input_names=input_names,
                                 dynamic_axes=dynamic_axes,
                                 remained_onnx_input_idx=remained_onnx_input_idx)
        else:
            self.run_debug_test(model, train, batch_size, state_dict, input,
                                use_gpu=use_gpu_, example_outputs=example_outputs,
                                operator_export_type=operator_export_type)
def test_linear(self):
class MyModel(torch.nn.Module):
def __init__(self):
super(MyModel, self).__init__()
self.many_fc = nn.Sequential(
nn.Linear(4, 5, bias=True),
nn.ReLU(inplace=True),
nn.Linear(5, 6, bias=True),
nn.ReLU(inplace=True),
nn.Linear(6, 7, bias=True),
)
def forward(self, input):
return self.many_fc(input)
model = MyModel()
input = torch.randn(3, 4, requires_grad=True)
self.run_model_test(model, train=False, batch_size=0, input=input)
def test_onnx_export_with_parameter_renaming(self):
class SimpleFcNet(nn.Module):
def __init__(self):
super(SimpleFcNet, self).__init__()
self.fc1 = nn.Linear(5, 10)
def forward(self, input):
return self.fc1(input)
model = SimpleFcNet()
input = torch.randn(7, 5)
output = model(input)
f = io.BytesIO()
# Note that the export call explicitly sets the names of not just the input,
# but also the parameters. This test checks that the model can be loaded and
# executed in Caffe2 backend correctly.
torch.onnx._export(model, input, f, verbose=True, export_type=ExportTypes.ZIP_ARCHIVE,
input_names=["input1", "parameter1", "parameter2"],
keep_initializers_as_inputs=True)
f.seek(0)
model_c2 = c2.prepare_zip_archive(f)
result = model_c2.run(input.numpy())
np.testing.assert_almost_equal(output.data.cpu().numpy(), result[0], decimal=3)
def test_onnx_export_param_name_duplication(self):
class SimpleFcNet(nn.Module):
def __init__(self):
super(SimpleFcNet, self).__init__()
self.fc1 = nn.Linear(5, 10)
def forward(self, input):
return self.fc1(input)
model = SimpleFcNet()
input = torch.randn(7, 5)
output = model(input)
f = io.BytesIO()
# The export call explicitly sets the names of the input, and the first parameter.
# But note that the target first parameter name is the same as the second parameter name.
# This test checks that given this edge condition, the model can be loaded and executed
# in Caffe2 backend correctly.
torch.onnx._export(model, input, f, verbose=True, export_type=ExportTypes.ZIP_ARCHIVE,
input_names=["input1", "fc1.bias"], _retain_param_name=False,
keep_initializers_as_inputs=True)
f.seek(0)
model_c2 = c2.prepare_zip_archive(f)
result = model_c2.run(input.numpy())
np.testing.assert_almost_equal(output.data.cpu().numpy(), result[0], decimal=3)
def test_lstm_cell(self):
model = nn.LSTMCell(RNN_INPUT_SIZE, RNN_HIDDEN_SIZE)
input = torch.randn(BATCH_SIZE, RNN_INPUT_SIZE)
h0 = torch.randn(BATCH_SIZE, RNN_HIDDEN_SIZE)
c0 = torch.randn(BATCH_SIZE, RNN_HIDDEN_SIZE)
self.run_model_test(model, train=False, batch_size=BATCH_SIZE, input=(input, (h0, c0)), use_gpu=False)
def test_gru_cell(self):
model = nn.GRUCell(RNN_INPUT_SIZE, RNN_HIDDEN_SIZE)
input = torch.randn(BATCH_SIZE, RNN_INPUT_SIZE)
h0 = torch.randn(BATCH_SIZE, RNN_HIDDEN_SIZE)
self.run_model_test(model, train=False, batch_size=BATCH_SIZE, input=(input, h0), use_gpu=False)
def _dispatch_rnn_test(self, name, *args, **kwargs):
if name == "elman":
self._elman_rnn_test(*args, **kwargs)
if name == "lstm":
self._lstm_test(*args, **kwargs)
if name == "gru":
self._gru_test(*args, **kwargs)
def _elman_rnn_test(self, layers, nonlinearity, bidirectional,
initial_state, packed_sequence, dropout):
batch_first = True if packed_sequence == 2 else False
model = nn.RNN(RNN_INPUT_SIZE, RNN_HIDDEN_SIZE,
layers,
nonlinearity=nonlinearity,
bidirectional=bidirectional,
dropout=dropout,
batch_first=batch_first)
if packed_sequence == 1:
model = RnnModelWithPackedSequence(model, False)
if packed_sequence == 2:
model = RnnModelWithPackedSequence(model, True)
def make_input(batch_size):
seq_lengths = np.random.randint(1, RNN_SEQUENCE_LENGTH + 1, size=batch_size)
seq_lengths = list(reversed(sorted(map(int, seq_lengths))))
inputs = [torch.randn(l, RNN_INPUT_SIZE) for l in seq_lengths]
inputs = rnn_utils.pad_sequence(inputs, batch_first=batch_first)
inputs = [inputs]
directions = 2 if bidirectional else 1
if initial_state:
h0 = torch.randn(directions * layers, batch_size, RNN_HIDDEN_SIZE)
inputs.append(h0)
if packed_sequence != 0:
inputs.append(torch.IntTensor(seq_lengths))
if len(inputs) == 1:
input = inputs[0]
else:
input = tuple(inputs)
return input
input = make_input(RNN_BATCH_SIZE)
self.run_model_test(model, train=False, batch_size=RNN_BATCH_SIZE, input=input, use_gpu=False, atol=1e-7)
# test that the model still runs with a different batch size
# (save the model with a batch_size of 1 with rnn with a variable batch size,
# otherwise expand will fail)
variable_batch_size_init_input = make_input(1)
# Constant folding works when model has parameters embedded. For this case, we need to disable it
onnxir, _ = do_export(model, variable_batch_size_init_input, keep_initializers_as_inputs=True,
do_constant_folding=False)
other_input = make_input(RNN_BATCH_SIZE + 1)
_ = run_embed_params(onnxir, model, other_input, use_gpu=False)
def _lstm_test(self, layers, bidirectional, initial_state,
               packed_sequence, dropout):
    """Export an LSTM for the given config and compare with Caffe2.

    packed_sequence semantics mirror _elman_rnn_test: 0 = padded tensor,
    1 = packed, 2 = packed with batch_first. The initial state is the
    (h0, c0) pair required by LSTM.
    """
    batch_first = True if packed_sequence == 2 else False
    model = LstmFlatteningResult(
        RNN_INPUT_SIZE, RNN_HIDDEN_SIZE, layers,
        bidirectional=bidirectional, dropout=dropout, batch_first=batch_first)
    # Wrap the model so the traced graph consumes packed-sequence data.
    if packed_sequence == 1:
        model = RnnModelWithPackedSequence(model, False)
    if packed_sequence == 2:
        model = RnnModelWithPackedSequence(model, True)
    def make_input(batch_size):
        # Random sequence lengths, sorted descending (packing convention).
        seq_lengths = np.random.randint(1, RNN_SEQUENCE_LENGTH + 1, size=batch_size)
        seq_lengths = list(reversed(sorted(map(int, seq_lengths))))
        inputs = [torch.randn(l, RNN_INPUT_SIZE) for l in seq_lengths]
        inputs = rnn_utils.pad_sequence(inputs, batch_first=batch_first)
        inputs = [inputs]
        directions = 2 if bidirectional else 1
        if initial_state:
            # LSTM takes both hidden and cell initial states.
            h0 = torch.randn(directions * layers, batch_size, RNN_HIDDEN_SIZE)
            c0 = torch.randn(directions * layers, batch_size, RNN_HIDDEN_SIZE)
            inputs.append((h0, c0))
        if packed_sequence != 0:
            inputs.append(torch.IntTensor(seq_lengths))
        # A single tensor is passed bare; multiple inputs become a tuple.
        if len(inputs) == 1:
            input = inputs[0]
        else:
            input = tuple(inputs)
        return input
    input = make_input(RNN_BATCH_SIZE)
    self.run_model_test(model, train=False, batch_size=RNN_BATCH_SIZE, input=input, use_gpu=False)
    # test that the model still runs with a different batch size
    # (save the model with a batch_size of 1 with rnn with a variable batch size,
    # otherwise expand will fail)
    variable_batch_size_init_input = make_input(1)
    # Constant folding works when model has parameters embedded. For this case, we need to disable it
    onnxir, _ = do_export(model, variable_batch_size_init_input, keep_initializers_as_inputs=True,
                          do_constant_folding=False)
    other_input = make_input(RNN_BATCH_SIZE + 1)
    _ = run_embed_params(onnxir, model, other_input, use_gpu=False)
def _gru_test(self, layers, bidirectional, initial_state,
              packed_sequence, dropout):
    """Export a GRU for the given config and compare with Caffe2.

    packed_sequence semantics mirror _elman_rnn_test: 0 = padded tensor,
    1 = packed, 2 = packed with batch_first.
    """
    batch_first = True if packed_sequence == 2 else False
    model = nn.GRU(RNN_INPUT_SIZE, RNN_HIDDEN_SIZE, layers,
                   bidirectional=bidirectional, dropout=dropout, batch_first=batch_first)
    # Wrap the model so the traced graph consumes packed-sequence data.
    if packed_sequence == 1:
        model = RnnModelWithPackedSequence(model, False)
    if packed_sequence == 2:
        model = RnnModelWithPackedSequence(model, True)
    def make_input(batch_size):
        # Random sequence lengths, sorted descending (packing convention).
        seq_lengths = np.random.randint(1, RNN_SEQUENCE_LENGTH + 1, size=batch_size)
        seq_lengths = list(reversed(sorted(map(int, seq_lengths))))
        inputs = [torch.randn(l, RNN_INPUT_SIZE) for l in seq_lengths]
        inputs = rnn_utils.pad_sequence(inputs, batch_first=batch_first)
        inputs = [inputs]
        directions = 2 if bidirectional else 1
        if initial_state:
            h0 = torch.randn(directions * layers, batch_size, RNN_HIDDEN_SIZE)
            inputs.append(h0)
        if packed_sequence != 0:
            inputs.append(torch.IntTensor(seq_lengths))
        # A single tensor is passed bare; multiple inputs become a tuple.
        if len(inputs) == 1:
            input = inputs[0]
        else:
            input = tuple(inputs)
        return input
    input = make_input(RNN_BATCH_SIZE)
    self.run_model_test(model, train=False, batch_size=RNN_BATCH_SIZE, input=input, use_gpu=False)
    # test that the model still runs with a different batch size
    # (save the model with a batch_size of 1 with rnn with a variable batch size,
    # otherwise expand will fail)
    variable_batch_size_init_input = make_input(1)
    # Constant folding works when model has parameters embedded. For this case, we need to disable it
    onnxir, _ = do_export(model, variable_batch_size_init_input, keep_initializers_as_inputs=True,
                          do_constant_folding=False)
    other_input = make_input(RNN_BATCH_SIZE + 1)
    _ = run_embed_params(onnxir, model, other_input, use_gpu=False)
def test_rnn_init_predict_split(self):
    """Check how the exported LSTM graph splits between Caffe2's init
    and predict nets, depending on whether parameters are embedded."""
    model = nn.LSTM(RNN_INPUT_SIZE, RNN_HIDDEN_SIZE, 3, bidirectional=True)
    seq_lengths = np.random.randint(1, RNN_SEQUENCE_LENGTH + 1, size=7)
    seq_lengths = list(reversed(sorted(map(int, seq_lengths))))
    input = [torch.randn(l, RNN_INPUT_SIZE) for l in seq_lengths]
    input = rnn_utils.pad_sequence(input)
    # Test that we are correctly splitting between init and
    # predict net. When we embed parameters, there should be more
    # ops in the init net.
    mp = onnx.ModelProto.FromString(do_export(model, input, export_params=self.embed_params,
                                              keep_initializers_as_inputs=True,
                                              do_constant_folding=False)[0])
    prepared = c2.prepare(mp, device="CPU")
    # NOTE: exact op counts pin the current exporter output; they must be
    # updated whenever the export changes.
    if self.embed_params:
        assert len(prepared.init_net.op) == 950
        assert len(prepared.predict_net.op) == 101
    else:
        assert len(prepared.init_net.op) == 83
        assert len(prepared.predict_net.op) == 968
def test_alexnet(self):
    """Export pretrained AlexNet and compare against the Caffe2 backend."""
    weights = model_zoo.load_url(model_urls["alexnet"], progress=False)
    self.run_model_test(alexnet(), train=False, batch_size=BATCH_SIZE,
                        state_dict=weights, atol=1e-3)
@skipIfNoCuda
def test_dcgan(self):
    """Export the DCGAN discriminator and generator with fixed seeds."""
    # dcgan is flaky on some seeds, see:
    # https://github.com/ProjectToffee/onnx/pull/70
    torch.manual_seed(1)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(1)
    # Discriminator: random weights, image-shaped input.
    netD = dcgan._netD(1)
    netD.apply(dcgan.weights_init)
    input = torch.randn(BATCH_SIZE, 3, dcgan.imgsz, dcgan.imgsz)
    self.run_model_test(netD, train=False, batch_size=BATCH_SIZE,
                        input=input)
    # Generator: pretrained weights, latent-noise input.
    netG = dcgan._netG(1)
    netG.apply(dcgan.weights_init)
    state_dict = model_zoo.load_url(model_urls["dcgan_b"], progress=False)
    # state_dict = model_zoo.load_url(model_urls["dcgan_f"], progress=False)
    noise = torch.randn(BATCH_SIZE, dcgan.nz, 1, 1).normal_(0, 1)
    self.run_model_test(netG, train=False, batch_size=BATCH_SIZE,
                        input=noise, state_dict=state_dict, rtol=1e-2, atol=1e-6)
@unittest.skipIf(not torch.cuda.is_available(),
                 "model on net has cuda in it, awaiting fix")
def test_densenet(self):
    """Export pretrained DenseNet-121 and compare against Caffe2."""
    weights = model_zoo.load_url(model_urls["densenet121"], progress=False)
    self.run_model_test(densenet121(), train=False, batch_size=BATCH_SIZE,
                        state_dict=weights, atol=1e-7)
@skip("doesn't match exactly...")
# TODO: figure out the numerical instabilities
def test_inception(self):
    """Export Inception v3 (currently skipped: numerical mismatch)."""
    x = torch.randn(BATCH_SIZE, 3, 299, 299, requires_grad=True)
    # state_dict = model_zoo.load_url(model_urls["inception_v3_google"], progress=False)
    state_dict = None  # random weights until the mismatch is understood
    self.run_model_test(inception_v3(), train=False, batch_size=BATCH_SIZE,
                        state_dict=state_dict, input=x)
@skipIfNoEmbed
def test_resnet(self):
    """Export pretrained ResNet-50 and compare against Caffe2."""
    weights = model_zoo.load_url(model_urls["resnet50"], progress=False)
    self.run_model_test(resnet50(), train=False, batch_size=BATCH_SIZE,
                        state_dict=weights, atol=1e-5)
def test_squeezenet(self):
    """Export SqueezeNet v1.1 with pretrained weights."""
    model = SqueezeNet(version=1.1)
    weights = model_zoo.load_url(model_urls["squeezenet1_1"], progress=False)
    self.run_model_test(model, train=False, batch_size=BATCH_SIZE,
                        state_dict=weights)
# @skip("takes long to run, LAPACK needed for gpu")
@skipIfNoLapack
@unittest.skip("This model takes too much memory")
def test_srresnet(self):
    """Export pretrained SRResNet at 4x upscale (skipped: memory-heavy)."""
    super_resolution_net = SRResNet(
        rescale_factor=4, n_filters=64, n_blocks=8)
    state_dict = model_zoo.load_url(model_urls["srresNet"], progress=False)
    x = torch.randn(1, 3, 224, 224, requires_grad=True)
    self.run_model_test(super_resolution_net, train=False,
                        batch_size=1, state_dict=state_dict,
                        input=x, use_gpu=False)
@skipIfTravis
@skipIfNoLapack
@skipIfNoCuda
def test_super_resolution(self):
    """Export the example super-resolution net (3x upscale) with
    pretrained weights; single-channel 224x224 input."""
    super_resolution_net = SuperResolutionNet(upscale_factor=3)
    state_dict = model_zoo.load_url(model_urls["super_resolution"], progress=False)
    x = torch.randn(1, 1, 224, 224, requires_grad=True)
    self.run_model_test(super_resolution_net, train=False,
                        batch_size=BATCH_SIZE, state_dict=state_dict,
                        input=x, use_gpu=False, atol=1e-6)
@unittest.skip("This model takes too much memory")
def test_vgg16(self):
    """Export pretrained VGG-16 (skipped: memory-heavy)."""
    weights = model_zoo.load_url(model_urls["vgg16"], progress=False)
    self.run_model_test(vgg16(), train=False, batch_size=BATCH_SIZE,
                        state_dict=weights)
@skip("disable to run tests faster...")
def test_vgg16_bn(self):
    """Export VGG-16 with batch norm, random weights."""
    self.run_model_test(vgg16_bn(), train=False, batch_size=BATCH_SIZE)
@skip("disable to run tests faster...")
def test_vgg19(self):
    """Export pretrained VGG-19."""
    weights = model_zoo.load_url(model_urls["vgg19"], progress=False)
    self.run_model_test(vgg19(), train=False, batch_size=BATCH_SIZE,
                        state_dict=weights)
@skip("disable to run tests faster...")
def test_vgg19_bn(self):
    """Export VGG-19 with batch norm, random weights."""
    self.run_model_test(vgg19_bn(), train=False, batch_size=BATCH_SIZE)
def run_word_language_model(self, model_name):
    """Export the example word-language model for the given RNN flavor
    (RNN_TANH / RNN_RELU / LSTM / GRU) and compare on CPU."""
    ntokens = 50
    emsize = 5
    nhid = 5
    nlayers = 5
    dropout = 0.2
    tied = False
    batchsize = 5
    model = word_language_model.RNNModel(model_name, ntokens, emsize,
                                         nhid, nlayers, dropout, tied,
                                         batchsize)
    # Token ids 0..ntokens-1, reshaped to (seq_len, batch).
    x = torch.arange(0, ntokens).long().view(-1, batchsize)
    # Only support CPU version, since tracer is not working in GPU RNN.
    self.run_model_test(model, train=False, input=(x, model.hidden),
                        batch_size=batchsize, use_gpu=False)
@skipIfUnsupportedOpsetVersion([10])
def test_word_language_model_RNN_TANH(self):
    """Word-language model using the tanh Elman RNN variant."""
    flavor = "RNN_TANH"
    self.run_word_language_model(flavor)
@skipIfUnsupportedOpsetVersion([10])
def test_word_language_model_RNN_RELU(self):
    """Word-language model using the relu Elman RNN variant."""
    flavor = "RNN_RELU"
    self.run_word_language_model(flavor)
@skipIfUnsupportedOpsetVersion([10])
def test_word_language_model_LSTM(self):
    """Word-language model using the LSTM variant."""
    flavor = "LSTM"
    self.run_word_language_model(flavor)
@skipIfUnsupportedOpsetVersion([10])
def test_word_language_model_GRU(self):
    """Word-language model using the GRU variant."""
    flavor = "GRU"
    self.run_word_language_model(flavor)
def test_batchnorm1d_special(self):
    """BatchNorm1d on a 2-d (N, C) input, training mode."""
    data = torch.randn(BATCH_SIZE, 224)
    self.run_model_test(nn.BatchNorm1d(224), train=True, input=data, batch_size=BATCH_SIZE)
def test_batchnorm1d(self):
    """BatchNorm1d on a 3-d (N, C, L) input, training mode."""
    data = torch.randn(BATCH_SIZE, 224, 224)
    self.run_model_test(nn.BatchNorm1d(224), train=True, input=data, batch_size=BATCH_SIZE)
def test_batchnorm1d_noaffine(self):
    """BatchNorm1d without learnable affine parameters, eval mode."""
    data = torch.randn(BATCH_SIZE, 224)
    self.run_model_test(nn.BatchNorm1d(224, affine=False), train=False, input=data, batch_size=BATCH_SIZE)
def test_batchnorm2d_noaffine(self):
    """BatchNorm2d without affine parameters on 1x1 spatial input."""
    data = torch.randn(128, 128, 1, 1)
    self.run_model_test(nn.BatchNorm2d(128, affine=False), train=False, input=data, batch_size=BATCH_SIZE)
def test_batchnorm3d_noaffine(self):
    """BatchNorm3d without affine parameters on 1x1x1 spatial input."""
    data = torch.randn(128, 128, 1, 1, 1)
    self.run_model_test(nn.BatchNorm3d(128, affine=False), train=False, input=data, batch_size=BATCH_SIZE)
def test_constant(self):
    """A tensor captured from the enclosing scope becomes a graph constant."""
    const = torch.randn(BATCH_SIZE, 3, 224, 224)

    class ConstAddModel(torch.nn.Module):
        def forward(self, input):
            return input + const.type_as(input)

    self.run_model_test(ConstAddModel(), train=False, batch_size=BATCH_SIZE)
def test_consumed_bn(self):
    """BatchNorm2d exported in training mode."""
    bn = nn.BatchNorm2d(3)
    self.run_model_test(bn, train=True, batch_size=BATCH_SIZE)
def _test_index_generic(self, fn):
    """Run an indexing callable *fn* on a fixed 5-d tensor through export."""
    class IndexModel(torch.nn.Module):
        def forward(self, input):
            return fn(input)

    data = torch.randn(3, 4, 5, 6, 7)
    self.run_model_test(IndexModel(), input=data, train=False, batch_size=BATCH_SIZE)
def test_index_1d(self):
    """Integer index on the leading dim."""
    self._test_index_generic(lambda t: t[0])
@skipIfUnsupportedOpsetVersion([10])
def test_index_2d_1dimslice(self):
    """Slice on dim 0, full slice on dim 1."""
    self._test_index_generic(lambda t: t[0:1, :])
@skipIfUnsupportedOpsetVersion([10])
def test_index_2d_sliceint(self):
    """Integer index on dim 0, full slice on dim 1."""
    self._test_index_generic(lambda t: t[1, :])
@skipIfUnsupportedOpsetVersion([10])
def test_index_2d_neg_slice(self):
    """Slice with a negative end index on dim 0."""
    self._test_index_generic(lambda t: t[0:-1, :])
@skipIfUnsupportedOpsetVersion([10])
def test_index_2d_2dimslice(self):
    """Slices on both leading dims."""
    self._test_index_generic(lambda t: t[0:1, 0:1])
@skipIfUnsupportedOpsetVersion([10])
def test_index_2d_neg_slice2dim(self):
    """Negative-end slices on both leading dims."""
    self._test_index_generic(lambda t: t[0:-1, 0:-1])
def test_tensor_index_1d(self):
    """Tensor (gather) index on the leading dim."""
    self._test_index_generic(lambda t: t[torch.tensor([0, 2])])
def test_tensor_index_2d_1dconstant(self):
    """Integer index followed by a tensor index."""
    self._test_index_generic(lambda t: t[1, torch.tensor([0, 2])])
@skipIfUnsupportedOpsetVersion([10])
def test_tensor_index_2d_1dslice(self):
    """Tensor index on dim 0 combined with a slice on dim 1."""
    self._test_index_generic(lambda t: t[torch.tensor([0, 2]), 0:1])
@skipIfUnsupportedOpsetVersion([10])
def test_tensor_index_2d_1dslice_first(self):
    """Slice on dim 0 combined with a tensor index on dim 1."""
    self._test_index_generic(lambda t: t[1:3, torch.tensor([0, 2])])
def test_tensor_index_newaxis(self):
    """None (new axis) combined with a tensor index."""
    self._test_index_generic(lambda t: t[None, torch.tensor([0, 2])])
def test_tensor_index_advanced_indexing(self):
    """Advanced indexing with several index tensors across non-adjacent dims."""
    self._test_index_generic(
        lambda t: t[:, torch.tensor([[0, 2], [1, 1]]), :, torch.tensor([2, 1]), torch.tensor([0, 3])])
@skipIfUnsupportedOpsetVersion([10])
def test_tensor_index_advanced_indexing_with_slice(self):
    """Advanced indexing mixed with slices and a new axis."""
    self._test_index_generic(lambda t: t[:, torch.tensor([0, 2]), None, 2:4, torch.tensor([[1, 3], [4, 0]])])
    self._test_index_generic(lambda t: t[:, torch.tensor([0, 2]), torch.tensor([1]), 2:4, torch.tensor([[1], [4]])])
def test_tensor_index_advanced_indexing_consecutive(self):
    """Consecutive index tensors followed by a new axis."""
    self._test_index_generic(lambda t: t[:, torch.tensor([0, 2]), torch.tensor([[1, 3], [4, 0]]), None])
@skipIfUnsupportedMinOpsetVersion(9)
def test_tensor_index_advanced_indexing_masked(self):
    """Boolean-mask (uint8) index mixed with a tensor index and new axis."""
    self._test_index_generic(
        lambda t: t[:, torch.tensor([1, 0, 1, 0], dtype=torch.uint8), torch.tensor([[1, 3], [4, 0]]), None])
def test_chunk(self):
    """chunk() along dim 2; only one chunk is returned for comparison."""
    class ChunkModel(torch.nn.Module):
        def forward(self, input):
            # The test runner cannot compare tuples, so return just the
            # last chunk.
            return input.chunk(8, dim=2)[-1]

    self.run_model_test(ChunkModel(), train=False, batch_size=BATCH_SIZE)
def test_sqrt(self):
    """Elementwise sqrt on strictly positive input."""
    class SqrtModel(torch.nn.Module):
        def forward(self, input):
            return input.sqrt()

    data = torch.empty(BATCH_SIZE, 10, 10).uniform_(4, 9)
    self.run_model_test(SqrtModel(), train=False, input=data, batch_size=BATCH_SIZE)
def test_rsqrt(self):
    """Elementwise reciprocal square root."""
    class RsqrtModel(torch.nn.Module):
        def forward(self, input):
            return input.rsqrt()

    data = torch.randn(4, 2, 3, requires_grad=True)
    self.run_model_test(RsqrtModel(), train=False, input=data, batch_size=BATCH_SIZE)
def test_log(self):
    """Elementwise natural log on strictly positive input."""
    class LogModel(torch.nn.Module):
        def forward(self, input):
            return input.log()

    data = torch.empty(BATCH_SIZE, 10, 10).uniform_(4, 9)
    self.run_model_test(LogModel(), train=False, input=data, batch_size=BATCH_SIZE)
@skipIfUnsupportedMinOpsetVersion(9)
def test_erf(self):
    """Elementwise error function (opset >= 9)."""
    class ErfModel(torch.nn.Module):
        def forward(self, input):
            return input.erf()

    data = torch.empty(BATCH_SIZE, 10, 10).uniform_(4, 9)
    self.run_model_test(ErfModel(), train=False, input=data, batch_size=BATCH_SIZE)
def test_trigonometry(self):
    """Each unary trigonometric method exports correctly."""
    def check(op_name):
        class TrigModel(torch.nn.Module):
            def forward(self, input):
                return getattr(input, op_name)()

        # Values in [0, 1) keep the inverse functions in-domain.
        data = torch.empty(BATCH_SIZE, 10, 10).uniform_()
        self.run_model_test(TrigModel(), train=False, input=data, batch_size=BATCH_SIZE)

    for op in ("cos", "sin", "tan", "acos", "asin", "atan"):
        check(op)
def test_addconstant(self):
    """Scalar addition broadcast over the whole input."""
    class AddOneModel(torch.nn.Module):
        def forward(self, input):
            return input + 1

    self.run_model_test(AddOneModel(), train=False, batch_size=BATCH_SIZE)
def test_subconstant(self):
    """Scalar subtraction broadcast over the whole input."""
    class SubOneModel(torch.nn.Module):
        def forward(self, input):
            return input - 1

    self.run_model_test(SubOneModel(), train=False, batch_size=BATCH_SIZE)
def test_arithmetic(self):
    """Chained scalar add / sub / mul / div."""
    class ArithmeticModule(torch.nn.Module):
        def forward(self, x):
            # Same sequence of ops as separate statements would produce.
            return (x + 2 - 4) * 6 / 8

    data = torch.randn(2, 3, 4)
    self.run_model_test(ArithmeticModule(), input=data, train=False, batch_size=BATCH_SIZE)
def test_embedding(self):
    """Embedding lookup with padding_idx; indices 9..0 in reverse order."""
    model = nn.Embedding(10, 3, padding_idx=-1)
    indices = torch.LongTensor(list(range(9, -1, -1)))
    self.run_model_test(model, train=False, input=indices, batch_size=BATCH_SIZE)
def test_constantpad2d(self):
    """Asymmetric constant 2-d padding with fill value 3.5."""
    pad = nn.ConstantPad2d((1, 2, 3, 4), 3.5)
    self.run_model_test(pad, train=False, batch_size=BATCH_SIZE)
def test_reflectionpad2d(self):
    """Asymmetric reflection 2-d padding."""
    pad = nn.ReflectionPad2d((1, 2, 3, 4))
    self.run_model_test(pad, train=False, batch_size=BATCH_SIZE)
def test_replicationpad2d(self):
    """Asymmetric replication (edge) 2-d padding."""
    pad = nn.ReplicationPad2d((1, 2, 3, 4))
    self.run_model_test(pad, train=False, batch_size=BATCH_SIZE)
def test_maxpool2d(self):
    """MaxPool2d with per-dimension padding."""
    pool = nn.MaxPool2d(5, padding=(1, 2))
    self.run_model_test(pool, train=False, batch_size=BATCH_SIZE)
def test_maxpool2d_single_padding(self):
    """MaxPool2d with a single symmetric padding value."""
    pool = nn.MaxPool2d(5, padding=2)
    self.run_model_test(pool, train=False, batch_size=BATCH_SIZE)
@skipIfUnsupportedOpsetVersion([10])
def test_maxpool1d_ceil(self):
    """MaxPool1d with ceil_mode rounding."""
    pool = nn.MaxPool1d(3, 2, ceil_mode=True)
    data = torch.randn(20, 16, 50, requires_grad=True)
    self.run_model_test(pool, train=False, input=data, batch_size=BATCH_SIZE)
@skipIfUnsupportedOpsetVersion([10])
def test_maxpool2d_ceil(self):
    """MaxPool2d with ceil_mode rounding."""
    pool = nn.MaxPool2d(3, 2, ceil_mode=True)
    data = torch.randn(20, 16, 50, 32, requires_grad=True)
    self.run_model_test(pool, train=False, input=data, batch_size=BATCH_SIZE)
@skipIfUnsupportedOpsetVersion([10])
def test_maxpool3d_ceil(self):
    """MaxPool3d with ceil_mode rounding."""
    pool = nn.MaxPool3d(3, 2, ceil_mode=True)
    data = torch.randn(20, 16, 50, 44, 31, requires_grad=True)
    self.run_model_test(pool, train=False, input=data, batch_size=BATCH_SIZE)
@unittest.skip("C2 and PyTorch have small difference in padding implementation")
def test_avgpool2d(self):
    """AvgPool2d with padding (skipped: backend padding mismatch)."""
    pool = nn.AvgPool2d(5, padding=(2))
    self.run_model_test(pool, train=False, batch_size=BATCH_SIZE)
def test_avgpool2d_with_count_include_pad_set_false(self):
    """AvgPool2d excluding padded cells from the divisor."""
    pool = nn.AvgPool2d(7, padding=(2), count_include_pad=False)
    self.run_model_test(pool, train=False, batch_size=BATCH_SIZE)
def test_avgpool2d_with_count_include_pad_set_true(self):
    """AvgPool2d including padded cells in the divisor."""
    pool = nn.AvgPool2d(7, padding=(2), count_include_pad=True)
    self.run_model_test(pool, train=False, batch_size=BATCH_SIZE)
def test_avgpool2d_no_padding(self):
    """AvgPool2d without padding."""
    pool = nn.AvgPool2d(5)
    self.run_model_test(pool, train=False, batch_size=BATCH_SIZE)
@skipIfUnsupportedOpsetVersion([10])
def test_avg_pool1D_ceil(self):
    """AvgPool1d with ceil_mode rounding."""
    pool = torch.nn.AvgPool1d(3, 2, ceil_mode=True)
    data = torch.randn(1, 1, 7, requires_grad=True)
    self.run_model_test(pool, train=False, input=data, batch_size=BATCH_SIZE)
@skipIfUnsupportedOpsetVersion([10])
def test_avg_pool2D_ceil(self):
    """AvgPool2d with ceil_mode rounding."""
    pool = torch.nn.AvgPool2d(3, 2, ceil_mode=True)
    data = torch.randn(20, 16, 50, 32, requires_grad=True)
    self.run_model_test(pool, train=False, input=data, batch_size=BATCH_SIZE)
@skipIfUnsupportedOpsetVersion([10])
def test_avg_pool3D_ceil(self):
    """AvgPool3d with ceil_mode rounding."""
    pool = torch.nn.AvgPool3d(3, 2, ceil_mode=True)
    data = torch.randn(20, 16, 50, 44, 31, requires_grad=True)
    self.run_model_test(pool, train=False, input=data, batch_size=BATCH_SIZE)
def test_adaptive_avg_pool1D(self):
    """AdaptiveAvgPool1d to a fixed output length."""
    pool = torch.nn.AdaptiveAvgPool1d((5))
    data = torch.randn(20, 16, 50, requires_grad=True)
    self.run_model_test(pool, train=False, input=data, batch_size=BATCH_SIZE)
def test_adaptive_avg_pool2D(self):
    """AdaptiveAvgPool2d to a fixed output size."""
    pool = torch.nn.AdaptiveAvgPool2d((5, 4))
    data = torch.randn(20, 16, 50, 32, requires_grad=True)
    self.run_model_test(pool, train=False, input=data, batch_size=BATCH_SIZE)
def test_adaptive_avg_pool3D(self):
    """AdaptiveAvgPool3d to a fixed output size."""
    pool = torch.nn.AdaptiveAvgPool3d((5, 4, 3))
    data = torch.randn(20, 16, 50, 44, 30, requires_grad=True)
    self.run_model_test(pool, train=False, input=data, batch_size=BATCH_SIZE)
@skipIfUnsupportedMinOpsetVersion(8)
def test_adaptive_max_pool1D(self):
    """AdaptiveMaxPool1d to a fixed output length (opset >= 8)."""
    pool = torch.nn.AdaptiveMaxPool1d((5))
    data = torch.randn(20, 16, 50, requires_grad=True)
    self.run_model_test(pool, train=False, input=data, batch_size=BATCH_SIZE)
@skipIfUnsupportedMinOpsetVersion(8)
def test_adaptive_max_pool2D(self):
    """AdaptiveMaxPool2d to a fixed output size (opset >= 8)."""
    pool = torch.nn.AdaptiveMaxPool2d((5, 4))
    data = torch.randn(20, 16, 50, 32, requires_grad=True)
    self.run_model_test(pool, train=False, input=data, batch_size=BATCH_SIZE)
@skipIfUnsupportedMinOpsetVersion(8)
def test_adaptive_max_pool3D(self):
    """AdaptiveMaxPool3d to a fixed output size (opset >= 8)."""
    pool = torch.nn.AdaptiveMaxPool3d((5, 4, 3))
    data = torch.randn(20, 16, 50, 44, 30, requires_grad=True)
    self.run_model_test(pool, train=False, input=data, batch_size=BATCH_SIZE)
def test_weight_norm(self):
    """Conv1d wrapped in weight normalization, training mode."""
    model = nn.utils.weight_norm(nn.Conv1d(1, 1, 3))
    data = torch.randn(1, 1, 5, requires_grad=True)
    self.run_model_test(model, train=True, batch_size=0, input=data, use_gpu=False)
def test_mnist(self):
    """Export the example MNIST convnet with random weights."""
    data = torch.randn(BATCH_SIZE, 1, 28, 28)
    # TODO: also run with a pretrained state_dict.
    self.run_model_test(MNIST(), train=False, input=data, batch_size=BATCH_SIZE,
                        state_dict=None)
def test_mm(self):
    """torch.mm on two 2-d inputs."""
    class MatMulModel(torch.nn.Module):
        def forward(self, m1, m2):
            return torch.mm(m1, m2)

    lhs = torch.randn(3, 4)
    rhs = torch.randn(4, 5)
    self.run_model_test(MatMulModel(), train=False, input=(lhs, rhs), batch_size=BATCH_SIZE, use_gpu=False)
def test_addmm(self):
    """torch.addmm: bias + m1 @ m2."""
    class AddmmModel(torch.nn.Module):
        def forward(self, ma, m1, m2):
            return torch.addmm(ma, m1, m2)

    bias = torch.randn(5)
    lhs = torch.randn(3, 4)
    rhs = torch.randn(4, 5)
    self.run_model_test(AddmmModel(), train=False, input=(bias, lhs, rhs), batch_size=BATCH_SIZE, use_gpu=False)
def test_fuse_addmm(self):
    """mm followed by add — the pattern targeted by the addmm fusion pass."""
    class AddmmModel(torch.nn.Module):
        def forward(self, x):
            return torch.mm(x, x) + x

    data = torch.randn(3, 3)
    self.run_model_test(AddmmModel(), train=False, input=data, batch_size=BATCH_SIZE, use_gpu=False)
def test_scalar_type(self):
    """Dtype handling across several op families: scalar*tensor arithmetic,
    double-precision input, mixed-dtype comparisons, and matmul variants."""
    class ArithmeticModel(torch.nn.Module):
        def forward(self, x):
            # Python int (from size()) times int times a float tensor.
            return x.size(0) * 2 * x
    x = torch.ones(2, 3, dtype=torch.float32)
    self.run_model_test(ArithmeticModel(), input=x, train=False, batch_size=BATCH_SIZE)
    class ReciprocalModel(torch.nn.Module):
        def forward(self, x):
            return torch.reciprocal(x)
    # double input exercises a non-default floating dtype
    x = torch.tensor([2.0, 4.0], dtype=torch.double)
    self.run_model_test(ReciprocalModel(), input=x, train=False, batch_size=BATCH_SIZE)
    class ComparisonModel(torch.nn.Module):
        def forward(self, x, y):
            # comparisons with int32 and float32 operands, combined with &
            return x.ge(0.5) & y.le(2)
    x = torch.ones(2, 3, dtype=torch.int32)
    y = torch.ones(2, 3, dtype=torch.float32)
    self.run_model_test(ComparisonModel(), input=(x, y), train=False, batch_size=BATCH_SIZE)
    class MatMulModel(torch.nn.Module):
        def forward(self, x, y):
            return torch.mm(x, y)
    x = torch.ones(3, 4)
    y = torch.ones(4, 5)
    self.run_model_test(MatMulModel(), input=(x, y), train=False, batch_size=BATCH_SIZE)
    class AddMMModel(torch.nn.Module):
        def forward(self, x):
            return torch.mm(x, x) + x
    x = torch.ones(3, 3)
    self.run_model_test(AddMMModel(), input=x, train=False, batch_size=BATCH_SIZE)
# Exercises a PyTorch optimization pass; see
# https://github.com/pytorch/pytorch/pull/7872
def test_consecutive_transposes(self):
    """Two back-to-back transposes on a 4-d tensor."""
    class DoubleTransposeModel(torch.nn.Module):
        def forward(self, x):
            return x.transpose(1, 2).transpose(2, 3)

    data = torch.randn(5, 6, 7, 8)
    self.run_model_test(DoubleTransposeModel(), train=False, input=data, batch_size=BATCH_SIZE, use_gpu=False)
def test_sum(self):
    """Full-tensor sum plus per-dim reductions."""
    shape = (3, 4, 5)
    for dim in [None] + list(range(len(shape))):
        kwargs = {} if dim is None else {"dim": dim}

        class SumModel(torch.nn.Module):
            def forward(self, x):
                return torch.sum(x, **kwargs)

        data = torch.randn(*shape)
        self.run_model_test(SumModel(), train=False, input=(data), batch_size=BATCH_SIZE, use_gpu=False)
def test_cumsum(self):
    """Cumulative sum along each dim, exported via the ATen fallback."""
    shape = (3, 4, 5)
    for axis in range(len(shape)):
        class CumsumModel(torch.nn.Module):
            def forward(self, x):
                return torch.cumsum(x, dim=axis)

        data = torch.randn(*shape)
        self.run_model_test(CumsumModel(), train=False, input=(data), batch_size=BATCH_SIZE, use_gpu=False,
                            operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK)
def test_cosine_similarity(self):
    """nn.CosineSimilarity along dim 1, exported via the ATen fallback."""
    shape = (100, 128)
    lhs = torch.randn(*shape)
    rhs = torch.randn(*shape)
    model = torch.nn.CosineSimilarity(dim=1, eps=1e-6)
    self.run_model_test(model, train=False,
                        input=(lhs, rhs), batch_size=BATCH_SIZE, use_gpu=False,
                        operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK)
@skipIfUnsupportedOpsetVersion([10])
def test_lstm_constant_folding(self):
    """Export LSTMs (bi- and uni-directional) with constant folding on."""
    class LstmNet(nn.Module):
        def __init__(self, input_size, hidden_size, num_layers, bidirectional):
            super(LstmNet, self).__init__()
            self.lstm = nn.LSTM(input_size, hidden_size, num_layers, bidirectional=bidirectional)
        def forward(self, input, initial_state):
            return self.lstm(input, initial_state)
    def get_LstmNet_model_and_inputs(input_size, hidden_size, num_layers, batch_size,
                                     seq_len, bidirectional):
        # Build a net plus a matching (input, (h0, c0)) tuple.
        num_directions = 2 if bidirectional else 1
        model = LstmNet(input_size, hidden_size, num_layers, bidirectional)
        input = torch.randn(seq_len, batch_size, input_size)
        h0 = torch.randn(num_layers * num_directions, batch_size, hidden_size)
        c0 = torch.randn(num_layers * num_directions, batch_size, hidden_size)
        return model, (input, (h0, c0))
    # Case 1: bidirectional, 2 layers.
    batch_size1 = 3
    model1, input1 = get_LstmNet_model_and_inputs(7, 3, 2, batch_size1, 5, True)
    self.run_actual_test(model1, train=False, batch_size=batch_size1, input=input1, use_gpu=False, do_constant_folding=True)
    # Case 2: unidirectional, 3 layers.
    batch_size2 = 4
    model2, input2 = get_LstmNet_model_and_inputs(5, 4, 3, batch_size2, 7, False)
    self.run_actual_test(model2, train=False, batch_size=batch_size2, input=input2, use_gpu=False, do_constant_folding=True)
@skipIfUnsupportedOpsetVersion([10])
def test_gru_constant_folding(self):
    """Export GRUs (bi- and uni-directional) with constant folding on."""
    class GruNet(nn.Module):
        def __init__(self, input_size, hidden_size, num_layers, bidirectional):
            super(GruNet, self).__init__()
            self.mygru = nn.GRU(input_size, hidden_size, num_layers, bidirectional=bidirectional)
        def forward(self, input, initial_state):
            out = self.mygru(input, initial_state)
            return out
    def get_GruNet_model_and_inputs(input_size, hidden_size, num_layers, batch_size,
                                    seq_len, bidirectional):
        # Build a net plus a matching (input, h0) tuple.
        num_directions = 2 if bidirectional else 1
        model = GruNet(input_size, hidden_size, num_layers, bidirectional)
        input = torch.randn(seq_len, batch_size, input_size)
        h0 = torch.randn(num_layers * num_directions, batch_size, hidden_size)
        return model, (input, h0)
    # Case 1: bidirectional, 2 layers.
    batch_size1 = 3
    model1, input1 = get_GruNet_model_and_inputs(7, 3, 2, batch_size1, 5, True)
    self.run_actual_test(model1, train=False, batch_size=batch_size1, input=input1, use_gpu=False, do_constant_folding=True)
    # Case 2: unidirectional, 3 layers.
    batch_size2 = 4
    model2, input2 = get_GruNet_model_and_inputs(5, 4, 3, batch_size2, 7, False)
    self.run_actual_test(model2, train=False, batch_size=batch_size2, input=input2, use_gpu=False, do_constant_folding=True)
def test_repeat(self):
    """repeat() with one count per input dim."""
    class RepeatModel(torch.nn.Module):
        def forward(self, x):
            return x.repeat(1, 2, 3, 4)

    data = torch.randn(4, 3, 2, 1, requires_grad=True)
    self.run_model_test(RepeatModel(), train=False, input=(data), batch_size=BATCH_SIZE, use_gpu=False)
@skipIfUnsupportedOpsetVersion([10])
def test_upsample(self):
    """nn.Upsample (nearest) to twice the spatial size."""
    data = torch.randn(1, 2, 3, 4, requires_grad=True)
    doubled = [v * 2 for v in data.size()[2:]]
    model = nn.Upsample(size=doubled, mode="nearest")
    self.run_model_test(model, train=False, input=(data),
                        batch_size=BATCH_SIZE, use_gpu=False)
@skipIfUnsupportedOpsetVersion([10])
def test_interpolate_upsample(self):
    """2x nearest upsample via F.interpolate with constant target sizes."""
    class UpsampleModel(torch.nn.Module):
        def forward(self, x):
            # Work around for now: freeze the dynamic sizes to Python ints
            # so the exported graph carries constant target sizes.
            target = [int(v * 2) for v in x.size()[2:]]
            return nn.functional.interpolate(x,
                                             size=target,
                                             mode="nearest")

    data = torch.randn(1, 2, 3, 4, requires_grad=True)
    self.run_model_test(UpsampleModel(), train=False, input=(data),
                        batch_size=BATCH_SIZE, use_gpu=False)
@skipIfUnsupportedOpsetVersion([7, 8, 10])
def test_interpolate_upsample_dynamic_sizes(self):
    """2x nearest upsample where the target size stays shape-dependent."""
    class DynamicUpsampleModel(torch.nn.Module):
        def forward(self, x):
            doubled = [v * 2 for v in x.size()[2:]]
            return nn.functional.interpolate(x,
                                             size=doubled,
                                             mode="nearest")

    data = torch.randn(1, 2, 3, 4, requires_grad=True)
    self.run_model_test(DynamicUpsampleModel(), train=False, input=(data),
                        batch_size=BATCH_SIZE, use_gpu=False)
def test_repeat_dim_overflow(self):
    """repeat() with more repeat counts than input dims (2-d -> 4-d)."""
    class RepeatModel(torch.nn.Module):
        def forward(self, x):
            return x.repeat(1, 2, 3, 4)

    data = torch.randn(1, 2, requires_grad=True)
    self.run_model_test(RepeatModel(), train=False, input=(data), batch_size=BATCH_SIZE, use_gpu=False)
def test_repeat_dynamic(self):
    """repeat() with counts computed from another input's runtime shape."""
    class MyModel(torch.nn.Module):
        def __init__(self):
            super(MyModel, self).__init__()
        def forward(self, x, y):
            # Repeat counts are derived from y's shape, so they are dynamic.
            return x.repeat(y.size()[0] // 2, y.size()[1] * 2)
    x = torch.randn(1, 2, requires_grad=True)
    y = torch.randn(2, 4, requires_grad=True)
    # First export: both inputs with fully dynamic axes.
    self.run_model_test(MyModel(), train=False, input=(x, y), batch_size=BATCH_SIZE, use_gpu=False,
                        input_names=["x", "y"], dynamic_axes={"x": [0, 1], "y": [0, 1]})
    # Second export: remained_onnx_input_idx=[0] keeps only x as an ONNX input.
    self.run_model_test(MyModel(), train=False, input=(x, y), batch_size=BATCH_SIZE, use_gpu=False, remained_onnx_input_idx=[0])
def test_mean(self):
    """Full-tensor mean plus per-dim reductions."""
    shape = (3, 4, 5)
    for dim in [None] + list(range(len(shape))):
        kwargs = {} if dim is None else {"dim": dim}

        class MeanModel(torch.nn.Module):
            def forward(self, x):
                return torch.mean(x, **kwargs)

        data = torch.randn(*shape)
        self.run_model_test(MeanModel(), train=False, input=(data), batch_size=BATCH_SIZE, use_gpu=False)
# TODO: Add test cases for prod once Caffe2 has support for ReduceProd
def test_softmax(self):
    """Softmax over every valid dim of inputs of rank 2..7."""
    for rank in range(2, 8):
        for axis in range(rank - 1):
            model = nn.Softmax(dim=axis)
            dims = [2] * (rank - 2) + [3, 4]
            data = torch.ones(*dims, requires_grad=True)
            self.run_model_test(model, train=False, batch_size=BATCH_SIZE, input=data)
def test_softmax_dtype(self):
    """softmax with an explicit float64 output dtype from float32 input."""
    class SoftmaxModel(torch.nn.Module):
        def forward(self, input):
            return nn.functional.softmax(input, dim=0, dtype=torch.float64)

    data = torch.randn(1, 2, 3, requires_grad=True, dtype=torch.float32)
    self.run_model_test(SoftmaxModel(), train=False, input=data, batch_size=BATCH_SIZE)
def test_logsoftmax(self):
    """LogSoftmax over the last axis of inputs of rank 2..6.

    Fix: the original iterated ``range(7)[2:]`` — slicing a range just to
    skip its first two values; ``range(2, 7)`` iterates identically and is
    the idiomatic form.
    """
    for i in range(2, 7):
        model = nn.LogSoftmax(dim=i - 1)
        dims = [2] * (i - 2) + [3, 4]
        input = torch.ones(*dims, requires_grad=True)
        self.run_model_test(model, train=False, batch_size=BATCH_SIZE, input=input)
def test_logsoftmax_dim(self):
    """LogSoftmax across every valid negative and positive dim of a 4-d input."""
    for axis in range(-4, 3):
        model = nn.LogSoftmax(dim=axis)
        data = torch.randn(3, 4, 5, 6)
        self.run_model_test(model, train=False, batch_size=BATCH_SIZE, input=data)
def test_randn(self):
    """randn + input reduced to a static shape; no ONNX inputs remain."""
    class RandnShapeModel(torch.nn.Module):
        def forward(self, x):
            return (torch.randn(1, 2, 3, 4) + x).shape

    data = torch.randn(1, 2, 3, 4)
    self.run_model_test(RandnShapeModel(), train=False, input=(data),
                        batch_size=BATCH_SIZE, use_gpu=False, remained_onnx_input_idx=[])
def test_rand(self):
    """rand + input reduced to a static shape; no ONNX inputs remain."""
    class RandShapeModel(torch.nn.Module):
        def forward(self, x):
            return (torch.rand(1, 2, 3, 4) + x).shape

    data = torch.randn(1, 2, 3, 4)
    self.run_model_test(RandShapeModel(), train=False, input=(data),
                        batch_size=BATCH_SIZE, use_gpu=False, remained_onnx_input_idx=[])
def test_convtranspose(self):
    """ConvTranspose2d with stride, padding and output_padding, no bias."""
    model = nn.ConvTranspose2d(3, 3, 3, stride=3, bias=False, padding=1,
                               output_padding=2)
    self.run_model_test(model, train=False, batch_size=BATCH_SIZE, atol=1e-7)
def test_unsqueeze(self):
    """unsqueeze at every valid positive and negative dim."""
    shape = (3, 4, 5)
    # Negative dims are valid down to -(rank + 1).
    for axis in range(-len(shape) - 1, len(shape) + 1):
        class UnsqueezeModel(torch.nn.Module):
            def forward(self, x):
                return x.unsqueeze(axis)

        data = torch.randn(*shape)
        self.run_model_test(UnsqueezeModel(), train=False, input=(data), batch_size=BATCH_SIZE, atol=1e-7)
def test_squeeze(self):
    """squeeze at every valid positive and negative dim of a (1,1,1) input."""
    shape = (1, 1, 1)
    for axis in range(-len(shape), len(shape)):
        class SqueezeModel(torch.nn.Module):
            def forward(self, x):
                return x.squeeze(axis)

        data = torch.randn(*shape)
        self.run_model_test(SqueezeModel(), train=False, input=(data), batch_size=BATCH_SIZE, atol=1e-7)
# NB: InstanceNorm model includes unused weights, so skip this in TestCaffe2BackendEmbed
# TODO: We should have another pass to eliminate the unused initializers in ONNX models.
@skipIfEmbed
def test_instance_norm(self):
    """InstanceNorm2d export in eval mode."""
    underlying = nn.InstanceNorm2d(3)
    self.run_model_test(underlying, train=False, batch_size=BATCH_SIZE)
def test_pixel_shuffle(self):
    """PixelShuffle with upscale factor 4 on a (1, 32, 5, 5) input.

    Fix: replaces the deprecated ``torch.autograd.Variable`` wrapper with a
    direct ``torch.randn(..., requires_grad=True)`` call — Variable has been
    a no-op alias for Tensor since PyTorch 0.4, so behavior is unchanged.
    """
    underlying = nn.PixelShuffle(4)
    shape = (1, 32, 5, 5)
    input = torch.randn(*shape, requires_grad=True)
    self.run_model_test(underlying, train=False, input=(input),
                        batch_size=BATCH_SIZE)
    def test_dynamic_sizes(self):
        """Export shape_as_tensor / reshape_from_tensor_shape (dynamic reshape)."""
        class MyModel(torch.nn.Module):
            def __init__(self):
                super(MyModel, self).__init__()

            def forward(self, x):
                # Flatten all trailing dims while keeping the first dim dynamic.
                shape = torch.onnx.operators.shape_as_tensor(x)
                new_shape = torch.cat((torch.LongTensor([-1]), shape[0].view(1)))
                return torch.onnx.operators.reshape_from_tensor_shape(x, new_shape)
        x = torch.randn(3, 5, 7)
        self.run_model_test(MyModel(), train=False, input=x, batch_size=BATCH_SIZE, use_gpu=False)
    def test_advanced_broadcast(self):
        """Export elementwise mul with broadcasting over the last dimension."""
        class MyModel(torch.nn.Module):
            def __init__(self):
                super(MyModel, self).__init__()

            def forward(self, x, y):
                return torch.mul(x, y)
        x = torch.randn(1, 5, 10)
        y = torch.randn(1, 5, 1)
        self.run_model_test(MyModel(), train=False, input=(x, y), batch_size=BATCH_SIZE, use_gpu=False)
    def test_int8_export(self):
        """Export a model with a uint8 (ByteTensor) parameter as a ZIP archive and run it in Caffe2."""
        class MyModel(torch.nn.Module):
            def __init__(self):
                super(MyModel, self).__init__()
                # Non-float parameter exercises int8/uint8 initializer export.
                self.param = torch.ByteTensor(3, 4).random_()

            def forward(self, x):
                return x * self.param.float()

        import io
        f = io.BytesIO()
        from torch.onnx import ExportTypes
        torch.onnx._export(MyModel(), (torch.rand(3, 4),), f, verbose=True, export_type=ExportTypes.ZIP_ARCHIVE,
                           keep_initializers_as_inputs=True)

        X = np.random.rand(3, 4).astype(np.float32)

        f.seek(0)
        import caffe2.python.onnx.backend as c2
        model = c2.prepare_zip_archive(f)
        model.run(X)
    @skipIfUnsupportedOpsetVersion([10])
    def test_neg_slice(self):
        """Export indexing with a negative index on the first dimension."""
        class NegSlice(torch.nn.Module):
            def forward(self, x):
                return x[-1, :, :]

        x = torch.randn(3, 4, 5)
        self.run_model_test(NegSlice(), train=False, input=(x,), batch_size=BATCH_SIZE, use_gpu=False)
    @skipIfUnsupportedOpsetVersion([10])
    def test_neg_slice_large(self):
        """Export a negative index on the last dimension of a rank-5 tensor."""
        class NegSlice(torch.nn.Module):
            def forward(self, x):
                return x[:, :, :, :, -3]

        x = torch.randn(3, 4, 5, 6, 7)
        self.run_model_test(NegSlice(), train=False, input=(x,), batch_size=BATCH_SIZE, use_gpu=False)
    @unittest.skip("https://github.com/pytorch/pytorch/issues/10984")
    @skipIfUnsupportedOpsetVersion([10])
    def test_neg_slice_large_negone(self):
        """Export index -1 on the last dim of a rank-5 tensor (known upstream issue)."""
        class NegSlice(torch.nn.Module):
            def forward(self, x):
                return x[:, :, :, :, -1]

        x = torch.randn(3, 4, 5, 6, 7)
        self.run_model_test(NegSlice(), train=False, input=(x,), batch_size=BATCH_SIZE, use_gpu=False)
    @skipIfUnsupportedMinOpsetVersion(11)
    def test_dynamic_slice(self):
        """Export slices whose bounds depend on runtime tensor sizes (opset >= 11)."""
        class DynamicSliceExportMod(torch.nn.Module):
            def forward(self, x):
                results = []
                for i in range(4):
                    results.append(x[:x.size(0) - i, i:x.size(2), i:3])
                return tuple(results)

        x = torch.rand(5, 5, 5)
        self.run_model_test(DynamicSliceExportMod(), train=False, input=(x,), batch_size=BATCH_SIZE, use_gpu=False)
@skipIfUnsupportedMinOpsetVersion(11)
def test_dynamic_slice_script(self):
class DynamicSliceModel(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x):
return x[1:x.size(0)]
module = DynamicSliceModel()
x = torch.rand(1, 2)
example_output = module(x)
self.run_model_test(DynamicSliceModel(), train=False, input=(x,),
batch_size=BATCH_SIZE, use_gpu=False, example_outputs=example_output)
    @skipIfUnsupportedMinOpsetVersion(11)
    def test_dynamic_slice_to_the_end(self):
        """Export open-ended slices combined with a size-dependent index (opset >= 11)."""
        class DynamicSliceExportMod(torch.nn.Module):
            def forward(self, x):
                results = []
                for i in range(4):
                    results.append(x[:, i:, x.size(2) - 5])
                return tuple(results)

        x = torch.rand(5, 5, 5)
        self.run_model_test(DynamicSliceExportMod(), train=False, input=(x,), batch_size=BATCH_SIZE, use_gpu=False)
    def test_unbind(self):
        """Export unbind(), both default dim and dim=1 with a single output kept."""
        class UnbindModel(torch.nn.Module):
            def forward(self, input):
                return input.unbind()

        x = torch.randn(3, 4, 5)
        self.run_model_test(UnbindModel(), train=False, input=(x,), batch_size=BATCH_SIZE, use_gpu=False)

        class UnbindModel2(torch.nn.Module):
            def forward(self, input):
                # Keep only the second slice; the rest are discarded.
                _, out, _, _ = input.unbind(1)
                return out

        x = torch.randn(3, 4, 5)
        self.run_model_test(UnbindModel2(), train=False, input=(x,), batch_size=BATCH_SIZE, use_gpu=False)
    @skipIfUnsupportedMinOpsetVersion(9)
    def test_inplace_zero(self):
        """Export in-place zero_(); result is independent of input values."""
        class Zero_(torch.nn.Module):
            def forward(self, x):
                return x.zero_()

        x = torch.randn(2, 3, 4)
        self.run_model_test(Zero_(), train=False, input=(x,), batch_size=BATCH_SIZE, use_gpu=False,
                            input_names=['x'], dynamic_axes={'x': [0, 1, 2]})
        # With static shapes the input itself is constant-folded away.
        self.run_model_test(Zero_(), train=False, input=(x,), batch_size=BATCH_SIZE, use_gpu=False, remained_onnx_input_idx=[])
    @skipIfUnsupportedMinOpsetVersion(9)
    def test_inplace_fill(self):
        """Export in-place fill_(3); result is independent of input values."""
        class Fill_(torch.nn.Module):
            def forward(self, x):
                return x.fill_(3)

        x = torch.randn(2, 3, 4)
        self.run_model_test(Fill_(), train=False, input=(x,), batch_size=BATCH_SIZE, use_gpu=False,
                            input_names=['x'], dynamic_axes={'x': [0, 1, 2]})
        # With static shapes the input itself is constant-folded away.
        self.run_model_test(Fill_(), train=False, input=(x,), batch_size=BATCH_SIZE, use_gpu=False, remained_onnx_input_idx=[])
    # ConstantFill is a deprecated experimental op (used in opsets < 9).
    # Shape inference does not cover this op.
    @skipIfUnsupportedMinOpsetVersion(9)
    def test_inplace_arithmetic(self):
        """Export scripted in-place add_/mul_ on locally created tensors (no inputs)."""
        class Arithmetic(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self):
                x = torch.ones(2, 3, 4)
                y = torch.ones(2, 3, 4) * 2
                x.add_(3)
                y.mul_(x)
                return x, y

        # Expected outputs mirror the in-place sequence above.
        x = torch.ones(2, 3, 4)
        y = torch.ones(2, 3, 4) * 2
        self.run_model_test(Arithmetic(),
                            train=False, input=(), batch_size=BATCH_SIZE,
                            use_gpu=False, example_outputs=(x + 3, y * (x + 3)))
    def test_tensor_factories(self):
        """Export zeros/ones factories sized from a runtime input shape."""
        class TensorFactory(torch.nn.Module):
            def forward(self, x):
                return torch.zeros(x.size()) + torch.ones(x.size())

        x = torch.randn(2, 3, 4)
        self.run_model_test(TensorFactory(), train=False, input=(x,), batch_size=BATCH_SIZE,
                            use_gpu=False, input_names=['x'], dynamic_axes={'x': [0, 1, 2]})
        # With static shapes the factories fold and the input is dropped.
        self.run_model_test(TensorFactory(), train=False, input=(x,), batch_size=BATCH_SIZE,
                            use_gpu=False, remained_onnx_input_idx=[])
    def test_tensor_factories_script(self):
        """Scripted variant of the zeros/ones factory export, with explicit dtype."""
        class TensorFactory(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self, x):
                return torch.zeros(x.shape, dtype=torch.float) + torch.ones(x.shape, dtype=torch.float)

        x = torch.randn(2, 3, 4)
        self.run_model_test(TensorFactory(), train=False, input=(x,), batch_size=BATCH_SIZE,
                            use_gpu=False, example_outputs=(torch.ones(x.size()),),
                            input_names=['x'], dynamic_axes={'x': [0, 1, 2]})
        self.run_model_test(TensorFactory(), train=False, input=(x,), batch_size=BATCH_SIZE,
                            use_gpu=False, example_outputs=(torch.ones(x.size()),),
                            remained_onnx_input_idx=[])
    def test_tensor_like_factories_script(self):
        """Export scripted zeros_like/ones_like with explicit dtype/layout/device."""
        class TensorFactory(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self, x):
                zeros = torch.zeros_like(x, dtype=torch.float, layout=torch.strided, device=torch.device("cpu"))
                ones = torch.ones_like(x, dtype=torch.float, layout=torch.strided, device=torch.device("cpu"))
                return zeros + ones

        x = torch.randn(2, 3, 4)
        self.run_model_test(TensorFactory(), train=False, input=(x,), batch_size=BATCH_SIZE,
                            use_gpu=False, example_outputs=(torch.ones(x.size()),),
                            input_names=['x'], dynamic_axes={'x': [0, 1, 2]})
        # Input pruning only applies for opset >= 9.
        remained_onnx_input_idx = None if self.opset_version < 9 else []
        self.run_model_test(TensorFactory(), train=False, input=(x,), batch_size=BATCH_SIZE,
                            use_gpu=False, example_outputs=(torch.ones(x.size()),),
                            remained_onnx_input_idx=remained_onnx_input_idx)
    def test_full(self):
        """Export torch.full with a tensor fill value and explicit dtype."""
        class FullModel(torch.nn.Module):
            def forward(self, x):
                return torch.full((3, 4), x, dtype=torch.long)

        x = torch.tensor(12)
        self.run_model_test(FullModel(), train=False, input=(x,), batch_size=BATCH_SIZE,
                            use_gpu=False)
    def test_full_script(self):
        """Scripted variant of the torch.full export."""
        class FullClass(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self, x):
                return torch.full((4, 5), x, dtype=torch.long)

        x = torch.tensor(12)
        self.run_model_test(FullClass(), train=False, input=(x,), batch_size=BATCH_SIZE,
                            use_gpu=False, example_outputs=FullClass()(x))
    def test_clamp(self):
        """Export clamp with both bounds, only min, and only max."""
        class ClampModel(torch.nn.Module):
            def forward(self, x):
                return x.clamp(-0.5, 0.5)

        x = torch.randn(3, 4)
        self.run_model_test(ClampModel(), train=False, input=(x,), batch_size=BATCH_SIZE)

        class ClampMinModel(torch.nn.Module):
            def forward(self, x):
                return x.clamp(min=-0.5)

        x = torch.randn(3, 4)
        self.run_model_test(ClampMinModel(), train=False, input=(x,), batch_size=BATCH_SIZE)

        class ClampMaxModel(torch.nn.Module):
            def forward(self, x):
                return x.clamp(max=0.5)

        x = torch.randn(3, 4)
        self.run_model_test(ClampMaxModel(), train=False, input=(x,), batch_size=BATCH_SIZE)
    @skipIfUnsupportedMinOpsetVersion(9)
    def test_where_functional(self):
        """Export functional torch.where with a boolean condition."""
        class WhereFunctional(torch.nn.Module):
            def forward(self, x):
                return torch.where(x > 2.0, x, torch.neg(x))

        x = torch.randn(3, 4)
        self.run_model_test(WhereFunctional(), train=False, input=(x,), batch_size=BATCH_SIZE, use_gpu=False)
    @skipIfUnsupportedMinOpsetVersion(9)
    def test_where_method(self):
        """Export the Tensor.where method form."""
        class WhereMethod(torch.nn.Module):
            def forward(self, x):
                return x.where(x > 2.0, torch.neg(x))

        x = torch.randn(3, 4)
        self.run_model_test(WhereMethod(), train=False, input=(x,), batch_size=BATCH_SIZE, use_gpu=False)
    def test_data_dependent_zeros_factory(self):
        """Export zeros sized from a runtime dim, cast via type_as, then concatenated."""
        class ZerosFactory(torch.nn.Module):
            def forward(self, input):
                return torch.cat([input, torch.zeros(input.size(0), 1).type_as(input)], dim=1)

        x = torch.zeros(3, 4)
        self.run_model_test(ZerosFactory(), train=False, input=(x,), batch_size=BATCH_SIZE, use_gpu=False)
    def test_implicit_expand(self):
        """Export scalar broadcasting (x + 1) which implicitly expands the scalar."""
        class ImplicitExpandExportMod(torch.nn.Module):
            def forward(self, x):
                return x + 1

        x = torch.randn(3, 4)
        self.run_model_test(ImplicitExpandExportMod(), train=False, input=(x,), batch_size=BATCH_SIZE, use_gpu=False)
    def test_reduce_sum(self):
        """Export sum over a negative dimension index."""
        class ReduceSumNegativeIndices(torch.nn.Module):
            def forward(self, x):
                return x.sum(-1)

        x = torch.randn(2, 3, 4)
        self.run_model_test(ReduceSumNegativeIndices(), train=False, input=(x,), batch_size=BATCH_SIZE, use_gpu=False)
    def test_reduce_sum_multi_dim(self):
        """Export sum over multiple dims with keepdim=True."""
        class ReduceSumMultipleAxes(torch.nn.Module):
            def forward(self, x):
                return x.sum(dim=(2, 3), keepdim=True)

        x = torch.randn(16, 3, 256, 256)
        self.run_model_test(ReduceSumMultipleAxes(), train=False, input=(x,), batch_size=BATCH_SIZE, use_gpu=False)
    # InstanceNorm model (used in the subgraph) includes unused weights,
    # so skip this in TestCaffe2BackendEmbed
    @skipIfEmbed
    def test_group_norm(self):
        """Export nn.GroupNorm (affine) in training mode."""
        c = torch.randn(BATCH_SIZE, 6, 224, 224)
        model = nn.GroupNorm(3, 6, eps=0.0002)
        self.run_model_test(model, train=True, input=c, batch_size=BATCH_SIZE)
    # InstanceNorm model (used in the subgraph) includes unused weights,
    # so skip this in TestCaffe2BackendEmbed
    @skipIfEmbed
    def test_group_norm_noaffine(self):
        """Export nn.GroupNorm with affine=False in training mode."""
        c = torch.randn(BATCH_SIZE, 6, 224, 224)
        model = nn.GroupNorm(3, 6, eps=0.0002, affine=False)
        self.run_model_test(model, train=True, input=c, batch_size=BATCH_SIZE)
    def test_rsub(self):
        """Export reversed subtraction (scalar - tensor)."""
        class RsubModel(torch.nn.Module):
            def forward(self, x):
                return 1 - x

        x = torch.randn(1, 2)
        self.run_model_test(RsubModel(), train=False, input=(x,),
                            batch_size=BATCH_SIZE, use_gpu=False)
    @skipIfUnsupportedMinOpsetVersion(9)
    def test_isnan(self):
        """Export torch.isnan on an input containing a NaN."""
        class IsNaNModel(torch.nn.Module):
            def forward(self, input):
                return torch.isnan(input)

        x = torch.tensor([1.0, float("nan"), 2.0])
        self.run_model_test(IsNaNModel(), train=False, input=x, batch_size=BATCH_SIZE, use_gpu=False)
    @skipIfUnsupportedMinOpsetVersion(9)
    def test_scatter(self):
        """Export Tensor.scatter along dim 1 with three index/value layouts."""
        class ScatterModel(torch.nn.Module):
            def forward(self, input, indices, values):
                return input.scatter(1, indices, values)

        # Rank-2 case.
        input = torch.tensor([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])
        indices = torch.tensor([[1, 0], [0, 2], [0, 1]], dtype=torch.int64)
        values = torch.tensor([[1.0, 1.1], [2.0, 2.1], [3.0, 3.1]])
        self.run_model_test(ScatterModel(), train=False, input=(input, indices, values),
                            batch_size=BATCH_SIZE, use_gpu=False)

        # Rank-4 case with broadcast-expanded indices.
        input = torch.zeros(3, 4, 5, 6)
        indices = torch.tensor([[1, 0], [0, 2], [0, 1]], dtype=torch.int64)
        indices = indices.view(3, 2, 1, 1).expand(3, 2, 5, 6)
        values = torch.arange(3 * 2 * 5 * 6, dtype=torch.float32).view(3, 2, 5, 6)
        self.run_model_test(ScatterModel(), train=False, input=(input, indices, values),
                            batch_size=BATCH_SIZE, use_gpu=False)

        # Rank-3 case.
        input = torch.zeros(3, 4, 2)
        indices = torch.tensor([[[1, 0], [0, 2]], [[1, 1], [0, 1]], [[2, 1], [2, 2]]])
        values = torch.arange(3 * 2 * 2, dtype=torch.float32).view(3, 2, 2)
        self.run_model_test(ScatterModel(), train=False, input=(input, indices, values),
                            batch_size=BATCH_SIZE, use_gpu=False)
    @skipIfUnsupportedOpsetVersion([10])
    def test_flatten(self):
        """Export full flatten to a 1-D tensor."""
        class FlattenModel(torch.nn.Module):
            def forward(self, input):
                return torch.flatten(input)

        x = torch.randn(1, 2, 3, 4, requires_grad=True)
        self.run_model_test(FlattenModel(), train=False, input=x, batch_size=BATCH_SIZE)
    def test_flatten2D(self):
        """Export flatten starting at dim 1 (keeps batch dim)."""
        class FlattenModel(torch.nn.Module):
            def forward(self, input):
                return torch.flatten(input, 1)

        x = torch.randn(1, 2, 3, 4, requires_grad=True)
        self.run_model_test(FlattenModel(), train=False, input=x, batch_size=BATCH_SIZE)
    def test_max(self):
        """Export torch.max over a dim (returns values and indices)."""
        class MaxModel(torch.nn.Module):
            def forward(self, input):
                return torch.max(input, dim=1)

        x = torch.randn(4, 4, requires_grad=True)
        self.run_model_test(MaxModel(), train=False, input=x, batch_size=BATCH_SIZE)
    def test_max_keepdim(self):
        """Export torch.max over a dim with keepdim=True."""
        class MaxModel(torch.nn.Module):
            def forward(self, input):
                return torch.max(input, dim=1, keepdim=True)

        x = torch.randn(4, 4, requires_grad=True)
        self.run_model_test(MaxModel(), train=False, input=x, batch_size=BATCH_SIZE)
    def test_max_tensors(self):
        """Export elementwise torch.max of two tensors."""
        class MaxModel(torch.nn.Module):
            def forward(self, input, other):
                return torch.max(input, other)

        x = torch.randn(4, 4, requires_grad=True)
        y = torch.randn(4, 4, requires_grad=True)
        self.run_model_test(MaxModel(), train=False, input=(x, y), batch_size=BATCH_SIZE)
    def test_min(self):
        """Export torch.min over a dim (returns values and indices)."""
        class MinModel(torch.nn.Module):
            def forward(self, input):
                return torch.min(input, dim=1)

        x = torch.randn(4, 4, requires_grad=True)
        self.run_model_test(MinModel(), train=False, input=x, batch_size=BATCH_SIZE)
    def test_argmax(self):
        """Export argmax over a specific dim."""
        class ArgmaxModel(torch.nn.Module):
            def forward(self, input):
                return torch.argmax(input, dim=1)

        x = torch.randn(4, 4, requires_grad=True)
        self.run_model_test(ArgmaxModel(), train=False, input=x, batch_size=BATCH_SIZE)
    def test_argmax_none_dim(self):
        """Export argmax with no dim (flattened reduction)."""
        class ArgmaxModel(torch.nn.Module):
            def forward(self, input):
                return torch.argmax(input)

        x = torch.randn(4, 4, requires_grad=True)
        self.run_model_test(ArgmaxModel(), train=False, input=x, batch_size=BATCH_SIZE)
    def test_argmin(self):
        """Export argmin over a specific dim."""
        class ArgminModel(torch.nn.Module):
            def forward(self, input):
                return torch.argmin(input, dim=1)

        x = torch.randn(4, 4, requires_grad=True)
        self.run_model_test(ArgminModel(), train=False, input=x, batch_size=BATCH_SIZE)
    def test_argmin_none_dim(self):
        """Export argmin with no dim (flattened reduction)."""
        class ArgminModel(torch.nn.Module):
            def forward(self, input):
                return torch.argmin(input)

        x = torch.randn(4, 4, requires_grad=True)
        self.run_model_test(ArgminModel(), train=False, input=x, batch_size=BATCH_SIZE)
    def test_reshape(self):
        """Export reshape of a 1-element tensor to (1, 1)."""
        class ReshapeModel(torch.nn.Module):
            def forward(self, input):
                return input.reshape(1, 1)

        x = torch.randn(1, requires_grad=True)
        self.run_model_test(ReshapeModel(), train=False, input=x, batch_size=BATCH_SIZE)
    def test_reshape_as(self):
        """Export reshape_as against a tensor created inside forward."""
        class ReshapeAsModel(torch.nn.Module):
            def forward(self, input):
                # Only the shape of y matters; it is traced as a constant.
                y = torch.randn(3, 1, 2, 1, requires_grad=False)
                return input.reshape_as(y)

        x = torch.randn(2, 3, requires_grad=True)
        self.run_model_test(ReshapeAsModel(), train=False, input=x, batch_size=BATCH_SIZE)
    @skipIfUnsupportedOpsetVersion([10])
    def test_narrow(self):
        """Export torch.narrow (slice of length 2 from dim 0)."""
        class NarrowModel(torch.nn.Module):
            def forward(self, input):
                return torch.narrow(input, 0, 0, 2)

        x = torch.randn(3, 3, requires_grad=True)
        self.run_model_test(NarrowModel(), train=False, input=x, batch_size=BATCH_SIZE)
    def test_randn_like(self):
        """Export randn_like and check only the output SHAPE (values are random)."""
        class RandNLikeModel(torch.nn.Module):
            def forward(self, input):
                return torch.randn_like(input)

        x = torch.randn(2, 3, 4, requires_grad=False)
        model = RandNLikeModel()
        onnxir, _ = do_export(model, x, keep_initializers_as_inputs=True)
        onnx_model = onnx.ModelProto.FromString(onnxir)
        prepared = c2.prepare(onnx_model)
        caffe2_out = prepared.run(inputs=[x.cpu().numpy()])
        # Values cannot be compared, so only the shape is asserted.
        self.assertEqual(caffe2_out[0].shape, x.shape)
def test_traced_ints(self):
A = 4
H = 10
W = 8
img_count = 3
# in this model, the constant propagation in JIT doesn't work
# so we have ListConstruct in the symbolic
class MyModel(torch.nn.Module):
def __init__(self):
super(MyModel, self).__init__()
self.conv = torch.nn.Conv2d(A, 4 * A, 1, stride=1)
def forward(self, feature, im_info, anchors):
bbox_deltas = self.conv(feature)
a, b = torch.ops._caffe2.GenerateProposals(
feature, bbox_deltas, im_info, anchors,
2.0, 6000, 300, 0.7, 16, True, -90, 90, 1.0, True,
)
output = torch.ops._caffe2.RoIAlign(
feature, a,
order="NCHW",
spatial_scale=1.0,
pooled_h=3,
pooled_w=3,
sampling_ratio=0,
aligned=False,
)
return output
feature = torch.empty(img_count, A, H, W)
im_info = torch.ones(img_count, 3, dtype=torch.float32)
anchors = torch.ones(A, 4, dtype=torch.float32)
inputs = (feature, im_info, anchors)
model = MyModel()
with torch.no_grad():
self.run_model_test(MyModel(), train=False, input=inputs, batch_size=BATCH_SIZE)
    def test_c2_roi_align(self):
        """Export the _caffe2.RoIAlign custom op with randomly generated RoIs."""
        class MyModel(torch.nn.Module):
            def __init__(self):
                super(MyModel, self).__init__()

            def forward(self, feature, rois):
                roi_feature = torch.ops._caffe2.RoIAlign(
                    feature, rois, order="NCHW", spatial_scale=1.0,
                    pooled_h=3, pooled_w=3, sampling_ratio=3, aligned=False,
                )
                return roi_feature

        def rand_roi(N, C, H, W):
            # [batch_index, x1, y1, x2, y2] with x2>=x1 and y2>=y1.
            return [
                float(int(N * np.random.rand())),
                0.5 * np.random.rand() * W,
                0.5 * np.random.rand() * H,
                (0.5 + 0.5 * np.random.rand()) * W,
                (0.5 + 0.5 * np.random.rand()) * H,
            ]

        N, C, H, W = 1, 4, 10, 8
        feature = torch.randn(N, C, H, W)
        rois = torch.tensor([rand_roi(N, C, H, W) for _ in range(10)])
        inputs = (feature, rois)
        self.run_model_test(MyModel(), train=False, input=inputs, batch_size=3)
    def test_c2_generate_proposals(self):
        """Export the _caffe2.GenerateProposals custom op."""
        class MyModel(torch.nn.Module):
            def __init__(self):
                super(MyModel, self).__init__()

            def forward(self, scores, bbox_deltas, im_info, anchors):
                a, b = torch.ops._caffe2.GenerateProposals(
                    scores, bbox_deltas, im_info, anchors,
                    2.0, 6000, 300, 0.7, 16, True, -90, 90, 1.0, True,
                )
                return a, b

        A = 4
        H = 10
        W = 8
        img_count = 3
        scores = torch.ones(img_count, A, H, W, dtype=torch.float32)
        # Deterministic ramp of deltas, reshaped to (img, 4*A, H, W).
        bbox_deltas = torch.linspace(0, 10, steps=img_count * 4 * A * H * W,
                                     dtype=torch.float32)
        bbox_deltas = bbox_deltas.view(img_count, 4 * A, H, W)
        im_info = torch.ones(img_count, 3, dtype=torch.float32)
        anchors = torch.ones(A, 4, dtype=torch.float32)
        inputs = (scores, bbox_deltas, im_info, anchors)
        self.run_model_test(MyModel(), train=False, input=inputs, batch_size=3)
def test_c2_bbox_transform(self):
class MyModel(torch.nn.Module):
def __init__(self):
super(MyModel, self).__init__()
def forward(self, rois, deltas, im_info):
a, b = torch.ops._caffe2.BBoxTransform(
rois,
deltas,
im_info,
weights=[1., 1., 1., 1.],
apply_scale=False,
rotated=True,
angle_bound_on=True,
angle_bound_lo=-90,
angle_bound_hi=90,
clip_angle_thresh=0.5,
legacy_plus_one=True,
)
return a, b
roi_counts = [0, 2, 3, 4, 5]
batch_size = len(roi_counts)
total_rois = sum(roi_counts)
im_dims = np.random.randint(100, 600, batch_size)
rois = generate_rois_rotated(roi_counts, im_dims)
box_dim = 5
num_classes = 7
deltas = np.random.randn(total_rois, box_dim * num_classes).astype(np.float32)
im_info = np.zeros((batch_size, 3)).astype(np.float32)
im_info[:, 0] = im_dims
im_info[:, 1] = im_dims
im_info[:, 2] = 1.0
im_info = torch.zeros((batch_size, 3))
inputs = (torch.tensor(rois), torch.tensor(deltas), torch.tensor(im_info))
self.run_model_test(MyModel(), train=False, input=inputs, batch_size=3, use_gpu=False)
    # BoxWithNMSLimits has requirements for the inputs, so randomly generated inputs
    # in Caffe2BackendTestEmbed doesn't work with this op.
    @skipIfEmbed
    def test_c2_box_with_nms_limits(self):
        """Export the _caffe2.BoxWithNMSLimit custom op, fed from BBoxTransform outputs."""
        roi_counts = [0, 2, 3, 4, 5]
        num_classes = 7
        rotated = False
        angle_bound_on = True
        clip_angle_thresh = 0.5
        rois, deltas, im_info = create_bbox_transform_inputs(
            roi_counts, num_classes, rotated
        )
        # Run BBoxTransform eagerly to produce valid boxes/splits for NMS.
        pred_bbox, batch_splits = [
            t.detach().numpy()
            for t in torch.ops._caffe2.BBoxTransform(
                torch.tensor(rois),
                torch.tensor(deltas),
                torch.tensor(im_info),
                [1.0, 1.0, 1.0, 1.0],
                False,
                rotated,
                angle_bound_on,
                -90,
                90,
                clip_angle_thresh,
                legacy_plus_one=True,
            )
        ]
        class_prob = np.random.randn(sum(roi_counts), num_classes).astype(np.float32)
        score_thresh = 0.5
        nms_thresh = 0.5
        topk_per_image = int(sum(roi_counts) / 2)

        class MyModel(torch.nn.Module):
            def __init__(self):
                super(MyModel, self).__init__()

            def forward(self, class_prob, pred_bbox, batch_splits):
                a, b, c, d, e, f = torch.ops._caffe2.BoxWithNMSLimit(
                    class_prob,
                    pred_bbox,
                    batch_splits,
                    score_thresh=score_thresh,
                    nms=nms_thresh,
                    detections_per_im=topk_per_image,
                    soft_nms_enabled=False,
                    soft_nms_method="linear",
                    soft_nms_sigma=0.5,
                    soft_nms_min_score_thres=0.001,
                    rotated=rotated,
                    cls_agnostic_bbox_reg=False,
                    input_boxes_include_bg_cls=True,
                    output_classes_include_bg_cls=True,
                    legacy_plus_one=True,
                )
                return a, b, c, d, e, f

        inputs = (torch.tensor(class_prob), torch.tensor(pred_bbox), torch.tensor(batch_splits))
        self.run_model_test(MyModel(), train=False, input=inputs, batch_size=3, use_gpu=False)
    def test_c2_inference_lstm(self):
        """Export the _caffe2.InferenceLSTM op using weights from an nn.LSTM."""
        num_layers = 4
        seq_lens = 6
        emb_lens = 10
        has_bias = True
        batch_first = True
        is_bidirectional = True

        class MyModel(torch.nn.Module):
            def __init__(self):
                super(MyModel, self).__init__()

            def forward(self, lstm_in):
                a, b, c = torch.ops._caffe2.InferenceLSTM(
                    lstm_in, num_layers, has_bias, batch_first, is_bidirectional
                )
                return a, b, c

        num_directions = 2
        bsz = 5
        hidden_size = 7
        hx = np.zeros((num_layers * num_directions, bsz, hidden_size), dtype=np.float32)
        inputs = np.random.randn(bsz, seq_lens, emb_lens).astype(np.float32)
        # The nn.LSTM is only used to generate correctly-shaped flat weights.
        torch_lstm = torch.nn.LSTM(
            emb_lens,
            hidden_size,
            batch_first=batch_first,
            bidirectional=is_bidirectional,
            bias=has_bias,
            num_layers=num_layers,
        )
        lstm_in = [
            torch.from_numpy(inputs),
            torch.from_numpy(hx),
            torch.from_numpy(hx),
        ] + [param.detach() for param in torch_lstm._flat_weights]

        self.run_model_test(MyModel(), train=False, input=lstm_in, batch_size=3, use_gpu=False)
    def test_tuple_input_output(self):
        """Export a scripted identity over a tuple of tensors."""
        class TupleModel(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self, a: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:
                return a

        x = (torch.randn(3, 4), torch.randn(4, 3))
        self.run_model_test(TupleModel(), train=False, input=(x,), batch_size=BATCH_SIZE,
                            example_outputs=(x,))
    def test_nested_tuple_input_output(self):
        """Export a scripted model taking a nested tuple input."""
        class NestedTupleModel(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self, a: torch.Tensor, b: Tuple[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]) -> torch.Tensor:
                return a + b[0] + b[1][0] + b[1][1]

        x = torch.randn(4, 5)
        y = (torch.randn(4, 5), (torch.randn(4, 5), torch.randn(4, 5)))
        self.run_model_test(NestedTupleModel(), train=False, input=(x, y), batch_size=BATCH_SIZE,
                            example_outputs=x + y[0] + y[1][0] + y[1][1])
    def test_topk(self):
        """Export torch.topk with k=3 on a 1-D tensor."""
        class TopKModel(torch.nn.Module):
            def forward(self, input):
                return torch.topk(input, 3)

        x = torch.arange(1., 6.)
        self.run_model_test(TopKModel(), train=False, input=x, batch_size=BATCH_SIZE)
    def test_topk_script(self):
        """Scripted variant of topk with an explicit dim."""
        class TopKModel(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self, input):
                return torch.topk(input, 3, dim=0)

        x = torch.randn(4, 3, requires_grad=True)
        self.run_model_test(TopKModel(), train=False, input=(x,), batch_size=BATCH_SIZE, example_outputs=torch.topk(x, 3, dim=0))
    def test_floor(self):
        """Export torch.floor."""
        class FloorModel(torch.nn.Module):
            def forward(self, input):
                return torch.floor(input)

        x = torch.randn(1, 2, 3, 4, requires_grad=True)
        self.run_model_test(FloorModel(), train=False, input=x, batch_size=BATCH_SIZE)
    def test_ceil(self):
        """Export torch.ceil."""
        class CeilModel(torch.nn.Module):
            def forward(self, input):
                return torch.ceil(input)

        x = torch.randn(1, 2, 3, 4, requires_grad=True)
        self.run_model_test(CeilModel(), train=False, input=x, batch_size=BATCH_SIZE)
    @skipIfUnsupportedMinOpsetVersion(9)
    def test__dim_arange(self):
        """Export torch._dim_arange via the ATen fallback path."""
        class DimArange(torch.nn.Module):
            def forward(self, input):
                return torch._dim_arange(input, 1)

        x = torch.ones(5, 6)
        self.run_model_test(DimArange(), train=False, input=x, batch_size=BATCH_SIZE,
                            operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK)
    @skipIfUnsupportedMinOpsetVersion(9)
    def test_arange_end(self):
        """Export arange(end) with a size-dependent end, scripted and traced."""
        class ArangeScript(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self, a):
                return torch.arange(a.size(0), dtype=torch.float).view(-1, 1) + a

        x = torch.randn(3, 4, requires_grad=True)
        outputs = ArangeScript()(x)
        self.run_model_test(ArangeScript(), train=False, input=(x,), batch_size=BATCH_SIZE,
                            example_outputs=(outputs,))

        class ArangeModel(torch.nn.Module):
            def forward(self, a):
                return torch.arange(a.size(0), dtype=torch.float).view(-1, 1) + a

        self.run_model_test(ArangeModel(), train=False, input=(x,), batch_size=BATCH_SIZE)
    @skipIfUnsupportedMinOpsetVersion(9)
    def test_arange_start_end(self):
        """Export arange(start, end) with a size-dependent end, scripted and traced."""
        class ArangeScript(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self, a):
                return torch.arange(2, a.size(0) + 2, dtype=torch.float).view(-1, 1) + a

        x = torch.randn(3, 4, requires_grad=True)
        outputs = ArangeScript()(x)
        self.run_model_test(ArangeScript(), train=False, input=(x,), batch_size=BATCH_SIZE,
                            example_outputs=(outputs,))

        class ArangeModel(torch.nn.Module):
            def forward(self, a):
                return torch.arange(2, a.size(0) + 2, dtype=torch.float).view(-1, 1) + a

        self.run_model_test(ArangeModel(), train=False, input=(x,), batch_size=BATCH_SIZE)
    @skipIfUnsupportedMinOpsetVersion(9)
    def test_arange_start_end_step(self):
        """Export arange(start, end, step) with size-dependent end and step."""
        class ArangeScript(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self, a):
                return torch.arange(2, a.size(0) * a.size(1) + 2, a.size(1), dtype=torch.float).view(-1, 1) + a

        x = torch.randn(3, 4, requires_grad=True)
        outputs = ArangeScript()(x)
        self.run_model_test(ArangeScript(), train=False, input=(x,), batch_size=BATCH_SIZE,
                            example_outputs=(outputs,))

        class ArangeModel(torch.nn.Module):
            def forward(self, a):
                return torch.arange(2, a.size(0) * a.size(1) + 2, a.size(1), dtype=torch.float).view(-1, 1) + a

        self.run_model_test(ArangeModel(), train=False, input=(x,), batch_size=BATCH_SIZE)
    @skipIfUnsupportedMinOpsetVersion(9)
    def test_size(self):
        """Export arange driven by size(0) and size(-1) of the input."""
        class SizeModel(torch.nn.Module):
            def forward(self, input):
                return torch.arange(input.size(0)), torch.arange(input.size(-1))

        x = torch.randn(5, 3, 2)
        self.run_model_test(SizeModel(), train=False, input=(x,), batch_size=BATCH_SIZE,
                            input_names=['x'], dynamic_axes={'x': [0, 1, 2]})
        # Static shapes: sizes fold to constants and the input is pruned.
        self.run_model_test(SizeModel(), train=False, input=(x,), batch_size=BATCH_SIZE, remained_onnx_input_idx=[])
    def test_log2(self):
        """Export torch.log2 on strictly positive inputs."""
        class Log2Model(torch.nn.Module):
            def forward(self, input):
                return torch.log2(input)

        # Uniform in [4, 9] keeps inputs safely positive for log2.
        x = torch.empty(BATCH_SIZE, 10, 10).uniform_(4, 9)
        self.run_model_test(Log2Model(), train=False, input=x, batch_size=BATCH_SIZE)
    def test__sample_dirichlet(self):
        """Export _sample_dirichlet (ATen fallback) and check only the output shape."""
        class DirichletModel(torch.nn.Module):
            def forward(self, input):
                return torch._sample_dirichlet(input)

        x = torch.randn(2, 3, 4, requires_grad=False)
        model = DirichletModel()
        onnxir, _ = do_export(model, x, keep_initializers_as_inputs=True,
                              operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK)
        onnx_model = onnx.ModelProto.FromString(onnxir)
        prepared = c2.prepare(onnx_model)
        caffe2_out = prepared.run(inputs=[x.cpu().numpy()])
        # Sampling is random, so only the shape is asserted.
        self.assertEqual(caffe2_out[0].shape, x.shape)
    def test__standard_gamma(self):
        """Export _standard_gamma (ATen fallback) and check only the output shape."""
        class GammaModel(torch.nn.Module):
            def forward(self, input):
                return torch._standard_gamma(input)

        x = torch.randn(2, 3, 4, requires_grad=False)
        model = GammaModel()
        onnxir, _ = do_export(model, x, keep_initializers_as_inputs=True,
                              operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK)
        onnx_model = onnx.ModelProto.FromString(onnxir)
        prepared = c2.prepare(onnx_model)
        caffe2_out = prepared.run(inputs=[x.cpu().numpy()])
        # Sampling is random, so only the shape is asserted.
        self.assertEqual(caffe2_out[0].shape, x.shape)
    # The order of returned indices from Multinomial is undefined, so randomly generated inputs
    # in Caffe2BackendTestEmbed doesn't work with this op.
    @skipIfEmbed
    def test_multinomial(self):
        """Export multinomial with and without replacement on near-deterministic weights."""
        class Multinomial(torch.nn.Module):
            def forward(self, weight):
                return torch.multinomial(weight, 3, replacement=True)

        class MultinomialNoReplacement(torch.nn.Module):
            def forward(self, weight):
                return torch.multinomial(weight, 1)

        # One dominant weight per row makes the draw effectively deterministic.
        weight = torch.tensor([[0, 10, 0, 0], [0, 0, 100, 0]], dtype=torch.float)
        self.run_model_test(Multinomial(), train=False, input=weight, batch_size=BATCH_SIZE)
        self.run_model_test(MultinomialNoReplacement(), train=False, input=weight, batch_size=BATCH_SIZE)
    def test_prim_shape(self):
        """Export a view driven by prim::shape through a scripted helper."""
        x = torch.randn(4, 5, requires_grad=True)

        @torch.jit.script
        def view_by_prim_shape(x):
            return x.view(x.shape)

        class PrimShapeModel(torch.nn.Module):
            def forward(self, input):
                return view_by_prim_shape(input)
        self.run_model_test(PrimShapeModel(), train=False, input=x, batch_size=BATCH_SIZE)
def test_and(self):
class AndModel(torch.nn.Module):
def forward(self, x, y):
return x & y
x = torch.randint(0, 1, (3, 5))
y = torch.randint(0, 1, (3, 5))
self.run_model_test(AndModel(), train=False, input=(x, y), batch_size=BATCH_SIZE)
def test_or(self):
class OrModel(torch.nn.Module):
def forward(self, x, y):
return x | y
x = torch.randint(0, 1, (3, 5))
y = torch.randint(0, 1, (3, 5))
self.run_model_test(OrModel(), train=False, input=(x, y), batch_size=BATCH_SIZE)
    def test_dropout(self):
        """Export Dropout in eval mode (identity at inference)."""
        class DropoutModel(torch.nn.Module):
            def __init__(self):
                super(DropoutModel, self).__init__()
                self.dropout = torch.nn.Dropout(0.5)

            def forward(self, x):
                return self.dropout(x)

        x = torch.randn(1, 2, 3)
        self.run_model_test(DropoutModel(), train=False, input=x, batch_size=BATCH_SIZE)
    @skipIfUnsupportedMinOpsetVersion(9)
    def test_while(self):
        """Export a scripted while loop with a constant trip count."""
        class WhileModel(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self, x):
                a = 0
                while a < 4:
                    a += 1
                return x + a

        model = WhileModel()
        inputs = torch.zeros(1, 2, 3, dtype=torch.long)
        outputs = model(inputs)
        self.run_model_test(model, train=False, input=(inputs,), batch_size=BATCH_SIZE,
                            example_outputs=(outputs,))
    def test_while_cond(self):
        """Export a scripted while loop whose condition is a tensor computed from inputs."""
        class WhileModel(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self, x, a):
                b = (a < 4)
                while b:
                    a += b.to(torch.long)
                    b = (a < 4)
                return x + a

        model = WhileModel()
        x = torch.zeros(1, 2, 3, dtype=torch.long)
        a = torch.tensor([0], dtype=torch.long)
        outputs = model(x, a)
        self.run_model_test(model, train=False, input=(x, a), batch_size=BATCH_SIZE,
                            example_outputs=(outputs,))
    def test_loop(self):
        """Export a scripted for loop with a constant range."""
        class LoopModel(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self, x):
                for i in range(5):
                    x = x + i
                return x

        model = LoopModel()
        inputs = torch.zeros(1, 2, 3, dtype=torch.long)
        outputs = model(inputs)
        self.run_model_test(model, train=False, input=(inputs,), batch_size=BATCH_SIZE,
                            example_outputs=(outputs,))
    def test_dynamic_loop(self):
        """Export a scripted for loop whose trip count is a runtime size."""
        class LoopModel(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self, x):
                for i in range(x.size(2)):
                    x = x + i
                return x

        model = LoopModel()
        inputs = torch.zeros(1, 2, 3, dtype=torch.long)
        outputs = model(inputs)
        self.run_model_test(model, train=False, input=(inputs,), batch_size=BATCH_SIZE,
                            example_outputs=(outputs,))
    @skipIfUnsupportedMinOpsetVersion(9)
    def test_nested_loops(self):
        """Export nested for/while loops in a scripted model."""
        class NestedLoopsModel(torch.jit.ScriptModule):
            @torch.jit.script_method
            def forward(self, x):
                for i in range(5):
                    a = 0
                    while a < 4:
                        a += 1
                    for j in range(a):
                        x = x + j
                    x = x + a
                return x

        model = NestedLoopsModel()
        inputs = torch.zeros(1, 2, 3, dtype=torch.long)
        outputs = model(inputs)
        self.run_model_test(model, train=False, input=(inputs,), batch_size=BATCH_SIZE,
                            example_outputs=(outputs,))
    def test_select(self):
        """Export torch.select (index 1 on dim 0)."""
        class SelectModel(torch.nn.Module):
            def forward(self, x):
                return torch.select(x, 0, 1)

        model = SelectModel()
        inputs = torch.randn(3, 2, 1)
        self.run_model_test(model, train=False, input=(inputs, ), batch_size=BATCH_SIZE)
    def test_std(self):
        """Export full-tensor std with unbiased=False."""
        class StandardDeviation(torch.nn.Module):
            def forward(self, input):
                return torch.std(input, unbiased=False)

        model = StandardDeviation()
        inputs = torch.randn(2, 3, 4)
        outputs = model(inputs)
        self.run_model_test(model, train=False, input=(inputs,), batch_size=BATCH_SIZE,
                            example_outputs=(outputs,))
def test_std_along_dims(self):
    """Population standard deviation reduced over dims (0, 1)."""
    class StandardDeviationAlongDims(torch.nn.Module):
        def forward(self, input):
            return torch.std(input, dim=(0, 1), unbiased=False, keepdim=False)

    net = StandardDeviationAlongDims()
    data = torch.randn(2, 3, 4)
    self.run_model_test(net, train=False, input=(data,),
                        batch_size=BATCH_SIZE, example_outputs=(net(data),))
@skipIfUnsupportedMinOpsetVersion(9)
def test_masked_fill(self):
    """masked_fill with a constant mask and with a data-dependent mask."""
    class MaskedFillModel(torch.nn.Module):
        def forward(self, x):
            # uint8 mask is the legacy (pre-torch.bool) mask dtype kept for
            # export coverage.
            mask = torch.tensor([[0, 0, 1], [1, 1, 0]], dtype=torch.uint8)
            return x.masked_fill(mask, 2)

    inp = torch.zeros(4, 2, 3, requires_grad=True)
    self.run_model_test(MaskedFillModel(), input=(inp, ), train=False,
                        batch_size=BATCH_SIZE)

    class MaskedFillModel2(torch.nn.Module):
        def forward(self, x):
            return x.masked_fill(x > 3, -1)

    inp = torch.arange(16).view(2, 2, 4).to(torch.float32)
    self.run_model_test(MaskedFillModel2(), input=(inp, ), train=False,
                        batch_size=BATCH_SIZE)
@skipIfUnsupportedMinOpsetVersion(8)
def test_meshgrid(self):
    """torch.meshgrid over three 1-D tensors of different lengths."""
    class MeshgridModel(torch.nn.Module):
        def forward(self, x, y, z):
            return torch.meshgrid(x, y, z)

    a = torch.ones(3, requires_grad=True)
    b = torch.zeros(4, requires_grad=True)
    c = torch.ones(5, requires_grad=True)
    net = MeshgridModel()
    expected = net(a, b, c)
    self.run_model_test(net, train=False, input=(a, b, c),
                        batch_size=BATCH_SIZE, example_outputs=(expected,))
def test_remainder(self):
    """Elementwise remainder with a broadcasting tensor divisor."""
    class RemainderModel(torch.nn.Module):
        def forward(self, input, other):
            return torch.remainder(input, other)

    lhs = torch.randn(4, 2, 3)
    rhs = torch.randn(1, 2, 1)
    net = RemainderModel()
    expected = net(lhs, rhs)
    self.run_model_test(net, train=False, input=(lhs, rhs),
                        batch_size=BATCH_SIZE, example_outputs=(expected,))
def test_remainder_scalar(self):
    """remainder with a Python float divisor applied to integer inputs."""
    class RemainderModel(torch.nn.Module):
        def forward(self, input):
            return torch.remainder(input, 2.55)

    data = torch.randint(10, (2, 3))
    net = RemainderModel()
    self.run_model_test(net, train=False, input=(data,),
                        batch_size=BATCH_SIZE, example_outputs=(net(data),))
def test_baddbmm(self):
    """baddbmm with a tensor alpha and a float beta to cover scalar
    argument handling in the exporter."""
    class MyModule(torch.nn.Module):
        def forward(self, input, batch1, batch2):
            return torch.baddbmm(input, batch1, batch2,
                                 alpha=torch.tensor(5), beta=3.5)

    bias = torch.randn(10, 3, 5)
    lhs = torch.randn(10, 3, 4)
    rhs = torch.randn(10, 4, 5)
    self.run_model_test(MyModule(), input=(bias, lhs, rhs), train=False,
                        batch_size=BATCH_SIZE)
@skipIfUnsupportedMinOpsetVersion(9)
def test_gelu(self):
    """Functional GELU activation export check."""
    class GeluModel(torch.nn.Module):
        def forward(self, x):
            return torch.nn.functional.gelu(x)

    net = GeluModel()
    data = torch.randn(2, 4, 5, 6, requires_grad=True)
    self.run_model_test(net, train=False, input=(data,),
                        batch_size=BATCH_SIZE, example_outputs=(net(data),))
@skipIfUnsupportedMinOpsetVersion(9)
def test_index_fill(self):
    """index_fill along dim 2 with a constant index tensor."""
    class IndexFillModel(torch.nn.Module):
        def forward(self, input):
            index = torch.tensor([2, 0])
            return input.index_fill(2, index, -1)

    data = torch.randn(3, 4, 5, requires_grad=True)
    self.run_model_test(IndexFillModel(), input=(data, ), train=False,
                        batch_size=BATCH_SIZE)
@skipIfUnsupportedMinOpsetVersion(9)
def test_index_copy(self):
    """index_copy along dim 1 with a constant index and source tensor."""
    class IndexCopyModel(torch.nn.Module):
        def forward(self, input):
            index = torch.tensor([2, 0])
            source = torch.ones(3, 2, 5)
            return input.index_copy(1, index, source)

    data = torch.randn(3, 4, 5, requires_grad=True)
    self.run_model_test(IndexCopyModel(), input=(data, ), train=False,
                        batch_size=BATCH_SIZE)
# a bit of metaprogramming to set up all the rnn tests
def make_test(name, base, layer, bidirectional, initial_state,
              variable_length, dropout,
              **extra_kwargs):
    """Generate one RNN export test and attach it to TestCaffe2Backend_opset9.

    Each option argument is a (value, tag) pair: the value is forwarded to
    _dispatch_rnn_test and the tag is joined into the generated test's name.
    """
    test_name = str("_".join([
        "test", name, layer[1],
        bidirectional[1], initial_state[1],
        variable_length[1], dropout[1]
    ]))

    # RNN export is only supported from opset 8, and opset 10 is excluded.
    @skipIfUnsupportedOpsetVersion([10])
    @skipIfUnsupportedMinOpsetVersion(8)
    def f(self):
        self._dispatch_rnn_test(
            base,
            layers=layer[0],
            bidirectional=bidirectional[0],
            initial_state=initial_state[0],
            packed_sequence=variable_length[0],
            dropout=dropout[0],
            **extra_kwargs)

    f.__name__ = test_name
    # Install the generated method; derived suites created via type() below
    # pick it up from TestCaffe2Backend_opset9.__dict__.
    setattr(TestCaffe2Backend_opset9, f.__name__, f)
def setup_rnn_tests():
    """Instantiate the full cartesian RNN test matrix (2*2*2*3*2 option
    combinations x 4 cell types = 192 generated tests)."""
    layers_opts = [
        (1, "unilayer"),
        (3, "trilayer")
    ]
    bidirectional_opts = [
        (False, "forward"),
        (True, "bidirectional")
    ]
    initial_state_opts = [
        (True, "with_initial_state"),
        (False, "no_initial_state")
    ]
    variable_length_opts = [
        (0, "without_sequence_lengths"),
        (1, "with_variable_length_sequences"),
        (2, "with_batch_first_sequence_lengths")
    ]
    dropout_opts = [
        (0.2, "with_dropout"),
        (0.0, "without_dropout")
    ]
    test_count = 0
    for (layer, bidirectional, initial_state, variable_length, dropout) in \
        itertools.product(
            layers_opts,
            bidirectional_opts,
            initial_state_opts,
            variable_length_opts,
            dropout_opts,
        ):
        for base, name, extra_kwargs in (
                ("elman", "elman_relu", {"nonlinearity": u"relu"}),
                ("elman", "elman_tanh", {"nonlinearity": u"tanh"}),
                ("lstm", "lstm", {}),
                ("gru", "gru", {})
        ):
            make_test(name, base, layer, bidirectional, initial_state,
                      variable_length, dropout,
                      **extra_kwargs)
            test_count += 1

    # sanity check that a representative example does exist
    # (attribute access raises AttributeError if generation broke)
    TestCaffe2Backend_opset9.test_gru_trilayer_forward_with_initial_state_without_sequence_lengths_with_dropout

    # make sure no one accidentally disables all the tests without
    # noticing
    assert test_count == 192, test_count
setup_rnn_tests()

# The type(...) calls below clone the opset-9 suite: they build new
# unittest.TestCase subclasses from TestCaffe2Backend_opset9.__dict__ with a
# couple of class attributes (embed_params / opset_version) overridden, so
# every test method runs once per configuration.

# add the same test suite as above, but switch embed_params=False
# to embed_params=True
TestCaffe2BackendEmbed_opset9 = type(str("TestCaffe2BackendEmbed_opset9"),
                                     (unittest.TestCase,),
                                     dict(TestCaffe2Backend_opset9.__dict__, embed_params=True))

# opset 7 tests
TestCaffe2Backend_opset7 = type(str("TestCaffe2Backend_opset7"),
                                (unittest.TestCase,),
                                dict(TestCaffe2Backend_opset9.__dict__, opset_version=7))
TestCaffe2BackendEmbed_opset7 = type(str("TestCaffe2BackendEmbed_opset7"),
                                     (unittest.TestCase,),
                                     dict(TestCaffe2Backend_opset9.__dict__,
                                          embed_params=True, opset_version=7))

# opset 8 tests
TestCaffe2Backend_opset8 = type(str("TestCaffe2Backend_opset8"),
                                (unittest.TestCase,),
                                dict(TestCaffe2Backend_opset9.__dict__, opset_version=8))
TestCaffe2BackendEmbed_opset8 = type(str("TestCaffe2BackendEmbed_opset8"),
                                     (unittest.TestCase,),
                                     dict(TestCaffe2Backend_opset9.__dict__,
                                          embed_params=True, opset_version=8))

# opset 10 tests
TestCaffe2Backend_opset10 = type(str("TestCaffe2Backend_opset10"),
                                 (unittest.TestCase,),
                                 dict(TestCaffe2Backend_opset9.__dict__, opset_version=10))
TestCaffe2BackendEmbed_opset10 = type(str("TestCaffe2BackendEmbed_opset10"),
                                      (unittest.TestCase,),
                                      dict(TestCaffe2Backend_opset9.__dict__,
                                           embed_params=True, opset_version=10))

# add the same test suite as above, but switch embed_params=False
# to embed_params=True
TestCaffe2BackendEmbed_opset9_new_jit_API = type(str("TestCaffe2BackendEmbed_opset9_new_jit_API"),
                                                 (unittest.TestCase,),
                                                 dict(TestCaffe2Backend_opset9.__dict__, embed_params=True))

if __name__ == "__main__":
    unittest.main()
| 41.806956 | 132 | 0.604137 |
09433fc9ac977e9473af682a2d1e6de18ea98cdb | 359 | py | Python | PyWavelet/WaveletPeaks.py | mattkjames7/PyWavelet | 542c608da1477894c7454396d1e29627bf8eb038 | [
"MIT"
] | null | null | null | PyWavelet/WaveletPeaks.py | mattkjames7/PyWavelet | 542c608da1477894c7454396d1e29627bf8eb038 | [
"MIT"
] | null | null | null | PyWavelet/WaveletPeaks.py | mattkjames7/PyWavelet | 542c608da1477894c7454396d1e29627bf8eb038 | [
"MIT"
] | null | null | null | import numpy as np
def WaveletPeaks(Pow, Threshold=0.0):
    """Locate 2-D local maxima of a wavelet power array.

    A cell is a peak when it is >= all four of its orthogonal neighbours and
    >= Threshold. Border cells are never peaks (they lack a full
    neighbourhood).

    Parameters
    ----------
    Pow : 2-D numpy array of power values.
    Threshold : minimum power a cell must reach to count as a peak.

    Returns
    -------
    si, ti : arrays of row/column indices of the peaks (row-major order).
    pgrid : boolean array, same shape as Pow, True at peak positions.
    """
    center = Pow[1:-1, 1:-1]
    is_peak = (
        (center >= Pow[:-2, 1:-1]) & (center >= Pow[2:, 1:-1]) &
        (center >= Pow[1:-1, :-2]) & (center >= Pow[1:-1, 2:]) &
        (center >= Threshold)
    )
    pgrid = np.zeros(Pow.shape, dtype='bool')
    pgrid[1:-1, 1:-1] = is_peak
    si, ti = np.where(pgrid)
    return si, ti, pgrid
| 32.636364 | 185 | 0.532033 |
9e1f83395baf61c5ea5677f705d97e9a10aa676d | 18,140 | py | Python | tensorflow/python/kernel_tests/transpose_op_test.py | zo7/tensorflow-loomai | 3b77c1f018f6a671a0753f4f2ab2cea16349bb1b | [
"Apache-2.0"
] | 1 | 2017-10-09T08:33:36.000Z | 2017-10-09T08:33:36.000Z | tensorflow/python/kernel_tests/transpose_op_test.py | zo7/tensorflow-loomai | 3b77c1f018f6a671a0753f4f2ab2cea16349bb1b | [
"Apache-2.0"
] | 1 | 2018-05-04T23:17:25.000Z | 2018-05-04T23:17:25.000Z | tensorflow/python/kernel_tests/transpose_op_test.py | zo7/tensorflow-loomai | 3b77c1f018f6a671a0753f4f2ab2cea16349bb1b | [
"Apache-2.0"
] | 1 | 2020-04-29T20:49:13.000Z | 2020-04-29T20:49:13.000Z | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for Transpose op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.platform import test
class TransposeTest(test.TestCase):
  """Functional coverage for tf.transpose on CPU and GPU.

  Helper methods compare TensorFlow results against a NumPy reference
  transpose; for float32/float64 inputs they additionally gradient-check
  via gradient_checker.compute_gradient. GPU-specific tests return early
  when no CUDA device is available.
  """

  def _np_transpose(self, x, perm):
    # NumPy reference implementation used as ground truth.
    ret = np.copy(x)
    ret = ret.transpose(perm)
    return ret

  def _compareCpu(self, x, p):
    """Run transpose(x, p) on CPU, check values/shape; gradient-check floats.

    Returns (tf_result, jacobian) where jacobian is None for non-float
    dtypes.
    """
    np_ans = self._np_transpose(x, p)
    with self.test_session(use_gpu=False):
      inx = ops.convert_to_tensor(x)
      y = array_ops.transpose(inx, p)
      tf_ans = y.eval()
      self.assertAllEqual(np_ans, tf_ans)
      self.assertShapeEqual(np_ans, y)

      jacob_t = None
      # Gradient check on CPU.
      xs = list(np.shape(x))
      ys = list(np.shape(tf_ans))
      if x.dtype == np.float32:
        jacob_t, jacob_n = gradient_checker.compute_gradient(inx, xs, y, ys, x,
                                                             1e-2)
        self.assertAllClose(jacob_t, jacob_n, 1e-3, 1e-3)
      elif x.dtype == np.float64:
        jacob_t, jacob_n = gradient_checker.compute_gradient(inx, xs, y, ys, x,
                                                             1e-2)
        self.assertAllClose(jacob_t, jacob_n, 1e-6, 1e-6)

      return tf_ans, jacob_t

  def _compareGpu(self, x, p):
    """Same as _compareCpu but with use_gpu=True."""
    np_ans = self._np_transpose(x, p)
    with self.test_session(use_gpu=True):
      inx = ops.convert_to_tensor(x)
      y = array_ops.transpose(inx, p)
      tf_ans = y.eval()
      self.assertAllEqual(np_ans, tf_ans)
      self.assertShapeEqual(np_ans, y)

      jacob_t = None
      # Gradient check on GPU.
      xs = list(np.shape(x))
      ys = list(np.shape(tf_ans))
      if x.dtype == np.float32:
        jacob_t, jacob_n = gradient_checker.compute_gradient(inx, xs, y, ys, x,
                                                             1e-2)
        self.assertAllClose(jacob_t, jacob_n, 1e-3, 1e-3)
      elif x.dtype == np.float64:
        jacob_t, jacob_n = gradient_checker.compute_gradient(inx, xs, y, ys, x,
                                                             1e-2)
        self.assertAllClose(jacob_t, jacob_n, 1e-6, 1e-6)

      return tf_ans, jacob_t

  def _compare(self, x, use_gpu=False):
    """Check two randomly chosen permutations of x's axes."""
    n = np.ndim(x)
    # generate all permutations of [0, 1, ... n-1] in random order.
    all_perm = np.random.permutation(
        [p for p in itertools.permutations(range(n))]).astype(np.int32)
    for p in all_perm[:2]:
      self._compareCpu(x, p)
      if use_gpu:
        self._compareGpu(x, p)

  def _compare_cpu_gpu(self, x):
    """Check that CPU and GPU agree on results and gradients for two perms."""
    n = np.ndim(x)
    # generate all permutation of [0, 1, ... n-1] in random order,
    # choose the first two.
    perms = itertools.permutations(range(n))
    for _ in range(2):
      p = np.random.permutation(next(perms)).astype(np.int32)
      tf_a_cpu, tf_g_cpu = self._compareCpu(x, p)
      tf_a_gpu, tf_g_gpu = self._compareGpu(x, p)

      assert tf_g_cpu is not None
      assert tf_g_gpu is not None
      if x.dtype == np.float32:
        self.assertAllClose(tf_a_cpu, tf_a_gpu, 1e-3, 1e-3)
        self.assertAllClose(tf_g_cpu, tf_g_gpu, 1e-3, 1e-3)
      elif x.dtype == np.float64:
        self.assertAllClose(tf_a_cpu, tf_a_gpu, 1e-6, 1e-6)
        self.assertAllClose(tf_g_cpu, tf_g_gpu, 1e-6, 1e-6)

  def _testBoth(self, x):
    self._compare(x, use_gpu=False)
    self._compare(x, use_gpu=True)

  def testRank1(self):
    self._compareCpu(np.arange(0., 2), [0])

  def test1D(self):
    # 5-D tensor that is logically 1-D (all singleton dims but one).
    vector = np.arange(0, 2).reshape((1, 1, 1, 2, 1))
    self._compare(vector, use_gpu=False)
    self._compare(vector, use_gpu=True)

  def test5DGPU(self):
    # If no GPU available, skip the test
    if not test.is_gpu_available(cuda_only=True):
      return
    # Shapes chosen so the innermost (or second) dimension is small,
    # exercising the specialized GPU transpose kernels.
    large_shapes = [[4, 10, 10, 10, 3], [4, 10, 10, 10, 8],
                    [4, 10, 10, 10, 13], [4, 3, 10, 10, 10],
                    [4, 8, 10, 10, 10], [4, 13, 10, 10, 10]] * 3
    perms = [[0, 4, 1, 2, 3]] * 3 + [[0, 2, 3, 4, 1]] * 3 + [
        [4, 1, 2, 3, 0]] * 6 + [[1, 2, 3, 4, 0]] * 6

    datatypes = [np.int8, np.float16, np.float32, np.float64, np.complex128]
    for datatype in datatypes:
      for input_shape, perm in zip(large_shapes, perms):
        total_size = np.prod(input_shape)
        inp = np.arange(1, total_size + 1, dtype=datatype).reshape(input_shape)
        np_ans = self._np_transpose(inp, perm)
        with self.test_session(use_gpu=True):
          inx = ops.convert_to_tensor(inp)
          y = array_ops.transpose(inx, perm)
          tf_ans = y.eval()
          self.assertAllEqual(np_ans, tf_ans)
          self.assertShapeEqual(np_ans, y)

  def test4DGPU(self):
    # If no GPU available, skip the test
    if not test.is_gpu_available(cuda_only=True):
      return
    large_shapes = [[4, 10, 10, 3], [4, 10, 10, 8], [4, 10, 10, 13],
                    [4, 3, 10, 10], [4, 8, 10, 10], [4, 13, 10, 10]] * 3
    perms = [[0, 3, 1, 2]] * 3 + [[0, 2, 3, 1]] * 3 + [[3, 1, 2, 0]] * 6 + [
        [1, 2, 3, 0]] * 3 + [[2, 3, 0, 1]] * 3

    for input_shape, perm in zip(large_shapes, perms):
      total_size = np.prod(input_shape)
      inp = np.arange(1, total_size + 1, dtype=np.float32).reshape(input_shape)
      np_ans = self._np_transpose(inp, perm)
      with self.test_session(use_gpu=True):
        inx = ops.convert_to_tensor(inp)
        y = array_ops.transpose(inx, perm)
        tf_ans = y.eval()
        self.assertAllEqual(np_ans, tf_ans)
        self.assertShapeEqual(np_ans, y)

    # shapes related to Inception (taken from conv_ops_test.py)
    inception_shapes = [[4, 5, 5, 124], [4, 8, 8, 38], [4, 8, 8, 38],
                        [4, 8, 8, 204], [4, 8, 8, 44], [4, 8, 8, 204],
                        [4, 8, 8, 204], [4, 8, 8, 204], [4, 8, 8, 176],
                        [4, 8, 8, 176], [4, 8, 8, 176], [4, 8, 8, 176],
                        [4, 17, 17, 19], [4, 17, 17, 19], [4, 17, 17, 124],
                        [4, 17, 17, 12], [4, 17, 17, 124], [4, 17, 17, 22],
                        [4, 17, 17, 19], [4, 17, 17, 19], [4, 17, 17, 121],
                        [4, 17, 17, 121], [4, 17, 17, 22], [4, 17, 17, 19],
                        [4, 17, 17, 19], [4, 17, 17, 115], [4, 17, 17, 115],
                        [4, 17, 17, 19], [4, 17, 17, 16], [4, 17, 17, 115],
                        [4, 17, 17, 102], [4, 17, 17, 12], [4, 17, 17, 102],
                        [4, 17, 17, 12], [4, 17, 17, 102], [4, 17, 17, 12],
                        [4, 17, 17, 76], [4, 17, 17, 12], [4, 17, 17, 12],
                        [4, 17, 17, 76], [4, 17, 17, 76], [4, 35, 35, 9],
                        [4, 35, 35, 28], [4, 35, 35, 6], [4, 35, 35, 28],
                        [4, 35, 35, 25], [4, 35, 35, 4], [4, 35, 35, 25],
                        [4, 35, 35, 9], [4, 35, 35, 19], [4, 35, 35, 19],
                        [4, 35, 35, 19], [4, 73, 73, 6], [4, 73, 73, 6],
                        [4, 147, 147, 2]]
    for input_shape in inception_shapes:
      # NHWC -> NCHW, the layout conversion used by convolution code paths.
      perm = [0, 3, 1, 2]
      total_size = np.prod(input_shape)
      inp = np.arange(1, total_size + 1, dtype=np.float32).reshape(input_shape)
      np_ans = self._np_transpose(inp, perm)
      with self.test_session(use_gpu=True):
        inx = ops.convert_to_tensor(inp)
        y = array_ops.transpose(inx, perm)
        tf_ans = y.eval()
        self.assertAllEqual(np_ans, tf_ans)
        self.assertShapeEqual(np_ans, y)

  def test3DGPU(self):
    # If no GPU available, skip the test
    if not test.is_gpu_available(cuda_only=True):
      return

    datatypes = [np.int8, np.float16, np.float32, np.float64, np.complex128]
    large_shapes = [[4, 1000, 3], [4, 1000, 8], [4, 1000, 13], [4, 3, 1000],
                    [4, 8, 1000], [4, 13, 1000]] * 3
    perms = [[0, 2, 1]] * 6 + [[2, 1, 0]] * 6 + [[1, 2, 0]] * 3 + [
        [2, 0, 1]] * 3
    for datatype in datatypes:
      for input_shape, perm in zip(large_shapes, perms):
        total_size = np.prod(input_shape)
        inp = np.arange(1, total_size + 1, dtype=datatype).reshape(input_shape)
        np_ans = self._np_transpose(inp, perm)
        with self.test_session(use_gpu=True):
          inx = ops.convert_to_tensor(inp)
          y = array_ops.transpose(inx, perm)
          tf_ans = y.eval()
          self.assertAllEqual(np_ans, tf_ans)
          self.assertShapeEqual(np_ans, y)

  def testLargeSizeGPU(self):
    # If no GPU available, skip the test
    if not test.is_gpu_available(cuda_only=True):
      return
    # Shapes where one or two dimensions are very large and another is tiny.
    large_shapes = [[1000000, 31, 3], [3, 1000000, 31], [3, 31, 1000000],
                    [10000, 310, 3], [3, 10000, 310], [3, 310, 10000],
                    [2, 1000, 1000], [1000, 2, 1000], [1000, 1000, 2]]
    perms = [[0, 2, 1]] * 9

    for input_shape, perm in zip(large_shapes, perms):
      total_size = np.prod(input_shape)
      inp = np.arange(1, total_size + 1, dtype=np.float32).reshape(input_shape)
      np_ans = self._np_transpose(inp, perm)
      with self.test_session(use_gpu=True):
        inx = ops.convert_to_tensor(inp)
        y = array_ops.transpose(inx, perm)
        tf_ans = y.eval()
        self.assertAllEqual(np_ans, tf_ans)
        self.assertShapeEqual(np_ans, y)

  def testRandomizedSmallDimLargeSizeGPU(self):
    # If no GPU available, skip the test
    if not test.is_gpu_available(cuda_only=True):
      return

    # Draw 10 random shapes with large dimension sizes.
    # 40% prob to generate dim[0] size within [1, 2047]
    # 40% prob to generate dim[0] size within [2048, 4095]
    # 20% prob to generate dim[0] size within [4096, 100000]
    # 50% prob to use dim[1] as the small dim (<16)
    num_samples = 10
    total_size = 500000
    small_size_limit = 2048
    large_size_limit = 95905
    small_size_percentage = 0.4
    medium_size_percentage = 0.4
    large_size_percentage = 0.2
    perms = [[0, 2, 1]] * num_samples
    dim_zero_sizes = []
    dim_zero_sizes += list(
        np.random.randint(
            small_size_limit, size=int(small_size_percentage * num_samples)) +
        1)
    dim_zero_sizes += list(
        np.random.randint(
            small_size_limit, size=int(medium_size_percentage * num_samples)) +
        small_size_limit)
    dim_zero_sizes += list(
        np.random.randint(
            large_size_limit, size=int(large_size_percentage * num_samples)) +
        small_size_limit * 2)
    input_shapes = []
    small_dim_limit = 16
    for dim_zero_size in dim_zero_sizes:
      small_dim_size = np.random.randint(small_dim_limit - 1) + 1
      large_dim_size = int(
          total_size / dim_zero_size / small_dim_size) + small_dim_limit
      input_shapes += ([[dim_zero_size, small_dim_size, large_dim_size]]
                       if np.random.randint(2) else
                       [[dim_zero_size, large_dim_size, small_dim_size]])

    for input_shape, perm in zip(input_shapes, perms):
      # generate input data with random ints from 0 to 9.
      inp = np.random.randint(10, size=input_shape)
      np_ans = self._np_transpose(inp, perm)
      with self.test_session(use_gpu=True):
        inx = ops.convert_to_tensor(inp)
        y = array_ops.transpose(inx, perm)
        tf_ans = y.eval()
        self.assertAllEqual(np_ans, tf_ans)
        self.assertShapeEqual(np_ans, y)
      # Drop the cached session so each random shape gets a fresh one.
      self._ClearCachedSession()

  def testNop(self):
    # Identity permutation is a no-op.
    self._compareCpu(np.arange(0, 6).reshape([3, 2]).astype(np.float32), [0, 1])

  def testSimple(self):
    self._compareCpu(
        np.arange(0, 8).reshape([2, 4]).astype(np.float32),
        np.array([1, 0]).astype(np.int32))

  def testHalf(self):
    self._compare(np.arange(0, 21).reshape([3, 7]).astype(np.float16))
    self._compare(np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.float16))
    self._compare(
        np.arange(0, 16).reshape([1, 2, 1, 2, 1, 2, 1, 2]).astype(np.float16))

  def testFloat(self):
    self._compare_cpu_gpu(np.arange(0, 21).reshape([3, 7]).astype(np.float32))
    self._compare_cpu_gpu(
        np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.float32))
    self._compare_cpu_gpu(
        np.arange(0, 16).reshape([1, 2, 1, 2, 1, 2, 1, 2]).astype(np.float32))

  def testDouble(self):
    self._compare_cpu_gpu(np.arange(0, 21).reshape([3, 7]).astype(np.float64))
    self._compare_cpu_gpu(
        np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.float64))
    self._compare_cpu_gpu(
        np.arange(0, 16).reshape([1, 2, 1, 2, 1, 2, 1, 2]).astype(np.float64))

  def testComplex64(self):
    self._testBoth(
        np.complex(1, 2) *
        np.arange(0, 21).reshape([3, 7]).astype(np.complex64))
    self._testBoth(
        np.complex(1, 2) *
        np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.complex64))
    self._testBoth(
        np.complex(1, 2) *
        np.arange(0, 1260).reshape([2, 3, 5, 7, 2, 3]).astype(np.complex64))

  def testComplex128(self):
    self._testBoth(
        np.complex(1, 2) *
        np.arange(0, 21).reshape([3, 7]).astype(np.complex128))
    self._testBoth(
        np.complex(1, 2) *
        np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.complex128))
    self._testBoth(
        np.complex(1, 2) *
        np.arange(0, 1260).reshape([2, 3, 5, 7, 2, 3]).astype(np.complex128))

  def testInt8(self):
    self._testBoth(np.arange(0, 21).reshape([3, 7]).astype(np.int8))
    self._testBoth(np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.int8))
    self._testBoth(
        np.arange(0, 1260).reshape([2, 3, 5, 7, 2, 3]).astype(np.int8))

  def testInt16(self):
    self._testBoth(np.arange(0, 21).reshape([3, 7]).astype(np.int16))
    self._testBoth(np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.int16))
    self._testBoth(
        np.arange(0, 1260).reshape([2, 3, 5, 7, 2, 3]).astype(np.int16))

  def testInt32(self):
    self._testBoth(np.arange(0, 21).reshape([3, 7]).astype(np.int32))
    self._testBoth(np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.int32))
    self._testBoth(
        np.arange(0, 1260).reshape([2, 3, 5, 7, 2, 3]).astype(np.int32))

  def testInt64(self):
    self._testBoth(np.arange(0, 21).reshape([3, 7]).astype(np.int64))
    self._testBoth(np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.int64))
    self._testBoth(
        np.arange(0, 1260).reshape([2, 3, 5, 7, 2, 3]).astype(np.int64))

  def testTranspose2DAuto(self):
    # With no perm argument, transpose reverses the axes.
    x_np = [[1, 2, 3], [4, 5, 6]]
    for use_gpu in [False, True]:
      with self.test_session(use_gpu=use_gpu):
        x_tf = array_ops.transpose(x_np).eval()
        self.assertAllEqual(x_tf, [[1, 4], [2, 5], [3, 6]])

  def testSingletonDims(self):
    # A singleton dimension is a dimension i with shape[i] == 1. Such dimensions
    # can be collapsed and expanded using reshape without changing the
    # underlying data storage. If all non-singleton dimensions remain in
    # ascending order, the shuffled singletons will be transposed by a reshape,
    # saving a memory allocation & copy. Since this gets a special code-path in
    # transpose_op.cc, we test that the codepath is exercised and the results
    # are as expected; we do not test that we save the memory allocation and
    # copy here.
    for shape in [[2, 1, 2], [2, 1, 2, 1, 1, 2], [1, 2, 2, 1, 1, 1],
                  [1, 1, 1, 2, 2, 2], [2, 2, 1, 1, 1]]:
      self._compare_cpu_gpu(
          np.arange(np.prod(shape)).reshape(shape).astype(np.float32))

  def testTransposeShapes(self):
    # Static shape inference for transpose, including unknown-rank inputs.
    self.assertEqual(
        [],
        array_ops.transpose(array_ops.placeholder(
            dtypes.int32, shape=[])).get_shape().dims)
    self.assertEqual(
        [100],
        array_ops.transpose(array_ops.placeholder(
            dtypes.int32, shape=[100])).get_shape().dims)
    self.assertEqual(
        [37, 100],
        array_ops.transpose(
            array_ops.placeholder(
                dtypes.int32, shape=[100, 37])).get_shape().dims)
    self.assertEqual(
        [100, 37],
        array_ops.transpose(
            array_ops.placeholder(
                dtypes.int32, shape=[100, 37]), [0, 1]).get_shape().dims)
    self.assertEqual(
        [15, 37, 100],
        array_ops.transpose(
            array_ops.placeholder(
                dtypes.int32, shape=[100, 37, 15])).get_shape().dims)
    self.assertEqual(
        [15, 100, 37],
        array_ops.transpose(
            array_ops.placeholder(
                dtypes.int32, shape=[100, 37, 15]), [2, 0, 1]).get_shape().dims)
    self.assertEqual(
        tensor_shape.TensorShape(None),
        array_ops.transpose(array_ops.placeholder(dtypes.int32)).get_shape())

  def testNullTensor(self):
    # Transposing a tensor with a zero-sized dimension must permute the
    # shape even though there is no data.
    with self.test_session():
      x = constant_op.constant([], dtype=dtypes.float32, shape=[1, 4, 0])
      xt = array_ops.transpose(x, [0, 2, 1]).eval()
      self.assertAllEqual(xt.shape, (1, 0, 4))

  def _testError(self, x, p, err):
    # Helper: evaluating transpose(x, p) must raise an op error matching err.
    with self.test_session():
      with self.assertRaisesOpError(err):
        array_ops.transpose(x, p).eval()

  def testError(self):
    # Malformed perm arguments: wrong rank, out-of-range index, and a
    # repeated index (каught at runtime with a "2 is missing" message).
    with self.assertRaises(ValueError):
      array_ops.transpose(
          np.arange(0., 30).reshape([2, 3, 5]), [[0, 1], [2, 3]])
    with self.assertRaises(ValueError):
      array_ops.transpose(np.arange(0., 30).reshape([2, 3, 5]), [0, 1, 3])
    self._testError(
        np.arange(0., 30).reshape([2, 3, 5]), [0, 1, 1], "2 is missing")
# Run the suite directly via TensorFlow's test runner.
if __name__ == "__main__":
  test.main()
| 39.693654 | 80 | 0.590353 |
2b8af182245fadadec0475e0a3fdc285d8470f24 | 1,019 | py | Python | dcgan/discriminator.py | amjack100/DCGAN-Implementation | c25411093dd3e8609e7aac573dc05521cdd664b5 | [
"MIT"
] | null | null | null | dcgan/discriminator.py | amjack100/DCGAN-Implementation | c25411093dd3e8609e7aac573dc05521cdd664b5 | [
"MIT"
] | null | null | null | dcgan/discriminator.py | amjack100/DCGAN-Implementation | c25411093dd3e8609e7aac573dc05521cdd664b5 | [
"MIT"
] | null | null | null | import tensorflow as tf
from tensorflow.keras import layers
def make_discriminator_model(img_size, channel_count: int):
    """Build the DCGAN discriminator network.

    Two strided 5x5 convolution blocks (LeakyReLU + Dropout 0.3) followed by
    a flatten and a single-logit dense head; the output is an unbounded
    real/fake score (no sigmoid — pair with a from_logits loss).

    Args:
        img_size: height/width of the (square) input images.
        channel_count: number of channels in the input images.

    Returns:
        An uncompiled tf.keras.Sequential model.
    """
    return tf.keras.Sequential([
        layers.Conv2D(
            64,
            (5, 5),
            strides=(2, 2),
            padding="same",
            input_shape=[img_size, img_size, channel_count],
        ),
        layers.LeakyReLU(),
        layers.Dropout(0.3),
        layers.Conv2D(128, (5, 5), strides=(2, 2), padding="same"),
        layers.LeakyReLU(),
        layers.Dropout(0.3),
        layers.Flatten(),
        layers.Dense(1),
    ])
def discriminator_loss(real_output, fake_output):
    """Discriminator objective: BCE on logits with one-sided label smoothing.

    Real samples are scored against smoothed "1" labels (label_smoothing=0.2)
    and fakes against "0"; the two losses are summed.
    """
    bce = tf.keras.losses.BinaryCrossentropy(
        from_logits=True, label_smoothing=0.2
    )
    loss_on_real = bce(tf.ones_like(real_output), real_output)
    loss_on_fake = bce(tf.zeros_like(fake_output), fake_output)
    return loss_on_real + loss_on_fake
440e29c82ae24020b31e1dae00c1f15ef14c2f82 | 9,165 | py | Python | nova/tests/functional/compute/test_live_migration.py | wangyc666666/ussuri_nova | 0706b514f288216c41d64e98524ef7e517efb8d8 | [
"Apache-2.0"
] | null | null | null | nova/tests/functional/compute/test_live_migration.py | wangyc666666/ussuri_nova | 0706b514f288216c41d64e98524ef7e517efb8d8 | [
"Apache-2.0"
] | null | null | null | nova/tests/functional/compute/test_live_migration.py | wangyc666666/ussuri_nova | 0706b514f288216c41d64e98524ef7e517efb8d8 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_utils.fixture import uuidsentinel as uuids
from nova import exception
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import integrated_helpers
from nova.tests.unit import fake_notifier
class FakeCinderError(object):
    """Callable stub for Cinder's attachment_delete.

    Poor man's Mock, used with stub_out() rather than mock.patch: the first
    invocation raises CinderConnectionFailed, every later one succeeds, and
    both raises and total calls are counted. This lets a test simulate a
    single failing volume while still asserting that every volume of the
    instance was processed.
    """

    def __init__(self):
        self.raise_count = 0
        self.call_count = 0

    def __call__(self, *args, **kwargs):
        self.call_count += 1
        first_call = self.raise_count == 0
        if first_call:
            self.raise_count += 1
            raise exception.CinderConnectionFailed(reason='Fake Cinder error')
class LiveMigrationCinderFailure(integrated_helpers._IntegratedTestBase):
    """Live migration must survive a transient Cinder attachment_delete
    failure: one volume's old attachment delete fails, yet the migration
    completes and all attachments are processed.
    """
    # API version used by self.api for all requests.
    api_major_version = 'v2.1'
    microversion = 'latest'

    def setUp(self):
        super(LiveMigrationCinderFailure, self).setUp()
        fake_notifier.stub_notifier(self)
        self.addCleanup(fake_notifier.reset)
        # Start a second compute node (the first one was started for us by
        # _IntegratedTestBase. set_nodes() is needed to avoid duplicate
        # nodenames. See comments in test_bug_1702454.py.
        self.compute2 = self.start_service('compute', host='host2')

    def test_live_migrate_attachment_delete_fails(self):
        self.useFixture(nova_fixtures.CinderFixture(self))
        # Boot a server with two volumes so one attachment_delete can fail
        # while the other succeeds.
        server = self.api.post_server({
            'server': {
                'flavorRef': 1,
                'imageRef': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
                'name': 'live-migrate-attachment-delete-fail-test',
                'networks': 'none',
                'block_device_mapping_v2': [
                    {'boot_index': 0,
                     'uuid': uuids.broken_volume,
                     'source_type': 'volume',
                     'destination_type': 'volume'},
                    {'boot_index': 1,
                     'uuid': uuids.working_volume,
                     'source_type': 'volume',
                     'destination_type': 'volume'}]}})
        server = self._wait_for_state_change(server, 'ACTIVE')

        # Migrate to whichever of the two computes is not hosting the server.
        source = server['OS-EXT-SRV-ATTR:host']
        if source == self.compute.host:
            dest = self.compute2.host
        else:
            dest = self.compute.host

        post = {
            'os-migrateLive': {
                'host': dest,
                'block_migration': False,
            }
        }
        # First attachment_delete raises CinderConnectionFailed, the rest
        # succeed; counters let us assert both below.
        stub_attachment_delete = FakeCinderError()
        self.stub_out('nova.volume.cinder.API.attachment_delete',
                      stub_attachment_delete)
        self.api.post_server_action(server['id'], post)
        self._wait_for_server_parameter(server,
                                        {'OS-EXT-SRV-ATTR:host': dest,
                                         'status': 'ACTIVE'})
        # Both volumes were processed even though one delete failed.
        self.assertEqual(2, stub_attachment_delete.call_count)
        self.assertEqual(1, stub_attachment_delete.raise_count)
class TestVolAttachmentsDuringLiveMigration(
    integrated_helpers._IntegratedTestBase
):
    """Assert the lifecycle of volume attachments during LM rollbacks
    """
    # Default self.api to the self.admin_api as live migration is admin only
    ADMIN_API = True
    microversion = 'latest'

    def setUp(self):
        super().setUp()
        self.cinder = self.useFixture(nova_fixtures.CinderFixture(self))

    def _setup_compute_service(self):
        # Two named computes so the test can migrate between them.
        self._start_compute('src')
        self._start_compute('dest')

    @mock.patch('nova.virt.fake.FakeDriver.live_migration')
    def test_vol_attachments_during_driver_live_mig_failure(self, mock_lm):
        """Assert volume attachments during live migration rollback

        * Mock live_migration to always rollback and raise a failure within the
          fake virt driver
        * Launch a boot from volume instance
        * Assert that the volume is attached correctly to the instance
        * Live migrate the instance to another host invoking the mocked
          live_migration method
        * Assert that the instance is still on the source host
        * Assert that the original source host volume attachment remains
        """
        # Mock out driver.live_migration so that we always rollback
        def _fake_live_migration_with_rollback(
                context, instance, dest, post_method, recover_method,
                block_migration=False, migrate_data=None):
            # Just call the recover_method to simulate a rollback
            recover_method(context, instance, dest, migrate_data)
            # raise test.TestingException here to imitate a virt driver
            raise test.TestingException()
        mock_lm.side_effect = _fake_live_migration_with_rollback

        volume_id = nova_fixtures.CinderFixture.IMAGE_BACKED_VOL
        server = self._build_server(
            name='test_bfv_live_migration_failure', image_uuid='',
            networks='none'
        )
        server['block_device_mapping_v2'] = [{
            'source_type': 'volume',
            'destination_type': 'volume',
            'boot_index': 0,
            'uuid': volume_id
        }]
        server = self.api.post_server({'server': server})
        self._wait_for_state_change(server, 'ACTIVE')

        # Fetch the source host for use later
        server = self.api.get_server(server['id'])
        src_host = server['OS-EXT-SRV-ATTR:host']

        # Assert that the volume is connected to the instance
        self.assertIn(
            volume_id, self.cinder.volume_ids_for_instance(server['id']))

        # Assert that we have an active attachment in the fixture
        attachments = self.cinder.volume_to_attachment.get(volume_id)
        self.assertEqual(1, len(attachments))

        # Fetch the attachment_id for use later once we have migrated
        src_attachment_id = list(attachments.keys())[0]

        # Migrate the instance and wait until the migration errors out thanks
        # to our mocked version of live_migration raising TestingException
        self._live_migrate(server, 'error', server_expected_state='ERROR')

        # Assert that we called the fake live_migration method
        mock_lm.assert_called_once()

        # Assert that the instance is on the source
        server = self.api.get_server(server['id'])
        self.assertEqual(src_host, server['OS-EXT-SRV-ATTR:host'])

        # Assert that the src attachment is still present
        attachments = self.cinder.volume_to_attachment.get(volume_id)
        self.assertIn(src_attachment_id, attachments.keys())
        self.assertEqual(1, len(attachments))
class LiveMigrationNeutronInteractionsTest(
        integrated_helpers._IntegratedTestBase):
    """Regression coverage for how live migration sources its network_info."""
    # NOTE(artom) We need the admin API to force the host when booting the test
    # server.
    ADMIN_API = True
    microversion = 'latest'

    def _setup_compute_service(self):
        # Two named computes so the test can migrate between them.
        self._start_compute('src')
        self._start_compute('dest')

    def test_live_migrate_vifs_from_info_cache(self):
        """Test that bug 1879787 can no longer manifest itself because we get
        the network_info from the instance info cache, and not Neutron.
        """
        def stub_notify(context, instance, event_suffix,
                        network_info=None, extra_usage_info=None, fault=None):
            vif = network_info[0]
            # Make sure we have the correct VIF (the NeutronFixture
            # deterministically uses port_2 for networks=auto) and that the
            # profile does not contain `migrating_to`, indicating that we did
            # not obtain it from the Neutron API.
            self.assertEqual(self.neutron.port_2['id'], vif['id'])
            self.assertNotIn('migrating_to', vif['profile'])

        server = self._create_server(networks='auto',
                                     host=self.computes['src'].host)
        # Intercept the usage notification on the source compute; stub_notify
        # runs our assertions on the network_info it is handed.
        with mock.patch.object(self.computes['src'].manager,
                               '_notify_about_instance_usage',
                               side_effect=stub_notify) as mock_notify:
            self._live_migrate(server, 'completed')
            server = self.api.get_server(server['id'])
            self.assertEqual('dest', server['OS-EXT-SRV-ATTR:host'])
            # We don't care about call arguments here, we just want to be sure
            # our stub actually got called.
            mock_notify.assert_called()
| 41.283784 | 79 | 0.646263 |
5314df3f63297cc6e206474c5b996c98b4ac1dbb | 1,445 | py | Python | axcell/models/linking/utils.py | Kabongosalomon/axcell | f9c74910561f6064a04a10118824c99e871f8a38 | [
"Apache-2.0"
] | 335 | 2020-05-07T19:57:36.000Z | 2022-03-16T07:05:51.000Z | axcell/models/linking/utils.py | doc22940/axcell | b41c1623377d89c3c45a61907f0a47ea029269de | [
"Apache-2.0"
] | 16 | 2020-06-12T16:43:29.000Z | 2021-11-24T11:19:09.000Z | axcell/models/linking/utils.py | doc22940/axcell | b41c1623377d89c3c45a61907f0a47ea029269de | [
"Apache-2.0"
] | 50 | 2020-05-07T20:35:18.000Z | 2022-02-16T06:37:31.000Z | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from unidecode import unidecode
import re
# cleaning & normalization
# Matches one non-greedy parenthesized "(...)" or bracketed "[...]" group
# (no nesting support).
parens_re = re.compile(r"\([^)]*?\)|\[[^]]*?\]")
# Captures the inner text of a string after skipping leading/trailing
# non-word characters; fails to match if the string has no word characters.
strip_nonalnum_re = re.compile(r"^\W*(\w.*\b)\W*$")
def strip_nonalnum(s):
    """Trim leading and trailing non-word characters from *s*.

    Returns the empty string when *s* contains no word characters at all.
    """
    match = re.match(r"^\W*(\w.*\b)\W*$", s)
    return match.group(1) if match else ""
def remove_parens(text):
    """Delete every "(...)" and "[...]" group from *text* (non-nested)."""
    return re.sub(r"\([^)]*?\)|\[[^]]*?\]", "", text)
def clean_name(name):
    """ASCII-fold *name*, drop any parenthesized/bracketed groups, and trim.

    Stripping happens both before and after paren removal so that groups at
    the string edges do not leave stray whitespace.
    """
    ascii_name = unidecode(name).strip()
    return remove_parens(ascii_name).strip()
def clean_cell(cell):
    """Normalize a raw table-cell string: clean it like a name, then strip
    any leftover leading/trailing punctuation."""
    cleaned = clean_name(cell)
    return strip_nonalnum(cleaned)
# Four-digit 20XX years, capturing the last two digits ("2014" -> r"\1" is "14").
year_2k_re = re.compile(r"20(\d\d)")
# Separator/punctuation characters that vary between spellings of the same name.
hyphens_re = re.compile(r"[-_'`–’→]")
# Any run of whitespace (collapsed to a single space by the normalizers).
ws_re = re.compile(r"\s+")
# Extraction artifacts: "xxanchor-..." markers (optionally prefixed with
# "xxtable-") and "xxref-..." markers, up to the next space.
refs_re = re.compile(r"(xxtable-)?xxanchor-[^ ]*|xxref-[^ ]*")
def remove_references(s):
    """Drop xxanchor-/xxref-/xxtable- reference markers left by extraction."""
    return re.sub(r"(xxtable-)?xxanchor-[^ ]*|xxref-[^ ]*", "", s)
def normalize_dataset_ws(name):
    """Normalize a dataset name, preserving word boundaries.

    Separator characters become spaces (so "CIFAR-10" -> "cifar 10"), 20XX
    years are shortened to two digits, whitespace runs are collapsed, and the
    result is lowercased and ASCII-folded.
    """
    cleaned = remove_references(name)
    # NOTE: separators must become spaces *before* the year rewrite so that
    # e.g. "2014-15" yields "14 15" rather than "1415".
    cleaned = hyphens_re.sub(" ", cleaned)
    cleaned = year_2k_re.sub(r"\1", cleaned)
    cleaned = ws_re.sub(" ", cleaned)
    return unidecode(cleaned.strip().lower())
def normalize_dataset(name):
    """Normalize a dataset name, fusing words together.

    Separator characters are deleted outright (so "CIFAR-10" -> "cifar10"),
    20XX years are shortened to two digits first, whitespace runs are
    collapsed, and the result is lowercased and ASCII-folded.
    """
    cleaned = remove_references(name)
    # NOTE: the year rewrite runs *before* separators are deleted, the
    # opposite order to normalize_dataset_ws — keep it that way.
    cleaned = year_2k_re.sub(r"\1", cleaned)
    cleaned = hyphens_re.sub("", cleaned)
    cleaned = ws_re.sub(" ", cleaned)
    return unidecode(cleaned.strip().lower())
def normalize_cell(s):
    """Keep only alphanumeric characters of *s* and ASCII-fold the result."""
    kept = "".join(ch for ch in s if ch.isalnum())
    return unidecode(kept)
def normalize_cell_ws(s):
    """Keep only alphanumeric and whitespace characters of *s*, ASCII-folded."""
    kept = "".join(ch for ch in s if ch.isalnum() or ch.isspace())
    return unidecode(kept)
# end of cleaning & normalization
| 25.350877 | 75 | 0.651211 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.