hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5fd71b33c6e9e77c0be46887a506da419e458766 | 22,917 | py | Python | kornia/filters/kernels.py | gianscarpe/kornia | 766bd71d6cca7313988b02784be6d56834e8c744 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2021-04-09T21:24:47.000Z | 2021-04-09T21:24:47.000Z | kornia/filters/kernels.py | wyli/kornia | 53e417eae7c296a0d0b57ad2b1ba8cd11f24c40d | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | kornia/filters/kernels.py | wyli/kornia | 53e417eae7c296a0d0b57ad2b1ba8cd11f24c40d | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2020-10-20T06:57:07.000Z | 2020-10-20T06:57:07.000Z | from typing import Tuple, List, Union, cast
import torch
import torch.nn as nn
from kornia.geometry.transform.affwarp import rotate, rotate3d
def normalize_kernel2d(input: torch.Tensor) -> torch.Tensor:
    r"""Normalize a (batch of) 2D kernel(s) by its L1 norm.

    The absolute values over the last two dimensions are summed and the
    kernel is divided by that sum, so both derivative and smoothing
    kernels end up with unit total magnitude.
    """
    if input.dim() < 2:
        raise TypeError("input should be at least 2D tensor. Got {}"
                        .format(input.size()))
    l1_norm = input.abs().sum(dim=(-2, -1), keepdim=True)
    return input / l1_norm
def gaussian(window_size, sigma):
    """Return a 1D Gaussian window of length ``window_size`` that sums to one.

    The window is centered: for odd sizes the middle sample sits at zero,
    for even sizes the two middle samples straddle zero.
    """
    half = window_size // 2
    coords = torch.arange(window_size).float() - half
    if window_size % 2 == 0:
        # shift so the even-sized window is symmetric around zero
        coords = coords + 0.5
    weights = torch.exp(-coords ** 2 / (2.0 * sigma ** 2))
    return weights / weights.sum()
def laplacian_1d(window_size) -> torch.Tensor:
    r"""Return a 1D Laplacian kernel: all ones, with the center entry
    chosen so the coefficients sum to zero.

    One could also use the Laplacian of Gaussian formula to design
    the filter.
    """
    kernel = torch.ones(window_size)
    center = window_size // 2
    kernel[center] = 1.0 - window_size
    return kernel
def get_box_kernel2d(kernel_size: Tuple[int, int]) -> torch.Tensor:
    r"""Utility function that returns a box (mean) filter of the given size,
    with a leading singleton channel dimension."""
    height, width = kernel_size
    num_elements = float(height) * float(width)
    kernel = torch.ones(1, height, width)
    return kernel / num_elements
def get_binary_kernel2d(window_size: Tuple[int, int]) -> torch.Tensor:
    r"""Create a binary kernel that extracts image patches.

    For a window of size HxW the result has shape (H*W, 1, H, W): slice
    ``i`` is a one-hot mask selecting the ``i``-th pixel of the window.
    """
    h, w = window_size
    num_pixels = h * w
    kernel = torch.eye(num_pixels)
    return kernel.view(num_pixels, 1, h, w)
def get_sobel_kernel_3x3() -> torch.Tensor:
    """Utility function that returns a sobel kernel of 3x3.

    This is the x-derivative (horizontal gradient) Sobel operator; its
    transpose gives the y-derivative.
    """
    return torch.tensor([
        [-1., 0., 1.],
        [-2., 0., 2.],
        [-1., 0., 1.],
    ])
def get_sobel_kernel_5x5_2nd_order() -> torch.Tensor:
    """Utility function that returns a 2nd order sobel kernel of 5x5.

    Approximates the second derivative along x (d^2/dx^2); the transpose
    gives the corresponding d^2/dy^2 kernel.
    """
    return torch.tensor([
        [-1., 0., 2., 0., -1.],
        [-4., 0., 8., 0., -4.],
        [-6., 0., 12., 0., -6.],
        [-4., 0., 8., 0., -4.],
        [-1., 0., 2., 0., -1.]
    ])
def _get_sobel_kernel_5x5_2nd_order_xy() -> torch.Tensor:
    """Utility function that returns a 2nd order sobel kernel of 5x5.

    This is the mixed second derivative (d^2/dxdy) variant.
    """
    return torch.tensor([
        [-1., -2., 0., 2., 1.],
        [-2., -4., 0., 4., 2.],
        [0., 0., 0., 0., 0.],
        [2., 4., 0., -4., -2.],
        [1., 2., 0., -2., -1.]
    ])
def get_diff_kernel_3x3() -> torch.Tensor:
    """Utility function that returns a 3x3 central finite-difference kernel
    (first derivative along x); the transpose gives the y variant."""
    return torch.tensor([
        [-0., 0., 0.],
        [-1., 0., 1.],
        [-0., 0., 0.],
    ])
def get_diff_kernel3d(device=torch.device('cpu'), dtype=torch.float) -> torch.Tensor:
    """Utility function that returns a first order derivative kernel of 3x3x3.

    Shape of the result is (3, 1, 3, 3, 3): one central-difference kernel
    per spatial axis, each with a singleton channel dimension.
    """
    kernel: torch.Tensor = torch.tensor([
                                        # central difference along the innermost (width) axis
                                        [[[0.0, 0.0, 0.0],
                                          [0.0, 0.0, 0.0],
                                          [0.0, 0.0, 0.0]],
                                         [[0.0, 0.0, 0.0],
                                          [-0.5, 0.0, 0.5],
                                          [0.0, 0.0, 0.0]],
                                         [[0.0, 0.0, 0.0],
                                          [0.0, 0.0, 0.0],
                                          [0.0, 0.0, 0.0]],
                                         ],
                                        # central difference along the height axis
                                        [[[0.0, 0.0, 0.0],
                                          [0.0, 0.0, 0.0],
                                          [0.0, 0.0, 0.0]],
                                         [[0.0, -0.5, 0.0],
                                          [0.0, 0.0, 0.0],
                                          [0.0, 0.5, 0.0]],
                                         [[0.0, 0.0, 0.0],
                                          [0.0, 0.0, 0.0],
                                          [0.0, 0.0, 0.0]],
                                         ],
                                        # central difference along the depth axis
                                        [[[0.0, 0.0, 0.0],
                                          [0.0, -0.5, 0.0],
                                          [0.0, 0.0, 0.0]],
                                         [[0.0, 0.0, 0.0],
                                          [0.0, 0.0, 0.0],
                                          [0.0, 0.0, 0.0]],
                                         [[0.0, 0.0, 0.0],
                                          [0.0, 0.5, 0.0],
                                          [0.0, 0.0, 0.0]],
                                         ],
                                        ], device=device, dtype=dtype)
    # add the singleton channel dimension expected by conv3d weights
    return kernel.unsqueeze(1)
def get_diff_kernel3d_2nd_order(device=torch.device('cpu'), dtype=torch.float) -> torch.Tensor:
    """Utility function that returns a second order derivative kernel of 3x3x3.

    NOTE: the original docstring said "first order", but these are the
    second derivatives: three pure terms followed by three mixed terms
    (presumably xy, yz, xz by the position of the nonzero entries — confirm
    against the caller).  Result shape is (6, 1, 3, 3, 3).
    """
    kernel: torch.Tensor = torch.tensor([
                                        # pure second derivative along the width axis
                                        [[[0.0, 0.0, 0.0],
                                          [0.0, 0.0, 0.0],
                                          [0.0, 0.0, 0.0]],
                                         [[0.0, 0.0, 0.0],
                                          [1.0, -2.0, 1.0],
                                          [0.0, 0.0, 0.0]],
                                         [[0.0, 0.0, 0.0],
                                          [0.0, 0.0, 0.0],
                                          [0.0, 0.0, 0.0]],
                                         ],
                                        # pure second derivative along the height axis
                                        [[[0.0, 0.0, 0.0],
                                          [0.0, 0.0, 0.0],
                                          [0.0, 0.0, 0.0]],
                                         [[0.0, 1.0, 0.0],
                                          [0.0, -2.0, 0.0],
                                          [0.0, 1.0, 0.0]],
                                         [[0.0, 0.0, 0.0],
                                          [0.0, 0.0, 0.0],
                                          [0.0, 0.0, 0.0]],
                                         ],
                                        # pure second derivative along the depth axis
                                        [[[0.0, 0.0, 0.0],
                                          [0.0, 1.0, 0.0],
                                          [0.0, 0.0, 0.0]],
                                         [[0.0, 0.0, 0.0],
                                          [0.0, -2.0, 0.0],
                                          [0.0, 0.0, 0.0]],
                                         [[0.0, 0.0, 0.0],
                                          [0.0, 1.0, 0.0],
                                          [0.0, 0.0, 0.0]],
                                         ],
                                        # mixed derivative varying in width and height
                                        [[[0.0, 0.0, 0.0],
                                          [0.0, 0.0, 0.0],
                                          [0.0, 0.0, 0.0]],
                                         [[1.0, 0.0, -1.0],
                                          [0.0, 0.0, 0.0],
                                          [-1.0, 0.0, 1.0]],
                                         [[0.0, 0.0, 0.0],
                                          [0.0, 0.0, 0.0],
                                          [0.0, 0.0, 0.0]],
                                         ],
                                        # mixed derivative varying in height and depth
                                        [[[0.0, 1.0, 0.0],
                                          [0.0, 0.0, 0.0],
                                          [0.0, -1.0, 0.0]],
                                         [[0.0, 0.0, 0.0],
                                          [0.0, 0.0, 0.0],
                                          [0.0, 0.0, 0.0]],
                                         [[0.0, -1.0, 0.0],
                                          [0.0, 0.0, 0.0],
                                          [0.0, 1.0, 0.0]],
                                         ],
                                        # mixed derivative varying in width and depth
                                        [[[0.0, 0.0, 0.0],
                                          [1.0, 0.0, -1.0],
                                          [0.0, 0.0, 0.0]],
                                         [[0.0, 0.0, 0.0],
                                          [0.0, 0.0, 0.0],
                                          [0.0, 0.0, 0.0]],
                                         [[0.0, 0.0, 0.0],
                                          [-1.0, 0.0, 1.0],
                                          [0.0, 0.0, 0.0]],
                                         ],
                                        ], device=device, dtype=dtype)
    # add the singleton channel dimension expected by conv3d weights
    return kernel.unsqueeze(1)
def get_sobel_kernel2d() -> torch.Tensor:
    """Return the pair of 3x3 Sobel kernels (x- then y-derivative), stacked."""
    gx = get_sobel_kernel_3x3()
    gy = gx.transpose(0, 1)
    return torch.stack([gx, gy])
def get_diff_kernel2d() -> torch.Tensor:
    """Return the pair of 3x3 central-difference kernels (x then y), stacked."""
    gx = get_diff_kernel_3x3()
    gy = gx.transpose(0, 1)
    return torch.stack([gx, gy])
def get_sobel_kernel2d_2nd_order() -> torch.Tensor:
    """Return the stacked 5x5 second-order Sobel kernels (gxx, gxy, gyy)."""
    kernel_xx = get_sobel_kernel_5x5_2nd_order()
    kernel_yy = kernel_xx.transpose(0, 1)
    kernel_xy = _get_sobel_kernel_5x5_2nd_order_xy()
    return torch.stack([kernel_xx, kernel_xy, kernel_yy])
def get_diff_kernel2d_2nd_order() -> torch.Tensor:
    """Return stacked 3x3 second-order finite-difference kernels (gxx, gxy, gyy)."""
    # build d^2/dx^2 as a zero grid with the 1 -2 1 stencil on the middle row
    gxx = torch.zeros(3, 3)
    gxx[1] = torch.tensor([1., -2., 1.])
    # d^2/dy^2 is its transpose
    gyy = gxx.t()
    # mixed derivative stencil
    gxy = torch.tensor([
        [-1., 0., 1.],
        [0., 0., 0.],
        [1., 0., -1.],
    ])
    return torch.stack([gxx, gxy, gyy])
def get_spatial_gradient_kernel2d(mode: str, order: int) -> torch.Tensor:
    r"""Return a kernel for 1st or 2nd order image gradients,
    using one of the following operators: sobel, diff."""
    if mode not in ['sobel', 'diff']:
        raise TypeError("mode should be either sobel\
                         or diff. Got {}".format(mode))
    if order not in [1, 2]:
        raise TypeError("order should be either 1 or 2\
                         Got {}".format(order))
    # dispatch table instead of an if/elif chain
    builders = {
        ('sobel', 1): get_sobel_kernel2d,
        ('sobel', 2): get_sobel_kernel2d_2nd_order,
        ('diff', 1): get_diff_kernel2d,
        ('diff', 2): get_diff_kernel2d_2nd_order,
    }
    try:
        builder = builders[(mode, order)]
    except KeyError:
        raise NotImplementedError("")
    return builder()
def get_spatial_gradient_kernel3d(mode: str, order: int, device=torch.device('cpu'), dtype=torch.float) -> torch.Tensor:
    r"""Return a kernel for 1st or 2nd order scale pyramid gradients,
    using one of the following operators: sobel, diff."""
    if mode not in ['sobel', 'diff']:
        raise TypeError("mode should be either sobel\
                         or diff. Got {}".format(mode))
    if order not in [1, 2]:
        raise TypeError("order should be either 1 or 2\
                         Got {}".format(order))
    if mode == 'sobel':
        raise NotImplementedError("Sobel kernel for 3d gradient is not implemented yet")
    # mode is 'diff' from here on
    if order == 1:
        return get_diff_kernel3d(device, dtype)
    if order == 2:
        return get_diff_kernel3d_2nd_order(device, dtype)
    raise NotImplementedError("")
def get_gaussian_kernel1d(kernel_size: int,
                          sigma: float,
                          force_even: bool = False) -> torch.Tensor:
    r"""Function that returns Gaussian filter coefficients.
    Args:
        kernel_size (int): filter size. It should be odd and positive.
        sigma (float): gaussian standard deviation.
        force_even (bool): overrides requirement for odd kernel size.
    Returns:
        Tensor: 1D tensor with gaussian filter coefficients.
    Shape:
        - Output: :math:`(\text{kernel_size})`
    Examples::
        >>> kornia.image.get_gaussian_kernel(3, 2.5)
        tensor([0.3243, 0.3513, 0.3243])
        >>> kornia.image.get_gaussian_kernel(5, 1.5)
        tensor([0.1201, 0.2339, 0.2921, 0.2339, 0.1201])
    """
    # a valid size is a positive int that is odd unless force_even is set
    size_ok = isinstance(kernel_size, int) and kernel_size > 0 and \
        (force_even or kernel_size % 2 == 1)
    if not size_ok:
        raise TypeError(
            "kernel_size must be an odd positive integer. "
            "Got {}".format(kernel_size)
        )
    return gaussian(kernel_size, sigma)
def get_gaussian_kernel2d(
        kernel_size: Tuple[int, int],
        sigma: Tuple[float, float],
        force_even: bool = False) -> torch.Tensor:
    r"""Function that returns Gaussian filter matrix coefficients.
    Args:
        kernel_size (Tuple[int, int]): filter sizes in the x and y direction.
         Sizes should be odd and positive.
        sigma (Tuple[int, int]): gaussian standard deviation in the x and y
         direction.
        force_even (bool): overrides requirement for odd kernel size.
    Returns:
        Tensor: 2D tensor with gaussian filter matrix coefficients.
    Shape:
        - Output: :math:`(\text{kernel_size}_x, \text{kernel_size}_y)`
    Examples::
        >>> kornia.image.get_gaussian_kernel2d((3, 3), (1.5, 1.5))
        tensor([[0.0947, 0.1183, 0.0947],
                [0.1183, 0.1478, 0.1183],
                [0.0947, 0.1183, 0.0947]])
    """
    if not isinstance(kernel_size, tuple) or len(kernel_size) != 2:
        raise TypeError(
            "kernel_size must be a tuple of length two. Got {}".format(
                kernel_size
            )
        )
    if not isinstance(sigma, tuple) or len(sigma) != 2:
        raise TypeError(
            "sigma must be a tuple of length two. Got {}".format(sigma)
        )
    kernel_x = get_gaussian_kernel1d(kernel_size[0], sigma[0], force_even)
    kernel_y = get_gaussian_kernel1d(kernel_size[1], sigma[1], force_even)
    # outer product of the two 1D windows via broadcasting
    return kernel_x[:, None] * kernel_y[None, :]
def get_laplacian_kernel1d(kernel_size: int) -> torch.Tensor:
    r"""Function that returns the coefficients of a 1D Laplacian filter.
    Args:
        kernel_size (int): filter size. It should be odd and positive.
    Returns:
        Tensor (float): 1D tensor with laplacian filter coefficients.
    Shape:
        - Output: math:`(\text{kernel_size})`
    Examples::
        >>> kornia.image.get_laplacian_kernel(3)
        tensor([ 1., -2.,  1.])
        >>> kornia.image.get_laplacian_kernel(5)
        tensor([ 1.,  1., -4.,  1.,  1.])
    """
    # size must be a positive odd int
    size_ok = isinstance(kernel_size, int) and kernel_size > 0 and \
        kernel_size % 2 == 1
    if not size_ok:
        raise TypeError("ksize must be an odd positive integer. Got {}"
                        .format(kernel_size))
    return laplacian_1d(kernel_size)
def get_laplacian_kernel2d(kernel_size: int) -> torch.Tensor:
    r"""Function that returns a 2D Laplacian filter matrix.
    Args:
        kernel_size (int): filter size should be odd.
    Returns:
        Tensor: 2D tensor with laplacian filter matrix coefficients:
        all ones, with the center set so the coefficients sum to zero.
    Shape:
        - Output: :math:`(\text{kernel_size}_x, \text{kernel_size}_y)`
    Examples::
        >>> kornia.image.get_laplacian_kernel2d(3)
        tensor([[ 1.,  1.,  1.],
                [ 1., -8.,  1.],
                [ 1.,  1.,  1.]])
    """
    size_ok = isinstance(kernel_size, int) and kernel_size > 0 and \
        kernel_size % 2 == 1
    if not size_ok:
        raise TypeError("ksize must be an odd positive integer. Got {}"
                        .format(kernel_size))
    kernel_2d = torch.ones(kernel_size, kernel_size)
    center = kernel_size // 2
    kernel_2d[center, center] = 1 - kernel_size ** 2
    return kernel_2d
def get_motion_kernel2d(kernel_size: int, angle: Union[torch.Tensor, float],
                        direction: Union[torch.Tensor, float] = 0.) -> torch.Tensor:
    r"""Return 2D motion blur filter.
    Args:
        kernel_size (int): motion kernel width and height. It should be odd and positive.
        angle (torch.Tensor, float): angle of the motion blur in degrees (anti-clockwise rotation).
        direction (float): forward/backward direction of the motion blur.
            Lower values towards -1.0 will point the motion blur towards the back (with angle provided via angle),
            while higher values towards 1.0 will point the motion blur forward. A value of 0.0 leads to a
            uniformly (but still angled) motion blur.
    Returns:
        torch.Tensor: the motion blur kernel.
    Shape:
        - Output: :math:`(ksize, ksize)`
    Examples::
        >>> kornia.filters.get_motion_kernel2d(5, 0., 0.)
        tensor([[0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.2000, 0.2000, 0.2000, 0.2000, 0.2000],
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000]])
    """
    if not isinstance(kernel_size, int) or kernel_size % 2 == 0 or kernel_size < 3:
        raise TypeError("ksize must be an odd integer >= than 3")
    if not isinstance(angle, torch.Tensor):
        angle = torch.tensor([angle])
    angle = cast(torch.Tensor, angle)
    if angle.dim() == 0:
        angle = angle.unsqueeze(0)
    assert angle.dim() == 1, f"angle must be a 1-dim tensor. Got {angle}."
    if not isinstance(direction, torch.Tensor):
        direction = torch.tensor([direction])
    direction = cast(torch.Tensor, direction)
    if direction.dim() == 0:
        direction = direction.unsqueeze(0)
    assert direction.dim() == 1, f"direction must be a 1-dim tensor. Got {direction}."
    assert direction.size(0) == angle.size(0), \
        f"direction and angle must have the same length. Got {direction} and {angle}."
    kernel_tuple: Tuple[int, int] = (kernel_size, kernel_size)
    # direction from [-1, 1] to [0, 1] range
    direction = (torch.clamp(direction, -1., 1.) + 1.) / 2.
    kernel = torch.zeros((direction.size(0), *kernel_tuple), dtype=torch.float)
    # Element-wise linspace from `direction` to `1 - direction` along the
    # middle row.  BUGFIX: the previous expression
    # `direction - i / (kernel_size - 1)` summed to zero for the default
    # direction (0.5 after the remapping above), so the normalization below
    # divided by zero and produced a NaN kernel; the corrected formula also
    # reproduces the uniform 0.2 row of the docstring example.
    k = kernel_size
    kernel[:, k // 2, :] = torch.stack(
        [direction + ((1 - 2 * direction) / (k - 1)) * i for i in range(k)], dim=-1)
    kernel = kernel.unsqueeze(1)
    # rotate (counterclockwise) kernel by given angle
    kernel = rotate(kernel, angle, mode='nearest', align_corners=True)
    kernel = kernel[:, 0]
    # normalize so each kernel in the batch sums to one
    kernel = kernel / kernel.sum(dim=(1, 2), keepdim=True)
    return kernel
def get_motion_kernel3d(kernel_size: int, angle: Union[torch.Tensor, Tuple[float, float, float]],
                        direction: Union[torch.Tensor, float] = 0.) -> torch.Tensor:
    r"""Return 3D motion blur filter.
    Args:
        kernel_size (int): motion kernel width, height and depth. It should be odd and positive.
        angle (tensor or float): Range of yaw (x-axis), pitch (y-axis), roll (z-axis) to select from.
            If tensor, it must be :math:`(B, 3)`.
        direction (float): forward/backward direction of the motion blur.
            Lower values towards -1.0 will point the motion blur towards the back (with angle provided via angle),
            while higher values towards 1.0 will point the motion blur forward. A value of 0.0 leads to a
            uniformly (but still angled) motion blur.
    Returns:
        torch.Tensor: the motion blur kernel of shape :math:`(B, ksize, ksize, ksize)`.
    """
    if not isinstance(kernel_size, int) or kernel_size % 2 == 0 or kernel_size < 3:
        raise TypeError("ksize must be an odd integer >= than 3")
    if not isinstance(angle, torch.Tensor):
        angle = torch.tensor([angle])
    angle = cast(torch.Tensor, angle)
    if angle.dim() == 1:
        angle = angle.unsqueeze(0)
    assert len(angle.shape) == 2 and angle.size(1) == 3, f"angle must be (B, 3). Got {angle}."
    if not isinstance(direction, torch.Tensor):
        direction = torch.tensor([direction])
    direction = cast(torch.Tensor, direction)
    if direction.dim() == 0:
        direction = direction.unsqueeze(0)
    assert direction.dim() == 1, f"direction must be a 1-dim tensor. Got {direction}."
    assert direction.size(0) == angle.size(0), \
        f"direction and angle must have the same length. Got {direction} and {angle}."
    kernel_tuple: Tuple[int, int, int] = (kernel_size, kernel_size, kernel_size)
    # direction from [-1, 1] to [0, 1] range
    direction = (torch.clamp(direction, -1., 1.) + 1.) / 2.
    kernel = torch.zeros((direction.size(0), *kernel_tuple), dtype=torch.float)
    # Element-wise linspace from `direction` to `1 - direction` along the
    # central row.  BUGFIX: the previous expression
    # `direction - i / (kernel_size - 1)` summed to zero for the default
    # direction (0.5 after the remapping above), so the normalization below
    # divided by zero and produced a NaN kernel.
    k = kernel_size
    kernel[:, k // 2, k // 2, :] = torch.stack(
        [direction + ((1 - 2 * direction) / (k - 1)) * i for i in range(k)], dim=-1)
    kernel = kernel.unsqueeze(1)
    # rotate (counterclockwise) kernel by the given yaw/pitch/roll angles
    kernel = rotate3d(kernel, angle[:, 0], angle[:, 1], angle[:, 2], mode='nearest', align_corners=True)
    kernel = kernel[:, 0]
    # normalize so each kernel in the batch sums to one
    kernel = kernel / kernel.sum(dim=(1, 2, 3), keepdim=True)
    return kernel
| 39.241438 | 120 | 0.482 |
0028eb78f09feda552f70771438374983e479555 | 1,621 | py | Python | amharict_TfIdf_Vectorizer.py | 1Mathias/PublicNLPA | e1b7e94210528fe3bced86305dbbe1336bdd72ab | [
"MIT"
] | null | null | null | amharict_TfIdf_Vectorizer.py | 1Mathias/PublicNLPA | e1b7e94210528fe3bced86305dbbe1336bdd72ab | [
"MIT"
] | null | null | null | amharict_TfIdf_Vectorizer.py | 1Mathias/PublicNLPA | e1b7e94210528fe3bced86305dbbe1336bdd72ab | [
"MIT"
] | null | null | null | from sklearn.feature_extraction.text import TfidfVectorizer
import pandas as pd
# importing the modules
from IPython.display import display
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer
docs = ["የኢትዮጵያ ቤት ኪንግ ፕሪሚዬር ሊግ አሸናፊው ፋሲል ከነማ ትናንት በአዲስ አበባ ሸራተን አዲስ ሆቴል የገቢ ማሰባሰቢያ ቴሌቶን ማዘጋጀቱ ይታወሳል",
"በገቢ ማሰባሰቢያ ዝግጅቱ ከፍተኛ የመንግሥት የሥራ ኃላፊዎችን ጨምሮ የተለያዩ የኅብረተሰብ ክፍሎች ተሳትፈዋል፡፡ ስለ ተደረገው የገቢ ማሰባሰቢያ ቴሌቶን መግለጫ የሰጡት የክለቡ ፕሬዚዳንትና የጎንደር ከተማ ከንቲባ አቶ ሞላ መልካሙ ቴሌቶኑ ኢትዮጵያዊነት አምሮና ደምቆ የታየበትና የስፖርት ዓላማን ያሳካ ነበር ብለዋል",
"በቴሌቶኑ አሁንም በስልክና በተለያዩ አማራጮች ቃል የሚገቡ እንዳሉ ሆኖ ከ170 ሚሊዮን ብር በላይ መሰብሰቡም ተገልጿል"
"ቀዳማዊት እመቤት ዝናሽ ታያቸው በሁሉም ክልሎች የክለቡ አምባሳደሮች መሰየማቸው ፋሲል ከነማ የኢትዮጵያ ክለብ መሆኑን የሚገልጽ ነው ብለዋል። በቴሌቶኑ ከሁሉም የኢትዮጵያ ክፍሎች ድጋፎች መደረጋቸው ሌላኛው ፍሲል የኢትዮጵያ ክለብ መሆኑን የሚያሳይ እንደሆነ ተናግረዋል። በድጋፉ ለተሳተፉ ሁሉም አካላት ምስጋናም አቅርበዋል",
"በቀጣይ ክለቡ የያዛቸውን ትላልቅ ፕሮጀክቶች ከግብ ለማድረስና ክለቡ በአፍሪካ መድረክ ረዥም ርቀት እንዲጓዝ አሁንም የሁሉም ድጋፍ ያስፈልጋል ተብሏል።",
"የክለቡ ሥራ አስኪያጅ አቶ አቢዮት ብርሃኑ ክለቡ በቀጣይ ከመንግሥት በጀት ተላቆ የራሱ ቋሚ ሀብት እንዲኖረው ሥራዎች በእቅድ እየተሠሩ ስለመሆናቸው ተናግረዋል",
"ከቴሌቶኑ የሚገኘው ገቢ ለደሞዝና ለእለታዊ ወጭዎች ሳይሆን አካዳሚ መገንባት ጨምሮ ለተያያዙት ትላልቅ ፕሮጀክቶች እንደሚውልም ተጠቅሷል"
]
tfidf_vectorizer = TfidfVectorizer(use_idf=True)
tfidf_vectorizer_vectors = tfidf_vectorizer.fit_transform(docs)
first_vector_tfidfvectorizer = tfidf_vectorizer_vectors[0]
# place tf-idf values in a pandas data frame
df = pd.DataFrame(first_vector_tfidfvectorizer.T.todense(), index=tfidf_vectorizer.get_feature_names(),
columns=["tfidf"])
d=df.sort_values(by=["tfidf"], ascending=False)
display(d)
| 62.346154 | 213 | 0.766811 |
3e70db8a79541d7afea528eeda9f6e1e3c856d2d | 79 | py | Python | manage.py | wuyue92tree/service_runner | ac6cf1ecd231dbefa431d4d858d7340414279e0a | [
"Apache-2.0"
] | null | null | null | manage.py | wuyue92tree/service_runner | ac6cf1ecd231dbefa431d4d858d7340414279e0a | [
"Apache-2.0"
] | 10 | 2020-06-06T00:02:49.000Z | 2022-02-10T11:15:40.000Z | manage.py | wuyue92tree/service_runner | ac6cf1ecd231dbefa431d4d858d7340414279e0a | [
"Apache-2.0"
] | null | null | null | from service_runner.manage import main
# Entry point: delegate to the service_runner CLI when executed as a script.
if __name__ == "__main__":
    main()
| 13.166667 | 38 | 0.708861 |
8e88ebeacc883606dc1cd9473bd4eac5fa845202 | 764 | py | Python | pacote-download/coursera4/week3.py | JoaoP-Rodrigues/CursoPython3-Aulas | 24d884eaa67485a12c8c3629c7f0fa5b8606a798 | [
"MIT"
] | null | null | null | pacote-download/coursera4/week3.py | JoaoP-Rodrigues/CursoPython3-Aulas | 24d884eaa67485a12c8c3629c7f0fa5b8606a798 | [
"MIT"
] | null | null | null | pacote-download/coursera4/week3.py | JoaoP-Rodrigues/CursoPython3-Aulas | 24d884eaa67485a12c8c3629c7f0fa5b8606a798 | [
"MIT"
] | null | null | null | class Student():
    def __init__(self, name, s_time=1):
        # name: student's name; s_time: years at UM so far (defaults to 1)
        self.name = name
        self.years_UM = s_time
        self.knowledge = 0
    def study(self):
        # Each study session adds one unit of knowledge.
        self.knowledge += 1
    def getKnowledge(self):
        # Accessor for the accumulated knowledge counter.
        return self.knowledge
    def year_at_umich(self):
        # Accessor for the number of years at the university.
        return self.years_UM
def update_counts(letras, cont_d):
    """Accumulate character frequencies from `letras` into the dict `cont_d`.

    Existing counts are incremented; unseen characters start at 1.
    """
    for c in letras:
        # BUGFIX: the old code tested `c == 0`, which is never true for a
        # character, so characters not already present in the dict were
        # silently dropped. Test dict membership instead.
        if c not in cont_d:
            cont_d[c] = 1
        else:
            cont_d[c] += 1
def mySum(lista):
    """Return the sum of the numbers in `lista` (0 for an empty list)."""
    return sum(lista)
# Smoke tests for mySum. BUGFIX: assert on the expected values — the old
# `assert mySum(lista2)` asserted on the truthiness of sum([]) == 0, which
# is falsy, so the script always crashed with AssertionError.
lista1 = [2, 5, 7]
assert mySum(lista1) == 14
lista2 = []
assert mySum(lista2) == 0
'''
conta = {'a': 3, 'b': 2}
print(conta)
update_counts('aaab', conta)
print(conta)
'''
16acd06eae4f5cf423a5d409978d743ff3cd7ccf | 935 | py | Python | ex084A18.py | gabrieleliasdev/python-cev | 45390963b5112a982e673f6a6866da422bf9ae6d | [
"MIT"
] | null | null | null | ex084A18.py | gabrieleliasdev/python-cev | 45390963b5112a982e673f6a6866da422bf9ae6d | [
"MIT"
] | null | null | null | ex084A18.py | gabrieleliasdev/python-cev | 45390963b5112a982e673f6a6866da422bf9ae6d | [
"MIT"
] | null | null | null | print(f"\n{'Exemple 01':=^40}\n")
dados = ["Pedro",25]
dados1 = ["Maria",19]
pessoas = list()
pessoas.append(dados[:])
pessoas.append(dados1[:])
print(pessoas)
print(len(pessoas))
print(pessoas[0][0])
print(pessoas[1][1])
print(f"\n{'Exemple 02':=^40}\n")
print("O fatimanento '[:]' da lista permite realizar uma cópia da mesma, cortando qualquer vinculo com a 'original.'")
galera = [["João", 19], ["Ana", 33], ["Joaquim", 13], ["Maria", 45]]
for p in galera:
print(f'{p[0]} tem {p[1]}.')
dado = []
totmai = totmen = 0
for c in range(0,3):
dado.append(str(input("Nome » ")))
dado.append(int(input("Idade » ")))
galera.append(dado[:])
dado.clear()
print(dado)
print(galera)
for p in galera:
if p[1] >= 21:
print(f"{p[0]} é maior de idade.")
totmai += 1
else:
print(f"{p[0]} é menor de idade.")
totmen += 1
print(f"Temos {totmai} maiores e {totmen} menores de idade.") | 23.375 | 118 | 0.594652 |
1e99d7efab81f203889cfd3bc198a91603f97d4d | 4,769 | py | Python | pywikibot/tools/_logging.py | notconfusing/pywikibot-fr-welcome-bot | 6e07b7e74166a47c9425816e79786308df369ac2 | [
"MIT"
] | 1 | 2020-01-03T11:52:01.000Z | 2020-01-03T11:52:01.000Z | pywikibot/tools/_logging.py | notconfusing/pywikibot-fr-welcome-bot | 6e07b7e74166a47c9425816e79786308df369ac2 | [
"MIT"
] | 2 | 2019-11-07T13:46:32.000Z | 2019-11-07T14:20:53.000Z | pywikibot/tools/_logging.py | notconfusing/pywikibot-fr-welcome-bot | 6e07b7e74166a47c9425816e79786308df369ac2 | [
"MIT"
] | 1 | 2020-04-14T14:52:24.000Z | 2020-04-14T14:52:24.000Z | # -*- coding: utf-8 -*-
"""Logging tools."""
#
# (C) Pywikibot team, 2009-2018
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, division, unicode_literals
import logging
import logging.handlers  # RotatingFileHandler lives in the handlers submodule
import os

from pywikibot.tools import PY2
# Logging module configuration
class RotatingFileHandler(logging.handlers.RotatingFileHandler):

    """Modified RotatingFileHandler supporting unlimited amount of backups."""

    def doRollover(self):
        """
        Modified naming system for logging files.

        Overwrites the default Rollover renaming by inserting the count number
        between file name root and extension. If backupCount is >= 1, the
        system will successively create new files with the same pathname as the
        base file, but with inserting ".1", ".2" etc. in front of the filename
        suffix. For example, with a backupCount of 5 and a base file name of
        "app.log", you would get "app.log", "app.1.log", "app.2.log", ...
        through to "app.5.log". The file being written to is always "app.log" -
        when it gets filled up, it is closed and renamed to "app.1.log", and if
        files "app.1.log", "app.2.log" etc. already exist, then they are
        renamed to "app.2.log", "app.3.log" etc. respectively.

        If backupCount is == -1 do not rotate but create new numbered
        filenames. The newest file has the highest number except some older
        numbered files where deleted and the bot was restarted. In this case
        the ordering starts from the lowest available (unused) number.
        """
        if self.stream:
            self.stream.close()
            self.stream = None
        root, ext = os.path.splitext(self.baseFilename)
        if self.backupCount > 0:
            # Classic bounded rotation: shift app.i -> app.(i+1), dropping
            # the oldest, then move the live file to app.1.
            for i in range(self.backupCount - 1, 0, -1):
                sfn = '%s.%d%s' % (root, i, ext)
                dfn = '%s.%d%s' % (root, i + 1, ext)
                if os.path.exists(sfn):
                    if os.path.exists(dfn):
                        os.remove(dfn)
                    os.rename(sfn, dfn)
            dfn = '%s.1%s' % (root, ext)
            if os.path.exists(dfn):
                os.remove(dfn)
            os.rename(self.baseFilename, dfn)
        elif self.backupCount == -1:
            # Unbounded rotation: pick the lowest unused number, remembering
            # it across rollovers (_lastNo) so the scan does not restart at 1.
            if not hasattr(self, '_lastNo'):
                self._lastNo = 1
            while True:
                fn = '%s.%d%s' % (root, self._lastNo, ext)
                self._lastNo += 1
                if not os.path.exists(fn):
                    break
            os.rename(self.baseFilename, fn)
        # Reopen the (now empty) base file for writing.
        self.mode = 'w'
        self.stream = self._open()

    def format(self, record):
        """Strip trailing newlines before outputting text to file."""
        # Warnings captured from the warnings system are not processed by
        # logoutput(), so the 'context' variables are missing.
        if record.name == 'py.warnings' \
           and 'caller_file' not in record.__dict__:
            assert len(record.args) == 1, \
                'Arguments for record is not correctly set'
            msg = record.args[0]
            # Synthesize the context fields the formatter expects from the
            # warning's own location information.
            record.__dict__['caller_file'] = record.pathname
            record.__dict__['caller_name'] = record.module
            record.__dict__['caller_line'] = record.lineno
            record.args = (msg,)
        text = logging.handlers.RotatingFileHandler.format(self, record)
        return text.rstrip()
class LoggingFormatter(logging.Formatter):

    """Format LogRecords for output to file.

    This formatter *ignores* the 'newline' key of the LogRecord, because
    every record written to a file must end with a newline, regardless of
    whether the output to the user's console does.
    """

    def __init__(self, fmt=None, datefmt=None, encoding=None):
        """Initializer with additional encoding parameter."""
        super(LoggingFormatter, self).__init__(fmt, datefmt)
        self._encoding = encoding

    def formatException(self, ei):
        r"""
        Convert exception trace to unicode if necessary.

        Make sure that the exception trace is converted to unicode.

        L{exceptions.Error} traces are encoded in our console encoding, which
        is needed for plainly printing them.  However, when logging them
        using logging.exception, the Python logging module will try to use
        these traces, and it will fail if they are console encoded strings.

        Formatter.formatException also strips the trailing \n, which we need.
        """
        trace = logging.Formatter.formatException(self, ei)
        # On Python 2 the trace may be a console-encoded byte string.
        if PY2 and isinstance(trace, bytes):
            trace = trace.decode(self._encoding)
        return trace + '\n'
| 39.090164 | 79 | 0.617111 |
93e7b5759aa758adb68f78514e91365dcabf0d2a | 14,673 | py | Python | gxpm.py | nandun/gxp | 8dd9d396102e254cb4712fe572b64e398a5f069b | [
"BSD-3-Clause"
] | 2 | 2020-03-16T11:37:13.000Z | 2020-05-15T10:10:56.000Z | gxpm.py | nandun/gxp | 8dd9d396102e254cb4712fe572b64e398a5f069b | [
"BSD-3-Clause"
] | null | null | null | gxpm.py | nandun/gxp | 8dd9d396102e254cb4712fe572b64e398a5f069b | [
"BSD-3-Clause"
] | 1 | 2017-05-12T02:42:35.000Z | 2017-05-12T02:42:35.000Z | # Copyright (c) 2009 by Kenjiro Taura. All rights reserved.
# Copyright (c) 2008 by Kenjiro Taura. All rights reserved.
# Copyright (c) 2007 by Kenjiro Taura. All rights reserved.
# Copyright (c) 2006 by Kenjiro Taura. All rights reserved.
# Copyright (c) 2005 by Kenjiro Taura. All rights reserved.
#
# THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
# EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
#
# Permission is hereby granted to use or copy this program
# for any purpose, provided the above notices are retained on all
# copies. Permission to modify the code and to distribute modified
# code is granted, provided the above notices are retained, and
# a notice that the code was modified is included with the above
# copyright notice.
#
# $Header: /cvsroot/gxp/gxp3/gxpm.py,v 1.11 2010/09/08 04:08:22 ttaauu Exp $
# $Name: $
#
def import_safe_pickler():
    """
    Return a working pickle module.

    Prefer cPickle when it exists and works (Python 2); fall back to the
    plain pickle module otherwise.  BUGFIX: the original executed
    ``import cPickle`` outside the try block, so on Python 3 (where
    cPickle no longer exists) the function raised ImportError instead of
    falling back to pickle.
    """
    try:
        import cPickle
        # sanity check: some broken installs import but cannot dump
        cPickle.dumps(None)
        return cPickle
    except Exception:
        import pickle
        return pickle
pickler = import_safe_pickler()
def unparse(m):
    """
    Serialize a message object into a (byte) string via the pickler.
    """
    return pickler.dumps(m)
def parse(s):
    """
    Deserialize a (byte) string back into a message object via the pickler.
    """
    return pickler.loads(s)
class exec_env:
    """
    Process execution environment: the working directory and environment
    variables to use when spawning a process.
    """
    def __init__(self):
        # working dir (None : do not change it)
        self.cwd = None
        # environment variables
        self.env = {}

    def show(self):
        """Return a human-readable description of this environment."""
        return ("cwd=%s, env=%s" % (self.cwd, self.env))
class target_tree:
    """
    A tree of daemons describing which daemons should deliver a message.
    """
    def __init__(self, name, hostname, target_label,
                 eflag, exec_idx, eenv, children):
        # gupid of the receiving daemon
        self.name = name
        # hostname of this daemon
        self.hostname = hostname
        # target label of it
        self.target_label = target_label
        # 1 if this daemon should deliver the msg
        self.eflag = eflag
        # sequential index of this daemon
        self.exec_idx = exec_idx
        # exec_env instance
        self.eenv = eenv
        # children daemons: a list of target_tree, or None meaning
        # "all children"
        self.children = children
        # number of daemons that should deliver the message, or None if
        # unknown (because some nodes have None as children)
        self.num_execs = None

    def count(self):
        """
        Return the number of nodes of the tree regardless of eflag,
        or None when some subtree has an unknown (None) children list.
        """
        if self.children is None:
            return None  # unknown
        total = 1
        for child in self.children:
            sub = child.count()
            if sub is None:
                return None
            total += sub
        return total

    def count_execs(self):
        """
        Return the number of nodes whose eflag is set, or None when
        some subtree has an unknown (None) children list.
        """
        if self.children is None:
            return None  # unknown
        total = self.eflag
        for child in self.children:
            sub = child.count_execs()
            if sub is None:
                return None
            total += sub
        return total

    def show(self):
        """
        Return a string representation of the tree.
        """
        if self.children is None:
            cs = None
        else:
            cs = [child.show() for child in self.children]
        eenv_show = "None" if self.eenv is None else self.eenv.show()
        return ("target_tree(%s, %s, %s, %s, %s, %s, %s)"
                % (self.name, self.hostname, self.target_label,
                   self.eflag, self.exec_idx, eenv_show, cs))

    def set_eflag(self, flag):
        """
        Recursively set eflag of all nodes to flag.
        """
        self.eflag = flag
        if self.children is not None:
            for child in self.children:
                child.set_eflag(flag)
def merge_target_tree(tgt1, tgt2):
    """
    Merge two target trees into a new tree rooted at tgt1's node.

    The merged root's eflag is the logical OR of both roots; children that
    appear in both trees (matched by name) are merged recursively, all
    others are kept as-is.
    """
    merged_eflag = tgt1.eflag or tgt2.eflag
    by_name = {}
    for child in tgt1.children + tgt2.children:
        if child.name in by_name:
            # both trees have a child of the same name: merge recursively
            by_name[child.name] = merge_target_tree(by_name[child.name], child)
        else:
            by_name[child.name] = child
    return target_tree(tgt1.name, tgt1.hostname, tgt1.target_label,
                       merged_eflag, None, tgt1.eenv, by_name.values())
class xxx_synchronize_message:
    """
    Payload carrying two trees used to synchronize gxpc views.
    NOTE(review): the 'xxx_' prefix presumably marks this as experimental /
    work-in-progress (see xxx_action_synchronize below) — confirm before use.
    """
    def __init__(self, peer_tree=None, exec_tree=None):
        # tree of peer daemons
        self.peer_tree = peer_tree
        # tree of executing nodes
        self.exec_tree = exec_tree
#
# actions
#
# an action describes an 'instruction' to a daemon node,
# such as "create a process of this command line",
# or "feed this msg to the process of this id"
#
class action:
    """Base class of every 'instruction' a daemon can receive (see the
    comment block above for the general notion of an action)."""
    pass
class action_quit(action):
    """
    daemons that receive this action should quit
    """
    pass
class action_ping(action):
    """
    daemons that receive this action immediately respond
    with basic information about the daemon
    (see event_info_pong for the response format)
    """
    def __init__(self, level):
        """
        level : specifies how detailed the response should be
        """
        self.level = level
class action_createproc(action):
    """
    receiving this action, the daemon should create
    a process with a specified command line (cmd),
    working dir (cwd), environment (env), relative id
    (rid), and open file descriptors (pipes).
    relative id is an id given to the process, unique
    within the task the process belongs to.
    for the format of "pipes", see gxpc.py's add_down_pipe method.
    """
    def __init__(self, rid, cwds, env, cmd, pipes, rlimits):
        # relative process id (unique within the task)
        self.rid = rid
        # list of candidate working dirs, or None
        self.cwds = cwds
        # environment variables for the new process
        self.env = env
        # command line to execute
        self.cmd = cmd
        # file-descriptor/pipe setup (see gxpc.py add_down_pipe)
        self.pipes = pipes
        # resource limits to apply (cf. --rlimit option)
        self.rlimits = rlimits
class action_createpeer(action):
    """
    similar to action_createproc. the only difference
    is it should create a child daemon, so it should
    notify the parent when the daemon is brought up.
    """
    def __init__(self, rid, cwds, env, cmd, pipes, rlimits):
        # relative process id of the child daemon
        self.rid = rid
        # list of candidate working dirs, or None
        self.cwds = cwds
        # environment variables for the child daemon
        self.env = env
        # command line to launch the child daemon
        self.cmd = cmd
        # file-descriptor/pipe setup (see gxpc.py add_down_pipe)
        self.pipes = pipes
        # resource limits to apply
        self.rlimits = rlimits
class action_feed(action):
    """
    an instruction to feed a string (payload)
    to file descriptor (fd) of a process whose
    relative id is rid.
    """
    def __init__(self, rid, fd, payload):
        # relative id of the target process
        self.rid = rid
        # file descriptor to write into
        self.fd = fd
        # bytes/string to deliver
        self.payload = payload
class action_close(action):
    """
    an instruction to close file descriptor (fd)
    of a process whose relative id is rid.
    """
    def __init__(self, rid, fd):
        # relative id of the target process
        self.rid = rid
        # file descriptor to close
        self.fd = fd
class action_sig(action):
    """
    an instruction to send a signal (sig)
    to a process whose relative id is rid.
    """
    def __init__(self, rid, sig):
        # relative id of the target process
        self.rid = rid
        # signal to deliver
        self.sig = sig
class action_chdir(action):
    """
    an instruction to change the daemon's working dir to TO.
    currently not used.
    """
    def __init__(self, to):
        # destination directory
        self.to = to
class action_export(action):
    """
    an instruction to set the daemon's environment
    variable (var) to val.
    """
    def __init__(self, var, val):
        # environment variable name
        self.var = var
        # value to assign
        self.val = val
class action_trim(action):
    """
    an instruction to trim (disconnect) children that
    do not receive this msg.
    """
    def __init__(self):
        pass
class action_set_max_buf_len(action):
    """
    an instruction to set the daemon's maximum
    buffer length.
    """
    def __init__(self, max_buf_len):
        # new maximum buffer length (in bytes, presumably — confirm in gxpd.py)
        self.max_buf_len = max_buf_len
class action_prof_start(action):
    """
    an instruction to start profiling
    """
    def __init__(self, file):
        # file the profiler output should be written to
        self.file = file
class action_prof_stop(action):
    """
    an instruction to stop profiling
    """
    pass
class action_set_log_level(action):
    """
    an instruction to set the daemon's log level
    """
    def __init__(self, level):
        # new log verbosity level
        self.level = level
class action_set_log_base_time(action):
    """
    an instruction to (re)set the daemon's log base time
    (timestamps in logs are relative to it)
    """
    pass
class action_reclaim_task(action):
    """an instruction to reclaim (clean up) the tasks whose ids are listed."""
    def __init__(self, target_tids):
        # task ids to reclaim
        self.target_tids = target_tids
# to synchronize gxpcs
class xxx_action_synchronize(action, xxx_synchronize_message):
    """An action carrying an xxx_synchronize_message payload, used to
    synchronize gxpcs (experimental, per the 'xxx_' prefix)."""
    pass
#
# commands
#
#
# clause
#
# clause is a list of actions with a condition under which
# those actions should be executed.
#
class clausexxx:
    """
    an instruction that says "do those actions
    when your name (gupid) matches a regular expression ON"
    """
    def __init__ (self, on, actions):
        # regular expression matched against the daemon's gupid
        self.on = on # regular exp of gupid
        # list of action instances to execute on a match
        self.actions = actions
#
# down msg
#
#
# How long the root daemon keeps the connection to the client (gxpc.py)
# open — see the keep_connection field of class down below.
keep_connection_never = 0       # close immediately
keep_connection_until_fin = 1   # close once all tasks are gone
keep_connection_forever = 2     # never close from this side
class down:
    """A downward message: instructions flowing from the client/parent
    toward the daemons, addressed via a target tree."""
    def __init__(self, target, tid, persist, keep_connection, gcmds):
        # target tree (target_tree instance)
        self.target = target
        # task id this msg talks about
        self.tid = tid
        # 1 if the task should persist even if its processes are all gone
        self.persist = persist
        # see the keep_connection_* constants above (0, 1, or 2).
        # specify what the root daemon does to the connection to the
        # client (gxpc.py) process.
        #   never     : immediately close it
        #   until_fin : keep it until tasks are gone. close it when
        #               tasks are gone
        #   forever   : keep forever (never close from this side)
        self.keep_connection = keep_connection
        # list of list of clauses
        self.gcmds = gcmds
#
# event
#
# describes some events that occurred around the daemon,
# such as "a process finished", and "a process outputs
# this". besides, it generally describes information
# from daemons to the client (gxpc.py).
#
class event:
    """Base class of every upward notification from a daemon (see the
    comment block above)."""
    pass
class event_info(event):
    """
    low level messages such as error messages
    """
    def __init__(self, status, msg):
        """
        status : status of gxpd
        msg : whatever string a daemon wishes to deliver
        """
        self.status = status
        self.msg = msg
class event_info_pong(event_info):
    """
    response to a ping action (action_ping).
    """
    def __init__(self, status, msg,
                 targetlabel, peername, hostname,
                 parent, children, children_in_progress):
        event_info.__init__(self, status, msg)
        # target label of the daemon
        self.targetlabel = targetlabel
        # gupid of the responding daemon
        self.peername = peername
        # hostname of the responding daemon
        self.hostname = hostname
        # parent gupid
        self.parent = parent
        # children gupids
        self.children = children
        # children whose bring-up is still in progress
        self.children_in_progress = children_in_progress
class event_io(event):
    """
    an event indicating a process or a child gxp says something.
    """
    def __init__(self, src, kind, rid, pid, fd, payload, err_msg):
        # source of the output: proc or peer
        self.src = src
        # OK, EOF, ERROR, TIMEOUT
        self.kind = kind
        # relative process ID within a task
        self.rid = rid
        # OS process id
        self.pid = pid
        # file descriptor (channel name) the output came from
        self.fd = fd
        # string that was output
        self.payload = payload
        # string describing the error, if any
        self.err_msg = err_msg
class event_die(event):
    """
    an event indicating a process is dead.
    """
    def __init__(self, src, rid, pid, status, rusage, time_start, time_end):
        # src : proc or peer
        self.src = src
        # relative process ID within a task
        self.rid = rid
        # OS process id
        self.pid = pid
        # exit status (return value of waitpid)
        self.status = status
        # rusage of the process
        self.rusage = rusage
        # local time (via time.time()) at which the process was
        # started/finished
        self.time_start = time_start
        self.time_end = time_end
class event_peerstatus(event):
    """
    an event indicating a peer status (NG/OK) becomes available
    """
    def __init__(self, peername, target_label, hostname, status, parent_name, rid):
        # gupid of the child gxpd in question
        self.peername = peername
        # its target label
        self.target_label = target_label
        # its hostname
        self.hostname = hostname
        # OK or NG (bring-up succeeded / failed)
        self.status = status
        # gupid of its parent daemon
        self.parent_name = parent_name
        # relative id of the child daemon
        self.rid = rid
class event_fin(event):
    """
    an event indicating that no processes of the task
    are currently left under the sender's subtree.
    used to detect a task has finished.
    """
    def __init__(self, weight):
        # contribution of this subtree to the task-completion count
        # NOTE(review): exact semantics of weight not evident here —
        # confirm against gxpd.py/gxpc.py.
        self.weight = weight
class event_nopeersinprogress(event):
    """
    similar to event_fin, but indicates that no
    gxpd processes of the task are in progress
    under the sender's subtree.
    used to detect an explore has finished.
    """
    pass
# to synchronize gxpcs
class event_invalidate_view(event):
    """Event telling gxpcs their cached view is stale (used to
    synchronize gxpcs); currently carries no payload."""
    def __init__(self): # peer_tree, exec_tree
        pass
#
# an upward msg (from down to up)
#
class up:
    """An upward message (daemon -> parent/client) wrapping one event."""
    def __init__(self, gupid, tid, event):
        # gupid of the daemon the event originates from
        self.gupid = gupid
        # task id the event belongs to
        self.tid = tid
        # the wrapped event instance
        self.event = event
class syn:
    """Field-for-field identical to class up.
    NOTE(review): presumably a synchronous variant of an upward message;
    its distinct purpose is not evident from this file alone."""
    def __init__(self, gupid, tid, event):
        # gupid of the daemon the event originates from
        self.gupid = gupid
        # task id the event belongs to
        self.tid = tid
        # the wrapped event instance
        self.event = event
# $Log: gxpm.py,v $
# Revision 1.11 2010/09/08 04:08:22 ttaauu
# a new job scheduling framework (gxpc js). see ChangeLog 2010-09-08
#
# Revision 1.10 2010/05/25 18:13:58 ttaauu
# support --translate_dir src,dst1,dst2,... and associated changes. ChangeLog 2010-05-25
#
# Revision 1.9 2010/05/20 14:56:56 ttaauu
# e supports --rlimit option. e.g., --rlimit rlimit_as:2g ChangeLog 2010-05-20
#
# Revision 1.8 2010/05/19 03:41:10 ttaauu
# gxpd/gxpc capture time at which processes started/ended at remote daemons. xmake now receives and displays them. xmake now never misses IO from jobs. ChangeLog 2010-05-19
#
# Revision 1.7 2009/09/27 17:15:14 ttaauu
# added comment on gxpm.py
#
# Revision 1.6 2009/09/17 18:47:53 ttaauu
# ioman.py,gxpm.py,gxpd.py,gxpc.py,xmake: changes to track rusage of children and show them in state.txt
#
# Revision 1.5 2009/06/06 14:06:23 ttaauu
# added headers and logs
#
| 27.632768 | 172 | 0.614735 |
57f38f4179516f8470fdac9984a67a86bd348b74 | 329 | py | Python | optuna/multi_objective/__init__.py | nzw0301/optuna | 0875fd7d307c6f0bafccdb8691ae6bbb5cb7837c | [
"MIT"
] | 4,950 | 2019-11-15T07:35:51.000Z | 2022-03-31T10:32:42.000Z | optuna/multi_objective/__init__.py | nzw0301/optuna | 0875fd7d307c6f0bafccdb8691ae6bbb5cb7837c | [
"MIT"
] | 2,490 | 2019-11-15T07:06:20.000Z | 2022-03-31T23:52:45.000Z | optuna/multi_objective/__init__.py | sile/optuna | 52f585c3281b84db0df4f2a621e15e4848ecad82 | [
"MIT"
] | 621 | 2019-11-15T11:26:57.000Z | 2022-03-28T11:46:34.000Z | from optuna.multi_objective import samplers # NOQA
from optuna.multi_objective import study # NOQA
from optuna.multi_objective import trial # NOQA
from optuna.multi_objective import visualization # NOQA
from optuna.multi_objective.study import create_study # NOQA
from optuna.multi_objective.study import load_study # NOQA
| 47 | 61 | 0.829787 |
624b2292355be6da314c7a1ac3f0ce3bad4e557e | 4,517 | py | Python | wagtailvideos/views/chooser.py | mariocesar/wagtailvideos | e085cb4fc9e57b46fefa29447b860ef4d286ef15 | [
"BSD-3-Clause"
] | null | null | null | wagtailvideos/views/chooser.py | mariocesar/wagtailvideos | e085cb4fc9e57b46fefa29447b860ef4d286ef15 | [
"BSD-3-Clause"
] | null | null | null | wagtailvideos/views/chooser.py | mariocesar/wagtailvideos | e085cb4fc9e57b46fefa29447b860ef4d286ef15 | [
"BSD-3-Clause"
] | null | null | null | import wagtail
from django.core.paginator import Paginator
from django.shortcuts import get_object_or_404, render
from django.urls import reverse
from wagtail.admin.forms.search import SearchForm
from wagtail.admin.modal_workflow import render_modal_workflow
from wagtail.core.models import Collection
from wagtail.images.views.chooser import get_chooser_js_data
from wagtail.search import index as search_index
from wagtailvideos.forms import get_video_form
from wagtailvideos.models import Video
from wagtailvideos.permissions import permission_policy
if wagtail.__version__ >= '2.7':
from wagtail.admin.models import popular_tags_for_model
from wagtail.admin.auth import PermissionPolicyChecker
else:
from wagtail.admin.utils import PermissionPolicyChecker, popular_tags_for_model
permission_checker = PermissionPolicyChecker(permission_policy)
def get_video_json(video):
    """
    helper function: given a video, return the json dict to pass back
    to the video chooser panel
    """
    thumbnail_url = video.thumbnail.url if video.thumbnail else ''
    return {
        'id': video.id,
        'edit_link': reverse('wagtailvideos:edit', args=(video.id,)),
        'title': video.title,
        'preview': {'url': thumbnail_url},
    }
def chooser(request):
    """Render the video chooser modal.

    When the request carries search / pagination / tag / collection
    parameters (i.e. it was triggered from inside the modal), only the
    results fragment is rendered; otherwise the full modal is returned.
    """
    VideoForm = get_video_form(Video)
    uploadform = VideoForm()
    videos = Video.objects.order_by('-created_at')
    q = None
    if any(param in request.GET for param in ('q', 'p', 'tag', 'collection_id')):
        # triggered from search, pagination or 'popular tags':
        # just render the results.html fragment
        collection_id = request.GET.get('collection_id')
        if collection_id:
            videos = videos.filter(collection=collection_id)

        searchform = SearchForm(request.GET)
        is_searching = searchform.is_valid()
        if is_searching:
            q = searchform.cleaned_data['q']
            videos = videos.search(q)

        tag_name = request.GET.get('tag')
        if tag_name:
            videos = videos.filter(tags__name=tag_name)

        # Pagination
        page = Paginator(videos, per_page=12).get_page(request.GET.get('p'))
        return render(request, "wagtailvideos/chooser/results.html", {
            'videos': page,
            'is_searching': is_searching,
            'query_string': q,
        })

    searchform = SearchForm()
    collections = Collection.objects.all()
    if len(collections) < 2:
        # a single (root) collection is not worth offering a filter for
        collections = None

    page = Paginator(videos, per_page=12).get_page(request.GET.get('p'))
    return render_modal_workflow(request, 'wagtailvideos/chooser/chooser.html', None, {
        'videos': page,
        'uploadform': uploadform,
        'searchform': searchform,
        'is_searching': False,
        'query_string': q,
        'popular_tags': popular_tags_for_model(Video),
        'collections': collections,
    }, json_data=get_chooser_js_data())
def video_chosen(request, video_id):
    """Respond to the modal workflow with the video the user selected."""
    video = get_object_or_404(Video, id=video_id)
    payload = {
        'step': 'video_chosen',
        'result': get_video_json(video),
    }
    return render_modal_workflow(request, None, json_data=payload)
@permission_checker.require('add')
def chooser_upload(request):
    """Handle the upload form inside the video chooser modal.

    On a valid POST, saves the new video (owned by the requesting user),
    reindexes it, and returns the 'video_chosen' response; otherwise
    re-renders the chooser with the (possibly bound, invalid) form.
    """
    VideoForm = get_video_form(Video)
    searchform = SearchForm()

    # BUGFIX: was `if request.POST:` — that tests the QueryDict's
    # truthiness, so a POST whose body happens to be empty was treated
    # like a GET. The HTTP method is the correct discriminator.
    if request.method == 'POST':
        video = Video(uploaded_by_user=request.user)
        form = VideoForm(request.POST, request.FILES, instance=video)
        if form.is_valid():
            video.save()
            # Reindex the video to make sure all tags are indexed
            search_index.insert_or_update_object(video)
            return render_modal_workflow(
                request, None, json_data={
                    'step': 'video_chosen',
                    'result': get_video_json(video)
                }
            )
    else:
        form = VideoForm()

    videos = Video.objects.order_by('title')
    paginator = Paginator(videos, per_page=12)
    page = paginator.get_page(request.GET.get('p'))
    return render_modal_workflow(
        request, 'wagtailvideos/chooser/chooser.html', None,
        template_vars={'videos': page, 'uploadform': form, 'searchform': searchform},
        json_data=get_chooser_js_data()
    )
| 31.151724 | 87 | 0.651317 |
f4f1611d9e53743d40cfb4ec0e5c6e0c15eb5534 | 19,503 | py | Python | src/capsule.py | CTinRay/ADL-Final | 111029d66ddf5beba175efd5569cd986f4cea827 | [
"MIT"
] | null | null | null | src/capsule.py | CTinRay/ADL-Final | 111029d66ddf5beba175efd5569cd986f4cea827 | [
"MIT"
] | null | null | null | src/capsule.py | CTinRay/ADL-Final | 111029d66ddf5beba175efd5569cd986f4cea827 | [
"MIT"
] | null | null | null | import pdb
import math
import tensorflow as tf
EPSILON = 1e-5
def conv_capsule(inputs, activation, kernel_size, stride, channels_out,
                 routing_iters=3):
    """Build capsule convolution layer.

    Args:
        inputs (tensor): Pose of lower layer.
            Shape: [batch, input_height, input_width, channels_in,
                    pose_height, pose_width]
        activation (tensor): Activation of lower layer.
            Shape: [batch, input_height, input_width, channels_in]
        kernel_size (int): Size of kernel.
        stride (int): Size of stride.
        channels_out (int): Number of output channel.
        routing_iters (int): Number of routing iterations,
    Returns:
        pose (tensor): Output pose tensor.
            Shape: [batch, output_height, output_width, channels_out,
                    pose_height, pose_width]
        activation (tensor): Output activation tensor.
            Shape: [batch, output_height, output_width, channels_out]
    """
    batch_size = tf.shape(inputs)[0]
    input_height = int(inputs.shape[1])
    input_width = int(inputs.shape[2])
    channels_in = int(inputs.shape[3])
    pose_shape = (int(inputs.shape[4]), int(inputs.shape[5]))
    # standard "VALID" convolution output size
    output_height = (input_height - kernel_size) // stride + 1
    output_width = (input_width - kernel_size) // stride + 1
    # flatten inputs to 4d shape
    # so we can use the image convolution/patch-extraction ops.
    inputs = tf.reshape(
        inputs,
        [batch_size,
         input_height,
         input_width,
         channels_in * pose_shape[0] * pose_shape[1]])
    # collect pose matrices convolved by upper layer capsules
    conv_poses = tf.extract_image_patches(
        inputs,
        [1, kernel_size, kernel_size, 1],
        [1, stride, stride, 1],
        [1, 1, 1, 1],
        'VALID')
    conv_poses.set_shape([
        None,
        output_height,
        output_width,
        kernel_size ** 2 * channels_in
        * pose_shape[0] * pose_shape[1]])
    # separate out the dimensions for pose matrices, so we can do matrix ops.
    conv_poses = tf.reshape(
        conv_poses,
        [batch_size,
         output_height,
         output_width,
         kernel_size ** 2 * channels_in,
         pose_shape[0],
         pose_shape[1]])
    # repeat poses for each output channel
    conv_poses = tf.tile(conv_poses, [1, 1, 1,
                                      channels_out,
                                      1, 1])
    # conv_poses.shape == [
    #     batch,
    #     output_height,
    #     output_width,
    #     channels_out * kernel_size ** 2 * channels_in,
    #     pose_shape[0],
    #     pose_shape[1]]
    # weights of transform matrices
    # NOTE(review): the transform matrices are pose_shape[1] x pose_shape[1]
    # here, while class_capsule below uses pose_shape[0] x pose_shape[0].
    # Both coincide for the usual square (e.g. 4x4) pose; confirm intent
    # before using non-square poses.
    transform_matrices = tf.get_variable(
        'transform_matrics',
        shape=[channels_out,
               kernel_size ** 2,
               channels_in,
               pose_shape[1],
               pose_shape[1]],
        initializer=tf.truncated_normal_initializer())
    transform_matrices = tf.reshape(
        transform_matrices,
        [1,  # batch_size
         1,
         1,
         channels_out * kernel_size ** 2 * channels_in,
         pose_shape[1],
         pose_shape[1]])
    # matrix transformation
    tiled_transform_matrics = tf.tile(
        transform_matrices,
        [batch_size,
         output_height,
         output_width,
         1, 1, 1])
    # now the shape of transform matrices should be same as conv_poses
    # tiled_transform_matrics.shape[:-2] == conv_poses.shape[:-2]
    # so we can do matrix transformation
    conv_votes = tf.matmul(tiled_transform_matrics, conv_poses)
    # collect activation convolved by upper layer capsules
    conv_active = tf.extract_image_patches(
        activation,
        [1, kernel_size, kernel_size, 1],
        [1, stride, stride, 1],
        [1, 1, 1, 1],
        'VALID')
    # conv_active.shape == [
    #     batch,
    #     output_height,
    #     output_width,
    #     kernel_size ** 2 * channels_in]
    # start doing EM routing
    conv_votes = tf.reshape(
        conv_votes,
        [batch_size,
         output_height,
         output_width,
         channels_out,
         kernel_size ** 2,
         channels_in,
         pose_shape[0] * pose_shape[1]])
    conv_active = tf.reshape(
        conv_active,
        [batch_size,
         output_height,
         output_width,
         kernel_size ** 2,
         channels_in])
    # variables (beta_v/beta_a) inside routing are shared across calls
    with tf.variable_scope('em_routing', reuse=tf.AUTO_REUSE):
        output_poses, output_actives = conv_em_routing(
            conv_active, conv_votes, stride, routing_iters)
    # conv_votes = conv_votes * tf.expand_dims(tf.expand_dims(conv_active, 3), -1)
    # output_poses = tf.reduce_mean(conv_votes, [-2, -3])
    output_poses = tf.reshape(
        output_poses,
        [batch_size,
         output_height,
         output_width,
         channels_out,
         pose_shape[0],
         pose_shape[1]])
    # output_actives = tf.reduce_mean(output_poses ** 2, [-1, -2])
    return output_poses, output_actives
def conv_em_routing(activation, conv_votes, stride, routing_iters):
    """EM routing algorithm of CapsuleEM.

    Args:
        activation (tensor): Activation of lower capsules.
            Shape: [batch, output_height, output_width,
                    kernel_size ** 2, channels_in]
        conv_votes (tensor): Votes of lower capsuls.
            Shape: [batch, output_height, output_width, channels_out,
                    kernel_size ** 2, channels_in,
                    pose_height * pose_width]
        stride (int): Stride of the convolution layer before routing.
        routing_iters (int): Number of routing iterations to do.
            Must be >= 1 (m/a_prime are produced inside the loop).
    Returns:
        m (tensor): Pose of capsules.
            Shape: [batch, output_height, output_width, channels_out,
                    pose_height, pose_width]
        a_prime (tensor): Activation of capsules.
            Shape: [batch, output_height, output_width, channels_out]
    """
    # initialize r uniformly, then renormalize so each lower capsule's
    # contributions sum to 1 across the upper capsules that cover it
    r = tf.ones([tf.shape(conv_votes)[0],     # batch
                 int(conv_votes.shape[1]),    # output_height
                 int(conv_votes.shape[2]),    # output_width
                 int(conv_votes.shape[3]),    # channels_out
                 int(conv_votes.shape[4]),    # kernel_size ** 2
                 int(conv_votes.shape[5])],   # channels_in
                name='r_init')
    r = _renorm_r(r, stride)
    # start EM loop
    # [TODO] Schedule inv_tempt (lambda).
    inv_tempt = 1
    for i in range(routing_iters):
        m, s, a_prime = _conv_m_step(r, activation, conv_votes, inv_tempt)
        r = _conv_e_step(m, s, a_prime, conv_votes, stride)
    return m, a_prime
def _renorm_r(r, stride):
    """Renorm r so that, for each capsule in the lower layer, its
    contributions to the upper layer capsules sum to 1.

    Args:
        r (tensor): Expected portion of lower capsule that belong to
            the upper capsule.
            Shape: [batch, output_height, output_width, channels_out,
                    kernel_size ** 2, channels_in]
        stride (int): Stride of the convolution layer before routing.
    Returns:
        tensor: r divided by each lower capsule's total contribution;
            same shape as r.
    """
    # kernel size is recovered from r's kernel_size ** 2 axis
    kernel_size = math.sqrt(int(r.shape[-2]))
    assert kernel_size.is_integer()
    kernel_size = int(kernel_size)
    batch_size = tf.shape(r)[0]
    r_height = int(r.shape[1])
    r_width = int(r.shape[2])
    # spatial size of the (lower) layer before the VALID convolution
    origin_h = r_height * stride + (kernel_size - 1)
    origin_w = r_width * stride + (kernel_size - 1)
    channels_out = int(r.shape[3])
    channels_in = int(r.shape[-1])
    # collect indices of higher level units that convolve lower level
    # unit at i, j.
    higher_indices = [[[] for w in range(origin_w)]
                      for h in range(origin_h)]
    for i in range(r_height):
        for j in range(r_width):
            for ki in range(kernel_size):
                for kj in range(kernel_size):
                    higher_indices[i * stride + ki][j * stride + kj].append(
                        (i, j, ki * kernel_size + kj))
    # the max number of upper units that convolve a lower unit
    max_convolved = max([max(map(len, arr)) for arr in higher_indices])
    # keep only the batch dimension so we can use tf.gather easily
    r_flattern = tf.reshape(r, [batch_size,
                                r_height * r_width
                                * channels_out
                                * kernel_size ** 2
                                * channels_in])
    # pad r with 0 so zero_index will point to 0
    r_flattern = tf.concat([r_flattern, tf.zeros([batch_size, 1])], axis=-1)
    # index that points to the 0 padding
    zero_index = int(r_flattern.shape[-1]) - 1
    sum_r_indices = []
    for i in range(origin_h):
        for j in range(origin_w):
            for cout in range(channels_out):
                for k in range(max_convolved):
                    for cin in range(channels_in):
                        if k < len(higher_indices[i][j]):
                            higher_i, higher_j, k_shift \
                                = higher_indices[i][j][k]
                            # flat index into r_flattern for
                            # (higher_i, higher_j, cout, k_shift, cin)
                            # NOTE(review): the row term multiplies by
                            # r_height where row-major flattening of a
                            # [r_height, r_width, ...] tensor would use
                            # r_width — harmless only when
                            # r_height == r_width; confirm.
                            index = ((((higher_i * r_height + higher_j)
                                       * channels_out + cout)
                                      * kernel_size ** 2 + k_shift)
                                     * channels_in + cin)
                            sum_r_indices.append(index)
                        else:
                            # for border units that are convolved fewer times
                            # append the padding index that points to 0
                            sum_r_indices.append(zero_index)
    # gather the r values that each lower layer unit i, j contributes
    r_gather = tf.gather(r_flattern, sum_r_indices, axis=-1)
    r_gather = tf.reshape(r_gather, [-1,
                                     origin_h,
                                     origin_w,
                                     channels_out * max_convolved,
                                     channels_in])
    # summation of r per lower unit
    r_sum = tf.reduce_sum(r_gather, -2)
    # collect r_sum back into convolution-patch layout
    conv_r_sum = tf.extract_image_patches(
        r_sum,
        [1, kernel_size, kernel_size, 1],
        [1, stride, stride, 1],
        [1, 1, 1, 1],
        'VALID')
    # reshape the summation of r to broadcast against r
    conv_r_sum = tf.reshape(
        conv_r_sum,
        [batch_size,
         r_height,
         r_width,
         kernel_size ** 2,
         channels_in])
    # renorm r by dividing the original r by conv_r_sum
    conv_r_renormed = r / (tf.expand_dims(conv_r_sum, 3) + EPSILON)
    assert conv_r_renormed.shape[1:] == r.shape[1:]
    return conv_r_renormed
def _conv_e_step(m, s, a_prime, v, stride):
    """E-step of the EM algorithm.

    Note that only VALID padding is supported (when renorming r).
    Args:
        m (tensor): Pose of capsules.
            Shape: [batch, output_height, output_width, channels_out,
                    pose_height, pose_width]
        s (tensor): Standard deviation of capsules.
            Shape: [batch, output_height, output_width, channels_out,
                    pose_height * pose_width]
        a_prime (tensor): Activation of capsules.
            Shape: [batch, output_height, output_width, channels_out]
        v (tensor): Votes of lower capsuls.
            Shape: [batch, output_height, output_width, channels_out,
                    kernel_size ** 2, channels_in,
                    pose_height * pose_width]
        stride (int): Stride of the convolution layer before routing.
    Returns:
        r (tensor): Expected portion of lower capsule that belong to
            the upper capsule.
            Shape: [batch, output_height, output_width, channels_out,
                    kernel_size ** 2, channels_in]
    """
    # Gaussian log-density exponent of each vote under (m, s).
    # NOTE(review): the denominator uses 2*s (std), where a Gaussian
    # density would use 2*s**2 (variance) — confirm whether intentional.
    p_exp = - tf.reduce_sum((v - tf.expand_dims(tf.expand_dims(m, 4), 5)) ** 2
                            / (2 * tf.expand_dims(tf.expand_dims(s, 4), 5)
                               + EPSILON),
                            -1)
    assert p_exp.shape[1:] == [
        v.shape[1],  # output_height
        v.shape[2],  # output_width
        v.shape[3],  # channels_out
        v.shape[4],  # kernel_size ** 2
        v.shape[5]]  # channels_in
    # normalization constant prod(2*pi*s^2) of the Gaussian
    p_denominator = tf.expand_dims(
        tf.expand_dims(tf.reduce_prod(2 * math.pi * s ** 2, -1),
                       -1),
        -1)
    p = tf.exp(p_exp) / (p_denominator + EPSILON)
    # weight densities by the upper capsule's activation, then renormalize
    r = p * tf.expand_dims(tf.expand_dims(a_prime, -1), -1)
    r = _renorm_r(r, stride)
    return r
def _conv_m_step(r, a, v, inv_tempt):
    """M-step of the EM algorithm.

    Args:
        r (tensor): Expected portion of lower capsule that belong to
            the upper capsule.
            Shape: [batch, output_height, output_width, channels_out,
                    kernel_size ** 2, channels_in]
        a (tensor): Activation of lower capsule.
            Shape: [batch, output_height, output_width,
                    kernel_size ** 2, channels_in]
        v (tensor): Votes of lower capsuls.
            Shape: [batch, output_height, output_width, channels_out,
                    kernel_size ** 2, channels_in,
                    pose_height * pose_width]
        inv_tempt (float): Inverse temperature (lambda).
    Returns:
        m (tensor): Pose of capsules.
            Shape: [batch, output_height, output_width, channels_out,
                    pose_height, pose_width]
        s (tensor): Standard deviation of capsules.
            Shape: [batch, output_height, output_width, channels_out,
                    pose_height * pose_width]
        a_prime (tensor): Activation of capsules.
            Shape: [batch, output_height, output_width, channels_out]
    """
    # weight assignments by the lower capsules' activations
    r_prime = r * tf.expand_dims(a, 3)
    assert r_prime.shape[1:] == r.shape[1:]
    # weighted sum of votes over (kernel, channels_in)
    sum_rv = tf.reduce_sum(tf.expand_dims(r_prime, -1) * v, axis=[-2, -3])
    assert sum_rv.shape[1:] == [
        v.shape[1],   # output_height
        v.shape[2],   # output_width
        v.shape[3],   # channels_out
        v.shape[-1]]  # pose_height * pose_width
    sum_r = tf.expand_dims(
        tf.reduce_sum(tf.reduce_sum(r_prime, 5), 4), -1)
    assert sum_r.shape[1:] == [v.shape[1],  # output_height
                               v.shape[2],  # output_width
                               v.shape[3],  # channels_out
                               1]           # for broadcast
    # mean of votes = new pose
    m = tf.div(sum_rv, sum_r + EPSILON, name='m')
    assert m.shape[1:] == sum_rv.shape[1:]
    # weighted squared deviation of votes from the mean
    r_square_v_minus_m = \
        tf.expand_dims(r_prime, -1) \
        * (v - (tf.expand_dims(tf.expand_dims(m, 4), 5))) ** 2
    sum_r_square_v_minus_m = tf.reduce_sum(
        tf.reduce_sum(r_square_v_minus_m, 5), 4)
    assert sum_r_square_v_minus_m.shape[1:] == m.shape[1:]
    square_s = sum_r_square_v_minus_m / (sum_r + EPSILON)
    s = tf.sqrt(square_s, name='s')
    assert s.shape[1:] == m.shape[1:]
    # cost of describing the assigned votes (beta_v, beta_a are learned)
    beta_v = tf.get_variable('beta_v', [1])
    # with tf.control_dependencies([tf.Assert(tf.reduce_min(s) > 0, [s])]):
    cost = (beta_v + tf.log(s + EPSILON)) * sum_r
    beta_a = tf.get_variable('beta_a', [1])
    a_prime = tf.sigmoid(inv_tempt *
                         (beta_a - tf.reduce_sum(cost, -1)))
    assert a_prime.shape[1:] == [v.shape[1],  # output_height
                                 v.shape[2],  # output_width
                                 v.shape[3]]  # channels_out
    return m, s, a_prime
def class_capsule(inputs, activation, n_classes,
                  routing_iters=1):
    """Build class capsule layer.

    Args:
        inputs (tensor): Pose of lower layer.
            Shape: [batch, input_height, input_width, channels_in,
                    pose_height, pose_width]
        activation (tensor): Activation of lower layer.
            Shape: [batch, input_height, input_width, channels_in]
        n_classes (int): Number of output classes.
        routing_iters (int): Number of routing iterations,
    Returns:
        pose (tensor): Output pose tensor.
            Shape: [batch, n_classes, pose_height, pose_width]
        activation (tensor): Output activation tensor.
            Shape: [batch, n_classes]
    """
    batch_size = tf.shape(inputs)[0]
    input_height = int(inputs.shape[1])
    input_width = int(inputs.shape[2])
    channels_in = int(inputs.shape[3])
    pose_shape = int(inputs.shape[4]), int(inputs.shape[5])
    # copy pose of lower level capsules n_classes times
    poses = tf.tile(tf.expand_dims(inputs, axis=1),
                    [1, n_classes, 1, 1, 1, 1, 1])
    assert poses.shape[1:] == [n_classes, input_height, input_width,
                               channels_in, pose_shape[0], pose_shape[1]]
    # weights of transform matrices (shared across spatial positions)
    transform_matrices = tf.get_variable(
        'transform_matrics',
        shape=[n_classes,
               channels_in,
               pose_shape[0],
               pose_shape[0]],
        initializer=tf.truncated_normal_initializer())
    # matrix transformation
    tiled_transform_matrics = tf.tile(
        tf.reshape(transform_matrices,
                   [1,
                    n_classes,
                    1,
                    1,
                    channels_in,
                    pose_shape[0],
                    pose_shape[0]]),
        [batch_size,
         1,  # n_classes
         input_height,
         input_width,
         1,  # channels_in
         1,  # pose_height
         1])  # pose_height
    assert tiled_transform_matrics.shape[1:] == [n_classes,
                                                 input_height,
                                                 input_width,
                                                 channels_in,
                                                 pose_shape[0],
                                                 pose_shape[0]]
    # do matrix transformation
    votes = tf.matmul(tiled_transform_matrics, poses,
                      name='votes')
    assert votes.shape[1:] == [n_classes,
                               input_height,
                               input_width,
                               channels_in,
                               pose_shape[0],
                               pose_shape[1]]
    # reshape as if the whole lower layer were convolved to a 1x1 output
    # (so the same conv_em_routing can be reused)
    votes = tf.reshape(votes,
                       [batch_size,
                        1, 1,
                        n_classes,
                        input_height * input_width,
                        channels_in,
                        pose_shape[0] * pose_shape[1]])
    activation = tf.reshape(activation,
                            [batch_size,
                             1, 1,
                             input_height * input_width,
                             channels_in])
    # do EM-routing (beta_v/beta_a shared via the reused variable scope)
    with tf.variable_scope('em_routing', reuse=tf.AUTO_REUSE):
        output_poses, output_actives = conv_em_routing(
            activation, votes, 1, routing_iters)
    # flatten results from 2d back to per-class tensors
    output_poses = tf.reshape(output_poses, [batch_size,
                                             n_classes,
                                             pose_shape[0],
                                             pose_shape[1]],
                              name='output_poses')
    output_actives = tf.reshape(output_actives, [batch_size, n_classes],
                                name='output_actives')
    return output_poses, output_actives
| 36.454206 | 82 | 0.546583 |
e956a90d034b86994f287a940622414df02a3494 | 142 | py | Python | group.py | OlenaRudnytska/python_tests | ffa964f93401865bd75edf9a9c437b1006e2d211 | [
"Apache-2.0"
] | null | null | null | group.py | OlenaRudnytska/python_tests | ffa964f93401865bd75edf9a9c437b1006e2d211 | [
"Apache-2.0"
] | null | null | null | group.py | OlenaRudnytska/python_tests | ffa964f93401865bd75edf9a9c437b1006e2d211 | [
"Apache-2.0"
] | null | null | null |
class Group:
    """Simple value object describing a group record.

    Holds the group's name and the header/footer text associated with it.
    (PEP 8 spacing applied; behavior unchanged.)
    """

    def __init__(self, name, header, footer):
        """Store the given name, header and footer verbatim."""
        self.name = name
        self.header = header
        self.footer = footer
| 15.777778 | 42 | 0.598592 |
41b1aad85cd00a3bc34a74ebe2476b2a92b6d78b | 738 | py | Python | Abbreviation.py | see-why/HackerRank-Solutions | 5f3ab61235dc07209664e064292754942f5d41e4 | [
"MIT"
] | 5 | 2022-02-22T08:48:52.000Z | 2022-03-03T22:31:16.000Z | Abbreviation.py | see-why/HackerRank-Solutions | 5f3ab61235dc07209664e064292754942f5d41e4 | [
"MIT"
] | null | null | null | Abbreviation.py | see-why/HackerRank-Solutions | 5f3ab61235dc07209664e064292754942f5d41e4 | [
"MIT"
] | null | null | null | https://www.hackerrank.com/challenges/abbr/problem?isFullScreen=true
def abbreviation(a, b):
# Write your code here
bpos = {}
for i in range(len(b)):
bpos[b[i]] = (bpos[b[i]] | set([i])) if b[i] in bpos else set([i])
possibilities = set([0])
for i in range(len(a)):
if a[i].upper() in bpos:
intersection = bpos[a[i].upper()] & possibilities
advancement = set([i + 1 for i in intersection])
else:
advancement = set([])
if a[i].upper() == a[i]:#capitals must follow the intersection
possibilities = advancement
else:
possibilities = possibilities | advancement
return ("YES" if (len(b)) in possibilities else "NO")
| 36.9 | 74 | 0.575881 |
cf008a9863a8b6819354b4e93d35c7d856b5752a | 1,409 | py | Python | Packs/PingCastle/Integrations/PingCastle/PingCastle_test.py | diCagri/content | c532c50b213e6dddb8ae6a378d6d09198e08fc9f | [
"MIT"
] | 799 | 2016-08-02T06:43:14.000Z | 2022-03-31T11:10:11.000Z | Packs/PingCastle/Integrations/PingCastle/PingCastle_test.py | diCagri/content | c532c50b213e6dddb8ae6a378d6d09198e08fc9f | [
"MIT"
] | 9,317 | 2016-08-07T19:00:51.000Z | 2022-03-31T21:56:04.000Z | Packs/PingCastle/Integrations/PingCastle/PingCastle_test.py | diCagri/content | c532c50b213e6dddb8ae6a378d6d09198e08fc9f | [
"MIT"
] | 1,297 | 2016-08-04T13:59:00.000Z | 2022-03-31T23:43:06.000Z | """
PingCastle Integration for Cortex XSOAR - Unit Tests file
This file contains the Pytest Tests for the PingCastle Integration
This file tests the get_report command but not the long running integration command because in order to do that
I'd need to mock socket itself.
"""
import CommonServerPython
import demistomock as demisto
def test_get_report_no_report_available():
    """With an empty integration context, get_report reports nothing."""
    from PingCastle import get_report_command

    demisto.setIntegrationContext({})
    assert get_report_command({'delete_report': 'No'}) == 'No report available'
def test_get_report_delete():
    """delete_report=Yes returns the stored report and removes it from
    the integration context."""
    from PingCastle import get_report_command

    stored_report = '<example>report</example>'
    demisto.setIntegrationContext({'report': stored_report})
    command_result = get_report_command({'delete_report': 'Yes'})
    assert command_result.raw_response == stored_report
    assert command_result.outputs == {'report': stored_report}
    assert demisto.getIntegrationContext().get('report') is None
def test_get_report_no_delete():
    """delete_report=No returns the stored report and keeps it in the
    integration context."""
    from PingCastle import get_report_command

    stored_report = '<example>report</example>'
    demisto.setIntegrationContext({'report': stored_report})
    command_result = get_report_command({'delete_report': 'No'})
    assert command_result.raw_response == stored_report
    assert command_result.outputs == {'report': stored_report}
    assert demisto.getIntegrationContext().get('report') is not None
| 38.081081 | 111 | 0.760114 |
11c13bef354ba47188318b92d7eb266c3468d282 | 3,718 | py | Python | PPOCRLabel/libs/stringBundle.py | tp655998/PaddleOCR_Chinese_Cht | d9b2b7ada9c7fd2c4b78c3863fc06601947badc2 | [
"Apache-2.0"
] | 3 | 2020-11-25T07:51:40.000Z | 2021-12-22T09:32:51.000Z | PPOCRLabel/libs/stringBundle.py | tenDay22/PaddleOCR | ca44e5766919b61b0e88513d62b551397703be2c | [
"Apache-2.0"
] | 1 | 2020-12-21T06:06:45.000Z | 2020-12-21T06:06:45.000Z | PPOCRLabel/libs/stringBundle.py | tenDay22/PaddleOCR | ca44e5766919b61b0e88513d62b551397703be2c | [
"Apache-2.0"
] | 1 | 2022-03-29T07:09:25.000Z | 2022-03-29T07:09:25.000Z | # Copyright (c) <2015-Present> Tzutalin
# Copyright (C) 2013 MIT, Computer Science and Artificial Intelligence Laboratory. Bryan Russell, Antonio Torralba,
# William T. Freeman. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction, including without
# limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
# the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import os
import sys
import locale
from libs.ustr import ustr
__dir__ = os.path.dirname(os.path.abspath(__file__))  # directory containing this file
# Absolute path to the bundled .properties string resources.
__dirpath__ = os.path.abspath(os.path.join(__dir__, '../resources/strings'))
try:
from PyQt5.QtCore import *
except ImportError:
if sys.version_info.major >= 3:
import sip
sip.setapi('QVariant', 2)
from PyQt4.QtCore import *
class StringBundle:
    """Localized UI string lookup table loaded from .properties resources.

    Instances must be obtained through :meth:`getBundle`; the private
    ``__create_key`` token prevents direct construction.
    """

    __create_key = object()

    def __init__(self, create_key, localeStr):
        """Load every bundle along the locale fallback chain for `localeStr`."""
        assert(create_key == StringBundle.__create_key), "StringBundle must be created using StringBundle.getBundle"
        self.idToMessage = {}
        paths = self.__createLookupFallbackList(localeStr)
        # Later (more locale-specific) bundles override keys from earlier ones.
        for path in paths:
            self.__loadBundle(path)

    @classmethod
    def getBundle(cls, localeStr=None):
        """Create a bundle for `localeStr`, defaulting to the system locale."""
        if localeStr is None:
            try:
                localeStr = locale.getlocale()[0] if locale.getlocale() and len(
                    locale.getlocale()) > 0 else os.getenv('LANG')
            except Exception:  # was a bare `except:`; don't swallow SystemExit/KeyboardInterrupt
                print('Invalid locale')
                localeStr = 'en'
        return StringBundle(cls.__create_key, localeStr)

    def getString(self, stringId):
        """Return the message registered for `stringId` (asserts it exists)."""
        assert(stringId in self.idToMessage), "Missing string id : " + stringId
        return self.idToMessage[stringId]

    def __createLookupFallbackList(self, localeStr):
        """Build the list of candidate resource paths, generic to specific."""
        resultPaths = []
        # Fix: the original "\strings" relied on `\s` not being a recognized
        # escape sequence; make the backslash explicit. Non-Windows uses a
        # Qt resource path.
        basePath = "\\strings" if os.name == 'nt' else ":/strings"
        resultPaths.append(basePath)
        if localeStr is not None:
            # Don't follow standard BCP47. Simple fallback
            tags = re.split(r'[^a-zA-Z]', localeStr)
            for tag in tags:
                lastPath = resultPaths[-1]
                resultPaths.append(lastPath + '-' + tag)
        # Only the most specific candidate is mapped to an on-disk .properties
        # file; the earlier entries remain Qt resource paths.
        resultPaths[-1] = __dirpath__ + resultPaths[-1] + ".properties"
        return resultPaths

    def __loadBundle(self, path):
        """Parse `path` as key=value properties and merge into idToMessage."""
        PROP_SEPERATOR = '='
        f = QFile(path)
        if f.exists():
            if f.open(QIODevice.ReadOnly | QFile.Text):
                text = QTextStream(f)
                text.setCodec("UTF-8")
                while not text.atEnd():
                    line = ustr(text.readLine())
                    # Split on the first '='; re-join the rest so values may
                    # themselves contain '=' characters. Surrounding quotes
                    # are stripped.
                    key_value = line.split(PROP_SEPERATOR)
                    key = key_value[0].strip()
                    value = PROP_SEPERATOR.join(key_value[1:]).strip().strip('"')
                    self.idToMessage[key] = value
                f.close()
| 40.857143 | 119 | 0.656267 |
031a46689062860a2b0037a813fc762a0bca063e | 1,643 | py | Python | baselines/common/vec_env/vec_normalize.py | williamd4112/baselines | d9af95518e41e6e58feba9d70529e1dcabb044c8 | [
"MIT"
] | null | null | null | baselines/common/vec_env/vec_normalize.py | williamd4112/baselines | d9af95518e41e6e58feba9d70529e1dcabb044c8 | [
"MIT"
] | null | null | null | baselines/common/vec_env/vec_normalize.py | williamd4112/baselines | d9af95518e41e6e58feba9d70529e1dcabb044c8 | [
"MIT"
] | null | null | null | from . import VecEnvWrapper
from baselines.common.running_mean_std import RunningMeanStd
import numpy as np
class VecNormalize(VecEnvWrapper):
    """
    A vectorized wrapper that normalizes the observations
    and returns from an environment.

    Running mean/std statistics are updated only while ``training`` is True,
    so the same wrapper can be reused for evaluation with frozen statistics.
    """

    def __init__(self, venv, ob=True, ret=True, clipob=10., cliprew=10., gamma=0.99, epsilon=1e-8, training=True):
        VecEnvWrapper.__init__(self, venv)
        # Running statistics for observations and (scalar) discounted returns.
        self.ob_rms = RunningMeanStd(shape=self.observation_space.shape) if ob else None
        self.ret_rms = RunningMeanStd(shape=()) if ret else None
        self.clipob = clipob
        self.cliprew = cliprew
        # Per-environment discounted-return accumulator used to normalize rewards.
        self.ret = np.zeros(self.num_envs)
        self.gamma = gamma
        self.epsilon = epsilon
        self.training = training

    def step_wait(self):
        """Step the wrapped envs; return normalized obs and rewards."""
        obs, rews, news, infos = self.venv.step_wait()
        self.ret = self.ret * self.gamma + rews
        obs = self._obfilt(obs)
        if self.ret_rms:
            if self.training:
                self.ret_rms.update(self.ret)
            rews = np.clip(rews / np.sqrt(self.ret_rms.var + self.epsilon), -self.cliprew, self.cliprew)
        # Bug fix: zero the return accumulator for environments whose episode
        # just ended, so discounted returns do not leak across episodes.
        self.ret[news] = 0.
        return obs, rews, news, infos

    def _obfilt(self, obs):
        """Normalize and clip observations (updates stats when training)."""
        if self.ob_rms:
            if self.training:
                self.ob_rms.update(obs)
            obs = np.clip((obs - self.ob_rms.mean) / np.sqrt(self.ob_rms.var + self.epsilon), -self.clipob, self.clipob)
            return obs
        else:
            return obs

    def _deobfilt(self, obs):
        # Inverse of _obfilt; currently the identity (normalization not undone).
        return obs

    def reset(self):
        """Reset all environments and return normalized initial observations."""
        # New episodes start with a clean return accumulator.
        self.ret = np.zeros(self.num_envs)
        obs = self.venv.reset()
        return self._obfilt(obs)
a7b6f67f126310ec44a38b7d53cd3e05316d54bb | 131 | py | Python | external/AR/ltr/__init__.py | tzhhhh123/Stark | eaf7df3baf27ac064938f831211ae64659bc6808 | [
"MIT"
] | 376 | 2021-03-27T12:29:17.000Z | 2022-03-29T01:22:15.000Z | external/AR/ltr/__init__.py | wp8733684/Stark | ba59f9596b06bc687d726f991e1e7fce8af6b5a5 | [
"MIT"
] | 75 | 2021-03-31T12:44:45.000Z | 2022-03-28T09:02:57.000Z | external/AR/ltr/__init__.py | wp8733684/Stark | ba59f9596b06bc687d726f991e1e7fce8af6b5a5 | [
"MIT"
] | 82 | 2021-03-26T10:07:57.000Z | 2022-03-29T11:08:27.000Z | from .admin.loading import load_network
from .admin.model_constructor import model_constructor
from .admin.multigpu import MultiGPU | 43.666667 | 54 | 0.870229 |
65f4b653e592ed52234ee2c1c9b131adfdd180db | 7,776 | py | Python | docs/source/conf.py | alchemy-fr/GeonamesServer | 8d509773571527ebea941d9b4daf8e35386898ec | [
"MIT"
] | 17 | 2015-01-19T07:52:30.000Z | 2018-09-23T12:17:50.000Z | docs/source/conf.py | alchemy-fr/GeonamesServer | 8d509773571527ebea941d9b4daf8e35386898ec | [
"MIT"
] | 2 | 2016-03-16T12:07:08.000Z | 2016-03-17T10:10:38.000Z | docs/source/conf.py | alchemy-fr/GeonamesServer | 8d509773571527ebea941d9b4daf8e35386898ec | [
"MIT"
] | 10 | 2015-01-11T13:17:10.000Z | 2020-07-06T04:42:10.000Z | # -*- coding: utf-8 -*-
#
# Geonames Server documentation build configuration file, created by
# sphinx-quickstart on Wed Oct 24 12:08:55 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os

# -- General configuration ----------------------------------------------------

# Sphinx extension modules to load (none are required for these docs).
extensions = []

# Directories containing page templates, relative to this file.
templates_path = ['_templates']

# Source file suffix and the root document of the toctree.
source_suffix = '.rst'
master_doc = 'index'

# Project identity used throughout the generated documentation.
project = u'Geonames Server'
copyright = u'2012, Alchemy'

# The short X.Y version and the full release string.
version = '0.1'
release = '0.1'

# Glob patterns (relative to the source dir) ignored when collecting sources.
exclude_patterns = []

# Pygments style used for syntax highlighting.
pygments_style = 'sphinx'

# -- Options for HTML output ---------------------------------------------------

# Custom theme shipped with the docs (see html_theme_path).
html_theme = 'Alchemy'
html_theme_path = ['_themes']

# Static assets copied into the build output.
html_static_path = ['_static']

# Base name for the HTML help builder's output files.
htmlhelp_basename = 'l10n-serverdoc'

# -- Options for LaTeX output --------------------------------------------------

# No LaTeX overrides (paper size, point size, preamble) are customized.
latex_elements = {}

# (source start file, target name, title, author, documentclass).
latex_documents = [
    ('index', 'l10n-server.tex', u'l10n-server Documentation',
     u'Andrey Kalinovsky', 'manual'),
]

# -- Options for manual page output --------------------------------------------

# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'l10n-server', u'l10n-server Documentation',
     [u'Andrey Kalinovsky'], 1)
]

# -- Options for Texinfo output ------------------------------------------------

# (source start file, target name, title, author, dir menu entry,
#  description, category).
texinfo_documents = [
    ('index', 'l10n-server', u'l10n-server Documentation',
     u'Andrey Kalinovsky', 'l10n-server', 'One line description of project.',
     'Miscellaneous'),
]
| 32 | 80 | 0.714763 |
b8b132fc5ac58a191d3d2abe267b5d648d1e311b | 2,930 | bzl | Python | bindings/python/build_defs.oss.bzl | ikrima/iree | 6d0978e2baa2ba7d618097a46eccfde06483ed60 | [
"Apache-2.0"
] | null | null | null | bindings/python/build_defs.oss.bzl | ikrima/iree | 6d0978e2baa2ba7d618097a46eccfde06483ed60 | [
"Apache-2.0"
] | null | null | null | bindings/python/build_defs.oss.bzl | ikrima/iree | 6d0978e2baa2ba7d618097a46eccfde06483ed60 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Macros for building IREE python extensions."""
load("@iree_native_python//:build_defs.bzl", "py_extension")
load("@rules_cc//cc:defs.bzl", "cc_library")
load("@rules_python//python:defs.bzl", "py_binary", "py_library", "py_test")
load("//iree:build_defs.oss.bzl", _PLATFORM_VULKAN_DEPS = "PLATFORM_VULKAN_DEPS")
# No extra NumPy dependencies are needed in this build configuration.
NUMPY_DEPS = []
# Platform-specific Vulkan dependencies, re-exported from //iree's build defs.
PLATFORM_VULKAN_DEPS = _PLATFORM_VULKAN_DEPS
# CPython headers required to compile native extension modules.
PYTHON_HEADERS_DEPS = ["@iree_native_python//:python_headers"]
# Extra C++ deps for the Python bindings (none in this configuration).
PYTHON_CPP_EXTRA_DEPS = []
# Compiler options for pybind targets: enable C++ exceptions on non-MSVC
# toolchains (-fexceptions is GCC/Clang syntax, hence the MSVC carve-out).
PYBIND_COPTS = select({
    "//iree:iree_is_msvc": [],
    "//conditions:default": [
        "-fexceptions",
    ],
})
# Feature adjustments that accompany the exceptions-enabled pybind builds.
PYBIND_FEATURES = select({
    "//iree:iree_is_msvc": [],
    "//conditions:default": [
        "-use_header_modules",  # Incompatible with exceptions builds.
    ],
})
# Hide non-exported symbols in extension shared objects.
PYBIND_EXTENSION_COPTS = [
    "-fvisibility=hidden",
]
# Optional deps to enable an intree TensorFlow python. This build configuration
# defaults to getting TensorFlow from the python environment (empty).
INTREE_TENSORFLOW_PY_DEPS = []
# Optional deps to enable intree TensorFlow Hub. This build configuration
# defaults to getting TensorFlow from the python environment (empty).
INTREE_TF_HUB_DEPS = []
def pybind_cc_library(
        name,
        copts = [],
        features = [],
        deps = [],
        **kwargs):
    """cc_library wrapper for targets that are part of the Python bindings.

    Injects pybind11 and the Python headers as dependencies and appends the
    pybind-specific compiler options and features.
    """
    bindings_deps = [
        "@iree_pybind11//:pybind11",
    ] + deps + PYTHON_HEADERS_DEPS
    cc_library(
        name = name,
        copts = copts + PYBIND_COPTS,
        features = PYBIND_FEATURES,
        deps = bindings_deps,
        **kwargs
    )
def iree_py_library(**kwargs):
    """Compatibility py_library which has bazel compatible args.

    All arguments are forwarded unchanged to `py_library`.
    """
    # This is used when args are needed that are incompatible with upstream.
    # Presently, this includes:
    #     imports
    py_library(**kwargs)
def iree_py_binary(**kwargs):
    """Compatibility py_binary which has bazel specific args.

    Forces `legacy_create_init = False`; see the linked issue for rationale.
    """
    # See: https://github.com/google/iree/issues/2405
    py_binary(legacy_create_init = False, **kwargs)
def iree_py_test(**kwargs):
    """Compatibility py_test which has bazel compatible args.

    Forces `legacy_create_init = False`; see the linked issue for rationale.
    """
    # See: https://github.com/google/iree/issues/2405
    py_test(legacy_create_init = False, **kwargs)
def iree_py_extension(**kwargs):
    """Delegates to the real py_extension (loaded from @iree_native_python)."""
    py_extension(**kwargs)
| 31.505376 | 81 | 0.695563 |
197fdcb19c1e812ef033e73aa297461b91f71b6c | 2,106 | py | Python | utilitybelt/dicts.py | pedroysb/python-utilitybelt | 13d3502aa1a486c9d775ad2c551fb8e7e48b0d96 | [
"MIT"
] | 11 | 2016-06-12T04:38:20.000Z | 2021-03-15T12:27:54.000Z | utilitybelt/dicts.py | blockstack/utilitybelt | 13d3502aa1a486c9d775ad2c551fb8e7e48b0d96 | [
"MIT"
] | 1 | 2020-01-13T07:08:42.000Z | 2020-03-08T06:20:10.000Z | utilitybelt/dicts.py | blockstack/utilitybelt | 13d3502aa1a486c9d775ad2c551fb8e7e48b0d96 | [
"MIT"
] | 14 | 2016-06-12T04:38:20.000Z | 2021-03-16T15:51:57.000Z | # -*- coding: utf-8 -*-
"""
Utilitybelt
~~~~~
:copyright: (c) 2015 by Halfmoon Labs
:license: MIT, see LICENSE for more details.
"""
from collections import defaultdict
""" A recursive dictionary based on the collections lib defaultdict class.
"""
recursive_dict = lambda: defaultdict(recursive_dict)
def recursive_dict_to_dict(rdict):
    """ Convert a recursive dict to a plain ol' dict.

    Nested defaultdict values are converted recursively; every other value is
    copied through unchanged.
    """
    return {
        key: recursive_dict_to_dict(value) if isinstance(value, defaultdict) else value
        for (key, value) in rdict.items()
    }
def scrub_dict(d):
    """ Recursively inspect a dictionary and remove all empty values, including
    empty strings, lists, and dictionaries.

    Non-dict/list values are returned unchanged. The deliberate-looking
    ``type(...) is`` checks are preserved: dict/list *subclasses* (e.g.
    defaultdict) fall through to the final return — presumably intentional;
    confirm before changing to isinstance().
    """
    if type(d) is dict:
        # Python 3 fix: dict.iteritems() no longer exists; the rest of this
        # module already uses items(). Values are scrubbed twice (once for the
        # emptiness test, once for the kept value) — fine for small structures.
        return dict(
            (k, scrub_dict(v)) for k, v in d.items() if v and scrub_dict(v)
        )
    elif type(d) is list:
        return [
            scrub_dict(v) for v in d if v and scrub_dict(v)
        ]
    else:
        return d
def _to_json_type(obj, classkey=None):
""" Recursively convert the object instance into a valid JSON type.
"""
if isinstance(obj, dict):
data = {}
for (k, v) in obj.items():
data[k] = _to_json_type(v, classkey)
return data
elif hasattr(obj, "_ast"):
return _to_json_type(obj._ast())
elif hasattr(obj, "__iter__"):
return [_to_json_type(v, classkey) for v in obj]
elif hasattr(obj, "__dict__"):
data = dict([
(key, _to_json_type(value, classkey))
for key, value in obj.__dict__.iteritems()
if not callable(value) and not key.startswith('_')
])
if classkey is not None and hasattr(obj, "__class__"):
data[classkey] = obj.__class__.__name__
return data
else:
return obj
def to_dict(obj):
    """ Convert an instance of an object into a dict.

    Raises ValueError when the conversion does not yield a dictionary.
    """
    converted = _to_json_type(obj)
    if not isinstance(converted, dict):
        raise ValueError("The value provided must be an object.")
    return scrub_dict(converted)
| 27 | 79 | 0.591643 |
4f42606d59e765512d5470b0522454607ec3b30d | 1,937 | py | Python | Node_Classification/2.GraphSN_GIN/citeseer/layers.py | wokas36/GraphSNN | dcc36cad4d015b3c6aeae4e27fb595e35e1168a3 | [
"MIT"
] | 11 | 2022-03-15T08:51:51.000Z | 2022-03-27T14:43:39.000Z | Node_Classification/2.GraphSN_GIN/citeseer/layers.py | wokas36/GraphSNN | dcc36cad4d015b3c6aeae4e27fb595e35e1168a3 | [
"MIT"
] | null | null | null | Node_Classification/2.GraphSN_GIN/citeseer/layers.py | wokas36/GraphSNN | dcc36cad4d015b3c6aeae4e27fb595e35e1168a3 | [
"MIT"
] | 3 | 2022-03-27T14:43:41.000Z | 2022-03-28T12:08:53.000Z | import math
import torch
from torch.nn.parameter import Parameter
from torch.nn.modules.module import Module
import torch.nn as nn
import numpy as np
from IPython.core.debugger import Tracer
class Graphsn_GCN(Module):
    """Single GraphSN graph-convolution layer.

    ``forward`` computes ``A' @ (X @ W) + b`` where ``A'`` is the input
    adjacency matrix with its diagonal rescaled by the learned scalar ``eps``.
    """

    def __init__(self, in_features, out_features, bias=True):
        super(Graphsn_GCN, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = Parameter(torch.FloatTensor(in_features, out_features))
        # Learned scalar that rescales the adjacency diagonal (self-loop weight).
        self.eps = nn.Parameter(torch.FloatTensor(1))
        if bias:
            self.bias = Parameter(torch.FloatTensor(out_features))
        else:
            # Register 'bias' as None so self.bias is always defined.
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        """(Re)initialize weight uniformly and eps to a small constant.

        The constants were tuned empirically; the inline notes record the
        validation accuracies observed for alternative values. (A dead
        triple-quoted block of superseded tuning code was removed here.)
        """
        stdv = 0.95 / math.sqrt(self.weight.size(1))  # 0.9 -> 0.7150 | 0.95 -> 0.7180
        self.weight.data.uniform_(-stdv, stdv)
        stdv_eps = 0.21 / math.sqrt(self.eps.size(0))  # 0.21 -> 0.7180
        nn.init.constant_(self.eps, stdv_eps)
        if self.bias is not None:
            self.bias.data.uniform_(-stdv, stdv)

    def forward(self, input, adj):
        """Apply the layer to node features `input` with dense adjacency `adj`.

        `adj` must be a square 2-D tensor (N x N) and `input` (N, in_features),
        as implied by torch.diag/torch.mm below.
        """
        # Rebuild `adj` with its diagonal replaced by eps * diag(adj),
        # leaving all off-diagonal entries untouched.
        v = (self.eps) * torch.diag(adj)
        mask = torch.diag(torch.ones_like(v))
        adj = mask * torch.diag(v) + (1. - mask) * adj
        support = torch.mm(input, self.weight)
        output = torch.spmm(adj, support)
        if self.bias is not None:
            return output + self.bias
        else:
            return output

    def __repr__(self):
        return self.__class__.__name__ + ' (' \
               + str(self.in_features) + ' -> ' \
               + str(self.out_features) + ')'
2b78a8464ed1f34e6966149d3e6f7cb88677548c | 1,910 | py | Python | test/hummingbot/connector/exchange/btcturk/test_btcturk_auth.py | orhanb/hummingbot | 2a24fb63173725fa1690c0191a3487c5c79bb508 | [
"Apache-2.0"
] | null | null | null | test/hummingbot/connector/exchange/btcturk/test_btcturk_auth.py | orhanb/hummingbot | 2a24fb63173725fa1690c0191a3487c5c79bb508 | [
"Apache-2.0"
] | null | null | null | test/hummingbot/connector/exchange/btcturk/test_btcturk_auth.py | orhanb/hummingbot | 2a24fb63173725fa1690c0191a3487c5c79bb508 | [
"Apache-2.0"
] | null | null | null | import asyncio
import hashlib
import hmac
from copy import copy
from unittest import TestCase
from unittest.mock import MagicMock
from typing_extensions import Awaitable
from hummingbot.connector.exchange.btcturk.btcturk_auth import BtcturkAuth
from hummingbot.core.web_assistant.connections.data_types import RESTMethod, RESTRequest
class BtcturkAuthTests(TestCase):
    """Unit tests for BtcturkAuth request signing."""

    def setUp(self) -> None:
        self._api_key = "testApiKey"
        self._secret = "testSecret"

    def async_run_with_timeout(self, coroutine: Awaitable, timeout: float = 1):
        """Drive `coroutine` to completion on the current event loop."""
        loop = asyncio.get_event_loop()
        return loop.run_until_complete(asyncio.wait_for(coroutine, timeout))

    def test_rest_authenticate(self):
        frozen_time = 1234567890.000
        mock_time_provider = MagicMock()
        mock_time_provider.time.return_value = frozen_time

        params = {
            "symbol": "LTCBTC",
            "side": "BUY",
            "type": "LIMIT",
            "timeInForce": "GTC",
            "quantity": 1,
            "price": "0.1",
        }
        signed_params = dict(params)

        auth = BtcturkAuth(api_key=self._api_key, secret_key=self._secret, time_provider=mock_time_provider)
        request = RESTRequest(method=RESTMethod.GET, params=params, is_auth_required=True)
        configured_request = self.async_run_with_timeout(auth.rest_authenticate(request))

        # Recompute the expected HMAC-SHA256 signature over the "k=v&k=v"
        # encoding of the params (insertion order) plus the ms timestamp.
        signed_params["timestamp"] = 1234567890000
        encoded_params = "&".join(f"{key}={value}" for key, value in signed_params.items())
        expected_signature = hmac.new(
            self._secret.encode("utf-8"), encoded_params.encode("utf-8"), hashlib.sha256
        ).hexdigest()

        self.assertEqual(frozen_time * 1e3, configured_request.params["timestamp"])
        self.assertEqual(expected_signature, configured_request.params["signature"])
        # NOTE(review): the expected header name is Binance-style ("X-MBX-APIKEY");
        # verify this is really what the Btcturk connector should emit.
        self.assertEqual({"X-MBX-APIKEY": self._api_key}, configured_request.headers)
| 38.2 | 108 | 0.687435 |
37dc173392e0bc1503ccc84e1afcada043a821cc | 1,367 | py | Python | sdk/eventhub/azure-eventhub/azure/eventhub/__init__.py | sammiee5311/azure-sdk-for-python | bc99c220bcada3aa7187e915f9df65f4fa0669c5 | [
"MIT"
] | null | null | null | sdk/eventhub/azure-eventhub/azure/eventhub/__init__.py | sammiee5311/azure-sdk-for-python | bc99c220bcada3aa7187e915f9df65f4fa0669c5 | [
"MIT"
] | null | null | null | sdk/eventhub/azure-eventhub/azure/eventhub/__init__.py | sammiee5311/azure-sdk-for-python | bc99c220bcada3aa7187e915f9df65f4fa0669c5 | [
"MIT"
] | null | null | null | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from uamqp import constants
from ._common import EventData, EventDataBatch
from ._version import VERSION
__version__ = VERSION
from ._producer_client import EventHubProducerClient
from ._consumer_client import EventHubConsumerClient
from ._client_base import EventHubSharedKeyCredential
from ._eventprocessor.checkpoint_store import CheckpointStore
from ._eventprocessor.common import CloseReason, LoadBalancingStrategy
from ._eventprocessor.partition_context import PartitionContext
from ._connection_string_parser import (
parse_connection_string,
EventHubConnectionStringProperties
)
from ._retry import RetryMode
TransportType = constants.TransportType
__all__ = [
"EventData",
"EventDataBatch",
"EventHubProducerClient",
"EventHubConsumerClient",
"TransportType",
"EventHubSharedKeyCredential",
"CheckpointStore",
"CloseReason",
"LoadBalancingStrategy",
"PartitionContext",
"parse_connection_string",
"EventHubConnectionStringProperties",
"RetryMode"
]
| 34.175 | 94 | 0.692758 |
17c378f0d3835707d080d326b8022d5f79504a5e | 1,643 | py | Python | src/comments/migrations/0001_initial.py | jsmesami/naovoce | 235c6e05ef37be23d3b9bd0b76d80080c58617a0 | [
"BSD-3-Clause"
] | 18 | 2016-02-23T15:34:58.000Z | 2022-02-28T08:15:30.000Z | src/comments/migrations/0001_initial.py | jsmesami/naovoce | 235c6e05ef37be23d3b9bd0b76d80080c58617a0 | [
"BSD-3-Clause"
] | 66 | 2016-03-15T19:59:09.000Z | 2022-03-11T23:25:41.000Z | src/comments/migrations/0001_initial.py | jsmesami/naovoce | 235c6e05ef37be23d3b9bd0b76d80080c58617a0 | [
"BSD-3-Clause"
] | 7 | 2016-03-24T09:13:07.000Z | 2018-09-16T17:04:50.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
import utils.fields
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(serialize=False, auto_created=True, primary_key=True, verbose_name='ID')),
('created', models.DateTimeField(editable=False, default=django.utils.timezone.now, verbose_name='created')),
('modified', utils.fields.AutoDateTimeField(editable=False, default=django.utils.timezone.now, verbose_name='modified')),
('text', models.CharField(verbose_name='comment', max_length=1600)),
('ip', models.GenericIPAddressField(null=True, verbose_name="author's IP address")),
('rejected', models.BooleanField(default=False, verbose_name='rejected')),
('object_id', models.PositiveIntegerField()),
('author', models.ForeignKey(to=settings.AUTH_USER_MODEL, verbose_name='author', on_delete=models.CASCADE)),
('content_type', models.ForeignKey(to='contenttypes.ContentType', on_delete=models.CASCADE)),
],
options={
'verbose_name_plural': 'comments',
'ordering': ('-created',),
'verbose_name': 'comment',
},
),
]
| 43.236842 | 137 | 0.631771 |
d28ea41e33bb3d66da9104867be2713ef6918a29 | 11,350 | py | Python | src/orion/algo/asha.py | mnoukhov/orion | 7849d77344e84ec805207cf4148aecf6f7d6b3d7 | [
"BSD-3-Clause"
] | 3 | 2019-12-13T03:41:19.000Z | 2021-06-15T20:14:33.000Z | src/orion/algo/asha.py | mnoukhov/orion | 7849d77344e84ec805207cf4148aecf6f7d6b3d7 | [
"BSD-3-Clause"
] | null | null | null | src/orion/algo/asha.py | mnoukhov/orion | 7849d77344e84ec805207cf4148aecf6f7d6b3d7 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
:mod:`orion.algo.asha` -- Asynchronous Successive Halving Algorithm
===================================================================
.. module:: asha
:platform: Unix
:synopsis: Asynchronous Successive Halving Algorithm
"""
import copy
import hashlib
import logging
import numpy
from orion.algo.base import BaseAlgorithm
from orion.algo.space import Fidelity
logger = logging.getLogger(__name__)
# Message template with {fidelity}/{budgets}/{params} placeholders; presumably
# .format()-ed where a trial's fidelity fails registration (usage outside view).
REGISTRATION_ERROR = """
Bad fidelity level {fidelity}. Should be in {budgets}.
Params: {params}
"""
class ASHA(BaseAlgorithm):
"""Asynchronous Successive Halving Algorithm
`A simple and robust hyperparameter tuning algorithm with solid theoretical underpinnings
that exploits parallelism and aggressive early-stopping.`
For more information on the algorithm, see original paper at https://arxiv.org/abs/1810.05934.
Li, Liam, et al. "Massively parallel hyperparameter tuning."
arXiv preprint arXiv:1810.05934 (2018)
Parameters
----------
space: `orion.algo.space.Space`
Optimisation space with priors for each dimension.
seed: None, int or sequence of int
Seed for the random number generator used to sample new trials.
Default: ``None``
max_resources: int
Maximum amount of resources that will be assigned to trials by ASHA. Only the
best performing trial will be assigned the maximum amount of resources.
Default: 100
grace_period: int
The minimum number of resources assigned to each trial.
Default: 1
reduction_factor: int
The factor by which ASHA promotes trials. If the reduction factor is 4,
it means the number of trials from one fidelity level to the next one is roughly
divided by 4, and each fidelity level has 4 times more resources than the prior one.
Default: 4
num_brackets: int
Using a grace period that is too small may bias ASHA too strongly towards
fast converging trials that do not lead to best results at convergence (stagglers). To
overcome this, you can increase the number of brackets, which increases the amount of
resource required for optimisation but decreases the bias towards stragglers.
Default: 1
"""
    def __init__(self, space, seed=None, max_resources=100, grace_period=1, reduction_factor=4,
                 num_brackets=1):
        """Initialize ASHA and create one `Bracket` per requested bracket.

        Parameters are documented in the class docstring.
        """
        super(ASHA, self).__init__(
            space, seed=seed, max_resources=max_resources, grace_period=grace_period,
            reduction_factor=reduction_factor, num_brackets=num_brackets)
        # The reduction factor must be at least 2 (see class docstring).
        if reduction_factor < 2:
            raise AttributeError("Reduction factor for ASHA needs to be at least 2.")
        self.trial_info = {}  # Stores Trial -> Bracket
        # Tracks state for new trial add
        self.brackets = [
            Bracket(self, grace_period, max_resources, reduction_factor, s)
            for s in range(num_brackets)
        ]
def seed_rng(self, seed):
"""Seed the state of the random number generator.
:param seed: Integer seed for the random number generator.
"""
self.rng = numpy.random.RandomState(seed)
@property
def state_dict(self):
"""Return a state dict that can be used to reset the state of the algorithm."""
return {'rng_state': self.rng.get_state()}
def set_state(self, state_dict):
"""Reset the state of the algorithm based on the given state_dict
:param state_dict: Dictionary representing state of an algorithm
"""
self.seed_rng(0)
self.rng.set_state(state_dict['rng_state'])
    def suggest(self, num=1):
        """Suggest a `num` of new sets of parameters.

        Promote a trial if possible, otherwise randomly draw samples from the space and
        randomly assign to a bracket.

        :param num: how many sets to be suggested; only 1 is supported.
        :raises ValueError: if more than one point is requested.
        :raises RuntimeError: if sampling keeps producing already-known points.

        .. note:: New parameters must be compliant with the problem's domain
            `orion.algo.space.Space`.
        """
        if num > 1:
            raise ValueError("ASHA should suggest only one point.")
        # First, try to promote an existing trial from any bracket.
        for bracket in self.brackets:
            candidate = bracket.update_rungs()
            if candidate:
                logger.debug('Promoting')
                return [candidate]
        # Otherwise sample a fresh point; retry a bounded number of times so a
        # duplicate of an already-seen configuration is not suggested.
        for _attempt in range(100):
            point = list(self.space.sample(1, seed=tuple(self.rng.randint(0, 1000000, size=3)))[0])
            if self.get_id(point) not in self.trial_info:
                break
        if self.get_id(point) in self.trial_info:
            raise RuntimeError(
                'ASHA keeps sampling already existing points. This should not happen, '
                'please report this error to https://github.com/Epistimio/orion/issues')
        # Pick a bracket with softmax probability over the bracket sizes
        # (number of rungs): larger brackets are favoured.
        sizes = numpy.array([len(b.rungs) for b in self.brackets])
        probs = numpy.e**(sizes - sizes.max())
        normalized = probs / probs.sum()
        idx = self.rng.choice(len(self.brackets), p=normalized)
        # Start the new trial at the chosen bracket's lowest fidelity budget.
        point[self.fidelity_index] = self.brackets[idx].rungs[0][0]
        logger.debug('Sampling for bracket %s %s', idx, self.brackets[idx])
        return [tuple(point)]
def get_id(self, point):
"""Compute a unique hash for a point based on params, but not fidelity level."""
_point = list(point)
non_fidelity_dims = _point[0:self.fidelity_index]
non_fidelity_dims.extend(_point[self.fidelity_index + 1:])
return hashlib.md5(str(non_fidelity_dims).encode('utf-8')).hexdigest()
    def observe(self, points, results):
        """Observe evaluation `results` corresponding to list of `points` in
        space.

        Each observed point is registered in the bracket it belongs to; a point
        not seen before is matched to the bracket whose first-rung budget
        equals the point's fidelity.

        :param points: iterable of evaluated points.
        :param results: iterable of result dicts; only ``objective`` is used.
        :raises ValueError: when a new point's fidelity matches no bracket.
        """
        for point, result in zip(points, results):
            _id = self.get_id(point)
            bracket = self.trial_info.get(_id)
            if not bracket:
                # Unknown trial: locate the bracket whose base rung budget
                # matches the point's fidelity level.
                fidelity = point[self.fidelity_index]
                brackets = [bracket for bracket in self.brackets
                            if bracket.rungs[0][0] == fidelity]
                if not brackets:
                    raise ValueError(
                        "No bracket found for point {0} with fidelity {1}".format(_id, fidelity))
                bracket = brackets[0]
            try:
                bracket.register(point, result['objective'])
            except IndexError:
                # The fidelity matched no rung of the chosen bracket; skip the
                # point instead of corrupting the rungs.
                logger.warning('Point registered to wrong bracket. This is likely due '
                               'to a corrupted database, where trials of different fidelity '
                               'have a wrong timestamps.')
                continue
            if _id not in self.trial_info:
                self.trial_info[_id] = bracket
@property
def is_done(self):
"""Return True, if all brackets reached their maximum resources."""
return all(bracket.is_done for bracket in self.brackets)
@property
def fidelity_index(self):
"""Compute the index of the point when fidelity is."""
def _is_fidelity(dim):
return (isinstance(dim, Fidelity) or
(hasattr(dim, 'original_dimension') and
isinstance(dim.original_dimension, Fidelity)))
return [i for i, dim in enumerate(self.space.values()) if _is_fidelity(dim)][0]
class Bracket():
    """Bracket of rungs for the algorithm ASHA.

    A bracket is a sequence of rungs with geometrically increasing resource
    budgets; a trial is promoted from one rung to the next when it ranks in
    the top ``1 / reduction_factor`` of its current rung.
    """

    def __init__(self, asha, min_t, max_t, reduction_factor, s):
        """Build rungs based on min_t, max_t, reduction_factor and s.

        :param asha: `ASHA` algorithm
        :param min_t: Minimum resources (grace_period)
        :param max_t: Maximum resources
        :param reduction_factor: Factor of reduction from `min_t` to `max_t`
        :param s: Minimal early stopping factor (used when there are many brackets)
        :raises AttributeError: if ``min_t`` is not positive or exceeds ``max_t``.
        """
        if min_t <= 0:
            raise AttributeError("Minimum resources must be a positive number.")
        elif min_t > max_t:
            raise AttributeError("Minimum resources must be smaller than maximum resources.")
        self.asha = asha
        self.reduction_factor = reduction_factor
        # Number of rungs needed for budgets min_t * rf**(k + s) to reach max_t.
        max_rungs = int(numpy.ceil(numpy.log(max_t / min_t) / numpy.log(reduction_factor) - s + 1))
        # Each rung is (budget, {trial_id: (objective, point)}); budgets are
        # capped at max_t.
        self.rungs = [(min(min_t * reduction_factor**(k + s), max_t), dict())
                      for k in range(max_rungs)]
        logger.debug('Bracket budgets: %s', str([rung[0] for rung in self.rungs]))

    def register(self, point, objective):
        """Register a point in the rung whose budget matches its fidelity.

        :raises IndexError: if the point's fidelity matches none of the rungs.
        """
        fidelity = point[self.asha.fidelity_index]
        rungs = [rung for budget, rung in self.rungs if budget == fidelity]
        if not rungs:
            budgets = [budget for budget, rung in self.rungs]
            raise IndexError(REGISTRATION_ERROR.format(fidelity=fidelity, budgets=budgets,
                                                       params=point))
        rungs[0][self.asha.get_id(point)] = (objective, point)

    def get_candidate(self, rung_id):
        """Return a top-k trial of rung ``rung_id`` not yet promoted, or None.

        Only completed trials (objective is not None) are eligible.
        """
        _, rung = self.rungs[rung_id]
        next_rung = self.rungs[rung_id + 1][1]
        # Sort completed trials by objective (ascending: lower is better).
        rung = list(sorted((objective, point) for objective, point in rung.values()
                           if objective is not None))
        # Top 1/reduction_factor of the rung is eligible for promotion.
        # (The previous `min(k, len(rung))` clamp was redundant: the floor
        # division already guarantees k <= len(rung).)
        k = len(rung) // self.reduction_factor
        for i in range(k):
            point = rung[i][1]
            _id = self.asha.get_id(point)
            if _id not in next_rung:
                return point
        return None

    @property
    def is_done(self):
        """Return True if the last rung of the bracket contains any trial."""
        # Bug fix: previously returned the raw count (an int), contradicting
        # the documented True/False contract; truth tests still worked, but
        # `is True` comparisons and introspection did not.
        return len(self.rungs[-1][1]) > 0

    def update_rungs(self):
        """Promote the first candidate that is found and return it.

        The rungs are iterated over in reversed order, so that high rungs
        are prioritised for promotions. When a candidate is promoted, the loop
        is broken and the method returns the promoted point.

        .. note ::

            All trials are part of the rungs, for any state. Only completed trials
            are eligible for promotion, i.e., only completed trials can be part of top-k.
            Lookup for promotion in rung l + 1 contains trials of any status.
        """
        for rung_id in range(len(self.rungs) - 2, -1, -1):
            candidate = self.get_candidate(rung_id)
            if candidate:
                # pylint: disable=logging-format-interpolation
                logger.debug(
                    'Promoting {point} from rung {past_rung} with fidelity {past_fidelity} to '
                    'rung {new_rung} with fidelity {new_fidelity}'.format(
                        point=candidate, past_rung=rung_id,
                        past_fidelity=candidate[self.asha.fidelity_index],
                        new_rung=rung_id + 1, new_fidelity=self.rungs[rung_id + 1][0]))
                candidate = list(copy.deepcopy(candidate))
                # Bump the fidelity to the next rung's budget.
                candidate[self.asha.fidelity_index] = self.rungs[rung_id + 1][0]
                return tuple(candidate)
        return None

    def __repr__(self):
        """Return representation of bracket with fidelity levels."""
        return 'Bracket({})'.format([rung[0] for rung in self.rungs])
| 38.087248 | 99 | 0.61815 |
fd359ff256b9837cd01b40090b51a2ab93d36f11 | 486 | py | Python | long_pool_events/module_start_typing_c.py | ihydrogen/hydrogen-chat-bot-py | b21ece5cf2532c0f0d31b5db75fe6b91229f5d59 | [
"Apache-2.0"
] | 9 | 2017-02-19T16:09:53.000Z | 2021-01-05T12:18:22.000Z | long_pool_events/module_start_typing_c.py | ihydrogen/hydrogen-chat-bot-py | b21ece5cf2532c0f0d31b5db75fe6b91229f5d59 | [
"Apache-2.0"
] | 1 | 2017-11-28T04:37:33.000Z | 2017-11-28T04:37:33.000Z | long_pool_events/module_start_typing_c.py | ihydrogen/hydrogen-chat-bot-py | b21ece5cf2532c0f0d31b5db75fe6b91229f5d59 | [
"Apache-2.0"
] | null | null | null | import bot_header
from vk_api.api import User
from vk_api.api import api_request
from vk_api.api import get_api
# execute when user started typing.
def main(message, lpt):
    """Handle a long-poll 'user started typing' event.

    Resolves the typing user through the VK API and logs an informational line.
    """
    # The second element of the long-poll event is the typing user's id.
    user_id = message[1]
    api = get_api(lpt=lpt)
    response = api_request(api, "users.get", "user_ids=%s" % user_id)
    user = User.from_json(response[0])
    bot_header.v("%s started typing..." % user.first_last())
| 32.4 | 99 | 0.713992 |
73b8b01aad5023e21c02e3221377540e95aaac1e | 422 | py | Python | projects/golem_integration/tests/actions/alerts/verify_alert_text.py | kangchenwei/keyautotest2 | f980d46cabfc128b2099af3d33968f236923063f | [
"MIT"
] | null | null | null | projects/golem_integration/tests/actions/alerts/verify_alert_text.py | kangchenwei/keyautotest2 | f980d46cabfc128b2099af3d33968f236923063f | [
"MIT"
] | null | null | null | projects/golem_integration/tests/actions/alerts/verify_alert_text.py | kangchenwei/keyautotest2 | f980d46cabfc128b2099af3d33968f236923063f | [
"MIT"
] | null | null | null | from golem import actions
description = 'Verify verify_alert_text action'
def test(data):
    """Exercise verify_alert_text on both a matching and a mismatching text."""
    actions.navigate(data.env.url + 'alert/')
    actions.click('#alert-button')
    # Positive case: the alert text matches.
    actions.verify_alert_text('an alert')
    # Negative case: a mismatch must raise with a descriptive message.
    try:
        actions.verify_alert_text('incorrect text')
    except Exception as e:
        assert "Expected alert text to be 'incorrect text' but was 'an alert'" in e.args[0]
    else:
        # Bug fix: without this `else` the test silently passed even when the
        # action failed to detect the wrong text.
        raise AssertionError("verify_alert_text('incorrect text') did not raise")
    actions.dismiss_alert()
| 28.133333 | 91 | 0.701422 |
3f892a4e1031123c5c5ac6832c46ebe2b5b122df | 6,237 | py | Python | configs/custom/testmodel_attention_multi_conv_2x.py | SeHwanJoo/mmdetection_vinbig | 9a27d2b5cd8b3ec9ed1a94e4704a7c883f15dce3 | [
"Apache-2.0"
] | 2 | 2021-04-01T08:17:08.000Z | 2021-07-12T11:53:53.000Z | configs/custom/testmodel_attention_multi_conv_2x.py | SeHwanJoo/mmdetection_vinbig | 9a27d2b5cd8b3ec9ed1a94e4704a7c883f15dce3 | [
"Apache-2.0"
] | null | null | null | configs/custom/testmodel_attention_multi_conv_2x.py | SeHwanJoo/mmdetection_vinbig | 9a27d2b5cd8b3ec9ed1a94e4704a7c883f15dce3 | [
"Apache-2.0"
] | null | null | null | model = dict(
type='RetinaNet',
pretrained='torchvision://resnet50',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch'),
neck=dict(
type='AttentionFPNMultiConv',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
num_outs=5,
multi_conv=3),
bbox_head=dict(
type='RetinaHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=4,
scales_per_octave=3,
ratios=[0.5, 1.0, 2.0],
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0.0, 0.0, 0.0, 0.0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
train_cfg=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100))
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(666, 400), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(
type='Normalize',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(666, 400),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(
type='Normalize',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img'])
])
]
data = dict(
samples_per_gpu=4,
workers_per_gpu=2,
train=dict(
type='CocoDataset',
ann_file='data/coco/annotations/instances_train2017.json',
img_prefix='data/coco/train2017/',
pipeline=[
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(666, 400), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(
type='Normalize',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]),
val=dict(
type='CocoDataset',
ann_file='data/coco/annotations/instances_val2017.json',
img_prefix='data/coco/val2017/',
pipeline=[
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(666, 400),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(
type='Normalize',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img'])
])
]),
test=dict(
type='CocoDataset',
ann_file='data/coco/annotations/instances_val2017.json',
img_prefix='data/coco/val2017/',
pipeline=[
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(666, 400),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(
type='Normalize',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img'])
])
]))
evaluation = dict(interval=1, metric='bbox')
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=50)
checkpoint_config = dict(interval=1)
log_config = dict(interval=50, hooks=[dict(type='TextLoggerHook')])
custom_hooks = [dict(type='NumClassCheckHook')]
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
work_dir = './work_dirs/attention_model_multi_conv'
gpu_ids = range(0, 1)
| 33.713514 | 77 | 0.526215 |
35060d9d3929d6bf783dfdd451240702a414ee49 | 26,289 | py | Python | intellipush/client.py | Intellipush/intellipush-python-sdk | ddc322b64b3063dcc5d7d6e91b4eb9286aa12fb8 | [
"MIT"
] | null | null | null | intellipush/client.py | Intellipush/intellipush-python-sdk | ddc322b64b3063dcc5d7d6e91b4eb9286aa12fb8 | [
"MIT"
] | null | null | null | intellipush/client.py | Intellipush/intellipush-python-sdk | ddc322b64b3063dcc5d7d6e91b4eb9286aa12fb8 | [
"MIT"
] | null | null | null | import json as jsonlib
import requests
import time
import datetime
from .utils import php_encode
from .messages import SMS
from .contacts import Target
class Intellipush:
def __init__(self, key, secret, base_url='https://www.intellipush.com/api', version='4.0'):
"""
Creat a client instance for communicating with Intellipush.
`base_url` and `version` should be left to their default values unless you have a particular requirement.
:param key: Your API key
:param secret: Your API secret
:param base_url: The base URL of the Intellipush API
:param version: Version of the API that the client should communicate with
"""
self.key = key
self.secret = secret
self.base_url = base_url.rstrip('/')
self.version = version
self.sdk_tag = 'python'
self.last_error = None
self.last_error_code = None
self.last_error_message = None
def sms(self, countrycode, phonenumber, message):
"""
Simple method to directly send an sms without any extra settings.
:param countrycode: Country code of the phone number (i.e. 0047)
:param phonenumber: Phone number the message should be delivered to
:param message: The message itself - a message will be split into several messages behind the scenes if its
lengthexceeds 160 characters.
:return: Response from the API with metadata about the queued/delivered message
"""
sms = SMS(
receivers=[(countrycode, phonenumber), ],
message=message,
)
return self.send_sms(sms)
def send_sms(self, sms):
"""
Send an SMS object - created from the `SMS` data type (`intellipush.messages.SMS`).
The main difference from the `sms` method is that this allows for far greater granularity when configuring
the message to be sent, such as scheduling the message for later delivery and providing multiple recipients.
:param sms: SMS object (`intellipush.messages.SMS`)
:return: Response from the API with metadata about the queued/delivered message(s)
"""
if len(sms.receivers) > 1:
return self.send_smses((sms, ))
return self._post(
'notification/createNotification',
data=self._sms_as_post_object(sms=sms),
)
def send_smses(self, smses):
"""
Send a batch of messages by giving a list of `SMS` objects (`intellipush.messages.SMS`).
This will deliver a batch / list of messages to the API, reducing the overhead when sending a single message by
itself. Useful if you need to deliver a large amount of messages at the same time.
:param smses: iterable giving an `SMS` object for each iteration
:return: Response from the API with metadata about the queued/delivered message(s)
"""
batch = []
for sms in smses:
for receiver in sms.receivers:
batch.append(self._sms_as_post_object(sms=sms, receiver=receiver))
return self._post(
'notification/createBatch',
data={'batch': batch},
expect_list_return=True,
)
def delete_sms(self, sms_id):
"""
Delete an unsent SMS.
Removes a queued SMS from the API, causing it to not be sent. Useful together with the ability to
schedule sending time for an SMS when submitting it to Intellipush.
:param sms_id: `id` of the SMS to remove - returned by the API when sending an SMS or listing SMSes available.
:return: Response from the API for the operation
"""
return self._post(
'notification/deleteNotification',
data={'notification_id': sms_id}
)
def update_sms(self, sms_id, sms):
"""
Update the information for a queued SMS.
Change the contents of an SMS that has been queued for scheduled sending.
:param sms_id: `id` of the SMS to update - returned by the API when sending an SMS or listing SMSes available.
:param sms: The updated SMS object
:return: Response from the API with metadata about the updated message
"""
sms_object = self._sms_as_post_object(sms)
sms_object['notification_id'] = sms_id
return self._post(
'notification/updateNotification',
data=sms_object
)
def fetch_sms(self, sms_id):
"""
Fetch information about an SMS sent or scheduled through Intellipush.
:param sms_id: `id` of the SMS to retrieve
:return: Metadata about the message
"""
return self._post(
'notification/getNotification',
data={'notification_id': sms_id}
)
def scheduled_smses(self, items=50, page=1):
"""
Retrieve a list of still scheduled messages on Intellipush.
:param items: Number of items on each page
:param page: The current page (1-based)
:return: A list of scheduled messages available on Intellipush
"""
return self._post(
'notification/getUnsendtNotifications',
data={'page': page, 'items': items}
)
def sent_smses(self, items=50, page=1):
"""
Retrieve a list of messages sent through Intellipush.
:param items: Number of items on each page
:param page: The current page (1-based)
:return: A list of messages that have been sent
"""
return self._post(
'notification/getSendtNotifications',
data={'page': page, 'items': items}
)
def received_smses(self, items=50, page=1, keyword=None, second_keyword=None):
"""
Retrieve a list of messages _received_ by your keyword from your recipients.
:param items: Number of items on each page
:param page: The current page (1-based)
:param keyword: The primary keyword to retrieve received smses for
:param second_keyword: The secondary keyword to filter messages by
:return:
"""
return self._post(
'notification/getReceived',
data={'page': page, 'items': items, 'keyword': keyword, 'secondKeyword': second_keyword}
)
def create_contact(self,
name,
countrycode=None,
phonenumber=None,
email=None,
company=None,
sex=None,
country=None,
param1=None,
param2=None,
param3=None,
**kwargs,
):
"""
Create a contact on Intellipush.
:param name: Name of the contact
:param countrycode: Country code of the contact (i.e. `0047``
:param phonenumber: Phone number of the contact
:param email: Email of the contact
:param company: Company name of the contact
:param sex: Sex of the contact
:param country: Associated country of the contact
:param param1: Free form value to associate with the contact
:param param2: Free form value to associate with the contact
:param param3: Free form value to associate with the contact
:param kwargs: Any additional parameters are supported by the client as necessary if the contact format is
extended without the client being updated
:return: Metadata about the created contact from Intellipush
"""
contact = {
'name': name,
'countrycode': countrycode,
'phonenumber': phonenumber,
'email': email,
'company': company,
'sex': sex,
'country': country,
'param1': param1,
'param2': param2,
'param3': param3,
}
contact.update(kwargs)
return self._post('contact/createContact', contact)
def contact(self, contact_id=None, countrycode=None, phonenumber=None):
"""
Retrieve a contact from your Intellipush account. Either `contact_id` or both `countrycode` and `phonenumber`
has to be provided.
:param contact_id: `id` of the contact to retrieve
:param countrycode: Country code of the contact to retrieve (i.e. `0047`)
:param phonenumber: Phone number of the contact to retrieve
:return:
"""
if contact_id:
fetched = self._post('contact/getContact', data={
'contact_id': contact_id,
})
elif countrycode and phonenumber:
fetched = self._post('contact/getContactByPhoneNumber', data={
'countrycode': countrycode,
'phonenumber': phonenumber,
})
else:
raise IntellipushException('Missing contact_id or (countrycode and phonenumber)')
if not fetched:
return None
return fetched[0]
def delete_contact(self, contact_id):
"""
Delete a contact from its id.
:param contact_id: The id of the contact to remove.
:return: Reponse from the API
"""
return self._post('contact/deleteContact', {
'contact_id': contact_id,
})
def update_contact(self, contact_id, name=None, countrycode=None, phonenumber=None, email=None, company=None, sex=None, country=None, param1=None, param2=None, param3=None, **kwargs):
contact = {
'contact_id': contact_id,
'name': name,
'countrycode': countrycode,
'phonenumber': phonenumber,
'email': email,
'company': company,
'sex': sex,
'country': country,
'param1': param1,
'param2': param2,
'param3': param3,
}
contact.update(kwargs)
return self._post('contact/updateContact', contact)
def create_contact_list(self, name):
"""
Create a new contact list.
:param name: Name of the contact list
:return: Response from the API with information about the created contact list
"""
result = self._post('contactlist/createContactlist', {
'contactlist_name': name,
})
return self._adopt_contact_list(result)
def contact_list(self, contact_list_id):
"""
Fetch a contact list given by its id.
:param contact_list_id: The id of the contact list to fetch.
:return:
"""
return self._adopt_contact_list(self._post('contactlist/getContactlist', {
'contactlist_id': contact_list_id,
}))
def add_to_contact_list(self, contact_list_id, contact_id):
"""
Add a contact to a contact list. You can use this to group your contacts into multiple segments.
:param contact_list_id: The `id` of the contact list to add the contact to
:param contact_id: The `id` of the contact to add
:return: Response from the API
"""
return self._post('contactlist/addContactToContactlist', {
'contactlist_id': contact_list_id,
'contact_id': contact_id,
})
def remove_from_contact_list(self, contact_list_id, contact_id):
"""
Remove a contact from a contact list.
:param contact_list_id: The `id` of the contact list to remove the contact from
:param contact_id: The `id` of the contact to remove
:return: Response from the API
"""
return self._post('contactlist/removeContactFromContactlist', {
'contactlist_id': contact_list_id,
'contact_id': contact_id,
})
def delete_contact_list(self, contact_list_id):
"""
Delete a contact list / segment.
:param contact_list_id: The `id` of the contact list to remove
:return: Response from the API
"""
return self._post('contactlist/deleteContactlist', {
'contactlist_id': contact_list_id,
})
def update_contact_list(self, contact_list_id, name):
"""
Update a contact list's information
:param contact_list_id: The `id` of the contact list to update
:param name: New name of the contact list
:return: Response from the API
"""
return self._adopt_contact_list(self._post('contactlist/updateContactlist', {
'contactlist_id': contact_list_id,
'contactlist_name': name,
}))
    def contact_list_size(self, contact_list_id, contact_list_filter=None):
        """
        Get the number of entries in a given contact list.
        :param contact_list_id: The `id` of the contact list to retrieve a count for
        :param contact_list_filter: Filter the contact list by these values (a `intellipush.contacts.ContactFilter`)
        :return: the count as an int, or None when the response lacks `amount`
        """
        # NOTE(review): `contact_list_filter` is accepted but never forwarded
        # to the API below, so filtering silently has no effect -- confirm the
        # expected request parameter and wire it through.
        result = self._post('contactlist/getNumberOfFilteredContactsInContactlist', {
            'contactlist_id': contact_list_id,
        })
        if 'amount' in result:
            return int(result['amount'])
        return None
    def contacts_not_in_contact_list(self, contact_list_id, items=50, page=1):
        """
        Retrieve all your contacts that are _not_ in the specified contact list.
        :param contact_list_id: `id` of the contact list to check the contacts against
        :param items: Number of contacts on each page
        :param page: The current page (1-based)
        :return: A list of contacts that isn't in the given contact list
        """
        # NOTE(review): not implemented -- currently returns None despite the
        # documented return value; callers should not rely on this method yet.
        pass
def current_user(self):
"""
Retrieve information about the currently logged in user.
:return:
"""
return self._post('user')
def shorturl(self, shorturl_id=None, shorturl=None):
"""
Retrieve a shorturl definition from its id or its shorturl. One of the parameters has
to be provided.
:param shorturl_id: The id of the shorturl definition to be retrieved
:param shorturl: The shorturl to retrieve details for (with or without `http://host/`)
:return: The fetched shorturl or None on failure
"""
if not shorturl_id and not shorturl:
raise NoValidIDException('Either shorturl_id or shorturl has to be provided')
if shorturl_id:
return self._post('url/getUrlDetailsById', {
'url_id': shorturl_id,
})
return self._post('url/getDetailsByShortUrl', {
'short_url': shorturl,
})
def create_shorturl(self, url, parent_url_id=None, target=None):
"""
Create a shorturl (or a child shorturl if `parent_url_id is provided).
A `target` parameter can be provided that links the shorturl to a specific user. The
parameter should be an `contacts.Target` object.
:param url: The URL to link the shorturl to.
:param parent_url_id: The ID of the parent shorturl if this is a version of the previous URL with a different target
:param target: A `contacts.Target` object that contains information to associate with the shorturl. If a target is given, `parent_url_id` must be set as well.
:return: Details about the created shorturl
"""
if target:
if not isinstance(target, Target):
raise TypeError('A `contacts.Target` object is required for the `target` parameter')
target = self._target_as_post_object(target=target)
if parent_url_id:
return self._post('url/generateChildUrl', {
'long_url': url,
'target': target,
'parent_url_id': parent_url_id,
})
if target:
raise InvalidTargetException('A `target` is only valid for child shorturls (when `parent_url_id` is given).')
return self._post('url/generateShortUrl', {
'long_url': url,
})
def shorturls(self, items=50, page=1, include_children=False, parent_shorturl_id=None, target=None):
"""
Retrieve all shorturls available for your Intellipush account.
:param items: The number of items to return for each request
:param page: The page number to return results for (1-based)
:param include_children: Also return shorturls that are children of other shorturls
:param parent_shorturl_id: Only return shorturls that have `parent_shorturl_id` as a parent
:param target: Only return shorturls that matches this target (`intellipush.contacts.Target`).
:return: A list of shorturls as returned from the API.
"""
if target:
if not isinstance(target, Target):
raise TypeError('A `contacts.Target` object is required for the `target` parameter')
target = self._target_as_post_object(target=target)
return self._post('url/getAll', {
'items': items,
'page': page,
'include_children': include_children,
'parent_shorturl_id': parent_shorturl_id,
'target': target,
})
def statistics(self):
"""
Retrieve statistics about pending messages (`unsentNotifications`), number of contacts
(`contacts`) and the number of contact lists (`contactlists`) active on your account.
These are returned under the `numberOf` key on the root dictionary.
:return: dict
"""
stats = self._post('statistics')
self._fix_statistics_keys(stats)
return stats
def two_factor_send(self, countrycode, phonenumber, message_before_code=None, message_after_code=None):
"""
Send a two factor authentication code to a given countrycode and phone number. The code
is validated by calling `two_factor_validate`.
:param countrycode: Country code of the recipient's phone number
:param phonenumber: Phone number to send 2FA code to
:param message_before_code: String to prefix the 2FA code with. The generated message is "<prefixmessage><code><postfix>".
:param message_after_code: Message to append after the 2FA code. No spaces are added automagically. The generated message is "<prefixmessage><code><postfix>".
:return: Response from Intellipush
:raises: TwoFactorAuthenticationIsAlreadyActive
"""
result = self._post('twofactor/send2FaCode', {
'countrycode': countrycode,
'phonenumber': phonenumber,
'message_p1': message_before_code,
'message_p2': message_after_code,
})
if result.get('hasCode', False):
raise TwoFactorAuthenticationIsAlreadyActive('The phone number has an active two factor authentication request.')
return result
def two_factor_validate(self, countrycode, phonenumber, code):
"""
Validate a previously sent two factor code. Method returns True if the code is valid for the
given phone number and country code, and False if not.
:param countrycode: Country code of the phone number of the user
:param phonenumber: Phone number of the user
:param code: The 2FA code the user has entered
:return: True or False depending on the validity of the code for the given country code and phone number.
"""
result = self._post('twofactor/check2FaCode', {
'countrycode': countrycode,
'phonenumber': phonenumber,
'code': code,
})
if not result:
return False
if 'access' in result and result['access'] is True:
return True
return False
def _default_parameters(self):
"""
Get a dictionary containing the default parameters that should be included in every request.
:return: A dict with basic request information
"""
return {
'api_secret': self.secret,
'appID': self.key,
't': int(time.time()),
'v': self.version,
's': self.sdk_tag,
}
def _url(self, endpoint):
"""
Merge base service URL with the endpoint we're requesting data from.
:param endpoint: Endpoint for the API request (usually `<module>/<command>`)
:return: The complete URL to use for the request
"""
return self.base_url + '/' + endpoint
def _post(self, endpoint, data=None, expect_list_return=False):
"""
Internal helper method to send requests to the intellipush service. Wraps error handling and raises exceptions
for general error conditions (such as HTTP status codes >= 300).
`last_error_code` and `last_error_message` will be set if an error occurs.
:param endpoint: The API endpoint to query (i.e. `contact/getContact`)
:param data: Information to send to the endpoint - depends on what the endpoint expects.
:param expect_list_return: Expect a list returned from the API endpoint - useful when the response consists of
multiple messages.
:return: The response from the API (returned under the `data` key). `last_error_code` and `last_error_message`
will be set to describe any error that occured.
"""
self.last_error_message = None
self.last_error_code = None
if not data:
data = {}
data.update(self._default_parameters())
encoded_data = php_encode(data)
response = requests.post(
url=self._url(endpoint),
data=encoded_data,
)
if response.status_code >= 300:
raise ServerSideException(
'Server generated an error code: ' +
str(response.status_code) +
': ' + response.reason
)
try:
response_data = response.json()
except jsonlib.JSONDecodeError as e:
raise ServerSideException('Invalid JSON: ' + response.text)
# The `batch` command returns a list, one for each message. We keep the first error we find, but return the
# whole list so the client can do what it wants.
if expect_list_return:
for status_message in response_data:
if 'errorcode' in status_message:
self.last_error_code = response_data['errorcode']
self.last_error_message = response_data['status_message']
break
return response_data
if not response_data['success']:
if 'errorcode' in response_data:
self.last_error_code = response_data['errorcode']
self.last_error_message = response_data['status_message']
return None
return response_data['data']
@staticmethod
def _fix_statistics_keys(statistics):
"""
Helper function to clean up the response from the statistics endpoint by removing
misspelled statistics keys.
:param statistics: Dictionary containing statistics, modified by reference
:return:
"""
if 'numberOf' in statistics:
number_of = statistics['numberOf']
if 'unsendtNotifications' in number_of:
number_of['unsentNotifications'] = number_of['unsendtNotifications']
del number_of['unsendtNotifications']
@staticmethod
def _adopt_contact_list(contact_list):
"""
A contact list is returned from the API with the 'name' key as 'contactlist_name' OR as
`list_name`. This is different from the other elements, so we patch the object to be
similar to the other objects returned by the library.
:param contact_list:
:return:
"""
if not contact_list:
return contact_list
# Copy the list so we don't make direct changes to the one sent in
contact_list = dict(contact_list)
if 'contactlist_name' in contact_list:
contact_list['name'] = contact_list['contactlist_name']
del contact_list['contactlist_name']
if 'list_name' in contact_list:
contact_list['name'] = contact_list['list_name']
del contact_list['list_name']
return contact_list
@staticmethod
def _sms_as_post_object(sms, receiver=None):
"""
Convert an SMS object and its values to a format suitable for posting to Intellipush.
:param sms: an `contacts.SMS` object
:param receiver: If given, the `receiver` should be a two element tuple with country code and phone number
that overrides the one given in the SMS. This is useful when doing batch requests, as it
allows us to avoid changing the original SMS object - just the data we're posting to
the server. The tuple would be formatted as `('0047', '900xxxxx').
:return:
"""
data = vars(sms)
if data['when'] and isinstance(data['when'], datetime.datetime):
data['date'] = data['when'].strftime('%Y-%m-%d')
data['time'] = data['when'].strftime('%H:%M:%S')
else:
data['date'] = 'now'
data['time'] = 'now'
if len(data['receivers']) > 1 and not receiver:
raise IntellipushException('Attempted to send message with multiple receivers without proper batching')
if not receiver:
receiver = data['receivers'][0]
data['single_target_countrycode'] = receiver[0]
data['single_target'] = receiver[1]
del data['receivers']
return data
@staticmethod
def _target_as_post_object(target):
return vars(target)
class IntellipushException(Exception):
    """Base class for all errors raised by the intellipush client."""
    pass


class NoValidIDException(IntellipushException):
    """Error subtype for operations lacking a valid id (see call sites)."""
    pass


class ServerSideException(IntellipushException):
    """Raised when the API responds with an error status or invalid JSON."""
    pass


class InvalidTargetException(IntellipushException):
    """Error subtype for malformed or unsupported targets (see call sites)."""
    pass


class TwoFactorAuthenticationIsAlreadyActive(IntellipushException):
    """Error subtype for enabling 2FA when it is already on (see call sites)."""
    pass
| 37.078984 | 187 | 0.619575 |
06ee731fb0f0935054b34e8b868a15a7621bc1b7 | 3,159 | py | Python | apps/server/reMac_server.py | jetedonner/ch.kimhauser.python.remac | 22bc09455c54a0a3c099e58d6e1bad055a8bb2fe | [
"MIT"
] | null | null | null | apps/server/reMac_server.py | jetedonner/ch.kimhauser.python.remac | 22bc09455c54a0a3c099e58d6e1bad055a8bb2fe | [
"MIT"
] | null | null | null | apps/server/reMac_server.py | jetedonner/ch.kimhauser.python.remac | 22bc09455c54a0a3c099e58d6e1bad055a8bb2fe | [
"MIT"
] | null | null | null | import socket
import selectors
import traceback
import sys
# import keyboard
from pynput import keyboard
from apps.server.libs import reMac_libserver
# conHost = "192.168.0.49"
conHost = "127.0.0.1"
conPort = "6890"
sel = selectors.DefaultSelector()
class reMac_server():
    """Minimal selector-based TCP server for the reMac application.

    Accepts connections on a listening socket and hands each one to a
    `reMac_libserver` handler registered with the module-level selector.
    """

    # NOTE(review): `global doExit` at class scope only declares the name for
    # this block; `doExit = False` below still creates a *class* attribute,
    # while `on_press` assigns an *instance* attribute. The flag is currently
    # unused by the main loop (its check is commented out).
    global doExit
    doExit = False

    def __init__(self):
        # No state is kept on the instance; just announce setup.
        self.setup_server()

    def setup_server(self):
        """Placeholder for server initialisation; currently only logs."""
        print(f'Server setup successfully!')

    def accept_connection(self, sock):
        """Accept a new client on `sock` and register its message handler."""
        conn, addr = sock.accept()  # Should be ready to read
        print("accepted connection from", addr)
        conn.setblocking(False)
        message = reMac_libserver.reMac_libserver(sel, conn, addr)
        sel.register(conn, selectors.EVENT_READ, data=message)

    def on_press(self, key):
        """pynput keyboard callback (the listener wiring is commented out).

        NOTE(review): special keys such as `Key.esc` have no `.char`
        attribute, so `key.char` would raise AttributeError before the esc
        comparison is reached -- confirm before re-enabling the listener.
        """
        if key.char == None:
            return
        if key == keyboard.Key.esc or key.char == 'q':
            # Stop listener
            self.doExit = True
            # message.close()
            # sel.close()
            sys.exit(1)
            # return False
    # else:
    #     _start()
    # Collect events until released

    def start_server(self, myHost = conHost, myPort = conPort):
        """Bind, listen and run the selector event loop until interrupted.

        :param myHost: interface to bind (default: module-level `conHost`)
        :param myPort: port to bind; may be a string, it is int()-converted
        """
        host, port = myHost, int(myPort)
        lsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # Avoid bind() exception: OSError: [Errno 48] Address already in use
        lsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        lsock.bind((host, port))
        lsock.listen()
        print("reMac Server started successfully - Listening on:", (host, port))
        lsock.setblocking(False)
        # data=None marks the listening socket so the loop below can tell
        # "new connection" apart from "client traffic".
        sel.register(lsock, selectors.EVENT_READ, data=None)
        # with keyboard.Listener(on_press=self.on_press) as listener:
        #     listener.join()
        try:
            while True:
                # if self.doExit:
                #     break
                events = sel.select(timeout=None)
                for key, mask in events:
                    if key.data is None:
                        # Listening socket is ready: accept a new client.
                        self.accept_connection(key.fileobj)
                    else:
                        # Established connection: delegate to its handler.
                        message = key.data
                        try:
                            message.process_events(mask)
                        except Exception:
                            print(
                                "main: error: exception for",
                                f"{message.addr}:\n{traceback.format_exc()}",
                            )
                            message.close()
        except KeyboardInterrupt:
            print("caught keyboard interrupt, exiting")
        finally:
            sel.close()
# a = [1, 2, 3, 4]
# print("Press Enter to continue or press Esc to exit: ")
# while True:
# try:
# if keyboard.is_pressed('ENTER'):
# print("you pressed Enter, so printing the list..")
# print(a)
# break
# if keyboard.is_pressed('Esc'):
# print("\nyou pressed Esc, so exiting...")
# sys.exit(0)
# except:
# break | 31.277228 | 80 | 0.518519 |
d6f4fce6ffc6a36adb9c5cdb1508649d4d862594 | 534 | py | Python | invenio_vocabularies/contrib/subjects/__init__.py | mb-wali/invenio-vocabularies | c159d5bd0ca3e7b857ff1b6764835751e4f446ea | [
"MIT"
] | null | null | null | invenio_vocabularies/contrib/subjects/__init__.py | mb-wali/invenio-vocabularies | c159d5bd0ca3e7b857ff1b6764835751e4f446ea | [
"MIT"
] | null | null | null | invenio_vocabularies/contrib/subjects/__init__.py | mb-wali/invenio-vocabularies | c159d5bd0ca3e7b857ff1b6764835751e4f446ea | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (C) 2020-2021 CERN.
# Copyright (C) 2021 Northwestern University.
#
# Invenio-Vocabularies is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see LICENSE file for more
# details.
"""Subjects module."""
from .resources import SubjectsResource, SubjectsResourceConfig
from .services import SubjectsService, SubjectsServiceConfig
__all__ = [
"SubjectsResource",
"SubjectsResourceConfig",
"SubjectsService",
"SubjectsServiceConfig",
]
| 25.428571 | 73 | 0.741573 |
590ef28f1f2f79e4eac63456e16d1519a99fe022 | 1,049 | py | Python | common/@PulseCalibration/PiCal.py | silky/Qlab | ce4085d3ad5bac7f6056c6b71e4cdfad2d70820c | [
"Apache-2.0"
] | 1 | 2019-06-27T11:33:05.000Z | 2019-06-27T11:33:05.000Z | common/@PulseCalibration/PiCal.py | silky/Qlab | ce4085d3ad5bac7f6056c6b71e4cdfad2d70820c | [
"Apache-2.0"
] | null | null | null | common/@PulseCalibration/PiCal.py | silky/Qlab | ce4085d3ad5bac7f6056c6b71e4cdfad2d70820c | [
"Apache-2.0"
] | null | null | null | import argparse
import sys, os

# Command-line interface: the script is driven entirely by positional args.
parser = argparse.ArgumentParser()
parser.add_argument('pyqlabpath', help='path to PyQLab directory')
parser.add_argument('qubit', help='qubit name')
parser.add_argument('direction', help='direction (X or Y)')
parser.add_argument('numPulses', type=int, help='maximum number of 180s')
parser.add_argument('piAmp', type=float, help='piAmp')
args = parser.parse_args()

# Pull the PyQLab environment (QubitFactory, pulse primitives such as
# X90/MEAS, compile_to_hardware, ...) into this module's globals.
# NOTE(review): `execfile` is Python 2 only -- this script cannot run on
# Python 3 as-is.
sys.path.append(args.pyqlabpath)
execfile(os.path.join(args.pyqlabpath, 'startup.py'))

q = QubitFactory(args.qubit)
q.pulseParams['piAmp'] = args.piAmp

# Calibration sequences: an identity reference, then for n = 0..numPulses-1
# a 90-degree pulse followed by n pi pulses ending in a measurement, plus the
# same ladder built from the `m` variants (presumably the negative rotation
# sense -- confirm against the QGL pulse definitions). Axis per `direction`.
if args.direction == 'X':
    seqs = [[Id(q), MEAS(q)]] + [[X90(q)] + [X(q)]*n + [MEAS(q)] for n in range(args.numPulses)] + \
           [[X90m(q)] + [Xm(q)]*n + [MEAS(q)] for n in range(args.numPulses)]
else:
    seqs = [[Id(q), MEAS(q)]] + [[Y90(q)] + [Y(q)]*n + [MEAS(q)] for n in range(args.numPulses)] + \
           [[Y90m(q)] + [Ym(q)]*n + [MEAS(q)] for n in range(args.numPulses)]

fileNames = compile_to_hardware(seqs, fileName='PiCal/PiCal', nbrRepeats=2)
# plot_pulse_files(fileNames)
| 40.346154 | 100 | 0.665396 |
e2f2e0442b615bf4bc72398658855ee28e32dfb6 | 7,831 | py | Python | pyam/_aggregate.py | gidden/pyam | c08da618ad9c9b3866326f58562a4a69b165cc79 | [
"Apache-2.0"
] | 2 | 2018-09-04T03:26:39.000Z | 2019-01-14T21:05:08.000Z | pyam/_aggregate.py | gidden/pyam | c08da618ad9c9b3866326f58562a4a69b165cc79 | [
"Apache-2.0"
] | 6 | 2018-11-01T11:02:41.000Z | 2019-04-23T09:06:59.000Z | pyam/_aggregate.py | gidden/pyam | c08da618ad9c9b3866326f58562a4a69b165cc79 | [
"Apache-2.0"
] | null | null | null | import pandas as pd
import numpy as np
import logging
from pyam.logging import adjust_log_level
from pyam.utils import (
islistable,
isstr,
find_depth,
reduce_hierarchy,
KNOWN_FUNCS
)
logger = logging.getLogger(__name__)
def _aggregate(df, variable, components=None, method=np.sum):
    """Internal implementation of the `aggregate` function.

    Rolls the values of component variables (by default, one hierarchy
    level below `variable`) up into `variable`.

    :param df: an IamDataFrame-like object (uses `df.data`,
        `df._variable_components` and `df._apply_filters`)
    :param variable: a variable name or a list of variable names
    :param components: explicit component variables; only allowed when
        `variable` is a single string
    :param method: aggregation function or name resolved by `_group_and_agg`
    :return: an indexed `pd.Series`, or None if no components were found
    """
    # list of variables require default components (no manual list)
    if islistable(variable) and components is not None:
        raise ValueError('aggregating by list of variables cannot use '
                         'custom components')

    # maps each component variable to the target variable it rolls up into
    mapping = {}
    msg = 'cannot aggregate variable `{}` because it has no components'
    # if single variable
    if isstr(variable):
        # default components to all variables one level below `variable`
        components = components or df._variable_components(variable)

        if not len(components):
            logger.info(msg.format(variable))
            return

        for c in components:
            mapping[c] = variable

    # else, use all variables one level below `variable` as components
    else:
        for v in variable if islistable(variable) else [variable]:
            _components = df._variable_components(v)
            if not len(_components):
                logger.info(msg.format(v))
                continue

            for c in _components:
                mapping[c] = v

    # rename all components to `variable` and aggregate
    # (operates on a copy of `df.data`, so the source frame is untouched)
    _df = df.data[df._apply_filters(variable=mapping.keys())].copy()
    _df['variable'].replace(mapping, inplace=True)
    return _group_and_agg(_df, [], method)
def _aggregate_recursive(df, variable, method=np.sum):
    """Recursive aggregation along the variable tree.

    Walks the variable hierarchy and aggregates every subcategory of
    `variable` bottom-up, appending intermediate aggregates so deeper
    levels feed into shallower ones.

    :param df: an IamDataFrame (uses `copy`, `aggregate` and `data`)
    :param variable: top-level variable whose subtree is aggregated
    :param method: aggregation method passed through to `aggregate`
    :return: the `data` of the concatenated aggregated results
    """
    _df_aggregated = None
    _df = df.copy()

    # iterate over variables to find all subcategories to be aggregated;
    # `df` is never mutated in this loop (only the copy `_df` is, later),
    # so compute the per-variable depth once instead of every iteration
    depth = find_depth(df.data.variable)
    sub_variables = []
    for d in reversed(range(1, max(depth) + 1)):
        var_list = (
            df.data.variable[[i == d for i in depth]]
            .unique()
        )
        # parents (one level up) of all variables at depth `d`
        vars_up = pd.Series(
            [reduce_hierarchy(i, -1) for i in var_list]).unique()
        if [i for i, entr in enumerate(vars_up) if entr.startswith(variable)]:
            for v in vars_up:
                sub_variables.append(v)

    # aggregate the deepest subcategories first (bottom-up)
    sub_variables = sorted(set(sub_variables), reverse=True)
    for entry in sub_variables:
        _df.aggregate(variable=entry, append=True)
        _df_temp = _df.aggregate(variable=entry, append=False)
        if _df_aggregated is None:
            _df_aggregated = _df_temp.copy()
        else:
            _df_aggregated.append(_df_temp, inplace=True)

    return _df_aggregated.data
def _aggregate_region(df, variable, region, subregions=None, components=False,
                      method='sum', weight=None):
    """Internal implementation for aggregating data over subregions.

    :param df: an IamDataFrame-like object
    :param variable: variable (str) or list of variables to aggregate
    :param region: name of the target (aggregate) region
    :param subregions: regions to aggregate over; defaults to every region
        other than `region` that reports `variable`
    :param components: False, True (auto-detect), or a list of variables at
        the `region` level to add on top of the subregion aggregate
    :param weight: variable to use as weight; mutually exclusive with
        `components`, and only allowed with summation
    :return: indexed `pd.Series`, or None if `variable` exists in no subregion
    """
    if not isstr(variable) and components is not False:
        msg = 'aggregating by list of variables with components ' \
              'is not supported'
        raise ValueError(msg)

    if weight is not None and components is not False:
        msg = 'using weights and components in one operation not supported'
        raise ValueError(msg)

    # default subregions to all regions other than `region`
    subregions = subregions or df._all_other_regions(region, variable)

    if not len(subregions):
        msg = 'cannot aggregate variable `{}` to `{}` because it does not'\
              ' exist in any subregion'
        logger.info(msg.format(variable, region))
        return

    # compute aggregate over all subregions
    subregion_df = df.filter(region=subregions)
    rows = subregion_df._apply_filters(variable=variable)
    if weight is None:
        # plain aggregation collapsing the `region` dimension
        col = 'region'
        _data = _group_and_agg(subregion_df.data[rows], col, method=method)
    else:
        weight_rows = subregion_df._apply_filters(variable=weight)
        _data = _agg_weight(subregion_df.data[rows],
                            subregion_df.data[weight_rows], method)

    # if not `components=False`, add components at the `region` level
    if components is not False:
        # silence filter-logging while looking up the region's own data
        with adjust_log_level(logger):
            region_df = df.filter(region=region)

        # if `True`, auto-detect `components` at the `region` level,
        # defaults to variables below `variable` only present in `region`
        if components is True:
            level = dict(level=None)
            r_comps = region_df._variable_components(variable, **level)
            sr_comps = subregion_df._variable_components(variable, **level)
            components = set(r_comps).difference(sr_comps)

        if len(components):
            # rename all components to `variable` and aggregate
            rows = region_df._apply_filters(variable=components)
            _df = region_df.data[rows].copy()
            _df['variable'] = variable
            _data = _data.add(_group_and_agg(_df, 'region'), fill_value=0)

    return _data
def _aggregate_time(df, variable, column, value, components, method=np.sum):
    """Internal implementation for aggregating data over subannual time.

    :param df: an IamDataFrame-like object
    :param variable: variable(s) to aggregate
    :param column: name of the subannual time column
    :param value: label for the aggregated entry in `column`
    :param components: entries of `column` to aggregate over; defaults to
        all entries other than `value`
    :param method: aggregation function (resolved via `_get_method_func`)
    :return: indexed `pd.Series` with the `column` level set to `value`
    """
    # default `components` to all entries in `column` other than `value`
    # NOTE(review): this reads the hard-coded `subannual` attribute rather
    # than `df.data[column]` -- fine while `column='subannual'`, but confirm
    # before calling with any other column name.
    if components is None:
        components = list(set(df.data.subannual.unique()) - set([value]))

    # compute aggregate over time
    filter_args = dict(variable=variable)
    filter_args[column] = components
    index = _list_diff(df.data.columns, [column, 'value'])

    # pivot the component entries into columns, aggregate them row-wise,
    # and re-attach `value` as the new `column` index level
    _data = pd.concat(
        [
            df.filter(**filter_args).data
            .pivot_table(index=index, columns=column)
            .value
            .rename_axis(None, axis=1)
            .apply(_get_method_func(method), axis=1)
        ], names=[column] + index, keys=[value])

    # reset index-level order to original IamDataFrame
    _data.index = _data.index.reorder_levels(df._LONG_IDX)
    return _data
def _group_and_agg(df, by, method=np.sum):
    """Group `df` by every column except `value` and `by`, then aggregate
    the `value` column; returns an indexed `pd.Series`.

    :param by: a column name or list of column names to collapse
    :param method: aggregator name or callable (default: sum)
    """
    drop = [by] if isstr(by) else by
    group_cols = [c for c in df.columns if c not in ['value'] + drop]
    aggregator = _get_method_func(method)
    return df.groupby(group_cols)['value'].agg(aggregator)
def _agg_weight(df, weight, method):
    """Aggregate `df` by regions with weights, return indexed `pd.Series`.

    Computes sum(value * weight) / sum(weight) over the `region` dimension.

    :param df: long-format data with a `value` column
    :param weight: long-format weight data whose rows must line up with `df`
        on all index columns except `variable`/`unit`
    :param method: must be summation (`'sum'` or `np.sum`)
    """
    # only summation allowed with weights
    if method not in ['sum', np.sum]:
        raise ValueError('only method `np.sum` allowed for weighted average')

    w_cols = _list_diff(df.columns, ['variable', 'unit', 'value'])
    _weight = _get_value_col(weight, w_cols)

    # both series must align row-for-row for the multiplication below
    if not _get_value_col(df, w_cols).index.equals(_weight.index):
        raise ValueError('inconsistent index between variable and weight')

    _data = _get_value_col(df)
    # group over everything except `region`, which is being collapsed
    col1 = _list_diff(_data.index.names, ['region'])
    col2 = _list_diff(w_cols, ['region'])
    return (_data * _weight).groupby(col1).sum() / _weight.groupby(col2).sum()
def _list_diff(lst, exclude):
    """Return the elements of `lst` not contained in `exclude`, keeping order."""
    return [entry for entry in lst if entry not in exclude]
def _get_value_col(df, cols=None):
    """Return the `value` column as a `pd.Series` sorted by index.

    :param df: long-format DataFrame containing a `value` column
    :param cols: columns to use as the index; defaults to every column
        other than `value`
    """
    index_cols = cols or [c for c in df.columns if c != 'value']
    series = df.set_index(index_cols)['value']
    return series.sort_index()
def _get_method_func(method):
    """Translate a string to a known aggregation method.

    Non-string arguments (callables) are passed through unchanged; strings
    are looked up in `KNOWN_FUNCS`.
    """
    if not isstr(method):
        return method

    # raise error if `method` is a string but not in dict of known methods
    if method not in KNOWN_FUNCS:
        raise ValueError('method `{}` is not a known aggregator'.format(method))

    return KNOWN_FUNCS[method]
| 35.922018 | 78 | 0.652918 |
6059d21a7c25d8bae279132031bb1b2fcae1fe68 | 17,249 | py | Python | ingestion/src/metadata/ingestion/sink/metadata_rest.py | spauldurai/OpenMetadata | a8d2fa42ed2d7740ef9e6a14d79a0ad7e0462ece | [
"Apache-2.0"
] | null | null | null | ingestion/src/metadata/ingestion/sink/metadata_rest.py | spauldurai/OpenMetadata | a8d2fa42ed2d7740ef9e6a14d79a0ad7e0462ece | [
"Apache-2.0"
] | null | null | null | ingestion/src/metadata/ingestion/sink/metadata_rest.py | spauldurai/OpenMetadata | a8d2fa42ed2d7740ef9e6a14d79a0ad7e0462ece | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Collate
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import traceback
from typing import Generic, TypeVar
from pydantic import BaseModel, ValidationError
from metadata.config.common import ConfigModel
from metadata.generated.schema.api.data.createChart import CreateChartEntityRequest
from metadata.generated.schema.api.data.createDashboard import (
CreateDashboardEntityRequest,
)
from metadata.generated.schema.api.data.createDatabase import (
CreateDatabaseEntityRequest,
)
from metadata.generated.schema.api.data.createLocation import (
CreateLocationEntityRequest,
)
from metadata.generated.schema.api.data.createMlModel import CreateMlModelEntityRequest
from metadata.generated.schema.api.data.createPipeline import (
CreatePipelineEntityRequest,
)
from metadata.generated.schema.api.data.createTable import CreateTableEntityRequest
from metadata.generated.schema.api.data.createTopic import CreateTopicEntityRequest
from metadata.generated.schema.api.lineage.addLineage import AddLineage
from metadata.generated.schema.api.policies.createPolicy import (
CreatePolicyEntityRequest,
)
from metadata.generated.schema.api.teams.createTeam import CreateTeamEntityRequest
from metadata.generated.schema.api.teams.createUser import CreateUserEntityRequest
from metadata.generated.schema.entity.data.chart import ChartType
from metadata.generated.schema.entity.data.location import Location
from metadata.generated.schema.entity.data.mlmodel import MlModel
from metadata.generated.schema.entity.data.pipeline import Pipeline
from metadata.generated.schema.entity.policies.policy import Policy
from metadata.generated.schema.entity.teams.user import User
from metadata.generated.schema.type.entityReference import EntityReference
from metadata.ingestion.api.common import Entity, WorkflowContext
from metadata.ingestion.api.sink import Sink, SinkStatus
from metadata.ingestion.models.ometa_table_db import OMetaDatabaseAndTable
from metadata.ingestion.models.table_metadata import Chart, Dashboard
from metadata.ingestion.ometa.client import APIError
from metadata.ingestion.ometa.ometa_api import OpenMetadata
from metadata.ingestion.ometa.openmetadata_rest import MetadataServerConfig
logger = logging.getLogger(__name__)

# Allow types from the generated pydantic models
T = TypeVar("T", bound=BaseModel)

# Maps chart-type strings from dashboard sources onto OpenMetadata's
# ChartType enum; anything not listed falls back to ChartType.Other
# (see `write_charts`).
om_chart_type_dict = {
    "line": ChartType.Line,
    "table": ChartType.Table,
    "dist_bar": ChartType.Bar,
    "bar": ChartType.Bar,
    "big_number": ChartType.Line,
    "histogram": ChartType.Histogram,
    "big_number_total": ChartType.Line,
    "dual_line": ChartType.Line,
    "line_multi": ChartType.Line,
    "treemap": ChartType.Area,
    "box_plot": ChartType.Bar,
}
class MetadataRestSinkConfig(ConfigModel):
    """Configuration model for the metadata REST sink."""

    # NOTE(review): annotated as `str` but defaults to None --
    # `Optional[str]` would describe the contract more accurately;
    # confirm before tightening pydantic validation.
    api_endpoint: str = None
class MetadataRestSink(Sink[Entity]):
config: MetadataRestSinkConfig
status: SinkStatus
    def __init__(
        self,
        ctx: WorkflowContext,
        config: MetadataRestSinkConfig,
        metadata_config: MetadataServerConfig,
    ):
        """Set up the OpenMetadata client and bootstrap known teams."""
        super().__init__(ctx)
        self.config = config
        self.metadata_config = metadata_config
        self.status = SinkStatus()
        self.wrote_something = False
        # chart name -> EntityReference, filled by `write_charts` and read
        # by `_get_chart_references` when writing dashboards
        self.charts_dict = {}
        self.metadata = OpenMetadata(self.metadata_config)
        self.api_client = self.metadata.client
        # team name -> team id, pre-populated from the server below
        self.team_entities = {}
        self._bootstrap_entities()
@classmethod
def create(
cls, config_dict: dict, metadata_config_dict: dict, ctx: WorkflowContext
):
config = MetadataRestSinkConfig.parse_obj(config_dict)
metadata_config = MetadataServerConfig.parse_obj(metadata_config_dict)
return cls(ctx, config, metadata_config)
def write_record(self, record: Entity) -> None:
if isinstance(record, OMetaDatabaseAndTable):
self.write_tables(record)
elif isinstance(record, CreateTopicEntityRequest):
self.write_topics(record)
elif isinstance(record, Chart):
self.write_charts(record)
elif isinstance(record, Dashboard):
self.write_dashboards(record)
elif isinstance(record, Location):
self.write_locations(record)
elif isinstance(record, Policy):
self.write_policies(record)
elif isinstance(record, Pipeline):
self.write_pipelines(record)
elif isinstance(record, AddLineage):
self.write_lineage(record)
elif isinstance(record, User):
self.write_users(record)
elif isinstance(record, CreateMlModelEntityRequest):
self.write_ml_model(record)
else:
logging.info(
f"Ignoring the record due to unknown Record type {type(record)}"
)
    def write_tables(self, db_and_table: OMetaDatabaseAndTable):
        """Create/update a database and table, plus optional location,
        sample data, profile data and data model.

        Any APIError/ValidationError is logged and recorded as a failure
        instead of aborting the run.
        """
        try:
            # ensure the parent database exists before the table
            db_request = CreateDatabaseEntityRequest(
                name=db_and_table.database.name,
                description=db_and_table.database.description,
                service=EntityReference(
                    id=db_and_table.database.service.id,
                    type="databaseService",
                ),
            )
            db = self.metadata.create_or_update(db_request)
            table_request = CreateTableEntityRequest(
                name=db_and_table.table.name,
                tableType=db_and_table.table.tableType,
                columns=db_and_table.table.columns,
                description=db_and_table.table.description.strip(),
                database=db.id,
            )
            # views carry their defining SQL as an extra field
            if db_and_table.table.viewDefinition:
                table_request.viewDefinition = (
                    db_and_table.table.viewDefinition.__root__
                )
            created_table = self.metadata.create_or_update(table_request)
            # optional storage location linked to the table
            if db_and_table.location is not None:
                location_request = CreateLocationEntityRequest(
                    name=db_and_table.location.name,
                    description=db_and_table.location.description.strip(),
                    locationType=db_and_table.location.locationType,
                    owner=db_and_table.location.owner,
                    service=EntityReference(
                        id=db_and_table.location.service.id,
                        type="storageService",
                    ),
                )
                location = self.metadata.create_or_update(location_request)
                self.metadata.add_location(table=created_table, location=location)
            # sample data is best-effort: failures are logged but do not
            # fail the table ingestion
            if db_and_table.table.sampleData is not None:
                try:
                    self.metadata.ingest_table_sample_data(
                        table=created_table,
                        sample_data=db_and_table.table.sampleData,
                    )
                except Exception as e:
                    # NOTE(review): uses the root `logging` (not `logger`)
                    # and never logs the caught exception `e`.
                    logging.error(
                        f"Failed to ingest sample data for table {db_and_table.table.name}"
                    )
            if db_and_table.table.tableProfile is not None:
                # sanitise column names: dots would be read as nesting
                # separators downstream, so encode them as "_DOT_"
                for tp in db_and_table.table.tableProfile:
                    for pd in tp:
                        if pd[0] == "columnProfile":
                            for col in pd[1]:
                                col.name = col.name.replace(".", "_DOT_")
                self.metadata.ingest_table_profile_data(
                    table=created_table,
                    table_profile=db_and_table.table.tableProfile,
                )
            if db_and_table.table.dataModel is not None:
                self.metadata.ingest_table_data_model(
                    table=created_table, data_model=db_and_table.table.dataModel
                )
            logger.info(
                "Successfully ingested table {}.{}".format(
                    db_and_table.database.name.__root__,
                    created_table.name.__root__,
                )
            )
            self.status.records_written(
                f"Table: {db_and_table.database.name.__root__}.{created_table.name.__root__}"
            )
        except (APIError, ValidationError) as err:
            logger.error(
                "Failed to ingest table {} in database {} ".format(
                    db_and_table.table.name.__root__,
                    db_and_table.database.name.__root__,
                )
            )
            logger.error(err)
            self.status.failure(f"Table: {db_and_table.table.name.__root__}")
def write_topics(self, topic: CreateTopicEntityRequest) -> None:
try:
created_topic = self.metadata.create_or_update(topic)
logger.info(f"Successfully ingested topic {created_topic.name.__root__}")
self.status.records_written(f"Topic: {created_topic.name.__root__}")
except (APIError, ValidationError) as err:
logger.error(f"Failed to ingest topic {topic.name.__root__}")
logger.error(err)
self.status.failure(f"Topic: {topic.name}")
def write_charts(self, chart: Chart):
try:
om_chart_type = ChartType.Other
if (
chart.chart_type is not None
and chart.chart_type in om_chart_type_dict.keys()
):
om_chart_type = om_chart_type_dict[chart.chart_type]
chart_request = CreateChartEntityRequest(
name=chart.name,
displayName=chart.displayName,
description=chart.description,
chartType=om_chart_type,
chartUrl=chart.url,
service=chart.service,
)
created_chart = self.metadata.create_or_update(chart_request)
self.charts_dict[chart.name] = EntityReference(
id=created_chart.id, type="chart"
)
logger.info(f"Successfully ingested chart {created_chart.displayName}")
self.status.records_written(f"Chart: {created_chart.displayName}")
except (APIError, ValidationError) as err:
logger.error(f"Failed to ingest chart {chart.displayName}")
logger.error(err)
self.status.failure(f"Chart: {chart.displayName}")
    def write_dashboards(self, dashboard: Dashboard):
        """Create or update a Dashboard entity, linking previously ingested
        charts via `_get_chart_references`."""
        try:
            charts = self._get_chart_references(dashboard)
            dashboard_request = CreateDashboardEntityRequest(
                name=dashboard.name,
                displayName=dashboard.displayName,
                description=dashboard.description,
                dashboardUrl=dashboard.url,
                charts=charts,
                service=dashboard.service,
            )
            created_dashboard = self.metadata.create_or_update(dashboard_request)
            logger.info(
                f"Successfully ingested dashboard {created_dashboard.displayName}"
            )
            self.status.records_written(f"Dashboard: {created_dashboard.displayName}")
        except (APIError, ValidationError) as err:
            logger.error(f"Failed to ingest dashboard {dashboard.name}")
            logger.error(err)
            # NOTE(review): label lacks the ": " separator used by the other
            # write_* methods ("Dashboard {name}" vs "Dashboard: {name}").
            self.status.failure(f"Dashboard {dashboard.name}")
def _get_chart_references(self, dashboard: Dashboard) -> []:
chart_references = []
for chart_id in dashboard.charts:
if chart_id in self.charts_dict.keys():
chart_references.append(self.charts_dict[chart_id])
return chart_references
def write_locations(self, location: Location):
try:
location_request = CreateLocationEntityRequest(
name=location.name,
description=location.description,
locationType=location.locationType,
owner=location.owner,
service=location.service,
)
created_location = self.metadata.create_or_update(location_request)
logger.info(f"Successfully ingested Location {created_location.name}")
self.status.records_written(f"Location: {created_location.name}")
except (APIError, ValidationError) as err:
logger.error(f"Failed to ingest Location {location.name}")
logger.error(err)
self.status.failure(f"Location: {location.name}")
def write_pipelines(self, pipeline: Pipeline):
try:
pipeline_request = CreatePipelineEntityRequest(
name=pipeline.name,
displayName=pipeline.displayName,
description=pipeline.description,
pipelineUrl=pipeline.pipelineUrl,
tasks=pipeline.tasks,
service=pipeline.service,
)
created_pipeline = self.metadata.create_or_update(pipeline_request)
logger.info(
f"Successfully ingested Pipeline {created_pipeline.displayName}"
)
self.status.records_written(f"Pipeline: {created_pipeline.displayName}")
except (APIError, ValidationError) as err:
logger.error(f"Failed to ingest pipeline {pipeline.name}")
logger.error(err)
self.status.failure(f"Pipeline: {pipeline.name}")
def write_policies(self, policy: Policy):
try:
policy_request = CreatePolicyEntityRequest(
name=policy.name,
displayName=policy.displayName,
description=policy.description,
owner=policy.owner,
policyUrl=policy.policyUrl,
policyType=policy.policyType,
rules=policy.rules,
)
created_policy = self.metadata.create_or_update(policy_request)
logger.info(f"Successfully ingested Policy {created_policy.name}")
self.status.records_written(f"Policy: {created_policy.name}")
except (APIError, ValidationError) as err:
logger.error(f"Failed to ingest Policy {policy.name}")
logger.error(err)
self.status.failure(f"Policy: {policy.name}")
def write_lineage(self, add_lineage: AddLineage):
try:
logger.info(add_lineage)
created_lineage = self.metadata.add_lineage(add_lineage)
logger.info(f"Successfully added Lineage {created_lineage}")
self.status.records_written(f"Lineage: {created_lineage}")
except (APIError, ValidationError) as err:
logger.error(f"Failed to ingest lineage {add_lineage}")
logger.error(err)
self.status.failure(f"Lineage: {add_lineage}")
def write_ml_model(self, model: CreateMlModelEntityRequest):
try:
created_model = self.metadata.create_or_update(model)
logger.info(f"Successfully added Model {created_model.name}")
self.status.records_written(f"Model: {created_model.name}")
except (APIError, ValidationError) as err:
logger.error(f"Failed to ingest Model {model.name}")
logger.error(err)
self.status.failure(f"Model: {model.name}")
def _bootstrap_entities(self):
team_response = self.api_client.get("/teams")
for team in team_response["data"]:
self.team_entities[team["name"]] = team["id"]
def _create_team(self, team: EntityReference) -> None:
metadata_team = CreateTeamEntityRequest(
name=team.name, displayName=team.name, description=team.description
)
try:
r = self.metadata.create_or_update(metadata_team)
instance_id = str(r.id.__root__)
self.team_entities[team.name] = instance_id
except Exception as err:
logger.error(traceback.format_exc())
logger.error(traceback.print_exc())
logger.error(err)
def write_users(self, record: User):
teams = []
for team in record.teams.__root__:
if team.name not in self.team_entities:
self._create_team(team)
teams.append(self.team_entities[team.name])
metadata_user = CreateUserEntityRequest(
name=record.name.__root__,
displayName=record.displayName,
email=record.email,
teams=teams,
)
try:
self.metadata.create_or_update(metadata_user)
self.status.records_written(record.displayName)
logger.info("Sink: {}".format(record.displayName))
except Exception as err:
logger.error(traceback.format_exc())
logger.error(traceback.print_exc())
logger.error(err)
    def get_status(self):
        """Return the accumulated SinkStatus (records written/failed)."""
        return self.status

    def close(self):
        """No-op: this sink holds no resources that require cleanup."""
        pass
| 42.173594 | 93 | 0.639341 |
37b35a63fc847dab9075e507f335c25fa3e4c42a | 1,808 | py | Python | examples/graphics/fractal.py | miketrumpis/arrayfire-python | aead0394ffda9bd820279f59a84a9dcba6e3691f | [
"BSD-3-Clause"
] | 420 | 2015-07-30T00:02:21.000Z | 2022-03-28T16:52:28.000Z | examples/graphics/fractal.py | miketrumpis/arrayfire-python | aead0394ffda9bd820279f59a84a9dcba6e3691f | [
"BSD-3-Clause"
] | 198 | 2015-07-29T17:17:36.000Z | 2022-01-20T18:31:28.000Z | examples/graphics/fractal.py | miketrumpis/arrayfire-python | aead0394ffda9bd820279f59a84a9dcba6e3691f | [
"BSD-3-Clause"
] | 75 | 2015-07-29T15:17:54.000Z | 2022-02-24T06:50:23.000Z | #!/usr/bin/python
#######################################################
# Copyright (c) 2015, ArrayFire
# All rights reserved.
#
# This file is distributed under 3-clause BSD license.
# The complete license agreement can be obtained at:
# http://arrayfire.com/licenses/BSD-3-Clause
########################################################
import arrayfire as af
import sys
from math import sqrt
width = 400
height = 400
def complex_grid(w, h, zoom, center):
    """Return a w-by-h ArrayFire array of complex-plane coordinates.

    The grid is centred on `center` (a (re, im) pair); its extent shrinks
    as `zoom` grows.
    """
    real = (af.iota(d0 = 1, d1 = h, tile_dims = (w, 1)) - h/2) / zoom + center[0]
    imag = (af.iota(d0 = w, d1 = 1, tile_dims = (1, h)) - w/2) / zoom + center[1]
    return af.cplx(real, imag)
def mandelbrot(data, it, maxval):
    """Compute per-pixel escape iterations for a Mandelbrot-style fractal.

    :param data: complex ArrayFire array of starting coordinates
    :param it: number of iterations to run
    :param maxval: magnitude threshold at which a point counts as escaped
    :return: per-pixel escape iteration, divided by `maxval`
    """
    # C and Z start as references to the same array; safe here because the
    # af operations below all rebind to new arrays rather than mutating.
    C = data
    Z = data
    # per-pixel record of the iteration at which the point escaped (0 = never)
    mag = af.constant(0, *C.dims())

    for ii in range(1, 1 + it):
        # Doing the calculation
        Z = Z * Z + C

        # Get indices where abs(Z) crosses maxval (1.0 where escaped)
        cond = ((af.abs(Z) > maxval)).as_type(af.Dtype.f32)
        mag = af.maxof(mag, cond * ii)

        # zero out escaped points so they stop contributing
        C = C * (1 - cond)
        Z = Z * (1 - cond)

        # force evaluation each iteration to bound the lazy expression tree
        af.eval(C)
        af.eval(Z)

    return mag / maxval
def normalize(a):
    """Linearly rescale `a` so its values span [0, 1]."""
    lo = af.min(a)
    hi = af.max(a)
    return (a - lo) / (hi - lo)
if __name__ == "__main__":
    # Optional CLI argument selects the compute device.
    if (len(sys.argv) > 1):
        af.set_device(int(sys.argv[1]))
    af.info()

    print("ArrayFire Fractal Demo\n")
    win = af.Window(width, height, "Fractal Demo")
    win.set_colormap(af.COLORMAP.SPECTRUM)

    # Fixed viewpoint; each frame zooms in quadratically with `i`.
    center = (-0.75, 0.1)

    for i in range(10, 400):
        zoom = i * i
        if not (i % 10):
            print("Iteration: %d zoom: %d" % (i, zoom))

        c = complex_grid(width, height, zoom, center)
        # heuristic iteration count that grows slowly with zoom
        it = sqrt(2*sqrt(abs(1-sqrt(5*zoom))))*100

        # stop rendering as soon as the window is closed
        if (win.close()): break
        mag = mandelbrot(c, int(it), 1000)
        win.image(normalize(mag))
| 24.106667 | 78 | 0.524336 |
78f34f9460c07a4b23c28125eb8928d4297ad241 | 3,204 | py | Python | tests/test_tools.py | lchojnacki/master | 582553e6fa262a310511304f66cf94753308246c | [
"BSD-3-Clause"
] | 1 | 2021-06-11T12:32:12.000Z | 2021-06-11T12:32:12.000Z | tests/test_tools.py | lchojnacki/master | 582553e6fa262a310511304f66cf94753308246c | [
"BSD-3-Clause"
] | null | null | null | tests/test_tools.py | lchojnacki/master | 582553e6fa262a310511304f66cf94753308246c | [
"BSD-3-Clause"
] | 2 | 2020-08-20T16:13:27.000Z | 2022-03-01T13:49:07.000Z | # -*- coding: UTF-8 -*-
from unittest import TestCase as UnitTest
from django_mail_template.tools import (replace_context_variable,
clean_address_list)
class TestReplaceContextVariable(UnitTest):
    """Unit tests for ``replace_context_variable``."""

    def test_second_parameter_must_be_dictionary(self):
        # Non-dict context data must be rejected with TypeError.
        for bad_context in ('', [], 1):
            with self.assertRaises(TypeError):
                replace_context_variable('', bad_context)
        replace_context_variable('', {})

    def test_first_parameter_must_be_string(self):
        # Non-string text must be rejected with AttributeError.
        for bad_text in ([], 1, {}):
            with self.assertRaises(AttributeError):
                replace_context_variable(bad_text, {})
        replace_context_variable('', {})

    def test_return_main_text_with_variables_replaced(self):
        mapping = {'context_variable': 'example'}
        result = replace_context_variable(
            'Dummy text {context_variable}.', mapping)
        assert result == 'Dummy text example.'

    def test_return_main_text_with_multiple_variables_replaced(self):
        mapping = {'context_variable': 'example', 'replaced_text': 'of replace'}
        result = replace_context_variable(
            'Dummy text {context_variable} {replaced_text}.', mapping)
        assert result == 'Dummy text example of replace.'

    def test_return_valid_value_without_all_variable_mapping(self):
        # Mapping keys absent from the text are simply ignored.
        mapping = {'context_variable': 'example', 'replaced_text': 'of replace'}
        result = replace_context_variable(
            'Dummy text {context_variable}.', mapping)
        assert result == 'Dummy text example.'

    def test_return_valid_value_without_context_variable_in_text(self):
        # Braced tokens with no matching key are left untouched.
        mapping = {'context_variable': 'example', 'replaced_text': 'of replace'}
        result = replace_context_variable(
            'Dummy text {context_variable} {fake%2d0} {more-fake}.', mapping)
        assert result == 'Dummy text example {fake%2d0} {more-fake}.'
class TestConvertToComaSeparatedList(UnitTest):
    """Unit tests for ``clean_address_list``."""

    def test_receive_string_with_one_email_return_a_list(self):
        assert clean_address_list('a@b.com') == ['a@b.com']

    def test_receive_list_with_one_email_return_same_list(self):
        assert clean_address_list(['a@b.com', ]) == ['a@b.com']

    def test_receive_string_with_more_than_one_address_return_a_list(self):
        addresses = clean_address_list('a@b.com, b@b.com, c@b.com')
        assert addresses == ['a@b.com', 'b@b.com', 'c@b.com']

    def test_receive_empty_list_return_empty_list(self):
        assert clean_address_list([]) == []

    def test_receive_empty_string_return_empty_list(self):
        assert clean_address_list('') == []

    def test_receive_none_return_empty_list(self):
        # None is treated the same as "no addresses".
        assert clean_address_list(None) == []
| 39.555556 | 77 | 0.672285 |
99d062d4c3ed3163735c802163b7b73bb209cd77 | 3,018 | py | Python | homeassistant/components/zha/core/channels/measurement.py | mikan-megane/core | 837220cce40890e296920d33a623adbc11bd15a6 | [
"Apache-2.0"
] | 5 | 2018-10-23T14:15:05.000Z | 2021-11-26T06:38:44.000Z | homeassistant/components/zha/core/channels/measurement.py | jagadeeshvenkatesh/core | 1bd982668449815fee2105478569f8e4b5670add | [
"Apache-2.0"
] | 79 | 2020-07-23T07:13:37.000Z | 2022-03-22T06:02:37.000Z | homeassistant/components/zha/core/channels/measurement.py | jagadeeshvenkatesh/core | 1bd982668449815fee2105478569f8e4b5670add | [
"Apache-2.0"
] | 3 | 2022-01-17T20:10:54.000Z | 2022-01-17T20:17:22.000Z | """Measurement channels module for Zigbee Home Automation."""
import zigpy.zcl.clusters.measurement as measurement
from .. import registries
from ..const import (
REPORT_CONFIG_DEFAULT,
REPORT_CONFIG_IMMEDIATE,
REPORT_CONFIG_MAX_INT,
REPORT_CONFIG_MIN_INT,
)
from .base import ZigbeeChannel
@registries.ZIGBEE_CHANNEL_REGISTRY.register(measurement.FlowMeasurement.cluster_id)
class FlowMeasurement(ZigbeeChannel):
    """Flow Measurement channel."""

    # Report the cluster's measured_value attribute with default intervals.
    REPORT_CONFIG = [{"attr": "measured_value", "config": REPORT_CONFIG_DEFAULT}]
@registries.ZIGBEE_CHANNEL_REGISTRY.register(
    measurement.IlluminanceLevelSensing.cluster_id
)
class IlluminanceLevelSensing(ZigbeeChannel):
    """Illuminance Level Sensing channel."""

    # Report the cluster's level_status attribute with default intervals.
    REPORT_CONFIG = [{"attr": "level_status", "config": REPORT_CONFIG_DEFAULT}]
@registries.ZIGBEE_CHANNEL_REGISTRY.register(
    measurement.IlluminanceMeasurement.cluster_id
)
class IlluminanceMeasurement(ZigbeeChannel):
    """Illuminance Measurement channel."""

    # Report the cluster's measured_value attribute with default intervals.
    REPORT_CONFIG = [{"attr": "measured_value", "config": REPORT_CONFIG_DEFAULT}]
@registries.ZIGBEE_CHANNEL_REGISTRY.register(measurement.OccupancySensing.cluster_id)
class OccupancySensing(ZigbeeChannel):
    """Occupancy Sensing channel."""

    # Occupancy changes are reported immediately rather than on an interval.
    REPORT_CONFIG = [{"attr": "occupancy", "config": REPORT_CONFIG_IMMEDIATE}]
@registries.ZIGBEE_CHANNEL_REGISTRY.register(measurement.PressureMeasurement.cluster_id)
class PressureMeasurement(ZigbeeChannel):
    """Pressure measurement channel."""

    # Report the cluster's measured_value attribute with default intervals.
    REPORT_CONFIG = [{"attr": "measured_value", "config": REPORT_CONFIG_DEFAULT}]
@registries.ZIGBEE_CHANNEL_REGISTRY.register(measurement.RelativeHumidity.cluster_id)
class RelativeHumidity(ZigbeeChannel):
    """Relative Humidity measurement channel."""

    # Config tuple: (min interval, max interval, reportable change of 100).
    REPORT_CONFIG = [
        {
            "attr": "measured_value",
            "config": (REPORT_CONFIG_MIN_INT, REPORT_CONFIG_MAX_INT, 100),
        }
    ]
@registries.ZIGBEE_CHANNEL_REGISTRY.register(
    measurement.TemperatureMeasurement.cluster_id
)
class TemperatureMeasurement(ZigbeeChannel):
    """Temperature measurement channel."""

    # Config tuple: (min interval, max interval, reportable change of 50).
    REPORT_CONFIG = [
        {
            "attr": "measured_value",
            "config": (REPORT_CONFIG_MIN_INT, REPORT_CONFIG_MAX_INT, 50),
        }
    ]
@registries.ZIGBEE_CHANNEL_REGISTRY.register(
    measurement.CarbonMonoxideConcentration.cluster_id
)
class CarbonMonoxideConcentration(ZigbeeChannel):
    """Carbon Monoxide measurement channel."""

    # Config tuple: (min interval, max interval, reportable change 0.000001).
    REPORT_CONFIG = [
        {
            "attr": "measured_value",
            "config": (REPORT_CONFIG_MIN_INT, REPORT_CONFIG_MAX_INT, 0.000001),
        }
    ]
@registries.ZIGBEE_CHANNEL_REGISTRY.register(
    measurement.CarbonDioxideConcentration.cluster_id
)
class CarbonDioxideConcentration(ZigbeeChannel):
    """Carbon Dioxide measurement channel."""

    # Config tuple: (min interval, max interval, reportable change 0.000001).
    REPORT_CONFIG = [
        {
            "attr": "measured_value",
            "config": (REPORT_CONFIG_MIN_INT, REPORT_CONFIG_MAX_INT, 0.000001),
        }
    ]
3fd58512545b8f538157871fe0fd6eddb73cd183 | 131 | py | Python | spidy/__init__.py | imohitawasthi/spidy | 4ae0a180c8e07503d0481f664f9e8014fc413e96 | [
"MIT"
] | null | null | null | spidy/__init__.py | imohitawasthi/spidy | 4ae0a180c8e07503d0481f664f9e8014fc413e96 | [
"MIT"
] | null | null | null | spidy/__init__.py | imohitawasthi/spidy | 4ae0a180c8e07503d0481f664f9e8014fc413e96 | [
"MIT"
] | null | null | null | """spidy - Spider who crawls the web"""
__version__ = '0.1.0'
__author__ = 'Mohit Awasthi <imohitawasthi@gmail.com>'
__all__ = []
| 21.833333 | 54 | 0.687023 |
f81f53f5d296868715ce1aaedc76f5024987b22c | 88,106 | py | Python | test/integration/component/test_vpc.py | hymmm/cl | fd808963c8ee3753b72bfe38eddfbd5d56d56ee0 | [
"Apache-2.0"
] | 1 | 2018-05-23T06:13:51.000Z | 2018-05-23T06:13:51.000Z | test/integration/component/test_vpc.py | hymmm/cl | fd808963c8ee3753b72bfe38eddfbd5d56d56ee0 | [
"Apache-2.0"
] | null | null | null | test/integration/component/test_vpc.py | hymmm/cl | fd808963c8ee3753b72bfe38eddfbd5d56d56ee0 | [
"Apache-2.0"
] | 1 | 2018-05-15T08:58:32.000Z | 2018-05-15T08:58:32.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Component tests for VPC functionality
"""
# Import Local Modules
from nose.plugins.attrib import attr
from marvin.cloudstackTestCase import cloudstackTestCase
from marvin.cloudstackException import CloudstackAPIException
from marvin.cloudstackAPI import updateZone
from marvin.lib.utils import cleanup_resources
from marvin.lib.base import (Account,
VPC,
VpcOffering,
VirtualMachine,
ServiceOffering,
Network,
NetworkOffering,
PublicIPAddress,
LoadBalancerRule,
Router,
NetworkACL,
NATRule,
Zone,
StaticNATRule)
from marvin.lib.common import (get_domain,
get_zone,
get_template,
list_configurations)
import time
class Services:
    """Test VPC services

    Bundles every configuration dictionary (accounts, offerings, rules,
    VM templates) consumed by the VPC component tests below.
    """

    def __init__(self):
        self.services = {
            "account": {
                "email": "test@test.com",
                "firstname": "Test",
                "lastname": "User",
                "username": "test",
                # Random characters are appended for unique
                # username
                "password": "password",
            },
            "domain_admin": {
                "email": "domain@admin.com",
                "firstname": "Domain",
                "lastname": "Admin",
                "username": "DoA",
                # Random characters are appended for unique
                # username
                "password": "password",
            },
            "service_offering": {
                "name": "Tiny Instance",
                "displaytext": "Tiny Instance",
                "cpunumber": 1,
                "cpuspeed": 100,
                "memory": 128,
            },
            # Network offering with load balancing supported.
            "network_offering": {
                "name": 'VPC Network offering',
                "displaytext": 'VPC Network off',
                "guestiptype": 'Isolated',
                "supportedservices": 'Vpn,Dhcp,Dns,SourceNat,PortForwarding,Lb,UserData,StaticNat,NetworkACL',
                "traffictype": 'GUEST',
                "availability": 'Optional',
                "useVpc": 'on',
                "serviceProviderList": {
                    "Vpn": 'VpcVirtualRouter',
                    "Dhcp": 'VpcVirtualRouter',
                    "Dns": 'VpcVirtualRouter',
                    "SourceNat": 'VpcVirtualRouter',
                    "PortForwarding": 'VpcVirtualRouter',
                    "Lb": 'VpcVirtualRouter',
                    "UserData": 'VpcVirtualRouter',
                    "StaticNat": 'VpcVirtualRouter',
                    "NetworkACL": 'VpcVirtualRouter'
                },
            },
            # Same offering without the Lb service.
            "network_offering_no_lb": {
                "name": 'VPC Network offering',
                "displaytext": 'VPC Network off',
                "guestiptype": 'Isolated',
                "supportedservices": 'Vpn,Dhcp,Dns,SourceNat,PortForwarding,UserData,StaticNat,NetworkACL',
                "traffictype": 'GUEST',
                "availability": 'Optional',
                "useVpc": 'on',
                "serviceProviderList": {
                    "Vpn": 'VpcVirtualRouter',
                    "Dhcp": 'VpcVirtualRouter',
                    "Dns": 'VpcVirtualRouter',
                    "SourceNat": 'VpcVirtualRouter',
                    "PortForwarding": 'VpcVirtualRouter',
                    "UserData": 'VpcVirtualRouter',
                    "StaticNat": 'VpcVirtualRouter',
                    "NetworkACL": 'VpcVirtualRouter'
                },
            },
            "vpc_offering": {
                "name": 'VPC off',
                "displaytext": 'VPC off',
                "supportedservices": 'Dhcp,Dns,SourceNat,PortForwarding,Vpn,Lb,UserData,StaticNat,NetworkACL',
            },
            "vpc": {
                "name": "TestVPC",
                "displaytext": "TestVPC",
                "cidr": '10.0.0.1/24'
            },
            # presumably used to test VPC creation without a name; verify
            # against the test methods before relying on it.
            "vpc_no_name": {
                "displaytext": "TestVPC",
                "cidr": '10.0.0.1/24'
            },
            "network": {
                "name": "Test Network",
                "displaytext": "Test Network",
                "netmask": '255.255.255.0'
            },
            "lbrule": {
                "name": "SSH",
                "alg": "leastconn",
                # Algorithm used for load balancing
                "privateport": 22,
                "publicport": 2222,
                "openfirewall": False,
                "startport": 22,
                "endport": 2222,
                "protocol": "TCP",
                "cidrlist": '0.0.0.0/0',
            },
            "natrule": {
                "privateport": 22,
                "publicport": 22,
                "startport": 22,
                "endport": 22,
                "protocol": "TCP",
                "cidrlist": '0.0.0.0/0',
            },
            "fw_rule": {
                "startport": 1,
                "endport": 6000,
                "cidr": '0.0.0.0/0',
                # Any network (For creating FW rule)
                "protocol": "TCP"
            },
            "icmp_rule": {
                "icmptype": -1,
                "icmpcode": -1,
                "cidrlist": '0.0.0.0/0',
                "protocol": "ICMP"
            },
            "virtual_machine": {
                "displayname": "Test VM",
                "username": "root",
                "password": "password",
                "ssh_port": 22,
                "hypervisor": 'XenServer',
                # Hypervisor type should be same as
                # hypervisor type of cluster
                "privateport": 22,
                "publicport": 22,
                "protocol": 'TCP',
            },
            "domain": {
                "name": "TestDomain"
            },
            "ostype": 'CentOS 5.3 (64-bit)',
            # Cent OS 5.3 (64 bit)
            "sleep": 60,
            "timeout": 10,
            "mode": 'advanced'
        }
class TestVPC(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.testClient = super(TestVPC, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
cls.services = Services().services
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
cls.template = get_template(
cls.api_client,
cls.zone.id,
cls.services["ostype"]
)
cls.services["virtual_machine"]["zoneid"] = cls.zone.id
cls.services["virtual_machine"]["template"] = cls.template.id
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"]
)
cls.vpc_off = VpcOffering.create(
cls.api_client,
cls.services["vpc_offering"]
)
cls.vpc_off.update(cls.api_client, state='Enabled')
cls._cleanup = [
cls.service_offering,
]
return
@classmethod
def tearDownClass(cls):
try:
# Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.account = Account.create(
self.apiclient,
self.services["account"],
admin=True,
domainid=self.domain.id
)
self.cleanup = []
self.cleanup.insert(0, self.account)
return
def tearDown(self):
try:
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
self.debug("Warning: Exception during cleanup : %s" % e)
return
def validate_vpc_offering(self, vpc_offering):
"""Validates the VPC offering"""
self.debug("Check if the VPC offering is created successfully?")
vpc_offs = VpcOffering.list(
self.apiclient,
id=vpc_offering.id
)
self.assertEqual(
isinstance(vpc_offs, list),
True,
"List VPC offerings should return a valid list"
)
self.assertEqual(
vpc_offering.name,
vpc_offs[0].name,
"Name of the VPC offering should match with listVPCOff data"
)
self.debug(
"VPC offering is created successfully - %s" %
vpc_offering.name)
return
def validate_vpc_network(self, network, state=None):
"""Validates the VPC network"""
self.debug("Check if the VPC network is created successfully?")
vpc_networks = VPC.list(
self.apiclient,
id=network.id
)
self.assertEqual(
isinstance(vpc_networks, list),
True,
"List VPC network should return a valid list"
)
self.assertEqual(
network.name,
vpc_networks[0].name,
"Name of the VPC network should match with listVPC data"
)
if state:
self.assertEqual(
vpc_networks[0].state,
state,
"VPC state should be '%s'" % state
)
self.debug("VPC network validated - %s" % network.name)
return
    # list_vpc_apis should be the first case otherwise the vpc counts would be
    # wrong
    @attr(tags=["advanced", "intervlan"], required_hardware="false")
    def test_01_list_vpc_apis(self):
        """ Test list VPC APIs

        Creates two VPCs in the test account and exercises every documented
        listVPCs() filter against them.
        """
        # Validate the following
        # 1. Create multiple VPCs
        # 2. listVPCs() by name. VPC with the provided name should be listed.
        # 3. listVPCs() by displayText. VPC with the provided displayText
        #    should be listed.
        # 4. listVPCs() by cidr. All the VPCs with the provided cidr should
        #    be listed.
        # 5. listVPCs() by vpcofferingId.All the VPCs with the vpcofferingId
        #    should be listed.
        # 6. listVPCs() by supported Services(). All the VPCs that provide the
        #    list of services should be listed.
        # 7. listVPCs() by restartRequired (set to true). All the VPCs that
        #    require restart should be listed.
        self.services["vpc"]["cidr"] = "10.1.1.1/16"
        self.debug("creating a VPC network in the account: %s" %
                   self.account.name)
        vpc_1 = VPC.create(
            self.apiclient,
            self.services["vpc"],
            vpcofferingid=self.vpc_off.id,
            zoneid=self.zone.id,
            account=self.account.name,
            domainid=self.account.domainid
        )
        self.validate_vpc_network(vpc_1)
        # Second VPC must use a non-overlapping CIDR.
        self.services["vpc"]["cidr"] = "10.1.46.1/16"
        vpc_2 = VPC.create(
            self.apiclient,
            self.services["vpc"],
            vpcofferingid=self.vpc_off.id,
            zoneid=self.zone.id,
            account=self.account.name,
            domainid=self.account.domainid
        )
        self.validate_vpc_network(vpc_2)
        self.debug("Check list VPC API by Name?")
        vpcs = VPC.list(
            self.apiclient,
            name=vpc_1.name,
            listall=True
        )
        self.assertEqual(
            isinstance(vpcs, list),
            True,
            "List VPC shall return a valid resposne"
        )
        vpc = vpcs[0]
        self.assertEqual(
            vpc.name,
            vpc_1.name,
            "VPC name should match with the existing one"
        )
        self.debug("Check list VPC API by displayText?")
        vpcs = VPC.list(
            self.apiclient,
            displaytext=vpc_1.displaytext,
            listall=True
        )
        self.assertEqual(
            isinstance(vpcs, list),
            True,
            "List VPC shall return a valid resposne"
        )
        vpc = vpcs[0]
        self.assertEqual(
            vpc.displaytext,
            vpc_1.displaytext,
            "VPC displaytext should match with the existing one"
        )
        self.debug("Check list VPC API by cidr?")
        vpcs = VPC.list(
            self.apiclient,
            cidr=vpc_2.cidr,
            listall=True
        )
        self.assertEqual(
            isinstance(vpcs, list),
            True,
            "List VPC shall return a valid resposne"
        )
        vpc = vpcs[0]
        self.assertEqual(
            vpc.cidr,
            vpc_2.cidr,
            "VPC cidr should match with the existing one"
        )
        self.debug("Validating list VPC by Id")
        self.validate_vpc_network(vpc_1)
        self.debug("Validating list VPC by vpcofferingId")
        vpcs = VPC.list(
            self.apiclient,
            vpcofferingid=self.vpc_off.id,
            listall=True
        )
        self.assertEqual(
            isinstance(vpcs, list),
            True,
            "List VPC by vpcofferingId should return a valid response"
        )
        self.debug("Length of list VPC response: %s" % len(vpcs))
        # Both VPCs above were created from the same offering.
        self.assertEqual(
            len(vpcs),
            2,
            "List VPC should return 2 enabled VPCs"
        )
        for vpc in vpcs:
            self.assertEqual(
                vpc.vpcofferingid,
                self.vpc_off.id,
                "VPC offering ID should match with that of resposne"
            )
        self.debug("Validating list VPC by supportedservices")
        vpcs = VPC.list(
            self.apiclient,
            supportedservices='Vpn,Dhcp,Dns,SourceNat,PortForwarding,Lb,UserData,StaticNat,NetworkACL',
            listall=True,
            account=self.account.name,
            domainid=self.account.domainid)
        self.assertEqual(
            isinstance(vpcs, list),
            True,
            "List VPC by vpcofferingId should return a valid response"
        )
        for vpc in vpcs:
            self.assertIn(
                vpc.id,
                [vpc_1.id, vpc_2.id],
                "VPC offering ID should match with that of resposne"
            )
        self.debug("Validating list VPC by restart required")
        vpcs = VPC.list(
            self.apiclient,
            restartrequired=True,
            listall=True,
            account=self.account.name,
            domainid=self.account.domainid
        )
        # No VPC may require a restart yet, so the list can be empty (None).
        if vpcs is not None:
            for vpc in vpcs:
                self.assertEqual(
                    vpc.restartrequired,
                    True,
                    "RestartRequired should be set as True"
                )
        self.debug("Validating list VPC by restart required")
        vpcs = VPC.list(
            self.apiclient,
            restartrequired=False,
            listall=True,
            account=self.account.name,
            domainid=self.account.domainid
        )
        self.assertEqual(
            isinstance(vpcs, list),
            True,
            "List VPC by vpcofferingId should return a valid response"
        )
        if vpcs is not None:
            for vpc in vpcs:
                self.assertEqual(
                    vpc.restartrequired,
                    False,
                    "RestartRequired should be set as False"
                )
        return
@attr(tags=["advanced", "intervlan", "dvs"], required_hardware="false")
def test_02_restart_vpc_no_networks(self):
""" Test restart VPC having no networks
"""
# Validate the following
# 1. Create a VPC with cidr - 10.1.1.1/16
# 2. Restart VPC. Restart VPC should be successful
self.services["vpc"]["cidr"] = "10.1.1.1/16"
self.debug("creating a VPC network in the account: %s" %
self.account.name)
vpc = VPC.create(
self.apiclient,
self.services["vpc"],
vpcofferingid=self.vpc_off.id,
zoneid=self.zone.id,
account=self.account.name,
domainid=self.account.domainid
)
self.validate_vpc_network(vpc)
self.debug("Restarting the VPC with no network")
try:
vpc.restart(self.apiclient)
except Exception as e:
self.fail("Failed to restart VPC network - %s" % e)
self.validate_vpc_network(vpc, state='Enabled')
return
@attr(tags=["advanced", "intervlan", "dvs"], required_hardware="false")
def test_03_restart_vpc_with_networks(self):
""" Test restart VPC having networks
"""
# Validate the following
# 1. Create a VPC with cidr - 10.1.1.1/16
# 2. Add couple of networks to VPC.
# 3. Restart VPC. Restart network should be successful
self.services["vpc"]["cidr"] = "10.1.1.1/16"
self.debug("creating a VPC network in the account: %s" %
self.account.name)
vpc = VPC.create(
self.apiclient,
self.services["vpc"],
vpcofferingid=self.vpc_off.id,
zoneid=self.zone.id,
account=self.account.name,
domainid=self.account.domainid
)
self.validate_vpc_network(vpc)
self.network_offering = NetworkOffering.create(
self.apiclient,
self.services["network_offering"],
conservemode=False
)
# Enable Network offering
self.network_offering.update(self.apiclient, state='Enabled')
self.cleanup.append(self.network_offering)
gateway = vpc.cidr.split('/')[0]
# Split the cidr to retrieve gateway
# for eg. cidr = 10.0.0.1/24
# Gateway = 10.0.0.1
# Creating network using the network offering created
self.debug("Creating network with network offering: %s" %
self.network_offering.id)
network_1 = Network.create(
self.apiclient,
self.services["network"],
accountid=self.account.name,
domainid=self.account.domainid,
networkofferingid=self.network_offering.id,
zoneid=self.zone.id,
gateway=gateway,
vpcid=vpc.id
)
self.debug("Created network with ID: %s" % network_1.id)
self.network_offering_no_lb = NetworkOffering.create(
self.apiclient,
self.services["network_offering_no_lb"],
conservemode=False
)
# Enable Network offering
self.network_offering_no_lb.update(self.apiclient, state='Enabled')
self.cleanup.append(self.network_offering_no_lb)
gateway = '10.1.2.1' # New network -> different gateway
self.debug("Creating network with network offering: %s" %
self.network_offering_no_lb.id)
network_2 = Network.create(
self.apiclient,
self.services["network"],
accountid=self.account.name,
domainid=self.account.domainid,
networkofferingid=self.network_offering_no_lb.id,
zoneid=self.zone.id,
gateway=gateway,
vpcid=vpc.id
)
self.debug("Created network with ID: %s" % network_2.id)
self.debug("Restarting the VPC with no network")
try:
vpc.restart(self.apiclient)
except Exception as e:
self.fail("Failed to restart VPC network - %s" % e)
self.validate_vpc_network(vpc, state='Enabled')
return
@attr(tags=["advanced", "intervlan"], required_hardware="false")
def test_04_delete_vpc_no_networks(self):
""" Test delete VPC having no networks
"""
# Validate the following
# 1. Create a VPC with cidr - 10.1.1.1/16
# 2. Delete VPC. Delete VPC should be successful
self.services["vpc"]["cidr"] = "10.1.1.1/16"
self.debug("creating a VPC network in the account: %s" %
self.account.name)
vpc = VPC.create(
self.apiclient,
self.services["vpc"],
vpcofferingid=self.vpc_off.id,
zoneid=self.zone.id,
account=self.account.name,
domainid=self.account.domainid
)
self.validate_vpc_network(vpc)
self.debug("Restarting the VPC with no network")
try:
vpc.delete(self.apiclient)
except Exception as e:
self.fail("Failed to delete VPC network - %s" % e)
self.debug("Check if the VPC offering is deleted successfully?")
vpcs = VPC.list(
self.apiclient,
id=vpc.id
)
self.assertEqual(
vpcs,
None,
"List VPC offerings should not return anything"
)
return
@attr(tags=["advanced", "intervlan"], required_hardware="false")
def test_05_delete_vpc_with_networks(self):
""" Test delete VPC having networks
"""
# Validate the following
# 1. Create a VPC with cidr - 10.1.1.1/16
# 2. Add couple of networks to VPC.
# 3. Delete VPC. Delete network should be successful
# 4. Virtual Router should be deleted
# 5. Source NAT should be released back to pool
self.services["vpc"]["cidr"] = "10.1.1.1/16"
self.debug("creating a VPC network in the account: %s" %
self.account.name)
vpc = VPC.create(
self.apiclient,
self.services["vpc"],
vpcofferingid=self.vpc_off.id,
zoneid=self.zone.id,
account=self.account.name,
domainid=self.account.domainid
)
self.validate_vpc_network(vpc)
self.network_offering = NetworkOffering.create(
self.apiclient,
self.services["network_offering"],
conservemode=False
)
# Enable Network offering
self.network_offering.update(self.apiclient, state='Enabled')
self.cleanup.append(self.network_offering)
gateway = vpc.cidr.split('/')[0]
# Split the cidr to retrieve gateway
# for eg. cidr = 10.0.0.1/24
# Gateway = 10.0.0.1
# Creating network using the network offering created
self.debug("Creating network with network offering: %s" %
self.network_offering.id)
network_1 = Network.create(
self.apiclient,
self.services["network"],
accountid=self.account.name,
domainid=self.account.domainid,
networkofferingid=self.network_offering.id,
zoneid=self.zone.id,
gateway=gateway,
vpcid=vpc.id
)
self.debug("Created network with ID: %s" % network_1.id)
self.network_offering_no_lb = NetworkOffering.create(
self.apiclient,
self.services["network_offering_no_lb"],
conservemode=False
)
# Enable Network offering
self.network_offering_no_lb.update(self.apiclient, state='Enabled')
self.cleanup.append(self.network_offering_no_lb)
gateway = '10.1.2.1' # New network -> different gateway
self.debug("Creating network with network offering: %s" %
self.network_offering_no_lb.id)
network_2 = Network.create(
self.apiclient,
self.services["network"],
accountid=self.account.name,
domainid=self.account.domainid,
networkofferingid=self.network_offering_no_lb.id,
zoneid=self.zone.id,
gateway=gateway,
vpcid=vpc.id
)
self.debug("Created network with ID: %s" % network_2.id)
self.debug("Deleting the VPC with no network")
with self.assertRaises(Exception):
vpc.delete(self.apiclient)
self.debug("Delete VPC failed as there are still networks in VPC")
self.debug("Deleting the networks in the VPC")
try:
network_1.delete(self.apiclient)
network_2.delete(self.apiclient)
except Exception as e:
self.fail("failed to delete the VPC networks: %s" % e)
self.debug("Now trying to delete VPC")
try:
vpc.delete(self.apiclient)
except Exception as e:
self.fail("Delete to restart VPC network - %s" % e)
self.debug("Check if the VPC offering is deleted successfully?")
vpcs = VPC.list(
self.apiclient,
id=vpc.id
)
self.assertEqual(
vpcs,
None,
"List VPC offerings should not return anything"
)
self.debug(
"Waiting for network.gc.interval to cleanup network resources")
interval = list_configurations(
self.apiclient,
name='network.gc.interval'
)
wait = list_configurations(
self.apiclient,
name='network.gc.wait'
)
# Sleep to ensure that all resources are deleted
time.sleep(int(interval[0].value) + int(wait[0].value))
self.debug("Check if VR is deleted or not?")
routers = Router.list(
self.apiclient,
account=self.account.name,
domainid=self.account.domainid,
listall=True
)
self.assertEqual(
routers,
None,
"List Routers for the account should not return any response"
)
return
@attr(tags=["advanced", "intervlan"], required_hardware="false")
def test_06_list_vpc_apis_admin(self):
""" Test list VPC APIs for different user roles
"""
# Validate the following
# 1. list VPCS as admin User to view all the Vpcs owned by admin user
# 2. list VPCS as regular User to view all the Vpcs owned by user
# 3. list VPCS as domain admin User to view all the Vpcs owned by admin
self.user = Account.create(
self.apiclient,
self.services["account"],
)
self.cleanup.append(self.user)
self.services["vpc"]["cidr"] = "10.1.1.1/16"
self.debug("creating a VPC network in the account: %s" %
self.account.name)
vpc_1 = VPC.create(
self.apiclient,
self.services["vpc"],
vpcofferingid=self.vpc_off.id,
zoneid=self.zone.id,
account=self.account.name,
domainid=self.account.domainid
)
self.validate_vpc_network(vpc_1)
self.services["vpc"]["cidr"] = "10.1.46.1/16"
vpc_2 = VPC.create(
self.apiclient,
self.services["vpc"],
vpcofferingid=self.vpc_off.id,
zoneid=self.zone.id,
account=self.user.name,
domainid=self.user.domainid
)
self.validate_vpc_network(vpc_2)
self.debug("Validating list VPCs call by passing account and domain")
vpcs = VPC.list(
self.apiclient,
account=self.user.name,
domainid=self.user.domainid,
listall=True
)
self.assertEqual(
isinstance(vpcs, list),
True,
"List VPC should return a valid response"
)
vpc = vpcs[0]
self.assertEqual(
vpc.id,
vpc_2.id,
"List VPC should return VPC belonging to that account"
)
return
@attr(tags=["advanced", "intervlan", "multiple"], required_hardware="true")
def test_07_restart_network_vm_running(self):
    """ Test Restart VPC when there are multiple networks associated

    Builds a VPC with two tiers (one offering with LB, one without),
    deploys two VMs in each tier, wires up a PF rule, a static-NAT rule
    and an LB rule plus the ingress/egress ACLs they need, then verifies
    SSH access through each rule and outbound connectivity from the VMs.
    """
    # Validate the following
    # 1. Create a VPC with cidr - 10.1.1.1/16
    # 2. Add network1(10.1.1.1/24) and network2(10.1.2.1/24) to this VPC
    # 3. Deploy vm1 and vm2 in network1 and vm3 and vm4 in network2
    # 4. Create a PF rule using TCP protocol on port 22 for vm1
    # 5. Create a Static Nat rule for vm2
    # 6. Create an LB rule for vm3 and vm4
    # 7. Create ingress network ACL for allowing all the above rules from
    #    public ip range on network1 and network2.
    # 8. Create egress network ACL for network1 and network2 to access
    #    google.com
    # 9. Create a private gateway for this VPC and add a static route to
    #    this gateway
    # 10. Create a VPN gateway for this VPC and add static route to gateway
    # 11. Make sure that all the PF, LB and Static NAT rules work
    # 12. Make sure that we are able to access google.com from all VM
    # 13. Make sure that the newly added private gateway's and VPN
    #     gateway's static routes work as expected.
    self.debug("Creating a VPC offering..")
    vpc_off = VpcOffering.create(
        self.apiclient,
        self.services["vpc_offering"]
    )
    self.cleanup.append(vpc_off)
    self.validate_vpc_offering(vpc_off)

    self.debug("Enabling the VPC offering created")
    vpc_off.update(self.apiclient, state='Enabled')

    self.debug("creating a VPC network in the account: %s" %
               self.account.name)
    self.services["vpc"]["cidr"] = '10.1.1.1/16'
    vpc = VPC.create(
        self.apiclient,
        self.services["vpc"],
        vpcofferingid=vpc_off.id,
        zoneid=self.zone.id,
        account=self.account.name,
        domainid=self.account.domainid
    )
    self.validate_vpc_network(vpc)

    # Tier offering with LB support (used by network_2).
    self.network_offering = NetworkOffering.create(
        self.apiclient,
        self.services["network_offering"],
        conservemode=False
    )
    # Enable Network offering
    self.network_offering.update(self.apiclient, state='Enabled')
    self.cleanup.append(self.network_offering)

    # Tier offering without LB support (used by network_1).
    self.network_offering_no_lb = NetworkOffering.create(
        self.apiclient,
        self.services["network_offering_no_lb"],
        conservemode=False
    )
    # Enable Network offering
    self.network_offering_no_lb.update(self.apiclient, state='Enabled')
    self.cleanup.append(self.network_offering_no_lb)

    # Creating network using the network offering created
    self.debug("Creating network with network offering: %s" %
               self.network_offering_no_lb.id)
    network_1 = Network.create(
        self.apiclient,
        self.services["network"],
        accountid=self.account.name,
        domainid=self.account.domainid,
        networkofferingid=self.network_offering_no_lb.id,
        zoneid=self.zone.id,
        gateway='10.1.1.1',
        vpcid=vpc.id
    )
    self.debug("Created network with ID: %s" % network_1.id)

    # Creating network using the network offering created
    self.debug("Creating network with network offering: %s" %
               self.network_offering.id)
    network_2 = Network.create(
        self.apiclient,
        self.services["network"],
        accountid=self.account.name,
        domainid=self.account.domainid,
        networkofferingid=self.network_offering.id,
        zoneid=self.zone.id,
        gateway='10.1.2.1',
        vpcid=vpc.id
    )
    self.debug("Created network with ID: %s" % network_2.id)

    self.debug("deploying VMs in network: %s" % network_1.name)
    # Spawn an instance in that network
    vm_1 = VirtualMachine.create(
        self.apiclient,
        self.services["virtual_machine"],
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering.id,
        networkids=[str(network_1.id)]
    )
    self.debug("Deployed VM in network: %s" % network_1.id)
    vm_2 = VirtualMachine.create(
        self.apiclient,
        self.services["virtual_machine"],
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering.id,
        networkids=[str(network_1.id)]
    )
    self.debug("Deployed VM in network: %s" % network_1.id)

    self.debug("deploying VMs in network: %s" % network_2.name)
    # Spawn an instance in that network
    vm_3 = VirtualMachine.create(
        self.apiclient,
        self.services["virtual_machine"],
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering.id,
        networkids=[str(network_2.id)]
    )
    self.debug("Deployed VM in network: %s" % network_2.id)
    vm_4 = VirtualMachine.create(
        self.apiclient,
        self.services["virtual_machine"],
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering.id,
        networkids=[str(network_2.id)]
    )
    self.debug("Deployed VM in network: %s" % network_2.id)

    # Public IP + PF rule (port 22) for vm_1.
    self.debug("Associating public IP for network: %s" % network_1.name)
    public_ip_1 = PublicIPAddress.create(
        self.apiclient,
        accountid=self.account.name,
        zoneid=self.zone.id,
        domainid=self.account.domainid,
        networkid=network_1.id,
        vpcid=vpc.id
    )
    self.debug("Associated %s with network %s" % (
        public_ip_1.ipaddress.ipaddress,
        network_1.id
    ))
    NATRule.create(
        self.apiclient,
        vm_1,
        self.services["natrule"],
        ipaddressid=public_ip_1.ipaddress.id,
        openfirewall=False,
        networkid=network_1.id,
        vpcid=vpc.id
    )
    self.debug("Adding NetwrokACl rules to make NAT rule accessible")
    NetworkACL.create(
        self.apiclient,
        networkid=network_1.id,
        services=self.services["natrule"],
        traffictype='Ingress'
    )

    # Second public IP + static NAT for vm_2.
    self.debug("Associating public IP for network: %s" % network_1.name)
    public_ip_2 = PublicIPAddress.create(
        self.apiclient,
        accountid=self.account.name,
        zoneid=self.zone.id,
        domainid=self.account.domainid,
        networkid=network_1.id,
        vpcid=vpc.id
    )
    self.debug("Associated %s with network %s" % (
        public_ip_2.ipaddress.ipaddress,
        network_1.id
    ))
    self.debug("Enabling static NAT for IP: %s" %
               public_ip_2.ipaddress.ipaddress)
    try:
        StaticNATRule.enable(
            self.apiclient,
            ipaddressid=public_ip_2.ipaddress.id,
            virtualmachineid=vm_2.id,
            networkid=network_1.id
        )
        self.debug("Static NAT enabled for IP: %s" %
                   public_ip_2.ipaddress.ipaddress)
    except Exception as e:
        self.fail("Failed to enable static NAT on IP: %s - %s" % (
            public_ip_2.ipaddress.ipaddress, e))

    public_ips = PublicIPAddress.list(
        self.apiclient,
        networkid=network_1.id,
        listall=True,
        isstaticnat=True,
        account=self.account.name,
        domainid=self.account.domainid
    )
    self.assertEqual(
        isinstance(public_ips, list),
        True,
        "List public Ip for network should list the Ip addr"
    )
    self.assertEqual(
        public_ips[0].ipaddress,
        public_ip_2.ipaddress.ipaddress,
        "List public Ip for network should list the Ip addr"
    )

    # Third public IP + LB rule balancing vm_3 and vm_4 on network_2.
    self.debug("Associating public IP for network: %s" % vpc.name)
    public_ip_3 = PublicIPAddress.create(
        self.apiclient,
        accountid=self.account.name,
        zoneid=self.zone.id,
        domainid=self.account.domainid,
        networkid=network_2.id,
        vpcid=vpc.id
    )
    self.debug("Associated %s with network %s" % (
        public_ip_3.ipaddress.ipaddress,
        network_2.id
    ))
    self.debug("Creating LB rule for IP address: %s" %
               public_ip_3.ipaddress.ipaddress)
    lb_rule = LoadBalancerRule.create(
        self.apiclient,
        self.services["lbrule"],
        ipaddressid=public_ip_3.ipaddress.id,
        accountid=self.account.name,
        networkid=network_2.id,
        vpcid=vpc.id,
        domainid=self.account.domainid
    )
    self.debug("Adding virtual machines %s and %s to LB rule" % (
        vm_3.name, vm_4.name))
    lb_rule.assign(self.apiclient, [vm_3, vm_4])
    self.debug("Adding NetwrokACl rules to make PF and LB accessible")
    NetworkACL.create(
        self.apiclient,
        networkid=network_2.id,
        services=self.services["lbrule"],
        traffictype='Ingress'
    )

    # BUG FIX: the original message had "%s and %s" placeholders but no
    # format arguments, so the literal "%s" was logged; pass the names.
    self.debug("Adding Egress rules to network %s and %s to allow"
               " access to internet" % (network_1.name, network_2.name))
    NetworkACL.create(
        self.apiclient,
        networkid=network_1.id,
        services=self.services["icmp_rule"],
        traffictype='Egress'
    )
    NetworkACL.create(
        self.apiclient,
        networkid=network_2.id,
        services=self.services["icmp_rule"],
        traffictype='Egress'
    )

    # Verify SSH through the PF rule and outbound ping from vm_1.
    self.debug("Checking if we can SSH into VM_1?")
    try:
        ssh_1 = vm_1.get_ssh_client(
            ipaddress=public_ip_1.ipaddress.ipaddress,
            reconnect=True,
            port=self.services["natrule"]["publicport"]
        )
        self.debug("SSH into VM is successfully")
        self.debug("Verifying if we can ping to outside world from VM?")
        # Ping to outsite world
        res = ssh_1.execute("ping -c 1 www.google.com")
        # res = 64 bytes from maa03s17-in-f20.1e100.net (74.125.236.212):
        # icmp_req=1 ttl=57 time=25.9 ms
        # --- www.l.google.com ping statistics ---
        # 1 packets transmitted, 1 received, 0% packet loss, time 0ms
        # rtt min/avg/max/mdev = 25.970/25.970/25.970/0.000 ms
    except Exception as e:
        self.fail("Failed to SSH into VM - %s, %s" %
                  (public_ip_1.ipaddress.ipaddress, e))
    result = str(res)
    self.debug("Result: %s" % result)
    self.assertEqual(
        result.count("1 received"),
        1,
        "Ping to outside world from VM should be successful"
    )

    # Verify SSH through the static-NAT rule and outbound ping from vm_2.
    self.debug("Checking if we can SSH into VM_2?")
    try:
        ssh_2 = vm_2.get_ssh_client(
            ipaddress=public_ip_2.ipaddress.ipaddress,
            reconnect=True,
            port=self.services["natrule"]["publicport"]
        )
        self.debug("SSH into VM is successfully")
        self.debug("Verifying if we can ping to outside world from VM?")
        res = ssh_2.execute("ping -c 1 www.google.com")
    except Exception as e:
        self.fail("Failed to SSH into VM - %s, %s" %
                  (public_ip_2.ipaddress.ipaddress, e))
    result = str(res)
    self.debug("Result: %s" % result)
    self.assertEqual(
        result.count("1 received"),
        1,
        "Ping to outside world from VM should be successful"
    )

    # Verify SSH through the LB rule and outbound ping.
    self.debug("Checking if we can SSH into VM using LB rule?")
    try:
        ssh_3 = vm_3.get_ssh_client(
            ipaddress=public_ip_3.ipaddress.ipaddress,
            reconnect=True,
            port=self.services["lbrule"]["publicport"]
        )
        self.debug("SSH into VM is successfully")
        self.debug("Verifying if we can ping to outside world from VM?")
        res = ssh_3.execute("ping -c 1 www.google.com")
    except Exception as e:
        self.fail("Failed to SSH into VM - %s, %s" %
                  (public_ip_3.ipaddress.ipaddress, e))
    result = str(res)
    self.debug("Result: %s" % result)
    self.assertEqual(
        result.count("1 received"),
        1,
        "Ping to outside world from VM should be successful"
    )
    return
@attr(tags=["advanced", "intervlan"], required_hardware="true")
def test_08_delete_vpc(self):
    """ Test vpc deletion after account deletion

    Builds the same two-tier VPC topology as test_07 (PF, static NAT,
    LB, ingress/egress ACLs, connectivity checks), then deletes the
    owning account and verifies the VPC is cleaned up with it.
    """
    # Validate the following
    # 1. Create a VPC with cidr - 10.1.1.1/16
    # 2. Add network1(10.1.1.1/24) and network2(10.1.2.1/24) to this VPC
    # 3. Deploy vm1 and vm2 in network1 and vm3 and vm4 in network2
    # 4. Create a PF rule using TCP protocol on port 22 for vm1
    # 5. Create a Static Nat rule for vm2
    # 6. Create an LB rule for vm3 and vm4
    # 7. Create ingress network ACL for allowing all the above rules from
    #    public ip range on network1 and network2.
    # 8. Create egress network ACL for network1 and network2 to access
    #    google.com
    # 9. Delete account

    # The account is deleted explicitly at the end of this test, so it
    # must not also be torn down by the generic cleanup.
    self.debug("Removing account from cleanup list")
    self.cleanup = []
    self.debug("Creating a VPC offering..")
    vpc_off = VpcOffering.create(
        self.apiclient,
        self.services["vpc_offering"]
    )
    self.cleanup.append(vpc_off)
    self.validate_vpc_offering(vpc_off)

    self.debug("Enabling the VPC offering created")
    vpc_off.update(self.apiclient, state='Enabled')

    self.debug("creating a VPC network in the account: %s" %
               self.account.name)
    self.services["vpc"]["cidr"] = '10.1.1.1/16'
    vpc = VPC.create(
        self.apiclient,
        self.services["vpc"],
        vpcofferingid=vpc_off.id,
        zoneid=self.zone.id,
        account=self.account.name,
        domainid=self.account.domainid
    )
    self.validate_vpc_network(vpc)

    # Tier offering with LB support (used by network_2).
    self.network_offering = NetworkOffering.create(
        self.apiclient,
        self.services["network_offering"],
        conservemode=False
    )
    # Enable Network offering
    self.network_offering.update(self.apiclient, state='Enabled')
    self.cleanup.append(self.network_offering)

    # Tier offering without LB support (used by network_1).
    self.network_offering_no_lb = NetworkOffering.create(
        self.apiclient,
        self.services["network_offering_no_lb"],
        conservemode=False
    )
    # Enable Network offering
    self.network_offering_no_lb.update(self.apiclient, state='Enabled')
    self.cleanup.append(self.network_offering_no_lb)

    # Creating network using the network offering created
    # BUG FIX: the original debug line logged self.network_offering.id,
    # but network_1 is created with the no-LB offering.
    self.debug("Creating network with network offering: %s" %
               self.network_offering_no_lb.id)
    network_1 = Network.create(
        self.apiclient,
        self.services["network"],
        accountid=self.account.name,
        domainid=self.account.domainid,
        networkofferingid=self.network_offering_no_lb.id,
        zoneid=self.zone.id,
        gateway='10.1.1.1',
        vpcid=vpc.id
    )
    self.debug("Created network with ID: %s" % network_1.id)

    # Creating network using the network offering created
    # BUG FIX: the original debug line logged the no-LB offering id,
    # but network_2 is created with the LB-capable offering.
    self.debug("Creating network with network offering: %s" %
               self.network_offering.id)
    network_2 = Network.create(
        self.apiclient,
        self.services["network"],
        accountid=self.account.name,
        domainid=self.account.domainid,
        networkofferingid=self.network_offering.id,
        zoneid=self.zone.id,
        gateway='10.1.2.1',
        vpcid=vpc.id
    )
    self.debug("Created network with ID: %s" % network_2.id)

    self.debug("deploying VMs in network: %s" % network_1.name)
    # Spawn an instance in that network
    vm_1 = VirtualMachine.create(
        self.apiclient,
        self.services["virtual_machine"],
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering.id,
        networkids=[str(network_1.id)]
    )
    self.debug("Deployed VM in network: %s" % network_1.id)
    vm_2 = VirtualMachine.create(
        self.apiclient,
        self.services["virtual_machine"],
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering.id,
        networkids=[str(network_1.id)]
    )
    self.debug("Deployed VM in network: %s" % network_1.id)

    self.debug("deploying VMs in network: %s" % network_2.name)
    # Spawn an instance in that network
    vm_3 = VirtualMachine.create(
        self.apiclient,
        self.services["virtual_machine"],
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering.id,
        networkids=[str(network_2.id)]
    )
    self.debug("Deployed VM in network: %s" % network_2.id)
    vm_4 = VirtualMachine.create(
        self.apiclient,
        self.services["virtual_machine"],
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering.id,
        networkids=[str(network_2.id)]
    )
    self.debug("Deployed VM in network: %s" % network_2.id)

    # Public IP + PF rule (port 22) for vm_1.
    self.debug("Associating public IP for network: %s" % network_1.name)
    public_ip_1 = PublicIPAddress.create(
        self.apiclient,
        accountid=self.account.name,
        zoneid=self.zone.id,
        domainid=self.account.domainid,
        networkid=network_1.id,
        vpcid=vpc.id
    )
    self.debug("Associated %s with network %s" % (
        public_ip_1.ipaddress.ipaddress,
        network_1.id
    ))
    NATRule.create(
        self.apiclient,
        vm_1,
        self.services["natrule"],
        ipaddressid=public_ip_1.ipaddress.id,
        openfirewall=False,
        networkid=network_1.id,
        vpcid=vpc.id
    )
    self.debug("Adding NetwrokACl rules to make NAT rule accessible")
    NetworkACL.create(
        self.apiclient,
        networkid=network_1.id,
        services=self.services["natrule"],
        traffictype='Ingress'
    )

    # Second public IP + static NAT for vm_2.
    self.debug("Associating public IP for network: %s" % network_1.name)
    public_ip_2 = PublicIPAddress.create(
        self.apiclient,
        accountid=self.account.name,
        zoneid=self.zone.id,
        domainid=self.account.domainid,
        networkid=network_1.id,
        vpcid=vpc.id
    )
    self.debug("Associated %s with network %s" % (
        public_ip_2.ipaddress.ipaddress,
        network_1.id
    ))
    self.debug("Enabling static NAT for IP: %s" %
               public_ip_2.ipaddress.ipaddress)
    try:
        StaticNATRule.enable(
            self.apiclient,
            ipaddressid=public_ip_2.ipaddress.id,
            virtualmachineid=vm_2.id,
            networkid=network_1.id
        )
        self.debug("Static NAT enabled for IP: %s" %
                   public_ip_2.ipaddress.ipaddress)
    except Exception as e:
        self.fail("Failed to enable static NAT on IP: %s - %s" % (
            public_ip_2.ipaddress.ipaddress, e))

    public_ips = PublicIPAddress.list(
        self.apiclient,
        networkid=network_1.id,
        listall=True,
        isstaticnat=True,
        account=self.account.name,
        domainid=self.account.domainid
    )
    self.assertEqual(
        isinstance(public_ips, list),
        True,
        "List public Ip for network should list the Ip addr"
    )
    self.assertEqual(
        public_ips[0].ipaddress,
        public_ip_2.ipaddress.ipaddress,
        "List public Ip for network should list the Ip addr"
    )

    # Third public IP + LB rule balancing vm_3 and vm_4 on network_2.
    self.debug("Associating public IP for network: %s" % vpc.name)
    public_ip_3 = PublicIPAddress.create(
        self.apiclient,
        accountid=self.account.name,
        zoneid=self.zone.id,
        domainid=self.account.domainid,
        networkid=network_2.id,
        vpcid=vpc.id
    )
    self.debug("Associated %s with network %s" % (
        public_ip_3.ipaddress.ipaddress,
        network_2.id
    ))
    self.debug("Creating LB rule for IP address: %s" %
               public_ip_3.ipaddress.ipaddress)
    lb_rule = LoadBalancerRule.create(
        self.apiclient,
        self.services["lbrule"],
        ipaddressid=public_ip_3.ipaddress.id,
        accountid=self.account.name,
        networkid=network_2.id,
        vpcid=vpc.id,
        domainid=self.account.domainid
    )
    self.debug("Adding virtual machines %s and %s to LB rule" % (
        vm_3.name, vm_4.name))
    lb_rule.assign(self.apiclient, [vm_3, vm_4])
    self.debug("Adding NetwrokACl rules to make PF and LB accessible")
    NetworkACL.create(
        self.apiclient,
        networkid=network_2.id,
        services=self.services["lbrule"],
        traffictype='Ingress'
    )

    # BUG FIX: the original message had "%s and %s" placeholders but no
    # format arguments, so the literal "%s" was logged; pass the names.
    self.debug("Adding Egress rules to network %s and %s to allow"
               " access to internet" % (network_1.name, network_2.name))
    NetworkACL.create(
        self.apiclient,
        networkid=network_1.id,
        services=self.services["icmp_rule"],
        traffictype='Egress'
    )
    NetworkACL.create(
        self.apiclient,
        networkid=network_2.id,
        services=self.services["icmp_rule"],
        traffictype='Egress'
    )

    # Verify SSH through the PF rule and outbound ping from vm_1.
    self.debug("Checking if we can SSH into VM_1?")
    try:
        ssh_1 = vm_1.get_ssh_client(
            ipaddress=public_ip_1.ipaddress.ipaddress,
            reconnect=True,
            port=self.services["natrule"]["publicport"])
        self.debug("SSH into VM is successfully")
        self.debug("Verifying if we can ping to outside world from VM?")
        # Ping to outsite world
        res = ssh_1.execute("ping -c 1 www.google.com")
        # res = 64 bytes from maa03s17-in-f20.1e100.net (74.125.236.212):
        # icmp_req=1 ttl=57 time=25.9 ms
        # --- www.l.google.com ping statistics ---
        # 1 packets transmitted, 1 received, 0% packet loss, time 0ms
        # rtt min/avg/max/mdev = 25.970/25.970/25.970/0.000 ms
    except Exception as e:
        self.fail("Failed to SSH into VM - %s, %s" %
                  (public_ip_1.ipaddress.ipaddress, e))
    result = str(res)
    self.debug("result: %s" % result)
    self.assertEqual(
        result.count("1 received"),
        1,
        "Ping to outside world from VM should be successful"
    )

    # Verify SSH through the static-NAT rule and outbound ping from vm_2.
    self.debug("Checking if we can SSH into VM_2?")
    try:
        ssh_2 = vm_2.get_ssh_client(
            ipaddress=public_ip_2.ipaddress.ipaddress,
            reconnect=True,
            port=self.services["natrule"]["publicport"])
        self.debug("SSH into VM is successfully")
        self.debug("Verifying if we can ping to outside world from VM?")
        res = ssh_2.execute("ping -c 1 www.google.com")
    except Exception as e:
        self.fail("Failed to SSH into VM - %s, %s" %
                  (public_ip_2.ipaddress.ipaddress, e))
    result = str(res)
    self.debug("Result: %s" % result)
    self.assertEqual(
        result.count("1 received"),
        1,
        "Ping to outside world from VM should be successful"
    )

    # Verify SSH through the LB rule and outbound ping.
    self.debug("Checking if we can SSH into VM using LB rule?")
    try:
        ssh_3 = vm_3.get_ssh_client(
            ipaddress=public_ip_3.ipaddress.ipaddress,
            reconnect=True,
            port=self.services["lbrule"]["publicport"]
        )
        self.debug("SSH into VM is successfully")
        self.debug("Verifying if we can ping to outside world from VM?")
        res = ssh_3.execute("ping -c 1 www.google.com")
    except Exception as e:
        self.fail("Failed to SSH into VM - %s, %s" %
                  (public_ip_3.ipaddress.ipaddress, e))
    result = str(res)
    self.debug("Result: %s" % result)
    self.assertEqual(
        result.count("1 received"),
        1,
        "Ping to outside world from VM should be successful"
    )

    # Delete the owning account and wait for the background cleanup
    # thread (account.cleanup.interval) to remove its resources.
    self.debug("Deleting the account")
    self.account.delete(self.apiclient)
    self.debug("Waiting for account to cleanup")
    interval = list_configurations(
        self.apiclient,
        name='account.cleanup.interval'
    )
    # Sleep to ensure that all resources are deleted
    time.sleep(int(interval[0].value))
    self.debug("Checking if VPC is deleted after account deletion")
    vpcs = VPC.list(
        self.apiclient,
        id=vpc.id,
        listall=True
    )
    self.assertEqual(
        vpcs,
        None,
        "List VPC should not return any response"
    )
    return
@attr(tags=["advanced", "intervlan"], required_hardware="false")
def test_09_vpc_create(self):
    """ Test to create vpc and verify VPC state, VR and SourceNatIP
    """
    # Validate the following:
    # 1. VPC should get created with "Enabled" state.
    # 2. The VR should start when VPC is created.
    # 3. SourceNatIP address should be allocated to the VR
    self.services["vpc"]["cidr"] = "10.1.1.1/16"
    self.debug("creating a VPC network in the account: %s" %
               self.account.name)
    created_vpc = VPC.create(
        self.apiclient,
        self.services["vpc"],
        account=self.account.name,
        domainid=self.account.domainid,
        vpcofferingid=self.vpc_off.id,
        zoneid=self.zone.id
    )
    self.validate_vpc_network(created_vpc)

    # 1) The VPC must come up already Enabled.
    self.debug("Verify if the VPC was created with enabled state")
    self.assertEqual(
        created_vpc.state,
        'Enabled',
        "VPC after creation should be in enabled state but the "
        "state is %s" % created_vpc.state
    )

    # 2) Creating the VPC must have started its virtual router.
    self.debug("Verify if the Router has started")
    router_list = Router.list(
        self.apiclient,
        account=self.account.name,
        domainid=self.account.domainid,
        listall=True
    )
    self.assertEqual(
        isinstance(router_list, list),
        True,
        "List Routers should return a valid list"
    )
    self.assertEqual(
        router_list[0].state,
        'Running',
        "Router should be in running state"
    )

    # 3) The VPC's source-NAT public IP must be held by that router.
    source_nat_ips = PublicIPAddress.list(
        self.apiclient,
        account=self.account.name,
        domainid=self.account.domainid,
        listall=True,
        issourcenat=True,
        vpcid=created_vpc.id
    )
    self.assertEqual(
        source_nat_ips[0].ipaddress,
        router_list[0].publicip,
        "Source Nat IP address was not allocated to VR"
    )
@attr(tags=["advanced", "intervlan"], required_hardware="false")
def test_10_nonoverlaping_cidrs(self):
    """ Test creation of multiple VPCs with non-overlapping CIDRs

    Two VPCs with disjoint CIDRs must both be created successfully; a
    third VPC whose CIDR overlaps the first must be rejected by the API.
    """
    self.services["vpc"]["cidr"] = "10.1.1.1/16"
    self.debug("Creating a VPC network in the account: %s" %
               self.account.name)
    vpc_1 = VPC.create(
        self.apiclient,
        self.services["vpc"],
        vpcofferingid=self.vpc_off.id,
        zoneid=self.zone.id,
        account=self.account.name,
        domainid=self.account.domainid
    )
    self.validate_vpc_network(vpc_1)

    self.services["vpc"]["cidr"] = "10.2.1.1/16"
    self.debug(
        "Creating a non-overlapping VPC network in the account: %s" %
        self.account.name)
    vpc_2 = VPC.create(
        self.apiclient,
        self.services["vpc"],
        vpcofferingid=self.vpc_off.id,
        zoneid=self.zone.id,
        account=self.account.name,
        domainid=self.account.domainid
    )
    self.validate_vpc_network(vpc_2)

    # Re-use the first CIDR: this creation is expected to fail.
    self.services["vpc"]["cidr"] = "10.1.1.1/16"
    self.debug("Creating a overlapping VPC network in the account: %s" %
               self.account.name)
    try:
        vpc_3 = VPC.create(
            self.apiclient,
            self.services["vpc"],
            vpcofferingid=self.vpc_off.id,
            zoneid=self.zone.id,
            account=self.account.name,
            domainid=self.account.domainid
        )
        self.debug("%s" % vpc_3)
    except Exception as e:
        # Expected path: the API rejects the overlapping CIDR.
        self.debug("%s" % e)
    else:
        # BUG FIX: the original used `assert("VPC created with overlapping
        # CIDR")`, which asserts a non-empty string and therefore ALWAYS
        # passes; the test could never fail here. Use self.fail() so the
        # test fails when the overlapping VPC is wrongly created.
        self.fail("VPC created with overlapping CIDR")
    return
@attr(tags=["advanced", "intervlan"], required_hardware="true")
def test_11_deploy_vm_wo_network_netdomain(self):
    """ Test deployment of vm in a VPC without network domain
    """
    # 1. Create VPC without providing networkDomain.
    # 2. Add network without networkDomain to this VPC.
    # 3. Deploy VM in this network.

    # Ensure the zone has a network domain so the VM inherits a
    # well-known default when neither VPC nor tier specify one.
    if self.zone.domain is None:
        cmd = updateZone.updateZoneCmd()
        cmd.id = self.zone.id
        cmd.domain = "test.domain.org"
        self.apiclient.updateZone(cmd)
        # Re-fetch the zone so self.zone.domain reflects the update.
        self.zone = Zone.list(self.apiclient, id=self.zone.id)[0]
    self.services["vpc"]["cidr"] = "10.1.1.1/16"
    self.debug("creating a VPC network in the account: %s" %
               self.account.name)
    # Note: no networkDomain argument — the zone domain should apply.
    vpc = VPC.create(
        self.apiclient,
        self.services["vpc"],
        vpcofferingid=self.vpc_off.id,
        zoneid=self.zone.id,
        account=self.account.name,
        domainid=self.account.domainid
    )
    self.validate_vpc_network(vpc)
    self.network_offering = NetworkOffering.create(
        self.apiclient,
        self.services["network_offering"],
        conservemode=False
    )
    # Enable Network offering
    self.network_offering.update(self.apiclient, state='Enabled')
    self.cleanup.append(self.network_offering)
    gateway = vpc.cidr.split('/')[0]
    # Split the cidr to retrieve gateway
    # for eg. cidr = 10.0.0.1/24
    # Gateway = 10.0.0.1
    # Creating network using the network offering created
    self.debug("Creating network with network offering: %s" %
               self.network_offering.id)
    network = Network.create(
        self.apiclient,
        self.services["network"],
        accountid=self.account.name,
        domainid=self.account.domainid,
        networkofferingid=self.network_offering.id,
        zoneid=self.zone.id,
        gateway=gateway,
        vpcid=vpc.id,
    )
    self.debug("Created network with ID: %s" % network.id)
    # Spawn an instance in that network
    virtual_machine = VirtualMachine.create(
        self.apiclient,
        self.services["virtual_machine"],
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering.id,
        networkids=[str(network.id)]
    )
    self.debug("Deployed VM in network: %s" % network.id)
    # The VM's resolv.conf domain must match the zone's network domain.
    self.validate_vm_netdomain(
        virtual_machine,
        vpc,
        network,
        self.zone.domain)
def validate_vm_netdomain(self, vm, vpc, network, expected_netdomain):
    """Verify that *vm*'s effective DNS domain equals *expected_netdomain*.

    Associates a public IP on *network*, port-forwards to *vm*, opens the
    ACL for it, SSHes in, reads /etc/resolv.conf and compares the domain
    found there against *expected_netdomain*. Fails the test on mismatch
    or if the VM cannot be reached.
    """
    self.debug("Associating public IP for network: %s" % network.name)
    src_nat_ip_addr = PublicIPAddress.create(
        self.apiclient,
        zoneid=self.zone.id,
        accountid=self.account.name,
        domainid=self.account.domainid,
        networkid=network.id,
        vpcid=vpc.id
    )
    self.debug("Associated %s with network %s" % (
        src_nat_ip_addr.ipaddress.ipaddress,
        network.id
    ))
    self.debug("Public IP %s" % src_nat_ip_addr.__dict__)
    # Create NAT rule
    nat_rule = NATRule.create(
        self.apiclient,
        vm,
        self.services["natrule"],
        src_nat_ip_addr.ipaddress.id,
        openfirewall=False,
        networkid=network.id,
        vpcid=vpc.id
    )
    # Confirm the PF rule is actually listed back by the API.
    list_nat_rule_response = NATRule.list(
        self.apiclient,
        id=nat_rule.id
    )
    self.assertEqual(
        isinstance(list_nat_rule_response, list),
        True,
        "Check list response returns a valid list"
    )
    self.assertNotEqual(
        len(list_nat_rule_response),
        0,
        "Check Port Forwarding Rule is created"
    )
    self.assertEqual(
        list_nat_rule_response[0].id,
        nat_rule.id,
        "Check Correct Port forwarding Rule is returned"
    )
    self.debug("Adding NetworkACl rules to make NAT rule accessible")
    NetworkACL.create(
        self.apiclient,
        networkid=network.id,
        services=self.services["natrule"],
        traffictype='Ingress'
    )
    self.debug("SSHing into VM with IP address %s with NAT IP %s" %
               (
                   vm.ipaddress,
                   src_nat_ip_addr.ipaddress.ipaddress))
    try:
        ssh_1 = vm.get_ssh_client(
            ipaddress=src_nat_ip_addr.ipaddress.ipaddress)
        self.debug("SSH into VM is successfully")
        # Ping to outsite world
        res = ssh_1.execute("cat /etc/resolv.conf")
    except Exception as e:
        # NOTE(review): this failure message reports vm.ssh_ip, but the
        # SSH above used src_nat_ip_addr — confirm which IP is intended.
        self.fail("Failed to SSH into VM - %s, %s" %
                  (vm.ssh_ip, e))
    # Assumes resolv.conf's second line is "domain <name>" (or similar
    # two-token form) so the domain is token [1] of line [1] —
    # TODO confirm this holds for the template's guest OS.
    vm_domain = res[1].split(" ")[1]
    self.assertEqual(
        vm_domain,
        expected_netdomain,
        "The network domain assigned to virtual machine "
        "is %s expected domain was %s" %
        (vm_domain, expected_netdomain)
    )
@attr(tags=["advanced", "intervlan"], required_hardware="false")
def test_12_deploy_vm_with_netdomain(self):
    """ Test deployment of vm in a VPC with network domain
    """
    # 1. Create VPC without providing networkDomain.
    # 2. Add network with networkDomain to this VPC.
    # 3. It should fail.
    self.services["vpc"]["cidr"] = "10.1.1.1/16"
    self.debug("creating a VPC network in the account: %s" %
               self.account.name)
    # VPC created WITHOUT a networkDomain of its own.
    vpc = VPC.create(
        self.apiclient,
        self.services["vpc"],
        vpcofferingid=self.vpc_off.id,
        zoneid=self.zone.id,
        account=self.account.name,
        domainid=self.account.domainid
    )
    self.validate_vpc_network(vpc)
    self.network_offering = NetworkOffering.create(
        self.apiclient,
        self.services["network_offering"],
        conservemode=False
    )
    # Enable Network offering
    self.network_offering.update(self.apiclient, state='Enabled')
    self.cleanup.append(self.network_offering)
    gateway = vpc.cidr.split('/')[0]
    # Split the cidr to retrieve gateway
    # for eg. cidr = 10.0.0.1/24
    # Gateway = 10.0.0.1
    # Creating network using the network offering created
    self.debug("Creating network with network offering: %s" %
               self.network_offering.id)
    # Creation of network with different network domain than the one
    # specified in VPC should fail.
    with self.assertRaises(Exception):
        Network.create(
            self.apiclient,
            self.services["network"],
            accountid=self.account.name,
            domainid=self.account.domainid,
            networkofferingid=self.network_offering.id,
            zoneid=self.zone.id,
            gateway=gateway,
            vpcid=vpc.id,
            networkdomain='test.netdomain'
        )
@attr(tags=["advanced", "intervlan"], required_hardware="true")
def test_13_deploy_vm_with_vpc_netdomain(self):
    """ Test deployment of vm in a VPC with network domain
    """
    # 1. Create VPC with providing networkDomain.
    # 2. Add network without networkDomain to this VPC.
    # 3. Deploy VM in this network, it should get VPC netdomain
    self.services["vpc"]["cidr"] = "10.1.1.1/16"
    self.debug("creating a VPC network in the account: %s" %
               self.account.name)
    # The domain the deployed VM is expected to inherit from the VPC.
    netdomain = "cl2.internal"
    vpc = VPC.create(
        self.apiclient,
        self.services["vpc"],
        vpcofferingid=self.vpc_off.id,
        zoneid=self.zone.id,
        account=self.account.name,
        domainid=self.account.domainid,
        networkDomain=netdomain
    )
    self.validate_vpc_network(vpc)
    self.network_offering = NetworkOffering.create(
        self.apiclient,
        self.services["network_offering"],
        conservemode=False
    )
    # Enable Network offering
    self.network_offering.update(self.apiclient, state='Enabled')
    self.cleanup.append(self.network_offering)
    gateway = vpc.cidr.split('/')[0]
    # Split the cidr to retrieve gateway
    # for eg. cidr = 10.0.0.1/24
    # Gateway = 10.0.0.1
    # Creating network using the network offering created
    self.debug("Creating network with network offering: %s" %
               self.network_offering.id)
    # Tier deliberately created without its own networkdomain.
    network = Network.create(
        self.apiclient,
        self.services["network"],
        accountid=self.account.name,
        domainid=self.account.domainid,
        networkofferingid=self.network_offering.id,
        zoneid=self.zone.id,
        gateway=gateway,
        vpcid=vpc.id,
    )
    self.debug("Created network with ID: %s" % network.id)
    # Spawn an instance in that network
    virtual_machine = VirtualMachine.create(
        self.apiclient,
        self.services["virtual_machine"],
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering.id,
        networkids=[str(network.id)]
    )
    self.debug("Deployed VM in network: %s" % network.id)
    # The VM's resolv.conf domain must equal the VPC's networkDomain.
    self.validate_vm_netdomain(virtual_machine, vpc, network, netdomain)
@attr(tags=["advanced", "intervlan"], required_hardware="false")
def test_14_deploy_vm_1(self):
    """ Test vm deploy in network by a user where VPC was created
        without account/domain ID
    """
    # 1. Create VPC without providing account/domain ID.
    # 2. Add network with using user account to this VPC.
    # 3. Deploy VM in this network
    user = Account.create(
        self.apiclient,
        self.services["account"]
    )
    self.debug("Created account: %s" % user.name)
    self.cleanup.append(user)
    self.services["vpc"]["cidr"] = "10.1.1.1/16"
    self.debug("creating a VPC network in the account: %s" %
               user.name)
    # API client scoped to the regular user (type=0), so ownership is
    # implied by the caller rather than explicit account/domain args.
    userapiclient = self.testClient.getUserApiClient(
        UserName=user.name,
        DomainName=user.domain,
        type=0)
    # No account/domainid passed — the VPC belongs to the calling user.
    vpc = VPC.create(
        userapiclient,
        self.services["vpc"],
        vpcofferingid=self.vpc_off.id,
        zoneid=self.zone.id,
    )
    self.validate_vpc_network(vpc)
    self.network_offering = NetworkOffering.create(
        self.apiclient,
        self.services["network_offering"],
        conservemode=False
    )
    # Enable Network offering
    self.network_offering.update(self.apiclient, state='Enabled')
    self.cleanup.append(self.network_offering)
    gateway = vpc.cidr.split('/')[0]
    # Split the cidr to retrieve gateway
    # for eg. cidr = 10.0.0.1/24
    # Gateway = 10.0.0.1
    # Creating network using the network offering created
    self.debug("Creating network with network offering: %s" %
               self.network_offering.id)
    # Tier and VM are also created through the user's client.
    network = Network.create(
        userapiclient,
        self.services["network"],
        networkofferingid=self.network_offering.id,
        zoneid=self.zone.id,
        gateway=gateway,
        vpcid=vpc.id
    )
    self.debug("Created network with ID: %s" % network.id)
    # Spawn an instance in that network
    virtual_machine = VirtualMachine.create(
        userapiclient,
        self.services["virtual_machine"],
        serviceofferingid=self.service_offering.id,
        networkids=[str(network.id)]
    )
    self.debug("Deployed VM in network: %s" % network.id)
    self.assertNotEqual(virtual_machine,
                        None,
                        "VM creation in the network failed")
    return
@attr(tags=["advanced", "intervlan"], required_hardware="false")
def test_15_deploy_vm_2(self):
    """ Test deployment of vm in a network in a domain admin
        account where VPC is created without account/domain ID
    """
    # 1. Create VPC without providing account/domain ID.
    # 2. Add network with using domain admin account to this VPC.
    # 3. Deploy VM in this network
    user = Account.create(
        self.apiclient,
        self.services["account"]
    )
    self.debug("Created account: %s" % user.name)
    self.cleanup.append(user)
    self.services["vpc"]["cidr"] = "10.1.1.1/16"
    self.debug("creating a VPC network in the account: %s" %
               user.name)
    # 0 - User, 1 - Root Admin, 2 - Domain Admin
    # NOTE(review): the account above is created from
    # self.services["account"], while the client below is scoped to
    # self.services["domain"]["name"] as a domain admin (type=2) —
    # confirm the services dict wires these two up consistently.
    userapiclient = self.testClient.getUserApiClient(
        UserName=user.name,
        DomainName=self.services["domain"]["name"],
        type=2)
    # No account/domainid passed — ownership is implied by the caller.
    vpc = VPC.create(
        userapiclient,
        self.services["vpc"],
        vpcofferingid=self.vpc_off.id,
        zoneid=self.zone.id,
    )
    self.validate_vpc_network(vpc)
    self.network_offering = NetworkOffering.create(
        self.apiclient,
        self.services["network_offering"],
        conservemode=False
    )
    # Enable Network offering
    self.network_offering.update(self.apiclient, state='Enabled')
    self.cleanup.append(self.network_offering)
    gateway = vpc.cidr.split('/')[0]
    # Split the cidr to retrieve gateway
    # for eg. cidr = 10.0.0.1/24
    # Gateway = 10.0.0.1
    # Creating network using the network offering created
    self.debug("Creating network with network offering: %s" %
               self.network_offering.id)
    network = Network.create(
        userapiclient,
        self.services["network"],
        networkofferingid=self.network_offering.id,
        zoneid=self.zone.id,
        gateway=gateway,
        vpcid=vpc.id
    )
    self.debug("Created network with ID: %s" % network.id)
    # Spawn an instance in that network
    virtual_machine = VirtualMachine.create(
        userapiclient,
        self.services["virtual_machine"],
        serviceofferingid=self.service_offering.id,
        networkids=[str(network.id)]
    )
    self.debug("Deployed VM in network: %s" % network.id)
    self.assertNotEqual(virtual_machine,
                        None,
                        "VM creation in the network failed")
    return
@attr(tags=["advanced", "intervlan"], required_hardware="false")
def test_16_deploy_vm_for_user_by_admin(self):
""" Test deployment of vm in a network by root admin for user.
"""
# 1. As root admin account ,
# Create VPC(name,zoneId,cidr,vpcOfferingId,networkDomain by
# passing user Account/domain ID.
# 2. As the user account used in step1 , create a network as part
# of this VPC.
# 3. Deploy Vms as part of this network.
user = Account.create(
self.apiclient,
self.services["account"]
)
self.debug("Created account: %s" % user.name)
self.cleanup.append(user)
self.services["vpc"]["cidr"] = "10.1.1.1/16"
self.debug("creating a VPC network in the account: %s" %
user.name)
userapiclient = self.testClient.getUserApiClient(
UserName=user.name,
DomainName=user.domain,
type=0)
vpc = VPC.create(
self.apiclient,
self.services["vpc"],
account=user.name,
domainid=user.domainid,
vpcofferingid=self.vpc_off.id,
zoneid=self.zone.id,
)
self.validate_vpc_network(vpc)
self.network_offering = NetworkOffering.create(
self.apiclient,
self.services["network_offering"],
conservemode=False
)
# Enable Network offering
self.network_offering.update(self.apiclient, state='Enabled')
self.cleanup.append(self.network_offering)
gateway = vpc.cidr.split('/')[0]
# Split the cidr to retrieve gateway
# for eg. cidr = 10.0.0.1/24
# Gateway = 10.0.0.1
# Creating network using the network offering created
self.debug("Creating network with network offering: %s" %
self.network_offering.id)
network = Network.create(
userapiclient,
self.services["network"],
networkofferingid=self.network_offering.id,
zoneid=self.zone.id,
gateway=gateway,
vpcid=vpc.id
)
self.debug("Created network with ID: %s" % network.id)
# Spawn an instance in that network
virtual_machine = VirtualMachine.create(
userapiclient,
self.services["virtual_machine"],
serviceofferingid=self.service_offering.id,
networkids=[str(network.id)]
)
self.debug("Deployed VM in network: %s" % network.id)
self.assertNotEqual(virtual_machine,
None,
"VM creation in the network failed")
return
@attr(tags=["advanced", "intervlan"], required_hardware="false")
def test_17_deploy_vm_for_user_by_domain_admin(self):
""" Test deployment of vm in a network by domain admin for user.
"""
# 1. As domain admin account , Create
# VPC(name,zoneId,cidr,vpcOfferingId,networkDomain
# by passing user Account/domain ID.
# 2. As the user account used in step1, create network as part of
# this VPC
# 3. Deploy Vms as part of this network.
domain_admin = Account.create(
self.apiclient,
self.services["domain_admin"]
)
self.debug("Created account: %s" % domain_admin.name)
self.cleanup.append(domain_admin)
da_apiclient = self.testClient.getUserApiClient(
UserName=domain_admin.name,
DomainName=domain_admin.domain,
type=2)
user = Account.create(
self.apiclient,
self.services["account"]
)
self.debug("Created account: %s" % user.name)
self.cleanup.append(user)
self.services["vpc"]["cidr"] = "10.1.1.1/16"
self.debug("creating a VPC network in the account: %s" %
user.name)
# 0 - User, 1 - Root Admin, 2 - Domain Admin
self.testClient.getUserApiClient(
UserName=user.name,
DomainName=user.domain,
type=0)
with self.assertRaises(CloudstackAPIException):
VPC.create(
da_apiclient,
self.services["vpc"],
account=user.name,
domainid=user.domainid,
vpcofferingid=self.vpc_off.id,
zoneid=self.zone.id,
)
@attr(tags=["advanced", "intervlan"], required_hardware="false")
def test_18_create_net_for_user_diff_domain_by_doadmin(self):
""" Test creation of network by domain admin for user from different domain
"""
# 1. As domain admin account , Create VPC(name,zoneId,cidr,
# vpcOfferingId,networkDomain) without passing Account/domain ID.
# 2. As any User account that is not under this domain , create a
# network as part of this VPC.
domain_admin = Account.create(
self.apiclient,
self.services["domain_admin"]
)
self.debug("Created account: %s" % domain_admin.name)
self.cleanup.append(domain_admin)
da_apiclient = self.testClient.getUserApiClient(
UserName=domain_admin.name,
DomainName=self.services["domain"]["name"],
type=2)
user = Account.create(
self.apiclient,
self.services["account"]
)
self.debug("Created account: %s" % user.name)
self.cleanup.append(user)
self.services["vpc"]["cidr"] = "10.1.1.1/16"
self.debug("creating a VPC network in the account: %s" %
user.name)
# 0 - User, 1 - Root Admin, 2 - Domain Admin
userapiclient = self.testClient.getUserApiClient(
UserName=user.name,
DomainName=user.domain,
type=0)
vpc = VPC.create(
da_apiclient,
self.services["vpc"],
vpcofferingid=self.vpc_off.id,
zoneid=self.zone.id,
)
self.validate_vpc_network(vpc)
self.network_offering = NetworkOffering.create(
self.apiclient,
self.services["network_offering"],
conservemode=False
)
# Enable Network offering
self.network_offering.update(self.apiclient, state='Enabled')
self.cleanup.append(self.network_offering)
gateway = vpc.cidr.split('/')[0]
# Split the cidr to retrieve gateway
# for eg. cidr = 10.0.0.1/24
# Gateway = 10.0.0.1
# Creating network using the network offering created
self.debug("Creating network with network offering: %s" %
self.network_offering.id)
with self.assertRaises(Exception):
Network.create(
userapiclient,
self.services["network"],
networkofferingid=self.network_offering.id,
zoneid=self.zone.id,
gateway=gateway,
vpcid=vpc.id
)
@attr(tags=["advanced", "intervlan"], required_hardware="false")
def test_19_create_vpc_wo_params(self):
""" Test creation of VPC without mandatory parameters
"""
# Validate the following
# 1. Create a VPC with cidr - 10.1.1.1/16
# 2. Delete VPC. Delete VPC should be successful
self.services["vpc"]["cidr"] = "10.1.1.1/16"
self.debug("creating a VPC network in the account: %s" %
self.account.name)
# Create VPC without vpcOffering param
with self.assertRaises(Exception):
VPC.create(
self.apiclient,
self.services["vpc"],
zoneid=self.zone.id,
account=self.account.name,
domainid=self.account.domainid
)
self.services["vpc_no_name"]["cidr"] = "10.1.1.1/16"
# Create VPC without name param
with self.assertRaises(Exception):
VPC.create(
self.apiclient,
self.services["vpc_no_name"],
vpcofferingid=self.vpc_off.id,
zoneid=self.zone.id,
account=self.account.name,
domainid=self.account.domainid
)
# Create VPC without zoneid param
with self.assertRaises(Exception):
VPC.create(
self.apiclient,
self.services["vpc"],
vpcofferingid=self.vpc_off.id,
account=self.account.name,
domainid=self.account.domainid
)
vpc_wo_cidr = {"name": "TestVPC_WO_CIDR",
"displaytext": "TestVPC_WO_CIDR"
}
# Create VPC without CIDR
with self.assertRaises(Exception):
VPC.create(
self.apiclient,
vpc_wo_cidr,
vpcofferingid=self.vpc_off.id,
zoneid=self.zone.id,
account=self.account.name,
domainid=self.account.domainid
)
@attr(tags=["advanced", "intervlan"], required_hardware="false")
def test_20_update_vpc_name_display_text(self):
""" Test to verify updation of vpc name and display text
"""
# Validate the following:
# 1. VPC should get created with "Enabled" state.
# 2. The VR should start when VPC is created.
# 3. SourceNatIP address should be allocated to the VR
self.services["vpc"]["cidr"] = "10.1.1.1/16"
self.debug("creating a VPC network in the account: %s" %
self.account.name)
vpc = VPC.create(
self.apiclient,
self.services["vpc"],
vpcofferingid=self.vpc_off.id,
zoneid=self.zone.id,
account=self.account.name,
domainid=self.account.domainid
)
self.validate_vpc_network(vpc)
self.network_offering = NetworkOffering.create(
self.apiclient,
self.services["network_offering"],
conservemode=False
)
# Enable Network offering
self.network_offering.update(self.apiclient, state='Enabled')
self.cleanup.append(self.network_offering)
gateway = vpc.cidr.split('/')[0]
# Split the cidr to retrieve gateway
# for eg. cidr = 10.0.0.1/24
# Gateway = 10.0.0.1
# Creating network using the network offering created
self.debug("Creating network with network offering: %s" %
self.network_offering.id)
network = Network.create(
self.apiclient,
self.services["network"],
accountid=self.account.name,
domainid=self.account.domainid,
networkofferingid=self.network_offering.id,
zoneid=self.zone.id,
gateway=gateway,
vpcid=vpc.id
)
self.debug("Created network with ID: %s" % network.id)
new_name = "New VPC"
new_display_text = "New display text"
vpc.update(
self.apiclient,
name=new_name,
displaytext=new_display_text
)
vpc_networks = VPC.list(
self.apiclient,
id=vpc.id
)
self.assertEqual(
isinstance(vpc_networks, list),
True,
"List VPC network should return a valid list"
)
self.assertEqual(vpc_networks[0].name,
new_name,
"Updation of VPC name failed.")
self.assertEqual(vpc_networks[0].displaytext,
new_display_text,
"Updation of VPC display text failed.")
@attr(tags=["advanced", "intervlan"], required_hardware="false")
def test_21_deploy_vm_with_gateway_ip(self):
self.services["vpc"]["cidr"] = "192.168.1.0/24"
self.debug("creating a VPC network in the account: %s" %
self.account.name)
vpc = VPC.create(
self.apiclient,
self.services["vpc"],
vpcofferingid=self.vpc_off.id,
zoneid=self.zone.id,
account=self.account.name,
domainid=self.account.domainid
)
self.validate_vpc_network(vpc)
self.network_offering = NetworkOffering.create(
self.apiclient,
self.services["network_offering"],
conservemode=False
)
# Enable Network offering
self.network_offering.update(self.apiclient, state='Enabled')
self.cleanup.append(self.network_offering)
#Instead of first ip, assigning last ip in the CIDR as the gateway ip
gateway = "192.168.1.2"
self.services["network"]["netmask"] = "255.255.255.252"
# Split the cidr to retrieve gateway
# for eg. cidr = 10.0.0.1/24
# Gateway = 10.0.0.1
# Creating network using the network offering created
self.debug("Creating network with network offering: %s" %
self.network_offering.id)
network = Network.create(
self.apiclient,
self.services["network"],
accountid=self.account.name,
domainid=self.account.domainid,
networkofferingid=self.network_offering.id,
zoneid=self.zone.id,
gateway=gateway,
vpcid=vpc.id
)
self.debug("Created network with ID: %s" % network.id)
vm = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
networkids=[str(network.id)]
)
self.debug("Deployed VM in network: %s" % network.id)
self.assertIsNotNone(
vm,
"Failed to create VM with first ip address in the CIDR as the vm ip"
)
return
| 35.341356 | 110 | 0.553946 |
380d8f4508bb28382677c3a6cbf5a25e3b2710a6 | 3,260 | py | Python | obniz/obniz/libs/io_peripherals/io.py | obniz/obniz-python-sdk | da72848ac2a3eeeef238847d86a3f8cbd90d4ce3 | [
"MIT"
] | 11 | 2019-03-22T12:02:11.000Z | 2021-01-21T04:57:18.000Z | obniz/obniz/libs/io_peripherals/io.py | obniz/obniz-python-sdk | da72848ac2a3eeeef238847d86a3f8cbd90d4ce3 | [
"MIT"
] | 5 | 2019-03-02T08:28:25.000Z | 2021-02-02T22:06:37.000Z | obniz/obniz/libs/io_peripherals/io.py | obniz/obniz-python-sdk | da72848ac2a3eeeef238847d86a3f8cbd90d4ce3 | [
"MIT"
] | 3 | 2019-07-20T06:55:09.000Z | 2019-12-04T05:05:00.000Z | import asyncio
class PeripheralIO:
    """Driver for a single obniz IO pin (digital input/output).

    Commands are serialized as dicts keyed ``io<id>`` and pushed through
    the parent device's ``send``; incoming notifications arrive via
    :meth:`notified`.
    """

    def __init__(self, obniz, id):
        """Bind the peripheral to its parent device and pin number.

        :param obniz: parent device object providing send/warning/error
        :param id: pin number on the device
        """
        self.obniz = obniz
        self.id = id
        self.onchange = None
        self._reset()

    def _reset(self):
        """Forget the cached pin level and any pending input waiters."""
        self.value = 0
        self.observers = []

    def add_observer(self, callback):
        """Queue a callback or future to resolve on the next input event."""
        if callback:
            self.observers.append(callback)

    def output(self, value):
        """Drive the pin to the given (truthy -> high) level."""
        value = bool(value)
        obj = {}
        obj["io" + str(self.id)] = value
        self.value = value
        self.obniz.send(obj)

    def drive(self, drive):
        """Select the output drive method: '5v', '3v' or 'open-drain'."""
        if type(drive) is not str:
            raise Exception("please specify drive methods in string")
        if drive == "5v":
            output_type = "push-pull5v"
        elif drive == "3v":
            output_type = "push-pull3v"
        elif drive == "open-drain":
            output_type = "open-drain"
        else:
            raise Exception("unknown drive method")
        obj = {}
        obj["io" + str(self.id)] = {"output_type": output_type}
        self.obniz.send(obj)

    def pull(self, updown):
        """Configure the pull resistor.

        Accepted values: '5v'/'pull-up5v', '3v'/'pull-up3v',
        '0v'/'pull-down', or None/'float' to disable.
        """
        if updown is not None and type(updown) is not str:
            raise Exception("please specify pull methods in string")
        if updown == "5v" or updown == "pull-up5v":
            pull_type = "pull-up5v"
        elif updown == "3v" or updown == "pull-up3v":
            pull_type = "pull-up3v"
        elif updown == "0v" or updown == "pull-down":
            pull_type = "pull-down"
        elif updown is None or updown == "float":
            pull_type = "float"
        else:
            raise Exception("unknown pull_type method")
        obj = {}
        obj["io" + str(self.id)] = {"pull_type": pull_type}
        self.obniz.send(obj)

    def input(self, callback):
        """Switch the pin to streaming input mode; ``callback`` fires on
        every change. Returns the last cached value."""
        self.onchange = callback
        obj = {}
        obj["io" + str(self.id)] = {"direction": "input", "stream": True}
        self.obniz.send(obj)
        return self.value

    def input_wait(self):
        """Request a single input read; returns a future resolved by
        :meth:`notified` with the pin level."""
        future = asyncio.get_event_loop().create_future()
        self.add_observer(future)
        obj = {}
        obj["io" + str(self.id)] = {"direction": "input", "stream": False}
        self.obniz.send(obj)
        return future

    def end(self):
        """Release the pin (send a null configuration)."""
        obj = {}
        obj["io" + str(self.id)] = None
        self.obniz.send(obj)

    def notified(self, obj):
        """Handle a device notification for this pin.

        A bool is a level change: cache it, resolve the oldest waiter,
        and fire ``onchange``. A dict carries warning/error payloads
        which are forwarded to the parent device.
        """
        if type(obj) is bool:
            self.value = obj
            if len(self.observers) > 0:
                item = self.observers.pop(0)
                if callable(item):  # callback
                    item(obj)
                else:  # future
                    item.set_result(obj)
            if self.onchange:
                self.onchange(obj)
        elif type(obj) is dict:
            if obj.get('warning'):
                self.obniz.warning(
                    {
                        "alert": "warning",
                        "message": "io{}: {}".format(self.id, obj['warning']['message']),
                    }
                )
            if obj.get('error'):
                self.obniz.error(
                    {
                        "alert": "error",
                        # Bug fix: the format string was "io{}}: {}" which
                        # emitted a stray '}' after the pin id.
                        "message": "io{}: {}".format(self.id, obj['error']['message']),
                    }
                )
| 28.849558 | 89 | 0.483129 |
3ddcdf352668b19437e995a215a08be798bbd1d9 | 4,272 | py | Python | src/notification.py | mikefeneley/secure-cloud | f19d66ecf0ae18c4c0402edfaa298ac946d70aeb | [
"MIT"
] | 2 | 2017-01-04T07:58:27.000Z | 2017-01-06T20:55:20.000Z | src/notification.py | mikefeneley/secure-cloud | f19d66ecf0ae18c4c0402edfaa298ac946d70aeb | [
"MIT"
] | 11 | 2017-01-04T17:57:25.000Z | 2017-01-13T13:04:11.000Z | src/notification.py | mikefeneley/secure-cloud | f19d66ecf0ae18c4c0402edfaa298ac946d70aeb | [
"MIT"
] | 2 | 2016-12-27T17:33:51.000Z | 2017-01-16T01:18:16.000Z | import os
import smtplib
from validate_email import validate_email
from logger import Logger
class Notification:
    """
    Interface that allows user to send notifications using email protocols.
    """

    def __init__(self, email_server="127.0.0.1", email_port=587,
                 email_username="", email_pwd=""):
        """
        Set up connection information and authentication tokens to allow
        the user to access the SMTP server.

        :param email_server: IP Address of SMTP server for sending mail.
        :type email_server: string
        :param email_port: Port to use to send email
        :type email_port: int
        :param email_username: Authentication username for SMTP server.
        :type email_username: string
        :param email_pwd: Authentication password for SMTP server.
        :type email_pwd: string
        """
        self.email_port = email_port
        self.email_server = email_server
        self.gmail_user = email_username
        self.gmail_pwd = email_pwd
        self.logger = Logger()

    def build_email(
            self,
            subject="Notification from Vulnerability",
            message="",
            source="",
            destination=""):
        """
        Create an email notification object from arguments. The email is
        constructed using the stdlib MIME types.

        Bug fix: the original referenced the undefined name ``messgae``
        (NameError on every call), used ``MIMEText`` without importing it,
        and printed the message instead of returning it.

        :param subject: Subject line of the email.
        :type subject: string
        :param message: Message body of the email.
        :type message: string
        :param source: Email address message is sent from.
        :type source: string
        :param destination: Email address to send message to.
        :type destination: string
        :returns: MIMEText -- Constructed MIMEText object with email information
        """
        # Function-scope import: the rest of the class never needs it.
        from email.mime.text import MIMEText
        email = MIMEText(message)
        email['Subject'] = subject
        if source:
            email['From'] = source
        if destination:
            email['To'] = destination
        return email

    def send_notification(self, message="", recipient=""):
        """
        Send a notification message to the email address given by
        ``recipient``.

        :param message: Notification message to send
        :type message: string
        :param recipient: Email address of the recipient
        :type recipient: string
        :returns: bool -- True once the message was handed to the server.
        """
        TO = recipient
        SUBJECT = "Notification from Vulnerability"
        TEXT = message
        server = smtplib.SMTP(self.email_server, self.email_port)
        # Upgrade the connection before authenticating.
        server.starttls()
        server.ehlo()
        server.login(self.gmail_user, self.gmail_pwd)
        BODY = '\r\n'.join(['To: %s' % TO,
                            'From: %s' % self.gmail_user,
                            'Subject: %s' % SUBJECT,
                            '', TEXT])
        server.sendmail(self.gmail_user, [TO], BODY)
        print ('email sent')
        server.close()
        return True

    def notify_all(self, message, recipients):
        """
        Send the message to every email address on the recipient list.

        :param message: Notification message to send
        :type message: string
        :param recipients: List of emails to send notification message
        :type recipients: List of strings
        :returns: bool -- True if the message was successfully sent to all
                  recipients. Otherwise False
        """
        success = True
        for recipient in recipients:
            if not self.send_notification(message, recipient):
                success = False
        return success
if __name__ == "__main__":
    # Bug fix: the original instantiated the undefined name `Notifier`
    # (NameError at runtime); the class defined in this module is
    # `Notification`.
    notify = Notification()
    """
    gmail = "smtp.gmail.com"
    notification_sender = Notification(
        email_server='localhost',
        email_port=587,
        email_username="",
        email_pwd="")
    message = "Message I want to send"
    source = "Who I want to send the message to. Most likely an email address??"
    #notification_sender.send_notification("Hi", 'michael@sample.com')
    recipients = []
    recipients.append("sample1@gmail.com")
    recipients.append("sample2@gmail.com")
    """
| 32.363636 | 98 | 0.618212 |
93f02a840b4a77d9a3824ed9421b52bc350691ce | 537 | py | Python | DS&Algo Programs in Python/shellSort.py | prathimacode-hub/HacktoberFest-2020 | c18bbb42a5e78f6a7dbfc15fbafd127e738f53f7 | [
"MIT"
] | 386 | 2020-05-08T16:05:16.000Z | 2021-10-05T17:39:14.000Z | DS&Algo Programs in Python/shellSort.py | prathimacode-hub/HacktoberFest-2020 | c18bbb42a5e78f6a7dbfc15fbafd127e738f53f7 | [
"MIT"
] | 830 | 2020-08-31T17:16:45.000Z | 2021-10-06T14:14:23.000Z | DS&Algo Programs in Python/shellSort.py | prathimacode-hub/HacktoberFest-2020 | c18bbb42a5e78f6a7dbfc15fbafd127e738f53f7 | [
"MIT"
def shellSort(array, n):
    """Sort the first ``n`` elements of ``array`` in place (Shell sort)."""
    # Gap sequence: n/2, n/4, ..., 1. Each pass gap-insertion-sorts the
    # interleaved subsequences.
    gap = n // 2
    while gap > 0:
        for i in range(gap, n):
            current = array[i]
            j = i
            # Shift gap-sorted predecessors up until the slot for
            # `current` opens.
            while j >= gap and array[j - gap] > current:
                array[j] = array[j - gap]
                j -= gap
            array[j] = current
        gap //= 2
# Demo: sort a sample list in place and show the result.
data = [9, 8, 3, 7, 5, 6, 4, 1]
size = len(data)
shellSort(data, size)
print('Sorted Array in Ascending Order:')
print(data)
| 24.409091 | 63 | 0.513966 |
0b62b9bc7b0272c955257b34f78451dda2ea26e7 | 19,563 | py | Python | projects-management/gitlaber/controllers.py | fgouteroux/flask-puppet-projects | deea0abadf5a306f41bef073fa722f7e17f644fb | [
"Apache-2.0"
] | 5 | 2015-12-10T17:09:51.000Z | 2015-12-11T14:13:50.000Z | projects-management/gitlaber/controllers.py | fgouteroux/flask-puppet-projects | deea0abadf5a306f41bef073fa722f7e17f644fb | [
"Apache-2.0"
] | null | null | null | projects-management/gitlaber/controllers.py | fgouteroux/flask-puppet-projects | deea0abadf5a306f41bef073fa722f7e17f644fb | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
'''
Flask controller
'''
from __future__ import absolute_import
import os
import json
from flask_oauthlib.client import OAuth
from gitlaber import config
os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = '1'
def _raise_error_from_response(response):
    """Raise a StandardError describing an API error response.

    The structured ``message`` field of the decoded body is preferred;
    when the payload is not a dict (ValueError) or lacks the key
    (KeyError), the raw body is used instead.
    """
    try:
        detail = response.data['message']
    except (KeyError, ValueError):
        detail = response.raw_data
    raise StandardError(detail)
def find_element_in_list(list_element, search_element, match_element):
    """Return the index of the first matching element in a list.

    Bug fix: the original used ``getattr`` only, which always returns
    None for dict elements — so the dict lists returned by the GitLab
    API (e.g. branch dicts compared on "name") could never match.
    Dicts are now matched by key lookup; objects still by attribute.

    :param list_element: list of dicts or objects to scan
    :param search_element: the value of the searched element
    :param match_element: the key/attribute name to compare on
    :return: the position of the first match, or None when absent
    """
    for index, element in enumerate(list_element):
        if isinstance(element, dict):
            value = element.get(match_element)
        else:
            value = getattr(element, match_element, None)
        if value == search_element:
            return index
    return None
class Gitlab(object):
"""Gitlab class"""
    def __init__(self, session):
        """setup the token used for all the api calls and all the urls for the current session

        :param session: session
        """
        def get_gitlab_token():
            """Return session token"""
            # Closure over the Flask session: flask_oauthlib calls this
            # back whenever it needs the current OAuth access token.
            return session.get('access_token')
        self.oauth = OAuth()
        # Register GitLab as an OAuth2 remote application; endpoints are
        # derived from the configured GITLAB_URL.
        self.auth = self.oauth.remote_app(
            'gitlab',
            consumer_key=config.GITLAB_APP_ID,
            consumer_secret=config.GITLAB_APP_SECRET,
            base_url=config.GITLAB_URL,
            access_token_url='{0}/oauth/token'.format(config.GITLAB_URL),
            authorize_url='{0}/oauth/authorize'.format(config.GITLAB_URL),
            content_type='application/json'
        )
        # Must be installed after remote_app creation so authenticated
        # requests can fetch the token lazily.
        self.auth.tokengetter(get_gitlab_token)
        # All REST helpers below are rooted at API v3.
        self._url = '{0}/api/v3'.format(self.auth.base_url)
@staticmethod
def getall(method, *args, **kwargs):
"""Auto-iterate over the paginated results of various methods of the API.
Pass the GitLabAPI method as the first argument, followed by the
other parameters as normal. Include `page` to determine first page to poll.
Remaining kwargs are passed on to the called method, including `per_page`.
:param method: Actual method to call
:param *args: Positional arguments to actual method
:param rpath: Relative resource path, like '/users'
:param page: Page number to start at
:param **kwargs: Keyword arguments to actual method
:return: Yields each item in the result until exhausted, and then
implicit StopIteration; or no elements if error
"""
rpath = kwargs.pop('rpath', '')
page = kwargs.pop('page', '')
if not all([page, rpath]):
raise RuntimeError('Missing rpath or page arguments')
while True:
results = method(*args, rpath=rpath, page=page, **kwargs)
if not results:
break
for result in results:
yield result
page += 1
def get(self, path):
"""Send get request (with auth)"""
url = '%s%s' % (self._url, path)
try:
request = self.auth.get(url)
except Exception:
raise StandardError(
"Failed to get a response from: %s" % url)
if request.status == 200:
return request.data
else:
_raise_error_from_response(request)
def post(self, path, params=None):
"""Send post request (with auth)"""
url = '%s%s' % (self._url, path)
try:
if params:
request = self.auth.post(url, params)
else:
request = self.auth.post(url)
except Exception:
raise StandardError(
"Failed to post data at: %s" % url)
if request.status == 201:
return request.data
else:
_raise_error_from_response(request)
def put(self, path, params):
"""Send put request (with auth)"""
url = '%s%s' % (self._url, path)
try:
request = self.auth.put(url, params)
except Exception:
raise StandardError(
"Failed to update data at: %s" % url)
if request.status == 200:
return request.data
else:
_raise_error_from_response(request)
def delete(self, path):
"""Send delete request (with auth)"""
url = '%s%s' % (self._url, path)
try:
request = self.auth.delete(url)
except Exception:
raise StandardError(
"Failed to delete data from: %s" % url)
if request.status == 200:
return request.data
else:
_raise_error_from_response(request)
def get_paginated_resources(self, rpath, page=1, per_page=20):
"""Return a dictionary list for a given resource
:param page: Which page to return (default is 1)
:param per_page: Number of items to return per page (default is 20)
:return: returs a dictionary of the given resource searched, false if there is an error
"""
try:
url = '%s%s' % (self._url, rpath)
params = {'page': page, 'per_page': per_page}
request = self.auth.get(url, params)
except Exception:
raise StandardError(
"Failed to get a response from: %s" % url)
if request.status == 200:
return request.data
else:
_raise_error_from_response(request)
def get_all_users(self):
"""Return a user list"""
users = [x for x in self.getall(self.get_paginated_resources,
rpath='/users',
page=1,
per_page=20)
]
return sorted(users, key=lambda k: k['name'])
def get_all_groups(self):
"""Return a group list"""
users = [x for x in self.getall(self.get_paginated_resources,
rpath='/groups',
page=1,
per_page=20)
]
return sorted(users, key=lambda k: k['name'])
def get_all_projects(self):
"""Returns a dictionary list of all the projects
:return: list with the repo name, description, last activity,web url...
"""
users = [x for x in self.getall(self.get_paginated_resources,
rpath='/projects/all',
page=1,
per_page=20)
]
return sorted(users, key=lambda k: k['name'])
def get_project_with_namespace(self, path_with_namespace):
"""Retrieve project information
:param path_with_namespace: mygroup/myproject
"""
try:
for project in self.get_all_projects():
if project['path_with_namespace'] == path_with_namespace:
return project
except ValueError:
pass
def get_project_branches(self, path_with_namespace):
"""List all the branches from a project
:param path_with_namespace: mygroup/myproject
:return: list of project branches
"""
try:
project_id = self.get_project_with_namespace(path_with_namespace)['id']
project_branches = []
branches = self.get('/projects/{0}/repository/branches'.format(project_id))
for branch in branches:
project_branches.append(branch['name'])
return project_branches
except ValueError:
pass
def get_projects_in_group(self, group):
"""Returns a dictionary list of all the projects for a group name
:param group: group name
:return: list with the repo name, description, last activity,web url...
"""
result = list()
for project in self.get_all_projects():
if project['namespace']['name'] == group:
result.append(project)
return result
def get_group_with_name(self, name):
"""Retrieve group information
:param name: group name
"""
try:
for group in self.get_all_groups():
if name == group['name']:
return group
except ValueError:
pass
def get_member_group(self, group_name, username):
"""Lists the members of a given group name
:param group_name: the group_name id
:return: the group's members
"""
try:
group_id = self.get_group_with_name(group_name)['id']
group_members = self.get('/groups/{0}/members'.format(group_id))
for member in group_members:
if username == member['username']:
return member
except ValueError:
pass
    def manage_project(self, user, name, group, access, action, import_url, del_user_project):
        """
        Manage projects

        Creates ("create") or removes ("delete") a group project and its
        per-user fork. Returns a list of {operation-description: API result}
        dicts, one per API call performed.

        :param user: "username,user_id" pair, comma-separated
        :param name: project name
        :param group: target group name
        :param access: access level granted to the user on the group project
        :param action: "create" or "delete"
        :param import_url: optional URL to import the repository from
        :param del_user_project: on delete, also remove the user's fork
        """
        # `user` arrives packed as "username,user_id".
        username = str(user.split(",")[0])
        user_id = int(user.split(",")[1])
        result = list()
        path = group + "/" + name
        project = self.get_project_with_namespace(path)
        if action == "create":
            if project == None:
                pgroup = self.get_group_with_name(group)
                member = self.get_member_group(group, username)
                if import_url:
                    # Create Project (imported from the given URL)
                    op_project = "Create new project {0} in {1}".format(name, group)
                    new_project_url = '/projects'
                    new_project_data = {
                        "name":name,
                        "namespace_id":pgroup['id'],
                        "import_url":import_url
                    }
                    new_project = self.post(new_project_url, new_project_data)
                    result.append({op_project: new_project})
                    if member == None:
                        # Add member permissions
                        op_member = "Add member {0} on project {1}".format(username, name)
                        op_member_url = '/projects/{0}/members'.format(new_project['id'])
                        op_member_data = {
                            "id":new_project['id'],
                            "user_id":user_id,
                            "access_level":access
                        }
                        member = self.post(op_member_url, op_member_data)
                        result.append({op_member: member})
                    # Because we import the project from a URL, we cannot
                    # fork directly: import the same URL into the user's
                    # namespace, then link the two with a fork relation.
                    # Create User Project
                    op_user_project = "Create new project {0} for {1}".format(name, username)
                    new_user_project_url = '/projects?sudo={0}'.format(username)
                    new_user_project_data = {
                        "name":name,
                        "import_url":import_url
                    }
                    new_user_project = self.post(new_user_project_url, new_user_project_data)
                    result.append({op_user_project: new_user_project})
                    # Create fork relation
                    op_fork = "Create fork relation from project \
                        {0} in namespace {1}".format(name, username)
                    fork_url = '/projects/{0}/fork/{1}'.format(new_user_project['id'],
                                                               new_project['id']
                                                               )
                    fork = self.post(fork_url)
                    result.append({op_fork: fork})
                else:
                    # Create Project (empty repository)
                    op_project = "Create new project {0} in {1}".format(name, group)
                    new_project_url = '/projects'
                    new_project_data = {
                        "name":name,
                        "namespace_id":pgroup['id']
                    }
                    new_project = self.post(new_project_url, new_project_data)
                    result.append({op_project: new_project})
                    if member == None:
                        # Need member permissions on project target
                        # before fork it under user namespace
                        op_member = "Add member {0} on project {1}".format(username, name)
                        op_member_url = '/projects/{0}/members'.format(new_project['id'])
                        op_member_data = {
                            "id":new_project['id'],
                            "user_id":user_id,
                            "access_level":access
                        }
                        member = self.post(op_member_url, op_member_data)
                        result.append({op_member: member})
                    # Fork on behalf of the user (sudo impersonation).
                    op_fork = "Fork project {0} in namespace {1}".format(name, username)
                    fork_url = '/projects/fork/{0}?sudo={1}'.format(new_project['id'], username)
                    fork = self.post(fork_url)
                    result.append({op_fork: fork})
            else:
                result.append({"Error": "Project {0} already exists".format(path)})
        elif action == "delete":
            op_project = "Delete project {0} in {1}".format(name, group)
            if project:
                result.append({op_project: self.delete('/projects/{0}'.format(project['id']))})
            else:
                result.append({"Error": "Project {0} not found".format(path)})
            if del_user_project:
                # Also remove the user's fork (username/name), when present.
                path = username + "/" + name
                current_user_project = self.get_project_with_namespace(path)
                if current_user_project:
                    op_user_project = "Delete project {0} in {1}".format(name, username)
                    user_project = self.delete('/projects/{0}'.format(current_user_project['id']))
                    result.append({op_user_project: user_project})
                else:
                    result.append({"Error": "Project {0} not found".format(path)})
        return result
    def manage_user_env(self, user, projects, env_action):
        """Create or tear down a per-user environment across several GitLab projects.

        :param user: "username,user_id" pair, e.g. "jdoe,42".
        :param projects: JSON string — list of dicts with keys "group",
            "name", "branch" (ref to branch from) and "access" (access level).
        :param env_action: "create" or "delete".
        :return: list of one-entry dicts mapping an operation description to
            the raw API response, "Nothing to do", or an error message.
        """
        # NOTE(review): contains a Python 2 print statement below; this method
        # cannot run under Python 3 as written.
        username = str(user.split(",")[0])
        user_id = int(user.split(",")[1])
        result = list()
        projects = json.loads(projects)
        for project in projects:
            path = project['group'] + "/" + project['name']
            current_project = self.get_project_with_namespace(path)
            if current_project:
                # Membership of the user in the project's *group* (may be None).
                member = self.get_member_group(project['group'], username)
                op_branch = "{0} branch {1} in project {2}".format(env_action,
                                                                   username,
                                                                   project['name']
                                                                   )
                op_member = "{0} member {1} on project {2}".format(env_action,
                                                                   username,
                                                                   project['name']
                                                                   )
                if env_action == "create":
                    branch_url = '/projects/{0}/repository/branches'.format(current_project['id'])
                    current_project_branches = self.get(branch_url)
                    index_branch = find_element_in_list(current_project_branches, username, "name")
                    # Leftover debug output (Python 2 print statement).
                    print index_branch
                    # Create a branch named after the user unless one exists.
                    if project['branch'] and index_branch == None:
                        branch_data = {
                            "id":current_project['id'],
                            "branch_name":username,
                            "ref":project['branch']
                        }
                        branch = self.post(branch_url, branch_data)
                        result.append({op_branch: branch})
                    else:
                        result.append({op_branch: "Nothing to do"})
                    if project['access']:
                        # Check if user is already in project's group
                        if member == None:
                            current_project_members = []
                            op_member_url = '/projects/{0}/members'.format(current_project['id'])
                            # NOTE(review): this loop variable shadows the
                            # `member` fetched above; intentional-looking but fragile.
                            for member in self.get(op_member_url):
                                current_project_members.append(member['id'])
                            if not user_id in current_project_members:
                                op_member_data = {
                                    "id":current_project['id'],
                                    "user_id":user_id,
                                    "access_level":project['access']
                                }
                                member = self.post(op_member_url, op_member_data)
                                result.append({op_member: member})
                        else:
                            result.append({op_member: "Nothing to do"})
                elif env_action == "delete":
                    current_project_branches = []
                    branch_url = '/projects/{0}/repository/branches'.format(current_project['id'])
                    for branch in self.get(branch_url):
                        current_project_branches.append(branch['name'])
                    if username in current_project_branches:
                        branch = self.delete('{0}/{1}'.format(branch_url, username))
                        result.append({op_branch: branch})
                    else:
                        result.append({op_branch: "Nothing to do"})
                    member_url = '/projects/{0}/members'.format(current_project['id'])
                    current_project_members = self.get(member_url)
                    index_member = find_element_in_list(current_project_members, user_id, "id")
                    # NOTE(review): index_member may be None (Python 2 allows
                    # None >= 0); also the element interpolated into the URL
                    # below looks like a dict — presumably its 'id' was meant.
                    # TODO confirm against find_element_in_list / the API.
                    if index_member >= 0:
                        member = self.delete('{0}/{1}'.format(member_url,
                                                              current_project_members[index_member]
                                                              )
                                             )
                        result.append({op_member: member})
                    else:
                        result.append({op_member: "Nothing to do"})
            else:
                result.append({"Error": "Project {0} not found".format(path)})
        return result
| 38.815476 | 100 | 0.490825 |
3e6c1ddc77941d7f430a3d03c41daf26c6675393 | 15,405 | py | Python | superset/tasks/schedules.py | AshishKapoor/incubator-superset | 394a888e96b404e34b0ddf3cd2d099721dd2235a | [
"Apache-2.0"
] | null | null | null | superset/tasks/schedules.py | AshishKapoor/incubator-superset | 394a888e96b404e34b0ddf3cd2d099721dd2235a | [
"Apache-2.0"
] | 8 | 2020-08-02T03:31:21.000Z | 2022-03-29T22:27:56.000Z | superset/tasks/schedules.py | AshishKapoor/incubator-superset | 394a888e96b404e34b0ddf3cd2d099721dd2235a | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Utility functions used across Superset"""
import logging
import time
import urllib.request
from collections import namedtuple
from datetime import datetime, timedelta
from email.utils import make_msgid, parseaddr
from typing import Any, Dict, Iterator, List, Optional, Tuple, TYPE_CHECKING, Union
from urllib.error import URLError # pylint: disable=ungrouped-imports
import croniter
import simplejson as json
from celery.app.task import Task
from dateutil.tz import tzlocal
from flask import render_template, Response, session, url_for
from flask_babel import gettext as __
from flask_login import login_user
from retry.api import retry_call
from selenium.common.exceptions import WebDriverException
from selenium.webdriver import chrome, firefox
from werkzeug.http import parse_cookie
# Superset framework imports
from superset import app, db, security_manager
from superset.extensions import celery_app
from superset.models.schedules import (
DashboardEmailSchedule,
EmailDeliveryType,
EmailSchedule,
get_scheduler_model,
ScheduleType,
SliceEmailReportFormat,
SliceEmailSchedule,
)
from superset.utils.core import get_email_address_list, send_email_smtp
if TYPE_CHECKING:
# pylint: disable=unused-import
from werkzeug.datastructures import TypeConversionDict
# Globals
config = app.config
logger = logging.getLogger("tasks.email_reports")
logger.setLevel(logging.INFO)
# Seconds slept after driver.get() before screenshotting, to let JS render.
EMAIL_PAGE_RENDER_WAIT = config["EMAIL_PAGE_RENDER_WAIT"]
# Base URL the headless browser hits vs. the one embedded in outgoing emails.
WEBDRIVER_BASEURL = config["WEBDRIVER_BASEURL"]
WEBDRIVER_BASEURL_USER_FRIENDLY = config["WEBDRIVER_BASEURL_USER_FRIENDLY"]
# body: HTML body; data: attachments dict or None; images: inline images or None.
EmailContent = namedtuple("EmailContent", ["body", "data", "images"])
def _get_recipients(
    schedule: Union[DashboardEmailSchedule, SliceEmailSchedule]
) -> Iterator[Tuple[str, str]]:
    """Yield ``(to, bcc)`` address pairs for the schedule's recipients.

    Group delivery yields the raw recipients string once (one email to all);
    otherwise one pair is yielded per individual address.
    """
    blind_copy = config["EMAIL_REPORT_BCC_ADDRESS"]
    if schedule.deliver_as_group:
        targets = [schedule.recipients]
    else:
        targets = get_email_address_list(schedule.recipients)
    for address in targets:
        yield (address, blind_copy)
def _deliver_email(
    schedule: Union[DashboardEmailSchedule, SliceEmailSchedule],
    subject: str,
    email: EmailContent,
) -> None:
    """Send the rendered report to every recipient pair of *schedule*."""
    for recipient, blind_copy in _get_recipients(schedule):
        send_email_smtp(
            recipient,
            subject,
            email.body,
            config,
            data=email.data,
            images=email.images,
            bcc=blind_copy,
            mime_subtype="related",
            dryrun=config["SCHEDULED_EMAIL_DEBUG_MODE"],
        )
def _generate_mail_content(
    schedule: EmailSchedule, screenshot: bytes, name: str, url: str
) -> EmailContent:
    """Build the email body plus either an attachment or an inline image.

    ``attachment`` delivery ships the screenshot as ``screenshot.png``;
    ``inline`` embeds it via a ``cid:`` reference in the HTML body.
    NOTE(review): any other delivery_type raises UnboundLocalError on
    ``images``/``body`` — TODO confirm the enum is exhaustive.
    """
    data: Optional[Dict[str, Any]]
    if schedule.delivery_type == EmailDeliveryType.attachment:
        images = None
        data = {"screenshot.png": screenshot}
        # ``name`` is passed but the template only interpolates %(url)s.
        body = __(
            '<b><a href="%(url)s">Explore in Superset</a></b><p></p>',
            name=name,
            url=url,
        )
    elif schedule.delivery_type == EmailDeliveryType.inline:
        # Get the domain from the 'From' address ..
        # and make a message id without the < > in the ends
        domain = parseaddr(config["SMTP_MAIL_FROM"])[1].split("@")[1]
        msgid = make_msgid(domain)[1:-1]
        images = {msgid: screenshot}
        data = None
        body = __(
            """
            <b><a href="%(url)s">Explore in Superset</a></b><p></p>
            <img src="cid:%(msgid)s">
            """,
            name=name,
            url=url,
            msgid=msgid,
        )
    return EmailContent(body, data, images)
def _get_auth_cookies() -> List["TypeConversionDict[Any, Any]"]:
    """Log in as the configured reports user and collect its session cookies.

    NOTE(review): despite the annotation, the list actually holds the
    ``cookie["session"]`` values appended below — TODO confirm intended type.
    """
    # Login with the user specified to get the reports
    with app.test_request_context():
        user = security_manager.find_user(config["EMAIL_REPORTS_USER"])
        login_user(user)
        # A mock response object to get the cookie information from
        response = Response()
        app.session_interface.save_session(app, session, response)
    cookies = []
    # Extract every Set-Cookie header's "session" entry.
    for name, value in response.headers:
        if name.lower() == "set-cookie":
            cookie = parse_cookie(value)
            cookies.append(cookie["session"])
    return cookies
def _get_url_path(view: str, user_friendly: bool = False, **kwargs: Any) -> str:
    """Build an absolute URL for a Flask *view*, against either the
    webdriver-facing base URL or the user-facing one."""
    if user_friendly:
        base = WEBDRIVER_BASEURL_USER_FRIENDLY
    else:
        base = WEBDRIVER_BASEURL
    with app.test_request_context():
        return urllib.parse.urljoin(str(base), url_for(view, **kwargs))
def create_webdriver() -> Union[
    chrome.webdriver.WebDriver, firefox.webdriver.WebDriver
]:
    """Create a headless selenium driver, authenticated as the reports user.

    NOTE(review): a config value other than "firefox"/"chrome" leaves
    ``driver_class``/``options`` unbound and raises NameError below.
    """
    # Create a webdriver for use in fetching reports
    if config["EMAIL_REPORTS_WEBDRIVER"] == "firefox":
        driver_class = firefox.webdriver.WebDriver
        options = firefox.options.Options()
    elif config["EMAIL_REPORTS_WEBDRIVER"] == "chrome":
        driver_class = chrome.webdriver.WebDriver
        options = chrome.options.Options()
    options.add_argument("--headless")
    # Prepare args for the webdriver init
    kwargs = dict(options=options)
    kwargs.update(config["WEBDRIVER_CONFIGURATION"])
    # Initialize the driver
    driver = driver_class(**kwargs)
    # Some webdrivers need an initial hit to the welcome URL
    # before we set the cookie
    welcome_url = _get_url_path("Superset.welcome")
    # Hit the welcome URL and check if we were asked to login
    driver.get(welcome_url)
    elements = driver.find_elements_by_id("loginbox")
    # This indicates that we were not prompted for a login box.
    if not elements:
        return driver
    # Set the cookies in the driver
    for cookie in _get_auth_cookies():
        info = dict(name="session", value=cookie)
        driver.add_cookie(info)
    return driver
def destroy_webdriver(
    driver: Union[chrome.webdriver.WebDriver, firefox.webdriver.WebDriver]
) -> None:
    """
    Tear down a selenium driver, swallowing the flaky shutdown errors.
    """
    # Selenium shutdown is unreliable: retry close() once, then quit(),
    # and ignore whatever either step raises.
    teardown_steps = (
        lambda: retry_call(driver.close, tries=2),
        driver.quit,
    )
    for step in teardown_steps:
        try:
            step()
        except Exception:  # pylint: disable=broad-except
            pass
def deliver_dashboard(schedule: DashboardEmailSchedule) -> None:
    """
    Render the schedule's dashboard in a headless browser and email the
    screenshot to its recipients.
    """
    dashboard = schedule.dashboard
    # URL rendered by the webdriver vs. the URL embedded in the email.
    dashboard_url = _get_url_path(
        "Superset.dashboard", dashboard_id_or_slug=dashboard.id
    )
    dashboard_url_user_friendly = _get_url_path(
        "Superset.dashboard", user_friendly=True, dashboard_id_or_slug=dashboard.id
    )
    # Create a driver, fetch the page, wait for the page to render
    driver = create_webdriver()
    window = config["WEBDRIVER_WINDOW"]["dashboard"]
    driver.set_window_size(*window)
    driver.get(dashboard_url)
    time.sleep(EMAIL_PAGE_RENDER_WAIT)
    # Set up a function to retry once for the element.
    # This is buggy in certain selenium versions with firefox driver
    get_element = getattr(driver, "find_element_by_class_name")
    element = retry_call(
        get_element, fargs=["grid-container"], tries=2, delay=EMAIL_PAGE_RENDER_WAIT
    )
    try:
        screenshot = element.screenshot_as_png
    except WebDriverException:
        # Some webdrivers do not support screenshots for elements.
        # In such cases, take a screenshot of the entire page.
        screenshot = driver.screenshot() # pylint: disable=no-member
    finally:
        # Always release the browser, even when screenshotting fails.
        destroy_webdriver(driver)
    # Generate the email body and attachments
    email = _generate_mail_content(
        schedule, screenshot, dashboard.dashboard_title, dashboard_url_user_friendly
    )
    subject = __(
        "%(prefix)s %(title)s",
        prefix=config["EMAIL_REPORTS_SUBJECT_PREFIX"],
        title=dashboard.dashboard_title,
    )
    _deliver_email(schedule, subject, email)
def _get_slice_data(schedule: SliceEmailSchedule) -> EmailContent:
    """Fetch the slice's CSV export and package it as email content.

    ``inline`` delivery renders the rows into an HTML table; ``attachment``
    ships the raw CSV bytes. NOTE(review): raises KeyError on
    ``cookies['session']`` when no auth cookie was collected — TODO confirm.
    """
    slc = schedule.slice
    slice_url = _get_url_path(
        "Superset.explore_json", csv="true", form_data=json.dumps({"slice_id": slc.id})
    )
    # URL to include in the email
    slice_url_user_friendly = _get_url_path(
        "Superset.slice", slice_id=slc.id, user_friendly=True
    )
    cookies = {}
    # Only the last session cookie wins; authenticate the CSV request with it.
    for cookie in _get_auth_cookies():
        cookies["session"] = cookie
    opener = urllib.request.build_opener()
    opener.addheaders.append(("Cookie", f"session={cookies['session']}"))
    response = opener.open(slice_url)
    if response.getcode() != 200:
        raise URLError(response.getcode())
    # TODO: Move to the csv module
    # Naive split on b"," — breaks on quoted fields containing commas.
    content = response.read()
    rows = [r.split(b",") for r in content.splitlines()]
    if schedule.delivery_type == EmailDeliveryType.inline:
        data = None
        # Parse the csv file and generate HTML
        columns = rows.pop(0)
        with app.app_context(): # type: ignore
            body = render_template(
                "superset/reports/slice_data.html",
                columns=columns,
                rows=rows,
                name=slc.slice_name,
                link=slice_url_user_friendly,
            )
    elif schedule.delivery_type == EmailDeliveryType.attachment:
        data = {__("%(name)s.csv", name=slc.slice_name): content}
        body = __(
            '<b><a href="%(url)s">Explore in Superset</a></b><p></p>',
            name=slc.slice_name,
            url=slice_url_user_friendly,
        )
    return EmailContent(body, data, None)
def _get_slice_visualization(schedule: SliceEmailSchedule) -> EmailContent:
    """Screenshot the slice's chart in a headless browser and package it
    as email content (inline image or attachment)."""
    slc = schedule.slice
    # Create a driver, fetch the page, wait for the page to render
    driver = create_webdriver()
    window = config["WEBDRIVER_WINDOW"]["slice"]
    driver.set_window_size(*window)
    slice_url = _get_url_path("Superset.slice", slice_id=slc.id)
    slice_url_user_friendly = _get_url_path(
        "Superset.slice", slice_id=slc.id, user_friendly=True
    )
    driver.get(slice_url)
    time.sleep(EMAIL_PAGE_RENDER_WAIT)
    # Set up a function to retry once for the element.
    # This is buggy in certain selenium versions with firefox driver
    element = retry_call(
        driver.find_element_by_class_name,
        fargs=["chart-container"],
        tries=2,
        delay=EMAIL_PAGE_RENDER_WAIT,
    )
    try:
        screenshot = element.screenshot_as_png
    except WebDriverException:
        # Some webdrivers do not support screenshots for elements.
        # In such cases, take a screenshot of the entire page.
        screenshot = driver.screenshot() # pylint: disable=no-member
    finally:
        # Always release the browser, even when screenshotting fails.
        destroy_webdriver(driver)
    # Generate the email body and attachments
    return _generate_mail_content(
        schedule, screenshot, slc.slice_name, slice_url_user_friendly
    )
def deliver_slice(schedule: Union[DashboardEmailSchedule, SliceEmailSchedule]) -> None:
    """
    Render a slice report in the schedule's format and email it out.
    """
    # Dispatch on the report format instead of an if/elif chain.
    renderers = {
        SliceEmailReportFormat.data: _get_slice_data,
        SliceEmailReportFormat.visualization: _get_slice_visualization,
    }
    renderer = renderers.get(schedule.email_format)
    if renderer is None:
        raise RuntimeError("Unknown email report format")
    email = renderer(schedule)
    subject = __(
        "%(prefix)s %(title)s",
        prefix=config["EMAIL_REPORTS_SUBJECT_PREFIX"],
        title=schedule.slice.slice_name,
    )
    _deliver_email(schedule, subject, email)
@celery_app.task(
    name="email_reports.send",
    bind=True,
    soft_time_limit=config["EMAIL_ASYNC_TIME_LIMIT_SEC"],
)
def schedule_email_report(  # pylint: disable=unused-argument
    task: Task,
    report_type: ScheduleType,
    schedule_id: int,
    recipients: Optional[str] = None,
) -> None:
    """Celery task: deliver one scheduled email report.

    *recipients*, when given, overrides the schedule's stored recipients
    (used for ad-hoc test sends).
    """
    model_cls = get_scheduler_model(report_type)
    schedule = db.create_scoped_session().query(model_cls).get(schedule_id)
    # The user may have disabled the schedule. If so, ignore this
    if not schedule or not schedule.active:
        logger.info("Ignoring deactivated schedule")
        return
    # TODO: Detach the schedule object from the db session
    if recipients is not None:
        schedule.id = schedule_id
        schedule.recipients = recipients
    if report_type == ScheduleType.dashboard:
        deliver_dashboard(schedule)
    elif report_type == ScheduleType.slice:
        deliver_slice(schedule)
    else:
        raise RuntimeError("Unknown report type")
def next_schedules(
    crontab: str, start_at: datetime, stop_at: datetime, resolution: int = 0
) -> Iterator[datetime]:
    """Yield cron fire times in ``[start_at, stop_at)``, skipping any that
    come less than *resolution* seconds after the previously yielded one."""
    min_gap = timedelta(seconds=resolution)
    cron_iter = croniter.croniter(crontab, start_at - timedelta(seconds=1))
    last_emitted = start_at - timedelta(days=1)
    while True:
        eta = cron_iter.get_next(datetime)
        # Do not cross the time boundary
        if eta >= stop_at:
            break
        # Skip times before the window or too close to the previous emission.
        if eta < start_at or eta - last_emitted < min_gap:
            continue
        yield eta
        last_emitted = eta
def schedule_window(
    report_type: ScheduleType, start_at: datetime, stop_at: datetime, resolution: int
) -> None:
    """
    Queue one celery task per upcoming cron fire time, for every active
    schedule of *report_type*, inside the ``[start_at, stop_at)`` window.
    """
    model_cls = get_scheduler_model(report_type)
    if not model_cls:
        return None
    session = db.create_scoped_session()
    for schedule in session.query(model_cls).filter(model_cls.active.is_(True)):
        task_args = (report_type, schedule.id)
        etas = next_schedules(
            schedule.crontab, start_at, stop_at, resolution=resolution
        )
        # Each ETA becomes a delayed celery invocation.
        for eta in etas:
            schedule_email_report.apply_async(task_args, eta=eta)
    return None
@celery_app.task(name="email_reports.schedule_hourly")
def schedule_hourly() -> None:
    """Celery beat job, invoked hourly: enqueue every report due in the
    upcoming hour (dashboards and slices)."""
    if not config["ENABLE_SCHEDULED_EMAIL_REPORTS"]:
        logger.info("Scheduled email reports not enabled in config")
        return
    # Minimum spacing between two runs of the same schedule, in seconds.
    resolution = config["EMAIL_REPORTS_CRON_RESOLUTION"] * 60
    # Get the top of the hour
    start_at = datetime.now(tzlocal()).replace(microsecond=0, second=0, minute=0)
    stop_at = start_at + timedelta(seconds=3600)
    schedule_window(ScheduleType.dashboard, start_at, stop_at, resolution)
    schedule_window(ScheduleType.slice, start_at, stop_at, resolution)
d9b923ca29caf77ceeeb8c0a0221b26e1fa6990d | 1,303 | py | Python | bitmovin_api_sdk/encoding/filters/audio_mix/customdata/customdata_api.py | hofmannben/bitmovin-api-sdk-python | 71aae5cd8a31aa0ad54ca07a6f546a624e8686a9 | [
"MIT"
] | null | null | null | bitmovin_api_sdk/encoding/filters/audio_mix/customdata/customdata_api.py | hofmannben/bitmovin-api-sdk-python | 71aae5cd8a31aa0ad54ca07a6f546a624e8686a9 | [
"MIT"
] | 1 | 2020-07-06T07:13:43.000Z | 2020-07-06T07:13:43.000Z | bitmovin_api_sdk/encoding/filters/audio_mix/customdata/customdata_api.py | hofmannben/bitmovin-api-sdk-python | 71aae5cd8a31aa0ad54ca07a6f546a624e8686a9 | [
"MIT"
] | 1 | 2020-07-06T07:07:26.000Z | 2020-07-06T07:07:26.000Z | # coding: utf-8
from __future__ import absolute_import
from bitmovin_api_sdk.common import BaseApi, BitmovinApiLoggerBase
from bitmovin_api_sdk.common.poscheck import poscheck_except
from bitmovin_api_sdk.models.custom_data import CustomData
from bitmovin_api_sdk.models.response_envelope import ResponseEnvelope
from bitmovin_api_sdk.models.response_error import ResponseError
class CustomdataApi(BaseApi):
    """REST client for the Audio Mix filter custom-data endpoint.

    Presumably generated code (matches the other Bitmovin API clients) —
    keep edits mechanical.
    """
    @poscheck_except(2)
    def __init__(self, api_key, tenant_org_id=None, base_url=None, logger=None):
        # type: (str, str, str, BitmovinApiLoggerBase) -> None
        # Forward all connection settings to the shared BaseApi plumbing.
        super(CustomdataApi, self).__init__(
            api_key=api_key,
            tenant_org_id=tenant_org_id,
            base_url=base_url,
            logger=logger
        )
    def get(self, filter_id, **kwargs):
        # type: (string_types, dict) -> CustomData
        """Audio Mix Filter Custom Data
        :param filter_id: Id of the Audio Mix configuration.
        :type filter_id: string_types, required
        :return: Audio Mix Config custom data
        :rtype: CustomData
        """
        return self.api_client.get(
            '/encoding/filters/audio-mix/{filter_id}/customData',
            path_params={'filter_id': filter_id},
            type=CustomData,
            **kwargs
        )
| 32.575 | 80 | 0.681504 |
68ea9a2952cd5a6ebc323f954b0c362f4f6d1a7a | 129 | py | Python | weunion/apps.py | 30Meridian/RozumneMistoSnapshot | 67a83b3908674d01992561dfb37596e395b4d482 | [
"BSD-3-Clause"
] | null | null | null | weunion/apps.py | 30Meridian/RozumneMistoSnapshot | 67a83b3908674d01992561dfb37596e395b4d482 | [
"BSD-3-Clause"
] | null | null | null | weunion/apps.py | 30Meridian/RozumneMistoSnapshot | 67a83b3908674d01992561dfb37596e395b4d482 | [
"BSD-3-Clause"
] | null | null | null | from django.apps import AppConfig
class WeunionConfig(AppConfig):
    """Django app configuration for the ``weunion`` application."""
    name = 'weunion'
    # Display name shown in the Django admin (Ukrainian; roughly
    # "system settings").
    verbose_name = 'Системі налаштування'
2b7748d2b06af2149507c3d9f97214b53fc1362d | 9,527 | py | Python | covid-19.py | pimajor/py-simulitis | 6e0f2d419c28c47dcb5f9b6ee90c6b466e47204a | [
"MIT"
] | 2 | 2020-03-21T15:50:02.000Z | 2021-04-10T01:07:06.000Z | covid-19.py | pimajor/py-simulitis | 6e0f2d419c28c47dcb5f9b6ee90c6b466e47204a | [
"MIT"
] | null | null | null | covid-19.py | pimajor/py-simulitis | 6e0f2d419c28c47dcb5f9b6ee90c6b466e47204a | [
"MIT"
] | null | null | null | import numpy as np
import csv
import matplotlib.pyplot as plt
from matplotlib.dates import DateFormatter
import matplotlib.dates as mdates
import datetime as dt
import pandas as pd
# Hard-coded Windows paths to the local world-population CSV and the
# Johns Hopkins COVID-19 time-series checkout.
word_pop_file = "C:\\work\\py-simulitis\\data\\world_population_2020.csv"
# Filled as a side effect by getMatrix(): one name per parsed data row.
countries = []
# Each dataset is a [csv_path, plot_subtitle] pair.
confirmed = []
confirmed_file = "C:\\work\\COVID-19\\csse_covid_19_data\\csse_covid_19_time_series\\time_series_covid19_confirmed_global.csv"
confirmed_subtitle = "Confirmed cases"
confirmed.append(confirmed_file)
confirmed.append(confirmed_subtitle)
recovered = []
recovered_file = "C:\\work\\COVID-19\\csse_covid_19_data\\csse_covid_19_time_series\\time_series_covid19_recovered_global.csv"
recovered_subtitle = "Recovered cases"
recovered.append(recovered_file)
recovered.append(recovered_subtitle)
death = []
death_file = "C:\\work\\COVID-19\\csse_covid_19_data\\csse_covid_19_time_series\\time_series_covid19_deaths_global.csv"
death_subtitle = "Death cases"
death.append(death_file)
death.append(death_subtitle)
world_pop = pd.read_csv(word_pop_file,sep=";")
# NOTE(review): DataFrame.info() prints to stdout and returns None, so this
# line also prints a literal "None".
print(world_pop.info())
# First column: country name; second column: population.
col_country_name = world_pop.columns[0]
col_country_pop = world_pop.columns[1]
# change the case here
case = confirmed
cases = [confirmed[0],death[0]]
# Country groupings used by main() for the comparison plots.
westernEurope =["France", "Germany", "Italy", "Spain","Portugal", "United Kingdom"]
bigCountries = ["Mexico", "Brazil", "US", "Russia", "India"]
nordicCountries = ["Finland","Iceland","Denmark", "Sweden", "Norway" ]
smallNorthEurope = ["Belgium", "Netherlands", "Sweden", "Switzerland", "Austria"]
asia=[ "Vietnam", "Singapore", "Japan","Philippines","Taiwan*","Korea, South"]
eastern_europe = ["Poland","Bosnia and Herzegovina","Croatia", "Serbia", "Slovenia","Ukraine", "Romania", "Bulgaria" ,"Slovakia","Hungary"]#, "Czechia"]
# eastern_europe = ["Czechia"]
def getPopulationCount(country, relative=True):
    """Return the population of *country* from the ``world_pop`` table.

    Returns 1 when ``relative`` is False or the country has no entry, so
    callers can divide case counts by the result unconditionally.
    """
    # Fix: dropped the unused ``world_pop[...].str.contains(country)`` lookup
    # the original computed and discarded (it also ran a regex match per call).
    pop = 1
    if relative:
        # Exact match on the country-name column.
        c_pop = world_pop.loc[world_pop[col_country_name] == country]
        if c_pop.shape[0] == 0:
            print(country + " has no entry in world pop")
        else:
            pop = c_pop.iloc[0][col_country_pop]
    return pop
def scatter(data, countries, relative=False):
    """Scatter-plot the latest value of dataset data[0] (x) against
    dataset data[1] (y) for the given countries, annotating each point.

    When *relative* is True, values are divided by the country population.
    """
    daataa = []
    daataa.append(getMatrix(getMatrixFromCSV(data[0])))
    daataa.append(getMatrix(getMatrixFromCSV(data[1])))
    datax, datay,dataz =[],[],[]
    # Latest cumulative value per selected country, per dataset.
    for country in daataa[0]:
        if country in countries:
            datax.append( daataa[0][country]["values"][-1]/getPopulationCount(country,relative))
            dataz.append(country)
    for country in daataa[1]:
        if country in countries:
            datay.append( daataa[1][country]["values"][-1]/getPopulationCount(country,relative))
    fig, ax = plt.subplots()
    ax.scatter(datax, datay)
    for i, txt in enumerate(dataz):
        ax.annotate(txt, (datax[i], datay[i]))
    if data[0] == confirmed[0]:
        plt.xlabel("Confirmed")
    # NOTE(review): the branch below sets xlabel again for data[1]; it
    # presumably should be ylabel — TODO confirm.
    if data[1] == confirmed[0]:
        plt.xlabel("Confirmed")
    elif data[1] == death[0]:
        plt.ylabel("Death")
    plt.show()
    return 0
def main():
    """Load the configured dataset, print a summary for one country, and
    draw scatter and time-series plots for each country grouping."""
    mat = getMatrixFromCSV(case[0])
    time = getTimeLine(mat)
    state = getMatrix(mat)
    # Confirmed-vs-deaths scatter for every grouping.
    scatter([confirmed[0],death[0]],eastern_europe)
    scatter([confirmed[0],death[0]],westernEurope)
    scatter([confirmed[0],death[0]],bigCountries)
    scatter([confirmed[0],death[0]],nordicCountries)
    scatter([confirmed[0],death[0]],smallNorthEurope)
    scatter([confirmed[0],death[0]],asia)
    # showCountries(state)
    speed = getSpeedMatrix(mat)
    # acc is only used by the commented-out plot calls below.
    acc = getAccelerationMatrix(speed)
    country = "Norway"
    groups= []
    groups.append(eastern_europe)
    groups.append(westernEurope)
    groups.append(bigCountries)
    groups.append(smallNorthEurope)
    groups.append(["Morocco", "Algeria", "Senegal", "Tunisia"])
    groups.append(nordicCountries)
    groups.append(asia)
    print("Last report ", time[-1])
    print("total ",case[1]," count in ", country," : " , state[country]["values"][-1])
    print("new cases for ",case[1]," in ", country," : " , speed[country]["values"][-1])
    # plotAgainstTime(country,speed,time,3,True)
    # plotAgainstTime(country,acc,time,10,False)
    # plotAgainstTime(country,state,time,1, True)
    # NOTE(review): the loop variable shadows the module-level `countries`.
    for countries in groups:
        plotCountries(countries,speed,time,14,log=True,relative=True)
    # plot(country,speed,state,7)
def average(array, window_size):
    """Trailing moving average: element i is the mean of the last
    ``window_size`` values up to and including i (fewer at the start)."""
    smoothed = []
    for idx in range(len(array)):
        lo = max(0, idx - window_size + 1)
        window = array[lo:idx + 1]
        # `or 1` keeps the original's divide-by-one guard for empty windows.
        smoothed.append(sum(window) / (len(window) or 1))
    return smoothed
def plotCountries(countries, dicto, time, window_size, log, relative):
    """Plot the smoothed series of each country on one shared figure.

    *dicto* maps country -> {"unit", "values"}; *relative* divides values by
    population per 100000 inhabitants; *log* selects a semilog-y plot of the
    moving average (otherwise raw values on a linear scale).
    """
    title =""
    line, ax = plt.subplots(figsize=(10, 6))
    for country in countries:
        data = dicto[country]["values"]
        pop_count =getPopulationCount(country,relative) /100000
        data = [x / pop_count for x in data]
        data_avg = average(data,window_size)
        title = country if title =="" else title + " vs. " + country
        if log:
            ax.semilogy(time,data_avg)
            date_form = DateFormatter("%d-%m-%y")
            ax.xaxis.set_major_formatter(date_form)
            ax.xaxis.set_major_locator(mdates.MonthLocator(interval=1))
            ax.set_label(country)
            plt.grid(True, which="both")
        else:
            # NOTE(review): the linear branch plots raw data, not data_avg.
            line, = plt.plot(time,data)
            line.set_label(country)
    plt.legend(countries)
    # `country` here is the last loop value; its unit labels the whole plot.
    plt.title(title +" \n" + case[1] + " " + (dicto[country]["unit"] + "/100000 inhabitants" if relative else dicto[country]["unit"] ))
    plt.xlabel("Time")
    plt.xticks(rotation=90)
    plt.show()
def plot(name, ydico, xdico, window_size):
    """Log-log plot of one country's smoothed y-series against its smoothed
    x-series (e.g. new cases vs. total cases).

    NOTE(review): an identical ``plot`` is re-defined later in this module,
    so this definition is shadowed; one copy should be deleted.
    """
    data_avg = average(ydico[name]["values"],window_size)
    xdata = average(xdico[name]["values"],window_size)
    plt.title(name + " " + case[1] + " " + ydico[name]["unit"])
    plt.ylabel(ydico[name]["unit"])
    plt.xlabel(xdico[name]["unit"])
    plt.loglog(xdata,data_avg)
    plt.show()
def plotAgainstTime(name, dicto, time, window_size, log):
    """Plot one country's raw series and its moving average over *time*,
    on a semilog-y scale when *log* is truthy."""
    data = dicto[name]["values"]
    data_avg = average(data,window_size)
    plt.title(name + " " + case[1] + " " + dicto[name]["unit"])
    plt.ylabel(dicto[name]["unit"])
    plt.xlabel("Time")
    plt.xticks(rotation=90)
    if log:
        plt.semilogy(time,data)
        plt.semilogy(time,data_avg)
        plt.grid(True, which="both")
    else:
        plt.plot(time,data)
        plt.plot(time,data_avg)
    plt.show()
def plot(name, ydico, xdico, window_size):
    """Log-log plot of one country's smoothed y-series against its smoothed
    x-series.

    NOTE(review): exact duplicate of the earlier ``plot`` definition; this
    one silently overrides it — delete one of the two copies.
    """
    data_avg = average(ydico[name]["values"],window_size)
    xdata = average(xdico[name]["values"],window_size)
    plt.title(name + " " + case[1] + " " + ydico[name]["unit"])
    plt.ylabel(ydico[name]["unit"])
    plt.xlabel(xdico[name]["unit"])
    plt.loglog(xdata,data_avg)
    plt.show()
def getTimeLine(mat):
    """Parse the CSV header's date columns (from column 4 on, %m/%d/%y)
    into datetime objects and return them as a list."""
    timeline = []
    for stamp in mat[0][4:]:
        timeline.append(dt.datetime.strptime(stamp, '%m/%d/%y'))
    # Debug echo of the first parsed date (crashes on an empty header tail,
    # matching the original behaviour).
    print(timeline[0].isoformat())
    return timeline
def getMatrix(mat):
    """Convert the raw CSV matrix into {name: {"unit", "values"}} of
    cumulative case counts.

    Rows 0-1 are treated as headers; columns 0-3 as metadata
    (province, country, lat, long). Side effect: appends every row's
    name to the module-level ``countries`` list.
    """
    state = {}
    rowCount = 0
    for country in mat:
        if rowCount > 1:
            # `col` is unused; kept byte-identical.
            col = 0
            colCount = 0
            value = []
            for colCount in range(len(country)):
                if colCount > 3:
                    value.append(int(country[colCount]))
            # "Province-Country" when a province is present, else "Country".
            name = country[1] if country[0] == "" else country[0] + "-" + country[1]
            countries.append(name)
            record = {}
            record["unit"] = "# cases"
            record["values"] = value
            state[name] = record
        rowCount +=1
    return state
def getSpeedMatrix(mat):
    """Compute day-over-day new cases from the cumulative CSV matrix.

    Rows 0-1 are headers and are skipped; columns 0-3 are metadata.
    Returns {name: {"unit": "new cases/day", "values": [...]}} with a
    leading 0 so the series matches the cumulative series length.
    """
    speed = {}
    for row_idx, row in enumerate(mat):
        if row_idx <= 1:
            continue
        tail = row[4:]
        # First difference of consecutive cumulative counts.
        daily = [int(tail[i + 1]) - int(tail[i]) for i in range(len(tail) - 1)]
        daily.insert(0, 0)
        label = row[1] if row[0] == "" else row[0] + "-" + row[1]
        speed[label] = {"unit": "new cases/day", "values": daily}
    return speed
def getAccelerationMatrix(speed):
    """Differentiate the daily-new-cases series once more, yielding the
    change in new cases per day, with a leading 0 to preserve length."""
    acc = {}
    for label, record in speed.items():
        series = record["values"]
        deltas = [series[i] - series[i - 1] for i in range(1, len(series))]
        deltas.insert(0, 0)
        acc[label] = {
            "unit": "Acceleration: new cases/day^2",
            "values": deltas,
        }
    return acc
def showCountries(countries):
    """Print every entry of *countries*, one per line (debug helper)."""
    for entry in countries:
        print(entry)
def getMatrixFromCSV(csvFile):
    """Read a CSV file into a list of row lists; every cell stays a string."""
    with open(csvFile, newline='') as handle:
        return [list(row) for row in csv.reader(handle)]
if __name__ == '__main__':
    # Script entry point: run the full load/summarize/plot pipeline.
    main()
| 29.495356 | 152 | 0.603758 |
319b48691ae2b35b7bdfe23afd70ad42e6a68fb5 | 237 | py | Python | project/tstyaml.py | cybertraining-dsc/fa19-516-147 | 767e9e2e27ef48a3e8405093b9f105f334bd67d3 | [
"Apache-2.0"
] | null | null | null | project/tstyaml.py | cybertraining-dsc/fa19-516-147 | 767e9e2e27ef48a3e8405093b9f105f334bd67d3 | [
"Apache-2.0"
] | 2 | 2019-09-25T00:58:50.000Z | 2019-09-25T01:10:35.000Z | project/tstyaml.py | cybertraining-dsc/fa19-516-147 | 767e9e2e27ef48a3e8405093b9f105f334bd67d3 | [
"Apache-2.0"
] | 1 | 2019-09-06T17:44:28.000Z | 2019-09-06T17:44:28.000Z | import os
import sys
import yaml
try:
yamlFilename = os.sys.argv[1]
yamlFile = open( yamlfilename ,"r")
except:
print("file does not exist")
sys.exit()
try:
yaml.load(yamlFile.read())
except:
print("valid file")
| 15.8 | 39 | 0.649789 |
381c3b70b01b5a0c40dda473bb42ec602d1483ff | 3,889 | py | Python | libs/eb/lib/aws/exception.py | coen-hyde/dotfiles | 87a48b5e005d2764a1c72fc605f03b02741e526c | [
"MIT"
] | null | null | null | libs/eb/lib/aws/exception.py | coen-hyde/dotfiles | 87a48b5e005d2764a1c72fc605f03b02741e526c | [
"MIT"
] | null | null | null | libs/eb/lib/aws/exception.py | coen-hyde/dotfiles | 87a48b5e005d2764a1c72fc605f03b02741e526c | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#==============================================================================
# Copyright 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Amazon Software License (the "License"). You may not use
# this file except in compliance with the License. A copy of the License is
# located at
#
# http://aws.amazon.com/asl/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or
# implied. See the License for the specific language governing permissions
# and limitations under the License.
#==============================================================================
class AwsErrorCode(object):
    '''AWS common error code'''
    # String constants matching AWS error-response codes; used elsewhere in
    # this module to map AwsServiceException instances to typed exceptions.
    AccessDenied = u'AccessDenied'
    InsufficientPrivileges = u'InsufficientPrivileges'
    InvalidClientTokenId = u'InvalidClientTokenId'
    InvalidParameterCombination = u'InvalidParameterCombination'
    InvalidParameterValue = u'InvalidParameterValue'
    InvalidQueryParameter = u'InvalidQueryParameter'
    MalformedQueryString = u'MalformedQueryString'
    MissingParameter = u'MissingParameter'
    OptInRequired = u'OptInRequired'
    RequestExpired = u'RequestExpired'
    Throttling = u'Throttling'
class AwsServiceException(Exception):
    """Base exception for AWS service errors, carrying the message, the AWS
    error code and the HTTP status of the failed request."""
    def __init__(self, msg, code, http_code):
        # Store the raw pieces; exposed read-only via the properties below.
        self._msg, self._code, self._http_code = msg, code, http_code
    @property
    def message(self):
        """Human-readable error message returned by the service."""
        return self._msg
    @property
    def code(self):
        """AWS error code string (see AwsErrorCode)."""
        return self._code
    @property
    def http_code(self):
        """HTTP status code of the response."""
        return self._http_code
    def __str__(self):
        return u'{0}. {1}'.format(self.code, self.message)
    def __repr__(self):
        return u'HTTP {0}:{1}. {2}'.format(self.http_code, self.code, self.message)
class UnknownHttpCodeException(AwsServiceException):
    '''Raised when the service responds with an HTTP status other than 200.'''
    def __init__(self, message, code, http_code):
        # Plain pass-through to the base class; exists only as a marker type.
        super(UnknownHttpCodeException, self).__init__(message, code, http_code)
class MissingParameterException(AwsServiceException):
    """Typed wrapper for the AWS ``MissingParameter`` error code."""
    def __init__(self, ex):
        # Re-wrap an existing AwsServiceException, preserving its details.
        if not issubclass(ex.__class__, AwsServiceException):
            raise AttributeError(u'Must initialize from instance of AwsServiceException subclass.')
        super(MissingParameterException, self).__init__(ex.message, ex.code, ex.http_code)
class InsufficientPrivilegesException(AwsServiceException):
    """Typed wrapper for the AWS ``InsufficientPrivileges`` error code."""
    def __init__(self, ex):
        # Re-wrap an existing AwsServiceException, preserving its details.
        if not issubclass(ex.__class__, AwsServiceException):
            raise AttributeError(u'Must initialize from instance of AwsServiceException subclass.')
        super(InsufficientPrivilegesException, self).__init__(ex.message, ex.code, ex.http_code)
class InvalidParameterValueException(AwsServiceException):
    """Typed wrapper for the AWS ``InvalidParameterValue`` error code."""
    def __init__(self, ex):
        # Re-wrap an existing AwsServiceException, preserving its details.
        if not issubclass(ex.__class__, AwsServiceException):
            raise AttributeError(u'Must initialize from instance of AwsServiceException subclass.')
        super(InvalidParameterValueException, self).__init__(ex.message, ex.code, ex.http_code)
class OptInRequiredException(AwsServiceException):
    """Typed wrapper for the AWS ``OptInRequired`` error code."""
    def __init__(self, ex):
        # Re-wrap an existing AwsServiceException, preserving its details.
        if not issubclass(ex.__class__, AwsServiceException):
            raise AttributeError(u'Must initialize from instance of AwsServiceException subclass.')
        super(OptInRequiredException, self).__init__(ex.message, ex.code, ex.http_code)
class AccessDeniedException(AwsServiceException):
    """Typed wrapper for the AWS ``AccessDenied`` error code."""
    def __init__(self, ex):
        # Re-wrap an existing AwsServiceException, preserving its details.
        if not issubclass(ex.__class__, AwsServiceException):
            raise AttributeError(u'Must initialize from instance of AwsServiceException subclass.')
        super(AccessDeniedException, self).__init__(ex.message, ex.code, ex.http_code)
| 41.817204 | 99 | 0.686809 |
86dec428f39fc7297796ec617c06325f66455aa2 | 4,740 | py | Python | tests/rados/pool_tests.py | vivekanandan-k-rh/cephci | 4d9b89685d3c9b8f9b612f40b58cbded4762b7cc | [
"MIT"
] | 1 | 2019-10-07T09:25:07.000Z | 2019-10-07T09:25:07.000Z | tests/rados/pool_tests.py | vivekanandan-k-rh/cephci | 4d9b89685d3c9b8f9b612f40b58cbded4762b7cc | [
"MIT"
] | 1 | 2020-06-16T06:14:39.000Z | 2020-06-16T06:14:39.000Z | tests/rados/pool_tests.py | tintumathew10/cephci | 558207a4327c30cad0ecc4496f12b4b0548a8311 | [
"MIT"
] | null | null | null | import datetime
import logging
import time
from ceph.ceph_admin import CephAdmin
from ceph.rados.core_workflows import RadosOrchestrator
log = logging.getLogger(__name__)
def run(ceph_cluster, **kw):
    """
    Performs various pool related validation tests
    Returns:
        1 -> Fail, 0 -> Pass
    """
    log.info(run.__doc__)
    config = kw["config"]
    cephadm = CephAdmin(cluster=ceph_cluster, **config)
    rados_obj = RadosOrchestrator(node=cephadm)
    if config.get("ec_pool_recovery_improvement"):
        ec_config = config.get("ec_pool_recovery_improvement")
        # Create the erasure-coded pool under test and seed it with objects
        # via rados bench writes/reads.
        if not rados_obj.create_erasure_pool(name="recovery", **ec_config):
            log.error("Failed to create the EC Pool")
            return 1
        if not rados_obj.bench_write(**ec_config):
            log.error("Failed to write objects into the EC Pool")
            return 1
        rados_obj.bench_read(**ec_config)
        log.info("Created the EC Pool, Finished writing data into the pool")
        # getting the acting set for the created pool
        acting_pg_set = rados_obj.get_pg_acting_set(pool_name=ec_config["pool_name"])
        # For a k+m erasure profile the acting set must contain exactly
        # k+m OSDs.
        if len(acting_pg_set) != ec_config["k"] + ec_config["m"]:
            log.error(
                f"acting set consists of only these : {acting_pg_set} OSD's, less than k+m"
            )
            return 1
        log.info(f" Acting set of the pool consists of OSD's : {acting_pg_set}")
        log.info(
            f"Killing m, i.e {ec_config['m']} OSD's from acting set to verify recovery"
        )
        # Stop the last m OSDs of the acting set; the pool should survive
        # with only k shards and then recover.
        stop_osds = [acting_pg_set.pop() for _ in range(ec_config["m"])]
        for osd_id in stop_osds:
            if not rados_obj.change_osd_state(action="stop", target=osd_id):
                log.error(f"Unable to stop the OSD : {osd_id}")
                return 1
        log.info("Stopped 'm' number of OSD's from, starting to wait for recovery")
        rados_obj.change_recover_threads(config=ec_config, action="set")
        # Sleeping for 25 seconds ( "osd_heartbeat_grace": "20" ) for osd's to be marked down
        time.sleep(25)
        # Waiting for up to 2.5 hours for the recovery to complete and PG's to enter active + Clean state
        end_time = datetime.datetime.now() + datetime.timedelta(seconds=9000)
        while end_time > datetime.datetime.now():
            # flag stays True only if no PG is in a transient state below.
            flag = True
            status_report = rados_obj.run_ceph_command(cmd="ceph report")
            # Proceeding to check if all PG's are in active + clean
            for entry in status_report["num_pg_by_state"]:
                rec = (
                    "backfilling",
                    "degraded",
                    "incomplete",
                    "recovering",
                    "recovery_wait",
                    "backfilling_wait",
                    "peered",
                    "undersized",
                )
                # PG state strings are '+'-joined (e.g. "active+recovery_wait").
                if any(key in rec for key in entry["state"].split("+")):
                    flag = False
            if flag:
                log.info("The recovery and back-filling of the OSD is completed")
                break
            log.info(
                f"Waiting for active + clean. Active aletrs: {status_report['health']['checks'].keys()},"
                f"PG States : {status_report['num_pg_by_state']}"
                f" checking status again in 1 minute"
            )
            time.sleep(60)
        # getting the acting set for the created pool after recovery
        acting_pg_set = rados_obj.get_pg_acting_set(pool_name=ec_config["pool_name"])
        if len(acting_pg_set) != ec_config["k"] + ec_config["m"]:
            log.error(
                f"acting set consists of only these : {acting_pg_set} OSD's, less than k+m"
            )
            return 1
        log.info(f" Acting set of the pool consists of OSD's : {acting_pg_set}")
        # Changing recovery threads back to default
        rados_obj.change_recover_threads(config=ec_config, action="rm")
        log.debug("Starting the stopped OSD's")
        for osd_id in stop_osds:
            if not rados_obj.change_osd_state(action="restart", target=osd_id):
                log.error(f"Unable to restart the OSD : {osd_id}")
                return 1
        # Sleep for 5 seconds for OSD's to join the cluster
        time.sleep(5)
        # flag is False here only if the 2.5h wait loop above timed out.
        if not flag:
            log.error("The pool did not reach active + Clean state after recovery")
            return 1
        # Deleting the pool created
        # NOTE(review): 'detete_pool' is the (misspelled) RadosOrchestrator
        # API name — do not "fix" it here without changing the library.
        if not rados_obj.detete_pool(pool=ec_config["pool_name"]):
            log.error(f"the pool {ec_config['pool_name']} could not be deleted")
            return 1
        log.info("Successfully tested EC pool recovery with K osd's surviving")
        return 0
| 39.831933 | 105 | 0.592194 |
8e9f78656c73ca7461c723c9f35d7b83294763e1 | 3,059 | py | Python | cloudmarker/test/test_azstorageaccountallowtrustedservicesevent.py | TinLe/cloudmarker | 29698420457a86d5d8a0bac156bc98bd656198e1 | [
"MIT"
] | 208 | 2019-04-10T05:15:11.000Z | 2022-03-16T17:41:29.000Z | cloudmarker/test/test_azstorageaccountallowtrustedservicesevent.py | TinLe/cloudmarker | 29698420457a86d5d8a0bac156bc98bd656198e1 | [
"MIT"
] | 88 | 2018-12-17T18:24:13.000Z | 2021-05-15T04:19:53.000Z | cloudmarker/test/test_azstorageaccountallowtrustedservicesevent.py | TinLe/cloudmarker | 29698420457a86d5d8a0bac156bc98bd656198e1 | [
"MIT"
] | 15 | 2019-01-03T04:18:33.000Z | 2021-06-03T09:24:31.000Z | """Tests for AzStorageAccountAllowTrustedServicesEvent plugin."""
import copy
import unittest
from cloudmarker.events import azstorageaccountallowtrustedservicesevent
# Template record mimicking an Azure storage-account properties record;
# individual tests deep-copy and mutate it to exercise each plugin branch.
base_record = {
    'com': {  # fields common to all cloud record types
        'cloud_type': 'azure',
        'record_type': 'storage_account_properties',
    },
    'ext': {  # extended, record-type-specific fields
        'record_type': 'storage_account_properties',
        'trusted_services_allowed': False
    }
}
class AzStorageAccountAllowTrustedServicesEventTest(unittest.TestCase):
    """Tests for AzStorageAccountAllowTrustedServicesEvent plugin."""

    @staticmethod
    def _eval(record):
        """Run the plugin on *record* and return the emitted events as a list."""
        plugin = azstorageaccountallowtrustedservicesevent. \
            AzStorageAccountAllowTrustedServicesEvent()
        return list(plugin.eval(record))

    def test_com_bucket_missing(self):
        record = copy.deepcopy(base_record)
        record['com'] = None
        self.assertEqual(self._eval(record), [])

    def test_cloud_non_azure(self):
        record = copy.deepcopy(base_record)
        record['com']['cloud_type'] = 'non_azure'
        self.assertEqual(self._eval(record), [])

    def test_record_type_non_storage_account_properties(self):
        record = copy.deepcopy(base_record)
        record['ext']['record_type'] = 'non_storage_account_properties'
        self.assertEqual(self._eval(record), [])

    def test_ext_bucket_missing(self):
        record = copy.deepcopy(base_record)
        record['ext'] = None
        self.assertEqual(self._eval(record), [])

    def test_trusted_services_allowed(self):
        record = copy.deepcopy(base_record)
        record['ext']['trusted_services_allowed'] = True
        self.assertEqual(self._eval(record), [])

    def test_trusted_services_not_allowed(self):
        # The only configuration that should produce an event.
        record = copy.deepcopy(base_record)
        events = self._eval(record)
        self.assertEqual(len(events), 1)
        event = events[0]
        self.assertEqual(event['ext']['record_type'],
                         'storage_account_allow_trusted_services_event')
        self.assertEqual(event['com']['cloud_type'],
                         'azure')
        self.assertEqual(event['com']['record_type'],
                         'storage_account_allow_trusted_services_event')
        self.assertTrue('reference' in event['com'])
        self.assertIsNotNone(event['com']['description'])
        self.assertIsNotNone(event['com']['recommendation'])
| 38.721519 | 72 | 0.675711 |
791a5b6d363910f23487f57acb69001c7ddd0a4c | 1,994 | py | Python | zigbear/custom_protocol/coordinatorcli.py | philippnormann/zigbear | 3cfdb4c9b13adf1e785f27109194b575edf241af | [
"BSD-3-Clause"
] | 14 | 2020-04-15T09:43:20.000Z | 2022-01-29T19:36:27.000Z | zigbear/custom_protocol/coordinatorcli.py | philippnormann1337/zigbear | 3cfdb4c9b13adf1e785f27109194b575edf241af | [
"BSD-3-Clause"
] | null | null | null | zigbear/custom_protocol/coordinatorcli.py | philippnormann1337/zigbear | 3cfdb4c9b13adf1e785f27109194b575edf241af | [
"BSD-3-Clause"
] | 1 | 2020-06-06T21:41:10.000Z | 2020-06-06T21:41:10.000Z | from cmd import Cmd
from zigbear.custom_protocol.Coordinator import Coordinator
class CoordinatorCli(Cmd):
def __init__(self, connector):
self.prompt = 'Zigbear/coordinator> '
super().__init__()
self.coordinator = Coordinator(connector)
def do_devices(self, _):
pass # TODO print list of devices
def do_info(self, _):
self.coordinator.print_info()
def do_start(self, _):
self.coordinator.start_server()
def do_stop(self, _):
self.coordinator.stop_server()
def do_toggle(self, arg: str):
'''brightness <dest_addr>: toggle lamp'''
try:
dest_addr = int(arg)
self.coordinator.toggle_lamp(dest_addr)
except ValueError:
print('invalid destination address')
def do_brightness(self, arg: str):
'''brightness <dest_addr> <brightness (0-255)>: set lamp to specific brightness'''
args = arg.split()
brightness = None
dest_addr = None
try:
dest_addr = int(args[0])
except:
print('invalid destination address')
try:
brightness = int(args[1])
except:
print('invalid brightness value')
if brightness is not None and dest_addr is not None:
if 0 <= brightness <= 255:
self.coordinator.set_lamp_brightness(dest_addr, brightness)
else:
print('brightness value must be between 0 and 255')
def do_initiate(self, arg: str):
try:
dest_addr = int(arg)
self.coordinator.initiate_contact(dest_addr)
except ValueError:
print('invalid destination address')
def do_inits(self, _):
self.coordinator.print_init()
def do_sendkey(self, arg: str):
try:
dest_addr = int(arg)
self.coordinator.pair_devices(dest_addr)
except ValueError:
print('invalid destination address')
| 29.761194 | 90 | 0.597793 |
780fd07fa13fe13b44f354aa8c65d1719d514512 | 6,730 | py | Python | test/util/bitcoin-util-test.py | mrmikeo/GAU-Core | 6f56bb73d0736a4245c22391314d6ba55de0e0d8 | [
"MIT"
] | 2 | 2020-08-25T18:02:32.000Z | 2021-08-23T09:40:41.000Z | test/util/bitcoin-util-test.py | mrmikeo/GAU-Core | 6f56bb73d0736a4245c22391314d6ba55de0e0d8 | [
"MIT"
] | null | null | null | test/util/bitcoin-util-test.py | mrmikeo/GAU-Core | 6f56bb73d0736a4245c22391314d6ba55de0e0d8 | [
"MIT"
] | 2 | 2020-08-06T20:56:42.000Z | 2020-11-23T03:11:17.000Z | #!/usr/bin/env python3
# Copyright 2014 BitPay Inc.
# Copyright 2016-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test framework for gauntlet utils.
Runs automatically during `make check`.
Can also be run manually."""
from __future__ import division,print_function,unicode_literals
import argparse
import binascii
try:
import configparser
except ImportError:
import ConfigParser as configparser
import difflib
import json
import logging
import os
import pprint
import subprocess
import sys
def main():
config = configparser.ConfigParser()
config.optionxform = str
config.read_file(open(os.path.join(os.path.dirname(__file__), "../config.ini"), encoding="utf8"))
env_conf = dict(config.items('environment'))
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-v', '--verbose', action='store_true')
args = parser.parse_args()
verbose = args.verbose
if verbose:
level = logging.DEBUG
else:
level = logging.ERROR
formatter = '%(asctime)s - %(levelname)s - %(message)s'
# Add the format/level to the logger
logging.basicConfig(format=formatter, level=level)
bctester(os.path.join(env_conf["SRCDIR"], "test", "util", "data"), "bitcoin-util-test.json", env_conf)
def bctester(testDir, input_basename, buildenv):
""" Loads and parses the input file, runs all tests and reports results"""
input_filename = os.path.join(testDir, input_basename)
raw_data = open(input_filename, encoding="utf8").read()
input_data = json.loads(raw_data)
failed_testcases = []
for testObj in input_data:
try:
bctest(testDir, testObj, buildenv)
logging.info("PASSED: " + testObj["description"])
except:
logging.info("FAILED: " + testObj["description"])
failed_testcases.append(testObj["description"])
if failed_testcases:
error_message = "FAILED_TESTCASES:\n"
error_message += pprint.pformat(failed_testcases, width=400)
logging.error(error_message)
sys.exit(1)
else:
sys.exit(0)
def bctest(testDir, testObj, buildenv):
"""Runs a single test, comparing output and RC to expected output and RC.
Raises an error if input can't be read, executable fails, or output/RC
are not as expected. Error is caught by bctester() and reported.
"""
# Get the exec names and arguments
execprog = os.path.join(buildenv["BUILDDIR"], "src", testObj["exec"] + buildenv["EXEEXT"])
execargs = testObj['args']
execrun = [execprog] + execargs
# Read the input data (if there is any)
stdinCfg = None
inputData = None
if "input" in testObj:
filename = os.path.join(testDir, testObj["input"])
inputData = open(filename, encoding="utf8").read()
stdinCfg = subprocess.PIPE
# Read the expected output data (if there is any)
outputFn = None
outputData = None
outputType = None
if "output_cmp" in testObj:
outputFn = testObj['output_cmp']
outputType = os.path.splitext(outputFn)[1][1:] # output type from file extension (determines how to compare)
try:
outputData = open(os.path.join(testDir, outputFn), encoding="utf8").read()
except:
logging.error("Output file " + outputFn + " can not be opened")
raise
if not outputData:
logging.error("Output data missing for " + outputFn)
raise Exception
if not outputType:
logging.error("Output file %s does not have a file extension" % outputFn)
raise Exception
# Run the test
proc = subprocess.Popen(execrun, stdin=stdinCfg, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
try:
outs = proc.communicate(input=inputData)
except OSError:
logging.error("OSError, Failed to execute " + execprog)
raise
if outputData:
data_mismatch, formatting_mismatch = False, False
# Parse command output and expected output
try:
a_parsed = parse_output(outs[0], outputType)
except Exception as e:
logging.error('Error parsing command output as %s: %s' % (outputType, e))
raise
try:
b_parsed = parse_output(outputData, outputType)
except Exception as e:
logging.error('Error parsing expected output %s as %s: %s' % (outputFn, outputType, e))
raise
# Compare data
if a_parsed != b_parsed:
logging.error("Output data mismatch for " + outputFn + " (format " + outputType + ")")
data_mismatch = True
# Compare formatting
if outs[0] != outputData:
error_message = "Output formatting mismatch for " + outputFn + ":\n"
error_message += "".join(difflib.context_diff(outputData.splitlines(True),
outs[0].splitlines(True),
fromfile=outputFn,
tofile="returned"))
logging.error(error_message)
formatting_mismatch = True
assert not data_mismatch and not formatting_mismatch
# Compare the return code to the expected return code
wantRC = 0
if "return_code" in testObj:
wantRC = testObj['return_code']
if proc.returncode != wantRC:
logging.error("Return code mismatch for " + outputFn)
raise Exception
if "error_txt" in testObj:
want_error = testObj["error_txt"]
# Compare error text
# TODO: ideally, we'd compare the strings exactly and also assert
# That stderr is empty if no errors are expected. However, gauntlet-tx
# emits DISPLAY errors when running as a windows application on
# linux through wine. Just assert that the expected error text appears
# somewhere in stderr.
if want_error not in outs[1]:
logging.error("Error mismatch:\n" + "Expected: " + want_error + "\nReceived: " + outs[1].rstrip())
raise Exception
def parse_output(a, fmt):
"""Parse the output according to specified format.
Raise an error if the output can't be parsed."""
if fmt == 'json': # json: compare parsed data
return json.loads(a)
elif fmt == 'hex': # hex: parse and compare binary data
return binascii.a2b_hex(a.strip())
else:
raise NotImplementedError("Don't know how to compare %s" % fmt)
if __name__ == '__main__':
main()
| 37.18232 | 125 | 0.637296 |
9a9b2ed81f9c428f856780f2b6f1381c1d6f74ea | 457 | py | Python | blueprint/unit_tests/test_functional.py | andrey-mishchenko/blueprint-oss | 3bad9258571a0e08c53a9a05061e8461a1e62567 | [
"MIT"
] | 7 | 2021-08-16T09:17:31.000Z | 2022-02-16T01:27:08.000Z | blueprint/unit_tests/test_functional.py | andrey-mishchenko/blueprint-oss | 3bad9258571a0e08c53a9a05061e8461a1e62567 | [
"MIT"
] | null | null | null | blueprint/unit_tests/test_functional.py | andrey-mishchenko/blueprint-oss | 3bad9258571a0e08c53a9a05061e8461a1e62567 | [
"MIT"
] | 1 | 2021-08-11T20:17:06.000Z | 2021-08-11T20:17:06.000Z | from unittest import TestCase
from bp.functional import *
class TestFunctional(TestCase):
def test_all_equal(self) -> None:
self.assertTrue(
all_equal([1, 1, 1, 1]))
self.assertFalse(
all_equal([1, 2, 3]))
self.assertTrue(
all_equal([]))
def explode() -> int:
raise Exception
# Test short-circuiting.
self.assertFalse(
all_equal(
explode() if i == 2 else i
for i in range(3)))
| 17.576923 | 35 | 0.601751 |
5a79d0973b6a317ab8987c4de5ca4bc680af0bed | 1,668 | py | Python | var/spack/repos/builtin/packages/r-iranges/package.py | HaochengLIU/spack | 26e51ff1705a4d6234e2a0cf734f93f7f95df5cb | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 2 | 2018-11-27T03:39:44.000Z | 2021-09-06T15:50:35.000Z | var/spack/repos/builtin/packages/r-iranges/package.py | HaochengLIU/spack | 26e51ff1705a4d6234e2a0cf734f93f7f95df5cb | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1 | 2019-01-11T20:11:52.000Z | 2019-01-11T20:11:52.000Z | var/spack/repos/builtin/packages/r-iranges/package.py | HaochengLIU/spack | 26e51ff1705a4d6234e2a0cf734f93f7f95df5cb | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1 | 2020-10-14T14:20:17.000Z | 2020-10-14T14:20:17.000Z | # Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RIranges(RPackage):
    """Provides efficient low-level and highly
    reusable S4 classes for storing,
    manipulating and aggregating over annotated ranges of
    integers. Implements an
    algebra of range operations, including efficient
    algorithms for finding overlaps
    and nearest neighbors. Defines efficient list-like
    classes for storing, transforming
    and aggregating large grouped data,
    i.e., collections of atomic vectors and DataFrames."""

    homepage = "https://www.bioconductor.org/packages/IRanges/"
    git = "https://git.bioconductor.org/packages/IRanges.git"

    # Pinned Bioconductor release commits for each supported package version.
    version('2.14.10', commit='c76118a38e84c7c764141adbd66ee350d0882bc9')
    version('2.12.0', commit='1b1748655a8529ba87ad0f223f035ef0c08e7fcd')
    version('2.10.5', commit='b00d1d5025e3c480d17c13100f0da5a0132b1614')

    # BiocGenerics / S4Vectors minimum versions paired, via `when=`, with the
    # IRanges release they apply to.
    depends_on('r-biocgenerics@0.21.1:', type=('build', 'run'), when='@2.10.5')
    depends_on('r-biocgenerics@0.23.3:', type=('build', 'run'), when='@2.12.0')
    depends_on('r-biocgenerics@0.25.3:', type=('build', 'run'), when='@2.14.10')
    depends_on('r-s4vectors@0.13.17:', type=('build', 'run'), when='@2.10.5')
    depends_on('r-s4vectors@0.15.5:', type=('build', 'run'), when='@2.12.0')
    depends_on('r-s4vectors@0.18.2:', type=('build', 'run'), when='@2.14.10')

    # R interpreter ranges for the oldest and newest supported releases.
    # NOTE(review): 2.12.0 intentionally carries no R constraint here —
    # confirm whether that is deliberate.
    depends_on('r@3.4.0:3.4.9', when='@2.10.5', type=('build', 'run'))
    depends_on('r@3.5.0:3.5.9', when='@2.14.10', type=('build', 'run'))
| 46.333333 | 80 | 0.685851 |
a6ae3499b0eb5f9cb5b6a03aa9d41577e085848b | 59 | py | Python | eda/nlp/__init__.py | alexklapheke/eda | 027b3b94fe7d308cdb7cf3637551f4db75142f24 | [
"MIT"
] | null | null | null | eda/nlp/__init__.py | alexklapheke/eda | 027b3b94fe7d308cdb7cf3637551f4db75142f24 | [
"MIT"
] | null | null | null | eda/nlp/__init__.py | alexklapheke/eda | 027b3b94fe7d308cdb7cf3637551f4db75142f24 | [
"MIT"
] | null | null | null | from .nlp import tf_idf
from .nlp import logodds_dirichlet
| 19.666667 | 34 | 0.830508 |
54aee6cede04f1134f49f69ae03bdc0a7a8c29e4 | 2,050 | py | Python | examples/adwords/v201601/account_management/create_account.py | fosterwei/adwords-keyword-planner-API-googleads-python-lib | b80b8b3741a55f1d00c5974bc58f92540663c6f6 | [
"Apache-2.0"
] | 1 | 2020-05-23T11:32:32.000Z | 2020-05-23T11:32:32.000Z | examples/adwords/v201601/account_management/create_account.py | fosterwei/adwords-keyword-planner-API-googleads-python-lib | b80b8b3741a55f1d00c5974bc58f92540663c6f6 | [
"Apache-2.0"
] | null | null | null | examples/adwords/v201601/account_management/create_account.py | fosterwei/adwords-keyword-planner-API-googleads-python-lib | b80b8b3741a55f1d00c5974bc58f92540663c6f6 | [
"Apache-2.0"
] | 2 | 2018-04-20T02:16:33.000Z | 2020-11-12T20:58:54.000Z | #!/usr/bin/python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example illustrates how to create an account.
Note by default this account will only be accessible via its parent AdWords
manager account..
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
from datetime import datetime
from googleads import adwords
def main(client):
  """Create a new AdWords account under the manager account of *client*.

  Uses ManagedCustomerService (v201601); the new account is only reachable
  through its parent manager account.
  """
  service = client.GetService(
      'ManagedCustomerService', version='v201601')

  timestamp = datetime.today().strftime('%Y%m%d %H:%M:%S')

  # A single ADD operation; several accounts could be created in one call by
  # appending further operations to this list.
  operand = {
      'name': 'Account created with ManagedCustomerService on %s' % timestamp,
      'currencyCode': 'EUR',
      'dateTimeZone': 'Europe/London',
  }
  operations = [{'operator': 'ADD', 'operand': operand}]

  # Create the account(s) and report each resulting customer ID.
  result = service.mutate(operations)
  for account in result['value']:
    print ('Account with customer ID \'%s\' was successfully created.'
           % account['customerId'])
if __name__ == '__main__':
# Initialize client object.
adwords_client = adwords.AdWordsClient.LoadFromStorage()
main(adwords_client)
| 32.03125 | 78 | 0.727805 |
545be5cef84ada878aacc720b8ca2babde041a5c | 17,176 | py | Python | ALS_DR_benchmark_twitter.py | HanbaekLyu/BCD-DR | 0c4d65cb247073545507a6168ee7bd75a418177e | [
"MIT"
] | 1 | 2021-03-26T03:12:26.000Z | 2021-03-26T03:12:26.000Z | ALS_DR_benchmark_twitter.py | HanbaekLyu/BCD-DR | 0c4d65cb247073545507a6168ee7bd75a418177e | [
"MIT"
] | null | null | null | ALS_DR_benchmark_twitter.py | HanbaekLyu/BCD-DR | 0c4d65cb247073545507a6168ee7bd75a418177e | [
"MIT"
] | null | null | null | from utils.BCD_DR import ALS_DR
from utils.ocpdl import Online_CPDL
import numpy as np
import matplotlib.pyplot as plt
import pickle
from scipy.interpolate import interp1d
plt.rcParams['font.family'] = 'serif'
plt.rcParams['font.serif'] = ['Times New Roman'] + plt.rcParams['font.serif']
def Out_tensor(loading):
    """Reconstruct the full tensor encoded by a CP loading dictionary.

    Parameters
    ----------
    loading : dict
        Maps 'U0', 'U1', ... to factor matrices of shape
        (I_j, n_components); every factor must have the same number of
        columns.

    Returns
    -------
    numpy.ndarray
        Sum over all components of the outer products of the corresponding
        factor columns (the CP reconstruction), of shape (I_0, ..., I_{m-1}).
    """
    n_modes = len(loading.keys())
    n_components = loading.get('U0').shape[1]

    # Rank-one tensor for each component i: outer product of column i of
    # every factor matrix. The seed np.array([1]) adds a leading length-1
    # axis which A[0] strips afterwards.
    CPdict = {}
    for i in np.arange(n_components):
        A = np.array([1])
        for j in np.arange(n_modes):
            loading_factor = loading.get('U' + str(j))  # I_j x n_components
            A = np.multiply.outer(A, loading_factor[:, i])
        A = A[0]
        CPdict.update({'A' + str(i): A})

    # Sum the rank-one tensors over all components.
    # BUG FIX: the original loop ran over n_modes (len(loading.keys()))
    # instead of n_components, silently dropping components (or crashing)
    # whenever the two differed.
    X = np.zeros(shape=CPdict.get('A0').shape)
    for i in np.arange(n_components):
        X += CPdict.get('A' + str(i))
    return X
def ALS_run(X,
            n_components=10,
            iter=100,
            regularizer=None,  # L1 regularizer for each factor matrix
            ini_loading=None,
            beta=None,
            search_radius_const=1000,
            if_compute_recons_error=True,
            save_folder='Output_files',
            subsample_ratio=None,
            output_results=True):
    """Fit a CP decomposition of X with the regularized ALS solver.

    Thin wrapper: builds an ALS_DR instance and forwards the remaining
    arguments to its ALS() routine, returning the solver's result dict.
    """
    solver = ALS_DR(X=X,
                    n_components=n_components,
                    ini_loading=None,
                    ini_A=None,
                    ini_B=None,
                    alpha=regularizer)
    return solver.ALS(iter=iter,
                      ini_loading=ini_loading,
                      beta=beta,
                      search_radius_const=search_radius_const,
                      if_compute_recons_error=if_compute_recons_error,
                      save_folder=save_folder,
                      subsample_ratio=subsample_ratio,
                      output_results=output_results)
def MU_run(X,
           n_components=10,
           iter=100,
           regularizer=0,
           ini_loading=None,
           if_compute_recons_error=True,
           save_folder='Output_files',
           output_results=True):
    """Fit a CP decomposition of X with the multiplicative-updates solver.

    Thin wrapper: builds an ALS_DR instance and forwards the remaining
    arguments to its MU() routine, returning the solver's result dict.
    """
    solver = ALS_DR(X=X,
                    n_components=n_components,
                    ini_loading=None,
                    ini_A=None,
                    ini_B=None,
                    alpha=regularizer)
    return solver.MU(iter=iter,
                     ini_loading=ini_loading,
                     if_compute_recons_error=if_compute_recons_error,
                     save_folder=save_folder,
                     output_results=output_results)
def OCPDL_run(X,
              n_components=10,
              iter=100,
              regularizer=0,
              ini_loading=None,
              batch_size=100,
              mode_2be_subsampled=-1,
              if_compute_recons_error=True,
              save_folder='Output_files',
              output_results=True):
    """Fit a CP decomposition of X with the online CPDL solver.

    Thin wrapper: builds an Online_CPDL instance (subsampling enabled) and
    returns the result dictionary from its train_dict() routine.
    """
    solver = Online_CPDL(X=X,
                         batch_size=batch_size,
                         iterations=iter,
                         n_components=n_components,
                         ini_loading=ini_loading,
                         ini_A=None,
                         ini_B=None,
                         alpha=regularizer,
                         subsample=True)
    return solver.train_dict(mode_2be_subsampled=mode_2be_subsampled,
                             if_compute_recons_error=if_compute_recons_error,
                             save_folder=save_folder,
                             output_results=output_results)
def plot_benchmark_errors(ALS_result0, ALS_result1, ALS_result2, MU_result, name=1, errorbar=True, save_folder=None):
    """Plot reconstruction-error-vs-time benchmark curves for four solvers.

    ALS_result0/1/2 hold the BCD-DR (beta=0.5 and beta=1) and plain ALS
    results, MU_result the multiplicative-updates baseline. With
    errorbar=True each result dict must carry 'timed_errors_trials' of shape
    (n_trials, 2, n_iters) — row 0 elapsed time, row 1 error; each trial is
    interpolated onto a common time grid and plotted as mean +/- 1 std.
    With errorbar=False, 'time_error' (2 x n_iters) is plotted directly.
    The figure is saved under save_folder (default 'Output_files_BCD').
    """
    n_components = ALS_result1.get('n_components')

    if not errorbar:
        ALS_errors = ALS_result0.get('time_error')
        MU_errors = MU_result.get('time_error')

        fig, axs = plt.subplots(nrows=1, ncols=1, figsize=(8, 6))
        axs.plot(ALS_errors[0, :], ALS_errors[1, :], label='ALS')
        # BUG FIX: this branch previously plotted an undefined name
        # `OCLDP_errors` (stale copy-paste) and always raised NameError;
        # plot the MU baseline that was actually loaded above.
        axs.plot(MU_errors[0, :], MU_errors[1, :], label='MU')
        axs.set_xlabel('Elapsed time (s)')
        axs.set_ylabel('Reconstruction error')
        plt.suptitle('Reconstruction error benchmarks')
        axs.legend()
        plt.tight_layout()
        plt.suptitle('Reconstruction error benchmarks', fontsize=13)
        plt.subplots_adjust(0.1, 0.1, 0.9, 0.9, 0.00, 0.00)
        plt.savefig('Output_files/benchmark_plot' + '_' + str(name))
    else:
        # shape: (# trials) x (2 for time, error) x (iterations)
        ALS_errors0 = ALS_result0.get('timed_errors_trials')
        ALS_errors1 = ALS_result1.get('timed_errors_trials')
        ALS_errors2 = ALS_result2.get('timed_errors_trials')
        MU_errors = MU_result.get('timed_errors_trials')
        n_trials = ALS_errors1.shape[0]

        # Common time grid; each method is truncated at its shortest trial
        # so no curve is extrapolated past observed data.
        x_all_max = max(min(ALS_errors1[:, :, -1][:, 0]), min(MU_errors[:, :, -1][:, 0]), min(ALS_errors2[:, :, -1][:, 0]))
        x_all = np.linspace(0, x_all_max, num=101, endpoint=True)
        x_all_ALS0 = x_all[x_all < min(ALS_errors0[:, :, -1][:, 0])]
        x_all_ALS1 = x_all[x_all < min(ALS_errors1[:, :, -1][:, 0])]
        x_all_ALS2 = x_all[x_all < min(ALS_errors2[:, :, -1][:, 0])]
        x_all_MU = x_all[x_all < min(MU_errors[:, :, -1][:, 0])]

        # Interpolate every trial of every method onto its common carrier.
        f_ALS_interpolated0 = []
        f_ALS_interpolated1 = []
        f_ALS_interpolated2 = []
        f_MU_interpolated = []
        for i in np.arange(MU_errors.shape[0]):
            f_ALS0 = interp1d(ALS_errors0[i, 0, :], ALS_errors0[i, 1, :], fill_value="extrapolate")
            f_ALS_interpolated0.append(f_ALS0(x_all_ALS0))
            f_ALS1 = interp1d(ALS_errors1[i, 0, :], ALS_errors1[i, 1, :], fill_value="extrapolate")
            f_ALS_interpolated1.append(f_ALS1(x_all_ALS1))
            f_ALS2 = interp1d(ALS_errors2[i, 0, :], ALS_errors2[i, 1, :], fill_value="extrapolate")
            f_ALS_interpolated2.append(f_ALS2(x_all_ALS2))
            f_MU = interp1d(MU_errors[i, 0, :], MU_errors[i, 1, :], fill_value="extrapolate")
            f_MU_interpolated.append(f_MU(x_all_MU))

        f_ALS_interpolated0 = np.asarray(f_ALS_interpolated0)
        f_ALS_interpolated1 = np.asarray(f_ALS_interpolated1)
        f_ALS_interpolated2 = np.asarray(f_ALS_interpolated2)
        f_MU_interpolated = np.asarray(f_MU_interpolated)

        # Mean and standard deviation across trials (axis 0).
        f_ALS_avg0 = np.sum(f_ALS_interpolated0, axis=0) / f_ALS_interpolated0.shape[0]
        f_ALS_std0 = np.std(f_ALS_interpolated0, axis=0)
        f_ALS_avg1 = np.sum(f_ALS_interpolated1, axis=0) / f_ALS_interpolated1.shape[0]
        f_ALS_std1 = np.std(f_ALS_interpolated1, axis=0)
        f_ALS_avg2 = np.sum(f_ALS_interpolated2, axis=0) / f_ALS_interpolated2.shape[0]
        f_ALS_std2 = np.std(f_ALS_interpolated2, axis=0)
        f_MU_avg = np.sum(f_MU_interpolated, axis=0) / f_MU_interpolated.shape[0]
        f_MU_std = np.std(f_MU_interpolated, axis=0)

        fig, axs = plt.subplots(nrows=1, ncols=1, figsize=(8, 6))
        markers, caps, bars = axs.errorbar(x_all_ALS0, f_ALS_avg0, yerr=f_ALS_std0,
                                           fmt='r-', marker='*', label='BCD-DR-0.5', errorevery=5)
        axs.fill_between(x_all_ALS0, f_ALS_avg0 - f_ALS_std0, f_ALS_avg0 + f_ALS_std0, facecolor='r', alpha=0.1)
        markers, caps, bars = axs.errorbar(x_all_ALS1, f_ALS_avg1, yerr=f_ALS_std1,
                                           fmt='b-', marker='*', label='BCD-DR-1', errorevery=5)
        axs.fill_between(x_all_ALS1, f_ALS_avg1 - f_ALS_std1, f_ALS_avg1 + f_ALS_std1, facecolor='b', alpha=0.1)
        markers, caps, bars = axs.errorbar(x_all_ALS2, f_ALS_avg2, yerr=f_ALS_std2,
                                           fmt='c-', marker='*', label='ALS', errorevery=5)
        axs.fill_between(x_all_ALS2, f_ALS_avg2 - f_ALS_std2, f_ALS_avg2 + f_ALS_std2, facecolor='c', alpha=0.1)
        markers, caps, bars = axs.errorbar(x_all_MU, f_MU_avg, yerr=f_MU_std,
                                           fmt='g-', marker='x', label='MU', errorevery=5)
        axs.fill_between(x_all_MU, f_MU_avg - f_MU_std, f_MU_avg + f_MU_std, facecolor='g', alpha=0.2)

        axs.set_xlim(0, min(max(x_all_ALS0), max(x_all_ALS1), max(x_all_ALS2), max(x_all_MU)))
        axs.set_ylim(min(f_MU_avg - f_MU_std), max(f_MU_avg + f_MU_std + 0.5))
        # NOTE(review): as in the original, only the *last* errorbar's bars
        # (MU) are faded here — confirm whether all four were intended.
        [bar.set_alpha(0.5) for bar in bars]
        axs.set_xlabel('Elapsed time (s)', fontsize=14)
        axs.set_ylabel('Reconstruction error', fontsize=12)
        plt.suptitle('Reconstruction error benchmarks')
        axs.legend(fontsize=13)
        plt.tight_layout()
        plt.suptitle('Reconstruction error benchmarks', fontsize=13)
        plt.subplots_adjust(0.1, 0.1, 0.9, 0.9, 0.00, 0.00)

        root = 'Output_files_BCD' if save_folder is None else save_folder
        plt.savefig(root + '/benchmark_plot_errorbar' + '_ntrials_' + str(n_trials) + "_" + "_ncomps_" + str(
            n_components) + "_" + str(name) + ".pdf")
def main():
    """Benchmark driver: run ALS / MU / OCPDL tensor decompositions and save results.

    All configuration (dataset choice, algorithm toggles, iteration counts,
    output folder) is set via the local variables below.  Each enabled
    algorithm is run ``num_repeat`` times; the per-trial (time, error) curves
    are stacked and saved as a .npy dictionary under ``save_folder``.

    Fixes vs. the original: renamed locals that shadowed the ``iter`` and
    ``dict`` builtins, closed the pickle file handle with a ``with`` block,
    and removed a dead ``iter1 = iter1`` no-op branch.
    """
    loading = {}
    n_components = 5
    n_iter = 50  # was ``iter``; renamed to avoid shadowing the builtin
    num_repeat = 10
    # save_folder = "Output_files_BCD_new1"
    save_folder = "Output_files_BCD_twitter5"

    synthetic_data = False
    run_ALS = True
    run_MU = False
    run_OCPDL = False
    plot_errors = False
    search_radius_const = 100000
    file_identifier = 'new1'

    # Load data
    file_name = "Synthetic"
    if synthetic_data:
        # Fixed seeds so synthetic runs are reproducible across invocations.
        np.random.seed(1)
        U0 = np.random.rand(100, n_components)
        np.random.seed(2)
        U1 = np.random.rand(100, n_components)
        np.random.seed(3)
        U2 = np.random.rand(1000, n_components)

        loading.update({'U0': U0})
        loading.update({'U1': U1})
        loading.update({'U2': U2})

        X = Out_tensor(loading)
    else:
        path = "Data/Twitter/top_1000_daily/data_tensor_top1000.pickle"
        # ``with`` closes the file handle (the original leaked it), and the
        # local is renamed from ``dict`` to avoid shadowing the builtin.
        with open(path, "rb") as f:
            data_dict = pickle.load(f)
        X = data_dict[1]
        file_name = "Twitter"

    file_name = file_name + "_" + file_identifier
    print('X.shape', X.shape)
    # print('!!! average entry size of tensor:', np.linalg.norm(X.reshape(-1,1),1)/np.product(X.shape))

    if run_ALS:
        # beta_list = [1/2, 1, None]
        beta_list = [1]
        ALS_subsample_ratio_list = [20]
        # ALS_subsample_ratio_list=[None]
        for subsample_ratio in ALS_subsample_ratio_list:
            print('!!! ALS subsample_ratio:', subsample_ratio)
            for beta in beta_list:
                print('!!! ALS initialized with beta:', beta)
                list_full_timed_errors = []
                # The original had a branch that reassigned the iteration
                # count to itself when subsampling; it was a no-op.
                iter1 = n_iter
                for i in np.arange(num_repeat):
                    result_dict_ALS = ALS_run(X,
                                              n_components=n_components,
                                              iter=iter1,
                                              regularizer=0,
                                              # inverse regularizer on time mode (to promote long-lasting topics),
                                              # no regularizer on on words and tweets
                                              ini_loading=None,
                                              beta=beta,
                                              search_radius_const=search_radius_const,
                                              subsample_ratio=subsample_ratio,
                                              if_compute_recons_error=True,
                                              save_folder=save_folder,
                                              output_results=True)
                    time_error = result_dict_ALS.get('time_error')
                    list_full_timed_errors.append(time_error.copy())
                    # print('!!! list_full_timed_errors', len(list_full_timed_errors))

                timed_errors_trials = np.asarray(
                    list_full_timed_errors)  # shape (# trials) x (2 for time, error) x (iterations)
                result_dict_ALS.update({'timed_errors_trials': timed_errors_trials})

                save_filename = "ALS_result_" + "beta_" + str(beta) + "_" + "subsample_" + str(subsample_ratio) + "_" + str(file_name)
                np.save(save_folder + "/" + save_filename, result_dict_ALS)
                print('result_dict_ALS.keys()', result_dict_ALS.keys())
                result_dict_ALS = {}

    if run_MU:
        list_full_timed_errors = []
        print('!!! MU initialized')
        for i in np.arange(num_repeat):
            # MU gets twice the iterations of ALS, as in the original setup.
            result_dict_MU = MU_run(X,
                                    n_components=n_components,
                                    iter=n_iter * 2,
                                    regularizer=0,
                                    ini_loading=None,
                                    if_compute_recons_error=True,
                                    save_folder=save_folder,
                                    output_results=True)
            time_error = result_dict_MU.get('time_error')
            list_full_timed_errors.append(time_error.copy())
            # print('!!! list_full_timed_errors', len(list_full_timed_errors))

        timed_errors_trials = np.asarray(
            list_full_timed_errors)  # shape (# trials) x (2 for time, error) x (iterations)
        result_dict_MU.update({'timed_errors_trials': timed_errors_trials})
        np.save(save_folder + "/MU_result_" + str(file_name), result_dict_MU)
        print('result_dict_MU.keys()', result_dict_MU.keys())

    if run_OCPDL:
        print('!!! OCPDL initialized')
        list_full_timed_errors = []
        for i in np.arange(num_repeat):
            result_dict_OCPDL = OCPDL_run(X,
                                          n_components=n_components,
                                          iter=n_iter,
                                          regularizer=0,
                                          ini_loading=None,
                                          mode_2be_subsampled=-1,
                                          if_compute_recons_error=True,
                                          save_folder=save_folder,
                                          output_results=True)
            time_error = result_dict_OCPDL.get('time_error')
            list_full_timed_errors.append(time_error.copy())

        timed_errors_trials = np.asarray(
            list_full_timed_errors)  # shape (# trials) x (2 for time, error) x (iterations)
        result_dict_OCPDL.update({'timed_errors_trials': timed_errors_trials})
        print('!!! list_full_timed_errors', len(list_full_timed_errors))
        np.save(save_folder + "/OCPDL_result_" + str(file_name), result_dict_OCPDL)
        print('result_dict_OCPDL.keys()', result_dict_OCPDL.keys())

    if plot_errors:
        save_filename = file_name + ".npy"
        ALS_result0 = np.load(save_folder + '/ALS_result_beta_0.5_' + save_filename, allow_pickle=True).item()
        ALS_result1 = np.load(save_folder + '/ALS_result_beta_1_' + save_filename, allow_pickle=True).item()
        ALS_result2 = np.load(save_folder + '/ALS_result_beta_None_' + save_filename, allow_pickle=True).item()
        MU_result = np.load(save_folder + '/MU_result_' + save_filename, allow_pickle=True).item()

        plot_benchmark_errors(ALS_result0, ALS_result1, ALS_result2, MU_result, name=file_name, errorbar=True, save_folder=save_folder)
# Standard script entry point: run the benchmark driver only when this file
# is executed directly (not when imported as a module).
if __name__ == '__main__':
    main()
| 44.154242 | 136 | 0.554495 |
dcc0fa3e0ba5493334b29e9b4f3e52267a00a303 | 2,662 | py | Python | tracker_project/tracker/models.py | abarto/tracker_project | d7e1a6cb34a3b1d48a3aff16ca119f9c670b357d | [
"MIT"
] | 64 | 2015-03-17T15:54:59.000Z | 2021-02-21T16:39:49.000Z | tracker_project/tracker/models.py | Bakley/tracker_project | d7e1a6cb34a3b1d48a3aff16ca119f9c670b357d | [
"MIT"
] | 3 | 2016-02-24T13:31:19.000Z | 2019-02-08T04:06:23.000Z | tracker_project/tracker/models.py | Bakley/tracker_project | d7e1a6cb34a3b1d48a3aff16ca119f9c670b357d | [
"MIT"
] | 25 | 2015-04-03T10:12:47.000Z | 2020-08-01T20:47:16.000Z | from __future__ import absolute_import, unicode_literals
from django.core.urlresolvers import reverse
from django.contrib.gis.db import models
from geojson import Feature, loads
class Incident(models.Model):
    """A reported incident at a point location, with a severity level."""

    # Geo-aware manager so spatial queryset lookups work on this model.
    objects = models.GeoManager()

    # Two-character severity codes as stored in the database.
    URGENT = 'UR'
    HIGH = 'HI'
    MEDIUM = 'ME'
    LOW = 'LO'
    INFO = 'IN'
    SEVERITY_CHOICES = (
        (URGENT, 'Urgent'),
        (HIGH, 'High'),
        (MEDIUM, 'Medium'),
        (LOW, 'Low'),
        (INFO, 'Info'),
    )
    # For each severity, the tuple of severities it alerts on: a severity
    # covers itself and every less-severe level (INFO only covers itself).
    ALERT_SEVERITIES = {
        URGENT: (URGENT, HIGH, MEDIUM, LOW, INFO),
        HIGH: (HIGH, MEDIUM, LOW, INFO),
        MEDIUM: (MEDIUM, LOW, INFO),
        LOW: (LOW, INFO),
        INFO: (INFO,),
    }

    name = models.CharField(max_length=150)
    description = models.TextField(max_length=1000)
    severity = models.CharField(max_length=2, choices=SEVERITY_CHOICES, default=MEDIUM)
    closed = models.BooleanField(default=False)
    location = models.PointField()
    # Set once on insert; not editable through forms/admin.
    created = models.DateTimeField(editable=False, auto_now_add=True)

    @property
    def alert_severities(self):
        """Severities covered by this incident's own severity level."""
        return Incident.ALERT_SEVERITIES[self.severity]

    @property
    def geojson_feature(self):
        """This incident as a GeoJSON Feature (point geometry + display props)."""
        return Feature(
            geometry=loads(self.location.geojson),
            id='Incident:{pk}'.format(pk=self.pk),
            properties={
                'name': self.name,
                'description': self.description,
                'severity': self.get_severity_display(),
                'created': str(self.created),
                'closed': self.closed,
                'model': 'Incident',
                'pk': self.pk,
                'url': reverse('tracker:incident-detail', kwargs={'pk': self.pk}),
            }
        )
class AreaOfInterest(models.Model):
    """A named polygonal region watched at a given severity level."""

    # Geo-aware manager so spatial queryset lookups work on this model.
    objects = models.GeoManager()

    name = models.CharField(max_length=150)
    # Reuses the Incident severity vocabulary so the two models stay in sync.
    severity = models.CharField(max_length=2, choices=Incident.SEVERITY_CHOICES, default=Incident.MEDIUM)
    polygon = models.PolygonField()

    @property
    def path_expression(self):
        """The polygon's exterior ring as a 'y,x|y,x|...' string.

        Note the coordinates are emitted in (y, x) order — presumably for a
        lat,lng-style map path parameter; confirm against the consumer.
        """
        return '|'.join('{y},{x}'.format(x=x, y=y) for x, y in self.polygon[0])

    @property
    def geojson_feature(self):
        """This area as a GeoJSON Feature (polygon geometry + display props)."""
        return Feature(
            geometry=loads(self.polygon.geojson),
            id='AreaOfInterest:{pk}'.format(pk=self.pk),
            properties={
                'name': self.name,
                'severity': self.get_severity_display(),
                'centroid': self.polygon.centroid.geojson,
                'model': 'AreaOfInterest',
                'pk': self.pk,
                'url': reverse('tracker:area-of-interest-detail', kwargs={'pk': self.pk}),
            }
        )
e8f794861f3cc91233fe681105886dc69e0ed268 | 8,655 | py | Python | tensormate/graph/base.py | songgc/tensormate | 3d7f3cb8dbca4bb346cc7525e247ccefd18ab80b | [
"Apache-2.0"
] | 1 | 2018-08-29T04:17:06.000Z | 2018-08-29T04:17:06.000Z | tensormate/graph/base.py | songgc/tensormate | 3d7f3cb8dbca4bb346cc7525e247ccefd18ab80b | [
"Apache-2.0"
] | null | null | null | tensormate/graph/base.py | songgc/tensormate | 3d7f3cb8dbca4bb346cc7525e247ccefd18ab80b | [
"Apache-2.0"
] | null | null | null | import copy
from collections import Counter
import numpy as np
import six
import tensorflow as tf
from tensorflow.core.framework import graph_pb2, node_def_pb2
from tensorflow.python.util import compat
from tensorflow.python.util.deprecation import deprecated
class TfGgraphBuilder(object):
    """Base class for reusable TensorFlow (sub)graph builders.

    Subclasses implement :meth:`_build`.  Calling the instance builds the
    graph under ``self._scope`` (reusing variables on every call after the
    first) and, unless ``plain`` is set, records the nodes, trainable
    variables and update ops the call created so the subgraph can later be
    inspected (:meth:`get_shapes`, :meth:`op_counting`) or rendered with
    :meth:`visualize`.

    Fixes vs. the original: the ``plain`` property used to return itself
    (infinite recursion) and ``visualize`` contained a no-op quote
    replacement that produced a broken ``srcdoc`` attribute.
    """

    def __init__(self, scope=None, device=None, plain=False):
        """
        Args:
            scope: variable scope name the graph is built under.
            device: optional device string passed to ``tf.device``.
            plain: if True, skip all node/variable bookkeeping around calls.
        """
        self._call_count = 0
        self._scope = scope
        self._device = device
        self._plain = plain
        self._trainable_variables = None
        self._update_ops = None
        self._shapes = []
        self._created_nodes = []
        self._node_map = dict()
        self._before_states = dict()
        self._after_states = dict()
        self._actual_scopes = []

    def _build(self, *args, **kwargs):
        """Subclass hook: construct the graph and return its output(s)."""
        raise NotImplementedError("Please implement this method")

    def _subgraph(self):
        """Return a GraphDef containing only the nodes this builder created,
        with external inputs replaced by Placeholder nodes."""
        out_graph = graph_pb2.GraphDef()
        to_be_inputed = []
        for node in self._created_nodes:
            out_graph.node.extend([copy.deepcopy(node)])
            op = tf.get_default_graph().get_operation_by_name(node.name)
            if op.outputs:
                out_graph.node[-1].attr["_output_shapes"].list.shape.extend([
                    output.get_shape().as_proto() for output in op.outputs])
            for name in node.input:
                if "/" not in name:
                    # Top-level name: necessarily outside our scopes.
                    to_be_inputed.append(name)
                else:
                    # Keep the input only if it lives under one of the actual
                    # scopes this builder has created; otherwise it is an
                    # external input that must become a placeholder.
                    flag = False
                    for scope in self._actual_scopes:
                        seq = scope.split("/")
                        if "/".join(name.split("/")[0: len(seq)]) == scope:
                            flag = True
                            break
                    if not flag:
                        to_be_inputed.append(name)
        for name in to_be_inputed:
            op = tf.get_default_graph().get_operation_by_name(name)
            node = _NodeDef("Placeholder", name)
            out_graph.node.extend([node])
            if op.outputs:
                out_graph.node[-1].attr["_output_shapes"].list.shape.extend([
                    output.get_shape().as_proto() for output in op.outputs])
        return out_graph

    def visualize(self, output_file=None, whole_graph=False):
        """Visualize TensorFlow graph.

        Renders either the whole default graph or just this builder's
        subgraph as a TensorBoard iframe; returns the HTML string, or writes
        it to ``output_file`` when given.
        """
        if self.ref_count == 0:
            raise RuntimeError("Not built yet")
        if whole_graph:
            graph = tf.get_default_graph()
            graph_def = graph.as_graph_def(add_shapes=True)
        else:
            graph_def = self._subgraph()
        strip_def = self.strip_consts(graph_def, max_const_size=32)
        code = """
            <script>
              function load() {{
                document.getElementById("{id}").pbtxt = {data};
              }}
            </script>
            <link rel="import" href="https://tensorboard.appspot.com/tf-graph-basic.build.html" onload=load()>
            <div style="height:600px">
              <tf-graph-basic id="{id}"></tf-graph-basic>
            </div>
        """.format(data=repr(str(strip_def)), id='graph' + str(np.random.rand()))
        # BUG FIX: quotes must be HTML-escaped inside the srcdoc attribute;
        # the original replaced '"' with '"' (a no-op).
        iframe = """
            <iframe seamless style="width:1200px;height:620px;border:0" srcdoc="{}"></iframe>
        """.format(code.replace('"', '&quot;'))
        if output_file is None:
            return iframe
        with open(output_file, "tw") as f:
            f.write(iframe)

    @staticmethod
    def strip_consts(graph_def, max_const_size=32):
        """Strip large constant values from graph_def."""
        strip_def = tf.GraphDef()
        for n0 in graph_def.node:
            n = strip_def.node.add()
            n.MergeFrom(n0)
            if n.op == 'Const':
                tensor = n.attr['value'].tensor
                size = len(tensor.tensor_content)
                if size > max_const_size:
                    tensor.tensor_content = str.encode("<stripped %s bytes>" % size)
        return strip_def

    def _before_call(self):
        # Snapshot the node names that already exist so _after_call can
        # compute the set of nodes this call created.
        g = tf.get_default_graph().as_graph_def()
        existing_nodes = set([node.name for node in g.node])
        self._before_states = dict()
        self._before_states["existing_nodes"] = existing_nodes

    def _call_body(self, *args, **kwargs):
        # Reuse variables on every call after the first.
        reuse = self.ref_count > 0
        with tf.variable_scope(self._scope, reuse=reuse):
            if self._device is None:
                output = self._build(*args, **kwargs)
            else:
                with tf.device(self._device):
                    output = self._build(*args, **kwargs)
        return output

    def _after_call(self):
        existing_nodes = self._before_states["existing_nodes"]
        if self._call_count == 1:
            # Collections are only captured after the first (building) call.
            self._trainable_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self.scope)
            self._update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, self.scope)
        g = tf.get_default_graph().as_graph_def()
        new_nodes = [node for node in g.node if node.name not in existing_nodes]
        self._created_nodes += new_nodes
        # Record the actual (possibly uniquified) scope prefix of this call,
        # taken from the name of the last node created.
        name_list = new_nodes[-1].name.split("/")
        current_scopes = []
        for name in name_list:
            current_scopes.append(name)
            if self._scope in name:
                break
        self._actual_scopes.append("/".join(current_scopes))

    def __call__(self, *args, **kwargs):
        if not self._plain:
            self._before_call()
        output = self._call_body(*args, **kwargs)
        self._call_count += 1
        if not self._plain:
            self._after_call()
        return output

    @property
    def ref_count(self):
        """Number of times this builder has been called."""
        return self._call_count

    @property
    def scope(self):
        return self._scope

    @property
    def device(self):
        return self._device

    @property
    def plain(self):
        # BUG FIX: the original returned ``self.plain`` (the property itself),
        # recursing forever; return the underlying attribute instead.
        return self._plain

    @deprecated("2017-10-31", "Use infer_output_shape(tensor)")
    def _infer_output_shape(self, tensor):
        self.infer_output_shape(tensor)

    def infer_output_shape(self, tensor):
        """Record (name, shape) of a tensor for later inspection."""
        assert tf.is_numeric_tensor(tensor)
        self._shapes.append((tensor.name, tensor.get_shape().as_list()))

    def get_shapes(self):
        if self.ref_count == 0:
            raise RuntimeError("Not built yet")
        return self._shapes

    def get_trainable_variables(self):
        if self.ref_count == 0:
            raise RuntimeError("Not built yet")
        return self._trainable_variables

    def get_update_ops(self):
        if self.ref_count == 0:
            raise RuntimeError("Not built yet")
        return self._update_ops

    def get_model_info(self):
        """Return the names of all trainable variables in this builder."""
        objs = self.get_trainable_variables()
        output = []
        for obj in objs:
            output.append(obj.name)
        return output

    def op_counting(self):
        """Return (op_type, count) pairs for all created nodes, most common first."""
        op_list = [node.op for node in self._created_nodes]
        counter = Counter(op_list)
        return counter.most_common(len(op_list))

    def count_on_conditions(self, strs):
        # Not implemented yet; kept for interface compatibility.
        pass

    def add_node_to_map(self, name, node):
        self._node_map[name] = node

    def get_node_from_map(self, name):
        return self._node_map.get(name)

    def get_last_actual_scope(self):
        if self.ref_count == 0:
            raise RuntimeError("Not built yet")
        return self._actual_scopes[-1]
def _node_name(n):
if n.startswith("^"):
return n[1:]
else:
return n.split(":")[0]
def _NodeDef(op_type, name, device=None, attrs=None):
    """Create a NodeDef proto.

    Args:
      op_type: Value for the "op" attribute of the NodeDef proto.
      name: Value for the "name" attribute of the NodeDef proto.
      device: string, device, or function from NodeDef to string.
        Value for the "device" attribute of the NodeDef proto.
      attrs: Optional dictionary where the key is the attribute name (a string)
        and the value is the respective "attr" attribute of the NodeDef proto (an
        AttrValue).

    Returns:
      A node_def_pb2.NodeDef protocol buffer.
    """
    node_def = node_def_pb2.NodeDef()
    node_def.op = compat.as_bytes(op_type)
    node_def.name = compat.as_bytes(name)
    if attrs is not None:
        for k, v in six.iteritems(attrs):
            node_def.attr[k].CopyFrom(v)
    # NOTE(review): the device-assignment branch below is deliberately
    # disabled, so the ``device`` argument is currently ignored.
    # if device is not None:
    #   if callable(device):
    #     node_def.device = device(node_def)
    #   else:
    #     node_def.device = _device_string(device)
    return node_def
| 34.074803 | 110 | 0.58937 |
f2450712fd105b047d6b0f2a040acae2ee99bbe6 | 881 | py | Python | policykit/policyengine/migrations/0004_webhooktriggeraction.py | mashton/policyk | 623523d76d63c06b6d559ad7b477d80512fbd2e7 | [
"MIT"
] | 78 | 2020-05-08T17:25:38.000Z | 2022-01-13T05:44:50.000Z | policykit/policyengine/migrations/0004_webhooktriggeraction.py | mashton/policyk | 623523d76d63c06b6d559ad7b477d80512fbd2e7 | [
"MIT"
] | 302 | 2020-02-20T07:04:30.000Z | 2022-02-25T17:44:23.000Z | policykit/policyengine/migrations/0004_webhooktriggeraction.py | mashton/policyk | 623523d76d63c06b6d559ad7b477d80512fbd2e7 | [
"MIT"
] | 13 | 2020-04-17T19:44:26.000Z | 2022-02-25T17:18:04.000Z | # Generated by Django 3.2.2 on 2021-11-10 14:46
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('policyengine', '0003_choicevote'),
]
operations = [
migrations.CreateModel(
name='WebhookTriggerAction',
fields=[
('baseaction_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='policyengine.baseaction')),
('json_data', models.JSONField(blank=True, null=True)),
('event_type', models.CharField(blank=True, max_length=50, null=True)),
],
options={
'abstract': False,
},
bases=('policyengine.baseaction', models.Model),
),
]
| 32.62963 | 204 | 0.609535 |
ab6087cd01c336cf5a9d52051d411daa6cb0cfb0 | 356 | py | Python | setup.py | vtmoreau/google_trans_new | 1f4fecf17461107a27148896a87a26a565e9049a | [
"MIT"
] | null | null | null | setup.py | vtmoreau/google_trans_new | 1f4fecf17461107a27148896a87a26a565e9049a | [
"MIT"
] | null | null | null | setup.py | vtmoreau/google_trans_new | 1f4fecf17461107a27148896a87a26a565e9049a | [
"MIT"
] | null | null | null | from setuptools import find_packages
from setuptools import setup
setup(name='google_trans_new',
version="1.0",
description="Project Description",
packages=find_packages(),
# include_package_data: to install data from MANIFEST.in
include_package_data=True,
# scripts=['scripts/OpenFakeData-run'],
zip_safe=False)
| 29.666667 | 62 | 0.716292 |
d44dd8e87c0cdb86fe9188bdae7130f07e554be3 | 111,315 | py | Python | theano/gpuarray/elemwise.py | sebastien-j/Theano | ad628f1f388931ba04a46a179c0eaa9a1d90ec2a | [
"BSD-3-Clause"
] | 1 | 2017-06-30T21:37:52.000Z | 2017-06-30T21:37:52.000Z | theano/gpuarray/elemwise.py | sebastien-j/Theano | ad628f1f388931ba04a46a179c0eaa9a1d90ec2a | [
"BSD-3-Clause"
] | null | null | null | theano/gpuarray/elemwise.py | sebastien-j/Theano | ad628f1f388931ba04a46a179c0eaa9a1d90ec2a | [
"BSD-3-Clause"
] | 1 | 2020-01-06T20:28:42.000Z | 2020-01-06T20:28:42.000Z | from __future__ import absolute_import, print_function, division
import copy
import numpy as np
import theano
from theano import Apply, scalar, Op
from six.moves import StringIO, xrange
from theano.gof.utils import MethodNotDefined
from theano.scalar import Scalar, Composite
from theano.tensor.elemwise import (Elemwise, DimShuffle, CAReduceDtype)
from theano.scalar.basic_scipy import Erfinv, Erfcinv
from theano.scalar.basic import upgrade_to_float_no_complex, complex_types
try:
import pygpu
from pygpu import gpuarray
from pygpu.tools import ArrayArg
from pygpu.reduction import ReductionKernel
from pygpu.gpuarray import dtype_to_typecode
except ImportError:
pass
from .basic_ops import (as_gpuarray_variable, HideC, GpuKernelBase, Kernel,
infer_context_name)
from .type import GpuArrayType, gpu_context_type
from .fp16_help import load_w, write_w
def make_argument(v, name):
    """Wrap Theano variable *v* as a pygpu ArrayArg called *name*."""
    dtype = np.dtype(v.type.dtype)
    return ArrayArg(dtype, name)
def as_C_string_const(s):
    """Render *s* as adjacent C string literals, one per source line.

    Each line is double-quote-escaped, wrapped in quotes with a trailing
    ``\\n`` escape, and the literals are joined with real newlines so the C
    compiler concatenates them back into one string.
    """
    literals = []
    for line in s.split('\n'):
        escaped = line.replace('"', '\\"')
        literals.append('"' + escaped + '\\n"')
    return '\n'.join(literals)
def get_scal(dt):
    """Return the Theano scalar type for dtype *dt*, mapping float16 to float32."""
    effective = 'float32' if dt == 'float16' else dt
    return scalar.get_scalar_type(effective)
def max_inputs_to_GpuElemwise(node_or_outputs):
    """
    Compute the maximum number of inputs that fit in a kernel call.

    The kernel parameter buffer is limited to 4096 bytes (the CUDA limit
    is used for now).  After subtracting the fixed header and the space
    taken by the output arguments, the remainder bounds how many input
    arguments can still be passed.
    """
    outputs = (node_or_outputs.outputs
               if isinstance(node_or_outputs, Apply)
               else node_or_outputs)
    ndim = outputs[0].type.ndim

    # Even with call32, shapes, strides and offsets are passed as 64-bit
    # values (8 bytes each); pointers are also 8 bytes.
    ptr_bytes = 8
    int_bytes = 8
    budget = 4096

    # Fixed part, regardless of argument count: the total number of
    # elements plus one size per dimension.
    header_bytes = int_bytes * (1 + ndim)
    # Each argument (input or output) carries a pointer, an offset, and one
    # stride per dimension.  Code for the non-contiguous case is always
    # generated, even if a tensor ends up contiguous.
    per_arg_bytes = ptr_bytes + int_bytes * (1 + ndim)

    remaining_for_inputs = budget - header_bytes - per_arg_bytes * len(outputs)
    return remaining_for_inputs // per_arg_bytes
class GpuElemwise(HideC, Elemwise):
    """
    Elemwise on the GPU.

    The scalar op's C code is compiled into a libgpuarray GpuElemwise
    kernel; there is no Python perform (it is explicitly disabled below).
    """
    params_type = gpu_context_type
    nin = property(lambda self: self.scalar_op.nin)
    nout = property(lambda self: self.scalar_op.nout)
    # float16 is accepted: the kernel is created with GE_CONVERT_F16 (see
    # c_init_code_struct), so f16 data is loaded and computed as f32.
    _f16_ok = True

    def __str__(self):
        if self.name is not None:
            return self.name
        items = str(sorted(self.inplace_pattern.items()))
        return "GpuElemwise{%s}%s<gpuarray>" % (self.scalar_op, items)

    def max_inputs(self, node_or_outputs):
        """Maximum number of inputs that fit in one kernel call."""
        return max_inputs_to_GpuElemwise(node_or_outputs)

    def make_node(self, *inputs):
        """Build the Apply node, validating that the scalar op is supported.

        Raises NotImplementedError for multi-output scalar ops or too many
        inputs, and SupportCodeError when the scalar op requires C support
        code that cannot be emitted for the GPU backend.
        """
        ctx_name = infer_context_name(*inputs)
        inputs = [as_gpuarray_variable(i, ctx_name) for i in inputs]
        out_info = Elemwise.get_output_info(self, GpuDimShuffle, *inputs)
        inputs = out_info[2]
        outputs = [GpuArrayType(broadcastable=br,
                                context_name=ctx_name,
                                dtype=dtype)() for dtype, br in
                   zip(out_info[0], out_info[1])]
        if len(outputs) > 1:
            raise NotImplementedError()

        if len(inputs) > max_inputs_to_GpuElemwise(outputs):
            raise NotImplementedError(
                "Can not make this GpuElemwise with that much inputs")

        # Try to generate the kernel to catch SupportCodeErrors
        scal_ins = [get_scal(i.dtype) for i in inputs]
        fake_node = self.scalar_op.make_node(*[i() for i in scal_ins])
        try:
            code = fake_node.op.c_support_code_apply(fake_node, "test")
            if code:
                raise SupportCodeError(code)
        except MethodNotDefined:
            pass
        try:
            support_code = fake_node.op.c_support_code()
            if "struct" in support_code:
                # The macro is fine, the C++ struct is not.
                raise SupportCodeError(
                    "struct aren't supported in GpuElemwise support_code" +
                    support_code)
        except MethodNotDefined:
            pass
        node = Apply(self, inputs, outputs)
        return node

    def get_params(self, node):
        # The GPU context of the first input is used as the op's params.
        return node.inputs[0].type.context

    def _get_vnames(self, node):
        """Return C argument names for inputs and outputs; an inplace output
        reuses the name of the input it overwrites."""
        inps = ['i%d' % (n,) for n, _ in enumerate(node.inputs)]
        outs = ['o%d' % (n,) if n not in self.inplace_pattern else
                inps[self.inplace_pattern[n]]
                for n, _ in enumerate(node.outputs)]
        return inps, outs

    def _generate_op_string(self, node):
        """Return (support_code, kernel_code) C strings for the scalar op,
        with numpy type names rewritten to their gpuarray equivalents."""
        inps, outs = self._get_vnames(node)
        scal_v_ins = [get_scal(i.dtype)() for i in node.inputs]

        # As float16 isn't a c type and most GPU don't compute on it,
        # We convert the computation to float32, and let libgpuarray
        # load in float16 and cast to float32 and do the reverse for
        # the output.
        scalar_op = self.scalar_op
        if isinstance(scalar_op, (scalar.Cast, Composite)):
            scalar_op = scalar_op.clone_float32()
        fake_node = scalar_op.make_node(*scal_v_ins)
        scal_v_out = fake_node.outputs
        assert len(scal_v_out) == len(node.outputs)

        try:
            kop = fake_node.op.c_code(fake_node, 'elem_scalar',
                                      inps, outs,
                                      dict(fail='return;'))
        except MethodNotDefined:
            raise AssertionError(
                "No c code for this scalar. Can not make a GpuElemwise")
        # If the following assert fail, then we need to update the
        # code handler above.
        assert 'npy_float16' not in kop

        support_code = ""
        try:
            # We accept only some c_support_code().
            # This filter is done in the make_node()
            support_code += fake_node.op.c_support_code()
        except MethodNotDefined:
            pass
        for npy, ga in [("npy_bool", "ga_bool"),
                        ("npy_uint8", "ga_ubyte"),
                        ("npy_uint16", "ga_ushort"),
                        ("npy_uint32", "ga_uint"),
                        ("npy_uint64", "ga_ulong"),
                        ("npy_int8", "ga_byte"),
                        ("npy_int16", "ga_short"),
                        ("npy_int32", "ga_int"),
                        ("npy_int64", "ga_long"),
                        ("npy_float16", "ga_half"),
                        ("npy_float32", "ga_float"),
                        ("npy_float64", "ga_double"),
                        ]:
            kop = kop.replace(npy, ga)
        return support_code, kop

    def c_headers(self):
        return ['<numpy_compat.h>', '<gpuarray/types.h>',
                '<gpuarray/elemwise.h>']

    def c_support_code_struct(self, node, name):
        # One persistent GpuElemwise handle per apply-node struct.
        return "\nGpuElemwise *ge;\n"

    def c_init_code_struct(self, node, name, sub):
        """Emit C code that creates the GpuElemwise kernel once per struct."""
        inps, outs = self._get_vnames(node)
        nargs = len(inps) + len(outs) - len(self.inplace_pattern)
        support_code, kop = self._generate_op_string(node)
        res = """
        gpuelemwise_arg args[%(nargs)s] = {{0}};
        """ % dict(nargs=nargs)

        # One GE_READ argument per input.
        for n, (i, name) in enumerate(zip(node.inputs, inps)):
            res += """
        args[%(n)s].name = %(name)s;
        args[%(n)s].typecode = %(typecode)s;
        args[%(n)s].flags = GE_READ;
        """ % dict(n=n, name='"%s"' % (name,),
                   typecode=i.type.typecode)

        p = len(inps)
        for n, o in enumerate(node.outputs):
            if n in self.inplace_pattern:
                # Inplace output: just mark the aliased input as writable.
                assert(len(node.outputs) == 1)
                res += "\nargs[%(n)s].flags |= GE_WRITE;\n" % dict(n=self.inplace_pattern[n])
            else:
                res += """
        args[%(n)s].name = %(name)s;
        args[%(n)s].typecode = %(typecode)s;
        args[%(n)s].flags = GE_WRITE;
        """ % dict(n=p, name='"%s"' % (outs[n],),
                   typecode=o.type.typecode)
                p += 1

        res += """
        ge = GpuElemwise_new(%(ctx)s->ctx, %(support)s, %(kop)s, %(nargs)s, args, %(nd)s, GE_CONVERT_F16);
        if (ge == NULL) {
          PyErr_SetString(PyExc_RuntimeError, "Could not initialize elemwise support");
          %(fail)s
        }
        """ % dict(nargs=nargs, ctx=sub['params'], fail=sub['fail'],
                   support=as_C_string_const(support_code),
                   kop=as_C_string_const(kop), nd=node.inputs[0].ndim)
        return res

    def c_cleanup_code_struct(self, node, name):
        return """
        GpuElemwise_free(ge);
        """

    def c_code(self, node, name, inputs, outputs, sub):
        """Emit the per-call C code: input shape checks, output allocation
        (or inplace aliasing) and the GpuElemwise_call invocation."""
        nd = node.outputs[0].ndim
        fail = sub["fail"]
        initial_dims = ','.join('1' for i in xrange(nd))
        opname = str(self.scalar_op)
        ctx = sub['params']
        nargs = len(node.inputs) + len(node.outputs) - len(self.inplace_pattern)

        # Declare broadcast-pattern arrays for each distinct input name.
        emitted_inames = {}
        code = """
        // +1 is so that MSVC is happy when nd == 0
        size_t dims[%(nd)s+1] = {%(initial_dims)s};
        void *rargs[%(nargs)s] = {0};
        int err;
        """ % locals()
        for idx, iname in enumerate(inputs):
            if iname in emitted_inames:
                assert emitted_inames[iname] is node.inputs[idx]
                continue

            broadcasts = map(int, node.inputs[idx].broadcastable)
            broadcasts = ', '.join(map(str, broadcasts))
            nd = node.inputs[idx].ndim
            code += """
        int broadcasts_%(iname)s[%(nd)s+1] = {%(broadcasts)s};
        """ % locals()
            emitted_inames[iname] = node.inputs[idx]

        # check that all inputs have valid dimensions
        emitted_inames = {}
        for idx, iname in enumerate(inputs):
            code += "rargs[%(idx)s] = &%(iname)s->ga;\n" % dict(idx=idx, iname=iname)
            if iname in emitted_inames:
                continue
            code += """
        if (%(nd)s != PyGpuArray_NDIM(%(iname)s))
        {
            PyErr_Format(PyExc_TypeError,
                         "need %(nd)s dims, not %%u",
                         PyGpuArray_NDIM(%(iname)s));
            %(fail)s;
        }
        for (int i = 0; i< %(nd)s; ++i)
        {
            dims[i] = (dims[i] == 1) ? PyGpuArray_DIMS(%(iname)s)[i] : dims[i];
            if ((!(broadcasts_%(iname)s[i] &&
                 PyGpuArray_DIMS(%(iname)s)[i] == 1)) &&
                (dims[i] != PyGpuArray_DIMS(%(iname)s)[i]))
            {
                PyErr_Format(PyExc_ValueError,
                             "GpuElemwise. Input dimension mis-match. Input"
                             " %(idx)d (indices start at 0) has shape[%%d] == %%llu"
                             ", but the output's size on that axis is %%llu.",
                             i,
                             (unsigned long long)PyGpuArray_DIMS(%(iname)s)[i],
                             (unsigned long long)dims[i]
                );
                %(fail)s;
            }
        }
        """ % locals()
            emitted_inames[iname] = True
        # check that all outputs have valid dimensions
        p = len(node.inputs)
        for idx, oname in enumerate(outputs):
            typecode = dtype_to_typecode(node.outputs[idx].dtype)
            if idx not in self.inplace_pattern.keys():
                # Reuse a pre-existing output only if it has the right shape
                # and is C-contiguous; otherwise allocate a fresh array.
                code += """
        for (int i = 0; (i< %(nd)s) && (%(oname)s); ++i) {
            if (dims[i] != PyGpuArray_DIMS(%(oname)s)[i])
            {
                Py_DECREF(%(oname)s);
                %(oname)s = NULL;
            }
        }
        if (%(oname)s && !GpuArray_CHKFLAGS(&(%(oname)s->ga), GA_C_CONTIGUOUS))
        {
            Py_XDECREF(%(oname)s);
            %(oname)s = NULL;
        }
        if (NULL == %(oname)s)
        {
            %(oname)s = pygpu_empty(%(nd)d, dims,
                                    %(typecode)s, GA_C_ORDER,
                                    %(ctx)s, Py_None);
            if (!%(oname)s) {
                %(fail)s
            }
        }
        rargs[%(p)s] = &%(oname)s->ga;
        """ % locals()
                p += 1
            else:
                # Inplace output: alias the corresponding input and verify
                # its shape matches the broadcasted output shape.
                input_idx = self.inplace_pattern[idx]
                iname = inputs[input_idx]
                code += """
        Py_XDECREF(%(oname)s);
        %(oname)s = %(iname)s;
        Py_INCREF(%(oname)s);
        for (int i = 0; (i< %(nd)s) && (%(oname)s); ++i) {
            if (dims[i] != PyGpuArray_DIMS(%(oname)s)[i])
            {
                PyErr_Format(PyExc_ValueError,
                             "GpuElemwise. Output dimension mis-match. Output"
                             " %(idx)d (indices start at 0), working inplace"
                             " on input %(input_idx)s, has shape[%%i] == %%llu"
                             ", but the output's size on that axis is %%llu.",
                             i,
                             (unsigned long long)PyGpuArray_DIMS(%(oname)s)[i],
                             (unsigned long long)dims[i]
                );
                Py_DECREF(%(oname)s);
                %(oname)s = NULL;
                %(fail)s;
            }
        }
        """ % locals()

        code += """
        if (GpuElemwise_call(ge, rargs, GE_BROADCAST) != GA_NO_ERROR) {
          PyErr_SetString(PyExc_RuntimeError, "Error in the elemwise call");
          %(fail)s
        }
        """ % dict(fail=sub['fail'])

        return str(code)

    # To disable the superclass perform.
    perform = Op.perform

    # Since we don't have a perform ...
    def python_constant_folding(self, node):
        return False

    def c_code_cache_version(self):
        ver = self.scalar_op.c_code_cache_version()
        if ver:
            return (10, ver)
        else:
            return ver
class SupportCodeError(Exception):
    """
    We do not support certain things (such as the C++ complex struct).

    Raised by GpuElemwise.make_node when the scalar op's C support code
    cannot be used for the GPU kernel.
    """
class GpuDimShuffle(DimShuffle):
    """
    DimShuffle on the GPU.

    """
    # The perform below does no arithmetic (only transpose/reshape/copy),
    # so float16 data is handled without conversion.
    _f16_ok = True
    c_func_name = 'gpu_dimshuffle'

    def make_node(self, input):
        ctx_name = infer_context_name(input)
        # Let the base DimShuffle compute the output dtype and broadcast
        # pattern, then re-type the result for the GPU context.
        res = DimShuffle.make_node(self, input)
        otype = GpuArrayType(dtype=res.outputs[0].type.dtype,
                             broadcastable=res.outputs[0].type.broadcastable,
                             context_name=ctx_name)
        input = as_gpuarray_variable(input, ctx_name)
        return Apply(self, [input], [otype()])

    def __str__(self):
        if self.inplace:
            s = "InplaceGpuDimShuffle{%s}"
        else:
            s = "GpuDimShuffle{%s}"
        return s % (','.join(str(x) for x in self.new_order))

    def perform(self, node, inp, out, params):
        input, = inp
        storage, = out

        res = input

        # Put kept dimensions in their target order; dropped dims go last.
        res = res.transpose(self.shuffle + self.drop)

        # Truncate away the dropped dims, then insert the new length-1 dims
        # requested by the 'x' entries of new_order.
        shape = list(res.shape[:len(self.shuffle)])
        for augm in self.augment:
            shape.insert(augm, 1)
        res = res.reshape(shape)

        if not self.inplace:
            res = res.copy()

        storage[0] = res
class GpuCAReduceCuda(GpuKernelBase, HideC, CAReduceDtype):
"""
GpuCAReduceCuda is a Reduction along some dimensions by a scalar op.
Parameters
----------
reduce_mask
The dimensions along which to reduce. The `reduce_mask` is a tuple of
booleans (actually integers 0 or 1) that specify for each input
dimension, whether to reduce it (1) or not (0).
pre_scalar_op
If present, must be a scalar op with only 1 input. We will execute it
on the input value before reduction.
Examples
--------
When scalar_op is a theano.scalar.basic.Add instance:
- reduce_mask == (1,) sums a vector to a scalar
- reduce_mask == (1,0) computes the sum of each column in a matrix
- reduce_mask == (0,1) computes the sum of each row in a matrix
- reduce_mask == (1,1,1) computes the sum of all elements in a 3-tensor.
Notes
-----
Any reduce_mask of all zeros is a sort of 'copy', and may be removed during
graph optimization.
This Op is a work in progress.
This op was recently upgraded from just GpuSum a general CAReduce. Not
many code cases are supported for scalar_op being anything other than
scalar.Add instances yet.
Important note: if you implement new cases for this op, be sure to
benchmark them and make sure that they actually result in a speedup.
GPUs are not especially well-suited to reduction operations so it is
quite possible that the GPU might be slower for some cases.
"""
__props__ = ('axis', 'reduce_mask', 'dtype', 'acc_dtype', 'scalar_op',
'pre_scalar_op')
_f16_ok = True
def __init__(self, scalar_op, axis=None,
reduce_mask=None, dtype=None, acc_dtype=None,
pre_scalar_op=None):
if reduce_mask is not None:
reduce_mask = tuple(reduce_mask)
self.reduce_mask = reduce_mask
# used to make sure that calls to scalar op
# have unique name arguments
self._n_scalar_op_calls = 0
CAReduceDtype.__init__(self, scalar_op, axis=axis,
dtype=dtype, acc_dtype=acc_dtype)
self.pre_scalar_op = pre_scalar_op
if pre_scalar_op:
assert pre_scalar_op.nin == 1
def __str__(self):
pre = ""
if self.pre_scalar_op:
pre = "pre=%s,red=" % str(self.pre_scalar_op)
ax = ''
if self.axis is not None:
ax = '{%s}' % (', '.join(str(x) for x in self.axis),)
return "GpuCAReduceCuda{%s%s}%s" % (pre, str(self.scalar_op), ax)
def __setstate__(self, d):
self.__dict__.update(d)
# For unpickling of old ops.
if not hasattr(self, "pre_scalar_op"):
self.pre_scalar_op = None
def make_node(self, x):
x = as_gpuarray_variable(x, infer_context_name(x))
if x.type.context.kind != b'cuda':
raise TypeError("GpuCAReduceCuda doesn't work for non-cuda devices")
ret = super(GpuCAReduceCuda, self).make_node(x)
self = copy.copy(self)
self.axis = ret.op.axis
if self.pre_scalar_op:
# Currently we only tested pre_scalar_op that don't cause
# upcast.
assert Elemwise(self.pre_scalar_op)(x).dtype == x.dtype
if self.reduce_mask is None:
if self.axis is None:
reduce_mask = [1] * x.type.ndim
else:
reduce_mask = [0] * x.type.ndim
for a in self.axis:
assert reduce_mask[a] == 0
reduce_mask[a] = 1
self.reduce_mask = tuple(reduce_mask)
if (x.type.ndim != len(self.reduce_mask)):
raise TypeError("x must have rank %i" % len(self.reduce_mask))
if ("complex" in x.dtype or
"complex" in ret.outputs[0].dtype or
"complex" in self._acc_dtype(x.dtype)):
raise NotImplementedError("We don't support complex in gpu reduction")
return Apply(self, [x], [GpuArrayType(ret.outputs[0].dtype,
ret.outputs[0].type.broadcastable,
context_name=x.type.context_name)()])
def perform(self, node, inp, out, ctx):
    # No Python implementation for this Op: delegate to the base
    # theano.Op.perform (this Op is meant to run through its C code).
    theano.Op.perform(self, node, inp, out, ctx)
def supports_c_code(self, inputs):
    """
    Returns True if the current op and reduce pattern has functioning C code.
    """
    # If we don't even have the right method, we certainly
    # don't support the C code
    # (This is the test that used to be implemented by
    # local_gpu_sum)
    pattern = (''.join(str(i) for i in self.reduce_mask))
    if not hasattr(self, 'c_code_reduce_%s' % pattern):
        return False
    # Now that this is a general reduction op, we might
    # have a method for a pattern, but that pattern
    # might not be implemented for the current scalar op.
    # To detect this more complicated situation, we
    # make fake arguments to c_code, try to run them,
    # and see if NotImplementedError gets raised.
    node = self.make_node(*inputs)
    name = 'fake_name'
    # Placeholder identifiers: only the structure of the generated code
    # matters here, it is never compiled.
    inp = ['fake_input_name_%d' % i for i in xrange(len(inputs))]
    out = ['fake_output_name_%d' % i for i in xrange(len(node.outputs))]
    sub = {'fail': 'fake failure code', 'params': 'fake context'}
    try:
        self.c_code(node, name, inp, out, sub)
        if not self.gpu_kernels(node, name):
            return False
    except NotImplementedError:
        return False
    return True
def c_headers(self):
return ['<numpy_compat.h>', '<gpuarray/types.h>']
def c_support_code(self):
return """
template <typename T>
static T ceil_intdiv(T a, T b)
{
return (a/b) + ((a % b) ? 1: 0);
}
"""
def c_code(self, node, name, inp, out, sub):
    """Emit the host-side C code for this reduction.

    The generated code (written into a StringIO and returned as one
    string) checks the input rank, allocates/validates the output
    array, then dispatches to the kernel-launch snippet produced by
    the pattern-specific ``c_code_reduce_*`` method.
    """
    x, = inp
    z, = out
    nd_in = node.inputs[0].type.ndim
    nd_out = node.outputs[0].type.ndim
    # For complex, we need to use theano_complex* in the c code to
    # have it run. But libgpuarray don't understand it.
    in_dtype = node.inputs[0].type.dtype_specs()[1]
    out_dtype = node.outputs[0].type.dtype_specs()[1]
    gin_dtype = "npy_" + node.inputs[0].dtype
    gout_dtype = "npy_" + node.outputs[0].dtype
    # Every reduced dimension removes exactly one output dimension.
    assert nd_in - nd_out == sum(self.reduce_mask)
    sio = StringIO()
    fail = sub['fail']
    ctx = sub['params']
    # check input
    print("""
        if (PyGpuArray_NDIM(%(x)s) != %(nd_in)s)
        {
            PyErr_Format(PyExc_TypeError,
                         "required nd=%(nd_in)s, got nd=%%u", PyGpuArray_NDIM(%(x)s));
            %(fail)s;
        }
        """ % locals(), file=sio)

    # It might be nice to use a property of the op class to do this,
    # but tensor.elemwise.CAReduce has this exact same check so I guess
    # this is OK to do
    if self.scalar_op in [scalar.minimum, scalar.maximum]:
        # min/max have no identity element, so a zero-length reduced
        # axis is an error rather than an empty reduction.
        conds = ["(PyGpuArray_DIMS(%s)[%d] == 0)" % (x, i)
                 for i in xrange(nd_in)
                 if self.reduce_mask[i]]
        assert len(conds) > 0
        cond = "(" + " || ".join(conds) + ")"
        print("""
            if %(cond)s
            {
                PyErr_Format(PyExc_ValueError," tried to reduce a 0-length axis.");
                %(fail)s;
            }
            """ % locals(), file=sio)

    #
    # alloc an output if we need one
    #
    # check the basics of out output
    print("""
        if (  !%(z)s
           || (PyGpuArray_NDIM(%(z)s) != %(nd_out)s)
        """ % locals(), file=sio)
    # ensure that the output has the right non-reduced dimensions
    j = 0
    for i in xrange(nd_in):
        if not self.reduce_mask[i]:
            print(" || (PyGpuArray_DIMS(%(z)s)[%(j)s] != PyGpuArray_DIMS(%(x)s)[%(i)d]) " % locals(), file=sio)
            j += 1
    print("""
           )
        {
        """ % locals(), file=sio)
    if nd_out > 0:
        print("size_t new_dims[%(nd_out)s]; " % locals(), file=sio)
    else:
        print("size_t *new_dims=NULL; ", file=sio)
    # Output shape = the non-reduced input dimensions, in order.
    j = 0
    for i in xrange(nd_in):
        if not self.reduce_mask[i]:
            print('new_dims[%(j)s] = PyGpuArray_DIMS(%(x)s)[%(i)s];' % locals(), file=sio)
            j += 1
    out_typecode = dtype_to_typecode(gout_dtype[4:])
    print("""
            Py_XDECREF(%(z)s);
            %(z)s = pygpu_empty(%(nd_out)s, new_dims,
                                %(out_typecode)s, GA_C_ORDER,
                                %(ctx)s, Py_None);
            if (NULL == %(z)s)
            {
                PyErr_Format(PyExc_RuntimeError, "Failed to allocate output");
                %(fail)s;
            }
        }
        """ % locals(), file=sio)

    # \begin bracket the reduction in a check that there is
    # actually work to do
    if getattr(self.scalar_op, 'identity', None) == 0:
        # Identity 0 (e.g. sum): an empty input just zero-fills output.
        zero_shp = "GpuArray_memset(&%(z)s->ga, 0)" % locals()
    # TODO: elif getattr(self.scalar_op, 'identity', None) == 1:
    else:
        scalar_op = self.scalar_op
        zero_shp = """
        PyErr_Format(PyExc_NotImplementedError,
                     "GpuCAReduceCuda not implemented when input shape is 0"
                     " for this scalar_op: %(scalar_op)s");
        %(fail)s;
        """ % locals()
    print("""
        if (PyGpuArray_SIZE(%(z)s) && ! PyGpuArray_SIZE(%(x)s)){
            %(zero_shp)s;
        }
        else if (PyGpuArray_SIZE(%(z)s))
        {
        """ % locals(), file=sio)

    #
    # Now perform the reduction
    #
    if all(i == 1 for i in self.reduce_mask):
        # check if the tensor is ccontiguous, if true, use the c_code_reduce_ccontig code.
        # TODO: check if we are ccontiguous when we un-dimshuffle
        # TODO: if only some dims are ccontiguous, call version with less dims.
        print('if(%(x)s->ga.flags & GA_C_CONTIGUOUS){' % locals(),
              file=sio)
        self.c_code_reduce_ccontig(sio, node, name, x, z, fail)
        print("}else{", file=sio)
        getattr(self, 'c_code_reduce_%s' %
                (''.join(str(i) for i in self.reduce_mask)))(
            sio, node, name, x, z, fail)
        print("}", file=sio)
    else:
        # Dispatch to the launch snippet matching this reduce pattern.
        getattr(self, 'c_code_reduce_%s' % (''.join(
            str(i) for i in self.reduce_mask)))(sio, node, name, x, z, fail)

    # \end bracket the reduction ...
    print("""
        }
        """ % locals(), file=sio)

    return sio.getvalue()
def _makecall(self, node, name, x, z, fail, pattern=None, extra_dims=(), extra_strides=()):
    """
    Return a string for making a kernel call.

    The generated snippet computes element strides for the input and
    output, packs the kernel parameters into ``kernel_params`` and
    launches the kernel via ``GpuKernel_call`` (with error checking).
    ``extra_dims``/``extra_strides`` are (declaration, value) pairs for
    additional parameters a caller wants to inject (see
    c_code_reduce_101).

    The return value looks something like:

        .. code-block:: c

            ssize_t stride_A0 = PyGpuArray_STRIDES(%(x)s)[0]/sizeof(%(in_dtype)s);
            ssize_t stride_A1 = PyGpuArray_STRIDES(%(x)s)[1]/sizeof(%(in_dtype)s);
            ssize_t stride_Z0 = PyGpuArray_STRIDES(%(z)s)[0]/sizeof(%(out_dtype)s);
            if (verbose)
                printf("running kernel_reduce_10_%(name)s\\n");
            size_t n_shared = sizeof(%(acc_dtype)s) * n_threads[0] * n_threads[1] * n_threads[2];
            void *kernel_params[] = {
                    (void *)&PyGpuArray_DIMS(%(x)s)[0],
                    (void *)&PyGpuArray_DIMS(%(x)s)[1],
                    (void *)%(x)s->ga.data,
                    (void *)&%(x)s->ga.offset,
                    (void *)&stride_A0,
                    (void *)&stride_A1,
                    (void *)%(z)s->ga.data,
                    (void *)&%(z)s->ga.offset,
                    (void *)&stride_Z0};
            int err = GpuKernel_call(&%(k_var)s, 3, n_blocks, n_threads, n_shared, kernel_params);
            %(err_check)s
    """
    in_dtype = "npy_" + node.inputs[0].dtype
    out_dtype = "npy_" + node.outputs[0].dtype
    acc_dtype = "npy_" + self._acc_dtype(node.inputs[0].dtype)
    sio = StringIO()
    if pattern is None:
        pattern = ''.join(str(c) for c in self.reduce_mask)
    ndim = len(self.reduce_mask)
    nd_out = ndim - sum(self.reduce_mask)
    shapes_format = "shape=(%s)" % ",".join(["%llu"] * node.inputs[0].ndim)
    shapes_data = ",".join(["(size_t) PyGpuArray_DIMS(%s)[%d]" % (x, i)
                            for i in range(node.inputs[0].ndim)])
    k_var = "kernel_reduce_%(pattern)s_%(name)s" % locals()
    params = []

    # Parameter order must mirror the declaration built in _k_decl:
    # dims, (extra dims), input data+offset, input strides,
    # (extra strides), output data+offset, output strides.
    for i in xrange(ndim):
        params.append("(void *)&PyGpuArray_DIMS(%(x)s)[%(i)s]" % locals())
    for declaration, value in extra_dims:
        print(declaration % locals(), file=sio)
        params.append(value)
    params.append("(void *)%(x)s->ga.data" % locals())
    params.append("(void *)&%(x)s->ga.offset" % locals())
    for i in xrange(ndim):
        # Convert byte strides to element strides for the kernel.
        print("""
        ssize_t stride_A%(i)d = PyGpuArray_STRIDES(%(x)s)[%(i)s]/sizeof(%(in_dtype)s);
        """ % locals(), file=sio)
        params.append("(void *)&stride_A%(i)d" % locals())
    for declaration, value in extra_strides:
        print(declaration % locals(), file=sio)
        params.append(value)
    params.append("(void *)%(z)s->ga.data" % locals())
    params.append("(void *)&%(z)s->ga.offset" % locals())
    for i in xrange(nd_out):
        print("""
        ssize_t stride_Z%(i)d = PyGpuArray_STRIDES(%(z)s)[%(i)s]/sizeof(%(out_dtype)s);
        """ % locals(), file=sio)
        params.append("(void *)&stride_Z%(i)d" % locals())
    kernel_params = ', '.join(params)
    err_check = """
        if (err != GA_NO_ERROR) {
            PyErr_Format(PyExc_RuntimeError,
                         "gpuarray error: %(k_var)s: %%s.",
                         GpuKernel_error(&%(k_var)s, err));
            %(fail)s;
        }
        """ % locals()
    print("""
        if (verbose)
            printf("running kernel_reduce_%(pattern)s_%(name)s\\n");
        size_t n_shared = sizeof(%(acc_dtype)s) * n_threads[0] * n_threads[1] * n_threads[2];
        void *kernel_params[] = { %(kernel_params)s };
        if (verbose>1)
            printf("n_threads[0]=%%lu, n_threads[1]=%%lu, "
                   "n_threads[2]=%%lu, n_threads=%%lu, "
                   "n_blocks[0]=%%lu, n_blocks[1]=%%lu, n_blocks[2]=%%lu, "
                   "n_blocks=%%lu, n_shared=%%d, %(shapes_format)s\\n",
                              n_threads[0],n_threads[1],
                              n_threads[2],
                              n_threads[0]*n_threads[1]*
                              n_threads[2],
                              n_blocks[0],n_blocks[1],n_blocks[2],
                              n_blocks[0]*n_blocks[1]*n_blocks[2],
                              n_shared, %(shapes_data)s);
        int err = GpuKernel_call(&%(k_var)s, 3, n_blocks, n_threads, n_shared, kernel_params);
        %(err_check)s
        """ % locals(), file=sio)
    return sio.getvalue()
def _k_decl(self, node, nodename, pattern=None,
            ndim=None, reduce_mask=None):
    """
    Return a string to declare a kernel function.

    Also returns the kernel name, the list of parameter type codes
    (for GpuKernel registration) and the C variable name of the kernel.

    The result will look something like this:

    .. code-block:: c

        KERNEL void kernel_reduce_110_%(nodename)s(
                const ga_size d0,
                const ga_size d1,
                const ga_size d2,
                const %(in_type)s *A,
                const ga_size offset_A,
                const ga_ssize sA0,
                const ga_ssize sA1,
                const ga_ssize sA2,
                %(out_type)s * Z,
                const ga_size offset_Z,
                const ga_ssize sZ0)

    Since the nodename is unique, we don't need to put the name
    of the scalar_op in here.
    """
    in_dtype = node.inputs[0].dtype
    out_dtype = node.outputs[0].dtype
    in_type = gpuarray.dtype_to_ctype(in_dtype)
    out_type = gpuarray.dtype_to_ctype(out_dtype)
    if reduce_mask is None:
        reduce_mask = self.reduce_mask
    if ndim is None:
        ndim = len(reduce_mask)
    if pattern is None:
        pattern = ''.join(str(i) for i in reduce_mask)
    kname = "kernel_reduce_%(pattern)s" % locals()
    k_var = "kernel_reduce_%(pattern)s_%(nodename)s" % locals()
    params = []
    sio = StringIO()

    print("""
        KERNEL void %(kname)s(
        """ % locals(), file=sio)
    # One size parameter per input dimension.
    for i in xrange(ndim):
        params.append('uintp')
        print("""
                const ga_size d%(i)s,
        """ % locals(), file=sio)
    # Input buffer and its offset.
    params.append(gpuarray.GpuArray)
    params.append('uintp')
    print("""
                const %(in_type)s *A, const ga_size offset_A,
        """ % locals(), file=sio)
    # One (signed) stride per input dimension.
    for i in xrange(ndim):
        params.append('intp')
        print("""
                const ga_ssize sA%(i)s,
        """ % locals(), file=sio)
    # Output buffer and its offset.
    params.append(gpuarray.GpuArray)
    params.append('uintp')
    print("""
                %(out_type)s * Z, const ga_size offset_Z
        """ % locals(), file=sio)
    # One stride per non-reduced (output) dimension.
    for i in xrange(ndim - sum(reduce_mask)):
        params.append('intp')
        print("""
                , const ga_ssize sZ%(i)s
        """ % locals(), file=sio)
    print(")", file=sio)
    return sio.getvalue(), kname, params, k_var
def _k_init(self, node, nodename):
    """Return the common kernel prologue: thread bookkeeping, the
    shared-memory reduction buffer, the accumulator, and applying the
    data offsets to the raw A/Z pointers."""
    in_dtype = node.inputs[0].dtype
    out_dtype = node.outputs[0].dtype
    acc_dtype = self._acc_dtype(node.inputs[0].dtype)
    # We need to use theano_complex* and not npy_complex*
    in_type = gpuarray.dtype_to_ctype(in_dtype)
    out_type = gpuarray.dtype_to_ctype(out_dtype)
    acc_type = gpuarray.dtype_to_ctype(acc_dtype)

    return """
            const int threadCount = blockDim.x * blockDim.y * blockDim.z;
            const int threadNum = threadIdx.z * blockDim.x * blockDim.y
            + threadIdx.y * blockDim.x + threadIdx.x;
            extern __shared__ %(acc_type)s buf[];
            %(acc_type)s myresult = 0;

            A = (const %(in_type)s *)(((char *)A)+offset_A);
            Z = (%(out_type)s *)(((char *)Z)+offset_Z);
    """ % locals()
def _assign_init(self, first_item):
    """
    This return the initial value for myresult.
    If the scalar op have an identity value, return it.

    Otherwise, check that the scalar op is maximum or minimum
    and return first_item. It should be the first element of the reduction.
    As the maximum and minimum of the same value don't change, this work.
    """
    if hasattr(self.scalar_op, 'identity'):
        return str(self.scalar_op.identity)
    else:
        # Seeding with the first element only works for idempotent ops.
        assert isinstance(self.scalar_op, (scalar.Maximum,
                                           scalar.Minimum))
        if self.pre_scalar_op:  # TODO: multiple dtypes
            # dtype = node.inputs[0].dtype
            # NOTE: dtype is hard-coded; see the TODO above.
            dtype = 'float32'

            dummy_var = scalar.Scalar(dtype=dtype)()

            dummy_node = self.pre_scalar_op.make_node(dummy_var)

            dummy_name = 'assign_init_pre_scalar_op' + str(self._n_scalar_op_calls)
            self._n_scalar_op_calls += 1
            # Extract the RHS expression from the generated
            # " = <expr>;" assignment and use it as the seed value.
            t = self.pre_scalar_op.c_code(dummy_node, dummy_name,
                                          (first_item,), ("",), {})
            assert t.startswith(' = ')
            first_item = t[3:]
            if first_item[-1] == ';':
                first_item = first_item[:-1]

        return first_item
def _assign_reduce(self, node, name, left, right, sub, pre):
    """
    Parameters
    ----------
    node
        The node argument to this op's c_code.
    name
        The name argument to this op's c_code.
    left
        A C code string identifying an lvalue.
    right
        A C code string identifying an expression.
    sub
        The sub argument to this op's c_code.
    pre
        If True, we will add the pre_scalar_op.c_code.

    Returns
    -------
    str
        C code to reduce left and right, assigning the result to left.
    """
    x, = node.inputs
    in_dtype = x.dtype
    out_dtype = node.outputs[0].dtype

    dummy_left = Scalar(dtype=out_dtype)()
    dummy_right = Scalar(dtype=in_dtype)()

    dummy_node = self.scalar_op.make_node(dummy_left, dummy_right)

    # Each generated scalar-op call needs a unique name.
    dummy_name = name + '_scalar_op' + str(self._n_scalar_op_calls)
    self._n_scalar_op_calls += 1
    if pre and self.pre_scalar_op:
        assert left == "myresult"
        dummy_node = self.pre_scalar_op.make_node(dummy_left)
        dummy_name = name + '_scalar_op' + str(self._n_scalar_op_calls)
        self._n_scalar_op_calls += 1
        # Strip the " = " prefix and trailing ";" so the pre-op output
        # can be used inline as the right operand of the reduction.
        t = self.pre_scalar_op.c_code(dummy_node, dummy_name,
                                      (right,), ("",), sub)
        assert t.startswith(' = ')
        right = t[3:]
        if right[-1] == ';':
            right = right[:-1]

    return self.scalar_op.c_code(dummy_node, dummy_name, (left, right),
                                 (left,), sub)
def _k_reduce_buf(self, z_pos, node, name, sub):
    """Return CUDA code that reduces each thread's partial result
    (``myresult``) across the block via the shared buffer ``buf`` and
    writes the final value to the lvalue ``z_pos``.

    Parameters
    ----------
    z_pos
        C lvalue receiving the block's reduced value.
    node, name, sub
        These should be passed through from the original call to c_code.
    """
    in_dtype = "npy_" + node.inputs[0].dtype
    out_dtype = "npy_" + node.outputs[0].dtype
    acc_dtype = "npy_" + self._acc_dtype(node.inputs[0].dtype)
    write_out = write_w(node.outputs[0].dtype)

    # This code (the code in new_version) is currently ignored.
    # Code produced later in this function is returned instead.
    current_version = """
        __syncthreads(); // some kernel do multiple reduction.
        buf[threadNum] = myresult;
        __syncthreads();

        // rest of function is handled by one warp
        if (threadNum < warpSize) {
            //round up all the partial sums into the first `warpSize` elements
            for (int i = threadNum + warpSize; i < threadCount; i += warpSize)
            {
                """
    current_version += self._assign_reduce(node, name,
                                           'myresult', 'buf[i]',
                                           sub, False) + """
            }
            buf[threadNum] = myresult;
        }
        __syncthreads();
        // Binary tree reduction of the first warpSize partial sums.
        for (unsigned int _n = warpSize / 2; _n > 0; _n /= 2) {
            if (threadNum < _n && threadNum + _n < threadCount)
            """
    current_version += self._assign_reduce(node, name, 'buf[threadNum]',
                                           'buf[threadNum+_n]', sub, False)

    current_version += """
            __syncthreads();
        }
        if (threadNum == 0) {
            %(z_pos)s = %(write_out)s(buf[0]);
        }
        """

    current_version = current_version % locals()

    return current_version
# Threads must be organized as: threadNum%nb_reduce correspond to the same sum
# nb_reduce<=warpSize
def _k_reduce_buf_multiple(self, z_pos, node, name, nb_reduce):
    """Like _k_reduce_buf, but performs ``nb_reduce`` independent
    reductions at once: threads with equal ``threadNum %% nb_reduce``
    contribute to the same sum. The final values are written through
    the lvalue expression ``z_pos``."""
    reduce_fct = self._assign_reduce(node, name, 'myresult', 'buf[i]', {}, False)
    write_out = write_w(node.outputs[0].dtype)

    return """
        __syncthreads(); // some kernel do multiple reduction.
        buf[threadNum] = myresult;
        __syncthreads();

        // rest of function is handled by one warp
        if (threadNum < %(nb_reduce)s)
        {
            //round up all the partial sums into the first `nb_reduce` elements
            for (int i = threadNum + %(nb_reduce)s; i < threadCount; i += %(nb_reduce)s)
            {
                %(reduce_fct)s;
            }
            %(z_pos)s = %(write_out)s(myresult);
        }
        """ % locals()
def c_code_reduce_ccontig(self, sio, node, name, x, z, fail):
    """
    Emit the launch code for a full reduction of a C-contiguous input:
    a single block of up to 256 threads over the flat element array.
    """
    in_dtype = "npy_" + node.inputs[0].dtype
    out_dtype = "npy_" + node.outputs[0].dtype
    if getattr(self.scalar_op, 'identity', None) == 0:
        # Identity 0 (e.g. sum): empty input means zero-filled output.
        zero_shp = "GpuArray_memset(&%(z)s->ga, 0)" % locals()
    # TODO: elif getattr(self.scalar_op, 'identity', None) == 1:
    else:
        zero_shp = """
        PyErr_Format(PyExc_NotImplementedError,
                     "GpuCAReduceCuda not implemented when input shape is 0 for this scalar_op");
        %(fail)s;
        """ % locals()

    acc_dtype = "npy_" + self._acc_dtype(node.inputs[0].dtype)
    k_var = "kernel_reduce_ccontig_%(name)s" % locals()
    err_check = """
        if (err != GA_NO_ERROR) {
            PyErr_Format(PyExc_RuntimeError,
                         "gpuarray error: %(k_var)s: %%s.",
                         GpuKernel_error(&%(k_var)s, err));
            %(fail)s;
        }
        """ % locals()

    print("""
        {
          if(PyGpuArray_SIZE(%(x)s)==0){
            %(zero_shp)s;
          }else{
            int verbose = 0;
            size_t numEls = PyGpuArray_SIZE(%(x)s);
            size_t n_threads = std::min(numEls, (size_t) 256);
            size_t n_blocks = 1;
            void *kernel_params[] = {(void *)&numEls,
                                     (void *)%(x)s->ga.data,
                                     (void *)&%(x)s->ga.offset,
                                     (void *)%(z)s->ga.data,
                                     (void *)&%(z)s->ga.offset};
            if (verbose) printf("running kernel_reduce_ccontig_%(name)s"
                                " n_threads=%%llu, size=%%llu, ndim=%%u\\n",
                                n_threads, numEls,
                                PyGpuArray_NDIM(%(x)s));
            size_t n_shared = sizeof(%(acc_dtype)s) * n_threads;
            int err = GpuKernel_call(&%(k_var)s, 1, &n_blocks, &n_threads, n_shared, kernel_params);
            %(err_check)s
         }
         }
         """ % locals(), file=sio)
def c_code_reduce_1(self, sio, node, name, x, z, fail):
    """Launch code for reducing a 1d tensor: one block, up to 256 threads."""
    makecall = self._makecall(node, name, x, z, fail)
    print("""
        {
            int verbose = 0;
            size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[0], (size_t) 256), 1, 1};
            size_t n_blocks[3] = {1, 1, 1};
            %(makecall)s
        }
        """ % locals(), file=sio)
def c_code_reduce_11(self, sio, node, name, x, z, fail):
    """Launch code for fully reducing a 2d tensor: one block; threads.x
    covers dim 1, threads.y as many rows of dim 0 as fit in 256."""
    makecall = self._makecall(node, name, x, z, fail)
    print("""
        {
            int verbose = 0;
            size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[1], (size_t) 256), 1, 1};
            while (n_threads[1] * n_threads[0] <= 256) ++n_threads[1];
            n_threads[1] -= 1;
            if (n_threads[1] > PyGpuArray_DIMS(%(x)s)[0])
                n_threads[1] = PyGpuArray_DIMS(%(x)s)[0];

            size_t n_blocks[3] = {1, 1, 1};
            %(makecall)s
        }
        """ % locals(), file=sio)
def c_code_reduce_01X(self, sio, node, name, x, z, fail, N):
    """
    Shared launch code for the 01, 011 and 0111 patterns: one block per
    slice of the non-reduced leading dimension, threads spread over the
    trailing reduced dimensions.

    Parameters
    ----------
    N
        The number of 1 in the pattern N=1 -> 01, N=2 -> 011 N=3 ->0111
        Work for N=1,2,3.
    """

    assert N in [1, 2, 3]
    in_dtype = "npy_" + node.inputs[0].dtype
    out_dtype = "npy_" + node.outputs[0].dtype
    makecall = self._makecall(node, name, x, z, fail)
    N_pattern = ''.join(['1'] * N)
    param_dim = ",".join(["PyGpuArray_DIMS(%s)[%d]" % (x, i)
                          for i in xrange(N + 1)])
    strides_dim = ",".join(["PyGpuArray_STRIDES(%s)[%d]/sizeof(%s)"
                            % (x, i, in_dtype) for i in xrange(N + 1)])

    threads_y = """
        //get as many y threads as we can fit
        while (n_threads[0] * (n_threads[1]+1) <= 256)
        {
            if (n_threads[1] < PyGpuArray_DIMS(%(x)s)[%(N)s-1])
                n_threads[1] += 1;
            else
                break;
        }""" % locals()

    threads_z = """
        //get as many z threads as we can fit
        while (n_threads[0] * n_threads[1] * (n_threads[2]+1) <= 256)
        {
            if (n_threads[2] < PyGpuArray_DIMS(%(x)s)[%(N)s-2])
                n_threads[2] += 1;
            else
                break;
        }
        //Maximum for Fermi GPU on that dimensions.
        n_threads[2] = std::min(n_threads[2], (size_t)64);
        """ % locals()

    # For fewer than 3 reduced dims, the extra thread axes stay at 1.
    if len(self.reduce_mask) == 2:
        threads_y = ''
        threads_z = ''

    if len(self.reduce_mask) == 3:
        threads_z = ''

    print("""
        {
            int verbose = 0;
            size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[%(N)s], (size_t) 256), 1, 1};
            %(threads_y)s
            %(threads_z)s
            size_t n_blocks[3] = {std::min(PyGpuArray_DIMS(%(x)s)[0], (size_t) 4096), 1, 1};
            %(makecall)s
        }
        """ % locals(), file=sio)
def c_code_reduce_01(self, sio, node, name, x, z, fail):
    """Pattern 01 (reduce last axis of 2d): shared 01X generator, N=1."""
    self.c_code_reduce_01X(sio, node, name, x, z, fail, N=1)
def c_code_reduce_011(self, sio, node, name, x, z, fail):
    """Pattern 011 (reduce last two axes of 3d): shared 01X generator, N=2."""
    self.c_code_reduce_01X(sio, node, name, x, z, fail, N=2)
def c_code_reduce_0111(self, sio, node, name, x, z, fail):
    """Pattern 0111 (reduce last three axes of 4d): shared 01X generator, N=3."""
    self.c_code_reduce_01X(sio, node, name, x, z, fail, N=3)
def c_code_reduce_10(self, sio, node, name, x, z, fail):
    """Launch code for reducing axis 0 of a 2d tensor. Depending on the
    strides, either reuses the 010_AD kernel (viewing the input as
    1 x d0 x d1 for coalesced reads) or the plain 010 kernel."""
    in_dtype = "npy_" + node.inputs[0].dtype
    out_dtype = "npy_" + node.outputs[0].dtype
    acc_dtype = "npy_" + self._acc_dtype(node.inputs[0].dtype)
    k_var = "kernel_reduce_10_%(name)s" % locals()
    # NOTE: here %(k_var)s is declared below as a GpuKernel *pointer*,
    # so no '&' is used in the calls (unlike the other patterns).
    err_check = """
        if (err != GA_NO_ERROR) {
            PyErr_Format(PyExc_RuntimeError,
                         "gpuarray error: %(k_var)s: %%s.",
                         GpuKernel_error(%(k_var)s, err));
            %(fail)s;
        }
        """ % locals()

    print("""
    {
        int verbose = 0;
        if(PyGpuArray_STRIDES(%(x)s)[0]>
           PyGpuArray_STRIDES(%(x)s)[1]){
                // If there are a lot of summations to do, then we can use simple parallelization -
                // use each thread to do one sum.

                // we might as well launch blocks of 32 threads because that's the warp size.
                // we could schedule more threads if we were maxing out the gridsize below, but
                // the gridsize is way more than the physical hardware and I think 32 threads
                // on a huge grid is enough to fully use the hardware.
                size_t n_threads[3] = {32, 1, 1};

                // We kindof reshape the input implicitly to something 4D:
                //  the shape A,B,C    ->   A, B, D, E
                //  where C <= D*E < C+32
                //  where E==32

                GpuKernel *%(k_var)s = &kernel_reduce_010_AD_%(name)s;
                size_t A = 1;
                size_t B = PyGpuArray_DIMS(%(x)s)[0];
                size_t C = PyGpuArray_DIMS(%(x)s)[1];
                size_t D = C/32;
                if (32*D < C) D+= 1;
                assert ((C <= 32*D) && (32*D < C+32));

                // The gridsize would ideally be (A, D).  But we do the following logic to make
                // sure we don't ask for a grid that is too big.
                size_t n_blocks[3] = {A, D, 1};
                if (n_blocks[0] > 4096) n_blocks[0] = 4096;
                if (n_blocks[0]*n_blocks[1] > 4096) n_blocks[1] = 4096/n_blocks[0];
                ssize_t stride_A0 = 1;
                ssize_t stride_A1 = PyGpuArray_STRIDES(%(x)s)[0]/sizeof(%(in_dtype)s);
                ssize_t stride_A2 = PyGpuArray_STRIDES(%(x)s)[1]/sizeof(%(in_dtype)s);
                ssize_t stride_Z0 = 1;
                ssize_t stride_Z1 = PyGpuArray_STRIDES(%(z)s)[0]/sizeof(%(out_dtype)s);
                void *kernel_params[] = {
                        (void *)&A, (void *)&B, (void *)&C, (void *)&D,
                        (void *)%(x)s->ga.data,
                        (void *)&%(x)s->ga.offset,
                        (void *)&stride_A0, (void *)&stride_A1, (void *)&stride_A2,
                        (void *)%(z)s->ga.data,
                        (void *)&%(z)s->ga.offset,
                        (void *)&stride_Z0, (void *)&stride_Z1};
                int err = GpuKernel_call(%(k_var)s, 3, n_blocks, n_threads, 0, kernel_params);
                %(err_check)s
        }else{
            GpuKernel *%(k_var)s = &kernel_reduce_010_%(name)s;
            size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[0], (size_t) 256), 1, 1};
            size_t n_blocks[3] = {1, std::min(PyGpuArray_DIMS(%(x)s)[1], (size_t) 4096), 1};
            if (verbose) {
              fprintf(stderr,
                "running kernel_reduce_10_%(name)s n_blocks=(%%llu,%%llu)\\n",
                (unsigned long long)n_blocks[0],
                (unsigned long long)n_blocks[1]);
            }
            assert(PyGpuArray_DIMS(%(x)s)[1] == PyGpuArray_DIMS(%(z)s)[0]);
            size_t n_shared = sizeof(%(acc_dtype)s) * n_threads[0];
            size_t dim_0 = 1;
            ssize_t stride_A0 = 1;
            ssize_t stride_A1 = PyGpuArray_STRIDES(%(x)s)[0]/sizeof(%(in_dtype)s);
            ssize_t stride_A2 = PyGpuArray_STRIDES(%(x)s)[1]/sizeof(%(in_dtype)s);
            ssize_t stride_Z0 = 1;
            ssize_t stride_Z1 = PyGpuArray_STRIDES(%(z)s)[0]/sizeof(%(out_dtype)s);
            void *kernel_params[] = {
                    (void *)&dim_0,
                    (void *)&PyGpuArray_DIMS(%(x)s)[0],
                    (void *)&PyGpuArray_DIMS(%(x)s)[1],
                    (void *)%(x)s->ga.data, (void *)&%(x)s->ga.offset,
                    (void *)&stride_A0, (void *)&stride_A1, (void *)&stride_A2,
                    (void *)%(z)s->ga.data, (void *)&%(z)s->ga.offset,
                    (void *)&stride_Z0, (void *)&stride_Z1};
            int err = GpuKernel_call(%(k_var)s, 3, n_blocks, n_threads, n_shared, kernel_params);
            %(err_check)s
        }
    }
    """ % locals(), file=sio)
def c_code_reduce_010(self, sio, node, name, x, z, fail):
    """Launch code for reducing the middle axis of a 3d tensor.
    Primary path uses the 010_AD kernel (one thread per output sum,
    input implicitly reshaped for coalescing); an alternative
    shared-memory path exists but is currently disabled (``if (1)``)."""
    makecall = self._makecall(node, name, x, z, fail)
    makecall_inner = self._makecall(node, name, x, z, fail,
                                    pattern="010_inner")
    pattern = ''.join(str(i) for i in self.reduce_mask)
    in_dtype = "npy_" + node.inputs[0].dtype
    out_dtype = "npy_" + node.outputs[0].dtype
    k_var = "kernel_reduce_010_AD_%(name)s" % locals()
    err_check = """
        if (err != GA_NO_ERROR) {
            PyErr_Format(PyExc_RuntimeError,
                         "gpuarray error: %(k_var)s: %%s.",
                         GpuKernel_error(&%(k_var)s, err));
            %(fail)s;
        }
        """ % locals()
    print("""
    {
        //int n_summations = PyGpuArray_DIMS(%(x)s)[0] * PyGpuArray_DIMS(%(x)s)[2];

        //if ((n_summations >= 15 * 32) && (PyGpuArray_DIMS(%(x)s)[2]>=16))
        if (1) // if the alternative is less buggy, consider not using this branch
        {
            // If there are a lot of summations to do, then we can use simple parallelization -
            // use each thread to do one sum.

            // we might as well launch blocks of 32 threads because that's the warp size.
            // we could schedule more threads if we were maxing out the gridsize below, but
            // the gridsize is way more than the physical hardware and I think 32 threads
            // on a huge grid is enough to fully use the hardware.
            size_t n_threads[3] = {32, 1, 1};

            // We kindof reshape the input implicitly to something 4D:
            //  the shape A,B,C    ->   A, B, D, E
            //  where C <= D*E < C+32
            //  where E==32

            size_t A = PyGpuArray_DIMS(%(x)s)[0];
            size_t B = PyGpuArray_DIMS(%(x)s)[1];
            size_t C = PyGpuArray_DIMS(%(x)s)[2];
            size_t D = C/32;
            if (32*D < C) D+= 1;
            assert ((C <= 32*D) && (32*D < C+32));

            // The gridsize would ideally be (A, D).  But we do the following logic to make
            // sure we don't ask for a grid that is too big.
            size_t n_blocks[3] = {A, D, 1};
            if (n_blocks[0] > 4096) n_blocks[0] = 4096;
            if (n_blocks[0]*n_blocks[1] > 4096) n_blocks[1] = 4096/n_blocks[0];
            ssize_t stride_A0 = PyGpuArray_STRIDES(%(x)s)[0]/sizeof(%(in_dtype)s);
            ssize_t stride_A1 = PyGpuArray_STRIDES(%(x)s)[1]/sizeof(%(in_dtype)s);
            ssize_t stride_A2 = PyGpuArray_STRIDES(%(x)s)[2]/sizeof(%(in_dtype)s);
            ssize_t stride_Z0 = PyGpuArray_STRIDES(%(z)s)[0]/sizeof(%(out_dtype)s);
            ssize_t stride_Z1 = PyGpuArray_STRIDES(%(z)s)[1]/sizeof(%(out_dtype)s);
            void *kernel_params[] = {
                    (void *)&A, (void *)&B, (void *)&C, (void *)&D,
                    (void *)%(x)s->ga.data,
                    (void *)&%(x)s->ga.offset,
                    (void *)&stride_A0, (void *)&stride_A1, (void *)&stride_A2,
                    (void *)%(z)s->ga.data,
                    (void *)&%(z)s->ga.offset,
                    (void *)&stride_Z0, (void *)&stride_Z1};
            int err = GpuKernel_call(&%(k_var)s, 3, n_blocks, n_threads, 0, kernel_params);
            %(err_check)s
        }
        else
        {
            int verbose = 2;

              size_t n_threads[3] = {std::min((size_t) 32, PyGpuArray_DIMS(%(x)s)[2]), 1, 1};
              while(    (n_threads[0]*(n_threads[1]+1)<=256)
                     && (n_threads[1]<PyGpuArray_DIMS(%(x)s)[1])){
                  n_threads[1]++;
              }

              size_t n_blocks[3] = {std::min(PyGpuArray_DIMS(%(x)s)[0], (size_t)4096), 1, 1};
              n_blocks[1] = std::min(
                  ceil_intdiv(PyGpuArray_DIMS(%(x)s)[2],
                              (size_t)n_threads[0]),
                  (size_t)(4096 / n_blocks[0])
                  );
            if(std::min(std::min(PyGpuArray_STRIDES(%(x)s)[0]/sizeof(%(in_dtype)s),
                                 PyGpuArray_STRIDES(%(x)s)[1]/sizeof(%(in_dtype)s)),
                        PyGpuArray_STRIDES(%(x)s)[2]/sizeof(%(in_dtype)s))
               ==PyGpuArray_STRIDES(%(x)s)[2]/sizeof(%(in_dtype)s)
              && n_blocks[1]==ceil_intdiv(PyGpuArray_DIMS(%(x)s)[2],
                                         (size_t)n_threads[0])){
              if(verbose>1)
                printf("n_block.x.1=%%d, n_block.x.2=%%d, n_block.y.1=%%d, n_block.y.2=%%d,\\n",
                       PyGpuArray_DIMS(%(x)s)[0],4096,
                       ceil_intdiv(PyGpuArray_DIMS(%(x)s)[2],(size_t)n_threads[0]),
                                   (size_t)(4096 / n_blocks[0]));
              assert(n_threads[0]<=32);
              %(makecall_inner)s
            }else{
              n_threads[0] = std::min(PyGpuArray_DIMS(%(x)s)[1],
                                     (size_t) 256);
              n_blocks[0] = std::min(PyGpuArray_DIMS(%(x)s)[0], (size_t)4096);
              n_blocks[1] = std::min(
                  PyGpuArray_DIMS(%(x)s)[2],
                  (size_t)(4096 / n_blocks[0])
                  );
              %(makecall)s
            }
        }
    }
    """ % locals(), file=sio)
def c_code_reduce_0101(self, sio, node, name, x, z, fail):
    """Launch code for pattern 0101 on a 4d tensor: one block per
    (dim0, dim2) pair; threads.x covers dim 3, threads.y packs dim 1."""
    makecall = self._makecall(node, name, x, z, fail)
    print("""
        {
            int verbose = 0;
            size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[3], (size_t) 256), 1, 1};
            while (n_threads[0] * n_threads[1] <= 256)
            {
                if (n_threads[1] > PyGpuArray_DIMS(%(x)s)[1]) break;
                n_threads[1] += 1;
            }
            n_threads[1] -= 1;
            size_t n_blocks[3] = {PyGpuArray_DIMS(%(x)s)[0], PyGpuArray_DIMS(%(x)s)[2], 1};
            %(makecall)s
        }
        """ % locals(), file=sio)
def c_code_reduce_100(self, sio, node, name, x, z, fail):
    """Launch code for reducing axis 0 of a 3d tensor. When the last
    axis is contiguous, reuses the 010_AD kernel with the first two
    dimensions swapped to get coalesced reads."""
    makecall = self._makecall(node, name, x, z, fail)
    in_dtype = "npy_" + node.inputs[0].dtype
    out_dtype = "npy_" + node.outputs[0].dtype
    acc_dtype = "npy_" + self._acc_dtype(node.inputs[0].dtype)
    k_var = "kernel_reduce_010_AD_%(name)s" % locals()
    err_check = """
        if (err != GA_NO_ERROR) {
            PyErr_Format(PyExc_RuntimeError,
                         "gpuarray error: %(k_var)s: %%s.",
                         GpuKernel_error(&%(k_var)s, err));
            %(fail)s;
        }
        """ % locals()
    # use threadIdx.x for i0
    # use blockIdx.x for i1
    # use blockIdx.y for i2
    print("""
    {
        int verbose = 0;
        if (PyGpuArray_STRIDES(%(x)s)[2] != sizeof(%(in_dtype)s)){
          size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[0], (size_t) 256), 1, 1};
          size_t n_blocks[3] = {std::min(PyGpuArray_DIMS(%(x)s)[1], (size_t)4096), 1, 1};
          while (n_blocks[0] * (n_blocks[1]+1) <= 4096 &&
                 n_blocks[1] <= PyGpuArray_DIMS(%(x)s)[2])
          {
              n_blocks[1] += 1;
          }
          %(makecall)s
        }
        else
        {   // reuse 010_AD kernel, we transpose the 2 first dim
            // See the reduction for the real 010_AD kernel for
            // explanation. We do this to get coalesced read.
            size_t n_threads[3] = {32, 1, 1};

            size_t A = PyGpuArray_DIMS(%(x)s)[1];
            size_t B = PyGpuArray_DIMS(%(x)s)[0];
            size_t C = PyGpuArray_DIMS(%(x)s)[2];
            size_t D = C/32;
            if (32*D < C) D+= 1;
            assert ((C <= 32*D) && (32*D < C+32));

            // The gridsize would ideally be (A, D).  But we do the following logic to make
            // sure we don't ask for a grid that is too big.
            size_t n_blocks[3] = {A, D, 1};
            if (n_blocks[0] > 4096) n_blocks[0] = 4096;
            if (n_blocks[0]*n_blocks[1] > 4096) n_blocks[1] = 4096/n_blocks[0];
            size_t n_shared = 0;
            ssize_t stride_A0 = PyGpuArray_STRIDES(%(x)s)[1]/sizeof(%(in_dtype)s);
            ssize_t stride_A1 = PyGpuArray_STRIDES(%(x)s)[0]/sizeof(%(in_dtype)s);
            ssize_t stride_A2 = PyGpuArray_STRIDES(%(x)s)[2]/sizeof(%(in_dtype)s);
            ssize_t stride_Z0 = PyGpuArray_STRIDES(%(z)s)[0]/sizeof(%(out_dtype)s);
            ssize_t stride_Z1 = PyGpuArray_STRIDES(%(z)s)[1]/sizeof(%(out_dtype)s);
            void *kernel_params[] = {
                    (void *)&A, (void *)&B, (void *)&C, (void *)&D,
                    (void *)%(x)s->ga.data,
                    (void *)&%(x)s->ga.offset,
                    (void *)&stride_A0, (void *)&stride_A1, (void *)&stride_A2,
                    (void *)%(z)s->ga.data,
                    (void *)&%(z)s->ga.offset,
                    (void *)&stride_Z0, (void *)&stride_Z1};
            int err = GpuKernel_call(&%(k_var)s, 3, n_blocks, n_threads, 0, kernel_params);
            %(err_check)s
        }
    }
    """ % locals(), file=sio)
def c_code_reduce_110(self, sio, node, name, x, z, fail):
    """Launch code for pattern 110 on a 3d tensor: one block per
    element of dim 2; threads.x covers dim 1, threads.y packs dim 0."""
    makecall = self._makecall(node, name, x, z, fail)
    print("""
        {
            int verbose = 0;
            size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[1], (size_t) 256), 1, 1};
            while (n_threads[0]*n_threads[1] <= 256)
            {
                if (n_threads[1] > PyGpuArray_DIMS(%(x)s)[0])
                    break;
                n_threads[1] += 1;
            }
            n_threads[1] -= 1;

            size_t n_blocks[3] = {PyGpuArray_DIMS(%(x)s)[2], 1, 1};
            %(makecall)s
        }
        """ % locals(), file=sio)
def c_code_reduce_001(self, sio, node, name, x, z, fail):
    """Launch code for reducing the last axis of a 3d tensor: threads.x
    covers dim 2; the block grid packs dims 0 and 1 (up to 4096)."""
    makecall = self._makecall(node, name, x, z, fail)
    print("""
        {
            int verbose = 0;
            size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[2], (size_t) 256), 1, 1};
            size_t n_blocks[3] = {std::min(PyGpuArray_DIMS(%(x)s)[0], (size_t) 4096), 1, 1};
            while (n_blocks[0] * n_blocks[1] <= 4096)
            {
                if (n_blocks[1] > PyGpuArray_DIMS(%(x)s)[1])
                    break;
                n_blocks[1] += 1;
            }
            n_blocks[1] -= 1;
            %(makecall)s
        }
        """ % locals(), file=sio)
def c_code_reduce_101(self, sio, node, name, x, z, fail):
    """Launch code for pattern 101 on a 3d tensor. Reuses the 1011
    kernel by inserting a dummy size-1 dimension (extra_dims/strides)."""
    makecall = self._makecall(node, name, x, z, fail,
                              extra_dims=[("size_t one = 1;", "(void *) &one")],
                              extra_strides=[("ssize_t sone = 1;", "(void *) &sone")],
                              pattern="1011")
    print("""
        {
            int verbose = 0;
//            size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[3],
//                                            (size_t) 256), 1, 1};
            size_t n_threads[3] = {1, 1, 1};

            while (n_threads[0] * (n_threads[1]+1) <= 256) ++n_threads[1];
            if (n_threads[1] > PyGpuArray_DIMS(%(x)s)[2])
                n_threads[1] = PyGpuArray_DIMS(%(x)s)[2];

            while (n_threads[0] * n_threads[1] * (n_threads[2]+1) <= 256)
                ++n_threads[2];
            if (n_threads[2] > 64)
                n_threads[2] = 64;
            if (n_threads[2] > PyGpuArray_DIMS(%(x)s)[0])
                n_threads[2] = PyGpuArray_DIMS(%(x)s)[0];

            size_t n_blocks[3] = {PyGpuArray_DIMS(%(x)s)[1], 1, 1};
            %(makecall)s
        }
        """ % locals(), file=sio)
def c_code_reduce_111(self, sio, node, name, x, z, fail):
    """Launch code for fully reducing a 3d tensor: one block; thread
    axes x/y/z cover dims 2/1/0 respectively (packed to <=256 threads)."""
    makecall = self._makecall(node, name, x, z, fail)
    print("""
        {
            int verbose = 0;
            size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[2], (size_t) 256), 1, 1};

            //get as many y threads as we can fit
            while (n_threads[0] * n_threads[1] <= 256)
            {
                if (n_threads[1] > PyGpuArray_DIMS(%(x)s)[1])
                    break;
                n_threads[1] += 1;
            }
            n_threads[1] -= 1;

            //get as many z threads as we can fit
            while (n_threads[0] * n_threads[1] * n_threads[2] <= 256)
            {
                if (n_threads[2] > PyGpuArray_DIMS(%(x)s)[0])
                    break;
                n_threads[2] += 1;
            }
            n_threads[2] -= 1;
            //Maximum for Fermi GPU on that dimensions.
            n_threads[2] = std::min(n_threads[2], (size_t)64);

            size_t n_blocks[3] = {1, 1, 1};
            %(makecall)s
        }
        """ % locals(), file=sio)
def c_code_reduce_0011(self, sio, node, name, x, z, fail):
    """Launch code for pattern 0011 on a 4d tensor: block grid packs
    dims 0 and 1; threads.x covers dim 3, threads.y packs dim 2
    (bounded by the shared-memory budget)."""
    makecall = self._makecall(node, name, x, z, fail)
    in_dtype = "npy_" + node.inputs[0].dtype
    out_dtype = "npy_" + node.outputs[0].dtype
    acc_dtype = "npy_" + self._acc_dtype(node.inputs[0].dtype)
    print("""
        {
            int verbose = 0;

            size_t n_blocks[3] = {std::min(PyGpuArray_DIMS(%(x)s)[0], (size_t) 4096), 1, 1};

            while (n_blocks[0] * n_blocks[1] <= 4096 &&
                   n_blocks[1] < PyGpuArray_DIMS(%(x)s)[1])
            {
                n_blocks[1] += 1;
            }

            size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[3], (size_t) 256), 1, 1};
            while (n_threads[0] * n_threads[1] <= 256
                   && n_threads[1] < PyGpuArray_DIMS(%(x)s)[2]
                   && n_threads[0] * n_threads[1] * sizeof(%(acc_dtype)s) <=(15*1024-200))
            {
                n_threads[1] += 1;
            }

            %(makecall)s
        }
        """ % locals(), file=sio)
def c_code_reduce_1111(self, sio, node, name, x, z, fail):
makecall = self._makecall(node, name, x, z, fail)
print("""
{
int verbose = 0;
size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[2], (size_t) 256), 1, 1};
//get as many y threads as we can fit
while (n_threads[0] * n_threads[1] <= 256)
{
if (n_threads[1] > PyGpuArray_DIMS(%(x)s)[1])
break;
n_threads[1] += 1;
}
n_threads[1] -= 1;
//get as many z threads as we can fit
while (n_threads[0] * n_threads[1] * n_threads[2] <= 256)
{
if (n_threads[2] > PyGpuArray_DIMS(%(x)s)[0])
break;
n_threads[2] += 1;
}
n_threads[2] -= 1;
//Maximum for Fermi GPU on that dimensions.
n_threads[2] = std::min(n_threads[2], (size_t)64);
size_t n_blocks[3] = {1, 1, 1};
%(makecall)s
}
""" % locals(), file=sio)
def c_code_reduce_1011(self, sio, node, name, x, z, fail):
makecall = self._makecall(node, name, x, z, fail)
print("""
{
int verbose = 0;
size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[3], (size_t) 256), 1, 1};
while (n_threads[0] * (n_threads[1]+1) <= 256) ++n_threads[1];
if (n_threads[1] > PyGpuArray_DIMS(%(x)s)[2])
n_threads[1] = PyGpuArray_DIMS(%(x)s)[2];
while (n_threads[0] * n_threads[1] * (n_threads[2]+1) <= 256) ++n_threads[2];
if (n_threads[2] > 64)
n_threads[2] = 64;
if (n_threads[2] > PyGpuArray_DIMS(%(x)s)[0])
n_threads[2] = PyGpuArray_DIMS(%(x)s)[0];
size_t n_blocks[3] = {PyGpuArray_DIMS(%(x)s)[1], 1, 1};
%(makecall)s
}
""" % locals(), file=sio)
def c_code_cache_version_apply(self, node):
version = [21] # the version corresponding to the c code in this Op
# now we insert versions for the ops on which we depend...
scalar_node = Apply(
self.scalar_op,
[Scalar(dtype=input.type.dtype)() for input in node.inputs],
[Scalar(dtype=output.type.dtype)() for output in node.outputs])
version.extend(self.scalar_op.c_code_cache_version_apply(scalar_node))
for i in node.inputs + node.outputs:
version.extend(Scalar(dtype=i.type.dtype).c_code_cache_version())
version.extend(self.kernel_version(node))
if all(version):
return tuple(version)
else:
return ()
def gpu_kernels(self, node, nodename):
nd_in = len(self.reduce_mask)
in_dtype = node.inputs[0].dtype
out_dtype = node.outputs[0].dtype
acc_dtype = self._acc_dtype(node.inputs[0].dtype)
flags = Kernel.get_flags(in_dtype, acc_dtype, out_dtype)
in_type = gpuarray.dtype_to_ctype(in_dtype)
out_type = gpuarray.dtype_to_ctype(out_dtype)
acc_type = gpuarray.dtype_to_ctype(acc_dtype)
load_in = load_w(in_dtype)
write_out = write_w(out_dtype)
kernels = []
if all(i == 1 for i in self.reduce_mask):
# this kernel is ok for up to a few thousand elements, but
# it only runs on ONE multiprocessor
reducebuf = self._k_reduce_buf('Z[0]', node, nodename, sub={})
reduce_fct = self._assign_reduce(node, nodename, "myresult",
load_in + "(A[i0])",
{}, True)
reduce_init = self._assign_init(load_in + "(A[0])")
kname = "kernel_reduce_ccontig"
k_var = "kernel_reduce_ccontig_" + nodename
sio = StringIO()
print("""#include "cluda.h"
KERNEL void %(kname)s(
const ga_size d0,
const %(in_type)s *A, const ga_size offset_A,
%(out_type)s *Z, const ga_size offset_Z)
{
const int threadCount = blockDim.x;
const int threadNum = threadIdx.x;
extern __shared__ %(acc_type)s buf[];
%(acc_type)s myresult = %(reduce_init)s;
A = (const %(in_type)s *)(((char *)A)+offset_A);
Z = (%(out_type)s *)(((char *)Z)+offset_Z);
for (int i0 = threadIdx.x; i0 < d0; i0 += blockDim.x)
{
%(reduce_fct)s
}
%(reducebuf)s
}
""" % locals(), file=sio)
params = [
'uintp',
gpuarray.GpuArray, 'uintp',
gpuarray.GpuArray, 'uintp'
]
kernels.append(Kernel(code=sio.getvalue(), name=kname,
params=params, flags=flags, objvar=k_var))
if self.reduce_mask == (1,):
# this kernel is ok for up to a few thousand elements, but
# it only runs on ONE multiprocessor
reducebuf = self._k_reduce_buf('Z[0]', node, nodename, sub={})
reduce_fct = self._assign_reduce(node, nodename, "myresult",
load_in + "(A[i0 * sA0])",
{}, True)
reduce_init = self._assign_init(load_in + "(A[0])")
kname = "kernel_reduce_1"
k_var = "kernel_reduce_1_" + nodename
sio = StringIO()
print("""#include "cluda.h"
KERNEL void %(kname)s(
const ga_size d0,
const %(in_type)s *A, const ga_size offset_A,
const ga_ssize sA0,
%(out_type)s * Z, const ga_size offset_Z)
{
const int threadCount = blockDim.x;
const int threadNum = threadIdx.x;
extern __shared__ %(acc_type)s buf[];
%(acc_type)s myresult = %(reduce_init)s;
A = (const %(in_type)s *)(((char *)A)+offset_A);
Z = (%(out_type)s *)(((char *)Z)+offset_Z);
for (int i0 = threadIdx.x; i0 < d0; i0 += blockDim.x)
{
%(reduce_fct)s
}
%(reducebuf)s
}
""" % locals(), file=sio)
params = [
'uintp',
gpuarray.GpuArray, 'uintp',
'intp',
gpuarray.GpuArray, 'uintp'
]
kernels.append(Kernel(code=sio.getvalue(), name=kname,
params=params, flags=flags, objvar=k_var))
if self.reduce_mask == (1, 1):
# this kernel is ok for up to a few thousand elements, but
# it only runs on ONE multiprocessor
reducebuf = self._k_reduce_buf('Z[0]', node, nodename, sub={})
reduce_fct = self._assign_reduce(node, nodename, "myresult",
load_in + "(A[i0 * sA0 + i1 * sA1])",
{}, True)
reduce_init = self._assign_init(load_in + "(A[0])")
kname = "kernel_reduce_11"
k_var = "kernel_reduce_11_" + nodename
sio = StringIO()
print("""#include "cluda.h"
KERNEL void %(kname)s(
const ga_size d0, const ga_size d1,
const %(in_type)s *A, const ga_size offset_A,
const ga_ssize sA0, const ga_ssize sA1,
%(out_type)s * Z, const ga_size offset_Z)
{
const int threadCount = blockDim.x * blockDim.y;
const int threadNum = threadIdx.y*blockDim.x + threadIdx.x;
extern __shared__ %(acc_type)s buf[];
%(acc_type)s myresult = %(reduce_init)s;
A = (const %(in_type)s *)(((char *)A)+offset_A);
Z = (%(out_type)s *)(((char *)Z)+offset_Z);
for (int i0 = threadIdx.y; i0 < d0; i0 += blockDim.y)
{
for (int i1 = threadIdx.x; i1 < d1; i1 += blockDim.x)
{
%(reduce_fct)s;
}
}
%(reducebuf)s
}
""" % locals(), file=sio)
params = [
'uintp', 'uintp',
gpuarray.GpuArray, 'uintp',
'intp', 'intp',
gpuarray.GpuArray, 'uintp'
]
kernels.append(Kernel(code=sio.getvalue(), name=kname,
params=params, flags=flags, objvar=k_var))
# 01, 011, 0111
if (0 == self.reduce_mask[0] and
all(self.reduce_mask[1:]) and
nd_in in[2, 3, 4]):
# this kernel uses one block for each row.
# threads per block for each element per row.
N_pattern = ''.join(['1'] * (nd_in - 1))
# TODO: is it faster to hardcode sA3, etc. in the later
# code, rather than have the for_* variables declare them
# and the later code use their names?
if nd_in == 2:
for_i1 = "for (int i1 = threadIdx.x; i1 < d1; i1 += blockDim.x)"
first_i1 = 'threadIdx.x'
sA1 = 'sA1'
for_i2 = "int i2=0, sA2=0;"
sA2 = '0'
first_i2 = '0'
for_i3 = "int i3=0, sA3=0;"
sA3 = '0'
first_i3 = '0'
if nd_in == 3:
for_i1 = "for (int i1 = threadIdx.y; i1 < d1; i1 += blockDim.y)"
first_i1 = 'threadIdx.y'
sA1 = 'sA1'
for_i2 = "for (int i2 = threadIdx.x; i2 < d2; i2 += blockDim.x)"
first_i2 = 'threadIdx.x'
sA2 = 'sA2'
for_i3 = "int i3=0, sA3=0;"
first_i3 = 0
sA3 = '0'
if nd_in == 4:
for_i1 = "for (int i1 = threadIdx.z; i1 < d1; i1 += blockDim.z)"
first_i1 = 'threadIdx.z'
sA1 = 'sA1'
for_i2 = "for (int i2 = threadIdx.y; i2 < d2; i2 += blockDim.y)"
first_i2 = 'threadIdx.y'
sA2 = 'sA2'
for_i3 = "for (int i3 = threadIdx.x; i3 < d3; i3 += blockDim.x)"
first_i3 = 'threadIdx.x'
sA3 = 'sA3'
reducebuf = self._k_reduce_buf('Z[i0 * sZ0]', node,
nodename, sub={})
param_dim = ",".join(["const ga_size d%d" % i
for i in xrange(nd_in)])
param_strides = ",".join(["const ga_ssize sA%d" % i
for i in xrange(nd_in)])
decl, kname, params, k_var = self._k_decl(node, nodename)
init = self._k_init(node, nodename)
reduce_init = self._assign_init(load_in + "(A[%(first_i3)s * %(sA3)s + %(first_i2)s * %(sA2)s + %(first_i1)s * %(sA1)s + i0 * sA0])" % locals())
reduce_fct = self._assign_reduce(
node, nodename, "myresult",
load_in + "(A[i3 * sA3 + i2 * sA2 + i1 * sA1 + i0 * sA0])",
{}, True)
sio = StringIO()
print("""#include "cluda.h"
%(decl)s{
%(init)s
for (int i0 = blockIdx.x; i0 < d0; i0 += gridDim.x){
myresult = %(reduce_init)s;
%(for_i1)s{
%(for_i2)s{
%(for_i3)s{
%(reduce_fct)s;
}
}
}
%(reducebuf)s
}
}
""" % locals(), file=sio)
kernels.append(Kernel(code=sio.getvalue(), name=kname,
params=params, flags=flags, objvar=k_var))
if self.reduce_mask == (0, 1, 0) or self.reduce_mask == (1, 0):
# this kernel uses one block for each column,
# threads per block for each element per column.
# TODO: This kernel is pretty inefficient in terms of reading, because if A is
# c_contiguous (typical case) then each warp is accessing non-contigous
# memory (a segment of a column).
reducebuf = self._k_reduce_buf('Z[i0 * sZ0 + i2*sZ1]',
node, nodename, sub={})
reduce_fct = self._assign_reduce(node, nodename, "myresult",
load_in + "(A[i0 * sA0 + i1 * sA1 + i2 * sA2])",
{}, True)
reduce_init = self._assign_init(load_in + "(A[i0 * sA0 + threadIdx.x * sA1 + i2 * sA2])")
kname = "kernel_reduce_010"
k_var = "kernel_reduce_010_" + nodename
sio = StringIO()
print("""#include "cluda.h"
KERNEL void %(kname)s(
const ga_size d0, const ga_size d1, const ga_size d2,
const %(in_type)s *A, const ga_size offset_A,
const ga_ssize sA0, const ga_ssize sA1, const ga_ssize sA2,
%(out_type)s * Z, const ga_size offset_Z,
const ga_ssize sZ0, const ga_ssize sZ1)
{
const int threadCount = blockDim.x;
const int threadNum = threadIdx.x;
extern __shared__ %(acc_type)s buf[];
A = (const %(in_type)s *)(((char *)A)+offset_A);
Z = (%(out_type)s *)(((char *)Z)+offset_Z);
for (int i0 = blockIdx.x; i0 < d0; i0 += gridDim.x)
{
for (int i2 = blockIdx.y; i2 < d2; i2 += gridDim.y)
{
%(acc_type)s myresult = %(reduce_init)s;
for (int i1 = threadIdx.x; i1 < d1; i1 += blockDim.x)
{
%(reduce_fct)s;
}
%(reducebuf)s
}
}
}
""" % locals(), file=sio)
params = [
'uintp', 'uintp', 'uintp',
gpuarray.GpuArray, 'uintp',
'intp', 'intp', 'intp',
gpuarray.GpuArray, 'uintp',
'intp', 'intp'
]
kernels.append(Kernel(code=sio.getvalue(), name=kname,
params=params, flags=flags, objvar=k_var))
if self.reduce_mask in [(0, 1, 0), (1, 0), (1, 0, 0)]:
reduce_fct = self._assign_reduce(node, nodename, "myresult",
load_in + "(X[a * sX0 + b * sX1 + c * sX2])",
{}, True)
reduce_init = self._assign_init(load_in + "(X[a * sX0 + 0 * sX1 + c * sX2])")
kname = "kernel_reduce_010_AD"
k_var = "kernel_reduce_010_AD_" + nodename
sio = StringIO()
print("""#include "cluda.h"
KERNEL void %(kname)s(
const ga_size A, const ga_size B, const ga_size C, const ga_size D,
const %(in_type)s *X, const ga_size offset_X,
const ga_ssize sX0, const ga_ssize sX1, const ga_ssize sX2,
%(out_type)s * Z, const ga_size offset_Z,
const ga_ssize sZ0, const ga_ssize sZ1)
{
const int threadCount = blockDim.x;
const int threadNum = threadIdx.x;
%(acc_type)s myresult = 0;
X = (const %(in_type)s *)(((char *)X)+offset_X);
Z = (%(out_type)s *)(((char *)Z)+offset_Z);
for (int a = blockIdx.x; a < A; a += gridDim.x)
{
for (int i2_D = blockIdx.y; i2_D < D; i2_D += gridDim.y)
{
int c = i2_D * 32 + threadIdx.x;
if (c < C)
{
myresult = %(reduce_init)s;
for (int b = 0; b < B; ++b)
{
%(reduce_fct)s;
}
Z[a * sZ0 + c * sZ1] = %(write_out)s(myresult);
}
}
}
}
""" % locals(), file=sio)
params = [
'uintp', 'uintp', 'uintp', 'uintp',
gpuarray.GpuArray, 'uintp',
'intp', 'intp', 'intp',
gpuarray.GpuArray, 'uintp',
'intp', 'intp'
]
kernels.append(Kernel(code=sio.getvalue(), name=kname,
params=params, flags=flags, objvar=k_var))
if self.reduce_mask == (0, 1, 0):
#
# This kernel is optimized when the inner most dimensions
# have the smallest stride.
# this kernel uses one block for multiple column(up to 32TODO),
# threads per block for each element per column.
# thread.x = dim 2 contiguous
# thread.y = dim 1
# block.x = dim 0
# block.y = dim 1 rest
init = self._k_init(node, nodename)
decl, kname, params, k_var = self._k_decl(node, nodename, pattern="010_inner")
reducebuf = self._k_reduce_buf_multiple('Z[i0 * sZ0 + i2*sZ1]',
node, nodename,
'blockDim.x')
reduce_fct = self._assign_reduce(node, nodename, "myresult",
load_in + "(A[i0 * sA0 + i1 * sA1 + i2 * sA2])",
{}, True)
reduce_init = self._assign_init(load_in + "(A[i0 * sA0 + 0 * sA1 + i2 * sA2])")
sio = StringIO()
print("""#include "cluda.h"
%(decl)s
{
%(init)s
for (int i0 = blockIdx.x; i0 < d0; i0 += gridDim.x)
{
for (int i2 = blockIdx.y*blockDim.x+threadIdx.x; i2 < d2; i2 += gridDim.y*blockDim.x)
{
myresult = %(reduce_init)s;
for (int i1 = threadIdx.y; i1 < d1; i1 += blockDim.y)
{
%(reduce_fct)s;
}
%(reducebuf)s
}
}
}
""" % locals(), file=sio)
kernels.append(Kernel(code=sio.getvalue(), name=kname,
params=params, flags=flags, objvar=k_var))
if self.reduce_mask == (1, 1, 0):
# this kernel uses one block for each column,
# threads per block for each element per column.
# TODO: This kernel is pretty inefficient in terms of reading, because if A is
# c_contiguous (typical case) then each warp is accessing non-contigous
# memory (a segment of a column).
reducebuf = self._k_reduce_buf('Z[blockIdx.x * sZ0]', node, nodename, sub={})
reduce_fct = self._assign_reduce(node, nodename, "myresult",
load_in + "(A[i0 * sA0 + i1 * sA1 + blockIdx.x * sA2])",
{}, True)
reduce_init = self._assign_init(load_in + "(A[blockIdx.x * sA2])")
kname = "kernel_reduce_110"
k_var = "kernel_reduce_110_" + nodename
sio = StringIO()
print("""#include "cluda.h"
KERNEL void %(kname)s(
const ga_size d0, const ga_size d1, const ga_size d2,
const %(in_type)s *A, const ga_size offset_A,
const ga_ssize sA0, const ga_ssize sA1, const ga_ssize sA2,
%(out_type)s * Z, const ga_size offset_Z,
const ga_ssize sZ0)
{
const int threadCount = blockDim.x * blockDim.y;
const int threadNum = threadIdx.y * blockDim.x + threadIdx.x;
extern __shared__ %(acc_type)s buf[];
%(acc_type)s myresult = %(reduce_init)s;
A = (const %(in_type)s *)(((char *)A)+offset_A);
Z = (%(out_type)s *)(((char *)Z)+offset_Z);
for (int i0 = threadIdx.y; i0 < d0; i0 += blockDim.y)
{
for (int i1 = threadIdx.x; i1 < d1; i1 += blockDim.x)
{
%(reduce_fct)s;
}
}
%(reducebuf)s
}
""" % locals(), file=sio)
params = [
'uintp', 'uintp', 'uintp',
gpuarray.GpuArray, 'uintp',
'intp', 'intp', 'intp',
gpuarray.GpuArray, 'uintp',
'intp'
]
kernels.append(Kernel(code=sio.getvalue(), name=kname,
params=params, flags=flags, objvar=k_var))
if self.reduce_mask == (1, 0, 0):
reducebuf = self._k_reduce_buf('Z[i1 * sZ0 + i2 * sZ1]',
node, nodename, sub={})
decl, kname, params, k_var = self._k_decl(node, nodename)
init = self._k_init(node, nodename)
reduce_fct = self._assign_reduce(node, nodename, "myresult",
load_in + "(A[i0 * sA0 + i1 * sA1 + i2 * sA2])",
{}, True)
reduce_init = self._assign_init(load_in + "(A[i1 * sA1 + i2 * sA2])")
sio = StringIO()
print("""#include "cluda.h"
%(decl)s
{
%(init)s
for (int i2 = blockIdx.y; i2 < d2; i2 += gridDim.y)
{
for (int i1 = blockIdx.x; i1 < d1; i1 += gridDim.x)
{
myresult = %(reduce_init)s;
for (int i0 = threadIdx.x; i0 < d0; i0 += blockDim.x)
{
%(reduce_fct)s
}
%(reducebuf)s
}
}
}
""" % locals(), file=sio)
kernels.append(Kernel(code=sio.getvalue(), name=kname,
params=params, flags=flags, objvar=k_var))
if self.reduce_mask == (1, 1, 1):
reducebuf = self._k_reduce_buf('Z[0]', node,
nodename, sub={})
decl, kname, params, k_var = self._k_decl(node, nodename)
init = self._k_init(node, nodename)
reduce_fct = self._assign_reduce(node, nodename, "myresult",
load_in + "(A[i0 * sA0 + i1 * sA1 + i2 * sA2])",
{}, True)
reduce_init = self._assign_init(load_in + "(A[0])")
sio = StringIO()
print("""#include "cluda.h"
%(decl)s
{
%(init)s
myresult = %(reduce_init)s;
for (int i0 = threadIdx.z; i0 < d0; i0 += blockDim.z)
{
for (int i1 = threadIdx.y; i1 < d1; i1 += blockDim.y)
{
for (int i2 = threadIdx.x; i2 < d2; i2 += blockDim.x)
{
%(reduce_fct)s;
}
}
}
%(reducebuf)s
}
""" % locals(), file=sio)
kernels.append(Kernel(code=sio.getvalue(), name=kname,
params=params, flags=flags, objvar=k_var))
if self.reduce_mask == (0, 0, 1):
# this kernel uses one block for each row,
# threads per block for each element per row.
reducebuf = self._k_reduce_buf('Z[i0 * sZ0 + i1 * sZ1]',
node, nodename, sub={})
reduce_fct = self._assign_reduce(node, nodename, "myresult",
load_in + "(A[i0 * sA0 + i1 * sA1 + i2 * sA2])",
{}, True)
reduce_init = self._assign_init(load_in + "(A[i0 * sA0 + i1 * sA1])")
kname = "kernel_reduce_001"
k_var = "kernel_reduce_001_" + nodename
sio = StringIO()
print("""#include "cluda.h"
KERNEL void %(kname)s(
const ga_size d0, const ga_size d1, const ga_size d2,
const %(in_type)s *A, const ga_size offset_A,
const ga_ssize sA0, const ga_ssize sA1, const ga_ssize sA2,
%(out_type)s * Z, const ga_size offset_Z,
const ga_ssize sZ0, const ga_ssize sZ1)
{
const int threadCount = blockDim.x;
const int threadNum = threadIdx.x;
extern __shared__ %(acc_type)s buf[];
A = (const %(in_type)s *)(((char *)A)+offset_A);
Z = (%(out_type)s *)(((char *)Z)+offset_Z);
for (int i0 = blockIdx.x; i0 < d0; i0 += gridDim.x)
{
for (int i1 = blockIdx.y; i1 < d1; i1 += gridDim.y)
{
%(acc_type)s myresult = %(reduce_init)s;
for (int i2 = threadIdx.x; i2 < d2; i2 += blockDim.x)
{
%(reduce_fct)s;
}
%(reducebuf)s
}
}
}
""" % locals(), file=sio)
params = [
'uintp', 'uintp', 'uintp',
gpuarray.GpuArray, 'uintp',
'intp', 'intp', 'intp',
gpuarray.GpuArray, 'uintp',
'intp', 'intp'
]
kernels.append(Kernel(code=sio.getvalue(), name=kname,
params=params, flags=flags, objvar=k_var))
if self.reduce_mask == (0, 0, 1, 1):
# this kernel uses one block for each row,
# threads per block for each element per row.
reducebuf = self._k_reduce_buf('Z[i0 * sZ0 + i1 * sZ1]',
node, nodename, sub={})
decl, kname, params, k_var = self._k_decl(node, nodename)
init = self._k_init(node, nodename)
reduce_fct = self._assign_reduce(node, nodename, "myresult",
load_in + "(A[i0 * sA0 + i1 * sA1 + i2 * sA2 + i3 * sA3])",
{}, True)
reduce_init = self._assign_init(load_in + "(A[i0 * sA0 + i1 * sA1])")
sio = StringIO()
print("""#include "cluda.h"
%(decl)s
{
%(init)s
for (int i0 = blockIdx.x; i0 < d0; i0 += gridDim.x)
{
for (int i1 = blockIdx.y; i1 < d1; i1 += gridDim.y)
{
%(acc_type)s myresult = %(reduce_init)s;
for (int i2 = threadIdx.y; i2 < d2; i2 += blockDim.y)
{
for (int i3 = threadIdx.x; i3 < d3; i3 += blockDim.x)
{
%(reduce_fct)s;
}
}
%(reducebuf)s
}
}
}
""" % locals(), file=sio)
kernels.append(Kernel(code=sio.getvalue(), name=kname,
params=params, flags=flags, objvar=k_var))
if self.reduce_mask == (0, 1, 0, 1):
# this kernel uses one block for each row,
# threads per block for each element per row.
reducebuf = self._k_reduce_buf('Z[i0 * sZ0 + i2 * sZ1]',
node, nodename, sub={})
decl, kname, params, k_var = self._k_decl(node, nodename)
init = self._k_init(node, nodename)
reduce_fct = self._assign_reduce(node, nodename, "myresult",
load_in + "(A[i0 * sA0 + i1 * sA1 + i2 * sA2 + i3 * sA3])",
{}, True)
reduce_init = self._assign_init(load_in + "(A[i0 * sA0 + i2 * sA2])")
sio = StringIO()
print("""#include "cluda.h"
%(decl)s
{
%(init)s
for (int i0 = blockIdx.x; i0 < d0; i0 += gridDim.x)
{
for (int i2 = blockIdx.y; i2 < d2; i2 += gridDim.y)
{
%(acc_type)s myresult = %(reduce_init)s;
for (int i1 = threadIdx.y; i1 < d1; i1 += blockDim.y)
{
for (int i3 = threadIdx.x; i3 < d3; i3 += blockDim.x)
{
%(reduce_fct)s;
}
}
%(reducebuf)s
}
}
}
""" % locals(), file=sio)
kernels.append(Kernel(code=sio.getvalue(), name=kname,
params=params, flags=flags, objvar=k_var))
if self.reduce_mask == (1, 1, 1, 1):
reducebuf = self._k_reduce_buf('Z[0]', node, nodename,
sub={})
decl, kname, params, k_var = self._k_decl(node, nodename)
init = self._k_init(node, nodename)
reduce_fct = self._assign_reduce(node, nodename, "myresult",
load_in + "(A[i0 * sA0 + i1 * sA1 + i2 * sA2 + i3 * sA3])",
{}, True)
reduce_init = self._assign_init(load_in + "(A[0])")
sio = StringIO()
print("""#include "cluda.h"
%(decl)s
{
%(init)s
myresult = %(reduce_init)s;
for (int i0 = 0; i0 < d0; i0++)
for (int i1 = threadIdx.z; i1 < d1; i1 += blockDim.z)
{
for (int i2 = threadIdx.y; i2 < d2; i2 += blockDim.y)
{
for (int i3 = threadIdx.x; i3 < d3; i3 += blockDim.x)
{
%(reduce_fct)s;
}
}
}
%(reducebuf)s
}
""" % locals(), file=sio)
kernels.append(Kernel(code=sio.getvalue(), name=kname,
params=params, flags=flags, objvar=k_var))
if self.reduce_mask == (1, 0, 1, 1) or self.reduce_mask == (1, 0, 1):
reducebuf = self._k_reduce_buf('Z[blockIdx.x*sZ0]',
node, nodename, sub={})
reduce_fct = self._assign_reduce(node, nodename, "myresult",
load_in + "(A[i0 * sA0 + blockIdx.x * sA1 + i2 * sA2 + i3 * sA3])",
{}, True)
reduce_init = self._assign_init(load_in + "(A[blockIdx.x * sA1])")
kname = "kernel_reduce_1011"
k_var = "kernel_reduce_1011_" + nodename
sio = StringIO()
print("""#include "cluda.h"
KERNEL void %(kname)s(
const ga_size d0, const ga_size d1, const ga_size d2, const ga_size d3,
const %(in_type)s *A, const ga_size offset_A,
const ga_ssize sA0, const ga_ssize sA1, const ga_ssize sA2, const ga_ssize sA3,
%(out_type)s * Z, const ga_size offset_Z,
const ga_ssize sZ0)
{
const int threadCount = blockDim.x * blockDim.y * blockDim.z;
const int threadNum = threadIdx.z * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;
extern __shared__ %(acc_type)s buf[];
%(acc_type)s myresult = %(reduce_init)s;
A = (const %(in_type)s *)(((char *)A)+offset_A);
Z = (%(out_type)s *)(((char *)Z)+offset_Z);
for (int i0 = threadIdx.z; i0 < d0; i0 += blockDim.z)
{
for (int i2 = threadIdx.y; i2 < d2; i2 += blockDim.y)
{
for (int i3 = threadIdx.x; i3 < d3; i3 += blockDim.x)
{
%(reduce_fct)s;
}
}
}
%(reducebuf)s
}
""" % locals(), file=sio)
params = [
'uintp', 'uintp', 'uintp', 'uintp',
gpuarray.GpuArray, 'uintp',
'intp', 'intp', 'intp', 'intp',
gpuarray.GpuArray, 'uintp',
'intp'
]
kernels.append(Kernel(code=sio.getvalue(), name=kname,
params=params, flags=flags, objvar=k_var))
return kernels
class GpuErfinv(Erfinv):
"""
Inverse error function for GPU.
"""
def c_headers(self):
return ['math_functions.h', 'cublas_v2.h']
def c_code(self, node, name, inp, out, sub):
x, = inp
z, = out
if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type)
# NB: CUDA erfinv function (GPU op) returns NaN if x not in [-1;1],
# while `scipy.special.erfinv` (CPU op) returns an infinite (-inf if x < -1, +inf if x > 1).
# For consistency of CPU and GPU ops, we wrap the CUDA erfinv in the following conditions
# to ensure that GPU op returns the same values as CPU op.
return "%(z)s = (%(x)s <= -1) ? erfinv(-1.0): ((%(x)s >= 1) ? erfinv(1.0): erfinv(%(x)s));" % locals()
gpu_erfinv = GpuErfinv(upgrade_to_float_no_complex, name='gpu_erfinv')
class GpuErfcinv(Erfcinv):
"""
Inverse complementary error function for GPU.
"""
def c_headers(self):
return ['math_functions.h', 'cublas_v2.h']
def c_code(self, node, name, inp, out, sub):
x, = inp
z, = out
if node.inputs[0].type in complex_types:
raise NotImplementedError('type not supported', type)
# NB: CUDA erfcinv function (GPU op) returns NaN if x not in [0;2],
# while `scipy.special.erfcinv` (CPU op) returns an infinite (+inf if x < 0, -inf if x > 2).
# For consistency of CPU and GPU ops, we wrap the CUDA erfcinv in the following conditions
# to ensure that GPU op returns the same values as CPU op.
return "%(z)s = (%(x)s <= 0) ? erfcinv(0.0): ((%(x)s >= 2) ? erfcinv(2.0): erfcinv(%(x)s));" % locals()
gpu_erfcinv = GpuErfcinv(upgrade_to_float_no_complex, name='gpu_erfcinv')
# Caching GpuCAReduceCuda
def gpu_ca_reduce_cuda(scalar_op, axis=None, reduce_mask=None, dtype=None, acc_dtype=None,
pre_scalar_op=None):
key = (scalar_op, axis, reduce_mask, dtype, acc_dtype,
pre_scalar_op)
if key not in gpu_ca_reduce_cuda.cache:
gpu_ca_reduce_cuda.cache[key] = GpuCAReduceCuda(scalar_op, axis, reduce_mask, dtype,
acc_dtype, pre_scalar_op)
return gpu_ca_reduce_cuda.cache[key]
gpu_ca_reduce_cuda.cache = {}
class GpuCAReduceCPY(GpuKernelBase, HideC, CAReduceDtype):
"""
CAReduce that reuse the python code from gpuarray.
"""
def __init__(self, scalar_op, axis=None, dtype=None, acc_dtype=None):
if not hasattr(scalar_op, 'identity'):
raise ValueError("No identity on scalar op")
CAReduceDtype.__init__(self, scalar_op, axis=axis, dtype=dtype,
acc_dtype=acc_dtype)
def __str__(self):
ax = ''
if self.axis is not None:
ax = '{%s}' % (', '.join(str(x) for x in self.axis),)
return "GpuReduce{%s}%s" % (self.scalar_op, ax)
def make_node(self, input):
ctx_name = infer_context_name(input)
res = CAReduceDtype.make_node(self, input)
input = as_gpuarray_variable(input, ctx_name)
otype = GpuArrayType(dtype=res.outputs[0].dtype,
broadcastable=res.outputs[0].broadcastable,
context_name=ctx_name)
if res.op.axis is not None:
redux = []
for i in range(len(input.type.broadcastable)):
redux.append(i in res.op.axis)
# since redux is just another way to describe what is in axis
# it doesn't need to be compared in __eq__ or __hash__
res.op.redux = redux
return Apply(res.op, [input], [otype()])
def get_params(self, node):
return node.outputs[0].type.context
def prepare_node(self, node, storage_map, compute_map, impl):
# cache the kernel object
self.get_kernel_cache(node)
def get_kernel_cache(self, node):
attr = '@cache_reduction_k'
if self.axis is None:
redux = [True] * node.inputs[0].ndim
else:
redux = self.redux
if not hasattr(node, attr):
acc_dtype = getattr(self, 'acc_dtype', None)
if acc_dtype is None:
acc_dtype = node.outputs[0].type.dtype
if any(redux):
setattr(node, attr, self.generate_kernel(node, acc_dtype,
redux))
if any(redux):
return getattr(node, attr)
def gpu_kernels(self, node, name):
if not any(getattr(self, 'redux', [node.inputs[0].ndim != 0])):
# Some OpenCL compilers do not accept no-arguments empty kernels
src = "#include \"cluda.h\"\nKERNEL void reduk(GLOBAL_MEM float *a) { a[0] = 0; }"
params = ['float32']
else:
k = self.get_kernel_cache(node)
_, src, _, _ = k._get_basic_kernel(k.init_local_size,
node.inputs[0].ndim)
nd = node.inputs[0].ndim
params = ['uint32', gpuarray.GpuArray, 'uint32']
params.extend('uint32' for _ in range(nd))
params.append(gpuarray.GpuArray)
params.append('uint32')
params.extend('int32' for _ in range(nd))
acc_dtype = getattr(self, 'acc_dtype', None)
if acc_dtype is None:
acc_dtype = node.outputs[0].type.dtype
return [Kernel(code=src, name="reduk", params=params,
flags=Kernel.get_flags(node.inputs[0].type.dtype,
acc_dtype,
node.outputs[0].type.dtype),
objvar='k_reduk_' + name)]
def c_code(self, node, name, inp, out, sub):
if not any(getattr(self, 'redux', [node.inputs[0].ndim != 0])):
# We special case the no-reduction case since the gpu
# kernel has trouble handling it.
return """
Py_XDECREF(%(out)s);
%(out)s = pygpu_copy(%(inp)s, GA_ANY_ORDER);
if (!%(out)s) {
%(fail)s
}
""" % dict(out=out[0], inp=inp[0], fail=sub['fail'])
k = self.get_kernel_cache(node)
_, src, _, ls = k._get_basic_kernel(k.init_local_size,
node.inputs[0].ndim)
if self.axis is None:
redux = [True] * node.inputs[0].ndim
else:
redux = self.redux
acc_dtype = getattr(self, 'acc_dtype', None)
if acc_dtype is None:
acc_dtype = node.outputs[0].type.dtype
input = inp[0]
output = out[0]
nd_out = node.outputs[0].ndim
code = """
size_t gs = 1;
size_t ls;
unsigned int n = 1;
unsigned int proxy_dim[%(nd_in)s];
unsigned int proxy_off;
int proxy_str[%(nd_in)s];
void *args[%(n_args)s];
PyGpuArrayObject *tmp;
int err;
""" % dict(n_args=4 + (node.inputs[0].ndim * 2), nd_in=node.inputs[0].ndim)
if nd_out != 0:
code += """
size_t out_dims[%(nd_out)s];
int need_out = %(output)s == NULL || %(output)s->ga.nd != %(nd_out)s;
""" % dict(nd_out=nd_out, output=output)
j = 0
for i in range(node.inputs[0].ndim):
if not self.redux[i]:
code += """
out_dims[%(j)s] = %(input)s->ga.dimensions[%(i)s];
if (!need_out)
need_out |= %(output)s->ga.dimensions[%(j)s] != out_dims[%(j)s];
""" % dict(j=j, i=i, input=input, output=output)
j += 1
code += """
if (need_out) {
%(output)s = pygpu_empty(%(nd_out)s, out_dims, %(out_type)s, GA_C_ORDER, %(ctx)s, Py_None);
if (!%(output)s) {
%(fail)s
}
}
""" % dict(output=output, nd_out=nd_out, fail=sub['fail'],
ctx=sub['params'],
out_type=dtype_to_typecode(node.outputs[0].type.dtype))
else:
code += """
if (%(output)s == NULL || %(output)s->ga.nd != 0) {
Py_XDECREF(%(output)s);
%(output)s = pygpu_empty(0, NULL, %(out_type)s, GA_C_ORDER,
%(ctx)s, Py_None);
if (!%(output)s) {
%(fail)s
}
}
""" % dict(output=output, fail=sub['fail'], ctx=sub['params'],
out_type=dtype_to_typecode(node.outputs[0].type.dtype))
if acc_dtype != node.outputs[0].type.dtype:
code += """
tmp = pygpu_empty(%(output)s->ga.nd, %(output)s->ga.dimensions,
%(acc_type)s, GA_C_ORDER, %(ctx)s, Py_None);
if (!tmp) %(fail)s
""" % dict(output=output, fail=sub['fail'], ctx=sub['params'],
acc_type=dtype_to_typecode(acc_dtype))
else:
code += """
tmp = %(output)s;
Py_INCREF(tmp);
""" % dict(output=output)
# We need the proxies since we are passing a pointer to the
# data into the call and therefore we need a real copy of the
# data in the proper type.
code += """
args[0] = &n;
args[1] = tmp->ga.data;
args[2] = &tmp->ga.offset;
""" % dict(output=output)
p = 3
for i in range(node.inputs[0].ndim):
code += """
proxy_dim[%(i)s] = %(input)s->ga.dimensions[%(i)s];
args[%(p)s] = &proxy_dim[%(i)s];
n *= %(input)s->ga.dimensions[%(i)s];
""" % dict(i=i, p=p, input=input)
p += 1
if not redux[i]:
code += "gs *= %(input)s->ga.dimensions[%(i)s];" % dict(input=input, i=i)
code += """
args[%(p)s] = %(input)s->ga.data;
proxy_off = %(input)s->ga.offset;
args[%(p)s+1] = &proxy_off;
""" % dict(p=p, input=input)
p += 2
for i in range(node.inputs[0].ndim):
code += """
proxy_str[%(i)s] = %(input)s->ga.strides[%(i)s];
args[%(p)s] = &proxy_str[%(i)s];
""" % dict(p=p, i=i, input=input)
p += 1
code += """
if (gs == 0) gs = 1;
n /= gs;
ls = %(ls)s;
err = GpuKernel_call(&%(k_var)s, 1, &gs, &ls, 0, args);
if (err != GA_NO_ERROR) {
PyErr_Format(PyExc_RuntimeError,
"gpuarray error: GpuCAReduceCPY: %%s.",
GpuKernel_error(&%(k_var)s, err));
%(fail)s
}
if (%(cast_out)d) {
err = GpuArray_move(&%(output)s->ga, &tmp->ga);
Py_XDECREF(tmp);
if (err != GA_NO_ERROR) {
PyErr_Format(PyExc_RuntimeError,
"gpuarray error: GpuCAReduceCPY [cast]: %%s.",
GpuArray_error(&tmp->ga, err));
%(fail)s
}
} else {
Py_XDECREF(%(output)s);
%(output)s = tmp;
}
""" % dict(k_var='k_reduk_' + name,
ls=ls, fail=sub['fail'], output=output, input=input,
cast_out=bool(acc_dtype != node.outputs[0].type.dtype))
return code
def c_code_cache_version_apply(self, node):
return (4, self.kernel_version(node))
def generate_kernel(self, node, odtype, redux):
if isinstance(self.scalar_op, scalar.basic.Add):
reduce_expr = "a + b"
elif isinstance(self.scalar_op, scalar.basic.Mul):
reduce_expr = "a * b"
else:
raise NotImplementedError()
return ReductionKernel(node.inputs[0].type.context, odtype,
self.scalar_op.identity, reduce_expr, redux,
arguments=[make_argument(node.inputs[0], 'a')],
init_nd=node.inputs[0].ndim)
def perform(self, node, inp, out, ctx):
input, = inp
output, = out
if self.axis is None:
redux = [True] * input.ndim
else:
redux = self.redux
if any(redux):
output[0] = self.get_kernel_cache(node)(input).astype(
copy=False, dtype=node.outputs[0].type.dtype)
else:
output[0] = pygpu.gpuarray.array(input, copy=True,
dtype=node.outputs[0].type.dtype,
context=ctx)
# To allow reloading old pickled files
GpuCAReduce = GpuCAReduceCPY
| 40.744876 | 156 | 0.480843 |
501c06c7ba0990f65237272f1df91e10553de33e | 1,690 | py | Python | tests/test_ingestion.py | VestiDev/ml-powered-applications-2020-book | 4dcfdeb42cdce47406985dcbf8a0533cc086cd20 | [
"MIT"
] | 542 | 2019-06-11T20:15:11.000Z | 2022-03-30T00:30:05.000Z | tests/test_ingestion.py | VestiDev/ml-powered-applications-2020-book | 4dcfdeb42cdce47406985dcbf8a0533cc086cd20 | [
"MIT"
] | 84 | 2020-06-18T13:32:05.000Z | 2021-08-02T13:18:27.000Z | tests/test_ingestion.py | VestiDev/ml-powered-applications-2020-book | 4dcfdeb42cdce47406985dcbf8a0533cc086cd20 | [
"MIT"
] | 180 | 2019-04-15T01:47:32.000Z | 2022-03-13T13:58:04.000Z | import sys
import os
from pathlib import Path
import pandas as pd
# Needed for pytest to resolve imports properly
myPath = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, myPath + "/../")
from ml_editor.data_ingestion import parse_xml_to_csv
# Column name used for the derived body-text length feature
TEXT_LENGTH_FIELD = "text_len"

# We defined the features required at the top level of our test
REQUIRED_COLUMNS = [
    "Id",
    "AnswerCount",
    "PostTypeId",
    "AcceptedAnswerId",
    "Body",
    "body_text",
    "Title",
    "Score",
]

# Acceptable interval created based on data exploration
ACCEPTABLE_TEXT_LENGTH_MEANS = pd.Interval(left=20, right=2000)
def get_fixture_df():
    """
    Parse the bundled MiniPosts.xml fixture and return it as a DataFrame
    :return: DataFrame produced by parse_xml_to_csv
    """
    fixture_dir = Path(os.path.dirname(__file__))
    fixture_file = fixture_dir / Path("fixtures/MiniPosts.xml")
    return parse_xml_to_csv(fixture_file)
def test_parser_returns_dataframe():
    """
    The XML parser must produce a pandas DataFrame
    """
    assert isinstance(get_fixture_df(), pd.DataFrame)
def test_feature_columns_exist():
    """
    Every required feature column must be present in the parsed DataFrame
    """
    df = get_fixture_df()
    missing = [column for column in REQUIRED_COLUMNS if column not in df.columns]
    assert not missing
def test_features_not_all_null():
    """
    No required column may consist entirely of null values
    """
    df = get_fixture_df()
    for column in REQUIRED_COLUMNS:
        assert df[column].notnull().any()
def test_text_mean():
    """
    Validate that the mean body-text length matches exploration expectations.

    Uses the module-level TEXT_LENGTH_FIELD constant (the original hard-coded
    the "text_len" literal, defeating the purpose of declaring the constant).
    """
    df = get_fixture_df()
    df[TEXT_LENGTH_FIELD] = df["body_text"].str.len()
    text_col_mean = df[TEXT_LENGTH_FIELD].mean()
    assert text_col_mean in ACCEPTABLE_TEXT_LENGTH_MEANS
| 22.837838 | 71 | 0.689349 |
c3ae4f9c7794950ee8f6161b20744301d7104261 | 8,879 | py | Python | main/Formalization.py | Vul4Vendetta/Vul_Tech | 5d532442e6938a4ace8c30b526b477a41352f455 | [
"MIT"
] | 5 | 2019-10-12T06:37:31.000Z | 2019-10-28T05:24:15.000Z | main/Formalization.py | RosenZhu/Vul_Tech | 5ae99c2fe606143929dfe3669af17b6a871b1a18 | [
"MIT"
] | 1 | 2019-10-17T03:41:37.000Z | 2019-10-17T03:41:37.000Z | main/Formalization.py | RosenZhu/Vul_Tech | 5ae99c2fe606143929dfe3669af17b6a871b1a18 | [
"MIT"
] | 2 | 2018-12-08T10:04:54.000Z | 2019-06-26T02:52:20.000Z | import os
from xml.dom.minidom import parse
import xml.dom.minidom
#======================== checksum claims and definition =======================
# switch for checksum
# parameters:
# checkusm_type -> the checksum_type
def switchForChecksum(checksum_type):
    """Map a checksum type id to its [claim, definition] C-snippet pair.

    :param checksum_type: integer checksum scheme identifier
    :return: [claim, definition] list for known types, or the int 0 for
             unknown types (callers must check for that sentinel)
    """
    dispatch = {
        1: [remainder_check_claim, remainder_check_definition],
        0: [remainder_check_claim, remainder_check_definition],
    }
    return dispatch.get(checksum_type, 0)
# the remainder_check ----------- type = 1
remainder_check_claim = "int remainder_check(char* parameter_1,int checksum_div,int length);\n\n"
remainder_check_definition = "\nint remainder_check(char* parameter_1,int checksum_div,int length)\n"
remainder_check_definition+= "{\n"
remainder_check_definition+= " if(strlen(parameter_1)!=length) return 0;\n"
remainder_check_definition+= " int sum = 0;\n"
remainder_check_definition+= " for(int i=0;i<length;i++) sum = sum + parameter_1[i];\n"
remainder_check_definition+= " if((sum % checksum_div==0)) return 1;\n"
remainder_check_definition+= " else return 0;\n"
remainder_check_definition+= "}\n\n"
#======================== checksum claims and definition ends =======================
#======================== vulnerability track ==================================
# get the track from program entrance to vulnerability
# parameters:
# vul_path -> the vulnerability dataflow path
# nodeSet -> all function calls
def getTrack(vul_path, nodeSet):
    """Collect the control-flow trace from program entry to the vulnerability.

    For every function node along the dataflow path, records each input check
    (an "if" statement containing "controller4vul_") encountered before the
    call into the next node on the path; for the final (vulnerable) node it
    records the checks leading up to the "Vul/KeyStatement" marker.

    The two dead `target_line` assignments from the original were removed:
    the variable was never read.

    :param vul_path: ordered list of nodeSet keys, entry node first
    :param nodeSet: mapping of key -> {"name": str, "cfv": list} records
    :return: list of [function_name, [trace entries...]] pairs
    """
    track = []
    # trace every intermediate node up to (but excluding) the vulnerable one
    for index in range(0, len(vul_path[:-1])):
        node_track = []
        for i in range(0, len(nodeSet[vul_path[index]]["cfv"])):
            # record input checks on the way
            if "controller4vul_" in nodeSet[vul_path[index]]["cfv"][i] and "if" in nodeSet[vul_path[index]]["cfv"][i]:
                node_track.append(nodeSet[vul_path[index]]["cfv"][i])
            # stop at the call into the next node on the path
            if nodeSet[vul_path[index + 1]]["name"] in nodeSet[vul_path[index]]["cfv"][i]:
                node_track.append("call " + nodeSet[vul_path[index + 1]]["name"])
                break
        track.append([nodeSet[vul_path[index]]["name"], node_track])
    # the last node on the dataflow path hides the vulnerability itself
    node_track = []
    vul_node = nodeSet[vul_path[-1]]
    for i in range(0, len(vul_node["cfv"])):
        if "controller4vul_" in vul_node["cfv"][i] and "if" in vul_node["cfv"][i]:
            node_track.append(vul_node["cfv"][i])
        if "Vul/KeyStatement" in vul_node["cfv"][i]:
            break
    node_track.append("Vulnerability")
    track.append([vul_node["name"], node_track])
    return track
# get the vulnerability track and transform it to string content
# parameters:
# vul_path -> the vulnerability path
# nodeSet -> all function nodes
def getTrackContent(vul_path,nodeSet):
    """Render the vulnerability track as a human-readable report string.

    :param vul_path: the vulnerability path (ordered nodeSet keys)
    :param nodeSet: all function nodes
    :return: banner-delimited multi-line report text
    """
    track = getTrack(vul_path,nodeSet) # get the vulnerability track
    content = ""
    content +="===================== vulnerability track =============================\n\n"
    # write the track content: one section per function on the path
    for line in track:
        content += "The function name: "+line[0]+"\n"
        for item in line[1]:
            content += "    "+ item + "\n"
        content+="\n"
    content += "\n===================== vulnerability track Ends ============================="
    return content
#======================== vulnerability track ends ==============================
# delete the vulnerability marks in cfv
# parameters:
# cfv -> the function node cfv
def cleanVulMark(cfv):
    """Strip the first 'Vul/KeyStatement' marker from a node's cfv, in place.

    :param cfv: list of statements/markers for one function node
    :return: the same list object, with the first marker removed if present
    """
    if "Vul/KeyStatement" in cfv:
        cfv.remove("Vul/KeyStatement")
    return cfv
# get all dependencies for this program
# parameters:
# GrammarTree -> the GrammarTree xml file path
# vul_node -> the selected vulnerability class node in VulLib xml file
def getIncludingList(GrammarTree, vul_node):
    """Collect all header dependencies for the generated program.

    Starts from the stdio/stdlib baseline, adds headers declared on the
    grammar tree's <functionLib> elements, then headers required by the
    selected vulnerability ('-' separates multiple header names).

    :param GrammarTree: path (or file object) of the GrammarTree xml file
    :param vul_node: selected vulnerability element from the VulLib xml
    :return: de-duplicated header list, in first-seen order
    """
    including_list = ["stdio.h", "stdlib.h"]  # the base dependencies

    # headers declared by the grammar tree's function libraries
    document = xml.dom.minidom.parse(GrammarTree).documentElement
    root = document.getElementsByTagName("root")[0]
    for funcLib in root.getElementsByTagName("functionLib"):
        if funcLib.hasAttribute("header"):
            header = funcLib.getAttribute("header")
            if header not in including_list:
                including_list.append(header)

    # headers required by the chosen vulnerability class
    if vul_node.hasAttribute("header"):
        vul_includings = vul_node.getAttribute("header")
        if "-" in vul_includings:
            # vulnerability needs more than one header
            for header in vul_includings.split("-"):
                if header not in including_list:
                    including_list.append(header)
        elif vul_includings != "" and vul_includings not in including_list:
            # single (possibly empty) header declaration
            including_list.append(vul_includings)
    return including_list
# get the header content for this program
# the header includes dependencies
# parameters:
# GrammarTree -> the GrammarTree xml file path
# vul_node -> the selected vulnerability class node in VulLib xml file
def header4program(GrammarTree, vul_node):
    """Render the #include header section for the generated program.

    :param GrammarTree: path of the GrammarTree xml file
    :param vul_node: selected vulnerability element from the VulLib xml
    :return: one '#include<...>' line per dependency plus trailing blank lines
    """
    includes = getIncludingList(GrammarTree, vul_node)
    header_content = "".join("#include<" + name + ">\n" for name in includes)
    return header_content + "\n\n\n"
# transform the claims of function into string content
# parameters:
# nodeSet -> all function nodes
# index -> the function node index of all function nodes
def Definition2Code(nodeSet, index):
    """Build the C-style declaration string for one function node.

    A full_definition already containing '(' is taken verbatim; otherwise the
    declaration is assembled from the return type and comma-separated parameter
    spec, naming parameters 'parameter_<n>'. The special spec
    'controller4unique' becomes a fixed 'int uni_para' argument (kept to make
    the dataflow unique) and does not advance the parameter counter; 'void'
    specs are dropped.

    :param nodeSet: all function nodes
    :param index: key of the node to render
    :return: declaration string, e.g. 'int foo(int a)'
    """
    full_def = nodeSet[index]["full_definition"]
    if "(" in full_def:
        # already a complete declaration: use as-is
        return full_def
    func_name = nodeSet[index]["name"]
    ret_type = full_def.split(func_name)[0]
    arg_spec = full_def.split(func_name)[1]
    pieces = [ret_type + func_name + "("]
    next_index = 1
    for para in arg_spec.split(","):
        if para == "controller4unique":
            # uniqueness controller: fixed name, counter not advanced
            pieces.append("int uni_para,")
        else:
            if para.strip() != "void":
                pieces.append(para + " parameter_" + str(next_index) + ",")
            next_index += 1
    definition = "".join(pieces)
    if definition.endswith("("):
        # no parameters were emitted
        return definition + ")"
    # drop the trailing comma before closing
    return definition[:-1] + ")"
# transform the node cfv to string content
# parameters:
# nodeSet -> all function nodes
# index -> the function node index
def Node2Code(nodeSet,index):
    """Render one function node's cfv into C source text.

    String entries are braces or statements; '{'/'}' adjust a tab-based
    indentation level. List entries are runs of statements, each emitted with
    a ';\\n' terminator. The declaration from Definition2Code is prepended.

    NOTE(review): plain string statements and braces are appended WITHOUT a
    trailing newline here — presumably those cfv strings already carry their
    own newlines; confirm against the cfv producer.

    :param nodeSet: all function nodes
    :param index: key of the node to render
    :return: C source text for the function
    """
    cfv = nodeSet[index]["cfv"]
    retract = ""   # current indentation (one '\t' per open brace)
    content = ""
    prefix = "parameter_"
    # code content
    for i in range(0,len(cfv)):
        if not isinstance(cfv[i], (list)): # if find a not-list content
            if cfv[i].startswith("{"): # if find a '{'
                cfv[i] = "{"
                content+=retract+"{"
                retract+="\t" # fix the retract
            else:
                if cfv[i].startswith("}"): # if find a '}'
                    if len(retract)>0:
                        retract=retract[:-1] # fix the retract
                    content+=retract+"}"
                else:
                    content+=retract+cfv[i] # if find a statement, write it into content
        else: # if find a list
            for line in cfv[i]:
                content+=retract+line+";\n"
    content = content[:-1]
    content += '\n'
    claim = Definition2Code(nodeSet,index)
    content = claim + '\n' + content
    return content
| 38.107296 | 124 | 0.59804 |
6d398497b636ee8ec0839af13b3b4412143c3125 | 1,118 | py | Python | hexdata.py | ManualDoCodigo/pyhexeditor | 211cc360d468de98367cfd5b4972e7fa3da46712 | [
"MIT"
] | null | null | null | hexdata.py | ManualDoCodigo/pyhexeditor | 211cc360d468de98367cfd5b4972e7fa3da46712 | [
"MIT"
] | null | null | null | hexdata.py | ManualDoCodigo/pyhexeditor | 211cc360d468de98367cfd5b4972e7fa3da46712 | [
"MIT"
] | null | null | null | from PyQt5 import QtCore
class HexData:
    """Byte-level wrapper around a QtCore.QByteArray for the hex editor.

    All mutating operations keep the underlying array the same length except
    insert(); remove() zero-fills rather than deleting bytes.
    """

    def __init__(self):
        # backing store; a QByteArray once setData() has been called
        self.data = None

    def __len__(self):
        # NOTE(review): relies on QByteArray truthiness — presumably an empty
        # array is falsy, so this also returns 0 for an empty (non-None) array.
        if self.data:
            return self.data.size()
        return 0

    def __getitem__(self, index):
        """Return the byte at `index` as an int (QByteArray indexing yields bytes)."""
        return int.from_bytes(self.data[index], "little")

    def __setitem__(self, index, data):
        """Overwrite the single byte at `index` with the int value `data`."""
        self.data.replace(index, 1, bytes([data]))

    def replaceWithValue(self, pos, size, value):
        """Fill `size` bytes starting at `pos` with the low byte of `value`."""
        values = bytearray([value & 0xFF] * size)
        self.data.replace(pos, size, QtCore.QByteArray(values))

    def insert(self, pos, data):
        """Insert `data` at `pos`, growing the array."""
        self.data.insert(pos, data)

    def remove(self, pos, size):
        """Zero out `size` bytes at `pos` — the array is NOT shortened."""
        values = bytearray(size)
        self.data.replace(pos, size, QtCore.QByteArray(values))

    def setData(self, data):
        """Adopt `data` (bytearray/bytes are copied into a QByteArray)."""
        if isinstance(data, (bytearray, bytes)):
            self.data = QtCore.QByteArray(data)
        elif isinstance(data, (QtCore.QByteArray)):
            self.data = data
        else:
            raise ValueError("Invalid Data Format. Needs to be a bytearray, bytes or QByteArray.")

    def getData(self):
        """Return the raw bytes of the backing QByteArray."""
        return self.data.data()
| 27.95 | 98 | 0.611807 |
027ed41dfcd1bd1d349d714d18db285890228f1b | 682 | py | Python | tests/rootfinders/test_newton.py | timofeymukha/wallriori | a24961da70f79fd51cd0ab70a9bbeac2d939103b | [
"MIT"
] | null | null | null | tests/rootfinders/test_newton.py | timofeymukha/wallriori | a24961da70f79fd51cd0ab70a9bbeac2d939103b | [
"MIT"
] | null | null | null | tests/rootfinders/test_newton.py | timofeymukha/wallriori | a24961da70f79fd51cd0ab70a9bbeac2d939103b | [
"MIT"
] | 1 | 2019-03-20T22:41:47.000Z | 2019-03-20T22:41:47.000Z | # This file is part of wallriori
# (c) Timofey Mukha
# The code is released under the MIT Licence.
# See LICENCE.txt and the Legal section in the README for more information
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from wallriori.rootfinders import Newton
from numpy.testing import assert_allclose
def f(x):
    """Quadratic test function: f(x) = x^2, whose only root is 0."""
    return x * x
def d(x):
    """Analytic derivative of f: d(x) = 2x."""
    return x + x
def test_newton_init_default():
    """Smoke test: Newton must construct with all-default arguments."""
    Newton()
def test_newton_init():
    """Smoke test: Newton accepts explicit function, derivative, iteration cap and tolerance."""
    Newton(f, f, 10, 0.01)
def test_newton_solve():
    """Newton iteration on f(x)=x^2 starting at x0=1 must converge towards the root at 0."""
    solver = Newton(f, d, 100, 0.01)
    assert_allclose(solver.solve(1), 0, rtol=0.01, atol=1e-2)
| 20.666667 | 74 | 0.727273 |
7b8e21664e98f16e906db09e24eb7c920227f0fd | 1,510 | py | Python | src/sentry/api/endpoints/organization_incident_details.py | overquota/sentry | 2cb3a3e40ca0b7ca3308deb0d1d9c436ce8aaeb8 | [
"BSD-3-Clause"
] | 1 | 2019-08-28T11:03:13.000Z | 2019-08-28T11:03:13.000Z | src/sentry/api/endpoints/organization_incident_details.py | overquota/sentry | 2cb3a3e40ca0b7ca3308deb0d1d9c436ce8aaeb8 | [
"BSD-3-Clause"
] | null | null | null | src/sentry/api/endpoints/organization_incident_details.py | overquota/sentry | 2cb3a3e40ca0b7ca3308deb0d1d9c436ce8aaeb8 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import
from rest_framework.response import Response
from sentry import features
from sentry.api.bases.incident import IncidentPermission
from sentry.api.bases.organization import OrganizationEndpoint
from sentry.api.exceptions import ResourceDoesNotExist
from sentry.api.serializers import serialize
from sentry.api.serializers.models.incident import DetailedIncidentSerializer
from sentry.incidents.models import Incident
class OrganizationIncidentDetailsEndpoint(OrganizationEndpoint):
    """REST endpoint exposing a single Incident within an organization."""

    permission_classes = (IncidentPermission, )

    def convert_args(self, request, incident_id, *args, **kwargs):
        """Resolve the URL's incident_id into an Incident kwarg for handlers.

        Raises ResourceDoesNotExist (404) when the 'organizations:incidents'
        feature is disabled for the org or when no matching incident exists,
        so the endpoint is indistinguishable from absent in both cases.
        """
        args, kwargs = super(OrganizationIncidentDetailsEndpoint, self).convert_args(
            request,
            *args,
            **kwargs
        )
        organization = kwargs['organization']

        # hide the endpoint entirely unless the feature flag is enabled
        if not features.has('organizations:incidents', organization, actor=request.user):
            raise ResourceDoesNotExist

        try:
            # incidents are looked up by per-organization identifier, not pk
            kwargs['incident'] = Incident.objects.get(
                organization=organization,
                identifier=incident_id,
            )
        except Incident.DoesNotExist:
            raise ResourceDoesNotExist

        return args, kwargs

    def get(self, request, organization, incident):
        """
        Fetch an Incident.
        ``````````````````
        :auth: required
        """
        data = serialize(incident, request.user, DetailedIncidentSerializer())
        return Response(data)
| 32.12766 | 89 | 0.686093 |
c65caa9666386a45cb8bc3a43eaeecdf3b97cb3b | 4,173 | py | Python | cumulusci/tasks/salesforce/tests/test_CreateCommunity.py | bethbrains/CumulusCI | 933d305f1a0b580aaaded9254611fbc0141f4bed | [
"BSD-3-Clause"
] | null | null | null | cumulusci/tasks/salesforce/tests/test_CreateCommunity.py | bethbrains/CumulusCI | 933d305f1a0b580aaaded9254611fbc0141f4bed | [
"BSD-3-Clause"
] | null | null | null | cumulusci/tasks/salesforce/tests/test_CreateCommunity.py | bethbrains/CumulusCI | 933d305f1a0b580aaaded9254611fbc0141f4bed | [
"BSD-3-Clause"
] | null | null | null | import json
import mock
import responses
import unittest
from datetime import datetime
from cumulusci.tasks.salesforce import CreateCommunity
from cumulusci.core.exceptions import SalesforceException
from .util import create_task
task_options = {
"name": "Test Community",
"description": "Community Details",
"template": "VF Template",
"url_path_prefix": "test",
}
class test_CreateCommunity(unittest.TestCase):
@responses.activate
def test_creates_community(self):
cc_task = create_task(CreateCommunity, task_options)
servlet_url = "{}/sites/servlet.SitePrerequisiteServlet".format(
cc_task.org_config.instance_url
)
community_url = "{}/services/data/v46.0/connect/communities".format(
cc_task.org_config.instance_url
)
responses.add(
method=responses.GET, url=cc_task.org_config.start_url, status=200
)
responses.add(method=responses.GET, url=servlet_url, status=200)
responses.add(method=responses.POST, url=community_url, status=200, json={})
responses.add(
method=responses.GET,
url=community_url,
status=200,
json={"communities": [{"name": "Test Community", "id": "000000000000000"}]},
)
cc_task()
self.assertEqual(4, len(responses.calls))
self.assertEqual(cc_task.org_config.start_url, responses.calls[0].request.url)
self.assertEqual(servlet_url, responses.calls[1].request.url)
self.assertEqual(community_url, responses.calls[2].request.url)
self.assertEqual(community_url, responses.calls[3].request.url)
self.assertEqual(
json.dumps(
{
"name": "Test Community",
"description": "Community Details",
"templateName": "VF Template",
"urlPathPrefix": "test",
}
),
responses.calls[2].request.body,
)
@responses.activate
def test_waits_for_community_result__not_complete(self):
cc_task = create_task(CreateCommunity, task_options)
community_url = "{}/services/data/v46.0/connect/communities".format(
cc_task.org_config.instance_url
)
responses.add(
method=responses.GET,
url=community_url,
status=200,
json={"communities": []},
)
cc_task._init_task()
cc_task.time_start = datetime.now()
cc_task._poll_action()
self.assertFalse(cc_task.poll_complete)
@responses.activate
def test_waits_for_community_result__complete(self):
cc_task = create_task(CreateCommunity, task_options)
community_url = "{}/services/data/v46.0/connect/communities".format(
cc_task.org_config.instance_url
)
responses.add(
method=responses.GET,
url=community_url,
status=200,
json={"communities": [{"name": "Test Community", "id": "000000000000000"}]},
)
cc_task.logger = mock.Mock()
cc_task._init_task()
cc_task.time_start = datetime.now()
cc_task._poll_action()
self.assertTrue(cc_task.poll_complete)
cc_task.logger.info.assert_called_once_with("Community 000000000000000 created")
def test_throws_exception_for_timeout(self):
cc_task = create_task(CreateCommunity, task_options)
cc_task.time_start = datetime(2019, 1, 1)
with self.assertRaises(SalesforceException):
cc_task._poll_action()
@responses.activate
def test_throws_exception_for_failed_prepare_step(self):
cc_task = create_task(CreateCommunity, task_options)
servlet_url = "{}/sites/servlet.SitePrerequisiteServlet".format(
cc_task.org_config.instance_url
)
responses.add(
method=responses.GET, url=cc_task.org_config.start_url, status=200
)
responses.add(method=responses.GET, url=servlet_url, status=500)
with self.assertRaises(SalesforceException):
cc_task._run_task()
| 33.926829 | 88 | 0.638869 |
ce0636145b4d4686024edc940ebd5b05d56a5907 | 3,216 | py | Python | aspire/app/domain/rater.py | nemmons/aspire | 59237f9f0890a92e710484aec037bddde811a4a4 | [
"MIT"
] | 2 | 2021-09-18T05:22:30.000Z | 2021-11-10T17:57:49.000Z | aspire/app/domain/rater.py | nemmons/flask-rater | 59237f9f0890a92e710484aec037bddde811a4a4 | [
"MIT"
] | 2 | 2021-01-10T04:58:45.000Z | 2021-03-01T15:38:24.000Z | aspire/app/domain/rater.py | nemmons/aspire | 59237f9f0890a92e710484aec037bddde811a4a4 | [
"MIT"
] | null | null | null | from .rating_manual import RatingManual
from .rating_step import Loop, AbstractRatingStep
import copy
from typing import List
class Rater:
    """Applies a RatingManual's steps to rate inputs to produce a final rate."""

    # class-level defaults; instances overwrite these in __init__ / rate()
    rating_manual: RatingManual = None
    rating_variables: dict = None
    detailed_results: list = None

    def __init__(self, rating_manual: RatingManual):
        self.rating_manual = rating_manual

    def rate(self, rate_inputs, capture_details=False):
        """Run every rating step over `rate_inputs` and return the 'rate' value.

        When capture_details is True, a deep-copied snapshot of the rating
        variables after each step is stored on self.detailed_results.
        """
        rating_variables = rate_inputs
        if capture_details:
            self.detailed_results = [{
                'step': {'name': 'Initial Input'},
                'rating_variables': copy.deepcopy(rating_variables)
            }]

        # apply each rating step sequentially to the rate inputs
        rating_variables = self.run_steps(self.rating_manual.rating_steps, rating_variables, capture_details)

        self.rating_variables = rating_variables
        return rating_variables['rate']

    def run_steps(self, rating_steps, rating_variables, capture_details):
        """Apply `rating_steps` in order; Loop steps recurse via handle_rate_loop."""
        for rating_step in rating_steps:
            if isinstance(rating_step, Loop):
                rating_variables = self.handle_rate_loop(rating_step, rating_variables, capture_details)
            else:
                # the step is copied so a step instance can be reused safely
                rating_variables = copy.copy(rating_step).run(rating_variables)
                if capture_details:
                    self.detailed_results.append({
                        'step': copy.copy(rating_step),
                        'rating_variables': copy.deepcopy(rating_variables)
                    })
        return rating_variables

    def handle_rate_loop(self, rating_step: Loop, rating_variables: dict, capture_details):
        """Run a Loop step once per sub-risk, merging each sub-risk's variables
        in and splitting its results back out afterwards."""
        sub_risk_label = rating_step.sub_risk_label.evaluate(rating_variables)
        sub_risks = rating_variables[sub_risk_label]  # type: List[dict]
        # NOTE(review): this is a live dict view, not a snapshot. It stays
        # correct below only because `rating_variables` is rebound (never
        # mutated in place); set(...) would be safer — confirm before refactoring.
        original_rating_variables = rating_variables.keys()
        for i, sub_risk_vars in enumerate(sub_risks):
            # outer variables win any name collision with the sub-risk's variables
            rating_variables = self.run_steps(rating_step.rating_steps, {**sub_risk_vars, **rating_variables}, capture_details)
            # keys added during the run belong to the sub-risk's result
            updated_sub_risk_vars = {k: rating_variables[k] for k in rating_variables.keys() - original_rating_variables}
            rating_variables = {k: rating_variables[k] for k in original_rating_variables}
            rating_variables[sub_risk_label][i] = updated_sub_risk_vars
        return rating_variables

    def check_output(self, rating_variable: str):
        """Return the named rating variable from the last rate() run, or None."""
        if rating_variable in self.rating_variables:
            return self.rating_variables[rating_variable]
        return None

    def get_step_by_step_diff(self):
        """Return detailed_results reduced to only the variables each step changed."""
        diffed_results = [self.detailed_results[0]]
        for key, result in enumerate(self.detailed_results):
            if key == 0:
                continue
            prev_vars = self.detailed_results[key - 1]['rating_variables']
            current_vars = result['rating_variables']
            diffed_vars = {}
            for k in current_vars.keys():
                # keep variables that are new or whose value changed this step
                if k not in prev_vars or prev_vars[k] != current_vars[k]:
                    diffed_vars[k] = current_vars[k]
            diffed_results.append({
                'step': result['step'],
                'rating_variables': diffed_vars
            })
        return diffed_results
| 40.2 | 127 | 0.658893 |
7c2c23ae9777b1ad5f952495c805ec47ad33e1dc | 646 | py | Python | setup.py | cartertemm/tformat | 5a81361ab18d9badf24eb3d15c60f59860403afb | [
"Unlicense"
] | null | null | null | setup.py | cartertemm/tformat | 5a81361ab18d9badf24eb3d15c60f59860403afb | [
"Unlicense"
] | 1 | 2021-09-30T02:43:48.000Z | 2021-09-30T02:43:48.000Z | setup.py | cartertemm/tformat | 5a81361ab18d9badf24eb3d15c60f59860403afb | [
"Unlicense"
] | null | null | null | from setuptools import setup, find_packages
setup(
name="tformat",
version="0.1",
packages=find_packages(),
author="Carter Temm",
author_email="cartertemm@gmail.com",
description="Efficient conversion of timestamps to human-readable equivalents",
long_description=open("readme.md", "r").read(),
long_description_content_type="text/markdown",
url="https://github.com/cartertemm/tformat",
classifiers=[
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
"License :: Public Domain",
"License :: OSI Approved :: MIT License",
"Development Status :: 5 - Production/Stable",
"Topic :: Utilities",
]
) | 29.363636 | 80 | 0.718266 |
77358464023fa39c4718524899032725a224e66c | 1,647 | py | Python | var/spack/repos/builtin/packages/vpfft/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | var/spack/repos/builtin/packages/vpfft/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 8 | 2021-11-09T20:28:40.000Z | 2022-03-15T03:26:33.000Z | var/spack/repos/builtin/packages/vpfft/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2 | 2019-02-08T20:37:20.000Z | 2019-03-31T15:19:26.000Z | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class Vpfft(MakefilePackage):
    """Proxy Application. VPFFT is an implementation of a mesoscale
    micromechanical materials model. By solving the viscoplasticity
    model, VPFFT simulates the evolution of a material under deformation.
    The solution time to the viscoplasticity model, described by a set
    of partial differential equations, is significantly reduced by the
    application of Fast Fourier Transform in the VPFFT algorithm.
    """

    homepage = "http://www.exmatex.org/vpfft.html"
    git      = "https://github.com/exmatex/VPFFT.git"

    tag = ['proxy-app']

    version('develop')

    depends_on('eigen')
    depends_on('fftw')
    depends_on('mpi')

    @property
    def build_targets(self):
        """Arguments passed to make: custom makefile plus dependency paths."""
        targets = [
            "--file=Makefile.make",
            # eigen headers live under include/eigen<major-version>
            "EIGEN_PATH={0}".format(
                join_path(
                    self.spec['eigen'].prefix.include,
                    'eigen{0}'.format(
                        self.spec['eigen'].version.up_to(1)))),
            "FFTW_PATH={0}".format(self.spec['fftw'].prefix),
            # the makefile's CC is used for C++ compilation via the MPI wrapper
            "CC={0}".format(self.spec['mpi'].mpicxx)
        ]
        return targets

    def install(self, spec, prefix):
        """Copy the built binary, READMEs and docs into the install prefix."""
        mkdirp(prefix.bin)
        install('VPFFT++', prefix.bin)
        install('README.md', prefix)
        install('README.make', prefix)
        install('README-license.txt', prefix)
        install_tree('docs', prefix.docs)
0c8a421a5d4da9c5fa3336e502a6be5f1c99d8e0 | 1,549 | py | Python | bio_embeddings/extract/bindEmbed21DL/binding_residues_cnn.py | kvetab/bio_embeddings | 97309f73c964861f6e4e3d4510f4b5711d3b6b32 | [
"MIT"
] | 219 | 2020-01-19T16:39:09.000Z | 2022-03-21T16:02:04.000Z | bio_embeddings/extract/bindEmbed21DL/binding_residues_cnn.py | kvetab/bio_embeddings | 97309f73c964861f6e4e3d4510f4b5711d3b6b32 | [
"MIT"
] | 175 | 2019-12-05T13:27:14.000Z | 2022-03-30T16:58:32.000Z | bio_embeddings/extract/bindEmbed21DL/binding_residues_cnn.py | kvetab/bio_embeddings | 97309f73c964861f6e4e3d4510f4b5711d3b6b32 | [
"MIT"
] | 33 | 2019-12-16T09:59:44.000Z | 2022-03-05T06:35:16.000Z | import torch.nn as nn
class BindingResiduesCNN(nn.Module):
    """Two-layer CNN predicting three binding-residue classes per position
    (metal, nucleic acids, small molecules) from per-residue embeddings.
    In deployment the final prediction averages the output probabilities of
    5 models from 5 cross-validation runs."""

    # architecture hyper-parameters, kept as class attributes
    n_features = 1024      # embedding dimension per residue
    bottleneck_dim = 128   # hidden channel count
    n_classes = 3          # binding classes
    dropout_rate = 0.7

    def __init__(self):
        super(BindingResiduesCNN, self).__init__()
        # NOTE: the attribute name and Sequential layout must stay unchanged
        # so that state_dicts of trained checkpoints keep loading.
        self.conv1 = nn.Sequential(
            nn.Conv1d(self.n_features, self.bottleneck_dim, 5, 1, 2),
            nn.ELU(),
            nn.Dropout(self.dropout_rate),
            nn.Conv1d(self.bottleneck_dim, self.n_classes, 5, 1, 2),
        )

    def forward(self, x):
        """Map per-residue embeddings (L x F) to class scores (N x L).

        L = protein length, F = 1024 embedding features, N = 3 classes.
        """
        # (L x F) -> (1 x F x L): add a batch dim, move features to channels
        batched = x.unsqueeze(dim=0).permute(0, 2, 1)
        # (1 x F x L) -> (1 x N x L)
        scores = self.conv1(batched)
        # (1 x N x L) -> (N x L)
        return scores.squeeze(dim=0)
d0e1499f3b70ff5a1a25a0ecf2cf91502cc87b18 | 714 | py | Python | quantifiedcode/backend/test/api/helpers.py | marcinguy/quantifiedcode | cafc8b99d56a5e51820421af5d77be8b736ab03d | [
"BSD-3-Clause"
] | 138 | 2022-02-02T15:38:29.000Z | 2022-03-30T21:23:33.000Z | quantifiedcode/backend/test/api/helpers.py | bbbfkl/scanmycode-ce | 786ae9a83a0839b70ac773a673a3ac69a0484ee4 | [
"BSD-3-Clause"
] | 14 | 2016-12-21T11:26:48.000Z | 2022-03-02T10:32:24.000Z | quantifiedcode/backend/test/api/helpers.py | bbbfkl/scanmycode-ce | 786ae9a83a0839b70ac773a673a3ac69a0484ee4 | [
"BSD-3-Clause"
] | 26 | 2017-08-01T10:00:16.000Z | 2022-02-06T15:31:55.000Z |
from quantifiedcode.backend.app import get_app
from quantifiedcode.test.helpers import ApplicationTest
class ApiTest(ApplicationTest):
"""
An API test setups up the database (using the DatabaseTest) and launches a process
that provides a fully functional API server on the local host.
This process answers the API requests issued by the tests, using the database
set up by DatabaseTest. This process will be set up only once for each test
in a given class to save time.
"""
fixtures = []
host = 'localhost'
port = 5555
protocol = 'http'
get_app = staticmethod(get_app)
base_url = '/v1'
recreate_db = False
create_db = True
delete_data = True
| 25.5 | 86 | 0.705882 |
0701eaed68bf3bf3f79624d5431d5897a67fc67c | 4,404 | py | Python | Remoter.py | DeemOnSecurity/RePy | b2ceefd7ea5f7cf84dbfb373e62ab180da2b4220 | [
"MIT"
] | null | null | null | Remoter.py | DeemOnSecurity/RePy | b2ceefd7ea5f7cf84dbfb373e62ab180da2b4220 | [
"MIT"
] | null | null | null | Remoter.py | DeemOnSecurity/RePy | b2ceefd7ea5f7cf84dbfb373e62ab180da2b4220 | [
"MIT"
] | null | null | null | from getpass import getpass
from typing import List
from paramiko import SSHClient, AutoAddPolicy
from paramiko.hostkeys import HostKeys
class _RePyError(Exception):
    """Module-internal error for invalid RePy usage or remote-host problems."""
    pass
class _RePyClient(object):
def __init__(self, user, host, pswd, port, sudo, sudopass):
self.user: str = user
self.host: str = host
self.pswd: str = pswd
self.sudo: bool = sudo
self.port: int = port
self.sudopass: str = sudopass
self.pyver: str = ''
def __repr__(self):
return str({'user': self.user, 'host': self.host, 'pswd': self.pswd, 'port': self.port, 'sudo': self.sudo,
'python_version': self.pyver})
def __str__(self):
return f'Client(user:{self.user}, host:{self.host}, pswd:{self.pswd}, port:{self.port}, sudo:{self.sudo}, python_version:{self.pyver})'
class SSH(_RePyClient, _RePyError):
    """Paramiko-backed SSH client that can run shell or inline Python remotely."""

    def __init__(self, user, host, pswd='', port=22, sudo=False, sudopass=''):
        """Connect immediately and probe the remote Python version.

        Raises _RePyError if `python --version` produces no output remotely.
        NOTE(review): `HostKeys().lookup(...)` queries a freshly constructed,
        empty HostKeys object — presumably intended to consult the loaded
        system host keys; as written it looks like it always takes the
        password branch. Confirm against paramiko usage.
        """
        super().__init__(user, host, pswd, port, sudo, sudopass)
        self._ssh = SSHClient()
        self._ssh.set_missing_host_key_policy(AutoAddPolicy)
        self._ssh.load_system_host_keys()
        if not HostKeys().lookup(self.host):
            if not self.pswd:
                getpass(f'No key found for {self.user}@{self.host}, please enter password: [WILL NOT ECHO]')
            self.ssh_client = self._ssh.connect(hostname=self.host, username=self.user, password=self.pswd,
                                                port=self.port)
        else:
            self.ssh_client = self._ssh.connect(hostname=self.host, username=self.user, port=self.port)
        self.pyver = self.execute('python --version').strip()
        if not self.pyver:
            raise _RePyError('Python is not accessible on the remote host. Check if it is installed.')

    def pyxecute(self, commands: List[str] or str) -> str:
        """Run Python remotely via a heredoc: a list of script file paths, or
        one inline Python source string.

        NOTE(review): for a list input the `return` inside the loop means only
        the FIRST file is executed — confirm whether that is intended.
        """
        if isinstance(commands, list):
            for file in commands:
                command = self.sudoer(f'python <<EOF\n \n{open(file).read()} \nEOF')
                return self.pseudo(command)
        elif isinstance(commands, str):
            command = self.sudoer(f'python <<EOF\n \n{commands} \nEOF')
            return self.pseudo(command)
        else:
            raise _RePyError('SSH.pyxecute only accepts a list of files to read and execute or a single python '
                             'command string.')

    def execute(self, files: List[str] or str) -> str:
        """Run shell commands remotely: a list of script file paths (contents
        are sent), or one inline shell command string.

        NOTE(review): same early-return-in-loop pattern as pyxecute — only the
        first file of a list is executed.
        """
        if isinstance(files, list):
            for file in files:
                command = self.sudoer(f'{open(file).read()}')
                return self.pseudo(command)
        elif isinstance(files, str):
            command = self.sudoer(files)
            return self.pseudo(command)
        else:
            raise _RePyError('SSH.execute only accepts a list of files to read and execute ore a single shell command '
                             'string')

    def sudoer(self, text: str) -> str:
        """Prefix `text` with sudo (reading the password from stdin) when enabled."""
        if self.sudo:
            return f"sudo -S -p '' {text}"
        else:
            return text

    def pseudo(self, command):
        """Execute `command` on a pty, feeding the sudo password when needed,
        and return stdout with any echoed sudo password scrubbed out."""
        stdin, stdout, stderr = self._ssh.exec_command(command=command, get_pty=True)
        if self.sudo:
            stdin.write(self.sudopass + '\n')
            stdin.flush()
        stdin.close()
        return stdout.read().decode('utf8').replace(self.sudopass, '')
class SFTP(_RePyClient, _RePyError):
    """Paramiko-backed SFTP client for transferring single files.

    NOTE(review): the connection logic duplicates SSH.__init__ (including the
    empty-HostKeys lookup quirk) — a shared base would avoid drift.
    """

    def __init__(self, user, host, pswd='', port=22, sudo=False, sudopass=''):
        """Connect immediately and open an SFTP channel on the session."""
        super().__init__(user, host, pswd, port, sudo, sudopass)
        self._ssh = SSHClient()
        self._ssh.set_missing_host_key_policy(AutoAddPolicy)
        self._ssh.load_system_host_keys()
        if not HostKeys().lookup(self.host):
            if not self.pswd:
                getpass(f'No key found for {self.user}@{self.host}, please enter password: [WILL NOT ECHO]')
            self.ssh_client = self._ssh.connect(hostname=self.host, username=self.user, password=self.pswd,
                                                port=self.port)
        else:
            self.ssh_client = self._ssh.connect(hostname=self.host, username=self.user, port=self.port)
        self._sftp = self._ssh.open_sftp()

    def get_file(self, rem_path, lcl_path):
        """Download the remote file `rem_path` to local `lcl_path`."""
        self._sftp.get(rem_path, lcl_path)

    def put_file(self, lcl_path, rem_path):
        """Upload the local file `lcl_path` to remote `rem_path`."""
        self._sftp.put(lcl_path, rem_path)
| 40.036364 | 143 | 0.600136 |
f2aedd1905d4edddb8ef6c0c7afa325268e0752b | 9,983 | py | Python | tests/python/relay/test_pass_partial_eval.py | gyshi/tvm | 264660471193cf7b062dbf945678e0bbd06a5144 | [
"Apache-2.0"
] | null | null | null | tests/python/relay/test_pass_partial_eval.py | gyshi/tvm | 264660471193cf7b062dbf945678e0bbd06a5144 | [
"Apache-2.0"
] | null | null | null | tests/python/relay/test_pass_partial_eval.py | gyshi/tvm | 264660471193cf7b062dbf945678e0bbd06a5144 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import tvm
from tvm import relay
from tvm.relay.ir_pass import alpha_equal, gradient
from tvm.relay.prelude import Prelude
from tvm.relay import op, create_executor, transform
from tvm.relay import Var, TypeVar, TupleGetItem, Let, Function, const, RefRead, RefWrite, RefCreate
from tvm.relay import TensorType, Tuple, If, Module, Clause, PatternConstructor, PatternVar, Match
from tvm.relay import GlobalVar, Call
from tvm.relay.testing import add_nat_definitions, make_nat_expr
def check_eval(expr, expected_result, mod=None, rtol=1e-07):
    """Evaluate `expr` on the LLVM interpreter and compare with `expected_result`."""
    interpreter = create_executor(mod=mod, ctx=tvm.context("llvm", 0), target="llvm")
    actual = interpreter.evaluate(expr)
    np.testing.assert_allclose(actual.asnumpy(), expected_result, rtol=rtol)
def tipe(expr):
    """Type-infer, partially evaluate, then re-infer types on `expr`."""
    passes = [transform.InferType(),
              transform.PartialEvaluate(),
              transform.InferType()]
    return transform.OptimizeOnExpr(expr, passes)
def dcpe(expr, mod=None, grad=False):
    """Partially evaluate `expr` and eliminate dead code.

    If `grad` is True, `expr` is differentiated first. If `mod` is given,
    `expr` must be a Function; it is installed as the module entry point,
    the whole module is optimized, and the optimized entry function is
    returned. Otherwise the passes run on the bare expression.
    """
    passes = [transform.PartialEvaluate(),
              transform.DeadCodeElimination(inline_once=True)]
    if grad:
        expr = gradient(expr)
    if mod:
        assert isinstance(expr, Function)
        mod[mod.entry_func] = expr
        seq = transform.Sequential(passes)
        mod = seq(mod)
        return mod[mod.entry_func]
    return transform.OptimizeOnExpr(expr, passes)
def test_tuple():
    """TupleGetItem on a known tuple should reduce to the selected element."""
    t = TypeVar("t")
    x = Var("x", t)
    body = TupleGetItem(relay.Tuple([relay.const(4.0), x]), 1)
    f = Function([x], body, None, [t])
    expected = relay.Function([x], x, None, [t])
    expected = transform.OptimizeOnExpr(expected, transform.InferType())
    assert alpha_equal(dcpe(f), expected)
def test_const_inline():
    """Applying a function to a constant should fold to a constant."""
    t = relay.TensorType([], "float32")
    d = Var("d", t)
    double = Function([d], d + d)
    orig = double(const(4.0))
    assert alpha_equal(dcpe(orig), const(8.0))
def test_ref():
    """Purely local reference create/read/write should be eliminated."""
    t = relay.TensorType([], "float32")
    d = relay.Var("d", t)
    r = relay.Var("r", relay.RefType(t))
    x = relay.Var("x")
    body = relay.RefRead(r)
    body = Let(x, RefWrite(r, RefRead(r) * RefRead(r)), body)
    body = Let(r, RefCreate(d), body)
    square = Function([d], body)
    expected = transform.OptimizeOnExpr(Function([d], d * d),
                                        transform.InferType())
    assert alpha_equal(dcpe(square), expected)
def test_empty_ad():
    """Gradient of the identity function should be all-ones."""
    shape = (10, 10)
    dtype = "float32"
    t = TensorType(shape, dtype)
    d = Var("d", t)
    f = Function([d], d)
    g = dcpe(f, grad=True)
    expected = Function([d], Tuple([d, Tuple([op.ones_like(d)])]))
    expected = transform.OptimizeOnExpr(expected, transform.InferType())
    assert alpha_equal(g, expected)
def test_ad():
    """Gradient of x * x should simplify to the expected sum of products."""
    shape = (10, 10)
    dtype = "float32"
    t = TensorType(shape, dtype)
    d = Var("d", t)
    f = Function([d], d * d)
    g = dcpe(f, grad=True)
    # Build the expected IR by hand: value plus one collapsed gradient per use.
    m = d * d
    x = relay.Var("x")
    o = op.ones_like(x)
    x1 = relay.Var("x1")
    grad = op.zeros_like(d) + op.collapse_sum_like(x1 * d, d) + op.collapse_sum_like(x1 * d, d)
    body = Tuple([x, Tuple([grad])])
    body = relay.Let(x1, o, body)
    expected = Function([d], relay.Let(x, m, body))
    expected = transform.OptimizeOnExpr(expected, transform.InferType())
    assert alpha_equal(g, expected)
def test_if_ref():
    """Ref mutation inside both branches of an If must survive partial evaluation."""
    shape = ()
    dtype = "bool"
    t = TensorType(shape, dtype)
    d = Var("d", t)
    r = Var("r")
    update = Function([], RefWrite(r, RefRead(r) + RefRead(r)))
    u = Var("u")
    body = If(d, u(), u())
    eff = Var("eff")
    body = Let(eff, body, RefRead(r))
    f = Function([d], Let(r, RefCreate(const(1)), Let(u, update, body)))
    pe_f = tipe(f)
    ex = create_executor()
    f_res = ex.evaluate(f)(const(True))
    pe_f_res = ex.evaluate(pe_f)(const(True))
    # Both the original and the partially evaluated program must double r once.
    np.testing.assert_allclose(f_res.asnumpy(), 2 * np.ones_like(f_res.asnumpy()))
    np.testing.assert_allclose(pe_f_res.asnumpy(), 2 * np.ones_like(pe_f_res.asnumpy()))
def test_function_invalidate():
    """A ref write between closure creation and call must invalidate cached reads."""
    shape = ()
    dtype = "bool"
    t = TensorType(shape, dtype)
    d = Var("d", t)
    r = Var("r")
    fetch = Function([], RefRead(r))
    fet = Var("fetch")
    fet_obscured = Var("fetch_obscured")
    u = Var("u")
    body = If(d, fet_obscured(), fet_obscured())
    body = Let(u, RefWrite(r, const(1)), body)
    body = Let(fet_obscured, If(d, fet, fet), body)
    body = Let(fet, fetch, body)
    body = Let(r, RefCreate(const(0)), body)
    f = Function([d], body)
    pe_f = tipe(f)
    ex = create_executor()
    f_res = ex.evaluate(f)(const(True))
    pe_f_res = ex.evaluate(pe_f)(const(True))
    # The read must observe the write of 1, before and after partial evaluation.
    np.testing.assert_allclose(f_res.asnumpy(), np.ones_like(f_res.asnumpy()))
    np.testing.assert_allclose(pe_f_res.asnumpy(), np.ones_like(pe_f_res.asnumpy()))
def test_head_cons():
    """hd(cons(x, nil)) should reduce to x."""
    mod = Module()
    p = Prelude(mod)
    hd = p.hd
    t = TypeVar("t")
    x = Var("x", t)
    body = hd(p.cons(x, p.nil()))
    f = Function([x], body, None, [t])
    res = dcpe(f, mod)
    assert alpha_equal(res, Function([x], x, t, [t]))
def test_map():
    """Mapping the identity over a constant list should yield the same list."""
    mod = Module()
    p = Prelude(mod)
    f = GlobalVar("f")
    t = TypeVar("t")
    a = Var("a", t)
    mod[f] = Function([a], a, t, [t])
    orig = p.map(f, p.cons(const(1), p.cons(const(2), p.cons(const(3), p.nil()))))
    expected = p.cons((const(1)), p.cons((const(2)), p.cons((const(3)), p.nil())))
    expected = Function([], expected)
    mod[mod.entry_func] = expected
    expected = mod[mod.entry_func]
    orig = Function([], orig)
    res = dcpe(orig, mod=mod)
    assert alpha_equal(res.body, expected.body)
def test_loop():
    """A non-terminating self-recursive call must be left as a residual call."""
    mod = Module()
    t = TypeVar("t")
    x = Var("x", t)
    loop = GlobalVar("loop")
    mod[loop] = Function([x], loop(x), t, [t])
    expected = Call(loop, [const(1)])
    mod[mod.entry_func] = Function([], expected)
    expected = mod[mod.entry_func].body
    call = Function([], loop(const(1)))
    res = dcpe(call, mod=mod)
    assert alpha_equal(res.body, expected)
def test_swap_loop():
    """An argument-swapping infinite loop must be preserved unchanged."""
    mod = Module()
    p = Prelude(mod)
    add_nat_definitions(p)
    nat = p.nat()
    x = Var("x", nat)
    y = Var("y", nat)
    loop = GlobalVar("loop")
    mod[loop] = Function([x, y], loop(y, x), nat)
    prog = loop(make_nat_expr(p, 1), make_nat_expr(p, 2))
    res = Function([], prog)
    res = dcpe(res, mod=mod)
    assert alpha_equal(prog, res.body)
def test_abs_diff():
    """|7 - 3| over Peano nats should partially evaluate to the nat 4."""
    # TODO(@M.K.): refactor using tuple pattern (not yet implemented)
    mod = Module()
    p = Prelude(mod)
    add_nat_definitions(p)
    nat = p.nat()
    x = Var("x", nat)
    y = Var("y", nat)
    xp = Var("x'", nat)
    yp = Var("y'", nat)
    diff = GlobalVar("diff")
    # diff(x, y): match on x, then on y, recursing with swapped operands.
    y_z_case = Clause(PatternConstructor(p.z, []), x)
    y_s_case = Clause(PatternConstructor(p.s, [PatternVar(yp)]), diff(yp, xp))
    x_z_case = Clause(PatternConstructor(p.z, []), y)
    x_s_case = Clause(PatternConstructor(p.s, [PatternVar(xp)]), Match(y, [y_z_case, y_s_case]))
    mod[diff] = Function([x, y], Match(x, [x_z_case, x_s_case]))
    orig = diff(make_nat_expr(p, 7), make_nat_expr(p, 3))
    orig = Function([], orig)
    res = dcpe(orig, mod=mod)
    assert alpha_equal(res.body, make_nat_expr(p, 4))
def test_match_nat_id():
    """A match-based nat identity applied to 3 should reduce to 3."""
    mod = Module()
    p = Prelude(mod)
    add_nat_definitions(p)
    nat = p.nat()
    x = Var("x", nat)
    y = Var("y", nat)
    nat_id = GlobalVar("nat_id")
    z_case = Clause(PatternConstructor(p.z, []), p.z())
    s_case = Clause(PatternConstructor(p.s, [PatternVar(y)]), p.s(y))
    mod[nat_id] = Function([x], Match(x, [z_case, s_case]))
    orig = nat_id(make_nat_expr(p, 3))
    orig = Function([], orig)
    res = dcpe(orig, mod=mod)
    assert alpha_equal(res.body, make_nat_expr(p, 3))
def test_nat_id():
    """A trivial global identity over nats applied to 3 should reduce to 3."""
    mod = Module()
    p = Prelude(mod)
    add_nat_definitions(p)
    nat = p.nat()
    x = Var("x", nat)
    y = Var("y", nat)
    nat_id = GlobalVar("nat_id")
    mod[nat_id] = Function([x], x)
    orig = nat_id(make_nat_expr(p, 3))
    orig = Function([], orig)
    res = dcpe(orig, mod=mod)
    assert alpha_equal(res.body, make_nat_expr(p, 3))
def test_global_match_nat_id():
    """An inline identity match on the nat 3 should reduce to 3."""
    mod = Module()
    p = Prelude(mod)
    add_nat_definitions(p)
    nat = p.nat()
    x = Var("x", nat)
    z_case = Clause(PatternConstructor(p.z, []), p.z())
    s_case = Clause(PatternConstructor(p.s, [PatternVar(x)]), p.s(x))
    orig = Match(make_nat_expr(p, 3), [z_case, s_case])
    orig = Function([], orig)
    res = dcpe(orig, mod=mod)
    assert alpha_equal(res.body, make_nat_expr(p, 3))
def test_double():
    """double(3) over Peano nats should partially evaluate to the nat 6."""
    mod = Module()
    p = Prelude(mod)
    add_nat_definitions(p)
    orig = p.double(make_nat_expr(p, 3))
    orig = Function([], orig)
    res = dcpe(orig, mod=mod)
    assert alpha_equal(res.body, make_nat_expr(p, 6))
if __name__ == '__main__':
    # Run every partial-evaluation test, preserving the original order.
    for _test in (test_empty_ad, test_tuple, test_const_inline, test_ref,
                  test_ad, test_if_ref, test_function_invalidate,
                  test_head_cons, test_map, test_loop, test_swap_loop,
                  test_abs_diff, test_double, test_nat_id,
                  test_global_match_nat_id, test_match_nat_id):
        _test()
| 31.393082 | 100 | 0.619253 |
808bca6a856e3c12ecfc92e85128141783fed5c7 | 1,005 | py | Python | django_jwt/server/admin.py | Casassarnau/django-jwt-oidc | 0c047c060ff08736b56f408432fbff9ad5799ad3 | [
"MIT"
] | 5 | 2022-02-21T10:19:19.000Z | 2022-03-29T19:05:44.000Z | django_jwt/server/admin.py | Casassarnau/django-jwt-oidc | 0c047c060ff08736b56f408432fbff9ad5799ad3 | [
"MIT"
] | null | null | null | django_jwt/server/admin.py | Casassarnau/django-jwt-oidc | 0c047c060ff08736b56f408432fbff9ad5799ad3 | [
"MIT"
] | 1 | 2022-03-27T08:39:47.000Z | 2022-03-27T08:39:47.000Z | from django.contrib import admin
from django_jwt.server.forms import IdTokenExtraClaimAdminForm, RestrictUsersAdminForm, WebPageAdminForm
from django_jwt.server.models import WebPage, AttributeWebPage
class WebPagesAttributesAdmin(admin.StackedInline):
    """Inline editor for AttributeWebPage rows used as extra ID-token claims."""
    verbose_name = 'ID Token extra claim'
    model = AttributeWebPage
    extra = 1
    form = IdTokenExtraClaimAdminForm

    def get_queryset(self, request):
        # Only show attributes that do not restrict users; restricting
        # attributes are handled by RestrictUsersAdmin below.
        return super().get_queryset(request).filter(restrict=False)
class RestrictUsersAdmin(admin.StackedInline):
    """Inline editor for AttributeWebPage rows that restrict which users may log in."""
    verbose_name = 'User attribute restricted'
    verbose_name_plural = 'User attributes restricted'
    model = AttributeWebPage
    extra = 1
    form = RestrictUsersAdminForm

    def get_queryset(self, request):
        # Only show restricting attributes (complement of WebPagesAttributesAdmin).
        return super().get_queryset(request).filter(restrict=True)
@admin.register(WebPage)
class WebPageFullAdmin(admin.ModelAdmin):
    """Admin page for WebPage with inline claim and restriction editors."""
    # The client id is generated, never edited by hand.
    readonly_fields = ('id',)
    inlines = [WebPagesAttributesAdmin, RestrictUsersAdmin]
    form = WebPageAdminForm
d81d49b0144a138b86fbd8f1e8f9db75b2584d59 | 3,292 | py | Python | .history/profiles_project/settings_20191230001517.py | chanakanissanka/Python-REST-API | 78a6b25ed9403c8ac075f4b8df35f5ff7159b0df | [
"MIT"
] | null | null | null | .history/profiles_project/settings_20191230001517.py | chanakanissanka/Python-REST-API | 78a6b25ed9403c8ac075f4b8df35f5ff7159b0df | [
"MIT"
] | 12 | 2020-02-12T03:17:15.000Z | 2022-02-10T12:49:58.000Z | .history/profiles_project/settings_20191230001517.py | chanakanissanka/Python-REST-API | 78a6b25ed9403c8ac075f4b8df35f5ff7159b0df | [
"MIT"
] | null | null | null | """
Django settings for profiles_project project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to the repository; rotate it and load
# it from the environment before deploying.
SECRET_KEY = 'v(&03s8h+ojr8dld&sad101soolmr+nq+xjkz3z5&2voa%^=7h'

# SECURITY WARNING: don't run with debug turned on in production!
#DEBUG = bool(int(os.environ.get('DEBUG',)))
# NOTE(review): DEBUG is never assigned in this file, so Django falls back
# to its global default (False); confirm this is intentional.

ALLOWED_HOSTS = []

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rest_framework',
    'rest_framework.authtoken',
    'profiles_api',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'profiles_project.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'profiles_project.wsgi.application'

# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/

STATIC_URL = '/static/'

# Use the project-specific user model instead of django.contrib.auth.User.
AUTH_USER_MODEL = 'profiles_api.UserProfile'
STATIC_ROOT = 'static/' | 25.92126 | 91 | 0.699878 |
9dc919889973acb4ff49d43f99e4f6e3222eb127 | 2,331 | py | Python | inclearn/lib/callbacks.py | Zotkin/incremental_learning.pytorch | 6a0d7385d209abcd40a402dcad42293dd4e8b362 | [
"MIT"
] | 277 | 2019-04-19T08:19:57.000Z | 2022-03-28T12:44:54.000Z | inclearn/lib/callbacks.py | Zotkin/incremental_learning.pytorch | 6a0d7385d209abcd40a402dcad42293dd4e8b362 | [
"MIT"
] | 55 | 2019-05-07T08:38:30.000Z | 2022-03-28T06:35:53.000Z | inclearn/lib/callbacks.py | Zotkin/incremental_learning.pytorch | 6a0d7385d209abcd40a402dcad42293dd4e8b362 | [
"MIT"
] | 48 | 2019-05-10T06:35:38.000Z | 2022-03-24T13:39:55.000Z | import copy
import torch
class Callback:
    """Minimal training callback: an iteration counter plus a stop flag."""

    def __init__(self):
        self._iteration = 0
        self._in_training = True

    @property
    def in_training(self):
        """Whether training should continue."""
        return self._in_training

    def on_epoch_begin(self):
        """Hook called before each epoch; no-op by default."""
        pass

    def on_epoch_end(self, metric=None):
        """Hook called after each epoch; advances the iteration counter."""
        self._iteration += 1

    def before_step(self):
        """Hook called before each optimizer step; no-op by default."""
        pass


class GaussianNoiseAnnealing(Callback):
    """Add annealed gaussian noise to the gradients.

    Add zero-mean gaussian noise to the gradients; the noise scale decays
    as the iteration counter grows (the counter advances in
    ``on_epoch_end``).

    # References:
        - Adding Gradient Noise Improves Learning for Very Deep Networks
        - https://arxiv.org/abs/1511.06807

    :param parameters: iterable of parameters whose ``.grad`` gets noise.
    :param eta: Base noise variance.
    :param gamma: Decay rate.
    """

    def __init__(self, parameters, eta=0.3, gamma=0.55):
        # Materialize the parameters: callers often pass model.parameters(),
        # a one-shot generator that would otherwise be exhausted after the
        # first before_step() call, silently disabling the noise.
        self._parameters = list(parameters)
        self._eta = eta
        self._gamma = gamma
        super(GaussianNoiseAnnealing, self).__init__()

    def before_step(self):
        # Variance anneals towards 0 as the iteration counter increases.
        variance = self._eta / ((1 + self._iteration) ** self._gamma)

        for param in self._parameters:
            # Noise on gradients:
            noise = torch.randn(param.grad.shape, device=param.grad.device) * variance
            param.grad.add_(noise)
class EarlyStopping(Callback):
    """Stop training after `patience` epochs without metric improvement.

    An improvement must beat the best seen value by more than `epsilon`
    (lower-is-better when `minimize_metric`, higher-is-better otherwise).
    On every improvement a deep copy of `network` is kept as the best
    snapshot.
    """

    def __init__(self, network, minimize_metric=True, patience=5, epsilon=1e-3):
        self._patience = patience
        self._wait = 0

        if minimize_metric:
            self._best = float('inf')
            self._improved = lambda best, new: (best - epsilon) > new
        else:
            self._best = float("-inf")
            self._improved = lambda best, new: (best + epsilon) < new

        self.network = network
        self._record = []
        super(EarlyStopping, self).__init__()

    def on_epoch_end(self, metric):
        self._record.append(metric)

        if self._improved(self._best, metric):
            # New best: reset patience and snapshot the network weights.
            self._best = metric
            self._wait = 0
            self.network = copy.deepcopy(self.network)
        else:
            self._wait += 1
            if self._wait == self._patience:
                print("Early stopping, metric is: {}.".format(metric))
                print(self._record[-self._patience:])
                self._in_training = False

        super(EarlyStopping, self).on_epoch_end(metric=metric)
2c0c77dfc4e3b96576d4d6cd84809c3939e14079 | 4,486 | py | Python | purity_fb/purity_fb_1dot5/models/hardware_response.py | tlewis-ps/purity_fb_python_client | 652835cbd485c95a86da27f8b661679727ec6ea0 | [
"Apache-2.0"
] | 5 | 2017-09-08T20:47:22.000Z | 2021-06-29T02:11:05.000Z | purity_fb/purity_fb_1dot5/models/hardware_response.py | tlewis-ps/purity_fb_python_client | 652835cbd485c95a86da27f8b661679727ec6ea0 | [
"Apache-2.0"
] | 16 | 2017-11-27T20:57:48.000Z | 2021-11-23T18:46:43.000Z | purity_fb/purity_fb_1dot5/models/hardware_response.py | tlewis-ps/purity_fb_python_client | 652835cbd485c95a86da27f8b661679727ec6ea0 | [
"Apache-2.0"
] | 22 | 2017-10-13T15:33:05.000Z | 2021-11-08T19:56:21.000Z | # coding: utf-8
"""
Pure Storage FlashBlade REST 1.5 Python SDK
Pure Storage FlashBlade REST 1.5 Python SDK, developed by [Pure Storage, Inc](http://www.purestorage.com/). Documentations can be found at [purity-fb.readthedocs.io](http://purity-fb.readthedocs.io/).
OpenAPI spec version: 1.5
Contact: info@purestorage.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class HardwareResponse(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    #BEGIN_CUSTOM
    # IR-51527: Prevent Pytest from attempting to collect this class based on name.
    __test__ = False
    #END_CUSTOM

    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'pagination_info': 'PaginationInfo',
        'items': 'list[Hardware]'
    }

    attribute_map = {
        'pagination_info': 'pagination_info',
        'items': 'items'
    }

    def __init__(self, pagination_info=None, items=None):  # noqa: E501
        """HardwareResponse - a model defined in Swagger"""  # noqa: E501

        self._pagination_info = None
        self._items = None
        self.discriminator = None

        if pagination_info is not None:
            self.pagination_info = pagination_info
        if items is not None:
            self.items = items

    @property
    def pagination_info(self):
        """Gets the pagination_info of this HardwareResponse.  # noqa: E501

        pagination information, only available in GET requests  # noqa: E501

        :return: The pagination_info of this HardwareResponse.  # noqa: E501
        :rtype: PaginationInfo
        """
        return self._pagination_info

    @pagination_info.setter
    def pagination_info(self, pagination_info):
        """Sets the pagination_info of this HardwareResponse.

        pagination information, only available in GET requests  # noqa: E501

        :param pagination_info: The pagination_info of this HardwareResponse.  # noqa: E501
        :type: PaginationInfo
        """

        self._pagination_info = pagination_info

    @property
    def items(self):
        """Gets the items of this HardwareResponse.  # noqa: E501

        a list of hardware components  # noqa: E501

        :return: The items of this HardwareResponse.  # noqa: E501
        :rtype: list[Hardware]
        """
        return self._items

    @items.setter
    def items(self, items):
        """Sets the items of this HardwareResponse.

        a list of hardware components  # noqa: E501

        :param items: The items of this HardwareResponse.  # noqa: E501
        :type: list[Hardware]
        """

        self._items = items

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively convert nested models, lists, and dicts of models.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(HardwareResponse, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, HardwareResponse):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
5ea5299ab73a1f206a3a340eedb6d53105efa470 | 2,135 | py | Python | views/view_calender.py | hanzala123/Flask_calendar | c6c32a22fa1b11bedd7780bb951769671c0e7451 | [
"MIT"
] | 1 | 2021-10-31T14:09:15.000Z | 2021-10-31T14:09:15.000Z | views/view_calender.py | hanzala123/Flask_calendar | c6c32a22fa1b11bedd7780bb951769671c0e7451 | [
"MIT"
] | null | null | null | views/view_calender.py | hanzala123/Flask_calendar | c6c32a22fa1b11bedd7780bb951769671c0e7451 | [
"MIT"
] | null | null | null | from core.security import session_required
from flask import (
Blueprint,
request,
redirect,
make_response,
render_template,
jsonify
)
from datetime import datetime
from core.redis import rds
classes = {'Normal':'event-info','Low': 'event-success','Medium':'event-warning','Important':'event-important', 'Special': 'event-special'}
def convertToMilis(time):
    """Convert a 'YYYY-MM-DDTHH:MM' string to epoch milliseconds (local timezone)."""
    parsed = datetime.strptime(time, '%Y-%m-%dT%H:%M')
    return parsed.timestamp() * 1000
# Blueprint carrying every /calender* route below.
calender = Blueprint('calender', __name__,
            template_folder='templates')
@calender.route('/calender')
@session_required
def home():
    """Render the calendar page (login required)."""
    return render_template('calender_only.html')
@calender.route('/calendar-events')
@session_required
def calendar_events():
    """Return all stored calendar events as JSON for the calendar widget."""
    try:
        rows = rds.get_json('calenderEvents')
        if not rows:
            rows = []
        resp = jsonify({'success' : 1, 'result' : rows})
        resp.status_code = 200
        return resp
    except Exception as e:
        # Previously the exception was only printed and the view fell
        # through to return None, which makes Flask raise a bare 500.
        # Return an explicit JSON error response instead.
        print(e)
        resp = jsonify({'success': 0, 'result': []})
        resp.status_code = 500
        return resp
@calender.route('/calender/addevent')
@session_required
def view_addevent():
    """Create a calendar event from query-string parameters and redirect back."""
    title = request.args.get('title')
    start = convertToMilis(request.args.get('start'))
    if request.args.get('end'):
        stop = convertToMilis(request.args.get('end'))
    else:
        # No explicit end: treat the event as instantaneous.
        stop = start
    tmp = rds.get_json('calenderEvents')
    if tmp:
        # The newest event is kept at index 0, so its id is the current max.
        id = tmp[0]['id']+1
    else:
        id = 1
    eventClass = classes[request.args.get('class')]
    data = {
        "id": id,
        "title": title,
        "url": request.args.get('url'),
        "class": eventClass,
        "start": int(start), #Milliseconds
        "end": int(stop) # Milliseconds
    }
    if tmp:
        tmp.insert(0,data)
    else:
        tmp = [data]
    rds.store_json('calenderEvents',tmp)
    return redirect('/calender')
@calender.route('/calender/removeevent')
@session_required
def view_removeevent():
    """Delete the event whose id matches the 'id' query parameter, then redirect back."""
    id = request.args.get('id')
    tmp = rds.get_json('calenderEvents')
    for i in range(len(tmp)):
        if tmp[i]['id'] == int(id):
            del tmp[i]
            break
    if tmp:
        rds.store_json('calenderEvents',tmp)
    else:
        # Last event removed: drop the redis key entirely.
        rds.delete('calenderEvents')
    return redirect('/calender')
@calender.route('/calender/removeall')
@session_required
def view_removeall():
    """Delete every stored calendar event, then redirect back."""
    rds.delete('calenderEvents')
    return redirect('/calender')
| 22.712766 | 139 | 0.693677 |
c19ff76f0b999911e5c7761ea04e28f32b2a7ea5 | 3,014 | py | Python | sleepens/io/interfaces/smrMAT.py | paradoxysm/sleepens | 9ee4bd8fc8fe2a901e8c16e778daabd31cc5d793 | [
"BSD-3-Clause"
] | 2 | 2020-07-24T02:35:43.000Z | 2021-09-01T11:27:48.000Z | sleepens/io/interfaces/smrMAT.py | paradoxysm/sleepens | 9ee4bd8fc8fe2a901e8c16e778daabd31cc5d793 | [
"BSD-3-Clause"
] | null | null | null | sleepens/io/interfaces/smrMAT.py | paradoxysm/sleepens | 9ee4bd8fc8fe2a901e8c16e778daabd31cc5d793 | [
"BSD-3-Clause"
] | null | null | null | """smrMAT I/O Interface"""
# Authors: Jeffrey Wang
# License: BSD 3 clause
import numpy as np
from scipy.io import loadmat
from sleepens.io import DataObject, Dataset
# Interface metadata consumed by the sleepens I/O registry.
name = "smrMAT"
standard = ".mat files exported by CED Spike2"
filetypes = [("MAT-files", "*.mat")]
type = "RAW"  # NOTE: shadows the builtin `type`; kept for interface compatibility
tags = {'r'}  # read-only interface (see `write` below)
def read_data(filepath, channel):
	"""
	Read the data file at a specific data channel.

	Parameters
	----------
	filepath : path
		Path to the .mat file.

	channel : str
		Name of the channel in the .mat file.

	Returns
	-------
	dataobject : DataObject
		The DataObject containing the data from
		the specific channel.
	"""
	matfile = _load(filepath)
	# Spike2 exports each channel as a top-level variable whose name contains '_Ch'.
	fields = [f for f in matfile.keys() if '_Ch' in f]
	# scipy loads MATLAB structs as nested object arrays; [0][0][0][0] is the
	# channel title field.
	channels = [matfile[field][0][0][0][0] for field in fields]
	if channel in channels:
		field = fields[channels.index(channel)]
		try:
			# NOTE(review): indices 8 and 2 presumably hold the sample values
			# and sampling interval of the Spike2 struct -- confirm against
			# the export format.
			data = matfile[field][0][0][8].flatten()
			resolution = matfile[field][0][0][2][0][0]
		except Exception:
			raise FileNotFoundError("An error occurred extracting from channel")
	else:
		raise FileNotFoundError("Channel named " + channel + " not found. Instead found: " + str(channels))
	return DataObject(name=channel, data=data, resolution=resolution)
def read_labels(filepath, channel, map={}):
	"""
	Read the data file at a specific label channel.

	Parameters
	----------
	filepath : path
		Path to the .mat file.

	channel : str
		Name of the channel in the .mat file.

	map : dict, default={}
		Mapping the label values to some
		set of integers.

	Returns
	-------
	dataobject : DataObject
		The DataObject containing the labels from
		the specific channel.
	"""
	matfile = _load(filepath)
	# Spike2 exports each channel as a top-level variable whose name contains '_Ch'.
	fields = [f for f in matfile.keys() if '_Ch' in f]
	channels = [matfile[field][0][0][0][0] for field in fields]
	if channel in channels:
		field = fields[channels.index(channel)]
		try:
			# NOTE(review): index 7 presumably holds the marker codes; the
			# final entry is dropped -- confirm against the export format.
			labels = matfile[field][0][0][7].flatten()[:-1]
			resolution = matfile[field][0][0][2][0][0]
			# Remap in dict order. NOTE(review): if a map value collides with
			# a later key, labels can be remapped twice.
			for k, v in map.items():
				labels[labels == k] = v
			labels = labels.astype(int)
		except Exception:
			raise FileNotFoundError("An error occurred extracting from channel")
	else:
		raise FileNotFoundError("Channel named " + channel + " not found. Instead found: " + str(channels))
	return DataObject(name=channel, data=labels, resolution=resolution)
def write(filepath, dataobjects):
	"""
	Write the dataset to a file.

	smrMAT is a read-only interface, so this always fails.

	Parameters
	----------
	filepath : path
		Path to the .mat file to write.

	dataobjects : array-like of DataObject, shape=(n_channels,)
		DataObjects to write to the file. DataObjects
		with resolution set to -1 are assumed as labels.

	Raises
	------
	NotImplementedError
		Always.
	"""
	raise NotImplementedError("smrMAT cannot write to files")
def _load(filepath):
"""
Attempt to load the .mat file.
Parameters
----------
filepath : path
Path to the .mat file.
Returns
-------
matfile : dict
Dictionary with variable names as keys
and matrices as values.
"""
try:
matfile = loadmat(filepath)
except:
raise FileNotFoundError("No such file or directory: " + filepath)
return matfile
| 24.504065 | 101 | 0.687127 |
a0a4fbac30758d2b498bed687a6351da51d4c02a | 15,351 | py | Python | tests/scanner/audit/data/test_rules.py | mcunha/forseti-security | cbf25f6173c1a25d4e43a9738eca73f927361cb8 | [
"Apache-2.0"
] | null | null | null | tests/scanner/audit/data/test_rules.py | mcunha/forseti-security | cbf25f6173c1a25d4e43a9738eca73f927361cb8 | [
"Apache-2.0"
] | null | null | null | tests/scanner/audit/data/test_rules.py | mcunha/forseti-security | cbf25f6173c1a25d4e43a9738eca73f927361cb8 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Rules to use in the unit tests."""
# A whitelist for:
# * org 778899, on self and children
# * projects "my-project-1" and "my-project-2", on self
# No inheritance of rules
# Allow members of any role and pattern "user:*@company.com"
RULES1 = {
    'rules': [{
        'name': 'my rule',
        'mode': 'whitelist',
        'resource': [{
            'type': 'organization',
            'applies_to': 'self_and_children',
            'resource_ids': ['778899']
        }, {
            'type': 'project',
            'applies_to': 'self',
            'resource_ids': [
                'my-project-1',
                'my-project-2',
            ]
        }],
        'inherit_from_parents': False,
        'bindings': [{
            'role': 'roles/*',
            'members': ['user:*@company.com']
        }]
    }]
}

# Whitelist, blacklist, and required list
#
# Whitelist
# * org 778899, on self and children
# * projects "my-project-1" and "my-project-2", on self
# No inheritance of rules
# "Allow members of any role and pattern `user:*@company.com`
#  on these resources"
#
# Blacklist
# * project "my-project-2", on self
# "Don't allow `user:baduser@company.com` with any roles on my-project-2"
#
# Required list
# * project "my-project-1", on self
# "Require `user:project_viewer@company.com` to have roles/viewer on
#  my-project-1"
RULES2 = {
    'rules': [
        {
            'name': 'my rule',
            'mode': 'whitelist',
            'resource': [{
                'type': 'organization',
                'applies_to': 'self_and_children',
                'resource_ids': ['778899']
            }, {
                'type': 'project',
                'applies_to': 'self',
                'resource_ids': [
                    'my-project-1',
                    'my-project-2',
                ]
            }],
            'inherit_from_parents': False,
            'bindings': [{
                'role': 'roles/*',
                'members': ['user:*@company.com']
            }]
        }, {
            'name': 'my other rule',
            'mode': 'blacklist',
            'resource': [{
                'type': 'project',
                'applies_to': 'self',
                'resource_ids': ['my-project-2',]
            }],
            'inherit_from_parents': False,
            'bindings': [{
                'role': 'roles/*',
                'members': ['user:baduser@company.com']
            }]
        }, {
            'name': 'required rule',
            'mode': 'required',
            'resource': [{
                'type': 'project',
                'applies_to': 'self',
                'resource_ids': [
                    'my-project-1',
                ]
            }],
            'inherit_from_parents': False,
            'bindings': [{
                'role': 'roles/viewer',
                'members': ['user:project_viewer@company.com']
            }]
        }
    ]
}

# Same as RULES2, except:
# Blacklist
# * org 778899 on self and children
# "Block `user:baduser@company.com` from having any roles on the org"
# This is to see how the rule plays along with the whitelist rule, which
# allows user:*@company.com to have any role in the org.
RULES3 = {
    'rules': [
        {
            'name': 'my whitelist rule',
            'mode': 'whitelist',
            'resource': [{
                'type': 'organization',
                'applies_to': 'self_and_children',
                'resource_ids': ['778899']
            }, {
                'type': 'project',
                'applies_to': 'self',
                'resource_ids': [
                    'my-project-1',
                    'my-project-2',
                ]
            }],
            'inherit_from_parents': False,
            'bindings': [{
                'role': 'roles/*',
                'members': ['user:*@company.com']
            }]
        }, {
            'name': 'my blacklist rule',
            'mode': 'blacklist',
            'resource': [{
                'type': 'organization',
                'applies_to': 'self_and_children',
                'resource_ids': ['778899']
            }],
            'inherit_from_parents': False,
            'bindings': [{
                'role': 'roles/*',
                'members': ['user:baduser@company.com']
            }]
        }, {
            'name': 'my required rule',
            'mode': 'required',
            'resource': [{
                'type': 'project',
                'applies_to': 'self',
                'resource_ids': ['my-project-1',]
            }],
            'inherit_from_parents': False,
            'bindings': [{
                'role': 'roles/viewer',
                'members': ['user:project_viewer@company.com']
            }]
        }
    ]
}

# Two separate whitelist rules:
# * org 778899, applies to self only
# * org 778899, applies to children only
# "Allow `user:owner@company.com` to have an owner role on the org, and
#  allow `user:*@company.com` to have any role on the org's children."
RULES4 = {
    'rules': [
        {
            'name': 'org whitelist',
            'mode': 'whitelist',
            'resource': [{
                'type': 'organization',
                'applies_to': 'self',
                'resource_ids': ['778899']
            }],
            'inherit_from_parents': False,
            'bindings': [{
                'role': 'roles/owner',
                'members': ['user:owner@company.com']
            }]
        }, {
            'name': 'project whitelist',
            'mode': 'whitelist',
            'resource': [{
                'type': 'organization',
                'applies_to': 'children',
                'resource_ids': ['778899']
            }],
            'inherit_from_parents': False,
            'bindings': [{
                'role': 'roles/*',
                'members': ['user:*@company.com']
            }]
        },
    ]
}

# Blacklist/whitelist combination
# * org 778899 blacklist for self and children
# "Don't allow `user:owner@company.com` to have roles/owner."
# * project my-project-1 for self
# "Allow `user:*@company.com` to have any role in this project."
RULES5 = {
    'rules': [
        {
            'name': 'org blacklist',
            'mode': 'blacklist',
            'resource': [{
                'type': 'organization',
                'applies_to': 'self_and_children',
                'resource_ids': ['778899']
            }],
            'bindings': [{
                'role': 'roles/owner',
                'members': ['user:owner@company.com']
            }]
        }, {
            'name': 'project whitelist',
            'mode': 'whitelist',
            'resource': [
                {
                    'type': 'project',
                    'applies_to': 'self',
                    'resource_ids': ['my-project-1']
                }],
            'inherit_from_parents': False,
            'bindings': [{
                'role': 'roles/*',
                'members': ['user:*@company.com']
            }]
        },
    ]
}

# Org children whitelist allows any roles/members for users @company.com
# Org children blacklist blocks owner@company.com.
RULES6 = {
    'rules': [
        {
            'name': 'org whitelist',
            'mode': 'whitelist',
            'resource': [{
                'type': 'organization',
                'applies_to': 'children',
                'resource_ids': ['778899']
            }],
            'bindings': [{
                'role': 'roles/*',
                'members': ['user:*@company.com']
            }]
        }, {
            'name': 'project blacklist',
            'mode': 'blacklist',
            'resource': [{
                'type': 'organization',
                'applies_to': 'children',
                'resource_ids': ['778899']
            }],
            'inherit_from_parents': False,
            'bindings': [{
                'role': 'roles/owner',
                'members': ['user:owner@company.com']
            }]
        },
    ]
}

# Org children blacklist blocks user@company.com with
# Project self whitelist allows *@company.com
RULES7 = {
    'rules': [
        {
            'name': 'org blacklist',
            'mode': 'blacklist',
            'resource': [{
                'type': 'organization',
                'applies_to': 'children',
                'resource_ids': ['778899']
            }],
            'bindings': [{
                'role': 'roles/*',
                'members': ['user:user@company.com']
            }]
        }, {
            'name': 'project whitelist',
            'mode': 'whitelist',
            'resource': [{
                'type': 'project',
                'applies_to': 'self',
                'resource_ids': ['my-project-1']
            }],
            'inherit_from_parents': False,
            'bindings': [{
                'role': 'roles/owner',
                'members': ['user:user@company.com']
            }]
        },
    ]
}

# A whitelist for:
# * Folder 1 (id=333)
# * projects "my-project-3"
# No inheritance of rules
# Allow members of any role and pattern "user:*@company.com"
# NOTE: this is the only fixture with inherit_from_parents=True.
FOLDER_RULES1 = {
    'rules': [{
        'name': 'folder rule 1',
        'mode': 'whitelist',
        'resource': [{
            'type': 'organization',
            'applies_to': 'self_and_children',
            'resource_ids': ['778899']
        }, {
            'type': 'project',
            'applies_to': 'self',
            'resource_ids': [
                'my-project-3',
            ]
        }],
        'inherit_from_parents': True,
        'bindings': [{
            'role': 'roles/*',
            'members': ['user:*@company.com']
        }]
    }]
}

# Simple whitelist to allow any users @ company.com to be present with
# any roles inside any organization.
RULES8 = {
    'rules': [
        {
            'name': 'org whitelist',
            'mode': 'whitelist',
            'resource': [{
                'type': 'organization',
                'applies_to': 'self_and_children',
                'resource_ids': ['*']
            }],
            'bindings': [{
                'role': 'roles/*',
                'members': ['user:*@company.com']
            }]
        },
    ]
}
# Whitelist to allow *@company.com on all orgs and their descendents,
# plus allow my-project-1 to have *@contract-company.com.
RULES9 = {
'rules': [
{
'name': 'org whitelist',
'mode': 'whitelist',
'resource': [{
'type': 'organization',
'applies_to': 'self_and_children',
'resource_ids': ['*']
}],
'bindings': [{
'role': 'roles/*',
'members': ['user:*@company.com']
}]
}, {
'name': 'project whitelist',
'mode': 'whitelist',
'resource': [{
'type': 'project',
'applies_to': 'self',
'resource_ids': ['my-project-1']
}],
'inherit_from_parents': True,
'bindings': [{
'role': 'roles/editor',
'members': ['user:*@contract-company.com']
}]
},
]
}
RULES10 = {
'rules': [
{
'name': 'project required',
'mode': 'required',
'resource': [{
'type': 'project',
'applies_to': 'self',
'resource_ids': ['*']
}],
'inherit_from_parents': True, # this is kinda broken, keep it for now
'bindings': [{
'role': 'roles/owner',
'members': ['user:*@company.com']
}]
},
]
}
# Requiring projects to have owners from a specific domain, for context see
# also https://github.com/GoogleCloudPlatform/forseti-security/issues/799
RULES11 = {
'rules': [
{
'name': (
'this rule uses domain in member of the IAM policy to '
'stipulate that all owners must belong to my domain'),
'mode': 'required',
'resource': [{
'type': 'project',
'applies_to': 'self',
'resource_ids': ['*']
}],
'inherit_from_parents': True,
'bindings': [{
'role': 'roles/owner',
'members': ['domain:xyz.edu']
}]
},
]
}
# Requiring projects to have owners from a specific domain, expressed as a
# wildcard user
RULES12 = {
'rules': [
{
'name': (
'this rule uses a wildcard user in member of the IAM policy '
'to stipulate that all owners must belong to my domain'),
'mode': 'required',
'resource': [{
'type': 'project',
'applies_to': 'self',
'resource_ids': ['*']
}],
'inherit_from_parents': True,
'bindings': [{
'role': 'roles/owner',
'members': ['user:*@xyz.edu']
}]
},
]
}
# Requiring buckets to have object owners from a specific domain, expressed as
# a wildcard user on bucket level
RULES13 = {
'rules': [
{
'name': (
'this rule uses a wildcard user in member of the IAM policy '
'to stipulate that object viewers must belong to my domain'),
'mode': 'required',
'resource': [{
'type': 'bucket',
'applies_to': 'self',
'resource_ids': ['*']
}],
'inherit_from_parents': True,
'bindings': [{
'role': 'roles/objectViewer',
'members': ['user:*@gcs.cloud']
}]
},
]
}
| 31.782609 | 81 | 0.419452 |
d371beeab344ce023faa99c703b58d2b4e18d4b1 | 4,809 | py | Python | tic_tac_toe_tk.py | vinuvirat/tic_tac_toe_tk | 8fa2092539c270852c152caee365434cff3b60ae | [
"MIT"
] | null | null | null | tic_tac_toe_tk.py | vinuvirat/tic_tac_toe_tk | 8fa2092539c270852c152caee365434cff3b60ae | [
"MIT"
] | null | null | null | tic_tac_toe_tk.py | vinuvirat/tic_tac_toe_tk | 8fa2092539c270852c152caee365434cff3b60ae | [
"MIT"
] | null | null | null | from tkinter import *
# Root window that hosts the 3x3 tic-tac-toe board.
window = Tk()
window.geometry("130x150")
window.title('Tic Tac Toe')
# Symbol of the player whose turn it is; toggled inside click() after every
# successful move. 'x' always moves first.
player = 'x'
print('x starts the game')
def click(num):
    """Handle a click on board cell `num` (1-9, row-major order).

    If the cell is still empty (holds a single space), mark it with the
    current player's symbol and pass the turn to the other player.  Then
    scan every row, column and diagonal; on the first completed line,
    print the winner and close the window, ending the game.

    Fixes over the previous version: the nine copy-pasted `if num == N`
    branches are replaced by a single indexed lookup, the stray debug
    prints that only branch 1 emitted are removed, and out-of-range
    `num` values are ignored instead of raising.
    """
    global player
    # Cell StringVars in board order; index = num - 1.
    cells = (text_1, text_2, text_3,
             text_4, text_5, text_6,
             text_7, text_8, text_9)
    if 1 <= num <= len(cells):
        cell = cells[num - 1]
        if cell.get() == ' ':
            cell.set(player)
            player = 'o' if player == 'x' else 'x'
    # Same order the original elif chain used: rows, columns, diagonals —
    # so the reported winner is identical when several lines complete at once.
    winning_lines = (
        (0, 1, 2), (3, 4, 5), (6, 7, 8),   # rows
        (0, 3, 6), (1, 4, 7), (2, 5, 8),   # columns
        (0, 4, 8), (2, 4, 6),              # diagonals
    )
    for a, b, c in winning_lines:
        mark = cells[a].get()
        if mark != ' ' and mark == cells[b].get() == cells[c].get():
            print(mark + ' won the game')
            window.destroy()
            return
# One StringVar per board cell, row-major (text_1..text_3 = top row, etc.).
# click() reads and writes these; a cell is "empty" while its value is ' '.
text_1 = StringVar()
text_2 = StringVar()
text_3 = StringVar()
text_4 = StringVar()
text_5 = StringVar()
text_6 = StringVar()
text_7 = StringVar()
text_8 = StringVar()
text_9 = StringVar()
# One Button per cell; each forwards its 1-based cell number to click().
# The `num=` default in the lambda would be needed only inside a loop; here
# each lambda hard-codes its own literal.
_1 = Button(window, textvariable = text_1, height = 2, width = 2, command = lambda : click(1))
_2 = Button(window, textvariable = text_2, height = 2, width = 2, command = lambda : click(2))
_3 = Button(window, textvariable = text_3, height = 2, width = 2, command = lambda : click(3))
_4 = Button(window, textvariable = text_4, height = 2, width = 2, command = lambda : click(4))
_5 = Button(window, textvariable = text_5, height = 2, width = 2, command = lambda : click(5))
_6 = Button(window, textvariable = text_6, height = 2, width = 2, command = lambda : click(6))
_7 = Button(window, textvariable = text_7, height = 2, width = 2, command = lambda : click(7))
_8 = Button(window, textvariable = text_8, height = 2, width = 2, command = lambda : click(8))
_9 = Button(window, textvariable = text_9, height = 2, width = 2, command = lambda : click(9))
# Initialize every cell to the single-space "empty" marker click() expects.
text_1.set(' ')
text_2.set(' ')
text_3.set(' ')
text_4.set(' ')
text_5.set(' ')
text_6.set(' ')
text_7.set(' ')
text_8.set(' ')
text_9.set(' ')
# Lay the buttons out on a 3x3 grid (grid coordinates are 1-based here).
_1.grid(row = 1, column = 1)
_2.grid(row = 1, column = 2)
_3.grid(row = 1, column = 3)
_4.grid(row = 2, column = 1)
_5.grid(row = 2, column = 2)
_6.grid(row = 2, column = 3)
_7.grid(row = 3, column = 1)
_8.grid(row = 3, column = 2)
_9.grid(row = 3, column = 3)
# Hand control to the Tk event loop; returns when the window is destroyed.
window.mainloop()
82a9d4cbca03e2fcb51af64c6c7c1da4bbd2bc32 | 1,038 | py | Python | test/unit/test_rules.py | ozzyx149/contessa | 4dd22b880299d2a2079c752ae4cf02a66e078ac6 | [
"MIT"
] | null | null | null | test/unit/test_rules.py | ozzyx149/contessa | 4dd22b880299d2a2079c752ae4cf02a66e078ac6 | [
"MIT"
] | null | null | null | test/unit/test_rules.py | ozzyx149/contessa | 4dd22b880299d2a2079c752ae4cf02a66e078ac6 | [
"MIT"
] | null | null | null | from contessa import ContessaRunner
from contessa.executor import refresh_executors
from contessa.models import Table
from test.utils import normalize_str
from contessa.rules import SqlRule
def test_rule_context_formatted_in_where():
    """Jinja placeholders in both the rule SQL and its condition are rendered
    from the run context before being combined into the final WHERE query."""
    class TemplatedRule(SqlRule):
        @property
        def sql(self):
            return "select a, b, c from {{table_fullname}}_{{ts_nodash}}"

    rule = TemplatedRule(
        name="test_rule",
        condition="created_at >= '{{ts_nodash}}'::timestamptz - interval '10 minutes'",
        description="Greater than 0 when bags <> 0",
    )
    checked_table = Table("raw", "table")
    run_context = ContessaRunner.get_context(checked_table, {"ts_nodash": "20190101T000000"})
    # executor holds context of run, so set it
    refresh_executors(checked_table, "", run_context)
    rendered = rule.sql_with_where
    expected = """
        select a, b, c
        from raw.table_20190101T000000
        where created_at >= '20190101T000000'::timestamptz - interval '10 minutes'
    """
    assert normalize_str(rendered) == normalize_str(expected)
| 32.4375 | 87 | 0.697495 |
8d07e3c8bd23a711ce2911a2030d0f6f6378f5a8 | 18,265 | py | Python | python/dev/logic.py | Shail-Shouryya/yt-videos-list | d8b85552804ef2e7bcc828bca15632eeeb46aaa2 | [
"Apache-2.0"
] | 26 | 2021-01-31T11:52:10.000Z | 2021-08-01T17:24:55.000Z | python/dev/logic.py | Shail-Shouryya/yt_videos_list | d8b85552804ef2e7bcc828bca15632eeeb46aaa2 | [
"Apache-2.0"
] | 7 | 2020-06-01T13:14:15.000Z | 2021-01-09T20:58:17.000Z | python/dev/logic.py | Shail-Shouryya/yt-videos-list | d8b85552804ef2e7bcc828bca15632eeeb46aaa2 | [
"Apache-2.0"
] | 6 | 2021-03-18T05:46:51.000Z | 2021-07-19T07:40:37.000Z | import sys
import time
import traceback
import contextlib
import selenium
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from . import program
from .download.selenium_webdriver_dependencies import download_all
from .download.windows_info import get_drive_letter
from .download.user_os_info import determine_user_os
from .notifications import Common, ModuleMessage, ScriptMessage
from .custom_logger import log
def execute(url, file_name, log_silently, txt, csv, markdown, file_suffix, all_video_data_in_memory, video_id_only, reverse_chronological, headless, scroll_pause_time, user_driver, cookie_consent, verify_page_bottom_n_times, file_buffering, list_creator_configuration, execution_type):
    """
    Drive one full scrape of a YouTube channel's "videos" page.

    Validates the output configuration, normalizes `url` into the canonical
    videos-page URL, opens the requested Selenium webdriver (downloading the
    driver binary on demand if opening fails), scrapes the page, and writes
    results to the requested txt/csv/markdown files and/or returns them in
    memory.

    Returns a tuple of (video_data, (channel_name, file_name)).

    NOTE(review): `execution_type` appears to be 'module' or 'script' and only
    selects which flavor of user-facing hint messages is printed — confirm
    against the callers.  The nested helpers below close over execute()'s
    arguments instead of taking explicit parameters.
    """
    common_message = Common()
    module_message = ModuleMessage()
    script_message = ScriptMessage()
    # Abort early (sys.exit) when every output destination is disabled.
    def verify_writing_to_at_least_one_location():
        if txt is False and csv is False and markdown is False and all_video_data_in_memory is False:
            print(common_message.not_writing_to_any_files)
            if execution_type == 'module': print(module_message.not_writing_to_any_files_hint)
            else: print(script_message.not_writing_to_any_files_hint)
            sys.exit()
    # Normalize any accepted channel URL into the canonical "videos" page URL,
    # exiting with a user-facing error when the URL cannot be parsed.
    def process_url():
        try:
            _, channel_type, channel_id = parse_url()
        except IndexError as error_message:
            common_message.display_url_error(error_message)
            traceback.print_exc()
            sys.exit()
        base_url = 'https://www.youtube.com'
        return f'{base_url}/{channel_type}/{channel_id}/videos?view=0&sort=dd&flow=grid&shelf_id=0'
    # Split the URL into (full path, identifier type, identifier value).
    # Raises IndexError when 'youtube.com/' is absent (handled by process_url).
    def parse_url():
        channel_info = url.split('youtube.com/')[1]
        channel_type = channel_info.split('/')[0]
        try:
            # handle URLs such as
            # youtube.com/identifier/ # NOTE there is a trailing slash here!
            # youtube.com/identifier/ID
            # youtube.com/identifier/ID/
            # youtube.com/identifier/ID/anythingElse
            channel_id = channel_info.split('/')[1]
        except IndexError:
            # handle URLs such as
            # youtube.com/identifier # NOTE there is no trailing slash here!
            channel_id = ''
        return channel_info, channel_type, channel_id
    # Dispatch to the per-browser configure_* helper; defaults to firefox.
    def open_user_driver():
        nonlocal user_driver
        if user_driver is None:
            if execution_type == 'module': print(module_message.running_default_driver + '\n' + module_message.show_driver_options)
            else: print(script_message.running_default_driver + '\n' + script_message.show_driver_options)
            user_driver = 'firefox'
        supported_drivers = {
            'firefox': configure_firefoxdriver,
            'opera': configure_operadriver,
            'chrome': configure_chromedriver,
            'brave': configure_bravedriver,
            'edge': configure_edgedriver,
            'safari': configure_safaridriver
        }
        if user_driver not in supported_drivers:
            print(common_message.invalid_driver)
            sys.exit()
        return supported_drivers[user_driver]() # NOTE the need to CALL the function returned by supported_drivers[key] since the dictionary value is a function REFERENCE (the function is not yet invoked)
    def configure_firefoxdriver():
        options = selenium.webdriver.firefox.options.Options()
        if headless is True:
            options.headless = True
        return webdriver.Firefox(options=options)
    def configure_operadriver():
        # webdriver.Opera class MRO (method resolution order): WebDriver -> OperaDriver -> selenium.webdriver.chrome.webdriver.WebDriver -> selenium.webdriver.remote.webdriver.WebDriver -> builtins.object
        # check with
        # >>> from selenium import webdriver
        # >>> help(webdriver.Opera)
        # options = selenium.webdriver.chrome.options.Options()
        # options.headless = True
        options = webdriver.ChromeOptions()
        if headless is True:
            options.add_argument('headless')
            print(common_message.unsupported_opera_headless)
        return webdriver.Opera(options=options)
    # Safari ships with macOS only; headless mode is not supported by safaridriver.
    def configure_safaridriver():
        if user_os != 'macos':
            common_message.display_dependency_setup_instructions('safari', user_os)
            sys.exit()
        if headless is True:
            print(common_message.unsupported_safari_headless)
        return webdriver.Safari()
    def configure_chromedriver():
        # options = selenium.webdriver.chrome.options.Options()
        options = webdriver.ChromeOptions()
        if headless is True:
            options.add_argument('headless')
        return webdriver.Chrome(chrome_options=options)
    # Brave is Chromium-based: reuse ChromeOptions, point binary_location at the
    # Brave executable and use the bravedriver binary.
    def configure_bravedriver():
        options = webdriver.ChromeOptions()
        if user_os == 'windows':
            drive = get_drive_letter()
            options.binary_location = rf'{drive}:\Program Files (x86)\BraveSoftware\Brave-Browser\Application\brave.exe'
            executable_path = rf'{drive}:\Windows\bravedriver.exe'
        else:
            options.binary_location = '/Applications/Brave Browser.app/Contents/MacOS/Brave Browser'
            executable_path = '/usr/local/bin/bravedriver'
        if headless is True:
            print(common_message.unsupported_brave_headless)
            # options.headless = True
        return webdriver.Chrome(options=options, executable_path=executable_path)
    # Edge is only supported on Windows here; other platforms print guidance and exit.
    def configure_edgedriver():
        # options = selenium.webdriver.remote.webdriver.WebDriver()
        if user_os == 'windows':
            drive = get_drive_letter()
            # options.binary_location = rf'{drive}:\Program Files (x86)\Microsoft\Edge\Application\msedge.exe'
            executable_path = rf'{drive}:\Windows\msedgedriver.exe'
        else:
            # options.binary_location = '/Applications/Microsoft Edge.app/Contents/MacOS/Microsoft Edge'
            executable_path = '/usr/local/bin/msedgedriver'
            print(common_message.unsupported_edge)
            print(module_message.show_driver_options)
            sys.exit()
        if headless is True:
            print(common_message.unsupported_edge_headless)
            # options.headless = True
        return webdriver.Edge(executable_path=executable_path)
    def show_user_how_to_set_up_selenium():
        if user_driver != 'safari':
            common_message.tell_user_to_download_driver(user_driver)
        common_message.display_dependency_setup_instructions(user_driver, user_os)
    # Fallback path for a failed driver launch: try to download the matching
    # driver binary and reopen; on a second failure, print setup help and exit.
    def handle_opening_webdriver_exception(error_message):
        # selenium.common.exceptions.WebDriverException: Message: 'BROWSERdriver' executable needs to be in PATH. Please see https://................
        # for some reason this also catches selenium.common.exceptions.SessionNotCreatedException: Message: session not created: This version of BROWSERDriver only supports BROWSER version ##
        nonlocal driver
        common_message.display_selenium_dependency_error(error_message)
        try:
            download_all()
            driver = open_user_driver()
        except selenium.common.exceptions.WebDriverException as same_error_message_again: # could not download the correct Selenium driver based on the user's OS and specified driver
            common_message.display_selenium_dependency_update_error(same_error_message_again)
            traceback.print_exc()
            show_user_how_to_set_up_selenium()
            common_message.display_unable_to_update_driver_automatically(user_driver)
            sys.exit()
    # Main scraping routine: loads the page, waits for the channel heading
    # (regular or "topic" channel), then delegates to program.determine_action
    # while logging to the destinations produced by yield_logger().
    def run_scraper():
        with driver:
            driver.get(url)
            driver.set_window_size(780, 800)
            driver.set_window_position(0, 0)
            manage_cookie_consent_form()
            wait = selenium.webdriver.support.ui.WebDriverWait(driver, 9)
            channel_heading_xpath = '//yt-formatted-string[@class="style-scope ytd-channel-name"]'
            topic_channel_heading_xpath = '//yt-formatted-string[@class="style-scope ytd-topic-channel-details-renderer"]'
            def load_page(channel_heading_xpath, topic_channel_heading_xpath):
                try:
                    wait.until(EC.element_to_be_clickable((By.XPATH, channel_heading_xpath)))
                except selenium.common.exceptions.TimeoutException:
                    wait.until(EC.element_to_be_clickable((By.XPATH, topic_channel_heading_xpath)))
                except selenium.common.exceptions.WebDriverException as error_message:
                    traceback.print_exc()
                    common_message.display_possible_topic_channel_in_headless_error(error_message)
                    sys.exit()
            try:
                load_page(channel_heading_xpath, topic_channel_heading_xpath)
            except selenium.common.exceptions.TimeoutException as error_message:
                common_message.display_selenium_unable_to_load_elements_error(error_message)
                traceback.print_exc()
                sys.exit()
            channel_name, file_name = determine_file_name(channel_heading_xpath, topic_channel_heading_xpath)
            with yield_logger(file_name) as logging_locations:
                log( '>' * 50 + 'STARTING  PROGRAM' + '<' * 50, logging_locations)
                log(f'Now scraping {url} using the {user_driver}driver...', logging_locations)
                log(f'Current configuration: {list_creator_configuration}', logging_locations)
                video_data = program.determine_action(url, driver, video_id_only, scroll_pause_time, verify_page_bottom_n_times, reverse_chronological, file_name, file_buffering, txt, csv, markdown, all_video_data_in_memory, logging_locations)
                program_end = time.perf_counter()
                total_time = program_end - program_start
                log(f'This program took {total_time} seconds to complete writing information for the "{channel_name}" channel to the {file_name} file.', logging_locations)
                log( '>' * 50 + 'COMPLETED PROGRAM' + '<' * 50, logging_locations)
            return (video_data, (channel_name, file_name))
    # Deal with the consent.youtube.com interstitial: either accept all cookies
    # (cookie_consent=True) or walk the "Customize" form turning everything off
    # (cookie_consent=False); anything else is reported as invalid.
    def manage_cookie_consent_form():
        if 'consent.youtube.com' in driver.current_url:
            common_message.display_cookie_redirection()
            accept_button_relative_path = '//button[@aria-label="Agree to the use of cookies and other data for the purposes described"]'
            accept_button = driver.find_element_by_xpath(accept_button_relative_path)
            if cookie_consent is False:
                common_message.display_blocking_cookie_consent()
                wait = selenium.webdriver.support.ui.WebDriverWait(driver, 9)
                # YouTube changed the HTML formatting to make it significantly more difficult to block cookies programatically
                # the following no longer works:
                # wait.until(EC.element_to_be_clickable((By.XPATH, '//a[@aria-label="Customize"]')))
                # driver.find_element_by_xpath('//a[@aria-label="Customize"]').click()
                # the new HTML format uses dynamically named attributes, making it nearly impossible to hard code the cooking blocking process
                # example:
                # <button class="VfPpkd-LgbsSe VfPpkd-LgbsSe-OWXEXe-k8QpJ VfPpkd-LgbsSe-OWXEXe-dgl2Hf nCP5yc AjY5Oe DuMIQc IIdkle" jscontroller="soHxf" jsaction="click:cOuCgd; mousedown:UX7yZ; mouseup:lbsD7e; mouseenter:tfO1Yc; mouseleave:JywGue; touchstart:p6p2H; touchmove:FwuNnf; touchend:yfqBxc; touchcancel:JMtRjd; focus:AHmuwe; blur:O22p3e; contextmenu:mg9Pef;" data-idom-class="nCP5yc AjY5Oe DuMIQc IIdkle" jsname="Q7N4Oc"><div class="VfPpkd-Jh9lGc"></div><div class="VfPpkd-RLmnJb"></div><span jsname="V67aGc" class="VfPpkd-vQzf8d">Customize</span></button></div></div>
                # notice how "Customize" is now just an innerHTML attribute, and nested as a very deep child node of dynamically named attributes
                # one workaround is using a relative path from the "I AGREE" button
                customize_button_relative_path = f'{accept_button_relative_path}/../../../../div/div/button'
                wait.until(EC.element_to_be_clickable((By.XPATH, customize_button_relative_path)))
                driver.find_element_by_xpath(customize_button_relative_path).click()
                wait.until(EC.element_to_be_clickable((By.XPATH, '//button[@aria-label="Turn off Ad personalization"]'))) # last form element on page
                driver.find_element_by_xpath('//button[@aria-label="Turn off Search customization"]').click()
                driver.find_element_by_xpath('//button[@aria-label="Turn off YouTube History"]').click()
                driver.find_element_by_xpath('//button[@aria-label="Turn off Ad personalization"]').click()
                # clicking the button above also selects the 2 buttons below
                # driver.find_element_by_xpath('//button[@aria-label="Turn off Ad personalization on Google Search"]').click()
                # driver.find_element_by_xpath('//button[@aria-label="Turn off Ad personalization on YouTube & across the web"]').click()
                wait.until(EC.element_to_be_clickable((By.XPATH, '//button[@aria-label="Ad personalization is off"]'))) # wait for last form element on page to update
                # driver.find_element_by_xpath('//form[@method="POST"]').click() # this doesn't seem to click the button
                driver.find_elements_by_xpath('//button')[-1].click() # find the last button on the page (the CONFIRM button) and click it
            elif cookie_consent is True:
                common_message.display_accepting_cookie_consent()
                accept_button.click()
            else:
                common_message.display_invalid_cookie_consent_option(cookie_consent)
    # Derive (channel_name, output file stem) from the page heading and the
    # file_name/file_suffix/video_id_only options ('auto' -> channel name,
    # 'id' -> URL identifier, '' when nothing will be written to disk).
    def determine_file_name(channel_heading_xpath, topic_channel_heading_xpath):
        channel_name = driver.find_element_by_xpath(channel_heading_xpath).text or driver.find_element_by_xpath(topic_channel_heading_xpath).text
        is_id = '_id' if video_id_only is True else ''
        if file_suffix is True: suffix = f'_reverse_chronological_video{is_id}s_list' if reverse_chronological else f'_chronological_video{is_id}s_list'
        else: suffix = ''
        if txt is False and csv is False and markdown is False:
            # program will not write to any output files
            # program will store video data in memory and return the list of lists containing the video data
            # only runs when all_video_data_in_memory=True
            formatted_file_name = ''
        elif file_name == 'auto':
            formatted_channel_name = channel_name.replace(' ', '')
            formatted_file_name = f'{formatted_channel_name}{suffix}'
        elif file_name == 'id':
            _, channel_type, channel_id = parse_url()
            if channel_id in ('videos', ''):
                # handle URLs such as
                # youtube.com/teded # id will be teded
                # youtube.com/teded/ # id will be teded
                # youtube.com/teded/videos # id will be teded
                # youtube.com/originals # id will be originals
                # youtube.com/originals/ # id will be originals
                # youtube.com/originals/videos # id will be originals
                formatted_file_name = f'{channel_type}{suffix}'
            else:
                # handle URLs such as
                # youtube.com/channel/UC-Some24CharacterString # id will be UC-Some24CharacterString
                # youtube.com/channel/UC-Some24CharacterString/ # id will be UC-Some24CharacterString
                # youtube.com/channel/UC-Some24CharacterString/videos # id will be UC-Some24CharacterString
                # youtube.com/user/UserNameForChannel # id will be UserNameForChannel
                # youtube.com/user/UserNameForChannel/ # id will be UserNameForChannel
                # youtube.com/user/UserNameForChannel/videos # id will be UserNameForChannel
                # youtube.com/c/ChannelName # id will be ChannelName
                # youtube.com/c/ChannelName/ # id will be ChannelName
                # youtube.com/c/ChannelName/videos # id will be ChannelName
                formatted_file_name = f'{channel_id}{suffix}'
        else:
            if file_name.endswith('.txt') or file_name.endswith('.csv'): formatted_file_name = file_name[:-4]
            elif file_name.endswith('.md'): formatted_file_name = file_name[:-3]
            else: formatted_file_name = file_name
        return (channel_name, formatted_file_name)
    # Context manager yielding the tuple of writable log targets: the .log file
    # only when log_silently is True, otherwise the .log file plus stdout.
    @contextlib.contextmanager
    def yield_logger(file_name):
        log_file = f'{file_name}.log'
        with open(log_file, mode='a', encoding='utf-8', buffering=file_buffering) as output_location:
            if log_silently is True: yield (output_location,)
            else: yield (output_location, sys.stdout)
    # ---- main flow -------------------------------------------------------
    verify_writing_to_at_least_one_location()
    user_os = determine_user_os()
    url = process_url()
    program_start = time.perf_counter()
    try:
        driver = open_user_driver()
    except selenium.common.exceptions.WebDriverException as error_message:
        handle_opening_webdriver_exception(error_message)
    return run_scraper()
| 60.480132 | 577 | 0.644073 |
2726de33b6f452ef9c7d71b9b6b70e9c351d8a76 | 51,542 | py | Python | tests/unit/client_tests.py | app63/python-cloudant | cbc6cfb554aa88660c9c80f3bb4d1df170fc8131 | [
"Apache-2.0"
] | null | null | null | tests/unit/client_tests.py | app63/python-cloudant | cbc6cfb554aa88660c9c80f3bb4d1df170fc8131 | [
"Apache-2.0"
] | null | null | null | tests/unit/client_tests.py | app63/python-cloudant | cbc6cfb554aa88660c9c80f3bb4d1df170fc8131 | [
"Apache-2.0"
] | 1 | 2021-09-19T23:52:53.000Z | 2021-09-19T23:52:53.000Z | #!/usr/bin/env python
# Copyright (C) 2015, 2020 IBM Corp. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
client module - Unit tests for CouchDB and Cloudant client classes
See configuration options for environment variables in unit_t_db_base
module docstring.
"""
import base64
import datetime
import json
import os
import sys
import unittest
from time import sleep
import mock
import requests
from cloudant import cloudant, cloudant_bluemix, couchdb, couchdb_admin_party
from cloudant._client_session import BasicSession, CookieSession
from cloudant.client import Cloudant, CouchDB
from cloudant.database import CloudantDatabase
from cloudant.error import (CloudantArgumentError, CloudantClientException,
CloudantDatabaseException)
from cloudant.feed import Feed, InfiniteFeed
from nose.plugins.attrib import attr
from requests import ConnectTimeout, HTTPError
from .unit_t_db_base import skip_if_iam, skip_if_not_cookie_auth, UnitTestDbBase
from .. import bytes_, str_
class CloudantClientExceptionTests(unittest.TestCase):
    """
    Unit tests covering the status-code fallback and lookup behavior of
    CloudantClientException.
    """

    def test_raise_without_code(self):
        """
        Omitting the code entirely should fall back to the default (100).
        """
        with self.assertRaises(CloudantClientException) as ctx:
            raise CloudantClientException()
        self.assertEqual(ctx.exception.status_code, 100)

    def test_raise_using_invalid_code(self):
        """
        An unrecognized code should fall back to the default (100).
        """
        with self.assertRaises(CloudantClientException) as ctx:
            raise CloudantClientException('foo')
        self.assertEqual(ctx.exception.status_code, 100)

    def test_raise_without_args(self):
        """
        A known code whose message template requires arguments should fall
        back to the default (100) when no arguments are supplied.
        """
        with self.assertRaises(CloudantClientException) as ctx:
            raise CloudantClientException(404)
        self.assertEqual(ctx.exception.status_code, 100)

    def test_raise_with_proper_code_and_args(self):
        """
        A known code supplied with its required arguments is preserved.
        """
        with self.assertRaises(CloudantClientException) as ctx:
            raise CloudantClientException(404, 'foo')
        self.assertEqual(ctx.exception.status_code, 404)
class ClientTests(UnitTestDbBase):
"""
CouchDB/Cloudant client unit tests
"""
@unittest.skipIf(
((os.environ.get('ADMIN_PARTY') and os.environ.get('ADMIN_PARTY') == 'true')),
'Skipping couchdb context manager test'
)
@attr(db='couch')
def test_couchdb_context_helper(self):
"""
Test that the couchdb context helper works as expected.
"""
try:
with couchdb(self.user, self.pwd, url=self.url) as c:
self.assertIsInstance(c, CouchDB)
self.assertIsInstance(c.r_session, requests.Session)
except Exception as err:
self.fail('Exception {0} was raised.'.format(str(err)))
@unittest.skipUnless(
((os.environ.get('ADMIN_PARTY') and os.environ.get('ADMIN_PARTY') == 'true')),
'Skipping couchdb_admin_party context manager test'
)
@attr(db='couch')
def test_couchdb_admin_party_context_helper(self):
"""
Test that the couchdb_admin_party context helper works as expected.
"""
try:
with couchdb_admin_party(url=self.url) as c:
self.assertIsInstance(c, CouchDB)
self.assertIsInstance(c.r_session, requests.Session)
except Exception as err:
self.fail('Exception {0} was raised.'.format(str(err)))
def test_constructor_with_url(self):
"""
Test instantiating a client object using a URL
"""
self.assertEqual(
self.client.server_url,
self.url
)
self.assertEqual(self.client.encoder, json.JSONEncoder)
self.assertIsNone(self.client.r_session)
    def test_constructor_with_creds_removed_from_url(self):
        """
        Test that credentials embedded in the URL (user:token@host) are
        stripped from the stored server URL and captured as the client's
        user and auth token instead.
        """
        client = CouchDB(None, None, url='http://a9a9a9a9-a9a9-a9a9-a9a9-a9a9a9a9a9a9-bluemix'
                                         ':a9a9a9a9a9a9a9a9a9a9a9a9a9a9a9a9a9a9a9a9a9a9a9a9a9a9'
                                         'a9a9a9a9a9a9@d8a01891-e4d2-4102-b5f8-751fb735ce31-'
                                         'bluemix.couchdb.local:5984')
        self.assertEqual(client.server_url, 'http://d8a01891-e4d2-4102-b5f8-751fb735ce31-'
                                            'bluemix.couchdb.local:5984')
        self.assertEqual(client._user, 'a9a9a9a9-a9a9-a9a9-a9a9-a9a9a9a9a9a9-bluemix')
        self.assertEqual(client._auth_token, 'a9a9a9a9a9a9a9a9a9a9a9a9a9a9a9a9a9a9a9a'
                                             '9a9a9a9a9a9a9a9a9a9a9a9a9')
def test_connect(self):
"""
Test connect and disconnect functionality.
"""
try:
self.client.connect()
self.assertIsInstance(self.client.r_session, requests.Session)
finally:
self.client.disconnect()
self.assertIsNone(self.client.r_session)
def test_auto_connect(self):
"""
Test connect during client instantiation option.
"""
try:
self.set_up_client(auto_connect=True)
self.assertIsInstance(self.client.r_session, requests.Session)
finally:
self.client.disconnect()
self.assertIsNone(self.client.r_session)
def test_multiple_connect(self):
"""
Test that issuing a connect call to an already connected client does
not cause any issue.
"""
try:
self.client.connect()
self.set_up_client(auto_connect=True)
self.client.connect()
self.assertIsInstance(self.client.r_session, requests.Session)
finally:
self.client.disconnect()
self.assertIsNone(self.client.r_session)
@skip_if_not_cookie_auth
def test_auto_renew_enabled(self):
"""
Test that CookieSession is used when auto_renew is enabled.
"""
try:
self.set_up_client(auto_renew=True)
self.client.connect()
if os.environ.get('ADMIN_PARTY') == 'true':
self.assertIsInstance(self.client.r_session, requests.Session)
else:
self.assertIsInstance(self.client.r_session, CookieSession)
finally:
self.client.disconnect()
@skip_if_not_cookie_auth
def test_auto_renew_enabled_with_auto_connect(self):
"""
Test that CookieSession is used when auto_renew is enabled along with
an auto_connect.
"""
try:
self.set_up_client(auto_connect=True, auto_renew=True)
if os.environ.get('ADMIN_PARTY') == 'true':
self.assertIsInstance(self.client.r_session, requests.Session)
else:
self.assertIsInstance(self.client.r_session, CookieSession)
finally:
self.client.disconnect()
@skip_if_not_cookie_auth
def test_session(self):
    """
    Test getting session information.

    Session info is None if CouchDB Admin Party mode was selected.
    """
    try:
        self.client.connect()
        session = self.client.session()
        if self.client.admin_party:
            self.assertIsNone(session)
        else:
            # The session's user context should name the test user.
            self.assertEqual(session['userCtx']['name'], self.user)
    finally:
        self.client.disconnect()
@skip_if_not_cookie_auth
def test_session_cookie(self):
    """
    Verify session cookie retrieval: None in CouchDB Admin Party mode,
    a real cookie value otherwise.
    """
    try:
        self.client.connect()
        cookie = self.client.session_cookie()
        if self.client.admin_party:
            self.assertIsNone(cookie)
        else:
            self.assertIsNotNone(cookie)
    finally:
        self.client.disconnect()
@mock.patch('cloudant._client_session.Session.request')
def test_session_basic(self, m_req):
    """
    Test using basic access authentication.

    Mocks the underlying session request so no server is contacted, then
    verifies that requests carry an HTTP Basic Auth tuple.
    """
    m_response_ok = mock.MagicMock()
    type(m_response_ok).status_code = mock.PropertyMock(return_value=200)
    type(m_response_ok).text = mock.PropertyMock(return_value='["animaldb"]')
    m_req.return_value = m_response_ok

    client = Cloudant('foo', 'bar', url=self.url, use_basic_auth=True)
    client.connect()
    self.assertIsInstance(client.r_session, BasicSession)

    all_dbs = client.all_dbs()
    m_req.assert_called_once_with(
        'GET',
        self.url + '/_all_dbs',
        allow_redirects=True,
        auth=('foo', 'bar'),  # uses HTTP Basic Auth
        timeout=None
    )
    # assertEquals is a deprecated alias of assertEqual.
    self.assertEqual(all_dbs, ['animaldb'])
@mock.patch('cloudant._client_session.Session.request')
def test_session_basic_with_no_credentials(self, mocked_request):
    """
    Verify that basic access authentication with no credentials issues
    requests with no auth tuple attached.
    """
    ok_response = mock.MagicMock()
    type(ok_response).status_code = mock.PropertyMock(return_value=200)
    mocked_request.return_value = ok_response

    client = Cloudant(None, None, url=self.url, use_basic_auth=True)
    client.connect()
    self.assertIsInstance(client.r_session, BasicSession)

    db = client['animaldb']
    mocked_request.assert_called_once_with(
        'HEAD',
        self.url + '/animaldb',
        allow_redirects=False,
        auth=None,  # ensure no authentication specified
        timeout=None
    )
    self.assertIsInstance(db, CloudantDatabase)
@mock.patch('cloudant._client_session.Session.request')
def test_change_credentials_basic(self, m_req):
    """
    Test changing credentials when using basic access authentication.

    The first request (bad credentials) is mocked to raise a 401; after
    change_credentials() the auth tuple sent on the wire must change.
    """
    # mock 200
    m_response_ok = mock.MagicMock()
    type(m_response_ok).text = mock.PropertyMock(return_value='["animaldb"]')
    # mock 401
    m_response_bad = mock.MagicMock()
    m_response_bad.raise_for_status.side_effect = HTTPError('401 Unauthorized')
    # First call fails, second call succeeds.
    m_req.side_effect = [m_response_bad, m_response_ok]

    client = Cloudant('foo', 'bar', url=self.url, use_basic_auth=True)
    client.connect()
    self.assertIsInstance(client.r_session, BasicSession)

    with self.assertRaises(HTTPError):
        client.all_dbs()  # expected 401
    m_req.assert_called_with(
        'GET',
        self.url + '/_all_dbs',
        allow_redirects=True,
        auth=('foo', 'bar'),  # uses HTTP Basic Auth
        timeout=None
    )

    # use valid credentials
    client.change_credentials('baz', 'qux')
    all_dbs = client.all_dbs()
    m_req.assert_called_with(
        'GET',
        self.url + '/_all_dbs',
        allow_redirects=True,
        auth=('baz', 'qux'),  # uses HTTP Basic Auth
        timeout=None
    )
    # assertEquals is a deprecated alias of assertEqual.
    self.assertEqual(all_dbs, ['animaldb'])
@skip_if_not_cookie_auth
def test_basic_auth_str(self):
    """
    Test getting the basic authentication string.

    Basic auth string is None if CouchDB Admin Party mode was selected.
    """
    try:
        self.client.connect()
        if self.client.admin_party:
            self.assertIsNone(self.client.basic_auth_str())
        else:
            # RFC 7617 basic auth header value:
            # 'Basic ' + base64('user:password').
            expected = 'Basic {0}'.format(
                str_(base64.urlsafe_b64encode(bytes_("{0}:{1}".format(
                    self.user, self.pwd
                ))))
            )
            self.assertEqual(self.client.basic_auth_str(), expected)
    finally:
        self.client.disconnect()
def test_all_dbs(self):
    """
    Test getting a list of all of the databases
    """
    dbnames = [self.dbname() for _ in range(3)]
    try:
        self.client.connect()
        for dbname in dbnames:
            self.client.create_database(dbname)
        # all_dbs() may include databases created elsewhere, so only
        # require that the ones created here are present.
        self.assertTrue(set(dbnames).issubset(self.client.all_dbs()))
    finally:
        # Remove the test databases before disconnecting.
        for dbname in dbnames:
            self.client.delete_database(dbname)
        self.client.disconnect()
def test_create_delete_database(self):
    """
    Exercise the full create/delete lifecycle of a database.
    """
    try:
        self.client.connect()
        new_dbname = self.dbname()
        # Create database
        database = self.client.create_database(new_dbname)
        self.assertTrue(database.exists())
        # Delete database
        self.assertIsNone(self.client.delete_database(new_dbname))
        self.assertFalse(database.exists())
    finally:
        self.client.disconnect()
def test_create_existing_database(self):
    """
    Test creation of already existing database.

    Creating an existing database with throw_on_exists=True must raise a
    CloudantClientException carrying the 412 (Precondition Failed) code.
    """
    dbname = self.dbname()
    self.client.connect()
    try:
        self.client.create_database(dbname)
        with self.assertRaises(CloudantClientException) as cm:
            self.client.create_database(dbname, throw_on_exists=True)
        self.assertEqual(cm.exception.status_code, 412)
    finally:
        # Clean up even if an assertion above fails so the test database
        # and the open session do not leak into subsequent tests.
        self.client.delete_database(dbname)
        self.client.disconnect()
def test_create_invalid_database_name(self):
    """
    Test creation of database with an invalid name.

    Database names must not contain uppercase letters, so the server
    responds with 400 (Bad Request).
    """
    dbname = 'invalidDbName_'
    self.client.connect()
    try:
        with self.assertRaises((CloudantDatabaseException, HTTPError)) as cm:
            self.client.create_database(dbname)
        # CloudantDatabaseException carries status_code directly;
        # HTTPError carries it on the attached response object.
        if hasattr(cm.exception, 'status_code'):
            code = cm.exception.status_code
        else:
            code = cm.exception.response.status_code
        self.assertEqual(code, 400)
    finally:
        # Disconnect even if the assertions above fail.
        self.client.disconnect()
@skip_if_not_cookie_auth
@mock.patch('cloudant._client_session.Session.request')
def test_create_with_server_error(self, m_req):
    """
    Test creation of database with a server error.

    Mocks the request sequence: 200 (authentication), 404 (existence
    check), then 500 on the actual PUT, which must surface as a
    CloudantDatabaseException with status code 500.
    """
    dbname = self.dbname()
    # mock 200 for authentication
    m_response_ok = mock.MagicMock()
    type(m_response_ok).status_code = mock.PropertyMock(return_value=200)
    # mock 404 for head request when verifying if database exists
    m_response_bad = mock.MagicMock()
    type(m_response_bad).status_code = mock.PropertyMock(return_value=404)
    # mock 500 when trying to create the database
    m_resp_service_error = mock.MagicMock()
    type(m_resp_service_error).status_code = mock.PropertyMock(
        return_value=500)
    type(m_resp_service_error).text = mock.PropertyMock(
        return_value='Internal Server Error')
    m_req.side_effect = [m_response_ok, m_response_bad, m_resp_service_error]

    self.client.connect()
    with self.assertRaises(CloudantDatabaseException) as cm:
        self.client.create_database(dbname)
    self.assertEqual(cm.exception.status_code, 500)
    # assertEquals is a deprecated alias of assertEqual.
    self.assertEqual(m_req.call_count, 3)
    m_req.assert_called_with(
        'PUT',
        '/'.join([self.url, dbname]),
        data=None,
        params={'partitioned': 'false'},
        timeout=(30, 300)
    )
def test_delete_non_existing_database(self):
    """
    Test deletion of non-existing database.

    Uses assertRaises instead of the try/self.fail()/except pattern for
    clarity; the exception message is still asserted exactly.
    """
    try:
        self.client.connect()
        with self.assertRaises(CloudantClientException) as cm:
            self.client.delete_database('no_such_db')
        self.assertEqual(str(cm.exception),
                         'Database no_such_db does not exist. '
                         'Verify that the client is valid and try again.')
    finally:
        self.client.disconnect()
def test_keys(self):
    """
    Test retrieving the list of database names
    """
    dbs = []
    try:
        self.client.connect()
        # Without remote=True, keys() reflects only the locally cached
        # database names, which is empty for a fresh connection.
        self.assertEqual(list(self.client.keys()), [])
        # create 10 new test dbs
        for _ in range(10):
            dbs.append(self.client.create_database(self.dbname()).database_name)
        self.assertTrue(set(dbs).issubset(set(self.client.keys(remote=True))))
        self.assertTrue(set(dbs).issubset(set(self.client.all_dbs())))
    finally:
        for db in dbs:
            self.client.delete_database(db)  # remove test db
        self.client.disconnect()
def test_get_non_existing_db_via_getitem(self):
    """
    Test __getitem__ when retrieving a non-existing database.

    Uses assertRaises instead of the try/self.fail()/except pattern.
    """
    try:
        self.client.connect()
        with self.assertRaises(KeyError):
            self.client['no_such_db']
    finally:
        self.client.disconnect()
def test_get_db_via_getitem(self):
    """
    Verify that __getitem__ fetches an existing database object from
    the server.
    """
    new_dbname = self.dbname()
    try:
        self.client.connect()
        self.client.create_database(new_dbname)
        # Retrieve the database object from the server using __getitem__
        fetched = self.client[new_dbname]
        self.assertIsInstance(fetched, self.client._DATABASE_CLASS)
    finally:
        self.client.delete_database(new_dbname)
        self.client.disconnect()
def test_delete_cached_db_object_via_delitem(self):
    """
    Test __delitem__ when removing a cached database object
    """
    dbname = self.dbname()
    try:
        self.client.connect()
        db = self.client.create_database(dbname)
        self.assertIsNotNone(self.client.get(dbname))
        # Plain del only drops the local cache entry, not the remote db.
        del self.client[dbname]
        # Removed from local cache
        # Note: The get method returns a local db object by default
        self.assertIsNone(self.client.get(dbname))
        # Database still exists remotely
        # Note: __getitem__ returns the db object from the server
        self.assertEqual(self.client[dbname], db)
    finally:
        self.client.delete_database(dbname)
        self.client.disconnect()
def test_delete_remote_db_via_delitem(self):
    """
    Test __delitem__ when removing a database.

    With remote=True, __delitem__ removes the database from both the
    local cache and the server.
    """
    dbname = self.dbname()
    try:
        self.client.connect()
        self.client.create_database(dbname)
        self.assertIsNotNone(self.client.get(dbname))
        self.client.__delitem__(dbname, remote=True)
        # Removed from local cache
        self.assertIsNone(self.client.get(dbname))
        # Database removed remotely as well, so a server-side lookup via
        # __getitem__ must now fail (assertRaises replaces the original
        # try/self.fail()/except pattern).
        with self.assertRaises(KeyError):
            self.client[dbname]
    finally:
        self.client.disconnect()
def test_get_cached_db_object_via_get(self):
    """
    Test retrieving a database from the client database cache
    """
    dbname = self.dbname()
    try:
        self.client.connect()
        # Default returns None
        self.assertIsNone(self.client.get('no_such_db'))
        # Creates the database remotely and adds it to the
        # client database cache
        db = self.client.create_database(dbname)
        # Locally cached database object is returned
        self.assertEqual(self.client.get(dbname), db)
    finally:
        self.client.delete_database(dbname)
        self.client.disconnect()
def test_get_remote_db_via_get(self):
    """
    Test retrieving a database
    """
    dbname = self.dbname()
    try:
        self.client.connect()
        # Default returns None
        self.assertIsNone(self.client.get('no_such_db', remote=True))
        # Creates the database remotely and ensure that
        # it is not in the client database local cache
        db = self.client.create_database(dbname)
        del self.client[dbname]
        self.assertIsNone(self.client.get(dbname))
        # Retrieve the database object from the server
        self.assertEqual(self.client.get(dbname, remote=True), db)
    finally:
        self.client.delete_database(dbname)
        self.client.disconnect()
def test_set_non_db_value_via_setitem(self):
    """
    Test raising exception when value is not a database object.

    Uses assertRaises instead of the try/self.fail()/except pattern;
    the exact exception message is still asserted.
    """
    try:
        self.client.connect()
        with self.assertRaises(CloudantClientException) as cm:
            self.client['not-a-db'] = 'This is not a database object'
        self.assertEqual(
            str(cm.exception),
            'Value must be set to a Database object. Found type: str')
    finally:
        self.client.disconnect()
def test_local_set_db_value_via_setitem(self):
    """
    Verify that __setitem__ without remote=True only updates the local
    database cache and creates nothing on the server.
    """
    try:
        self.client.connect()
        cache_only_db = self.client._DATABASE_CLASS(
            self.client, 'local-not-on-server')
        # Value is set in the local database cache but not on the server
        self.client['local-not-on-server'] = cache_only_db
        self.assertEqual(
            self.client.get('local-not-on-server'), cache_only_db)
        self.assertFalse(cache_only_db.exists())
    finally:
        self.client.disconnect()
def test_create_db_via_setitem(self):
    """
    Verify that __setitem__ with remote=True creates the database on
    the server in addition to caching it locally.
    """
    new_dbname = self.dbname()
    try:
        self.client.connect()
        remote_db = self.client._DATABASE_CLASS(self.client, new_dbname)
        self.client.__setitem__(new_dbname, remote_db, remote=True)
        self.assertTrue(remote_db.exists())
    finally:
        self.client.delete_database(new_dbname)
        self.client.disconnect()
def test_db_updates_feed_call(self):
    """
    Test that db_updates() method call constructs and returns a Feed object
    """
    try:
        self.client.connect()
        db_updates = self.client.db_updates(limit=100)
        self.assertIs(type(db_updates), Feed)
        # The feed should target the server's _db_updates endpoint,
        # reuse the client's session, and carry the passed options.
        self.assertEqual(
            db_updates._url, '/'.join([self.client.server_url, '_db_updates']))
        self.assertIsInstance(db_updates._r_session, requests.Session)
        self.assertFalse(db_updates._raw_data)
        self.assertEqual(db_updates._options.get('limit'), 100)
    finally:
        self.client.disconnect()
@attr(db='cloudant')
class CloudantClientTests(UnitTestDbBase):
    """
    Cloudant specific client unit tests.

    Fixes applied to the original test class:
    - deprecated ``assertEquals`` calls replaced with ``assertEqual``;
    - ``assertTrue(str(...).find(...))`` assertions replaced with
      ``assertIn`` (``str.find`` returns -1, which is truthy, when the
      substring is absent, so those assertions could never fail);
    - stray trailing commas (which created throwaway tuple expressions)
      removed in test_connect_headers;
    - the missing-credentials bluemix test now requires the exception to
      be raised instead of silently passing when it is not;
    - two swapped docstrings in the requests-usage error tests corrected.
    """

    def test_constructor_with_creds_removed_from_url(self):
        """
        Test instantiating a client object using a URL
        """
        client = Cloudant(None, None, url='https://a9a9a9a9-a9a9-a9a9-a9a9-a9a9a9a9a9a9-bluemix'
                                          ':a9a9a9a9a9a9a9a9a9a9a9a9a9a9a9a9a9a9a9a9a9a9a9a9a9a9'
                                          'a9a9a9a9a9a9@d8a01891-e4d2-4102-b5f8-751fb735ce31-'
                                          'bluemix.cloudant.com')
        # Credentials embedded in the URL must be stripped from the
        # server URL and stored on the client instead.
        self.assertEqual(client.server_url, 'https://d8a01891-e4d2-4102-b5f8-751fb735ce31-'
                                            'bluemix.cloudant.com')
        self.assertEqual(client._user, 'a9a9a9a9-a9a9-a9a9-a9a9-a9a9a9a9a9a9-bluemix')
        self.assertEqual(client._auth_token, 'a9a9a9a9a9a9a9a9a9a9a9a9a9a9a9a9a9a9a9a'
                                             '9a9a9a9a9a9a9a9a9a9a9a9a9')

    @skip_if_not_cookie_auth
    def test_cloudant_session_login(self):
        """
        Test that the Cloudant client session successfully authenticates.
        """
        self.client.connect()
        old_cookie = self.client.session_cookie()
        sleep(5)  # ensure we get a different cookie back
        self.client.session_login()
        self.assertNotEqual(self.client.session_cookie(), old_cookie)

    @skip_if_not_cookie_auth
    def test_cloudant_session_login_with_new_credentials(self):
        """
        Test that the Cloudant client session fails to authenticate when
        passed incorrect credentials.
        """
        self.client.connect()
        with self.assertRaises(HTTPError) as cm:
            self.client.session_login('invalid-user-123', 'pa$$w0rd01')
        # assertIn replaces assertTrue(str(...).find(...)), which never
        # failed because find() returns -1 (truthy) when absent.
        self.assertIn('Name or password is incorrect', str(cm.exception))

    @skip_if_not_cookie_auth
    def test_cloudant_context_helper(self):
        """
        Test that the cloudant context helper works as expected.
        """
        try:
            with cloudant(self.user, self.pwd, account=self.account) as c:
                self.assertIsInstance(c, Cloudant)
                self.assertIsInstance(c.r_session, requests.Session)
        except Exception as err:
            self.fail('Exception {0} was raised.'.format(str(err)))

    @skip_if_not_cookie_auth
    def test_cloudant_bluemix_context_helper_with_legacy_creds(self):
        """
        Test that the cloudant_bluemix context helper with legacy creds works as expected.
        """
        instance_name = 'Cloudant NoSQL DB-lv'
        vcap_services = {'cloudantNoSQLDB': [{
            'credentials': {
                'username': self.user,
                'password': self.pwd,
                'host': '{0}.cloudant.com'.format(self.account),
                'port': 443,
                'url': self.url
            },
            'name': instance_name,
        }]}

        try:
            with cloudant_bluemix(vcap_services, instance_name=instance_name) as c:
                self.assertIsInstance(c, Cloudant)
                self.assertIsInstance(c.r_session, requests.Session)
                self.assertEqual(c.session()['userCtx']['name'], self.user)
        except Exception as err:
            self.fail('Exception {0} was raised.'.format(str(err)))

    @unittest.skipUnless(os.environ.get('IAM_API_KEY'),
                         'Skipping Cloudant Bluemix context helper with IAM test')
    def test_cloudant_bluemix_context_helper_with_iam(self):
        """
        Test that the cloudant_bluemix context helper with IAM works as expected.
        """
        instance_name = 'Cloudant NoSQL DB-lv'
        vcap_services = {'cloudantNoSQLDB': [{
            'credentials': {
                'apikey': self.iam_api_key,
                'username': self.user,
                'host': '{0}.cloudant.com'.format(self.account),
                'port': 443,
                'url': self.url
            },
            'name': instance_name,
        }]}

        try:
            with cloudant_bluemix(vcap_services, instance_name=instance_name) as c:
                self.assertIsInstance(c, Cloudant)
                self.assertIsInstance(c.r_session, requests.Session)
        except Exception as err:
            self.fail('Exception {0} was raised.'.format(str(err)))

    def test_cloudant_bluemix_context_helper_raise_error_for_missing_iam_and_creds(self):
        """
        Test that the cloudant_bluemix context helper raises a CloudantClientException
        when the IAM key, username, and password are missing in the VCAP_SERVICES env variable.
        """
        instance_name = 'Cloudant NoSQL DB-lv'
        vcap_services = {'cloudantNoSQLDB': [{
            'credentials': {
                'host': '{0}.cloudant.com'.format(self.account),
                'port': 443,
                'url': self.url
            },
            'name': instance_name,
        }]}

        # The original try/except silently passed when no exception was
        # raised; assertRaises makes the expected failure mandatory.
        with self.assertRaises(CloudantClientException) as cm:
            with cloudant_bluemix(vcap_services, instance_name=instance_name):
                pass
        self.assertEqual(
            'Invalid service: IAM API key or username/password credentials are required.',
            str(cm.exception)
        )

    @skip_if_iam
    def test_cloudant_bluemix_dedicated_context_helper(self):
        """
        Test that the cloudant_bluemix context helper works as expected when
        specifying a service name.
        """
        instance_name = 'Cloudant NoSQL DB-wq'
        service_name = 'cloudantNoSQLDB Dedicated'
        vcap_services = {service_name: [{
            'credentials': {
                'username': self.user,
                'password': self.pwd,
                'host': '{0}.cloudant.com'.format(self.account),
                'port': 443,
                'url': self.url
            },
            'name': instance_name,
        }]}

        try:
            with cloudant_bluemix(vcap_services,
                                  instance_name=instance_name,
                                  service_name=service_name) as c:
                self.assertIsInstance(c, Cloudant)
                self.assertIsInstance(c.r_session, requests.Session)
                self.assertEqual(c.session()['userCtx']['name'], self.user)
        except Exception as err:
            self.fail('Exception {0} was raised.'.format(str(err)))

    def test_constructor_with_account(self):
        """
        Test instantiating a client object using an account name
        """
        # Ensure that the client is new
        del self.client
        self.client = Cloudant(self.user, self.pwd, account=self.account)
        self.assertEqual(
            self.client.server_url,
            'https://{0}.cloudant.com'.format(self.account)
        )

    @skip_if_not_cookie_auth
    def test_bluemix_constructor_with_legacy_creds(self):
        """
        Test instantiating a client object using a VCAP_SERVICES environment
        variable.
        """
        instance_name = 'Cloudant NoSQL DB-lv'
        vcap_services = {'cloudantNoSQLDB': [{
            'credentials': {
                'username': self.user,
                'password': self.pwd,
                'host': '{0}.cloudant.com'.format(self.account),
                'port': 443,
                'url': self.url
            },
            'name': instance_name
        }]}

        # create Cloudant Bluemix client
        c = Cloudant.bluemix(vcap_services)
        try:
            c.connect()
            self.assertIsInstance(c, Cloudant)
            self.assertIsInstance(c.r_session, requests.Session)
            self.assertEqual(c.session()['userCtx']['name'], self.user)
        except Exception as err:
            self.fail('Exception {0} was raised.'.format(str(err)))
        finally:
            c.disconnect()

    @unittest.skipUnless(os.environ.get('IAM_API_KEY'),
                         'Skipping Cloudant Bluemix constructor with IAM test')
    def test_bluemix_constructor_with_iam(self):
        """
        Test instantiating a client object using a VCAP_SERVICES environment
        variable.
        """
        instance_name = 'Cloudant NoSQL DB-lv'
        vcap_services = {'cloudantNoSQLDB': [{
            'credentials': {
                'apikey': self.iam_api_key,
                'username': self.user,
                'host': '{0}.cloudant.com'.format(self.account),
                'port': 443
            },
            'name': instance_name
        }]}

        # create Cloudant Bluemix client
        c = Cloudant.bluemix(vcap_services)
        try:
            c.connect()
            self.assertIsInstance(c, Cloudant)
            self.assertIsInstance(c.r_session, requests.Session)
        except Exception as err:
            self.fail('Exception {0} was raised.'.format(str(err)))
        finally:
            c.disconnect()

    @skip_if_iam
    def test_bluemix_constructor_specify_instance_name(self):
        """
        Test instantiating a client object using a VCAP_SERVICES environment
        variable and specifying which instance name to use.
        """
        instance_name = 'Cloudant NoSQL DB-lv'
        vcap_services = {'cloudantNoSQLDB': [{
            'credentials': {
                'username': self.user,
                'password': self.pwd,
                'host': '{0}.cloudant.com'.format(self.account),
                'port': 443,
                'url': self.url
            },
            'name': instance_name
        }]}

        # create Cloudant Bluemix client
        c = Cloudant.bluemix(vcap_services, instance_name=instance_name)
        try:
            c.connect()
            self.assertIsInstance(c, Cloudant)
            self.assertIsInstance(c.r_session, requests.Session)
            self.assertEqual(c.session()['userCtx']['name'], self.user)
        except Exception as err:
            self.fail('Exception {0} was raised.'.format(str(err)))
        finally:
            c.disconnect()

    @skip_if_not_cookie_auth
    def test_bluemix_constructor_with_multiple_services(self):
        """
        Test instantiating a client object using a VCAP_SERVICES environment
        variable that contains multiple services.
        """
        instance_name = 'Cloudant NoSQL DB-lv'
        vcap_services = {'cloudantNoSQLDB': [
            {
                'credentials': {
                    'apikey': '1234api',
                    'host': '{0}.cloudant.com'.format(self.account),
                    'port': 443,
                    'url': self.url
                },
                'name': instance_name
            },
            {
                'credentials': {
                    'username': 'foo',
                    'password': 'bar',
                    'host': 'baz.com',
                    'port': 1234,
                    'url': 'https://foo:bar@baz.com:1234'
                },
                'name': 'Cloudant NoSQL DB-yu'
            }
        ]}

        # create Cloudant Bluemix client
        c = Cloudant.bluemix(vcap_services, instance_name=instance_name)
        try:
            c.connect()
            self.assertIsInstance(c, Cloudant)
            self.assertIsInstance(c.r_session, requests.Session)
            self.assertEqual(c.session()['userCtx']['name'], self.user)
        except Exception as err:
            self.fail('Exception {0} was raised.'.format(str(err)))
        finally:
            c.disconnect()

    def test_connect_headers(self):
        """
        Test that the appropriate request headers are set
        """
        try:
            self.client.connect()
            self.assertEqual(
                self.client.r_session.headers['X-Cloudant-User'],
                self.account
            )
            # User-Agent format:
            # python-cloudant/<version>/Python/<py version>/<os>/<machine>
            agent = self.client.r_session.headers.get('User-Agent')
            ua_parts = agent.split('/')
            self.assertEqual(len(ua_parts), 6)
            self.assertEqual(ua_parts[0], 'python-cloudant')
            self.assertEqual(ua_parts[1], sys.modules['cloudant'].__version__)
            self.assertEqual(ua_parts[2], 'Python')
            # The stray trailing commas that turned the next assertions
            # into tuple expressions have been removed.
            self.assertEqual(ua_parts[3], '{0}.{1}.{2}'.format(
                sys.version_info[0], sys.version_info[1], sys.version_info[2]))
            self.assertEqual(ua_parts[4], os.uname()[0])
            self.assertEqual(ua_parts[5], os.uname()[4])
        finally:
            self.client.disconnect()

    @skip_if_not_cookie_auth
    def test_connect_timeout(self):
        """
        Test that a connect timeout occurs when instantiating
        a client object with a timeout of 10 ms.
        """
        with self.assertRaises(ConnectTimeout) as cm:
            self.set_up_client(auto_connect=True, timeout=.01)
        # assertIn replaces assertTrue(str(...).find(...)), which never
        # failed because find() returns -1 (truthy) when absent.
        self.assertIn('timed out.', str(cm.exception))

    def test_db_updates_infinite_feed_call(self):
        """
        Test that infinite_db_updates() method call constructs and returns an
        InfiniteFeed object
        """
        try:
            self.client.connect()
            db_updates = self.client.infinite_db_updates()
            self.assertIsInstance(db_updates, InfiniteFeed)
            self.assertEqual(
                db_updates._url, '/'.join([self.client.server_url, '_db_updates']))
            self.assertIsInstance(db_updates._r_session, requests.Session)
            self.assertFalse(db_updates._raw_data)
            self.assertDictEqual(db_updates._options, {'feed': 'continuous'})
        finally:
            self.client.disconnect()

    @skip_if_not_cookie_auth
    def test_billing_data(self):
        """
        Test the retrieval of billing data
        """
        try:
            self.client.connect()
            now = datetime.datetime.now()
            expected = [
                'data_volume',
                'total',
                'start',
                'end',
                'http_heavy',
                'http_light',
                'bill_type'
            ]
            # Test using year and month
            year = now.year
            month = now.month
            data = self.client.bill(year, month)
            self.assertTrue(all(x in expected for x in data.keys()))
            # Test without year and month arguments
            del data
            data = self.client.bill()
            self.assertTrue(all(x in expected for x in data.keys()))
        finally:
            self.client.disconnect()

    def test_set_year_without_month_for_billing_data(self):
        """
        Test raising an exception when retrieving billing data with only
        year parameter
        """
        try:
            self.client.connect()
            year = 2016
            with self.assertRaises(CloudantArgumentError) as cm:
                self.client.bill(year)
            expected = ('Invalid year and/or month supplied. '
                        'Found: year - 2016, month - None')
            self.assertEqual(str(cm.exception), expected)
        finally:
            self.client.disconnect()

    def test_set_month_without_year_for_billing_data(self):
        """
        Test raising an exception when retrieving billing data with only
        month parameter
        """
        try:
            self.client.connect()
            month = 1
            with self.assertRaises(CloudantArgumentError) as cm:
                self.client.bill(None, month)
            expected = ('Invalid year and/or month supplied. '
                        'Found: year - None, month - 1')
            self.assertEqual(str(cm.exception), expected)
        finally:
            self.client.disconnect()

    def test_set_invalid_type_year_for_billing_data(self):
        """
        Test raising an exception when retrieving billing data with a type
        string for the year parameter
        """
        try:
            self.client.connect()
            year = 'foo'
            month = 1
            with self.assertRaises(CloudantArgumentError) as cm:
                self.client.bill(year, month)
            expected = ('Invalid year and/or month supplied. '
                        'Found: year - foo, month - 1')
            self.assertEqual(str(cm.exception), expected)
        finally:
            self.client.disconnect()

    def test_set_year_with_invalid_month_for_billing_data(self):
        """
        Test raising an exception when retrieving billing data with an
        invalid month parameter
        """
        try:
            self.client.connect()
            year = 2016
            month = 13
            with self.assertRaises(CloudantArgumentError) as cm:
                self.client.bill(year, month)
            expected = ('Invalid year and/or month supplied. '
                        'Found: year - 2016, month - 13')
            self.assertEqual(str(cm.exception), expected)
        finally:
            self.client.disconnect()

    @skip_if_not_cookie_auth
    def test_volume_usage_data(self):
        """
        Test the retrieval of volume usage data
        """
        try:
            self.client.connect()
            now = datetime.datetime.now()
            expected = [
                'data_vol',
                'granularity',
                'start',
                'end'
            ]
            # Test using year and month
            year = now.year
            month = now.month
            data = self.client.volume_usage(year, month)
            self.assertTrue(all(x in expected for x in data.keys()))
            # Test without year and month arguments
            del data
            data = self.client.volume_usage()
            self.assertTrue(all(x in expected for x in data.keys()))
        finally:
            self.client.disconnect()

    def test_set_year_without_month_for_volume_usage_data(self):
        """
        Test raising an exception when retrieving volume usage data with only
        year parameter
        """
        try:
            self.client.connect()
            year = 2016
            with self.assertRaises(CloudantArgumentError) as cm:
                self.client.volume_usage(year)
            expected = ('Invalid year and/or month supplied. '
                        'Found: year - 2016, month - None')
            self.assertEqual(str(cm.exception), expected)
        finally:
            self.client.disconnect()

    def test_set_month_without_year_for_volume_usage_data(self):
        """
        Test raising an exception when retrieving volume usage data with only
        month parameter
        """
        try:
            self.client.connect()
            month = 1
            with self.assertRaises(CloudantArgumentError) as cm:
                self.client.volume_usage(None, month)
            expected = ('Invalid year and/or month supplied. '
                        'Found: year - None, month - 1')
            self.assertEqual(str(cm.exception), expected)
        finally:
            self.client.disconnect()

    def test_set_invalid_type_year_for_volume_usage_data(self):
        """
        Test raising an exception when retrieving volume usage data with a type
        string for the year parameter
        """
        try:
            self.client.connect()
            year = 'foo'
            month = 1
            with self.assertRaises(CloudantArgumentError) as cm:
                self.client.volume_usage(year, month)
            expected = ('Invalid year and/or month supplied. '
                        'Found: year - foo, month - 1')
            self.assertEqual(str(cm.exception), expected)
        finally:
            self.client.disconnect()

    def test_set_year_with_invalid_month_for_volume_usage_data(self):
        """
        Test raising an exception when retrieving volume usage data with an
        invalid month parameter
        """
        try:
            self.client.connect()
            year = 2016
            month = 13
            with self.assertRaises(CloudantArgumentError) as cm:
                self.client.volume_usage(year, month)
            expected = ('Invalid year and/or month supplied. '
                        'Found: year - 2016, month - 13')
            self.assertEqual(str(cm.exception), expected)
        finally:
            self.client.disconnect()

    @skip_if_not_cookie_auth
    def test_requests_usage_data(self):
        """
        Test the retrieval of requests usage data
        """
        try:
            self.client.connect()
            now = datetime.datetime.now()
            expected = [
                'requests',
                'granularity',
                'start',
                'end'
            ]
            # Test using year and month
            year = now.year
            month = now.month
            data = self.client.requests_usage(year, month)
            self.assertTrue(all(x in expected for x in data.keys()))
            # Test without year and month arguments
            del data
            data = self.client.requests_usage()
            self.assertTrue(all(x in expected for x in data.keys()))
        finally:
            self.client.disconnect()

    def test_set_year_without_month_for_requests_usage_data(self):
        """
        Test raising an exception when retrieving requests usage data with only
        year parameter
        """
        try:
            self.client.connect()
            year = 2016
            with self.assertRaises(CloudantArgumentError) as cm:
                self.client.requests_usage(year)
            expected = ('Invalid year and/or month supplied. '
                        'Found: year - 2016, month - None')
            self.assertEqual(str(cm.exception), expected)
        finally:
            self.client.disconnect()

    def test_set_month_without_year_for_requests_usage_data(self):
        """
        Test raising an exception when retrieving requests usage data with only
        month parameter
        """
        try:
            self.client.connect()
            month = 1
            with self.assertRaises(CloudantArgumentError) as cm:
                self.client.requests_usage(None, month)
            expected = ('Invalid year and/or month supplied. '
                        'Found: year - None, month - 1')
            self.assertEqual(str(cm.exception), expected)
        finally:
            self.client.disconnect()

    def test_set_invalid_type_year_for_requests_usage_data(self):
        """
        Test raising an exception when retrieving requests usage data with
        a type string for the year parameter
        """
        try:
            self.client.connect()
            year = 'foo'
            month = 1
            with self.assertRaises(CloudantArgumentError) as cm:
                self.client.requests_usage(year, month)
            expected = ('Invalid year and/or month supplied. '
                        'Found: year - foo, month - 1')
            self.assertEqual(str(cm.exception), expected)
        finally:
            self.client.disconnect()

    def test_set_year_with_invalid_month_for_requests_usage_data(self):
        """
        Test raising an exception when retrieving requests usage data with an
        invalid month parameter
        """
        try:
            self.client.connect()
            year = 2016
            month = 13
            with self.assertRaises(CloudantArgumentError) as cm:
                self.client.requests_usage(year, month)
            expected = ('Invalid year and/or month supplied. '
                        'Found: year - 2016, month - 13')
            self.assertEqual(str(cm.exception), expected)
        finally:
            self.client.disconnect()

    @skip_if_not_cookie_auth
    def test_shared_databases(self):
        """
        Test the retrieval of shared database list
        """
        try:
            self.client.connect()
            self.assertIsInstance(self.client.shared_databases(), list)
        finally:
            self.client.disconnect()

    @skip_if_not_cookie_auth
    def test_generate_api_key(self):
        """
        Test the generation of an API key for this client account
        """
        try:
            self.client.connect()
            expected = ['key', 'password', 'ok']
            api_key = self.client.generate_api_key()
            self.assertTrue(all(x in expected for x in api_key.keys()))
            self.assertTrue(api_key['ok'])
        finally:
            self.client.disconnect()

    @skip_if_not_cookie_auth
    def test_cors_configuration(self):
        """
        Test the retrieval of the current CORS configuration for this client
        account
        """
        try:
            self.client.connect()
            expected = ['allow_credentials', 'enable_cors', 'origins']
            cors = self.client.cors_configuration()
            self.assertTrue(all(x in expected for x in cors.keys()))
        finally:
            self.client.disconnect()

    @skip_if_not_cookie_auth
    def test_cors_origins(self):
        """
        Test the retrieval of the CORS origins list
        """
        try:
            self.client.connect()
            origins = self.client.cors_origins()
            self.assertIsInstance(origins, list)
        finally:
            self.client.disconnect()

    @skip_if_not_cookie_auth
    def test_disable_cors(self):
        """
        Test disabling CORS (assuming CORS is enabled)
        """
        try:
            self.client.connect()
            # Save original CORS settings
            save = self.client.cors_configuration()
            # Test CORS disable
            self.assertEqual(self.client.disable_cors(), {'ok': True})
            # Restore original CORS settings
            self.client.update_cors_configuration(
                save['enable_cors'],
                save['allow_credentials'],
                save['origins'],
                True
            )
        finally:
            self.client.disconnect()

    @skip_if_not_cookie_auth
    def test_update_cors_configuration(self):
        """
        Test updating CORS configuration
        """
        try:
            self.client.connect()
            # Save original CORS settings
            save = self.client.cors_configuration()
            # Test updating CORS settings, overwriting origins
            result = self.client.update_cors_configuration(
                True,
                True,
                ['https://ibm.com'],
                True)
            self.assertEqual(result, {'ok': True})
            updated_cors = self.client.cors_configuration()
            self.assertTrue(updated_cors['enable_cors'])
            self.assertTrue(updated_cors['allow_credentials'])
            expected = ['https://ibm.com']
            self.assertTrue(all(x in expected for x in updated_cors['origins']))
            # Test updating CORS settings, adding to origins
            result = self.client.update_cors_configuration(
                True,
                True,
                ['https://ibm.cloudant.com']
            )
            self.assertEqual(result, {'ok': True})
            del updated_cors
            updated_cors = self.client.cors_configuration()
            self.assertTrue(updated_cors['enable_cors'])
            self.assertTrue(updated_cors['allow_credentials'])
            expected.append('https://ibm.cloudant.com')
            self.assertTrue(all(x in expected for x in updated_cors['origins']))
            # Restore original CORS settings
            self.client.update_cors_configuration(
                save['enable_cors'],
                save['allow_credentials'],
                save['origins'],
                True
            )
        finally:
            self.client.disconnect()
# Allow running this module directly with the standard unittest runner.
if __name__ == '__main__':
    unittest.main()
64572ed78858acb0028ff40cf8d8cfe382de1adc | 7,173 | py | Python | tensorflow/contrib/distributions/python/ops/categorical.py | returncode13/tensorflow | c5f94b10bbb30e525fa3ca313e7ccb173040c90a | [
"Apache-2.0"
] | 1 | 2016-11-23T17:44:04.000Z | 2016-11-23T17:44:04.000Z | tensorflow/contrib/distributions/python/ops/categorical.py | returncode13/tensorflow | c5f94b10bbb30e525fa3ca313e7ccb173040c90a | [
"Apache-2.0"
] | null | null | null | tensorflow/contrib/distributions/python/ops/categorical.py | returncode13/tensorflow | c5f94b10bbb30e525fa3ca313e7ccb173040c90a | [
"Apache-2.0"
] | null | null | null | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Categorical distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import distribution
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
class Categorical(distribution.Distribution):
  """Categorical distribution.

  The categorical distribution is parameterized by the log-probabilities
  of a set of classes.

  Note, the following methods of the base class aren't implemented:
    * mean
    * cdf
    * log_cdf
  """
  def __init__(
      self,
      logits,
      dtype=dtypes.int32,
      validate_args=True,
      allow_nan_stats=False,
      name="Categorical"):
    """Initialize Categorical distributions using class log-probabilities.

    Args:
      logits: An N-D `Tensor`, `N >= 1`, representing the log probabilities
          of a set of Categorical distributions. The first `N - 1` dimensions
          index into a batch of independent distributions and the last
          dimension indexes into the classes.
      dtype: The type of the event samples (default: int32).
      validate_args: Unused in this distribution.
      allow_nan_stats: Boolean, default False. If False, raise an exception if
          a statistic (e.g. mean/mode/etc...) is undefined for any batch
          member. If True, batch members with valid parameters leading to
          undefined statistics will return NaN for this statistic.
      name: A name for this distribution (optional).
    """
    self._allow_nan_stats = allow_nan_stats
    self._name = name
    self._dtype = dtype
    self._validate_args = validate_args
    with ops.op_scope([logits], name):
      self._logits = ops.convert_to_tensor(logits, name="logits")
      logits_shape = array_ops.shape(self._logits)
      # The last dimension of `logits` indexes the classes; every leading
      # dimension forms the batch shape.
      self._batch_rank = array_ops.size(logits_shape) - 1
      self._batch_shape = array_ops.slice(
          logits_shape, [0], array_ops.pack([self._batch_rank]))
      self._num_classes = array_ops.gather(logits_shape, self._batch_rank)
  @property
  def allow_nan_stats(self):
    """Boolean describing behavior when a stat is undefined for batch member."""
    return self._allow_nan_stats
  @property
  def validate_args(self):
    """Boolean describing behavior on invalid input."""
    return self._validate_args
  @property
  def name(self):
    """Name prepended to all ops created by this distribution."""
    return self._name
  @property
  def dtype(self):
    """dtype of samples drawn from this distribution."""
    return self._dtype
  @property
  def is_reparameterized(self):
    # Sampling goes through random_ops.multinomial, which is not
    # differentiable with respect to `logits`.
    return False
  def batch_shape(self, name="batch_shape"):
    """Batch dimensions of this distribution as a 1-D int `Tensor`."""
    with ops.name_scope(self.name):
      return array_ops.identity(self._batch_shape, name=name)
  def get_batch_shape(self):
    """Statically-known batch shape: `logits` shape minus the class dim."""
    return self.logits.get_shape()[:-1]
  def event_shape(self, name="event_shape"):
    """Shape of a single event; events are scalars, so an empty `Tensor`."""
    with ops.name_scope(self.name):
      return array_ops.constant([], dtype=self._batch_shape.dtype, name=name)
  def get_event_shape(self):
    """Statically-known event shape; Categorical events are scalars."""
    return tensor_shape.scalar()
  @property
  def num_classes(self):
    """Scalar `Tensor` holding the number of classes (size of last dim)."""
    return self._num_classes
  @property
  def logits(self):
    """Unnormalized log-probabilities parameterizing the distribution."""
    return self._logits
  def log_prob(self, k, name="log_prob"):
    """Log-probability of class `k`.

    Args:
      k: `int32` or `int64` Tensor. Must be broadcastable with a `batch_shape`
          `Tensor`.
      name: A name for this operation (optional).

    Returns:
      The log-probabilities of the classes indexed by `k`
    """
    with ops.name_scope(self.name):
      with ops.op_scope([k, self.logits], name):
        k = ops.convert_to_tensor(k, name="k")
        # Broadcast `logits` against `k` (and `k` against `logits`) so the
        # two tensors share a common batch shape before the cross entropy.
        logits = self.logits * array_ops.ones_like(
            array_ops.expand_dims(k, -1),
            dtype=self.logits.dtype)
        k *= array_ops.ones(
            array_ops.slice(
                array_ops.shape(logits), [0], [array_ops.rank(logits) - 1]),
            dtype=k.dtype)
        k.set_shape(tensor_shape.TensorShape(logits.get_shape()[:-1]))
        # log P(k) equals minus the sparse softmax cross entropy with a
        # one-hot target at index k.
        return -nn_ops.sparse_softmax_cross_entropy_with_logits(logits, k)
  def prob(self, k, name="prob"):
    """Probability of class `k`.

    Args:
      k: `int32` or `int64` Tensor. Must be broadcastable with logits.
      name: A name for this operation (optional).

    Returns:
      The probabilities of the classes indexed by `k`
    """
    # Base class computes exp(log_prob(k)).
    return super(Categorical, self).prob(k, name)
  def sample_n(self, n, seed=None, name="sample_n"):
    """Sample `n` observations from the Categorical distribution.

    Args:
      n: 0-D. Number of independent samples to draw for each distribution.
      seed: Random seed (optional).
      name: A name for this operation (optional).

    Returns:
      An `int64` `Tensor` with shape `[n, batch_shape, event_shape]`
    """
    with ops.name_scope(self.name):
      with ops.op_scope([self.logits, n], name):
        n = ops.convert_to_tensor(n, name="n")
        # Flatten the batch so multinomial sees shape [batch, num_classes].
        logits_2d = array_ops.reshape(
            self.logits, array_ops.pack([-1, self.num_classes]))
        samples = random_ops.multinomial(logits_2d, n, seed=seed)
        samples = math_ops.cast(samples, self._dtype)
        # multinomial returns [batch, n]; transpose and reshape back to
        # [n] + batch_shape.
        ret = array_ops.reshape(
            array_ops.transpose(samples),
            array_ops.concat(
                0, [array_ops.expand_dims(n, 0), self.batch_shape()]))
        ret.set_shape(tensor_shape.vector(tensor_util.constant_value(n))
                      .concatenate(self.get_batch_shape()))
        return ret
  def entropy(self, name="sample"):
    """Shannon entropy of each batch member, in nats.

    NOTE(review): the default op name "sample" looks like a copy/paste slip
    ("entropy" would be expected); left unchanged to preserve graph names.
    """
    with ops.name_scope(self.name):
      with ops.op_scope([], name):
        logits_2d = array_ops.reshape(
            self.logits, array_ops.pack([-1, self.num_classes]))
        histogram_2d = nn_ops.softmax(logits_2d)
        # H(p) = -sum_k p_k log p_k, i.e. the softmax cross entropy of the
        # logits against their own softmax distribution.
        ret = array_ops.reshape(
            nn_ops.softmax_cross_entropy_with_logits(logits_2d, histogram_2d),
            self.batch_shape())
        ret.set_shape(self.get_batch_shape())
        return ret
  def mode(self, name="mode"):
    """Most probable class for each batch member (argmax of the logits)."""
    with ops.name_scope(self.name):
      with ops.op_scope([], name):
        ret = math_ops.argmax(self.logits, dimension=self._batch_rank)
        ret = math_ops.cast(ret, self._dtype)
        ret.set_shape(self.get_batch_shape())
        return ret
  @property
  def is_continuous(self):
    # Categorical is a discrete distribution over integer class indices.
    return False
| 34.320574 | 80 | 0.678238 |
4d5432e63152da12602ee6bd309046657c0548bb | 15,617 | py | Python | tests/examples/image/classification/test_pokemon_classification_data_processor.py | kostaleonard/mlops | 236d3499535d6294768c15336180217829fb2ee3 | [
"MIT"
] | 1 | 2021-11-26T21:41:00.000Z | 2021-11-26T21:41:00.000Z | tests/examples/image/classification/test_pokemon_classification_data_processor.py | kostaleonard/mlops | 236d3499535d6294768c15336180217829fb2ee3 | [
"MIT"
] | 39 | 2021-11-18T20:01:34.000Z | 2022-03-26T17:59:07.000Z | tests/examples/image/classification/test_pokemon_classification_data_processor.py | kostaleonard/mlops | 236d3499535d6294768c15336180217829fb2ee3 | [
"MIT"
] | null | null | null | """Tests pokemon_classification_data_processor.py."""
import pytest
import numpy as np
from mlops.examples.image.classification.pokemon_classification_data_processor import (
PokemonClassificationDataProcessor,
DEFAULT_DATASET_TRAINVALTEST_PATH,
DEFAULT_DATASET_PRED_PATH,
HEIGHT,
WIDTH,
CHANNELS,
CLASSES,
)
from mlops.examples.image.classification.errors import LabelsNotFoundError
# Expected example counts for the bundled sample dataset splits.
EXPECTED_NUM_TRAINVALTEST = 10
EXPECTED_NUM_TRAIN = 7
EXPECTED_NUM_VAL = 2
EXPECTED_NUM_PRED = 3
# Raw 8-bit pixel value bounds.
PIXEL_MIN = 0
PIXEL_MAX = 255
# Known per-image mean pixel values, used to locate specific examples by
# content (the loaders do not guarantee ordering), plus their type labels.
BULBASAUR_IMG_MEAN = 0.06437409
BULBASAUR_LABEL = {"Grass", "Poison"}
CHARIZARD_IMG_MEAN = 0.17114125
CHARIZARD_LABEL = {"Fire", "Flying"}
def test_get_raw_features_and_labels_returns_expected_keys() -> None:
"""Tests that get_raw_features_and_labels returns the expected keys for the
train/val/test dataset."""
processor = PokemonClassificationDataProcessor()
features, labels = processor.get_raw_features_and_labels(
DEFAULT_DATASET_TRAINVALTEST_PATH
)
assert set(features.keys()) == {"X_train", "X_val", "X_test"}
assert set(labels.keys()) == {"y_train", "y_val", "y_test"}
def test_get_raw_features_and_labels_pred_raises_error() -> None:
    """Tests that get_raw_features_and_labels raises LabelsNotFoundError when
    called on the prediction directory."""
    data_processor = PokemonClassificationDataProcessor()
    # The prediction set ships without labels, so asking for them must fail.
    with pytest.raises(LabelsNotFoundError):
        data_processor.get_raw_features_and_labels(DEFAULT_DATASET_PRED_PATH)
def test_get_raw_features_and_labels_trainvaltest_correct_split() -> None:
"""Tests that the train/val/test datasets are split into the expected
sizes."""
processor = PokemonClassificationDataProcessor()
features, labels = processor.get_raw_features_and_labels(
DEFAULT_DATASET_TRAINVALTEST_PATH
)
num_examples = sum(map(len, features.values()))
assert num_examples == EXPECTED_NUM_TRAINVALTEST
assert len(features["X_train"]) == EXPECTED_NUM_TRAIN
assert len(features["X_val"]) == EXPECTED_NUM_VAL
assert (
len(features["X_test"])
== EXPECTED_NUM_TRAINVALTEST - EXPECTED_NUM_TRAIN - EXPECTED_NUM_VAL
)
assert len(features["X_train"]) == len(labels["y_train"])
assert len(features["X_val"]) == len(labels["y_val"])
assert len(features["X_test"]) == len(labels["y_test"])
def test_get_raw_features_trainvaltest_returns_expected_keys() -> None:
"""Tests that get_raw_features returns the expected keys {'X_train',
'X_val', 'X_test} when called on the train/val/test directory.
"""
processor = PokemonClassificationDataProcessor()
raw = processor.get_raw_features(DEFAULT_DATASET_TRAINVALTEST_PATH)
assert set(raw.keys()) == {"X_train", "X_val", "X_test"}
def test_get_raw_features_match() -> None:
    """Tests that the features produced by get_raw_features_and_labels and
    get_raw_features are the same features."""
    # pylint: disable=invalid-name
    processor = PokemonClassificationDataProcessor()
    features, _ = processor.get_raw_features_and_labels(
        DEFAULT_DATASET_TRAINVALTEST_PATH
    )
    X_all = np.concatenate(
        (features["X_train"], features["X_val"], features["X_test"])
    )
    features_only = processor.get_raw_features(
        DEFAULT_DATASET_TRAINVALTEST_PATH
    )
    X_all_only = np.concatenate(
        (
            features_only["X_train"],
            features_only["X_val"],
            features_only["X_test"],
        )
    )
    # Element-wise sort along axis 0 canonicalizes both stacks so that the
    # comparison is independent of example order: if the two collections
    # contain the same images (possibly split differently), the sorted
    # tensors are equal.
    X_all.sort(axis=0)
    X_all_only.sort(axis=0)
    assert np.array_equal(X_all, X_all_only)
def test_get_raw_features_pred_returns_expected_keys() -> None:
"""Tests that get_raw_features returns the expected keys {'X_pred'} when
called on the prediction directory.
"""
processor = PokemonClassificationDataProcessor()
raw = processor.get_raw_features(DEFAULT_DATASET_PRED_PATH)
assert set(raw.keys()) == {"X_pred"}
def test_get_raw_features_correct_shape() -> None:
"""Tests that get_raw_features returns tensors with the expected shapes."""
processor = PokemonClassificationDataProcessor()
raw = processor.get_raw_features(DEFAULT_DATASET_PRED_PATH)
for tensor in raw.values():
assert tensor.shape[1:] == (HEIGHT, WIDTH, CHANNELS)
def test_get_raw_features_correct_dtype() -> None:
"""Tests that get_raw_features returns tensors with dtype float32."""
processor = PokemonClassificationDataProcessor()
raw = processor.get_raw_features(DEFAULT_DATASET_PRED_PATH)
for tensor in raw.values():
assert tensor.dtype == np.float32
def test_get_raw_features_correct_value_range() -> None:
    """Tests that get_raw_features returns tensors in the range [0, 1].

    Note: the previous docstring claimed [0, 255], but the assertions below
    (and the sub-1.0 image-mean constants at module scope) establish that raw
    features are already scaled to [0, 1].
    """
    processor = PokemonClassificationDataProcessor()
    raw = processor.get_raw_features(DEFAULT_DATASET_TRAINVALTEST_PATH)
    for tensor in raw.values():
        assert tensor.min() >= 0
        assert tensor.max() <= 1
def test_get_raw_features_no_na() -> None:
"""Tests that get_raw_features returns tensors with no missing values."""
processor = PokemonClassificationDataProcessor()
raw = processor.get_raw_features(DEFAULT_DATASET_TRAINVALTEST_PATH)
for tensor in raw.values():
assert not np.isnan(tensor).any()
def test_get_raw_features_have_multiple_pixel_values() -> None:
"""Tests that the images were loaded correctly by ensuring that more than
one pixel value exists in the tensors."""
processor = PokemonClassificationDataProcessor()
raw = processor.get_raw_features(DEFAULT_DATASET_TRAINVALTEST_PATH)
for tensor in raw.values():
assert len(np.unique(tensor)) > 1
def test_get_raw_labels_trainvaltest_lengths_match_features() -> None:
"""Tests that all entries in the raw label dictionary have the same number
of examples as their counterpart features."""
processor = PokemonClassificationDataProcessor()
raw_features, raw_labels = processor.get_raw_features_and_labels(
DEFAULT_DATASET_TRAINVALTEST_PATH
)
assert len(raw_features["X_train"]) == len(raw_labels["y_train"])
assert len(raw_features["X_val"]) == len(raw_labels["y_val"])
assert len(raw_features["X_test"]) == len(raw_labels["y_test"])
def test_get_raw_labels_correct_tensor_shapes() -> None:
"""Tests that labels are of the correct shape."""
processor = PokemonClassificationDataProcessor()
_, raw = processor.get_raw_features_and_labels(
DEFAULT_DATASET_TRAINVALTEST_PATH
)
for tensor in raw.values():
assert tensor.shape[1:] == (2,)
def test_get_raw_labels_correct_dtype() -> None:
"""Tests that labels are of type object (string)."""
processor = PokemonClassificationDataProcessor()
_, raw = processor.get_raw_features_and_labels(
DEFAULT_DATASET_TRAINVALTEST_PATH
)
for tensor in raw.values():
assert tensor.dtype == object
def test_get_raw_labels_valid_classes() -> None:
"""Tests that all raw label classes are valid Pokemon types."""
processor = PokemonClassificationDataProcessor()
_, raw = processor.get_raw_features_and_labels(
DEFAULT_DATASET_TRAINVALTEST_PATH
)
for tensor in raw.values():
for row in tensor:
assert row[0] in CLASSES
assert row[1] is None or row[1] in CLASSES
def test_preprocessed_features_same_shape_as_raw() -> None:
"""Tests that the preprocessed features have the same shape as the raw
features."""
processor = PokemonClassificationDataProcessor()
raw = processor.get_raw_features(DEFAULT_DATASET_TRAINVALTEST_PATH)
for tensor in raw.values():
preprocessed = processor.preprocess_features(tensor)
assert tensor.shape == preprocessed.shape
def test_preprocess_features_correct_dtype() -> None:
"""Tests that preprocessed features are of dtype float32."""
processor = PokemonClassificationDataProcessor()
raw = processor.get_raw_features(DEFAULT_DATASET_TRAINVALTEST_PATH)
for tensor in raw.values():
preprocessed = processor.preprocess_features(tensor)
assert preprocessed.dtype == np.float32
def test_preprocess_features_no_na() -> None:
"""Tests that preprocessed features have no missing values."""
processor = PokemonClassificationDataProcessor()
raw = processor.get_raw_features(DEFAULT_DATASET_TRAINVALTEST_PATH)
for tensor in raw.values():
preprocessed = processor.preprocess_features(tensor)
assert not np.isnan(preprocessed).any()
def test_preprocessed_features_scaled() -> None:
"""Tests that preprocessing scales the features to the range [0, 1]."""
processor = PokemonClassificationDataProcessor()
raw = processor.get_raw_features(DEFAULT_DATASET_TRAINVALTEST_PATH)
for tensor in raw.values():
preprocessed = processor.preprocess_features(tensor)
assert preprocessed.min() >= 0
assert preprocessed.max() <= 1
def test_preprocess_labels_correct_shape() -> None:
"""Tests that the preprocessed labels have the correct shape."""
processor = PokemonClassificationDataProcessor()
_, raw = processor.get_raw_features_and_labels(
DEFAULT_DATASET_TRAINVALTEST_PATH
)
for tensor in raw.values():
preprocessed = processor.preprocess_labels(tensor)
assert preprocessed.shape == (len(tensor), len(CLASSES))
def test_preprocess_labels_correct_dtype() -> None:
"""Tests that the preprocessed labels are of dtype float32."""
processor = PokemonClassificationDataProcessor()
_, raw = processor.get_raw_features_and_labels(
DEFAULT_DATASET_TRAINVALTEST_PATH
)
for tensor in raw.values():
preprocessed = processor.preprocess_labels(tensor)
assert preprocessed.dtype == np.float32
def test_preprocess_labels_no_na() -> None:
"""Tests that the preprocessed labels have no missing values."""
processor = PokemonClassificationDataProcessor()
_, raw = processor.get_raw_features_and_labels(
DEFAULT_DATASET_TRAINVALTEST_PATH
)
for tensor in raw.values():
preprocessed = processor.preprocess_labels(tensor)
assert not np.isnan(preprocessed).any()
def test_preprocess_labels_binary() -> None:
"""Tests that the preprocessed labels have values in the set {0, 1}."""
processor = PokemonClassificationDataProcessor()
_, raw = processor.get_raw_features_and_labels(
DEFAULT_DATASET_TRAINVALTEST_PATH
)
for tensor in raw.values():
preprocessed = processor.preprocess_labels(tensor)
assert set(np.unique(preprocessed)) == {0, 1}
def test_preprocess_labels_min_one_max_two_classes() -> None:
    """Tests that each preprocessed label has at least one and at most two
    ones indicating the class(es)."""
    data_processor = PokemonClassificationDataProcessor()
    _, raw_labels = data_processor.get_raw_features_and_labels(
        DEFAULT_DATASET_TRAINVALTEST_PATH
    )
    for label_tensor in raw_labels.values():
        one_hot = data_processor.preprocess_labels(label_tensor)
        # Each row sum counts the classes assigned to that example; the
        # original `union == {1, 2}` check is exactly a subset test.
        classes_per_example = set(np.unique(one_hot.sum(axis=1)))
        assert classes_per_example.issubset({1, 2})
def test_unpreprocess_features_inverts_transformation() -> None:
"""Tests that unpreprocessing the preprocessed features results in the raw
features."""
processor = PokemonClassificationDataProcessor()
raw = processor.get_raw_features(DEFAULT_DATASET_TRAINVALTEST_PATH)
for tensor in raw.values():
preprocessed = processor.preprocess_features(tensor)
unpreprocessed = processor.unpreprocess_features(preprocessed)
assert (unpreprocessed == tensor).all()
def test_unpreprocess_labels_inverts_transformation() -> None:
"""Tests that unpreprocessing the preprocessed labels results in the raw
labels."""
processor = PokemonClassificationDataProcessor()
_, raw = processor.get_raw_features_and_labels(
DEFAULT_DATASET_TRAINVALTEST_PATH
)
for tensor in raw.values():
preprocessed = processor.preprocess_labels(tensor)
unpreprocessed = processor.unpreprocess_labels(preprocessed)
assert (unpreprocessed == tensor).all()
def test_get_raw_features_and_labels_examples_in_same_order() -> None:
"""Tests that the raw features and raw labels have examples in the same
order. For example, say X_train[0] is the raw Bulbasaur image; then
y_train[0] must be the labels for Bulbasaur."""
# pylint: disable=invalid-name
processor = PokemonClassificationDataProcessor()
features, labels = processor.get_raw_features_and_labels(
DEFAULT_DATASET_TRAINVALTEST_PATH
)
X_all = np.concatenate(
(features["X_train"], features["X_val"], features["X_test"])
)
y_all = np.concatenate(
(labels["y_train"], labels["y_val"], labels["y_test"])
)
bulbasaur_idx = None
for idx, arr in enumerate(X_all):
if np.isclose(arr.mean(), BULBASAUR_IMG_MEAN):
bulbasaur_idx = idx
assert bulbasaur_idx is not None
assert set(y_all[bulbasaur_idx]) == BULBASAUR_LABEL
charizard_idx = None
for idx, arr in enumerate(X_all):
if np.isclose(arr.mean(), CHARIZARD_IMG_MEAN):
charizard_idx = idx
assert charizard_idx is not None
assert set(y_all[charizard_idx]) == CHARIZARD_LABEL
def test_get_valid_prediction_correct_shape() -> None:
    """Tests that the output of get_valid_prediction is of the same shape as
    the input."""
    raw_scores = np.array([[0.8, 0.4, 0.2, 0.6], [0.3, 0.4, 0.1, 0.1]])
    result = PokemonClassificationDataProcessor.get_valid_prediction(raw_scores)
    assert result.shape == raw_scores.shape
def test_get_valid_prediction_output_is_binary() -> None:
    """Tests that the output of get_valid_prediction on arbitrary input is
    binary."""
    # Includes out-of-range scores to confirm the output is still 0/1.
    raw_scores = np.array(
        [[0.8, 0.4, 0.2, 0.6], [0.3, 0.4, 0.1, 0.1], [-1, 5, 2, 0.5]]
    )
    result = PokemonClassificationDataProcessor.get_valid_prediction(raw_scores)
    assert set(np.unique(result)) == {0, 1}
def test_get_valid_prediction_chooses_highest() -> None:
    """Tests that get_valid_prediction chooses the highest scores as output."""
    raw_scores = np.array(
        [[0.8, 0.4, 0.2, 0.6], [0.3, 0.4, 0.1, 0.1], [0.9, 0.9, 0.8, 0.8]]
    )
    result = PokemonClassificationDataProcessor.get_valid_prediction(raw_scores)
    expected = [[1, 0, 0, 1], [0, 1, 0, 0], [1, 1, 0, 0]]
    assert result.tolist() == expected
def test_get_valid_prediction_one_or_two_classes() -> None:
    """Tests that get_valid_prediction returns predictions with one or two
    classes."""
    raw_scores = np.array(
        [
            [0.8, 0.4, 0.2, 0.6],
            [0.3, 0.4, 0.1, 0.1],
            [0.9, 0.9, 0.9, 0.9],
            [0.1, 0.1, 0.1, 0.1],
            [2.0, 2.0, 2.0, 2.0],
        ]
    )
    result = PokemonClassificationDataProcessor.get_valid_prediction(raw_scores)
    # Summing each binary row counts the classes predicted per example.
    assert set(result.sum(axis=1)) == {1, 2}
def test_get_valid_prediction_threshold_only_affects_second_highest() -> None:
    """Tests that the decision threshold only affects the second highest
    prediction value."""
    scores = np.array(
        [[0.8, 0.4, 0.2, 0.6], [0.3, 0.4, 0.1, 0.1], [0.9, 0.8, 0.7, 0.7]]
    )
    # A permissive threshold lets the runner-up score become a second class.
    low = PokemonClassificationDataProcessor.get_valid_prediction(
        scores, threshold=0.6
    )
    assert low.tolist() == [[1, 0, 0, 1], [0, 1, 0, 0], [1, 1, 0, 0]]
    # An extreme threshold removes runner-ups but never the argmax class.
    high = PokemonClassificationDataProcessor.get_valid_prediction(
        scores, threshold=0.99
    )
    assert high.tolist() == [[1, 0, 0, 0], [0, 1, 0, 0], [1, 0, 0, 0]]
| 38.751861 | 87 | 0.711788 |
0ca38f0191b2ea6d19bfd3a4a5266188f0fb0264 | 6,057 | py | Python | guillotina/api/search.py | vinissimus/guillotina | 4240adfa5607c022ff6dc5f7335e2c59c1f2217d | [
"BSD-2-Clause"
] | null | null | null | guillotina/api/search.py | vinissimus/guillotina | 4240adfa5607c022ff6dc5f7335e2c59c1f2217d | [
"BSD-2-Clause"
] | null | null | null | guillotina/api/search.py | vinissimus/guillotina | 4240adfa5607c022ff6dc5f7335e2c59c1f2217d | [
"BSD-2-Clause"
] | null | null | null | from guillotina import configure
from guillotina.api.service import Service
from guillotina.catalog.utils import reindex_in_future
from guillotina.component import query_utility
from guillotina.interfaces import ICatalogUtility
from guillotina.interfaces import IResource
from guillotina.response import HTTPServiceUnavailable
import logging
logger = logging.getLogger("guillotina")
# OpenAPI specs for the query-string parameters accepted by the GET
# ``@search`` service below; one entry per supported parameter/modifier.
QUERY_PARAMETERS = [
    {
        "in": "query",
        "required": False,
        "name": "term",
        "description": "Generic search term support. See modifier list below for usage.",
        "schema": {"type": "string"},
    },
    {
        "in": "query",
        "required": False,
        "name": "_from",
        "description": "Start with search result _from.",
        "schema": {"type": "string"},
    },
    {
        "in": "query",
        "required": False,
        "name": "_size",
        "description": "Size of result set. Max to 50 (app_settings.catalog_max_results).",
        "schema": {"type": "string"},
    },
    {
        "in": "query",
        "required": False,
        "name": "_sort_asc",
        "description": "Sort ascending by index _sort_asc.",
        "schema": {"type": "string"},
    },
    {
        "in": "query",
        "required": False,
        "name": "_sort_des",
        "description": "Sort descending by index _sort_des.",
        "schema": {"type": "string"},
    },
    {
        "in": "query",
        "required": False,
        "name": "_metadata",
        "description": "List of metadata fields to include",
        "schema": {"type": "string"},
    },
    {
        "in": "query",
        "required": False,
        "name": "_metadata_not",
        "description": "List of metadata fields to exclude",
        "schema": {"type": "string"},
    },
    # Comparison modifiers (suffixed onto index names in the query string).
    {"in": "query", "required": False, "name": "__eq", "schema": {"type": "string"}},
    {"in": "query", "required": False, "name": "__not", "schema": {"type": "string"}},
    {"in": "query", "required": False, "name": "__gt", "schema": {"type": "string"}},
    {"in": "query", "required": False, "name": "__gte", "schema": {"type": "string"}},
    {"in": "query", "required": False, "name": "__lte", "schema": {"type": "string"}},
    {"in": "query", "required": False, "name": "__lt", "schema": {"type": "string"}},
    {"in": "query", "required": False, "name": "__in", "schema": {"type": "string"}},
]
@configure.service(
    context=IResource,
    method="GET",
    permission="guillotina.SearchContent",
    name="@search",
    validate=True,
    parameters=QUERY_PARAMETERS,
    summary="Make search request",
    responses={
        "200": {
            "description": "Search results",
            "content": {
                "application/json": {
                    "schema": {"type": "object", "$ref": "#/components/schemas/SearchResults"}
                }
            },
        }
    },
)
async def search_get(context, request):
    """Run a catalog search built from the request's query-string params.

    Raises HTTPServiceUnavailable (503) when no ICatalogUtility is
    registered, i.e. no search backend is configured.
    """
    search = query_utility(ICatalogUtility)
    if search is None:
        raise HTTPServiceUnavailable()
    return await search.search(context, dict(request.query))
@configure.service(
    context=IResource,
    method="POST",
    permission="guillotina.RawSearchContent",
    name="@search",
    summary="Make a complex search query",
    requestBody={"content": {"application/json": {"schema": {"properties": {}}}}},
    responses={
        "200": {
            "description": "Search results",
            "content": {
                "application/json": {
                    "schema": {"type": "object", "$ref": "#/components/schemas/SearchResults"}
                }
            },
        }
    },
)
async def search_post(context, request):
    """Run a raw (backend-native) search query given as the JSON body.

    Raises HTTPServiceUnavailable (503) when no ICatalogUtility is
    registered, i.e. no search backend is configured.
    """
    search = query_utility(ICatalogUtility)
    if search is None:
        # Fix: check for the utility before reading/parsing the body -- the
        # request can only end in 503, so don't waste work on the payload.
        raise HTTPServiceUnavailable()
    q = await request.json()
    return await search.search_raw(context, q)
@configure.service(
    context=IResource,
    method="POST",
    permission="guillotina.ReindexContent",
    name="@catalog-reindex",
    summary="Reindex entire container content",
    responses={"200": {"description": "Successfully reindexed content"}},
)
class CatalogReindex(Service):
    """Service that synchronously reindexes all content under the context."""
    def __init__(self, context, request, security=False):
        super(CatalogReindex, self).__init__(context, request)
        # Forwarded to reindex_all_content; presumably limits the reindex to
        # security-related data -- confirm against ICatalogUtility.
        self._security_reindex = security
    async def __call__(self):
        search = query_utility(ICatalogUtility)
        if search is None:
            # No search backend configured.
            raise HTTPServiceUnavailable()
        await search.reindex_all_content(self.context, self._security_reindex)
        return {}
@configure.service(
    context=IResource,
    method="POST",
    permission="guillotina.ReindexContent",
    name="@async-catalog-reindex",
    summary="Asynchronously reindex entire container content",
    responses={"200": {"description": "Successfully initiated reindexing"}},
)
class AsyncCatalogReindex(Service):
    """Service that schedules a full reindex of the context in a future task."""
    def __init__(self, context, request, security=False):
        super(AsyncCatalogReindex, self).__init__(context, request)
        self._security_reindex = security
    async def __call__(self):
        # Fix: honor the stored security flag. It was accepted and stored in
        # __init__ but ignored here (hard-coded False), unlike the parallel
        # CatalogReindex service which forwards it.
        reindex_in_future(self.context, self._security_reindex)
        return {}
@configure.service(
    context=IResource,
    method="POST",
    permission="guillotina.ManageCatalog",
    name="@catalog",
    summary="Initialize catalog",
    responses={"200": {"description": "Successfully initialized catalog"}},
)
async def catalog_post(context, request):
    """Initialize the search catalog for this context via the utility."""
    search = query_utility(ICatalogUtility)
    if search is None:
        # No search backend configured.
        raise HTTPServiceUnavailable()
    await search.initialize_catalog(context)
    return {}
@configure.service(
    context=IResource,
    method="DELETE",
    permission="guillotina.ManageCatalog",
    name="@catalog",
    summary="Delete search catalog",
    responses={"200": {"description": "Successfully deleted catalog"}},
)
async def catalog_delete(context, request):
    """Remove the search catalog for this context via the utility."""
    search = query_utility(ICatalogUtility)
    if search is None:
        # No search backend configured.
        raise HTTPServiceUnavailable()
    await search.remove_catalog(context)
    return {}
| 30.746193 | 94 | 0.608882 |
214ab4d32ac798dd5c1b7b3d29dd41816de8a826 | 2,179 | py | Python | extensions/on_start_screen.py | Lucestra-Studios/DiscordChannelSpammer | ee104b4fb0b820bf3b991153d0bb8c28404dcb14 | [
"MIT"
] | 2 | 2021-08-13T20:36:57.000Z | 2021-08-14T17:46:36.000Z | extensions/on_start_screen.py | lucaso60/DiscordChannelSpammer | 98e50a50cbc877a09e5fbe72cdf4ad8ccdde10f0 | [
"MIT"
] | 1 | 2021-09-14T15:25:38.000Z | 2021-09-14T15:26:37.000Z | extensions/on_start_screen.py | Lucestra-Studios/DiscordChannelSpammer | ee104b4fb0b820bf3b991153d0bb8c28404dcb14 | [
"MIT"
] | null | null | null | """
MIT License
Copyright (c) 2021 lucaso60
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
# ASCII-art banner shown when the tool starts up.
logo = """
  _____ _ _ _____ _ _
 | __ \(_) | | / ____| | | |
 | | | |_ ___ ___ ___ _ __ __| | | | | |__ __ _ _ __ _ __ ___| |
 | | | | / __|/ __/ _ \| '__/ _` | | | | '_ \ / _` | '_ \| '_ \ / _ \ |
 | |__| | \__ \ (_| (_) | | | (_| | | |____| | | | (_| | | | | | | | __/ |
 |_____/|_|___/\___\___/|_| \__,_| \_____|_| |_|\__,_|_| |_|_| |_|\___|_|
  _____
 / ____|
 | (___ _ __ __ _ _ __ ___ _ __ ___ ___ _ __
 \___ \| '_ \ / _` | '_ ` _ \| '_ ` _ \ / _ \ '__|
 ____) | |_) | (_| | | | | | | | | | | | __/ |
 |_____/| .__/ \__,_|_| |_| |_|_| |_| |_|\___|_|
 | |
 |_|
      """
print(logo)

# NOTE: `copyright` shadows the builtin of the same name; kept so any code
# importing this module's attribute keeps working.
copyright = "Copyright (c) 2021 lucaso60, Copyright (c) 2015-present Rapptz"
# Single write producing the same byte stream as the original
# print()/print(copyright)/print() sequence.
print(f"\n{copyright}\n")
e28b779dcc50cf0e44cf7eac42348d0171a1cacc | 109 | py | Python | src/domain/errors/invalid_image_path_failure.py | OzielFilho/ProjetoFinalPdi | c9e6fe415f1a985d6eeac204580d3ab623026665 | [
"MIT"
] | null | null | null | src/domain/errors/invalid_image_path_failure.py | OzielFilho/ProjetoFinalPdi | c9e6fe415f1a985d6eeac204580d3ab623026665 | [
"MIT"
] | null | null | null | src/domain/errors/invalid_image_path_failure.py | OzielFilho/ProjetoFinalPdi | c9e6fe415f1a985d6eeac204580d3ab623026665 | [
"MIT"
] | null | null | null | from domain.errors.image_failure import ImageFailure
class InvalidImagePathFailure(ImageFailure):
    """ImageFailure subtype signaling an invalid image path; no extra state."""
    pass
| 18.166667 | 52 | 0.834862 |
7eea2a6a55cbfbff67c5dd6dbf2c16764339357c | 5,297 | py | Python | google/devtools/clouddebugger/v2/devtools-clouddebugger-v2-py/google/cloud/debugger_v2/types/controller.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
] | 7 | 2021-02-21T10:39:41.000Z | 2021-12-07T07:31:28.000Z | google/devtools/clouddebugger/v2/devtools-clouddebugger-v2-py/google/cloud/debugger_v2/types/controller.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
] | 6 | 2021-02-02T23:46:11.000Z | 2021-11-15T01:46:02.000Z | google/devtools/clouddebugger/v2/devtools-clouddebugger-v2-py/google/cloud/debugger_v2/types/controller.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
] | 4 | 2021-01-28T23:25:45.000Z | 2021-08-30T01:55:16.000Z | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.debugger_v2.types import data
# proto-plus module descriptor: registers every message class listed in
# ``manifest`` under the given proto package.
__protobuf__ = proto.module(
    package='google.devtools.clouddebugger.v2',
    manifest={
        'RegisterDebuggeeRequest',
        'RegisterDebuggeeResponse',
        'ListActiveBreakpointsRequest',
        'ListActiveBreakpointsResponse',
        'UpdateActiveBreakpointRequest',
        'UpdateActiveBreakpointResponse',
    },
)
class RegisterDebuggeeRequest(proto.Message):
    r"""Request to register a debuggee.

    Attributes:
        debuggee (google.cloud.debugger_v2.types.Debuggee):
            Required. Debuggee information to register. The fields
            ``project``, ``uniquifier``, ``description`` and
            ``agent_version`` of the debuggee must be set.
    """
    # Proto field 1.
    debuggee = proto.Field(
        proto.MESSAGE,
        number=1,
        message=data.Debuggee,
    )
class RegisterDebuggeeResponse(proto.Message):
    r"""Response for registering a debuggee.

    Attributes:
        debuggee (google.cloud.debugger_v2.types.Debuggee):
            Debuggee resource. The field ``id`` is guaranteed to be set
            (in addition to the echoed fields). If the field
            ``is_disabled`` is set to ``true``, the agent should disable
            itself by removing all breakpoints and detaching from the
            application. It should however continue to poll
            ``RegisterDebuggee`` until reenabled.
    """
    # Proto field 1.
    debuggee = proto.Field(
        proto.MESSAGE,
        number=1,
        message=data.Debuggee,
    )
class ListActiveBreakpointsRequest(proto.Message):
r"""Request to list active breakpoints.
Attributes:
debuggee_id (str):
Required. Identifies the debuggee.
wait_token (str):
A token that, if specified, blocks the method call until the
list of active breakpoints has changed, or a server-selected
timeout has expired. The value should be set from the
``next_wait_token`` field in the last response. The initial
value should be set to ``"init"``.
success_on_timeout (bool):
If set to ``true`` (recommended), returns
``google.rpc.Code.OK`` status and sets the ``wait_expired``
response field to ``true`` when the server-selected timeout
has expired.
If set to ``false`` (deprecated), returns
``google.rpc.Code.ABORTED`` status when the server-selected
timeout has expired.
"""
debuggee_id = proto.Field(
proto.STRING,
number=1,
)
wait_token = proto.Field(
proto.STRING,
number=2,
)
success_on_timeout = proto.Field(
proto.BOOL,
number=3,
)
class ListActiveBreakpointsResponse(proto.Message):
r"""Response for listing active breakpoints.
Attributes:
breakpoints (Sequence[google.cloud.debugger_v2.types.Breakpoint]):
List of all active breakpoints. The fields ``id`` and
``location`` are guaranteed to be set on each breakpoint.
next_wait_token (str):
A token that can be used in the next method
call to block until the list of breakpoints
changes.
wait_expired (bool):
If set to ``true``, indicates that there is no change to the
list of active breakpoints and the server-selected timeout
has expired. The ``breakpoints`` field would be empty and
should be ignored.
"""
breakpoints = proto.RepeatedField(
proto.MESSAGE,
number=1,
message=data.Breakpoint,
)
next_wait_token = proto.Field(
proto.STRING,
number=2,
)
wait_expired = proto.Field(
proto.BOOL,
number=3,
)
class UpdateActiveBreakpointRequest(proto.Message):
r"""Request to update an active breakpoint.
Attributes:
debuggee_id (str):
Required. Identifies the debuggee being
debugged.
breakpoint_ (google.cloud.debugger_v2.types.Breakpoint):
Required. Updated breakpoint information. The field ``id``
must be set. The agent must echo all Breakpoint
specification fields in the update.
"""
debuggee_id = proto.Field(
proto.STRING,
number=1,
)
breakpoint_ = proto.Field(
proto.MESSAGE,
number=2,
message=data.Breakpoint,
)
class UpdateActiveBreakpointResponse(proto.Message):
r"""Response for updating an active breakpoint.
The message is defined to allow future extensions.
"""
__all__ = tuple(sorted(__protobuf__.manifest))
| 30.618497 | 74 | 0.639985 |
7dd9997f333285847f1148a66a4952b9e990c521 | 5,396 | py | Python | sugaroid/brain/dis.py | vardaan-raj/sugaroid | d0476fb9c44a73fee2e0de45162f2b1ac86452aa | [
"MIT"
] | 4 | 2020-09-28T13:52:40.000Z | 2020-10-30T15:24:50.000Z | sugaroid/brain/dis.py | sreyasaju/sugaroid | d58e06fb664daa16fda1bf23cc73068efcd5634c | [
"MIT"
] | null | null | null | sugaroid/brain/dis.py | sreyasaju/sugaroid | d58e06fb664daa16fda1bf23cc73068efcd5634c | [
"MIT"
] | null | null | null | """
MIT License
Sugaroid Artificial Intelligence
Chatbot Core
Copyright (c) 2020-2021 Srevin Saju
Copyright (c) 2021 The Sugaroid Project
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import logging
from chatterbot.logic import LogicAdapter
from nltk.sentiment import SentimentIntensityAnalyzer
from pyinflect import getInflection
from sugaroid.brain.postprocessor import any_in, random_response
from sugaroid.brain.constants import (
BYE,
DIS_RESPONSES_YOU,
CONSOLATION,
DIS_RESPONSES_I,
DIS_RESPONSES_HIM,
)
from sugaroid.brain.ooo import Emotion
from sugaroid.brain.preprocessors import normalize, spac_token
from sugaroid.sugaroid import SugaroidStatement
class DisAdapter(LogicAdapter):
    """Handle statements containing a word that begins with "dis".

    The first "dis" token found by ``can_process`` has its prefix stripped,
    and the remainder is scored with VADER sentiment analysis plus a set of
    hand-tuned heuristics. The confidence is deliberately kept low
    (generally below 0.5) so that the BestAdapter may still prefer another
    adapter's answer.
    """

    def __init__(self, chatbot, **kwargs):
        super().__init__(chatbot, **kwargs)
        self.normalized = None  # tokenized statement, set by can_process()
        self.dis = None  # first token starting with "dis", set by can_process()

    def can_process(self, statement):
        """Return True iff the statement contains a token starting with "dis"."""
        self.normalized = normalize(str(statement))
        self.dis = None
        for i in self.normalized:
            if i.startswith("dis"):
                self.dis = i
                return True
        else:
            # Loop finished without finding any "dis" token.
            return False

    def process(self, statement, additional_response_selection_parameters=None):
        """Build a response for a "dis..." statement.

        Returns a :class:`SugaroidStatement` whose confidence combines the
        VADER negativity of the de-prefixed word with several heuristics,
        and whose text targets "you", "I", or a detected noun/pronoun.
        """
        confidence = 0
        dis_word = False
        if any_in(
            [
                "distinguish",
                "disfigure",
                "distinct",
                "distinction",
                "distant",
                "distance",
                "distribution",
                "distilled",
            ],
            self.normalized,
        ):
            # Ordinary words where "dis" is not a negation prefix: bail out
            # with zero confidence.
            confidence = 0
        else:
            # BUG FIX: the original code called
            # ``"...".format(self.dis)[0]`` -- the ``[0]`` indexed the
            # formatted string, so only its first character was logged.
            logging.info(
                "DisAdapter: Starting Advanced scan. dis_word == {}".format(self.dis)
            )
            dis_word = self.dis[3:]  # strip the "dis" prefix
            logging.info("DisAdapter: Distilled word == {}".format(dis_word))
            sia = SentimentIntensityAnalyzer().polarity_scores(dis_word)
            if dis_word[0] in ["a", "e", "i", "o", "u", "g", "m", "p"]:
                confidence += 0.4
            if "infect" in dis_word:
                confidence -= 0.3
            if "spirit" in dis_word:
                confidence += 0.2
            if any_in(
                [
                    "play",
                    "pensary",
                    "pense",
                    "patch",
                    "port",
                    "persal",
                    "perse",
                    "persion",
                    "praise",
                ],
                dis_word,
            ):
                # Stems such as "dis-play" / "dis-patch" are not negations
                # either; penalise them.
                confidence -= 0.2
            confidence += sia["neg"]
        # NOTE(review): "VBD" is the simple-past tag, not past participle
        # ("VBN") -- the variable name below may overstate what
        # getInflection actually returns; confirm intended tag.
        inflection = getInflection(self.chatbot.lp.tokenize(self.dis)[0].lemma_, "VBD")
        if inflection is None:
            # No inflection known; fall back to the raw "dis" word.
            past_participle_form_of_verb = self.dis
        else:
            past_participle_form_of_verb = inflection[0]
        if "you" in self.normalized:
            response = random_response(DIS_RESPONSES_YOU).format(
                past_participle_form_of_verb
            )
            emotion = Emotion.angry_non_expressive
        elif "I" in self.normalized:
            response = "{} {}".format(
                random_response(DIS_RESPONSES_I), random_response(CONSOLATION)
            )
            emotion = Emotion.angel
        else:
            # Target the last noun/proper-noun (or failing that, pronoun)
            # found in the statement.
            nn = None
            pn = None
            tokenized = spac_token(statement, chatbot=self.chatbot)
            for i in tokenized:
                if (i.pos_ == "NOUN") or (i.pos_ == "PROPN"):
                    nn = i.text
                elif i.pos_ == "PRON":
                    pn = i.text
            if not (nn or pn):
                response = "Lol. What?"
                emotion = Emotion.seriously
            else:
                response = random_response(DIS_RESPONSES_HIM).format(nn or pn)
                emotion = Emotion.cry_overflow
        selected_statement = SugaroidStatement(response, chatbot=True)
        selected_statement.confidence = confidence
        selected_statement.emotion = emotion
        selected_statement.adapter = None
        return selected_statement
04e7a3a10c3263f30a19de49a902356501a65648 | 5,838 | py | Python | tests/test_remote.py | LianaGrieken/cblaster | a05923976e86a3edc08cece675d34cf8fdafd11e | [
"MIT"
] | null | null | null | tests/test_remote.py | LianaGrieken/cblaster | a05923976e86a3edc08cece675d34cf8fdafd11e | [
"MIT"
] | null | null | null | tests/test_remote.py | LianaGrieken/cblaster | a05923976e86a3edc08cece675d34cf8fdafd11e | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
Test suite for remote module
"""
import pytest
import requests_mock
from pathlib import Path
from cblaster import remote, helpers
TEST_DIR = Path(__file__).resolve().parent
def test_start_no_input():
with pytest.raises(ValueError):
# No query_file/ids
remote.start()
@pytest.fixture()
def start_response():
return (TEST_DIR / "start_response.html").read_text()
def test_start(start_response, monkeypatch):
    """remote.start() returns the (RID, RTOE) pair parsed from the NCBI
    response and builds the expected BLAST PUT request URL."""
    def mock_sequences(query_file, query_ids):
        # Stub out sequence lookup so no files / network are touched.
        return {'seq1': 'TEST', 'seq2': 'TEST'}
    monkeypatch.setattr(helpers, "get_sequences", mock_sequences)
    with requests_mock.Mocker() as mock:
        # Serve the canned HTML containing RID=VCZM3MWB014 / RTOE=18.
        mock.post(remote.BLAST_API_URL, text=start_response)
        # Ensure RID/RTOE is returned
        result = remote.start(
            query_ids=["seq1", "seq2"],
            entrez_query="Aspergillus[ORGN]"
        )
        assert result == ("VCZM3MWB014", 18)
        # Check correct request URL
        assert mock.request_history[0].url == (
            "https://blast.ncbi.nlm.nih.gov/Blast.cgi?"
            "CMD=PUT"
            "&DATABASE=nr"
            "&PROGRAM=blastp"
            "&FILTER=F"
            "&EXPECT=10"
            "&GAPCOSTS=11+1"
            "&MATRIX=BLOSUM62"
            "&HITLIST_SIZE=5000"
            "&ALIGNMENTS=5000"
            "&DESCRIPTIONS=5000"
            "&WORD_SIZE=6"
            "&COMPOSITION_BASED_STATISTICS=2"
            "&ENTREZ_QUERY=Aspergillus%5BORGN%5D"
            "&THRESHOLD=11"
        )
def test_start_blastn_options(start_response, monkeypatch):
def mock_sequences(query_file, query_ids):
return {'seq1': 'TEST', 'seq2': 'TEST'}
monkeypatch.setattr(helpers, "get_sequences", mock_sequences)
with requests_mock.Mocker() as mock:
mock.post(remote.BLAST_API_URL, text=start_response)
# megablast, nucl_* are blastn options, threshold is only BLASTp
remote.start(
query_ids=["seq1"],
program="blastn",
megablast=True,
nucl_penalty=99,
nucl_reward=99,
threshold=99,
)
# Check correct request URL
request = mock.request_history[0]
assert "THRESHOLD" not in request.url # Only blastp
assert all(
part in request.url
for part in ["NUCL_PENALTY=99", "NUCL_REWARD=99", "MEGABLAST=on"]
)
@pytest.fixture()
def check_response():
return (TEST_DIR / "check_response.html").read_text()
def test_check(check_response):
with requests_mock.Mocker() as mock:
mock.get(remote.BLAST_API_URL, text=check_response)
# Finds Status=READY and ThereAreHits=yes
assert remote.check("VCZM3MWB014") is True
# Check correct request URL
assert mock.request_history[0].url == (
"https://blast.ncbi.nlm.nih.gov/Blast.cgi?"
"CMD=Get"
"&RID=VCZM3MWB014"
"&FORMAT_OBJECT=SearchInfo"
)
@pytest.mark.parametrize(
"text", ["Status=UNKNOWN\n", "Status=FAILED\n", "Status=READY\nThereAreHits=no\n"]
)
def test_check_failed(text):
with requests_mock.Mocker() as mock, pytest.raises(ValueError):
mock.get(remote.BLAST_API_URL, text=text)
remote.check("RID")
def test_check_waiting():
with requests_mock.Mocker() as mock:
mock.get(remote.BLAST_API_URL, text="Status=WAITING\n")
assert remote.check("RID") is False
@pytest.fixture()
def retrieve_response():
return (TEST_DIR / "retrieve_response.html").read_text()
def test_retrieve(retrieve_response):
    """remote.retrieve() strips HTML/comment cruft from the NCBI result,
    leaving only tabular hit rows, and requests the right URL."""
    with requests_mock.Mocker() as mock:
        mock.get(remote.BLAST_API_URL, text=retrieve_response)
        result = remote.retrieve("RID")
        # Make sure we've removed non-TSV cruft
        assert len(result) == 300
        assert not any(row.startswith(("#", "<", " ", "Qblast", "-")) for row in result)
        assert mock.request_history[0].url == (
            "https://blast.ncbi.nlm.nih.gov/Blast.cgi?"
            "CMD=Get"
            "&RID=RID"
            "&FORMAT_TYPE=Tabular"
            "&FORMAT_OBJECT=Alignment"
            "&HITLIST_SIZE=5000"
            "&ALIGNMENTS=5000"
            "&DESCRIPTIONS=5000"
            "&NCBI_GI=F"
        )
def test_poll_success(monkeypatch):
def patch_check(rid):
return True
monkeypatch.setattr(remote, "check", patch_check)
assert remote.check("RID") is True
def test_poll_retry_limit(monkeypatch):
def returns_false(rid):
return False
monkeypatch.setattr(remote, "check", returns_false)
with pytest.raises(ValueError):
remote.poll("RID", delay=0, max_retries=2)
@pytest.fixture
def query_file():
return TEST_DIR / "test.faa"
def test_parse_empty_handle(query_file):
with pytest.raises(ValueError):
remote.parse([], query_file=query_file)
def test_parse(query_file):
    """remote.parse() filters tabular hits by the default thresholds
    (30% identity, 50% coverage, evalue < 0.01) and keeps passing rows."""
    # length of QBE85648 == 179
    result = [
        # qid sid pid len mismatch gapopen qstart qend sstart ssend evalue bitscore
        "QBE85648.1\tHIT1\t100.000\t179\t0\t0\t1\t179\t1\t179\t1.38e-127\t365\t100.00",
        # HIT2 fails the identity threshold (20% < 30%).
        "QBE85648.1\tHIT2\t20.000\t179\t0\t0\t1\t179\t1\t179\t1.38e-127\t365\t100.00",
        # HIT3 fails coverage (query span 150-179 of a 179 aa query).
        "QBE85648.1\tHIT3\t100.000\t179\t0\t0\t150\t179\t1\t179\t1.38e-127\t365\t100.00",
        # HIT4 fails the evalue cutoff (0.011 > 0.01).
        "QBE85648.1\tHIT4\t100.000\t179\t0\t0\t1\t179\t1\t179\t0.011\t365\t100.00",
    ]
    hits = remote.parse(result, query_file=query_file)
    # Default thresholds are 30% identity, 50% coverage, 0.01 evalue
    # so only the first hit should be saved
    assert len(hits) == 1
    assert hits[0].query == "QBE85648.1"
    assert hits[0].subject == "HIT1"
    assert hits[0].identity == 100.0
    assert hits[0].coverage == 100.0
    assert hits[0].bitscore == 365.0
    assert hits[0].evalue == 1.38e-127
a05dee4d5e0ece98724b932c60c23471f3ebfbe7 | 16,152 | py | Python | scripts/train_vq_code_predictor.py | tomhosking/torchseq | 1b08c16822a553ecb77b96289fb21eb0a13d9c6b | [
"Apache-2.0"
] | 17 | 2021-02-25T14:24:06.000Z | 2021-12-12T07:12:26.000Z | scripts/train_vq_code_predictor.py | tomhosking/torchseq | 1b08c16822a553ecb77b96289fb21eb0a13d9c6b | [
"Apache-2.0"
] | null | null | null | scripts/train_vq_code_predictor.py | tomhosking/torchseq | 1b08c16822a553ecb77b96289fb21eb0a13d9c6b | [
"Apache-2.0"
] | null | null | null | # MLP code prediction
import argparse, json, os
parser = argparse.ArgumentParser(
description="MLP code prediction trainer",
)
parser.add_argument(
"--data_dir", type=str, default='./data/', help="Path to data folder"
)
parser.add_argument(
"--model_path", type=str, default='./runs/sep_ae/20201230_132811_vae_wa_6h_quantized_256_16qh_chunk-drop30/', help="Path to model folder"
)
parser.add_argument(
"--output_path", type=str, default='./runs/mlpcodepredictor/', help="Path to output folder"
)
parser.add_argument(
"--dataset", type=str, default='wikianswers', help="Which dataset?"
)
parser.add_argument("--train", action="store_true", help="Train mode")
parser.add_argument("--eval", action="store_true", help="Eval mode")
parser.add_argument("--test", action="store_true", help="Eval on test")
parser.add_argument(
"--lr", type=float, default=1e-4
)
parser.add_argument(
"--bsz", type=int, default=1024
)
parser.add_argument(
"--codebook_size", type=int, default=0
)
parser.add_argument(
"--hidden_dim", type=int, default=768*4
)
parser.add_argument(
"--num_steps", type=int, default=30001
)
args = parser.parse_args()
if args.dataset == 'wikianswers':
dataset_all = 'wikianswers-para-allqs'
dataset_clusters = 'wikianswers-pp'
dataset_geneval = 'wikianswers-para-splitforgeneval'
dataset_mlppredict = 'wikianswers-para-exemplarmlppredict'
elif args.dataset == 'qqp':
dataset_all = 'qqp-allqs'
dataset_clusters = 'qqp-clusters'
dataset_geneval = 'qqp-splitforgeneval'
dataset_mlppredict = 'qqp-exemplarmlppredict'
import torch
from torch.autograd import Variable
from tqdm import tqdm
from torchseq.utils.functions import onehot
from torchseq.utils.seed import set_seed
class MLPClassifier(torch.nn.Module):
    """Multi-head MLP mapping an encoding vector to one set of logits per
    head (used to predict VQ codes).

    Input of shape ``(batch, input_dim)`` yields logits of shape
    ``(batch, num_heads, output_dim)``.

    NOTE(review): ``hidden_dim`` is accepted but never used -- all hidden
    widths are derived from ``input_dim * num_heads``; confirm intent.
    """

    def __init__(self, input_dim, output_dim, hidden_dim, num_heads):
        super(MLPClassifier, self).__init__()
        wide = input_dim * num_heads
        # Attribute names (linear/linear2/linear3/drop1/drop2) are kept
        # stable so previously saved state_dicts remain loadable.
        self.linear = torch.nn.Linear(input_dim, wide)
        self.linear2 = torch.nn.Linear(wide, wide)
        self.linear3 = torch.nn.Linear(wide, output_dim * num_heads)
        self.drop1 = torch.nn.Dropout(p=0.2)
        self.drop2 = torch.nn.Dropout(p=0.2)
        self.num_heads = num_heads
        self.output_dim = output_dim

    def forward(self, x):
        """Two ReLU+dropout hidden layers, then a linear projection split
        into per-head logit blocks."""
        relu = torch.nn.functional.relu
        h = self.drop1(relu(self.linear(x)))
        h = self.drop2(relu(self.linear2(h)))
        h = self.linear3(h)
        return h.reshape(-1, self.num_heads, self.output_dim)
os.makedirs(args.output_path, exist_ok=True)
with open(args.output_path + '/config.json', 'w') as f:
json.dump(vars(args), f)
import numpy as np
import jsonlines, os
# Load encodings, data
MODEL_PATH = args.model_path
if not os.path.exists(MODEL_PATH+f'/sep_encoding_1_train.npy') or not os.path.exists(MODEL_PATH+f'/sep_encoding_1_dev.npy') or not os.path.exists(MODEL_PATH+f'/sep_encoding_1_test.npy'):
# generate encodings
print('Encoding cache not found - generating...')
import json, torch, jsonlines
from tqdm import tqdm
import numpy as np
from torchseq.agents.para_agent import ParaphraseAgent
from torchseq.datasets.json_loader import JsonDataLoader
from torchseq.utils.config import Config
with open(MODEL_PATH + "/config.json") as f:
cfg_dict = json.load(f)
# cfg_dict["task"] = "autoencoder"
cfg_dict["env"]["data_path"] = args.data_dir
cfg_dict["eval"]["sample_outputs"] = False
cfg_dict["training"]['batch_size'] = 24
cfg_dict["eval"]['eval_batch_size'] = 24
cfg_dict["training"]["dataset"] = 'json'
cfg_dict["training"]["shuffle_data"] = False
cfg_dict['json_dataset'] = {
"path": dataset_all,
"field_map": [
{
"type": "copy",
"from": "q",
"to": "s2"
},
{
"type": "copy",
"from": "q",
"to": "s1"
}
]
}
cfg_dict["bottleneck"]["prior_var_weight"] = 0.0
config = Config(cfg_dict)
checkpoint_path = MODEL_PATH
data_loader = JsonDataLoader(config)
instance = ParaphraseAgent(config=config, run_id=None, output_path="./runs/parademo/", silent=False, verbose=False, training_mode=False)
if os.path.exists(os.path.join(MODEL_PATH, "orig_model.txt")):
with open(os.path.join(MODEL_PATH, "orig_model.txt")) as f:
chkpt_pth = f.readlines()[0]
checkpoint_path = chkpt_pth
else:
checkpoint_path = os.path.join(MODEL_PATH, "model", "checkpoint.pt")
instance.load_checkpoint(checkpoint_path)
instance.model.eval()
# Train
if not os.path.exists(MODEL_PATH+f'/sep_encoding_1_train.npy'):
_, _, _, memory_train = instance.inference(data_loader.train_loader, memory_keys_to_return=['sep_encoding_1', 'sep_encoding_2','vq_codes'])
torch.cuda.empty_cache()
for mem_key in ['sep_encoding_1', 'sep_encoding_2','vq_codes']:
np.save(MODEL_PATH+f'/{mem_key}_train.npy', memory_train[mem_key])
# Dev
if not os.path.exists(MODEL_PATH+f'/sep_encoding_1_dev.npy'):
_, _, _, memory_dev = instance.inference(data_loader.valid_loader, memory_keys_to_return=['sep_encoding_1', 'sep_encoding_2','vq_codes'])
torch.cuda.empty_cache()
for mem_key in ['sep_encoding_1', 'sep_encoding_2','vq_codes']:
np.save(MODEL_PATH+f'/{mem_key}_dev.npy', memory_dev[mem_key])
# Test
if not os.path.exists(MODEL_PATH+f'/sep_encoding_1_test.npy'):
_, _, _, memory_test = instance.inference(data_loader.test_loader, memory_keys_to_return=['sep_encoding_1', 'sep_encoding_2','vq_codes'])
torch.cuda.empty_cache()
for mem_key in ['sep_encoding_1', 'sep_encoding_2','vq_codes']:
np.save(MODEL_PATH+f'/{mem_key}_test.npy', memory_test[mem_key])
del instance
del data_loader
torch.cuda.empty_cache()
print('Encoding cache built')
# Now actually load the encodings
print('Loading encodings, data')
memory_train = {}
memory_dev = {}
memory_test = {}
for mem_key in ['sep_encoding_1', 'sep_encoding_2', 'vq_codes']:
memory_train[mem_key] = np.load(MODEL_PATH+f'/{mem_key}_train.npy')
if args.test:
memory_test[mem_key] = np.load(MODEL_PATH+f'/{mem_key}_test.npy')
else:
memory_dev[mem_key] = np.load(MODEL_PATH+f'/{mem_key}_dev.npy')
with jsonlines.open(os.path.join(args.data_dir, dataset_clusters, "train.jsonl")) as f:
train_qs = [row for row in f]
train_cluster_ixs = []
ix = 0
for cix, cluster in enumerate(train_qs):
clen = len(cluster['qs'])
for i in range(clen):
cluster_ixs = list(range(ix, ix+clen))
# if args.dataset != 'qqp':
cluster_ixs.remove(ix + i)
train_cluster_ixs.append(cluster_ixs)
ix += clen
with jsonlines.open(os.path.join(args.data_dir, dataset_clusters, "dev.jsonl")) as f:
dev_qs = [row for row in f]
dev_cluster_ixs = []
ix = 0
for cix, cluster in enumerate(dev_qs):
clen = len(cluster['qs'])
for i in range(clen):
cluster_ixs = list(range(ix, ix+clen))
# if args.dataset != 'qqp':
cluster_ixs.remove(ix + i)
dev_cluster_ixs.append(cluster_ixs)
ix += clen
import sys, gc
gc.collect()
# print('mem train', sum([x.nbytes for x in memory_train.values()])/1024**2)
# print('mem dev', sum([x.nbytes for x in memory_dev.values()])/1024**2)
# print('mem test', sum([x.nbytes for x in memory_test.values()])/1024**2)
# print('qs train', sys.getsizeof(train_qs)/1024**2)
# print('qs dev', sys.getsizeof(dev_qs)/1024**2)
# print('clusters train', sys.getsizeof(train_cluster_ixs)/1024**2)
# print('clusters dev', sys.getsizeof(dev_cluster_ixs)/1024**2)
print('Data and encodings loaded')
# from guppy import hpy;
# h=hpy()
# h.heap()
# Prepare datasets
print('Prepping dataset')
h_ix = 0
X = np.concatenate([memory_train['sep_encoding_1'][:, 0, :], memory_train['sep_encoding_2'][:, 0, :]], axis=1)
y = memory_train['vq_codes'][:, :, 0]
# print(y[:10, :])
# print(len(train_qs))
# print(X.shape)
# print(len(train_cluster_ixs))
# X_train_ixs = []
# y_train_ixs = []
# for src_ix, cluster in enumerate(train_cluster_ixs):
# for tgt_ix in cluster:
# X_train_ixs.append(src_ix)
# y_train_ixs.append(tgt_ix)
# X_dev_ixs = []
# y_dev_ixs = []
# for src_ix, cluster in enumerate(dev_cluster_ixs[:1000]):
# for tgt_ix in cluster:
# X_dev_ixs.append(src_ix)
# y_dev_ixs.append(tgt_ix)
if args.test:
# X_dev = memory_dev['sep_encoding_1'][:, 0, :]
X_test = np.concatenate([memory_test['sep_encoding_1'][:, 0, :], memory_test['sep_encoding_2'][:, 0, :]], axis=1)
y_test = memory_test['vq_codes'][:, :, 0]
else:
# X_dev = memory_dev['sep_encoding_1'][:, 0, :]
X_dev = np.concatenate([memory_dev['sep_encoding_1'][:, 0, :], memory_dev['sep_encoding_2'][:, 0, :]], axis=1)
y_dev = memory_dev['vq_codes'][:, :, 0]
print('Datasets prepped')
# Train the model
batch_size = args.bsz
NUM_STEPS = args.num_steps
NUM_HEADS = 4
input_dim = 768 * 4//4
output_dim = args.codebook_size
hidden_dim = args.hidden_dim
lr_rate = args.lr
set_seed(123)
model = MLPClassifier(input_dim, output_dim, hidden_dim, NUM_HEADS).cuda()
if args.train:
print('Training model...')
criterion = torch.nn.CrossEntropyLoss().cuda() # computes softmax and then the cross entropy
optimizer = torch.optim.Adam(model.parameters(), lr=lr_rate)
rand_ixs = np.random.randint(0, high=len(train_cluster_ixs), size=(NUM_STEPS, batch_size))
best_acc = 0
for iter in tqdm(range(NUM_STEPS)):
# batch_ixs = np.random.choice(len(train_cluster_ixs), size=batch_size)
model.train()
batch_ixs = rand_ixs[iter,:]
inputs = Variable(torch.tensor([X[ix] for ix in batch_ixs])).cuda()
# print([len(train_cluster_ixs[cix]) for cix in batch_ixs])
tgt = torch.where(torch.cat([torch.sum(torch.cat([onehot(torch.tensor(y[ix]), N=output_dim).unsqueeze(0) for ix in train_cluster_ixs[cix]], dim=0), dim=0, keepdims=True) for cix in batch_ixs], dim=0) > 0, 1, 0).cuda()
# tgt = Variable(tgt).cuda()
optimizer.zero_grad()
outputs = model(inputs)
# loss = criterion(outputs, labels)
loss = torch.sum(-1 * torch.nn.functional.log_softmax(outputs, dim=-1) * tgt/tgt.sum(dim=-1, keepdims=True), dim=-1).mean() #
loss.backward()
optimizer.step()
if iter%1000==0:
model.eval()
# calculate Accuracy
correct = 0
all_acc = 0
head_acc = [0] * NUM_HEADS
total = 0
for x_ix, cluster in enumerate(train_cluster_ixs[:10000]):
inputs = Variable(torch.tensor([X[x_ix]])).cuda()
labels = cluster
outputs = model(inputs)
predicted = torch.argmax(outputs.data, -1).cpu()
total+= inputs.size(0)
# for gpu, bring the predicted and labels back to cpu fro python operations to work
# print(predicted, [y[ix] for ix in cluster])
all_correct = True
for h_ix in range(NUM_HEADS):
this_corr = (predicted[0, h_ix] in [y[ix, h_ix] for ix in cluster])
correct+= 1.0 * this_corr
head_acc[h_ix] += 1.0 * this_corr
all_correct = all_correct & this_corr
all_acc += 1.0 * all_correct
accuracy = 100 * correct/(total*NUM_HEADS)
head_acc = [100*x/total for x in head_acc]
all_accuracy = 100 * all_acc/total
if accuracy > best_acc:
print('Saving...')
torch.save(model.state_dict(), args.output_path+'/code_predict.pt')
best_acc = accuracy
metrics = {
'acc': accuracy,
'full_acc': all_accuracy,
'head_acc': head_acc
}
with open(args.output_path + '/metrics.json', 'w') as f:
json.dump(metrics, f)
print("Iteration: {}. Loss: {}. Recall: {}. All Recall {}. PerHead Recall {}".format(iter, loss.item(), accuracy, all_accuracy, head_acc))
print('Training complete')
# Run inference
if args.eval or args.test:
split = 'test' if args.test else 'dev'
print('Generating exemplars')
import jsonlines, os, copy
from tqdm import tqdm
NUM_HEADS = 16
NUM_TEMPL_HEADS = 4
model.load_state_dict(torch.load(args.output_path+'/code_predict.pt'))
model.eval()
with jsonlines.open(os.path.join(args.data_dir, f"{dataset_geneval}/{split}.jsonl")) as f:
rows = [row for row in f]
q_to_ix = {}
ix = 0
with jsonlines.open(os.path.join(args.data_dir, f"{dataset_clusters}/{split}.jsonl")) as f:
dev_qs = [row for row in f]
for cix, cluster in enumerate(dev_qs):
for q in cluster['qs']:
q_to_ix[q] = ix
ix += 1
miss = 0
# os.makedirs(args.data_dir + '/wikianswers-para-exemplarmlppredict', exist_ok=True)
# with jsonlines.open(args.data_dir + '/wikianswers-para-exemplarmlppredict/dev.jsonl', 'w') as f:
# for ix, row in enumerate(tqdm(rows)):
# query_ix = q_to_ix[row['sem_input']]
# tgt_codes = [0] * (NUM_HEADS - NUM_TEMPL_HEADS)
# inputs = Variable(torch.tensor([X_dev[query_ix]])).cuda()
# outputs = model(inputs)
# predicted = torch.argmax(outputs.data, -1).cpu()
# gold = y_dev[ix]
# # print(predicted, gold)
# for h_ix in range(NUM_TEMPL_HEADS):
# tgt_codes.append(predicted[0, h_ix].item())
# this_row = copy.copy(row)
# this_row['vq_codes'] = tgt_codes
# f.write(this_row)
X_src = X_test if args.test else X_dev
os.makedirs(args.data_dir + '/' + dataset_mlppredict, exist_ok=True)
with jsonlines.open(args.data_dir + '/' + dataset_mlppredict +f'/{split}.jsonl', 'w') as f:
for ix, row in enumerate(tqdm(rows)):
query_ix = q_to_ix[row['sem_input']]
# tgt_codes = [0] * (NUM_HEADS - NUM_TEMPL_HEADS)
tgt_codes = []
inputs = Variable(torch.tensor([X_src[query_ix]])).cuda()
outputs = model(inputs)[0]
probs, predicted = torch.topk(torch.softmax(outputs, -1), 3 -1)
# print(predicted.shape, probs.shape)
# break
joint_probs = [([], 0)]
for h_ix in range(NUM_TEMPL_HEADS):
new_hypotheses = []
for i, (combo, prob) in enumerate(joint_probs):
for k in range(2):
new_hyp = [copy.copy(combo), prob]
new_hyp[0].append(predicted[h_ix, k].item())
new_hyp[1] += torch.log(probs[h_ix, k]).item()
new_hypotheses.append(new_hyp)
joint_probs = new_hypotheses
joint_probs = sorted(joint_probs, key=lambda x: x[1], reverse=True)[:3]
pred_codes = [tgt_codes + x[0] for x in sorted(joint_probs, key=lambda x: x[1], reverse=True)[:2]]
# pred_codes = predicted.transpose(1,0).tolist()
# pred_codes = [tgt_codes + codes for codes in pred_codes]
# print(pred_codes)
# exit()
# for h_ix in range(NUM_TEMPL_HEADS):
# tgt_codes.append(predicted[0, h_ix].item())
for codes in pred_codes:
this_row = copy.copy(row)
this_row['vq_codes'] = codes
f.write(this_row)
| 33.234568 | 225 | 0.614289 |
ec05ec150f19b102d715046d734476e3b288e407 | 387 | py | Python | bmark/asgi.py | gravedigger0/LinearDoc | 7e35f86091a64829faaff644cd5e8de28e869dfa | [
"MIT"
] | 1 | 2021-10-20T10:18:01.000Z | 2021-10-20T10:18:01.000Z | bmark/asgi.py | gravedigger0/LinearDoc | 7e35f86091a64829faaff644cd5e8de28e869dfa | [
"MIT"
] | null | null | null | bmark/asgi.py | gravedigger0/LinearDoc | 7e35f86091a64829faaff644cd5e8de28e869dfa | [
"MIT"
] | null | null | null | """
ASGI config for bmark project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'bmark.settings')
application = get_asgi_application()
| 22.764706 | 78 | 0.782946 |
acf5e33b0f7cf419bcb9d2d8fa7d26e002937a8b | 3,803 | py | Python | event/event/settings.py | JuroOravec/knwldg | 33235f78ae1ea6409883f312adcf8679c5bf2401 | [
"MIT"
] | null | null | null | event/event/settings.py | JuroOravec/knwldg | 33235f78ae1ea6409883f312adcf8679c5bf2401 | [
"MIT"
] | null | null | null | event/event/settings.py | JuroOravec/knwldg | 33235f78ae1ea6409883f312adcf8679c5bf2401 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Scrapy settings for event project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'event'
SPIDER_MODULES = ['event.spiders']
NEWSPIDER_MODULE = 'event.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
# USER_AGENT = 'event (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
# CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
# DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
# CONCURRENT_REQUESTS_PER_DOMAIN = 16
# CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
# COOKIES_ENABLED = False
# COOKIES_DEBUG = True
# Disable Telnet Console (enabled by default)
# TELNETCONSOLE_ENABLED = False
# Override the default request headers:
# CUSTOM_REQUEST_HEADERS = OrderedDict({
# 'Host': 'www.infogreffe.com',
# 'Connection': 'keep-alive',
# 'Sec-Fetch-Mode': 'cors',
# 'X-Requested-With': 'XMLHttpRequest',
# 'User-Agent': 'scrapy',
# 'Content-Type': 'application/x-www-form-urlencoded',
# 'Accept': '*/*',
# 'Sec-Fetch-Site': 'same-origin',
# 'Referer': 'https://www.infogreffe.fr/',
# 'Accept-Encoding': 'gzip, deflate, br',
# 'Accept-Language': 'en-GB,en-US;q=0.9,en;q=0.8',
# 'Cookie': ''
# })
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
SPIDER_MIDDLEWARES = {
# 'fr.middlewares.FrSpiderMiddleware': 543,
# 'fr.middlewares.SpiderExceptionMiddleware': 550,
}
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
'scrapy.downloadermiddlewares.cookies.CookiesMiddleware': 300,
'scrapy.contrib.downloadermiddleware.useragent.UserAgentMiddleware': None,
# 'random_useragent.RandomUserAgentMiddleware': 400,
# 'rotating_proxies.middlewares.RotatingProxyMiddleware': 610,
# 'rotating_proxies.middlewares.BanDetectionMiddleware': 620,
}
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
# EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
# }
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# ITEM_PIPELINES = {
# 'event.pipelines.FrPipeline': 300,
# }
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
AUTOTHROTTLE_ENABLED = True
# The initial download delay
# AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
AUTOTHROTTLE_MAX_DELAY = 120
# The average number of requests Scrapy should be sending in parallel to
# each remote server
AUTOTHROTTLE_TARGET_CONCURRENCY = 0.5
# Enable showing throttling stats for every response received:
# AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
# HTTPCACHE_ENABLED = True
# HTTPCACHE_EXPIRATION_SECS = 0
# HTTPCACHE_DIR = 'httpcache'
# HTTPCACHE_IGNORE_HTTP_CODES = []
# HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| 35.542056 | 103 | 0.747568 |
d5f0a74f5bc08813af2989b4b6a8d35c926a175c | 245 | py | Python | hood_app/admin.py | Tajeu2001/hood | 727e6709f5619e2421fb02ce487235e75af1b2b0 | [
"MIT"
] | 1 | 2022-01-09T05:10:51.000Z | 2022-01-09T05:10:51.000Z | neighbor/admin.py | mwendaB/neighboorhood | d7607e816890369a486e7e7971ce78c2354cbd1b | [
"MIT"
] | null | null | null | neighbor/admin.py | mwendaB/neighboorhood | d7607e816890369a486e7e7971ce78c2354cbd1b | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import Neighbourhood, Profile, Business, Post
# Register your models here.
admin.site.register(Profile)
admin.site.register(Neighbourhood)
admin.site.register(Post)
admin.site.register(Business) | 30.625 | 58 | 0.808163 |
24e48fdb0b4a27132959ca18d7ee5471703f70f8 | 10,404 | py | Python | lpbm/module_loader.py | fmichea/lpbm | 172772d562e2f1aa4aba72599150f95f89bdf6ce | [
"BSD-3-Clause"
] | 1 | 2015-11-09T11:30:41.000Z | 2015-11-09T11:30:41.000Z | lpbm/module_loader.py | fmichea/lpbm | 172772d562e2f1aa4aba72599150f95f89bdf6ce | [
"BSD-3-Clause"
] | 1 | 2015-04-28T07:02:21.000Z | 2016-01-23T19:12:11.000Z | lpbm/module_loader.py | fmichea/lpbm | 172772d562e2f1aa4aba72599150f95f89bdf6ce | [
"BSD-3-Clause"
] | 2 | 2016-01-11T17:55:42.000Z | 2018-03-19T19:03:15.000Z | # module_loader.py - Loads every module in tools directory.
# Author: Franck Michea < franck.michea@gmail.com >
# License: New BSD License (See LICENSE)
'''
This module dynamically loads all the command line modules in `modules`
directory.
'''
import abc
import imp
import inspect
import os
import sys
import lpbm.logging
import lpbm.tools as ltools
from lpbm.lib.deprecated_command import deprecated_command
class Module(metaclass=abc.ABCMeta):
    """
    Base class of all command line modules.

    To create a new module, add a new file in the modules directory with a
    class inheriting from this one and implementing the abstract methods
    below (init, name, abstract, process). It will then be loaded
    automatically by load_modules().
    """

    def __init__(self):
        # Everything is filled in later: the parser by module_init(), the
        # modules/args pair by module_load().
        self.parser, self.modules, self.args = None, None, None
        self.needed_modules, self.module_loaded = None, False

    def module_init(self, argument_parser):
        """
        Initializes a sub-command parser on the given argparse sub-parsers
        object, registers module_process() as its callback and gives the
        subclass a chance to add its own arguments through init().

        If you want to load data from other modules, append their names to
        self.needed_modules in your init() override.
        """
        self.parser = argument_parser.add_parser(
            self.name(), help=self.abstract(), description=self.abstract()
        )
        self.parser.set_defaults(func=self.module_process)
        self.needed_modules = []
        self.init()

    def module_process(self, modules, args):
        """
        Loads the configuration module, every module listed in
        self.needed_modules and this module itself, then calls the
        subclass-provided process(). Configuration is always loaded first.
        """
        modules['config'].module_load(modules, args)
        for mod in self.needed_modules:
            modules[mod].module_load(modules, args)
        self.module_load(modules, args)
        self.process(modules, args)

    def module_load(self, modules, args):
        """
        Stores the global modules/args and calls load() exactly once; any
        later call is a no-op.
        """
        if self.module_loaded:
            return
        self.modules, self.args = modules, args
        # Flag *before* calling load() so that modules depending on each
        # other cannot recurse forever. (Fix: self.args used to be
        # redundantly assigned a second time here.)
        self.module_loaded = True
        self.load(modules, args)

    @abc.abstractmethod
    def init(self):
        """
        This function should add its own arguments on command line. When
        called, self.parser will be initialized with a valid argument parser.
        """
        pass

    def load(self, modules, args):
        """
        This function can be overriden to load data according to global
        arguments. Default implementation does nothing.
        """
        pass

    @abc.abstractmethod
    def name(self):
        """Returns the name of the parser on command line."""
        pass

    @abc.abstractmethod
    def abstract(self):
        """Returns an abstract of the functionnality of the command."""
        pass

    @abc.abstractmethod
    def process(self, modules, args):
        """Invoked if command was chosen on command line."""
        pass
class ModelManagerModule(Module, metaclass=abc.ABCMeta):
    """
    Specialization of Module for commands managing a collection of model
    objects: keeps a registry of objects indexed by id and wires generic
    new/list/edit/delete/--id command line options.
    """

    def __init__(self):
        super().__init__()
        # Registry of every known object, keyed by the object's ``id``.
        self._objects = dict()
        # Argument groups (flags / general actions / id-specific actions),
        # created in init().
        self.fgroup, self.ggroup, self.igroup = None, None, None
        # Destination names of the options registered in each group.
        self.fopts, self.gopts, self.iopts = [], [], []
        # Help-text templates; {object_name} placeholders are filled in
        # init() from object_name()/object_name_plural().
        self.helps = {
            'delete': 'delete the selected {object_name}.',
            'edit': 'edit the {object_name}.',
            'id': 'select an {object_name} for several options.',
            'list': 'list all the {object_name_plural}.',
            'new': 'add a new {object_name} interactively.',
            'with-deleted': 'include deleted {object_name_plural} in listings.',
        }

    def __getitem__(self, id):
        """Returns the registered object with the given id."""
        try:
            return self._objects[id]
        except KeyError:
            # NOTE(review): lpbm.exceptions is not imported at the top of this
            # file; presumably made available elsewhere in the package -- confirm.
            raise lpbm.exceptions.ModelDoesNotExistError(self.object_name(), id)

    def create_object(self, cls, *args, **kwargs):
        """Instanciates ``cls`` bound to this manager (not registered)."""
        return cls(self, self.modules, *args, **kwargs)

    def register_object(self, cls, *args, **kwargs):
        """Instanciates ``cls`` and stores it in the registry by its id."""
        obj = self.create_object(cls, *args, **kwargs)
        self._objects[obj.id] = obj
        return obj

    @property
    def objects(self):
        # Deleted objects are hidden unless --with-deleted was passed.
        return [obj for obj in self._objects.values()
                if getattr(self.args, 'with_deleted', False) or not obj.deleted]

    @property
    def all_objects(self):
        # Every registered object, including deleted ones.
        return list(self._objects.values())

    def init(self):
        # Set correctly object name to its value.
        kwargs = {
            'object_name': self.object_name(),
            'object_name_plural': self.object_name_plural(),
        }
        self.helps = dict((k, v.format(**kwargs)) for (k, v) in self.helps.items())
        # Default options.
        self.parser.add_argument('-i', '--id', action='store', type=int,
                                 metavar='id', default=None, help=self.helps['id'])
        self.ggroup = self.parser.add_argument_group(title='general actions')
        self.add_general_option('-n', '--new', help=self.helps['new'])
        self.add_general_option('-l', '--list', help=self.helps['list'])
        self.fgroup = self.parser.add_argument_group(title='flags')
        self.add_flag_option('-D', '--with-deleted', help=self.helps['with-deleted'])
        self.igroup = self.parser.add_argument_group(title='specific actions (need --id)')
        self.add_id_option('-e', '--edit', help=self.helps['edit'])
        self.add_id_option('-d', '--delete', help=self.helps['delete'])

    def _add_option(self, group, opts, args, kwargs_):
        """Adds an argparse option to ``group`` and records its dest in ``opts``."""
        def f(args):
            # Longest option string, minus the leading '--', dashes mangled
            # to underscores (mirrors argparse's default dest computation).
            return sorted(args, key=len)[-1][2:].replace('-', '_')
        opts.append(kwargs_.get('dest', f(args)))
        # default=None (not False) so process() can tell "flag given" from
        # "flag absent" with an ``is not None`` test.
        kwargs = {'default': None, 'action': 'store_true'}
        kwargs.update(kwargs_)
        group.add_argument(*args, **kwargs)

    def add_flag_option(self, *args, **kwargs):
        self._add_option(self.fgroup, self.fopts, args, kwargs)

    def add_general_option(self, *args, **kwargs):
        self._add_option(self.ggroup, self.gopts, args, kwargs)

    def add_id_option(self, *args, **kwargs):
        self._add_option(self.igroup, self.iopts, args, kwargs)

    def process(self, modules, args):
        """Dispatches the parsed options to the matching opt_* handler."""
        def option_mangle(opt):
            return opt.replace('-', '_')

        def option_states(opts):
            # Maps each registered option name to its parsed value.
            return dict((k, getattr(args, option_mangle(k))) for k in opts)

        # First check general options.
        opts_states = option_states(self.gopts)
        for opt, state in opts_states.items():
            if state is not None:
                try:
                    getattr(self, 'opt_' + opt)()
                    return
                except (AttributeError, TypeError):
                    # NOTE(review): this also masks AttributeError/TypeError
                    # raised *inside* the handler itself -- confirm intended.
                    raise lpbm.exceptions.GeneralOptionError(opt)

        # If we have any id option in there.
        opts_states = option_states(self.iopts)
        for opt, state in opts_states.items():
            if state is not None:
                if args.id is None:
                    raise lpbm.exceptions.IdOptionMissingError(opt)
                try:
                    getattr(self, 'opt_' + opt)(args.id)
                    return
                except (AttributeError, TypeError):
                    raise lpbm.exceptions.IdOptionError(opt)
        # No option matched: show the command's help.
        self.parser.print_help()

    # Actions.
    def opt_list(self, short=False):
        deprecated_command()

    def opt_new(self, *args, **kwargs):
        deprecated_command()

    def opt_edit(self, id):
        deprecated_command()

    def opt_delete(self, id):
        deprecated_command()

    @abc.abstractmethod
    def object_name(self):
        """Returns the singular, human readable name of the managed model."""
        pass

    @abc.abstractmethod
    def model_cls(self):
        """Returns the model class managed by this module."""
        pass

    def object_name_plural(self):
        """Default plural form; override for irregular plurals."""
        return self.object_name() + 's'

    def is_valid(self, id):
        """Returns True when ``id`` parses to an int known to the registry."""
        try:
            if int(id) not in self._objects:
                print('{} id {} is invalid!'.format(self.object_name().title(), id))
                return False
        except ValueError:
            print('One of the ids is not a valid integer: {}'.format(id))
            return False
        return True

    def is_valid_list(self, lst):
        """Returns True when every comma separated id in ``lst`` is valid."""
        lst = ltools.split_on_comma(lst)
        for id in lst:
            if not self.is_valid(id):
                return False
        return True
def load_modules(modules_, argument_parser):
    """Dynamically loads all the compatible commands from modules directory.

    Walks the package's ``modules`` directory, imports every ``*.py`` file
    found there, instanciates each class inheriting from Module, hooks it on
    ``argument_parser`` and stores it in ``modules_`` keyed by its name().

    NOTE(review): relies on the ``imp`` module, deprecated since Python 3.4
    and removed in 3.12 -- consider porting to importlib.
    """
    main_root = os.path.join(os.path.dirname(__file__), 'modules')
    logger, modules = lpbm.logging.get(), []
    # Finds all submodules that should be loaded.
    logger.debug('Tool being loaded from %s.', main_root)
    for root, _, files in os.walk(main_root):
        # Path of the current directory relative to main_root.
        root_ = root[len(main_root):]
        for filename in files:
            if not filename.endswith('.py'):
                continue
            # Dotted module name; filename[:-3] strips the '.py' suffix.
            mod_name = root_.replace('/', '.') + filename[:-3]
            try:
                modules.append((
                    mod_name, imp.find_module(mod_name, [root] + sys.path)
                ))
                logger.debug('Module found: lpbm.tools.%s', mod_name)
            except ImportError:
                logger.debug('Failed to find module %s.', mod_name)
    # Deterministic load order: sort by module name.
    modules = sorted(modules, key=lambda mod: mod[0])
    # Loads modules 1 by 1.
    for mod_name, (fd, pathname, description) in modules:
        try:
            mod = imp.load_module(mod_name, fd, pathname, description)
            logger.debug('Module loaded: %s', mod.__name__)
            for item in inspect.getmembers(mod):
                logger.debug(' + Item in module found: %s', item[0])
                if inspect.isclass(item[1]) and issubclass(item[1], Module):
                    try:
                        logger.debug(' -> Item is a subclass of Module class.')
                        # Abstract subclasses raise TypeError here and are
                        # skipped with a debug message.
                        tmp = item[1]()
                        tmp.module_init(argument_parser)
                        msg = 'Command %s was correctly loaded.'
                        logger.info(msg, tmp.name())
                        modules_[tmp.name()] = tmp
                    except TypeError as e:
                        msg = ' -> Failed to instanciate class %s, abstract '
                        msg += 'method or property missing?'
                        logger.debug(msg, item[0])
                        logger.debug(' Error: ' + str(e))
        except ImportError as err:
            logger.debug('Failed to import module %s (%s).', mod_name, err)
| 35.630137 | 90 | 0.589581 |
59fb080d5d358a57df5fe5d8a611d394e63ed66e | 743 | py | Python | netdevice/migrations/0003_create_network_os.py | lkmhaqer/gtools-python | cff6d80525b78a4fadfb686566489fbe1687d889 | [
"MIT"
] | 5 | 2016-10-31T17:46:17.000Z | 2022-02-02T00:40:49.000Z | netdevice/migrations/0003_create_network_os.py | lkmhaqer/gtools-python | cff6d80525b78a4fadfb686566489fbe1687d889 | [
"MIT"
] | 33 | 2018-05-09T06:07:50.000Z | 2021-09-22T17:39:56.000Z | netdevice/migrations/0003_create_network_os.py | lkmhaqer/gtools-python | cff6d80525b78a4fadfb686566489fbe1687d889 | [
"MIT"
] | 1 | 2020-05-14T21:44:25.000Z | 2020-05-14T21:44:25.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-05-14 14:10
from __future__ import unicode_literals
from django.db import migrations
def create_network_os(apps, schema_editor):
    """Seed the netdevice.network_os table with the supported OS names.

    Forward operation for migrations.RunPython: ``apps`` is the historical
    model registry, ``schema_editor`` is required by the RunPython signature
    but unused here (the previously computed ``db_alias`` was never read).
    ``get_or_create`` keeps the migration idempotent if it is re-run.
    """
    network_os = apps.get_model('netdevice', 'network_os')
    for name in ('ios', 'junos', 'bird', 'quagga', 'yaml'):
        network_os.objects.get_or_create(name=name)
dependencies = [
('netdevice', '0002_auto_20180511_0619'),
]
operations = [
migrations.RunPython(create_network_os),
]
| 29.72 | 58 | 0.726783 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.