Dataset schema (one record per source file):

| column | type | range / notes |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 3 to 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path / max_issues_repo_path / max_forks_repo_path | string | length 3 to 972 |
| max_stars_repo_name / max_issues_repo_name / max_forks_repo_name | string | length 6 to 130 |
| max_stars_repo_head_hexsha / max_issues_repo_head_hexsha / max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses / max_issues_repo_licenses / max_forks_repo_licenses | list | 1 to 10 entries |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_issues_count | int64 | 1 to 116k, nullable |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_stars_repo_stars_event_min_datetime, max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_min_datetime, max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_min_datetime, max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 3 to 1.03M |
| avg_line_length | float64 | 1.13 to 941k |
| max_line_length | int64 | 2 to 941k |
| alphanum_fraction | float64 | 0 to 1 |

Each record below lists its metadata, then the file content, then its line statistics.
hexsha: 364b600d3ef5acf33fe924e1359281a6bf39ef53 | size: 1,862 | ext: py | lang: Python
path: psltdsim/plot/__init__.py | repo: thadhaines/PSLTDSim | head_hexsha: 1bc598f3733c1369c164f54249e5f7757e6bf466 | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
#import sys
#if sys.version_info[0] > 2:
# import matplotlib.pyplot as plt
# The above doesn't seem to perform as desired; as a result,
# each function handles its own import of matplotlib.
from .sysPePmF import sysPePmF
from .sysPePmFLoad import sysPePmFLoad
from .sysPQVF import sysPQVF
from .sysPLQF import sysPLQF
from .sysVmVa import sysVmVa
from .sysLoad import sysLoad
from .sysPQgen import sysPQgen
from .allGenDynamics import allGenDynamics
from .oneGenDynamics import oneGenDynamics
from .sysPemLQF import sysPemLQF
from .BAplots01 import BAplots01
from .BAplots02 import BAplots02
from .BAplots02detail import BAplots02detail
from .BAgovU import BAgovU
from .ValveTravel import ValveTravel
from .ValveTravel00 import ValveTravel00
from .ValveTravel01 import ValveTravel01
from .AreaLosses import AreaLosses
from .SACE import SACE
from .ACE2dist import ACE2dist
from .sysF import sysF
from .Pload import Pload
from .PloadIEEE import PloadIEEE
from .sysFcomp import sysFcomp
from .genDynamicsComp import genDynamicsComp
from .AreaRunningValveTravel import AreaRunningValveTravel
from .BAALtest import BAALtest
from .branchMW import branchMW
from .branchMW2 import branchMW2
from .branchMW3 import branchMW3
from .AreaPLoad import AreaPLoad
from .AreaPe import AreaPe
from .AreaPm import AreaPm
from .sysShunt import sysShunt
from .branchMVAR import branchMVAR
from .sysBranchMVAR import sysBranchMVAR
from .sysShuntV import sysShuntV
from .sysShuntMVAR import sysShuntMVAR
from .sysPePmFLoad2 import sysPePmFLoad2
from .sysH import sysH
from .sysVmVAR import sysVmVAR
from .sysFcomp2 import sysFcomp2
from .sysPgenComp import sysPgenComp
from .sysPmComp import sysPmComp
from .sysPeComp import sysPeComp
from .sysPe import sysPe
from .areaPL import areaPL
from .PloadIEEE2 import PloadIEEE2
from .genDynamicsComp2 import genDynamicsComp2
avg_line_length: 26.6 | max_line_length: 59 | alphanum_fraction: 0.833512
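The `psltdsim/plot/__init__.py` file above only re-exports the individual plotting functions; its header comment explains that each plotting module performs its own `matplotlib` import instead of relying on a package-level import. A minimal sketch of that deferred-import pattern, using a hypothetical `sysExample` function (the name, signature, and labels are assumptions, not code from the repository):

```python
# Hypothetical psltdsim-style plotting helper: matplotlib is imported inside the
# function so that importing the package itself never requires matplotlib.
def sysExample(time, values, title="System response"):
    import matplotlib.pyplot as plt  # deferred import, as described in the package comment

    fig, ax = plt.subplots()
    ax.plot(time, values)
    ax.set_xlabel("Time [s]")
    ax.set_ylabel("Value")
    ax.set_title(title)
    fig.tight_layout()
    return fig, ax
```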
hexsha: 5cc19a3a98ab3c6710fc4842e7e1a8b3b0add251 | size: 806 | ext: py | lang: Python
path: setup.py | repo: slinghq/sling | head_hexsha: 710b2743d43b6378c10f7c58e004257e53b164f6 | licenses: ["Apache-2.0"]
max_stars_count: 6 (2015-12-07T13:14:15.000Z to 2021-05-27T10:17:35.000Z) | max_issues_count: null | max_forks_count: 3 (2015-12-07T12:25:27.000Z to 2015-12-07T13:14:47.000Z)
from setuptools import setup
with open('requirements.txt', 'r') as f:
install_requires = [l.strip() for l in f.readlines()]
setup(
name='sling',
version='0.1.1',
url='https://github.com/slinghq/sling',
license='Apache',
author='SlingHQ',
author_email='support@slinghq.com',
install_requires=install_requires,
packages=['sling', 'sling.core', 'sling.ext'],
platforms='any',
include_package_data=True,
zip_safe=False,
classifiers=[
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Environment :: Web Environment',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
avg_line_length: 27.793103 | max_line_length: 71 | alphanum_fraction: 0.631514
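The `setup.py` above builds `install_requires` from every line of `requirements.txt`, so blank lines end up as empty strings and `#` comment lines are passed through unchanged. A defensive variant of that parsing step, shown as a sketch rather than a change to the original file:

```python
# Sketch: read requirements.txt while skipping blank lines and '#' comments.
with open('requirements.txt', 'r') as f:
    install_requires = [
        line.strip()
        for line in f
        if line.strip() and not line.strip().startswith('#')
    ]
```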
hexsha: bdfdea8bc5af91e69f7f82f1455e0b3563088123 | size: 17,551 | ext: py | lang: Python
path: cvpods/structures/interval.py | repo: WFDetector/WFDetection | head_hexsha: b16d35b3a3a5de62de9e0bac83eccd21b6358b53 | licenses: ["Apache-2.0"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# Modified by WFDetection, Inc. and its affiliates. All Rights Reserved
import math
from enum import IntEnum, unique
from typing import Iterator, List, Tuple, Union
import numpy as np
import torch
from torchvision.ops.boxes import box_area
from cvpods.layers import cat
# _RawBoxType = Union[List[float], Tuple[float, ...], torch.Tensor, np.ndarray]
_RawIntervalType = Union[List[float], Tuple[float, ...], torch.Tensor, np.ndarray]
# @unique
# class BoxMode(IntEnum):
# """
# Enum of different ways to represent a box.
# Attributes:
# XYXY_ABS: (x0, y0, x1, y1) in absolute floating points coordinates.
# The coordinates in range [0, width or height].
# XYWH_ABS: (x0, y0, w, h) in absolute floating points coordinates.
# XYXY_REL: (x0, y0, x1, y1) in range [0, 1]. They are relative to the size of the image.
# XYWH_REL: (x0, y0, w, h) in range [0, 1]. They are relative to the size of the image.
# XYWHA_ABS: (xc, yc, w, h, a) in absolute floating points coordinates.
# (xc, yc) is the center of the rotated box, and the angle a is in degrees ccw.
# """
# XYXY_ABS = 0
# XYWH_ABS = 1
# XYXY_REL = 2
# XYWH_REL = 3
# XYWHA_ABS = 4
# @staticmethod
# def convert(box: _RawBoxType, from_mode: "BoxMode", to_mode: "BoxMode") -> _RawBoxType:
# """
# Args:
# box: can be a k-tuple, k-list or an Nxk array/tensor, where k = 4 or 5
# from_mode, to_mode (BoxMode)
# Returns:
# The converted box of the same type.
# """
# if from_mode == to_mode:
# return box
# original_type = type(box)
# is_numpy = isinstance(box, np.ndarray)
# single_box = isinstance(box, (list, tuple))
# if single_box:
# assert len(box) == 4 or len(box) == 5, (
# "BoxMode.convert takes either a k-tuple/list or an Nxk array/tensor,"
# " where k == 4 or 5"
# )
# arr = torch.tensor(box)[None, :]
# else:
# # avoid modifying the input box
# if is_numpy:
# arr = torch.from_numpy(np.asarray(box)).clone()
# else:
# arr = box.clone()
# assert to_mode.value not in [
# BoxMode.XYXY_REL,
# BoxMode.XYWH_REL,
# ] and from_mode.value not in [
# BoxMode.XYXY_REL,
# BoxMode.XYWH_REL,
# ], "Relative mode not yet supported!"
# if from_mode == BoxMode.XYWHA_ABS and to_mode == BoxMode.XYXY_ABS:
# assert (
# arr.shape[-1] == 5
# ), "The last dimension of input shape must be 5 for XYWHA format"
# original_dtype = arr.dtype
# arr = arr.double()
# w = arr[:, 2]
# h = arr[:, 3]
# a = arr[:, 4]
# c = torch.abs(torch.cos(a * math.pi / 180.0))
# s = torch.abs(torch.sin(a * math.pi / 180.0))
# # This basically computes the horizontal bounding rectangle of the rotated box
# new_w = c * w + s * h
# new_h = c * h + s * w
# # convert center to top-left corner
# arr[:, 0] -= new_w / 2.0
# arr[:, 1] -= new_h / 2.0
# # bottom-right corner
# arr[:, 2] = arr[:, 0] + new_w
# arr[:, 3] = arr[:, 1] + new_h
# arr = arr[:, :4].to(dtype=original_dtype)
# elif from_mode == BoxMode.XYWH_ABS and to_mode == BoxMode.XYWHA_ABS:
# original_dtype = arr.dtype
# arr = arr.double()
# arr[:, 0] += arr[:, 2] / 2.0
# arr[:, 1] += arr[:, 3] / 2.0
# angles = torch.zeros((arr.shape[0], 1), dtype=arr.dtype)
# arr = torch.cat((arr, angles), axis=1).to(dtype=original_dtype)
# else:
# if to_mode == BoxMode.XYXY_ABS and from_mode == BoxMode.XYWH_ABS:
# arr[:, 2] += arr[:, 0]
# arr[:, 3] += arr[:, 1]
# elif from_mode == BoxMode.XYXY_ABS and to_mode == BoxMode.XYWH_ABS:
# arr[:, 2] -= arr[:, 0]
# arr[:, 3] -= arr[:, 1]
# else:
# raise NotImplementedError(
# "Conversion from BoxMode {} to {} is not supported yet".format(
# from_mode, to_mode
# )
# )
# if single_box:
# return original_type(arr.flatten())
# if is_numpy:
# return arr.numpy()
# else:
# return arr
@unique
class IntervalMode(IntEnum):
"""
Enum of different ways to represent an Interval.
Attributes:
XX_ABS: (x0, x1) in absolute floating point coordinates.
The coordinates are in the range [0, length].
XW_ABS: (xc, w) in absolute floating point coordinates, where xc is the interval center and w is the width (this is how `convert` below treats it).
"""
XX_ABS = 0
XW_ABS = 1
@staticmethod
def convert(interval: _RawIntervalType, from_mode: "IntervalMode", to_mode: "IntervalMode") -> _RawIntervalType:
"""
Args:
Interval: can be a k-tuple, k-list or an Nxk array/tensor, where k = 2
from_mode, to_mode (IntervalMode)
Returns:
The converted interval of the same type.
"""
if from_mode == to_mode:
return interval
original_type = type(interval)
is_numpy = isinstance(interval, np.ndarray)
single_interval = isinstance(interval, (list, tuple))
if single_interval:
assert len(interval) == 2, (
"IntervalMode.convert takes either a k-tuple/list or an Nxk array/tensor,"
" where k == 2"
)
arr = torch.tensor(interval)[None, :]
else:
# avoid modifying the input box
if is_numpy:
arr = torch.from_numpy(np.asarray(interval)).clone()
else:
arr = interval.clone()
# assert to_mode.value not in [
# IntervalMode.XX_ABS,
# IntervalMode.XW_ABS,
# ] and from_mode.value not in [
# IntervalMode.XX_ABS,
# IntervalMode.XW_ABS,
# ], "Relative mode not yet supported!"
# if from_mode == BoxMode.XYWHA_ABS and to_mode == BoxMode.XYXY_ABS:
# assert (
# arr.shape[-1] == 5
# ), "The last dimension of input shape must be 5 for XYWHA format"
# original_dtype = arr.dtype
# arr = arr.double()
# w = arr[:, 2]
# h = arr[:, 3]
# a = arr[:, 4]
# c = torch.abs(torch.cos(a * math.pi / 180.0))
# s = torch.abs(torch.sin(a * math.pi / 180.0))
# # This basically computes the horizontal bounding rectangle of the rotated box
# new_w = c * w + s * h
# new_h = c * h + s * w
# # convert center to top-left corner
# arr[:, 0] -= new_w / 2.0
# arr[:, 1] -= new_h / 2.0
# # bottom-right corner
# arr[:, 2] = arr[:, 0] + new_w
# arr[:, 3] = arr[:, 1] + new_h
# arr = arr[:, :4].to(dtype=original_dtype)
# elif from_mode == BoxMode.XYWH_ABS and to_mode == BoxMode.XYWHA_ABS:
# original_dtype = arr.dtype
# arr = arr.double()
# arr[:, 0] += arr[:, 2] / 2.0
# arr[:, 1] += arr[:, 3] / 2.0
# angles = torch.zeros((arr.shape[0], 1), dtype=arr.dtype)
# arr = torch.cat((arr, angles), axis=1).to(dtype=original_dtype)
# else:
if to_mode == IntervalMode.XX_ABS and from_mode == IntervalMode.XW_ABS:
# center_position = arr[:, 0]
# length = arr[:, 1]
arr[:, 0] = arr[:, 0] - arr[:, 1] / 2 #start_pos
arr[:, 1] = arr[:, 0] + arr[:, 1] #end_pos
elif from_mode == IntervalMode.XX_ABS and to_mode == IntervalMode.XW_ABS:
arr[:, 1] -= arr[:, 0] # length
arr[:, 0] = arr[:, 0] + arr[:, 1] / 2 # center_pos
else:
raise NotImplementedError(
"Conversion from BoxMode {} to {} is not supported yet".format(
from_mode, to_mode
)
)
if single_interval:
return original_type(arr.flatten())
if is_numpy:
return arr.numpy()
else:
return arr
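# Illustrative example (not part of the original module): converting an Nx2
# tensor from (center, width) form to (start, end) form:
#   IntervalMode.convert(torch.tensor([[5.0, 4.0]]), IntervalMode.XW_ABS, IntervalMode.XX_ABS)
#   # -> tensor([[3., 7.]])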
class Intervales:
"""
This structure stores a list of intervals as an Nx2 torch.Tensor.
It supports some common methods about intervals
(`length`, `clip`, `nonempty`, etc.),
and also behaves like a Tensor
(supports indexing, `to(device)`, `.device`, and iteration over all intervals).
Attributes:
tensor(torch.Tensor): float matrix of Nx2.
"""
# IntervalSizeType = Union[List[int], Tuple[int, int]]
IntervalSizeType = int
def __init__(self, tensor: torch.Tensor):
"""
Args:
tensor (Tensor[float]): a Nx2 matrix. Each row is (x1, x2).
"""
device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device("cpu")
tensor = torch.as_tensor(tensor, dtype=torch.float32, device=device)
if tensor.numel() == 0:
tensor = torch.zeros(0, 2, dtype=torch.float32, device=device)
assert tensor.dim() == 2 and tensor.size(-1) == 2, tensor.size()
self.tensor = tensor
def clone(self) -> "Intervales":
"""
Clone the Intervales.
Returns:
Intervales
"""
return Intervales(self.tensor.clone())
def to(self, device: str) -> "Intervales":
return Intervales(self.tensor.to(device))
def length(self) -> torch.Tensor:
"""
Computes the length of all the intervales.
Returns:
torch.Tensor: a vector with lengths of each interval.
"""
interval = self.tensor
length = interval[:, 1] - interval[:, 0]
return length
def clip(self, interval_size: IntervalSizeType) -> None:
"""
Clip (in place) the intervals by limiting both endpoints to the range
[0, interval_size].
Args:
interval_size (): The clipping interval's size.
"""
assert torch.isfinite(self.tensor).all(), "Interval tensor contains infinite or NaN!"
w = interval_size
self.tensor[:, 0].clamp_(min=0, max=w)
self.tensor[:, 1].clamp_(min=0, max=w)
def nonempty(self, threshold: int = 0) -> torch.Tensor:
"""
Find intervales that are non-empty.
An interval is considered empty if its length is no larger than the threshold.
Returns:
Tensor:
a binary vector which represents whether each interval is empty
(False) or non-empty (True).
"""
interval = self.tensor
lengths = interval[:, 1] - interval[:, 0]
keep = lengths > threshold
return keep
def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "Intervales":
"""
Returns:
Intervales: Create a new :class:`Intervales` by indexing.
The following usages are allowed:
1. `new_intervales = intervales[3]`: return a `intervales` which contains only one interval.
2. `new_intervales = intervales[2:10]`: return a slice of intervales.
3. `new_intervales = intervales[vector]`, where vector is a torch.BoolTensor
with `length = len(intervales)`. Nonzero elements in the vector will be selected.
Note that the returned Intervales might share storage with this Intervales,
subject to Pytorch's indexing semantics.
"""
if isinstance(item, int):
return Intervales(self.tensor[item].view(1, -1))
b = self.tensor[item]
assert b.dim() == 2, "Indexing on Intervales with {} failed to return a matrix!".format(item)
return Intervales(b)
def __len__(self) -> int:
return self.tensor.shape[0]
def __repr__(self) -> str:
return "Intervales(" + str(self.tensor) + ")"
def inside_Interval(self, interval_size: IntervalSizeType, boundary_threshold: int = 0) -> torch.Tensor:
"""
Args:
interval_size (length): Size of the reference interval.
boundary_threshold (int): Intervales that extend beyond the reference interval
boundary by more than boundary_threshold are considered "outside".
Returns:
a binary vector, indicating whether each interval is inside the reference interval.
"""
length = interval_size
inds_inside = (
(self.tensor[..., 0] >= -boundary_threshold)
& (self.tensor[..., 1] < length + boundary_threshold)
)
return inds_inside
def get_centers(self) -> torch.Tensor:
"""
Returns:
The interval centers as an N-element vector of x coordinates.
"""
return (self.tensor[:, 1] + self.tensor[:, 0]) / 2
def scale(self, scale: float) -> None:
"""
Scale the interval with scaling factors
"""
self.tensor[:, :] *= scale
# self.tensor[:, 1::2] *= scale_y
@classmethod
def cat(cls, intervales_list: List["Intervales"]) -> "Intervales":
"""
Concatenates a list of Intervales into a single Intervales
Arguments:
intervales_list (list[Intervales])
Returns:
Intervales: the concatenated Intervales
"""
assert isinstance(intervales_list, (list, tuple))
assert all(isinstance(interval, Intervales) for interval in intervales_list)
if len(intervales_list) == 0:
return cls(torch.empty(0))
cat_intervales = type(intervales_list[0])(cat([b.tensor for b in intervales_list], dim=0))
return cat_intervales
@property
def device(self) -> torch.device:
return self.tensor.device
def __iter__(self) -> Iterator[torch.Tensor]:
"""
Yield an interval as a Tensor of shape (2,) at a time.
"""
yield from self.tensor
# added for DETR
# TODO @wangfeng02, use BoxMode instead and provide a better func
# def box_cxcywh_to_xyxy(x):
# x_c, y_c, w, h = x.unbind(-1)
# b = [(x_c - 0.5 * w), (y_c - 0.5 * h), (x_c + 0.5 * w), (y_c + 0.5 * h)]
# return torch.stack(b, dim=-1)
def interval_cxw_to_xx(x):
x_c, w = x.unbind(-1)
b = [(x_c - 0.5 * w), (x_c + 0.5 * w)]
return torch.stack(b, dim=-1)
# def box_xyxy_to_cxcywh(x):
# x0, y0, x1, y1 = x.unbind(-1)
# b = [(x0 + x1) / 2, (y0 + y1) / 2, (x1 - x0), (y1 - y0)]
# return torch.stack(b, dim=-1)
def interval_xx_to_cxcyw(x):
x0, x1 = x.unbind(-1)
b = [(x0 + x1) / 2, (x1 - x0)]
return torch.stack(b, dim=-1)
def interval_length(intervales):
"""
Computes the length of a set of intervals, which are specified by their
(x1, x2) coordinates.
Arguments:
intervales (Tensor[N, 2]): intervals for which the length will be computed. They
are expected to be in (x1, x2) format.
Returns:
length (Tensor[N]): length for each interval
"""
return intervales[:, 1] - intervales[:, 0]
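# Illustrative example (not part of the original module):
#   interval_length(torch.tensor([[1.0, 4.0], [2.0, 2.5]]))  # -> tensor([3.0000, 0.5000])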
def generalized_interval_iou_trace(intervales1, intervales2):
"""
Generalized IoU from https://giou.stanford.edu/
The intervales should be in [x0, x1] format
Returns a [N, M] pairwise matrix, where N = len(intervales1)
and M = len(intervales2)
"""
# degenerate intervales gives inf / nan results
# so do an early check
assert (intervales1[:, 1] >= intervales1[:, 0]).all()
assert (intervales2[:, 1] >= intervales2[:, 0]).all()
# vanilla interval iou
# modified from torchvision to also return the union
length1 = interval_length(intervales1)
length2 = interval_length(intervales2)
l = torch.max(intervales1[:, None, 0], intervales2[:, 0]) # [N,M]
r = torch.min(intervales1[:, None, 1], intervales2[:, 1]) # [N,M]
w = (r - l).clamp(min=0) # [N,M]
inter = w # [N,M]
union = length1[:, None] + length2 - inter
iou = inter / union
# iou, union = box_iou(boxes1, boxes2)
l2 = torch.min(intervales1[:, None, 0], intervales2[:, 0])
r2 = torch.max(intervales1[:, None, 1], intervales2[:, 1])
w2 = (r2 - l2).clamp(min=0) # [N,M]
# area = wh[:, :, 0] * wh[:, :, 1]
return iou - (w2 - union) / w2
def masks_to_boxes(masks):
"""
Compute the bounding boxes around the provided masks
The masks should be in format [N, H, W] where N is the number of masks,
(H, W) are the spatial dimensions.
Returns a [N, 4] tensors, with the boxes in xyxy format
"""
if masks.numel() == 0:
return torch.zeros((0, 4), device=masks.device)
h, w = masks.shape[-2:]
y = torch.arange(0, h, dtype=torch.float)
x = torch.arange(0, w, dtype=torch.float)
y, x = torch.meshgrid(y, x)
x_mask = masks * x.unsqueeze(0)
x_max = x_mask.flatten(1).max(-1)[0]
x_min = x_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]
y_mask = masks * y.unsqueeze(0)
y_max = y_mask.flatten(1).max(-1)[0]
y_min = y_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]
return torch.stack([x_min, y_min, x_max, y_max], 1)
avg_line_length: 35.172345 | max_line_length: 116 | alphanum_fraction: 0.554555
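A short usage sketch for the `Intervales` container defined above (the tensors and thresholds are illustrative; the import path simply follows the file's location in the repository and assumes the package is installed):

```python
import torch

from cvpods.structures.interval import Intervales  # assumed importable from the path listed above

intervals = Intervales(torch.tensor([[10.0, 25.0], [40.0, 42.0], [5.0, 5.0]]))
print(intervals.length())               # tensor([15.,  2.,  0.])
print(intervals.nonempty(threshold=1))  # tensor([ True,  True, False])
intervals.clip(interval_size=30)        # clamps both endpoints into [0, 30] in place
print(intervals.get_centers())          # tensor([17.5000, 30.0000,  5.0000])
```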
hexsha: 71bdc6deee698f930927648a21778017faa54c8a | size: 6,349 | ext: py | lang: Python
path: geotrek/api/mobile/serializers/trekking.py | repo: GeotrekCE/Geotrek-admin | head_hexsha: efcc7a6c2ccb6aee6b299b22f33f236dd8a23d91 | licenses: ["BSD-2-Clause"]
max_stars_count: 50 (2016-10-19T23:01:21.000Z to 2022-03-28T08:28:34.000Z) | max_issues_count: 1,422 (2016-10-27T10:39:40.000Z to 2022-03-31T13:37:10.000Z) | max_forks_count: 46 (2016-10-27T10:59:10.000Z to 2022-03-22T15:55:56.000Z)
import os
from django.conf import settings
from rest_framework import serializers
from rest_framework_gis import serializers as geo_serializers
from geotrek.api.mobile.serializers.tourism import InformationDeskSerializer
from geotrek.api.v2.functions import Transform, Length, StartPoint, EndPoint
from geotrek.zoning.models import City, District
if 'geotrek.trekking' in settings.INSTALLED_APPS:
from geotrek.trekking import models as trekking_models
class POIListSerializer(geo_serializers.GeoFeatureModelSerializer):
pictures = serializers.SerializerMethodField(read_only=True)
geometry = geo_serializers.GeometryField(read_only=True, precision=7, source='geom2d_transformed')
type = serializers.ReadOnlyField(source='type.pk')
def get_pictures(self, obj):
if not obj.resized_pictures:
return []
root_pk = self.context.get('root_pk') or obj.pk
return obj.serializable_pictures_mobile(root_pk)
class Meta:
model = trekking_models.POI
id_field = 'pk'
geo_field = 'geometry'
fields = (
'id', 'pk', 'pictures', 'name', 'description', 'type', 'geometry',
)
class TrekBaseSerializer(geo_serializers.GeoFeatureModelSerializer):
cities = serializers.SerializerMethodField(read_only=True)
districts = serializers.SerializerMethodField(read_only=True)
length = serializers.SerializerMethodField(read_only=True)
departure_city = serializers.SerializerMethodField(read_only=True)
def get_cities(self, obj):
qs = City.objects.filter(published=True)
cities = qs.filter(geom__intersects=(obj.geom, 0))
return cities.values_list('code', flat=True)
def get_departure_city(self, obj):
qs = City.objects.filter(published=True)
if obj.start_point:
city = qs.filter(geom__covers=(obj.start_point, 0)).first()
if city:
return city.code
return None
def get_length(self, obj):
return round(obj.length_2d_m, 1)
def get_districts(self, obj):
qs = District.objects.filter(published=True)
districts = qs.filter(geom__intersects=(obj.geom, 0))
return [district.pk for district in districts]
class Meta:
model = trekking_models.Trek
id_field = 'pk'
geo_field = 'geometry'
class TrekListSerializer(TrekBaseSerializer):
first_picture = serializers.SerializerMethodField(read_only=True)
geometry = geo_serializers.GeometryField(read_only=True, precision=7, source='start_point', )
def get_first_picture(self, obj):
root_pk = self.context.get('root_pk') or obj.pk
return obj.resized_picture_mobile(root_pk)
class Meta(TrekBaseSerializer.Meta):
fields = (
'id', 'pk', 'first_picture', 'name', 'departure', 'accessibilities', 'route', 'departure_city',
'difficulty', 'practice', 'themes', 'length', 'geometry', 'districts', 'cities', 'duration', 'ascent',
'descent',
)
class TrekDetailSerializer(TrekBaseSerializer):
geometry = geo_serializers.GeometryField(read_only=True, precision=7, source='geom2d_transformed')
pictures = serializers.SerializerMethodField(read_only=True)
arrival_city = serializers.SerializerMethodField(read_only=True)
information_desks = serializers.SerializerMethodField()
parking_location = serializers.SerializerMethodField(read_only=True)
profile = serializers.SerializerMethodField(read_only=True)
points_reference = serializers.SerializerMethodField()
children = serializers.SerializerMethodField()
def get_pictures(self, obj):
root_pk = self.context.get('root_pk') or obj.pk
return obj.serializable_pictures_mobile(root_pk)
def get_children(self, obj):
children = obj.children.all().annotate(length_2d_m=Length('geom'),
start_point=Transform(StartPoint('geom'), settings.API_SRID),
end_point=Transform(EndPoint('geom'), settings.API_SRID))
serializer_children = TrekListSerializer(children, many=True, context={'root_pk': obj.pk})
return serializer_children.data
def get_points_reference(self, obj):
if not obj.points_reference:
return None
return obj.points_reference.transform(settings.API_SRID, clone=True).coords
def get_parking_location(self, obj):
if not obj.parking_location:
return None
return obj.parking_location.transform(settings.API_SRID, clone=True).coords
def get_arrival_city(self, obj):
qs = City.objects.all()
if obj.end_point:
city = qs.filter(geom__covers=(obj.end_point, 0)).first()
if city:
return city.code
return None
def get_information_desks(self, obj):
return [
InformationDeskSerializer(information_desk, context={'root_pk': obj.pk}).data
for information_desk in obj.information_desks.all()
]
def get_profile(self, obj):
root_pk = self.context.get('root_pk') or obj.pk
return os.path.join("/", str(root_pk), settings.MEDIA_URL.lstrip('/'), obj.get_elevation_chart_url_png())
class Meta(TrekBaseSerializer.Meta):
auto_bbox = True
fields = (
'id', 'pk', 'name', 'slug', 'accessibilities', 'description_teaser', 'cities', 'profile',
'description', 'departure', 'arrival', 'duration', 'access', 'advised_parking', 'advice',
'difficulty', 'length', 'ascent', 'descent', 'route', 'labels', 'parking_location',
'min_elevation', 'max_elevation', 'themes', 'networks', 'practice', 'difficulty',
'geometry', 'pictures', 'information_desks', 'cities', 'departure_city', 'arrival_city',
'points_reference', 'districts', 'ambiance', 'children',
)
avg_line_length: 46.007246 | max_line_length: 118 | alphanum_fraction: 0.633643
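The serializers above read an optional `root_pk` from the serializer context (falling back to the object's own pk) to build picture and profile URLs, and `TrekListSerializer` additionally expects `length_2d_m`, `start_point`, and `end_point` annotations on each trek, mirroring what `get_children` does. A hedged sketch of how the list serializer might be invoked; the queryset is an assumption based on those requirements, not code from Geotrek-admin:

```python
from django.conf import settings

from geotrek.api.mobile.serializers.trekking import TrekListSerializer
from geotrek.api.v2.functions import Transform, Length, StartPoint, EndPoint
from geotrek.trekking import models as trekking_models

# Annotate the fields the serializer methods read (length, departure city, geometry source).
treks = trekking_models.Trek.objects.all().annotate(
    length_2d_m=Length('geom'),
    start_point=Transform(StartPoint('geom'), settings.API_SRID),
    end_point=Transform(EndPoint('geom'), settings.API_SRID),
)
payload = TrekListSerializer(treks, many=True).data
```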
hexsha: a7fb9ab62109a0d2ace16fc3c2987865620fb899 | size: 28,596 | ext: py | lang: Python
path: dashboard/dashboard/add_point.py | repo: ravitejavalluri/catapult | head_hexsha: 246a39a82c2213d913a96fff020a263838dc76e6 | licenses: ["BSD-3-Clause"]
max_stars_count: null | max_issues_count: null | max_forks_count: 1 (2020-07-24T05:13:01.000Z to 2020-07-24T05:13:01.000Z)
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""URL endpoint to allow Buildbot slaves to post data to the dashboard."""
import copy
import json
import logging
import math
import re
from google.appengine.api import datastore_errors
from google.appengine.api import taskqueue
from google.appengine.ext import ndb
from dashboard import math_utils
from dashboard import post_data_handler
from dashboard.common import datastore_hooks
from dashboard.models import graph_data
_TASK_QUEUE_NAME = 'new-points-queue'
# Number of rows to process per task queue task. This limits the task size
# and execution time (Limits: 100KB object size and 10 minutes execution time).
_TASK_QUEUE_SIZE = 32
# Max length for a Row property name.
_MAX_COLUMN_NAME_LENGTH = 25
# Maximum length of a value for a string property.
_STRING_COLUMN_MAX_LENGTH = 400
# Maximum number of properties for a Row.
_MAX_NUM_COLUMNS = 30
# Maximum length for a test path. This limit is required because the test path
# used as the string ID for TestContainer (the parent in the datastore for Row
# entities), and datastore imposes a maximum string ID length.
_MAX_TEST_PATH_LENGTH = 500
class BadRequestError(Exception):
"""An error indicating that a 400 response status should be returned."""
pass
class AddPointHandler(post_data_handler.PostDataHandler):
"""URL endpoint to post data to the dashboard."""
def post(self):
"""Validates data parameter and add task to queue to process points.
The row data comes from a "data" parameter, which is a JSON encoding of a
list of dictionaries, each of which represents one performance result
(one point in a graph) and associated data.
[
{
"master": "ChromiumPerf",
"bot": "xp-release-dual-core",
"test": "dromaeo/dom/modify",
"revision": 123456789,
"value": 24.66,
"error": 2.33,
"units": "ms",
"supplemental_columns": {
"d_median": 24234.12,
"d_mean": 23.553,
"r_webkit": 423340,
...
},
...
},
...
]
In general, the required fields are "master", "bot", "test" (which together
form the test path which identifies the series that this point belongs to),
and "revision" and "value", which are the X and Y values for the point.
This API also supports the Dashboard JSON v1.0 format (go/telemetry-json),
the first producer of which is Telemetry. Telemetry provides lightweight
serialization of values it produces, as JSON. If a dashboard JSON object is
passed, it will be a single dict rather than a list, with the test,
value, error, and units fields replaced by a chart_data field containing a
Chart JSON dict (see design doc, and example below). Dashboard JSON v1.0 is
processed by converting it into rows (which can be viewed as Dashboard JSON
v0).
{
"master": "ChromiumPerf",
<other row fields>,
"chart_data": {
"foo": {
"bar": {
"type": "scalar",
"name": "foo.bar",
"units": "ms",
"value": 4.2,
},
"summary": {
"type": "list_of_scalar_values",
"name": "foo",
"units": "ms",
"values": [4.2, 5.7, 6.8],
"std": 1.30512,
},
},
}
Request parameters:
data: JSON encoding of a list of dictionaries.
Outputs:
Empty 200 response if successful,
200 response with warning message if optional data is invalid,
403 response with error message if sender IP is not white-listed,
400 response with error message if required data is invalid.
500 with error message otherwise.
"""
datastore_hooks.SetPrivilegedRequest()
if not self._CheckIpAgainstWhitelist():
# TODO(qyearsley): Add test coverage. See catapult:#1346.
return
data_str = self.request.get('data')
if not data_str:
# TODO(qyearsley): Add test coverage. See catapult:#1346.
self.ReportError('Missing "data" parameter.', status=400)
return
self.AddData(data_str)
def AddData(self, data_str):
try:
data = json.loads(data_str)
except ValueError:
self.ReportError('Invalid JSON string.', status=400)
return
logging.info('Received data: %s', data)
try:
if type(data) is dict:
if data.get('chart_data'):
data = _DashboardJsonToRawRows(data)
if not data:
return # No data to add, bail out.
else:
self.ReportError(
'Data should be a list of rows or a Dashboard JSON v1.0 dict.',
status=400)
return
if data:
# We only need to validate the row ID for one point, since all points
# being handled by this upload should have the same row ID.
test_map = _ConstructTestPathMap(data)
_ValidateRowId(data[0], test_map)
for row_dict in data:
_ValidateRowDict(row_dict)
_AddTasks(data)
except BadRequestError as error:
# If any of the data was invalid, abort immediately and return an error.
self.ReportError(error.message, status=400)
def _DashboardJsonToRawRows(dash_json_dict):
"""Formats a Dashboard JSON dict as a list of row dicts.
For the dashboard to begin accepting the Telemetry Dashboard JSON format
as per go/telemetry-json, this function chunks a Dashboard JSON literal
into rows and passes the resulting list to _AddTasks.
Args:
dash_json_dict: A dashboard JSON v1.0 dict.
Returns:
A list of dicts, each of which represents a point.
Raises:
AssertionError: The given argument wasn't a dict.
BadRequestError: The content of the input wasn't valid.
"""
assert type(dash_json_dict) is dict
# A Dashboard JSON dict should at least have all charts coming from the
# same master, bot and rev. It can contain multiple charts, however.
if not dash_json_dict.get('master'):
raise BadRequestError('No master name given.')
if not dash_json_dict.get('bot'):
raise BadRequestError('No bot name given.')
if not dash_json_dict.get('point_id'):
raise BadRequestError('No point_id number given.')
if not dash_json_dict.get('chart_data'):
raise BadRequestError('No chart data given.')
test_suite_name = _TestSuiteName(dash_json_dict)
chart_data = dash_json_dict.get('chart_data', {})
charts = chart_data.get('charts', {})
if not charts:
return [] # No charts implies no data to add.
# Links to about:tracing traces are listed under 'trace'; if they
# exist copy them to a separate dictionary and delete from the chartjson
# so that we don't try to process them as data points.
tracing_links = None
if 'trace' in charts:
tracing_links = charts['trace'].copy()
del charts['trace']
row_template = _MakeRowTemplate(dash_json_dict)
benchmark_description = chart_data.get('benchmark_description', '')
trace_rerun_options = dict(chart_data.get('trace_rerun_options', []))
is_ref = bool(dash_json_dict.get('is_ref'))
rows = []
for chart in charts:
for trace in charts[chart]:
# Need to do a deep copy here so we don't copy a_tracing_uri data.
row = copy.deepcopy(row_template)
specific_vals = _FlattenTrace(
test_suite_name, chart, trace, charts[chart][trace], is_ref,
tracing_links, benchmark_description)
# Telemetry may validly produce rows that represent a value of NaN. To
# avoid getting into messy situations with alerts, we do not add such
# rows to be processed.
if not (math.isnan(specific_vals['value']) or
math.isnan(specific_vals['error'])):
if specific_vals['tracing_uri']:
row['supplemental_columns']['a_tracing_uri'] = specific_vals[
'tracing_uri']
if trace_rerun_options:
row['supplemental_columns']['a_trace_rerun_options'] = (
trace_rerun_options)
row.update(specific_vals)
rows.append(row)
return rows
def _TestSuiteName(dash_json_dict):
"""Extracts a test suite name from Dashboard JSON.
The dashboard JSON may contain a field "test_suite_name". If this is not
present or it is None, the dashboard will fall back to using "benchmark_name"
in the "chart_data" dict.
"""
if dash_json_dict.get('test_suite_name'):
return dash_json_dict['test_suite_name']
try:
return dash_json_dict['chart_data']['benchmark_name']
except KeyError as e:
raise BadRequestError('Could not find test suite name. ' + e.message)
def _AddTasks(data):
"""Puts tasks on queue for adding data.
Args:
data: A list of dictionaries, each of which represents one point.
"""
task_list = []
for data_sublist in _Chunk(data, _TASK_QUEUE_SIZE):
task_list.append(taskqueue.Task(
url='/add_point_queue',
params={'data': json.dumps(data_sublist)}))
queue = taskqueue.Queue(_TASK_QUEUE_NAME)
for task_sublist in _Chunk(task_list, taskqueue.MAX_TASKS_PER_ADD):
# Calling get_result waits for all tasks to be added. It's possible that
# this is different, and maybe faster, than just calling queue.add.
queue.add_async(task_sublist).get_result()
def _Chunk(items, chunk_size):
"""Breaks a long list into sub-lists of a particular size."""
chunks = []
for i in range(0, len(items), chunk_size):
chunks.append(items[i:i + chunk_size])
return chunks
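# Illustrative example (not part of the original module):
#   _Chunk([1, 2, 3, 4, 5], 2) == [[1, 2], [3, 4], [5]]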
def _MakeRowTemplate(dash_json_dict):
"""Produces a template for rows created from a Dashboard JSON v1.0 dict.
_DashboardJsonToRawRows adds metadata fields to every row that it creates.
These include things like master, bot, point ID, versions, and other
supplementary data. This method produces a dict containing this metadata
to which row-specific information (like value and error) can be added.
Some metadata needs to be transformed to conform to the v0 format, and this
method is also responsible for that transformation.
Some validation is deferred until after the input is converted to a list
of row dicts, since revision format correctness is checked on a per-point
basis.
Args:
dash_json_dict: A dashboard JSON v1.0 dict.
Returns:
A dict containing data to include in each row dict that is created from
|dash_json_dict|.
"""
row_template = dash_json_dict.copy()
del row_template['chart_data']
del row_template['point_id']
row_template['revision'] = dash_json_dict['point_id']
annotations = row_template['supplemental']
versions = row_template['versions']
del row_template['supplemental']
del row_template['versions']
row_template['supplemental_columns'] = {}
supplemental = row_template['supplemental_columns']
for annotation in annotations:
supplemental['a_' + annotation] = annotations[annotation]
for version in versions:
supplemental['r_' + version] = versions[version]
return row_template
def _FlattenTrace(test_suite_name, chart_name, trace_name, trace,
is_ref=False, tracing_links=None, benchmark_description=''):
"""Takes a trace dict from dashboard JSON and readies it for display.
Traces can be either scalars or lists; if scalar we take the value directly;
if list we average the values and compute their standard deviation. We also
extract fields that are normally part of v0 row dicts that are uploaded
using add_point but are actually part of traces in the v1.0 format.
Args:
test_suite_name: The name of the test suite (benchmark).
chart_name: The name of the chart to which this trace belongs.
trace_name: The name of the passed trace.
trace: A trace dict extracted from a dashboard JSON chart.
is_ref: A boolean which indicates whether this trace comes from a
reference build.
tracing_links: A dictionary mapping trace names to about:tracing trace
urls in cloud storage
benchmark_description: A string documenting the benchmark suite to which
this trace belongs.
Returns:
A dict containing units, value, and error for this trace.
Raises:
BadRequestError: The data wasn't valid.
"""
if '@@' in chart_name:
tir_label, chart_name = chart_name.split('@@')
chart_name = chart_name + '/' + tir_label
value, error = _ExtractValueAndError(trace)
# If there is a link to an about:tracing trace in cloud storage for this
# test trace_name, cache it.
tracing_uri = None
if (tracing_links and
trace_name in tracing_links and
'cloud_url' in tracing_links[trace_name]):
tracing_uri = tracing_links[trace_name]['cloud_url'].replace('\\/', '/')
trace_name = _EscapeName(trace_name)
if trace_name == 'summary':
subtest_name = chart_name
else:
subtest_name = chart_name + '/' + trace_name
name = test_suite_name + '/' + subtest_name
if trace_name == 'summary' and is_ref:
name += '/ref'
elif trace_name != 'summary' and is_ref:
name += '_ref'
row_dict = {
'test': name,
'value': value,
'error': error,
'units': trace['units'],
'tracing_uri': tracing_uri,
'benchmark_description': benchmark_description,
}
if 'improvement_direction' in trace:
improvement_direction_str = trace['improvement_direction']
if improvement_direction_str is None:
raise BadRequestError('improvement_direction must not be None')
row_dict['higher_is_better'] = _ImprovementDirectionToHigherIsBetter(
improvement_direction_str)
return row_dict
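# Illustrative example (not part of the original module): a scalar trace
#   _FlattenTrace('dromaeo', 'dom', 'modify', {'type': 'scalar', 'value': 24.66, 'units': 'ms'})
# yields {'test': 'dromaeo/dom/modify', 'value': 24.66, 'error': 0, 'units': 'ms',
#         'tracing_uri': None, 'benchmark_description': ''}.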
def _ExtractValueAndError(trace):
"""Returns the value and measure of error from a chartjson trace dict.
Args:
trace: A dict that has one "result" from a performance test, e.g. one
"value" in a Telemetry test, with the keys "trace_type", "value", etc.
Returns:
A pair (value, error) where |value| is a float and |error| is some measure
of variance used to show error bars; |error| could be None.
Raises:
BadRequestError: Data format was invalid.
"""
trace_type = trace.get('type')
if trace_type == 'scalar':
value = trace.get('value')
if value is None and trace.get('none_value_reason'):
return float('nan'), 0
try:
return float(value), 0
except:
raise BadRequestError('Expected scalar value, got: %r' % value)
if trace_type == 'list_of_scalar_values':
values = trace.get('values')
if not isinstance(values, list) and values is not None:
# Something else (such as a single scalar, or string) was given.
raise BadRequestError('Expected list of scalar values, got: %r' % values)
if not values or None in values:
# None was included or values is None; this is not an error if there
# is a reason.
if trace.get('none_value_reason'):
return float('nan'), float('nan')
raise BadRequestError('Expected list of scalar values, got: %r' % values)
if not all(_IsNumber(v) for v in values):
raise BadRequestError('Non-number found in values list: %r' % values)
value = math_utils.Mean(values)
std = trace.get('std')
if std is not None:
error = std
else:
error = math_utils.StandardDeviation(values)
return value, error
if trace_type == 'histogram':
return _GeomMeanAndStdDevFromHistogram(trace)
raise BadRequestError('Invalid value type in chart object: %r' % trace_type)
def _IsNumber(v):
return isinstance(v, float) or isinstance(v, int) or isinstance(v, long)
def _EscapeName(name):
"""Escapes a trace name so it can be stored in a row.
Args:
name: A string representing a name.
Returns:
An escaped version of the name.
"""
return re.sub(r'[\:|=/#&,]', '_', name)
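# Illustrative example (not part of the original module):
#   _EscapeName('dom/modify=fast') == 'dom_modify_fast'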
def _GeomMeanAndStdDevFromHistogram(histogram):
"""Generates the geom. mean and std. dev. for a histogram.
A histogram is a collection of numerical buckets with associated
counts; a bucket can either represent a number of instances of a single
value ('low'), or from within a range of values (in which case 'high' will
specify the upper bound). We compute the statistics by treating the
histogram analogously to a list of individual values, where the counts tell
us how many of each value there are.
Args:
histogram: A histogram dict with a list 'buckets' of buckets.
Returns:
The geometric mean and standard deviation of the given histogram.
"""
# Note: This code comes originally from
# build/scripts/common/chromium_utils.py and was used initially for
# processing histogram results on the buildbot side previously.
if 'buckets' not in histogram:
# TODO(qyearsley): Add test coverage. See catapult:#1346.
return 0.0, 0.0
count = 0
sum_of_logs = 0
for bucket in histogram['buckets']:
if 'high' in bucket:
bucket['mean'] = (bucket['low'] + bucket['high']) / 2.0
else:
# TODO(qyearsley): Add test coverage. See catapult:#1346.
bucket['mean'] = bucket['low']
if bucket['mean'] > 0:
sum_of_logs += math.log(bucket['mean']) * bucket['count']
count += bucket['count']
if count == 0:
return 0.0, 0.0
sum_of_squares = 0
geom_mean = math.exp(sum_of_logs / count)
for bucket in histogram['buckets']:
if bucket['mean'] > 0:
sum_of_squares += (bucket['mean'] - geom_mean) ** 2 * bucket['count']
return geom_mean, math.sqrt(sum_of_squares / count)
def _ImprovementDirectionToHigherIsBetter(improvement_direction_str):
"""Converts an improvement direction string to a higher_is_better boolean.
Args:
improvement_direction_str: a string, either 'up' or 'down'.
Returns:
A boolean expressing the appropriate higher_is_better value.
Raises:
BadRequestError: if improvement_direction_str is invalid.
"""
# If improvement_direction is provided, we want to use it. Otherwise, by not
# providing it we'll fall back to unit-info.json
# TODO(eakuefner): Fail instead of falling back after fixing crbug.com/459450.
if improvement_direction_str == 'up':
return True
elif improvement_direction_str == 'down':
return False
else:
raise BadRequestError('Invalid improvement direction string: ' +
improvement_direction_str)
def _ConstructTestPathMap(row_dicts):
"""Makes a mapping from test paths to last added revision."""
last_added_revision_keys = []
for row in row_dicts:
if not ('master' in row and 'bot' in row and 'test' in row):
continue
path = '%s/%s/%s' % (row['master'], row['bot'], row['test'].strip('/'))
if len(path) > _MAX_TEST_PATH_LENGTH:
continue
last_added_revision_keys.append(ndb.Key('LastAddedRevision', path))
try:
last_added_revision_entities = ndb.get_multi(last_added_revision_keys)
except datastore_errors.BadRequestError:
# TODO(qyearsley): Add test coverage. See catapult:#1346.
logging.warn('Datastore BadRequestError when getting %s',
repr(last_added_revision_keys))
return {}
return {r.key.string_id(): r.revision
for r in last_added_revision_entities if r is not None}
def _ValidateRowDict(row):
"""Checks all fields in the input dictionary.
Args:
row: A dictionary which represents one point.
Raises:
BadRequestError: The input was not valid.
"""
required_fields = ['master', 'bot', 'test']
for field in required_fields:
if field not in row:
raise BadRequestError('No "%s" field in row dict.' % field)
_ValidateMasterBotTest(row['master'], row['bot'], row['test'])
GetAndValidateRowProperties(row)
def _ValidateMasterBotTest(master, bot, test):
"""Validates the master, bot, and test properties of a row dict."""
# Trailing and leading slashes in the test name are ignored.
# The test name must consist of at least a test suite plus sub-test.
test = test.strip('/')
if '/' not in test:
raise BadRequestError('Test name must have more than one part.')
if len(test.split('/')) > graph_data.MAX_TEST_ANCESTORS:
raise BadRequestError('Invalid test name: %s' % test)
# The master and bot names have just one part.
if '/' in master or '/' in bot:
raise BadRequestError('Illegal slash in master or bot name.')
_ValidateTestPath('%s/%s/%s' % (master, bot, test))
def _ValidateTestPath(test_path):
"""Checks whether all the parts of the test path are valid."""
# A test with a test path length over the max key length shouldn't be
# created, since the test path is used in TestContainer keys.
if len(test_path) > _MAX_TEST_PATH_LENGTH:
raise BadRequestError('Test path too long: %s' % test_path)
# Stars are reserved for test path patterns, so they can't be used in names.
if '*' in test_path:
raise BadRequestError('Illegal asterisk in test name.')
for name in test_path.split('/'):
_ValidateTestPathPartName(name)
def _ValidateTestPathPartName(name):
"""Checks whether a Master, Bot or TestMetadata name is OK."""
# NDB Datastore doesn't allow key names to start and end with "__".
if name.startswith('__') and name.endswith('__'):
raise BadRequestError(
'Invalid name: "%s". Names cannot start and end with "__".' % name)
def _ValidateRowId(row_dict, test_map):
"""Checks whether the ID for a Row is OK.
Args:
row_dict: A dictionary with new point properties, including "revision".
test_map: A dictionary mapping test paths to the last previously added
revision for each test.
Raises:
BadRequestError: The revision is not acceptable for some reason.
"""
row_id = GetAndValidateRowId(row_dict)
# Get the last added revision number for this test.
master, bot, test = row_dict['master'], row_dict['bot'], row_dict['test']
test_path = '%s/%s/%s' % (master, bot, test)
last_row_id = test_map.get(test_path)
if not last_row_id:
# Could be first point in test.
logging.warning('Test %s has no last added revision entry.', test_path)
return
allow_jump = (
master.endswith('Internal') or
(master.endswith('QA') and bot.startswith('release-tests-')))
if not _IsAcceptableRowId(row_id, last_row_id, allow_jump=allow_jump):
raise BadRequestError(
'Invalid ID (revision) %d; compared to previous ID %s, it was larger '
'or smaller by too much.' % (row_id, last_row_id))
def _IsAcceptableRowId(row_id, last_row_id, allow_jump=False):
"""Checks whether the given row id (aka revision) is not too large or small.
For each data series (i.e. TestMetadata entity), we assume that row IDs are
monotonically increasing. On a given chart, points are sorted by these
row IDs. This way, points can arrive out of order but still be shown
correctly in the chart.
However, sometimes a bot might start to use a different *type* of row ID;
for example it might change from revision numbers or build numbers to
timestamps, or from timestamps to build numbers. This causes a lot of
problems, including points being put out of order.
If a sender of data actually wants to switch to a different type of
row ID, it would be much cleaner for them to start sending it under a new
chart name.
Args:
row_id: The proposed Row entity id (usually sent as "revision")
last_row_id: The previous Row id, or None if there were none previous.
Returns:
True if acceptable, False otherwise.
"""
if last_row_id is None:
# TODO(qyearsley): Add test coverage. See catapult:#1346.
return True
if row_id <= 0:
# TODO(qyearsley): Add test coverage. See catapult:#1346.
return False
# Too big of a decrease.
if row_id < 0.5 * last_row_id:
return False
# TODO(perezju): We temporarily allow for a big jump on special cased bots,
# while we migrate from using commit position to timestamp as row id.
# The jump is only allowed into a timestamp falling within Aug-Dec 2016.
# This special casing should be removed after finishing the migration.
if allow_jump and 1470009600 < row_id < 1483228800:
return True
# Too big of an increase.
if row_id > 2 * last_row_id:
return False
return True
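# Illustrative examples (not part of the original module): with a previous row id
# of 290000, proposed ids inside [145000, 580000] are accepted:
#   _IsAcceptableRowId(300000, 290000)  # -> True
#   _IsAcceptableRowId(700000, 290000)  # -> False (more than a 2x increase)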
def GetAndValidateRowId(row_dict):
"""Returns the integer ID for a new Row.
This method is also responsible for validating the input fields related
to making the new row ID.
Args:
row_dict: A dictionary obtained from the input JSON.
Returns:
An integer row ID.
Raises:
BadRequestError: The input wasn't formatted properly.
"""
if 'revision' not in row_dict:
raise BadRequestError('Required field "revision" missing.')
try:
return int(row_dict['revision'])
except (ValueError, TypeError):
raise BadRequestError('Bad value for "revision", should be numerical.')
def GetAndValidateRowProperties(row):
"""From the object received, make a dictionary of properties for a Row.
This includes the default "value" and "error" columns as well as all
supplemental columns, but it doesn't include "revision", and it doesn't
include input fields that are properties of the parent TestMetadata, such as
"units".
This method is responsible for validating all properties that are to be
properties of the new Row.
Args:
row: A dictionary obtained from the input JSON.
Returns:
A dictionary of the properties and property values to set when creating
a Row. This will include "value" and "error" as well as all supplemental
columns.
Raises:
BadRequestError: The properties weren't formatted correctly.
"""
columns = {}
# Value and error must be floating point numbers.
if 'value' not in row:
raise BadRequestError('No "value" given.')
try:
columns['value'] = float(row['value'])
except (ValueError, TypeError):
raise BadRequestError('Bad value for "value", should be numerical.')
if 'error' in row:
try:
error = float(row['error'])
columns['error'] = error
except (ValueError, TypeError):
logging.warn('Bad value for "error".')
columns.update(_GetSupplementalColumns(row))
return columns
def _GetSupplementalColumns(row):
"""Gets a dict of supplemental columns.
If any columns are invalid, a warning is logged and they just aren't included,
but no exception is raised.
Individual rows may specify up to _MAX_NUM_COLUMNS extra data, revision,
and annotation columns. These columns must follow formatting rules for
their type. Invalid columns are dropped with an error log, but the valid
data will still be graphed.
Args:
row: A dict, possibly with the key "supplemental_columns", the value of
which should be a dict.
Returns:
A dict of valid supplemental columns.
"""
columns = {}
for (name, value) in row.get('supplemental_columns', {}).iteritems():
# Don't allow too many columns
if len(columns) == _MAX_NUM_COLUMNS:
logging.warn('Too many columns, some being dropped.')
break
value = _CheckSupplementalColumn(name, value)
if value:
columns[name] = value
return columns
def _CheckSupplementalColumn(name, value):
"""Returns a possibly modified value for a supplemental column, or None."""
# Check length of column name.
name = str(name)
if len(name) > _MAX_COLUMN_NAME_LENGTH:
logging.warn('Supplemental column name too long.')
return None
# The column name has a prefix which indicates type of value.
if name[:2] not in ('d_', 'r_', 'a_'):
logging.warn('Bad column name "%s", invalid prefix.', name)
return None
# The d_ prefix means "data column", intended to hold numbers.
if name.startswith('d_'):
try:
value = float(value)
except (ValueError, TypeError):
logging.warn('Bad value for column "%s", should be numerical.', name)
return None
# The r_ prefix means "revision", and the value should look like a number,
# a version number, or a git commit hash.
if name.startswith('r_'):
revision_patterns = [
r'^\d+$',
r'^\d+\.\d+\.\d+\.\d+$',
r'^[A-Fa-f0-9]{40}$',
]
if (not value or len(str(value)) > _STRING_COLUMN_MAX_LENGTH or
not any(re.match(p, str(value)) for p in revision_patterns)):
logging.warn('Bad value for revision column "%s".', name)
return None
value = str(value)
if name.startswith('a_'):
# Annotation column, should be a short string.
if len(str(value)) > _STRING_COLUMN_MAX_LENGTH:
logging.warn('Value for "%s" too long, max length is %d.',
name, _STRING_COLUMN_MAX_LENGTH)
return None
return value
avg_line_length: 34.328932 | max_line_length: 80 | alphanum_fraction: 0.693279
hexsha: f3221580408becc6230fc52b93ac6a7f52761013 | size: 558 | ext: py | lang: Python
path: December-14/python_Nilesh2000.py | repo: Nilesh2000/A-December-of-Algorithms-2019 | head_hexsha: da92ef23b473d083fea48f102fd46646b604342a | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
# Author - Nilesh D
# December 14 - A Wordplay with Vowels and Consonants
def subString(s, n):
vowel = ['a', 'e', 'i', 'o', 'u']
scoreA = scoreB = 0
for i in range(n):
for j in range(i+1, n+1):
subsStr = s[i: j]
if subsStr[0] in vowel:
scoreA += 1
else:
scoreB += 1
if scoreA > scoreB:
print("The winner is A with", scoreA, "points")
else:
print("The winner is B with", scoreB, "points.")
s = input("Enter string: ")
subString(s, len(s))
avg_line_length: 24.26087 | max_line_length: 56 | alphanum_fraction: 0.507168
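The brute-force solution above enumerates every substring, which is O(n^2). Because each substring is scored only by its first character, a substring starting at index i contributes (n - i) points to whichever player owns s[i], so the same scores can be computed in a single pass. A sketch of that optimization (an alternative to the file above, not part of it):

```python
def sub_string_scores(s):
    """Return (scoreA, scoreB) for the vowel/consonant substring game in O(n)."""
    vowels = set('aeiou')
    n = len(s)
    score_a = score_b = 0
    for i, ch in enumerate(s):
        # Every substring starting at i (there are n - i of them) is scored by s[i].
        if ch in vowels:
            score_a += n - i
        else:
            score_b += n - i
    return score_a, score_b
```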
hexsha: 432acc41c7f52ec3d4c5e581a080ea3b9dec9355 | size: 24,548 | ext: py | lang: Python
path: as3/parser.py | repo: eigenein/python-as3 | head_hexsha: 323b58fd19359842332a1b045857f793cd124aa3 | licenses: ["MIT"]
max_stars_count: 1 (2019-05-27T11:25:18.000Z to 2019-05-27T11:25:18.000Z) | max_issues_count: null | max_forks_count: null
from __future__ import annotations
from collections import deque
from typing import Any, Callable, Deque, Dict, Iterable, Iterator, List, NoReturn, Optional, Set, Tuple
from as3 import ast_, interpreter
from as3.enums import TokenType
from as3.exceptions import ASSyntaxError
from as3.interpreter import undefined
from as3.scanner import Token
class Parser:
def __init__(self, tokens: Iterable[Token], filename: str) -> None:
self.tokens = Peekable(filter_tokens(tokens))
self.filename = filename
# Rules.
# ------------------------------------------------------------------------------------------------------------------
def parse_script(self) -> ast_.Block:
"""
Parse *.as script.
"""
body: List[ast_.AST] = []
while self.tokens:
# FIXME: allow package at any level.
body.append(self.switch({TokenType.PACKAGE: self.parse_package}, else_=self.parse_statement))
return ast_.Block(body=body)
def parse_package(self) -> ast_.AST:
self.expect(TokenType.PACKAGE)
if self.tokens.is_type(TokenType.IDENTIFIER):
# TODO: for now just consume the name because we'll use the directory structure instead.
for _ in self.parse_qualified_name():
pass
return self.parse_statement_or_code_block()
def parse_class(self) -> ast_.Class:
# Definition.
self.parse_modifiers()
# noinspection PyArgumentList
node = ast_.Class(token=self.expect(TokenType.CLASS, TokenType.INTERFACE))
node.name = self.expect(TokenType.IDENTIFIER).value
if self.tokens.skip(TokenType.EXTENDS):
node.base = self.parse_primary_expression()
if self.tokens.skip(TokenType.IMPLEMENTS):
# Skip interfaces.
self.parse_primary_expression()
# Body.
self.expect(TokenType.CURLY_BRACKET_OPEN)
while not self.tokens.skip(TokenType.CURLY_BRACKET_CLOSE):
# Parse definition.
modifiers = self.parse_modifiers()
statement: ast_.AST = self.switch({
TokenType.CONST: self.parse_variable_definition,
TokenType.FUNCTION: self.parse_function_definition,
TokenType.SEMICOLON: self.parse_semicolon,
TokenType.VAR: self.parse_variable_definition,
})
if isinstance(statement, ast_.Function) and statement.name == node.name:
node.constructor = statement
elif isinstance(statement, ast_.Variable):
node.variables.append(statement)
return node
def parse_modifiers(self) -> Set[TokenType]:
modifiers: Set[TokenType] = set()
if self.tokens.skip(TokenType.OVERRIDE):
modifiers.add(TokenType.OVERRIDE)
visibility_token = self.tokens.skip(
TokenType.PUBLIC, TokenType.PRIVATE, TokenType.PROTECTED, TokenType.INTERNAL)
if visibility_token:
modifiers.add(visibility_token.type_)
if self.tokens.skip(TokenType.STATIC):
modifiers.add(TokenType.STATIC)
return modifiers
def parse_statement_or_code_block(self) -> ast_.AST:
return self.switch({TokenType.CURLY_BRACKET_OPEN: self.parse_code_block}, else_=self.parse_statement)
def parse_statement(self) -> ast_.AST:
node = self.switch({
TokenType.BREAK: self.parse_break,
TokenType.CLASS: self.parse_class,
TokenType.FOR: self.parse_for,
TokenType.FUNCTION: self.parse_function_definition,
TokenType.IF: self.parse_if,
TokenType.IMPORT: self.parse_import,
TokenType.PRIVATE: self.parse_class,
TokenType.PROTECTED: self.parse_class,
TokenType.PUBLIC: self.parse_class,
TokenType.RETURN: self.parse_return,
TokenType.SEMICOLON: self.parse_semicolon,
TokenType.STATIC: self.parse_class,
TokenType.THROW: self.parse_throw,
TokenType.TRY: self.parse_try,
TokenType.VAR: self.parse_variable_definition,
TokenType.WHILE: self.parse_while,
}, else_=self.parse_expression_statement)
return node
def parse_code_block(self) -> ast_.Block:
# noinspection PyArgumentList
node = ast_.Block(token=self.expect(TokenType.CURLY_BRACKET_OPEN))
while not self.tokens.skip(TokenType.CURLY_BRACKET_CLOSE):
node.body.append(self.parse_statement_or_code_block())
return node
def parse_expression_statement(self) -> ast_.AST:
node = self.parse_expression()
self.tokens.skip(TokenType.SEMICOLON)
return node
def parse_qualified_name(self) -> Iterable[str]:
"""
Parse qualified name and return its parts.
"""
yield self.expect(TokenType.IDENTIFIER).value
while self.tokens.skip(TokenType.DOT):
yield self.expect(TokenType.IDENTIFIER).value
def parse_import(self) -> ast_.AST:
import_token = self.expect(TokenType.IMPORT)
args = []
while True:
token = self.expect(TokenType.IDENTIFIER, TokenType.MULTIPLY)
args.append(AST.string(token, token.value).node)
if not self.tokens.skip(TokenType.DOT):
break
self.tokens.skip(TokenType.SEMICOLON)
return AST \
.name(import_token, constants.import_key) \
.call(import_token, args) \
.expr() \
.node
def parse_if(self) -> ast_.If:
# noinspection PyArgumentList
node = ast_.If(token=self.expect(TokenType.IF))
self.expect(TokenType.PARENTHESIS_OPEN)
node.test = self.parse_assignment_expression()
self.expect(TokenType.PARENTHESIS_CLOSE)
node.positive = self.parse_statement_or_code_block()
if self.tokens.skip(TokenType.ELSE):
node.negative = self.parse_statement_or_code_block()
return node
def parse_variable_definition(self) -> ast_.Variable:
token = self.expect(TokenType.VAR, TokenType.CONST)
name_token = self.expect(TokenType.IDENTIFIER)
value: ast_.AST = ast_.Literal(value=self.parse_type_annotation())
if self.tokens.skip(TokenType.ASSIGN):
value = self.parse_assignment_expression()
# noinspection PyArgumentList
return ast_.Variable(token=token, name=name_token.value, value=value)
def parse_type_annotation(self) -> Any:
"""
Parse type annotation and return its _default value_.
https://www.adobe.com/devnet/actionscript/learning/as3-fundamentals/data-types.html
"""
if self.tokens.skip(TokenType.COLON):
return self.parse_type()
return undefined
def parse_type(self) -> Any:
if self.tokens.skip(TokenType.MULTIPLY):
return undefined
if self.tokens.skip(TokenType.VOID):
return None
# Standard types.
identifier_token = self.expect(TokenType.IDENTIFIER)
if identifier_token.value == 'Boolean':
return False
if identifier_token.value in ('int', 'uint', 'Number'):
return 0
# `None` for other standard types and all user classes. Skip the rest of the annotation.
while True:
if not self.tokens.skip(TokenType.DOT):
break
if not self.tokens.skip(TokenType.IDENTIFIER):
self.expect(TokenType.LESS)
self.parse_type()
self.expect(TokenType.GREATER)
return None
def parse_semicolon(self) -> ast_.Block:
# noinspection PyArgumentList
return ast_.Block(token=self.expect(TokenType.SEMICOLON))
def parse_return(self) -> ast_.Return:
# noinspection PyArgumentList
node = ast_.Return(token=self.expect(TokenType.RETURN))
if not self.tokens.skip(TokenType.SEMICOLON):
node.value = self.parse_assignment_expression()
return node
def parse_function_definition(self) -> ast_.Function:
# noinspection PyArgumentList
node = ast_.Function(token=self.expect(TokenType.FUNCTION))
is_property = self.tokens.skip(TokenType.GET) is not None # TODO
node.name = self.expect(TokenType.IDENTIFIER).value
self.expect(TokenType.PARENTHESIS_OPEN)
while not self.tokens.skip(TokenType.PARENTHESIS_CLOSE):
node.parameter_names.append(self.expect(TokenType.IDENTIFIER).value)
default_value: ast_.AST = ast_.Literal(value=self.parse_type_annotation())
if self.tokens.skip(TokenType.ASSIGN):
default_value = self.parse_assignment_expression()
node.defaults.append(default_value)
self.tokens.skip(TokenType.COMMA)
node.default_return_value = self.parse_type_annotation()
node.body = self.parse_statement_or_code_block()
return node
def parse_break(self) -> ast_.Break:
# noinspection PyArgumentList
return ast_.Break(token=self.expect(TokenType.BREAK))
def parse_while(self) -> ast_.While:
token = self.expect(TokenType.WHILE)
self.expect(TokenType.PARENTHESIS_OPEN)
test = self.parse_assignment_expression()
self.expect(TokenType.PARENTHESIS_CLOSE)
body = self.parse_statement_or_code_block()
# noinspection PyArgumentList
return ast_.While(token=token, test=test, body=body)
def parse_try(self) -> ast_.TryFinally:
# noinspection PyArgumentList
node = ast_.TryFinally(token=self.expect(TokenType.TRY))
node.body = self.parse_statement_or_code_block()
while self.tokens.is_type(TokenType.CATCH):
# noinspection PyArgumentList
except_node = ast_.Except(token=next(self.tokens))
self.expect(TokenType.PARENTHESIS_OPEN)
except_node.variable_name = self.expect(TokenType.IDENTIFIER).value
self.expect(TokenType.COLON)
if not self.tokens.skip(TokenType.MULTIPLY):
except_node.exception_type = self.parse_assignment_expression()
self.expect(TokenType.PARENTHESIS_CLOSE)
except_node.body = self.parse_statement_or_code_block()
node.excepts.append(except_node)
if self.tokens.skip(TokenType.FINALLY):
node.finally_ = self.parse_statement_or_code_block()
return node
def parse_throw(self) -> ast_.Throw:
token = self.expect(TokenType.THROW)
value = self.parse_assignment_expression()
# noinspection PyArgumentList
return ast_.Throw(token=token, value=value)
def parse_for(self) -> ast_.AbstractFor:
token = self.expect(TokenType.FOR)
ast_class = ast_.ForEach if self.tokens.skip(TokenType.EACH) else ast_.For
self.expect(TokenType.PARENTHESIS_OPEN)
self.expect(TokenType.VAR)
variable_name = self.expect(TokenType.IDENTIFIER).value
self.expect(TokenType.IN)
value = self.parse_assignment_expression()
self.expect(TokenType.PARENTHESIS_CLOSE)
body = self.parse_statement_or_code_block()
return ast_class(token=token, variable_name=variable_name, value=value, body=body)
# Expression rules.
# Methods are ordered according to reversed precedence.
# https://www.adobe.com/devnet/actionscript/learning/as3-fundamentals/operators.html#articlecontentAdobe_numberedheader_1
# ------------------------------------------------------------------------------------------------------------------
def parse_expression(self) -> ast_.AST:
return self.parse_label()
def parse_label(self) -> ast_.AST:
# For the sake of simplicity any expression is allowed as a label.
# Again, for the sake of simplicity any label is translated to `null`.
left = self.parse_assignment_expression()
if self.tokens.is_type(TokenType.COLON):
# noinspection PyArgumentList
return ast_.Literal(token=next(self.tokens), value=None)
return left
def parse_assignment_expression(self) -> ast_.AST:
return self.parse_binary_operations(
self.parse_conditional_expression,
TokenType.ASSIGN,
TokenType.ASSIGN_ADD,
)
# def parse_non_assignment_expression(self) -> ast.AST:
# return self.parse_conditional_expression()
def parse_conditional_expression(self) -> ast_.AST:
node = self.parse_logical_or_expression()
if self.tokens.is_type(TokenType.QUESTION_MARK):
token = next(self.tokens)
positive = self.parse_conditional_expression()
self.expect(TokenType.COLON)
negative = self.parse_conditional_expression()
# noinspection PyArgumentList
node = ast_.If(token=token, test=node, positive=positive, negative=negative)
return node
def parse_logical_or_expression(self) -> ast_.AST:
return self.parse_binary_operations(self.parse_logical_and_expression, TokenType.LOGICAL_OR)
def parse_logical_and_expression(self) -> ast_.AST:
return self.parse_binary_operations(self.parse_bitwise_xor, TokenType.LOGICAL_AND)
def parse_bitwise_xor(self) -> ast_.AST:
return self.parse_binary_operations(self.parse_equality_expression, TokenType.BITWISE_XOR)
def parse_equality_expression(self) -> ast_.AST:
return self.parse_binary_operations(
self.parse_relational_expression,
TokenType.EQUALS,
TokenType.NOT_EQUALS,
)
def parse_relational_expression(self) -> ast_.AST:
return self.parse_binary_operations(
self.parse_additive_expression,
TokenType.AS,
TokenType.GREATER,
TokenType.GREATER_OR_EQUAL,
TokenType.IN,
TokenType.IS,
TokenType.LESS,
TokenType.LESS_OR_EQUAL,
)
def parse_additive_expression(self) -> ast_.AST:
return self.parse_binary_operations(self.parse_multiplicative_expression, TokenType.PLUS, TokenType.MINUS)
def parse_multiplicative_expression(self) -> ast_.AST:
return self.parse_binary_operations(
self.parse_unary_expression,
TokenType.MULTIPLY, TokenType.DIVIDE, TokenType.PERCENT,
)
def parse_unary_expression(self) -> ast_.AST:
if self.tokens.is_type(*interpreter.unary_operations):
token = next(self.tokens)
# noinspection PyArgumentList
return ast_.UnaryOperation(token=token, value=self.parse_unary_expression())
return self.parse_postfix()
def parse_postfix(self) -> ast_.AST:
left = self.parse_primary_expression()
while self.tokens.is_type(*interpreter.postfix_operations):
# noinspection PyArgumentList
left = ast_.PostfixOperation(token=next(self.tokens), value=left)
return left
def parse_primary_expression(self) -> ast_.AST:
left = self.parse_terminal_or_parenthesized()
cases = {
TokenType.BRACKET_OPEN: self.parse_subscript,
TokenType.DOT: self.parse_attribute_expression,
TokenType.PARENTHESIS_OPEN: self.parse_call_expression,
}
while self.tokens.is_type(*cases):
left = self.switch(cases, left=left)
return left
def parse_attribute_expression(self, left: ast_.AST) -> ast_.AST:
token = self.expect(TokenType.DOT)
name: str = self.expect(TokenType.IDENTIFIER).value
# noinspection PyArgumentList
return ast_.Property(token=token, value=left, item=ast_.Literal(value=name))
def parse_call_expression(self, left: ast_.AST) -> ast_.AST:
# noinspection PyArgumentList
node = ast_.Call(token=self.expect(TokenType.PARENTHESIS_OPEN), value=left)
while not self.tokens.skip(TokenType.PARENTHESIS_CLOSE):
node.arguments.append(self.parse_assignment_expression())
self.tokens.skip(TokenType.COMMA)
return node
def parse_subscript(self, left: ast_.AST) -> ast_.AST:
token = self.expect(TokenType.BRACKET_OPEN)
item = self.parse_assignment_expression()
self.expect(TokenType.BRACKET_CLOSE)
# noinspection PyArgumentList
return ast_.Property(token=token, value=left, item=item)
def parse_terminal_or_parenthesized(self) -> ast_.AST:
# noinspection PyArgumentList
return self.switch({
TokenType.BRACKET_OPEN: self.parse_compound_literal,
TokenType.CURLY_BRACKET_OPEN: self.parse_map_literal,
TokenType.FALSE: lambda: ast_.Literal(token=next(self.tokens), value=False),
TokenType.IDENTIFIER: lambda: ast_.Name.from_(next(self.tokens)),
TokenType.NEW: self.parse_new,
TokenType.NULL: lambda: ast_.Literal(token=next(self.tokens), value=None),
TokenType.NUMBER: lambda: ast_.Literal.from_(next(self.tokens)),
TokenType.PARENTHESIS_OPEN: self.parse_parenthesized_expression,
TokenType.STRING: lambda: ast_.Literal.from_(next(self.tokens)),
TokenType.SUPER: self.parse_super_expression,
TokenType.THIS: lambda: ast_.Name.from_(next(self.tokens)),
TokenType.TRUE: lambda: ast_.Literal(token=next(self.tokens), value=True),
TokenType.UNDEFINED: lambda: ast_.Literal(token=next(self.tokens), value=undefined),
})
def parse_parenthesized_expression(self) -> ast_.AST:
self.expect(TokenType.PARENTHESIS_OPEN)
inner = self.parse_assignment_expression()
self.expect(TokenType.PARENTHESIS_CLOSE)
return inner
def parse_super_expression(self) -> ast_.AST:
super_token = self.expect(TokenType.SUPER)
builder = AST.identifier(super_token).call(super_token)
if self.tokens.is_type(TokenType.PARENTHESIS_OPEN):
# Call super constructor. Return `super().__init__` and let `parse_call_expression` do its job.
return self.parse_call_expression(builder.attribute(super_token, constants.init_name).node)
if self.tokens.is_type(TokenType.DOT):
# Call super method. Return `super()` and let `parse_attribute_expression` do its job.
return self.parse_attribute_expression(builder.node)
self.raise_syntax_error(TokenType.PARENTHESIS_OPEN, TokenType.DOT)
def parse_new(self) -> ast_.AST:
# noinspection PyArgumentList
node = ast_.New(token=self.expect(TokenType.NEW))
# Skip generic parameter before literal.
if self.tokens.skip(TokenType.LESS):
self.parse_type()
self.expect(TokenType.GREATER)
return self.parse_terminal_or_parenthesized()
# Actual constructor.
node.value = self.parse_terminal_or_parenthesized()
# Skip yet another generic parameter.
if self.tokens.skip(TokenType.DOT):
self.expect(TokenType.LESS)
self.parse_type()
self.expect(TokenType.GREATER)
# Parse the call.
self.expect(TokenType.PARENTHESIS_OPEN)
while not self.tokens.skip(TokenType.PARENTHESIS_CLOSE):
node.arguments.append(self.parse_assignment_expression())
self.tokens.skip(TokenType.COMMA)
return node
def parse_compound_literal(self) -> ast_.AST:
token = self.expect(TokenType.BRACKET_OPEN)
value: List[ast_.AST] = []
while not self.tokens.skip(TokenType.BRACKET_CLOSE):
value.append(self.parse_assignment_expression())
self.tokens.skip(TokenType.COMMA)
# noinspection PyArgumentList
return ast_.CompoundLiteral(token=token, value=value)
def parse_map_literal(self) -> ast_.AST:
token = self.expect(TokenType.CURLY_BRACKET_OPEN)
map_value: List[Tuple[ast_.AST, ast_.AST]] = []
while not self.tokens.skip(TokenType.CURLY_BRACKET_CLOSE):
key = self.parse_assignment_expression()
self.expect(TokenType.COLON)
value = self.parse_assignment_expression()
map_value.append((key, value))
self.tokens.skip(TokenType.COMMA)
# noinspection PyArgumentList
return ast_.MapLiteral(token=token, value=map_value)
# Expression rule helpers.
# ------------------------------------------------------------------------------------------------------------------
def parse_binary_operations(self, child_parser: Callable[[], ast_.AST], *types: TokenType) -> ast_.AST:
node = child_parser()
while self.tokens.is_type(*types):
token = next(self.tokens)
# noinspection PyArgumentList
node = ast_.BinaryOperation(token=token, left=node, right=child_parser())
return node
# Parser helpers.
# ------------------------------------------------------------------------------------------------------------------
TParser = Callable[..., ast_.AST]
def switch(self, cases: Dict[TokenType, TParser], else_: Optional[TParser] = None, **kwargs) -> ast_.AST:
"""
Behaves like a `switch` (`case`) operator and tries to match the current token against specified token types.
        If a match is found, the corresponding parser is called.
        Otherwise, `else_` is called if defined.
        Otherwise, a syntax error is raised.
"""
try:
parser = cases[self.tokens.peek().type_]
except (StopIteration, KeyError):
if else_:
return else_(**kwargs)
self.raise_syntax_error(*cases.keys())
else:
return parser(**kwargs)
def expect(self, *types: TokenType) -> Token:
"""
Check the current token type, return it and advance.
Raise syntax error if the current token has an unexpected type.
"""
if self.tokens.is_type(*types):
return next(self.tokens)
self.raise_syntax_error(*types)
def raise_syntax_error(self, *expected_types: TokenType) -> NoReturn:
"""
Raise syntax error with the list of expected types in the message.
"""
types_string = ', '.join(type_.name for type_ in expected_types)
try:
token = self.tokens.peek()
except StopIteration:
raise_syntax_error(f'unexpected end of file, expected one of: {types_string}', filename=self.filename)
else:
raise_syntax_error(
f'unexpected {token.type_.name} "{token.value}", expected one of: {types_string}',
location=token,
filename=self.filename,
)
class Peekable(Iterable[Token]):
def __init__(self, iterable: Iterable[Token]) -> None:
self.iterator = iter(iterable)
self.cache: Deque[Token] = deque()
def __iter__(self) -> Iterator[Token]:
return self
def __next__(self) -> Token:
self.peek()
return self.cache.popleft()
def __bool__(self):
try:
self.peek()
except StopIteration:
return False
else:
return True
def peek(self) -> Token:
if not self.cache:
self.cache.append(next(self.iterator))
return self.cache[0]
def is_type(self, *types: TokenType) -> bool:
"""
Check the current token type.
"""
try:
return self.peek().type_ in types
except StopIteration:
return False
def skip(self, *types: TokenType) -> Optional[Token]:
"""
Check the current token type and skip it if matches.
"""
if self.is_type(*types):
return next(self)
return None
def filter_tokens(tokens: Iterable[Token]) -> Iterable[Token]:
return (token for token in tokens if token.type_ != TokenType.COMMENT)
def raise_syntax_error(message: str, location: Optional[Token] = None, filename: Optional[str] = None) -> NoReturn:
"""
Raise syntax error and provide some help message.
"""
if filename:
message = f'{filename}: {message}'
if location:
raise ASSyntaxError(f'{message} at line {location.line_number} position {location.position}')
else:
raise ASSyntaxError(f'{message}')
| 41.39629
| 125
| 0.641885
|
baa8e5419574c2e6c66a0be895c5b955916d1136
| 3,294
|
py
|
Python
|
crystalpy/tests/util/StokesVectorTest.py
|
oasys-kit/crystalpy
|
4852851c622ef914844178577dea4075e92cd8dd
|
[
"MIT"
] | null | null | null |
crystalpy/tests/util/StokesVectorTest.py
|
oasys-kit/crystalpy
|
4852851c622ef914844178577dea4075e92cd8dd
|
[
"MIT"
] | null | null | null |
crystalpy/tests/util/StokesVectorTest.py
|
oasys-kit/crystalpy
|
4852851c622ef914844178577dea4075e92cd8dd
|
[
"MIT"
] | null | null | null |
"""
Unittest for StokesVector class.
"""
import unittest
import numpy as np
from numpy.testing import assert_array_almost_equal
from crystalpy.util.StokesVector import StokesVector
class StokesVectorTest(unittest.TestCase):
def setUp(self):
self.element_list = [0.78177969457877930,
0.22595711869558588,
0.28797567756487550,
0.58551861060989900]
self.stokes_vector = StokesVector(self.element_list)
def testConstructor(self):
self.assertIsInstance(self.stokes_vector, StokesVector)
self.assertEqual(self.stokes_vector.s0, 0.78177969457877930)
self.assertEqual(self.stokes_vector.s1, 0.22595711869558588)
self.assertEqual(self.stokes_vector.s2, 0.28797567756487550)
self.assertEqual(self.stokes_vector.s3, 0.58551861060989900)
def testGetArray(self):
array1 = self.stokes_vector.components()
array2 = self.stokes_vector.getList()
self.assertEqual(type(array1), np.ndarray)
self.assertEqual(type(array2), list)
np.testing.assert_array_equal(array1, np.asarray(self.element_list))
self.assertListEqual(array2, self.element_list)
def testPolarizationDegree(self):
pol_deg = self.stokes_vector.circularPolarizationDegree()
self.assertEqual(type(pol_deg), float)
self.assertAlmostEqual(pol_deg, 0.7489560226111716)
def test_operator_equal(self):
stokes_vector1 = self.stokes_vector
stokes_vector2 = StokesVector([0.7817796945787793,
0.22595711869558588,
0.2879756775648755,
0.585518610609899]) # without final zeros
stokes_vector3 = StokesVector([round(0.78177969457877930, 6),
round(0.22595711869558588, 6),
round(0.28797567756487550, 6),
round(0.58551861060989900, 6)]) # rounded float
self.assertTrue(stokes_vector1 == stokes_vector1) # identity
self.assertTrue(stokes_vector1 == stokes_vector2)
self.assertFalse(stokes_vector1 == stokes_vector3)
def test_operator_not_equal(self):
stokes_vector1 = self.stokes_vector
stokes_vector2 = StokesVector([0.7817796945787793,
0.22595711869558588,
0.2879756775648755,
0.585518610609899]) # without final zeros
stokes_vector3 = StokesVector([round(0.78177969457877930, 6),
round(0.22595711869558588, 6),
round(0.28797567756487550, 6),
round(0.58551861060989900, 6)]) # rounded float
self.assertFalse(stokes_vector1 != stokes_vector1)
self.assertFalse(stokes_vector1 != stokes_vector2)
self.assertTrue(stokes_vector1 != stokes_vector3)
def testDuplicate(self):
v1 = StokesVector([1,2,3,4])
v2 = v1.duplicate()
self.assertTrue(v1 == v2)
v1.setFromValues(0,2,3,4)
self.assertFalse(v1 == v2)
| 41.175
| 87
| 0.603825
|
ea73d795a7c662240fdc6da80ce19265608f08fc
| 19,216
|
py
|
Python
|
inna/quantizer/quantize_caffe.py
|
caoqichun/inspur-inna
|
0848ec5db3c04aa8e2b65caff8095dd3ac4040ca
|
[
"Apache-2.0"
] | null | null | null |
inna/quantizer/quantize_caffe.py
|
caoqichun/inspur-inna
|
0848ec5db3c04aa8e2b65caff8095dd3ac4040ca
|
[
"Apache-2.0"
] | null | null | null |
inna/quantizer/quantize_caffe.py
|
caoqichun/inspur-inna
|
0848ec5db3c04aa8e2b65caff8095dd3ac4040ca
|
[
"Apache-2.0"
] | null | null | null |
# uncompyle6 version 3.3.1
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.7.1 (default, Dec 14 2018, 19:28:38)
# [GCC 7.3.0]
# Embedded file name: ./xfdnn/tools/quantize/quantize_caffe.py
# Compiled at: 2018-10-13 08:25:54
import numpy as np, google.protobuf.text_format as tfmt, caffe
from quantize_base import *
import matplotlib.pyplot as plt  # required by the histogram plot in QuantizeConvolutionCaffe.postProcess
class Caffe:
_net = None
_net_parameter = None
_deploy_model = None
_weights = None
_dims = None
def __init__(self, deploy_model, weights):
self._deploy_model = deploy_model
self._weights = weights
self._net, self._net_parameter = self.declare_network(deploy_model, weights)
blob_dict = self._net.blobs
first_key = blob_dict.keys()[0]
first_blob = blob_dict[first_key]
self._dims = first_blob.data.shape
self._input = first_key
useGpu = False
if useGpu:
caffe.set_mode_gpu()
else:
caffe.set_mode_cpu()
def getNetwork(self):
return (
self._net, self._net_parameter)
def declareTransformer(self, transpose, channel_swap, raw_scale, mean_value, input_scale, dims):
self._transformer = caffe.io.Transformer({self._input: self._net.blobs[self._input].data.shape})
self._transformer.set_transpose(self._input, transpose)
self._transformer.set_channel_swap(self._input, channel_swap)
self._transformer.set_raw_scale(self._input, raw_scale)
self._transformer.set_mean(self._input, mean_value)
self._transformer.set_input_scale(self._input, input_scale)
def initializeBatch(self, calibration_size, calibration_filenames, calibration_indices):
dims = list(self._dims[-3:])
self._net.blobs[self._input].reshape(calibration_size, *dims)
for i in range(calibration_size):
print ('Adding', calibration_filenames[calibration_indices[i]], 'to calibration batch.')
data = caffe.io.load_image(calibration_filenames[calibration_indices[i]])
self._net.blobs[self._input].data[i] = self._transformer.preprocess(self._input, data)
def declare_network(self, deploy_model, weights):
net = caffe.Net(deploy_model, weights, caffe.TEST)
net_parameter = caffe.proto.caffe_pb2.NetParameter()
return (
net, net_parameter)
def executeCalibration(self, bitwidths, deploy_model, quant_params):
net, net_parameter = self.getNetwork()
bw_layer_in_global = bitwidths[0]
bw_params_global = bitwidths[1]
bw_layer_out_global = bitwidths[2]
quant_layers = []
with open(deploy_model, 'r') as (f):
tfmt.Merge(f.read(), net_parameter)
for name, layer in net.layer_dict.items():
print '-' * 80
print 'Processing layer %d of %d' % (list(net._layer_names).index(name), len(list(net._layer_names)))
print 'Layer Name:' + name + ' Type:' + layer.type
print ('Inputs:', str(net.bottom_names[name]) + ', Outputs:', str(net.top_names[name]))
if layer.type in ('Input', 'Data'):
print ('input/data quantization!')
q = QuantizeInputCaffe(self, name, layer.type, bw_layer_out_global, quant_params)
q.preProcess()
q.execute()
q.postProcess()
elif layer.type in ('Convolution', ):
print ('convolution quantization!')
q = QuantizeConvolutionCaffe(self, name, layer.type, bw_layer_out_global, bw_params_global, quant_params)
q.preProcess()
q.execute()
q.postProcess()
quant_layers.append((name, layer.type))
elif layer.type in ('InnerProduct', ):
quantize_inner_product(name, net, quant_params, bw_params_global, bw_layer_out_global)
quant_layers.append((name, layer.type))
elif layer.type in ('ReLU', ):
#quantize_relu and quantize_relu_KL are the same functions.
quantize_relu_withoutKL(name, net, quant_params, bw_layer_out_global)
elif layer.type in ('Pooling', ):
quantize_pooling(name, net, quant_params)
elif layer.type in ('LRN', ):
quantize_lrn(name, net, quant_params)
elif layer.type in ('Dropout', ):
quantize_dropout(name, net, quant_params)
elif layer.type in ('Split', ):
quantize_split(name, net, quant_params)
elif layer.type in ('Concat', ):
quantize_concat(name, net, quant_params, bw_layer_out_global)
elif layer.type in ('Eltwise', ):
quantize_eltwise_bottom(name, net, quant_params)
elif layer.type in ('Softmax', 'SoftmaxWithLoss'):
print 'Passing'
else:
print 'Error: Quantization of ' + layer.type + ' is not yet supported'
return quant_layers
class LayerExecute:
_nodename = None
_layertype = None
def __init__(self, nodename, layertype):
self._nodename = nodename
self._layertype = layertype
def preProcess(self):
pass
def execute(self):
pass
def postProcess(self):
pass
class LayerExecuteCaffe(LayerExecute):
_topname = None
_quant_params = None
_net = None
_bitwidth = None
def __init__(self, caffeobj, nodename, layertype, bitwidth, quant_params):
LayerExecute.__init__(self, nodename, layertype)
self._net = caffeobj.getNetwork()[0]
self._topname = self._net.top_names[nodename][0]
self._bitwidth = bitwidth
self._quant_params = quant_params
class QuantizeInputCaffe(LayerExecuteCaffe):
def __init__(self, caffeobj, nodename, layertype, bitwidth, quant_params):
LayerExecuteCaffe.__init__(self, caffeobj, nodename, layertype, bitwidth, quant_params)
def preProcess(self):
pass
def execute(self):
self._net.forward(start=self._nodename, end=self._nodename)
def postProcess(self):
print 'Quantizing layer output...'
threshold = np.float64(254.00)
self._quant_params.bw_layer_out[self._nodename] = self._bitwidth
self._quant_params.th_layer_out[self._nodename] = threshold
print (
'bw_layer_out: ', self._quant_params.bw_layer_out[self._nodename])
print ('th_layer_out: ', self._quant_params.th_layer_out[self._nodename])
class QuantizeConvolutionCaffe(LayerExecuteCaffe):
_botname = None
_bitwidth_params = None
def __init__(self, caffeobj, nodename, layertype, bitwidth, bitwidth_params, quant_params):
LayerExecuteCaffe.__init__(self, caffeobj, nodename, layertype, bitwidth, quant_params)
self._botname = self._net.bottom_names[self._nodename][0]
self._bitwidth_params = bitwidth_params
def preProcess(self):
print (
'Quantizing conv input layer ...', self._topname)
bitwidth = self._quant_params.bw_layer_out[self._botname]
threshold = self._quant_params.th_layer_out[self._botname]
self._quant_params.bw_layer_in[self._nodename] = bitwidth
self._quant_params.th_layer_in[self._nodename] = threshold
print 'Quantizing conv weights for layer %s...' % self._nodename
bitwidth = self._bitwidth_params
threshold = ThresholdWeights_per_layer(self._net.params[self._nodename][0].data, bitwidth)
#threshold = ThresholdWeights_KL(self._net.params[self._nodename][0].data, bitwidth)
#per channel
#threshold = ThresholdWeights(self._net.params[self._nodename][0].data, bitwidth)
self._quant_params.bw_params[self._nodename] = bitwidth
self._quant_params.th_params[self._nodename] = threshold
self._net.params[self._nodename][0].data[:] = QuantizeWeights_per_layer(threshold, bitwidth, self._net.params[self._nodename][0].data[:])
#self._net.params[self._nodename][0].data[:] = QuantizeWeights(threshold, bitwidth, self._net.params[self._nodename][0].data[:])
def execute(self):
self._net.forward(start=self._nodename, end=self._nodename)
def postProcess(self):
data = self._net.blobs[self._nodename].data[...]
maxValue = np.abs(data).max()
minValue = np.min(data)
print('max of convolution:', maxValue)
threshold = ThresholdLayerOutputs(data, self._bitwidth)
#####plot histogram
hist, bin_edges = np.histogram(data, 2048, range=(0, maxValue), density=True)
hist = hist / np.sum(hist)
cumsum = np.cumsum(hist)
index = np.arange(len(bin_edges)-1)
plt.figure()
plt.bar(bin_edges[0:len(bin_edges)-1], cumsum)
histName = self._nodename + '.png'
plt.savefig(histName)
#####
self._quant_params.bw_layer_out[self._nodename] = self._bitwidth
self._quant_params.th_layer_out[self._nodename] = threshold
print (
'bw_layer_in: ', self._quant_params.bw_layer_in[self._nodename])
print ('th_layer_in: ', self._quant_params.th_layer_in[self._nodename])
print (
'bw_layer_out: ', self._quant_params.bw_layer_out[self._nodename])
print ('th_layer_out: ', self._quant_params.th_layer_out[self._nodename])
def quantize_inner_product(name, net, quant_params, bw_params_global, bw_layer_out_global):
topname = net.top_names[name][0]
botname = net.bottom_names[name][0]
data = net.blobs[botname].data[...]
bitwidth = quant_params.bw_layer_out[botname]
threshold = quant_params.th_layer_out[botname]
quant_params.bw_layer_in[topname] = bitwidth
quant_params.th_layer_in[topname] = threshold
#delete 2019/7/15
#net.blobs[botname].data[:] = Float2Fixed2Float(data, bitwidth, threshold, np.round)
data = net.params[name][0].data[...]
bitwidth = bw_params_global
threshold = ThresholdWeights_per_layer(data, bitwidth) #lly per layer
#threshold = ThresholdWeights(data, bitwidth) #per channels
quant_params.bw_params[topname] = bitwidth
quant_params.th_params[topname] = threshold
#per-channel
#for i in range(len(quant_params.th_params[topname])):
# net.params[name][0].data[i] = Float2Fixed2Float(data[i], bitwidth, threshold[i], np.round)
net.forward(start=name, end=name)
data = net.blobs[topname].data[...]
bitwidth = bw_layer_out_global
threshold = ThresholdLayerOutputs(data, bitwidth)
quant_params.bw_layer_out[topname] = bitwidth
quant_params.th_layer_out[topname] = threshold
print (
'bw_layer_in: ', quant_params.bw_layer_in[topname])
print ('th_layer_in: ', quant_params.th_layer_in[topname])
print (
'bw_layer_out: ', quant_params.bw_layer_out[topname])
print ('th_layer_out: ', quant_params.th_layer_out[topname])
def quantize_relu(name, net, quant_params, bw_layer_out_global):
topname = net.top_names[name][0]
botname = net.bottom_names[name][0]
net.forward(start=botname, end=name)
net.forward(start=name, end=name)
data = net.blobs[topname].data[...]
maxValue = np.max(abs(data))
print('max of relu:', maxValue)
bitwidth = bw_layer_out_global
threshold = ThresholdLayerOutputs(data, bitwidth) #KL
quant_params.bw_layer_out[topname] = bitwidth
quant_params.th_layer_out[topname] = threshold
print (
'bw_layer_out: ', quant_params.bw_layer_out[topname])
print ('th_layer_out: ', quant_params.th_layer_out[topname])
def quantize_relu_withoutKL(name, net, quant_params, bw_layer_out_global):
topname = net.top_names[name][0]
print('topname:', topname)
botname = net.bottom_names[name][0]
print('botname:', botname)
net.forward(start=botname, end=name)
net.forward(start=name, end=name)
quant_params.bw_layer_out[topname] = bw_layer_out_global
quant_params.th_layer_out[topname] = quant_params.th_layer_out[botname]
print('bw_layer_out:', quant_params.bw_layer_out[topname])
print('th_layer_out:', quant_params.th_layer_out[topname])
def quantize_pooling(name, net, quant_params):
topname = net.top_names[name][0]
botname = net.bottom_names[name][0]
data = net.blobs[botname].data[...]
bitwidth = quant_params.bw_layer_out[botname]
threshold = quant_params.th_layer_out[botname]
quant_params.bw_layer_in[topname] = bitwidth
quant_params.th_layer_in[topname] = threshold
net.forward(start=name, end=name)
data = net.blobs[topname].data[...]
bitwidth = quant_params.bw_layer_in[topname]
threshold = quant_params.th_layer_in[topname]
quant_params.bw_layer_out[topname] = bitwidth
quant_params.th_layer_out[topname] = threshold
print (
'bw_layer_in: ', quant_params.bw_layer_in[topname])
print ('th_layer_in: ', quant_params.th_layer_in[topname])
print (
'bw_layer_out: ', quant_params.bw_layer_out[topname])
print ('th_layer_out: ', quant_params.th_layer_out[topname])
def quantize_lrn(name, net, quant_params):
topname = net.top_names[name][0]
botname = net.bottom_names[name][0]
data = net.blobs[botname].data[...]
bitwidth = quant_params.bw_layer_out[botname]
threshold = quant_params.th_layer_out[botname]
quant_params.bw_layer_in[topname] = bitwidth
quant_params.th_layer_in[topname] = threshold
net.forward(start=name, end=name)
data = net.blobs[topname].data[...]
bitwidth = quant_params.bw_layer_in[topname]
threshold = quant_params.th_layer_in[topname]
quant_params.bw_layer_out[topname] = bitwidth
quant_params.th_layer_out[topname] = threshold
print (
'bw_layer_in: ', quant_params.bw_layer_in[topname])
print ('th_layer_in: ', quant_params.th_layer_in[topname])
print (
'bw_layer_out: ', quant_params.bw_layer_out[topname])
print ('th_layer_out: ', quant_params.th_layer_out[topname])
def quantize_dropout(name, net, quant_params):
topname = net.top_names[name][0]
botname = net.bottom_names[name][0]
data = net.blobs[botname].data[...]
bitwidth = quant_params.bw_layer_out[botname]
threshold = quant_params.th_layer_out[botname]
quant_params.bw_layer_in[topname] = bitwidth
quant_params.th_layer_in[topname] = threshold
net.forward(start=name, end=name)
data = net.blobs[topname].data[...]
bitwidth = quant_params.bw_layer_in[topname]
threshold = quant_params.th_layer_in[topname]
quant_params.bw_layer_out[topname] = bitwidth
quant_params.th_layer_out[topname] = threshold
print (
'bw_layer_in: ', quant_params.bw_layer_in[topname])
print ('th_layer_in: ', quant_params.th_layer_in[topname])
print (
'bw_layer_out: ', quant_params.bw_layer_out[topname])
print ('th_layer_out: ', quant_params.th_layer_out[topname])
def quantize_split(name, net, quant_params):
topname = net.top_names[name][0]
botname = net.bottom_names[name][0]
for i in range(len(net.top_names[name])):
topnamei = net.top_names[name][i]
quant_params.bw_layer_in[topnamei] = quant_params.bw_layer_out[botname]
quant_params.th_layer_in[topnamei] = quant_params.th_layer_out[botname]
net.forward(start=name, end=name)
for i in range(len(net.top_names[name])):
topnamei = net.top_names[name][i]
quant_params.bw_layer_out[topnamei] = quant_params.bw_layer_in[topnamei]
quant_params.th_layer_out[topnamei] = quant_params.th_layer_in[topnamei]
print ('bw_layer_in: ', quant_params.bw_layer_in[topname])
print ('th_layer_in: ', quant_params.th_layer_in[topname])
print (
'bw_layer_out: ', quant_params.bw_layer_out[topname])
print ('th_layer_out: ', quant_params.th_layer_out[topname])
def quantize_concat(name, net, quant_params, bw_layer_out_global):
topname = net.top_names[name][0]
botname = net.bottom_names[name][0]
for bottom_name in net.bottom_names[name]:
start_name = list((bottom_name in net.top_names[name] and name for name, layer in net.layer_dict.items()))[0]
end_name = list((bottom_name in net.top_names[name] and name for name, layer in net.layer_dict.items()))[-1]
net.forward(start=start_name, end=end_name)
net.forward(start=name, end=name)
data = net.blobs[topname].data[...]
bitwidth = bw_layer_out_global
threshold = ThresholdLayerOutputs(data, bitwidth)
quant_params.bw_layer_out[topname] = bitwidth
quant_params.th_layer_out[topname] = threshold
#net.blobs[topname].data[:] = Float2Fixed2Float(data, bitwidth, threshold, np.round)
quant_params.bw_layer_in[topname] = quant_params.bw_layer_out[topname]
quant_params.th_layer_in[topname] = quant_params.th_layer_out[topname]
for bottom_name in net.bottom_names[name]:
quant_params.bw_layer_out[net.top_names[bottom_name][0]] = quant_params.bw_layer_in[topname]
quant_params.th_layer_out[net.top_names[bottom_name][0]] = quant_params.th_layer_in[topname]
print ('bw_layer_in: ', quant_params.bw_layer_in[topname])
print ('th_layer_in: ', quant_params.th_layer_in[topname])
print (
'bw_layer_out: ', quant_params.bw_layer_out[topname])
print ('th_layer_out: ', quant_params.th_layer_out[topname])
# this method is right.
def quantize_eltwise_bottom(name, net, quant_params):
topname = net.top_names[name][0]
botname = net.bottom_names[name][0]
bitwidth = quant_params.bw_layer_out[botname]
threshold = quant_params.th_layer_out[botname]
for i in range(1, len(net.bottom_names[name])):
bitwidth = np.maximum(bitwidth, quant_params.bw_layer_out[net.bottom_names[name][i]])
threshold = np.maximum(threshold, quant_params.th_layer_out[net.bottom_names[name][i]])
quant_params.bw_layer_in[topname] = bitwidth
quant_params.th_layer_in[topname] = threshold
for i in range(0, len(net.bottom_names[name])):
quant_params.th_layer_out[net.bottom_names[name][i]] = threshold
for i in range(0, len(net.bottom_names[name])):
net.blobs[net.bottom_names[name][i]].data[:] = QuantizeThresholdBlob(net.blobs[net.bottom_names[name][i]].data[:], bitwidth, threshold)
net.forward(start=name, end=name)
quant_params.bw_layer_out[topname] = bitwidth
quant_params.th_layer_out[topname] = threshold
print (
'bw_layer_in: ', quant_params.bw_layer_in[topname])
print ('th_layer_in: ', quant_params.th_layer_in[topname])
print (
'bw_layer_out: ', quant_params.bw_layer_out[topname])
print ('th_layer_out: ', quant_params.th_layer_out[topname])
def quantize_softmax(name, net, quant_params):
topname = net.top_names[name][0]
botname = net.bottom_names[name][0]
data = net.blobs[botname].data[...]
    quant_params.bw_layer_in[topname] = quant_params.bw_layer_out[botname]
    quant_params.th_layer_in[topname] = quant_params.th_layer_out[botname]
net.forward(start=name, end=name)
# okay decompiling quantize_caffe.pyc
| 41.956332
| 145
| 0.680579
|
e199256c0db2cc90feb17bcd534c2e32fb2b6c90
| 14,831
|
py
|
Python
|
workbench/tools/history.py
|
matthiask/workbench
|
f09dfd3f4d946c1091b6c1e96940d56335a736a3
|
[
"MIT"
] | 15
|
2020-09-02T22:17:34.000Z
|
2022-02-01T20:09:10.000Z
|
workbench/tools/history.py
|
matthiask/workbench
|
f09dfd3f4d946c1091b6c1e96940d56335a736a3
|
[
"MIT"
] | 18
|
2020-01-08T15:28:26.000Z
|
2022-02-28T02:46:41.000Z
|
workbench/tools/history.py
|
matthiask/workbench
|
f09dfd3f4d946c1091b6c1e96940d56335a736a3
|
[
"MIT"
] | 8
|
2020-09-29T08:00:24.000Z
|
2022-01-16T11:58:19.000Z
|
from collections import namedtuple
from decimal import Decimal
from django.db import models
from django.http import Http404
from django.urls import reverse
from django.utils import dateparse
from django.utils.html import conditional_escape, format_html, mark_safe
from django.utils.text import capfirst
from django.utils.translation import gettext as _
from workbench.accounts.features import FEATURES
from workbench.accounts.models import Team, User
from workbench.awt.models import Absence, Employment, VacationDaysOverride, Year
from workbench.contacts.models import (
EmailAddress,
Organization,
Person,
PhoneNumber,
PostalAddress,
)
from workbench.credit_control.models import CreditEntry
from workbench.deals.models import Contribution, Deal, Value, ValueType
from workbench.expenses.models import ExpenseReport
from workbench.invoices.models import (
Invoice,
ProjectedInvoice,
RecurringInvoice,
Service as InvoiceService,
)
from workbench.logbook.models import Break, LoggedCost, LoggedHours
from workbench.offers.models import Offer
from workbench.planning.models import (
ExternalWork,
Milestone,
PlannedWork,
PublicHoliday,
)
from workbench.projects.models import Campaign, Project, Service as ProjectService
from workbench.reporting.models import CostCenter
from workbench.tools.formats import local_date_format
# This is an object which __contains__ everything
EVERYTHING = type("c", (), {"__contains__": lambda *a: True})()
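# Added note: membership tests against EVERYTHING always succeed (any `"x" in EVERYTHING`
# is True), which is why it can stand in for "all fields" in the HISTORY config below.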
Change = namedtuple("Change", "changes created_at pretty_user_name values version")
def default_if_none(value, default):
return default if value is None else value
class Prettifier:
def __init__(self):
self._flatchoices = {}
self._prettified_instances = {}
def handle_bool(self, values, field):
value = values.get(field.attname)
if value in {True, "t"}:
values[field.attname] = True
return _("yes")
elif value in {False, "f"}:
values[field.attname] = False
return _("no")
elif value is None:
values[field.attname] = None
return _("<no value>")
return value
def handle_choice(self, values, field):
if field not in self._flatchoices:
self._flatchoices[field] = {
str(key): (key, value) for key, value in field.flatchoices
}
value = values.get(field.attname)
if value in self._flatchoices[field]:
key, value = self._flatchoices[field][value]
values[field.attname] = key
return default_if_none(value, _("<no value>"))
def handle_date(self, values, field):
value = values.get(field.attname)
if value is None:
return _("<no value>")
dt = dateparse.parse_datetime(value) or dateparse.parse_date(value)
if dt:
values[field.attname] = dt
return local_date_format(dt)
return value
def handle_decimal(self, values, field):
value = values.get(field.attname)
if value:
value = Decimal(value)
values[field.attname] = value
return default_if_none(value, _("<no value>"))
def handle_related_model(self, values, field):
value = values.get(field.attname)
if value is None:
return _("<no value>")
model = field.related_model
key = (model, value)
if key in self._prettified_instances:
values[field.attname] = self._prettified_instances[key][0]
return self._prettified_instances[key][1]
queryset = model._default_manager.all()
instance = None
try:
instance = queryset.get(pk=value)
pretty = str(instance)
except model.DoesNotExist:
pretty = _("Deleted %s instance") % model._meta.verbose_name
else:
values[field.attname] = instance
if model in HISTORY:
pretty = format_html(
'<a href="{}" data-toggle="ajaxmodal">{}</a>',
reverse("history", args=(model._meta.db_table, "id", value)),
pretty,
)
self._prettified_instances[key] = (instance, pretty)
return pretty
def format(self, values, field):
value = values.get(field.attname)
if field.choices:
return self.handle_choice(values, field)
if field.related_model:
return self.handle_related_model(values, field)
if isinstance(field, (models.BooleanField, models.NullBooleanField)):
return self.handle_bool(values, field)
if isinstance(field, models.DateField):
return self.handle_date(values, field)
if isinstance(field, models.DecimalField):
return self.handle_decimal(values, field)
return default_if_none(value, _("<no value>"))
def changes(model, fields, actions):
changes = []
if not actions:
return changes
users = {u.pk: u.get_full_name() for u in User.objects.all()}
users[0] = _("<anonymous>")
fields = [
f
for f in model._meta.get_fields()
if hasattr(f, "attname") and not f.primary_key and f.name in fields
]
prettifier = Prettifier()
for action in actions:
if action.action == "I":
values = action.row_data
version_changes = [
mark_safe(
_("Initial value of '%(field)s' was '%(current)s'.")
% {
"field": conditional_escape(capfirst(f.verbose_name)),
"current": conditional_escape(prettifier.format(values, f)),
}
)
for f in fields
if f.attname in values
]
elif action.action == "U":
values = action.changed_fields or {}
version_changes = [
mark_safe(
_("New value of '%(field)s' was '%(current)s'.")
% {
"field": conditional_escape(capfirst(f.verbose_name)),
"current": conditional_escape(prettifier.format(values, f)),
}
)
for f in fields
if f.attname in values
]
else: # Deletion or truncation
values = action.row_data
version_changes = [
mark_safe(
_("Final value of '%(field)s' was '%(current)s'.")
% {
"field": conditional_escape(capfirst(f.verbose_name)),
"current": conditional_escape(prettifier.format(values, f)),
}
)
for f in fields
if f.attname in values
]
if version_changes:
changes.append(
Change(
changes=version_changes,
created_at=action.created_at,
pretty_user_name=users.get(action.user_id) or action.user_name,
values={
f.attname: values.get(f.attname)
for f in fields
if f.attname in values
},
version=action,
)
)
return changes
def _accounts_user_cfg(user):
fields = {
"email",
"is_active",
"is_admin",
"_short_name",
"_full_name",
"language",
"working_time_model",
"planning_hours_per_day",
"person",
"_features",
"date_of_employment",
}
related = [
(Employment, "user_id"),
(VacationDaysOverride, "user_id"),
(Absence, "user_id"),
]
if user.features[FEATURES.PLANNING]:
related.extend([(PlannedWork, "user_id")])
return {"fields": fields, "related": related}
def _credit_control_creditentry_cfg(user):
if not user.features[FEATURES.CONTROLLING]:
raise Http404
return {
"fields": {
"ledger",
"reference_number",
"value_date",
"total",
"payment_notice",
"invoice",
"notes",
}
}
def _deals_contribution_cfg(user):
if not user.features[FEATURES.DEALS]:
raise Http404
return {
"fields": EVERYTHING,
"related": [(Value, "deal_id")],
}
def _deals_deal_cfg(user):
if not user.features[FEATURES.DEALS]:
raise Http404
return {
"fields": EVERYTHING,
"related": [(Value, "deal_id"), (Contribution, "deal_id")],
}
def _deals_value_cfg(user):
if not user.features[FEATURES.DEALS]:
raise Http404
return {"fields": EVERYTHING}
def _deals_valuetype_cfg(user):
if not user.features[FEATURES.DEALS]:
raise Http404
return {"fields": EVERYTHING}
def _invoices_invoice_cfg(user):
if not user.features[FEATURES.CONTROLLING]:
raise Http404
return {
"fields": EVERYTHING,
"related": [(CreditEntry, "invoice_id")],
}
def _invoices_service_cfg(user):
if not user.features[FEATURES.CONTROLLING]:
raise Http404
return {"fields": EVERYTHING}
def _invoices_recurringinvoice_cfg(user):
if not user.features[FEATURES.CONTROLLING]:
raise Http404
return {"fields": EVERYTHING}
def _invoices_projectedinvoice_cfg(user):
if (
not user.features[FEATURES.CONTROLLING]
or not user.features[FEATURES.PROJECTED_INVOICES]
):
raise Http404
return {"fields": EVERYTHING}
def _logbook_loggedcost_cfg(user):
fields = {
"service",
"created_at",
"created_by",
"rendered_on",
"rendered_by",
"cost",
"third_party_costs",
"description",
}
if user.features[FEATURES.CONTROLLING]:
fields |= {"invoice_service", "archived_at"}
if user.features[FEATURES.EXPENSES]:
fields |= {"expense_report", "are_expenses"}
if user.features[FEATURES.FOREIGN_CURRENCIES]:
fields |= {"expense_currency", "expense_cost"}
return {"fields": fields}
def _logbook_loggedhours_cfg(user):
fields = {
"service",
"created_at",
"created_by",
"rendered_on",
"rendered_by",
"hours",
"description",
}
if user.features[FEATURES.CONTROLLING]:
fields |= {"invoice_service", "archived_at"}
return {"fields": fields}
def _offers_offer_cfg(user):
fields = {
"created_at",
"project",
"offered_on",
"closed_on",
"work_completed_on",
"title",
"description",
"owned_by",
"status",
"postal_address",
}
if user.features[FEATURES.CONTROLLING]:
fields |= WITH_TOTAL
return {"fields": fields}
def _projects_campaign_cfg(user):
if not user.features[FEATURES.CAMPAIGNS]:
raise Http404
return {
"fields": {"customer", "title", "description", "owned_by"},
"related": [(Project, "campaign_id")],
}
def _projects_project_cfg(user):
fields = {
"customer",
"contact",
"title",
"description",
"owned_by",
"type",
"created_at",
"closed_on",
}
related = []
if user.features[FEATURES.CONTROLLING]:
related.extend(
[
(Offer, "project_id"),
(Invoice, "project_id"),
(ProjectService, "project_id"),
]
)
fields |= {"flat_rate"}
if user.features[FEATURES.CAMPAIGNS]:
fields |= {"campaign"}
if user.features[FEATURES.LABOR_COSTS]:
fields |= {"cost_center"}
if user.features[FEATURES.PLANNING]:
fields |= {"suppress_planning_update_mails"}
related.append((PlannedWork, "project_id"))
if user.features[FEATURES.PROJECTED_INVOICES]:
related.append((ProjectedInvoice, "project_id"))
return {"fields": fields, "related": related}
def _projects_service_cfg(user):
fields = {
"created_at",
"title",
"description",
"service_hours",
"project",
"offer",
"allow_logging",
"is_optional",
}
if user.features[FEATURES.CONTROLLING]:
fields |= {
"service_cost",
"effort_type",
"effort_rate",
"effort_hours",
"cost",
"third_party_costs",
}
if user.features[FEATURES.GLASSFROG]:
fields |= {"role"}
return {"fields": fields}
def _reporting_costcenter_cfg(user):
if not user.features[FEATURES.LABOR_COSTS]:
raise Http404
return {"fields": {"title"}}
WITH_TOTAL = {
"subtotal",
"discount",
"liable_to_vat",
"total_excl_tax",
"tax_rate",
"total",
"show_service_details",
}
HISTORY = {
User: _accounts_user_cfg,
Team: {"fields": EVERYTHING},
Absence: {"fields": EVERYTHING},
Year: {"fields": EVERYTHING},
CreditEntry: _credit_control_creditentry_cfg,
Contribution: _deals_contribution_cfg,
Deal: _deals_deal_cfg,
Value: _deals_value_cfg,
ValueType: _deals_valuetype_cfg,
ExpenseReport: {
"fields": EVERYTHING,
"related": [(LoggedCost, "expense_report_id")],
},
Employment: {
"fields": {
"user",
"date_from",
"date_until",
"percentage",
"vacation_weeks",
"notes",
}
},
VacationDaysOverride: {"fields": EVERYTHING},
EmailAddress: {"fields": EVERYTHING},
PhoneNumber: {"fields": EVERYTHING},
PostalAddress: {"fields": EVERYTHING},
Organization: {"fields": EVERYTHING, "related": [(Person, "organization_id")]},
Person: {
"fields": EVERYTHING,
"related": [
(PhoneNumber, "person_id"),
(EmailAddress, "person_id"),
(PostalAddress, "person_id"),
],
},
Invoice: _invoices_invoice_cfg,
InvoiceService: _invoices_service_cfg,
RecurringInvoice: _invoices_recurringinvoice_cfg,
ProjectedInvoice: _invoices_projectedinvoice_cfg,
Break: {"fields": EVERYTHING},
LoggedCost: _logbook_loggedcost_cfg,
LoggedHours: _logbook_loggedhours_cfg,
Offer: _offers_offer_cfg,
PlannedWork: {"fields": EVERYTHING},
ExternalWork: {"fields": EVERYTHING},
PublicHoliday: {"fields": EVERYTHING},
Milestone: {"fields": EVERYTHING},
Campaign: _projects_campaign_cfg,
Project: _projects_project_cfg,
ProjectService: _projects_service_cfg,
CostCenter: _reporting_costcenter_cfg,
}
| 28.466411
| 84
| 0.586744
|
08ebe5a877519505b833e19db4b5523a088dcf6b
| 3,321
|
py
|
Python
|
scikit-learn-weighted_kde/doc/tutorial/machine_learning_map/svg2imagemap.py
|
RTHMaK/git-squash-master
|
76c4c8437dd18114968e69a698f4581927fcdabf
|
[
"BSD-2-Clause"
] | 1
|
2021-11-26T12:22:13.000Z
|
2021-11-26T12:22:13.000Z
|
scikit-learn-weighted_kde/doc/tutorial/machine_learning_map/svg2imagemap.py
|
RTHMaK/git-squash-master
|
76c4c8437dd18114968e69a698f4581927fcdabf
|
[
"BSD-2-Clause"
] | null | null | null |
scikit-learn-weighted_kde/doc/tutorial/machine_learning_map/svg2imagemap.py
|
RTHMaK/git-squash-master
|
76c4c8437dd18114968e69a698f4581927fcdabf
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/local/bin/python
"""
This script converts a subset of SVG into an HTML imagemap
Note *subset*. It only handles <path> elements, for which it only pays
attention to the M and L commands. Further, it only notices the "translate"
transform.
It was written to generate the examples in the documentation for maphilight,
and thus is very squarely aimed at handling several SVG maps from wikipedia.
It *assumes* that all the <path>s it will need are inside a <g>. Any <path>
outside of a <g> will be ignored.
It takes several possible arguments, in the form:
    $ svg2imagemap.py FILENAME [x y [group1 group2 ... groupN]]
FILENAME must be the name of an SVG file. All other arguments are optional.
x and y, if present, are the dimensions of the image you'll be creating from
the SVG. If not present, it assumes the values of the width and height
attributes in the SVG file.
group1 through groupN are group ids. If you only want particular groups used,
enter their ids here and all others will be ignored.
"""
import os
import re
import sys
import xml.dom.minidom
import parse_path
if len(sys.argv) == 1:
sys.exit("svn2imagemap.py FILENAME [x y [group1 group2 ... groupN]]")
if not os.path.exists(sys.argv[1]):
sys.exit("Input file does not exist")
x, y, groups = None, None, None
if len(sys.argv) >= 4:
    x = float(sys.argv[2])
    y = float(sys.argv[3])
if len(sys.argv) > 4:
    groups = sys.argv[4:]
svg_file = xml.dom.minidom.parse(sys.argv[1])
svg = svg_file.getElementsByTagName('svg')[0]
raw_width = float(svg.getAttribute('width'))
raw_height = float(svg.getAttribute('height'))
width_ratio = x and (x / raw_width) or 1
height_ratio = y and (y / raw_height) or 1
if groups:
elements = [g for g in svg.getElementsByTagName('g') if (g.hasAttribute('id') and g.getAttribute('id') in groups)]
elements.extend([p for p in svg.getElementsByTagName('path') if (p.hasAttribute('id') and p.getAttribute('id') in groups)])
else:
elements = svg.getElementsByTagName('g')
parsed_groups = {}
for e in elements:
paths = []
if e.nodeName == 'g':
for path in e.getElementsByTagName('path'):
points = parse_path.get_points(path.getAttribute('d'))
for pointset in points:
paths.append([path.getAttribute('id'), pointset])
else:
points = parse_path.get_points(e.getAttribute('d'))
for pointset in points:
paths.append([e.getAttribute('id'), pointset])
if e.hasAttribute('transform'):
print e.getAttribute('id'), e.getAttribute('transform')
for transform in re.findall(r'(\w+)\((-?\d+.?\d*),(-?\d+.?\d*)\)', e.getAttribute('transform')):
if transform[0] == 'translate':
x_shift = float(transform[1])
y_shift = float(transform[2])
for path in paths:
path[1] = [(p[0] + x_shift, p[1] + y_shift) for p in path[1]]
parsed_groups[e.getAttribute('id')] = paths
out = []
for g in parsed_groups:
for path in parsed_groups[g]:
out.append('<area href="#" title="%s" shape="poly" coords="%s"></area>' %
(path[0], ', '.join([("%d,%d" % (p[0]*width_ratio, p[1]*height_ratio)) for p in path[1]])))
outfile = open(sys.argv[1].replace('.svg', '.html'), 'w')
outfile.write('\n'.join(out))
| 36.494505
| 127
| 0.654321
|
b738e722eea2039e1054bd77151aa37c95f7a1a3
| 281
|
py
|
Python
|
saleor/cart/context_processors.py
|
jdruiter/saleor
|
9393ac20bd3e82c8ec1f17f6e47e3d7379f20419
|
[
"BSD-3-Clause"
] | null | null | null |
saleor/cart/context_processors.py
|
jdruiter/saleor
|
9393ac20bd3e82c8ec1f17f6e47e3d7379f20419
|
[
"BSD-3-Clause"
] | 1
|
2022-02-10T14:46:00.000Z
|
2022-02-10T14:46:00.000Z
|
saleor/cart/context_processors.py
|
jdruiter/saleor
|
9393ac20bd3e82c8ec1f17f6e47e3d7379f20419
|
[
"BSD-3-Clause"
] | null | null | null |
"""Cart-related context processors."""
from __future__ import unicode_literals
from .utils import get_cart_from_request
def cart_counter(request):
"""Expose the number of items in cart."""
cart = get_cart_from_request(request)
return {'cart_counter': cart.quantity}
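# Usage note (illustrative; the actual settings live elsewhere in the project): to make
# `cart_counter` available in every template, list this function in the Django settings
# under TEMPLATES[...]["OPTIONS"]["context_processors"], e.g.
#     "saleor.cart.context_processors.cart_counter"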
| 25.545455
| 45
| 0.754448
|
f53865e6de0f342a514dac42793b599e5aa63069
| 3,184
|
py
|
Python
|
tests/test_static_graph.py
|
zbmain/PGL
|
dbded6a1543248b0a33c05eb476ddc513401a774
|
[
"Apache-2.0"
] | 1,389
|
2019-06-11T03:29:20.000Z
|
2022-03-29T18:25:43.000Z
|
tests/test_static_graph.py
|
zbmain/PGL
|
dbded6a1543248b0a33c05eb476ddc513401a774
|
[
"Apache-2.0"
] | 232
|
2019-06-21T06:52:10.000Z
|
2022-03-29T08:20:31.000Z
|
tests/test_static_graph.py
|
zbmain/PGL
|
dbded6a1543248b0a33c05eb476ddc513401a774
|
[
"Apache-2.0"
] | 229
|
2019-06-20T12:13:58.000Z
|
2022-03-25T12:04:48.000Z
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import os
import numpy as np
import paddle
import pgl
import paddle.nn as nn
import pgl.nn as gnn
import pgl.nn.functional as F
import paddle.static as static
class GNNModel(nn.Layer):
def __init__(self, input_size, output_size, num_layers=3):
super(GNNModel, self).__init__()
self.conv_fn = nn.LayerList()
self.conv_fn.append(gnn.GCNConv(input_size, output_size))
for i in range(num_layers - 1):
self.conv_fn.append(gnn.GCNConv(output_size, output_size))
self.pool_fn = gnn.GraphPool("sum")
def forward(self, num_nodes, edges, feature):
graph = pgl.Graph(num_nodes=num_nodes, edges=edges)
for fn in self.conv_fn:
feature = fn(graph, feature)
output = self.pool_fn(graph, feature)
return output
class StaticGraphOpTest(unittest.TestCase):
def test_static_graph(self):
path = './tmp'
dim = 100
        # Build and run the dygraph model, then save its parameters
paddle.disable_static()
num_nodes = 5
edges = [(0, 1), (1, 2), (3, 4)]
nfeat = np.random.randn(num_nodes, dim).astype("float32")
model = GNNModel(dim, 10)
out = model(
paddle.to_tensor(num_nodes),
paddle.to_tensor(edges), paddle.to_tensor(nfeat))
out = out.numpy()
paddle.save(model.state_dict(),
os.path.join(path, "static_gnn.pdparam"))
paddle.enable_static()
        # Now build and run the static-graph version
model2 = GNNModel(dim, 10)
input_num_nodes = static.data(
name='num_nodes', shape=[-1], dtype='int32')
input_edges = static.data(name='edges', shape=[-1, 2], dtype='int32')
input_feature = static.data(
name="feature", shape=[-1, dim], dtype="float32")
output = model2(input_num_nodes, input_edges, input_feature)
place = paddle.CPUPlace()
exe = static.Executor(place)
exe.run(static.default_startup_program())
prog = static.default_main_program()
state_dict = paddle.load(os.path.join(path, "static_gnn.pdparam"))
model2.set_state_dict(state_dict)
feed_dict = {
"num_nodes": num_nodes,
"edges": np.array(
edges, dtype="int32"),
"feature": nfeat.astype("float32"),
}
out2 = exe.run(prog, feed=feed_dict, fetch_list=[output])[0]
eps = np.sum((out2 - out)**2)
self.assertTrue(eps < 1e-5)
import shutil
shutil.rmtree(path)
if __name__ == "__main__":
unittest.main()
| 31.215686
| 77
| 0.633794
|
e870bfceb35044841df9ce2bd8f04ca7be6d8b8d
| 2,352
|
py
|
Python
|
blockchain.py
|
mmz-001/simple-blockchain
|
e7f3e10791a1597bae14d4bee9261f644d75587f
|
[
"MIT"
] | null | null | null |
blockchain.py
|
mmz-001/simple-blockchain
|
e7f3e10791a1597bae14d4bee9261f644d75587f
|
[
"MIT"
] | null | null | null |
blockchain.py
|
mmz-001/simple-blockchain
|
e7f3e10791a1597bae14d4bee9261f644d75587f
|
[
"MIT"
] | null | null | null |
import hashlib
from datetime import datetime
class Blockchain:
def __init__(self):
self.chain = []
self.current_tx = []
        # Add the genesis block to the Blockchain
self.chain.append(self.genesis_block)
@property
def genesis_block(self):
"""The first block in the Blockchain."""
block = {
"index": 0,
"timestamp": "0",
"txs": "[]",
"nonce": 634,
"prev_hash": 0
}
return block
def create_block(self, nonce=0):
"""Create a new block."""
block = {
"index": self.last_block["index"] + 1,
"timestamp": str(datetime.now()),
"txs": str(self.current_tx),
"nonce": nonce,
"prev_hash": self.hash(self.chain[-1])
}
# Reset current list of transactions
self.current_tx.clear()
return block
def new_tx(self, sender, recipient, amount):
"""Create a new transaction."""
self.current_tx.append({
"sender": sender,
"recipient": recipient,
"amount": amount
})
return self.last_block["index"] + 1
@staticmethod
def hash(block):
"""Return the SHA-256 hash of a Block."""
return hashlib.sha256(str(block).encode()).hexdigest()
@property
def last_block(self):
"""Return the last block in the chain."""
return self.chain[-1]
def proof(self, block):
"""Proof of work algorithm."""
nonce = 0
while not self.valid_proof(block):
block["nonce"] = nonce
nonce += 1
self.chain.append(block)
        return block["nonce"]
@staticmethod
def valid_proof(block):
"""Validates the proof: Checks whether hash of the block contains 3 leading zeros."""
return Blockchain.hash(block)[:3] == "000"
if __name__ == "__main__":
blockchain = Blockchain()
# Create a new transaction.
blockchain.new_tx("Alice", "Bob", 5)
# Mine a block.
new_block = blockchain.create_block()
blockchain.proof(new_block)
# Print the current Blockchain.
print(*blockchain.chain, sep="\n")
| 21
| 94
| 0.522109
|
47681fb1482c398fffac920b0b5d12390cca7713
| 9,245
|
py
|
Python
|
Scripts/etasteadyfv.py
|
dbcooney/Protocell_Paper_Code
|
8ae4ae3b6487905c18e5fdf5d5f48756cb7b0134
|
[
"BSD-3-Clause"
] | null | null | null |
Scripts/etasteadyfv.py
|
dbcooney/Protocell_Paper_Code
|
8ae4ae3b6487905c18e5fdf5d5f48756cb7b0134
|
[
"BSD-3-Clause"
] | null | null | null |
Scripts/etasteadyfv.py
|
dbcooney/Protocell_Paper_Code
|
8ae4ae3b6487905c18e5fdf5d5f48756cb7b0134
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Script used to generate Figures B.1 and B.2, comparing the analytically calculated
steady states for the fast-dimer and fast-slow dimorphic models with the states
achieved after a large number of steps under numerical finite volume simulations of
the same models.
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cmap
import scipy.integrate as spi
import os
from matplotlib import rc
rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
## for Palatino and other serif fonts use:
#rc('font',**{'family':'serif','serif':['Palatino']})
rc('text', usetex=True)
"""
This variable toggles between simulating dynamics on the fast-slow or fast-dimer
edges of the simplex.
"""
#edge = "FD"
edge = "FS"
lamb1 = 8.
lamb2 = 16.
lamb3 = 32.
lamb4 = 64.
lamb5 = 128.
"""
Introducing protocell-level replication rates for fast-slow and fast-dimer edges.
"""
def GFS(x,eta):
return x - eta * (x ** 2.)
def GFD(x,eta):
return 0.5 * (x - 0.5 * eta * (x ** 2.))
def GFS_j(j,N,eta):
return 0.5 * (1.0 / N) * (2.0 * j + 1.) - (eta / 3.) * (1.0 / (N**2.)) * (3.0 * (j ** 2.) + 3.0 * j + 1.)
def GFD_j(j,N,eta):
return 0.25 * (1. / N) * (2.0 * j + 1.) - (eta / 12.) * (1.0 / (N**2.)) * (3.0 * (j ** 2.) + 3.0 * j + 1.)
"""
Formulas for the analytically calculated steady-state densities.
"""
def fast_dimer_density(x,lamb,eta,s,theta):
lamb_tilde = (lamb * (2. + s)) / ((s + 1.) ** 2.)
return (x ** ( lamb_tilde * 0.5 * (1.0 - 0.5 * eta) - theta - 1.0)) * ((1.0 - x)**(theta - 1.0)) * (np.exp(-(0.25 * lamb_tilde * eta * x)))
def fast_slow_density(x,lamb,eta,s,theta):
return (x ** ( (lamb / s) * (1.0 - eta) - theta - 1.0)) * ((1.0 - x)**(theta - 1.0)) * (np.exp(-(lamb * eta * x)/s))
def steady_density(x,lamb,eta,s,theta,edge):
if edge == "FD":
return fast_dimer_density(x,lamb,eta,s,theta)
if edge == "FS":
return fast_slow_density(x,lamb,eta,s,theta)
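# Note: these steady-state densities are unnormalized; each curve is rescaled by its
# Simpson-rule integral (spi.simps) before plotting below.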
N = 400
time_step = 0.003
#time_length for FS edge
time_length = 5000
#time_length for FD edge
#time_length = 3000
eta = 0.67
s = 1.
theta = 1.
GFSvec = np.vectorize(GFS)
GFS_jvec = np.vectorize(GFS_j)
GFDvec = np.vectorize(GFD)
GFD_jvec = np.vectorize(GFD_j)
index_holder = np.zeros(N)
for j in range(N):
index_holder[j] = j
"""
Formulating discretized versions of a family of initial densities.
"""
def theta_init(j,N,theta):
return N ** (1.0 - theta) * (((N - j) ** theta) - ((N - j - 1.0) ** theta) )
theta_vec = np.vectorize(theta_init)
f_j = np.ones(N)
f_j = theta_vec(index_holder,N,1.0)
"""
Formulating fluxes between volume boundaries for FS and FD models.
"""
def above_fluxFS(j,N,s):
return ((j+1.0) / N) * (1.0 - (j+1.0) / N) * s
def above_fluxFD(j,N,s):
return ((j+1.0) / N) * (1.0 - (j+1.0) / N) * (s + 1. / (2. + s))
def below_fluxFS(j,N,s):
	return (float(j) / N) * (1.0 - (float(j)) / N) * s
def below_fluxFD(j,N,s):
	return (float(j) / N) * (1.0 - (float(j)) / N) * (s + 1. / (2. + s))
above_fluxFS_vec = np.vectorize(above_fluxFS)
below_fluxFS_vec = np.vectorize(below_fluxFS)
above_fluxFD_vec = np.vectorize(above_fluxFD)
below_fluxFD_vec = np.vectorize(below_fluxFD)
"""
Characterizing impact of within-protocell dynamics for finite volume discretization.
"""
def within_group(f,N,s,index_holder,edge):
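	# Within-protocell selection as a finite-volume flux balance: inflow from the cell
	# above (left_roll) minus outflow from the current cell; right_roll is computed but not used.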
left_roll = np.roll(f,-1)
left_roll[-1] = 0.
right_roll = np.roll(f,1)
right_roll[0] = 0.
if edge == "FS":
flux_in = above_fluxFS_vec(index_holder,N,s) * left_roll
flux_out = below_fluxFS_vec(index_holder,N,s) * f
elif edge == "FD":
flux_in = above_fluxFD_vec(index_holder,N,s) * left_roll
flux_out = below_fluxFD_vec(index_holder,N,s) * f
return N * (flux_in - flux_out)
"""
Effect of protocell-level competition on multilevel finite volume dynamics.
"""
def righthand(f,G,N):
return f * (G - (1.0/N) * np.dot(f,G))
peak_holder = [float(np.argmax(f_j))/N]
"""
Setting up finite volume simulations for five different values of relative selection
strength $\lambda$.
"""
f_j1 = theta_vec(index_holder,N,1.0)
f_j2 = theta_vec(index_holder,N,1.0)
f_j3 = theta_vec(index_holder,N,1.0)
f_j4 = theta_vec(index_holder,N,1.0)
f_j5 = theta_vec(index_holder,N,1.0)
"""
Running the five sets of finite-volume simulations.
"""
for time in range(time_length):
if edge == "FS":
between_group_effect1 = righthand(f_j1,GFS_jvec(index_holder,N,eta),N)
between_group_effect2 = righthand(f_j2,GFS_jvec(index_holder,N,eta),N)
between_group_effect3 = righthand(f_j3,GFS_jvec(index_holder,N,eta),N)
between_group_effect4 = righthand(f_j4,GFS_jvec(index_holder,N,eta),N)
between_group_effect5 = righthand(f_j5,GFS_jvec(index_holder,N,eta),N)
elif edge == "FD":
between_group_effect1 = righthand(f_j1,GFD_jvec(index_holder,N,eta),N)
between_group_effect2 = righthand(f_j2,GFD_jvec(index_holder,N,eta),N)
between_group_effect3 = righthand(f_j3,GFD_jvec(index_holder,N,eta),N)
between_group_effect4 = righthand(f_j4,GFD_jvec(index_holder,N,eta),N)
between_group_effect5 = righthand(f_j5,GFD_jvec(index_holder,N,eta),N)
within_group_effect1 = within_group(f_j1,N,s,index_holder,edge)
within_group_effect2 = within_group(f_j2,N,s,index_holder,edge)
within_group_effect3 = within_group(f_j3,N,s,index_holder,edge)
within_group_effect4 = within_group(f_j4,N,s,index_holder,edge)
within_group_effect5 = within_group(f_j5,N,s,index_holder,edge)
righthandside1 = lamb1 * between_group_effect1 + within_group_effect1
righthandside2 = lamb2 * between_group_effect2 + within_group_effect2
righthandside3 = lamb3 * between_group_effect3 + within_group_effect3
righthandside4 = lamb4 * between_group_effect4 + within_group_effect4
righthandside5 = lamb5 * between_group_effect5 + within_group_effect5
f_j1 = f_j1 + time_step * righthandside1
f_j2 = f_j2 + time_step * righthandside2
f_j3 = f_j3 + time_step * righthandside3
f_j4 = f_j4 + time_step * righthandside4
f_j5 = f_j5 + time_step * righthandside5
	print((1.0 / N) * np.sum(f_j1))
peak_holder.append(float(np.argmax(f_j1))/N)
"""
Plotting the states achieved from the five sets of finite volume simulations after
the prescribed number of time steps.
"""
plt.plot(np.arange(0.5/N,1.0+0.5/N,1.0/N),f_j1, color = plt.cm.YlOrRd(0.2), lw = 6., label = r"$\lambda = 8$")
plt.plot(np.arange(0.5/N,1.0+0.5/N,1.0/N),f_j2, color = plt.cm.YlOrRd(0.4), lw = 6., label = r"$\lambda = 16$")
plt.plot(np.arange(0.5/N,1.0+0.5/N,1.0/N),f_j3, color = plt.cm.YlOrRd(0.6), lw = 6., label = r"$\lambda = 32$")
plt.plot(np.arange(0.5/N,1.0+0.5/N,1.0/N),f_j4, color = plt.cm.YlOrRd(0.8), lw = 6., label = r"$\lambda = 64$")
plt.plot(np.arange(0.5/N,1.0+0.5/N,1.0/N),f_j5, color = plt.cm.YlOrRd(1.0), lw = 6., label = r"$\lambda = 128$")
"""
Plotting the analytically calculated steady states for the same values of the
relative selection strength $\lambda$ as used in the finite volume simulations.
"""
lamb1_steady = steady_density(np.arange(0.5/N,1.0+0.5/N,1.0/N),lamb1,eta,s,1.,edge)
lamb1_norm = lamb1_steady / spi.simps(lamb1_steady,np.arange(0.5/N,1.0+0.5/N,1.0/N))
plt.plot(np.arange(0.5/N,1.0+0.5/N,1.0/N),lamb1_norm, color =plt.cm.YlOrRd(0.2), lw = 8., ls = '--')
lamb2_steady = steady_density(np.arange(0.5/N,1.0+0.5/N,1.0/N),lamb2,eta,s,1.,edge)
lamb2_norm = lamb2_steady / spi.simps(lamb2_steady,np.arange(0.5/N,1.0+0.5/N,1.0/N))
plt.plot(np.arange(0.5/N,1.0+0.5/N,1.0/N),lamb2_norm, color =plt.cm.YlOrRd(0.4), lw = 8., ls = '--')
lamb3_steady = steady_density(np.arange(0.5/N,1.0+0.5/N,1.0/N),lamb3,eta,s,1.,edge)
lamb3_norm = lamb3_steady / spi.simps(lamb3_steady,np.arange(0.5/N,1.0+0.5/N,1.0/N))
plt.plot(np.arange(0.5/N,1.0+0.5/N,1.0/N),lamb3_norm, color =plt.cm.YlOrRd(0.6), lw = 8., ls = '--')
lamb4_steady = steady_density(np.arange(0.5/N,1.0+0.5/N,1.0/N),lamb4,eta,s,1.,edge)
lamb4_norm = lamb4_steady / spi.simps(lamb4_steady,np.arange(0.5/N,1.0+0.5/N,1.0/N))
plt.plot(np.arange(0.5/N,1.0+0.5/N,1.0/N),lamb4_norm, color =plt.cm.YlOrRd(0.8), lw = 8., ls = '--')
lamb5_steady = steady_density(np.arange(0.5/N,1.0+0.5/N,1.0/N),lamb5,eta,s,1.,edge)
lamb5_norm = lamb5_steady / spi.simps(lamb5_steady,np.arange(0.5/N,1.0+0.5/N,1.0/N))
plt.plot(np.arange(0.5/N,1.0+0.5/N,1.0/N),lamb5_norm, color =plt.cm.YlOrRd(1.), lw = 8., ls = '--')
if eta == 0.67:
plt.axvline(x = 0.75, ls = '--', lw = 6., color = 'k', alpha = 0.8)
plt.annotate(r"Optimal Composition $x^*_{FS}$", xy = (0.7,8.),rotation = 90., fontsize = 20.)
plt.axis([0.,1.0,0.,10.])
plt.legend(loc = "upper left", prop = {"size":16})
if edge == "FS":
plt.xlabel(r"Fraction of Slow Replicators $x$", fontsize = 24., labelpad = 10.)
plt.ylabel(r"Density of Groups $f(t,x)$", fontsize = 24.)
elif edge == "FD":
plt.xlabel(r"Fraction of Dimer Replicators $z$", fontsize = 24.,labelpad = 10.)
plt.ylabel(r"Density of Groups $g(t,z)$", fontsize = 24.)
plt.tight_layout()
script_folder = os.getcwd()
protocell_folder = os.path.dirname(script_folder)
if edge == "FS" and eta == 0.67:
plt.savefig(protocell_folder + "/Figures/fvfsghostdensity.png")
elif edge == "FS" and eta == 0.33:
plt.savefig(protocell_folder + "/Figures/fvfsnoghostdensity.png")
elif edge == "FD" and eta == 1.:
plt.savefig(protocell_folder + "/Figures/fvfddensity.png")
plt.show()
| 30.919732
| 141
| 0.674202
|
086bb4c46858895d54d08a8b0f1c3218dfdde3b9
| 378
|
py
|
Python
|
exercises1-115/d115 - Criando um menu.py
|
renankalfa/Curso_em_Video
|
d9012e7f8c87fcc0ea27082279da234364f7e9a8
|
[
"MIT"
] | 3
|
2022-01-08T23:16:07.000Z
|
2022-01-17T14:11:25.000Z
|
exercises1-115/d115 - Criando um menu.py
|
renankalfa/Curso_em_Video
|
d9012e7f8c87fcc0ea27082279da234364f7e9a8
|
[
"MIT"
] | null | null | null |
exercises1-115/d115 - Criando um menu.py
|
renankalfa/Curso_em_Video
|
d9012e7f8c87fcc0ea27082279da234364f7e9a8
|
[
"MIT"
] | null | null | null |
from d115.visuais import *
from d115.tratamento import *
from d115.bancodedados import *
while True:
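    # Menu options (in Portuguese): list registered people, register a person, end the program.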
menu('Pessoas cadastradas', 'Cadastrar Pessoa', 'Encerrar Programa')
option = testaoption(input('\033[33mSua opção: \033[m'))
if option == 1:
mostrarcadastradas()
if option == 2:
armazenar()
if option == 3:
encerrar()
break
| 25.2
| 72
| 0.634921
|
298e1191c06271831d4f1dd6f591995ac16a5a5e
| 5,337
|
py
|
Python
|
validation/emulation/experiments/linear-gpolka.p4app/test/latency_test_var/gpolka-fabric_bw10/heatmap_latency.py
|
nerds-ufes/G-PolKA
|
9c6bd42167bc333f6421a751c93a88c00841def9
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
validation/emulation/experiments/linear-gpolka.p4app/test/latency_test_var/gpolka-fabric_bw10/heatmap_latency.py
|
nerds-ufes/G-PolKA
|
9c6bd42167bc333f6421a751c93a88c00841def9
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
validation/emulation/experiments/linear-gpolka.p4app/test/latency_test_var/gpolka-fabric_bw10/heatmap_latency.py
|
nerds-ufes/G-PolKA
|
9c6bd42167bc333f6421a751c93a88c00841def9
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from numpy import genfromtxt
# X axis: number of hops
n_of_hops = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
# Y axis: packet sizes (bytes)
pkt_sizes = ["64", "128", "256", "512", "1024", "1300"]
pkt_sizes.reverse()
array = []
def get_values(p_size):
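    # Average the per-ping latencies stored in "<p_size>/<n>/a1_<n>" for each hop count n = 0..9.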
values_ping = []
for n in range(0, 10):
f_name = "{f}/{n}/a1_{n}".format(f=p_size, n=n)
print(f_name)
values = []
with open(f_name) as f:
for line in f:
values.append(float(line))
values_ping.append(np.mean(values))
return values_ping
def fill_array():
for p_size in pkt_sizes:
print("Experiment: {}".format(p_size))
array.append(get_values(p_size=p_size))
def heatmap(data, row_labels, col_labels, ax=None, cbar_kw={}, cbarlabel="", **kwargs):
"""
Create a heatmap from a numpy array and two lists of labels.
Parameters
----------
data
A 2D numpy array of shape (N, M).
row_labels
A list or array of length N with the labels for the rows.
col_labels
A list or array of length M with the labels for the columns.
ax
A `matplotlib.axes.Axes` instance to which the heatmap is plotted. If
not provided, use current axes or create a new one. Optional.
cbar_kw
A dictionary with arguments to `matplotlib.Figure.colorbar`. Optional.
cbarlabel
The label for the colorbar. Optional.
**kwargs
All other arguments are forwarded to `imshow`.
"""
if not ax:
ax = plt.gca()
# Plot the heatmap
im = ax.imshow(data, **kwargs)
# Create colorbar
cbar = ax.figure.colorbar(im, ax=ax, **cbar_kw)
cbar.ax.set_ylabel(cbarlabel, rotation=-90, va="bottom")
# We want to show all ticks...
ax.set_xticks(np.arange(data.shape[1]))
ax.set_yticks(np.arange(data.shape[0]))
# ... and label them with the respective list entries.
ax.set_xticklabels(col_labels)
ax.set_yticklabels(row_labels)
# Let the horizontal axes labeling appear on top.
ax.tick_params(top=False, bottom=True, labeltop=False, labelbottom=True)
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=0, ha="center", rotation_mode="anchor")
# Turn spines off and create white grid.
for edge, spine in ax.spines.items():
spine.set_visible(False)
ax.set_xticks(np.arange(data.shape[1] + 1) - 0.5, minor=True)
ax.set_yticks(np.arange(data.shape[0] + 1) - 0.5, minor=True)
ax.grid(which="minor", color="w", linestyle="-", linewidth=3)
ax.tick_params(which="minor", bottom=False, left=False)
return im, cbar
def annotate_heatmap(
im,
data=None,
valfmt="{x:.2f}",
textcolors=("black", "white"),
threshold=None,
**textkw
):
"""
A function to annotate a heatmap.
Parameters
----------
im
The AxesImage to be labeled.
data
Data used to annotate. If None, the image's data is used. Optional.
valfmt
The format of the annotations inside the heatmap. This should either
use the string format method, e.g. "$ {x:.2f}", or be a
`matplotlib.ticker.Formatter`. Optional.
textcolors
A pair of colors. The first is used for values below a threshold,
the second for those above. Optional.
threshold
Value in data units according to which the colors from textcolors are
applied. If None (the default) uses the middle of the colormap as
separation. Optional.
**kwargs
All other arguments are forwarded to each call to `text` used to create
the text labels.
"""
if not isinstance(data, (list, np.ndarray)):
data = im.get_array()
# Normalize the threshold to the images color range.
if threshold is not None:
threshold = im.norm(threshold)
else:
threshold = im.norm(data.max()) / 2.0
# Set default alignment to center, but allow it to be
# overwritten by textkw.
kw = dict(horizontalalignment="center", verticalalignment="center")
kw.update(textkw)
# Get the formatter in case a string is supplied
if isinstance(valfmt, str):
valfmt = matplotlib.ticker.StrMethodFormatter(valfmt)
# Loop over the data and create a `Text` for each "pixel".
# Change the text's color depending on the data.
texts = []
for i in range(data.shape[0]):
for j in range(data.shape[1]):
kw.update(color=textcolors[int(im.norm(data[i, j]) > threshold)])
text = im.axes.text(j, i, valfmt(data[i, j], None), **kw)
texts.append(text)
return texts
fill_array()
array = np.array(array)
print(array)
fig, ax = plt.subplots()
im, cbar = heatmap(
array, pkt_sizes, n_of_hops, ax=ax, cmap="Blues", cbarlabel="Latency Bound [ms]"
)
texts = annotate_heatmap(im, valfmt="{x:.2f}")
fig.tight_layout()
plt.ylabel("Packet Size (bytes)")
plt.xlabel("Number of hops")
plt.savefig(
"latencymap.pdf",
dpi=300,
format="pdf",
# bbox_extra_artists=(lgd,),
bbox_inches="tight",
)
plt.show()
| 29.983146
| 88
| 0.610455
|
c9b774b167b67ab29dc1375cdb73b0ed8f0d856e
| 12,464
|
py
|
Python
|
PyU4V/tests/unit_tests/test_pyu4v_requests.py
|
SiSTm1/PyU4V
|
ce9784fc5f8192024cfa42509b8d45583b83f01d
|
[
"MIT"
] | 19
|
2020-01-06T12:02:25.000Z
|
2021-12-14T06:50:04.000Z
|
PyU4V/tests/unit_tests/test_pyu4v_requests.py
|
SiSTm1/PyU4V
|
ce9784fc5f8192024cfa42509b8d45583b83f01d
|
[
"MIT"
] | 53
|
2019-12-17T17:26:44.000Z
|
2022-02-03T12:28:34.000Z
|
PyU4V/tests/unit_tests/test_pyu4v_requests.py
|
SiSTm1/PyU4V
|
ce9784fc5f8192024cfa42509b8d45583b83f01d
|
[
"MIT"
] | 13
|
2019-01-24T17:10:05.000Z
|
2019-12-09T06:33:21.000Z
|
# Copyright (c) 2020 Dell Inc. or its subsidiaries.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""test_pyu4v_requests.py."""
import json
import platform
import requests
import testtools
from unittest import mock
from PyU4V import rest_requests
from PyU4V.tests.unit_tests import pyu4v_common_data as pcd
from PyU4V.tests.unit_tests import pyu4v_fakes as pf
from PyU4V.utils import constants
from PyU4V.utils import exception
class PyU4VRestRequestsTest(testtools.TestCase):
"""Testing Unisphere REST requests."""
def setUp(self):
"""Setup."""
super(PyU4VRestRequestsTest, self).setUp()
self.data = pcd.CommonData()
pyu4v_version = constants.PYU4V_VERSION
python_version = platform.python_version()
sys_platform = platform.system()
sys_platform_release = platform.release()
self.ua_details = (
'PyU4V/{pv} ({plat}; version {rel}) Python {python}'.format(
pv=pyu4v_version, plat=sys_platform,
rel=sys_platform_release, python=python_version))
self.app_type = 'PyU4V-{v}'.format(v=pyu4v_version)
self.rest = rest_requests.RestRequests(
username='smc', password='smc', verify=False,
base_url='http://10.10.10.10:8443/univmax/restapi',
interval=1, retries=3)
def test_rest_requests_init(self):
"""Test class RestRequests __init__."""
self.assertEqual(self.rest.username, 'smc')
self.assertEqual(self.rest.password, 'smc')
self.assertFalse(self.rest.verify_ssl)
self.assertEqual(self.rest.base_url,
'http://10.10.10.10:8443/univmax/restapi')
self.assertEqual(self.rest.interval, 1)
self.assertEqual(self.rest.retries, 3)
self.assertEqual(self.rest.timeout, 120)
ref_headers = {'content-type': 'application/json',
'accept': 'application/json',
'application-type': None,
'user-agent': self.ua_details}
self.assertEqual(self.rest.headers, ref_headers)
self.assertIsInstance(self.rest.session,
type(requests.session()))
def test_establish_rest_session(self):
"""Test establish REST session."""
ref_headers = {'content-type': 'application/json',
'accept': 'application/json',
'application-type': 'test_app',
'user-agent': self.ua_details}
temp_rest = rest_requests.RestRequests(
username='smc', password='smc', verify=False,
base_url='http://10.10.10.10:8443/univmax/restapi',
interval=1, retries=3, application_type='test_app')
self.assertEqual(ref_headers, temp_rest.session.headers)
self.assertEqual('smc', temp_rest.session.auth.username)
self.assertEqual('smc', temp_rest.session.auth.password)
self.assertEqual(False, temp_rest.session.verify)
def test_establish_rest_session_with_headers(self):
"""Test establish_rest_session with headers."""
ref_headers = {'test_headers': True}
session = self.rest.establish_rest_session(headers=ref_headers)
self.assertEqual(ref_headers, session.headers)
def test_rest_request(self):
"""Test REST request success."""
with mock.patch.object(
self.rest.session, 'request',
return_value=pf.FakeResponse(
200, self.data.server_version)) as mock_request:
response, sc = self.rest.rest_request('/fake_uri', 'GET')
mock_request.assert_called_once_with(
method='GET', timeout=120,
url='http://10.10.10.10:8443/univmax/restapi/fake_uri')
self.assertEqual(200, sc)
self.assertEqual(self.data.server_version, response)
def test_rest_request_params(self):
"""Test REST request with parameters."""
with mock.patch.object(
self.rest.session, 'request',
return_value=pf.FakeResponse(
200, self.data.server_version)) as mock_request:
request_params = {'param': 'test'}
response, sc = self.rest.rest_request(
'/fake_uri', 'GET', params=request_params, timeout=500)
mock_request.assert_called_once_with(
method='GET', timeout=500, params=request_params,
url='http://10.10.10.10:8443/univmax/restapi/fake_uri')
self.assertEqual(200, sc)
self.assertEqual(self.data.server_version, response)
def test_rest_request_object(self):
"""Test REST request with object."""
with mock.patch.object(
self.rest.session, 'request',
return_value=pf.FakeResponse(
200, self.data.server_version)) as mock_request:
request_object = {'param': 'test'}
response, sc = self.rest.rest_request(
'/fake_uri', 'GET', request_object=request_object)
mock_request.assert_called_once_with(
method='GET', timeout=120,
data=json.dumps(request_object, sort_keys=True, indent=4),
url='http://10.10.10.10:8443/univmax/restapi/fake_uri')
self.assertEqual(200, sc)
self.assertEqual(self.data.server_version, response)
def test_rest_request_no_session(self):
"""Test REST requests, no existing session available."""
with mock.patch.object(
self.rest, 'establish_rest_session',
return_value=pf.FakeRequestsSession()) as mck_sesh:
self.rest.session = None
__, __ = self.rest.rest_request('/fake_uri', 'GET', timeout=0.1)
mck_sesh.assert_called_once()
def test_rest_request_value_error(self):
"""Test REST request value error no response."""
with mock.patch.object(self.rest.session, 'request',
return_value=pf.FakeResponse(500, None)):
response, sc = self.rest.rest_request('/fake_uri', 'GET')
self.assertEqual(500, sc)
self.assertIsNone(response)
def test_rest_request_value_error_no_status_code(self):
"""Test REST request value error no response or status code."""
with mock.patch.object(self.rest.session, 'request',
return_value=pf.FakeResponse(None, None)):
response, sc = self.rest.rest_request('/fake_uri', 'GET')
self.assertIsNone(sc)
self.assertIsNone(response)
def test_close_session(self):
"""Test close REST session."""
with mock.patch.object(self.rest.session, 'close') as mck_close:
self.rest.close_session()
mck_close.assert_called_once()
def test_rest_request_timeout_exception(self):
"""Test REST timeout exception scenario."""
self.rest.session = pf.FakeRequestsSession()
sc, msg = self.rest.rest_request('/fake_url', 'TIMEOUT')
self.assertIsNone(sc)
self.assertIsNone(msg)
def test_rest_request_connection_exception(self):
"""Test REST HTTP error exception scenario."""
self.rest.session = pf.FakeRequestsSession()
self.assertRaises(requests.exceptions.HTTPError,
self.rest.rest_request, '/fake_url', 'HTTPERROR')
def test_rest_request_ssl_exception(self):
"""Test REST SSL exception scenario."""
self.rest.session = pf.FakeRequestsSession()
self.assertRaises(requests.exceptions.SSLError,
self.rest.rest_request, '/fake_url', 'SSLERROR')
def test_rest_request_other_exception(self):
"""Test REST other exception scenario."""
self.rest.session = pf.FakeRequestsSession()
self.assertRaises(exception.VolumeBackendAPIException,
self.rest.rest_request, '/fake_url', 'EXCEPTION')
def test_file_transfer_request_download(self):
"""Test file_transfer_request download request."""
with mock.patch.object(
self.rest, 'establish_rest_session',
return_value=pf.FakeRequestsSession()) as mck_est:
response, sc = self.rest.file_transfer_request(
method=constants.POST,
uri='/system/settings/importfile',
download=True,
r_obj={'test_req': True})
mck_est.assert_called_once_with(headers={
constants.CONTENT_TYPE: constants.APP_JSON,
constants.ACCEPT: constants.APP_OCT,
constants.USER_AGENT: rest_requests.ua_details,
constants.APP_TYPE: self.rest.headers.get(
'application-type')})
self.assertEqual(200, sc)
self.assertEqual('OK', response.raw.reason)
def test_file_transfer_request_upload(self):
"""Test file_transfer_request download request."""
with mock.patch.object(
self.rest, 'establish_rest_session',
return_value=pf.FakeRequestsSession()) as mck_est:
response, sc = self.rest.file_transfer_request(
method=constants.POST,
uri='/system/settings/exportfile',
upload=True,
form_data={'test_req': True})
mck_est.assert_called_once_with(headers={
constants.ACCEPT_ENC: constants.APP_MPART,
constants.USER_AGENT: rest_requests.ua_details,
constants.APP_TYPE: self.rest.headers.get(
'application-type')})
self.assertEqual(200, sc)
self.assertEqual('OK', response.raw.reason)
def test_file_transfer_request_download_upload_exception(self):
"""Test file_transfer_request exc download and upload both set."""
self.assertRaises(
exception.InvalidInputException, self.rest.file_transfer_request,
method=constants.POST, uri='/fake', download=True, upload=True)
def test_file_transfer_request_timeout_exception(self):
"""Test file_transfer timeout exception scenario."""
with mock.patch.object(
self.rest, 'establish_rest_session',
side_effect=requests.Timeout):
resp, sc = self.rest.file_transfer_request(
method=constants.POST, uri='/fake', download=True)
self.assertIsNone(resp)
self.assertIsNone(sc)
def test_file_transfer_request_ssl_exception(self):
"""Test file_transfer SSL error exception scenario."""
with mock.patch.object(
self.rest, 'establish_rest_session',
side_effect=requests.exceptions.SSLError):
self.assertRaises(
requests.exceptions.SSLError,
self.rest.file_transfer_request,
method=constants.POST, uri='/fake', download=True)
def test_file_transfer_request_connection_exception(self):
"""Test file_transfer HTTP error exception scenario."""
with mock.patch.object(
self.rest, 'establish_rest_session',
side_effect=requests.exceptions.HTTPError):
self.assertRaises(
requests.exceptions.HTTPError,
self.rest.file_transfer_request,
method=constants.POST, uri='/fake', download=True)
def test_file_transfer_request_other_exception(self):
"""Test file_transfer HTTP error exception scenario."""
with mock.patch.object(
self.rest, 'establish_rest_session',
side_effect=exception.VolumeBackendAPIException):
self.assertRaises(
exception.VolumeBackendAPIException,
self.rest.file_transfer_request,
method=constants.POST, uri='/fake', download=True)
| 44.99639
| 77
| 0.628931
|
ec730f1c6e3d36bec5d0c6144bd9479bc3560a7c
| 7,064
|
py
|
Python
|
astrality/tests/actions/test_copy_action.py
|
JakobGM/Astrality
|
72935b616f9a6a2e9254e9cd9319b525c596e8f0
|
[
"MIT"
] | 111
|
2018-03-19T12:56:35.000Z
|
2022-02-05T11:19:04.000Z
|
astrality/tests/actions/test_copy_action.py
|
JakobGM/Astrality
|
72935b616f9a6a2e9254e9cd9319b525c596e8f0
|
[
"MIT"
] | 120
|
2018-02-22T11:23:08.000Z
|
2021-03-25T22:13:47.000Z
|
astrality/tests/actions/test_copy_action.py
|
JakobGM/Astrality
|
72935b616f9a6a2e9254e9cd9319b525c596e8f0
|
[
"MIT"
] | 7
|
2018-04-06T14:28:33.000Z
|
2020-03-18T20:25:59.000Z
|
"""Tests for astrality.actions.CopyAction."""
from pathlib import Path
from astrality.actions import CopyAction
from astrality.persistence import CreatedFiles
def test_null_object_pattern():
"""Copy actions without options should do nothing."""
copy_action = CopyAction(
options={},
directory=Path('/'),
replacer=lambda x: x,
context_store={},
creation_store=CreatedFiles().wrapper_for(module='test'),
)
copy_action.execute()
def test_if_dry_run_is_respected(create_temp_files, caplog):
"""When dry_run is True, the copy action should only be logged."""
content, target = create_temp_files(2)
content.write_text('content')
target.write_text('target')
copy_action = CopyAction(
options={'content': str(content), 'target': str(target)},
directory=Path('/'),
replacer=lambda x: x,
context_store={},
creation_store=CreatedFiles().wrapper_for(module='test'),
)
caplog.clear()
result = copy_action.execute(dry_run=True)
# We should still return the copy pair
assert result == {content: target}
# We should log what would have been done
assert 'SKIPPED:' in caplog.record_tuples[0][2]
assert str(content) in caplog.record_tuples[0][2]
assert str(target) in caplog.record_tuples[0][2]
# But we should not copy the file under a dry run
assert target.read_text() == 'target'
def test_copy_action_using_all_parameters(tmpdir):
"""All three parameters should be respected."""
temp_dir = Path(tmpdir) / 'content'
temp_dir.mkdir()
target = Path(tmpdir) / 'target'
target.mkdir()
file1 = temp_dir / 'file1'
file1.write_text('file1 content')
file2 = temp_dir / 'file2'
file2.write_text('file2 content')
recursive_dir = temp_dir / 'recursive'
recursive_dir.mkdir()
file3 = temp_dir / 'recursive' / 'file3'
file3.write_text('file3 content')
copy_options = {
'content': str(temp_dir),
'target': str(target),
'include': r'file(\d)',
}
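    # The digit captured by 'include' becomes the target file name, so copies are
    # written as '1', '2' and 'recursive/3'.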
copy_action = CopyAction(
options=copy_options,
directory=temp_dir,
replacer=lambda x: x,
context_store={},
creation_store=CreatedFiles().wrapper_for(module='test'),
)
copy_action.execute()
assert (target / '1').read_text() == file1.read_text()
assert (target / '2').read_text() == file2.read_text()
assert (target / 'recursive' / '3').read_text() == file3.read_text()
assert copy_action.copied_files == {
file1: {target / '1'},
file2: {target / '2'},
file3: {target / 'recursive' / '3'},
}
assert file1 in copy_action
def test_copying_without_renaming(tmpdir):
"""When include is not given, keep copy name."""
temp_dir = Path(tmpdir) / 'content'
temp_dir.mkdir()
target = Path(tmpdir) / 'target'
target.mkdir()
file1 = temp_dir / 'file1'
file1.touch()
file2 = temp_dir / 'file2'
file2.touch()
recursive_dir = temp_dir / 'recursive'
recursive_dir.mkdir()
file3 = temp_dir / 'recursive' / 'file3'
file3.touch()
copy_options = {
'content': str(temp_dir),
'target': str(target),
}
copy_action = CopyAction(
options=copy_options,
directory=temp_dir,
replacer=lambda x: x,
context_store={},
creation_store=CreatedFiles().wrapper_for(module='test'),
)
copy_action.execute()
assert (target / 'file1').read_text() == file1.read_text()
assert (target / 'file2').read_text() == file2.read_text()
assert (target / 'recursive' / 'file3').read_text() == file3.read_text()
def test_copying_file_to_directory(tmpdir):
"""If copying from directory to file, place file in directory."""
temp_dir = Path(tmpdir) / 'content'
temp_dir.mkdir()
target = Path(tmpdir) / 'target'
target.mkdir()
file1 = temp_dir / 'file1'
file1.touch()
copy_options = {
'content': str(file1),
'target': str(target),
'include': r'file1',
}
copy_action = CopyAction(
options=copy_options,
directory=temp_dir,
replacer=lambda x: x,
context_store={},
creation_store=CreatedFiles().wrapper_for(module='test'),
)
copy_action.execute()
assert (target / 'file1').read_text() == file1.read_text()
def test_setting_permissions_on_target_copy(tmpdir):
"""If permissions is provided, use it for the target."""
temp_dir = Path(tmpdir) / 'content'
temp_dir.mkdir()
target = Path(tmpdir) / 'target'
target.mkdir()
file1 = temp_dir / 'file1'
file1.touch()
file1.chmod(0o770)
copy_options = {
'content': str(file1),
'target': str(target),
'include': r'file1',
'permissions': '777',
}
copy_action = CopyAction(
options=copy_options,
directory=temp_dir,
replacer=lambda x: x,
context_store={},
creation_store=CreatedFiles().wrapper_for(module='test'),
)
copy_action.execute()
assert ((target / 'file1').stat().st_mode & 0o000777) == 0o777
def test_backup_of_copy_target(create_temp_files):
"""Overwritten copy targets should be backed up."""
target, content = create_temp_files(2)
# This file is the original and should be backed up
target.write_text('original')
# This is the new content copied to target
content.write_text('new')
copy_options = {
'content': str(content.name),
'target': str(target),
}
copy_action = CopyAction(
options=copy_options,
directory=content.parent,
replacer=lambda x: x,
context_store={},
creation_store=CreatedFiles().wrapper_for(module='test'),
)
# We replace the content by executing the action
copy_action.execute()
assert target.read_text() == 'new'
# And when cleaning up the module, the backup should be restored
CreatedFiles().cleanup(module='test')
assert target.read_text() == 'original'
def test_cleanup_of_created_directory(create_temp_files, tmpdir):
"""Created directories should be cleaned up."""
tmpdir = Path(tmpdir)
[content] = create_temp_files(1)
# The target requires a new directory to be created
directory = tmpdir / 'dir'
target = directory / 'target.tmp'
# Execute copy action
copy_options = {
'content': str(content.name),
'target': str(target),
}
created_files = CreatedFiles().wrapper_for(module='test')
copy_action = CopyAction(
options=copy_options,
directory=content.parent,
replacer=lambda x: x,
context_store={},
creation_store=created_files,
)
copy_action.execute()
# The directory should now be created and persisted
assert directory.is_dir()
assert directory in created_files.creation_store
# And it should be deleted on cleanup
created_files.creation_store.cleanup(module='test')
assert not directory.is_dir()
| 27.920949
| 76
| 0.640006
|
42507b93018771630b9ea8a6ebbffbe1c5861e1a
| 608
|
py
|
Python
|
{{cookiecutter.repo_name}}/{{cookiecutter.package_name}}/test/test_pyspark_entrypoint.py
|
daniel-cortez-stevenson/cookiecutter-pyspark-cloud
|
5a8818e086f810f66627862a0a0fa2e1741ae5a7
|
[
"Apache-2.0"
] | 3
|
2020-05-06T20:01:34.000Z
|
2021-07-14T17:19:43.000Z
|
{{cookiecutter.repo_name}}/{{cookiecutter.package_name}}/test/test_pyspark_entrypoint.py
|
daniel-cortez-stevenson/cookiecutter-pyspark-aws-emr
|
5a8818e086f810f66627862a0a0fa2e1741ae5a7
|
[
"Apache-2.0"
] | 1
|
2020-05-07T03:42:05.000Z
|
2020-05-26T18:02:22.000Z
|
{{cookiecutter.repo_name}}/{{cookiecutter.package_name}}/test/test_pyspark_entrypoint.py
|
daniel-cortez-stevenson/cookiecutter-pyspark-aws-emr
|
5a8818e086f810f66627862a0a0fa2e1741ae5a7
|
[
"Apache-2.0"
] | 2
|
2020-10-29T21:34:52.000Z
|
2021-12-28T12:53:12.000Z
|
"""
Copyright 2020 Daniel Cortez Stevenson
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
def test_placeholder():
pass
| 30.4
| 72
| 0.779605
|
e087519151d90f5a9e6050272ff806461c9d5cca
| 26,307
|
py
|
Python
|
scripts/db/budget/budggosql12.py
|
CCLab/Raw-Salad
|
1ec028985e2b910aca31302fb57ed0677778756e
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/db/budget/budggosql12.py
|
CCLab/Raw-Salad
|
1ec028985e2b910aca31302fb57ed0677778756e
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/db/budget/budggosql12.py
|
CCLab/Raw-Salad
|
1ec028985e2b910aca31302fb57ed0677778756e
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import getpass
import os
import optparse
import csv
import psycopg2
import psycopg2.extras
import simplejson as json
from ConfigParser import ConfigParser
#-----------------------------
def get_db_connect(fullpath, dbtype):
connect_dict= {}
defaults= {
'basedir': fullpath
}
cfg= ConfigParser(defaults)
cfg.read(fullpath)
connect_dict['host']= cfg.get(dbtype,'host')
connect_dict['port']= cfg.getint(dbtype,'port')
connect_dict['database']= cfg.get(dbtype,'database')
connect_dict['username']= cfg.get(dbtype,'username')
try:
connect_dict['password']= cfg.get(dbtype,'password')
except:
connect_dict['password']= None
return connect_dict
#-----------------------------
def execute_sql(statement, connection):
cur = connection.cursor()
success= True
try:
cur.execute(statement)
except Exception, e:
success= False
print 'Cannot execute statement!: %r\n %s\n' % (statement, e)
return success
#-----------------------------
def create_table(connection, schema, mode, clean_act):
connection.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT) # not to do COMMIT after every insert/update
cur = connection.cursor()
print '... creating', mode, "table - reading schema"
table_name= schema[mode]['name'] # dict of aliases -> document keys in db
colmn_name= schema[mode]['columns'] # dict of aliases -> document keys in db
create_table_stat= 'CREATE TABLE '+ table_name +' ('
order_list= [int(k) for k in colmn_name.keys()]
order_list.sort()
for curr_key in order_list:
create_table_stat= create_table_stat + colmn_name[str(curr_key)]['name'] + ' ' + colmn_name[str(curr_key)]['type'] + ', '
create_table_stat= create_table_stat.rstrip(', ')+')'
if clean_act:
try:
cur.execute('DROP TABLE '+table_name)
except:
pass # no table with that name - nothing to delete
success= True
try:
cur.execute(create_table_stat)
print "... table", table_name, "successfully created"
except Exception, e:
success= False
print 'Cannot create table:\n %s' % e
return success, table_name
#-----------------------------
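# MongoDB-style bulk insert helper; not called anywhere in this script's PostgreSQL flow.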
def db_insert(data_bulk, db, collname, clean_first=False):
collect= db[collname]
if clean_first:
collect.remove()
collect.insert(data_bulk)
return collect.find().count()
#-----------------------------
def sort_format(src):
"""
format 1-2-3... to 0001-0002-0003...
src should be convertable to int
"""
src_list= src.split('-')
res_list= []
for elm in src_list:
res_list.append('%04d' % int(elm))
res= '-'.join(res_list)
return res
#-----------------------------
def fill_work_table(curr_year, temp_tbl, work_tbl, connection):
# filling out sql table
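    # Walk the flat temp-table rows and rebuild the budget hierarchy: funkcja -> zadanie ->
    # dysponent -> cel -> miernik on node 0, plus the parallel podzadanie branch on node 1;
    # totals per parent are aggregated afterwards.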
select_statement= "SELECT * FROM %s ORDER BY %s" % (temp_tbl, '_id')
dict_cur= connection.cursor(cursor_factory=psycopg2.extras.DictCursor)
dict_cur.execute(select_statement)
rows = dict_cur.fetchall()
for row in rows:
# FUNKCJA: level 'a', node NULL
if row['test_f']:
idef= row['numer']
idef_sort= sort_format(idef)
funk_type_name= row['funkcja_zadanie_podzadanie'].split('.', 2)
curr_funkcja= row['numer']
leaf= False # 'funkcja' has children
v_total= row['ogolem']
v_nation= row['budzet_panstwa']
v_eu= row['budzet_srodkow_europejskich']
if v_total != 0:
v_proc_eu= round(float(v_eu) / float(v_total) * 100, 2) #percentage
v_proc_nation= round(float(v_nation) / float(v_total) * 100, 2)
if row['numer'] == '9999':
idef_sort= row['numer']
elem_type= 'Total'
elem_name= row['funkcja_zadanie_podzadanie'].strip().decode('utf-8')
leaf= True # in case of grand total it's the last level (no children)
else:
elem_name= funk_type_name[1].strip().decode('utf-8')
elem_type= funk_type_name[0].strip().decode('utf-8')
insert_statement= """
INSERT INTO %s VALUES ('%s', '%s', NULL, NULL, NULL, '%s', 'a', '%s', '%s', NULL, NULL, NULL, %d, %d, %d, %0.2f, %0.2f, %d)
""" % (work_tbl, idef, idef_sort, leaf, elem_type, elem_name, v_total, v_nation, v_eu, v_proc_nation, v_proc_eu, curr_year)
execute_sql(insert_statement, connection)
# FUNKCJA - ZADANIE: level 'b', node NULL
if row['test_z']:
curr_zadanie= row['numer'].replace('.','-')
idef= curr_zadanie
idef_sort= sort_format(idef)
parent= curr_funkcja
parent_sort= sort_format(parent)
list_name= row['funkcja_zadanie_podzadanie'].split('.', 2)
elem_type= ('Zadanie ' + list_name[0].strip() + '.' + list_name[1].strip()).decode('utf-8')
elem_name= list_name[2].strip().decode('utf-8')
leaf= False # 'zadanie' has children
v_total= row['ogolem']
v_nation= row['budzet_panstwa']
v_eu= row['budzet_srodkow_europejskich']
if v_total != 0:
v_proc_eu= round(float(v_eu) / float(v_total) * 100, 2) #percentage
v_proc_nation= round(float(v_nation) / float(v_total) * 100, 2)
insert_statement= """
INSERT INTO %s VALUES ('%s', '%s', '%s', '%s', NULL, '%s', 'b', '%s', '%s', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, %d)
""" % (work_tbl, idef, idef_sort, parent, parent_sort, leaf, elem_type, elem_name, curr_year)
execute_sql(insert_statement, connection) # money are NULL on that level
zadanie_dysp_count= 0 # for incrementing children - 'dysponent'
# ZADANIE - DYSPONENT: level 'c', node 0
if row['test_z_d']:
zadanie_dysp_count += 1
parent= curr_zadanie
parent_sort= sort_format(parent)
idef= curr_zadanie+'-'+str(zadanie_dysp_count)
curr_zadanie_dysponent= idef
idef_sort= sort_format(idef)
elem_type= 'Dysponent'
elem_name= row['dysponent'].strip().decode('utf-8')
leaf= False # 'dysponent' has children
v_total= row['ogolem']
v_nation= row['budzet_panstwa']
v_eu= row['budzet_srodkow_europejskich']
if v_total != 0:
v_proc_eu= round(float(v_eu) / float(v_total) * 100, 2) #percentage
v_proc_nation= round(float(v_nation) / float(v_total) * 100, 2)
if '/' in row['czesc']:
curr_czesc= row['czesc']
else:
curr_czesc= str('%02d' % int(row['czesc']))
insert_statement= """
INSERT INTO %s VALUES ('%s', '%s', '%s', '%s', 0, '%s', 'c', '%s', '%s', '%s', NULL, NULL, %d, %d, %d, %0.2f, %0.2f, %d)
""" % (work_tbl, idef, idef_sort, parent, parent_sort, leaf, elem_type, elem_name, curr_czesc, v_total, v_nation, v_eu, v_proc_nation, v_proc_eu, curr_year)
execute_sql(insert_statement, connection)
zadanie_dysp_cel_count= 0 # for incrementing children - 'cel'
# WARNING!
if curr_funkcja == '22': # doubling that for the node 1, as there's no 'Podzadanie' for Funk 22
insert_statement= """
INSERT INTO %s VALUES ('%s', '%s', '%s', '%s', 1, '%s', 'c', '%s', '%s', '%s', NULL, NULL, %d, %d, %d, %0.2f, %0.2f, %d)
""" % (work_tbl, idef, idef_sort, parent, parent_sort, leaf, elem_type, elem_name, curr_czesc,
v_total, v_nation, v_eu, v_proc_nation, v_proc_eu, curr_year)
execute_sql(insert_statement, connection)
# ZADANIE - DYSPONENT - CEL: level 'd', node 0
if row['test_z_c']:
zadanie_dysp_cel_count += 1
parent= curr_zadanie_dysponent
parent_sort= sort_format(parent)
idef= curr_zadanie_dysponent+'-'+str(zadanie_dysp_cel_count)
curr_zadanie_dysponent_cel= idef
idef_sort= sort_format(idef)
elem_type= 'Cel'
if row['cel'] is None:
print "--- ! empty Cel of parent %s" % parent
elem_name= ''
else:
elem_name= row['cel'].strip().decode('utf-8')
if '/' in row['czesc']:
curr_czesc= row['czesc']
else:
curr_czesc= str('%02d' % int(row['czesc']))
leaf= False # 'cel' has children
insert_statement= """
INSERT INTO %s VALUES ('%s', '%s', '%s', '%s', 0, '%s', 'd', '%s', '%s', '%s', NULL, NULL, NULL, NULL, NULL, NULL, NULL, %d)
""" % (work_tbl, idef, idef_sort, parent, parent_sort, leaf, elem_type, elem_name, curr_czesc, curr_year)
execute_sql(insert_statement, connection) # money are NULL on that level
            zadanie_dysp_cel_mier_count= 0 # for incrementing children - 'miernik'
# WARNING!
if curr_funkcja == '22': # doubling that for the node 1, as there's no 'Podzadanie' for Funk 22
insert_statement= """
INSERT INTO %s VALUES ('%s', '%s', '%s', '%s', 1, '%s', 'd', '%s', '%s', '%s', NULL, NULL, NULL, NULL, NULL, NULL, NULL, %d)
""" % (work_tbl, idef, idef_sort, parent, parent_sort, leaf, elem_type, elem_name, curr_czesc, curr_year)
execute_sql(insert_statement, connection)
# ZADANIE - DYSPONENT - CEL - MIERNIK: level 'e', node 0, leaf= True
if row['test_z_m']:
zadanie_dysp_cel_mier_count += 1
parent= curr_zadanie_dysponent_cel
parent_sort= sort_format(parent)
idef= curr_zadanie_dysponent_cel+'-'+str(zadanie_dysp_cel_mier_count)
idef_sort= sort_format(idef)
elem_type= 'Miernik'
if row['miernik_nazwa'] is None:
print "--- ! empty Miernik of parent %s" % parent
elem_name= ''
else:
elem_name= row['miernik_nazwa'].strip().decode('utf-8')
wartosc_bazowa= row['miernik_wartosc_bazowa']
wartosc_rok_obec= row['miernik_wartosc_rok_obec']
if wartosc_bazowa is not None:
wartosc_bazowa= wartosc_bazowa.strip().decode('utf-8')
if wartosc_rok_obec is not None:
wartosc_rok_obec= wartosc_rok_obec.strip().decode('utf-8')
leaf= True # 'miernik' is the deepest level
if '/' in row['czesc']:
curr_czesc= row['czesc']
else:
curr_czesc= str('%02d' % int(row['czesc']))
insert_statement= """
INSERT INTO %s VALUES ('%s', '%s', '%s', '%s', 0, '%s', 'e', '%s', '%s', '%s', '%s', '%s', NULL, NULL, NULL, NULL, NULL, %d)
""" % (work_tbl, idef, idef_sort, parent, parent_sort, leaf, elem_type, elem_name, curr_czesc, wartosc_bazowa, wartosc_rok_obec, curr_year)
execute_sql(insert_statement, connection) # money are NULL on that level
# WARNING!
if curr_funkcja == '22': # doubling that for the node 1, as there's no 'Podzadanie' for Funk 22
insert_statement= """
INSERT INTO %s VALUES ('%s', '%s', '%s', '%s', 1, '%s', 'e', '%s', '%s', '%s', '%s', '%s', NULL, NULL, NULL, NULL, NULL, %d)
""" % (work_tbl, idef, idef_sort, parent, parent_sort, leaf, elem_type, elem_name, curr_czesc, wartosc_bazowa, wartosc_rok_obec, curr_year)
execute_sql(insert_statement, connection)
# ZADANIE - PODZADANIE: level 'c', node 1
if row['test_p']:
parent= curr_zadanie
parent_sort= sort_format(parent)
idef= row['numer'].replace('.','-')
idef_sort= sort_format(idef)
curr_podzadanie= idef
list_name= row['funkcja_zadanie_podzadanie'].split('.', 3)
elem_type= ('Podzadanie ' + list_name[0].strip() + '.' + list_name[1].strip() + '.' + list_name[2].strip()).decode('utf-8')
elem_name= list_name[3].strip().decode('utf-8')
v_total= row['ogolem']
v_nation= row['budzet_panstwa']
v_eu= row['budzet_srodkow_europejskich']
if v_total != 0:
v_proc_eu= round(float(v_eu) / float(v_total) * 100, 2) #percentage
v_proc_nation= round(float(v_nation) / float(v_total) * 100, 2)
leaf= False # 'podzadanie' has children
insert_statement= """
INSERT INTO %s VALUES ('%s', '%s', '%s', '%s', 1, '%s', 'c', '%s', '%s', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, %d)
""" % (work_tbl, idef, idef_sort, parent, parent_sort, leaf, elem_type, elem_name, curr_year)
execute_sql(insert_statement, connection) # money are NULL on that level
podzadanie_dysp_count= 0 # for incrementing children - 'dysponent'
# ZADANIE - PODZADANIE - DYSPONENT: level 'd', node 1
if row['test_p_d']:
podzadanie_dysp_count += 1
parent= curr_podzadanie
parent_sort= sort_format(parent)
idef= curr_podzadanie+'-'+str(podzadanie_dysp_count)
curr_podzadanie_dysp= idef
idef_sort= sort_format(idef)
elem_type= 'Dysponent'
elem_name= row['dysponent'].strip().decode('utf-8')
leaf= False # 'dysponent' has children - TO CONFIRM BY IGNACY!!! (*)
v_total= row['ogolem']
v_nation= row['budzet_panstwa']
v_eu= row['budzet_srodkow_europejskich']
if v_total != 0:
v_proc_eu= round(float(v_eu) / float(v_total) * 100, 2) #percentage
v_proc_nation= round(float(v_nation) / float(v_total) * 100, 2)
if '/' in row['czesc']:
curr_czesc= row['czesc']
else:
curr_czesc= str('%02d' % int(row['czesc']))
insert_statement= """
INSERT INTO %s VALUES ('%s', '%s', '%s', '%s', 1, '%s', 'd', '%s', '%s', '%s', NULL, NULL, %d, %d, %d, %0.2f, %0.2f, %d)
""" % (work_tbl, idef, idef_sort, parent, parent_sort, leaf, elem_type, elem_name, curr_czesc, v_total, v_nation, v_eu, v_proc_nation, v_proc_eu, curr_year)
execute_sql(insert_statement, connection)
zadanie_dysp_cel_count= 0 # for incrementing children - 'cel'
# (*) next 2 blocks - until Ignacy confirms that we fulfill next 2 levels with 1 successive element in each case
# ZADANIE - PODZADANIE - DYSPONENT - CEL: level 'e', node 1
parent= curr_podzadanie_dysp
parent_sort= sort_format(parent)
idef= curr_podzadanie_dysp+'-1' # to change depends on (*)
curr_podzadanie_dysp_cel= idef
idef_sort= sort_format(idef)
elem_type= 'Cel'
if row['cel'] is None:
elem_name= ''
print "--- ! empty Cel of parent %s" % parent
else:
elem_name= row['cel'].strip().decode('utf-8')
leaf= False # 'cel' has children - TO BE CONFIRMED (*)
if '/' in row['czesc']:
curr_czesc= row['czesc']
else:
curr_czesc= str('%02d' % int(row['czesc']))
insert_statement= """
INSERT INTO %s VALUES ('%s', '%s', '%s', '%s', 1, '%s', 'e', '%s', '%s', '%s', NULL, NULL, NULL, NULL, NULL, NULL, NULL, %d)
""" % (work_tbl, idef, idef_sort, parent, parent_sort, leaf, elem_type, elem_name, curr_czesc, curr_year)
execute_sql(insert_statement, connection)
            zadanie_dysp_cel_mier_count= 0 # for incrementing children - 'miernik'
# ZADANIE - PODZADANIE - DYSPONENT - CEL - MIERNIK: level 'f', node 1
parent= curr_podzadanie_dysp_cel
parent_sort= sort_format(parent)
idef= curr_podzadanie_dysp_cel+'-1' # to change depends on (*)
idef_sort= sort_format(idef)
elem_type= 'Miernik'
if row['miernik_nazwa'] is None:
elem_name= ''
print "--- ! empty Miernik of parent %s" % parent
else:
elem_name= row['miernik_nazwa'].strip().decode('utf-8')
wartosc_bazowa= row['miernik_wartosc_bazowa']
wartosc_rok_obec= row['miernik_wartosc_rok_obec']
if wartosc_bazowa is not None:
wartosc_bazowa= wartosc_bazowa.strip().decode('utf-8')
if wartosc_rok_obec is not None:
wartosc_rok_obec= wartosc_rok_obec.strip().decode('utf-8')
leaf= True # 'miernik' is the deepest level
if '/' in row['czesc']:
curr_czesc= row['czesc']
else:
curr_czesc= str('%02d' % int(row['czesc']))
insert_statement= """
INSERT INTO %s VALUES ('%s', '%s', '%s', '%s', 1, '%s', 'f', '%s', '%s', '%s', '%s', '%s', NULL, NULL, NULL, NULL, NULL, %d)
""" % (work_tbl, idef, idef_sort, parent, parent_sort, leaf, elem_type, elem_name, curr_czesc, wartosc_bazowa, wartosc_rok_obec, curr_year)
execute_sql(insert_statement, connection)
# now calculating totals
print "... calculating totals"
# for zadanie
select_statement= """
SELECT parent, SUM(v_total) as sum_v_total, SUM(v_nation) as sum_v_nation, SUM(v_eu) as sum_v_eu
FROM %s WHERE node = 0 AND elem_level = 'c' AND budg_year = %d GROUP BY parent
""" % (work_tbl, curr_year)
dict_cur= connection.cursor(cursor_factory=psycopg2.extras.DictCursor)
dict_cur.execute(select_statement)
rows = dict_cur.fetchall()
for row in rows:
if row['sum_v_total'] != 0:
v_proc_eu= round(float(row['sum_v_eu']) / float(row['sum_v_total']) * 100, 2) #percentage
v_proc_nation= round(float(row['sum_v_nation']) / float(row['sum_v_total']) * 100, 2)
update_statement= """
UPDATE %s SET v_total = %d, v_nation= %d, v_eu= %d, v_proc_nation= %0.2f, v_proc_eu= %0.2f WHERE idef = '%s' AND budg_year = %d
""" % (work_tbl, row['sum_v_total'], row['sum_v_nation'], row['sum_v_eu'], v_proc_nation, v_proc_eu, row['parent'], curr_year)
execute_sql(update_statement, connection)
# and podzadanie
select_statement= "SELECT parent, SUM(v_total) as sum_v_total, SUM(v_nation) as sum_v_nation, SUM(v_eu) as sum_v_eu FROM " + work_tbl + " WHERE node = 1 AND elem_level = 'd' AND idef NOT like '22%' AND budg_year = " + str(curr_year) + " GROUP BY parent"
dict_cur= connection.cursor(cursor_factory=psycopg2.extras.DictCursor)
dict_cur.execute(select_statement)
rows = dict_cur.fetchall()
for row in rows:
if row['sum_v_total'] != 0:
v_proc_eu= round(float(row['sum_v_eu']) / float(row['sum_v_total']) * 100, 2) #percentage
v_proc_nation= round(float(row['sum_v_nation']) / float(row['sum_v_total']) * 100, 2)
update_statement= """
UPDATE %s SET v_total = %d, v_nation= %d, v_eu= %d, v_proc_nation= %0.2f, v_proc_eu= %0.2f WHERE idef = '%s' AND node = 1 AND budg_year = %d
""" % (work_tbl, row['sum_v_total'], row['sum_v_nation'], row['sum_v_eu'], v_proc_nation, v_proc_eu, row['parent'], curr_year)
else:
update_statement= """
UPDATE %s SET v_total = %d, v_nation= %d, v_eu= %d WHERE idef = '%s' AND node = 1 AND budg_year = %d
""" % (work_tbl, row['sum_v_total'], row['sum_v_nation'], row['sum_v_eu'], row['parent'], curr_year)
execute_sql(update_statement, connection)
#-----------------------------
def csv_parse(csv_read, table, schema, conn):
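    # Load the raw CSV rows into the temporary table, appending boolean role flags
    # (test_f, test_z, test_z_d, ...) inferred from the dot count of the 'numer' column
    # and from which descriptive fields are filled in.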
colmn_name= schema['temp']['columns']
for row in csv_read:
keys= tuple(row)
keys_len= len(keys)
row= iter(row)
for row in csv_read:
insert_statement= "INSERT INTO %s VALUES(" % table
i= 0
for field in row:
if field == '':
curr_field= "NULL"
else:
if 'integer' in colmn_name[str(i)]['type']:
curr_field= field.replace(' ', '').strip()
else:
field_rp= field.replace('\n', ' ')
curr_field= "'" + field_rp.replace('Ŝ', 'ż').strip().decode('utf-8') + "'"
insert_statement= insert_statement + curr_field + ', '
i += 1
# checking the data collected in the dict_row and putting to the proper "socket" in the roles matrix
if row[0].count('.') == 0: # row[0] is 'numer': ('grand grand total' or 'funkcja')
#True: test_f
insert_statement= insert_statement + "'true', 'false', 'false', 'false', 'false', 'false', 'false', "
elif row[0].count('.') == 1: # "zadanie + dysponent + cel + miernik" OR "dysponent + cel + miernik" OR "cel + miernik" OR "miernik"
if row[1] and row[2] and row[4]: # row[1] zadanie + row[2] dysponent + row[4] cel + miernik
#True: test_z (zadanie), test_z_d (zadanie_dysponent), test_z_c (zadanie_cel), test_z_m (zadanie_miernik)
insert_statement= insert_statement + "'false', 'true', 'true', 'true', 'true', 'false', 'false', "
else:
if row[2] and row[4]: # row[2] dysponent + row[4] cel + miernik
#True: test_z_d (zadanie_dysponent), test_z_c (zadanie_cel), test_z_m (zadanie_miernik)
insert_statement= insert_statement + "'false', 'false', 'true', 'true', 'true', 'false', 'false', "
else:
if row[4]: #row[5] cel + miernik
#True: test_z_c (zadanie_cel), test_z_m (zadanie_miernik)
insert_statement= insert_statement + "'false', 'false', 'false', 'true', 'true', 'false', 'false', "
else: # zadanie_miernik
#True: test_z_m
insert_statement= insert_statement + "'false', 'false', 'false', 'false', 'true', 'false', 'false', "
elif row[0].count('.') == 2: # row[0] is 'numer': "podzadanie + dysponent + cel + miernik" / "dysponent + cel + miernik"
if row[1]: # row[1] is podzadanie: "podzadanie + dysponent + cel + miernik"
#True: test_p (podzadanie), test_p_d (podzadanie_dysponent)
insert_statement= insert_statement + "'false', 'false', 'false', 'false', 'false', 'true', 'true', "
else: # dysponent + cel + miernik
#True: test_p_d (podzadanie_dysponent)
insert_statement= insert_statement + "'false', 'false', 'false', 'false', 'false', 'false', 'true', "
insert_statement= insert_statement + "DEFAULT)"
execute_sql(insert_statement, conn)
#-----------------------------
if __name__ == "__main__":
# process command line options
cmdparser = optparse.OptionParser(usage="usage: python %prog [Options] src_filename.csv src_schema.json")
cmdparser.add_option("-f", "--conf", action="store", dest="conf_filename", help="configuration file (CSV)")
cmdparser.add_option("-c", action="store_true",dest='dbact',help="clean db before insert (ignored if db is not updated)")
opts, args = cmdparser.parse_args()
try:
src_file= open(args[0], 'rb')
except IOError as e:
print 'Unable to open file:\n %s\n' % e
exit()
csv_delim= ';' #read CSV file with data
csv_quote= '"'
try:
csv_read= csv.reader(src_file, delimiter= csv_delim, quotechar= csv_quote)
except Exception as e:
print 'Unable to read CSV file:\n %s\n' % e
exit()
try: #read schema file
filename_schema= args[1]
except:
filename_schema= None
if filename_schema is None:
        filename_schema= os.path.splitext(args[0])[0]+'-sql.json'
try: #deserialize it into the object
sch_src= open(filename_schema, 'rb')
schema= json.load(sch_src, encoding='utf-8') # schema file
except Exception as e:
print 'Error in processing schema file:\n %s\n' % e
exit()
conf_filename= opts.conf_filename
if conf_filename is None:
print 'No configuration file is specified, exiting now'
exit()
# get connection details
conn= get_db_connect(conf_filename, 'postgresql')
conn_host= conn['host']
conn_port= conn['port']
conn_db= conn['database']
conn_username= conn['username']
conn_password= conn['password']
#username - ask for password
if conn_password is None:
conn_password = getpass.getpass('Password for '+conn_username+': ')
try:
connect_postgres = psycopg2.connect(host= conn_host, port= conn_port, database=conn_db, user= conn_username, password= conn_password)
print "... connected to db", conn_db
except Exception, e:
print 'Unable to connect to the PostgreSQL database:\n %s\n' % e
exit() #no connection to the database - no data processing
clean_db= opts.dbact # False - insert() data, True - remove() and then insert()
mode= 'temp'
temp_table_created, temp_table= create_table(connect_postgres, schema, mode, True) # create temporary table
work_year= 2012
csv_parse(csv_read, temp_table, schema, connect_postgres) # fill temporary table
mode= 'work'
table_created, work_table= create_table(connect_postgres, schema, mode, clean_db) # create work table
fill_work_table(work_year, temp_table, work_table, connect_postgres) # fill work table
print '... work table is filled'
print '... dropping temporary table'
drop_statement= "DROP TABLE %s" % (temp_table)
execute_sql(drop_statement, connect_postgres)
print 'Done'
| 49.080224
| 257
| 0.572471
|
ab2e14d37fdede644b22d20e694b2927dfb2e0d0
| 18,663
|
py
|
Python
|
core/dbt/config/project.py
|
nchammas/dbt
|
ea4948ff8a0d076425ecab1f4ec187296876589c
|
[
"Apache-2.0"
] | null | null | null |
core/dbt/config/project.py
|
nchammas/dbt
|
ea4948ff8a0d076425ecab1f4ec187296876589c
|
[
"Apache-2.0"
] | null | null | null |
core/dbt/config/project.py
|
nchammas/dbt
|
ea4948ff8a0d076425ecab1f4ec187296876589c
|
[
"Apache-2.0"
] | null | null | null |
from copy import deepcopy
from dataclasses import dataclass
from itertools import chain
from typing import List, Dict, Any, Optional, TypeVar, Union, Tuple, Callable
import hashlib
import os
from dbt.clients.system import resolve_path_from_base
from dbt.clients.system import path_exists
from dbt.clients.system import load_file_contents
from dbt.clients.yaml_helper import load_yaml_text
from dbt.exceptions import DbtProjectError
from dbt.exceptions import RecursionException
from dbt.exceptions import SemverException
from dbt.exceptions import validator_error_message
from dbt.exceptions import warn_or_error
from dbt.helper_types import NoValue
from dbt.semver import VersionSpecifier
from dbt.semver import versions_compatible
from dbt.version import get_installed_version
from dbt.ui import printer
from dbt.utils import deep_map
from dbt.source_config import SourceConfig
from dbt.contracts.project import (
Project as ProjectContract,
SemverString,
)
from dbt.contracts.project import PackageConfig
from hologram import ValidationError
from .renderer import ConfigRenderer
UNUSED_RESOURCE_CONFIGURATION_PATH_MESSAGE = """\
WARNING: Configuration paths exist in your dbt_project.yml file which do not \
apply to any resources.
There are {} unused configuration paths:\n{}
"""
INVALID_VERSION_ERROR = """\
This version of dbt is not supported with the '{package}' package.
Installed version of dbt: {installed}
Required version of dbt for '{package}': {version_spec}
Check the requirements for the '{package}' package, or run dbt again with \
--no-version-check
"""
IMPOSSIBLE_VERSION_ERROR = """\
The package version requirement can never be satisfied for the '{package}' package.
Required versions of dbt for '{package}': {version_spec}
Check the requirements for the '{package}' package, or run dbt again with \
--no-version-check
"""
MALFORMED_PACKAGE_ERROR = """\
The packages.yml file in this project is malformed. Please double check
the contents of this file and fix any errors before retrying.
You can find more information on the syntax for this file here:
https://docs.getdbt.com/docs/package-management
Validator Error:
{error}
"""
def _list_if_none(value):
if value is None:
value = []
return value
def _dict_if_none(value):
if value is None:
value = {}
return value
def _list_if_none_or_string(value):
value = _list_if_none(value)
if isinstance(value, str):
return [value]
return value
def _load_yaml(path):
contents = load_file_contents(path)
return load_yaml_text(contents)
def _get_config_paths(config, path=(), paths=None):
if paths is None:
paths = set()
for key, value in config.items():
if isinstance(value, dict):
if key in SourceConfig.ConfigKeys:
if path not in paths:
paths.add(path)
else:
_get_config_paths(value, path + (key,), paths)
else:
if path not in paths:
paths.add(path)
return frozenset(paths)
def _is_config_used(path, fqns):
if fqns:
for fqn in fqns:
if len(path) <= len(fqn) and fqn[:len(path)] == path:
return True
return False
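# Illustrative note (not part of the original module): a config path counts as "used" when
# it is a prefix of some fqn, e.g. _is_config_used(('my_project',), {('my_project', 'staging', 'model_a')})
# evaluates to True, while a path that prefixes no fqn evaluates to False.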
def package_data_from_root(project_root):
package_filepath = resolve_path_from_base(
'packages.yml', project_root
)
if path_exists(package_filepath):
packages_dict = _load_yaml(package_filepath)
else:
packages_dict = None
return packages_dict
def package_config_from_data(packages_data):
if packages_data is None:
packages_data = {'packages': []}
try:
packages = PackageConfig.from_dict(packages_data)
except ValidationError as e:
raise DbtProjectError(
MALFORMED_PACKAGE_ERROR.format(error=str(e.message))
) from e
return packages
def _parse_versions(versions: Union[List[str], str]) -> List[VersionSpecifier]:
"""Parse multiple versions as read from disk. The versions value may be any
one of:
- a single version string ('>0.12.1')
- a single string specifying multiple comma-separated versions
('>0.11.1,<=0.12.2')
- an array of single-version strings (['>0.11.1', '<=0.12.2'])
Regardless, this will return a list of VersionSpecifiers
"""
if isinstance(versions, str):
versions = versions.split(',')
return [VersionSpecifier.from_version_string(v) for v in versions]
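# Illustrative note (not part of the original module): all documented input forms yield the
# same result, e.g.
#   _parse_versions('>0.11.1,<=0.12.2')
#   _parse_versions(['>0.11.1', '<=0.12.2'])
# both return a two-element list of VersionSpecifier objects.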
def _all_source_paths(
source_paths: List[str], data_paths: List[str], snapshot_paths: List[str]
) -> List[str]:
return list(chain(source_paths, data_paths, snapshot_paths))
T = TypeVar('T')
def value_or(value: Optional[T], default: T) -> T:
if value is None:
return default
else:
return value
def _raw_project_from(project_root: str) -> Dict[str, Any]:
project_root = os.path.normpath(project_root)
project_yaml_filepath = os.path.join(project_root, 'dbt_project.yml')
# get the project.yml contents
if not path_exists(project_yaml_filepath):
raise DbtProjectError(
'no dbt_project.yml found at expected path {}'
.format(project_yaml_filepath)
)
project_dict = _load_yaml(project_yaml_filepath)
if not isinstance(project_dict, dict):
raise DbtProjectError(
'dbt_project.yml does not parse to a dictionary'
)
return project_dict
@dataclass
class PartialProject:
profile_name: Optional[str]
project_name: Optional[str]
project_root: str
project_dict: Dict[str, Any]
def render(self, renderer):
packages_dict = package_data_from_root(self.project_root)
return Project.render_from_dict(
self.project_root,
self.project_dict,
packages_dict,
renderer,
)
@dataclass
class Project:
project_name: str
version: Union[SemverString, float]
project_root: str
profile_name: str
source_paths: List[str]
macro_paths: List[str]
data_paths: List[str]
test_paths: List[str]
analysis_paths: List[str]
docs_paths: List[str]
target_path: str
snapshot_paths: List[str]
clean_targets: List[str]
log_path: str
modules_path: str
quoting: Dict[str, Any]
models: Dict[str, Any]
on_run_start: List[str]
on_run_end: List[str]
seeds: Dict[str, Any]
snapshots: Dict[str, Any]
dbt_version: List[VersionSpecifier]
packages: Dict[str, Any]
query_comment: Optional[Union[str, NoValue]]
@property
def all_source_paths(self) -> List[str]:
return _all_source_paths(
self.source_paths, self.data_paths, self.snapshot_paths
)
@staticmethod
def _preprocess(project_dict: Dict[str, Any]) -> Dict[str, Any]:
"""Pre-process certain special keys to convert them from None values
into empty containers, and to turn strings into arrays of strings.
"""
handlers: Dict[Tuple[str, ...], Callable[[Any], Any]] = {
('on-run-start',): _list_if_none_or_string,
('on-run-end',): _list_if_none_or_string,
}
for k in ('models', 'seeds', 'snapshots'):
handlers[(k,)] = _dict_if_none
handlers[(k, 'vars')] = _dict_if_none
handlers[(k, 'pre-hook')] = _list_if_none_or_string
handlers[(k, 'post-hook')] = _list_if_none_or_string
handlers[('seeds', 'column_types')] = _dict_if_none
def converter(value: Any, keypath: Tuple[str, ...]) -> Any:
if keypath in handlers:
handler = handlers[keypath]
return handler(value)
else:
return value
return deep_map(converter, project_dict)
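    # Illustrative sketch (not part of the original module): given
    # {'models': None, 'on-run-start': 'x'}, _preprocess returns
    # {'models': {}, 'on-run-start': ['x']} - None containers become empty and
    # scalar hook strings become single-element lists.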
@classmethod
def from_project_config(
cls,
project_dict: Dict[str, Any],
packages_dict: Optional[Dict[str, Any]] = None,
) -> 'Project':
"""Create a project from its project and package configuration, as read
by yaml.safe_load().
:param project_dict: The dictionary as read from disk
:param packages_dict: If it exists, the packages file as
read from disk.
:raises DbtProjectError: If the project is missing or invalid, or if
the packages file exists and is invalid.
:returns: The project, with defaults populated.
"""
try:
project_dict = cls._preprocess(project_dict)
except RecursionException:
raise DbtProjectError(
'Cycle detected: Project input has a reference to itself',
project=project_dict
)
try:
cfg = ProjectContract.from_dict(project_dict)
except ValidationError as e:
raise DbtProjectError(validator_error_message(e)) from e
# name/version are required in the Project definition, so we can assume
# they are present
name = cfg.name
version = cfg.version
# this is added at project_dict parse time and should always be here
# once we see it.
if cfg.project_root is None:
raise DbtProjectError('cfg must have a project root!')
else:
project_root = cfg.project_root
# this is only optional in the sense that if it's not present, it needs
# to have been a cli argument.
profile_name = cfg.profile
# these are all the defaults
source_paths: List[str] = value_or(cfg.source_paths, ['models'])
macro_paths: List[str] = value_or(cfg.macro_paths, ['macros'])
data_paths: List[str] = value_or(cfg.data_paths, ['data'])
test_paths: List[str] = value_or(cfg.test_paths, ['test'])
analysis_paths: List[str] = value_or(cfg.analysis_paths, [])
snapshot_paths: List[str] = value_or(cfg.snapshot_paths, ['snapshots'])
all_source_paths: List[str] = _all_source_paths(
source_paths, data_paths, snapshot_paths
)
docs_paths: List[str] = value_or(cfg.docs_paths, all_source_paths)
target_path: str = value_or(cfg.target_path, 'target')
clean_targets: List[str] = value_or(cfg.clean_targets, [target_path])
log_path: str = value_or(cfg.log_path, 'logs')
modules_path: str = value_or(cfg.modules_path, 'dbt_modules')
# in the default case we'll populate this once we know the adapter type
# It would be nice to just pass along a Quoting here, but that would
# break many things
quoting: Dict[str, Any] = {}
if cfg.quoting is not None:
quoting = cfg.quoting.to_dict()
models: Dict[str, Any] = cfg.models
seeds: Dict[str, Any] = cfg.seeds
snapshots: Dict[str, Any] = cfg.snapshots
on_run_start: List[str] = value_or(cfg.on_run_start, [])
on_run_end: List[str] = value_or(cfg.on_run_end, [])
# weird type handling: no value_or use
dbt_raw_version: Union[List[str], str] = '>=0.0.0'
if cfg.require_dbt_version is not None:
dbt_raw_version = cfg.require_dbt_version
query_comment = cfg.query_comment
try:
dbt_version = _parse_versions(dbt_raw_version)
except SemverException as e:
raise DbtProjectError(str(e)) from e
try:
packages = package_config_from_data(packages_dict)
except ValidationError as e:
raise DbtProjectError(validator_error_message(e)) from e
project = cls(
project_name=name,
version=version,
project_root=project_root,
profile_name=profile_name,
source_paths=source_paths,
macro_paths=macro_paths,
data_paths=data_paths,
test_paths=test_paths,
analysis_paths=analysis_paths,
docs_paths=docs_paths,
target_path=target_path,
snapshot_paths=snapshot_paths,
clean_targets=clean_targets,
log_path=log_path,
modules_path=modules_path,
quoting=quoting,
models=models,
on_run_start=on_run_start,
on_run_end=on_run_end,
seeds=seeds,
snapshots=snapshots,
dbt_version=dbt_version,
packages=packages,
query_comment=query_comment,
)
# sanity check - this means an internal issue
project.validate()
return project
def __str__(self):
cfg = self.to_project_config(with_packages=True)
return str(cfg)
def __eq__(self, other):
if not (isinstance(other, self.__class__) and
isinstance(self, other.__class__)):
return False
return self.to_project_config(with_packages=True) == \
other.to_project_config(with_packages=True)
def to_project_config(self, with_packages=False):
"""Return a dict representation of the config that could be written to
disk with `yaml.safe_dump` to get this configuration.
:param with_packages bool: If True, include the serialized packages
file in the root.
        :returns dict: The serialized project configuration.
"""
result = deepcopy({
'name': self.project_name,
'version': self.version,
'project-root': self.project_root,
'profile': self.profile_name,
'source-paths': self.source_paths,
'macro-paths': self.macro_paths,
'data-paths': self.data_paths,
'test-paths': self.test_paths,
'analysis-paths': self.analysis_paths,
'docs-paths': self.docs_paths,
'target-path': self.target_path,
'snapshot-paths': self.snapshot_paths,
'clean-targets': self.clean_targets,
'log-path': self.log_path,
'quoting': self.quoting,
'models': self.models,
'on-run-start': self.on_run_start,
'on-run-end': self.on_run_end,
'seeds': self.seeds,
'snapshots': self.snapshots,
'require-dbt-version': [
v.to_version_string() for v in self.dbt_version
],
})
if with_packages:
result.update(self.packages.to_dict())
if self.query_comment != NoValue():
result['query-comment'] = self.query_comment
return result
def validate(self):
try:
ProjectContract.from_dict(self.to_project_config())
except ValidationError as e:
raise DbtProjectError(validator_error_message(e)) from e
@classmethod
def render_from_dict(
cls,
project_root: str,
project_dict: Dict[str, Any],
packages_dict: Dict[str, Any],
renderer: ConfigRenderer,
) -> 'Project':
rendered_project = renderer.render_project(project_dict)
rendered_project['project-root'] = project_root
rendered_packages = renderer.render_packages_data(packages_dict)
return cls.from_project_config(rendered_project, rendered_packages)
@classmethod
def partial_load(
cls, project_root: str
) -> PartialProject:
project_root = os.path.normpath(project_root)
project_dict = _raw_project_from(project_root)
project_name = project_dict.get('name')
profile_name = project_dict.get('profile')
return PartialProject(
profile_name=profile_name,
project_name=project_name,
project_root=project_root,
project_dict=project_dict,
)
@classmethod
def from_project_root(
cls, project_root: str, renderer: ConfigRenderer
) -> 'Project':
partial = cls.partial_load(project_root)
return partial.render(renderer)
def hashed_name(self):
return hashlib.md5(self.project_name.encode('utf-8')).hexdigest()
def get_resource_config_paths(self):
"""Return a dictionary with 'seeds' and 'models' keys whose values are
lists of lists of strings, where each inner list of strings represents
a configured path in the resource.
"""
return {
'models': _get_config_paths(self.models),
'seeds': _get_config_paths(self.seeds),
'snapshots': _get_config_paths(self.snapshots),
}
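    # Illustrative sketch (not part of the original module): with
    # models = {'my_project': {'staging': {'materialized': 'view'}}} and empty
    # seeds/snapshots, this returns something like
    # {'models': frozenset({('my_project', 'staging')}), 'seeds': frozenset(), 'snapshots': frozenset()}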
def get_unused_resource_config_paths(self, resource_fqns, disabled):
"""Return a list of lists of strings, where each inner list of strings
represents a type + FQN path of a resource configuration that is not
used.
"""
disabled_fqns = frozenset(tuple(fqn) for fqn in disabled)
resource_config_paths = self.get_resource_config_paths()
unused_resource_config_paths = []
for resource_type, config_paths in resource_config_paths.items():
used_fqns = resource_fqns.get(resource_type, frozenset())
fqns = used_fqns | disabled_fqns
for config_path in config_paths:
if not _is_config_used(config_path, fqns):
unused_resource_config_paths.append(
(resource_type,) + config_path
)
return unused_resource_config_paths
def warn_for_unused_resource_config_paths(self, resource_fqns, disabled):
unused = self.get_unused_resource_config_paths(resource_fqns, disabled)
if len(unused) == 0:
return
msg = UNUSED_RESOURCE_CONFIGURATION_PATH_MESSAGE.format(
len(unused),
'\n'.join('- {}'.format('.'.join(u)) for u in unused)
)
warn_or_error(msg, log_fmt=printer.yellow('{}'))
def validate_version(self):
"""Ensure this package works with the installed version of dbt."""
installed = get_installed_version()
if not versions_compatible(*self.dbt_version):
msg = IMPOSSIBLE_VERSION_ERROR.format(
package=self.project_name,
version_spec=[
x.to_version_string() for x in self.dbt_version
]
)
raise DbtProjectError(msg)
if not versions_compatible(installed, *self.dbt_version):
msg = INVALID_VERSION_ERROR.format(
package=self.project_name,
installed=installed.to_version_string(),
version_spec=[
x.to_version_string() for x in self.dbt_version
]
)
raise DbtProjectError(msg)
| 33.748644
| 79
| 0.641644
|
a61d15ef6ef3a78682fa36cb0e57d3cc9dc1a41f
| 5,810
|
py
|
Python
|
tools/schedulers.py
|
dong03/DogNoseLandmarks
|
ac5d1e0436e9e0835a6939f8d125f1d36007bc62
|
[
"MIT"
] | 1
|
2020-09-09T04:34:55.000Z
|
2020-09-09T04:34:55.000Z
|
tools/schedulers.py
|
dong03/DogNoseLandmarks
|
ac5d1e0436e9e0835a6939f8d125f1d36007bc62
|
[
"MIT"
] | null | null | null |
tools/schedulers.py
|
dong03/DogNoseLandmarks
|
ac5d1e0436e9e0835a6939f8d125f1d36007bc62
|
[
"MIT"
] | null | null | null |
from bisect import bisect_right
from timm.optim import AdamW
from torch import optim
from torch.optim import lr_scheduler
from torch.optim.rmsprop import RMSprop
from torch.optim.adamw import AdamW
from torch.optim.lr_scheduler import MultiStepLR, CyclicLR
from torch.optim.lr_scheduler import _LRScheduler
from apex.optimizers import FusedAdam, FusedSGD
class LRStepScheduler(_LRScheduler):
def __init__(self, optimizer, steps, last_epoch=-1):
self.lr_steps = steps
super().__init__(optimizer, last_epoch)
def get_lr(self):
pos = max(bisect_right([x for x, y in self.lr_steps], self.last_epoch) - 1, 0)
return [self.lr_steps[pos][1] if self.lr_steps[pos][0] <= self.last_epoch else base_lr for base_lr in self.base_lrs]
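    # Illustrative sketch (assumption, not from the original source): `steps` is a list of
    # (epoch, lr) pairs, e.g. LRStepScheduler(optimizer, steps=[(0, 0.01), (30, 0.001)])
    # uses 0.01 up to epoch 29 and 0.001 from epoch 30 on.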
class PolyLR(_LRScheduler):
"""Sets the learning rate of each parameter group according to poly learning rate policy
"""
def __init__(self, optimizer, max_iter=90000, power=0.9, last_epoch=-1,cycle=False):
self.max_iter = max_iter
self.power = power
self.cycle = cycle
super(PolyLR, self).__init__(optimizer, last_epoch)
def get_lr(self):
self.last_epoch_div = (self.last_epoch + 1) % self.max_iter
scale = (self.last_epoch + 1) // self.max_iter + 1.0 if self.cycle else 1
return [(base_lr * ((1 - float(self.last_epoch_div) / self.max_iter) ** (self.power))) / scale for base_lr in self.base_lrs]
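    # Illustrative note: with cycle=False the schedule follows
    #   lr = base_lr * (1 - ((epoch + 1) % max_iter) / max_iter) ** power
    # decaying from base_lr towards 0 over max_iter steps.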
class ExponentialLRScheduler(_LRScheduler):
"""Decays the learning rate of each parameter group by gamma every epoch.
When last_epoch=-1, sets initial lr as lr.
Args:
optimizer (Optimizer): Wrapped optimizer.
gamma (float): Multiplicative factor of learning rate decay.
last_epoch (int): The index of last epoch. Default: -1.
"""
def __init__(self, optimizer, gamma, last_epoch=-1):
self.gamma = gamma
super(ExponentialLRScheduler, self).__init__(optimizer, last_epoch)
def get_lr(self):
if self.last_epoch <= 0:
return self.base_lrs
return [base_lr * self.gamma**self.last_epoch for base_lr in self.base_lrs]
def create_optimizer(optimizer_config, model, master_params=None):
if optimizer_config.get("classifier_lr", -1) != -1:
# Separate classifier parameters from all others
net_params = []
classifier_params = []
for k, v in model.named_parameters():
if not v.requires_grad:
continue
if k.find("encoder") != -1:
net_params.append(v)
else:
classifier_params.append(v)
params = [
{"params": net_params},
{"params": classifier_params, "lr": optimizer_config["classifier_lr"]},
]
else:
if master_params:
params = master_params
else:
params = model.parameters()
if optimizer_config["type"] == "SGD":
optimizer = optim.SGD(params,
lr=optimizer_config["learning_rate"],
momentum=optimizer_config["momentum"],
weight_decay=optimizer_config["weight_decay"],
nesterov=optimizer_config["nesterov"])
elif optimizer_config["type"] == "FusedSGD":
optimizer = FusedSGD(params,
lr=optimizer_config["learning_rate"],
momentum=optimizer_config["momentum"],
weight_decay=optimizer_config["weight_decay"],
nesterov=optimizer_config["nesterov"])
elif optimizer_config["type"] == "Adam":
optimizer = optim.Adam(params,
lr=optimizer_config["learning_rate"],
weight_decay=optimizer_config["weight_decay"])
elif optimizer_config["type"] == "FusedAdam":
optimizer = FusedAdam(params,
lr=optimizer_config["learning_rate"],
weight_decay=optimizer_config["weight_decay"])
elif optimizer_config["type"] == "AdamW":
optimizer = AdamW(params,
lr=optimizer_config["learning_rate"],
weight_decay=optimizer_config["weight_decay"])
elif optimizer_config["type"] == "RmsProp":
optimizer = RMSprop(params,
lr=optimizer_config["learning_rate"],
weight_decay=optimizer_config["weight_decay"])
else:
raise KeyError("unrecognized optimizer {}".format(optimizer_config["type"]))
if optimizer_config["schedule"]["type"] == "step":
scheduler = LRStepScheduler(optimizer, **optimizer_config["schedule"]["params"])
elif optimizer_config["schedule"]["type"] == "clr":
scheduler = CyclicLR(optimizer, **optimizer_config["schedule"]["params"])
elif optimizer_config["schedule"]["type"] == "multistep":
scheduler = MultiStepLR(optimizer, **optimizer_config["schedule"]["params"])
elif optimizer_config["schedule"]["type"] == "exponential":
scheduler = ExponentialLRScheduler(optimizer, **optimizer_config["schedule"]["params"])
elif optimizer_config["schedule"]["type"] == "poly":
scheduler = PolyLR(optimizer, **optimizer_config["schedule"]["params"])
elif optimizer_config["schedule"]["type"] == "constant":
scheduler = lr_scheduler.LambdaLR(optimizer, lambda epoch: 1.0)
elif optimizer_config["schedule"]["type"] == "linear":
def linear_lr(it):
return it * optimizer_config["schedule"]["params"]["alpha"] + optimizer_config["schedule"]["params"]["beta"]
scheduler = lr_scheduler.LambdaLR(optimizer, linear_lr)
return optimizer, scheduler
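# Illustrative sketch (hypothetical values, not part of the original file): an
# optimizer_config dict accepted by create_optimizer might look like
#   {"type": "SGD", "learning_rate": 0.01, "momentum": 0.9, "weight_decay": 1e-4,
#    "nesterov": True, "classifier_lr": 0.001,
#    "schedule": {"type": "step", "params": {"steps": [[0, 0.01], [10, 0.001]]}}}
# create_optimizer(optimizer_config, model) then returns the (optimizer, scheduler) pair.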
| 45.748031
| 132
| 0.624957
|
2e959f8ed7a4499d9c31891b4aea32055ab1ea01
| 296
|
py
|
Python
|
myFirstPiPy/myFirstPiPy/tests/test_all.py
|
StudentESE/myFirstPiP
|
b6116753f77f1f3cec9b36f73d86a008fe27c500
|
[
"BSD-2-Clause"
] | null | null | null |
myFirstPiPy/myFirstPiPy/tests/test_all.py
|
StudentESE/myFirstPiP
|
b6116753f77f1f3cec9b36f73d86a008fe27c500
|
[
"BSD-2-Clause"
] | null | null | null |
myFirstPiPy/myFirstPiPy/tests/test_all.py
|
StudentESE/myFirstPiP
|
b6116753f77f1f3cec9b36f73d86a008fe27c500
|
[
"BSD-2-Clause"
] | null | null | null |
import unittest
import __init__ as myModule
class TestMyMethod(unittest.TestCase):
def test_myMethod(self):
self.assertEqual(myModule.myMethod(), "Hello PiP")
def test_myVariable(self):
self.assertIs(myModule.myVariable, 123)
if __name__ == '__main__':
unittest.main()
| 32.888889
| 58
| 0.726351
|
c8f733c620e2eb5a11eadb130462c3f53340d87d
| 1,391
|
py
|
Python
|
part_2_read_test_json.py
|
hooplapunta/cc_tools
|
43bc430b3374f55a72c13c3bb01bd5422ec5c4aa
|
[
"MIT"
] | null | null | null |
part_2_read_test_json.py
|
hooplapunta/cc_tools
|
43bc430b3374f55a72c13c3bb01bd5422ec5c4aa
|
[
"MIT"
] | null | null | null |
part_2_read_test_json.py
|
hooplapunta/cc_tools
|
43bc430b3374f55a72c13c3bb01bd5422ec5c4aa
|
[
"MIT"
] | null | null | null |
import test_data
import json
#Creates and returns a GameLibrary object(defined in test_data) from loaded json_data
def make_game_library_from_json( json_data ):
#Initialize a new GameLibrary
game_library = test_data.GameLibrary()
### Begin Add Code Here ###
#Loop through the json_data
#Create a new Game object from the json_data by reading
# title
# year
# platform (which requires reading name and launch_year)
#Add that Game object to the game_library
### End Add Code Here ###
for game_data in json_data:
print(game_data)
platform_data = game_data["platform"]
platform = test_data.Platform(platform_data["name"], platform_data["launch_year"])
game = test_data.Game(game_data["title"], platform, game_data["year"])
game_library.add_game(game)
return game_library
#Part 2
input_json_file = "data/test_data.json"
### Begin Add Code Here ###
#Open the file specified by input_json_file
#Use the json module to load the data from the file
#Use make_game_library_from_json(json_data) to convert the data to GameLibrary data
#Print out the resulting GameLibrary data using print()
### End Add Code Here ###
with open(input_json_file, "r") as reader:
test_data_json = json.load(reader)
print(test_data_json)
lib = make_game_library_from_json(test_data_json)
print(lib)
| 31.613636
| 90
| 0.718188
|
f9ba7c323173201b928f341ba4b0774443a55b1f
| 3,712
|
py
|
Python
|
DeepLearning/pytorch/papers/EfficientDet/backbone.py
|
MikoyChinese/Learn
|
c482b1e84496279935b5bb2cfc1e6d78e2868c63
|
[
"Apache-2.0"
] | null | null | null |
DeepLearning/pytorch/papers/EfficientDet/backbone.py
|
MikoyChinese/Learn
|
c482b1e84496279935b5bb2cfc1e6d78e2868c63
|
[
"Apache-2.0"
] | null | null | null |
DeepLearning/pytorch/papers/EfficientDet/backbone.py
|
MikoyChinese/Learn
|
c482b1e84496279935b5bb2cfc1e6d78e2868c63
|
[
"Apache-2.0"
] | null | null | null |
import torch
from torch import nn
from efficientdet.model import BiFPN, Regressor, Classifier, EfficientNet
from efficientdet.utils import Anchors
class EfficientDetBackbone(nn.Module):
def __init__(self, num_classes=80, compound_coef=0, load_weights=False, **kwargs):
super(EfficientDetBackbone, self).__init__()
self.compound_coef = compound_coef
self.backbone_compound_coef = [0, 1, 2, 3, 4, 5, 6, 6, 7]
self.fpn_num_filters = [64, 88, 112, 160, 224, 288, 384, 384, 384]
self.fpn_cell_repeats = [3, 4, 5, 6, 7, 7, 8, 8, 8]
self.input_sizes = [512, 640, 768, 896, 1024, 1280, 1280, 1536, 1536]
self.box_class_repeats = [3, 3, 3, 4, 4, 4, 5, 5, 5]
self.pyramid_levels = [5, 5, 5, 5, 5, 5, 5, 5, 6]
self.anchor_scale = [4., 4., 4., 4., 4., 4., 4., 5., 4.]
self.aspect_ratios = kwargs.get('ratios', [(1.0, 1.0), (1.4, 0.7), (0.7, 1.4)])
self.num_scales = len(kwargs.get('scales', [2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)]))
conv_channel_coef = {
# the channels of P3/P4/P5.
0: [40, 112, 320],
1: [40, 112, 320],
2: [48, 120, 352],
3: [48, 136, 384],
4: [56, 160, 448],
5: [64, 176, 512],
6: [72, 200, 576],
7: [72, 200, 576],
8: [80, 224, 640],
}
num_anchors = len(self.aspect_ratios) * self.num_scales
self.bifpn = nn.Sequential(
*[BiFPN(self.fpn_num_filters[self.compound_coef],
conv_channel_coef[compound_coef],
True if _ == 0 else False,
attention=True if compound_coef < 6 else False,
use_p8=compound_coef > 7) for _ in range(self.fpn_cell_repeats[compound_coef])])
self.num_classes = num_classes
self.regressor = Regressor(in_channels=self.fpn_num_filters[self.compound_coef],
num_anchors=num_anchors,
num_layers=self.box_class_repeats[self.compound_coef],
pyramid_levels=self.pyramid_levels[self.compound_coef])
self.classifier = Classifier(in_channels=self.fpn_num_filters[self.compound_coef],
num_anchors=num_anchors,
num_classes=num_classes,
num_layers=self.box_class_repeats[self.compound_coef],
pyramid_levels=self.pyramid_levels[self.compound_coef])
self.anchors = Anchors(anchor_scale=self.anchor_scale[compound_coef],
pyramid_levels=(torch.arange(self.pyramid_levels[self.compound_coef]) + 3).tolist(),
**kwargs)
self.backbone_net = EfficientNet(self.backbone_compound_coef[compound_coef], load_weights)
def freeze_bn(self):
for m in self.modules():
if isinstance(m, nn.BatchNorm2d):
m.eval()
def forward(self, inputs):
max_size = inputs.shape[-1]
_, p3, p4, p5 = self.backbone_net(inputs)
features = (p3, p4, p5)
features = self.bifpn(features)
regression = self.regressor(features)
classification = self.classifier(features)
anchors = self.anchors(inputs, inputs.dtype)
return features, regression, classification, anchors
def init_backbone(self, path):
state_dict = torch.load(path)
try:
ret = self.load_state_dict(state_dict, strict=False)
print(ret)
except RuntimeError as e:
print('[Error]: Ignoring %s' % str(e))
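# Illustrative usage sketch (assumption, not part of the original file):
#   model = EfficientDetBackbone(num_classes=90, compound_coef=0)
#   features, regression, classification, anchors = model(torch.rand(1, 3, 512, 512))
# compound_coef=0 corresponds to the 512-pixel input size in self.input_sizes.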
| 44.722892
| 115
| 0.562769
|
d9a6d5e397e715bcf69c1e2cb9b31432e9ae2d4d
| 12,764
|
py
|
Python
|
featuretools/variable_types/variable.py
|
jeffzi/featuretools
|
cd674b3f302832cacb3ce9cc95d5ce2f5052a4d6
|
[
"BSD-3-Clause"
] | null | null | null |
featuretools/variable_types/variable.py
|
jeffzi/featuretools
|
cd674b3f302832cacb3ce9cc95d5ce2f5052a4d6
|
[
"BSD-3-Clause"
] | null | null | null |
featuretools/variable_types/variable.py
|
jeffzi/featuretools
|
cd674b3f302832cacb3ce9cc95d5ce2f5052a4d6
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
import pandas as pd
from featuretools.utils.gen_utils import find_descendents
class Variable(object):
"""Represent a variable in an entity
    A Variable is analogous to a column in a table in a relational database
Args:
id (str) : Id of variable. Must match underlying data in Entity
it belongs to.
entity (:class:`.Entity`) : Entity this variable belongs to.
name (str, optional) : Variable name. Defaults to id.
See Also:
:class:`.Entity`, :class:`.Relationship`, :class:`.BaseEntitySet`
"""
type_string = None
_default_pandas_dtype = object
def __init__(self, id, entity, name=None):
assert isinstance(id, str), "Variable id must be a string"
self.id = id
self._name = name
self.entity_id = entity.id
assert entity.entityset is not None, "Entity must contain reference to EntitySet"
self.entity = entity
self._interesting_values = pd.Series()
@property
def entityset(self):
return self.entity.entityset
def __eq__(self, other, deep=False):
shallow_eq = isinstance(other, self.__class__) and \
self.id == other.id and \
self.entity_id == other.entity_id
if not deep:
return shallow_eq
else:
return shallow_eq and set(self.interesting_values.values) == set(other.interesting_values.values)
def __hash__(self):
return hash((self.id, self.entity_id))
def __repr__(self):
return u"<Variable: {} (dtype = {})>".format(self.name, self.type_string)
@classmethod
def create_from(cls, variable):
"""Create new variable this type from existing
Args:
variable (Variable) : Existing variable to create from.
Returns:
:class:`.Variable` : new variable
"""
v = cls(id=variable.id, name=variable.name, entity=variable.entity)
return v
@property
def name(self):
return self._name if self._name is not None else self.id
@property
def dtype(self):
return self.type_string \
if self.type_string is not None else "generic_type"
@name.setter
def name(self, name):
self._name = name
@property
def interesting_values(self):
return self._interesting_values
@interesting_values.setter
def interesting_values(self, interesting_values):
self._interesting_values = pd.Series(interesting_values)
@property
def series(self):
return self.entity.df[self.id]
def to_data_description(self):
return {
'id': self.id,
'type': {
'value': self.type_string,
},
'properties': {
'name': self.name,
'entity': self.entity.id,
'interesting_values': self._interesting_values.to_json()
},
}
class Unknown(Variable):
pass
class Discrete(Variable):
"""Superclass representing variables that take on discrete values"""
type_string = "discrete"
def __init__(self, id, entity, name=None):
super(Discrete, self).__init__(id, entity, name)
self._interesting_values = pd.Series()
@property
def interesting_values(self):
return self._interesting_values
@interesting_values.setter
def interesting_values(self, values):
seen = set()
seen_add = seen.add
self._interesting_values = pd.Series([v for v in values if not
(v in seen or seen_add(v))])
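    # Illustrative note (not part of the original module): the seen/seen_add pattern above
    # de-duplicates while preserving order, e.g. [3, 1, 3, 2] becomes pd.Series([3, 1, 2]).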
class Boolean(Variable):
"""Represents variables that take on one of two values
Args:
        true_values (list) : List of values treated as true. Defaults to [1, True, "true", "True", "yes", "t", "T"]
        false_values (list): List of values treated as false. Defaults to [0, False, "false", "False", "no", "f", "F"]
"""
type_string = "boolean"
_default_pandas_dtype = bool
def __init__(self,
id,
entity,
name=None,
true_values=None,
false_values=None):
default = [1, True, "true", "True", "yes", "t", "T"]
self.true_values = true_values or default
default = [0, False, "false", "False", "no", "f", "F"]
self.false_values = false_values or default
super(Boolean, self).__init__(id, entity, name=name)
def to_data_description(self):
description = super(Boolean, self).to_data_description()
description['type'].update({
'true_values': self.true_values,
'false_values': self.false_values
})
return description
class Categorical(Discrete):
"""Represents variables that can take an unordered discrete values
Args:
categories (list) : List of categories. If left blank, inferred from data.
"""
type_string = "categorical"
def __init__(self, id, entity, name=None, categories=None):
        self.categories = categories or []
super(Categorical, self).__init__(id, entity, name=name)
def to_data_description(self):
description = super(Categorical, self).to_data_description()
description['type'].update({'categories': self.categories})
return description
class Id(Categorical):
"""Represents variables that identify another entity"""
type_string = "id"
_default_pandas_dtype = int
class Ordinal(Discrete):
"""Represents variables that take on an ordered discrete value"""
type_string = "ordinal"
_default_pandas_dtype = int
class Numeric(Variable):
"""Represents variables that contain numeric values
Args:
range (list, optional) : List of start and end. Can use inf and -inf to represent infinity. Unconstrained if not specified.
start_inclusive (bool, optional) : Whether or not range includes the start value.
end_inclusive (bool, optional) : Whether or not range includes the end value
Attributes:
max (float)
min (float)
std (float)
mean (float)
"""
type_string = "numeric"
_default_pandas_dtype = float
def __init__(self,
id,
entity,
name=None,
range=None,
start_inclusive=True,
end_inclusive=False):
        self.range = range or []
self.start_inclusive = start_inclusive
self.end_inclusive = end_inclusive
super(Numeric, self).__init__(id, entity, name=name)
def to_data_description(self):
description = super(Numeric, self).to_data_description()
description['type'].update({
'range': self.range,
'start_inclusive': self.start_inclusive,
'end_inclusive': self.end_inclusive,
})
return description
class Index(Variable):
"""Represents variables that uniquely identify an instance of an entity
Attributes:
count (int)
"""
type_string = "index"
_default_pandas_dtype = int
class Datetime(Variable):
"""Represents variables that are points in time
Args:
format (str): Python datetime format string documented `here <http://strftime.org/>`_.
"""
type_string = "datetime"
_default_pandas_dtype = np.datetime64
def __init__(self, id, entity, name=None, format=None):
self.format = format
super(Datetime, self).__init__(id, entity, name=name)
def __repr__(self):
return u"<Variable: {} (dtype: {}, format: {})>".format(self.name, self.type_string, self.format)
def to_data_description(self):
description = super(Datetime, self).to_data_description()
description['type'].update({'format': self.format})
return description
class TimeIndex(Variable):
"""Represents time index of entity"""
type_string = "time_index"
_default_pandas_dtype = np.datetime64
class NumericTimeIndex(TimeIndex, Numeric):
"""Represents time index of entity that is numeric"""
type_string = "numeric_time_index"
_default_pandas_dtype = float
class DatetimeTimeIndex(TimeIndex, Datetime):
"""Represents time index of entity that is a datetime"""
type_string = "datetime_time_index"
_default_pandas_dtype = np.datetime64
class Timedelta(Variable):
"""Represents variables that are timedeltas
Args:
range (list, optional) : List of start and end of allowed range in seconds. Can use inf and -inf to represent infinity. Unconstrained if not specified.
start_inclusive (bool, optional) : Whether or not range includes the start value.
end_inclusive (bool, optional) : Whether or not range includes the end value
"""
type_string = "timedelta"
_default_pandas_dtype = np.timedelta64
def __init__(self,
id,
entity,
name=None,
range=None,
start_inclusive=True,
end_inclusive=False):
self.range = range or []
self.start_inclusive = start_inclusive
self.end_inclusive = end_inclusive
super(Timedelta, self).__init__(id, entity, name=name)
def to_data_description(self):
description = super(Timedelta, self).to_data_description()
description['type'].update({
'range': self.range,
'start_inclusive': self.start_inclusive,
'end_inclusive': self.end_inclusive,
})
return description
class Text(Variable):
"""Represents variables that are arbitary strings"""
type_string = "text"
_default_pandas_dtype = str
class PandasTypes(object):
_all = 'all'
_categorical = 'category'
_pandas_datetimes = ['datetime64[ns]', 'datetime64[ns, tz]']
_pandas_timedeltas = ['Timedelta']
_pandas_numerics = ['int16', 'int32', 'int64',
'float16', 'float32', 'float64']
class LatLong(Variable):
"""Represents an ordered pair (Latitude, Longitude)
To make a latlong in a dataframe do
data['latlong'] = data[['latitude', 'longitude']].apply(tuple, axis=1)
"""
type_string = "latlong"
class ZIPCode(Categorical):
"""Represents a postal address in the United States.
Consists of a series of digits which are casts as
string. Five digit and 9 digit zipcodes are supported.
"""
type_string = "zipcode"
_default_pandas_dtype = str
class IPAddress(Variable):
"""Represents a computer network address. Represented
in dotted-decimal notation. IPv4 and IPv6 are supported.
"""
type_string = "ip"
_default_pandas_dtype = str
class FullName(Variable):
"""Represents a person's full name. May consist of a
first name, last name, and a title.
"""
type_string = "full_name"
_default_pandas_dtype = str
class EmailAddress(Variable):
"""Represents an email box to which email message are sent.
Consists of a local-part, an @ symbol, and a domain.
"""
type_string = "email"
_default_pandas_dtype = str
class URL(Variable):
"""Represents a valid web url (with or without http/www)"""
type_string = "url"
_default_pandas_dtype = str
class PhoneNumber(Variable):
"""Represents any valid phone number.
Can be with/without parenthesis.
Can be with/without area/country codes.
"""
type_string = "phone_number"
_default_pandas_dtype = str
class DateOfBirth(Datetime):
"""Represents a date of birth as a datetime"""
type_string = "date_of_birth"
_default_pandas_dtype = np.datetime64
class CountryCode(Categorical):
"""Represents an ISO-3166 standard country code.
ISO 3166-1 (countries) are supported. These codes
should be in the Alpha-2 format.
e.g. United States of America = US
"""
type_string = "country_code"
_default_pandas_dtype = str
class SubRegionCode(Categorical):
"""Represents an ISO-3166 standard sub-region code.
    ISO 3166-2 codes (sub-regions) are supported. These codes
should be in the Alpha-2 format.
e.g. United States of America, Arizona = US-AZ
"""
type_string = "subregion_code"
_default_pandas_dtype = str
class FilePath(Variable):
"""Represents a valid filepath, absolute or relative"""
type_string = "filepath"
_default_pandas_dtype = str
def find_variable_types():
return {str(vtype.type_string): vtype for vtype in find_descendents(
Variable) if hasattr(vtype, 'type_string')}
DEFAULT_DTYPE_VALUES = {
np.datetime64: pd.Timestamp.now(),
int: 0,
float: 0.1,
np.timedelta64: pd.Timedelta('1d'),
object: 'object',
bool: True,
str: 'test'
}
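# Illustrative note (not part of the original module): find_variable_types() maps each
# type_string to its class, so find_variable_types()['categorical'] is the Categorical
# class defined above, and DEFAULT_DTYPE_VALUES supplies a sample value per pandas dtype.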
| 29.752914
| 159
| 0.63875
|
bcf2535a3b2410e7b659082114e1fa965f50d07d
| 1,749
|
py
|
Python
|
tests/unit/resources/networking/test_interconnect_types.py
|
PragadeeswaranS/oneview-python
|
3acc113b8dd30029beb7c228c3bc2bbe67d3485b
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/resources/networking/test_interconnect_types.py
|
PragadeeswaranS/oneview-python
|
3acc113b8dd30029beb7c228c3bc2bbe67d3485b
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/resources/networking/test_interconnect_types.py
|
PragadeeswaranS/oneview-python
|
3acc113b8dd30029beb7c228c3bc2bbe67d3485b
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
###
# (C) Copyright [2019] Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
import unittest
import mock
from hpOneView.connection import connection
from hpOneView.resources.networking.interconnect_types import InterconnectTypes
from hpOneView.resources.resource import Resource, ResourceHelper
class InterconnectTypesTest(unittest.TestCase):
def setUp(self):
self.host = '127.0.0.1'
self.connection = connection(self.host)
self._interconnect_types = InterconnectTypes(self.connection)
@mock.patch.object(ResourceHelper, 'do_requests_to_getall')
def test_get_all_called_once(self, mock_do_get):
filter = 'name=TestName'
sort = 'name:ascending'
self._interconnect_types.get_all(2, 500, filter=filter, sort=sort)
mock_do_get.assert_called_once_with('/rest/interconnect-types?start=2&count=500&filter=name%3DTestName&sort=name%3Aascending', 500)
@mock.patch.object(Resource, 'get_by')
def test_get_by_called_once(self, mock_get_by):
self._interconnect_types.get_by_name('HP VC Flex-10 Enet Module')
mock_get_by.assert_called_once_with(
'name', 'HP VC Flex-10 Enet Module')
| 35.693878
| 139
| 0.74271
|
000d7adbe57df358025e2c9b6c3390e117ef0dbd
| 2,466
|
py
|
Python
|
huawei_lte/errors.py
|
bfg100k/HuaweiB525Router
|
dd2c6b2954b1e48f571e6024c2c5fb29e90ef0b1
|
[
"MIT"
] | 64
|
2019-02-05T23:31:57.000Z
|
2022-02-13T22:59:31.000Z
|
huawei_lte/errors.py
|
bfg100k/HuaweiB525Router
|
dd2c6b2954b1e48f571e6024c2c5fb29e90ef0b1
|
[
"MIT"
] | 14
|
2019-07-13T22:43:21.000Z
|
2021-01-04T22:46:20.000Z
|
huawei_lte/errors.py
|
bfg100k/HuaweiB525Router
|
dd2c6b2954b1e48f571e6024c2c5fb29e90ef0b1
|
[
"MIT"
] | 20
|
2019-05-09T12:48:16.000Z
|
2022-01-12T10:55:29.000Z
|
import huawei_lte.xmlobjects as xmlobjects
class RouterError(Exception):
__ERRORS = [
[2000, 'Python API: %s - %s'],
        [100001, 'An unknown error occurred'],
[100002, 'No such URL. The router does not support this function'],
[100003, 'You have no rights to access this function'],
[100004, 'The system is busy'],
        [100005, 'Format error'], #TODO: This is not very descriptive, XML format?
[100006, 'Parameter error'], #TODO: Assume invalid attribute, or missing attribute?
[100007, 'Save config file error'],
[100008, 'Get config file error'],
[101001, 'No SIM card, or invalid SIM card'],
[101002, 'Check SIM card PIN lock'],
        [101003, 'Check SIM card PUK lock'],
[101004, 'Check SIM card is usable'],
[101005, 'Enable PIN failed'],
[101006, 'Disable PIN failed'],
[101007, 'Unlock PIN failed'],
[101008, 'Disable Auto PIN failed'],
[101009, 'Enable Auto PIN failed'],
[108001, 'The username is wrong'],
[108002, 'The password is wrong'],
[108003, 'The user is already logged in'],
[108004, 'Modify password failed'],
[108005, 'Too many users logged in'],
[108006, 'The username and/or password are wrong'],
[108007, 'Logging in too many times'],
[108010, 'Access denied, logins are too frequent'],
[118001, 'Cradle get current connected user IP failed'],
[118002, 'Cradle get current connected user MAC failed'],
[118003, 'Cradle set MAC failed'],
[118004, 'Cradle get WAN information failed'],
[118005, 'Cradle coding failure'],
[118006, 'Cradle update profile failed'],
[120001, 'Voice is busy'],
[125001, 'Invalid authentication token'],
[125002, 'Invalid session'],
[125003, 'Invalid session token']
#TODO: Add 9003 occurring when setting static ip addresses
]
@classmethod
def hasError(cls, xml): return '<error>' in xml
@classmethod
def getErrorMessage(cls, code):
code = int(code)
return next((err[1] for err in cls.__ERRORS if err[0] == code), 'An unknown error occurred')
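    # Illustrative note (not part of the original file): RouterError.getErrorMessage(100002)
    # returns 'No such URL. The router does not support this function', and any code not in
    # __ERRORS falls back to the generic "unknown error" message.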
def __init__(self, response):
error = xmlobjects.Error()
error.parseXML(response)
self.code = error.code
self.message = error.message
super(RouterError, self).__init__(self.code +": "+self.message)
| 39.142857
| 100
| 0.607461
|
0b3e3c8e7b8d7d624827e0aff2c268649440b0fb
| 8,526
|
py
|
Python
|
api/tacticalrmm/tacticalrmm/constants.py
|
v2cloud/tacticalrmm
|
12f599f9749985f66ff9b559c5e5abd36064b182
|
[
"MIT"
] | null | null | null |
api/tacticalrmm/tacticalrmm/constants.py
|
v2cloud/tacticalrmm
|
12f599f9749985f66ff9b559c5e5abd36064b182
|
[
"MIT"
] | null | null | null |
api/tacticalrmm/tacticalrmm/constants.py
|
v2cloud/tacticalrmm
|
12f599f9749985f66ff9b559c5e5abd36064b182
|
[
"MIT"
] | null | null | null |
from enum import Enum
from django.db import models
class MeshAgentIdent(Enum):
WIN32 = 3
WIN64 = 4
LINUX32 = 5
LINUX64 = 6
LINUX_ARM_64 = 26
LINUX_ARM_HF = 25
def __str__(self):
return str(self.value)
CORESETTINGS_CACHE_KEY = "core_settings"
ROLE_CACHE_PREFIX = "role_"
class ScriptShell(models.TextChoices):
POWERSHELL = "powershell", "Powershell"
CMD = "cmd", "Batch (CMD)"
PYTHON = "python", "Python"
SHELL = "shell", "Shell"
class ScriptType(models.TextChoices):
USER_DEFINED = "userdefined", "User Defined"
BUILT_IN = "builtin", "Built In"
class EvtLogNames(models.TextChoices):
APPLICATION = "Application", "Application"
SYSTEM = "System", "System"
SECURITY = "Security", "Security"
class EvtLogTypes(models.TextChoices):
INFO = "INFO", "Information"
WARNING = "WARNING", "Warning"
ERROR = "ERROR", "Error"
AUDIT_SUCCESS = "AUDIT_SUCCESS", "Success Audit"
AUDIT_FAILURE = "AUDIT_FAILURE", "Failure Audit"
class EvtLogFailWhen(models.TextChoices):
CONTAINS = "contains", "Log contains"
NOT_CONTAINS = "not_contains", "Log does not contain"
class CheckStatus(models.TextChoices):
PASSING = "passing", "Passing"
FAILING = "failing", "Failing"
PENDING = "pending", "Pending"
class PAStatus(models.TextChoices):
PENDING = "pending", "Pending"
COMPLETED = "completed", "Completed"
class PAAction(models.TextChoices):
SCHED_REBOOT = "schedreboot", "Scheduled Reboot"
AGENT_UPDATE = "agentupdate", "Agent Update"
CHOCO_INSTALL = "chocoinstall", "Chocolatey Software Install"
RUN_CMD = "runcmd", "Run Command"
RUN_SCRIPT = "runscript", "Run Script"
RUN_PATCH_SCAN = "runpatchscan", "Run Patch Scan"
RUN_PATCH_INSTALL = "runpatchinstall", "Run Patch Install"
class CheckType(models.TextChoices):
DISK_SPACE = "diskspace", "Disk Space Check"
PING = "ping", "Ping Check"
CPU_LOAD = "cpuload", "CPU Load Check"
MEMORY = "memory", "Memory Check"
WINSVC = "winsvc", "Service Check"
SCRIPT = "script", "Script Check"
EVENT_LOG = "eventlog", "Event Log Check"
class AuditActionType(models.TextChoices):
LOGIN = "login", "User Login"
FAILED_LOGIN = "failed_login", "Failed User Login"
DELETE = "delete", "Delete Object"
MODIFY = "modify", "Modify Object"
ADD = "add", "Add Object"
VIEW = "view", "View Object"
CHECK_RUN = "check_run", "Check Run"
TASK_RUN = "task_run", "Task Run"
AGENT_INSTALL = "agent_install", "Agent Install"
REMOTE_SESSION = "remote_session", "Remote Session"
EXEC_SCRIPT = "execute_script", "Execute Script"
EXEC_COMMAND = "execute_command", "Execute Command"
BULK_ACTION = "bulk_action", "Bulk Action"
URL_ACTION = "url_action", "URL Action"
class AuditObjType(models.TextChoices):
USER = "user", "User"
SCRIPT = "script", "Script"
AGENT = "agent", "Agent"
POLICY = "policy", "Policy"
WINUPDATE = "winupdatepolicy", "Patch Policy"
CLIENT = "client", "Client"
SITE = "site", "Site"
CHECK = "check", "Check"
AUTOTASK = "automatedtask", "Automated Task"
CORE = "coresettings", "Core Settings"
BULK = "bulk", "Bulk"
ALERT_TEMPLATE = "alerttemplate", "Alert Template"
ROLE = "role", "Role"
URL_ACTION = "urlaction", "URL Action"
KEYSTORE = "keystore", "Global Key Store"
CUSTOM_FIELD = "customfield", "Custom Field"
class DebugLogLevel(models.TextChoices):
INFO = "info", "Info"
WARN = "warning", "Warning"
ERROR = "error", "Error"
CRITICAL = "critical", "Critical"
class DebugLogType(models.TextChoices):
AGENT_UPDATE = "agent_update", "Agent Update"
AGENT_ISSUES = "agent_issues", "Agent Issues"
WIN_UPDATES = "win_updates", "Windows Updates"
SYSTEM_ISSUES = "system_issues", "System Issues"
SCRIPTING = "scripting", "Scripting"
# Agent db fields that are not needed for most queries; deferring them speeds up queries
AGENT_DEFER = (
"wmi_detail",
"services",
"created_by",
"created_time",
"modified_by",
"modified_time",
)
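# Illustrative sketch (assumption about intended usage, not from the original file): these
# field names are meant to be passed to a Django queryset, e.g.
#   Agent.objects.defer(*AGENT_DEFER)
# so the heavy columns are not loaded for list-style queries.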
ONLINE_AGENTS = (
"pk",
"agent_id",
"last_seen",
"overdue_time",
"offline_time",
"version",
)
FIELDS_TRIGGER_TASK_UPDATE_AGENT = [
"run_time_bit_weekdays",
"run_time_date",
"expire_date",
"daily_interval",
"weekly_interval",
"enabled",
"remove_if_not_scheduled",
"run_asap_after_missed",
"monthly_days_of_month",
"monthly_months_of_year",
"monthly_weeks_of_month",
"task_repetition_duration",
"task_repetition_interval",
"stop_task_at_duration_end",
"random_task_delay",
"run_asap_after_missed",
"task_instance_policy",
]
POLICY_TASK_FIELDS_TO_COPY = [
"alert_severity",
"email_alert",
"text_alert",
"dashboard_alert",
"name",
"actions",
"run_time_bit_weekdays",
"run_time_date",
"expire_date",
"daily_interval",
"weekly_interval",
"task_type",
"enabled",
"remove_if_not_scheduled",
"run_asap_after_missed",
"custom_field",
"collector_all_output",
"monthly_days_of_month",
"monthly_months_of_year",
"monthly_weeks_of_month",
"task_repetition_duration",
"task_repetition_interval",
"stop_task_at_duration_end",
"random_task_delay",
"run_asap_after_missed",
"task_instance_policy",
"continue_on_error",
]
CHECKS_NON_EDITABLE_FIELDS = [
"check_type",
"overridden_by_policy",
"created_by",
"created_time",
"modified_by",
"modified_time",
]
POLICY_CHECK_FIELDS_TO_COPY = [
"warning_threshold",
"error_threshold",
"alert_severity",
"name",
"run_interval",
"disk",
"fails_b4_alert",
"ip",
"script",
"script_args",
"info_return_codes",
"warning_return_codes",
"timeout",
"svc_name",
"svc_display_name",
"svc_policy_mode",
"pass_if_start_pending",
"pass_if_svc_not_exist",
"restart_if_stopped",
"log_name",
"event_id",
"event_id_is_wildcard",
"event_type",
"event_source",
"event_message",
"fail_when",
"search_last_days",
"number_of_events_b4_alert",
"email_alert",
"text_alert",
"dashboard_alert",
]
WEEK_DAYS = {
"Sunday": 0x1,
"Monday": 0x2,
"Tuesday": 0x4,
"Wednesday": 0x8,
"Thursday": 0x10,
"Friday": 0x20,
"Saturday": 0x40,
}
MONTHS = {
"January": 0x1,
"February": 0x2,
"March": 0x4,
"April": 0x8,
"May": 0x10,
"June": 0x20,
"July": 0x40,
"August": 0x80,
"September": 0x100,
"October": 0x200,
"November": 0x400,
"December": 0x800,
}
WEEKS = {
"First Week": 0x1,
"Second Week": 0x2,
"Third Week": 0x4,
"Fourth Week": 0x8,
"Last Week": 0x10,
}
MONTH_DAYS = {f"{b}": 0x1 << a for a, b in enumerate(range(1, 32))}
MONTH_DAYS["Last Day"] = 0x80000000
DEMO_NOT_ALLOWED = [
{"name": "AgentProcesses", "methods": ["DELETE"]},
{"name": "AgentMeshCentral", "methods": ["GET", "POST"]},
{"name": "update_agents", "methods": ["POST"]},
{"name": "send_raw_cmd", "methods": ["POST"]},
{"name": "install_agent", "methods": ["POST"]},
{"name": "GenerateAgent", "methods": ["GET"]},
{"name": "email_test", "methods": ["POST"]},
{"name": "server_maintenance", "methods": ["POST"]},
{"name": "CodeSign", "methods": ["PATCH", "POST"]},
{"name": "TwilioSMSTest", "methods": ["POST"]},
{"name": "GetEditActionService", "methods": ["PUT", "POST"]},
{"name": "TestScript", "methods": ["POST"]},
{"name": "GetUpdateDeleteAgent", "methods": ["DELETE"]},
{"name": "Reboot", "methods": ["POST", "PATCH"]},
{"name": "recover", "methods": ["POST"]},
{"name": "run_script", "methods": ["POST"]},
{"name": "bulk", "methods": ["POST"]},
{"name": "WMI", "methods": ["POST"]},
{"name": "PolicyAutoTask", "methods": ["POST"]},
{"name": "RunAutoTask", "methods": ["POST"]},
{"name": "run_checks", "methods": ["POST"]},
{"name": "GetSoftware", "methods": ["POST", "PUT"]},
{"name": "ScanWindowsUpdates", "methods": ["POST"]},
{"name": "InstallWindowsUpdates", "methods": ["POST"]},
{"name": "PendingActions", "methods": ["DELETE"]},
{"name": "clear_cache", "methods": ["GET"]},
]
LINUX_NOT_IMPLEMENTED = [
{"name": "ScanWindowsUpdates", "methods": ["POST"]},
{"name": "GetSoftware", "methods": ["POST", "PUT"]},
{"name": "Reboot", "methods": ["PATCH"]}, # TODO implement reboot later
]
| 26.811321
| 76
| 0.629369
|
f5bff68874a788009cfe6f66ced9081140bda556
| 9,783
|
py
|
Python
|
markdown_it/rules_block/list.py
|
tadeu/markdown-it-py
|
53d9b3379494dc7f4ef53eaf4726399aac875215
|
[
"MIT"
] | 285
|
2020-04-30T02:45:16.000Z
|
2022-03-30T03:25:44.000Z
|
markdown_it/rules_block/list.py
|
tadeu/markdown-it-py
|
53d9b3379494dc7f4ef53eaf4726399aac875215
|
[
"MIT"
] | 181
|
2020-04-30T21:31:24.000Z
|
2022-03-22T12:20:35.000Z
|
markdown_it/rules_block/list.py
|
tadeu/markdown-it-py
|
53d9b3379494dc7f4ef53eaf4726399aac875215
|
[
"MIT"
] | 37
|
2020-05-04T22:30:43.000Z
|
2022-03-19T22:54:17.000Z
|
# Lists
import logging
from .state_block import StateBlock
from ..common.utils import isSpace
LOGGER = logging.getLogger(__name__)
# Search `[-+*][\n ]`, returns next pos after marker on success
# or -1 on fail.
def skipBulletListMarker(state: StateBlock, startLine: int):
pos = state.bMarks[startLine] + state.tShift[startLine]
maximum = state.eMarks[startLine]
marker = state.srcCharCode[pos]
pos += 1
# Check bullet /* * */ /* - */ /* + */
if marker != 0x2A and marker != 0x2D and marker != 0x2B:
return -1
if pos < maximum:
ch = state.srcCharCode[pos]
if not isSpace(ch):
# " -test " - is not a list item
return -1
return pos
# Search `\d+[.)][\n ]`, returns next pos after marker on success
# or -1 on fail.
def skipOrderedListMarker(state: StateBlock, startLine: int):
start = state.bMarks[startLine] + state.tShift[startLine]
pos = start
maximum = state.eMarks[startLine]
# List marker should have at least 2 chars (digit + dot)
if pos + 1 >= maximum:
return -1
ch = state.srcCharCode[pos]
pos += 1
# /* 0 */ /* 9 */
if ch < 0x30 or ch > 0x39:
return -1
while True:
# EOL -> fail
if pos >= maximum:
return -1
ch = state.srcCharCode[pos]
pos += 1
# /* 0 */ /* 9 */
if ch >= 0x30 and ch <= 0x39:
# List marker should have no more than 9 digits
# (prevents integer overflow in browsers)
if pos - start >= 10:
return -1
continue
# found valid marker: /* ) */ /* . */
if ch == 0x29 or ch == 0x2E:
break
return -1
if pos < maximum:
ch = state.srcCharCode[pos]
if not isSpace(ch):
# " 1.test " - is not a list item
return -1
return pos
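# Illustrative note (not part of the original port): skipBulletListMarker returns the
# position just past "-", "+" or "*" when the marker is followed by whitespace or end of
# line, and skipOrderedListMarker accepts markers such as "1." or "23)" with at most
# 9 digits; both return -1 when the line is not a list item.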
def markTightParagraphs(state: StateBlock, idx: int):
level = state.level + 2
i = idx + 2
length = len(state.tokens) - 2
while i < length:
if state.tokens[i].level == level and state.tokens[i].type == "paragraph_open":
state.tokens[i + 2].hidden = True
state.tokens[i].hidden = True
i += 2
i += 1
def list_block(state: StateBlock, startLine: int, endLine: int, silent: bool):
LOGGER.debug("entering list: %s, %s, %s, %s", state, startLine, endLine, silent)
isTerminatingParagraph = False
tight = True
# if it's indented more than 3 spaces, it should be a code block
if state.sCount[startLine] - state.blkIndent >= 4:
return False
# Special case:
# - item 1
# - item 2
# - item 3
# - item 4
# - this one is a paragraph continuation
if (
state.listIndent >= 0
and state.sCount[startLine] - state.listIndent >= 4
and state.sCount[startLine] < state.blkIndent
):
return False
# limit conditions when list can interrupt
# a paragraph (validation mode only)
if silent and state.parentType == "paragraph":
# Next list item should still terminate previous list item
#
# This code can fail if plugins use blkIndent as well as lists,
# but I hope the spec gets fixed long before that happens.
#
if state.tShift[startLine] >= state.blkIndent:
isTerminatingParagraph = True
# Detect list type and position after marker
posAfterMarker = skipOrderedListMarker(state, startLine)
if posAfterMarker >= 0:
isOrdered = True
start = state.bMarks[startLine] + state.tShift[startLine]
markerValue = int(state.src[start : posAfterMarker - 1])
# If we're starting a new ordered list right after
# a paragraph, it should start with 1.
if isTerminatingParagraph and markerValue != 1:
return False
else:
posAfterMarker = skipBulletListMarker(state, startLine)
if posAfterMarker >= 0:
isOrdered = False
else:
return False
# If we're starting a new unordered list right after
# a paragraph, first line should not be empty.
if isTerminatingParagraph:
if state.skipSpaces(posAfterMarker) >= state.eMarks[startLine]:
return False
# We should terminate list on style change. Remember first one to compare.
markerCharCode = state.srcCharCode[posAfterMarker - 1]
# For validation mode we can terminate immediately
if silent:
return True
# Start list
listTokIdx = len(state.tokens)
if isOrdered:
token = state.push("ordered_list_open", "ol", 1)
if markerValue != 1:
token.attrs = {"start": markerValue}
else:
token = state.push("bullet_list_open", "ul", 1)
token.map = listLines = [startLine, 0]
token.markup = chr(markerCharCode)
#
# Iterate list items
#
nextLine = startLine
prevEmptyEnd = False
terminatorRules = state.md.block.ruler.getRules("list")
oldParentType = state.parentType
state.parentType = "list"
while nextLine < endLine:
pos = posAfterMarker
maximum = state.eMarks[nextLine]
initial = offset = (
state.sCount[nextLine]
+ posAfterMarker
- (state.bMarks[startLine] + state.tShift[startLine])
)
while pos < maximum:
ch = state.srcCharCode[pos]
if ch == 0x09:
offset += 4 - (offset + state.bsCount[nextLine]) % 4
elif ch == 0x20:
offset += 1
else:
break
pos += 1
contentStart = pos
if contentStart >= maximum:
# trimming space in "- \n 3" case, indent is 1 here
indentAfterMarker = 1
else:
indentAfterMarker = offset - initial
# If we have more than 4 spaces, the indent is 1
# (the rest is just indented code block)
if indentAfterMarker > 4:
indentAfterMarker = 1
# " - test"
# ^^^^^ - calculating total length of this thing
indent = initial + indentAfterMarker
# Run subparser & write tokens
token = state.push("list_item_open", "li", 1)
token.markup = chr(markerCharCode)
token.map = itemLines = [startLine, 0]
# change current state, then restore it after parser subcall
oldTight = state.tight
oldTShift = state.tShift[startLine]
oldSCount = state.sCount[startLine]
# - example list
# ^ listIndent position will be here
# ^ blkIndent position will be here
#
oldListIndent = state.listIndent
state.listIndent = state.blkIndent
state.blkIndent = indent
state.tight = True
state.tShift[startLine] = contentStart - state.bMarks[startLine]
state.sCount[startLine] = offset
if contentStart >= maximum and state.isEmpty(startLine + 1):
# workaround for this case
# (list item is empty, list terminates before "foo"):
# ~~~~~~~~
# -
#
# foo
# ~~~~~~~~
state.line = min(state.line + 2, endLine)
else:
# NOTE in list.js this was:
# state.md.block.tokenize(state, startLine, endLine, True)
            # but tokenize does not take the final parameter
state.md.block.tokenize(state, startLine, endLine)
        # If any list item is loose, mark the whole list as loose
if (not state.tight) or prevEmptyEnd:
tight = False
        # An item becomes loose if it ends with an empty line,
        # but we should skip the last element, because it only marks the end of the list
prevEmptyEnd = (state.line - startLine) > 1 and state.isEmpty(state.line - 1)
state.blkIndent = state.listIndent
state.listIndent = oldListIndent
state.tShift[startLine] = oldTShift
state.sCount[startLine] = oldSCount
state.tight = oldTight
token = state.push("list_item_close", "li", -1)
token.markup = chr(markerCharCode)
nextLine = startLine = state.line
itemLines[1] = nextLine
if nextLine >= endLine:
break
contentStart = state.bMarks[startLine]
#
# Try to check if list is terminated or continued.
#
if state.sCount[nextLine] < state.blkIndent:
break
# if it's indented more than 3 spaces, it should be a code block
if state.sCount[startLine] - state.blkIndent >= 4:
break
# fail if terminating block found
terminate = False
for terminatorRule in terminatorRules:
if terminatorRule(state, nextLine, endLine, True):
terminate = True
break
if terminate:
break
# fail if list has another type
if isOrdered:
posAfterMarker = skipOrderedListMarker(state, nextLine)
if posAfterMarker < 0:
break
else:
posAfterMarker = skipBulletListMarker(state, nextLine)
if posAfterMarker < 0:
break
if markerCharCode != state.srcCharCode[posAfterMarker - 1]:
break
# Finalize list
if isOrdered:
token = state.push("ordered_list_close", "ol", -1)
else:
token = state.push("bullet_list_close", "ul", -1)
token.markup = chr(markerCharCode)
listLines[1] = nextLine
state.line = nextLine
state.parentType = oldParentType
# mark paragraphs tight if needed
if tight:
markTightParagraphs(state, listTokIdx)
return True
| 28.605263
| 87
| 0.58019
|
278d9602b03ec5b23a68f59190a5f44c74df243c
| 1,572
|
py
|
Python
|
include/matrix/python/console_formatter.py
|
lanfis/Walker
|
db174fcc0e672da7c3113571a8ce772ad2e1fac5
|
[
"MIT"
] | null | null | null |
include/matrix/python/console_formatter.py
|
lanfis/Walker
|
db174fcc0e672da7c3113571a8ce772ad2e1fac5
|
[
"MIT"
] | null | null | null |
include/matrix/python/console_formatter.py
|
lanfis/Walker
|
db174fcc0e672da7c3113571a8ce772ad2e1fac5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# license removed for brevity
import sys
class Console_Formatter:
    def INFO(self, msg, node_name=None):
        # Fall back to the node name given at construction time.
        name = node_name if node_name is not None else self.node_name
        prefix = "" if not name else self.GREEN + "[{}]".format(name)
        return prefix + self.NO_COLOR + msg + self.NO_COLOR
    def DEBUG(self, msg, node_name=None):
        name = node_name if node_name is not None else self.node_name
        prefix = "" if not name else self.BLUE + "[{}]".format(name)
        return prefix + self.NO_COLOR + msg + self.NO_COLOR
    def WARN(self, msg, node_name=None):
        name = node_name if node_name is not None else self.node_name
        prefix = "" if not name else self.YELLOW + "[{}]".format(name)
        return prefix + self.YELLOW + msg + self.NO_COLOR
    def ERR(self, msg, node_name=None):
        name = node_name if node_name is not None else self.node_name
        prefix = "" if not name else self.RED + "[{}]".format(name)
        return prefix + self.RED + msg + self.NO_COLOR
    def FATAL(self, msg, node_name=None):
        name = node_name if node_name is not None else self.node_name
        prefix = "" if not name else self.RED + "[{}]".format(name)
        return prefix + self.RED + msg + self.NO_COLOR
def __init__(self, node_name=None):
self.node_name = node_name
self.NO_COLOR = "\033[0m"
self.BLACK = "\033[30m"
self.RED = "\033[31m"
self.GREEN = "\033[32m"
self.YELLOW = "\033[33m"
self.BLUE = "\033[34m"
self.MAGENTA = "\033[35m"
self.CYAN = "\033[36m"
self.LIGHTGRAY = "\033[37m"
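# Illustrative usage sketch (added for clarity, not part of the original module);
# the node name "walker" below is just an example value.
if __name__ == '__main__':
    fmt = Console_Formatter(node_name="walker")
    print(fmt.INFO("node started"))
    print(fmt.WARN("battery low"))
    print(fmt.ERR("sensor timeout"))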
| 36.55814
| 85
| 0.540712
|
905000858d4e2d7bbf8d8b5f01fe46157656448c
| 5,455
|
py
|
Python
|
cnc/hal_virtual.py
|
whisperoftheshot/PyCNC
|
41cb8c11c66bf21d168dc7f2756ade4a8514a139
|
[
"MIT"
] | null | null | null |
cnc/hal_virtual.py
|
whisperoftheshot/PyCNC
|
41cb8c11c66bf21d168dc7f2756ade4a8514a139
|
[
"MIT"
] | null | null | null |
cnc/hal_virtual.py
|
whisperoftheshot/PyCNC
|
41cb8c11c66bf21d168dc7f2756ade4a8514a139
|
[
"MIT"
] | null | null | null |
from __future__ import division
import time
from cnc.pulses import *
from cnc.config import *
""" This is virtual device class which is very useful for debugging.
It checks PulseGenerator with some tests.
"""
def init():
""" Initialize GPIO pins and machine itself.
"""
logging.info("initialize hal")
def spindle_control(percent):
""" Spindle control implementation 0..100.
:param percent: Spindle speed in percent.
"""
logging.info("spindle control: {}%".format(percent))
def disable_steppers():
""" Disable all steppers until any movement occurs.
"""
logging.info("hal disable steppers")
def calibrate(x, y, z):
""" Move head to home position till end stop switch will be triggered.
Do not return till all procedures are completed.
:param x: boolean, True to calibrate X axis.
:param y: boolean, True to calibrate Y axis.
:param z: boolean, True to calibrate Z axis.
:return: boolean, True if all specified end stops were triggered.
"""
logging.info("hal calibrate, x={}, y={}, z={}".format(x, y, z))
return True
# noinspection PyUnusedLocal
def move(generator):
""" Move head to specified position.
:param generator: PulseGenerator object.
"""
delta = generator.delta()
ix = iy = iz = ie = 0
lx, ly, lz, le = None, None, None, None
dx, dy, dz, de = 0, 0, 0, 0
mx, my, mz, me = 0, 0, 0, 0
cx, cy, cz, ce = 0, 0, 0, 0
direction_x, direction_y, direction_z, direction_e = 1, 1, 1, 1
st = time.time()
direction_found = False
for direction, tx, ty, tz, te in generator:
if direction:
direction_found = True
direction_x, direction_y, direction_z, direction_e = tx, ty, tz, te
if STEPPER_INVERTED_X:
direction_x = -direction_x
if STEPPER_INVERTED_Y:
direction_y = -direction_y
if STEPPER_INVERTED_Z:
direction_z = -direction_z
if STEPPER_INVERTED_E:
direction_e = -direction_e
if isinstance(generator, PulseGeneratorLinear):
assert ((direction_x < 0 and delta.x < 0)
or (direction_x > 0 and delta.x > 0) or delta.x == 0)
assert ((direction_y < 0 and delta.y < 0)
or (direction_y > 0 and delta.y > 0) or delta.y == 0)
assert ((direction_z < 0 and delta.z < 0)
or (direction_z > 0 and delta.z > 0) or delta.z == 0)
assert ((direction_e < 0 and delta.e < 0)
or (direction_e > 0 and delta.e > 0) or delta.e == 0)
continue
if tx is not None:
if tx > mx:
mx = tx
tx = int(round(tx * 1000000))
ix += direction_x
cx += 1
if lx is not None:
dx = tx - lx
assert dx > 0, "negative or zero time delta detected for x"
lx = tx
else:
dx = None
if ty is not None:
if ty > my:
my = ty
ty = int(round(ty * 1000000))
iy += direction_y
cy += 1
if ly is not None:
dy = ty - ly
assert dy > 0, "negative or zero time delta detected for y"
ly = ty
else:
dy = None
if tz is not None:
if tz > mz:
mz = tz
tz = int(round(tz * 1000000))
iz += direction_z
cz += 1
if lz is not None:
dz = tz - lz
assert dz > 0, "negative or zero time delta detected for z"
lz = tz
else:
dz = None
if te is not None:
if te > me:
me = te
te = int(round(te * 1000000))
ie += direction_e
ce += 1
if le is not None:
de = te - le
assert de > 0, "negative or zero time delta detected for e"
le = te
else:
de = None
# very verbose, uncomment on demand
# logging.debug("Iteration {} is {} {} {} {}".
# format(max(ix, iy, iz, ie), tx, ty, tz, te))
f = list(x for x in (tx, ty, tz, te) if x is not None)
assert f.count(f[0]) == len(f), "fast forwarded pulse detected"
pt = time.time()
assert direction_found, "direction not found"
assert round(ix / STEPPER_PULSES_PER_MM_X, 10) == delta.x,\
"x wrong number of pulses"
assert round(iy / STEPPER_PULSES_PER_MM_Y, 10) == delta.y,\
"y wrong number of pulses"
assert round(iz / STEPPER_PULSES_PER_MM_Z, 10) == delta.z, \
"z wrong number of pulses"
assert round(ie / STEPPER_PULSES_PER_MM_E, 10) == delta.e, \
"e wrong number of pulses"
assert max(mx, my, mz, me) <= generator.total_time_s(), \
"interpolation time or pulses wrong"
logging.debug("Moved {}, {}, {}, {} iterations".format(ix, iy, iz, ie))
logging.info("prepared in " + str(round(pt - st, 2)) + "s, estimated "
+ str(round(generator.total_time_s(), 2)) + "s")
def join():
""" Wait till motors work.
"""
logging.info("hal join()")
def deinit():
""" De-initialise.
"""
logging.info("hal deinit()")
def watchdog_feed():
""" Feed hardware watchdog.
"""
pass
| 32.861446
| 79
| 0.530889
|
ea1de5d55853926d9987b4fa4ffddd545999e802
| 2,917
|
py
|
Python
|
AsciiArt/ascii-art.py
|
linxiaohui/CodeRepoPy
|
0bb6a47225b0638dec360dd441ed506f9295a4b1
|
[
"MIT"
] | null | null | null |
AsciiArt/ascii-art.py
|
linxiaohui/CodeRepoPy
|
0bb6a47225b0638dec360dd441ed506f9295a4b1
|
[
"MIT"
] | null | null | null |
AsciiArt/ascii-art.py
|
linxiaohui/CodeRepoPy
|
0bb6a47225b0638dec360dd441ed506f9295a4b1
|
[
"MIT"
] | null | null | null |
"""
How it works?
We scale a given image to a standard resolution that suitably represents the ASCII version of a given image.
The scaled version is then converted to a grayscale image.
In a grayscale image, there are 256 shades of gray, or in other words,
each pixel carries only the intensity information which is represented by an 8 bit value.
A pixel with a value of 0 is assumed to be black and the one with 255 is assumed to be white.
We divide the whole range of 0-255 into 11 smaller ranges of 25 pixels each
and then assign each pixel a character according to the range it falls in.
The point is to assign a group of pixels with slightly varying intensity the same ASCII char.
We use the PIL library to play with the images.
The code given below is almost self explanatory.
The default char mapping and resolution doesn't render good ASCII arts for every image size
and so you should try modifying the char mapping and image size to the one that best represents the given image.
From: http://www.hackerearth.com/notes/beautiful-python-a-simple-ascii-art-generator-from-images/
"""
from PIL import Image
ASCII_CHARS = [ '#', '?', '%', '.', 'S', '+', '.', '*', ':', ',', '@']
def scale_image(image, new_width=100):
"""Resizes an image preserving the aspect ratio.
"""
(original_width, original_height) = image.size
aspect_ratio = original_height/float(original_width)
new_height = int(aspect_ratio * new_width)
new_image = image.resize((new_width, new_height))
return new_image
def convert_to_grayscale(image):
return image.convert('L')
def map_pixels_to_ascii_chars(image, range_width=25):
"""Maps each pixel to an ascii char based on the range
in which it lies.
0-255 is divided into 11 ranges of 25 pixels each.
"""
pixels_in_image = list(image.getdata())
pixels_to_chars = [ASCII_CHARS[pixel_value//range_width] for pixel_value in
pixels_in_image]
return "".join(pixels_to_chars)
def convert_image_to_ascii(image, new_width=100):
image = scale_image(image)
image = convert_to_grayscale(image)
pixels_to_chars = map_pixels_to_ascii_chars(image)
len_pixels_to_chars = len(pixels_to_chars)
image_ascii = [pixels_to_chars[index: index + new_width] for index in
range(0, len_pixels_to_chars, new_width)]
return "\n".join(image_ascii)
def handle_image_conversion(image_filepath):
image = None
try:
image = Image.open(image_filepath)
except Exception as e:
print("Unable to open image file {image_filepath}.".format(image_filepath=image_filepath))
print(e)
return
image_ascii = convert_image_to_ascii(image)
print(image_ascii)
if __name__=='__main__':
import sys
image_file_path = sys.argv[1]
handle_image_conversion(image_file_path)
| 36.924051
| 113
| 0.70929
|
0cd46792179934cd95439eef8eafea9b385d2853
| 832
|
py
|
Python
|
jaraco/input/binary.py
|
jaraco/jaraco.input
|
2693448286ce496efc82045ad4e1165f0479cc15
|
[
"MIT"
] | null | null | null |
jaraco/input/binary.py
|
jaraco/jaraco.input
|
2693448286ce496efc82045ad4e1165f0479cc15
|
[
"MIT"
] | null | null | null |
jaraco/input/binary.py
|
jaraco/jaraco.input
|
2693448286ce496efc82045ad4e1165f0479cc15
|
[
"MIT"
] | null | null | null |
def get_bit_values(number, size=32):
"""
Get bit values as a list for a given number
>>> get_bit_values(1) == [0]*31 + [1]
True
>>> get_bit_values(0xDEADBEEF)
[1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, ..., 0, 1, 1, 1, 1]
You may override the default word size of 32-bits to match your actual
application.
>>> get_bit_values(0x3, 2)
[1, 1]
>>> get_bit_values(0x3, 4)
[0, 0, 1, 1]
"""
res = list(gen_bit_values(number))
res.reverse()
# 0-pad the most significant bit
res = [0] * (size - len(res)) + res
return res
def gen_bit_values(number):
"""
Return a zero or one for each bit of a numeric value up to the most
significant 1 bit, beginning with the least significant bit.
"""
while number:
yield number & 0x1
number >>= 1
| 24.470588
| 74
| 0.579327
|
702bc72f4925fdf589162911cef89997a640ed7b
| 1,691
|
py
|
Python
|
examples/dfp/v201805/inventory_service/get_all_ad_units.py
|
christineyi3898/googleads-python-lib
|
cd707dc897b93cf1bbb19355f7424e7834e7fb55
|
[
"Apache-2.0"
] | 1
|
2019-10-21T04:10:22.000Z
|
2019-10-21T04:10:22.000Z
|
examples/dfp/v201805/inventory_service/get_all_ad_units.py
|
christineyi3898/googleads-python-lib
|
cd707dc897b93cf1bbb19355f7424e7834e7fb55
|
[
"Apache-2.0"
] | null | null | null |
examples/dfp/v201805/inventory_service/get_all_ad_units.py
|
christineyi3898/googleads-python-lib
|
cd707dc897b93cf1bbb19355f7424e7834e7fb55
|
[
"Apache-2.0"
] | 1
|
2019-10-21T04:10:51.000Z
|
2019-10-21T04:10:51.000Z
|
#!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets all ad units.
"""
# Import appropriate modules from the client library.
from googleads import dfp
def main(client):
# Initialize appropriate service.
ad_unit_service = client.GetService('InventoryService', version='v201805')
# Create a statement to select ad units.
statement = dfp.StatementBuilder()
# Retrieve a small amount of ad units at a time, paging
# through until all ad units have been retrieved.
while True:
response = ad_unit_service.getAdUnitsByStatement(statement.ToStatement())
if 'results' in response and len(response['results']):
for ad_unit in response['results']:
# Print out some information for each ad unit.
print('Ad unit with ID "%s" and name "%s" was found.\n' %
(ad_unit['id'], ad_unit['name']))
statement.offset += statement.limit
else:
break
  print('\nNumber of results found: %s' % response['totalResultSetSize'])
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client)
| 33.82
| 77
| 0.719101
|
22810b8afffdfed8a7d5fe3862bd01a9ab75b727
| 32,927
|
py
|
Python
|
sdk/core/azure-core/tests/azure_core_asynctests/test_basic_transport.py
|
iscai-msft/azure-sdk-for-python
|
83715b95c41e519d5be7f1180195e2fba136fc0f
|
[
"MIT"
] | 1
|
2020-12-10T03:17:51.000Z
|
2020-12-10T03:17:51.000Z
|
sdk/core/azure-core/tests/azure_core_asynctests/test_basic_transport.py
|
iscai-msft/azure-sdk-for-python
|
83715b95c41e519d5be7f1180195e2fba136fc0f
|
[
"MIT"
] | 226
|
2019-07-24T07:57:21.000Z
|
2019-10-15T01:07:24.000Z
|
sdk/core/azure-core/tests/azure_core_asynctests/test_basic_transport.py
|
iscai-msft/azure-sdk-for-python
|
83715b95c41e519d5be7f1180195e2fba136fc0f
|
[
"MIT"
] | 1
|
2020-07-31T16:33:36.000Z
|
2020-07-31T16:33:36.000Z
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for
# license information.
# -------------------------------------------------------------------------
from six.moves.http_client import HTTPConnection
import time
try:
from unittest import mock
except ImportError:
import mock
from azure.core.pipeline.transport import HttpRequest, AsyncHttpResponse, AsyncHttpTransport, AioHttpTransport
from azure.core.pipeline.policies import HeadersPolicy
from azure.core.pipeline import AsyncPipeline
import pytest
# transport = mock.MagicMock(spec=AsyncHttpTransport)
# MagicMock supports async context managers only after 3.8
# https://github.com/python/cpython/pull/9296
class MockAsyncHttpTransport(AsyncHttpTransport):
async def __aenter__(self): return self
async def __aexit__(self, *args): pass
async def open(self): pass
async def close(self): pass
async def send(self, request, **kwargs): pass
class MockResponse(AsyncHttpResponse):
def __init__(self, request, body, content_type):
super(MockResponse, self).__init__(request, None)
self._body = body
self.content_type = content_type
def body(self):
return self._body
@pytest.mark.asyncio
async def test_basic_options_aiohttp():
request = HttpRequest("OPTIONS", "https://httpbin.org")
async with AsyncPipeline(AioHttpTransport(), policies=[]) as pipeline:
response = await pipeline.run(request)
assert pipeline._transport.session is None
assert isinstance(response.http_response.status_code, int)
@pytest.mark.asyncio
async def test_multipart_send():
transport = MockAsyncHttpTransport()
class RequestPolicy(object):
async def on_request(self, request):
# type: (PipelineRequest) -> None
request.http_request.headers['x-ms-date'] = 'Thu, 14 Jun 2018 16:46:54 GMT'
req0 = HttpRequest("DELETE", "/container0/blob0")
req1 = HttpRequest("DELETE", "/container1/blob1")
request = HttpRequest("POST", "http://account.blob.core.windows.net/?comp=batch")
request.set_multipart_mixed(
req0,
req1,
policies=[RequestPolicy()],
boundary="batch_357de4f7-6d0b-4e02-8cd2-6361411a9525" # Fix it so test are deterministic
)
async with AsyncPipeline(transport) as pipeline:
await pipeline.run(request)
assert request.body == (
b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
b'Content-Type: application/http\r\n'
b'Content-Transfer-Encoding: binary\r\n'
b'Content-ID: 0\r\n'
b'\r\n'
b'DELETE /container0/blob0 HTTP/1.1\r\n'
b'x-ms-date: Thu, 14 Jun 2018 16:46:54 GMT\r\n'
b'\r\n'
b'\r\n'
b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
b'Content-Type: application/http\r\n'
b'Content-Transfer-Encoding: binary\r\n'
b'Content-ID: 1\r\n'
b'\r\n'
b'DELETE /container1/blob1 HTTP/1.1\r\n'
b'x-ms-date: Thu, 14 Jun 2018 16:46:54 GMT\r\n'
b'\r\n'
b'\r\n'
b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525--\r\n'
)
@pytest.mark.asyncio
async def test_multipart_send_with_context():
transport = MockAsyncHttpTransport()
header_policy = HeadersPolicy()
class RequestPolicy(object):
async def on_request(self, request):
# type: (PipelineRequest) -> None
request.http_request.headers['x-ms-date'] = 'Thu, 14 Jun 2018 16:46:54 GMT'
req0 = HttpRequest("DELETE", "/container0/blob0")
req1 = HttpRequest("DELETE", "/container1/blob1")
request = HttpRequest("POST", "http://account.blob.core.windows.net/?comp=batch")
request.set_multipart_mixed(
req0,
req1,
policies=[header_policy, RequestPolicy()],
boundary="batch_357de4f7-6d0b-4e02-8cd2-6361411a9525", # Fix it so test are deterministic
headers={'Accept': 'application/json'}
)
async with AsyncPipeline(transport) as pipeline:
await pipeline.run(request)
assert request.body == (
b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
b'Content-Type: application/http\r\n'
b'Content-Transfer-Encoding: binary\r\n'
b'Content-ID: 0\r\n'
b'\r\n'
b'DELETE /container0/blob0 HTTP/1.1\r\n'
b'Accept: application/json\r\n'
b'x-ms-date: Thu, 14 Jun 2018 16:46:54 GMT\r\n'
b'\r\n'
b'\r\n'
b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
b'Content-Type: application/http\r\n'
b'Content-Transfer-Encoding: binary\r\n'
b'Content-ID: 1\r\n'
b'\r\n'
b'DELETE /container1/blob1 HTTP/1.1\r\n'
b'Accept: application/json\r\n'
b'x-ms-date: Thu, 14 Jun 2018 16:46:54 GMT\r\n'
b'\r\n'
b'\r\n'
b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525--\r\n'
)
@pytest.mark.asyncio
async def test_multipart_send_with_one_changeset():
transport = MockAsyncHttpTransport()
requests = [
HttpRequest("DELETE", "/container0/blob0"),
HttpRequest("DELETE", "/container1/blob1")
]
changeset = HttpRequest(None, None)
changeset.set_multipart_mixed(
*requests,
boundary="changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525"
)
request = HttpRequest("POST", "http://account.blob.core.windows.net/?comp=batch")
request.set_multipart_mixed(
changeset,
boundary="batch_357de4f7-6d0b-4e02-8cd2-6361411a9525"
)
async with AsyncPipeline(transport) as pipeline:
await pipeline.run(request)
assert request.body == (
b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
b'Content-Type: multipart/mixed; boundary=changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
b'\r\n'
b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
b'Content-Type: application/http\r\n'
b'Content-Transfer-Encoding: binary\r\n'
b'Content-ID: 0\r\n'
b'\r\n'
b'DELETE /container0/blob0 HTTP/1.1\r\n'
b'\r\n'
b'\r\n'
b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
b'Content-Type: application/http\r\n'
b'Content-Transfer-Encoding: binary\r\n'
b'Content-ID: 1\r\n'
b'\r\n'
b'DELETE /container1/blob1 HTTP/1.1\r\n'
b'\r\n'
b'\r\n'
b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525--\r\n'
b'\r\n'
b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525--\r\n'
)
@pytest.mark.asyncio
async def test_multipart_send_with_multiple_changesets():
transport = MockAsyncHttpTransport()
changeset1 = HttpRequest(None, None)
changeset1.set_multipart_mixed(
HttpRequest("DELETE", "/container0/blob0"),
HttpRequest("DELETE", "/container1/blob1"),
boundary="changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525"
)
changeset2 = HttpRequest(None, None)
changeset2.set_multipart_mixed(
HttpRequest("DELETE", "/container2/blob2"),
HttpRequest("DELETE", "/container3/blob3"),
boundary="changeset_8b9e487e-a353-4dcb-a6f4-0688191e0314"
)
request = HttpRequest("POST", "http://account.blob.core.windows.net/?comp=batch")
request.set_multipart_mixed(
changeset1,
changeset2,
boundary="batch_357de4f7-6d0b-4e02-8cd2-6361411a9525",
)
async with AsyncPipeline(transport) as pipeline:
await pipeline.run(request)
assert request.body == (
b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
b'Content-Type: multipart/mixed; boundary=changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
b'\r\n'
b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
b'Content-Type: application/http\r\n'
b'Content-Transfer-Encoding: binary\r\n'
b'Content-ID: 0\r\n'
b'\r\n'
b'DELETE /container0/blob0 HTTP/1.1\r\n'
b'\r\n'
b'\r\n'
b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
b'Content-Type: application/http\r\n'
b'Content-Transfer-Encoding: binary\r\n'
b'Content-ID: 1\r\n'
b'\r\n'
b'DELETE /container1/blob1 HTTP/1.1\r\n'
b'\r\n'
b'\r\n'
b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525--\r\n'
b'\r\n'
b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
b'Content-Type: multipart/mixed; boundary=changeset_8b9e487e-a353-4dcb-a6f4-0688191e0314\r\n'
b'\r\n'
b'--changeset_8b9e487e-a353-4dcb-a6f4-0688191e0314\r\n'
b'Content-Type: application/http\r\n'
b'Content-Transfer-Encoding: binary\r\n'
b'Content-ID: 2\r\n'
b'\r\n'
b'DELETE /container2/blob2 HTTP/1.1\r\n'
b'\r\n'
b'\r\n'
b'--changeset_8b9e487e-a353-4dcb-a6f4-0688191e0314\r\n'
b'Content-Type: application/http\r\n'
b'Content-Transfer-Encoding: binary\r\n'
b'Content-ID: 3\r\n'
b'\r\n'
b'DELETE /container3/blob3 HTTP/1.1\r\n'
b'\r\n'
b'\r\n'
b'--changeset_8b9e487e-a353-4dcb-a6f4-0688191e0314--\r\n'
b'\r\n'
b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525--\r\n'
)
@pytest.mark.asyncio
async def test_multipart_send_with_combination_changeset_first():
transport = MockAsyncHttpTransport()
changeset = HttpRequest(None, None)
changeset.set_multipart_mixed(
HttpRequest("DELETE", "/container0/blob0"),
HttpRequest("DELETE", "/container1/blob1"),
boundary="changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525"
)
request = HttpRequest("POST", "http://account.blob.core.windows.net/?comp=batch")
request.set_multipart_mixed(
changeset,
HttpRequest("DELETE", "/container2/blob2"),
boundary="batch_357de4f7-6d0b-4e02-8cd2-6361411a9525"
)
async with AsyncPipeline(transport) as pipeline:
await pipeline.run(request)
assert request.body == (
b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
b'Content-Type: multipart/mixed; boundary=changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
b'\r\n'
b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
b'Content-Type: application/http\r\n'
b'Content-Transfer-Encoding: binary\r\n'
b'Content-ID: 0\r\n'
b'\r\n'
b'DELETE /container0/blob0 HTTP/1.1\r\n'
b'\r\n'
b'\r\n'
b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
b'Content-Type: application/http\r\n'
b'Content-Transfer-Encoding: binary\r\n'
b'Content-ID: 1\r\n'
b'\r\n'
b'DELETE /container1/blob1 HTTP/1.1\r\n'
b'\r\n'
b'\r\n'
b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525--\r\n'
b'\r\n'
b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
b'Content-Type: application/http\r\n'
b'Content-Transfer-Encoding: binary\r\n'
b'Content-ID: 2\r\n'
b'\r\n'
b'DELETE /container2/blob2 HTTP/1.1\r\n'
b'\r\n'
b'\r\n'
b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525--\r\n'
)
@pytest.mark.asyncio
async def test_multipart_send_with_combination_changeset_last():
transport = MockAsyncHttpTransport()
changeset = HttpRequest(None, None)
changeset.set_multipart_mixed(
HttpRequest("DELETE", "/container1/blob1"),
HttpRequest("DELETE", "/container2/blob2"),
boundary="changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525"
)
request = HttpRequest("POST", "http://account.blob.core.windows.net/?comp=batch")
request.set_multipart_mixed(
HttpRequest("DELETE", "/container0/blob0"),
changeset,
boundary="batch_357de4f7-6d0b-4e02-8cd2-6361411a9525"
)
async with AsyncPipeline(transport) as pipeline:
await pipeline.run(request)
assert request.body == (
b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
b'Content-Type: application/http\r\n'
b'Content-Transfer-Encoding: binary\r\n'
b'Content-ID: 0\r\n'
b'\r\n'
b'DELETE /container0/blob0 HTTP/1.1\r\n'
b'\r\n'
b'\r\n'
b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
b'Content-Type: multipart/mixed; boundary=changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
b'\r\n'
b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
b'Content-Type: application/http\r\n'
b'Content-Transfer-Encoding: binary\r\n'
b'Content-ID: 1\r\n'
b'\r\n'
b'DELETE /container1/blob1 HTTP/1.1\r\n'
b'\r\n'
b'\r\n'
b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
b'Content-Type: application/http\r\n'
b'Content-Transfer-Encoding: binary\r\n'
b'Content-ID: 2\r\n'
b'\r\n'
b'DELETE /container2/blob2 HTTP/1.1\r\n'
b'\r\n'
b'\r\n'
b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525--\r\n'
b'\r\n'
b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525--\r\n'
)
@pytest.mark.asyncio
async def test_multipart_send_with_combination_changeset_middle():
transport = MockAsyncHttpTransport()
changeset = HttpRequest(None, None)
changeset.set_multipart_mixed(
HttpRequest("DELETE", "/container1/blob1"),
boundary="changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525"
)
request = HttpRequest("POST", "http://account.blob.core.windows.net/?comp=batch")
request.set_multipart_mixed(
HttpRequest("DELETE", "/container0/blob0"),
changeset,
HttpRequest("DELETE", "/container2/blob2"),
boundary="batch_357de4f7-6d0b-4e02-8cd2-6361411a9525"
)
async with AsyncPipeline(transport) as pipeline:
await pipeline.run(request)
assert request.body == (
b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
b'Content-Type: application/http\r\n'
b'Content-Transfer-Encoding: binary\r\n'
b'Content-ID: 0\r\n'
b'\r\n'
b'DELETE /container0/blob0 HTTP/1.1\r\n'
b'\r\n'
b'\r\n'
b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
b'Content-Type: multipart/mixed; boundary=changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
b'\r\n'
b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
b'Content-Type: application/http\r\n'
b'Content-Transfer-Encoding: binary\r\n'
b'Content-ID: 1\r\n'
b'\r\n'
b'DELETE /container1/blob1 HTTP/1.1\r\n'
b'\r\n'
b'\r\n'
b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525--\r\n'
b'\r\n'
b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
b'Content-Type: application/http\r\n'
b'Content-Transfer-Encoding: binary\r\n'
b'Content-ID: 2\r\n'
b'\r\n'
b'DELETE /container2/blob2 HTTP/1.1\r\n'
b'\r\n'
b'\r\n'
b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525--\r\n'
)
@pytest.mark.asyncio
async def test_multipart_receive():
class ResponsePolicy(object):
def on_response(self, request, response):
# type: (PipelineRequest, PipelineResponse) -> None
response.http_response.headers['x-ms-fun'] = 'true'
class AsyncResponsePolicy(object):
async def on_response(self, request, response):
# type: (PipelineRequest, PipelineResponse) -> None
response.http_response.headers['x-ms-async-fun'] = 'true'
req0 = HttpRequest("DELETE", "/container0/blob0")
req1 = HttpRequest("DELETE", "/container1/blob1")
request = HttpRequest("POST", "http://account.blob.core.windows.net/?comp=batch")
request.set_multipart_mixed(
req0,
req1,
policies=[ResponsePolicy(), AsyncResponsePolicy()]
)
body_as_str = (
"--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed\r\n"
"Content-Type: application/http\r\n"
"Content-ID: 0\r\n"
"\r\n"
"HTTP/1.1 202 Accepted\r\n"
"x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n"
"x-ms-version: 2018-11-09\r\n"
"\r\n"
"--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed\r\n"
"Content-Type: application/http\r\n"
"Content-ID: 2\r\n"
"\r\n"
"HTTP/1.1 404 The specified blob does not exist.\r\n"
"x-ms-error-code: BlobNotFound\r\n"
"x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e2852\r\n"
"x-ms-version: 2018-11-09\r\n"
"Content-Length: 216\r\n"
"Content-Type: application/xml\r\n"
"\r\n"
'<?xml version="1.0" encoding="utf-8"?>\r\n'
"<Error><Code>BlobNotFound</Code><Message>The specified blob does not exist.\r\n"
"RequestId:778fdc83-801e-0000-62ff-0334671e2852\r\n"
"Time:2018-06-14T16:46:54.6040685Z</Message></Error>\r\n"
"--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed--"
)
response = MockResponse(
request,
body_as_str.encode('ascii'),
"multipart/mixed; boundary=batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed"
)
parts = []
async for part in response.parts():
parts.append(part)
assert len(parts) == 2
res0 = parts[0]
assert res0.status_code == 202
assert res0.headers['x-ms-fun'] == 'true'
assert res0.headers['x-ms-async-fun'] == 'true'
res1 = parts[1]
assert res1.status_code == 404
assert res1.headers['x-ms-fun'] == 'true'
assert res1.headers['x-ms-async-fun'] == 'true'
@pytest.mark.asyncio
async def test_multipart_receive_with_one_changeset():
changeset = HttpRequest(None, None)
changeset.set_multipart_mixed(
HttpRequest("DELETE", "/container0/blob0"),
HttpRequest("DELETE", "/container1/blob1")
)
request = HttpRequest("POST", "http://account.blob.core.windows.net/?comp=batch")
request.set_multipart_mixed(changeset)
body_as_bytes = (
b'--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed\r\n'
b'Content-Type: multipart/mixed; boundary="changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525"\r\n'
b'\r\n'
b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
b'Content-Type: application/http\r\n'
b'Content-Transfer-Encoding: binary\r\n'
b'Content-ID: 0\r\n'
b'\r\n'
b'HTTP/1.1 202 Accepted\r\n'
b'x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n'
b'x-ms-version: 2018-11-09\r\n'
b'\r\n'
b'\r\n'
b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
b'Content-Type: application/http\r\n'
b'Content-Transfer-Encoding: binary\r\n'
b'Content-ID: 1\r\n'
b'\r\n'
b'HTTP/1.1 202 Accepted\r\n'
b'x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n'
b'x-ms-version: 2018-11-09\r\n'
b'\r\n'
b'\r\n'
b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525--\r\n'
b'\r\n'
b'--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed--\r\n'
)
response = MockResponse(
request,
body_as_bytes,
"multipart/mixed; boundary=batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed"
)
parts = []
async for part in response.parts():
parts.append(part)
assert len(parts) == 2
res0 = parts[0]
assert res0.status_code == 202
@pytest.mark.asyncio
async def test_multipart_receive_with_multiple_changesets():
changeset1 = HttpRequest(None, None)
changeset1.set_multipart_mixed(
HttpRequest("DELETE", "/container0/blob0"),
HttpRequest("DELETE", "/container1/blob1")
)
changeset2 = HttpRequest(None, None)
changeset2.set_multipart_mixed(
HttpRequest("DELETE", "/container2/blob2"),
HttpRequest("DELETE", "/container3/blob3")
)
request = HttpRequest("POST", "http://account.blob.core.windows.net/?comp=batch")
request.set_multipart_mixed(changeset1, changeset2)
body_as_bytes = (
b'--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed\r\n'
b'Content-Type: multipart/mixed; boundary="changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525"\r\n'
b'\r\n'
b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
b'Content-Type: application/http\r\n'
b'Content-Transfer-Encoding: binary\r\n'
b'Content-ID: 0\r\n'
b'\r\n'
b'HTTP/1.1 200\r\n'
b'x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n'
b'x-ms-version: 2018-11-09\r\n'
b'\r\n'
b'\r\n'
b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
b'Content-Type: application/http\r\n'
b'Content-Transfer-Encoding: binary\r\n'
b'Content-ID: 1\r\n'
b'\r\n'
b'HTTP/1.1 202\r\n'
b'x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n'
b'x-ms-version: 2018-11-09\r\n'
b'\r\n'
b'\r\n'
b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525--\r\n'
b'\r\n'
b'--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed\r\n'
b'Content-Type: multipart/mixed; boundary="changeset_8b9e487e-a353-4dcb-a6f4-0688191e0314"\r\n'
b'\r\n'
b'--changeset_8b9e487e-a353-4dcb-a6f4-0688191e0314\r\n'
b'Content-Type: application/http\r\n'
b'Content-Transfer-Encoding: binary\r\n'
b'Content-ID: 2\r\n'
b'\r\n'
b'HTTP/1.1 404\r\n'
b'x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n'
b'x-ms-version: 2018-11-09\r\n'
b'\r\n'
b'\r\n'
b'--changeset_8b9e487e-a353-4dcb-a6f4-0688191e0314\r\n'
b'Content-Type: application/http\r\n'
b'Content-Transfer-Encoding: binary\r\n'
b'Content-ID: 3\r\n'
b'\r\n'
b'HTTP/1.1 409\r\n'
b'x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n'
b'x-ms-version: 2018-11-09\r\n'
b'\r\n'
b'\r\n'
b'--changeset_8b9e487e-a353-4dcb-a6f4-0688191e0314--\r\n'
b'\r\n'
b'--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed--\r\n'
)
response = MockResponse(
request,
body_as_bytes,
"multipart/mixed; boundary=batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed"
)
parts = []
async for part in response.parts():
parts.append(part)
assert len(parts) == 4
assert parts[0].status_code == 200
assert parts[1].status_code == 202
assert parts[2].status_code == 404
assert parts[3].status_code == 409
@pytest.mark.asyncio
async def test_multipart_receive_with_combination_changeset_first():
changeset = HttpRequest(None, None)
changeset.set_multipart_mixed(
HttpRequest("DELETE", "/container0/blob0"),
HttpRequest("DELETE", "/container1/blob1")
)
request = HttpRequest("POST", "http://account.blob.core.windows.net/?comp=batch")
request.set_multipart_mixed(changeset, HttpRequest("DELETE", "/container2/blob2"))
body_as_bytes = (
b'--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed\r\n'
b'Content-Type: multipart/mixed; boundary="changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525"\r\n'
b'\r\n'
b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
b'Content-Type: application/http\r\n'
b'Content-Transfer-Encoding: binary\r\n'
b'Content-ID: 0\r\n'
b'\r\n'
b'HTTP/1.1 200\r\n'
b'x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n'
b'x-ms-version: 2018-11-09\r\n'
b'\r\n'
b'\r\n'
b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
b'Content-Type: application/http\r\n'
b'Content-Transfer-Encoding: binary\r\n'
b'Content-ID: 1\r\n'
b'\r\n'
b'HTTP/1.1 202\r\n'
b'x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n'
b'x-ms-version: 2018-11-09\r\n'
b'\r\n'
b'\r\n'
b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525--\r\n'
b'\r\n'
b'--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed\r\n'
b'Content-Type: application/http\r\n'
b'Content-Transfer-Encoding: binary\r\n'
b'Content-ID: 2\r\n'
b'\r\n'
b'HTTP/1.1 404\r\n'
b'x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n'
b'x-ms-version: 2018-11-09\r\n'
b'\r\n'
b'\r\n'
b'--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed--\r\n'
)
response = MockResponse(
request,
body_as_bytes,
"multipart/mixed; boundary=batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed"
)
parts = []
async for part in response.parts():
parts.append(part)
assert len(parts) == 3
assert parts[0].status_code == 200
assert parts[1].status_code == 202
assert parts[2].status_code == 404
@pytest.mark.asyncio
async def test_multipart_receive_with_combination_changeset_middle():
changeset = HttpRequest(None, None)
changeset.set_multipart_mixed(HttpRequest("DELETE", "/container1/blob1"))
request = HttpRequest("POST", "http://account.blob.core.windows.net/?comp=batch")
request.set_multipart_mixed(
HttpRequest("DELETE", "/container0/blob0"),
changeset,
HttpRequest("DELETE", "/container2/blob2")
)
body_as_bytes = (
b'--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed\r\n'
b'Content-Type: application/http\r\n'
b'Content-Transfer-Encoding: binary\r\n'
b'Content-ID: 2\r\n'
b'\r\n'
b'HTTP/1.1 200\r\n'
b'x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n'
b'x-ms-version: 2018-11-09\r\n'
b'\r\n'
b'\r\n'
b'--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed\r\n'
b'Content-Type: multipart/mixed; boundary="changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525"\r\n'
b'\r\n'
b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
b'Content-Type: application/http\r\n'
b'Content-Transfer-Encoding: binary\r\n'
b'Content-ID: 0\r\n'
b'\r\n'
b'HTTP/1.1 202\r\n'
b'x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n'
b'x-ms-version: 2018-11-09\r\n'
b'\r\n'
b'\r\n'
b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525--\r\n'
b'\r\n'
b'--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed\r\n'
b'Content-Type: application/http\r\n'
b'Content-Transfer-Encoding: binary\r\n'
b'Content-ID: 2\r\n'
b'\r\n'
b'HTTP/1.1 404\r\n'
b'x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n'
b'x-ms-version: 2018-11-09\r\n'
b'\r\n'
b'\r\n'
b'--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed--\r\n'
)
response = MockResponse(
request,
body_as_bytes,
"multipart/mixed; boundary=batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed"
)
parts = []
async for part in response.parts():
parts.append(part)
assert len(parts) == 3
assert parts[0].status_code == 200
assert parts[1].status_code == 202
assert parts[2].status_code == 404
@pytest.mark.asyncio
async def test_multipart_receive_with_combination_changeset_last():
changeset = HttpRequest(None, None)
changeset.set_multipart_mixed(
HttpRequest("DELETE", "/container1/blob1"),
HttpRequest("DELETE", "/container2/blob2")
)
request = HttpRequest("POST", "http://account.blob.core.windows.net/?comp=batch")
request.set_multipart_mixed(HttpRequest("DELETE", "/container0/blob0"), changeset)
body_as_bytes = (
b'--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed\r\n'
b'Content-Type: application/http\r\n'
b'Content-Transfer-Encoding: binary\r\n'
b'Content-ID: 2\r\n'
b'\r\n'
b'HTTP/1.1 200\r\n'
b'x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n'
b'x-ms-version: 2018-11-09\r\n'
b'\r\n'
b'\r\n'
b'--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed\r\n'
b'Content-Type: multipart/mixed; boundary="changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525"\r\n'
b'\r\n'
b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
b'Content-Type: application/http\r\n'
b'Content-Transfer-Encoding: binary\r\n'
b'Content-ID: 0\r\n'
b'\r\n'
b'HTTP/1.1 202\r\n'
b'x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n'
b'x-ms-version: 2018-11-09\r\n'
b'\r\n'
b'\r\n'
b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
b'Content-Type: application/http\r\n'
b'Content-Transfer-Encoding: binary\r\n'
b'Content-ID: 1\r\n'
b'\r\n'
b'HTTP/1.1 404\r\n'
b'x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n'
b'x-ms-version: 2018-11-09\r\n'
b'\r\n'
b'\r\n'
b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525--\r\n'
b'\r\n'
b'--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed--\r\n'
)
response = MockResponse(
request,
body_as_bytes,
"multipart/mixed; boundary=batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed"
)
parts = []
async for part in response.parts():
parts.append(part)
assert len(parts) == 3
assert parts[0].status_code == 200
assert parts[1].status_code == 202
assert parts[2].status_code == 404
@pytest.mark.asyncio
async def test_multipart_receive_with_bom():
req0 = HttpRequest("DELETE", "/container0/blob0")
request = HttpRequest("POST", "http://account.blob.core.windows.net/?comp=batch")
request.set_multipart_mixed(req0)
body_as_bytes = (
b"--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed\n"
b"Content-Type: application/http\n"
b"Content-Transfer-Encoding: binary\n"
b"Content-ID: 0\n"
b'\r\n'
b'HTTP/1.1 400 One of the request inputs is not valid.\r\n'
b'Content-Length: 220\r\n'
b'Content-Type: application/xml\r\n'
b'Server: Windows-Azure-Blob/1.0\r\n'
b'\r\n'
b'\xef\xbb\xbf<?xml version="1.0" encoding="utf-8"?>\n<Error><Code>InvalidInput</Code><Message>One'
b'of the request inputs is not valid.\nRequestId:5f3f9f2f-e01e-00cc-6eb1-6d00b5000000\nTime:2019-09-17T23:44:07.4671860Z</Message></Error>\n'
b"--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed--"
)
response = MockResponse(
request,
body_as_bytes,
"multipart/mixed; boundary=batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed"
)
parts = []
async for part in response.parts():
parts.append(part)
assert len(parts) == 1
res0 = parts[0]
assert res0.status_code == 400
assert res0.body().startswith(b'\xef\xbb\xbf')
@pytest.mark.asyncio
async def test_recursive_multipart_receive():
req0 = HttpRequest("DELETE", "/container0/blob0")
internal_req0 = HttpRequest("DELETE", "/container0/blob0")
req0.set_multipart_mixed(internal_req0)
request = HttpRequest("POST", "http://account.blob.core.windows.net/?comp=batch")
request.set_multipart_mixed(req0)
internal_body_as_str = (
"--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed\r\n"
"Content-Type: application/http\r\n"
"Content-ID: 0\r\n"
"\r\n"
"HTTP/1.1 400 Accepted\r\n"
"x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n"
"x-ms-version: 2018-11-09\r\n"
"\r\n"
"--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed--"
)
body_as_str = (
"--batchresponse_8d5f5bcd-2cb5-44bb-91b5-e9a722e68cb6\r\n"
"Content-Type: application/http\r\n"
"Content-ID: 0\r\n"
"\r\n"
"HTTP/1.1 202 Accepted\r\n"
"Content-Type: multipart/mixed; boundary=batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed\r\n"
"\r\n"
"{}"
"--batchresponse_8d5f5bcd-2cb5-44bb-91b5-e9a722e68cb6--"
).format(internal_body_as_str)
response = MockResponse(
request,
body_as_str.encode('ascii'),
"multipart/mixed; boundary=batchresponse_8d5f5bcd-2cb5-44bb-91b5-e9a722e68cb6"
)
parts = []
async for part in response.parts():
parts.append(part)
assert len(parts) == 1
res0 = parts[0]
assert res0.status_code == 202
internal_parts = []
async for part in res0.parts():
internal_parts.append(part)
assert len(internal_parts) == 1
internal_response0 = internal_parts[0]
assert internal_response0.status_code == 400
| 35.216043
| 149
| 0.62675
|
294d21728368aea940319594e4b0dd7b90e02d84
| 523
|
py
|
Python
|
geokey/users/migrations/0002_auto_20150106_1420.py
|
universityofsussex/geokey
|
25e161dbc81841c57c148053dbe99facc81e84b8
|
[
"Apache-2.0"
] | null | null | null |
geokey/users/migrations/0002_auto_20150106_1420.py
|
universityofsussex/geokey
|
25e161dbc81841c57c148053dbe99facc81e84b8
|
[
"Apache-2.0"
] | null | null | null |
geokey/users/migrations/0002_auto_20150106_1420.py
|
universityofsussex/geokey
|
25e161dbc81841c57c148053dbe99facc81e84b8
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from django.db import migrations
def create_anonymous(apps, schema_editor):
User = apps.get_model("users", "User")
if not User.objects.filter(display_name='AnonymousUser').exists():
User.objects.create(
display_name='AnonymousUser',
password='',
email=''
)
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.RunPython(create_anonymous),
]
| 20.115385
| 70
| 0.602294
|
ccb1280133f71916654ae3a70f30845926149cd0
| 9,155
|
py
|
Python
|
src/old/multimodal_code/utils.py
|
trungnt13/digisami_journal
|
671486d0fe7b65cad80daf8e8b96d475245c5fed
|
[
"Apache-2.0"
] | null | null | null |
src/old/multimodal_code/utils.py
|
trungnt13/digisami_journal
|
671486d0fe7b65cad80daf8e8b96d475245c5fed
|
[
"Apache-2.0"
] | null | null | null |
src/old/multimodal_code/utils.py
|
trungnt13/digisami_journal
|
671486d0fe7b65cad80daf8e8b96d475245c5fed
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import print_function, division, absolute_import
import os
from six.moves import cPickle
import random
import numpy as np
from odin import fuel as F
from odin.utils import get_modelpath, Progbar, bidict
from odin.stats import freqcount
CODE_PATH = '/home/trung/src/digisami'
DATASET_PATH = '/home/trung/data/%s_audio'
ANNO_PATH = '/home/trung/data/%s_anno'
data_order = ['estonia', 'finnish']
SPLIT = [0.6, 0.8]
SEED = 12082518
laugh_labels = bidict({
u'': 0,
u'fl, b': 1,
u'st, e': 2,
u'st, b': 3,
u'fl, e': 4,
u'st, m': 5,
u'fl, o': 6,
u'fl, m': 7,
u'fl, p': 8,
u'fl, o, p': 9,
u'fl, d': 10,
u'st, o': 11,
u'st, p': 12
})
laugh_labels_binary = bidict({
u'': 0,
u'fl, b': 1,
u'st, e': 1,
u'st, b': 1,
u'fl, e': 1,
u'st, m': 1,
u'fl, o': 1,
u'fl, m': 1,
u'fl, p': 1,
u'fl, o, p': 1,
u'fl, d': 1,
u'st, o': 1,
u'st, p': 1
})
laugh_shortened = bidict({
u'': 0,
u'fl, b': 1,
u'st, e': 2,
u'st, b': 3,
u'fl, e': 4,
u'st, o': 5,
u'st, m': 6,
u'fl, o': 7,
u'fl, m': 8,
u'fl, p': 9
})
# ===========================================================================
# Label maker
# ===========================================================================
def flst(x):
"""fl = 1, st = 2"""
return 0 if len(x) == 0 else (1 if 'fl' in x else 2)
def bin(x):
return 1 if len(x) > 0 else 0
def allab(x):
return laugh_labels[x]
def get_anno(lang):
f = open(ANNO_PATH % lang, 'r')
anno = cPickle.load(f)
f.close()
return anno
def get_data(lang, feat, stack, ctx, hop, mode, batch_mode, ncpu):
"""
mode: "binary", "laugh", "all", "emotion"
batch_mode: "all", "mul"
"""
path = DATASET_PATH % lang
if mode == 'binary':
label_parser = bin
elif mode == 'laugh':
label_parser = flst
else:
label_parser = allab
ds = F.Dataset(path, read_only=True)
indices = np.genfromtxt(ds['indices.csv'], dtype=str, delimiter=' ')
# ====== split indices ====== #
n = indices.shape[0]
np.random.seed(SEED); np.random.shuffle(indices)
train_indices = indices[:int(SPLIT[0] * n)]
valid_indices = indices[int(SPLIT[0] * n):int(SPLIT[1] * n)]
test_indices = indices[int(SPLIT[1] * n):]
print('#Files:', n,
' #Train:', train_indices.shape[0],
' #Valid:', valid_indices.shape[0],
' #Test:', test_indices.shape[0]
)
# ====== create feeder ====== #
maximum_queue_size = 66
train = F.Feeder(ds[feat], train_indices, ncpu=ncpu,
buffer_size=3, maximum_queue_size=maximum_queue_size)
valid = F.Feeder(ds[feat], valid_indices, ncpu=max(1, ncpu // 2),
buffer_size=1, maximum_queue_size=maximum_queue_size)
test = F.Feeder(ds[feat], test_indices, ncpu=max(1, ncpu // 2),
buffer_size=1, maximum_queue_size=maximum_queue_size)
# create feature transform
if ctx <= 1:
feature_transform = None
elif stack:
feature_transform = F.recipes.Stacking(left_context=ctx // 2,
right_context=ctx // 2,
shift=hop)
else:
feature_transform = F.recipes.Sequencing(frame_length=ctx,
hop_length=hop,
end='pad')
if batch_mode == 'all':
batch_filter = lambda data: data
elif batch_mode == 'mul':
batch_filter = lambda data: None if len(set(data[-1])) <= 1 else data
else:
raise ValueError('invalid batch_mode=%s' % batch_mode)
# ====== create recipes ====== #
recipes = [
F.recipes.Normalization(local_normalize=False,
mean=ds['%s_mean' % feat],
std=ds['%s_std' % feat]),
feature_transform
]
# ====== set recipes ====== #
train.set_recipes([F.recipes.TransLoader(ds['laugh.dict'], dtype=int, label_dict=label_parser)] +
recipes +
[F.recipes.CreateBatch(batch_filter=batch_filter)])
valid.set_recipes([F.recipes.TransLoader(ds['laugh.dict'], dtype=int, label_dict=label_parser)] +
recipes +
[F.recipes.CreateBatch()])
test.set_recipes([F.recipes.TransLoader(ds['laugh.dict'], dtype=str)] +
recipes +
[F.recipes.CreateFile(return_name=True)])
return train, valid, test
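# Illustrative call of get_data (added sketch; the feature name 'mspec' and the
# numeric values below are assumptions, not taken from this repository):
# train, valid, test = get_data('estonia', feat='mspec', stack=True, ctx=20,
#                               hop=10, mode='binary', batch_mode='all', ncpu=4)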
# ===========================================================================
# For evaluation
# ===========================================================================
from sklearn.metrics import (accuracy_score, f1_score, confusion_matrix, classification_report)
def report_func(y_true, y_pred, nb_classes):
def report(true, pred):
labels = np.unique(true).tolist()
print('Labels:', labels, 'y_true:', np.unique(true), 'y_pred:', np.unique(pred))
print('Accuracy:', accuracy_score(true, pred))
print('F1 weighted:', f1_score(true, pred, labels=labels, average='weighted'))
print('F1 micro:', f1_score(true, pred, labels=labels, average='micro'))
print('F1 macro:', f1_score(true, pred, labels=labels, average='macro'))
print('Confusion matrix:')
print(confusion_matrix(true, pred, labels=labels))
# print('Report:')
# print(classification_report(true, pred, labels=labels))
    # TODO there exists the case where, for labels [0, 1, 2], we only predict [0, 1];
    # if the output is 1 but the true value is 0, should we calibrate to 0 or 1?
def convert_to_flst(true, pred):
if nb_classes == 2:
return [flst(j) if i == 1 and len(j) > 0
else (random.choice([1, 2]) if i == 1 and len(j) == 0
else i)
for i, j in zip(pred, true)]
elif nb_classes == 3:
return pred
else:
return [flst(laugh_labels[i]) for i in pred]
def convert_to_all(true, pred):
alllabels = np.unique([allab(i) for i in true if len(i) > 0])
if nb_classes == 2:
return [allab(j) if i == 1 and len(j) > 0
else (random.choice(alllabels) if i == 1 and len(j) == 0
else i)
for i, j in zip(pred, true)]
elif nb_classes == 3:
return [allab(j) if (i == 1 and 'fl,' in j) or (i == 2 and 'st,' in j)
else (random.choice(alllabels) if i > 0 and len(j) == 0
else i)
for i, j in zip(pred, true)]
else:
return pred
print('SkyPrediction:')
hist_pred = freqcount(y_pred)
for i, j in hist_pred.iteritems():
print(i, ':', j)
hist_true = freqcount(y_true)
print('GroundTrue:')
for i, j in hist_true.iteritems():
print(i, ':', j)
# ====== binary ====== #
print('\n******** Binary problem:')
report([bin(i) for i in y_true],
[1 if i > 0 else 0 for i in y_pred])
# ====== FL-ST ====== #
print('\n******** FL-ST problem:')
report([flst(i) for i in y_true],
convert_to_flst(y_true, y_pred))
# ====== ALL ====== #
print('\n******** ALL %d problem:' % len(laugh_labels))
report([allab(i) for i in y_true],
convert_to_all(y_true, y_pred))
def evaluate(model_path, threshold=0.5):
from odin import backend as K
if not os.path.exists(model_path):
model_path = get_modelpath(name=model_path, override=False)
f, args = cPickle.load(open(model_path, 'r'))
print('======== Configuration ========')
for i, j in args.iteritems():
print(i, ':', j)
print()
print('======== Loading data ========')
test = [get_data(path, args['feat'], args['stack'],
args['ctx'], args['hop'], args['mode'],
args['bmode'], ncpu=2)[-1]
for path in data_order]
for i, j in zip(data_order, test):
print('Test %s:' % i, j.shape)
print('Building predict function ...')
K.set_training(False)
print('Input shape:', (None,) + test[0].shape[1:])
X = K.placeholder(shape=(None,) + test[0].shape[1:], name='X')
f_pred = K.function(X, f(X))
for name, data in zip(data_order, test):
print('=' * 30, name, '=' * 30) # print header
y_true = []
y_pred = []
nb_classes = 0
prog = Progbar(target=data.shape[0], title=name)
for X, y in data.set_batch(batch_size=args['bs'] * 3, seed=None):
_ = f_pred(X)
nb_classes = _.shape[-1]
_ = _ >= threshold if nb_classes == 1 else np.argmax(_, -1)
y_pred.append(_.astype('int32'))
y_true.append(y)
prog.add(X.shape[0])
# ====== report ====== #
prog.update(data.shape[0])
y_pred = np.concatenate(y_pred, axis=0)
y_true = np.concatenate(y_true, axis=0)
report_func(y_true, y_pred, 2 if nb_classes == 1 else nb_classes)
exit()
| 33.534799
| 101
| 0.518187
|
1b98a017bc628a35aa80ac880da460199d38bdc8
| 3,746
|
py
|
Python
|
apps/auth/fixtures.py
|
Eudorajab1/websaw
|
7c3a369789d23ac699868fa1eff6c63e3e5c1e36
|
[
"MIT"
] | 1
|
2022-03-29T00:12:12.000Z
|
2022-03-29T00:12:12.000Z
|
apps/auth/fixtures.py
|
Eudorajab1/websaw
|
7c3a369789d23ac699868fa1eff6c63e3e5c1e36
|
[
"MIT"
] | null | null | null |
apps/auth/fixtures.py
|
Eudorajab1/websaw
|
7c3a369789d23ac699868fa1eff6c63e3e5c1e36
|
[
"MIT"
] | null | null | null |
from websaw import DefaultApp, DefaultContext, XAuth, AuthErr, redirect
from websaw.core import request, Fixture, BaseContext
from pprint import pprint
class Auth(XAuth):
def take_on(self, ctx: BaseContext):
self.data.ctx = ctx
self.data.db = ctx.auth_db
self.data.session = ctx.session
self.data.cuser = ctx.current_user
self.data.user = self.data.cuser.user
self.data.shared_data = ctx.state.shared_data
if not self.data.shared_data.get('template_context', None):
            self.data.shared_data['template_context'] = {}  # initialise it
self.data.shared_data['template_context']['auth_user'] = self.data.cuser.user
flash = self.data.session.get('message', None)
if flash:
f_message = dict(message=flash['message'], _class=flash['_class'])
self.data.shared_data['template_context']['flash'] = f_message
self.data.session['message'] = None
def user_by_login(self, login: str) -> dict:
login = login.lower()
db = self.data.ctx.auth_db
user = db(db.auth_user.username == login).select().first()
return user
def user_for_session(self, user):
suser = super().user_for_session(user)
suser['email'] = user['email']
suser['username'] = user['username']
suser['first_name'] = user['first_name']
suser['last_name'] = user['last_name']
return suser
def register(self, fields):
db = self.data.ctx.auth_db
ret = db.auth_user.insert(**fields)
db.commit()
return ret
def update_profile(self, user_id, fields):
db = self.data.ctx.auth_db
res = db.auth_user(user_id).update_record(
username = fields["username"],
email = fields["email"],
first_name = fields["first_name"],
last_name = fields["last_name"],
)
db.commit()
        print('Res in update_profile is ', res)
if res:
self.store_user_in_session(res.as_dict())
return res['id']
else:
            return dict(message='Could not update profile', _class='danger')
def has_membership(self, role):
user_id = self.user_id
db = self.data.db
belongs = db(db.auth_membership.user_id == user_id).select()
for b in belongs:
if db.auth_roles(b.role_id).role == role:
return True
return False
auth = Auth()
class Flash(Fixture):
def take_on(self, ctx: BaseContext):
self.data.ctx = ctx
self.data.message = None
self.data._class = None
self.data.session = ctx.session
self.data.shared_data = ctx.state.shared_data
if not self.data.shared_data.get('template_context', None):
            self.data.shared_data['template_context'] = {}  # initialise shared data
flash = self.data.session.get('message', None)
if flash:
mess = flash['message']
cl = flash['_class']
self.set(mess, cl)
self.data.session['message'] = None
self.data.shared_data['template_context']['flash'] = flash
return True
else:
return False
def store_message_in_session(self, message: dict):
session = self.data.session
session["message"] = message
def set(self, message, _class):
self.data.message = message
self.data._class = _class
f_message = dict(message=message, _class=_class)
flash = self.store_message_in_session(f_message)
self.data.shared_data['template_context']['flash'] = f_message
return flash
flash = Flash()
| 36.019231
| 85
| 0.599039
|
aa5ff839cefe5bbec114956e9a9593d3945beeb0
| 321,430
|
py
|
Python
|
pyboto3/docdb.py
|
gehad-shaat/pyboto3
|
4a0c2851a8bc04fb1c71c36086f7bb257e48181d
|
[
"MIT"
] | 91
|
2016-12-31T11:38:37.000Z
|
2021-09-16T19:33:23.000Z
|
pyboto3/docdb.py
|
gehad-shaat/pyboto3
|
4a0c2851a8bc04fb1c71c36086f7bb257e48181d
|
[
"MIT"
] | 7
|
2017-01-02T18:54:23.000Z
|
2020-08-11T13:54:02.000Z
|
pyboto3/docdb.py
|
gehad-shaat/pyboto3
|
4a0c2851a8bc04fb1c71c36086f7bb257e48181d
|
[
"MIT"
] | 26
|
2016-12-31T13:11:00.000Z
|
2022-03-03T21:01:12.000Z
|
'''
The MIT License (MIT)
Copyright (c) 2016 WavyCloud
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
def add_tags_to_resource(ResourceName=None, Tags=None):
"""
Adds metadata tags to an Amazon DocumentDB resource. You can use these tags with cost allocation reporting to track costs that are associated with Amazon DocumentDB resources, or in a Condition statement in an AWS Identity and Access Management (IAM) policy for Amazon DocumentDB.
See also: AWS API Documentation
Exceptions
:example: response = client.add_tags_to_resource(
ResourceName='string',
Tags=[
{
'Key': 'string',
'Value': 'string'
},
]
)
:type ResourceName: string
:param ResourceName: [REQUIRED]\nThe Amazon DocumentDB resource that the tags are added to. This value is an Amazon Resource Name (ARN).\n
:type Tags: list
:param Tags: [REQUIRED]\nThe tags to be assigned to the Amazon DocumentDB resource.\n\n(dict) --Metadata assigned to an Amazon DocumentDB resource consisting of a key-value pair.\n\nKey (string) --The required name of the tag. The string value can be from 1 to 128 Unicode characters in length and can\'t be prefixed with 'aws:' or 'rds:'. The string can contain only the set of Unicode letters, digits, white space, \'_\', \'.\', \'/\', \'=\', \'+\', \'-\' (Java regex: '^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$').\n\nValue (string) --The optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and can\'t be prefixed with 'aws:' or 'rds:'. The string can contain only the set of Unicode letters, digits, white space, \'_\', \'.\', \'/\', \'=\', \'+\', \'-\' (Java regex: '^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$').\n\n\n\n\n
:returns:
DocDB.Client.exceptions.DBInstanceNotFoundFault
DocDB.Client.exceptions.DBSnapshotNotFoundFault
DocDB.Client.exceptions.DBClusterNotFoundFault
"""
pass
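# Illustrative sketch (editor's addition, not part of the generated stub): with
# the real boto3 library this operation is invoked on a 'docdb' client. The ARN,
# tag values and region below are placeholders; boto3 must be installed and AWS
# credentials configured for the call to succeed.
def _example_add_tags_to_resource():
    import boto3
    client = boto3.client('docdb', region_name='us-east-1')
    return client.add_tags_to_resource(
        ResourceName='arn:aws:rds:us-east-1:123456789012:cluster:sample-cluster',
        Tags=[{'Key': 'environment', 'Value': 'dev'}],
    )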
def apply_pending_maintenance_action(ResourceIdentifier=None, ApplyAction=None, OptInType=None):
"""
Applies a pending maintenance action to a resource (for example, to a DB instance).
See also: AWS API Documentation
Exceptions
:example: response = client.apply_pending_maintenance_action(
ResourceIdentifier='string',
ApplyAction='string',
OptInType='string'
)
:type ResourceIdentifier: string
:param ResourceIdentifier: [REQUIRED]\nThe Amazon Resource Name (ARN) of the resource that the pending maintenance action applies to.\n
:type ApplyAction: string
:param ApplyAction: [REQUIRED]\nThe pending maintenance action to apply to this resource.\nValid values: system-update , db-upgrade\n
:type OptInType: string
:param OptInType: [REQUIRED]\nA value that specifies the type of opt-in request or undoes an opt-in request. An opt-in request of type immediate can\'t be undone.\nValid values:\n\nimmediate - Apply the maintenance action immediately.\nnext-maintenance - Apply the maintenance action during the next maintenance window for the resource.\nundo-opt-in - Cancel any existing next-maintenance opt-in requests.\n\n
:rtype: dict
Returns: Response Syntax
{
'ResourcePendingMaintenanceActions': {
'ResourceIdentifier': 'string',
'PendingMaintenanceActionDetails': [
{
'Action': 'string',
'AutoAppliedAfterDate': datetime(2015, 1, 1),
'ForcedApplyDate': datetime(2015, 1, 1),
'OptInStatus': 'string',
'CurrentApplyDate': datetime(2015, 1, 1),
'Description': 'string'
},
]
}
}
Response Structure
(dict) --
ResourcePendingMaintenanceActions (dict) --
Represents the output of ApplyPendingMaintenanceAction .
ResourceIdentifier (string) --
The Amazon Resource Name (ARN) of the resource that has pending maintenance actions.
PendingMaintenanceActionDetails (list) --
A list that provides details about the pending maintenance actions for the resource.
(dict) --
Provides information about a pending maintenance action for a resource.
Action (string) --
The type of pending maintenance action that is available for the resource.
AutoAppliedAfterDate (datetime) --
The date of the maintenance window when the action is applied. The maintenance action is applied to the resource during its first maintenance window after this date. If this date is specified, any next-maintenance opt-in requests are ignored.
ForcedApplyDate (datetime) --
The date when the maintenance action is automatically applied. The maintenance action is applied to the resource on this date regardless of the maintenance window for the resource. If this date is specified, any immediate opt-in requests are ignored.
OptInStatus (string) --
Indicates the type of opt-in request that has been received for the resource.
CurrentApplyDate (datetime) --
The effective date when the pending maintenance action is applied to the resource.
Description (string) --
A description providing more detail about the maintenance action.
Exceptions
DocDB.Client.exceptions.ResourceNotFoundFault
DocDB.Client.exceptions.InvalidDBClusterStateFault
DocDB.Client.exceptions.InvalidDBInstanceStateFault
:return: {
'ResourcePendingMaintenanceActions': {
'ResourceIdentifier': 'string',
'PendingMaintenanceActionDetails': [
{
'Action': 'string',
'AutoAppliedAfterDate': datetime(2015, 1, 1),
'ForcedApplyDate': datetime(2015, 1, 1),
'OptInStatus': 'string',
'CurrentApplyDate': datetime(2015, 1, 1),
'Description': 'string'
},
]
}
}
:returns:
DocDB.Client.exceptions.ResourceNotFoundFault
DocDB.Client.exceptions.InvalidDBClusterStateFault
DocDB.Client.exceptions.InvalidDBInstanceStateFault
"""
pass
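# Illustrative sketch (editor's addition): opting a cluster in to a pending
# maintenance action during its next maintenance window. The ARN is a
# placeholder; valid ApplyAction values are listed in the docstring above.
def _example_apply_pending_maintenance_action():
    import boto3
    client = boto3.client('docdb', region_name='us-east-1')
    return client.apply_pending_maintenance_action(
        ResourceIdentifier='arn:aws:rds:us-east-1:123456789012:cluster:sample-cluster',
        ApplyAction='system-update',
        OptInType='next-maintenance',
    )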
def can_paginate(operation_name=None):
"""
Check if an operation can be paginated.
:type operation_name: string
:param operation_name: The operation name. This is the same name\nas the method name on the client. For example, if the\nmethod name is create_foo, and you\'d normally invoke the\noperation as client.create_foo(**kwargs), if the\ncreate_foo operation can be paginated, you can use the\ncall client.get_paginator('create_foo').
"""
pass
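# Illustrative sketch (editor's addition): can_paginate() is typically used to
# decide between a paginator and a plain call. 'describe_db_clusters' is an
# example operation name on the real boto3 docdb client; region and credentials
# are assumed to be configured.
def _example_can_paginate():
    import boto3
    client = boto3.client('docdb', region_name='us-east-1')
    if client.can_paginate('describe_db_clusters'):
        pages = client.get_paginator('describe_db_clusters').paginate()
        return [cluster for page in pages for cluster in page['DBClusters']]
    return client.describe_db_clusters()['DBClusters']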
def copy_db_cluster_parameter_group(SourceDBClusterParameterGroupIdentifier=None, TargetDBClusterParameterGroupIdentifier=None, TargetDBClusterParameterGroupDescription=None, Tags=None):
"""
Copies the specified cluster parameter group.
See also: AWS API Documentation
Exceptions
:example: response = client.copy_db_cluster_parameter_group(
SourceDBClusterParameterGroupIdentifier='string',
TargetDBClusterParameterGroupIdentifier='string',
TargetDBClusterParameterGroupDescription='string',
Tags=[
{
'Key': 'string',
'Value': 'string'
},
]
)
:type SourceDBClusterParameterGroupIdentifier: string
:param SourceDBClusterParameterGroupIdentifier: [REQUIRED]\nThe identifier or Amazon Resource Name (ARN) for the source cluster parameter group.\nConstraints:\n\nMust specify a valid cluster parameter group.\nIf the source cluster parameter group is in the same AWS Region as the copy, specify a valid parameter group identifier; for example, my-db-cluster-param-group , or a valid ARN.\nIf the source parameter group is in a different AWS Region than the copy, specify a valid cluster parameter group ARN; for example, arn:aws:rds:us-east-1:123456789012:cluster-pg:custom-cluster-group1 .\n\n
:type TargetDBClusterParameterGroupIdentifier: string
:param TargetDBClusterParameterGroupIdentifier: [REQUIRED]\nThe identifier for the copied cluster parameter group.\nConstraints:\n\nCannot be null, empty, or blank.\nMust contain from 1 to 255 letters, numbers, or hyphens.\nThe first character must be a letter.\nCannot end with a hyphen or contain two consecutive hyphens.\n\nExample: my-cluster-param-group1\n
:type TargetDBClusterParameterGroupDescription: string
:param TargetDBClusterParameterGroupDescription: [REQUIRED]\nA description for the copied cluster parameter group.\n
:type Tags: list
:param Tags: The tags that are to be assigned to the parameter group.\n\n(dict) --Metadata assigned to an Amazon DocumentDB resource consisting of a key-value pair.\n\nKey (string) --The required name of the tag. The string value can be from 1 to 128 Unicode characters in length and can\'t be prefixed with 'aws:' or 'rds:'. The string can contain only the set of Unicode letters, digits, white space, \'_\', \'.\', \'/\', \'=\', \'+\', \'-\' (Java regex: '^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$').\n\nValue (string) --The optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and can\'t be prefixed with 'aws:' or 'rds:'. The string can contain only the set of Unicode letters, digits, white space, \'_\', \'.\', \'/\', \'=\', \'+\', \'-\' (Java regex: '^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$').\n\n\n\n\n
:rtype: dict
Returns: Response Syntax
{
'DBClusterParameterGroup': {
'DBClusterParameterGroupName': 'string',
'DBParameterGroupFamily': 'string',
'Description': 'string',
'DBClusterParameterGroupArn': 'string'
}
}
Response Structure
(dict) --
DBClusterParameterGroup (dict) --
Detailed information about a cluster parameter group.
DBClusterParameterGroupName (string) --
Provides the name of the cluster parameter group.
DBParameterGroupFamily (string) --
Provides the name of the parameter group family that this cluster parameter group is compatible with.
Description (string) --
Provides the customer-specified description for this cluster parameter group.
DBClusterParameterGroupArn (string) --
The Amazon Resource Name (ARN) for the cluster parameter group.
Exceptions
DocDB.Client.exceptions.DBParameterGroupNotFoundFault
DocDB.Client.exceptions.DBParameterGroupQuotaExceededFault
DocDB.Client.exceptions.DBParameterGroupAlreadyExistsFault
:return: {
'DBClusterParameterGroup': {
'DBClusterParameterGroupName': 'string',
'DBParameterGroupFamily': 'string',
'Description': 'string',
'DBClusterParameterGroupArn': 'string'
}
}
:returns:
DocDB.Client.exceptions.DBParameterGroupNotFoundFault
DocDB.Client.exceptions.DBParameterGroupQuotaExceededFault
DocDB.Client.exceptions.DBParameterGroupAlreadyExistsFault
"""
pass
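# Illustrative sketch (editor's addition): copying a parameter group before
# customising it, so the default group is left untouched. All identifiers are
# placeholders.
def _example_copy_db_cluster_parameter_group():
    import boto3
    client = boto3.client('docdb', region_name='us-east-1')
    return client.copy_db_cluster_parameter_group(
        SourceDBClusterParameterGroupIdentifier='default.docdb4.0',
        TargetDBClusterParameterGroupIdentifier='my-custom-param-group',
        TargetDBClusterParameterGroupDescription='Copy of default.docdb4.0 for tuning',
    )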
def copy_db_cluster_snapshot(SourceDBClusterSnapshotIdentifier=None, TargetDBClusterSnapshotIdentifier=None, KmsKeyId=None, PreSignedUrl=None, CopyTags=None, Tags=None):
"""
Copies a snapshot of a cluster.
To copy a cluster snapshot from a shared manual cluster snapshot, SourceDBClusterSnapshotIdentifier must be the Amazon Resource Name (ARN) of the shared cluster snapshot.
To cancel the copy operation after it is in progress, delete the target cluster snapshot identified by TargetDBClusterSnapshotIdentifier while that DB cluster snapshot is in the copying status.
See also: AWS API Documentation
Exceptions
:example: response = client.copy_db_cluster_snapshot(
SourceDBClusterSnapshotIdentifier='string',
TargetDBClusterSnapshotIdentifier='string',
KmsKeyId='string',
PreSignedUrl='string',
CopyTags=True|False,
Tags=[
{
'Key': 'string',
'Value': 'string'
},
]
)
:type SourceDBClusterSnapshotIdentifier: string
:param SourceDBClusterSnapshotIdentifier: [REQUIRED]\nThe identifier of the cluster snapshot to copy. This parameter is not case sensitive.\nYou can\'t copy an encrypted, shared cluster snapshot from one AWS Region to another.\nConstraints:\n\nMust specify a valid system snapshot in the 'available' state.\nIf the source snapshot is in the same AWS Region as the copy, specify a valid snapshot identifier.\nIf the source snapshot is in a different AWS Region than the copy, specify a valid cluster snapshot ARN.\n\nExample: my-cluster-snapshot1\n
:type TargetDBClusterSnapshotIdentifier: string
:param TargetDBClusterSnapshotIdentifier: [REQUIRED]\nThe identifier of the new cluster snapshot to create from the source cluster snapshot. This parameter is not case sensitive.\nConstraints:\n\nMust contain from 1 to 63 letters, numbers, or hyphens.\nThe first character must be a letter.\nCannot end with a hyphen or contain two consecutive hyphens.\n\nExample: my-cluster-snapshot2\n
:type KmsKeyId: string
:param KmsKeyId: The AWS KMS key ID for an encrypted cluster snapshot. The AWS KMS key ID is the Amazon Resource Name (ARN), AWS KMS key identifier, or the AWS KMS key alias for the AWS KMS encryption key.\nIf you copy an encrypted cluster snapshot from your AWS account, you can specify a value for KmsKeyId to encrypt the copy with a new AWS KMS encryption key. If you don\'t specify a value for KmsKeyId , then the copy of the cluster snapshot is encrypted with the same AWS KMS key as the source cluster snapshot.\nIf you copy an encrypted cluster snapshot that is shared from another AWS account, then you must specify a value for KmsKeyId .\nTo copy an encrypted cluster snapshot to another AWS Region, set KmsKeyId to the AWS KMS key ID that you want to use to encrypt the copy of the cluster snapshot in the destination Region. AWS KMS encryption keys are specific to the AWS Region that they are created in, and you can\'t use encryption keys from one Region in another Region.\nIf you copy an unencrypted cluster snapshot and specify a value for the KmsKeyId parameter, an error is returned.\n
:type PreSignedUrl: string
:param PreSignedUrl: The URL that contains a Signature Version 4 signed request for the CopyDBClusterSnapshot API action in the AWS Region that contains the source cluster snapshot to copy. You must use the PreSignedUrl parameter when copying an encrypted cluster snapshot from another AWS Region.\nThe presigned URL must be a valid request for the CopyDBSClusterSnapshot API action that can be executed in the source AWS Region that contains the encrypted DB cluster snapshot to be copied. The presigned URL request must contain the following parameter values:\n\nKmsKeyId - The AWS KMS key identifier for the key to use to encrypt the copy of the cluster snapshot in the destination AWS Region. This is the same identifier for both the CopyDBClusterSnapshot action that is called in the destination AWS Region, and the action contained in the presigned URL.\nDestinationRegion - The name of the AWS Region that the DB cluster snapshot will be created in.\nSourceDBClusterSnapshotIdentifier - The cluster snapshot identifier for the encrypted cluster snapshot to be copied. This identifier must be in the Amazon Resource Name (ARN) format for the source AWS Region. For example, if you are copying an encrypted cluster snapshot from the us-west-2 AWS Region, then your SourceDBClusterSnapshotIdentifier looks like the following example: arn:aws:rds:us-west-2:123456789012:cluster-snapshot:my-cluster-snapshot-20161115 .\n\n
:type CopyTags: boolean
:param CopyTags: Set to true to copy all tags from the source cluster snapshot to the target cluster snapshot, and otherwise false . The default is false .
:type Tags: list
:param Tags: The tags to be assigned to the cluster snapshot.\n\n(dict) --Metadata assigned to an Amazon DocumentDB resource consisting of a key-value pair.\n\nKey (string) --The required name of the tag. The string value can be from 1 to 128 Unicode characters in length and can\'t be prefixed with 'aws:' or 'rds:'. The string can contain only the set of Unicode letters, digits, white space, \'_\', \'.\', \'/\', \'=\', \'+\', \'-\' (Java regex: '^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$').\n\nValue (string) --The optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and can\'t be prefixed with 'aws:' or 'rds:'. The string can contain only the set of Unicode letters, digits, white space, \'_\', \'.\', \'/\', \'=\', \'+\', \'-\' (Java regex: '^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$').\n\n\n\n\n
:rtype: dict
Returns: Response Syntax
{
'DBClusterSnapshot': {
'AvailabilityZones': [
'string',
],
'DBClusterSnapshotIdentifier': 'string',
'DBClusterIdentifier': 'string',
'SnapshotCreateTime': datetime(2015, 1, 1),
'Engine': 'string',
'Status': 'string',
'Port': 123,
'VpcId': 'string',
'ClusterCreateTime': datetime(2015, 1, 1),
'MasterUsername': 'string',
'EngineVersion': 'string',
'SnapshotType': 'string',
'PercentProgress': 123,
'StorageEncrypted': True|False,
'KmsKeyId': 'string',
'DBClusterSnapshotArn': 'string',
'SourceDBClusterSnapshotArn': 'string'
}
}
Response Structure
(dict) --
DBClusterSnapshot (dict) --
Detailed information about a cluster snapshot.
AvailabilityZones (list) --
Provides the list of Amazon EC2 Availability Zones that instances in the cluster snapshot can be restored in.
(string) --
DBClusterSnapshotIdentifier (string) --
Specifies the identifier for the cluster snapshot.
DBClusterIdentifier (string) --
Specifies the cluster identifier of the cluster that this cluster snapshot was created from.
SnapshotCreateTime (datetime) --
Provides the time when the snapshot was taken, in UTC.
Engine (string) --
Specifies the name of the database engine.
Status (string) --
Specifies the status of this cluster snapshot.
Port (integer) --
Specifies the port that the cluster was listening on at the time of the snapshot.
VpcId (string) --
Provides the virtual private cloud (VPC) ID that is associated with the cluster snapshot.
ClusterCreateTime (datetime) --
Specifies the time when the cluster was created, in Universal Coordinated Time (UTC).
MasterUsername (string) --
Provides the master user name for the cluster snapshot.
EngineVersion (string) --
Provides the version of the database engine for this cluster snapshot.
SnapshotType (string) --
Provides the type of the cluster snapshot.
PercentProgress (integer) --
Specifies the percentage of the estimated data that has been transferred.
StorageEncrypted (boolean) --
Specifies whether the cluster snapshot is encrypted.
KmsKeyId (string) --
If StorageEncrypted is true , the AWS KMS key identifier for the encrypted cluster snapshot.
DBClusterSnapshotArn (string) --
The Amazon Resource Name (ARN) for the cluster snapshot.
SourceDBClusterSnapshotArn (string) --
If the cluster snapshot was copied from a source cluster snapshot, the ARN for the source cluster snapshot; otherwise, a null value.
Exceptions
DocDB.Client.exceptions.DBClusterSnapshotAlreadyExistsFault
DocDB.Client.exceptions.DBClusterSnapshotNotFoundFault
DocDB.Client.exceptions.InvalidDBClusterStateFault
DocDB.Client.exceptions.InvalidDBClusterSnapshotStateFault
DocDB.Client.exceptions.SnapshotQuotaExceededFault
DocDB.Client.exceptions.KMSKeyNotAccessibleFault
:return: {
'DBClusterSnapshot': {
'AvailabilityZones': [
'string',
],
'DBClusterSnapshotIdentifier': 'string',
'DBClusterIdentifier': 'string',
'SnapshotCreateTime': datetime(2015, 1, 1),
'Engine': 'string',
'Status': 'string',
'Port': 123,
'VpcId': 'string',
'ClusterCreateTime': datetime(2015, 1, 1),
'MasterUsername': 'string',
'EngineVersion': 'string',
'SnapshotType': 'string',
'PercentProgress': 123,
'StorageEncrypted': True|False,
'KmsKeyId': 'string',
'DBClusterSnapshotArn': 'string',
'SourceDBClusterSnapshotArn': 'string'
}
}
:returns:
(string) --
"""
pass
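# Illustrative sketch (editor's addition): copying a manual snapshot within the
# same Region and carrying its tags over. Identifiers are placeholders;
# KmsKeyId and PreSignedUrl are only needed for encrypted or cross-Region
# copies, as described in the docstring above.
def _example_copy_db_cluster_snapshot():
    import boto3
    client = boto3.client('docdb', region_name='us-east-1')
    return client.copy_db_cluster_snapshot(
        SourceDBClusterSnapshotIdentifier='my-cluster-snapshot1',
        TargetDBClusterSnapshotIdentifier='my-cluster-snapshot1-copy',
        CopyTags=True,
    )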
def create_db_cluster(AvailabilityZones=None, BackupRetentionPeriod=None, DBClusterIdentifier=None, DBClusterParameterGroupName=None, VpcSecurityGroupIds=None, DBSubnetGroupName=None, Engine=None, EngineVersion=None, Port=None, MasterUsername=None, MasterUserPassword=None, PreferredBackupWindow=None, PreferredMaintenanceWindow=None, Tags=None, StorageEncrypted=None, KmsKeyId=None, EnableCloudwatchLogsExports=None, DeletionProtection=None):
"""
Creates a new Amazon DocumentDB cluster.
See also: AWS API Documentation
Exceptions
:example: response = client.create_db_cluster(
AvailabilityZones=[
'string',
],
BackupRetentionPeriod=123,
DBClusterIdentifier='string',
DBClusterParameterGroupName='string',
VpcSecurityGroupIds=[
'string',
],
DBSubnetGroupName='string',
Engine='string',
EngineVersion='string',
Port=123,
MasterUsername='string',
MasterUserPassword='string',
PreferredBackupWindow='string',
PreferredMaintenanceWindow='string',
Tags=[
{
'Key': 'string',
'Value': 'string'
},
],
StorageEncrypted=True|False,
KmsKeyId='string',
EnableCloudwatchLogsExports=[
'string',
],
DeletionProtection=True|False
)
:type AvailabilityZones: list
:param AvailabilityZones: A list of Amazon EC2 Availability Zones that instances in the cluster can be created in.\n\n(string) --\n\n
:type BackupRetentionPeriod: integer
:param BackupRetentionPeriod: The number of days for which automated backups are retained. You must specify a minimum value of 1.\nDefault: 1\nConstraints:\n\nMust be a value from 1 to 35.\n\n
:type DBClusterIdentifier: string
:param DBClusterIdentifier: [REQUIRED]\nThe cluster identifier. This parameter is stored as a lowercase string.\nConstraints:\n\nMust contain from 1 to 63 letters, numbers, or hyphens.\nThe first character must be a letter.\nCannot end with a hyphen or contain two consecutive hyphens.\n\nExample: my-cluster\n
:type DBClusterParameterGroupName: string
:param DBClusterParameterGroupName: The name of the cluster parameter group to associate with this cluster.
:type VpcSecurityGroupIds: list
:param VpcSecurityGroupIds: A list of EC2 VPC security groups to associate with this cluster.\n\n(string) --\n\n
:type DBSubnetGroupName: string
:param DBSubnetGroupName: A subnet group to associate with this cluster.\nConstraints: Must match the name of an existing DBSubnetGroup . Must not be default.\nExample: mySubnetgroup\n
:type Engine: string
:param Engine: [REQUIRED]\nThe name of the database engine to be used for this cluster.\nValid values: docdb\n
:type EngineVersion: string
:param EngineVersion: The version number of the database engine to use.
:type Port: integer
:param Port: The port number on which the instances in the cluster accept connections.
:type MasterUsername: string
:param MasterUsername: [REQUIRED]\nThe name of the master user for the cluster.\nConstraints:\n\nMust be from 1 to 63 letters or numbers.\nThe first character must be a letter.\nCannot be a reserved word for the chosen database engine.\n\n
:type MasterUserPassword: string
:param MasterUserPassword: [REQUIRED]\nThe password for the master database user. This password can contain any printable ASCII character except forward slash (/), double quote ('), or the 'at' symbol (@).\nConstraints: Must contain from 8 to 100 characters.\n
:type PreferredBackupWindow: string
:param PreferredBackupWindow: The daily time range during which automated backups are created if automated backups are enabled using the BackupRetentionPeriod parameter.\nThe default is a 30-minute window selected at random from an 8-hour block of time for each AWS Region.\nConstraints:\n\nMust be in the format hh24:mi-hh24:mi .\nMust be in Universal Coordinated Time (UTC).\nMust not conflict with the preferred maintenance window.\nMust be at least 30 minutes.\n\n
:type PreferredMaintenanceWindow: string
:param PreferredMaintenanceWindow: The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).\nFormat: ddd:hh24:mi-ddd:hh24:mi\nThe default is a 30-minute window selected at random from an 8-hour block of time for each AWS Region, occurring on a random day of the week.\nValid days: Mon, Tue, Wed, Thu, Fri, Sat, Sun\nConstraints: Minimum 30-minute window.\n
:type Tags: list
:param Tags: The tags to be assigned to the cluster.\n\n(dict) --Metadata assigned to an Amazon DocumentDB resource consisting of a key-value pair.\n\nKey (string) --The required name of the tag. The string value can be from 1 to 128 Unicode characters in length and can\'t be prefixed with 'aws:' or 'rds:'. The string can contain only the set of Unicode letters, digits, white space, \'_\', \'.\', \'/\', \'=\', \'+\', \'-\' (Java regex: '^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$').\n\nValue (string) --The optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and can\'t be prefixed with 'aws:' or 'rds:'. The string can contain only the set of Unicode letters, digits, white space, \'_\', \'.\', \'/\', \'=\', \'+\', \'-\' (Java regex: '^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$').\n\n\n\n\n
:type StorageEncrypted: boolean
:param StorageEncrypted: Specifies whether the cluster is encrypted.
:type KmsKeyId: string
:param KmsKeyId: The AWS KMS key identifier for an encrypted cluster.\nThe AWS KMS key identifier is the Amazon Resource Name (ARN) for the AWS KMS encryption key. If you are creating a cluster using the same AWS account that owns the AWS KMS encryption key that is used to encrypt the new cluster, you can use the AWS KMS key alias instead of the ARN for the AWS KMS encryption key.\nIf an encryption key is not specified in KmsKeyId :\n\nIf ReplicationSourceIdentifier identifies an encrypted source, then Amazon DocumentDB uses the encryption key that is used to encrypt the source. Otherwise, Amazon DocumentDB uses your default encryption key.\nIf the StorageEncrypted parameter is true and ReplicationSourceIdentifier is not specified, Amazon DocumentDB uses your default encryption key.\n\nAWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS Region.\nIf you create a replica of an encrypted cluster in another AWS Region, you must set KmsKeyId to a KMS key ID that is valid in the destination AWS Region. This key is used to encrypt the replica in that AWS Region.\n
:type EnableCloudwatchLogsExports: list
:param EnableCloudwatchLogsExports: A list of log types that need to be enabled for exporting to Amazon CloudWatch Logs.\n\n(string) --\n\n
:type DeletionProtection: boolean
:param DeletionProtection: Specifies whether this cluster can be deleted. If DeletionProtection is enabled, the cluster cannot be deleted unless it is modified and DeletionProtection is disabled. DeletionProtection protects clusters from being accidentally deleted.
:rtype: dict
Returns: Response Syntax
{
'DBCluster': {
'AvailabilityZones': [
'string',
],
'BackupRetentionPeriod': 123,
'DBClusterIdentifier': 'string',
'DBClusterParameterGroup': 'string',
'DBSubnetGroup': 'string',
'Status': 'string',
'PercentProgress': 'string',
'EarliestRestorableTime': datetime(2015, 1, 1),
'Endpoint': 'string',
'ReaderEndpoint': 'string',
'MultiAZ': True|False,
'Engine': 'string',
'EngineVersion': 'string',
'LatestRestorableTime': datetime(2015, 1, 1),
'Port': 123,
'MasterUsername': 'string',
'PreferredBackupWindow': 'string',
'PreferredMaintenanceWindow': 'string',
'DBClusterMembers': [
{
'DBInstanceIdentifier': 'string',
'IsClusterWriter': True|False,
'DBClusterParameterGroupStatus': 'string',
'PromotionTier': 123
},
],
'VpcSecurityGroups': [
{
'VpcSecurityGroupId': 'string',
'Status': 'string'
},
],
'HostedZoneId': 'string',
'StorageEncrypted': True|False,
'KmsKeyId': 'string',
'DbClusterResourceId': 'string',
'DBClusterArn': 'string',
'AssociatedRoles': [
{
'RoleArn': 'string',
'Status': 'string'
},
],
'ClusterCreateTime': datetime(2015, 1, 1),
'EnabledCloudwatchLogsExports': [
'string',
],
'DeletionProtection': True|False
}
}
Response Structure
(dict) --
DBCluster (dict) --
Detailed information about a cluster.
AvailabilityZones (list) --
Provides the list of Amazon EC2 Availability Zones that instances in the cluster can be created in.
(string) --
BackupRetentionPeriod (integer) --
Specifies the number of days for which automatic snapshots are retained.
DBClusterIdentifier (string) --
Contains a user-supplied cluster identifier. This identifier is the unique key that identifies a cluster.
DBClusterParameterGroup (string) --
Specifies the name of the cluster parameter group for the cluster.
DBSubnetGroup (string) --
Specifies information on the subnet group that is associated with the cluster, including the name, description, and subnets in the subnet group.
Status (string) --
Specifies the current state of this cluster.
PercentProgress (string) --
Specifies the progress of the operation as a percentage.
EarliestRestorableTime (datetime) --
The earliest time to which a database can be restored with point-in-time restore.
Endpoint (string) --
Specifies the connection endpoint for the primary instance of the cluster.
ReaderEndpoint (string) --
The reader endpoint for the cluster. The reader endpoint for a cluster load balances connections across the Amazon DocumentDB replicas that are available in a cluster. As clients request new connections to the reader endpoint, Amazon DocumentDB distributes the connection requests among the Amazon DocumentDB replicas in the cluster. This functionality can help balance your read workload across multiple Amazon DocumentDB replicas in your cluster.
If a failover occurs, and the Amazon DocumentDB replica that you are connected to is promoted to be the primary instance, your connection is dropped. To continue sending your read workload to other Amazon DocumentDB replicas in the cluster, you can then reconnect to the reader endpoint.
MultiAZ (boolean) --
Specifies whether the cluster has instances in multiple Availability Zones.
Engine (string) --
Provides the name of the database engine to be used for this cluster.
EngineVersion (string) --
Indicates the database engine version.
LatestRestorableTime (datetime) --
Specifies the latest time to which a database can be restored with point-in-time restore.
Port (integer) --
Specifies the port that the database engine is listening on.
MasterUsername (string) --
Contains the master user name for the cluster.
PreferredBackupWindow (string) --
Specifies the daily time range during which automated backups are created if automated backups are enabled, as determined by the BackupRetentionPeriod .
PreferredMaintenanceWindow (string) --
Specifies the weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).
DBClusterMembers (list) --
Provides the list of instances that make up the cluster.
(dict) --
Contains information about an instance that is part of a cluster.
DBInstanceIdentifier (string) --
Specifies the instance identifier for this member of the cluster.
IsClusterWriter (boolean) --
A value that is true if the cluster member is the primary instance for the cluster and false otherwise.
DBClusterParameterGroupStatus (string) --
Specifies the status of the cluster parameter group for this member of the DB cluster.
PromotionTier (integer) --
A value that specifies the order in which an Amazon DocumentDB replica is promoted to the primary instance after a failure of the existing primary instance.
VpcSecurityGroups (list) --
Provides a list of virtual private cloud (VPC) security groups that the cluster belongs to.
(dict) --
Used as a response element for queries on virtual private cloud (VPC) security group membership.
VpcSecurityGroupId (string) --
The name of the VPC security group.
Status (string) --
The status of the VPC security group.
HostedZoneId (string) --
Specifies the ID that Amazon Route 53 assigns when you create a hosted zone.
StorageEncrypted (boolean) --
Specifies whether the cluster is encrypted.
KmsKeyId (string) --
If StorageEncrypted is true , the AWS KMS key identifier for the encrypted cluster.
DbClusterResourceId (string) --
The AWS Region-unique, immutable identifier for the cluster. This identifier is found in AWS CloudTrail log entries whenever the AWS KMS key for the cluster is accessed.
DBClusterArn (string) --
The Amazon Resource Name (ARN) for the cluster.
AssociatedRoles (list) --
Provides a list of the AWS Identity and Access Management (IAM) roles that are associated with the cluster. IAM roles that are associated with a cluster grant permission for the cluster to access other AWS services on your behalf.
(dict) --
Describes an AWS Identity and Access Management (IAM) role that is associated with a cluster.
RoleArn (string) --
The Amazon Resource Name (ARN) of the IAM role that is associated with the DB cluster.
Status (string) --
Describes the state of association between the IAM role and the cluster. The Status property returns one of the following values:
ACTIVE - The IAM role ARN is associated with the cluster and can be used to access other AWS services on your behalf.
PENDING - The IAM role ARN is being associated with the DB cluster.
INVALID - The IAM role ARN is associated with the cluster, but the cluster cannot assume the IAM role to access other AWS services on your behalf.
ClusterCreateTime (datetime) --
Specifies the time when the cluster was created, in Universal Coordinated Time (UTC).
EnabledCloudwatchLogsExports (list) --
A list of log types that this cluster is configured to export to Amazon CloudWatch Logs.
(string) --
DeletionProtection (boolean) --
Specifies whether this cluster can be deleted. If DeletionProtection is enabled, the cluster cannot be deleted unless it is modified and DeletionProtection is disabled. DeletionProtection protects clusters from being accidentally deleted.
Exceptions
DocDB.Client.exceptions.DBClusterAlreadyExistsFault
DocDB.Client.exceptions.InsufficientStorageClusterCapacityFault
DocDB.Client.exceptions.DBClusterQuotaExceededFault
DocDB.Client.exceptions.StorageQuotaExceededFault
DocDB.Client.exceptions.DBSubnetGroupNotFoundFault
DocDB.Client.exceptions.InvalidVPCNetworkStateFault
DocDB.Client.exceptions.InvalidDBClusterStateFault
DocDB.Client.exceptions.InvalidDBSubnetGroupStateFault
DocDB.Client.exceptions.InvalidSubnet
DocDB.Client.exceptions.InvalidDBInstanceStateFault
DocDB.Client.exceptions.DBClusterParameterGroupNotFoundFault
DocDB.Client.exceptions.KMSKeyNotAccessibleFault
DocDB.Client.exceptions.DBClusterNotFoundFault
DocDB.Client.exceptions.DBInstanceNotFoundFault
DocDB.Client.exceptions.DBSubnetGroupDoesNotCoverEnoughAZs
:return: {
'DBCluster': {
'AvailabilityZones': [
'string',
],
'BackupRetentionPeriod': 123,
'DBClusterIdentifier': 'string',
'DBClusterParameterGroup': 'string',
'DBSubnetGroup': 'string',
'Status': 'string',
'PercentProgress': 'string',
'EarliestRestorableTime': datetime(2015, 1, 1),
'Endpoint': 'string',
'ReaderEndpoint': 'string',
'MultiAZ': True|False,
'Engine': 'string',
'EngineVersion': 'string',
'LatestRestorableTime': datetime(2015, 1, 1),
'Port': 123,
'MasterUsername': 'string',
'PreferredBackupWindow': 'string',
'PreferredMaintenanceWindow': 'string',
'DBClusterMembers': [
{
'DBInstanceIdentifier': 'string',
'IsClusterWriter': True|False,
'DBClusterParameterGroupStatus': 'string',
'PromotionTier': 123
},
],
'VpcSecurityGroups': [
{
'VpcSecurityGroupId': 'string',
'Status': 'string'
},
],
'HostedZoneId': 'string',
'StorageEncrypted': True|False,
'KmsKeyId': 'string',
'DbClusterResourceId': 'string',
'DBClusterArn': 'string',
'AssociatedRoles': [
{
'RoleArn': 'string',
'Status': 'string'
},
],
'ClusterCreateTime': datetime(2015, 1, 1),
'EnabledCloudwatchLogsExports': [
'string',
],
'DeletionProtection': True|False
}
}
:returns:
(string) --
"""
pass
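# Illustrative sketch (editor's addition): a minimal encrypted cluster. Per the
# docstring above, only the identifier, engine and master credentials are
# required; everything else falls back to documented defaults. All values are
# placeholders.
def _example_create_db_cluster():
    import boto3
    client = boto3.client('docdb', region_name='us-east-1')
    return client.create_db_cluster(
        DBClusterIdentifier='my-cluster',
        Engine='docdb',
        MasterUsername='masteruser',
        MasterUserPassword='change-me-please',  # 8-100 printable chars, no / " or @
        StorageEncrypted=True,
        DeletionProtection=True,
    )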
def create_db_cluster_parameter_group(DBClusterParameterGroupName=None, DBParameterGroupFamily=None, Description=None, Tags=None):
"""
Creates a new cluster parameter group.
Parameters in a cluster parameter group apply to all of the instances in a DB cluster.
A cluster parameter group is initially created with the default parameters for the database engine used by instances in the cluster. To provide custom values for any of the parameters, you must modify the group after you create it. After you create a DB cluster parameter group, you must associate it with your cluster. For the new DB cluster parameter group and associated settings to take effect, you must then reboot the instances in the cluster without failover.
See also: AWS API Documentation
Exceptions
:example: response = client.create_db_cluster_parameter_group(
DBClusterParameterGroupName='string',
DBParameterGroupFamily='string',
Description='string',
Tags=[
{
'Key': 'string',
'Value': 'string'
},
]
)
:type DBClusterParameterGroupName: string
:param DBClusterParameterGroupName: [REQUIRED]\nThe name of the cluster parameter group.\nConstraints:\n\nMust not match the name of an existing DBClusterParameterGroup .\n\n\nNote\nThis value is stored as a lowercase string.\n\n
:type DBParameterGroupFamily: string
:param DBParameterGroupFamily: [REQUIRED]\nThe cluster parameter group family name.\n
:type Description: string
:param Description: [REQUIRED]\nThe description for the cluster parameter group.\n
:type Tags: list
:param Tags: The tags to be assigned to the cluster parameter group.\n\n(dict) --Metadata assigned to an Amazon DocumentDB resource consisting of a key-value pair.\n\nKey (string) --The required name of the tag. The string value can be from 1 to 128 Unicode characters in length and can\'t be prefixed with 'aws:' or 'rds:'. The string can contain only the set of Unicode letters, digits, white space, \'_\', \'.\', \'/\', \'=\', \'+\', \'-\' (Java regex: '^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$').\n\nValue (string) --The optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and can\'t be prefixed with 'aws:' or 'rds:'. The string can contain only the set of Unicode letters, digits, white space, \'_\', \'.\', \'/\', \'=\', \'+\', \'-\' (Java regex: '^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$').\n\n\n\n\n
:rtype: dict
Returns: Response Syntax
{
'DBClusterParameterGroup': {
'DBClusterParameterGroupName': 'string',
'DBParameterGroupFamily': 'string',
'Description': 'string',
'DBClusterParameterGroupArn': 'string'
}
}
Response Structure
(dict) --
DBClusterParameterGroup (dict) --
Detailed information about a cluster parameter group.
DBClusterParameterGroupName (string) --
Provides the name of the cluster parameter group.
DBParameterGroupFamily (string) --
Provides the name of the parameter group family that this cluster parameter group is compatible with.
Description (string) --
Provides the customer-specified description for this cluster parameter group.
DBClusterParameterGroupArn (string) --
The Amazon Resource Name (ARN) for the cluster parameter group.
Exceptions
DocDB.Client.exceptions.DBParameterGroupQuotaExceededFault
DocDB.Client.exceptions.DBParameterGroupAlreadyExistsFault
:return: {
'DBClusterParameterGroup': {
'DBClusterParameterGroupName': 'string',
'DBParameterGroupFamily': 'string',
'Description': 'string',
'DBClusterParameterGroupArn': 'string'
}
}
:returns:
DocDB.Client.exceptions.DBParameterGroupQuotaExceededFault
DocDB.Client.exceptions.DBParameterGroupAlreadyExistsFault
"""
pass
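# Illustrative sketch (editor's addition): creating an empty custom parameter
# group. 'docdb4.0' is an assumed family name; after creation the group still
# has to be modified and associated with a cluster, as the docstring above
# notes.
def _example_create_db_cluster_parameter_group():
    import boto3
    client = boto3.client('docdb', region_name='us-east-1')
    return client.create_db_cluster_parameter_group(
        DBClusterParameterGroupName='my-param-group',
        DBParameterGroupFamily='docdb4.0',
        Description='Custom parameter group for my-cluster',
    )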
def create_db_cluster_snapshot(DBClusterSnapshotIdentifier=None, DBClusterIdentifier=None, Tags=None):
"""
Creates a snapshot of a cluster.
See also: AWS API Documentation
Exceptions
:example: response = client.create_db_cluster_snapshot(
DBClusterSnapshotIdentifier='string',
DBClusterIdentifier='string',
Tags=[
{
'Key': 'string',
'Value': 'string'
},
]
)
:type DBClusterSnapshotIdentifier: string
:param DBClusterSnapshotIdentifier: [REQUIRED]\nThe identifier of the cluster snapshot. This parameter is stored as a lowercase string.\nConstraints:\n\nMust contain from 1 to 63 letters, numbers, or hyphens.\nThe first character must be a letter.\nCannot end with a hyphen or contain two consecutive hyphens.\n\nExample: my-cluster-snapshot1\n
:type DBClusterIdentifier: string
:param DBClusterIdentifier: [REQUIRED]\nThe identifier of the cluster to create a snapshot for. This parameter is not case sensitive.\nConstraints:\n\nMust match the identifier of an existing DBCluster .\n\nExample: my-cluster\n
:type Tags: list
:param Tags: The tags to be assigned to the cluster snapshot.\n\n(dict) --Metadata assigned to an Amazon DocumentDB resource consisting of a key-value pair.\n\nKey (string) --The required name of the tag. The string value can be from 1 to 128 Unicode characters in length and can\'t be prefixed with 'aws:' or 'rds:'. The string can contain only the set of Unicode letters, digits, white space, \'_\', \'.\', \'/\', \'=\', \'+\', \'-\' (Java regex: '^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$').\n\nValue (string) --The optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and can\'t be prefixed with 'aws:' or 'rds:'. The string can contain only the set of Unicode letters, digits, white space, \'_\', \'.\', \'/\', \'=\', \'+\', \'-\' (Java regex: '^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$').\n\n\n\n\n
:rtype: dict
Returns: Response Syntax
{
'DBClusterSnapshot': {
'AvailabilityZones': [
'string',
],
'DBClusterSnapshotIdentifier': 'string',
'DBClusterIdentifier': 'string',
'SnapshotCreateTime': datetime(2015, 1, 1),
'Engine': 'string',
'Status': 'string',
'Port': 123,
'VpcId': 'string',
'ClusterCreateTime': datetime(2015, 1, 1),
'MasterUsername': 'string',
'EngineVersion': 'string',
'SnapshotType': 'string',
'PercentProgress': 123,
'StorageEncrypted': True|False,
'KmsKeyId': 'string',
'DBClusterSnapshotArn': 'string',
'SourceDBClusterSnapshotArn': 'string'
}
}
Response Structure
(dict) --
DBClusterSnapshot (dict) --
Detailed information about a cluster snapshot.
AvailabilityZones (list) --
Provides the list of Amazon EC2 Availability Zones that instances in the cluster snapshot can be restored in.
(string) --
DBClusterSnapshotIdentifier (string) --
Specifies the identifier for the cluster snapshot.
DBClusterIdentifier (string) --
Specifies the cluster identifier of the cluster that this cluster snapshot was created from.
SnapshotCreateTime (datetime) --
Provides the time when the snapshot was taken, in UTC.
Engine (string) --
Specifies the name of the database engine.
Status (string) --
Specifies the status of this cluster snapshot.
Port (integer) --
Specifies the port that the cluster was listening on at the time of the snapshot.
VpcId (string) --
Provides the virtual private cloud (VPC) ID that is associated with the cluster snapshot.
ClusterCreateTime (datetime) --
Specifies the time when the cluster was created, in Universal Coordinated Time (UTC).
MasterUsername (string) --
Provides the master user name for the cluster snapshot.
EngineVersion (string) --
Provides the version of the database engine for this cluster snapshot.
SnapshotType (string) --
Provides the type of the cluster snapshot.
PercentProgress (integer) --
Specifies the percentage of the estimated data that has been transferred.
StorageEncrypted (boolean) --
Specifies whether the cluster snapshot is encrypted.
KmsKeyId (string) --
If StorageEncrypted is true , the AWS KMS key identifier for the encrypted cluster snapshot.
DBClusterSnapshotArn (string) --
The Amazon Resource Name (ARN) for the cluster snapshot.
SourceDBClusterSnapshotArn (string) --
If the cluster snapshot was copied from a source cluster snapshot, the ARN for the source cluster snapshot; otherwise, a null value.
Exceptions
DocDB.Client.exceptions.DBClusterSnapshotAlreadyExistsFault
DocDB.Client.exceptions.InvalidDBClusterStateFault
DocDB.Client.exceptions.DBClusterNotFoundFault
DocDB.Client.exceptions.SnapshotQuotaExceededFault
DocDB.Client.exceptions.InvalidDBClusterSnapshotStateFault
:return: {
'DBClusterSnapshot': {
'AvailabilityZones': [
'string',
],
'DBClusterSnapshotIdentifier': 'string',
'DBClusterIdentifier': 'string',
'SnapshotCreateTime': datetime(2015, 1, 1),
'Engine': 'string',
'Status': 'string',
'Port': 123,
'VpcId': 'string',
'ClusterCreateTime': datetime(2015, 1, 1),
'MasterUsername': 'string',
'EngineVersion': 'string',
'SnapshotType': 'string',
'PercentProgress': 123,
'StorageEncrypted': True|False,
'KmsKeyId': 'string',
'DBClusterSnapshotArn': 'string',
'SourceDBClusterSnapshotArn': 'string'
}
}
:returns:
(string) --
"""
pass
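# Illustrative sketch (editor's addition): taking a manual snapshot of an
# existing cluster and tagging it. Identifiers and tag values are placeholders.
def _example_create_db_cluster_snapshot():
    import boto3
    client = boto3.client('docdb', region_name='us-east-1')
    return client.create_db_cluster_snapshot(
        DBClusterSnapshotIdentifier='my-cluster-snapshot1',
        DBClusterIdentifier='my-cluster',
        Tags=[{'Key': 'purpose', 'Value': 'pre-upgrade'}],
    )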
def create_db_instance(DBInstanceIdentifier=None, DBInstanceClass=None, Engine=None, AvailabilityZone=None, PreferredMaintenanceWindow=None, AutoMinorVersionUpgrade=None, Tags=None, DBClusterIdentifier=None, PromotionTier=None):
"""
Creates a new instance.
See also: AWS API Documentation
Exceptions
:example: response = client.create_db_instance(
DBInstanceIdentifier='string',
DBInstanceClass='string',
Engine='string',
AvailabilityZone='string',
PreferredMaintenanceWindow='string',
AutoMinorVersionUpgrade=True|False,
Tags=[
{
'Key': 'string',
'Value': 'string'
},
],
DBClusterIdentifier='string',
PromotionTier=123
)
:type DBInstanceIdentifier: string
:param DBInstanceIdentifier: [REQUIRED]\nThe instance identifier. This parameter is stored as a lowercase string.\nConstraints:\n\nMust contain from 1 to 63 letters, numbers, or hyphens.\nThe first character must be a letter.\nCannot end with a hyphen or contain two consecutive hyphens.\n\nExample: mydbinstance\n
:type DBInstanceClass: string
:param DBInstanceClass: [REQUIRED]\nThe compute and memory capacity of the instance; for example, db.r5.large .\n
:type Engine: string
:param Engine: [REQUIRED]\nThe name of the database engine to be used for this instance.\nValid value: docdb\n
:type AvailabilityZone: string
:param AvailabilityZone: The Amazon EC2 Availability Zone that the instance is created in.\nDefault: A random, system-chosen Availability Zone in the endpoint\'s AWS Region.\nExample: us-east-1d\nConstraint: The AvailabilityZone parameter can\'t be specified if the MultiAZ parameter is set to true . The specified Availability Zone must be in the same AWS Region as the current endpoint.\n
:type PreferredMaintenanceWindow: string
:param PreferredMaintenanceWindow: The time range each week during which system maintenance can occur, in Universal Coordinated Time (UTC).\nFormat: ddd:hh24:mi-ddd:hh24:mi\nThe default is a 30-minute window selected at random from an 8-hour block of time for each AWS Region, occurring on a random day of the week.\nValid days: Mon, Tue, Wed, Thu, Fri, Sat, Sun\nConstraints: Minimum 30-minute window.\n
:type AutoMinorVersionUpgrade: boolean
:param AutoMinorVersionUpgrade: Indicates that minor engine upgrades are applied automatically to the instance during the maintenance window.\nDefault: true\n
:type Tags: list
:param Tags: The tags to be assigned to the instance. You can assign up to 10 tags to an instance.\n\n(dict) --Metadata assigned to an Amazon DocumentDB resource consisting of a key-value pair.\n\nKey (string) --The required name of the tag. The string value can be from 1 to 128 Unicode characters in length and can\'t be prefixed with 'aws:' or 'rds:'. The string can contain only the set of Unicode letters, digits, white space, \'_\', \'.\', \'/\', \'=\', \'+\', \'-\' (Java regex: '^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$').\n\nValue (string) --The optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and can\'t be prefixed with 'aws:' or 'rds:'. The string can contain only the set of Unicode letters, digits, white space, \'_\', \'.\', \'/\', \'=\', \'+\', \'-\' (Java regex: '^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$').\n\n\n\n\n
:type DBClusterIdentifier: string
:param DBClusterIdentifier: [REQUIRED]\nThe identifier of the cluster that the instance will belong to.\n
:type PromotionTier: integer
:param PromotionTier: A value that specifies the order in which an Amazon DocumentDB replica is promoted to the primary instance after a failure of the existing primary instance.\nDefault: 1\nValid values: 0-15\n
:rtype: dict
Returns: Response Syntax
{
'DBInstance': {
'DBInstanceIdentifier': 'string',
'DBInstanceClass': 'string',
'Engine': 'string',
'DBInstanceStatus': 'string',
'Endpoint': {
'Address': 'string',
'Port': 123,
'HostedZoneId': 'string'
},
'InstanceCreateTime': datetime(2015, 1, 1),
'PreferredBackupWindow': 'string',
'BackupRetentionPeriod': 123,
'VpcSecurityGroups': [
{
'VpcSecurityGroupId': 'string',
'Status': 'string'
},
],
'AvailabilityZone': 'string',
'DBSubnetGroup': {
'DBSubnetGroupName': 'string',
'DBSubnetGroupDescription': 'string',
'VpcId': 'string',
'SubnetGroupStatus': 'string',
'Subnets': [
{
'SubnetIdentifier': 'string',
'SubnetAvailabilityZone': {
'Name': 'string'
},
'SubnetStatus': 'string'
},
],
'DBSubnetGroupArn': 'string'
},
'PreferredMaintenanceWindow': 'string',
'PendingModifiedValues': {
'DBInstanceClass': 'string',
'AllocatedStorage': 123,
'MasterUserPassword': 'string',
'Port': 123,
'BackupRetentionPeriod': 123,
'MultiAZ': True|False,
'EngineVersion': 'string',
'LicenseModel': 'string',
'Iops': 123,
'DBInstanceIdentifier': 'string',
'StorageType': 'string',
'CACertificateIdentifier': 'string',
'DBSubnetGroupName': 'string',
'PendingCloudwatchLogsExports': {
'LogTypesToEnable': [
'string',
],
'LogTypesToDisable': [
'string',
]
}
},
'LatestRestorableTime': datetime(2015, 1, 1),
'EngineVersion': 'string',
'AutoMinorVersionUpgrade': True|False,
'PubliclyAccessible': True|False,
'StatusInfos': [
{
'StatusType': 'string',
'Normal': True|False,
'Status': 'string',
'Message': 'string'
},
],
'DBClusterIdentifier': 'string',
'StorageEncrypted': True|False,
'KmsKeyId': 'string',
'DbiResourceId': 'string',
'CACertificateIdentifier': 'string',
'PromotionTier': 123,
'DBInstanceArn': 'string',
'EnabledCloudwatchLogsExports': [
'string',
]
}
}
Response Structure
(dict) --
DBInstance (dict) --
Detailed information about an instance.
DBInstanceIdentifier (string) --
Contains a user-provided database identifier. This identifier is the unique key that identifies an instance.
DBInstanceClass (string) --
Contains the name of the compute and memory capacity class of the instance.
Engine (string) --
Provides the name of the database engine to be used for this instance.
DBInstanceStatus (string) --
Specifies the current state of this database.
Endpoint (dict) --
Specifies the connection endpoint.
Address (string) --
Specifies the DNS address of the instance.
Port (integer) --
Specifies the port that the database engine is listening on.
HostedZoneId (string) --
Specifies the ID that Amazon Route 53 assigns when you create a hosted zone.
InstanceCreateTime (datetime) --
Provides the date and time that the instance was created.
PreferredBackupWindow (string) --
Specifies the daily time range during which automated backups are created if automated backups are enabled, as determined by the BackupRetentionPeriod .
BackupRetentionPeriod (integer) --
Specifies the number of days for which automatic snapshots are retained.
VpcSecurityGroups (list) --
Provides a list of VPC security group elements that the instance belongs to.
(dict) --
Used as a response element for queries on virtual private cloud (VPC) security group membership.
VpcSecurityGroupId (string) --
The name of the VPC security group.
Status (string) --
The status of the VPC security group.
AvailabilityZone (string) --
Specifies the name of the Availability Zone that the instance is located in.
DBSubnetGroup (dict) --
Specifies information on the subnet group that is associated with the instance, including the name, description, and subnets in the subnet group.
DBSubnetGroupName (string) --
The name of the subnet group.
DBSubnetGroupDescription (string) --
Provides the description of the subnet group.
VpcId (string) --
Provides the virtual private cloud (VPC) ID of the subnet group.
SubnetGroupStatus (string) --
Provides the status of the subnet group.
Subnets (list) --
Detailed information about one or more subnets within a subnet group.
(dict) --
Detailed information about a subnet.
SubnetIdentifier (string) --
Specifies the identifier of the subnet.
SubnetAvailabilityZone (dict) --
Specifies the Availability Zone for the subnet.
Name (string) --
The name of the Availability Zone.
SubnetStatus (string) --
Specifies the status of the subnet.
DBSubnetGroupArn (string) --
The Amazon Resource Name (ARN) for the DB subnet group.
PreferredMaintenanceWindow (string) --
Specifies the weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).
PendingModifiedValues (dict) --
Specifies that changes to the instance are pending. This element is included only when changes are pending. Specific changes are identified by subelements.
DBInstanceClass (string) --
Contains the new DBInstanceClass for the instance that will be applied or is currently being applied.
AllocatedStorage (integer) --
Contains the new AllocatedStorage size for the instance that will be applied or is currently being applied.
MasterUserPassword (string) --
Contains the pending or currently in-progress change of the master credentials for the instance.
Port (integer) --
Specifies the pending port for the instance.
BackupRetentionPeriod (integer) --
Specifies the pending number of days for which automated backups are retained.
MultiAZ (boolean) --
Indicates that the Single-AZ instance is to change to a Multi-AZ deployment.
EngineVersion (string) --
Indicates the database engine version.
LicenseModel (string) --
The license model for the instance.
Valid values: license-included , bring-your-own-license , general-public-license
Iops (integer) --
Specifies the new Provisioned IOPS value for the instance that will be applied or is currently being applied.
DBInstanceIdentifier (string) --
Contains the new DBInstanceIdentifier for the instance that will be applied or is currently being applied.
StorageType (string) --
Specifies the storage type to be associated with the instance.
CACertificateIdentifier (string) --
Specifies the identifier of the certificate authority (CA) certificate for the DB instance.
DBSubnetGroupName (string) --
The new subnet group for the instance.
PendingCloudwatchLogsExports (dict) --
A list of the log types whose configuration is still pending. These log types are in the process of being activated or deactivated.
LogTypesToEnable (list) --
Log types that are in the process of being activated. After they are activated, these log types are exported to Amazon CloudWatch Logs.
(string) --
LogTypesToDisable (list) --
Log types that are in the process of being deactivated. After they are deactivated, these log types aren\'t exported to CloudWatch Logs.
(string) --
LatestRestorableTime (datetime) --
Specifies the latest time to which a database can be restored with point-in-time restore.
EngineVersion (string) --
Indicates the database engine version.
AutoMinorVersionUpgrade (boolean) --
Indicates that minor version patches are applied automatically.
PubliclyAccessible (boolean) --
Not supported. Amazon DocumentDB does not currently support public endpoints. The value of PubliclyAccessible is always false .
StatusInfos (list) --
The status of a read replica. If the instance is not a read replica, this is blank.
(dict) --
Provides a list of status information for an instance.
StatusType (string) --
This value is currently "read replication ."
Normal (boolean) --
A Boolean value that is true if the instance is operating normally, or false if the instance is in an error state.
Status (string) --
Status of the instance. For a StatusType of read replica, the values can be replicating , error, stopped , or terminated .
Message (string) --
Details of the error if there is an error for the instance. If the instance is not in an error state, this value is blank.
DBClusterIdentifier (string) --
Contains the name of the cluster that the instance is a member of if the instance is a member of a cluster.
StorageEncrypted (boolean) --
Specifies whether or not the instance is encrypted.
KmsKeyId (string) --
If StorageEncrypted is true , the AWS KMS key identifier for the encrypted instance.
DbiResourceId (string) --
The AWS Region-unique, immutable identifier for the instance. This identifier is found in AWS CloudTrail log entries whenever the AWS KMS key for the instance is accessed.
CACertificateIdentifier (string) --
The identifier of the CA certificate for this DB instance.
PromotionTier (integer) --
A value that specifies the order in which an Amazon DocumentDB replica is promoted to the primary instance after a failure of the existing primary instance.
DBInstanceArn (string) --
The Amazon Resource Name (ARN) for the instance.
EnabledCloudwatchLogsExports (list) --
A list of log types that this instance is configured to export to Amazon CloudWatch Logs.
(string) --
Exceptions
DocDB.Client.exceptions.DBInstanceAlreadyExistsFault
DocDB.Client.exceptions.InsufficientDBInstanceCapacityFault
DocDB.Client.exceptions.DBParameterGroupNotFoundFault
DocDB.Client.exceptions.DBSecurityGroupNotFoundFault
DocDB.Client.exceptions.InstanceQuotaExceededFault
DocDB.Client.exceptions.StorageQuotaExceededFault
DocDB.Client.exceptions.DBSubnetGroupNotFoundFault
DocDB.Client.exceptions.DBSubnetGroupDoesNotCoverEnoughAZs
DocDB.Client.exceptions.InvalidDBClusterStateFault
DocDB.Client.exceptions.InvalidSubnet
DocDB.Client.exceptions.InvalidVPCNetworkStateFault
DocDB.Client.exceptions.DBClusterNotFoundFault
DocDB.Client.exceptions.StorageTypeNotSupportedFault
DocDB.Client.exceptions.AuthorizationNotFoundFault
DocDB.Client.exceptions.KMSKeyNotAccessibleFault
:return: {
'DBInstance': {
'DBInstanceIdentifier': 'string',
'DBInstanceClass': 'string',
'Engine': 'string',
'DBInstanceStatus': 'string',
'Endpoint': {
'Address': 'string',
'Port': 123,
'HostedZoneId': 'string'
},
'InstanceCreateTime': datetime(2015, 1, 1),
'PreferredBackupWindow': 'string',
'BackupRetentionPeriod': 123,
'VpcSecurityGroups': [
{
'VpcSecurityGroupId': 'string',
'Status': 'string'
},
],
'AvailabilityZone': 'string',
'DBSubnetGroup': {
'DBSubnetGroupName': 'string',
'DBSubnetGroupDescription': 'string',
'VpcId': 'string',
'SubnetGroupStatus': 'string',
'Subnets': [
{
'SubnetIdentifier': 'string',
'SubnetAvailabilityZone': {
'Name': 'string'
},
'SubnetStatus': 'string'
},
],
'DBSubnetGroupArn': 'string'
},
'PreferredMaintenanceWindow': 'string',
'PendingModifiedValues': {
'DBInstanceClass': 'string',
'AllocatedStorage': 123,
'MasterUserPassword': 'string',
'Port': 123,
'BackupRetentionPeriod': 123,
'MultiAZ': True|False,
'EngineVersion': 'string',
'LicenseModel': 'string',
'Iops': 123,
'DBInstanceIdentifier': 'string',
'StorageType': 'string',
'CACertificateIdentifier': 'string',
'DBSubnetGroupName': 'string',
'PendingCloudwatchLogsExports': {
'LogTypesToEnable': [
'string',
],
'LogTypesToDisable': [
'string',
]
}
},
'LatestRestorableTime': datetime(2015, 1, 1),
'EngineVersion': 'string',
'AutoMinorVersionUpgrade': True|False,
'PubliclyAccessible': True|False,
'StatusInfos': [
{
'StatusType': 'string',
'Normal': True|False,
'Status': 'string',
'Message': 'string'
},
],
'DBClusterIdentifier': 'string',
'StorageEncrypted': True|False,
'KmsKeyId': 'string',
'DbiResourceId': 'string',
'CACertificateIdentifier': 'string',
'PromotionTier': 123,
'DBInstanceArn': 'string',
'EnabledCloudwatchLogsExports': [
'string',
]
}
}
:returns:
(string) --
"""
pass
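# Editor's illustrative sketch (not part of the generated stubs): a minimal helper
# showing how the 'DBInstance' response shape documented above could be inspected
# for pending CloudWatch Logs export changes. The helper name and the idea of
# post-processing the response are assumptions; only keys shown above are read.
def _example_pending_log_exports(db_instance):
    """Return (to_enable, to_disable) log-type lists from a DBInstance dict."""
    pending = db_instance.get('PendingModifiedValues', {})
    exports = pending.get('PendingCloudwatchLogsExports', {})
    return exports.get('LogTypesToEnable', []), exports.get('LogTypesToDisable', [])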
def create_db_subnet_group(DBSubnetGroupName=None, DBSubnetGroupDescription=None, SubnetIds=None, Tags=None):
"""
Creates a new subnet group. Subnet groups must contain at least one subnet in at least two Availability Zones in the AWS Region.
See also: AWS API Documentation
Exceptions
:example: response = client.create_db_subnet_group(
DBSubnetGroupName='string',
DBSubnetGroupDescription='string',
SubnetIds=[
'string',
],
Tags=[
{
'Key': 'string',
'Value': 'string'
},
]
)
:type DBSubnetGroupName: string
:param DBSubnetGroupName: [REQUIRED]\nThe name for the subnet group. This value is stored as a lowercase string.\nConstraints: Must contain no more than 255 letters, numbers, periods, underscores, spaces, or hyphens. Must not be default.\nExample: mySubnetgroup\n
:type DBSubnetGroupDescription: string
:param DBSubnetGroupDescription: [REQUIRED]\nThe description for the subnet group.\n
:type SubnetIds: list
:param SubnetIds: [REQUIRED]\nThe Amazon EC2 subnet IDs for the subnet group.\n\n(string) --\n\n
:type Tags: list
:param Tags: The tags to be assigned to the subnet group.\n\n(dict) --Metadata assigned to an Amazon DocumentDB resource consisting of a key-value pair.\n\nKey (string) --The required name of the tag. The string value can be from 1 to 128 Unicode characters in length and can\'t be prefixed with 'aws:' or 'rds:'. The string can contain only the set of Unicode letters, digits, white space, \'_\', \'.\', \'/\', \'=\', \'+\', \'-\' (Java regex: '^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$').\n\nValue (string) --The optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and can\'t be prefixed with 'aws:' or 'rds:'. The string can contain only the set of Unicode letters, digits, white space, \'_\', \'.\', \'/\', \'=\', \'+\', \'-\' (Java regex: '^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$').\n\n\n\n\n
:rtype: dict
ReturnsResponse Syntax
{
'DBSubnetGroup': {
'DBSubnetGroupName': 'string',
'DBSubnetGroupDescription': 'string',
'VpcId': 'string',
'SubnetGroupStatus': 'string',
'Subnets': [
{
'SubnetIdentifier': 'string',
'SubnetAvailabilityZone': {
'Name': 'string'
},
'SubnetStatus': 'string'
},
],
'DBSubnetGroupArn': 'string'
}
}
Response Structure
(dict) --
DBSubnetGroup (dict) --
Detailed information about a subnet group.
DBSubnetGroupName (string) --
The name of the subnet group.
DBSubnetGroupDescription (string) --
Provides the description of the subnet group.
VpcId (string) --
Provides the virtual private cloud (VPC) ID of the subnet group.
SubnetGroupStatus (string) --
Provides the status of the subnet group.
Subnets (list) --
Detailed information about one or more subnets within a subnet group.
(dict) --
Detailed information about a subnet.
SubnetIdentifier (string) --
Specifies the identifier of the subnet.
SubnetAvailabilityZone (dict) --
Specifies the Availability Zone for the subnet.
Name (string) --
The name of the Availability Zone.
SubnetStatus (string) --
Specifies the status of the subnet.
DBSubnetGroupArn (string) --
The Amazon Resource Name (ARN) for the DB subnet group.
Exceptions
DocDB.Client.exceptions.DBSubnetGroupAlreadyExistsFault
DocDB.Client.exceptions.DBSubnetGroupQuotaExceededFault
DocDB.Client.exceptions.DBSubnetQuotaExceededFault
DocDB.Client.exceptions.DBSubnetGroupDoesNotCoverEnoughAZs
DocDB.Client.exceptions.InvalidSubnet
:return: {
'DBSubnetGroup': {
'DBSubnetGroupName': 'string',
'DBSubnetGroupDescription': 'string',
'VpcId': 'string',
'SubnetGroupStatus': 'string',
'Subnets': [
{
'SubnetIdentifier': 'string',
'SubnetAvailabilityZone': {
'Name': 'string'
},
'SubnetStatus': 'string'
},
],
'DBSubnetGroupArn': 'string'
}
}
:returns:
DocDB.Client.exceptions.DBSubnetGroupAlreadyExistsFault
DocDB.Client.exceptions.DBSubnetGroupQuotaExceededFault
DocDB.Client.exceptions.DBSubnetQuotaExceededFault
DocDB.Client.exceptions.DBSubnetGroupDoesNotCoverEnoughAZs
DocDB.Client.exceptions.InvalidSubnet
"""
pass
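# Editor's illustrative sketch (not part of the generated stubs): one plausible
# create_db_subnet_group call built only from the parameters documented above.
# The client construction, subnet IDs, group name, and tag are placeholder
# assumptions; credentials and a default region are assumed to be configured.
def _example_create_db_subnet_group():
    import boto3
    client = boto3.client('docdb')
    response = client.create_db_subnet_group(
        DBSubnetGroupName='mysubnetgroup',  # stored as a lowercase string
        DBSubnetGroupDescription='Example DocumentDB subnet group',
        SubnetIds=['subnet-11111111', 'subnet-22222222'],  # placeholder IDs in two AZs
        Tags=[{'Key': 'env', 'Value': 'example'}],
    )
    return response['DBSubnetGroup']['DBSubnetGroupArn']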
def delete_db_cluster(DBClusterIdentifier=None, SkipFinalSnapshot=None, FinalDBSnapshotIdentifier=None):
"""
Deletes a previously provisioned cluster. When you delete a cluster, all automated backups for that cluster are deleted and can\'t be recovered. Manual DB cluster snapshots of the specified cluster are not deleted.
See also: AWS API Documentation
Exceptions
:example: response = client.delete_db_cluster(
DBClusterIdentifier='string',
SkipFinalSnapshot=True|False,
FinalDBSnapshotIdentifier='string'
)
:type DBClusterIdentifier: string
:param DBClusterIdentifier: [REQUIRED]\nThe cluster identifier for the cluster to be deleted. This parameter isn\'t case sensitive.\nConstraints:\n\nMust match an existing DBClusterIdentifier .\n\n
:type SkipFinalSnapshot: boolean
:param SkipFinalSnapshot: Determines whether a final cluster snapshot is created before the cluster is deleted. If true is specified, no cluster snapshot is created. If false is specified, a cluster snapshot is created before the DB cluster is deleted.\n\nNote\nIf SkipFinalSnapshot is false , you must specify a FinalDBSnapshotIdentifier parameter.\n\nDefault: false\n
:type FinalDBSnapshotIdentifier: string
:param FinalDBSnapshotIdentifier: The cluster snapshot identifier of the new cluster snapshot created when SkipFinalSnapshot is set to false .\n\nNote\nSpecifying this parameter and also setting the SkipFinalSnapshot parameter to true results in an error.\n\nConstraints:\n\nMust be from 1 to 255 letters, numbers, or hyphens.\nThe first character must be a letter.\nCannot end with a hyphen or contain two consecutive hyphens.\n\n
:rtype: dict
ReturnsResponse Syntax
{
'DBCluster': {
'AvailabilityZones': [
'string',
],
'BackupRetentionPeriod': 123,
'DBClusterIdentifier': 'string',
'DBClusterParameterGroup': 'string',
'DBSubnetGroup': 'string',
'Status': 'string',
'PercentProgress': 'string',
'EarliestRestorableTime': datetime(2015, 1, 1),
'Endpoint': 'string',
'ReaderEndpoint': 'string',
'MultiAZ': True|False,
'Engine': 'string',
'EngineVersion': 'string',
'LatestRestorableTime': datetime(2015, 1, 1),
'Port': 123,
'MasterUsername': 'string',
'PreferredBackupWindow': 'string',
'PreferredMaintenanceWindow': 'string',
'DBClusterMembers': [
{
'DBInstanceIdentifier': 'string',
'IsClusterWriter': True|False,
'DBClusterParameterGroupStatus': 'string',
'PromotionTier': 123
},
],
'VpcSecurityGroups': [
{
'VpcSecurityGroupId': 'string',
'Status': 'string'
},
],
'HostedZoneId': 'string',
'StorageEncrypted': True|False,
'KmsKeyId': 'string',
'DbClusterResourceId': 'string',
'DBClusterArn': 'string',
'AssociatedRoles': [
{
'RoleArn': 'string',
'Status': 'string'
},
],
'ClusterCreateTime': datetime(2015, 1, 1),
'EnabledCloudwatchLogsExports': [
'string',
],
'DeletionProtection': True|False
}
}
Response Structure
(dict) --
DBCluster (dict) --
Detailed information about a cluster.
AvailabilityZones (list) --
Provides the list of Amazon EC2 Availability Zones that instances in the cluster can be created in.
(string) --
BackupRetentionPeriod (integer) --
Specifies the number of days for which automatic snapshots are retained.
DBClusterIdentifier (string) --
Contains a user-supplied cluster identifier. This identifier is the unique key that identifies a cluster.
DBClusterParameterGroup (string) --
Specifies the name of the cluster parameter group for the cluster.
DBSubnetGroup (string) --
Specifies information on the subnet group that is associated with the cluster, including the name, description, and subnets in the subnet group.
Status (string) --
Specifies the current state of this cluster.
PercentProgress (string) --
Specifies the progress of the operation as a percentage.
EarliestRestorableTime (datetime) --
The earliest time to which a database can be restored with point-in-time restore.
Endpoint (string) --
Specifies the connection endpoint for the primary instance of the cluster.
ReaderEndpoint (string) --
The reader endpoint for the cluster. The reader endpoint for a cluster load balances connections across the Amazon DocumentDB replicas that are available in a cluster. As clients request new connections to the reader endpoint, Amazon DocumentDB distributes the connection requests among the Amazon DocumentDB replicas in the cluster. This functionality can help balance your read workload across multiple Amazon DocumentDB replicas in your cluster.
If a failover occurs, and the Amazon DocumentDB replica that you are connected to is promoted to be the primary instance, your connection is dropped. To continue sending your read workload to other Amazon DocumentDB replicas in the cluster, you can then reconnect to the reader endpoint.
MultiAZ (boolean) --
Specifies whether the cluster has instances in multiple Availability Zones.
Engine (string) --
Provides the name of the database engine to be used for this cluster.
EngineVersion (string) --
Indicates the database engine version.
LatestRestorableTime (datetime) --
Specifies the latest time to which a database can be restored with point-in-time restore.
Port (integer) --
Specifies the port that the database engine is listening on.
MasterUsername (string) --
Contains the master user name for the cluster.
PreferredBackupWindow (string) --
Specifies the daily time range during which automated backups are created if automated backups are enabled, as determined by the BackupRetentionPeriod .
PreferredMaintenanceWindow (string) --
Specifies the weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).
DBClusterMembers (list) --
Provides the list of instances that make up the cluster.
(dict) --
Contains information about an instance that is part of a cluster.
DBInstanceIdentifier (string) --
Specifies the instance identifier for this member of the cluster.
IsClusterWriter (boolean) --
A value that is true if the cluster member is the primary instance for the cluster and false otherwise.
DBClusterParameterGroupStatus (string) --
Specifies the status of the cluster parameter group for this member of the DB cluster.
PromotionTier (integer) --
A value that specifies the order in which an Amazon DocumentDB replica is promoted to the primary instance after a failure of the existing primary instance.
VpcSecurityGroups (list) --
Provides a list of virtual private cloud (VPC) security groups that the cluster belongs to.
(dict) --
Used as a response element for queries on virtual private cloud (VPC) security group membership.
VpcSecurityGroupId (string) --
The name of the VPC security group.
Status (string) --
The status of the VPC security group.
HostedZoneId (string) --
Specifies the ID that Amazon Route 53 assigns when you create a hosted zone.
StorageEncrypted (boolean) --
Specifies whether the cluster is encrypted.
KmsKeyId (string) --
If StorageEncrypted is true , the AWS KMS key identifier for the encrypted cluster.
DbClusterResourceId (string) --
The AWS Region-unique, immutable identifier for the cluster. This identifier is found in AWS CloudTrail log entries whenever the AWS KMS key for the cluster is accessed.
DBClusterArn (string) --
The Amazon Resource Name (ARN) for the cluster.
AssociatedRoles (list) --
Provides a list of the AWS Identity and Access Management (IAM) roles that are associated with the cluster. IAM roles that are associated with a cluster grant permission for the cluster to access other AWS services on your behalf.
(dict) --
Describes an AWS Identity and Access Management (IAM) role that is associated with a cluster.
RoleArn (string) --
The Amazon Resource Name (ARN) of the IAM role that is associated with the DB cluster.
Status (string) --
Describes the state of association between the IAM role and the cluster. The Status property returns one of the following values:
ACTIVE - The IAM role ARN is associated with the cluster and can be used to access other AWS services on your behalf.
PENDING - The IAM role ARN is being associated with the DB cluster.
INVALID - The IAM role ARN is associated with the cluster, but the cluster cannot assume the IAM role to access other AWS services on your behalf.
ClusterCreateTime (datetime) --
Specifies the time when the cluster was created, in Universal Coordinated Time (UTC).
EnabledCloudwatchLogsExports (list) --
A list of log types that this cluster is configured to export to Amazon CloudWatch Logs.
(string) --
DeletionProtection (boolean) --
Specifies whether this cluster can be deleted. If DeletionProtection is enabled, the cluster cannot be deleted unless it is modified and DeletionProtection is disabled. DeletionProtection protects clusters from being accidentally deleted.
Exceptions
DocDB.Client.exceptions.DBClusterNotFoundFault
DocDB.Client.exceptions.InvalidDBClusterStateFault
DocDB.Client.exceptions.DBClusterSnapshotAlreadyExistsFault
DocDB.Client.exceptions.SnapshotQuotaExceededFault
DocDB.Client.exceptions.InvalidDBClusterSnapshotStateFault
:return: {
'DBCluster': {
'AvailabilityZones': [
'string',
],
'BackupRetentionPeriod': 123,
'DBClusterIdentifier': 'string',
'DBClusterParameterGroup': 'string',
'DBSubnetGroup': 'string',
'Status': 'string',
'PercentProgress': 'string',
'EarliestRestorableTime': datetime(2015, 1, 1),
'Endpoint': 'string',
'ReaderEndpoint': 'string',
'MultiAZ': True|False,
'Engine': 'string',
'EngineVersion': 'string',
'LatestRestorableTime': datetime(2015, 1, 1),
'Port': 123,
'MasterUsername': 'string',
'PreferredBackupWindow': 'string',
'PreferredMaintenanceWindow': 'string',
'DBClusterMembers': [
{
'DBInstanceIdentifier': 'string',
'IsClusterWriter': True|False,
'DBClusterParameterGroupStatus': 'string',
'PromotionTier': 123
},
],
'VpcSecurityGroups': [
{
'VpcSecurityGroupId': 'string',
'Status': 'string'
},
],
'HostedZoneId': 'string',
'StorageEncrypted': True|False,
'KmsKeyId': 'string',
'DbClusterResourceId': 'string',
'DBClusterArn': 'string',
'AssociatedRoles': [
{
'RoleArn': 'string',
'Status': 'string'
},
],
'ClusterCreateTime': datetime(2015, 1, 1),
'EnabledCloudwatchLogsExports': [
'string',
],
'DeletionProtection': True|False
}
}
:returns:
(string) --
"""
pass
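# Editor's illustrative sketch (not part of the generated stubs): deleting a
# cluster while retaining a final snapshot, following the SkipFinalSnapshot /
# FinalDBSnapshotIdentifier semantics documented above. All identifiers and the
# client setup are placeholder assumptions.
def _example_delete_db_cluster_with_final_snapshot():
    import boto3
    client = boto3.client('docdb')
    response = client.delete_db_cluster(
        DBClusterIdentifier='example-cluster',
        SkipFinalSnapshot=False,  # a final cluster snapshot is created before deletion
        FinalDBSnapshotIdentifier='example-cluster-final-snapshot',
    )
    # The returned DBCluster reflects the cluster state reported by the call.
    return response['DBCluster']['Status']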
def delete_db_cluster_parameter_group(DBClusterParameterGroupName=None):
"""
Deletes a specified cluster parameter group. The cluster parameter group to be deleted can\'t be associated with any clusters.
See also: AWS API Documentation
Exceptions
:example: response = client.delete_db_cluster_parameter_group(
DBClusterParameterGroupName='string'
)
:type DBClusterParameterGroupName: string
:param DBClusterParameterGroupName: [REQUIRED]\nThe name of the cluster parameter group.\nConstraints:\n\nMust be the name of an existing cluster parameter group.\nYou can\'t delete a default cluster parameter group.\nCannot be associated with any clusters.\n\n
:returns:
DocDB.Client.exceptions.InvalidDBParameterGroupStateFault
DocDB.Client.exceptions.DBParameterGroupNotFoundFault
"""
pass
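# Editor's illustrative sketch (not part of the generated stubs): deleting a
# custom cluster parameter group that is not a default group and is not
# associated with any cluster, per the constraints above. The group name and
# client setup are placeholder assumptions.
def _example_delete_db_cluster_parameter_group():
    import boto3
    client = boto3.client('docdb')
    client.delete_db_cluster_parameter_group(
        DBClusterParameterGroupName='example-custom-params'
    )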
def delete_db_cluster_snapshot(DBClusterSnapshotIdentifier=None):
"""
Deletes a cluster snapshot. If the snapshot is being copied, the copy operation is terminated.
See also: AWS API Documentation
Exceptions
:example: response = client.delete_db_cluster_snapshot(
DBClusterSnapshotIdentifier='string'
)
:type DBClusterSnapshotIdentifier: string
:param DBClusterSnapshotIdentifier: [REQUIRED]\nThe identifier of the cluster snapshot to delete.\nConstraints: Must be the name of an existing cluster snapshot in the available state.\n
:rtype: dict
ReturnsResponse Syntax{
'DBClusterSnapshot': {
'AvailabilityZones': [
'string',
],
'DBClusterSnapshotIdentifier': 'string',
'DBClusterIdentifier': 'string',
'SnapshotCreateTime': datetime(2015, 1, 1),
'Engine': 'string',
'Status': 'string',
'Port': 123,
'VpcId': 'string',
'ClusterCreateTime': datetime(2015, 1, 1),
'MasterUsername': 'string',
'EngineVersion': 'string',
'SnapshotType': 'string',
'PercentProgress': 123,
'StorageEncrypted': True|False,
'KmsKeyId': 'string',
'DBClusterSnapshotArn': 'string',
'SourceDBClusterSnapshotArn': 'string'
}
}
Response Structure
(dict) --
DBClusterSnapshot (dict) --Detailed information about a cluster snapshot.
AvailabilityZones (list) --Provides the list of Amazon EC2 Availability Zones that instances in the cluster snapshot can be restored in.
(string) --
DBClusterSnapshotIdentifier (string) --Specifies the identifier for the cluster snapshot.
DBClusterIdentifier (string) --Specifies the cluster identifier of the cluster that this cluster snapshot was created from.
SnapshotCreateTime (datetime) --Provides the time when the snapshot was taken, in UTC.
Engine (string) --Specifies the name of the database engine.
Status (string) --Specifies the status of this cluster snapshot.
Port (integer) --Specifies the port that the cluster was listening on at the time of the snapshot.
VpcId (string) --Provides the virtual private cloud (VPC) ID that is associated with the cluster snapshot.
ClusterCreateTime (datetime) --Specifies the time when the cluster was created, in Universal Coordinated Time (UTC).
MasterUsername (string) --Provides the master user name for the cluster snapshot.
EngineVersion (string) --Provides the version of the database engine for this cluster snapshot.
SnapshotType (string) --Provides the type of the cluster snapshot.
PercentProgress (integer) --Specifies the percentage of the estimated data that has been transferred.
StorageEncrypted (boolean) --Specifies whether the cluster snapshot is encrypted.
KmsKeyId (string) --If StorageEncrypted is true , the AWS KMS key identifier for the encrypted cluster snapshot.
DBClusterSnapshotArn (string) --The Amazon Resource Name (ARN) for the cluster snapshot.
SourceDBClusterSnapshotArn (string) --If the cluster snapshot was copied from a source cluster snapshot, the ARN for the source cluster snapshot; otherwise, a null value.
Exceptions
DocDB.Client.exceptions.InvalidDBClusterSnapshotStateFault
DocDB.Client.exceptions.DBClusterSnapshotNotFoundFault
:return: {
'DBClusterSnapshot': {
'AvailabilityZones': [
'string',
],
'DBClusterSnapshotIdentifier': 'string',
'DBClusterIdentifier': 'string',
'SnapshotCreateTime': datetime(2015, 1, 1),
'Engine': 'string',
'Status': 'string',
'Port': 123,
'VpcId': 'string',
'ClusterCreateTime': datetime(2015, 1, 1),
'MasterUsername': 'string',
'EngineVersion': 'string',
'SnapshotType': 'string',
'PercentProgress': 123,
'StorageEncrypted': True|False,
'KmsKeyId': 'string',
'DBClusterSnapshotArn': 'string',
'SourceDBClusterSnapshotArn': 'string'
}
}
:returns:
DocDB.Client.exceptions.InvalidDBClusterSnapshotStateFault
DocDB.Client.exceptions.DBClusterSnapshotNotFoundFault
"""
pass
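# Editor's illustrative sketch (not part of the generated stubs): deleting a
# cluster snapshot that is in the available state, as required above. The
# snapshot identifier and client setup are placeholder assumptions.
def _example_delete_db_cluster_snapshot():
    import boto3
    client = boto3.client('docdb')
    response = client.delete_db_cluster_snapshot(
        DBClusterSnapshotIdentifier='example-cluster-final-snapshot'
    )
    return response['DBClusterSnapshot']['Status']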
def delete_db_instance(DBInstanceIdentifier=None):
"""
Deletes a previously provisioned instance.
See also: AWS API Documentation
Exceptions
:example: response = client.delete_db_instance(
DBInstanceIdentifier='string'
)
:type DBInstanceIdentifier: string
:param DBInstanceIdentifier: [REQUIRED]\nThe instance identifier for the instance to be deleted. This parameter isn\'t case sensitive.\nConstraints:\n\nMust match the name of an existing instance.\n\n
:rtype: dict
ReturnsResponse Syntax{
'DBInstance': {
'DBInstanceIdentifier': 'string',
'DBInstanceClass': 'string',
'Engine': 'string',
'DBInstanceStatus': 'string',
'Endpoint': {
'Address': 'string',
'Port': 123,
'HostedZoneId': 'string'
},
'InstanceCreateTime': datetime(2015, 1, 1),
'PreferredBackupWindow': 'string',
'BackupRetentionPeriod': 123,
'VpcSecurityGroups': [
{
'VpcSecurityGroupId': 'string',
'Status': 'string'
},
],
'AvailabilityZone': 'string',
'DBSubnetGroup': {
'DBSubnetGroupName': 'string',
'DBSubnetGroupDescription': 'string',
'VpcId': 'string',
'SubnetGroupStatus': 'string',
'Subnets': [
{
'SubnetIdentifier': 'string',
'SubnetAvailabilityZone': {
'Name': 'string'
},
'SubnetStatus': 'string'
},
],
'DBSubnetGroupArn': 'string'
},
'PreferredMaintenanceWindow': 'string',
'PendingModifiedValues': {
'DBInstanceClass': 'string',
'AllocatedStorage': 123,
'MasterUserPassword': 'string',
'Port': 123,
'BackupRetentionPeriod': 123,
'MultiAZ': True|False,
'EngineVersion': 'string',
'LicenseModel': 'string',
'Iops': 123,
'DBInstanceIdentifier': 'string',
'StorageType': 'string',
'CACertificateIdentifier': 'string',
'DBSubnetGroupName': 'string',
'PendingCloudwatchLogsExports': {
'LogTypesToEnable': [
'string',
],
'LogTypesToDisable': [
'string',
]
}
},
'LatestRestorableTime': datetime(2015, 1, 1),
'EngineVersion': 'string',
'AutoMinorVersionUpgrade': True|False,
'PubliclyAccessible': True|False,
'StatusInfos': [
{
'StatusType': 'string',
'Normal': True|False,
'Status': 'string',
'Message': 'string'
},
],
'DBClusterIdentifier': 'string',
'StorageEncrypted': True|False,
'KmsKeyId': 'string',
'DbiResourceId': 'string',
'CACertificateIdentifier': 'string',
'PromotionTier': 123,
'DBInstanceArn': 'string',
'EnabledCloudwatchLogsExports': [
'string',
]
}
}
Response Structure
(dict) --
DBInstance (dict) --Detailed information about an instance.
DBInstanceIdentifier (string) --Contains a user-provided database identifier. This identifier is the unique key that identifies an instance.
DBInstanceClass (string) --Contains the name of the compute and memory capacity class of the instance.
Engine (string) --Provides the name of the database engine to be used for this instance.
DBInstanceStatus (string) --Specifies the current state of this database.
Endpoint (dict) --Specifies the connection endpoint.
Address (string) --Specifies the DNS address of the instance.
Port (integer) --Specifies the port that the database engine is listening on.
HostedZoneId (string) --Specifies the ID that Amazon Route 53 assigns when you create a hosted zone.
InstanceCreateTime (datetime) --Provides the date and time that the instance was created.
PreferredBackupWindow (string) --Specifies the daily time range during which automated backups are created if automated backups are enabled, as determined by the BackupRetentionPeriod .
BackupRetentionPeriod (integer) --Specifies the number of days for which automatic snapshots are retained.
VpcSecurityGroups (list) --Provides a list of VPC security group elements that the instance belongs to.
(dict) --Used as a response element for queries on virtual private cloud (VPC) security group membership.
VpcSecurityGroupId (string) --The name of the VPC security group.
Status (string) --The status of the VPC security group.
AvailabilityZone (string) --Specifies the name of the Availability Zone that the instance is located in.
DBSubnetGroup (dict) --Specifies information on the subnet group that is associated with the instance, including the name, description, and subnets in the subnet group.
DBSubnetGroupName (string) --The name of the subnet group.
DBSubnetGroupDescription (string) --Provides the description of the subnet group.
VpcId (string) --Provides the virtual private cloud (VPC) ID of the subnet group.
SubnetGroupStatus (string) --Provides the status of the subnet group.
Subnets (list) --Detailed information about one or more subnets within a subnet group.
(dict) --Detailed information about a subnet.
SubnetIdentifier (string) --Specifies the identifier of the subnet.
SubnetAvailabilityZone (dict) --Specifies the Availability Zone for the subnet.
Name (string) --The name of the Availability Zone.
SubnetStatus (string) --Specifies the status of the subnet.
DBSubnetGroupArn (string) --The Amazon Resource Name (ARN) for the DB subnet group.
PreferredMaintenanceWindow (string) --Specifies the weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).
PendingModifiedValues (dict) --Specifies that changes to the instance are pending. This element is included only when changes are pending. Specific changes are identified by subelements.
DBInstanceClass (string) --Contains the new DBInstanceClass for the instance that will be applied or is currently being applied.
AllocatedStorage (integer) --Contains the new AllocatedStorage size for the instance that will be applied or is currently being applied.
MasterUserPassword (string) --Contains the pending or currently in-progress change of the master credentials for the instance.
Port (integer) --Specifies the pending port for the instance.
BackupRetentionPeriod (integer) --Specifies the pending number of days for which automated backups are retained.
MultiAZ (boolean) --Indicates that the Single-AZ instance is to change to a Multi-AZ deployment.
EngineVersion (string) --Indicates the database engine version.
LicenseModel (string) --The license model for the instance.
Valid values: license-included , bring-your-own-license , general-public-license
Iops (integer) --Specifies the new Provisioned IOPS value for the instance that will be applied or is currently being applied.
DBInstanceIdentifier (string) --Contains the new DBInstanceIdentifier for the instance that will be applied or is currently being applied.
StorageType (string) --Specifies the storage type to be associated with the instance.
CACertificateIdentifier (string) --Specifies the identifier of the certificate authority (CA) certificate for the DB instance.
DBSubnetGroupName (string) --The new subnet group for the instance.
PendingCloudwatchLogsExports (dict) --A list of the log types whose configuration is still pending. These log types are in the process of being activated or deactivated.
LogTypesToEnable (list) --Log types that are in the process of being activated. After they are activated, these log types are exported to Amazon CloudWatch Logs.
(string) --
LogTypesToDisable (list) --Log types that are in the process of being deactivated. After they are deactivated, these log types aren\'t exported to Amazon CloudWatch Logs.
(string) --
LatestRestorableTime (datetime) --Specifies the latest time to which a database can be restored with point-in-time restore.
EngineVersion (string) --Indicates the database engine version.
AutoMinorVersionUpgrade (boolean) --Indicates that minor version patches are applied automatically.
PubliclyAccessible (boolean) --Not supported. Amazon DocumentDB does not currently support public endpoints. The value of PubliclyAccessible is always false .
StatusInfos (list) --The status of a read replica. If the instance is not a read replica, this is blank.
(dict) --Provides a list of status information for an instance.
StatusType (string) --This value is currently "read replication".
Normal (boolean) --A Boolean value that is true if the instance is operating normally, or false if the instance is in an error state.
Status (string) --Status of the instance. For a StatusType of read replica, the values can be replicating , error, stopped , or terminated .
Message (string) --Details of the error if there is an error for the instance. If the instance is not in an error state, this value is blank.
DBClusterIdentifier (string) --Contains the name of the cluster that the instance is a member of if the instance is a member of a cluster.
StorageEncrypted (boolean) --Specifies whether the instance is encrypted.
KmsKeyId (string) --If StorageEncrypted is true , the AWS KMS key identifier for the encrypted instance.
DbiResourceId (string) --The AWS Region-unique, immutable identifier for the instance. This identifier is found in AWS CloudTrail log entries whenever the AWS KMS key for the instance is accessed.
CACertificateIdentifier (string) --The identifier of the CA certificate for this DB instance.
PromotionTier (integer) --A value that specifies the order in which an Amazon DocumentDB replica is promoted to the primary instance after a failure of the existing primary instance.
DBInstanceArn (string) --The Amazon Resource Name (ARN) for the instance.
EnabledCloudwatchLogsExports (list) --A list of log types that this instance is configured to export to Amazon CloudWatch Logs.
(string) --
Exceptions
DocDB.Client.exceptions.DBInstanceNotFoundFault
DocDB.Client.exceptions.InvalidDBInstanceStateFault
DocDB.Client.exceptions.DBSnapshotAlreadyExistsFault
DocDB.Client.exceptions.SnapshotQuotaExceededFault
DocDB.Client.exceptions.InvalidDBClusterStateFault
:return: {
'DBInstance': {
'DBInstanceIdentifier': 'string',
'DBInstanceClass': 'string',
'Engine': 'string',
'DBInstanceStatus': 'string',
'Endpoint': {
'Address': 'string',
'Port': 123,
'HostedZoneId': 'string'
},
'InstanceCreateTime': datetime(2015, 1, 1),
'PreferredBackupWindow': 'string',
'BackupRetentionPeriod': 123,
'VpcSecurityGroups': [
{
'VpcSecurityGroupId': 'string',
'Status': 'string'
},
],
'AvailabilityZone': 'string',
'DBSubnetGroup': {
'DBSubnetGroupName': 'string',
'DBSubnetGroupDescription': 'string',
'VpcId': 'string',
'SubnetGroupStatus': 'string',
'Subnets': [
{
'SubnetIdentifier': 'string',
'SubnetAvailabilityZone': {
'Name': 'string'
},
'SubnetStatus': 'string'
},
],
'DBSubnetGroupArn': 'string'
},
'PreferredMaintenanceWindow': 'string',
'PendingModifiedValues': {
'DBInstanceClass': 'string',
'AllocatedStorage': 123,
'MasterUserPassword': 'string',
'Port': 123,
'BackupRetentionPeriod': 123,
'MultiAZ': True|False,
'EngineVersion': 'string',
'LicenseModel': 'string',
'Iops': 123,
'DBInstanceIdentifier': 'string',
'StorageType': 'string',
'CACertificateIdentifier': 'string',
'DBSubnetGroupName': 'string',
'PendingCloudwatchLogsExports': {
'LogTypesToEnable': [
'string',
],
'LogTypesToDisable': [
'string',
]
}
},
'LatestRestorableTime': datetime(2015, 1, 1),
'EngineVersion': 'string',
'AutoMinorVersionUpgrade': True|False,
'PubliclyAccessible': True|False,
'StatusInfos': [
{
'StatusType': 'string',
'Normal': True|False,
'Status': 'string',
'Message': 'string'
},
],
'DBClusterIdentifier': 'string',
'StorageEncrypted': True|False,
'KmsKeyId': 'string',
'DbiResourceId': 'string',
'CACertificateIdentifier': 'string',
'PromotionTier': 123,
'DBInstanceArn': 'string',
'EnabledCloudwatchLogsExports': [
'string',
]
}
}
:returns:
(string) --
"""
pass
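# Editor's illustrative sketch (not part of the generated stubs): deleting a
# single instance by identifier. The identifier and client setup are
# placeholder assumptions; only fields documented above are read.
def _example_delete_db_instance():
    import boto3
    client = boto3.client('docdb')
    response = client.delete_db_instance(DBInstanceIdentifier='example-instance-1')
    return response['DBInstance']['DBInstanceStatus']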
def delete_db_subnet_group(DBSubnetGroupName=None):
"""
Deletes a subnet group.
See also: AWS API Documentation
Exceptions
:example: response = client.delete_db_subnet_group(
DBSubnetGroupName='string'
)
:type DBSubnetGroupName: string
:param DBSubnetGroupName: [REQUIRED]\nThe name of the database subnet group to delete.\n\nNote\nYou can\'t delete the default subnet group.\n\nConstraints:\nMust match the name of an existing DBSubnetGroup . Must not be default.\nExample: mySubnetgroup\n
"""
pass
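# Editor's illustrative sketch (not part of the generated stubs): deleting a
# non-default subnet group (the default group cannot be deleted, per the note
# above). The group name and client setup are placeholder assumptions.
def _example_delete_db_subnet_group():
    import boto3
    client = boto3.client('docdb')
    client.delete_db_subnet_group(DBSubnetGroupName='mysubnetgroup')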
def describe_certificates(CertificateIdentifier=None, Filters=None, MaxRecords=None, Marker=None):
"""
Returns a list of certificate authority (CA) certificates provided by Amazon DocumentDB for this AWS account.
See also: AWS API Documentation
Exceptions
:example: response = client.describe_certificates(
CertificateIdentifier='string',
Filters=[
{
'Name': 'string',
'Values': [
'string',
]
},
],
MaxRecords=123,
Marker='string'
)
:type CertificateIdentifier: string
:param CertificateIdentifier: The user-supplied certificate identifier. If this parameter is specified, information for only the specified certificate is returned. If this parameter is omitted, a list of up to MaxRecords certificates is returned. This parameter is not case sensitive.\nConstraints\n\nMust match an existing CertificateIdentifier .\n\n
:type Filters: list
:param Filters: This parameter is not currently supported.\n\n(dict) --A named set of filter values, used to return a more specific list of results. You can use a filter to match a set of resources by specific criteria, such as IDs.\nWildcards are not supported in filters.\n\nName (string) -- [REQUIRED]The name of the filter. Filter names are case sensitive.\n\nValues (list) -- [REQUIRED]One or more filter values. Filter values are case sensitive.\n\n(string) --\n\n\n\n\n\n
:type MaxRecords: integer
:param MaxRecords: The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.\nDefault: 100\nConstraints:\n\nMinimum: 20\nMaximum: 100\n\n
:type Marker: string
:param Marker: An optional pagination token provided by a previous DescribeCertificates request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .
:rtype: dict
ReturnsResponse Syntax
{
'Certificates': [
{
'CertificateIdentifier': 'string',
'CertificateType': 'string',
'Thumbprint': 'string',
'ValidFrom': datetime(2015, 1, 1),
'ValidTill': datetime(2015, 1, 1),
'CertificateArn': 'string'
},
],
'Marker': 'string'
}
Response Structure
(dict) --
Certificates (list) --
A list of certificates for this AWS account.
(dict) --
A certificate authority (CA) certificate for an AWS account.
CertificateIdentifier (string) --
The unique key that identifies a certificate.
Example: rds-ca-2019
CertificateType (string) --
The type of the certificate.
Example: CA
Thumbprint (string) --
The thumbprint of the certificate.
ValidFrom (datetime) --
The starting date-time from which the certificate is valid.
Example: 2019-07-31T17:57:09Z
ValidTill (datetime) --
The date-time after which the certificate is no longer valid.
Example: 2024-07-31T17:57:09Z
CertificateArn (string) --
The Amazon Resource Name (ARN) for the certificate.
Example: arn:aws:rds:us-east-1::cert:rds-ca-2019
Marker (string) --
An optional pagination token provided if the number of records retrieved is greater than MaxRecords . If this parameter is specified, the marker specifies the next record in the list. Including the value of Marker in the next call to DescribeCertificates results in the next page of certificates.
Exceptions
DocDB.Client.exceptions.CertificateNotFoundFault
:return: {
'Certificates': [
{
'CertificateIdentifier': 'string',
'CertificateType': 'string',
'Thumbprint': 'string',
'ValidFrom': datetime(2015, 1, 1),
'ValidTill': datetime(2015, 1, 1),
'CertificateArn': 'string'
},
],
'Marker': 'string'
}
:returns:
DocDB.Client.exceptions.CertificateNotFoundFault
"""
pass
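# Editor's illustrative sketch (not part of the generated stubs): paging through
# describe_certificates with the Marker token documented above and collecting
# each certificate's identifier and expiry. The client setup is a placeholder
# assumption; only parameters and response keys documented above are used.
def _example_list_certificates():
    import boto3
    client = boto3.client('docdb')
    certificates, marker = [], None
    while True:
        kwargs = {'MaxRecords': 100}
        if marker:
            kwargs['Marker'] = marker
        page = client.describe_certificates(**kwargs)
        certificates.extend(page['Certificates'])
        marker = page.get('Marker')
        if not marker:
            break
    return [(c['CertificateIdentifier'], c['ValidTill']) for c in certificates]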
def describe_db_cluster_parameter_groups(DBClusterParameterGroupName=None, Filters=None, MaxRecords=None, Marker=None):
"""
Returns a list of DBClusterParameterGroup descriptions. If a DBClusterParameterGroupName parameter is specified, the list contains only the description of the specified cluster parameter group.
See also: AWS API Documentation
Exceptions
:example: response = client.describe_db_cluster_parameter_groups(
DBClusterParameterGroupName='string',
Filters=[
{
'Name': 'string',
'Values': [
'string',
]
},
],
MaxRecords=123,
Marker='string'
)
:type DBClusterParameterGroupName: string
:param DBClusterParameterGroupName: The name of a specific cluster parameter group to return details for.\nConstraints:\n\nIf provided, must match the name of an existing DBClusterParameterGroup .\n\n
:type Filters: list
:param Filters: This parameter is not currently supported.\n\n(dict) --A named set of filter values, used to return a more specific list of results. You can use a filter to match a set of resources by specific criteria, such as IDs.\nWildcards are not supported in filters.\n\nName (string) -- [REQUIRED]The name of the filter. Filter names are case sensitive.\n\nValues (list) -- [REQUIRED]One or more filter values. Filter values are case sensitive.\n\n(string) --\n\n\n\n\n\n
:type MaxRecords: integer
:param MaxRecords: The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token (marker) is included in the response so that the remaining results can be retrieved.\nDefault: 100\nConstraints: Minimum 20, maximum 100.\n
:type Marker: string
:param Marker: An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .
:rtype: dict
ReturnsResponse Syntax
{
'Marker': 'string',
'DBClusterParameterGroups': [
{
'DBClusterParameterGroupName': 'string',
'DBParameterGroupFamily': 'string',
'Description': 'string',
'DBClusterParameterGroupArn': 'string'
},
]
}
Response Structure
(dict) --
Represents the output of DBClusterParameterGroups .
Marker (string) --
An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .
DBClusterParameterGroups (list) --
A list of cluster parameter groups.
(dict) --
Detailed information about a cluster parameter group.
DBClusterParameterGroupName (string) --
Provides the name of the cluster parameter group.
DBParameterGroupFamily (string) --
Provides the name of the parameter group family that this cluster parameter group is compatible with.
Description (string) --
Provides the customer-specified description for this cluster parameter group.
DBClusterParameterGroupArn (string) --
The Amazon Resource Name (ARN) for the cluster parameter group.
Exceptions
DocDB.Client.exceptions.DBParameterGroupNotFoundFault
:return: {
'Marker': 'string',
'DBClusterParameterGroups': [
{
'DBClusterParameterGroupName': 'string',
'DBParameterGroupFamily': 'string',
'Description': 'string',
'DBClusterParameterGroupArn': 'string'
},
]
}
:returns:
DocDB.Client.exceptions.DBParameterGroupNotFoundFault
"""
pass
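# Editor's illustrative sketch (not part of the generated stubs): looking up the
# parameter group family of a single named cluster parameter group. The group
# name and client setup are placeholder assumptions.
def _example_describe_db_cluster_parameter_group():
    import boto3
    client = boto3.client('docdb')
    response = client.describe_db_cluster_parameter_groups(
        DBClusterParameterGroupName='example-custom-params'
    )
    return [g['DBParameterGroupFamily'] for g in response['DBClusterParameterGroups']]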
def describe_db_cluster_parameters(DBClusterParameterGroupName=None, Source=None, Filters=None, MaxRecords=None, Marker=None):
"""
Returns the detailed parameter list for a particular cluster parameter group.
See also: AWS API Documentation
Exceptions
:example: response = client.describe_db_cluster_parameters(
DBClusterParameterGroupName='string',
Source='string',
Filters=[
{
'Name': 'string',
'Values': [
'string',
]
},
],
MaxRecords=123,
Marker='string'
)
:type DBClusterParameterGroupName: string
:param DBClusterParameterGroupName: [REQUIRED]\nThe name of a specific cluster parameter group to return parameter details for.\nConstraints:\n\nIf provided, must match the name of an existing DBClusterParameterGroup .\n\n
:type Source: string
:param Source: A value that indicates to return only parameters for a specific source. Parameter sources can be engine , service , or customer .
:type Filters: list
:param Filters: This parameter is not currently supported.\n\n(dict) --A named set of filter values, used to return a more specific list of results. You can use a filter to match a set of resources by specific criteria, such as IDs.\nWildcards are not supported in filters.\n\nName (string) -- [REQUIRED]The name of the filter. Filter names are case sensitive.\n\nValues (list) -- [REQUIRED]One or more filter values. Filter values are case sensitive.\n\n(string) --\n\n\n\n\n\n
:type MaxRecords: integer
:param MaxRecords: The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token (marker) is included in the response so that the remaining results can be retrieved.\nDefault: 100\nConstraints: Minimum 20, maximum 100.\n
:type Marker: string
:param Marker: An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .
:rtype: dict
ReturnsResponse Syntax
{
'Parameters': [
{
'ParameterName': 'string',
'ParameterValue': 'string',
'Description': 'string',
'Source': 'string',
'ApplyType': 'string',
'DataType': 'string',
'AllowedValues': 'string',
'IsModifiable': True|False,
'MinimumEngineVersion': 'string',
'ApplyMethod': 'immediate'|'pending-reboot'
},
],
'Marker': 'string'
}
Response Structure
(dict) --
Represents the output of DBClusterParameterGroup .
Parameters (list) --
Provides a list of parameters for the cluster parameter group.
(dict) --
Detailed information about an individual parameter.
ParameterName (string) --
Specifies the name of the parameter.
ParameterValue (string) --
Specifies the value of the parameter.
Description (string) --
Provides a description of the parameter.
Source (string) --
Indicates the source of the parameter value.
ApplyType (string) --
Specifies the engine-specific parameters type.
DataType (string) --
Specifies the valid data type for the parameter.
AllowedValues (string) --
Specifies the valid range of values for the parameter.
IsModifiable (boolean) --
Indicates whether (true ) or not (false ) the parameter can be modified. Some parameters have security or operational implications that prevent them from being changed.
MinimumEngineVersion (string) --
The earliest engine version to which the parameter can apply.
ApplyMethod (string) --
Indicates when to apply parameter updates.
Marker (string) --
An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .
Exceptions
DocDB.Client.exceptions.DBParameterGroupNotFoundFault
:return: {
'Parameters': [
{
'ParameterName': 'string',
'ParameterValue': 'string',
'Description': 'string',
'Source': 'string',
'ApplyType': 'string',
'DataType': 'string',
'AllowedValues': 'string',
'IsModifiable': True|False,
'MinimumEngineVersion': 'string',
'ApplyMethod': 'immediate'|'pending-reboot'
},
],
'Marker': 'string'
}
:returns:
DocDB.Client.exceptions.DBParameterGroupNotFoundFault
"""
pass
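# Editor's illustrative sketch (not part of the generated stubs): listing the
# customer-modifiable parameters of a cluster parameter group using the Source
# and IsModifiable fields documented above. The group name and client setup are
# placeholder assumptions; Marker pagination is omitted for brevity.
def _example_list_modifiable_cluster_parameters():
    import boto3
    client = boto3.client('docdb')
    response = client.describe_db_cluster_parameters(
        DBClusterParameterGroupName='example-custom-params',
        Source='customer',  # 'engine', 'service', or 'customer', per the docs above
    )
    return [p['ParameterName'] for p in response['Parameters'] if p['IsModifiable']]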
def describe_db_cluster_snapshot_attributes(DBClusterSnapshotIdentifier=None):
"""
Returns a list of cluster snapshot attribute names and values for a manual DB cluster snapshot.
When you share snapshots with other AWS accounts, DescribeDBClusterSnapshotAttributes returns the restore attribute and a list of IDs for the AWS accounts that are authorized to copy or restore the manual cluster snapshot. If all is included in the list of values for the restore attribute, then the manual cluster snapshot is public and can be copied or restored by all AWS accounts.
See also: AWS API Documentation
Exceptions
:example: response = client.describe_db_cluster_snapshot_attributes(
DBClusterSnapshotIdentifier='string'
)
:type DBClusterSnapshotIdentifier: string
:param DBClusterSnapshotIdentifier: [REQUIRED]\nThe identifier for the cluster snapshot to describe the attributes for.\n
:rtype: dict
ReturnsResponse Syntax{
'DBClusterSnapshotAttributesResult': {
'DBClusterSnapshotIdentifier': 'string',
'DBClusterSnapshotAttributes': [
{
'AttributeName': 'string',
'AttributeValues': [
'string',
]
},
]
}
}
Response Structure
(dict) --
DBClusterSnapshotAttributesResult (dict) --Detailed information about the attributes that are associated with a cluster snapshot.
DBClusterSnapshotIdentifier (string) --The identifier of the cluster snapshot that the attributes apply to.
DBClusterSnapshotAttributes (list) --The list of attributes and values for the cluster snapshot.
(dict) --Contains the name and values of a manual cluster snapshot attribute.
Manual cluster snapshot attributes are used to authorize other AWS accounts to restore a manual cluster snapshot.
AttributeName (string) --The name of the manual cluster snapshot attribute.
The attribute named restore refers to the list of AWS accounts that have permission to copy or restore the manual cluster snapshot.
AttributeValues (list) --The values for the manual cluster snapshot attribute.
If the AttributeName field is set to restore , then this element returns a list of IDs of the AWS accounts that are authorized to copy or restore the manual cluster snapshot. If a value of all is in the list, then the manual cluster snapshot is public and available for any AWS account to copy or restore.
(string) --
Exceptions
DocDB.Client.exceptions.DBClusterSnapshotNotFoundFault
:return: {
'DBClusterSnapshotAttributesResult': {
'DBClusterSnapshotIdentifier': 'string',
'DBClusterSnapshotAttributes': [
{
'AttributeName': 'string',
'AttributeValues': [
'string',
]
},
]
}
}
:returns:
DocDB.Client.exceptions.DBClusterSnapshotNotFoundFault
"""
pass
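# Editor's illustrative sketch (not part of the generated stubs): checking whether
# a manual cluster snapshot is public by looking for 'all' in the 'restore'
# attribute, as described above. The function name and client setup are
# placeholder assumptions.
def _example_snapshot_is_public(snapshot_identifier):
    import boto3
    client = boto3.client('docdb')
    result = client.describe_db_cluster_snapshot_attributes(
        DBClusterSnapshotIdentifier=snapshot_identifier
    )['DBClusterSnapshotAttributesResult']
    for attribute in result['DBClusterSnapshotAttributes']:
        if attribute['AttributeName'] == 'restore':
            return 'all' in attribute['AttributeValues']
    return False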
def describe_db_cluster_snapshots(DBClusterIdentifier=None, DBClusterSnapshotIdentifier=None, SnapshotType=None, Filters=None, MaxRecords=None, Marker=None, IncludeShared=None, IncludePublic=None):
"""
Returns information about cluster snapshots. This API operation supports pagination.
See also: AWS API Documentation
Exceptions
:example: response = client.describe_db_cluster_snapshots(
DBClusterIdentifier='string',
DBClusterSnapshotIdentifier='string',
SnapshotType='string',
Filters=[
{
'Name': 'string',
'Values': [
'string',
]
},
],
MaxRecords=123,
Marker='string',
IncludeShared=True|False,
IncludePublic=True|False
)
:type DBClusterIdentifier: string
:param DBClusterIdentifier: The ID of the cluster to retrieve the list of cluster snapshots for. This parameter can\'t be used with the DBClusterSnapshotIdentifier parameter. This parameter is not case sensitive.\nConstraints:\n\nIf provided, must match the identifier of an existing DBCluster .\n\n
:type DBClusterSnapshotIdentifier: string
:param DBClusterSnapshotIdentifier: A specific cluster snapshot identifier to describe. This parameter can\'t be used with the DBClusterIdentifier parameter. This value is stored as a lowercase string.\nConstraints:\n\nIf provided, must match the identifier of an existing DBClusterSnapshot .\nIf this identifier is for an automated snapshot, the SnapshotType parameter must also be specified.\n\n
:type SnapshotType: string
:param SnapshotType: The type of cluster snapshots to be returned. You can specify one of the following values:\n\nautomated - Return all cluster snapshots that Amazon DocumentDB has automatically created for your AWS account.\nmanual - Return all cluster snapshots that you have manually created for your AWS account.\nshared - Return all manual cluster snapshots that have been shared to your AWS account.\npublic - Return all cluster snapshots that have been marked as public.\n\nIf you don\'t specify a SnapshotType value, then both automated and manual cluster snapshots are returned. You can include shared cluster snapshots with these results by setting the IncludeShared parameter to true . You can include public cluster snapshots with these results by setting the IncludePublic parameter to true .\nThe IncludeShared and IncludePublic parameters don\'t apply for SnapshotType values of manual or automated . The IncludePublic parameter doesn\'t apply when SnapshotType is set to shared . The IncludeShared parameter doesn\'t apply when SnapshotType is set to public .\n
:type Filters: list
:param Filters: This parameter is not currently supported.\n\n(dict) --A named set of filter values, used to return a more specific list of results. You can use a filter to match a set of resources by specific criteria, such as IDs.\nWildcards are not supported in filters.\n\nName (string) -- [REQUIRED]The name of the filter. Filter names are case sensitive.\n\nValues (list) -- [REQUIRED]One or more filter values. Filter values are case sensitive.\n\n(string) --\n\n\n\n\n\n
:type MaxRecords: integer
:param MaxRecords: The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token (marker) is included in the response so that the remaining results can be retrieved.\nDefault: 100\nConstraints: Minimum 20, maximum 100.\n
:type Marker: string
:param Marker: An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .
:type IncludeShared: boolean
:param IncludeShared: Set to true to include shared manual cluster snapshots from other AWS accounts that this AWS account has been given permission to copy or restore, and otherwise false . The default is false .
:type IncludePublic: boolean
:param IncludePublic: Set to true to include manual cluster snapshots that are public and can be copied or restored by any AWS account, and otherwise false . The default is false .
:rtype: dict
ReturnsResponse Syntax
{
'Marker': 'string',
'DBClusterSnapshots': [
{
'AvailabilityZones': [
'string',
],
'DBClusterSnapshotIdentifier': 'string',
'DBClusterIdentifier': 'string',
'SnapshotCreateTime': datetime(2015, 1, 1),
'Engine': 'string',
'Status': 'string',
'Port': 123,
'VpcId': 'string',
'ClusterCreateTime': datetime(2015, 1, 1),
'MasterUsername': 'string',
'EngineVersion': 'string',
'SnapshotType': 'string',
'PercentProgress': 123,
'StorageEncrypted': True|False,
'KmsKeyId': 'string',
'DBClusterSnapshotArn': 'string',
'SourceDBClusterSnapshotArn': 'string'
},
]
}
Response Structure
(dict) --
Represents the output of DescribeDBClusterSnapshots .
Marker (string) --
An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .
DBClusterSnapshots (list) --
Provides a list of cluster snapshots.
(dict) --
Detailed information about a cluster snapshot.
AvailabilityZones (list) --
Provides the list of Amazon EC2 Availability Zones that instances in the cluster snapshot can be restored in.
(string) --
DBClusterSnapshotIdentifier (string) --
Specifies the identifier for the cluster snapshot.
DBClusterIdentifier (string) --
Specifies the cluster identifier of the cluster that this cluster snapshot was created from.
SnapshotCreateTime (datetime) --
Provides the time when the snapshot was taken, in UTC.
Engine (string) --
Specifies the name of the database engine.
Status (string) --
Specifies the status of this cluster snapshot.
Port (integer) --
Specifies the port that the cluster was listening on at the time of the snapshot.
VpcId (string) --
Provides the virtual private cloud (VPC) ID that is associated with the cluster snapshot.
ClusterCreateTime (datetime) --
Specifies the time when the cluster was created, in Universal Coordinated Time (UTC).
MasterUsername (string) --
Provides the master user name for the cluster snapshot.
EngineVersion (string) --
Provides the version of the database engine for this cluster snapshot.
SnapshotType (string) --
Provides the type of the cluster snapshot.
PercentProgress (integer) --
Specifies the percentage of the estimated data that has been transferred.
StorageEncrypted (boolean) --
Specifies whether the cluster snapshot is encrypted.
KmsKeyId (string) --
If StorageEncrypted is true , the AWS KMS key identifier for the encrypted cluster snapshot.
DBClusterSnapshotArn (string) --
The Amazon Resource Name (ARN) for the cluster snapshot.
SourceDBClusterSnapshotArn (string) --
If the cluster snapshot was copied from a source cluster snapshot, the ARN for the source cluster snapshot; otherwise, a null value.
Exceptions
DocDB.Client.exceptions.DBClusterSnapshotNotFoundFault
:return: {
'Marker': 'string',
'DBClusterSnapshots': [
{
'AvailabilityZones': [
'string',
],
'DBClusterSnapshotIdentifier': 'string',
'DBClusterIdentifier': 'string',
'SnapshotCreateTime': datetime(2015, 1, 1),
'Engine': 'string',
'Status': 'string',
'Port': 123,
'VpcId': 'string',
'ClusterCreateTime': datetime(2015, 1, 1),
'MasterUsername': 'string',
'EngineVersion': 'string',
'SnapshotType': 'string',
'PercentProgress': 123,
'StorageEncrypted': True|False,
'KmsKeyId': 'string',
'DBClusterSnapshotArn': 'string',
'SourceDBClusterSnapshotArn': 'string'
},
]
}
:returns:
(string) --
"""
pass
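# Editor's illustrative sketch (not part of the generated stubs): listing the
# manual snapshots of one cluster using the SnapshotType values documented
# above. The cluster identifier and client setup are placeholder assumptions;
# Marker pagination is omitted for brevity.
def _example_list_manual_cluster_snapshots():
    import boto3
    client = boto3.client('docdb')
    response = client.describe_db_cluster_snapshots(
        DBClusterIdentifier='example-cluster',
        SnapshotType='manual',  # 'automated', 'manual', 'shared', or 'public'
    )
    return [s['DBClusterSnapshotIdentifier'] for s in response['DBClusterSnapshots']]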
def describe_db_clusters(DBClusterIdentifier=None, Filters=None, MaxRecords=None, Marker=None):
"""
Returns information about provisioned Amazon DocumentDB clusters. This API operation supports pagination. For certain management features such as cluster and instance lifecycle management, Amazon DocumentDB leverages operational technology that is shared with Amazon RDS and Amazon Neptune. Use the filter parameter Name=engine,Values=docdb to return only Amazon DocumentDB clusters.
See also: AWS API Documentation
Exceptions
:example: response = client.describe_db_clusters(
DBClusterIdentifier='string',
Filters=[
{
'Name': 'string',
'Values': [
'string',
]
},
],
MaxRecords=123,
Marker='string'
)
:type DBClusterIdentifier: string
:param DBClusterIdentifier: The user-provided cluster identifier. If this parameter is specified, information from only the specific cluster is returned. This parameter isn\'t case sensitive.\nConstraints:\n\nIf provided, must match an existing DBClusterIdentifier .\n\n
:type Filters: list
:param Filters: A filter that specifies one or more clusters to describe.\nSupported filters:\n\ndb-cluster-id - Accepts cluster identifiers and cluster Amazon Resource Names (ARNs). The results list only includes information about the clusters identified by these ARNs.\n\n\n(dict) --A named set of filter values, used to return a more specific list of results. You can use a filter to match a set of resources by specific criteria, such as IDs.\nWildcards are not supported in filters.\n\nName (string) -- [REQUIRED]The name of the filter. Filter names are case sensitive.\n\nValues (list) -- [REQUIRED]One or more filter values. Filter values are case sensitive.\n\n(string) --\n\n\n\n\n\n
:type MaxRecords: integer
:param MaxRecords: The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token (marker) is included in the response so that the remaining results can be retrieved.\nDefault: 100\nConstraints: Minimum 20, maximum 100.\n
:type Marker: string
:param Marker: An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .
:rtype: dict
ReturnsResponse Syntax
{
'Marker': 'string',
'DBClusters': [
{
'AvailabilityZones': [
'string',
],
'BackupRetentionPeriod': 123,
'DBClusterIdentifier': 'string',
'DBClusterParameterGroup': 'string',
'DBSubnetGroup': 'string',
'Status': 'string',
'PercentProgress': 'string',
'EarliestRestorableTime': datetime(2015, 1, 1),
'Endpoint': 'string',
'ReaderEndpoint': 'string',
'MultiAZ': True|False,
'Engine': 'string',
'EngineVersion': 'string',
'LatestRestorableTime': datetime(2015, 1, 1),
'Port': 123,
'MasterUsername': 'string',
'PreferredBackupWindow': 'string',
'PreferredMaintenanceWindow': 'string',
'DBClusterMembers': [
{
'DBInstanceIdentifier': 'string',
'IsClusterWriter': True|False,
'DBClusterParameterGroupStatus': 'string',
'PromotionTier': 123
},
],
'VpcSecurityGroups': [
{
'VpcSecurityGroupId': 'string',
'Status': 'string'
},
],
'HostedZoneId': 'string',
'StorageEncrypted': True|False,
'KmsKeyId': 'string',
'DbClusterResourceId': 'string',
'DBClusterArn': 'string',
'AssociatedRoles': [
{
'RoleArn': 'string',
'Status': 'string'
},
],
'ClusterCreateTime': datetime(2015, 1, 1),
'EnabledCloudwatchLogsExports': [
'string',
],
'DeletionProtection': True|False
},
]
}
Response Structure
(dict) --
Represents the output of DescribeDBClusters .
Marker (string) --
An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .
DBClusters (list) --
A list of clusters.
(dict) --
Detailed information about a cluster.
AvailabilityZones (list) --
Provides the list of Amazon EC2 Availability Zones that instances in the cluster can be created in.
(string) --
BackupRetentionPeriod (integer) --
Specifies the number of days for which automatic snapshots are retained.
DBClusterIdentifier (string) --
Contains a user-supplied cluster identifier. This identifier is the unique key that identifies a cluster.
DBClusterParameterGroup (string) --
Specifies the name of the cluster parameter group for the cluster.
DBSubnetGroup (string) --
Specifies information on the subnet group that is associated with the cluster, including the name, description, and subnets in the subnet group.
Status (string) --
Specifies the current state of this cluster.
PercentProgress (string) --
Specifies the progress of the operation as a percentage.
EarliestRestorableTime (datetime) --
The earliest time to which a database can be restored with point-in-time restore.
Endpoint (string) --
Specifies the connection endpoint for the primary instance of the cluster.
ReaderEndpoint (string) --
The reader endpoint for the cluster. The reader endpoint for a cluster load balances connections across the Amazon DocumentDB replicas that are available in a cluster. As clients request new connections to the reader endpoint, Amazon DocumentDB distributes the connection requests among the Amazon DocumentDB replicas in the cluster. This functionality can help balance your read workload across multiple Amazon DocumentDB replicas in your cluster.
If a failover occurs, and the Amazon DocumentDB replica that you are connected to is promoted to be the primary instance, your connection is dropped. To continue sending your read workload to other Amazon DocumentDB replicas in the cluster, you can then reconnect to the reader endpoint.
MultiAZ (boolean) --
Specifies whether the cluster has instances in multiple Availability Zones.
Engine (string) --
Provides the name of the database engine to be used for this cluster.
EngineVersion (string) --
Indicates the database engine version.
LatestRestorableTime (datetime) --
Specifies the latest time to which a database can be restored with point-in-time restore.
Port (integer) --
Specifies the port that the database engine is listening on.
MasterUsername (string) --
Contains the master user name for the cluster.
PreferredBackupWindow (string) --
Specifies the daily time range during which automated backups are created if automated backups are enabled, as determined by the BackupRetentionPeriod .
PreferredMaintenanceWindow (string) --
Specifies the weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).
DBClusterMembers (list) --
Provides the list of instances that make up the cluster.
(dict) --
Contains information about an instance that is part of a cluster.
DBInstanceIdentifier (string) --
Specifies the instance identifier for this member of the cluster.
IsClusterWriter (boolean) --
A value that is true if the cluster member is the primary instance for the cluster and false otherwise.
DBClusterParameterGroupStatus (string) --
Specifies the status of the cluster parameter group for this member of the DB cluster.
PromotionTier (integer) --
A value that specifies the order in which an Amazon DocumentDB replica is promoted to the primary instance after a failure of the existing primary instance.
VpcSecurityGroups (list) --
Provides a list of virtual private cloud (VPC) security groups that the cluster belongs to.
(dict) --
Used as a response element for queries on virtual private cloud (VPC) security group membership.
VpcSecurityGroupId (string) --
The name of the VPC security group.
Status (string) --
The status of the VPC security group.
HostedZoneId (string) --
Specifies the ID that Amazon Route 53 assigns when you create a hosted zone.
StorageEncrypted (boolean) --
Specifies whether the cluster is encrypted.
KmsKeyId (string) --
If StorageEncrypted is true , the AWS KMS key identifier for the encrypted cluster.
DbClusterResourceId (string) --
The AWS Region-unique, immutable identifier for the cluster. This identifier is found in AWS CloudTrail log entries whenever the AWS KMS key for the cluster is accessed.
DBClusterArn (string) --
The Amazon Resource Name (ARN) for the cluster.
AssociatedRoles (list) --
Provides a list of the AWS Identity and Access Management (IAM) roles that are associated with the cluster. IAM roles that are associated with a cluster grant permission for the cluster to access other AWS services on your behalf.
(dict) --
Describes an AWS Identity and Access Management (IAM) role that is associated with a cluster.
RoleArn (string) --
The Amazon Resource Name (ARN) of the IAM role that is associated with the DB cluster.
Status (string) --
Describes the state of association between the IAM role and the cluster. The Status property returns one of the following values:
ACTIVE - The IAM role ARN is associated with the cluster and can be used to access other AWS services on your behalf.
PENDING - The IAM role ARN is being associated with the DB cluster.
INVALID - The IAM role ARN is associated with the cluster, but the cluster cannot assume the IAM role to access other AWS services on your behalf.
ClusterCreateTime (datetime) --
Specifies the time when the cluster was created, in Universal Coordinated Time (UTC).
EnabledCloudwatchLogsExports (list) --
A list of log types that this cluster is configured to export to Amazon CloudWatch Logs.
(string) --
DeletionProtection (boolean) --
Specifies whether this cluster can be deleted. If DeletionProtection is enabled, the cluster cannot be deleted unless it is modified and DeletionProtection is disabled. DeletionProtection protects clusters from being accidentally deleted.
Exceptions
DocDB.Client.exceptions.DBClusterNotFoundFault
:return: {
'Marker': 'string',
'DBClusters': [
{
'AvailabilityZones': [
'string',
],
'BackupRetentionPeriod': 123,
'DBClusterIdentifier': 'string',
'DBClusterParameterGroup': 'string',
'DBSubnetGroup': 'string',
'Status': 'string',
'PercentProgress': 'string',
'EarliestRestorableTime': datetime(2015, 1, 1),
'Endpoint': 'string',
'ReaderEndpoint': 'string',
'MultiAZ': True|False,
'Engine': 'string',
'EngineVersion': 'string',
'LatestRestorableTime': datetime(2015, 1, 1),
'Port': 123,
'MasterUsername': 'string',
'PreferredBackupWindow': 'string',
'PreferredMaintenanceWindow': 'string',
'DBClusterMembers': [
{
'DBInstanceIdentifier': 'string',
'IsClusterWriter': True|False,
'DBClusterParameterGroupStatus': 'string',
'PromotionTier': 123
},
],
'VpcSecurityGroups': [
{
'VpcSecurityGroupId': 'string',
'Status': 'string'
},
],
'HostedZoneId': 'string',
'StorageEncrypted': True|False,
'KmsKeyId': 'string',
'DbClusterResourceId': 'string',
'DBClusterArn': 'string',
'AssociatedRoles': [
{
'RoleArn': 'string',
'Status': 'string'
},
],
'ClusterCreateTime': datetime(2015, 1, 1),
'EnabledCloudwatchLogsExports': [
'string',
],
'DeletionProtection': True|False
},
]
}
:returns:
(string) --
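    Usage sketch (added for illustration, not part of the AWS documentation): assumes a boto3 client created with boto3.client('docdb'), valid AWS credentials, and a hypothetical cluster identifier 'sample-cluster'.
        import boto3
        client = boto3.client('docdb')
        resp = client.describe_db_clusters(
            Filters=[{'Name': 'db-cluster-id', 'Values': ['sample-cluster']}]
        )
        for cluster in resp['DBClusters']:
            print(cluster['DBClusterIdentifier'], cluster['Status'])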
"""
pass
def describe_db_engine_versions(Engine=None, EngineVersion=None, DBParameterGroupFamily=None, Filters=None, MaxRecords=None, Marker=None, DefaultOnly=None, ListSupportedCharacterSets=None, ListSupportedTimezones=None):
"""
Returns a list of the available engines.
See also: AWS API Documentation
:example: response = client.describe_db_engine_versions(
Engine='string',
EngineVersion='string',
DBParameterGroupFamily='string',
Filters=[
{
'Name': 'string',
'Values': [
'string',
]
},
],
MaxRecords=123,
Marker='string',
DefaultOnly=True|False,
ListSupportedCharacterSets=True|False,
ListSupportedTimezones=True|False
)
:type Engine: string
:param Engine: The database engine to return.
:type EngineVersion: string
:param EngineVersion: The database engine version to return.\nExample: 5.1.49\n
:type DBParameterGroupFamily: string
:param DBParameterGroupFamily: The name of a specific parameter group family to return details for.\nConstraints:\n\nIf provided, must match an existing DBParameterGroupFamily .\n\n
:type Filters: list
:param Filters: This parameter is not currently supported.\n\n(dict) --A named set of filter values, used to return a more specific list of results. You can use a filter to match a set of resources by specific criteria, such as IDs.\nWildcards are not supported in filters.\n\nName (string) -- [REQUIRED]The name of the filter. Filter names are case sensitive.\n\nValues (list) -- [REQUIRED]One or more filter values. Filter values are case sensitive.\n\n(string) --\n\n\n\n\n\n
:type MaxRecords: integer
:param MaxRecords: The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token (marker) is included in the response so that the remaining results can be retrieved.\nDefault: 100\nConstraints: Minimum 20, maximum 100.\n
:type Marker: string
:param Marker: An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .
:type DefaultOnly: boolean
:param DefaultOnly: Indicates that only the default version of the specified engine or engine and major version combination is returned.
:type ListSupportedCharacterSets: boolean
:param ListSupportedCharacterSets: If this parameter is specified and the requested engine supports the CharacterSetName parameter for CreateDBInstance , the response includes a list of supported character sets for each engine version.
:type ListSupportedTimezones: boolean
:param ListSupportedTimezones: If this parameter is specified and the requested engine supports the TimeZone parameter for CreateDBInstance , the response includes a list of supported time zones for each engine version.
:rtype: dict
    Returns: Response Syntax
{
'Marker': 'string',
'DBEngineVersions': [
{
'Engine': 'string',
'EngineVersion': 'string',
'DBParameterGroupFamily': 'string',
'DBEngineDescription': 'string',
'DBEngineVersionDescription': 'string',
'ValidUpgradeTarget': [
{
'Engine': 'string',
'EngineVersion': 'string',
'Description': 'string',
'AutoUpgrade': True|False,
'IsMajorVersionUpgrade': True|False
},
],
'ExportableLogTypes': [
'string',
],
'SupportsLogExportsToCloudwatchLogs': True|False
},
]
}
Response Structure
(dict) --
Represents the output of DescribeDBEngineVersions .
Marker (string) --
An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .
DBEngineVersions (list) --
Detailed information about one or more engine versions.
(dict) --
Detailed information about an engine version.
Engine (string) --
The name of the database engine.
EngineVersion (string) --
The version number of the database engine.
DBParameterGroupFamily (string) --
The name of the parameter group family for the database engine.
DBEngineDescription (string) --
The description of the database engine.
DBEngineVersionDescription (string) --
The description of the database engine version.
ValidUpgradeTarget (list) --
A list of engine versions that this database engine version can be upgraded to.
(dict) --
The version of the database engine that an instance can be upgraded to.
Engine (string) --
The name of the upgrade target database engine.
EngineVersion (string) --
The version number of the upgrade target database engine.
Description (string) --
The version of the database engine that an instance can be upgraded to.
AutoUpgrade (boolean) --
A value that indicates whether the target version is applied to any source DB instances that have AutoMinorVersionUpgrade set to true .
IsMajorVersionUpgrade (boolean) --
A value that indicates whether a database engine is upgraded to a major version.
ExportableLogTypes (list) --
The types of logs that the database engine has available for export to Amazon CloudWatch Logs.
(string) --
SupportsLogExportsToCloudwatchLogs (boolean) --
A value that indicates whether the engine version supports exporting the log types specified by ExportableLogTypes to CloudWatch Logs.
:return: {
'Marker': 'string',
'DBEngineVersions': [
{
'Engine': 'string',
'EngineVersion': 'string',
'DBParameterGroupFamily': 'string',
'DBEngineDescription': 'string',
'DBEngineVersionDescription': 'string',
'ValidUpgradeTarget': [
{
'Engine': 'string',
'EngineVersion': 'string',
'Description': 'string',
'AutoUpgrade': True|False,
'IsMajorVersionUpgrade': True|False
},
],
'ExportableLogTypes': [
'string',
],
'SupportsLogExportsToCloudwatchLogs': True|False
},
]
}
:returns:
(string) --
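    Usage sketch (added for illustration, not part of the AWS documentation): assumes boto3.client('docdb') with valid credentials and prints the upgrade targets reported for each DocumentDB engine version.
        import boto3
        client = boto3.client('docdb')
        resp = client.describe_db_engine_versions(Engine='docdb')
        for version in resp['DBEngineVersions']:
            targets = [t['EngineVersion'] for t in version['ValidUpgradeTarget']]
            print(version['EngineVersion'], targets)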
"""
pass
def describe_db_instances(DBInstanceIdentifier=None, Filters=None, MaxRecords=None, Marker=None):
"""
Returns information about provisioned Amazon DocumentDB instances. This API supports pagination.
See also: AWS API Documentation
Exceptions
:example: response = client.describe_db_instances(
DBInstanceIdentifier='string',
Filters=[
{
'Name': 'string',
'Values': [
'string',
]
},
],
MaxRecords=123,
Marker='string'
)
:type DBInstanceIdentifier: string
:param DBInstanceIdentifier: The user-provided instance identifier. If this parameter is specified, information from only the specific instance is returned. This parameter isn\'t case sensitive.\nConstraints:\n\nIf provided, must match the identifier of an existing DBInstance .\n\n
:type Filters: list
:param Filters: A filter that specifies one or more instances to describe.\nSupported filters:\n\ndb-cluster-id - Accepts cluster identifiers and cluster Amazon Resource Names (ARNs). The results list includes only the information about the instances that are associated with the clusters that are identified by these ARNs.\ndb-instance-id - Accepts instance identifiers and instance ARNs. The results list includes only the information about the instances that are identified by these ARNs.\n\n\n(dict) --A named set of filter values, used to return a more specific list of results. You can use a filter to match a set of resources by specific criteria, such as IDs.\nWildcards are not supported in filters.\n\nName (string) -- [REQUIRED]The name of the filter. Filter names are case sensitive.\n\nValues (list) -- [REQUIRED]One or more filter values. Filter values are case sensitive.\n\n(string) --\n\n\n\n\n\n
:type MaxRecords: integer
:param MaxRecords: The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token (marker) is included in the response so that the remaining results can be retrieved.\nDefault: 100\nConstraints: Minimum 20, maximum 100.\n
:type Marker: string
:param Marker: An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .
:rtype: dict
    Returns: Response Syntax
{
'Marker': 'string',
'DBInstances': [
{
'DBInstanceIdentifier': 'string',
'DBInstanceClass': 'string',
'Engine': 'string',
'DBInstanceStatus': 'string',
'Endpoint': {
'Address': 'string',
'Port': 123,
'HostedZoneId': 'string'
},
'InstanceCreateTime': datetime(2015, 1, 1),
'PreferredBackupWindow': 'string',
'BackupRetentionPeriod': 123,
'VpcSecurityGroups': [
{
'VpcSecurityGroupId': 'string',
'Status': 'string'
},
],
'AvailabilityZone': 'string',
'DBSubnetGroup': {
'DBSubnetGroupName': 'string',
'DBSubnetGroupDescription': 'string',
'VpcId': 'string',
'SubnetGroupStatus': 'string',
'Subnets': [
{
'SubnetIdentifier': 'string',
'SubnetAvailabilityZone': {
'Name': 'string'
},
'SubnetStatus': 'string'
},
],
'DBSubnetGroupArn': 'string'
},
'PreferredMaintenanceWindow': 'string',
'PendingModifiedValues': {
'DBInstanceClass': 'string',
'AllocatedStorage': 123,
'MasterUserPassword': 'string',
'Port': 123,
'BackupRetentionPeriod': 123,
'MultiAZ': True|False,
'EngineVersion': 'string',
'LicenseModel': 'string',
'Iops': 123,
'DBInstanceIdentifier': 'string',
'StorageType': 'string',
'CACertificateIdentifier': 'string',
'DBSubnetGroupName': 'string',
'PendingCloudwatchLogsExports': {
'LogTypesToEnable': [
'string',
],
'LogTypesToDisable': [
'string',
]
}
},
'LatestRestorableTime': datetime(2015, 1, 1),
'EngineVersion': 'string',
'AutoMinorVersionUpgrade': True|False,
'PubliclyAccessible': True|False,
'StatusInfos': [
{
'StatusType': 'string',
'Normal': True|False,
'Status': 'string',
'Message': 'string'
},
],
'DBClusterIdentifier': 'string',
'StorageEncrypted': True|False,
'KmsKeyId': 'string',
'DbiResourceId': 'string',
'CACertificateIdentifier': 'string',
'PromotionTier': 123,
'DBInstanceArn': 'string',
'EnabledCloudwatchLogsExports': [
'string',
]
},
]
}
Response Structure
(dict) --
Represents the output of DescribeDBInstances .
Marker (string) --
An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .
DBInstances (list) --
Detailed information about one or more instances.
(dict) --
Detailed information about an instance.
DBInstanceIdentifier (string) --
Contains a user-provided database identifier. This identifier is the unique key that identifies an instance.
DBInstanceClass (string) --
Contains the name of the compute and memory capacity class of the instance.
Engine (string) --
Provides the name of the database engine to be used for this instance.
DBInstanceStatus (string) --
Specifies the current state of this database.
Endpoint (dict) --
Specifies the connection endpoint.
Address (string) --
Specifies the DNS address of the instance.
Port (integer) --
Specifies the port that the database engine is listening on.
HostedZoneId (string) --
Specifies the ID that Amazon Route 53 assigns when you create a hosted zone.
InstanceCreateTime (datetime) --
Provides the date and time that the instance was created.
PreferredBackupWindow (string) --
Specifies the daily time range during which automated backups are created if automated backups are enabled, as determined by the BackupRetentionPeriod .
BackupRetentionPeriod (integer) --
Specifies the number of days for which automatic snapshots are retained.
VpcSecurityGroups (list) --
Provides a list of VPC security group elements that the instance belongs to.
(dict) --
Used as a response element for queries on virtual private cloud (VPC) security group membership.
VpcSecurityGroupId (string) --
The name of the VPC security group.
Status (string) --
The status of the VPC security group.
AvailabilityZone (string) --
Specifies the name of the Availability Zone that the instance is located in.
DBSubnetGroup (dict) --
Specifies information on the subnet group that is associated with the instance, including the name, description, and subnets in the subnet group.
DBSubnetGroupName (string) --
The name of the subnet group.
DBSubnetGroupDescription (string) --
Provides the description of the subnet group.
VpcId (string) --
Provides the virtual private cloud (VPC) ID of the subnet group.
SubnetGroupStatus (string) --
Provides the status of the subnet group.
Subnets (list) --
Detailed information about one or more subnets within a subnet group.
(dict) --
Detailed information about a subnet.
SubnetIdentifier (string) --
Specifies the identifier of the subnet.
SubnetAvailabilityZone (dict) --
Specifies the Availability Zone for the subnet.
Name (string) --
The name of the Availability Zone.
SubnetStatus (string) --
Specifies the status of the subnet.
DBSubnetGroupArn (string) --
The Amazon Resource Name (ARN) for the DB subnet group.
PreferredMaintenanceWindow (string) --
Specifies the weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).
PendingModifiedValues (dict) --
Specifies that changes to the instance are pending. This element is included only when changes are pending. Specific changes are identified by subelements.
DBInstanceClass (string) --
Contains the new DBInstanceClass for the instance that will be applied or is currently being applied.
AllocatedStorage (integer) --
    Contains the new AllocatedStorage size for the instance that will be applied or is currently being applied.
MasterUserPassword (string) --
Contains the pending or currently in-progress change of the master credentials for the instance.
Port (integer) --
Specifies the pending port for the instance.
BackupRetentionPeriod (integer) --
Specifies the pending number of days for which automated backups are retained.
MultiAZ (boolean) --
Indicates that the Single-AZ instance is to change to a Multi-AZ deployment.
EngineVersion (string) --
Indicates the database engine version.
LicenseModel (string) --
The license model for the instance.
Valid values: license-included , bring-your-own-license , general-public-license
Iops (integer) --
Specifies the new Provisioned IOPS value for the instance that will be applied or is currently being applied.
DBInstanceIdentifier (string) --
Contains the new DBInstanceIdentifier for the instance that will be applied or is currently being applied.
StorageType (string) --
Specifies the storage type to be associated with the instance.
CACertificateIdentifier (string) --
Specifies the identifier of the certificate authority (CA) certificate for the DB instance.
DBSubnetGroupName (string) --
The new subnet group for the instance.
PendingCloudwatchLogsExports (dict) --
A list of the log types whose configuration is still pending. These log types are in the process of being activated or deactivated.
LogTypesToEnable (list) --
    Log types that are in the process of being enabled. After they are enabled, these log types are exported to Amazon CloudWatch Logs.
    (string) --
    LogTypesToDisable (list) --
    Log types that are in the process of being disabled. After they are disabled, these log types aren\'t exported to Amazon CloudWatch Logs.
(string) --
LatestRestorableTime (datetime) --
Specifies the latest time to which a database can be restored with point-in-time restore.
EngineVersion (string) --
Indicates the database engine version.
AutoMinorVersionUpgrade (boolean) --
Indicates that minor version patches are applied automatically.
PubliclyAccessible (boolean) --
Not supported. Amazon DocumentDB does not currently support public endpoints. The value of PubliclyAccessible is always false .
StatusInfos (list) --
The status of a read replica. If the instance is not a read replica, this is blank.
(dict) --
Provides a list of status information for an instance.
StatusType (string) --
This value is currently "read replication ."
Normal (boolean) --
A Boolean value that is true if the instance is operating normally, or false if the instance is in an error state.
Status (string) --
Status of the instance. For a StatusType of read replica, the values can be replicating , error, stopped , or terminated .
Message (string) --
Details of the error if there is an error for the instance. If the instance is not in an error state, this value is blank.
DBClusterIdentifier (string) --
Contains the name of the cluster that the instance is a member of if the instance is a member of a cluster.
StorageEncrypted (boolean) --
Specifies whether or not the instance is encrypted.
KmsKeyId (string) --
If StorageEncrypted is true , the AWS KMS key identifier for the encrypted instance.
DbiResourceId (string) --
The AWS Region-unique, immutable identifier for the instance. This identifier is found in AWS CloudTrail log entries whenever the AWS KMS key for the instance is accessed.
CACertificateIdentifier (string) --
The identifier of the CA certificate for this DB instance.
PromotionTier (integer) --
A value that specifies the order in which an Amazon DocumentDB replica is promoted to the primary instance after a failure of the existing primary instance.
DBInstanceArn (string) --
The Amazon Resource Name (ARN) for the instance.
EnabledCloudwatchLogsExports (list) --
A list of log types that this instance is configured to export to Amazon CloudWatch Logs.
(string) --
Exceptions
DocDB.Client.exceptions.DBInstanceNotFoundFault
:return: {
'Marker': 'string',
'DBInstances': [
{
'DBInstanceIdentifier': 'string',
'DBInstanceClass': 'string',
'Engine': 'string',
'DBInstanceStatus': 'string',
'Endpoint': {
'Address': 'string',
'Port': 123,
'HostedZoneId': 'string'
},
'InstanceCreateTime': datetime(2015, 1, 1),
'PreferredBackupWindow': 'string',
'BackupRetentionPeriod': 123,
'VpcSecurityGroups': [
{
'VpcSecurityGroupId': 'string',
'Status': 'string'
},
],
'AvailabilityZone': 'string',
'DBSubnetGroup': {
'DBSubnetGroupName': 'string',
'DBSubnetGroupDescription': 'string',
'VpcId': 'string',
'SubnetGroupStatus': 'string',
'Subnets': [
{
'SubnetIdentifier': 'string',
'SubnetAvailabilityZone': {
'Name': 'string'
},
'SubnetStatus': 'string'
},
],
'DBSubnetGroupArn': 'string'
},
'PreferredMaintenanceWindow': 'string',
'PendingModifiedValues': {
'DBInstanceClass': 'string',
'AllocatedStorage': 123,
'MasterUserPassword': 'string',
'Port': 123,
'BackupRetentionPeriod': 123,
'MultiAZ': True|False,
'EngineVersion': 'string',
'LicenseModel': 'string',
'Iops': 123,
'DBInstanceIdentifier': 'string',
'StorageType': 'string',
'CACertificateIdentifier': 'string',
'DBSubnetGroupName': 'string',
'PendingCloudwatchLogsExports': {
'LogTypesToEnable': [
'string',
],
'LogTypesToDisable': [
'string',
]
}
},
'LatestRestorableTime': datetime(2015, 1, 1),
'EngineVersion': 'string',
'AutoMinorVersionUpgrade': True|False,
'PubliclyAccessible': True|False,
'StatusInfos': [
{
'StatusType': 'string',
'Normal': True|False,
'Status': 'string',
'Message': 'string'
},
],
'DBClusterIdentifier': 'string',
'StorageEncrypted': True|False,
'KmsKeyId': 'string',
'DbiResourceId': 'string',
'CACertificateIdentifier': 'string',
'PromotionTier': 123,
'DBInstanceArn': 'string',
'EnabledCloudwatchLogsExports': [
'string',
]
},
]
}
:returns:
(string) --
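    Usage sketch (added for illustration, not part of the AWS documentation): assumes boto3.client('docdb') with valid credentials; the cluster identifier used in the filter is hypothetical.
        import boto3
        client = boto3.client('docdb')
        resp = client.describe_db_instances(
            Filters=[{'Name': 'db-cluster-id', 'Values': ['sample-cluster']}]
        )
        for instance in resp['DBInstances']:
            print(instance['DBInstanceIdentifier'], instance['DBInstanceStatus'])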
"""
pass
def describe_db_subnet_groups(DBSubnetGroupName=None, Filters=None, MaxRecords=None, Marker=None):
"""
Returns a list of DBSubnetGroup descriptions. If a DBSubnetGroupName is specified, the list will contain only the descriptions of the specified DBSubnetGroup .
See also: AWS API Documentation
Exceptions
:example: response = client.describe_db_subnet_groups(
DBSubnetGroupName='string',
Filters=[
{
'Name': 'string',
'Values': [
'string',
]
},
],
MaxRecords=123,
Marker='string'
)
:type DBSubnetGroupName: string
:param DBSubnetGroupName: The name of the subnet group to return details for.
:type Filters: list
:param Filters: This parameter is not currently supported.\n\n(dict) --A named set of filter values, used to return a more specific list of results. You can use a filter to match a set of resources by specific criteria, such as IDs.\nWildcards are not supported in filters.\n\nName (string) -- [REQUIRED]The name of the filter. Filter names are case sensitive.\n\nValues (list) -- [REQUIRED]One or more filter values. Filter values are case sensitive.\n\n(string) --\n\n\n\n\n\n
:type MaxRecords: integer
:param MaxRecords: The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token (marker) is included in the response so that the remaining results can be retrieved.\nDefault: 100\nConstraints: Minimum 20, maximum 100.\n
:type Marker: string
:param Marker: An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .
:rtype: dict
    Returns: Response Syntax
{
'Marker': 'string',
'DBSubnetGroups': [
{
'DBSubnetGroupName': 'string',
'DBSubnetGroupDescription': 'string',
'VpcId': 'string',
'SubnetGroupStatus': 'string',
'Subnets': [
{
'SubnetIdentifier': 'string',
'SubnetAvailabilityZone': {
'Name': 'string'
},
'SubnetStatus': 'string'
},
],
'DBSubnetGroupArn': 'string'
},
]
}
Response Structure
(dict) --
Represents the output of DescribeDBSubnetGroups .
Marker (string) --
An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .
DBSubnetGroups (list) --
Detailed information about one or more subnet groups.
(dict) --
Detailed information about a subnet group.
DBSubnetGroupName (string) --
The name of the subnet group.
DBSubnetGroupDescription (string) --
Provides the description of the subnet group.
VpcId (string) --
Provides the virtual private cloud (VPC) ID of the subnet group.
SubnetGroupStatus (string) --
Provides the status of the subnet group.
Subnets (list) --
Detailed information about one or more subnets within a subnet group.
(dict) --
Detailed information about a subnet.
SubnetIdentifier (string) --
Specifies the identifier of the subnet.
SubnetAvailabilityZone (dict) --
Specifies the Availability Zone for the subnet.
Name (string) --
The name of the Availability Zone.
SubnetStatus (string) --
Specifies the status of the subnet.
DBSubnetGroupArn (string) --
The Amazon Resource Name (ARN) for the DB subnet group.
Exceptions
DocDB.Client.exceptions.DBSubnetGroupNotFoundFault
:return: {
'Marker': 'string',
'DBSubnetGroups': [
{
'DBSubnetGroupName': 'string',
'DBSubnetGroupDescription': 'string',
'VpcId': 'string',
'SubnetGroupStatus': 'string',
'Subnets': [
{
'SubnetIdentifier': 'string',
'SubnetAvailabilityZone': {
'Name': 'string'
},
'SubnetStatus': 'string'
},
],
'DBSubnetGroupArn': 'string'
},
]
}
:returns:
DocDB.Client.exceptions.DBSubnetGroupNotFoundFault
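    Usage sketch (added for illustration, not part of the AWS documentation): assumes boto3.client('docdb') with valid credentials; the subnet group name is hypothetical.
        import boto3
        client = boto3.client('docdb')
        resp = client.describe_db_subnet_groups(DBSubnetGroupName='sample-subnet-group')
        for group in resp['DBSubnetGroups']:
            subnet_ids = [s['SubnetIdentifier'] for s in group['Subnets']]
            print(group['DBSubnetGroupName'], group['VpcId'], subnet_ids)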
"""
pass
def describe_engine_default_cluster_parameters(DBParameterGroupFamily=None, Filters=None, MaxRecords=None, Marker=None):
"""
Returns the default engine and system parameter information for the cluster database engine.
See also: AWS API Documentation
:example: response = client.describe_engine_default_cluster_parameters(
DBParameterGroupFamily='string',
Filters=[
{
'Name': 'string',
'Values': [
'string',
]
},
],
MaxRecords=123,
Marker='string'
)
:type DBParameterGroupFamily: string
:param DBParameterGroupFamily: [REQUIRED]\nThe name of the cluster parameter group family to return the engine parameter information for.\n
:type Filters: list
:param Filters: This parameter is not currently supported.\n\n(dict) --A named set of filter values, used to return a more specific list of results. You can use a filter to match a set of resources by specific criteria, such as IDs.\nWildcards are not supported in filters.\n\nName (string) -- [REQUIRED]The name of the filter. Filter names are case sensitive.\n\nValues (list) -- [REQUIRED]One or more filter values. Filter values are case sensitive.\n\n(string) --\n\n\n\n\n\n
:type MaxRecords: integer
:param MaxRecords: The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token (marker) is included in the response so that the remaining results can be retrieved.\nDefault: 100\nConstraints: Minimum 20, maximum 100.\n
:type Marker: string
:param Marker: An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .
:rtype: dict
    Returns: Response Syntax
{
'EngineDefaults': {
'DBParameterGroupFamily': 'string',
'Marker': 'string',
'Parameters': [
{
'ParameterName': 'string',
'ParameterValue': 'string',
'Description': 'string',
'Source': 'string',
'ApplyType': 'string',
'DataType': 'string',
'AllowedValues': 'string',
'IsModifiable': True|False,
'MinimumEngineVersion': 'string',
'ApplyMethod': 'immediate'|'pending-reboot'
},
]
}
}
Response Structure
(dict) --
EngineDefaults (dict) --
Contains the result of a successful invocation of the DescribeEngineDefaultClusterParameters operation.
DBParameterGroupFamily (string) --
The name of the cluster parameter group family to return the engine parameter information for.
Marker (string) --
An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .
Parameters (list) --
The parameters of a particular cluster parameter group family.
(dict) --
Detailed information about an individual parameter.
ParameterName (string) --
Specifies the name of the parameter.
ParameterValue (string) --
Specifies the value of the parameter.
Description (string) --
Provides a description of the parameter.
Source (string) --
Indicates the source of the parameter value.
ApplyType (string) --
Specifies the engine-specific parameters type.
DataType (string) --
Specifies the valid data type for the parameter.
AllowedValues (string) --
Specifies the valid range of values for the parameter.
IsModifiable (boolean) --
Indicates whether (true ) or not (false ) the parameter can be modified. Some parameters have security or operational implications that prevent them from being changed.
MinimumEngineVersion (string) --
The earliest engine version to which the parameter can apply.
ApplyMethod (string) --
Indicates when to apply parameter updates.
:return: {
'EngineDefaults': {
'DBParameterGroupFamily': 'string',
'Marker': 'string',
'Parameters': [
{
'ParameterName': 'string',
'ParameterValue': 'string',
'Description': 'string',
'Source': 'string',
'ApplyType': 'string',
'DataType': 'string',
'AllowedValues': 'string',
'IsModifiable': True|False,
'MinimumEngineVersion': 'string',
'ApplyMethod': 'immediate'|'pending-reboot'
},
]
}
}
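    Usage sketch (added for illustration, not part of the AWS documentation): assumes boto3.client('docdb') with valid credentials; 'docdb4.0' is one plausible parameter group family and should be replaced with the family that matches the engine version in use.
        import boto3
        client = boto3.client('docdb')
        resp = client.describe_engine_default_cluster_parameters(
            DBParameterGroupFamily='docdb4.0'
        )
        for param in resp['EngineDefaults']['Parameters']:
            print(param['ParameterName'], param.get('ParameterValue'))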
"""
pass
def describe_event_categories(SourceType=None, Filters=None):
"""
    Displays a list of categories for all event source types, or, if a source type is specified, for that source type only.
See also: AWS API Documentation
:example: response = client.describe_event_categories(
SourceType='string',
Filters=[
{
'Name': 'string',
'Values': [
'string',
]
},
]
)
:type SourceType: string
:param SourceType: The type of source that is generating the events.\nValid values: db-instance , db-parameter-group , db-security-group , db-snapshot\n
:type Filters: list
:param Filters: This parameter is not currently supported.\n\n(dict) --A named set of filter values, used to return a more specific list of results. You can use a filter to match a set of resources by specific criteria, such as IDs.\nWildcards are not supported in filters.\n\nName (string) -- [REQUIRED]The name of the filter. Filter names are case sensitive.\n\nValues (list) -- [REQUIRED]One or more filter values. Filter values are case sensitive.\n\n(string) --\n\n\n\n\n\n
:rtype: dict
    Returns: Response Syntax
{
'EventCategoriesMapList': [
{
'SourceType': 'string',
'EventCategories': [
'string',
]
},
]
}
Response Structure
(dict) --
Represents the output of DescribeEventCategories .
EventCategoriesMapList (list) --
A list of event category maps.
(dict) --
An event source type, accompanied by one or more event category names.
SourceType (string) --
The source type that the returned categories belong to.
EventCategories (list) --
The event categories for the specified source type.
(string) --
:return: {
'EventCategoriesMapList': [
{
'SourceType': 'string',
'EventCategories': [
'string',
]
},
]
}
:returns:
(string) --
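    Usage sketch (added for illustration, not part of the AWS documentation): assumes boto3.client('docdb') with valid credentials.
        import boto3
        client = boto3.client('docdb')
        resp = client.describe_event_categories(SourceType='db-instance')
        for mapping in resp['EventCategoriesMapList']:
            print(mapping['SourceType'], mapping['EventCategories'])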
"""
pass
def describe_events(SourceIdentifier=None, SourceType=None, StartTime=None, EndTime=None, Duration=None, EventCategories=None, Filters=None, MaxRecords=None, Marker=None):
"""
Returns events related to instances, security groups, snapshots, and DB parameter groups for the past 14 days. You can obtain events specific to a particular DB instance, security group, snapshot, or parameter group by providing the name as a parameter. By default, the events of the past hour are returned.
See also: AWS API Documentation
:example: response = client.describe_events(
SourceIdentifier='string',
SourceType='db-instance'|'db-parameter-group'|'db-security-group'|'db-snapshot'|'db-cluster'|'db-cluster-snapshot',
StartTime=datetime(2015, 1, 1),
EndTime=datetime(2015, 1, 1),
Duration=123,
EventCategories=[
'string',
],
Filters=[
{
'Name': 'string',
'Values': [
'string',
]
},
],
MaxRecords=123,
Marker='string'
)
:type SourceIdentifier: string
:param SourceIdentifier: The identifier of the event source for which events are returned. If not specified, then all sources are included in the response.\nConstraints:\n\nIf SourceIdentifier is provided, SourceType must also be provided.\nIf the source type is DBInstance , a DBInstanceIdentifier must be provided.\nIf the source type is DBSecurityGroup , a DBSecurityGroupName must be provided.\nIf the source type is DBParameterGroup , a DBParameterGroupName must be provided.\nIf the source type is DBSnapshot , a DBSnapshotIdentifier must be provided.\nCannot end with a hyphen or contain two consecutive hyphens.\n\n
:type SourceType: string
:param SourceType: The event source to retrieve events for. If no value is specified, all events are returned.
:type StartTime: datetime
:param StartTime: The beginning of the time interval to retrieve events for, specified in ISO 8601 format.\nExample: 2009-07-08T18:00Z\n
:type EndTime: datetime
:param EndTime: The end of the time interval for which to retrieve events, specified in ISO 8601 format.\nExample: 2009-07-08T18:00Z\n
:type Duration: integer
:param Duration: The number of minutes to retrieve events for.\nDefault: 60\n
:type EventCategories: list
:param EventCategories: A list of event categories that trigger notifications for an event notification subscription.\n\n(string) --\n\n
:type Filters: list
:param Filters: This parameter is not currently supported.\n\n(dict) --A named set of filter values, used to return a more specific list of results. You can use a filter to match a set of resources by specific criteria, such as IDs.\nWildcards are not supported in filters.\n\nName (string) -- [REQUIRED]The name of the filter. Filter names are case sensitive.\n\nValues (list) -- [REQUIRED]One or more filter values. Filter values are case sensitive.\n\n(string) --\n\n\n\n\n\n
:type MaxRecords: integer
:param MaxRecords: The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token (marker) is included in the response so that the remaining results can be retrieved.\nDefault: 100\nConstraints: Minimum 20, maximum 100.\n
:type Marker: string
:param Marker: An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .
:rtype: dict
    Returns: Response Syntax
{
'Marker': 'string',
'Events': [
{
'SourceIdentifier': 'string',
'SourceType': 'db-instance'|'db-parameter-group'|'db-security-group'|'db-snapshot'|'db-cluster'|'db-cluster-snapshot',
'Message': 'string',
'EventCategories': [
'string',
],
'Date': datetime(2015, 1, 1),
'SourceArn': 'string'
},
]
}
Response Structure
(dict) --
Represents the output of DescribeEvents .
Marker (string) --
An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .
Events (list) --
Detailed information about one or more events.
(dict) --
Detailed information about an event.
SourceIdentifier (string) --
Provides the identifier for the source of the event.
SourceType (string) --
Specifies the source type for this event.
Message (string) --
Provides the text of this event.
EventCategories (list) --
Specifies the category for the event.
(string) --
Date (datetime) --
Specifies the date and time of the event.
SourceArn (string) --
The Amazon Resource Name (ARN) for the event.
:return: {
'Marker': 'string',
'Events': [
{
'SourceIdentifier': 'string',
'SourceType': 'db-instance'|'db-parameter-group'|'db-security-group'|'db-snapshot'|'db-cluster'|'db-cluster-snapshot',
'Message': 'string',
'EventCategories': [
'string',
],
'Date': datetime(2015, 1, 1),
'SourceArn': 'string'
},
]
}
:returns:
(string) --
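    Usage sketch (added for illustration, not part of the AWS documentation): assumes boto3.client('docdb') with valid credentials and retrieves the last 24 hours (1440 minutes) of cluster events.
        import boto3
        client = boto3.client('docdb')
        resp = client.describe_events(SourceType='db-cluster', Duration=1440)
        for event in resp['Events']:
            print(event['Date'], event['SourceIdentifier'], event['Message'])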
"""
pass
def describe_orderable_db_instance_options(Engine=None, EngineVersion=None, DBInstanceClass=None, LicenseModel=None, Vpc=None, Filters=None, MaxRecords=None, Marker=None):
"""
Returns a list of orderable instance options for the specified engine.
See also: AWS API Documentation
:example: response = client.describe_orderable_db_instance_options(
Engine='string',
EngineVersion='string',
DBInstanceClass='string',
LicenseModel='string',
Vpc=True|False,
Filters=[
{
'Name': 'string',
'Values': [
'string',
]
},
],
MaxRecords=123,
Marker='string'
)
:type Engine: string
:param Engine: [REQUIRED]\nThe name of the engine to retrieve instance options for.\n
:type EngineVersion: string
:param EngineVersion: The engine version filter value. Specify this parameter to show only the available offerings that match the specified engine version.
:type DBInstanceClass: string
:param DBInstanceClass: The instance class filter value. Specify this parameter to show only the available offerings that match the specified instance class.
:type LicenseModel: string
:param LicenseModel: The license model filter value. Specify this parameter to show only the available offerings that match the specified license model.
:type Vpc: boolean
:param Vpc: The virtual private cloud (VPC) filter value. Specify this parameter to show only the available VPC or non-VPC offerings.
:type Filters: list
:param Filters: This parameter is not currently supported.\n\n(dict) --A named set of filter values, used to return a more specific list of results. You can use a filter to match a set of resources by specific criteria, such as IDs.\nWildcards are not supported in filters.\n\nName (string) -- [REQUIRED]The name of the filter. Filter names are case sensitive.\n\nValues (list) -- [REQUIRED]One or more filter values. Filter values are case sensitive.\n\n(string) --\n\n\n\n\n\n
:type MaxRecords: integer
:param MaxRecords: The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token (marker) is included in the response so that the remaining results can be retrieved.\nDefault: 100\nConstraints: Minimum 20, maximum 100.\n
:type Marker: string
:param Marker: An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .
:rtype: dict
    Returns: Response Syntax
{
'OrderableDBInstanceOptions': [
{
'Engine': 'string',
'EngineVersion': 'string',
'DBInstanceClass': 'string',
'LicenseModel': 'string',
'AvailabilityZones': [
{
'Name': 'string'
},
],
'Vpc': True|False
},
],
'Marker': 'string'
}
Response Structure
(dict) --
Represents the output of DescribeOrderableDBInstanceOptions .
OrderableDBInstanceOptions (list) --
The options that are available for a particular orderable instance.
(dict) --
The options that are available for an instance.
Engine (string) --
The engine type of an instance.
EngineVersion (string) --
The engine version of an instance.
DBInstanceClass (string) --
The instance class for an instance.
LicenseModel (string) --
The license model for an instance.
AvailabilityZones (list) --
A list of Availability Zones for an instance.
(dict) --
Information about an Availability Zone.
Name (string) --
The name of the Availability Zone.
Vpc (boolean) --
Indicates whether an instance is in a virtual private cloud (VPC).
Marker (string) --
An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .
:return: {
'OrderableDBInstanceOptions': [
{
'Engine': 'string',
'EngineVersion': 'string',
'DBInstanceClass': 'string',
'LicenseModel': 'string',
'AvailabilityZones': [
{
'Name': 'string'
},
],
'Vpc': True|False
},
],
'Marker': 'string'
}
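    Usage sketch (added for illustration, not part of the AWS documentation): assumes boto3.client('docdb') with valid credentials and lists the orderable instance classes per engine version.
        import boto3
        client = boto3.client('docdb')
        resp = client.describe_orderable_db_instance_options(Engine='docdb')
        for option in resp['OrderableDBInstanceOptions']:
            zones = [az['Name'] for az in option['AvailabilityZones']]
            print(option['DBInstanceClass'], option['EngineVersion'], zones)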
"""
pass
def describe_pending_maintenance_actions(ResourceIdentifier=None, Filters=None, Marker=None, MaxRecords=None):
"""
Returns a list of resources (for example, instances) that have at least one pending maintenance action.
See also: AWS API Documentation
Exceptions
:example: response = client.describe_pending_maintenance_actions(
ResourceIdentifier='string',
Filters=[
{
'Name': 'string',
'Values': [
'string',
]
},
],
Marker='string',
MaxRecords=123
)
:type ResourceIdentifier: string
:param ResourceIdentifier: The ARN of a resource to return pending maintenance actions for.
:type Filters: list
:param Filters: A filter that specifies one or more resources to return pending maintenance actions for.\nSupported filters:\n\ndb-cluster-id - Accepts cluster identifiers and cluster Amazon Resource Names (ARNs). The results list includes only pending maintenance actions for the clusters identified by these ARNs.\ndb-instance-id - Accepts instance identifiers and instance ARNs. The results list includes only pending maintenance actions for the DB instances identified by these ARNs.\n\n\n(dict) --A named set of filter values, used to return a more specific list of results. You can use a filter to match a set of resources by specific criteria, such as IDs.\nWildcards are not supported in filters.\n\nName (string) -- [REQUIRED]The name of the filter. Filter names are case sensitive.\n\nValues (list) -- [REQUIRED]One or more filter values. Filter values are case sensitive.\n\n(string) --\n\n\n\n\n\n
:type Marker: string
:param Marker: An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .
:type MaxRecords: integer
:param MaxRecords: The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token (marker) is included in the response so that the remaining results can be retrieved.\nDefault: 100\nConstraints: Minimum 20, maximum 100.\n
:rtype: dict
    Returns: Response Syntax
{
'PendingMaintenanceActions': [
{
'ResourceIdentifier': 'string',
'PendingMaintenanceActionDetails': [
{
'Action': 'string',
'AutoAppliedAfterDate': datetime(2015, 1, 1),
'ForcedApplyDate': datetime(2015, 1, 1),
'OptInStatus': 'string',
'CurrentApplyDate': datetime(2015, 1, 1),
'Description': 'string'
},
]
},
],
'Marker': 'string'
}
Response Structure
(dict) --
Represents the output of DescribePendingMaintenanceActions .
PendingMaintenanceActions (list) --
The maintenance actions to be applied.
(dict) --
Represents the output of ApplyPendingMaintenanceAction .
ResourceIdentifier (string) --
The Amazon Resource Name (ARN) of the resource that has pending maintenance actions.
PendingMaintenanceActionDetails (list) --
A list that provides details about the pending maintenance actions for the resource.
(dict) --
Provides information about a pending maintenance action for a resource.
Action (string) --
The type of pending maintenance action that is available for the resource.
AutoAppliedAfterDate (datetime) --
The date of the maintenance window when the action is applied. The maintenance action is applied to the resource during its first maintenance window after this date. If this date is specified, any next-maintenance opt-in requests are ignored.
ForcedApplyDate (datetime) --
The date when the maintenance action is automatically applied. The maintenance action is applied to the resource on this date regardless of the maintenance window for the resource. If this date is specified, any immediate opt-in requests are ignored.
OptInStatus (string) --
Indicates the type of opt-in request that has been received for the resource.
CurrentApplyDate (datetime) --
The effective date when the pending maintenance action is applied to the resource.
Description (string) --
A description providing more detail about the maintenance action.
Marker (string) --
An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords .
Exceptions
DocDB.Client.exceptions.ResourceNotFoundFault
:return: {
'PendingMaintenanceActions': [
{
'ResourceIdentifier': 'string',
'PendingMaintenanceActionDetails': [
{
'Action': 'string',
'AutoAppliedAfterDate': datetime(2015, 1, 1),
'ForcedApplyDate': datetime(2015, 1, 1),
'OptInStatus': 'string',
'CurrentApplyDate': datetime(2015, 1, 1),
'Description': 'string'
},
]
},
],
'Marker': 'string'
}
:returns:
DocDB.Client.exceptions.ResourceNotFoundFault
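    Usage sketch (added for illustration, not part of the AWS documentation): assumes boto3.client('docdb') with valid credentials and prints every pending maintenance action across resources.
        import boto3
        client = boto3.client('docdb')
        resp = client.describe_pending_maintenance_actions()
        for resource in resp['PendingMaintenanceActions']:
            for action in resource['PendingMaintenanceActionDetails']:
                print(resource['ResourceIdentifier'], action['Action'], action.get('CurrentApplyDate'))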
"""
pass
def failover_db_cluster(DBClusterIdentifier=None, TargetDBInstanceIdentifier=None):
"""
Forces a failover for a cluster.
A failover for a cluster promotes one of the Amazon DocumentDB replicas (read-only instances) in the cluster to be the primary instance (the cluster writer).
If the primary instance fails, Amazon DocumentDB automatically fails over to an Amazon DocumentDB replica, if one exists. You can force a failover when you want to simulate a failure of a primary instance for testing.
See also: AWS API Documentation
Exceptions
:example: response = client.failover_db_cluster(
DBClusterIdentifier='string',
TargetDBInstanceIdentifier='string'
)
:type DBClusterIdentifier: string
:param DBClusterIdentifier: A cluster identifier to force a failover for. This parameter is not case sensitive.\nConstraints:\n\nMust match the identifier of an existing DBCluster .\n\n
:type TargetDBInstanceIdentifier: string
:param TargetDBInstanceIdentifier: The name of the instance to promote to the primary instance.\nYou must specify the instance identifier for an Amazon DocumentDB replica in the cluster. For example, mydbcluster-replica1 .\n
:rtype: dict
    Returns: Response Syntax
{
'DBCluster': {
'AvailabilityZones': [
'string',
],
'BackupRetentionPeriod': 123,
'DBClusterIdentifier': 'string',
'DBClusterParameterGroup': 'string',
'DBSubnetGroup': 'string',
'Status': 'string',
'PercentProgress': 'string',
'EarliestRestorableTime': datetime(2015, 1, 1),
'Endpoint': 'string',
'ReaderEndpoint': 'string',
'MultiAZ': True|False,
'Engine': 'string',
'EngineVersion': 'string',
'LatestRestorableTime': datetime(2015, 1, 1),
'Port': 123,
'MasterUsername': 'string',
'PreferredBackupWindow': 'string',
'PreferredMaintenanceWindow': 'string',
'DBClusterMembers': [
{
'DBInstanceIdentifier': 'string',
'IsClusterWriter': True|False,
'DBClusterParameterGroupStatus': 'string',
'PromotionTier': 123
},
],
'VpcSecurityGroups': [
{
'VpcSecurityGroupId': 'string',
'Status': 'string'
},
],
'HostedZoneId': 'string',
'StorageEncrypted': True|False,
'KmsKeyId': 'string',
'DbClusterResourceId': 'string',
'DBClusterArn': 'string',
'AssociatedRoles': [
{
'RoleArn': 'string',
'Status': 'string'
},
],
'ClusterCreateTime': datetime(2015, 1, 1),
'EnabledCloudwatchLogsExports': [
'string',
],
'DeletionProtection': True|False
}
}
Response Structure
(dict) --
DBCluster (dict) --
Detailed information about a cluster.
AvailabilityZones (list) --
Provides the list of Amazon EC2 Availability Zones that instances in the cluster can be created in.
(string) --
BackupRetentionPeriod (integer) --
Specifies the number of days for which automatic snapshots are retained.
DBClusterIdentifier (string) --
Contains a user-supplied cluster identifier. This identifier is the unique key that identifies a cluster.
DBClusterParameterGroup (string) --
Specifies the name of the cluster parameter group for the cluster.
DBSubnetGroup (string) --
Specifies information on the subnet group that is associated with the cluster, including the name, description, and subnets in the subnet group.
Status (string) --
Specifies the current state of this cluster.
PercentProgress (string) --
Specifies the progress of the operation as a percentage.
EarliestRestorableTime (datetime) --
The earliest time to which a database can be restored with point-in-time restore.
Endpoint (string) --
Specifies the connection endpoint for the primary instance of the cluster.
ReaderEndpoint (string) --
The reader endpoint for the cluster. The reader endpoint for a cluster load balances connections across the Amazon DocumentDB replicas that are available in a cluster. As clients request new connections to the reader endpoint, Amazon DocumentDB distributes the connection requests among the Amazon DocumentDB replicas in the cluster. This functionality can help balance your read workload across multiple Amazon DocumentDB replicas in your cluster.
If a failover occurs, and the Amazon DocumentDB replica that you are connected to is promoted to be the primary instance, your connection is dropped. To continue sending your read workload to other Amazon DocumentDB replicas in the cluster, you can then reconnect to the reader endpoint.
MultiAZ (boolean) --
Specifies whether the cluster has instances in multiple Availability Zones.
Engine (string) --
Provides the name of the database engine to be used for this cluster.
EngineVersion (string) --
Indicates the database engine version.
LatestRestorableTime (datetime) --
Specifies the latest time to which a database can be restored with point-in-time restore.
Port (integer) --
Specifies the port that the database engine is listening on.
MasterUsername (string) --
Contains the master user name for the cluster.
PreferredBackupWindow (string) --
Specifies the daily time range during which automated backups are created if automated backups are enabled, as determined by the BackupRetentionPeriod .
PreferredMaintenanceWindow (string) --
Specifies the weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).
DBClusterMembers (list) --
Provides the list of instances that make up the cluster.
(dict) --
Contains information about an instance that is part of a cluster.
DBInstanceIdentifier (string) --
Specifies the instance identifier for this member of the cluster.
IsClusterWriter (boolean) --
A value that is true if the cluster member is the primary instance for the cluster and false otherwise.
DBClusterParameterGroupStatus (string) --
Specifies the status of the cluster parameter group for this member of the DB cluster.
PromotionTier (integer) --
A value that specifies the order in which an Amazon DocumentDB replica is promoted to the primary instance after a failure of the existing primary instance.
VpcSecurityGroups (list) --
Provides a list of virtual private cloud (VPC) security groups that the cluster belongs to.
(dict) --
Used as a response element for queries on virtual private cloud (VPC) security group membership.
VpcSecurityGroupId (string) --
The name of the VPC security group.
Status (string) --
The status of the VPC security group.
HostedZoneId (string) --
Specifies the ID that Amazon Route 53 assigns when you create a hosted zone.
StorageEncrypted (boolean) --
Specifies whether the cluster is encrypted.
KmsKeyId (string) --
If StorageEncrypted is true , the AWS KMS key identifier for the encrypted cluster.
DbClusterResourceId (string) --
The AWS Region-unique, immutable identifier for the cluster. This identifier is found in AWS CloudTrail log entries whenever the AWS KMS key for the cluster is accessed.
DBClusterArn (string) --
The Amazon Resource Name (ARN) for the cluster.
AssociatedRoles (list) --
Provides a list of the AWS Identity and Access Management (IAM) roles that are associated with the cluster. IAM roles that are associated with a cluster grant permission for the cluster to access other AWS services on your behalf.
(dict) --
Describes an AWS Identity and Access Management (IAM) role that is associated with a cluster.
RoleArn (string) --
The Amazon Resource Name (ARN) of the IAM role that is associated with the DB cluster.
Status (string) --
Describes the state of association between the IAM role and the cluster. The Status property returns one of the following values:
ACTIVE - The IAM role ARN is associated with the cluster and can be used to access other AWS services on your behalf.
PENDING - The IAM role ARN is being associated with the DB cluster.
INVALID - The IAM role ARN is associated with the cluster, but the cluster cannot assume the IAM role to access other AWS services on your behalf.
ClusterCreateTime (datetime) --
Specifies the time when the cluster was created, in Universal Coordinated Time (UTC).
EnabledCloudwatchLogsExports (list) --
A list of log types that this cluster is configured to export to Amazon CloudWatch Logs.
(string) --
DeletionProtection (boolean) --
Specifies whether this cluster can be deleted. If DeletionProtection is enabled, the cluster cannot be deleted unless it is modified and DeletionProtection is disabled. DeletionProtection protects clusters from being accidentally deleted.
Exceptions
DocDB.Client.exceptions.DBClusterNotFoundFault
DocDB.Client.exceptions.InvalidDBClusterStateFault
DocDB.Client.exceptions.InvalidDBInstanceStateFault
:return: {
'DBCluster': {
'AvailabilityZones': [
'string',
],
'BackupRetentionPeriod': 123,
'DBClusterIdentifier': 'string',
'DBClusterParameterGroup': 'string',
'DBSubnetGroup': 'string',
'Status': 'string',
'PercentProgress': 'string',
'EarliestRestorableTime': datetime(2015, 1, 1),
'Endpoint': 'string',
'ReaderEndpoint': 'string',
'MultiAZ': True|False,
'Engine': 'string',
'EngineVersion': 'string',
'LatestRestorableTime': datetime(2015, 1, 1),
'Port': 123,
'MasterUsername': 'string',
'PreferredBackupWindow': 'string',
'PreferredMaintenanceWindow': 'string',
'DBClusterMembers': [
{
'DBInstanceIdentifier': 'string',
'IsClusterWriter': True|False,
'DBClusterParameterGroupStatus': 'string',
'PromotionTier': 123
},
],
'VpcSecurityGroups': [
{
'VpcSecurityGroupId': 'string',
'Status': 'string'
},
],
'HostedZoneId': 'string',
'StorageEncrypted': True|False,
'KmsKeyId': 'string',
'DbClusterResourceId': 'string',
'DBClusterArn': 'string',
'AssociatedRoles': [
{
'RoleArn': 'string',
'Status': 'string'
},
],
'ClusterCreateTime': datetime(2015, 1, 1),
'EnabledCloudwatchLogsExports': [
'string',
],
'DeletionProtection': True|False
}
}
:returns:
(string) --
"""
pass
def generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):
"""
Generate a presigned url given a client, its method, and arguments
:type ClientMethod: string
:param ClientMethod: The client method to presign for
:type Params: dict
:param Params: The parameters normally passed to\nClientMethod.
:type ExpiresIn: int
:param ExpiresIn: The number of seconds the presigned url is valid\nfor. By default it expires in an hour (3600 seconds)
:type HttpMethod: string
:param HttpMethod: The http method to use on the generated url. By\ndefault, the http method is whatever is used in the method\'s model.
"""
pass
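# Usage sketch: presigning a DocDB API call through a real boto3 client. The
# region and expiration below are hypothetical placeholder values.
def _example_generate_presigned_url():
    import boto3
    client = boto3.client('docdb', region_name='us-east-1')
    # Create a URL that invokes DescribeDBClusters and stays valid for 10 minutes.
    return client.generate_presigned_url(
        ClientMethod='describe_db_clusters',
        Params={},
        ExpiresIn=600,
    )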
def get_paginator(operation_name=None):
"""
Create a paginator for an operation.
:type operation_name: string
:param operation_name: The operation name. This is the same name\nas the method name on the client. For example, if the\nmethod name is create_foo, and you\'d normally invoke the\noperation as client.create_foo(**kwargs), if the\ncreate_foo operation can be paginated, you can use the\ncall client.get_paginator('create_foo').
:rtype: L{botocore.paginate.Paginator}
Returns: A paginator object.
"""
pass
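# Usage sketch: paginating DescribeDBClusters results. Assumes that operation is
# paginatable on this client; no filters are applied in this minimal example.
def _example_get_paginator():
    import boto3
    client = boto3.client('docdb')
    paginator = client.get_paginator('describe_db_clusters')
    # Collect every cluster identifier across all result pages.
    identifiers = []
    for page in paginator.paginate():
        for cluster in page['DBClusters']:
            identifiers.append(cluster['DBClusterIdentifier'])
    return identifiers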
def get_waiter(waiter_name=None):
"""
Returns an object that can wait for some condition.
:type waiter_name: str
:param waiter_name: The name of the waiter to get. See the waiters\nsection of the service docs for a list of available waiters.
:rtype: botocore.waiter.Waiter
"""
pass
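# Usage sketch: blocking until an instance reports 'available'. Assumes the
# 'db_instance_available' waiter exists for this client; the instance
# identifier is a hypothetical placeholder.
def _example_get_waiter():
    import boto3
    client = boto3.client('docdb')
    waiter = client.get_waiter('db_instance_available')
    # Poll DescribeDBInstances until the instance is available or the waiter times out.
    waiter.wait(DBInstanceIdentifier='sample-instance')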
def list_tags_for_resource(ResourceName=None, Filters=None):
"""
Lists all tags on an Amazon DocumentDB resource.
See also: AWS API Documentation
Exceptions
:example: response = client.list_tags_for_resource(
ResourceName='string',
Filters=[
{
'Name': 'string',
'Values': [
'string',
]
},
]
)
:type ResourceName: string
:param ResourceName: [REQUIRED]\nThe Amazon DocumentDB resource with tags to be listed. This value is an Amazon Resource Name (ARN).\n
:type Filters: list
:param Filters: This parameter is not currently supported.\n\n(dict) --A named set of filter values, used to return a more specific list of results. You can use a filter to match a set of resources by specific criteria, such as IDs.\nWildcards are not supported in filters.\n\nName (string) -- [REQUIRED]The name of the filter. Filter names are case sensitive.\n\nValues (list) -- [REQUIRED]One or more filter values. Filter values are case sensitive.\n\n(string) --\n\n\n\n\n\n
:rtype: dict
Returns: Response Syntax
{
'TagList': [
{
'Key': 'string',
'Value': 'string'
},
]
}
Response Structure
(dict) --
Represents the output of ListTagsForResource .
TagList (list) --
A list of one or more tags.
(dict) --
Metadata assigned to an Amazon DocumentDB resource consisting of a key-value pair.
Key (string) --
The required name of the tag. The string value can be from 1 to 128 Unicode characters in length and can\'t be prefixed with "aws:" or "rds:". The string can contain only the set of Unicode letters, digits, white space, \'_\', \'.\', \'/\', \'=\', \'+\', \'-\' (Java regex: "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$").
Value (string) --
The optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and can\'t be prefixed with "aws:" or "rds:". The string can contain only the set of Unicode letters, digits, white space, \'_\', \'.\', \'/\', \'=\', \'+\', \'-\' (Java regex: "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$").
Exceptions
DocDB.Client.exceptions.DBInstanceNotFoundFault
DocDB.Client.exceptions.DBSnapshotNotFoundFault
DocDB.Client.exceptions.DBClusterNotFoundFault
:return: {
'TagList': [
{
'Key': 'string',
'Value': 'string'
},
]
}
:returns:
DocDB.Client.exceptions.DBInstanceNotFoundFault
DocDB.Client.exceptions.DBSnapshotNotFoundFault
DocDB.Client.exceptions.DBClusterNotFoundFault
"""
pass
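# Usage sketch: listing the tags on a cluster. The ARN below is a hypothetical
# placeholder; substitute the ARN of an existing DocDB resource.
def _example_list_tags_for_resource():
    import boto3
    client = boto3.client('docdb')
    response = client.list_tags_for_resource(
        ResourceName='arn:aws:rds:us-east-1:123456789012:cluster:sample-cluster'
    )
    # Flatten the TagList of Key/Value pairs into a plain dictionary.
    return {tag['Key']: tag['Value'] for tag in response['TagList']}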
def modify_db_cluster(DBClusterIdentifier=None, NewDBClusterIdentifier=None, ApplyImmediately=None, BackupRetentionPeriod=None, DBClusterParameterGroupName=None, VpcSecurityGroupIds=None, Port=None, MasterUserPassword=None, PreferredBackupWindow=None, PreferredMaintenanceWindow=None, CloudwatchLogsExportConfiguration=None, EngineVersion=None, DeletionProtection=None):
"""
Modifies a setting for an Amazon DocumentDB cluster. You can change one or more database configuration parameters by specifying these parameters and the new values in the request.
See also: AWS API Documentation
Exceptions
:example: response = client.modify_db_cluster(
DBClusterIdentifier='string',
NewDBClusterIdentifier='string',
ApplyImmediately=True|False,
BackupRetentionPeriod=123,
DBClusterParameterGroupName='string',
VpcSecurityGroupIds=[
'string',
],
Port=123,
MasterUserPassword='string',
PreferredBackupWindow='string',
PreferredMaintenanceWindow='string',
CloudwatchLogsExportConfiguration={
'EnableLogTypes': [
'string',
],
'DisableLogTypes': [
'string',
]
},
EngineVersion='string',
DeletionProtection=True|False
)
:type DBClusterIdentifier: string
:param DBClusterIdentifier: [REQUIRED]\nThe cluster identifier for the cluster that is being modified. This parameter is not case sensitive.\nConstraints:\n\nMust match the identifier of an existing DBCluster .\n\n
:type NewDBClusterIdentifier: string
:param NewDBClusterIdentifier: The new cluster identifier for the cluster when renaming a cluster. This value is stored as a lowercase string.\nConstraints:\n\nMust contain from 1 to 63 letters, numbers, or hyphens.\nThe first character must be a letter.\nCannot end with a hyphen or contain two consecutive hyphens.\n\nExample: my-cluster2\n
:type ApplyImmediately: boolean
:param ApplyImmediately: A value that specifies whether the changes in this request and any pending changes are asynchronously applied as soon as possible, regardless of the PreferredMaintenanceWindow setting for the cluster. If this parameter is set to false , changes to the cluster are applied during the next maintenance window.\nThe ApplyImmediately parameter affects only the NewDBClusterIdentifier and MasterUserPassword values. If you set this parameter value to false , the changes to the NewDBClusterIdentifier and MasterUserPassword values are applied during the next maintenance window. All other changes are applied immediately, regardless of the value of the ApplyImmediately parameter.\nDefault: false\n
:type BackupRetentionPeriod: integer
:param BackupRetentionPeriod: The number of days for which automated backups are retained. You must specify a minimum value of 1.\nDefault: 1\nConstraints:\n\nMust be a value from 1 to 35.\n\n
:type DBClusterParameterGroupName: string
:param DBClusterParameterGroupName: The name of the cluster parameter group to use for the cluster.
:type VpcSecurityGroupIds: list
:param VpcSecurityGroupIds: A list of virtual private cloud (VPC) security groups that the cluster will belong to.\n\n(string) --\n\n
:type Port: integer
:param Port: The port number on which the cluster accepts connections.\nConstraints: Must be a value from 1150 to 65535 .\nDefault: The same port as the original cluster.\n
:type MasterUserPassword: string
:param MasterUserPassword: The password for the master database user. This password can contain any printable ASCII character except forward slash (/), double quote ("), or the 'at' symbol (@).\nConstraints: Must contain from 8 to 100 characters.\n
:type PreferredBackupWindow: string
:param PreferredBackupWindow: The daily time range during which automated backups are created if automated backups are enabled, using the BackupRetentionPeriod parameter.\nThe default is a 30-minute window selected at random from an 8-hour block of time for each AWS Region.\nConstraints:\n\nMust be in the format hh24:mi-hh24:mi .\nMust be in Universal Coordinated Time (UTC).\nMust not conflict with the preferred maintenance window.\nMust be at least 30 minutes.\n\n
:type PreferredMaintenanceWindow: string
:param PreferredMaintenanceWindow: The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).\nFormat: ddd:hh24:mi-ddd:hh24:mi\nThe default is a 30-minute window selected at random from an 8-hour block of time for each AWS Region, occurring on a random day of the week.\nValid days: Mon, Tue, Wed, Thu, Fri, Sat, Sun\nConstraints: Minimum 30-minute window.\n
:type CloudwatchLogsExportConfiguration: dict
:param CloudwatchLogsExportConfiguration: The configuration setting for the log types to be enabled for export to Amazon CloudWatch Logs for a specific instance or cluster. The EnableLogTypes and DisableLogTypes arrays determine which logs are exported (or not exported) to CloudWatch Logs.\n\nEnableLogTypes (list) --The list of log types to enable.\n\n(string) --\n\n\nDisableLogTypes (list) --The list of log types to disable.\n\n(string) --\n\n\n\n
:type EngineVersion: string
:param EngineVersion: The version number of the database engine to which you want to upgrade. Changing this parameter results in an outage. The change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true .
:type DeletionProtection: boolean
:param DeletionProtection: Specifies whether this cluster can be deleted. If DeletionProtection is enabled, the cluster cannot be deleted unless it is modified and DeletionProtection is disabled. DeletionProtection protects clusters from being accidentally deleted.
:rtype: dict
Returns: Response Syntax
{
'DBCluster': {
'AvailabilityZones': [
'string',
],
'BackupRetentionPeriod': 123,
'DBClusterIdentifier': 'string',
'DBClusterParameterGroup': 'string',
'DBSubnetGroup': 'string',
'Status': 'string',
'PercentProgress': 'string',
'EarliestRestorableTime': datetime(2015, 1, 1),
'Endpoint': 'string',
'ReaderEndpoint': 'string',
'MultiAZ': True|False,
'Engine': 'string',
'EngineVersion': 'string',
'LatestRestorableTime': datetime(2015, 1, 1),
'Port': 123,
'MasterUsername': 'string',
'PreferredBackupWindow': 'string',
'PreferredMaintenanceWindow': 'string',
'DBClusterMembers': [
{
'DBInstanceIdentifier': 'string',
'IsClusterWriter': True|False,
'DBClusterParameterGroupStatus': 'string',
'PromotionTier': 123
},
],
'VpcSecurityGroups': [
{
'VpcSecurityGroupId': 'string',
'Status': 'string'
},
],
'HostedZoneId': 'string',
'StorageEncrypted': True|False,
'KmsKeyId': 'string',
'DbClusterResourceId': 'string',
'DBClusterArn': 'string',
'AssociatedRoles': [
{
'RoleArn': 'string',
'Status': 'string'
},
],
'ClusterCreateTime': datetime(2015, 1, 1),
'EnabledCloudwatchLogsExports': [
'string',
],
'DeletionProtection': True|False
}
}
Response Structure
(dict) --
DBCluster (dict) --
Detailed information about a cluster.
AvailabilityZones (list) --
Provides the list of Amazon EC2 Availability Zones that instances in the cluster can be created in.
(string) --
BackupRetentionPeriod (integer) --
Specifies the number of days for which automatic snapshots are retained.
DBClusterIdentifier (string) --
Contains a user-supplied cluster identifier. This identifier is the unique key that identifies a cluster.
DBClusterParameterGroup (string) --
Specifies the name of the cluster parameter group for the cluster.
DBSubnetGroup (string) --
Specifies information on the subnet group that is associated with the cluster, including the name, description, and subnets in the subnet group.
Status (string) --
Specifies the current state of this cluster.
PercentProgress (string) --
Specifies the progress of the operation as a percentage.
EarliestRestorableTime (datetime) --
The earliest time to which a database can be restored with point-in-time restore.
Endpoint (string) --
Specifies the connection endpoint for the primary instance of the cluster.
ReaderEndpoint (string) --
The reader endpoint for the cluster. The reader endpoint for a cluster load balances connections across the Amazon DocumentDB replicas that are available in a cluster. As clients request new connections to the reader endpoint, Amazon DocumentDB distributes the connection requests among the Amazon DocumentDB replicas in the cluster. This functionality can help balance your read workload across multiple Amazon DocumentDB replicas in your cluster.
If a failover occurs, and the Amazon DocumentDB replica that you are connected to is promoted to be the primary instance, your connection is dropped. To continue sending your read workload to other Amazon DocumentDB replicas in the cluster, you can then reconnect to the reader endpoint.
MultiAZ (boolean) --
Specifies whether the cluster has instances in multiple Availability Zones.
Engine (string) --
Provides the name of the database engine to be used for this cluster.
EngineVersion (string) --
Indicates the database engine version.
LatestRestorableTime (datetime) --
Specifies the latest time to which a database can be restored with point-in-time restore.
Port (integer) --
Specifies the port that the database engine is listening on.
MasterUsername (string) --
Contains the master user name for the cluster.
PreferredBackupWindow (string) --
Specifies the daily time range during which automated backups are created if automated backups are enabled, as determined by the BackupRetentionPeriod .
PreferredMaintenanceWindow (string) --
Specifies the weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).
DBClusterMembers (list) --
Provides the list of instances that make up the cluster.
(dict) --
Contains information about an instance that is part of a cluster.
DBInstanceIdentifier (string) --
Specifies the instance identifier for this member of the cluster.
IsClusterWriter (boolean) --
A value that is true if the cluster member is the primary instance for the cluster and false otherwise.
DBClusterParameterGroupStatus (string) --
Specifies the status of the cluster parameter group for this member of the DB cluster.
PromotionTier (integer) --
A value that specifies the order in which an Amazon DocumentDB replica is promoted to the primary instance after a failure of the existing primary instance.
VpcSecurityGroups (list) --
Provides a list of virtual private cloud (VPC) security groups that the cluster belongs to.
(dict) --
Used as a response element for queries on virtual private cloud (VPC) security group membership.
VpcSecurityGroupId (string) --
The name of the VPC security group.
Status (string) --
The status of the VPC security group.
HostedZoneId (string) --
Specifies the ID that Amazon Route 53 assigns when you create a hosted zone.
StorageEncrypted (boolean) --
Specifies whether the cluster is encrypted.
KmsKeyId (string) --
If StorageEncrypted is true , the AWS KMS key identifier for the encrypted cluster.
DbClusterResourceId (string) --
The AWS Region-unique, immutable identifier for the cluster. This identifier is found in AWS CloudTrail log entries whenever the AWS KMS key for the cluster is accessed.
DBClusterArn (string) --
The Amazon Resource Name (ARN) for the cluster.
AssociatedRoles (list) --
Provides a list of the AWS Identity and Access Management (IAM) roles that are associated with the cluster. IAM roles that are associated with a cluster grant permission for the cluster to access other AWS services on your behalf.
(dict) --
Describes an AWS Identity and Access Management (IAM) role that is associated with a cluster.
RoleArn (string) --
The Amazon Resource Name (ARN) of the IAM role that is associated with the DB cluster.
Status (string) --
Describes the state of association between the IAM role and the cluster. The Status property returns one of the following values:
ACTIVE - The IAM role ARN is associated with the cluster and can be used to access other AWS services on your behalf.
PENDING - The IAM role ARN is being associated with the DB cluster.
INVALID - The IAM role ARN is associated with the cluster, but the cluster cannot assume the IAM role to access other AWS services on your behalf.
ClusterCreateTime (datetime) --
Specifies the time when the cluster was created, in Universal Coordinated Time (UTC).
EnabledCloudwatchLogsExports (list) --
A list of log types that this cluster is configured to export to Amazon CloudWatch Logs.
(string) --
DeletionProtection (boolean) --
Specifies whether this cluster can be deleted. If DeletionProtection is enabled, the cluster cannot be deleted unless it is modified and DeletionProtection is disabled. DeletionProtection protects clusters from being accidentally deleted.
Exceptions
DocDB.Client.exceptions.DBClusterNotFoundFault
DocDB.Client.exceptions.InvalidDBClusterStateFault
DocDB.Client.exceptions.StorageQuotaExceededFault
DocDB.Client.exceptions.DBSubnetGroupNotFoundFault
DocDB.Client.exceptions.InvalidVPCNetworkStateFault
DocDB.Client.exceptions.InvalidDBSubnetGroupStateFault
DocDB.Client.exceptions.InvalidSubnet
DocDB.Client.exceptions.DBClusterParameterGroupNotFoundFault
DocDB.Client.exceptions.InvalidDBSecurityGroupStateFault
DocDB.Client.exceptions.InvalidDBInstanceStateFault
DocDB.Client.exceptions.DBClusterAlreadyExistsFault
:return: {
'DBCluster': {
'AvailabilityZones': [
'string',
],
'BackupRetentionPeriod': 123,
'DBClusterIdentifier': 'string',
'DBClusterParameterGroup': 'string',
'DBSubnetGroup': 'string',
'Status': 'string',
'PercentProgress': 'string',
'EarliestRestorableTime': datetime(2015, 1, 1),
'Endpoint': 'string',
'ReaderEndpoint': 'string',
'MultiAZ': True|False,
'Engine': 'string',
'EngineVersion': 'string',
'LatestRestorableTime': datetime(2015, 1, 1),
'Port': 123,
'MasterUsername': 'string',
'PreferredBackupWindow': 'string',
'PreferredMaintenanceWindow': 'string',
'DBClusterMembers': [
{
'DBInstanceIdentifier': 'string',
'IsClusterWriter': True|False,
'DBClusterParameterGroupStatus': 'string',
'PromotionTier': 123
},
],
'VpcSecurityGroups': [
{
'VpcSecurityGroupId': 'string',
'Status': 'string'
},
],
'HostedZoneId': 'string',
'StorageEncrypted': True|False,
'KmsKeyId': 'string',
'DbClusterResourceId': 'string',
'DBClusterArn': 'string',
'AssociatedRoles': [
{
'RoleArn': 'string',
'Status': 'string'
},
],
'ClusterCreateTime': datetime(2015, 1, 1),
'EnabledCloudwatchLogsExports': [
'string',
],
'DeletionProtection': True|False
}
}
:returns:
(string) --
"""
pass
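# Usage sketch: raising a cluster's backup retention and applying the change
# immediately. The cluster identifier and retention value are hypothetical.
def _example_modify_db_cluster():
    import boto3
    client = boto3.client('docdb')
    response = client.modify_db_cluster(
        DBClusterIdentifier='sample-cluster',
        BackupRetentionPeriod=7,
        ApplyImmediately=True,
    )
    # The response echoes the (possibly still pending) cluster configuration.
    return response['DBCluster']['Status']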
def modify_db_cluster_parameter_group(DBClusterParameterGroupName=None, Parameters=None):
"""
Modifies the parameters of a cluster parameter group. To modify more than one parameter, submit a list of the following: ParameterName , ParameterValue , and ApplyMethod . A maximum of 20 parameters can be modified in a single request.
See also: AWS API Documentation
Exceptions
:example: response = client.modify_db_cluster_parameter_group(
DBClusterParameterGroupName='string',
Parameters=[
{
'ParameterName': 'string',
'ParameterValue': 'string',
'Description': 'string',
'Source': 'string',
'ApplyType': 'string',
'DataType': 'string',
'AllowedValues': 'string',
'IsModifiable': True|False,
'MinimumEngineVersion': 'string',
'ApplyMethod': 'immediate'|'pending-reboot'
},
]
)
:type DBClusterParameterGroupName: string
:param DBClusterParameterGroupName: [REQUIRED]\nThe name of the cluster parameter group to modify.\n
:type Parameters: list
:param Parameters: [REQUIRED]\nA list of parameters in the cluster parameter group to modify.\n\n(dict) --Detailed information about an individual parameter.\n\nParameterName (string) --Specifies the name of the parameter.\n\nParameterValue (string) --Specifies the value of the parameter.\n\nDescription (string) --Provides a description of the parameter.\n\nSource (string) --Indicates the source of the parameter value.\n\nApplyType (string) --Specifies the engine-specific parameters type.\n\nDataType (string) --Specifies the valid data type for the parameter.\n\nAllowedValues (string) --Specifies the valid range of values for the parameter.\n\nIsModifiable (boolean) --Indicates whether (true ) or not (false ) the parameter can be modified. Some parameters have security or operational implications that prevent them from being changed.\n\nMinimumEngineVersion (string) --The earliest engine version to which the parameter can apply.\n\nApplyMethod (string) --Indicates when to apply parameter updates.\n\n\n\n\n
:rtype: dict
Returns: Response Syntax
{
'DBClusterParameterGroupName': 'string'
}
Response Structure
(dict) --
Contains the name of a cluster parameter group.
DBClusterParameterGroupName (string) --
The name of a cluster parameter group.
Constraints:
Must be from 1 to 255 letters or numbers.
The first character must be a letter.
Cannot end with a hyphen or contain two consecutive hyphens.
Note
This value is stored as a lowercase string.
Exceptions
DocDB.Client.exceptions.DBParameterGroupNotFoundFault
DocDB.Client.exceptions.InvalidDBParameterGroupStateFault
:return: {
'DBClusterParameterGroupName': 'string'
}
:returns:
Must be from 1 to 255 letters or numbers.
The first character must be a letter.
Cannot end with a hyphen or contain two consecutive hyphens.
"""
pass
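# Usage sketch: turning on audit logging in a custom cluster parameter group.
# The group name is hypothetical, and 'audit_logs' is shown as an illustrative
# DocDB parameter; check the group for the exact parameter names available.
def _example_modify_db_cluster_parameter_group():
    import boto3
    client = boto3.client('docdb')
    return client.modify_db_cluster_parameter_group(
        DBClusterParameterGroupName='sample-parameter-group',
        Parameters=[
            {
                'ParameterName': 'audit_logs',
                'ParameterValue': 'enabled',
                'ApplyMethod': 'immediate',
            },
        ],
    )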
def modify_db_cluster_snapshot_attribute(DBClusterSnapshotIdentifier=None, AttributeName=None, ValuesToAdd=None, ValuesToRemove=None):
"""
Adds an attribute and values to, or removes an attribute and values from, a manual DB cluster snapshot.
To share a manual cluster snapshot with other AWS accounts, specify restore as the AttributeName , and use the ValuesToAdd parameter to add a list of IDs of the AWS accounts that are authorized to restore the manual cluster snapshot. Use the value all to make the manual cluster snapshot public, which means that it can be copied or restored by all AWS accounts. Do not add the all value for any manual DB cluster snapshots that contain private information that you don\'t want available to all AWS accounts. If a manual cluster snapshot is encrypted, it can be shared, but only by specifying a list of authorized AWS account IDs for the ValuesToAdd parameter. You can\'t use all as a value for that parameter in this case.
See also: AWS API Documentation
Exceptions
:example: response = client.modify_db_cluster_snapshot_attribute(
DBClusterSnapshotIdentifier='string',
AttributeName='string',
ValuesToAdd=[
'string',
],
ValuesToRemove=[
'string',
]
)
:type DBClusterSnapshotIdentifier: string
:param DBClusterSnapshotIdentifier: [REQUIRED]\nThe identifier for the cluster snapshot to modify the attributes for.\n
:type AttributeName: string
:param AttributeName: [REQUIRED]\nThe name of the cluster snapshot attribute to modify.\nTo manage authorization for other AWS accounts to copy or restore a manual cluster snapshot, set this value to restore .\n
:type ValuesToAdd: list
:param ValuesToAdd: A list of cluster snapshot attributes to add to the attribute specified by AttributeName .\nTo authorize other AWS accounts to copy or restore a manual cluster snapshot, set this list to include one or more AWS account IDs. To make the manual cluster snapshot restorable by any AWS account, set it to all . Do not add the all value for any manual cluster snapshots that contain private information that you don\'t want to be available to all AWS accounts.\n\n(string) --\n\n
:type ValuesToRemove: list
:param ValuesToRemove: A list of cluster snapshot attributes to remove from the attribute specified by AttributeName .\nTo remove authorization for other AWS accounts to copy or restore a manual cluster snapshot, set this list to include one or more AWS account identifiers. To remove authorization for any AWS account to copy or restore the cluster snapshot, set it to all . If you specify all , an AWS account whose account ID is explicitly added to the restore attribute can still copy or restore a manual cluster snapshot.\n\n(string) --\n\n
:rtype: dict
Returns: Response Syntax
{
'DBClusterSnapshotAttributesResult': {
'DBClusterSnapshotIdentifier': 'string',
'DBClusterSnapshotAttributes': [
{
'AttributeName': 'string',
'AttributeValues': [
'string',
]
},
]
}
}
Response Structure
(dict) --
DBClusterSnapshotAttributesResult (dict) --
Detailed information about the attributes that are associated with a cluster snapshot.
DBClusterSnapshotIdentifier (string) --
The identifier of the cluster snapshot that the attributes apply to.
DBClusterSnapshotAttributes (list) --
The list of attributes and values for the cluster snapshot.
(dict) --
Contains the name and values of a manual cluster snapshot attribute.
Manual cluster snapshot attributes are used to authorize other AWS accounts to restore a manual cluster snapshot.
AttributeName (string) --
The name of the manual cluster snapshot attribute.
The attribute named restore refers to the list of AWS accounts that have permission to copy or restore the manual cluster snapshot.
AttributeValues (list) --
The values for the manual cluster snapshot attribute.
If the AttributeName field is set to restore , then this element returns a list of IDs of the AWS accounts that are authorized to copy or restore the manual cluster snapshot. If a value of all is in the list, then the manual cluster snapshot is public and available for any AWS account to copy or restore.
(string) --
Exceptions
DocDB.Client.exceptions.DBClusterSnapshotNotFoundFault
DocDB.Client.exceptions.InvalidDBClusterSnapshotStateFault
DocDB.Client.exceptions.SharedSnapshotQuotaExceededFault
:return: {
'DBClusterSnapshotAttributesResult': {
'DBClusterSnapshotIdentifier': 'string',
'DBClusterSnapshotAttributes': [
{
'AttributeName': 'string',
'AttributeValues': [
'string',
]
},
]
}
}
:returns:
(string) --
"""
pass
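# Usage sketch: sharing a manual cluster snapshot with another AWS account by
# adding its ID to the 'restore' attribute. The snapshot identifier and
# account ID are hypothetical placeholders.
def _example_modify_db_cluster_snapshot_attribute():
    import boto3
    client = boto3.client('docdb')
    return client.modify_db_cluster_snapshot_attribute(
        DBClusterSnapshotIdentifier='sample-cluster-snapshot',
        AttributeName='restore',
        ValuesToAdd=['123456789012'],
    )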
def modify_db_instance(DBInstanceIdentifier=None, DBInstanceClass=None, ApplyImmediately=None, PreferredMaintenanceWindow=None, AutoMinorVersionUpgrade=None, NewDBInstanceIdentifier=None, CACertificateIdentifier=None, PromotionTier=None):
"""
Modifies settings for an instance. You can change one or more database configuration parameters by specifying these parameters and the new values in the request.
See also: AWS API Documentation
Exceptions
:example: response = client.modify_db_instance(
DBInstanceIdentifier='string',
DBInstanceClass='string',
ApplyImmediately=True|False,
PreferredMaintenanceWindow='string',
AutoMinorVersionUpgrade=True|False,
NewDBInstanceIdentifier='string',
CACertificateIdentifier='string',
PromotionTier=123
)
:type DBInstanceIdentifier: string
:param DBInstanceIdentifier: [REQUIRED]\nThe instance identifier. This value is stored as a lowercase string.\nConstraints:\n\nMust match the identifier of an existing DBInstance .\n\n
:type DBInstanceClass: string
:param DBInstanceClass: The new compute and memory capacity of the instance; for example, db.r5.large . Not all instance classes are available in all AWS Regions.\nIf you modify the instance class, an outage occurs during the change. The change is applied during the next maintenance window, unless ApplyImmediately is specified as true for this request.\nDefault: Uses existing setting.\n
:type ApplyImmediately: boolean
:param ApplyImmediately: Specifies whether the modifications in this request and any pending modifications are asynchronously applied as soon as possible, regardless of the PreferredMaintenanceWindow setting for the instance.\nIf this parameter is set to false , changes to the instance are applied during the next maintenance window. Some parameter changes can cause an outage and are applied on the next reboot.\nDefault: false\n
:type PreferredMaintenanceWindow: string
:param PreferredMaintenanceWindow: The weekly time range (in UTC) during which system maintenance can occur, which might result in an outage. Changing this parameter doesn\'t result in an outage except in the following situation, and the change is asynchronously applied as soon as possible. If there are pending actions that cause a reboot, and the maintenance window is changed to include the current time, changing this parameter causes a reboot of the instance. If you are moving this window to the current time, there must be at least 30 minutes between the current time and end of the window to ensure that pending changes are applied.\nDefault: Uses existing setting.\nFormat: ddd:hh24:mi-ddd:hh24:mi\nValid days: Mon, Tue, Wed, Thu, Fri, Sat, Sun\nConstraints: Must be at least 30 minutes.\n
:type AutoMinorVersionUpgrade: boolean
:param AutoMinorVersionUpgrade: Indicates that minor version upgrades are applied automatically to the instance during the maintenance window. Changing this parameter doesn\'t result in an outage except in the following case, and the change is asynchronously applied as soon as possible. An outage results if this parameter is set to true during the maintenance window, and a newer minor version is available, and Amazon DocumentDB has enabled automatic patching for that engine version.
:type NewDBInstanceIdentifier: string
:param NewDBInstanceIdentifier: The new instance identifier for the instance when renaming an instance. When you change the instance identifier, an instance reboot occurs immediately if you set Apply Immediately to true . It occurs during the next maintenance window if you set Apply Immediately to false . This value is stored as a lowercase string.\nConstraints:\n\nMust contain from 1 to 63 letters, numbers, or hyphens.\nThe first character must be a letter.\nCannot end with a hyphen or contain two consecutive hyphens.\n\nExample: mydbinstance\n
:type CACertificateIdentifier: string
:param CACertificateIdentifier: Indicates the certificate that needs to be associated with the instance.
:type PromotionTier: integer
:param PromotionTier: A value that specifies the order in which an Amazon DocumentDB replica is promoted to the primary instance after a failure of the existing primary instance.\nDefault: 1\nValid values: 0-15\n
:rtype: dict
ReturnsResponse Syntax
{
'DBInstance': {
'DBInstanceIdentifier': 'string',
'DBInstanceClass': 'string',
'Engine': 'string',
'DBInstanceStatus': 'string',
'Endpoint': {
'Address': 'string',
'Port': 123,
'HostedZoneId': 'string'
},
'InstanceCreateTime': datetime(2015, 1, 1),
'PreferredBackupWindow': 'string',
'BackupRetentionPeriod': 123,
'VpcSecurityGroups': [
{
'VpcSecurityGroupId': 'string',
'Status': 'string'
},
],
'AvailabilityZone': 'string',
'DBSubnetGroup': {
'DBSubnetGroupName': 'string',
'DBSubnetGroupDescription': 'string',
'VpcId': 'string',
'SubnetGroupStatus': 'string',
'Subnets': [
{
'SubnetIdentifier': 'string',
'SubnetAvailabilityZone': {
'Name': 'string'
},
'SubnetStatus': 'string'
},
],
'DBSubnetGroupArn': 'string'
},
'PreferredMaintenanceWindow': 'string',
'PendingModifiedValues': {
'DBInstanceClass': 'string',
'AllocatedStorage': 123,
'MasterUserPassword': 'string',
'Port': 123,
'BackupRetentionPeriod': 123,
'MultiAZ': True|False,
'EngineVersion': 'string',
'LicenseModel': 'string',
'Iops': 123,
'DBInstanceIdentifier': 'string',
'StorageType': 'string',
'CACertificateIdentifier': 'string',
'DBSubnetGroupName': 'string',
'PendingCloudwatchLogsExports': {
'LogTypesToEnable': [
'string',
],
'LogTypesToDisable': [
'string',
]
}
},
'LatestRestorableTime': datetime(2015, 1, 1),
'EngineVersion': 'string',
'AutoMinorVersionUpgrade': True|False,
'PubliclyAccessible': True|False,
'StatusInfos': [
{
'StatusType': 'string',
'Normal': True|False,
'Status': 'string',
'Message': 'string'
},
],
'DBClusterIdentifier': 'string',
'StorageEncrypted': True|False,
'KmsKeyId': 'string',
'DbiResourceId': 'string',
'CACertificateIdentifier': 'string',
'PromotionTier': 123,
'DBInstanceArn': 'string',
'EnabledCloudwatchLogsExports': [
'string',
]
}
}
Response Structure
(dict) --
DBInstance (dict) --
Detailed information about an instance.
DBInstanceIdentifier (string) --
Contains a user-provided database identifier. This identifier is the unique key that identifies an instance.
DBInstanceClass (string) --
Contains the name of the compute and memory capacity class of the instance.
Engine (string) --
Provides the name of the database engine to be used for this instance.
DBInstanceStatus (string) --
Specifies the current state of this database.
Endpoint (dict) --
Specifies the connection endpoint.
Address (string) --
Specifies the DNS address of the instance.
Port (integer) --
Specifies the port that the database engine is listening on.
HostedZoneId (string) --
Specifies the ID that Amazon Route 53 assigns when you create a hosted zone.
InstanceCreateTime (datetime) --
Provides the date and time that the instance was created.
PreferredBackupWindow (string) --
Specifies the daily time range during which automated backups are created if automated backups are enabled, as determined by the BackupRetentionPeriod .
BackupRetentionPeriod (integer) --
Specifies the number of days for which automatic snapshots are retained.
VpcSecurityGroups (list) --
Provides a list of VPC security group elements that the instance belongs to.
(dict) --
Used as a response element for queries on virtual private cloud (VPC) security group membership.
VpcSecurityGroupId (string) --
The name of the VPC security group.
Status (string) --
The status of the VPC security group.
AvailabilityZone (string) --
Specifies the name of the Availability Zone that the instance is located in.
DBSubnetGroup (dict) --
Specifies information on the subnet group that is associated with the instance, including the name, description, and subnets in the subnet group.
DBSubnetGroupName (string) --
The name of the subnet group.
DBSubnetGroupDescription (string) --
Provides the description of the subnet group.
VpcId (string) --
Provides the virtual private cloud (VPC) ID of the subnet group.
SubnetGroupStatus (string) --
Provides the status of the subnet group.
Subnets (list) --
Detailed information about one or more subnets within a subnet group.
(dict) --
Detailed information about a subnet.
SubnetIdentifier (string) --
Specifies the identifier of the subnet.
SubnetAvailabilityZone (dict) --
Specifies the Availability Zone for the subnet.
Name (string) --
The name of the Availability Zone.
SubnetStatus (string) --
Specifies the status of the subnet.
DBSubnetGroupArn (string) --
The Amazon Resource Name (ARN) for the DB subnet group.
PreferredMaintenanceWindow (string) --
Specifies the weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).
PendingModifiedValues (dict) --
Specifies that changes to the instance are pending. This element is included only when changes are pending. Specific changes are identified by subelements.
DBInstanceClass (string) --
Contains the new DBInstanceClass for the instance that will be applied or is currently being applied.
AllocatedStorage (integer) --
Contains the new AllocatedStorage size for the instance that will be applied or is currently being applied.
MasterUserPassword (string) --
Contains the pending or currently in-progress change of the master credentials for the instance.
Port (integer) --
Specifies the pending port for the instance.
BackupRetentionPeriod (integer) --
Specifies the pending number of days for which automated backups are retained.
MultiAZ (boolean) --
Indicates that the Single-AZ instance is to change to a Multi-AZ deployment.
EngineVersion (string) --
Indicates the database engine version.
LicenseModel (string) --
The license model for the instance.
Valid values: license-included , bring-your-own-license , general-public-license
Iops (integer) --
Specifies the new Provisioned IOPS value for the instance that will be applied or is currently being applied.
DBInstanceIdentifier (string) --
Contains the new DBInstanceIdentifier for the instance that will be applied or is currently being applied.
StorageType (string) --
Specifies the storage type to be associated with the instance.
CACertificateIdentifier (string) --
Specifies the identifier of the certificate authority (CA) certificate for the DB instance.
DBSubnetGroupName (string) --
The new subnet group for the instance.
PendingCloudwatchLogsExports (dict) --
A list of the log types whose configuration is still pending. These log types are in the process of being activated or deactivated.
LogTypesToEnable (list) --
Log types that are in the process of being enabled. After they are enabled, these log types are exported to Amazon CloudWatch Logs.
(string) --
LogTypesToDisable (list) --
Log types that are in the process of being deactivated. After they are deactivated, these log types aren\'t exported to CloudWatch Logs.
(string) --
LatestRestorableTime (datetime) --
Specifies the latest time to which a database can be restored with point-in-time restore.
EngineVersion (string) --
Indicates the database engine version.
AutoMinorVersionUpgrade (boolean) --
Indicates that minor version patches are applied automatically.
PubliclyAccessible (boolean) --
Not supported. Amazon DocumentDB does not currently support public endpoints. The value of PubliclyAccessible is always false .
StatusInfos (list) --
The status of a read replica. If the instance is not a read replica, this is blank.
(dict) --
Provides a list of status information for an instance.
StatusType (string) --
This value is currently "read replication".
Normal (boolean) --
A Boolean value that is true if the instance is operating normally, or false if the instance is in an error state.
Status (string) --
Status of the instance. For a StatusType of read replica, the values can be replicating , error, stopped , or terminated .
Message (string) --
Details of the error if there is an error for the instance. If the instance is not in an error state, this value is blank.
DBClusterIdentifier (string) --
Contains the name of the cluster that the instance is a member of if the instance is a member of a cluster.
StorageEncrypted (boolean) --
Specifies whether or not the instance is encrypted.
KmsKeyId (string) --
If StorageEncrypted is true , the AWS KMS key identifier for the encrypted instance.
DbiResourceId (string) --
The AWS Region-unique, immutable identifier for the instance. This identifier is found in AWS CloudTrail log entries whenever the AWS KMS key for the instance is accessed.
CACertificateIdentifier (string) --
The identifier of the CA certificate for this DB instance.
PromotionTier (integer) --
A value that specifies the order in which an Amazon DocumentDB replica is promoted to the primary instance after a failure of the existing primary instance.
DBInstanceArn (string) --
The Amazon Resource Name (ARN) for the instance.
EnabledCloudwatchLogsExports (list) --
A list of log types that this instance is configured to export to Amazon CloudWatch Logs.
(string) --
Exceptions
DocDB.Client.exceptions.InvalidDBInstanceStateFault
DocDB.Client.exceptions.InvalidDBSecurityGroupStateFault
DocDB.Client.exceptions.DBInstanceAlreadyExistsFault
DocDB.Client.exceptions.DBInstanceNotFoundFault
DocDB.Client.exceptions.DBSecurityGroupNotFoundFault
DocDB.Client.exceptions.DBParameterGroupNotFoundFault
DocDB.Client.exceptions.InsufficientDBInstanceCapacityFault
DocDB.Client.exceptions.StorageQuotaExceededFault
DocDB.Client.exceptions.InvalidVPCNetworkStateFault
DocDB.Client.exceptions.DBUpgradeDependencyFailureFault
DocDB.Client.exceptions.StorageTypeNotSupportedFault
DocDB.Client.exceptions.AuthorizationNotFoundFault
DocDB.Client.exceptions.CertificateNotFoundFault
:return: {
'DBInstance': {
'DBInstanceIdentifier': 'string',
'DBInstanceClass': 'string',
'Engine': 'string',
'DBInstanceStatus': 'string',
'Endpoint': {
'Address': 'string',
'Port': 123,
'HostedZoneId': 'string'
},
'InstanceCreateTime': datetime(2015, 1, 1),
'PreferredBackupWindow': 'string',
'BackupRetentionPeriod': 123,
'VpcSecurityGroups': [
{
'VpcSecurityGroupId': 'string',
'Status': 'string'
},
],
'AvailabilityZone': 'string',
'DBSubnetGroup': {
'DBSubnetGroupName': 'string',
'DBSubnetGroupDescription': 'string',
'VpcId': 'string',
'SubnetGroupStatus': 'string',
'Subnets': [
{
'SubnetIdentifier': 'string',
'SubnetAvailabilityZone': {
'Name': 'string'
},
'SubnetStatus': 'string'
},
],
'DBSubnetGroupArn': 'string'
},
'PreferredMaintenanceWindow': 'string',
'PendingModifiedValues': {
'DBInstanceClass': 'string',
'AllocatedStorage': 123,
'MasterUserPassword': 'string',
'Port': 123,
'BackupRetentionPeriod': 123,
'MultiAZ': True|False,
'EngineVersion': 'string',
'LicenseModel': 'string',
'Iops': 123,
'DBInstanceIdentifier': 'string',
'StorageType': 'string',
'CACertificateIdentifier': 'string',
'DBSubnetGroupName': 'string',
'PendingCloudwatchLogsExports': {
'LogTypesToEnable': [
'string',
],
'LogTypesToDisable': [
'string',
]
}
},
'LatestRestorableTime': datetime(2015, 1, 1),
'EngineVersion': 'string',
'AutoMinorVersionUpgrade': True|False,
'PubliclyAccessible': True|False,
'StatusInfos': [
{
'StatusType': 'string',
'Normal': True|False,
'Status': 'string',
'Message': 'string'
},
],
'DBClusterIdentifier': 'string',
'StorageEncrypted': True|False,
'KmsKeyId': 'string',
'DbiResourceId': 'string',
'CACertificateIdentifier': 'string',
'PromotionTier': 123,
'DBInstanceArn': 'string',
'EnabledCloudwatchLogsExports': [
'string',
]
}
}
:returns:
(string) --
"""
pass
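# Usage sketch: resizing an instance during the next maintenance window. The
# instance identifier and instance class are hypothetical placeholders.
def _example_modify_db_instance():
    import boto3
    client = boto3.client('docdb')
    response = client.modify_db_instance(
        DBInstanceIdentifier='sample-instance',
        DBInstanceClass='db.r5.large',
        ApplyImmediately=False,  # defer the class change to the maintenance window
    )
    # Deferred changes appear under PendingModifiedValues until they are applied.
    return response['DBInstance']['PendingModifiedValues']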
def modify_db_subnet_group(DBSubnetGroupName=None, DBSubnetGroupDescription=None, SubnetIds=None):
"""
Modifies an existing subnet group. Subnet groups must contain at least one subnet in at least two Availability Zones in the AWS Region.
See also: AWS API Documentation
Exceptions
:example: response = client.modify_db_subnet_group(
DBSubnetGroupName='string',
DBSubnetGroupDescription='string',
SubnetIds=[
'string',
]
)
:type DBSubnetGroupName: string
:param DBSubnetGroupName: [REQUIRED]\nThe name for the subnet group. This value is stored as a lowercase string. You can\'t modify the default subnet group.\nConstraints: Must match the name of an existing DBSubnetGroup . Must not be default.\nExample: mySubnetgroup\n
:type DBSubnetGroupDescription: string
:param DBSubnetGroupDescription: The description for the subnet group.
:type SubnetIds: list
:param SubnetIds: [REQUIRED]\nThe Amazon EC2 subnet IDs for the subnet group.\n\n(string) --\n\n
:rtype: dict
ReturnsResponse Syntax
{
'DBSubnetGroup': {
'DBSubnetGroupName': 'string',
'DBSubnetGroupDescription': 'string',
'VpcId': 'string',
'SubnetGroupStatus': 'string',
'Subnets': [
{
'SubnetIdentifier': 'string',
'SubnetAvailabilityZone': {
'Name': 'string'
},
'SubnetStatus': 'string'
},
],
'DBSubnetGroupArn': 'string'
}
}
Response Structure
(dict) --
DBSubnetGroup (dict) --
Detailed information about a subnet group.
DBSubnetGroupName (string) --
The name of the subnet group.
DBSubnetGroupDescription (string) --
Provides the description of the subnet group.
VpcId (string) --
Provides the virtual private cloud (VPC) ID of the subnet group.
SubnetGroupStatus (string) --
Provides the status of the subnet group.
Subnets (list) --
Detailed information about one or more subnets within a subnet group.
(dict) --
Detailed information about a subnet.
SubnetIdentifier (string) --
Specifies the identifier of the subnet.
SubnetAvailabilityZone (dict) --
Specifies the Availability Zone for the subnet.
Name (string) --
The name of the Availability Zone.
SubnetStatus (string) --
Specifies the status of the subnet.
DBSubnetGroupArn (string) --
The Amazon Resource Name (ARN) for the DB subnet group.
Exceptions
DocDB.Client.exceptions.DBSubnetGroupNotFoundFault
DocDB.Client.exceptions.DBSubnetQuotaExceededFault
DocDB.Client.exceptions.SubnetAlreadyInUse
DocDB.Client.exceptions.DBSubnetGroupDoesNotCoverEnoughAZs
DocDB.Client.exceptions.InvalidSubnet
:return: {
'DBSubnetGroup': {
'DBSubnetGroupName': 'string',
'DBSubnetGroupDescription': 'string',
'VpcId': 'string',
'SubnetGroupStatus': 'string',
'Subnets': [
{
'SubnetIdentifier': 'string',
'SubnetAvailabilityZone': {
'Name': 'string'
},
'SubnetStatus': 'string'
},
],
'DBSubnetGroupArn': 'string'
}
}
:returns:
DocDB.Client.exceptions.DBSubnetGroupNotFoundFault
DocDB.Client.exceptions.DBSubnetQuotaExceededFault
DocDB.Client.exceptions.SubnetAlreadyInUse
DocDB.Client.exceptions.DBSubnetGroupDoesNotCoverEnoughAZs
DocDB.Client.exceptions.InvalidSubnet
"""
pass
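# Usage sketch: replacing the subnets of an existing subnet group. The group
# name and subnet IDs are hypothetical placeholders that would need to span at
# least two Availability Zones.
def _example_modify_db_subnet_group():
    import boto3
    client = boto3.client('docdb')
    return client.modify_db_subnet_group(
        DBSubnetGroupName='sample-subnet-group',
        DBSubnetGroupDescription='Sample subnet group for DocDB',
        SubnetIds=['subnet-0123456789abcdef0', 'subnet-0fedcba9876543210'],
    )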
def reboot_db_instance(DBInstanceIdentifier=None, ForceFailover=None):
"""
You might need to reboot your instance, usually for maintenance reasons. For example, if you make certain changes, or if you change the cluster parameter group that is associated with the instance, you must reboot the instance for the changes to take effect.
Rebooting an instance restarts the database engine service. Rebooting an instance results in a momentary outage, during which the instance status is set to rebooting .
See also: AWS API Documentation
Exceptions
:example: response = client.reboot_db_instance(
DBInstanceIdentifier='string',
ForceFailover=True|False
)
:type DBInstanceIdentifier: string
:param DBInstanceIdentifier: [REQUIRED]\nThe instance identifier. This parameter is stored as a lowercase string.\nConstraints:\n\nMust match the identifier of an existing DBInstance .\n\n
:type ForceFailover: boolean
:param ForceFailover: When true , the reboot is conducted through a Multi-AZ failover.\nConstraint: You can\'t specify true if the instance is not configured for Multi-AZ.\n
:rtype: dict
ReturnsResponse Syntax
{
'DBInstance': {
'DBInstanceIdentifier': 'string',
'DBInstanceClass': 'string',
'Engine': 'string',
'DBInstanceStatus': 'string',
'Endpoint': {
'Address': 'string',
'Port': 123,
'HostedZoneId': 'string'
},
'InstanceCreateTime': datetime(2015, 1, 1),
'PreferredBackupWindow': 'string',
'BackupRetentionPeriod': 123,
'VpcSecurityGroups': [
{
'VpcSecurityGroupId': 'string',
'Status': 'string'
},
],
'AvailabilityZone': 'string',
'DBSubnetGroup': {
'DBSubnetGroupName': 'string',
'DBSubnetGroupDescription': 'string',
'VpcId': 'string',
'SubnetGroupStatus': 'string',
'Subnets': [
{
'SubnetIdentifier': 'string',
'SubnetAvailabilityZone': {
'Name': 'string'
},
'SubnetStatus': 'string'
},
],
'DBSubnetGroupArn': 'string'
},
'PreferredMaintenanceWindow': 'string',
'PendingModifiedValues': {
'DBInstanceClass': 'string',
'AllocatedStorage': 123,
'MasterUserPassword': 'string',
'Port': 123,
'BackupRetentionPeriod': 123,
'MultiAZ': True|False,
'EngineVersion': 'string',
'LicenseModel': 'string',
'Iops': 123,
'DBInstanceIdentifier': 'string',
'StorageType': 'string',
'CACertificateIdentifier': 'string',
'DBSubnetGroupName': 'string',
'PendingCloudwatchLogsExports': {
'LogTypesToEnable': [
'string',
],
'LogTypesToDisable': [
'string',
]
}
},
'LatestRestorableTime': datetime(2015, 1, 1),
'EngineVersion': 'string',
'AutoMinorVersionUpgrade': True|False,
'PubliclyAccessible': True|False,
'StatusInfos': [
{
'StatusType': 'string',
'Normal': True|False,
'Status': 'string',
'Message': 'string'
},
],
'DBClusterIdentifier': 'string',
'StorageEncrypted': True|False,
'KmsKeyId': 'string',
'DbiResourceId': 'string',
'CACertificateIdentifier': 'string',
'PromotionTier': 123,
'DBInstanceArn': 'string',
'EnabledCloudwatchLogsExports': [
'string',
]
}
}
Response Structure
(dict) --
DBInstance (dict) --
Detailed information about an instance.
DBInstanceIdentifier (string) --
Contains a user-provided database identifier. This identifier is the unique key that identifies an instance.
DBInstanceClass (string) --
Contains the name of the compute and memory capacity class of the instance.
Engine (string) --
Provides the name of the database engine to be used for this instance.
DBInstanceStatus (string) --
Specifies the current state of this database.
Endpoint (dict) --
Specifies the connection endpoint.
Address (string) --
Specifies the DNS address of the instance.
Port (integer) --
Specifies the port that the database engine is listening on.
HostedZoneId (string) --
Specifies the ID that Amazon Route 53 assigns when you create a hosted zone.
InstanceCreateTime (datetime) --
Provides the date and time that the instance was created.
PreferredBackupWindow (string) --
Specifies the daily time range during which automated backups are created if automated backups are enabled, as determined by the BackupRetentionPeriod .
BackupRetentionPeriod (integer) --
Specifies the number of days for which automatic snapshots are retained.
VpcSecurityGroups (list) --
Provides a list of VPC security group elements that the instance belongs to.
(dict) --
Used as a response element for queries on virtual private cloud (VPC) security group membership.
VpcSecurityGroupId (string) --
The name of the VPC security group.
Status (string) --
The status of the VPC security group.
AvailabilityZone (string) --
Specifies the name of the Availability Zone that the instance is located in.
DBSubnetGroup (dict) --
Specifies information on the subnet group that is associated with the instance, including the name, description, and subnets in the subnet group.
DBSubnetGroupName (string) --
The name of the subnet group.
DBSubnetGroupDescription (string) --
Provides the description of the subnet group.
VpcId (string) --
Provides the virtual private cloud (VPC) ID of the subnet group.
SubnetGroupStatus (string) --
Provides the status of the subnet group.
Subnets (list) --
Detailed information about one or more subnets within a subnet group.
(dict) --
Detailed information about a subnet.
SubnetIdentifier (string) --
Specifies the identifier of the subnet.
SubnetAvailabilityZone (dict) --
Specifies the Availability Zone for the subnet.
Name (string) --
The name of the Availability Zone.
SubnetStatus (string) --
Specifies the status of the subnet.
DBSubnetGroupArn (string) --
The Amazon Resource Name (ARN) for the DB subnet group.
PreferredMaintenanceWindow (string) --
Specifies the weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).
PendingModifiedValues (dict) --
Specifies that changes to the instance are pending. This element is included only when changes are pending. Specific changes are identified by subelements.
DBInstanceClass (string) --
Contains the new DBInstanceClass for the instance that will be applied or is currently being applied.
AllocatedStorage (integer) --
Contains the new AllocatedStorage size for the instance that will be applied or is currently being applied.
MasterUserPassword (string) --
Contains the pending or currently in-progress change of the master credentials for the instance.
Port (integer) --
Specifies the pending port for the instance.
BackupRetentionPeriod (integer) --
Specifies the pending number of days for which automated backups are retained.
MultiAZ (boolean) --
Indicates that the Single-AZ instance is to change to a Multi-AZ deployment.
EngineVersion (string) --
Indicates the database engine version.
LicenseModel (string) --
The license model for the instance.
Valid values: license-included , bring-your-own-license , general-public-license
Iops (integer) --
Specifies the new Provisioned IOPS value for the instance that will be applied or is currently being applied.
DBInstanceIdentifier (string) --
Contains the new DBInstanceIdentifier for the instance that will be applied or is currently being applied.
StorageType (string) --
Specifies the storage type to be associated with the instance.
CACertificateIdentifier (string) --
Specifies the identifier of the certificate authority (CA) certificate for the DB instance.
DBSubnetGroupName (string) --
The new subnet group for the instance.
PendingCloudwatchLogsExports (dict) --
A list of the log types whose configuration is still pending. These log types are in the process of being activated or deactivated.
LogTypesToEnable (list) --
Log types that are in the process of being enabled. After they are enabled, these log types are exported to Amazon CloudWatch Logs.
(string) --
LogTypesToDisable (list) --
Log types that are in the process of being deactivated. After they are deactivated, these log types aren\'t exported to CloudWatch Logs.
(string) --
LatestRestorableTime (datetime) --
Specifies the latest time to which a database can be restored with point-in-time restore.
EngineVersion (string) --
Indicates the database engine version.
AutoMinorVersionUpgrade (boolean) --
Indicates that minor version patches are applied automatically.
PubliclyAccessible (boolean) --
Not supported. Amazon DocumentDB does not currently support public endpoints. The value of PubliclyAccessible is always false .
StatusInfos (list) --
The status of a read replica. If the instance is not a read replica, this is blank.
(dict) --
Provides a list of status information for an instance.
StatusType (string) --
This value is currently "read replication".
Normal (boolean) --
A Boolean value that is true if the instance is operating normally, or false if the instance is in an error state.
Status (string) --
Status of the instance. For a StatusType of read replica, the values can be replicating , error, stopped , or terminated .
Message (string) --
Details of the error if there is an error for the instance. If the instance is not in an error state, this value is blank.
DBClusterIdentifier (string) --
Contains the name of the cluster that the instance is a member of if the instance is a member of a cluster.
StorageEncrypted (boolean) --
Specifies whether or not the instance is encrypted.
KmsKeyId (string) --
If StorageEncrypted is true , the AWS KMS key identifier for the encrypted instance.
DbiResourceId (string) --
The AWS Region-unique, immutable identifier for the instance. This identifier is found in AWS CloudTrail log entries whenever the AWS KMS key for the instance is accessed.
CACertificateIdentifier (string) --
The identifier of the CA certificate for this DB instance.
PromotionTier (integer) --
A value that specifies the order in which an Amazon DocumentDB replica is promoted to the primary instance after a failure of the existing primary instance.
DBInstanceArn (string) --
The Amazon Resource Name (ARN) for the instance.
EnabledCloudwatchLogsExports (list) --
A list of log types that this instance is configured to export to Amazon CloudWatch Logs.
(string) --
Exceptions
DocDB.Client.exceptions.InvalidDBInstanceStateFault
DocDB.Client.exceptions.DBInstanceNotFoundFault
:return: {
'DBInstance': {
'DBInstanceIdentifier': 'string',
'DBInstanceClass': 'string',
'Engine': 'string',
'DBInstanceStatus': 'string',
'Endpoint': {
'Address': 'string',
'Port': 123,
'HostedZoneId': 'string'
},
'InstanceCreateTime': datetime(2015, 1, 1),
'PreferredBackupWindow': 'string',
'BackupRetentionPeriod': 123,
'VpcSecurityGroups': [
{
'VpcSecurityGroupId': 'string',
'Status': 'string'
},
],
'AvailabilityZone': 'string',
'DBSubnetGroup': {
'DBSubnetGroupName': 'string',
'DBSubnetGroupDescription': 'string',
'VpcId': 'string',
'SubnetGroupStatus': 'string',
'Subnets': [
{
'SubnetIdentifier': 'string',
'SubnetAvailabilityZone': {
'Name': 'string'
},
'SubnetStatus': 'string'
},
],
'DBSubnetGroupArn': 'string'
},
'PreferredMaintenanceWindow': 'string',
'PendingModifiedValues': {
'DBInstanceClass': 'string',
'AllocatedStorage': 123,
'MasterUserPassword': 'string',
'Port': 123,
'BackupRetentionPeriod': 123,
'MultiAZ': True|False,
'EngineVersion': 'string',
'LicenseModel': 'string',
'Iops': 123,
'DBInstanceIdentifier': 'string',
'StorageType': 'string',
'CACertificateIdentifier': 'string',
'DBSubnetGroupName': 'string',
'PendingCloudwatchLogsExports': {
'LogTypesToEnable': [
'string',
],
'LogTypesToDisable': [
'string',
]
}
},
'LatestRestorableTime': datetime(2015, 1, 1),
'EngineVersion': 'string',
'AutoMinorVersionUpgrade': True|False,
'PubliclyAccessible': True|False,
'StatusInfos': [
{
'StatusType': 'string',
'Normal': True|False,
'Status': 'string',
'Message': 'string'
},
],
'DBClusterIdentifier': 'string',
'StorageEncrypted': True|False,
'KmsKeyId': 'string',
'DbiResourceId': 'string',
'CACertificateIdentifier': 'string',
'PromotionTier': 123,
'DBInstanceArn': 'string',
'EnabledCloudwatchLogsExports': [
'string',
]
}
}
:returns:
(string) --
"""
pass
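# Usage sketch: rebooting an instance and waiting for it to become available
# again. The instance identifier is a hypothetical placeholder, and the wait
# step assumes the 'db_instance_available' waiter exists for this client.
def _example_reboot_db_instance():
    import boto3
    client = boto3.client('docdb')
    client.reboot_db_instance(DBInstanceIdentifier='sample-instance')
    # Block until the instance leaves the 'rebooting' state.
    client.get_waiter('db_instance_available').wait(
        DBInstanceIdentifier='sample-instance'
    )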
def remove_tags_from_resource(ResourceName=None, TagKeys=None):
"""
Removes metadata tags from an Amazon DocumentDB resource.
See also: AWS API Documentation
Exceptions
:example: response = client.remove_tags_from_resource(
ResourceName='string',
TagKeys=[
'string',
]
)
:type ResourceName: string
:param ResourceName: [REQUIRED]\nThe Amazon DocumentDB resource that the tags are removed from. This value is an Amazon Resource Name (ARN).\n
:type TagKeys: list
:param TagKeys: [REQUIRED]\nThe tag key (name) of the tag to be removed.\n\n(string) --\n\n
:returns:
DocDB.Client.exceptions.DBInstanceNotFoundFault
DocDB.Client.exceptions.DBSnapshotNotFoundFault
DocDB.Client.exceptions.DBClusterNotFoundFault
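A short, hedged usage sketch; 'client' is assumed to be a boto3 DocumentDB client, and the ARN and tag keys below are placeholders. list_tags_for_resource is shown only to confirm the remaining tags.
# Placeholder ARN and tag keys; 'client' is assumed to be a boto3 DocDB client.
arn = 'arn:aws:rds:us-east-1:123456789012:cluster:sample-cluster'
client.remove_tags_from_resource(ResourceName=arn, TagKeys=['environment', 'owner'])
remaining = client.list_tags_for_resource(ResourceName=arn)['TagList']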
"""
pass
def reset_db_cluster_parameter_group(DBClusterParameterGroupName=None, ResetAllParameters=None, Parameters=None):
"""
Resets the parameters of a cluster parameter group to their default values. To reset specific parameters, submit a list of ParameterName and ApplyMethod entries. To reset the entire cluster parameter group, specify the DBClusterParameterGroupName and ResetAllParameters parameters.
When you reset the entire group, dynamic parameters are updated immediately and static parameters are set to pending-reboot to take effect on the next DB instance reboot.
See also: AWS API Documentation
Exceptions
:example: response = client.reset_db_cluster_parameter_group(
DBClusterParameterGroupName='string',
ResetAllParameters=True|False,
Parameters=[
{
'ParameterName': 'string',
'ParameterValue': 'string',
'Description': 'string',
'Source': 'string',
'ApplyType': 'string',
'DataType': 'string',
'AllowedValues': 'string',
'IsModifiable': True|False,
'MinimumEngineVersion': 'string',
'ApplyMethod': 'immediate'|'pending-reboot'
},
]
)
:type DBClusterParameterGroupName: string
:param DBClusterParameterGroupName: [REQUIRED]\nThe name of the cluster parameter group to reset.\n
:type ResetAllParameters: boolean
:param ResetAllParameters: A value that is set to true to reset all parameters in the cluster parameter group to their default values, and false otherwise. You can\'t use this parameter if there is a list of parameter names specified for the Parameters parameter.
:type Parameters: list
:param Parameters: A list of parameter names in the cluster parameter group to reset to the default values. You can\'t use this parameter if the ResetAllParameters parameter is set to true .\n\n(dict) --Detailed information about an individual parameter.\n\nParameterName (string) --Specifies the name of the parameter.\n\nParameterValue (string) --Specifies the value of the parameter.\n\nDescription (string) --Provides a description of the parameter.\n\nSource (string) --Indicates the source of the parameter value.\n\nApplyType (string) --Specifies the engine-specific parameters type.\n\nDataType (string) --Specifies the valid data type for the parameter.\n\nAllowedValues (string) --Specifies the valid range of values for the parameter.\n\nIsModifiable (boolean) --Indicates whether (true ) or not (false ) the parameter can be modified. Some parameters have security or operational implications that prevent them from being changed.\n\nMinimumEngineVersion (string) --The earliest engine version to which the parameter can apply.\n\nApplyMethod (string) --Indicates when to apply parameter updates.\n\n\n\n\n
:rtype: dict
Returns
Response Syntax
{
'DBClusterParameterGroupName': 'string'
}
Response Structure
(dict) --
Contains the name of a cluster parameter group.
DBClusterParameterGroupName (string) --
The name of a cluster parameter group.
Constraints:
Must be from 1 to 255 letters or numbers.
The first character must be a letter.
Cannot end with a hyphen or contain two consecutive hyphens.
Note
This value is stored as a lowercase string.
Exceptions
DocDB.Client.exceptions.InvalidDBParameterGroupStateFault
DocDB.Client.exceptions.DBParameterGroupNotFoundFault
:return: {
'DBClusterParameterGroupName': 'string'
}
:returns:
Must be from 1 to 255 letters or numbers.
The first character must be a letter.
Cannot end with a hyphen or contain two consecutive hyphens.
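Two hedged usage sketches, assuming 'client' is a boto3 DocumentDB client; the group and parameter names are placeholders.
# Reset every parameter in the group to its default value.
client.reset_db_cluster_parameter_group(
    DBClusterParameterGroupName='sample-parameter-group',
    ResetAllParameters=True,
)
# Reset only selected parameters; each entry needs ParameterName and ApplyMethod.
client.reset_db_cluster_parameter_group(
    DBClusterParameterGroupName='sample-parameter-group',
    Parameters=[{'ParameterName': 'tls', 'ApplyMethod': 'pending-reboot'}],
)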
"""
pass
def restore_db_cluster_from_snapshot(AvailabilityZones=None, DBClusterIdentifier=None, SnapshotIdentifier=None, Engine=None, EngineVersion=None, Port=None, DBSubnetGroupName=None, VpcSecurityGroupIds=None, Tags=None, KmsKeyId=None, EnableCloudwatchLogsExports=None, DeletionProtection=None):
"""
Creates a new cluster from a snapshot or cluster snapshot.
If a snapshot is specified, the target cluster is created from the source DB snapshot with a default configuration and default security group.
If a cluster snapshot is specified, the target cluster is created from the source cluster restore point with the same configuration as the original source DB cluster, except that the new cluster is created with the default security group.
See also: AWS API Documentation
Exceptions
:example: response = client.restore_db_cluster_from_snapshot(
AvailabilityZones=[
'string',
],
DBClusterIdentifier='string',
SnapshotIdentifier='string',
Engine='string',
EngineVersion='string',
Port=123,
DBSubnetGroupName='string',
VpcSecurityGroupIds=[
'string',
],
Tags=[
{
'Key': 'string',
'Value': 'string'
},
],
KmsKeyId='string',
EnableCloudwatchLogsExports=[
'string',
],
DeletionProtection=True|False
)
:type AvailabilityZones: list
:param AvailabilityZones: Provides the list of Amazon EC2 Availability Zones that instances in the restored DB cluster can be created in.\n\n(string) --\n\n
:type DBClusterIdentifier: string
:param DBClusterIdentifier: [REQUIRED]\nThe name of the cluster to create from the snapshot or cluster snapshot. This parameter isn\'t case sensitive.\nConstraints:\n\nMust contain from 1 to 63 letters, numbers, or hyphens.\nThe first character must be a letter.\nCannot end with a hyphen or contain two consecutive hyphens.\n\nExample: my-snapshot-id\n
:type SnapshotIdentifier: string
:param SnapshotIdentifier: [REQUIRED]\nThe identifier for the snapshot or cluster snapshot to restore from.\nYou can use either the name or the Amazon Resource Name (ARN) to specify a cluster snapshot. However, you can use only the ARN to specify a snapshot.\nConstraints:\n\nMust match the identifier of an existing snapshot.\n\n
:type Engine: string
:param Engine: [REQUIRED]\nThe database engine to use for the new cluster.\nDefault: The same as source.\nConstraint: Must be compatible with the engine of the source.\n
:type EngineVersion: string
:param EngineVersion: The version of the database engine to use for the new cluster.
:type Port: integer
:param Port: The port number on which the new cluster accepts connections.\nConstraints: Must be a value from 1150 to 65535 .\nDefault: The same port as the original cluster.\n
:type DBSubnetGroupName: string
:param DBSubnetGroupName: The name of the subnet group to use for the new cluster.\nConstraints: If provided, must match the name of an existing DBSubnetGroup .\nExample: mySubnetgroup\n
:type VpcSecurityGroupIds: list
:param VpcSecurityGroupIds: A list of virtual private cloud (VPC) security groups that the new cluster will belong to.\n\n(string) --\n\n
:type Tags: list
:param Tags: The tags to be assigned to the restored cluster.\n\n(dict) --Metadata assigned to an Amazon DocumentDB resource consisting of a key-value pair.\n\nKey (string) --The required name of the tag. The string value can be from 1 to 128 Unicode characters in length and can\'t be prefixed with 'aws:' or 'rds:'. The string can contain only the set of Unicode letters, digits, white space, \'_\', \'.\', \'/\', \'=\', \'+\', \'-\' (Java regex: '^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$').\n\nValue (string) --The optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and can\'t be prefixed with 'aws:' or 'rds:'. The string can contain only the set of Unicode letters, digits, white space, \'_\', \'.\', \'/\', \'=\', \'+\', \'-\' (Java regex: '^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$').\n\n\n\n\n
:type KmsKeyId: string
:param KmsKeyId: The AWS KMS key identifier to use when restoring an encrypted cluster from a DB snapshot or cluster snapshot.\nThe AWS KMS key identifier is the Amazon Resource Name (ARN) for the AWS KMS encryption key. If you are restoring a cluster with the same AWS account that owns the AWS KMS encryption key used to encrypt the new cluster, then you can use the AWS KMS key alias instead of the ARN for the AWS KMS encryption key.\nIf you do not specify a value for the KmsKeyId parameter, then the following occurs:\n\nIf the snapshot or cluster snapshot in SnapshotIdentifier is encrypted, then the restored cluster is encrypted using the AWS KMS key that was used to encrypt the snapshot or the cluster snapshot.\nIf the snapshot or the cluster snapshot in SnapshotIdentifier is not encrypted, then the restored DB cluster is not encrypted.\n\n
:type EnableCloudwatchLogsExports: list
:param EnableCloudwatchLogsExports: A list of log types that must be enabled for exporting to Amazon CloudWatch Logs.\n\n(string) --\n\n
:type DeletionProtection: boolean
:param DeletionProtection: Specifies whether this cluster can be deleted. If DeletionProtection is enabled, the cluster cannot be deleted unless it is modified and DeletionProtection is disabled. DeletionProtection protects clusters from being accidentally deleted.
:rtype: dict
Returns
Response Syntax
{
'DBCluster': {
'AvailabilityZones': [
'string',
],
'BackupRetentionPeriod': 123,
'DBClusterIdentifier': 'string',
'DBClusterParameterGroup': 'string',
'DBSubnetGroup': 'string',
'Status': 'string',
'PercentProgress': 'string',
'EarliestRestorableTime': datetime(2015, 1, 1),
'Endpoint': 'string',
'ReaderEndpoint': 'string',
'MultiAZ': True|False,
'Engine': 'string',
'EngineVersion': 'string',
'LatestRestorableTime': datetime(2015, 1, 1),
'Port': 123,
'MasterUsername': 'string',
'PreferredBackupWindow': 'string',
'PreferredMaintenanceWindow': 'string',
'DBClusterMembers': [
{
'DBInstanceIdentifier': 'string',
'IsClusterWriter': True|False,
'DBClusterParameterGroupStatus': 'string',
'PromotionTier': 123
},
],
'VpcSecurityGroups': [
{
'VpcSecurityGroupId': 'string',
'Status': 'string'
},
],
'HostedZoneId': 'string',
'StorageEncrypted': True|False,
'KmsKeyId': 'string',
'DbClusterResourceId': 'string',
'DBClusterArn': 'string',
'AssociatedRoles': [
{
'RoleArn': 'string',
'Status': 'string'
},
],
'ClusterCreateTime': datetime(2015, 1, 1),
'EnabledCloudwatchLogsExports': [
'string',
],
'DeletionProtection': True|False
}
}
Response Structure
(dict) --
DBCluster (dict) --
Detailed information about a cluster.
AvailabilityZones (list) --
Provides the list of Amazon EC2 Availability Zones that instances in the cluster can be created in.
(string) --
BackupRetentionPeriod (integer) --
Specifies the number of days for which automatic snapshots are retained.
DBClusterIdentifier (string) --
Contains a user-supplied cluster identifier. This identifier is the unique key that identifies a cluster.
DBClusterParameterGroup (string) --
Specifies the name of the cluster parameter group for the cluster.
DBSubnetGroup (string) --
Specifies information on the subnet group that is associated with the cluster, including the name, description, and subnets in the subnet group.
Status (string) --
Specifies the current state of this cluster.
PercentProgress (string) --
Specifies the progress of the operation as a percentage.
EarliestRestorableTime (datetime) --
The earliest time to which a database can be restored with point-in-time restore.
Endpoint (string) --
Specifies the connection endpoint for the primary instance of the cluster.
ReaderEndpoint (string) --
The reader endpoint for the cluster. The reader endpoint for a cluster load balances connections across the Amazon DocumentDB replicas that are available in a cluster. As clients request new connections to the reader endpoint, Amazon DocumentDB distributes the connection requests among the Amazon DocumentDB replicas in the cluster. This functionality can help balance your read workload across multiple Amazon DocumentDB replicas in your cluster.
If a failover occurs, and the Amazon DocumentDB replica that you are connected to is promoted to be the primary instance, your connection is dropped. To continue sending your read workload to other Amazon DocumentDB replicas in the cluster, you can then reconnect to the reader endpoint.
MultiAZ (boolean) --
Specifies whether the cluster has instances in multiple Availability Zones.
Engine (string) --
Provides the name of the database engine to be used for this cluster.
EngineVersion (string) --
Indicates the database engine version.
LatestRestorableTime (datetime) --
Specifies the latest time to which a database can be restored with point-in-time restore.
Port (integer) --
Specifies the port that the database engine is listening on.
MasterUsername (string) --
Contains the master user name for the cluster.
PreferredBackupWindow (string) --
Specifies the daily time range during which automated backups are created if automated backups are enabled, as determined by the BackupRetentionPeriod .
PreferredMaintenanceWindow (string) --
Specifies the weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).
DBClusterMembers (list) --
Provides the list of instances that make up the cluster.
(dict) --
Contains information about an instance that is part of a cluster.
DBInstanceIdentifier (string) --
Specifies the instance identifier for this member of the cluster.
IsClusterWriter (boolean) --
A value that is true if the cluster member is the primary instance for the cluster and false otherwise.
DBClusterParameterGroupStatus (string) --
Specifies the status of the cluster parameter group for this member of the DB cluster.
PromotionTier (integer) --
A value that specifies the order in which an Amazon DocumentDB replica is promoted to the primary instance after a failure of the existing primary instance.
VpcSecurityGroups (list) --
Provides a list of virtual private cloud (VPC) security groups that the cluster belongs to.
(dict) --
Used as a response element for queries on virtual private cloud (VPC) security group membership.
VpcSecurityGroupId (string) --
The name of the VPC security group.
Status (string) --
The status of the VPC security group.
HostedZoneId (string) --
Specifies the ID that Amazon Route 53 assigns when you create a hosted zone.
StorageEncrypted (boolean) --
Specifies whether the cluster is encrypted.
KmsKeyId (string) --
If StorageEncrypted is true , the AWS KMS key identifier for the encrypted cluster.
DbClusterResourceId (string) --
The AWS Region-unique, immutable identifier for the cluster. This identifier is found in AWS CloudTrail log entries whenever the AWS KMS key for the cluster is accessed.
DBClusterArn (string) --
The Amazon Resource Name (ARN) for the cluster.
AssociatedRoles (list) --
Provides a list of the AWS Identity and Access Management (IAM) roles that are associated with the cluster. IAM roles that are associated with a cluster grant permission for the cluster to access other AWS services on your behalf.
(dict) --
Describes an AWS Identity and Access Management (IAM) role that is associated with a cluster.
RoleArn (string) --
The Amazon Resource Name (ARN) of the IAM role that is associated with the DB cluster.
Status (string) --
Describes the state of association between the IAM role and the cluster. The Status property returns one of the following values:
ACTIVE - The IAM role ARN is associated with the cluster and can be used to access other AWS services on your behalf.
PENDING - The IAM role ARN is being associated with the DB cluster.
INVALID - The IAM role ARN is associated with the cluster, but the cluster cannot assume the IAM role to access other AWS services on your behalf.
ClusterCreateTime (datetime) --
Specifies the time when the cluster was created, in Universal Coordinated Time (UTC).
EnabledCloudwatchLogsExports (list) --
A list of log types that this cluster is configured to export to Amazon CloudWatch Logs.
(string) --
DeletionProtection (boolean) --
Specifies whether this cluster can be deleted. If DeletionProtection is enabled, the cluster cannot be deleted unless it is modified and DeletionProtection is disabled. DeletionProtection protects clusters from being accidentally deleted.
Exceptions
DocDB.Client.exceptions.DBClusterAlreadyExistsFault
DocDB.Client.exceptions.DBClusterQuotaExceededFault
DocDB.Client.exceptions.StorageQuotaExceededFault
DocDB.Client.exceptions.DBSubnetGroupNotFoundFault
DocDB.Client.exceptions.DBSnapshotNotFoundFault
DocDB.Client.exceptions.DBClusterSnapshotNotFoundFault
DocDB.Client.exceptions.InsufficientDBClusterCapacityFault
DocDB.Client.exceptions.InsufficientStorageClusterCapacityFault
DocDB.Client.exceptions.InvalidDBSnapshotStateFault
DocDB.Client.exceptions.InvalidDBClusterSnapshotStateFault
DocDB.Client.exceptions.StorageQuotaExceededFault
DocDB.Client.exceptions.InvalidVPCNetworkStateFault
DocDB.Client.exceptions.InvalidRestoreFault
DocDB.Client.exceptions.DBSubnetGroupNotFoundFault
DocDB.Client.exceptions.InvalidSubnet
DocDB.Client.exceptions.KMSKeyNotAccessibleFault
:return: {
'DBCluster': {
'AvailabilityZones': [
'string',
],
'BackupRetentionPeriod': 123,
'DBClusterIdentifier': 'string',
'DBClusterParameterGroup': 'string',
'DBSubnetGroup': 'string',
'Status': 'string',
'PercentProgress': 'string',
'EarliestRestorableTime': datetime(2015, 1, 1),
'Endpoint': 'string',
'ReaderEndpoint': 'string',
'MultiAZ': True|False,
'Engine': 'string',
'EngineVersion': 'string',
'LatestRestorableTime': datetime(2015, 1, 1),
'Port': 123,
'MasterUsername': 'string',
'PreferredBackupWindow': 'string',
'PreferredMaintenanceWindow': 'string',
'DBClusterMembers': [
{
'DBInstanceIdentifier': 'string',
'IsClusterWriter': True|False,
'DBClusterParameterGroupStatus': 'string',
'PromotionTier': 123
},
],
'VpcSecurityGroups': [
{
'VpcSecurityGroupId': 'string',
'Status': 'string'
},
],
'HostedZoneId': 'string',
'StorageEncrypted': True|False,
'KmsKeyId': 'string',
'DbClusterResourceId': 'string',
'DBClusterArn': 'string',
'AssociatedRoles': [
{
'RoleArn': 'string',
'Status': 'string'
},
],
'ClusterCreateTime': datetime(2015, 1, 1),
'EnabledCloudwatchLogsExports': [
'string',
],
'DeletionProtection': True|False
}
}
:returns:
(string) --
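A hedged sketch of a typical restore flow, assuming 'client' is a boto3 DocumentDB client and the identifiers are placeholders. Restoring creates only the cluster, so the sketch adds one instance afterwards with create_db_instance.
# Placeholder identifiers throughout.
client.restore_db_cluster_from_snapshot(
    DBClusterIdentifier='restored-cluster',
    SnapshotIdentifier='sample-cluster-snapshot',
    Engine='docdb',
)
# The restored cluster has no instances yet; add at least one to accept connections.
client.create_db_instance(
    DBInstanceIdentifier='restored-instance-1',
    DBInstanceClass='db.r5.large',
    Engine='docdb',
    DBClusterIdentifier='restored-cluster',
)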
"""
pass
def restore_db_cluster_to_point_in_time(DBClusterIdentifier=None, SourceDBClusterIdentifier=None, RestoreToTime=None, UseLatestRestorableTime=None, Port=None, DBSubnetGroupName=None, VpcSecurityGroupIds=None, Tags=None, KmsKeyId=None, EnableCloudwatchLogsExports=None, DeletionProtection=None):
"""
Restores a cluster to an arbitrary point in time. Users can restore to any point in time before LatestRestorableTime for up to BackupRetentionPeriod days. The target cluster is created from the source cluster with the same configuration as the original cluster, except that the new cluster is created with the default security group.
See also: AWS API Documentation
Exceptions
:example: response = client.restore_db_cluster_to_point_in_time(
DBClusterIdentifier='string',
SourceDBClusterIdentifier='string',
RestoreToTime=datetime(2015, 1, 1),
UseLatestRestorableTime=True|False,
Port=123,
DBSubnetGroupName='string',
VpcSecurityGroupIds=[
'string',
],
Tags=[
{
'Key': 'string',
'Value': 'string'
},
],
KmsKeyId='string',
EnableCloudwatchLogsExports=[
'string',
],
DeletionProtection=True|False
)
:type DBClusterIdentifier: string
:param DBClusterIdentifier: [REQUIRED]\nThe name of the new cluster to be created.\nConstraints:\n\nMust contain from 1 to 63 letters, numbers, or hyphens.\nThe first character must be a letter.\nCannot end with a hyphen or contain two consecutive hyphens.\n\n
:type SourceDBClusterIdentifier: string
:param SourceDBClusterIdentifier: [REQUIRED]\nThe identifier of the source cluster from which to restore.\nConstraints:\n\nMust match the identifier of an existing DBCluster .\n\n
:type RestoreToTime: datetime
:param RestoreToTime: The date and time to restore the cluster to.\nValid values: A time in Universal Coordinated Time (UTC) format.\nConstraints:\n\nMust be before the latest restorable time for the instance.\nMust be specified if the UseLatestRestorableTime parameter is not provided.\nCannot be specified if the UseLatestRestorableTime parameter is true .\nCannot be specified if the RestoreType parameter is copy-on-write .\n\nExample: 2015-03-07T23:45:00Z\n
:type UseLatestRestorableTime: boolean
:param UseLatestRestorableTime: A value that is set to true to restore the cluster to the latest restorable backup time, and false otherwise.\nDefault: false\nConstraints: Cannot be specified if the RestoreToTime parameter is provided.\n
:type Port: integer
:param Port: The port number on which the new cluster accepts connections.\nConstraints: Must be a value from 1150 to 65535 .\nDefault: The default port for the engine.\n
:type DBSubnetGroupName: string
:param DBSubnetGroupName: The subnet group name to use for the new cluster.\nConstraints: If provided, must match the name of an existing DBSubnetGroup .\nExample: mySubnetgroup\n
:type VpcSecurityGroupIds: list
:param VpcSecurityGroupIds: A list of VPC security groups that the new cluster belongs to.\n\n(string) --\n\n
:type Tags: list
:param Tags: The tags to be assigned to the restored cluster.\n\n(dict) --Metadata assigned to an Amazon DocumentDB resource consisting of a key-value pair.\n\nKey (string) --The required name of the tag. The string value can be from 1 to 128 Unicode characters in length and can\'t be prefixed with 'aws:' or 'rds:'. The string can contain only the set of Unicode letters, digits, white space, \'_\', \'.\', \'/\', \'=\', \'+\', \'-\' (Java regex: '^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$').\n\nValue (string) --The optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and can\'t be prefixed with 'aws:' or 'rds:'. The string can contain only the set of Unicode letters, digits, white space, \'_\', \'.\', \'/\', \'=\', \'+\', \'-\' (Java regex: '^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$').\n\n\n\n\n
:type KmsKeyId: string
:param KmsKeyId: The AWS KMS key identifier to use when restoring an encrypted cluster from an encrypted cluster.\nThe AWS KMS key identifier is the Amazon Resource Name (ARN) for the AWS KMS encryption key. If you are restoring a cluster with the same AWS account that owns the AWS KMS encryption key used to encrypt the new cluster, then you can use the AWS KMS key alias instead of the ARN for the AWS KMS encryption key.\nYou can restore to a new cluster and encrypt the new cluster with an AWS KMS key that is different from the AWS KMS key used to encrypt the source cluster. The new DB cluster is encrypted with the AWS KMS key identified by the KmsKeyId parameter.\nIf you do not specify a value for the KmsKeyId parameter, then the following occurs:\n\nIf the cluster is encrypted, then the restored cluster is encrypted using the AWS KMS key that was used to encrypt the source cluster.\nIf the cluster is not encrypted, then the restored cluster is not encrypted.\n\nIf DBClusterIdentifier refers to a cluster that is not encrypted, then the restore request is rejected.\n
:type EnableCloudwatchLogsExports: list
:param EnableCloudwatchLogsExports: A list of log types that must be enabled for exporting to Amazon CloudWatch Logs.\n\n(string) --\n\n
:type DeletionProtection: boolean
:param DeletionProtection: Specifies whether this cluster can be deleted. If DeletionProtection is enabled, the cluster cannot be deleted unless it is modified and DeletionProtection is disabled. DeletionProtection protects clusters from being accidentally deleted.
:rtype: dict
Returns
Response Syntax
{
'DBCluster': {
'AvailabilityZones': [
'string',
],
'BackupRetentionPeriod': 123,
'DBClusterIdentifier': 'string',
'DBClusterParameterGroup': 'string',
'DBSubnetGroup': 'string',
'Status': 'string',
'PercentProgress': 'string',
'EarliestRestorableTime': datetime(2015, 1, 1),
'Endpoint': 'string',
'ReaderEndpoint': 'string',
'MultiAZ': True|False,
'Engine': 'string',
'EngineVersion': 'string',
'LatestRestorableTime': datetime(2015, 1, 1),
'Port': 123,
'MasterUsername': 'string',
'PreferredBackupWindow': 'string',
'PreferredMaintenanceWindow': 'string',
'DBClusterMembers': [
{
'DBInstanceIdentifier': 'string',
'IsClusterWriter': True|False,
'DBClusterParameterGroupStatus': 'string',
'PromotionTier': 123
},
],
'VpcSecurityGroups': [
{
'VpcSecurityGroupId': 'string',
'Status': 'string'
},
],
'HostedZoneId': 'string',
'StorageEncrypted': True|False,
'KmsKeyId': 'string',
'DbClusterResourceId': 'string',
'DBClusterArn': 'string',
'AssociatedRoles': [
{
'RoleArn': 'string',
'Status': 'string'
},
],
'ClusterCreateTime': datetime(2015, 1, 1),
'EnabledCloudwatchLogsExports': [
'string',
],
'DeletionProtection': True|False
}
}
Response Structure
(dict) --
DBCluster (dict) --
Detailed information about a cluster.
AvailabilityZones (list) --
Provides the list of Amazon EC2 Availability Zones that instances in the cluster can be created in.
(string) --
BackupRetentionPeriod (integer) --
Specifies the number of days for which automatic snapshots are retained.
DBClusterIdentifier (string) --
Contains a user-supplied cluster identifier. This identifier is the unique key that identifies a cluster.
DBClusterParameterGroup (string) --
Specifies the name of the cluster parameter group for the cluster.
DBSubnetGroup (string) --
Specifies information on the subnet group that is associated with the cluster, including the name, description, and subnets in the subnet group.
Status (string) --
Specifies the current state of this cluster.
PercentProgress (string) --
Specifies the progress of the operation as a percentage.
EarliestRestorableTime (datetime) --
The earliest time to which a database can be restored with point-in-time restore.
Endpoint (string) --
Specifies the connection endpoint for the primary instance of the cluster.
ReaderEndpoint (string) --
The reader endpoint for the cluster. The reader endpoint for a cluster load balances connections across the Amazon DocumentDB replicas that are available in a cluster. As clients request new connections to the reader endpoint, Amazon DocumentDB distributes the connection requests among the Amazon DocumentDB replicas in the cluster. This functionality can help balance your read workload across multiple Amazon DocumentDB replicas in your cluster.
If a failover occurs, and the Amazon DocumentDB replica that you are connected to is promoted to be the primary instance, your connection is dropped. To continue sending your read workload to other Amazon DocumentDB replicas in the cluster, you can then reconnect to the reader endpoint.
MultiAZ (boolean) --
Specifies whether the cluster has instances in multiple Availability Zones.
Engine (string) --
Provides the name of the database engine to be used for this cluster.
EngineVersion (string) --
Indicates the database engine version.
LatestRestorableTime (datetime) --
Specifies the latest time to which a database can be restored with point-in-time restore.
Port (integer) --
Specifies the port that the database engine is listening on.
MasterUsername (string) --
Contains the master user name for the cluster.
PreferredBackupWindow (string) --
Specifies the daily time range during which automated backups are created if automated backups are enabled, as determined by the BackupRetentionPeriod .
PreferredMaintenanceWindow (string) --
Specifies the weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).
DBClusterMembers (list) --
Provides the list of instances that make up the cluster.
(dict) --
Contains information about an instance that is part of a cluster.
DBInstanceIdentifier (string) --
Specifies the instance identifier for this member of the cluster.
IsClusterWriter (boolean) --
A value that is true if the cluster member is the primary instance for the cluster and false otherwise.
DBClusterParameterGroupStatus (string) --
Specifies the status of the cluster parameter group for this member of the DB cluster.
PromotionTier (integer) --
A value that specifies the order in which an Amazon DocumentDB replica is promoted to the primary instance after a failure of the existing primary instance.
VpcSecurityGroups (list) --
Provides a list of virtual private cloud (VPC) security groups that the cluster belongs to.
(dict) --
Used as a response element for queries on virtual private cloud (VPC) security group membership.
VpcSecurityGroupId (string) --
The name of the VPC security group.
Status (string) --
The status of the VPC security group.
HostedZoneId (string) --
Specifies the ID that Amazon Route 53 assigns when you create a hosted zone.
StorageEncrypted (boolean) --
Specifies whether the cluster is encrypted.
KmsKeyId (string) --
If StorageEncrypted is true , the AWS KMS key identifier for the encrypted cluster.
DbClusterResourceId (string) --
The AWS Region-unique, immutable identifier for the cluster. This identifier is found in AWS CloudTrail log entries whenever the AWS KMS key for the cluster is accessed.
DBClusterArn (string) --
The Amazon Resource Name (ARN) for the cluster.
AssociatedRoles (list) --
Provides a list of the AWS Identity and Access Management (IAM) roles that are associated with the cluster. IAM roles that are associated with a cluster grant permission for the cluster to access other AWS services on your behalf.
(dict) --
Describes an AWS Identity and Access Management (IAM) role that is associated with a cluster.
RoleArn (string) --
The Amazon Resource Name (ARN) of the IAM role that is associated with the DB cluster.
Status (string) --
Describes the state of association between the IAM role and the cluster. The Status property returns one of the following values:
ACTIVE - The IAM role ARN is associated with the cluster and can be used to access other AWS services on your behalf.
PENDING - The IAM role ARN is being associated with the DB cluster.
INVALID - The IAM role ARN is associated with the cluster, but the cluster cannot assume the IAM role to access other AWS services on your behalf.
ClusterCreateTime (datetime) --
Specifies the time when the cluster was created, in Universal Coordinated Time (UTC).
EnabledCloudwatchLogsExports (list) --
A list of log types that this cluster is configured to export to Amazon CloudWatch Logs.
(string) --
DeletionProtection (boolean) --
Specifies whether this cluster can be deleted. If DeletionProtection is enabled, the cluster cannot be deleted unless it is modified and DeletionProtection is disabled. DeletionProtection protects clusters from being accidentally deleted.
Exceptions
DocDB.Client.exceptions.DBClusterAlreadyExistsFault
DocDB.Client.exceptions.DBClusterNotFoundFault
DocDB.Client.exceptions.DBClusterQuotaExceededFault
DocDB.Client.exceptions.DBClusterSnapshotNotFoundFault
DocDB.Client.exceptions.DBSubnetGroupNotFoundFault
DocDB.Client.exceptions.InsufficientDBClusterCapacityFault
DocDB.Client.exceptions.InsufficientStorageClusterCapacityFault
DocDB.Client.exceptions.InvalidDBClusterSnapshotStateFault
DocDB.Client.exceptions.InvalidDBClusterStateFault
DocDB.Client.exceptions.InvalidDBSnapshotStateFault
DocDB.Client.exceptions.InvalidRestoreFault
DocDB.Client.exceptions.InvalidSubnet
DocDB.Client.exceptions.InvalidVPCNetworkStateFault
DocDB.Client.exceptions.KMSKeyNotAccessibleFault
DocDB.Client.exceptions.StorageQuotaExceededFault
:return: {
'DBCluster': {
'AvailabilityZones': [
'string',
],
'BackupRetentionPeriod': 123,
'DBClusterIdentifier': 'string',
'DBClusterParameterGroup': 'string',
'DBSubnetGroup': 'string',
'Status': 'string',
'PercentProgress': 'string',
'EarliestRestorableTime': datetime(2015, 1, 1),
'Endpoint': 'string',
'ReaderEndpoint': 'string',
'MultiAZ': True|False,
'Engine': 'string',
'EngineVersion': 'string',
'LatestRestorableTime': datetime(2015, 1, 1),
'Port': 123,
'MasterUsername': 'string',
'PreferredBackupWindow': 'string',
'PreferredMaintenanceWindow': 'string',
'DBClusterMembers': [
{
'DBInstanceIdentifier': 'string',
'IsClusterWriter': True|False,
'DBClusterParameterGroupStatus': 'string',
'PromotionTier': 123
},
],
'VpcSecurityGroups': [
{
'VpcSecurityGroupId': 'string',
'Status': 'string'
},
],
'HostedZoneId': 'string',
'StorageEncrypted': True|False,
'KmsKeyId': 'string',
'DbClusterResourceId': 'string',
'DBClusterArn': 'string',
'AssociatedRoles': [
{
'RoleArn': 'string',
'Status': 'string'
},
],
'ClusterCreateTime': datetime(2015, 1, 1),
'EnabledCloudwatchLogsExports': [
'string',
],
'DeletionProtection': True|False
}
}
:returns:
(string) --
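Two hedged sketches, assuming 'client' is a boto3 DocumentDB client and the identifiers are placeholders; RestoreToTime and UseLatestRestorableTime are mutually exclusive, so each call uses only one of them.
from datetime import datetime
# Restore to an explicit UTC timestamp (placeholder value).
client.restore_db_cluster_to_point_in_time(
    DBClusterIdentifier='pitr-cluster',
    SourceDBClusterIdentifier='sample-cluster',
    RestoreToTime=datetime(2015, 3, 7, 23, 45),
)
# Or restore to the latest restorable time instead.
client.restore_db_cluster_to_point_in_time(
    DBClusterIdentifier='pitr-cluster-latest',
    SourceDBClusterIdentifier='sample-cluster',
    UseLatestRestorableTime=True,
)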
"""
pass
def start_db_cluster(DBClusterIdentifier=None):
"""
Restarts the stopped cluster that is specified by DBClusterIdentifier. For more information, see Stopping and Starting an Amazon DocumentDB Cluster.
See also: AWS API Documentation
Exceptions
:example: response = client.start_db_cluster(
DBClusterIdentifier='string'
)
:type DBClusterIdentifier: string
:param DBClusterIdentifier: [REQUIRED]\nThe identifier of the cluster to restart. Example: docdb-2019-05-28-15-24-52\n
:rtype: dict
Returns
Response Syntax
{
'DBCluster': {
'AvailabilityZones': [
'string',
],
'BackupRetentionPeriod': 123,
'DBClusterIdentifier': 'string',
'DBClusterParameterGroup': 'string',
'DBSubnetGroup': 'string',
'Status': 'string',
'PercentProgress': 'string',
'EarliestRestorableTime': datetime(2015, 1, 1),
'Endpoint': 'string',
'ReaderEndpoint': 'string',
'MultiAZ': True|False,
'Engine': 'string',
'EngineVersion': 'string',
'LatestRestorableTime': datetime(2015, 1, 1),
'Port': 123,
'MasterUsername': 'string',
'PreferredBackupWindow': 'string',
'PreferredMaintenanceWindow': 'string',
'DBClusterMembers': [
{
'DBInstanceIdentifier': 'string',
'IsClusterWriter': True|False,
'DBClusterParameterGroupStatus': 'string',
'PromotionTier': 123
},
],
'VpcSecurityGroups': [
{
'VpcSecurityGroupId': 'string',
'Status': 'string'
},
],
'HostedZoneId': 'string',
'StorageEncrypted': True|False,
'KmsKeyId': 'string',
'DbClusterResourceId': 'string',
'DBClusterArn': 'string',
'AssociatedRoles': [
{
'RoleArn': 'string',
'Status': 'string'
},
],
'ClusterCreateTime': datetime(2015, 1, 1),
'EnabledCloudwatchLogsExports': [
'string',
],
'DeletionProtection': True|False
}
}
Response Structure
(dict) --
DBCluster (dict) --Detailed information about a cluster.
AvailabilityZones (list) --Provides the list of Amazon EC2 Availability Zones that instances in the cluster can be created in.
(string) --
BackupRetentionPeriod (integer) --Specifies the number of days for which automatic snapshots are retained.
DBClusterIdentifier (string) --Contains a user-supplied cluster identifier. This identifier is the unique key that identifies a cluster.
DBClusterParameterGroup (string) --Specifies the name of the cluster parameter group for the cluster.
DBSubnetGroup (string) --Specifies information on the subnet group that is associated with the cluster, including the name, description, and subnets in the subnet group.
Status (string) --Specifies the current state of this cluster.
PercentProgress (string) --Specifies the progress of the operation as a percentage.
EarliestRestorableTime (datetime) --The earliest time to which a database can be restored with point-in-time restore.
Endpoint (string) --Specifies the connection endpoint for the primary instance of the cluster.
ReaderEndpoint (string) --The reader endpoint for the cluster. The reader endpoint for a cluster load balances connections across the Amazon DocumentDB replicas that are available in a cluster. As clients request new connections to the reader endpoint, Amazon DocumentDB distributes the connection requests among the Amazon DocumentDB replicas in the cluster. This functionality can help balance your read workload across multiple Amazon DocumentDB replicas in your cluster.
If a failover occurs, and the Amazon DocumentDB replica that you are connected to is promoted to be the primary instance, your connection is dropped. To continue sending your read workload to other Amazon DocumentDB replicas in the cluster, you can then reconnect to the reader endpoint.
MultiAZ (boolean) --Specifies whether the cluster has instances in multiple Availability Zones.
Engine (string) --Provides the name of the database engine to be used for this cluster.
EngineVersion (string) --Indicates the database engine version.
LatestRestorableTime (datetime) --Specifies the latest time to which a database can be restored with point-in-time restore.
Port (integer) --Specifies the port that the database engine is listening on.
MasterUsername (string) --Contains the master user name for the cluster.
PreferredBackupWindow (string) --Specifies the daily time range during which automated backups are created if automated backups are enabled, as determined by the BackupRetentionPeriod .
PreferredMaintenanceWindow (string) --Specifies the weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).
DBClusterMembers (list) --Provides the list of instances that make up the cluster.
(dict) --Contains information about an instance that is part of a cluster.
DBInstanceIdentifier (string) --Specifies the instance identifier for this member of the cluster.
IsClusterWriter (boolean) --A value that is true if the cluster member is the primary instance for the cluster and false otherwise.
DBClusterParameterGroupStatus (string) --Specifies the status of the cluster parameter group for this member of the DB cluster.
PromotionTier (integer) --A value that specifies the order in which an Amazon DocumentDB replica is promoted to the primary instance after a failure of the existing primary instance.
VpcSecurityGroups (list) --Provides a list of virtual private cloud (VPC) security groups that the cluster belongs to.
(dict) --Used as a response element for queries on virtual private cloud (VPC) security group membership.
VpcSecurityGroupId (string) --The name of the VPC security group.
Status (string) --The status of the VPC security group.
HostedZoneId (string) --Specifies the ID that Amazon Route 53 assigns when you create a hosted zone.
StorageEncrypted (boolean) --Specifies whether the cluster is encrypted.
KmsKeyId (string) --If StorageEncrypted is true , the AWS KMS key identifier for the encrypted cluster.
DbClusterResourceId (string) --The AWS Region-unique, immutable identifier for the cluster. This identifier is found in AWS CloudTrail log entries whenever the AWS KMS key for the cluster is accessed.
DBClusterArn (string) --The Amazon Resource Name (ARN) for the cluster.
AssociatedRoles (list) --Provides a list of the AWS Identity and Access Management (IAM) roles that are associated with the cluster. IAM roles that are associated with a cluster grant permission for the cluster to access other AWS services on your behalf.
(dict) --Describes an AWS Identity and Access Management (IAM) role that is associated with a cluster.
RoleArn (string) --The Amazon Resource Name (ARN) of the IAM role that is associated with the DB cluster.
Status (string) --Describes the state of association between the IAM role and the cluster. The Status property returns one of the following values:
ACTIVE - The IAM role ARN is associated with the cluster and can be used to access other AWS services on your behalf.
PENDING - The IAM role ARN is being associated with the DB cluster.
INVALID - The IAM role ARN is associated with the cluster, but the cluster cannot assume the IAM role to access other AWS services on your behalf.
ClusterCreateTime (datetime) --Specifies the time when the cluster was created, in Universal Coordinated Time (UTC).
EnabledCloudwatchLogsExports (list) --A list of log types that this cluster is configured to export to Amazon CloudWatch Logs.
(string) --
DeletionProtection (boolean) --Specifies whether this cluster can be deleted. If DeletionProtection is enabled, the cluster cannot be deleted unless it is modified and DeletionProtection is disabled. DeletionProtection protects clusters from being accidentally deleted.
Exceptions
DocDB.Client.exceptions.DBClusterNotFoundFault
DocDB.Client.exceptions.InvalidDBClusterStateFault
DocDB.Client.exceptions.InvalidDBInstanceStateFault
:return: {
'DBCluster': {
'AvailabilityZones': [
'string',
],
'BackupRetentionPeriod': 123,
'DBClusterIdentifier': 'string',
'DBClusterParameterGroup': 'string',
'DBSubnetGroup': 'string',
'Status': 'string',
'PercentProgress': 'string',
'EarliestRestorableTime': datetime(2015, 1, 1),
'Endpoint': 'string',
'ReaderEndpoint': 'string',
'MultiAZ': True|False,
'Engine': 'string',
'EngineVersion': 'string',
'LatestRestorableTime': datetime(2015, 1, 1),
'Port': 123,
'MasterUsername': 'string',
'PreferredBackupWindow': 'string',
'PreferredMaintenanceWindow': 'string',
'DBClusterMembers': [
{
'DBInstanceIdentifier': 'string',
'IsClusterWriter': True|False,
'DBClusterParameterGroupStatus': 'string',
'PromotionTier': 123
},
],
'VpcSecurityGroups': [
{
'VpcSecurityGroupId': 'string',
'Status': 'string'
},
],
'HostedZoneId': 'string',
'StorageEncrypted': True|False,
'KmsKeyId': 'string',
'DbClusterResourceId': 'string',
'DBClusterArn': 'string',
'AssociatedRoles': [
{
'RoleArn': 'string',
'Status': 'string'
},
],
'ClusterCreateTime': datetime(2015, 1, 1),
'EnabledCloudwatchLogsExports': [
'string',
],
'DeletionProtection': True|False
}
}
:returns:
ACTIVE - The IAM role ARN is associated with the cluster and can be used to access other AWS services on your behalf.
PENDING - The IAM role ARN is being associated with the DB cluster.
INVALID - The IAM role ARN is associated with the cluster, but the cluster cannot assume the IAM role to access other AWS services on your behalf.
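A hedged sketch, assuming 'client' is a boto3 DocumentDB client; polling describe_db_clusters for an 'available' status is an assumption about the usual lifecycle, not part of this API.
import time
client.start_db_cluster(DBClusterIdentifier='docdb-2019-05-28-15-24-52')
# Assumed: 'available' marks the end of the start transition.
while True:
    cluster = client.describe_db_clusters(
        DBClusterIdentifier='docdb-2019-05-28-15-24-52')['DBClusters'][0]
    if cluster['Status'] == 'available':
        break
    time.sleep(30)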
"""
pass
def stop_db_cluster(DBClusterIdentifier=None):
"""
Stops the running cluster that is specified by DBClusterIdentifier. The cluster must be in the available state. For more information, see Stopping and Starting an Amazon DocumentDB Cluster.
See also: AWS API Documentation
Exceptions
:example: response = client.stop_db_cluster(
DBClusterIdentifier='string'
)
:type DBClusterIdentifier: string
:param DBClusterIdentifier: [REQUIRED]\nThe identifier of the cluster to stop. Example: docdb-2019-05-28-15-24-52\n
:rtype: dict
Returns
Response Syntax
{
'DBCluster': {
'AvailabilityZones': [
'string',
],
'BackupRetentionPeriod': 123,
'DBClusterIdentifier': 'string',
'DBClusterParameterGroup': 'string',
'DBSubnetGroup': 'string',
'Status': 'string',
'PercentProgress': 'string',
'EarliestRestorableTime': datetime(2015, 1, 1),
'Endpoint': 'string',
'ReaderEndpoint': 'string',
'MultiAZ': True|False,
'Engine': 'string',
'EngineVersion': 'string',
'LatestRestorableTime': datetime(2015, 1, 1),
'Port': 123,
'MasterUsername': 'string',
'PreferredBackupWindow': 'string',
'PreferredMaintenanceWindow': 'string',
'DBClusterMembers': [
{
'DBInstanceIdentifier': 'string',
'IsClusterWriter': True|False,
'DBClusterParameterGroupStatus': 'string',
'PromotionTier': 123
},
],
'VpcSecurityGroups': [
{
'VpcSecurityGroupId': 'string',
'Status': 'string'
},
],
'HostedZoneId': 'string',
'StorageEncrypted': True|False,
'KmsKeyId': 'string',
'DbClusterResourceId': 'string',
'DBClusterArn': 'string',
'AssociatedRoles': [
{
'RoleArn': 'string',
'Status': 'string'
},
],
'ClusterCreateTime': datetime(2015, 1, 1),
'EnabledCloudwatchLogsExports': [
'string',
],
'DeletionProtection': True|False
}
}
Response Structure
(dict) --
DBCluster (dict) --Detailed information about a cluster.
AvailabilityZones (list) --Provides the list of Amazon EC2 Availability Zones that instances in the cluster can be created in.
(string) --
BackupRetentionPeriod (integer) --Specifies the number of days for which automatic snapshots are retained.
DBClusterIdentifier (string) --Contains a user-supplied cluster identifier. This identifier is the unique key that identifies a cluster.
DBClusterParameterGroup (string) --Specifies the name of the cluster parameter group for the cluster.
DBSubnetGroup (string) --Specifies information on the subnet group that is associated with the cluster, including the name, description, and subnets in the subnet group.
Status (string) --Specifies the current state of this cluster.
PercentProgress (string) --Specifies the progress of the operation as a percentage.
EarliestRestorableTime (datetime) --The earliest time to which a database can be restored with point-in-time restore.
Endpoint (string) --Specifies the connection endpoint for the primary instance of the cluster.
ReaderEndpoint (string) --The reader endpoint for the cluster. The reader endpoint for a cluster load balances connections across the Amazon DocumentDB replicas that are available in a cluster. As clients request new connections to the reader endpoint, Amazon DocumentDB distributes the connection requests among the Amazon DocumentDB replicas in the cluster. This functionality can help balance your read workload across multiple Amazon DocumentDB replicas in your cluster.
If a failover occurs, and the Amazon DocumentDB replica that you are connected to is promoted to be the primary instance, your connection is dropped. To continue sending your read workload to other Amazon DocumentDB replicas in the cluster, you can then reconnect to the reader endpoint.
MultiAZ (boolean) --Specifies whether the cluster has instances in multiple Availability Zones.
Engine (string) --Provides the name of the database engine to be used for this cluster.
EngineVersion (string) --Indicates the database engine version.
LatestRestorableTime (datetime) --Specifies the latest time to which a database can be restored with point-in-time restore.
Port (integer) --Specifies the port that the database engine is listening on.
MasterUsername (string) --Contains the master user name for the cluster.
PreferredBackupWindow (string) --Specifies the daily time range during which automated backups are created if automated backups are enabled, as determined by the BackupRetentionPeriod .
PreferredMaintenanceWindow (string) --Specifies the weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).
DBClusterMembers (list) --Provides the list of instances that make up the cluster.
(dict) --Contains information about an instance that is part of a cluster.
DBInstanceIdentifier (string) --Specifies the instance identifier for this member of the cluster.
IsClusterWriter (boolean) --A value that is true if the cluster member is the primary instance for the cluster and false otherwise.
DBClusterParameterGroupStatus (string) --Specifies the status of the cluster parameter group for this member of the DB cluster.
PromotionTier (integer) --A value that specifies the order in which an Amazon DocumentDB replica is promoted to the primary instance after a failure of the existing primary instance.
VpcSecurityGroups (list) --Provides a list of virtual private cloud (VPC) security groups that the cluster belongs to.
(dict) --Used as a response element for queries on virtual private cloud (VPC) security group membership.
VpcSecurityGroupId (string) --The name of the VPC security group.
Status (string) --The status of the VPC security group.
HostedZoneId (string) --Specifies the ID that Amazon Route 53 assigns when you create a hosted zone.
StorageEncrypted (boolean) --Specifies whether the cluster is encrypted.
KmsKeyId (string) --If StorageEncrypted is true , the AWS KMS key identifier for the encrypted cluster.
DbClusterResourceId (string) --The AWS Region-unique, immutable identifier for the cluster. This identifier is found in AWS CloudTrail log entries whenever the AWS KMS key for the cluster is accessed.
DBClusterArn (string) --The Amazon Resource Name (ARN) for the cluster.
AssociatedRoles (list) --Provides a list of the AWS Identity and Access Management (IAM) roles that are associated with the cluster. IAM roles that are associated with a cluster grant permission for the cluster to access other AWS services on your behalf.
(dict) --Describes an AWS Identity and Access Management (IAM) role that is associated with a cluster.
RoleArn (string) --The Amazon Resource Name (ARN) of the IAM role that is associated with the DB cluster.
Status (string) --Describes the state of association between the IAM role and the cluster. The Status property returns one of the following values:
ACTIVE - The IAM role ARN is associated with the cluster and can be used to access other AWS services on your behalf.
PENDING - The IAM role ARN is being associated with the DB cluster.
INVALID - The IAM role ARN is associated with the cluster, but the cluster cannot assume the IAM role to access other AWS services on your behalf.
ClusterCreateTime (datetime) --Specifies the time when the cluster was created, in Universal Coordinated Time (UTC).
EnabledCloudwatchLogsExports (list) --A list of log types that this cluster is configured to export to Amazon CloudWatch Logs.
(string) --
DeletionProtection (boolean) --Specifies whether this cluster can be deleted. If DeletionProtection is enabled, the cluster cannot be deleted unless it is modified and DeletionProtection is disabled. DeletionProtection protects clusters from being accidentally deleted.
Exceptions
DocDB.Client.exceptions.DBClusterNotFoundFault
DocDB.Client.exceptions.InvalidDBClusterStateFault
DocDB.Client.exceptions.InvalidDBInstanceStateFault
:return: {
'DBCluster': {
'AvailabilityZones': [
'string',
],
'BackupRetentionPeriod': 123,
'DBClusterIdentifier': 'string',
'DBClusterParameterGroup': 'string',
'DBSubnetGroup': 'string',
'Status': 'string',
'PercentProgress': 'string',
'EarliestRestorableTime': datetime(2015, 1, 1),
'Endpoint': 'string',
'ReaderEndpoint': 'string',
'MultiAZ': True|False,
'Engine': 'string',
'EngineVersion': 'string',
'LatestRestorableTime': datetime(2015, 1, 1),
'Port': 123,
'MasterUsername': 'string',
'PreferredBackupWindow': 'string',
'PreferredMaintenanceWindow': 'string',
'DBClusterMembers': [
{
'DBInstanceIdentifier': 'string',
'IsClusterWriter': True|False,
'DBClusterParameterGroupStatus': 'string',
'PromotionTier': 123
},
],
'VpcSecurityGroups': [
{
'VpcSecurityGroupId': 'string',
'Status': 'string'
},
],
'HostedZoneId': 'string',
'StorageEncrypted': True|False,
'KmsKeyId': 'string',
'DbClusterResourceId': 'string',
'DBClusterArn': 'string',
'AssociatedRoles': [
{
'RoleArn': 'string',
'Status': 'string'
},
],
'ClusterCreateTime': datetime(2015, 1, 1),
'EnabledCloudwatchLogsExports': [
'string',
],
'DeletionProtection': True|False
}
}
:returns:
ACTIVE - The IAM role ARN is associated with the cluster and can be used to access other AWS services on your behalf.
PENDING - The IAM role ARN is being associated with the DB cluster.
INVALID - The IAM role ARN is associated with the cluster, but the cluster cannot assume the IAM role to access other AWS services on your behalf.
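A hedged sketch, assuming 'client' is a boto3 DocumentDB client; treating 'stopped' as the terminal status is an assumption. The cluster can later be restarted with start_db_cluster.
import time
client.stop_db_cluster(DBClusterIdentifier='docdb-2019-05-28-15-24-52')
# Assumed: 'stopped' marks the end of the stop transition.
while True:
    status = client.describe_db_clusters(
        DBClusterIdentifier='docdb-2019-05-28-15-24-52')['DBClusters'][0]['Status']
    if status == 'stopped':
        break
    time.sleep(30)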
"""
pass
| 37.523932
| 1,428
| 0.676191
|
e20be1b9fe1b4f062c386feb79c75ab3c83f453d
| 6,046
|
py
|
Python
|
benchmarks/skimage/cucim_restoration_bench.py
|
chrisroat/cucim
|
8b909e374b9a77bb1c8ad747c793c30a3d86d4b4
|
[
"Apache-2.0"
] | null | null | null |
benchmarks/skimage/cucim_restoration_bench.py
|
chrisroat/cucim
|
8b909e374b9a77bb1c8ad747c793c30a3d86d4b4
|
[
"Apache-2.0"
] | null | null | null |
benchmarks/skimage/cucim_restoration_bench.py
|
chrisroat/cucim
|
8b909e374b9a77bb1c8ad747c793c30a3d86d4b4
|
[
"Apache-2.0"
] | null | null | null |
import math
import os
import pickle
import cucim.skimage
import cucim.skimage.restoration
import cupy as cp
import cupyx.scipy.ndimage as ndi
import numpy as np
import pandas as pd
import skimage
import skimage.restoration
from cucim.skimage.restoration import denoise_tv_chambolle as tv_gpu
from skimage.restoration import denoise_tv_chambolle as tv_cpu
from _image_bench import ImageBench
class DenoiseBench(ImageBench):
def set_args(self, dtype):
if np.dtype(dtype).kind in "iu":
im1 = skimage.data.camera()
else:
im1 = skimage.data.camera() / 255.0
im1 = im1.astype(dtype)
if len(self.shape) == 3:
im1 = im1[..., np.newaxis]
# add noise
if np.dtype(dtype).kind in "iu":
sigma = 0.05 * 255
im1 = im1 + sigma * np.random.randn(*im1.shape)
im1 = np.clip(im1, 0, 255).astype(dtype)
else:
sigma = 0.05
im1 = im1 + sigma * np.random.randn(*im1.shape)
# Tile the noisy base image up to the requested benchmark shape, then crop to it.
n_tile = [math.ceil(s / im_s) for s, im_s in zip(self.shape, im1.shape)]
slices = tuple([slice(s) for s in self.shape])
image = np.tile(im1, n_tile)[slices]
imaged = cp.asarray(image)
self.args_cpu = (image,)
self.args_gpu = (imaged,)
class CalibratedDenoiseBench(ImageBench):
def set_args(self, dtype):
if np.dtype(dtype).kind in "iu":
im1 = skimage.data.camera()
else:
im1 = skimage.data.camera() / 255.0
im1 = im1.astype(dtype)
if len(self.shape) == 3:
im1 = im1[..., np.newaxis]
# add noise
if np.dtype(dtype).kind in "iu":
sigma = 0.05 * 255
im1 = im1 + sigma * np.random.randn(*im1.shape)
im1 = np.clip(im1, 0, 255).astype(dtype)
else:
sigma = 0.05
im1 = im1 + sigma * np.random.randn(*im1.shape)
n_tile = [math.ceil(s / im_s) for s, im_s in zip(self.shape, im1.shape)]
slices = tuple([slice(s) for s in self.shape])
image = np.tile(im1, n_tile)[slices]
imaged = cp.asarray(image)
denoise_parameters = {"weight": np.linspace(0.01, 0.4, 10)}
self.args_cpu = (image, tv_cpu, denoise_parameters)
self.args_gpu = (imaged, tv_gpu, denoise_parameters)
class DeconvolutionBench(ImageBench):
def set_args(self, dtype):
if np.dtype(dtype).kind in "iu":
im1 = skimage.data.camera()
else:
im1 = skimage.data.camera() / 255.0
im1 = im1.astype(dtype)
if len(self.shape) == 3:
im1 = im1[..., np.newaxis]
im1 = cp.array(im1)
n_tile = [math.ceil(s / im_s) for s, im_s in zip(self.shape, im1.shape)]
slices = tuple([slice(s) for s in self.shape])
imaged = cp.tile(im1, n_tile)[slices]
# Blur the tiled image with a uniform 5-sample boxcar PSF so the deconvolution has work to do.
psfd = cp.ones((5,) * imaged.ndim) / 25
imaged = ndi.convolve(imaged, psfd)
# Keep matching CPU copies so the CPU and GPU implementations receive identical inputs.
image = cp.asnumpy(imaged)
psf = cp.asnumpy(psfd)
self.args_cpu = (image, psf)
self.args_gpu = (imaged, psfd)
# Accumulate results across runs: reload any previously pickled results, otherwise start fresh.
pfile = "cucim_restoration_results.pickle"
if os.path.exists(pfile):
with open(pfile, "rb") as f:
all_results = pickle.load(f)
else:
all_results = pd.DataFrame()
dtypes = [np.float32]
for function_name, fixed_kwargs, var_kwargs, allow_color, allow_nd in [
# _denoise.py
("denoise_tv_chambolle", dict(), dict(weight=[0.02]), True, True),
# j_invariant.py
("calibrate_denoiser", dict(), dict(), False, True),
]:
for shape in [(512, 512), (1980, 1080), (1980, 1080, 3), (128, 128, 128)]:
ndim = len(shape)
# Skip shapes the function cannot handle: color (channel) axes and >2-D volumes
# are only exercised when the function supports them.
if not allow_nd:
if not allow_color:
if ndim > 2:
continue
else:
if ndim > 3 or (ndim == 3 and shape[-1] not in [3, 4]):
continue
if shape[-1] == 3 and not allow_color:
continue
if function_name == "denoise_tv_chambolle":
fixed_kwargs["channel_axis"] = -1 if shape[-1] == 3 else None
if function_name == "calibrate_denoiser":
denoise_class = CalibratedDenoiseBench
else:
denoise_class = DenoiseBench
B = denoise_class(
function_name=function_name,
shape=shape,
dtypes=dtypes,
fixed_kwargs=fixed_kwargs,
var_kwargs=var_kwargs,
module_cpu=skimage.restoration,
module_gpu=cucim.skimage.restoration,
)
results = B.run_benchmark(duration=1)
all_results = all_results.append(results["full"])
# function_name, fixed_kwargs, var_kwargs, allow_color, allow_nd = ('unsupervised_wiener', dict(), dict(), False, True)
for function_name, fixed_kwargs, var_kwargs, allow_color, allow_nd in [
# deconvolution.py
("wiener", dict(balance=100.0), dict(), False, False),
("unsupervised_wiener", dict(), dict(), False, False),
("richardson_lucy", dict(), dict(num_iter=[5]), False, True),
]:
for shape in [(512, 512), (3840, 2160), (3840, 2160, 3), (192, 192, 192)]:
ndim = len(shape)
if not allow_nd:
if not allow_color:
if ndim > 2:
continue
else:
if ndim > 3 or (ndim == 3 and shape[-1] not in [3, 4]):
continue
if shape[-1] == 3 and not allow_color:
continue
B = DeconvolutionBench(
function_name=function_name,
shape=shape,
dtypes=dtypes,
fixed_kwargs=fixed_kwargs,
var_kwargs=var_kwargs,
module_cpu=skimage.restoration,
module_gpu=cucim.skimage.restoration,
)
results = B.run_benchmark(duration=1)
all_results = all_results.append(results["full"])
# Persist the accumulated results as CSV, pickle, and a Markdown table.
fbase = os.path.splitext(pfile)[0]
all_results.to_csv(fbase + ".csv")
all_results.to_pickle(pfile)
with open(fbase + ".md", "wt") as f:
f.write(all_results.to_markdown())
| 31.989418
| 119
| 0.577407
|
24d3f587dfe209aac6560a64bf5eac919b86013b
| 28,038
|
py
|
Python
|
test/functional/__init__.py
|
NicolasT/swift
|
16e1e1e3c6a921a335e5340be9bde698f4ee5b65
|
[
"Apache-2.0"
] | null | null | null |
test/functional/__init__.py
|
NicolasT/swift
|
16e1e1e3c6a921a335e5340be9bde698f4ee5b65
|
[
"Apache-2.0"
] | null | null | null |
test/functional/__init__.py
|
NicolasT/swift
|
16e1e1e3c6a921a335e5340be9bde698f4ee5b65
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import pickle
import socket
import locale
import eventlet
import eventlet.debug
import functools
import random
from time import time, sleep
from httplib import HTTPException
from urlparse import urlparse
from nose import SkipTest
from contextlib import closing
from gzip import GzipFile
from shutil import rmtree
from tempfile import mkdtemp
from test import get_config
from test.functional.swift_test_client import Account, Connection, \
ResponseError
# This has the side effect of mocking out the xattr module so that unit tests
# (and in this case, when in-process functional tests are called for) can run
# on file systems that don't support extended attributes.
from test.unit import debug_logger, FakeMemcache
from swift.common import constraints, utils, ring, storage_policy
from swift.common.wsgi import monkey_patch_mimetools
from swift.common.middleware import catch_errors, gatekeeper, healthcheck, \
proxy_logging, container_sync, bulk, tempurl, slo, dlo, ratelimit, \
tempauth, container_quotas, account_quotas
from swift.common.utils import config_true_value
from swift.proxy import server as proxy_server
from swift.account import server as account_server
from swift.container import server as container_server
from swift.obj import server as object_server, mem_server as mem_object_server
import swift.proxy.controllers.obj
# In order to get the proper blocking behavior of sockets without using
# threads, where we can set an arbitrary timeout for some piece of code under
# test, we use eventlet with the standard socket library patched. We have to
# perform this setup at module import time, since all the socket module
# bindings in the swiftclient code will have been made by the time nose
# invokes the package or class setup methods.
eventlet.hubs.use_hub(utils.get_hub())
eventlet.patcher.monkey_patch(all=False, socket=True)
eventlet.debug.hub_exceptions(False)
from swiftclient import get_auth, http_connection
has_insecure = False
try:
from swiftclient import __version__ as client_version
# Prevent a ValueError in StrictVersion with '2.0.3.68.ga99c2ff'
client_version = '.'.join(client_version.split('.')[:3])
except ImportError:
# Pre-PBR we had version, not __version__. Anyhow...
client_version = '1.2'
from distutils.version import StrictVersion
if StrictVersion(client_version) >= StrictVersion('2.0'):
has_insecure = True
config = {}
web_front_end = None
normalized_urls = None
# If no config was read, we will fall back to old school env vars
swift_test_auth_version = None
swift_test_auth = os.environ.get('SWIFT_TEST_AUTH')
swift_test_user = [os.environ.get('SWIFT_TEST_USER'), None, None, '']
swift_test_key = [os.environ.get('SWIFT_TEST_KEY'), None, None, '']
swift_test_tenant = ['', '', '', '']
swift_test_perm = ['', '', '', '']
swift_test_domain = ['', '', '', '']
swift_test_user_id = ['', '', '', '']
swift_test_tenant_id = ['', '', '', '']
skip, skip2, skip3 = False, False, False
orig_collate = ''
insecure = False
orig_hash_path_suff_pref = ('', '')
orig_swift_conf_name = None
in_process = False
_testdir = _test_servers = _test_sockets = _test_coros = None
class FakeMemcacheMiddleware(object):
"""
Caching middleware that fakes out caching in swift.
"""
def __init__(self, app, conf):
self.app = app
self.memcache = FakeMemcache()
def __call__(self, env, start_response):
env['swift.cache'] = self.memcache
return self.app(env, start_response)
def fake_memcache_filter_factory(conf):
def filter_app(app):
return FakeMemcacheMiddleware(app, conf)
return filter_app
# swift.conf contents for in-process functional test runs
functests_swift_conf = '''
[swift-hash]
swift_hash_path_suffix = inprocfunctests
swift_hash_path_prefix = inprocfunctests
[swift-constraints]
max_file_size = %d
''' % ((8 * 1024 * 1024) + 2) # 8 MB + 2
def in_process_setup(the_object_server=object_server):
print >>sys.stderr, 'IN-PROCESS SERVERS IN USE FOR FUNCTIONAL TESTS'
print >>sys.stderr, 'Using object_server: %s' % the_object_server.__name__
monkey_patch_mimetools()
global _testdir
_testdir = os.path.join(mkdtemp(), 'tmp_functional')
utils.mkdirs(_testdir)
rmtree(_testdir)
utils.mkdirs(os.path.join(_testdir, 'sda1'))
utils.mkdirs(os.path.join(_testdir, 'sda1', 'tmp'))
utils.mkdirs(os.path.join(_testdir, 'sdb1'))
utils.mkdirs(os.path.join(_testdir, 'sdb1', 'tmp'))
swift_conf = os.path.join(_testdir, "swift.conf")
with open(swift_conf, "w") as scfp:
scfp.write(functests_swift_conf)
global orig_swift_conf_name
orig_swift_conf_name = utils.SWIFT_CONF_FILE
utils.SWIFT_CONF_FILE = swift_conf
constraints.reload_constraints()
storage_policy.SWIFT_CONF_FILE = swift_conf
storage_policy.reload_storage_policies()
global config
if constraints.SWIFT_CONSTRAINTS_LOADED:
# Use the swift constraints that are loaded for the test framework
# configuration
config.update(constraints.EFFECTIVE_CONSTRAINTS)
else:
        # In-process swift constraints were not loaded, something's wrong
raise SkipTest
global orig_hash_path_suff_pref
orig_hash_path_suff_pref = utils.HASH_PATH_PREFIX, utils.HASH_PATH_SUFFIX
utils.validate_hash_conf()
# We create the proxy server listening socket to get its port number so
# that we can add it as the "auth_port" value for the functional test
# clients.
prolis = eventlet.listen(('localhost', 0))
# The following set of configuration values is used both for the
    # functional test framework and for the various proxy, account, container
# and object servers.
config.update({
# Values needed by the various in-process swift servers
'devices': _testdir,
'swift_dir': _testdir,
'mount_check': 'false',
'client_timeout': 4,
'allow_account_management': 'true',
'account_autocreate': 'true',
'allowed_headers':
'content-disposition, content-encoding, x-delete-at,'
' x-object-manifest, x-static-large-object',
'allow_versions': 'True',
# Below are values used by the functional test framework, as well as
# by the various in-process swift servers
'auth_host': '127.0.0.1',
'auth_port': str(prolis.getsockname()[1]),
'auth_ssl': 'no',
'auth_prefix': '/auth/',
# Primary functional test account (needs admin access to the
# account)
'account': 'test',
'username': 'tester',
'password': 'testing',
# User on a second account (needs admin access to the account)
'account2': 'test2',
'username2': 'tester2',
'password2': 'testing2',
# User on same account as first, but without admin access
'username3': 'tester3',
'password3': 'testing3',
# For tempauth middleware
'user_admin_admin': 'admin .admin .reseller_admin',
'user_test_tester': 'testing .admin',
'user_test2_tester2': 'testing2 .admin',
'user_test_tester3': 'testing3'
})
acc1lis = eventlet.listen(('localhost', 0))
acc2lis = eventlet.listen(('localhost', 0))
con1lis = eventlet.listen(('localhost', 0))
con2lis = eventlet.listen(('localhost', 0))
obj1lis = eventlet.listen(('localhost', 0))
obj2lis = eventlet.listen(('localhost', 0))
global _test_sockets
_test_sockets = \
(prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis, obj2lis)
account_ring_path = os.path.join(_testdir, 'account.ring.gz')
with closing(GzipFile(account_ring_path, 'wb')) as f:
pickle.dump(ring.RingData([[0, 1, 0, 1], [1, 0, 1, 0]],
[{'id': 0, 'zone': 0, 'device': 'sda1', 'ip': '127.0.0.1',
'port': acc1lis.getsockname()[1]},
{'id': 1, 'zone': 1, 'device': 'sdb1', 'ip': '127.0.0.1',
'port': acc2lis.getsockname()[1]}], 30),
f)
container_ring_path = os.path.join(_testdir, 'container.ring.gz')
with closing(GzipFile(container_ring_path, 'wb')) as f:
pickle.dump(ring.RingData([[0, 1, 0, 1], [1, 0, 1, 0]],
[{'id': 0, 'zone': 0, 'device': 'sda1', 'ip': '127.0.0.1',
'port': con1lis.getsockname()[1]},
{'id': 1, 'zone': 1, 'device': 'sdb1', 'ip': '127.0.0.1',
'port': con2lis.getsockname()[1]}], 30),
f)
object_ring_path = os.path.join(_testdir, 'object.ring.gz')
with closing(GzipFile(object_ring_path, 'wb')) as f:
pickle.dump(ring.RingData([[0, 1, 0, 1], [1, 0, 1, 0]],
[{'id': 0, 'zone': 0, 'device': 'sda1', 'ip': '127.0.0.1',
'port': obj1lis.getsockname()[1]},
{'id': 1, 'zone': 1, 'device': 'sdb1', 'ip': '127.0.0.1',
'port': obj2lis.getsockname()[1]}], 30),
f)
eventlet.wsgi.HttpProtocol.default_request_version = "HTTP/1.0"
# Turn off logging requests by the underlying WSGI software.
eventlet.wsgi.HttpProtocol.log_request = lambda *a: None
logger = utils.get_logger(config, 'wsgi-server', log_route='wsgi')
    # Redirect logging of other messages by the underlying WSGI software.
eventlet.wsgi.HttpProtocol.log_message = \
lambda s, f, *a: logger.error('ERROR WSGI: ' + f % a)
# Default to only 4 seconds for in-process functional test runs
eventlet.wsgi.WRITE_TIMEOUT = 4
prosrv = proxy_server.Application(config, logger=debug_logger('proxy'))
acc1srv = account_server.AccountController(
config, logger=debug_logger('acct1'))
acc2srv = account_server.AccountController(
config, logger=debug_logger('acct2'))
con1srv = container_server.ContainerController(
config, logger=debug_logger('cont1'))
con2srv = container_server.ContainerController(
config, logger=debug_logger('cont2'))
obj1srv = the_object_server.ObjectController(
config, logger=debug_logger('obj1'))
obj2srv = the_object_server.ObjectController(
config, logger=debug_logger('obj2'))
global _test_servers
_test_servers = \
(prosrv, acc1srv, acc2srv, con1srv, con2srv, obj1srv, obj2srv)
pipeline = [
catch_errors.filter_factory,
gatekeeper.filter_factory,
healthcheck.filter_factory,
proxy_logging.filter_factory,
fake_memcache_filter_factory,
container_sync.filter_factory,
bulk.filter_factory,
tempurl.filter_factory,
slo.filter_factory,
dlo.filter_factory,
ratelimit.filter_factory,
tempauth.filter_factory,
container_quotas.filter_factory,
account_quotas.filter_factory,
proxy_logging.filter_factory,
]
app = prosrv
import mock
for filter_factory in reversed(pipeline):
app_filter = filter_factory(config)
with mock.patch('swift.common.utils') as mock_utils:
mock_utils.get_logger.return_value = None
app = app_filter(app)
app.logger = prosrv.logger
nl = utils.NullLogger()
prospa = eventlet.spawn(eventlet.wsgi.server, prolis, app, nl)
acc1spa = eventlet.spawn(eventlet.wsgi.server, acc1lis, acc1srv, nl)
acc2spa = eventlet.spawn(eventlet.wsgi.server, acc2lis, acc2srv, nl)
con1spa = eventlet.spawn(eventlet.wsgi.server, con1lis, con1srv, nl)
con2spa = eventlet.spawn(eventlet.wsgi.server, con2lis, con2srv, nl)
obj1spa = eventlet.spawn(eventlet.wsgi.server, obj1lis, obj1srv, nl)
obj2spa = eventlet.spawn(eventlet.wsgi.server, obj2lis, obj2srv, nl)
global _test_coros
_test_coros = \
(prospa, acc1spa, acc2spa, con1spa, con2spa, obj1spa, obj2spa)
# Create accounts "test" and "test2"
def create_account(act):
ts = utils.normalize_timestamp(time())
partition, nodes = prosrv.account_ring.get_nodes(act)
for node in nodes:
# Note: we are just using the http_connect method in the object
# controller here to talk to the account server nodes.
conn = swift.proxy.controllers.obj.http_connect(
node['ip'], node['port'], node['device'], partition, 'PUT',
'/' + act, {'X-Timestamp': ts, 'x-trans-id': act})
resp = conn.getresponse()
assert(resp.status == 201)
create_account('AUTH_test')
create_account('AUTH_test2')
cluster_info = {}
def get_cluster_info():
# The fallback constraints used for testing will come from the current
# effective constraints.
eff_constraints = dict(constraints.EFFECTIVE_CONSTRAINTS)
# We'll update those constraints based on what the /info API provides, if
# anything.
global cluster_info
try:
conn = Connection(config)
conn.authenticate()
cluster_info.update(conn.cluster_info())
except (ResponseError, socket.error):
# Failed to get cluster_information via /info API, so fall back on
# test.conf data
pass
else:
try:
eff_constraints.update(cluster_info['swift'])
except KeyError:
# Most likely the swift cluster has "expose_info = false" set
# in its proxy-server.conf file, so we'll just do the best we
# can.
print >>sys.stderr, "** Swift Cluster not exposing /info **"
# Finally, we'll allow any constraint present in the swift-constraints
# section of test.conf to override everything. Note that only those
# constraints defined in the constraints module are converted to integers.
test_constraints = get_config('swift-constraints')
for k in constraints.DEFAULT_CONSTRAINTS:
try:
test_constraints[k] = int(test_constraints[k])
except KeyError:
pass
except ValueError:
print >>sys.stderr, "Invalid constraint value: %s = %s" % (
k, test_constraints[k])
eff_constraints.update(test_constraints)
# Just make it look like these constraints were loaded from a /info call,
# even if the /info call failed, or when they are overridden by values
# from the swift-constraints section of test.conf
cluster_info['swift'] = eff_constraints
def setup_package():
in_process_env = os.environ.get('SWIFT_TEST_IN_PROCESS')
if in_process_env is not None:
use_in_process = utils.config_true_value(in_process_env)
else:
use_in_process = None
global in_process
if use_in_process:
# Explicitly set to True, so barrel on ahead with in-process
# functional test setup.
in_process = True
        # NOTE: No attempt is made to read a local test.conf file.
else:
if use_in_process is None:
# Not explicitly set, default to using in-process functional tests
# if the test.conf file is not found, or does not provide a usable
# configuration.
config.update(get_config('func_test'))
if config:
in_process = False
else:
in_process = True
else:
# Explicitly set to False, do not attempt to use in-process
# functional tests, be sure we attempt to read from local
# test.conf file.
in_process = False
config.update(get_config('func_test'))
if in_process:
in_mem_obj_env = os.environ.get('SWIFT_TEST_IN_MEMORY_OBJ')
in_mem_obj = utils.config_true_value(in_mem_obj_env)
in_process_setup(the_object_server=(
mem_object_server if in_mem_obj else object_server))
global web_front_end
web_front_end = config.get('web_front_end', 'integral')
global normalized_urls
normalized_urls = config.get('normalized_urls', False)
global orig_collate
orig_collate = locale.setlocale(locale.LC_COLLATE)
locale.setlocale(locale.LC_COLLATE, config.get('collate', 'C'))
global insecure
insecure = config_true_value(config.get('insecure', False))
global swift_test_auth_version
global swift_test_auth
global swift_test_user
global swift_test_key
global swift_test_tenant
global swift_test_perm
global swift_test_domain
if config:
swift_test_auth_version = str(config.get('auth_version', '1'))
swift_test_auth = 'http'
if config_true_value(config.get('auth_ssl', 'no')):
swift_test_auth = 'https'
if 'auth_prefix' not in config:
config['auth_prefix'] = '/'
try:
suffix = '://%(auth_host)s:%(auth_port)s%(auth_prefix)s' % config
swift_test_auth += suffix
except KeyError:
pass # skip
if swift_test_auth_version == "1":
swift_test_auth += 'v1.0'
try:
if 'account' in config:
swift_test_user[0] = '%(account)s:%(username)s' % config
else:
swift_test_user[0] = '%(username)s' % config
swift_test_key[0] = config['password']
except KeyError:
# bad config, no account/username configured, tests cannot be
# run
pass
try:
swift_test_user[1] = '%s%s' % (
'%s:' % config['account2'] if 'account2' in config else '',
config['username2'])
swift_test_key[1] = config['password2']
except KeyError:
pass # old config, no second account tests can be run
try:
swift_test_user[2] = '%s%s' % (
'%s:' % config['account'] if 'account'
in config else '', config['username3'])
swift_test_key[2] = config['password3']
except KeyError:
pass # old config, no third account tests can be run
for _ in range(3):
swift_test_perm[_] = swift_test_user[_]
else:
swift_test_user[0] = config['username']
swift_test_tenant[0] = config['account']
swift_test_key[0] = config['password']
swift_test_user[1] = config['username2']
swift_test_tenant[1] = config['account2']
swift_test_key[1] = config['password2']
swift_test_user[2] = config['username3']
swift_test_tenant[2] = config['account']
swift_test_key[2] = config['password3']
if 'username4' in config:
swift_test_user[3] = config['username4']
swift_test_tenant[3] = config['account4']
swift_test_key[3] = config['password4']
swift_test_domain[3] = config['domain4']
for _ in range(4):
swift_test_perm[_] = swift_test_tenant[_] + ':' \
+ swift_test_user[_]
global skip
skip = not all([swift_test_auth, swift_test_user[0], swift_test_key[0]])
if skip:
print >>sys.stderr, 'SKIPPING FUNCTIONAL TESTS DUE TO NO CONFIG'
global skip2
skip2 = not all([not skip, swift_test_user[1], swift_test_key[1]])
if not skip and skip2:
print >>sys.stderr, \
'SKIPPING SECOND ACCOUNT FUNCTIONAL TESTS' \
' DUE TO NO CONFIG FOR THEM'
global skip3
skip3 = not all([not skip, swift_test_user[2], swift_test_key[2]])
if not skip and skip3:
print >>sys.stderr, \
'SKIPPING THIRD ACCOUNT FUNCTIONAL TESTS DUE TO NO CONFIG FOR THEM'
global skip_if_not_v3
skip_if_not_v3 = (swift_test_auth_version != '3'
or not all([not skip,
swift_test_user[3],
swift_test_key[3]]))
if not skip and skip_if_not_v3:
print >>sys.stderr, \
'SKIPPING FUNCTIONAL TESTS SPECIFIC TO AUTH VERSION 3'
get_cluster_info()
def teardown_package():
global orig_collate
locale.setlocale(locale.LC_COLLATE, orig_collate)
# clean up containers and objects left behind after running tests
conn = Connection(config)
conn.authenticate()
account = Account(conn, config.get('account', config['username']))
account.delete_containers()
global in_process
if in_process:
try:
for server in _test_coros:
server.kill()
except Exception:
pass
try:
rmtree(os.path.dirname(_testdir))
except Exception:
pass
utils.HASH_PATH_PREFIX, utils.HASH_PATH_SUFFIX = \
orig_hash_path_suff_pref
utils.SWIFT_CONF_FILE = orig_swift_conf_name
constraints.reload_constraints()
class AuthError(Exception):
pass
class InternalServerError(Exception):
pass
url = [None, None, None, None]
token = [None, None, None, None]
parsed = [None, None, None, None]
conn = [None, None, None, None]
def connection(url):
if has_insecure:
return http_connection(url, insecure=insecure)
return http_connection(url)
def retry(func, *args, **kwargs):
"""
You can use the kwargs to override:
'retries' (default: 5)
'use_account' (default: 1) - which user's token to pass
'url_account' (default: matches 'use_account') - which user's storage URL
'resource' (default: url[url_account] - URL to connect to; retry()
will interpolate the variable :storage_url: if present
"""
global url, token, parsed, conn
retries = kwargs.get('retries', 5)
attempts, backoff = 0, 1
# use account #1 by default; turn user's 1-indexed account into 0-indexed
use_account = kwargs.pop('use_account', 1) - 1
# access our own account by default
url_account = kwargs.pop('url_account', use_account + 1) - 1
os_options = {'user_domain_name': swift_test_domain[use_account],
'project_domain_name': swift_test_domain[use_account]}
while attempts <= retries:
attempts += 1
try:
if not url[use_account] or not token[use_account]:
url[use_account], token[use_account] = \
get_auth(swift_test_auth, swift_test_user[use_account],
swift_test_key[use_account],
snet=False,
tenant_name=swift_test_tenant[use_account],
auth_version=swift_test_auth_version,
os_options=os_options)
parsed[use_account] = conn[use_account] = None
if not parsed[use_account] or not conn[use_account]:
parsed[use_account], conn[use_account] = \
connection(url[use_account])
# default resource is the account url[url_account]
resource = kwargs.pop('resource', '%(storage_url)s')
template_vars = {'storage_url': url[url_account]}
parsed_result = urlparse(resource % template_vars)
return func(url[url_account], token[use_account],
parsed_result, conn[url_account],
*args, **kwargs)
except (socket.error, HTTPException):
if attempts > retries:
raise
parsed[use_account] = conn[use_account] = None
except AuthError:
url[use_account] = token[use_account] = None
continue
except InternalServerError:
pass
if attempts <= retries:
sleep(backoff)
backoff *= 2
raise Exception('No result after %s retries.' % retries)
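# Hedged usage sketch (illustrative, not part of the original suite): callers pass a
# request function that accepts (url, token, parsed, conn), plus any retry() overrides.
#
#     def head_account(url, token, parsed, conn):
#         conn.request('HEAD', parsed.path, '', {'X-Auth-Token': token})
#         return check_response(conn)
#
#     resp = retry(head_account, use_account=2)  # use the second account's credentials
#     resp.read()
#
# 'resource' may embed %(storage_url)s, which retry() fills in with the target
# account's storage URL before parsing it.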
def check_response(conn):
resp = conn.getresponse()
if resp.status == 401:
resp.read()
raise AuthError()
elif resp.status // 100 == 5:
resp.read()
raise InternalServerError()
return resp
def load_constraint(name):
global cluster_info
try:
c = cluster_info['swift'][name]
except KeyError:
raise SkipTest("Missing constraint: %s" % name)
if not isinstance(c, int):
raise SkipTest("Bad value, %r, for constraint: %s" % (c, name))
return c
def get_storage_policy_from_cluster_info(info):
policies = info['swift'].get('policies', {})
default_policy = []
non_default_policies = []
for p in policies:
if p.get('default', {}):
default_policy.append(p)
else:
non_default_policies.append(p)
return default_policy, non_default_policies
def reset_acl():
def post(url, token, parsed, conn):
conn.request('POST', parsed.path, '', {
'X-Auth-Token': token,
'X-Account-Access-Control': '{}'
})
return check_response(conn)
resp = retry(post, use_account=1)
resp.read()
def requires_acls(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
global skip, cluster_info
if skip or not cluster_info:
raise SkipTest
# Determine whether this cluster has account ACLs; if not, skip test
if not cluster_info.get('tempauth', {}).get('account_acls'):
raise SkipTest
if 'keystoneauth' in cluster_info:
# remove when keystoneauth supports account acls
raise SkipTest
reset_acl()
try:
rv = f(*args, **kwargs)
finally:
reset_acl()
return rv
return wrapper
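# Hedged sketch (hypothetical test name): requires_acls is intended to wrap test
# methods that exercise X-Account-Access-Control, resetting the ACL around the call.
#
#     @requires_acls
#     def test_read_acl_grants_listing(self):
#         ...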
class FunctionalStoragePolicyCollection(object):
def __init__(self, policies):
self._all = policies
self.default = None
for p in self:
if p.get('default', False):
assert self.default is None, 'Found multiple default ' \
'policies %r and %r' % (self.default, p)
self.default = p
@classmethod
def from_info(cls, info=None):
if not (info or cluster_info):
get_cluster_info()
info = info or cluster_info
try:
policy_info = info['swift']['policies']
except KeyError:
raise AssertionError('Did not find any policy info in %r' % info)
policies = cls(policy_info)
assert policies.default, \
'Did not find default policy in %r' % policy_info
return policies
def __len__(self):
return len(self._all)
def __iter__(self):
return iter(self._all)
def __getitem__(self, index):
return self._all[index]
def filter(self, **kwargs):
return self.__class__([p for p in self if all(
p.get(k) == v for k, v in kwargs.items())])
def exclude(self, **kwargs):
return self.__class__([p for p in self if all(
p.get(k) != v for k, v in kwargs.items())])
def select(self):
return random.choice(self)
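# Hedged usage sketch (illustrative): prefer a non-default policy when one is available.
#
#     policies = FunctionalStoragePolicyCollection.from_info()
#     others = policies.exclude(default=True)
#     policy = others.select() if len(others) else policies.default
#
# filter()/exclude() match on arbitrary policy attributes, e.g. policies.filter(name='gold').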
def requires_policies(f):
@functools.wraps(f)
def wrapper(self, *args, **kwargs):
if skip:
raise SkipTest
try:
self.policies = FunctionalStoragePolicyCollection.from_info()
except AssertionError:
raise SkipTest("Unable to determine available policies")
if len(self.policies) < 2:
raise SkipTest("Multiple policies not enabled")
return f(self, *args, **kwargs)
return wrapper
| 36.603133
| 79
| 0.634389
|
1c4da840d1b02cac2a696c76cc31d30757ac9afb
| 344
|
py
|
Python
|
ch07/07_09.py
|
leeseedong/book-cryptocurrency
|
58c0bb3f5a80f8cc73ba47c4839be3bd33c9d67c
|
[
"Apache-2.0"
] | 121
|
2019-03-23T13:53:06.000Z
|
2022-03-28T15:15:03.000Z
|
ch07/07_09.py
|
leeseedong/book-cryptocurrency
|
58c0bb3f5a80f8cc73ba47c4839be3bd33c9d67c
|
[
"Apache-2.0"
] | 3
|
2021-04-14T14:31:26.000Z
|
2021-05-09T13:46:14.000Z
|
ch07/07_09.py
|
leeseedong/book-cryptocurrency
|
58c0bb3f5a80f8cc73ba47c4839be3bd33c9d67c
|
[
"Apache-2.0"
] | 114
|
2019-03-21T13:43:03.000Z
|
2022-03-31T18:42:11.000Z
|
import pybithumb
import numpy as np
# Volatility-breakout backtest on 2018 daily BTC candles (k = 0.5).
df = pybithumb.get_ohlcv("BTC")
df = df['2018']
# Half of yesterday's high-low range, added to today's open, sets the entry target.
df['range'] = (df['high'] - df['low']) * 0.5
df['target'] = df['open'] + df['range'].shift(1)
# Daily rate of return: buy at the target when the high breaks it, sell at the close.
df['ror'] = np.where(df['high'] > df['target'],
                     df['close'] / df['target'],
                     1)
# Compound the daily returns; [-2] reads the value at the second-to-last candle.
ror = df['ror'].cumprod()[-2]
print(ror)
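# Hedged worked example (synthetic numbers, not exchange data) showing how shift(1)
# turns yesterday's half-range into today's breakout target:
#
#     import pandas as pd
#     demo = pd.DataFrame({'open':  [100, 104, 101],
#                          'high':  [110, 106, 108],
#                          'low':   [ 98, 100,  99],
#                          'close': [105, 102, 107]})
#     demo['range'] = (demo['high'] - demo['low']) * 0.5      # 6.0, 3.0, 4.5
#     demo['target'] = demo['open'] + demo['range'].shift(1)  # NaN, 110.0, 104.0
#
# Day 2 only "trades" if its high (106) exceeds 110.0, so its ror stays 1.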
| 21.5
| 48
| 0.505814
|
13b84123abf3bc2e468a223d7995e489fea72362
| 539
|
py
|
Python
|
plotly/validators/scattergeo/marker/colorbar/titlefont/_size.py
|
gnestor/plotly.py
|
a8ae062795ddbf9867b8578fe6d9e244948c15ff
|
[
"MIT"
] | 12
|
2020-04-18T18:10:22.000Z
|
2021-12-06T10:11:15.000Z
|
plotly/validators/scattergeo/marker/colorbar/titlefont/_size.py
|
gnestor/plotly.py
|
a8ae062795ddbf9867b8578fe6d9e244948c15ff
|
[
"MIT"
] | 1
|
2020-12-15T16:56:11.000Z
|
2020-12-15T16:56:11.000Z
|
plotly/validators/scattergeo/marker/colorbar/titlefont/_size.py
|
gnestor/plotly.py
|
a8ae062795ddbf9867b8578fe6d9e244948c15ff
|
[
"MIT"
] | 6
|
2020-04-18T23:07:08.000Z
|
2021-11-18T07:53:06.000Z
|
import _plotly_utils.basevalidators
class SizeValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self,
plotly_name='size',
parent_name='scattergeo.marker.colorbar.titlefont',
**kwargs
):
super(SizeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop('edit_type', 'calc'),
min=kwargs.pop('min', 1),
role=kwargs.pop('role', 'style'),
**kwargs
)
| 26.95
| 66
| 0.597403
|
9a673d0823617f1e57ab61e645a2b71a7950e3f5
| 1,409
|
py
|
Python
|
card.py
|
catInside/ES_best_team
|
580c567369b03c0f2350bc4c9d7289b092cc104a
|
[
"Apache-2.0"
] | null | null | null |
card.py
|
catInside/ES_best_team
|
580c567369b03c0f2350bc4c9d7289b092cc104a
|
[
"Apache-2.0"
] | null | null | null |
card.py
|
catInside/ES_best_team
|
580c567369b03c0f2350bc4c9d7289b092cc104a
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from character import Character
class Card(object):
def __init__(self, name, point, char_name):
self.name = name
self.point = point
self.char_name = char_name
self.char = None
def __eq__(self, other):
if isinstance(other, Character):
return self.char_name == other.name
else:
return self.name == other.name
def __ne__(self, other):
return self.name != other.name
def __ge__(self, other):
return self.point >= other.point
def __gt__(self, other):
return self.point > other.point
def __le__(self, other):
return self.point <= other.point
def __lt__(self, other):
return self.point < other.point
def __add__(self, other):
return self.point + other.point
def __radd__(self, num):
return self.point + num
def __repr__(self):
return self.char_name + ': ' + self.name
def __str__(self):
return self.char_name + ': ' + self.name
def add_char(self, char):
if char.name == self.char_name:
self.char = char
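# Hedged usage sketch (hypothetical card names): point values drive all comparisons
# and sums, while equality against a Character matches on char_name.
#
#     fireball = Card('Fireball', 7, 'Mage')
#     frostbolt = Card('Frostbolt', 3, 'Mage')
#     fireball > frostbolt        # True  (7 > 3)
#     sum([fireball, frostbolt])  # 10, via __add__/__radd__
#     fireball == some_character  # True when some_character.name == 'Mage'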
| 28.18
| 60
| 0.479063
|
b838f5fe549bb4358128f3f652c93844cc7b1dcc
| 480
|
py
|
Python
|
Fractions/__init__.py
|
alexcamargos/Learning_Python_Programming
|
f1cce9f85a672468b6ed1eb98dea9f7c09443722
|
[
"MIT"
] | 2
|
2021-06-04T23:39:14.000Z
|
2021-09-15T05:36:35.000Z
|
Fractions/__init__.py
|
alexcamargos/Learning_Python_Programming
|
f1cce9f85a672468b6ed1eb98dea9f7c09443722
|
[
"MIT"
] | null | null | null |
Fractions/__init__.py
|
alexcamargos/Learning_Python_Programming
|
f1cce9f85a672468b6ed1eb98dea9f7c09443722
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# encoding: utf-8
#
# -----------------------------------------------------------------------------------------------------------------------
# Name: Fractions
# Version: 0.0.1
# Summary: fractions.py - implements rational numbers
#
# Author: Alexsander Lopes Camargos
# Author-email: alcamargos@vivaldi.net
#
# License: MIT
# -----------------------------------------------------------------------------------------------------------------------
| 34.285714
| 122
| 0.341667
|
6c466adbde4791d8151eb64ba2b26f4456c73990
| 1,074
|
py
|
Python
|
socialite/apps/twitter/decorators.py
|
dgouldin/django-socialite
|
4de4060f5d8ab89ae9dd4032e66e526873351ba4
|
[
"MIT"
] | 1
|
2015-01-30T19:05:26.000Z
|
2015-01-30T19:05:26.000Z
|
socialite/apps/twitter/decorators.py
|
dgouldin/django-socialite
|
4de4060f5d8ab89ae9dd4032e66e526873351ba4
|
[
"MIT"
] | null | null | null |
socialite/apps/twitter/decorators.py
|
dgouldin/django-socialite
|
4de4060f5d8ab89ae9dd4032e66e526873351ba4
|
[
"MIT"
] | 2
|
2016-06-26T13:49:31.000Z
|
2021-08-13T01:00:15.000Z
|
import urllib
import urlparse
from django.conf import settings
from django.contrib.auth import logout
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
def twitter_login_required(func):
def inner(request, *args, **kwargs):
        # hack the impersonate parameter out of the querystring before continuing
params = dict(urlparse.parse_qsl(request.META['QUERY_STRING']))
impersonate = params.pop(settings.TWITTER_IMPERSONATE_SESSION_KEY, None)
if impersonate is not None:
logout(request)
if not request.user.is_authenticated():
redirect_url = reverse('twitter_authenticate')
request.META['QUERY_STRING'] = urllib.urlencode(params)
if impersonate:
request.session[settings.TWITTER_IMPERSONATE_SESSION_KEY] = impersonate
return HttpResponseRedirect('%s?%s' % (redirect_url, urllib.urlencode({
'next': request.get_full_path(),
})))
return func(request, *args, **kwargs)
return inner
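# Hedged usage sketch (hypothetical view): wrap any view that needs a Twitter-backed
# login; unauthenticated requests are redirected to twitter_authenticate with
# ?next=<original path> preserved.
#
#     @twitter_login_required
#     def timeline(request):
#         ...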
| 37.034483
| 87
| 0.687151
|
ea865e7832061ca11eeb9a2676141f671a136da5
| 554
|
py
|
Python
|
twobuntu/articles/migrations/0002_article_image.py
|
muhiza/originarities
|
ca0a67363579e6237127386f13baa2ab7a7c2717
|
[
"Apache-2.0"
] | 16
|
2015-01-12T12:25:28.000Z
|
2021-06-22T03:23:44.000Z
|
twobuntu/articles/migrations/0002_article_image.py
|
muhiza/originarities
|
ca0a67363579e6237127386f13baa2ab7a7c2717
|
[
"Apache-2.0"
] | 5
|
2015-01-02T01:23:40.000Z
|
2015-10-22T06:11:40.000Z
|
twobuntu/articles/migrations/0002_article_image.py
|
muhiza/originarities
|
ca0a67363579e6237127386f13baa2ab7a7c2717
|
[
"Apache-2.0"
] | 11
|
2015-01-27T06:23:45.000Z
|
2020-05-20T11:46:12.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('images', '0001_initial'),
('articles', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='article',
name='image',
field=models.ForeignKey(blank=True, to='images.Image', help_text=b'An image highlighting the contents of the article.', null=True),
preserve_default=True,
),
]
| 25.181818
| 143
| 0.613718
|
11597855bc41e62f8ffb42fdfd67ab1b4a3f9d81
| 6,437
|
py
|
Python
|
nagare/validate.py
|
nagareproject/config
|
8832debb888f771f6112ae07698bc355354a783c
|
[
"BSD-3-Clause"
] | null | null | null |
nagare/validate.py
|
nagareproject/config
|
8832debb888f771f6112ae07698bc355354a783c
|
[
"BSD-3-Clause"
] | null | null | null |
nagare/validate.py
|
nagareproject/config
|
8832debb888f771f6112ae07698bc355354a783c
|
[
"BSD-3-Clause"
] | null | null | null |
# Encoding: utf-8
# --
# Copyright (c) 2008-2021 Net-ng.
# All rights reserved.
#
# This software is licensed under the BSD License, as described in
# the file LICENSE.txt, which you should have received as part of
# this distribution.
# --
from functools import partial
from .config_exceptions import SpecificationError, ParameterError
NO_DEFAULT = object()
class Validator(object):
def __getitem__(self, name):
if name.startswith('_'):
raise AttributeError(name)
if name == 'True':
return True
if name == 'False':
return False
return getattr(self, name, name)
@staticmethod
def _number(convert, min, max, default, v, ancestors_names, name):
if v is None:
return default
if isinstance(v, list):
raise ParameterError('not a number {}'.format(repr(v)), sections=ancestors_names, name=name)
try:
v = convert(v)
except ValueError:
raise ParameterError('not a number {}'.format(repr(v)), sections=ancestors_names, name=name)
if (min is not None) and (v < min):
raise ParameterError("the value '{}' is too small".format(v), sections=ancestors_names, name=name)
if (max is not None) and (v > max):
raise ParameterError("the value '{}' is too big".format(v), sections=ancestors_names, name=name)
return v
@classmethod
def integer(cls, min=None, max=None, default=NO_DEFAULT, help=None):
return partial(cls._number, int, min, max, default)
@classmethod
def float(cls, *args, **params):
min = params.get('min')
        max = params.get('max')
default = params.get('default', NO_DEFAULT)
return float(*args) if (args or default is NO_DEFAULT) else partial(cls._number, float, min, max, default)
@staticmethod
def _to_boolean(v):
v = v.strip().lower()
if v in ('true', 'on', 'yes', '1'):
return True
if v in ('false', 'off', 'no', '0'):
return False
raise ValueError('not a boolean {}'.format(repr(v)))
@classmethod
def _boolean(cls, default, v, ancestors_names, name):
error = ParameterError('not a boolean {}'.format(repr(v)), sections=ancestors_names, name=name)
if v is None:
return default
if isinstance(v, bool):
return v
if isinstance(v, list):
raise error
try:
return cls._to_boolean(v)
except ValueError:
raise error
@classmethod
def boolean(cls, default=NO_DEFAULT, help=None):
return partial(cls._boolean, default)
@staticmethod
def _string(default, v, ancestors_names, name):
if v is None:
return default
if isinstance(v, list):
raise ParameterError('not a string {}'.format(repr(v)), sections=ancestors_names, name=name)
return v
@classmethod
def string(cls, default=NO_DEFAULT, help=None):
return partial(cls._string, default)
@staticmethod
def _list(convert, min, max, default, v, ancestors_names, name):
if v is None:
return default
if not isinstance(v, (list, tuple)):
v = v.split(',')
if (min is not None) and (len(v) < min):
            raise ParameterError('not enough elements {}'.format(v), sections=ancestors_names, name=name)
if (max is not None) and (len(v) > max):
raise ParameterError('too many elements {}'.format(v), sections=ancestors_names, name=name)
try:
return [convert(e) for e in v]
except ValueError:
raise ParameterError('invalid value(s) in {}'.format(v), sections=ancestors_names, name=name)
@classmethod
def list(cls, *args, **params):
min = params.get('min')
        max = params.get('max')
default = params.get('default', NO_DEFAULT)
help = params.get('help')
list_constructor = args or (min, max, default, help) == (None, None, NO_DEFAULT, None)
return list(args) if list_constructor else partial(cls._list, str, min, max, default)
@classmethod
def string_list(cls, min=None, max=None, default=NO_DEFAULT, help=None):
return partial(cls._list, str, min, max, default)
force_list = string_list
@classmethod
def _tuple(cls, min, max, default, v, ancestors_names, name):
return tuple(cls._list(str, min, max, default, v, ancestors_names, name))
@classmethod
def tuple(cls, *args, **params):
min = params.get('min')
        max = params.get('max')
default = params.get('default', NO_DEFAULT)
help = params.get('help')
tuple_constructor = args or (min, max, default, help) == (None, None, NO_DEFAULT, None)
return args if tuple_constructor else partial(cls._tuple, min, max, default)
@classmethod
def int_list(cls, min=None, max=None, default=NO_DEFAULT, help=None):
return partial(cls._list, int, min, max, default)
@classmethod
def float_list(cls, min=None, max=None, default=NO_DEFAULT, help=None):
return partial(cls._list, float, min, max, default)
@classmethod
def bool_list(cls, min=None, max=None, default=NO_DEFAULT, help=None):
return partial(cls._list, cls._to_boolean, min, max, default)
@staticmethod
def _option(options, default, v, ancestors_names, name):
if v is None:
return default
if v not in options:
raise ParameterError('not a valid option {}'.format(repr(v)), sections=ancestors_names, name=name)
return v
@classmethod
def option(cls, *args, **params):
default = params.get('default', NO_DEFAULT)
return partial(cls._option, args, default)
def validate(self, expr, v, ancestors_name, name):
try:
validation = eval(expr, {}, self)
if not isinstance(validation, partial):
validation = validation()
return validation(v, ancestors_name, name)
except Exception as e:
e = SpecificationError('invalid specification {}'.format(repr(expr)), sections=ancestors_name, name=name)
e.__cause__ = None
raise e
def get_default_value(self, expr, ancestors_names, name):
return self.validate(expr, None, ancestors_names, name)
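# Hedged usage sketch (illustrative values): a specification is a plain string that is
# evaluated against the Validator and then applied to the raw configuration value.
#
#     v = Validator()
#     v.validate('integer(min=1, max=65535)', '8080', ['server'], 'port')      # -> 8080
#     v.validate('boolean(default=False)', 'on', ['server'], 'debug')          # -> True
#     v.get_default_value('string(default="localhost")', ['server'], 'host')   # -> 'localhost'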
| 31.4
| 117
| 0.614727
|
2dfa2de408ddc95e57f93088944f3b784b9cc8e0
| 8,755
|
py
|
Python
|
spark/spark_admin.py
|
wensheng/spark
|
ab47107d000f0670f4cfe131637f72471a04cfb2
|
[
"MIT"
] | null | null | null |
spark/spark_admin.py
|
wensheng/spark
|
ab47107d000f0670f4cfe131637f72471a04cfb2
|
[
"MIT"
] | null | null | null |
spark/spark_admin.py
|
wensheng/spark
|
ab47107d000f0670f4cfe131637f72471a04cfb2
|
[
"MIT"
] | null | null | null |
import os
import sys
import shutil
import spark
from optparse import OptionParser
join = os.path.join
cur_dir = os.getcwd()
proj_template = join(spark.__path__[0],'proj')
files_dir = join(proj_template,'files')
def main():
usage = "usage: %prog [options] [args]"
parser = OptionParser(usage)
parser.add_option("-c", "--create", type="string", nargs=1, dest="name",
help="create a new spark project")
parser.add_option("--demo", choices=('mysql','sqlite'), dest="db_choice",
help="setup demo wiki controllers and related files\ndb_choice is either mysql or sqlite")
parser.add_option("--admin", action="store_true", dest="admin",
help="setup admin controller")
(options, args) = parser.parse_args()
if options.name:
setup_proj(options.name)
elif options.db_choice:
setup_demo(options.db_choice)
elif options.admin:
setup_admin()
else:
print_help(parser)
#try:
# action = args[0]
#except IndexError:
# print_help(parser)
def print_help(parser):
parser.print_help(sys.stderr)
sys.exit()
def setup_proj(name):
proj_dir = join(cur_dir,name)
os.mkdir(proj_dir)
shutil.copytree(join(proj_template,'scripts'),
join(proj_dir,'scripts'))
shutil.copytree(join(proj_template,'etc'),
join(proj_dir,'etc'))
os.chmod(join(proj_dir,'etc','cgiserver.py'),0755)
os.chmod(join(proj_dir,'etc','python.fcgi'),0755)
shutil.copytree(join(proj_template,'webdir'),
join(proj_dir,'webdir'))
os.mkdir(join(proj_dir,'log'))
os.mkdir(join(proj_dir,'cache'))
os.chmod(join(proj_dir,'cache'),0777)
os.mkdir(join(proj_dir,'public'))
os.mkdir(join(proj_dir,'templates'))
import spark.sprite
import socket
tp1 = spark.sprite.Sprite("apache_modpython.conf",files_dir)
fw1 = open(join(proj_dir,'etc','apache_modpython.conf'),'w')
tp1.set_vars({'proj_dir':proj_dir,'server_name':socket.getfqdn()})
fw1.write("\n".join(tp1.display(1)))
    fw1.close()
fr2 = open(join(files_dir,'lighttpd_python.conf'))
fw2 = open(join(proj_dir,'etc','lighttpd_python.conf'),'w')
fw2.write(fr2.read())
fr2.close()
fw2.close()
def setup_demo(db):
controllers = join(cur_dir,'webdir')
templates = join(cur_dir,'templates')
if not (os.path.isdir(controllers) and os.path.isdir(templates)):
print "You are NOT in a spark project directory"
sys.exit()
import spark.sprite
tp1 = spark.sprite.Sprite("wiki_controller.pytp",files_dir)
fw1 = open(join(controllers,'wiki_controller.py'),'w')
tp2 = spark.sprite.Sprite("ajaxwiki_controller.pytp",files_dir)
fw2 = open(join(controllers,'ajaxwiki_controller.py'),'w')
if db=="mysql":
try: import MySQLdb
except ImportError:
print "you don't have mysql python module installed"
sys.exit()
print "You are about to create a database called sparkdemo."
print "Make sure the user has the priviledge to do that."
user = raw_input("what's db username? [root] >>")
if not user: user = "root"
passwd = raw_input("what's db password? [] >>")
from spark.dbmysql import DbSpark
db = DbSpark(user=user,passwd=passwd)
db.q("""CREATE DATABASE sparkdemo;
USE sparkdemo;
CREATE TABLE wiki(
word VARCHAR(100) NOT NULL UNIQUE,
content TEXT NOT NULL);"""
)
vars_assign = {'driver':'mysql',
'dbstring':'db="sparkdemo",user="%s",passwd="%s"'%(user,passwd),
'ismysql':[{}],
}
tp1.set_vars(vars_assign)
tp2.set_vars(vars_assign)
else:
from spark.dbsqlite import DbSpark
dbname = join(cur_dir,'etc','sparkdemo.db')
db = DbSpark(db=dbname)
db.q("""create table wiki(
word VARCHAR NOT NULL UNIQUE,
content TEXT NOT NULL)""")
db.commit()
vars_assign = {'driver':'sqlite',
'dbstring':'db="%s"' % dbname,
}
tp1.set_vars(vars_assign)
tp2.set_vars(vars_assign)
fw1.write("\n".join(tp1.display(1)))
fw1.close()
fw2.write("\n".join(tp2.display(1)))
fw2.close()
try: import simplejson
except ImportError:
import spark.contribs.ez_setup as ez_setup
ez_setup.main(["simplejson"])
if not os.path.isdir(join(cur_dir,'templates','wiki')):
os.mkdir(join(cur_dir,'templates','wiki'))
if not os.path.isdir(join(cur_dir,'public','js')):
os.mkdir(join(cur_dir,'public','js'))
shutil.copy(join(files_dir,'wiki.html'),join(cur_dir,'templates','wiki'))
shutil.copy(join(files_dir,'awiki.js'),join(cur_dir,'public','js'))
print "For ajaxwiki to work, Dojotoolkit (3.4Mb) is required."
answer = raw_input("Do you want to download and install Dojo now? [y,n]")
if answer in ["y", "Y"]:
dojo_dir = join(cur_dir,'public','js','dojo')
if not os.path.isdir(dojo_dir): os.mkdir(dojo_dir, 0755)
dojof = download2temp('http://download.dojotoolkit.org/release-0.3.1/dojo-0.3.1-ajax.zip')
unzip_install(dojof,dojo_dir)
else:
print "you need to manually download Dojo and install it into public/js/dojo"
def download2temp(url):
import urllib2
fr = urllib2.urlopen(url)
size = int(fr.info()['Content-Length'])
import tempfile
ftmp = open(tempfile.mktemp(),"w+b")
import spark.contribs.progress
meter = spark.contribs.progress.TextMeter()
meter.start(text="Downloading",size=size)
i = 0
while i < size:
ftmp.write(fr.read(8192))
i += 8192
meter.update(i)
meter.end(size)
ftmp.seek(0)
return ftmp
def unzip_install(filename,install_dir):
"""does NOT work in windows with python 2.4.2, because namelist() doesn't work.
will fix or work around
"""
import zipfile
zf = zipfile.ZipFile(filename)
namelist = zf.namelist()
singletop = True
first = namelist[0].split('/')[0]
for name in namelist:
if not name.startswith(first):
singletop = False
break
for name in namelist:
names = name.split('/')
if singletop: names = names[1:]
if not os.path.isdir( join(install_dir,*names[:-1]) ):
os.makedirs(join(install_dir, *names[:-1]) )
if name.endswith('/'): continue
outfile = open(join(install_dir, *names), 'wb')
outfile.write(zf.read(name))
outfile.close()
def setup_admin():
try: import MySQLdb
except ImportError:
print "you don't have mysql python module installed"
sys.exit()
try: import simplejson
except ImportError:
import spark.contribs.ez_setup as ez_setup
ez_setup.main(["simplejson"])
create_adminfile()
controllers = join(cur_dir,'webdir')
templates = join(cur_dir,'templates')
if not (os.path.isdir(controllers) and os.path.isdir(templates)):
print "You are NOT in a spark project directory"
sys.exit()
import spark.sprite
tp1 = spark.sprite.Sprite("admin_controller.pytp",files_dir)
fw1 = open(join(controllers,'admin_controller.py'),'w')
print "Make sure the user has the priviledge to manage one or more databases."
user = raw_input("what's database username? [root] >>")
if not user: user = "root"
passwd = raw_input("what's database password? [] >>")
tp1.set_vars({'dbstring':'user="%s",passwd="%s"'%(user,passwd)})
fw1.write("\n".join(tp1.display(1)))
fw1.close()
if not os.path.isdir(join(cur_dir,'templates','admin')):
os.mkdir(join(cur_dir,'templates','admin'))
if not os.path.isdir(join(cur_dir,'public','js')):
os.mkdir(join(cur_dir,'public','js'))
if not os.path.isdir(join(cur_dir,'public','admin')):
os.mkdir(join(cur_dir,'public','admin'))
shutil.copy(join(files_dir,'table.html'),join(cur_dir,'templates','admin'))
shutil.copy(join(files_dir,'ajax_tables.css'),join(cur_dir,'public','admin'))
print "For admin interface to work, Mochikit (291Kb) is required."
answer = raw_input("Do you want to download and install Mochikit now? [y,n]")
if answer in ["y", "Y"]:
mochi_dir = join(cur_dir,'public','js','MochiKit')
if not os.path.isdir(mochi_dir): os.mkdir(mochi_dir, 0755)
mochif = download2temp('http://mochikit.com/dist/MochiKit-1.3.1.zip')
unzip_install(mochif,mochi_dir)
shutil.copy(join(cur_dir,'public','js','MochiKit','packed','MochiKit','MochiKit.js'),
join(cur_dir,'public','js','MochiKit'))
else:
print "you need to manually download MochiKit and install it into public/js/MochiKit"
def create_adminfile():
import sha, getpass
userdata = {}
fp = join(cur_dir,'etc','admin_passwd')
if os.path.exists(fp):
passwdf = open(fp,'r')
for line in passwdf:
i = line.split(':')
if len(i)>1: userdata[i[0]]=i[1]
passwdf.close()
name = raw_input("Enter your username: ")
if len(name)<2:
print "Username too short!"
sys.exit()
password1 = getpass.getpass("Enter your password(>5): ")
if len(password1)<5:
print "password too short!"
sys.exit()
password1 = sha.new(password1).hexdigest()
password2 = getpass.getpass("Enter password again: ")
password2 = sha.new(password2).hexdigest()
if password1 != password2:
print "passwords not match!"
sys.exit()
userdata[name]=password1
passwdf = open(fp,'w')
for i in userdata:
passwdf.write("%s:%s"%(i,userdata[i]))
passwdf.close()
print "User created."
| 31.721014
| 92
| 0.7004
|
6290dde983b2bf260a0cbce3e4e1a512bf627117
| 1,178
|
py
|
Python
|
drned/drned/choice.py
|
micnovak/drned-xmnr
|
af68d4ab1162b7cdcda61da6e825b61ba0069c66
|
[
"Apache-2.0"
] | 7
|
2019-05-21T22:25:16.000Z
|
2022-01-24T16:06:02.000Z
|
drned/drned/choice.py
|
micnovak/drned-xmnr
|
af68d4ab1162b7cdcda61da6e825b61ba0069c66
|
[
"Apache-2.0"
] | 45
|
2018-10-16T14:35:40.000Z
|
2022-03-24T16:11:25.000Z
|
drned/drned/choice.py
|
micnovak/drned-xmnr
|
af68d4ab1162b7cdcda61da6e825b61ba0069c66
|
[
"Apache-2.0"
] | 6
|
2019-06-17T20:35:19.000Z
|
2021-05-12T13:21:50.000Z
|
class Choice(object):
def __init__(self, children, name):
self.children = {}
self.indexes = {}
self.add(children, name)
self.done = False
def add(self, children, name):
if name not in self.children:
self.children[name] = children
self.indexes[name] = 0
def current(self, name):
return self.children[name][self.indexes[name]]
def joined(self, child, name):
return self.current(name) == child
def increment(self):
for name in self.children:
current = self.current(name)
if current and not current.all_done():
return False
# Increment index
self.indexes[name] += 1
if self.indexes[name] >= len(self.children[name]):
self.indexes[name] = 0
self.done = True
else:
return True
return False
def get_choice(choices, path, children, name):
if path in choices:
choice = choices[path]
choice.add(children, name)
else:
choice = Choice(children, name)
choices[path] = choice
return choice
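# Hedged usage sketch (stub child class for illustration): a Choice advances through
# each named child list only once the current child reports all_done().
#
#     class _Done(object):
#         def all_done(self):
#             return True
#
#     choices = {}
#     c = get_choice(choices, '/config/mode', [_Done(), _Done()], 'case-a')
#     c.increment()   # True  - moved on to the second child
#     c.increment()   # False - wrapped around; c.done is now True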
| 28.047619
| 62
| 0.55348
|
0b1ef82017bc12fcbc5bfd81d5ba603840e67d95
| 3,276
|
py
|
Python
|
tools/div_free_base.py
|
marvin-eisenberger/hamiltonian-interpolation
|
d18c2f401feffc672998c5fa1d50c1de03dba902
|
[
"MIT"
] | 5
|
2021-01-05T23:16:55.000Z
|
2021-07-23T12:26:06.000Z
|
tools/div_free_base.py
|
marvin-eisenberger/hamiltonian-interpolation
|
d18c2f401feffc672998c5fa1d50c1de03dba902
|
[
"MIT"
] | null | null | null |
tools/div_free_base.py
|
marvin-eisenberger/hamiltonian-interpolation
|
d18c2f401feffc672998c5fa1d50c1de03dba902
|
[
"MIT"
] | 1
|
2021-02-22T08:31:05.000Z
|
2021-02-22T08:31:05.000Z
|
import torch
import math
import numpy as np
from torch.nn import Parameter
import torch.sparse
from shape_utils import *
from param import device, device_cpu
import quaternion as quat
from base_tools import *
def compute_eigenvectors_3d(vert, k):
kv = torch.arange(1, k+1, device=device, dtype=torch.float32).unsqueeze(0).unsqueeze(1)
vert = vert.unsqueeze(2) * kv * math.pi
vert_sin = torch.sin(vert)
vert_cos = torch.cos(vert) * kv
sin_x = vert_sin[:, 0, :].unsqueeze(2).unsqueeze(3)
sin_y = vert_sin[:, 1, :].unsqueeze(1).unsqueeze(3)
sin_z = vert_sin[:, 2, :].unsqueeze(1).unsqueeze(2)
cos_x = vert_cos[:, 0, :].unsqueeze(2).unsqueeze(3)
cos_y = vert_cos[:, 1, :].unsqueeze(1).unsqueeze(3)
cos_z = vert_cos[:, 2, :].unsqueeze(1).unsqueeze(2)
phi = torch.cat(((cos_x * sin_y * sin_z).unsqueeze(1),
(sin_x * cos_y * sin_z).unsqueeze(1),
(sin_x * sin_y * cos_z).unsqueeze(1)), 1)
scale_fac = torch.sqrt(kv.unsqueeze(2) ** 2 + kv.unsqueeze(3) ** 2) ** (-1)
scale_fac = scale_fac.transpose(1, 3).unsqueeze(4)
scale_fac = torch.cat((scale_fac.unsqueeze(1).repeat_interleave(k, 1),
scale_fac.unsqueeze(2).repeat_interleave(k, 2),
scale_fac.unsqueeze(3).repeat_interleave(k, 3)), 5)
phi = phi.transpose(1, 4).unsqueeze(5).unsqueeze(6)
phi = torch.sum(hat_matrix.unsqueeze(0).unsqueeze(0).unsqueeze(0).unsqueeze(0) * phi, 4)
phi = phi * scale_fac
phi = phi.transpose(1, 4).reshape(vert.shape[0], 3, -1).transpose(1, 2)
return phi
def tensor_prod_velocity(phi, a):
return torch.bmm(phi.permute((2, 0, 1)), a.unsqueeze(0).unsqueeze(2).repeat(3, 1, 1)).permute((1, 2, 0)).squeeze()
def div_free_trans(velo_t, vert_t, k):
n_feat = 3 * k ** 3
phi = compute_eigenvectors_3d(vert_t, k)
M = my_eye(n_feat) * 1e-3
for d in range(3):
M = M + torch.mm(phi[..., d].transpose(0, 1), phi[..., d])
M = M.unsqueeze(0)
phi = phi.permute([2, 0, 1])
xi_d = torch.bmm(phi.transpose(1, 2), velo_t.unsqueeze(2).permute([1, 0, 2]))
xi_d, _ = torch.solve(xi_d, M)
velo_t = torch.bmm(phi, xi_d)
velo_t = velo_t.permute([1, 2, 0]).squeeze()
return velo_t, xi_d
def apply_field(vert_t, xi_d, k):
phi = compute_eigenvectors_3d(vert_t, k)
phi = phi.permute([2, 0, 1])
velo_t = torch.bmm(phi, xi_d)
velo_t = velo_t.permute([1, 2, 0]).squeeze()
return velo_t
class Rigid(torch.nn.Module):
def __init__(self):
super().__init__()
self.translation = Parameter(torch.zeros([3], dtype=torch.float32, device=device), requires_grad=True)
self.rotation = Parameter(torch.as_tensor([1, 0, 0, 0], dtype=torch.float32, device=device), requires_grad=True)
def forward(self, vert):
vert = quat.qrot((self.rotation / (self.rotation.norm())).repeat(vert.shape[0], 1), vert - 0.5)
vert = vert + self.translation.unsqueeze(0) + 0.5
return vert
def detach_(self):
self.translation.requires_grad_(False)
self.rotation.requires_grad_(False)
def detach(self):
self.detach_()
return self
if __name__ == "__main__":
print("main of div_free_base.py")
| 30.055046
| 120
| 0.625153
|
bd45238f75d96d54115593c1c93ef151a0fd7267
| 847
|
py
|
Python
|
programmers/lv3/17676.py
|
KLumy/Basic-Algorithm
|
e52e4200c1955a9062569814ff3418dd06666845
|
[
"MIT"
] | 1
|
2021-01-22T15:58:32.000Z
|
2021-01-22T15:58:32.000Z
|
programmers/lv3/17676.py
|
KLumy/Basic-Algorithm
|
e52e4200c1955a9062569814ff3418dd06666845
|
[
"MIT"
] | null | null | null |
programmers/lv3/17676.py
|
KLumy/Basic-Algorithm
|
e52e4200c1955a9062569814ff3418dd06666845
|
[
"MIT"
] | null | null | null |
from typing import List
import heapq
def solution(lines: List[str]) -> int:
    # Parse each log line into a (start, end) interval in milliseconds.
    traffic_log = []
    for line in lines:
        date, time, delay = line.split()
        time = time.split(":")
        time = (
            3600000 * int(time[0]) + 60000 * int(time[1]) + int(float(time[2]) * 1000)
        )
        delay = int(float(delay[:-1]) * 1000)
        # The log records the completion time; the request started delay-1 ms earlier.
        traffic_log.append((time - delay + 1, time))
    traffic_log.sort()
    max_traffic = 1
    # Min-heap of end times for requests that can still share a 1-second window.
    queue = []
    for start, end in traffic_log:
        # Drop requests that finished too long ago to share a 1-second window with this one.
        while queue and queue[0] <= start - 1000:
            heapq.heappop(queue)
        heapq.heappush(queue, end)
        max_traffic = len(queue) if len(queue) > max_traffic else max_traffic
    return max_traffic
if __name__ == "__main__":
i = ["2016-09-15 01:00:04.001 2.0s", "2016-09-15 01:00:07.000 2s"]
print(solution(i))
| 24.911765
| 86
| 0.57379
|
dfa092cce2f01e3530a85da95c547bc44ff85e51
| 288
|
py
|
Python
|
urdudataste/arynews/arynews/pipelines.py
|
Mubashar-arb/urdu-dataset
|
16b7db91d93d0833fc41fbf9b396b05d92cd7df2
|
[
"MIT"
] | 2
|
2021-07-12T15:14:44.000Z
|
2021-07-16T14:27:30.000Z
|
urdudataste/arynews/arynews/pipelines.py
|
Mubashar-arb/urdu-dataset
|
16b7db91d93d0833fc41fbf9b396b05d92cd7df2
|
[
"MIT"
] | null | null | null |
urdudataste/arynews/arynews/pipelines.py
|
Mubashar-arb/urdu-dataset
|
16b7db91d93d0833fc41fbf9b396b05d92cd7df2
|
[
"MIT"
] | 1
|
2021-07-14T06:31:33.000Z
|
2021-07-14T06:31:33.000Z
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
class ArynewsPipeline(object):
def process_item(self, item, spider):
return item
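# Hedged sketch (settings.py, illustrative priority value): enable the pipeline as the
# comment above suggests.
#
#     ITEM_PIPELINES = {
#         'arynews.pipelines.ArynewsPipeline': 300,
#     }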
| 24
| 65
| 0.711806
|
edae53f608072fcfe8f65b2f540f04766a643822
| 1,263
|
py
|
Python
|
src/android/toga_android/app.py
|
Donyme/toga
|
2647c7dc5db248025847e3a60b115ff51d4a0d4a
|
[
"BSD-3-Clause"
] | null | null | null |
src/android/toga_android/app.py
|
Donyme/toga
|
2647c7dc5db248025847e3a60b115ff51d4a0d4a
|
[
"BSD-3-Clause"
] | null | null | null |
src/android/toga_android/app.py
|
Donyme/toga
|
2647c7dc5db248025847e3a60b115ff51d4a0d4a
|
[
"BSD-3-Clause"
] | null | null | null |
from android import PythonActivity
from .window import Window
class MainWindow(Window):
pass
class TogaApp:
def __init__(self, app):
self._interface = app
def onStart(self):
print("Toga app: onStart")
    def onResume(self):
        print("Toga app: onResume")
def onPause(self):
print("Toga app: onPause")
def onStop(self):
print("Toga app: onStop")
def onDestroy(self):
print("Toga app: onDestroy")
def onRestart(self):
print("Toga app: onRestart")
class App:
_MAIN_WINDOW_CLASS = MainWindow
def __init__(self, interface):
self.interface = interface
self.interface._impl = self
def create(self):
# Connect this app to the PythonActivity
self._listener = TogaApp(self)
# Set the Python activity listener to be this app.
self.native = PythonActivity.setListener(self._listener)
self.startup()
def open_document(self, fileURL):
print("Can't open document %s (yet)" % fileURL)
def main_loop(self):
# Main loop is a no-op on Android; the app loop is integrated with the
# main Android event loop.
pass
| 21.40678
| 78
| 0.625495
|
7a57b82c3f6ef4ffb82de994f2376cbf530c312f
| 2,852
|
py
|
Python
|
train_interaction.py
|
zwxxxuan/EPI
|
1c48994afc97518a4d8e6df7f6cabaa8792bb425
|
[
"MIT"
] | 28
|
2019-02-22T05:00:10.000Z
|
2021-09-23T05:12:59.000Z
|
train_interaction.py
|
zwxxxuan/EPI
|
1c48994afc97518a4d8e6df7f6cabaa8792bb425
|
[
"MIT"
] | 3
|
2019-06-04T05:38:42.000Z
|
2019-10-27T03:51:56.000Z
|
train_interaction.py
|
zwxxxuan/EPI
|
1c48994afc97518a4d8e6df7f6cabaa8792bb425
|
[
"MIT"
] | 1
|
2020-09-04T02:10:09.000Z
|
2020-09-04T02:10:09.000Z
|
from rllab.baselines.linear_feature_baseline import LinearFeatureBaseline
from rllab.envs.normalized_env import normalize
from rllab.envs.gym_env import GymEnv
from sandbox.rocky.tf.envs.base import TfEnv
from sandbox.rocky.tf.policies.gaussian_mlp_policy import GaussianMLPPolicy
import pickle
import rllab.misc.logger as logger
import os.path as osp
import datetime
import tensorflow as tf
import argparse
from EPI.interaction_policy.trpo import TRPO
from EPI.interaction_policy.prediction_model import PredictionModel
import EPI
def main():
parser = argparse.ArgumentParser()
parser.add_argument('name')
parser.add_argument('-e', type=int)
parser.add_argument('-r', default=0.5, type=float)
args = parser.parse_args()
name = args.name
if 'Striker' in name:
EPI.init('striker', prediction_reward_scale=args.r, embedding_dimension=args.e)
elif 'Hopper' in name:
EPI.init('hopper', prediction_reward_scale=args.r, num_of_envs=100, num_of_params=8,
embedding_dimension=int(args.e))
log_dir = setup_logger(exp_name=name)
logger.log("EPI:DEFAULT_REWARD_SCALE:" + str(EPI.DEFAULT_REWARD_SCALE))
logger.log("EPI:PREDICTION_REWARD_SCALE:" + str(EPI.PREDICTION_REWARD_SCALE))
logger.log("EPI:NUM_OF_PARAMS:" + str(EPI.NUM_OF_PARAMS))
logger.log("EPI:EMBEDDING_DIMENSION:" + str(EPI.EMBEDDING_DIMENSION))
logger.log("EPI:LOSS_TYPE:" + str(EPI.LOSS_TYPE))
env = TfEnv(normalize(GymEnv(name + '-v0')))
policy = GaussianMLPPolicy(
name="policy",
env_spec=env.spec,
hidden_sizes=(32, 32),
)
baseline = LinearFeatureBaseline(env_spec=env.spec)
algo = TRPO(
env=env,
policy=policy,
baseline=baseline,
batch_size=10000,
max_path_length=10,
n_itr=500,
discount=0.99,
step_size=0.01,
)
prediction_model = PredictionModel(log_dir)
sess = tf.Session()
sess.__enter__()
algo.train(sess=sess, pred_model=prediction_model)
pickle.dump(algo, open(log_dir + "/algo.p", "wb")) # need sess
sess.close()
close_logger(log_dir)
def setup_logger(exp_name=''):
# Logging info
now = datetime.datetime.now()
exp_name = 'Exp' + now.strftime("%y%m%d_") + exp_name
n = 0
while osp.exists('./data/' + exp_name + '_' + str(n)):
n = n + 1
exp_name = exp_name + '_' + str(n)
log_dir = './data/' + exp_name
logger.add_text_output(osp.join(log_dir, 'debug.log'))
logger.add_tabular_output(osp.join(log_dir, 'progress.csv'))
logger.push_prefix("[%s] " % exp_name)
return log_dir
def close_logger(log_dir):
logger.remove_tabular_output(osp.join(log_dir, 'progress.csv'))
logger.remove_text_output(osp.join(log_dir, 'debug.log'))
logger.pop_prefix()
if __name__ == '__main__':
main()
| 31.340659
| 92
| 0.690393
|
d74b76ad57d48d1af7a80ea329a9edbf60c2b541
| 2,093
|
py
|
Python
|
prudentia/cli.py
|
StarterSquad/prudentia
|
44a0a8bd54a3dfcd46c16ed295ca660bb3865368
|
[
"MIT"
] | 29
|
2015-02-04T06:39:12.000Z
|
2019-02-26T08:50:56.000Z
|
prudentia/cli.py
|
StarterSquad/prudentia
|
44a0a8bd54a3dfcd46c16ed295ca660bb3865368
|
[
"MIT"
] | 44
|
2015-01-05T16:08:12.000Z
|
2018-12-29T20:40:42.000Z
|
prudentia/cli.py
|
StarterSquad/prudentia
|
44a0a8bd54a3dfcd46c16ed295ca660bb3865368
|
[
"MIT"
] | 7
|
2015-01-30T17:28:49.000Z
|
2021-02-14T23:43:26.000Z
|
import argparse
import os
import sys
from os import path
from ansible import __version__ as ansible_ver
from . import __version__ as prudentia_ver
# Setting Ansible config file environment variable as first thing
cwd = path.dirname(path.realpath(__file__))
os.environ['ANSIBLE_CONFIG'] = path.join(cwd, 'ansible.cfg')
os.environ['ANSIBLE_ROLES_PATH'] = path.join(cwd, 'roles') + ':/etc/ansible/roles'
os.environ['ANSIBLE_LOOKUP_PLUGINS'] = path.join(cwd, 'plugins', 'lookup') + \
':~/.ansible/plugins/lookup:/usr/share/ansible/plugins/lookup'
os.environ['ANSIBLE_ACTION_PLUGINS'] = path.join(cwd, 'plugins', 'action') + \
':~/.ansible/plugins/action:/usr/share/ansible/plugins/action'
os.environ['ANSIBLE_LIBRARY'] = path.join(cwd, 'modules')
from prudentia.digital_ocean import DigitalOceanCli
from prudentia.local import LocalCli
from prudentia.ssh import SshCli
from prudentia.vagrant import VagrantCli
Providers = {
'local': LocalCli,
'ssh': SshCli,
'vagrant': VagrantCli,
'digital-ocean': DigitalOceanCli
}
def parse(args=None):
parser = argparse.ArgumentParser(
prog='Prudentia',
description='A useful Continuous Deployment toolkit.'
)
parser.add_argument('-v', '--version', action='version',
version='%(prog)s ' + prudentia_ver + ', Ansible ' + ansible_ver)
parser.add_argument('provider', choices=Providers.keys(),
help='use one of the available providers')
parser.add_argument('commands', nargs='*', default='',
help='optional quoted list of commands to run with the chosen provider')
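    # called with no arguments at all: print usage and exit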
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser.parse_args(args)
def run(args):
chosen_cli = Providers[args.provider]()
if args.commands:
for c in args.commands:
print ("Executing: '{0}'\n".format(c))
chosen_cli.onecmd(c)
else:
chosen_cli.cmdloop()
return chosen_cli.provider.provisioned
| 36.086207
| 101
| 0.655518
|
870a5b54de48a268106166a740702e42685f1dd9
| 264
|
py
|
Python
|
loghub/emails.py
|
mskasal/loghub
|
b4821759012468d9003ec1119c06b8db9c3a17d0
|
[
"MIT"
] | null | null | null |
loghub/emails.py
|
mskasal/loghub
|
b4821759012468d9003ec1119c06b8db9c3a17d0
|
[
"MIT"
] | null | null | null |
loghub/emails.py
|
mskasal/loghub
|
b4821759012468d9003ec1119c06b8db9c3a17d0
|
[
"MIT"
] | null | null | null |
from flask_mail import Message  # the flask.ext.* import path was removed in Flask 1.0
from loghub import mail
def send_email(subject, sender, recipients, text_body, html_body):
    msg = Message(subject, sender=sender, recipients=recipients)
    msg.body = text_body
    msg.html = html_body
    mail.send(msg)
| 33
| 68
| 0.738636
|
c7a18f6b2dc263a28bbb7cb8d8990ce3618a2615
| 8,334
|
py
|
Python
|
test/test_who.py
|
rliebz/whoswho
|
0c411e418c240fcec6ea0a23d15bd003056c65d0
|
[
"MIT"
] | 28
|
2018-02-14T23:14:59.000Z
|
2021-07-08T07:24:54.000Z
|
test/test_who.py
|
rliebz/whoswho
|
0c411e418c240fcec6ea0a23d15bd003056c65d0
|
[
"MIT"
] | 1
|
2019-01-21T15:25:49.000Z
|
2019-01-23T19:03:06.000Z
|
test/test_who.py
|
rliebz/whoswho
|
0c411e418c240fcec6ea0a23d15bd003056c65d0
|
[
"MIT"
] | 2
|
2018-09-27T05:46:46.000Z
|
2020-07-16T05:19:02.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import unittest
import nose
from nose.tools import *
from whoswho import who, config
from nameparser.config.titles import TITLES as NAMEPARSER_TITLES
class TestMatch(unittest.TestCase):
def setUp(self):
self.name = 'Robert Evan Liebowitz'
def test_string(self):
# Only relevant for python 2.X
assert_true(who.match(self.name, str('Robert Liebowitz')))
def test_unicode(self):
name = self.name
assert_true(who.match(name, 'attaché Robert Evan Liebowitz'))
assert_true(who.match(name, 'Rōbért Èvān Lîęböwitz'))
assert_false(who.match(name, 'Rōbért Èvān Lęîböwitz'))
def test_name_and_initials(self):
assert_true(who.match(self.name, 'R. Evan Liebowitz'))
assert_true(who.match(self.name, 'Robert E. Liebowitz'))
assert_true(who.match(self.name, 'R. E. Liebowitz'))
def test_different_number_initials(self):
assert_true(who.match(self.name, 'Robert Liebowitz'))
assert_true(who.match(self.name, 'R. Liebowitz'))
assert_false(who.match(self.name, 'Robert E. E. Liebowitz'))
assert_false(who.match(self.name, 'R. E. E. Liebowitz'))
assert_true(who.match('R.E.E. Liebowitz', 'R. E. E. Liebowitz'))
def test_different_initials(self):
assert_false(who.match(self.name, 'E. R. Liebowitz'))
assert_false(who.match(self.name, 'E. Liebowitz'))
assert_false(who.match(self.name, 'R. V. Liebowitz'))
assert_false(who.match(self.name, 'O. E. Liebowitz'))
def test_short_names(self):
assert_true(who.match(self.name, 'Rob Liebowitz'))
# TODO: Should these be true?
assert_false(who.match(self.name, 'Bert Liebowitz'))
assert_false(who.match(self.name, 'Robbie Liebowitz'))
def test_suffixes(self):
name = 'Robert Liebowitz Jr'
assert_true(who.match(name, 'Robert Liebowitz'))
assert_true(who.match(name, 'Robert Liebowitz Jr'))
assert_true(who.match(name, 'Robert Liebowitz, PhD'))
assert_false(who.match(name, 'Robert Liebowitz, Sr'))
assert_false(who.match(name, 'Robert Liebowitz, Sr, PhD'))
assert_true(who.match(name, 'Robert Liebowitz, Jr, PhD'))
def test_equivalent_suffixes(self):
name = 'Robert Liebowitz Jr'
assert_true(who.match(name, 'Robert Liebowitz Jnr'))
assert_false(who.match(name, 'Robert Liebowitz Snr'))
def test_titles(self):
name = 'Mr. Robert Liebowitz'
assert_true(who.match(name, 'Robert Liebowitz'))
assert_true(who.match(name, 'Sir Robert Liebowitz'))
assert_true(who.match(name, 'Dr. Robert Liebowitz'))
assert_false(who.match(name, 'Mrs. Robert Liebowitz'))
def test_nickname(self):
name = 'Robert "Evan" Liebowitz'
assert_true(who.match(name, 'Evan Liebowitz'))
assert_true(who.match('Evan Liebowitz', name))
assert_false(who.match(name, 'Wrongbert Lieobwitz'))
assert_false(who.match(name, 'Robert Evan'))
assert_false(who.match(name, 'Evan Liebowitz',
options={'check_nickname': False}))
class TestRatio(unittest.TestCase):
def setUp(self):
self.name = 'Robert Evan Liebowitz'
def test_string(self):
# Only relevant for python 2.X
assert_equal(who.ratio(self.name, str('Robert Liebowitz')), 100)
def test_unicode(self):
name = self.name
assert_equal(who.ratio(name, 'attaché Robert Evan Liebowitz'), 100)
assert_equal(who.ratio(name, 'Rōbért Èvān Lîęböwitz'), 100)
assert_true(who.ratio(name, 'Rōbért Èvān Lęîböwitz') < 100)
def test_name_and_initials(self):
assert_equal(who.ratio(self.name, 'R. Evan Liebowitz'), 100)
assert_equal(who.ratio(self.name, 'Robert E. Liebowitz'), 100)
assert_equal(who.ratio(self.name, 'R. E. Liebowitz'), 100)
def test_different_number_initials(self):
assert_equal(who.ratio(self.name, 'Robert Liebowitz'), 100)
assert_equal(who.ratio(self.name, 'R. Liebowitz'), 100)
assert_true(who.ratio(self.name, 'Robert E. E. Liebowitz') < 100)
assert_true(who.ratio(self.name, 'R. E. E. Liebowitz') < 100)
assert_equal(who.ratio('R.E.E. Liebowitz', 'R. E. E. Liebowitz'), 100)
def test_different_initials(self):
assert_true(who.ratio(self.name, 'E. R. Liebowitz') < 100)
assert_true(who.ratio(self.name, 'E. Liebowitz') < 100)
assert_true(who.ratio(self.name, 'R. V. Liebowitz') < 100)
assert_true(who.ratio(self.name, 'O. E. Liebowitz') < 100)
assert_true(who.ratio(self.name, 'E. R. Liebowitz') <
who.ratio(self.name, 'E. E. Liebowitz'))
assert_true(who.ratio(self.name, 'E. R. Liebowitz') <
who.ratio(self.name, 'R. R. Liebowitz'))
assert_true(who.ratio(self.name, 'E. R. Liebowitz') <
who.ratio(self.name, 'E. Liebowitz'))
def test_short_names(self):
assert_true(who.ratio(self.name, 'Rob Liebowitz'))
assert_true(who.ratio(self.name, 'Bert Liebowitz') < 100)
assert_true(who.ratio(self.name, 'Robbie Liebowitz') < 100)
assert_true(who.ratio(self.name, 'xxxxx Liebowitz') <
who.ratio(self.name, 'Bobby Liebowitz'))
def test_suffixes(self):
name = 'Robert Liebowitz Jr'
assert_equal(who.ratio(name, 'Robert Liebowitz'), 100)
assert_equal(who.ratio(name, 'Robert Liebowitz Jr'), 100)
assert_equal(who.ratio(name, 'Robert Liebowitz, PhD'), 100)
assert_false(who.ratio(name, 'Robert Liebowitz, Sr'))
assert_false(who.ratio(name, 'Robert Liebowitz, Sr, PhD'))
assert_equal(who.ratio(name, 'Robert Liebowitz, Jr, PhD'), 100)
# Suffix doesn't change a match
assert_equal(who.ratio(name, 'Zachary Liebowitz, Jr'),
who.ratio(name, 'Zachary Liebowitz'))
def test_equivalent_suffixes(self):
name = 'Robert Liebowitz Jr'
assert_equal(who.ratio(name, 'Robert Liebowitz Jnr'), 100)
assert_false(who.ratio(name, 'Robert Liebowitz Snr'))
def test_titles(self):
name = 'Mr. Robert Liebowitz'
assert_equal(who.ratio(name, 'Robert Liebowitz'), 100)
assert_equal(who.ratio(name, 'Sir Robert Liebowitz'), 100)
assert_equal(who.ratio(name, 'Dr. Robert Liebowitz'), 100)
assert_false(who.ratio(name, 'Mrs. Robert Liebowitz'))
# Title doesn't change a match
assert_equal(who.ratio(name, 'Dr. Zachary Liebowitz'),
who.ratio(name, 'Zachary Liebowitz'))
def test_nickname(self):
name = 'Robert "Evan" Liebowitz'
assert_equal(who.ratio(name, 'Evan Liebowitz'), 100)
assert_equal(who.ratio('Evan Liebowitz', name), 100)
assert_true(who.ratio(name, 'Wrongbert Lieobwitz') < 100)
assert_true(who.ratio(name, 'Robert Evan') < 100)
assert_true(who.ratio(name, 'Evan Liebowitz',
options={'check_nickname': False}) < 100)
assert_true(who.ratio(name, 'xxxx Liebowitz') <
who.ratio(name, 'xvax Liebowitz'))
assert_equal(who.ratio(name, 'xxxx Liebowitz'),
who.ratio(name, 'xvax Liebowitz', 'strict'))
# TODO: Should we ensure that the metadata is up to date?
@nottest
class TestConfig(unittest.TestCase):
def test_titles_all_defined(self):
"""
Check if list of titles is up to date with nameparser
"""
all_titles = (
config.MALE_TITLES |
config.FEMALE_TITLES |
config.GENDERLESS_TITLES
)
assert_equal(all_titles, NAMEPARSER_TITLES)
def test_suffixes_all_defined(self):
"""
Check if list of suffixes is up to date with nameparser
"""
from nameparser.config.suffixes import SUFFIX_ACRONYMS, SUFFIX_NOT_ACRONYMS
all_suffixes = (
config.UNIQUE_SUFFIXES |
config.MISC_SUFFIXES
)
nameparser_suffixes = (
SUFFIX_ACRONYMS |
SUFFIX_NOT_ACRONYMS
)
assert_equal(all_suffixes, nameparser_suffixes)
if __name__ == '__main__':
nose.main()
| 40.852941
| 83
| 0.636069
|
21bf09393f379989fa9a94930ed6fd4c2689b37f
| 2,778
|
py
|
Python
|
tests/test_transfer.py
|
pacdao/gov-token
|
7ce7b8916c70e145f311e4190507b245dbeb2c12
|
[
"MIT"
] | null | null | null |
tests/test_transfer.py
|
pacdao/gov-token
|
7ce7b8916c70e145f311e4190507b245dbeb2c12
|
[
"MIT"
] | null | null | null |
tests/test_transfer.py
|
pacdao/gov-token
|
7ce7b8916c70e145f311e4190507b245dbeb2c12
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
import brownie
import pytest
@pytest.mark.skip(reason="no transfer")
def test_sender_balance_decreases(accounts, token):
sender_balance = token.balanceOf(accounts[0])
amount = sender_balance // 4
token.transfer(accounts[1], amount, {"from": accounts[0]})
assert token.balanceOf(accounts[0]) == sender_balance - amount
@pytest.mark.skip(reason="no transfer")
def test_receiver_balance_increases(accounts, token):
receiver_balance = token.balanceOf(accounts[1])
amount = token.balanceOf(accounts[0]) // 4
token.transfer(accounts[1], amount, {"from": accounts[0]})
assert token.balanceOf(accounts[1]) == receiver_balance + amount
def test_total_supply_not_affected(accounts, token):
total_supply = token.totalSupply()
amount = token.balanceOf(accounts[0])
with brownie.reverts("No transfer"):
token.transfer(accounts[1], amount, {"from": accounts[0]})
assert token.totalSupply() == total_supply
@pytest.mark.skip(reason="no transfer")
def test_returns_true(accounts, token):
amount = token.balanceOf(accounts[0])
tx = token.transfer(accounts[1], amount, {"from": accounts[0]})
assert tx.return_value is True
@pytest.mark.skip(reason="no transfer")
def test_transfer_full_balance(accounts, token):
amount = token.balanceOf(accounts[0])
receiver_balance = token.balanceOf(accounts[1])
token.transfer(accounts[1], amount, {"from": accounts[0]})
assert token.balanceOf(accounts[0]) == 0
assert token.balanceOf(accounts[1]) == receiver_balance + amount
def test_transfer_zero_tokens(accounts, token):
sender_balance = token.balanceOf(accounts[0])
receiver_balance = token.balanceOf(accounts[1])
with brownie.reverts():
token.transfer(accounts[1], 0, {"from": accounts[0]})
assert token.balanceOf(accounts[0]) == sender_balance
assert token.balanceOf(accounts[1]) == receiver_balance
@pytest.mark.skip(reason="no transfer")
def test_transfer_to_self(accounts, token, owner):
sender_balance = token.balanceOf(accounts[0])
amount = sender_balance // 4
token.transfer(accounts[0], amount, {"from": accounts[0]})
assert token.balanceOf(accounts[0]) == sender_balance
@pytest.mark.skip(reason="no transfer")
def test_insufficient_balance(accounts, token):
balance = token.balanceOf(accounts[0])
with brownie.reverts():
token.transfer(accounts[1], balance + 1, {"from": accounts[0]})
@pytest.mark.skip(reason="no transfer")
def test_transfer_event_fires(accounts, token):
amount = token.balanceOf(accounts[0])
tx = token.transfer(accounts[1], amount, {"from": accounts[0]})
assert len(tx.events) == 1
assert tx.events["Transfer"].values() == [accounts[0], accounts[1], amount]
| 30.527473
| 79
| 0.711303
|
8f526e9eff86ecaae78fbf6c9fde06132bae1365
| 287
|
py
|
Python
|
chainer/utils/array.py
|
takeratta/chainer
|
02686e98cd6dc8f20979a1f3a79130f076cbfc6c
|
[
"MIT"
] | 7
|
2017-05-08T07:02:40.000Z
|
2018-12-02T18:35:39.000Z
|
chainer/utils/array.py
|
takeratta/chainer
|
02686e98cd6dc8f20979a1f3a79130f076cbfc6c
|
[
"MIT"
] | null | null | null |
chainer/utils/array.py
|
takeratta/chainer
|
02686e98cd6dc8f20979a1f3a79130f076cbfc6c
|
[
"MIT"
] | 1
|
2021-05-27T16:52:11.000Z
|
2021-05-27T16:52:11.000Z
|
import numpy
from chainer import cuda
def as_vec(x):
return x.ravel()
def as_mat(x):
return x.reshape(len(x), -1)
def empty_like(x):
if cuda.available and isinstance(x, cuda.ndarray):
return cuda.cupy.empty_like(x)
else:
return numpy.empty_like(x)
| 15.105263
| 54
| 0.655052
|
675dd6d84ca6e1b7163035d8cba62b570a642b90
| 545
|
py
|
Python
|
publishconf.py
|
tijptjik/thegodsproject
|
8014f7b546289b9ca3aec64f6492b8312628d0a2
|
[
"MIT"
] | null | null | null |
publishconf.py
|
tijptjik/thegodsproject
|
8014f7b546289b9ca3aec64f6492b8312628d0a2
|
[
"MIT"
] | null | null | null |
publishconf.py
|
tijptjik/thegodsproject
|
8014f7b546289b9ca3aec64f6492b8312628d0a2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
# This file is only used if you use `make publish` or
# explicitly specify it as your config file.
import os
import sys
sys.path.append(os.curdir)
from pelicanconf import *
SITEURL = 'http://thegodsproject.com'
RELATIVE_URLS = False
FEED_ALL_ATOM = 'feeds/all.atom.xml'
CATEGORY_FEED_ATOM = 'feeds/%s.atom.xml'
DELETE_OUTPUT_DIRECTORY = True
# Following items are often useful when publishing
#DISQUS_SITENAME = ""
GOOGLE_ANALYTICS = "UA-85430564-1"
| 21.8
| 53
| 0.752294
|
8131b3e77af825166020447c9a094c9068cb4ac7
| 8,735
|
py
|
Python
|
examples/examples/views.py
|
flying-sausages/iommi
|
1cf0a2c513f37392d93df7a5ffeb9958f27f4216
|
[
"BSD-3-Clause"
] | null | null | null |
examples/examples/views.py
|
flying-sausages/iommi
|
1cf0a2c513f37392d93df7a5ffeb9958f27f4216
|
[
"BSD-3-Clause"
] | null | null | null |
examples/examples/views.py
|
flying-sausages/iommi
|
1cf0a2c513f37392d93df7a5ffeb9958f27f4216
|
[
"BSD-3-Clause"
] | null | null | null |
import json
from datetime import (
date,
datetime,
)
from pathlib import Path
from django.conf import settings
from django.contrib.auth.models import User
from django.db import OperationalError
from django.urls import (
include,
path,
reverse,
)
from tri_declarative import (
get_members,
is_shortcut,
LAST,
Namespace,
Shortcut,
)
import iommi.part
import iommi.style
import iommi.traversable
from iommi import (
Column,
Fragment,
Header,
html,
Page,
Table,
)
from iommi._web_compat import HttpResponseRedirect
from iommi.admin import (
Admin,
Auth,
)
from iommi.base import (
items,
keys,
)
from iommi.form import (
Field,
Form,
)
from iommi.menu import (
Menu,
MenuItem,
)
from iommi.style import validate_styles
from .models import (
Album,
Artist,
Foo,
TBar,
TFoo,
Track,
)
# Use this function in your code to check that the style is configured correctly. Pass in all stylable classes in your system. For example if you have subclasses for Field, pass these here.
validate_styles()
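# Seed the example database (admin user, dummy Foo/TBar rows, scraped artist/album/track data) so the demo pages have content.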
def ensure_objects():
if not User.objects.exists():
User.objects.create(username='admin', is_staff=True, first_name='Tony', last_name='Iommi')
for i in range(100 - Foo.objects.count()):
Foo.objects.create(name=f'X{i}', a=i, b=True)
if not TBar.objects.all():
# Fill in some dummy data if none exists
for i in range(200):
f = TFoo.objects.create(a=i, name='Foo: %s' % i)
TBar.objects.create(b=f, c='foo%s' % (i % 3))
# Get some artist and album data
if not Path(settings.STATIC_ROOT).joinpath('album_art').exists():
try:
from scrape_data import scrape_data
scrape_data()
except ImportError as e:
print("!!! Unable to scrape artist and track data.")
print(e)
if not Album.objects.exists():
with open(Path(__file__).parent.parent / 'scraped_data.json') as f:
artists = json.loads(f.read())
for artist_name, albums in items(artists):
artist, _ = Artist.objects.get_or_create(name=artist_name)
for album_name, album_data in items(albums):
album, _ = Album.objects.get_or_create(artist=artist, name=album_name, year=int(album_data['year']))
for i, (track_name, duration) in enumerate(album_data['tracks']):
Track.objects.get_or_create(album=album, index=i + 1, name=track_name, duration=duration)
try:
ensure_objects()
except OperationalError:
# We'll end up here in the management commands before the db is set up
pass
class StyleSelector(Form):
class Meta:
@staticmethod
def actions__submit__post_handler(request, form, **_):
style = form.fields.style.value
settings.IOMMI_DEFAULT_STYLE = style
return HttpResponseRedirect(request.get_full_path())
style = Field.choice(
choices=[k for k, v in items(iommi.style._styles) if not v.internal],
initial=lambda form, field, **_: getattr(settings, 'IOMMI_DEFAULT_STYLE', iommi.style.DEFAULT_STYLE),
)
menu = Menu(
sub_menu=dict(
root=MenuItem(url='/'),
page_examples=MenuItem(url='/page'),
form_examples=MenuItem(url='/form'),
table_examples=MenuItem(url='/table'),
menu_examples=MenuItem(url='/menu'),
supernaut=MenuItem(url='/supernaut'),
admin=MenuItem(url='/iommi-admin'),
login=MenuItem(
display_name='Log in',
url='/iommi-admin/login/?next=/',
include=lambda request, **_: not request.user.is_authenticated,
),
log_out=MenuItem(
display_name='Log out',
url='/iommi-admin/logout/',
include=lambda request, **_: request.user.is_authenticated,
),
)
)
class ExamplesPage(Page):
menu = menu
footer = html.div(
html.hr(),
html.a('iommi rocks!', attrs__href='http://iommi.rocks/'),
StyleSelector(),
after=LAST,
)
class IndexPage(ExamplesPage):
header = html.h1('Welcome to iommi examples application')
logo = html.img(
attrs__src='https://docs.iommi.rocks/en/latest/_static/logo_with_outline.svg',
attrs__style__width='30%',
)
def all_field_sorts(request):
some_choices = ['Foo', 'Bar', 'Baz']
return ExamplesPage(
parts=dict(
header=Header('All sorts of fields'),
form=Form(
fields={
f'{t}__call_target__attribute': t
for t in keys(get_members(cls=Field, member_class=Shortcut, is_member=is_shortcut))
if t
not in [
# These only work if we have an instance
'foreign_key',
'many_to_many',
]
},
fields__radio__choices=some_choices,
fields__choice__choices=some_choices,
fields__choice_queryset__choices=Artist.objects.all(),
fields__multi_choice__choices=some_choices,
fields__multi_choice_queryset__choices=Track.objects.all(),
fields__info__value="This is some information",
fields__text__initial='Text',
fields__textarea__initial='text area\nsecond row',
fields__integer__initial=3,
fields__float__initial=3.14,
fields__password__initial='abc123',
fields__boolean__initial=True,
fields__datetime__initial=datetime.now(),
fields__date__initial=date.today(),
fields__time__initial=datetime.now().time(),
fields__decimal__initial=3.14,
fields__url__initial='http://iommi.rocks',
fields__email__initial='example@example.com',
fields__phone_number__initial='+1 555 555',
actions__submit__include=False,
),
)
)
class DummyRow:
def __init__(self, idx):
self.idx = idx
def __getattr__(self, attr):
_, _, shortcut = attr.partition('column_of_type_')
s = f'{shortcut} #{self.idx}'
if shortcut == 'link':
class Link:
def get_absolute_url(self):
return '#'
def __str__(self):
return 'title'
return Link()
return s
@staticmethod
def get_absolute_url():
return '#'
class ShortcutSelectorForm(Form):
class Meta:
attrs__method = 'get'
actions__submit__post_handler = lambda **_: None
shortcut = Field.multi_choice(
choices=[
t
for t in keys(get_members(cls=Column, member_class=Shortcut, is_member=is_shortcut))
if t
not in [
'icon',
'foreign_key',
'many_to_many',
'choice_queryset',
'multi_choice_queryset',
]
]
)
def all_column_sorts(request):
selected_shortcuts = ShortcutSelectorForm().bind(request=request).fields.shortcut.value or []
type_specifics = Namespace(
choice__choices=['Foo', 'Bar', 'Baz'],
multi_choice__choices=['Foo', 'Bar', 'Baz'],
)
return ExamplesPage(
parts=dict(
header=Header('All sorts of columns'),
form=ShortcutSelectorForm(),
table=Table(
assets__ajax_enhance__template=None,
columns={
f'column_of_type_{t}': dict(
type_specifics.get(t, {}),
call_target__attribute=t,
)
for t in selected_shortcuts
},
rows=[DummyRow(i) for i in range(10)],
),
)
)
class ExampleAdmin(Admin):
class Meta:
iommi_style = None
parts__menu__sub_menu = dict(
home=MenuItem(url='/'),
admin=MenuItem(url=lambda **_: reverse(ExampleAdmin.all_models)),
change_password=MenuItem(url=lambda **_: reverse(Auth.change_password)),
logout=MenuItem(url=lambda **_: reverse(Auth.logout)),
)
parts__footer = Fragment(
after=LAST,
children=dict(
hr=html.hr(),
style=StyleSelector(title='Change iommi style'),
),
)
urlpatterns = [
path('', IndexPage().as_view()),
path('iommi-admin/', include(ExampleAdmin.urls())),
]
| 29.019934
| 189
| 0.581339
|
07082b46d2f9fd351d588d56007c0aece9086a25
| 1,998
|
py
|
Python
|
app/strava/test_tasks.py
|
AartGoossens/sweaty-reports
|
02e6de908cd7d9dcd04e36f383444bd9f62d387b
|
[
"MIT"
] | 1
|
2020-09-07T13:08:01.000Z
|
2020-09-07T13:08:01.000Z
|
app/strava/test_tasks.py
|
AartGoossens/sweaty-reports
|
02e6de908cd7d9dcd04e36f383444bd9f62d387b
|
[
"MIT"
] | 3
|
2020-03-24T17:56:14.000Z
|
2021-02-02T22:31:19.000Z
|
app/strava/test_tasks.py
|
AartGoossens/sweaty-reports
|
02e6de908cd7d9dcd04e36f383444bd9f62d387b
|
[
"MIT"
] | null | null | null |
from dataclasses import dataclass
from pathlib import Path
import asynctest
import pytest
from .. import config
from . import tasks
from .models import StravaAthlete
from .schemas import Event
@pytest.mark.asyncio
async def test_handle_event(mocker):
event = Event(
aspect_type='update',
event_time=1516126040,
object_id=1360128428,
object_type='activity',
owner_id=2,
subscription_id=120475,
updates=dict(title='Messy')
)
with asynctest.patch('app.strava.tasks.generate_report') as mocked_generate_report:
await tasks.handle_event(event)
athlete = await StravaAthlete.objects.get(id=event.owner_id)
mocked_generate_report.assert_called_once_with(athlete, event.object_id)
@pytest.mark.asyncio
async def test_handle_event_athlete(mocker):
mocked_generate_report = mocker.patch('app.strava.tasks.generate_report')
event = Event(
aspect_type='update',
event_time=1516126040,
object_id=1360128428,
object_type='athlete',
owner_id=2,
subscription_id=120475,
updates=dict(title='Messy')
)
await tasks.handle_event(event)
mocked_generate_report.assert_not_called()
@pytest.mark.asyncio
async def test_new_athlete(mocker):
mocked_get_activities = mocker.patch('stravalib.Client.get_activities')
@dataclass
class StravaActivity:
id: int
mocked_get_activities.return_value = [StravaActivity(1337)]
strava_athlete = await StravaAthlete.objects.get(id=1)
assert strava_athlete.backfilled == False
with asynctest.patch('app.strava.tasks.generate_report') as mocked_generate_report:
await tasks.new_athlete(strava_athlete)
mocked_get_activities.assert_called_once_with(limit=config.STRAVA_BACKFILL_COUNT)
mocked_generate_report.assert_called_once_with(strava_athlete, 1337)
strava_athlete = await StravaAthlete.objects.get(id=1)
assert strava_athlete.backfilled == True
| 27
| 87
| 0.737237
|
055613217cedabfb50bb2114101518002e365b6e
| 7,168
|
py
|
Python
|
relation_engine_server/api_versions/api_v1.py
|
slebras/relation_engine
|
71d5cc4841b5140b09da8751eb61cd9ba2c83430
|
[
"MIT"
] | null | null | null |
relation_engine_server/api_versions/api_v1.py
|
slebras/relation_engine
|
71d5cc4841b5140b09da8751eb61cd9ba2c83430
|
[
"MIT"
] | null | null | null |
relation_engine_server/api_versions/api_v1.py
|
slebras/relation_engine
|
71d5cc4841b5140b09da8751eb61cd9ba2c83430
|
[
"MIT"
] | null | null | null |
import flask
from relation_engine_server.utils import (
arango_client,
spec_loader,
auth,
bulk_import,
pull_spec,
config,
parse_json,
)
from relation_engine_server.utils.json_validation import run_validator
from relation_engine_server.exceptions import InvalidParameters
api_v1 = flask.Blueprint("api_v1", __name__)
@api_v1.route("/data_sources", methods=["GET"])
def list_data_sources():
# note the custom response format is used by the frontend, so this endpoint is provided
# in addition to the /specs/data_sources endpoint
data_sources = spec_loader.get_names("data_sources")
return flask.jsonify({"data_sources": data_sources})
@api_v1.route("/data_sources/<name>", methods=["GET"])
def fetch_data_source(name):
data_source = spec_loader.get_schema("data_source", name)
return flask.jsonify({"data_source": data_source})
@api_v1.route("/specs/data_sources", methods=["GET"])
def show_data_sources():
"""Show the current data sources loaded from the spec."""
name = flask.request.args.get("name")
if name:
return flask.jsonify(spec_loader.get_schema("data_source", name))
return flask.jsonify(spec_loader.get_names("data_sources"))
@api_v1.route("/specs/stored_queries", methods=["GET"])
def show_stored_queries():
"""Show the current stored query names loaded from the spec."""
name = flask.request.args.get("name")
if name:
return flask.jsonify(
{"stored_query": spec_loader.get_schema("stored_query", name)}
)
return flask.jsonify(spec_loader.get_names("stored_query"))
@api_v1.route("/specs/collections", methods=["GET"])
@api_v1.route("/specs/schemas", methods=["GET"])
def show_collections():
"""Show the names of the (document) collections (edges and vertices) loaded from the spec."""
name = flask.request.args.get("name")
doc_id = flask.request.args.get("doc_id")
if name:
return flask.jsonify(spec_loader.get_schema("collection", name))
elif doc_id:
return flask.jsonify(spec_loader.get_schema_for_doc(doc_id))
else:
return flask.jsonify(spec_loader.get_names("collection"))
@api_v1.route("/query_results", methods=["POST"])
def run_query():
"""
Run a stored query as a query against the database.
Auth:
- only kbase re admins for ad-hoc queries
- public stored queries (these have access controls within them based on params)
"""
json_body = parse_json.get_json_body() or {}
# fetch number of documents to return
batch_size = int(flask.request.args.get("batch_size", 10000))
full_count = flask.request.args.get("full_count", False)
if "query" in json_body:
# Run an adhoc query for a sysadmin
auth.require_auth_token(roles=["RE_ADMIN"])
query_text = _preprocess_stored_query(json_body["query"], json_body)
del json_body["query"]
if "ws_ids" in query_text:
# Fetch any authorized workspace IDs using a KBase auth token, if present
auth_token = auth.get_auth_header()
json_body["ws_ids"] = auth.get_workspace_ids(auth_token)
resp_body = arango_client.run_query(
query_text=query_text,
bind_vars=json_body,
batch_size=batch_size,
full_count=full_count,
)
return flask.jsonify(resp_body)
if "stored_query" in flask.request.args or "view" in flask.request.args:
# Run a query from a query name
# Note: we are maintaining backwards compatibility here with the "view" arg.
# "stored_query" is the more accurate name
query_name = flask.request.args.get("stored_query") or flask.request.args.get(
"view"
)
stored_query = spec_loader.get_stored_query(query_name)
if "params" in stored_query:
# Validate the user params for the query
stored_query_path = spec_loader.get_stored_query(query_name, path_only=True)
run_validator(
schema_file=stored_query_path, data=json_body, validate_at="/params"
)
stored_query_source = _preprocess_stored_query(
stored_query["query"], stored_query
)
if "ws_ids" in stored_query_source:
# Fetch any authorized workspace IDs using a KBase auth token, if present
auth_token = auth.get_auth_header()
json_body["ws_ids"] = auth.get_workspace_ids(auth_token)
resp_body = arango_client.run_query(
query_text=stored_query_source,
bind_vars=json_body,
batch_size=batch_size,
full_count=full_count,
)
return flask.jsonify(resp_body)
if "cursor_id" in flask.request.args:
# Run a query from a cursor ID
cursor_id = flask.request.args["cursor_id"]
resp_body = arango_client.run_query(cursor_id=cursor_id)
return flask.jsonify(resp_body)
# No valid options were passed
raise InvalidParameters("Pass in a query name or a cursor_id")
@api_v1.route("/specs", methods=["PUT"])
def update_specs():
"""
Manually check for updates, download spec releases, and init new collections.
Auth: admin
"""
auth.require_auth_token(["RE_ADMIN"])
init_collections = "init_collections" in flask.request.args
release_url = flask.request.args.get("release_url")
pull_spec.download_specs(init_collections, release_url, reset=True)
return flask.jsonify({"status": "updated"})
@api_v1.route("/documents", methods=["PUT"])
def save_documents():
"""
Create, update, or replace many documents in a batch.
Auth: admin
"""
auth.require_auth_token(["RE_ADMIN"])
collection_name = flask.request.args["collection"]
query = {"collection": collection_name, "type": "documents"}
if flask.request.args.get("display_errors"):
# Display an array of error messages
query["details"] = "true"
if flask.request.args.get("on_duplicate"):
query["onDuplicate"] = flask.request.args["on_duplicate"]
if flask.request.args.get("overwrite"):
query["overwrite"] = "true"
resp = bulk_import.bulk_import(query)
if resp.get("errors") > 0:
return (flask.jsonify(resp), 400)
else:
return flask.jsonify(resp)
@api_v1.route("/config", methods=["GET"])
def show_config():
"""Show public config data."""
conf = config.get_config()
return flask.jsonify(
{
"auth_url": conf["auth_url"],
"workspace_url": conf["workspace_url"],
"kbase_endpoint": conf["kbase_endpoint"],
"db_url": conf["db_url"],
"db_name": conf["db_name"],
"spec_repo_url": conf["spec_repo_url"],
"spec_release_url": conf["spec_release_url"],
"spec_release_path": conf["spec_release_path"],
}
)
def _preprocess_stored_query(query_text, config):
"""Inject some default code into each stored query."""
ws_id_text = " LET ws_ids = @ws_ids " if "ws_ids" in query_text else ""
return "\n".join([config.get("query_prefix", ""), ws_id_text, query_text])
| 36.385787
| 97
| 0.666574
|
7c5298ef6ba4cba447f060d790bb6d76fdec2a4d
| 12,896
|
py
|
Python
|
mpparser/mp_parser.py
|
nomad-coe/nomad-parser-mp
|
ef6874c976ab4f801f652254aeedfa8852bce0dd
|
[
"Apache-2.0"
] | null | null | null |
mpparser/mp_parser.py
|
nomad-coe/nomad-parser-mp
|
ef6874c976ab4f801f652254aeedfa8852bce0dd
|
[
"Apache-2.0"
] | null | null | null |
mpparser/mp_parser.py
|
nomad-coe/nomad-parser-mp
|
ef6874c976ab4f801f652254aeedfa8852bce0dd
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright The NOMAD Authors.
#
# This file is part of NOMAD.
# See https://nomad-lab.eu for further info.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import logging
import json
import numpy as np
from nomad.units import ureg
from nomad.parsing.parser import FairdiParser
from nomad.datamodel.metainfo.simulation.run import Run, Program
from nomad.datamodel.metainfo.workflow import (
Workflow, Elastic, EquationOfState, EOSFit, Thermodynamics, Stability, Decomposition,
Phonon)
from nomad.datamodel.metainfo.simulation.system import System, Atoms
from nomad.datamodel.metainfo.simulation.method import (
Method, DFT, Electronic, XCFunctional, Functional, BasisSet, BasisSetCellDependent)
from nomad.datamodel.metainfo.simulation.calculation import (
Calculation, Dos, DosValues, BandStructure, BandEnergies)
from mpparser.metainfo.mp import Composition, Symmetry
class MPParser(FairdiParser):
def __init__(self):
super().__init__(
name='parsers/mp', code_name='MaterialsProject',
code_homepage='https://materialsproject.org',
mainfile_mime_re=r'(application/json)|(text/.*)',
mainfile_name_re=r'.*mp.+materials\.json',
mainfile_contents_re=(r'"pymatgen_version":'))
    def init_parser(self):
        try:
            with open(self.filepath) as f:
                self.data = json.load(f)
        except Exception:
            self.logger.error('Failed to load json file.')
def parse_elastic(self, source):
sec_workflow = self.archive.m_create(Workflow)
sec_workflow.type = 'elastic'
sec_elastic = sec_workflow.m_create(Elastic)
sec_elastic.energy_stress_calculator = 'VASP'
sec_elastic.calculation_method = 'stress'
source = source.get('elasticity', source)
sec_elastic.elastic_constants_order = source.get('order', 2)
deformations = source.get('deformations')
if deformations is not None:
sec_elastic.n_deformations = len(deformations)
elastic_tensor = source.get('elastic_tensor')
if elastic_tensor is not None:
sec_elastic.elastic_constants_matrix_second_order = elastic_tensor * ureg.GPa
compliance_tensor = source.get('compliance_tensor')
if compliance_tensor is not None:
sec_elastic.compliance_matrix_second_order = compliance_tensor * (1 / ureg.GPa)
if source.get('g_reuss') is not None:
sec_elastic.shear_modulus_reuss = source['g_reuss'] * ureg.GPa
if source.get('g_voigt') is not None:
sec_elastic.shear_modulus_voigt = source['g_voigt'] * ureg.GPa
if source.get('g_vrh') is not None:
sec_elastic.shear_modulus_hill = source['g_vrh'] * ureg.GPa
if source.get('homogeneous_poisson') is not None:
sec_elastic.poisson_ratio_hill = source['homogeneous_poisson']
if source.get('k_reuss') is not None:
sec_elastic.bulk_modulus_reuss = source['k_reuss'] * ureg.GPa
if source.get('k_voigt') is not None:
sec_elastic.bulk_modulus_voigt = source['k_voigt'] * ureg.GPa
if source.get('k_vrh') is not None:
sec_elastic.bulk_modulus_hill = source['k_vrh'] * ureg.GPa
def parse_eos(self, source):
sec_workflow = self.archive.m_create(Workflow)
sec_workflow.type = 'equation_of_state'
sec_eos = sec_workflow.m_create(EquationOfState)
if source.get('volumes') is not None:
sec_eos.volumes = source['volumes'] * ureg.angstrom ** 3
if source.get('energies') is not None:
sec_eos.energies = source['energies'] * ureg.eV
for fit_function, result in source.get('eos', {}).items():
sec_eos_fit = sec_eos.m_create(EOSFit)
sec_eos_fit.function_name = fit_function
if result.get('B') is not None:
sec_eos_fit.bulk_modulus = result['B'] * ureg.eV / ureg.angstrom ** 3
if result.get('C') is not None:
sec_eos_fit.bulk_modulus_derivative = result['C']
if result.get('E0') is not None:
sec_eos_fit.equilibrium_energy = result['E0'] * ureg.eV
if result.get('V0') is not None:
sec_eos_fit.equilibrium_volume = result['V0'] * ureg.angstrom ** 3
if result.get('eos_energies') is not None:
sec_eos_fit.fitted_energies = result['eos_energies'] * ureg.eV
def parse_thermo(self, data):
sec_workflow = self.archive.m_create(Workflow)
sec_workflow.type = 'thermodynamics'
sec_thermo = sec_workflow.m_create(Thermodynamics)
sec_stability = sec_thermo.m_create(Stability)
sec_stability.formation_energy = data.get(
'formation_energy_per_atom', 0) * data.get('nsites', 1) * ureg.eV
sec_stability.delta_formation_energy = data.get('energy_above_hull', 0) * ureg.eV
sec_stability.is_stable = data.get('is_stable')
if data.get('decomposes_to') is not None:
for system in data.get('decomposes_to'):
sec_decomposition = sec_stability.m_create(Decomposition)
sec_decomposition.formula = system.get('formula')
sec_decomposition.fraction = system.get('amount')
def parse_phonon(self, data):
sec_workflow = self.archive.m_create(Workflow)
sec_workflow.type = 'phonon'
sec_phonon = sec_workflow.m_create(Phonon)
# TODO is vasp always mp calculator?
sec_phonon.force_calculator = 'vasp'
calculations = self.archive.run[-1].calculation
calc = calculations[-1] if calculations else self.archive.run[-1].m_create(Calculation)
if data.get('ph_dos') is not None:
sec_dos = calc.m_create(Dos, Calculation.dos_phonon)
sec_dos.energies = data['ph_dos']['frequencies'] * ureg.THz * ureg.h
dos = data['ph_dos']['densities'] * (1 / (ureg.THz * ureg.h))
sec_dos.total.append(DosValues(value=dos))
if data.get('ph_bs') is not None:
sec_phonon.with_non_analytic_correction = data['ph_bs'].get('has_nac')
sec_bs = calc.m_create(BandStructure, Calculation.band_structure_phonon)
bands = np.transpose(data['ph_bs']['bands'])
qpoints = data['ph_bs']['qpoints']
labels = data['ph_bs']['labels_dict']
hisym_qpts = list(labels.values())
labels = list(labels.keys())
endpoints = []
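            # collect indices of high-symmetry q-points and cut the bands into segments between them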
for i, qpoint in enumerate(qpoints):
if qpoint in hisym_qpts:
endpoints.append(i)
if len(endpoints) < 2:
continue
sec_segment = sec_bs.m_create(BandEnergies)
energies = bands[endpoints[0]: endpoints[1] + 1]
sec_segment.energies = np.reshape(energies, (1, *np.shape(energies))) * ureg.THz * ureg.h
sec_segment.kpoints = qpoints[endpoints[0]: endpoints[1] + 1]
sec_segment.endpoints_labels = [labels[hisym_qpts.index(qpoints[i])] for i in endpoints]
endpoints = []
calc.system_ref = self.archive.run[-1].system[0]
# TODO add eigendisplacements
def parse_tasks(self, data):
if len(data['calcs_reversed']) == 0:
return
xc_func_mapping = {
'PAW_PBE': ['GGA_X_PBE', 'GGA_C_PBE']
}
sec_method = self.archive.run[-1].m_create(Method)
sec_xc_functional = XCFunctional()
for potcar_type in data['calcs_reversed'][0].get('input', {}).get('potcar_type', []):
for xc_functional in xc_func_mapping.get(potcar_type, []):
if '_X_' in xc_functional or xc_functional.endswith('_X'):
sec_xc_functional.exchange.append(Functional(name=xc_functional))
elif '_C_' in xc_functional or xc_functional.endswith('_C'):
sec_xc_functional.correlation.append(Functional(name=xc_functional))
elif 'HYB' in xc_functional:
sec_xc_functional.hybrid.append(Functional(name=xc_functional))
else:
sec_xc_functional.contributions.append(Functional(name=xc_functional))
sec_method.dft = DFT(xc_functional=sec_xc_functional)
sec_method.electronic = Electronic(method="DFT")
encut = data['calcs_reversed'][0].get('input', {}).get('incar', {}).get('ENCUT')
prec = data['calcs_reversed'][0].get('input', {}).get('incar', {}).get('PREC')
if encut is not None and prec is not None:
sec_basis = sec_method.m_create(BasisSet)
sec_basis.type = 'plane waves'
sec_basis_set_cell_dependent = sec_basis.m_create(BasisSetCellDependent)
sec_basis_set_cell_dependent.kind = 'plane waves'
prec = 1.3 if 'acc' in prec else 1.0
sec_basis_set_cell_dependent.planewave_cutoff = encut * prec * ureg.eV
self.archive.run[-1].calculation[0].method_ref = sec_method
def parse(self, filepath, archive, logger):
self.filepath = os.path.abspath(filepath)
self.archive = archive
self.logger = logger if logger is not None else logging.getLogger(__name__)
self.maindir = os.path.dirname(self.filepath)
self.init_parser()
sec_run = archive.m_create(Run)
sec_run.program = Program(name='MaterialsProject', version="1.0.0")
# TODO system should be referenced
structure = self.data.get('structure')
if structure is not None:
labels = [site['label'] for site in structure.get('sites')]
positions = [site['xyz'] for site in structure.get('sites')]
cell = structure.get('lattice', {}).get('matrix')
sec_system = sec_run.m_create(System)
sec_atoms = sec_system.m_create(Atoms)
if cell is not None:
sec_atoms.lattice_vectors = cell * ureg.angstrom
sec_atoms.periodic = [True, True, True]
if positions:
sec_atoms.positions = positions * ureg.angstrom
if labels:
sec_atoms.labels = labels
for key, val in self.data.get('composition', {}).items():
sec_system.x_mp_composition.append(
Composition(x_mp_label=key, x_mp_value=val))
for key, val in self.data.get('composition_reduced', {}).items():
sec_system.x_mp_composition_reduced.append(
Composition(x_mp_label=key, x_mp_value=val))
symmetry = self.data.get('symmetry')
if symmetry is not None:
sec_symmetry = sec_system.m_create(Symmetry)
for key, val in symmetry.items():
try:
setattr(sec_symmetry, 'x_mp_%s' % key, val)
except Exception:
pass
# misc
sec_system.x_mp_elements = self.data.get('elements', [])
for key, val in self.data.items():
try:
setattr(sec_system, 'x_mp_%s' % key, val)
except Exception:
pass
# temporary fix to go through workflow normalization
sec_calc = sec_run.m_create(Calculation)
sec_calc.system_ref = sec_system
# TODO should we use the MP api for workflow results?
workflow_files = [f for f in os.listdir(
self.maindir) if f.endswith('.json') and f != os.path.basename(self.filepath)]
for filename in workflow_files:
try:
                with open(os.path.join(self.maindir, filename)) as f:
                    data = json.load(f)
except Exception:
continue
# make sure data matches that of system
# TODO maybe better to simply compare filename prefix so no need to load data
if data.get('material_id', data.get('task_id')) != self.data.get('material_id'):
continue
if 'elasticity' in data:
self.parse_elastic(data)
if 'eos' in data:
self.parse_eos(data)
if 'ph_bs' in data or 'ph_dos' in data:
self.parse_phonon(data)
if 'property_name' in data and data.get('property_name') == 'thermo':
self.parse_thermo(data)
if 'calcs_reversed' in data:
self.parse_tasks(data)
| 45.730496
| 105
| 0.628102
|
4807e068bebaaddc9e58ef5cba0f03ca622b1978
| 1,979
|
py
|
Python
|
app_verifications/thread/thread_checks.py
|
kskarbinski/threads-api
|
c144c1cb51422095922310d278f80e4996c10ea0
|
[
"MIT"
] | null | null | null |
app_verifications/thread/thread_checks.py
|
kskarbinski/threads-api
|
c144c1cb51422095922310d278f80e4996c10ea0
|
[
"MIT"
] | null | null | null |
app_verifications/thread/thread_checks.py
|
kskarbinski/threads-api
|
c144c1cb51422095922310d278f80e4996c10ea0
|
[
"MIT"
] | null | null | null |
from app_verifications import BaseChecks
from app_data.threads import threads, threads_invitations, threads_applications
from app_utils.helpers.finders import FindModel
class ThreadChecks(BaseChecks):
"""
Checks related to thread/threads.
All checks return True or False.
"""
def __init__(self, value=None, by="id", model=None):
super(ThreadChecks, self).__init__(container=threads, value=value, by=by, model=model)
self.thread_model = self.model
def check_thread_exists(self):
return True if self.thread_model else False
def check_user_is_owner(self, user_id):
return True if self.thread_model.owner == user_id else False
def check_user_is_not_owner(self, user_id):
return not self.check_user_is_owner(user_id=user_id)
def check_excludes_owner(self, user_ids):
return True if self.thread_model.owner not in user_ids else False
def check_user_is_invited(self, user_id):
thread_invitations = threads_invitations[self.thread_model.id]
return True if FindModel(models_list=thread_invitations).by_user(user_id) else False
def check_user_is_member(self, user_id):
return True if user_id in self.thread_model.users else False
def check_thread_is_private(self):
return True if self.thread_model.private else False
def check_thread_is_not_private(self):
return not self.check_thread_is_private()
def check_user_applied(self, user_id):
return True if FindModel(models_list=threads_applications[self.thread_model.id]).by_user(user_id) else False
def check_user_not_applied(self, user_id):
return not self.check_user_applied(user_id=user_id)
def check_user_invited(self, user_id):
return True if FindModel(models_list=threads_invitations[self.thread_model.id]).by_user(user_id) else False
def check_user_not_invited(self, user_id):
return not self.check_user_invited(user_id=user_id)
| 38.803922
| 116
| 0.749368
|
0636110aa501f3df35d633a1a75dcc94ab026c20
| 1,357
|
py
|
Python
|
apps/common/const.py
|
constructorfleet/AutomationApps
|
a7d14582e30c1d1a4392774ab4e7e00055fa0e26
|
[
"MIT"
] | null | null | null |
apps/common/const.py
|
constructorfleet/AutomationApps
|
a7d14582e30c1d1a4392774ab4e7e00055fa0e26
|
[
"MIT"
] | null | null | null |
apps/common/const.py
|
constructorfleet/AutomationApps
|
a7d14582e30c1d1a4392774ab4e7e00055fa0e26
|
[
"MIT"
] | null | null | null |
ARG_OR = 'or'
ARG_AND = 'and'
ARG_ENTITY_ID = 'entity_id'
ARG_ATTRIBUTE = 'attribute'
ARG_EXISTS = 'exists'
ARG_STATE = 'state'
ARG_VALUE = 'value'
ARG_HOUR = 'hour'
ARG_MINUTE = 'minute'
ARG_SECOND = 'second'
ARG_BEFORE = 'before'
ARG_AFTER = 'after'
ARG_AT = 'at'
ARG_DOMAIN = 'domain'
ARG_GROUPS = 'groups'
ARG_SERVICE = 'service'
ARG_SERVICE_DATA = 'service_data'
ARG_COMPARATOR = 'comparator'
ARG_DEPENDENCIES = "dependencies"
ARG_NOTIFY = 'notify'
ARG_NOTIFY_CATEGORY = 'notify_category'
ARG_NOTIFY_REPLACERS = 'replacers'
ARG_NOTIFY_ENTITY_ID = 'response_entity_id'
ARG_SENSOR = 'sensor'
ARG_FILENAME = 'filename'
ARG_LOG_LEVEL = 'log_level'
ARG_ENABLED_FLAG = 'enabled_flag'
ATTR_SCORE = 'score'
ATTR_FILENAME = 'filename'
ATTR_OLD_STATE = 'old_state'
ATTR_NEW_STATE = 'new_state'
ATTR_ENTITY_NAME = 'entity_name'
ATTR_RGB_COLOR = "rgb_color"
ATTR_COLOR_NAME = "color_name"
EVENT_STATE_CHANGED = 'state_changed'
DOMAIN_NOTIFY = 'notify'
DOMAIN_HOMEASSISTANT = 'homeassistant'
DOMAIN_CAMERA = 'camera'
SERVICE_TURN_ON = 'turn_on'
SERVICE_TURN_OFF = 'turn_off'
SERVICE_SNAPSHOT = 'snapshot'
EQUALS = '='
LESS_THAN = '<'
LESS_THAN_EQUAL_TO = '<='
GREATER_THAN = '>'
GREATER_THAN_EQUAL_TO = '>='
NOT_EQUAL = '!='
VALID_COMPARATORS = [
EQUALS,
LESS_THAN,
LESS_THAN_EQUAL_TO,
GREATER_THAN,
GREATER_THAN_EQUAL_TO,
NOT_EQUAL
]
| 21.887097
| 43
| 0.750184
|
3013977f7a7b727b4dbed6f9a1232d26aa143b9b
| 3,090
|
py
|
Python
|
models.py
|
zhikangD/CycleGAN-Tensorflow-PyTorch-Simple
|
68357996780b758ae313814be16366255080cebf
|
[
"MIT"
] | null | null | null |
models.py
|
zhikangD/CycleGAN-Tensorflow-PyTorch-Simple
|
68357996780b758ae313814be16366255080cebf
|
[
"MIT"
] | null | null | null |
models.py
|
zhikangD/CycleGAN-Tensorflow-PyTorch-Simple
|
68357996780b758ae313814be16366255080cebf
|
[
"MIT"
] | 1
|
2020-11-20T06:30:46.000Z
|
2020-11-20T06:30:46.000Z
|
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import ops
import functools
import tensorflow as tf
import tensorflow.contrib.slim as slim
conv = functools.partial(slim.conv2d, activation_fn=None)
deconv = functools.partial(slim.conv2d_transpose, activation_fn=None)
relu = tf.nn.relu
lrelu = functools.partial(ops.leak_relu, leak=0.2)
def discriminator(img, scope, df_dim=64, reuse=False, train=True):
bn = functools.partial(slim.batch_norm, scale=True, is_training=train,
decay=0.9, epsilon=1e-5, updates_collections=None)
with tf.variable_scope(scope + '_discriminator', reuse=reuse):
h0 = lrelu(conv(img, df_dim, 4, 2, scope='h0_conv')) # h0 is (128 x 128 x df_dim)
h1 = lrelu(bn(conv(h0, df_dim * 2, 4, 2, scope='h1_conv'), scope='h1_bn')) # h1 is (64 x 64 x df_dim*2)
h2 = lrelu(bn(conv(h1, df_dim * 4, 4, 2, scope='h2_conv'), scope='h2_bn')) # h2 is (32x 32 x df_dim*4)
h3 = lrelu(bn(conv(h2, df_dim * 8, 4, 1, scope='h3_conv'), scope='h3_bn')) # h3 is (32 x 32 x df_dim*8)
h4 = conv(h3, 1, 4, 1, scope='h4_conv') # h4 is (32 x 32 x 1)
return h4
def generator(img, scope, gf_dim=64, reuse=False, train=True):
bn = functools.partial(slim.batch_norm, scale=True, is_training=train,
decay=0.9, epsilon=1e-5, updates_collections=None)
def residule_block(x, dim, scope='res'):
y = tf.pad(x, [[0, 0], [1, 1], [1, 1], [0, 0]], "REFLECT")
y = relu(bn(conv(y, dim, 3, 1, padding='VALID', scope=scope + '_conv1'), scope=scope + '_bn1'))
y = tf.pad(y, [[0, 0], [1, 1], [1, 1], [0, 0]], "REFLECT")
y = bn(conv(y, dim, 3, 1, padding='VALID', scope=scope + '_conv2'), scope=scope + '_bn2')
return y + x
with tf.variable_scope(scope + '_generator', reuse=reuse):
c0 = tf.pad(img, [[0, 0], [3, 3], [3, 3], [0, 0]], "REFLECT")
c1 = relu(bn(conv(c0, gf_dim, 7, 1, padding='VALID', scope='c1_conv'), scope='c1_bn'))
c2 = relu(bn(conv(c1, gf_dim * 2, 3, 2, scope='c2_conv'), scope='c2_bn'))
c3 = relu(bn(conv(c2, gf_dim * 4, 3, 2, scope='c3_conv'), scope='c3_bn'))
r1 = residule_block(c3, gf_dim * 4, scope='r1')
r2 = residule_block(r1, gf_dim * 4, scope='r2')
r3 = residule_block(r2, gf_dim * 4, scope='r3')
r4 = residule_block(r3, gf_dim * 4, scope='r4')
r5 = residule_block(r4, gf_dim * 4, scope='r5')
r6 = residule_block(r5, gf_dim * 4, scope='r6')
r7 = residule_block(r6, gf_dim * 4, scope='r7')
r8 = residule_block(r7, gf_dim * 4, scope='r8')
r9 = residule_block(r8, gf_dim * 4, scope='r9')
d1 = relu(bn(deconv(r9, gf_dim * 2, 3, 2, scope='d1_dconv'), scope='d1_bn'))
d2 = relu(bn(deconv(d1, gf_dim, 3, 2, scope='d2_dconv'), scope='d2_bn'))
d2 = tf.pad(d2, [[0, 0], [3, 3], [3, 3], [0, 0]], "REFLECT")
pred = conv(d2, 3, 7, 1, padding='VALID', scope='pred_conv')
pred = tf.nn.tanh(pred)
return pred
| 46.119403
| 112
| 0.594822
|
7373e82adcdb3483a229c5c89389d5c91b30d4f9
| 6,688
|
py
|
Python
|
custom_components/nicehash/nicehash.py
|
v00g100skr/ha-nicehash
|
5db5848daff713016893654db5c1636ff7d1ca22
|
[
"MIT"
] | null | null | null |
custom_components/nicehash/nicehash.py
|
v00g100skr/ha-nicehash
|
5db5848daff713016893654db5c1636ff7d1ca22
|
[
"MIT"
] | null | null | null |
custom_components/nicehash/nicehash.py
|
v00g100skr/ha-nicehash
|
5db5848daff713016893654db5c1636ff7d1ca22
|
[
"MIT"
] | null | null | null |
"""
NiceHash API interface
References:
- https://docs.nicehash.com/main/index.html
- https://github.com/nicehash/rest-clients-demo/blob/master/python/nicehash.py
"""
from datetime import datetime
from hashlib import sha256
import hmac
import httpx
import json
import logging
import re
import sys
from time import mktime
import uuid
import logging
from .const import MAX_TWO_BYTES, NICEHASH_API_URL
_LOGGER = logging.getLogger(__name__)
def parse_device_name(raw_name):
name = re.sub(
r"(\s?\(r\))|(\s?\(tm\))|(\s?cpu)|(\s?graphics)|(\s?@.*ghz)",
"",
raw_name,
flags=re.IGNORECASE,
)
return name
class MiningAlgorithm:
def __init__(self, data: dict):
self.name = data.get("title")
self.speed = round(float(data.get("speed")), 2)
unit = data.get("displaySuffix")
self.unit = f"{unit}/s"
class MiningRigDevice:
def __init__(self, rig_name, data: dict):
self.id = '%s_%s' % (rig_name,data.get("id"))
self.name = parse_device_name(data.get("name"))
self.status = data.get("status").get("description")
self.gtemperature = int(data.get("temperature")) % MAX_TWO_BYTES
self.htemperature = int(int(data.get("temperature")) / MAX_TWO_BYTES)
self.load = int(data.get("load")) % MAX_TWO_BYTES
self.rpm = int(data.get("revolutionsPerMinute"))
self.speeds = data.get("speeds")
class MiningRig:
def __init__(self, data: dict):
self.id = data.get("rigId")
self.name = data.get("name")
self.status = data.get("minerStatus")
self.status_time = data.get("statusTime")
self.profitability = data.get("profitability")
self.unpaid_amount = float(data.get("unpaidAmount"))
devices = data.get("devices")
if devices is not None:
self.num_devices = len(devices)
self.devices = dict()
for device_data in devices:
device = MiningRigDevice(data.get("name"), device_data)
self.devices[f"{device.id}"] = device
else:
self.num_devices = 0
self.devices = dict()
def get_algorithms(self):
algorithms = dict()
for device in self.devices.values():
if len(device.speeds) > 0:
algo = MiningAlgorithm(device.speeds[0])
existingAlgo = algorithms.get(algo.name)
if existingAlgo:
existingAlgo.speed += algo.speed
else:
algorithms[algo.name] = algo
return algorithms
class Payout:
def __init__(self, data: dict):
self.id = data.get("id")
self.currency = "Unknown"
self.created = data.get("created")
self.amount = float(data.get("amount"))
self.fee = float(data.get("feeAmount"))
self.account_type = "Unknown"
# Currency
currency = data.get("currency")
if currency:
self.currency = currency.get("enumName")
# Account Type
account_type = data.get("accountType")
if account_type:
self.account_type = account_type.get("enumName")
class NiceHashPublicClient:
async def get_exchange_rates(self):
exchange_data = await self.request("GET",
"/main/api/v2/exchangeRate/list")
return exchange_data.get("list")
async def request(self, method, path, query=None, body=None):
url = NICEHASH_API_URL + path
if query is not None:
url += f"?{query}"
_LOGGER.debug(url)
async with httpx.AsyncClient() as client:
if body:
data = json.dumps(body)
response = await client.request(method, url, data=data)
else:
response = await client.request(method, url)
if response.status_code == 200:
return response.json()
else:
                err_messages = [str(response.status_code), response.reason_phrase]
if response.content:
err_messages.append(str(response.content))
raise Exception(": ".join(err_messages))
class NiceHashPrivateClient:
def __init__(self, organization_id, key, secret):
self.organization_id = organization_id
self.key = key
self.secret = secret
async def get_accounts(self):
return await self.request("GET", "/main/api/v2/accounting/accounts2")
async def get_mining_rigs(self):
return await self.request("GET", "/main/api/v2/mining/rigs2")
async def get_mining_rig(self, rig_id):
return await self.request("GET", f"/main/api/v2/mining/rig2/{rig_id}")
async def get_rig_payouts(self, size=84):
query = f"size={size}"
return await self.request("GET", "/main/api/v2/mining/rigs/payouts",
query)
async def request(self, method, path, query="", body=None):
xtime = self.get_epoch_ms_from_now()
xnonce = str(uuid.uuid4())
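        # NiceHash authentication: HMAC-SHA256 over a NUL-separated list of request fields, sent in the X-Auth header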
message = f"{self.key}\00{str(xtime)}\00{xnonce}\00\00{self.organization_id}\00\00{method}\00{path}\00{query}"
data = None
if body:
data = json.dumps(body)
message += f"\00{data}"
digest = hmac.new(self.secret.encode(), message.encode(),
sha256).hexdigest()
xauth = f"{self.key}:{digest}"
headers = {
"X-Time": str(xtime),
"X-Nonce": xnonce,
"X-Auth": xauth,
"Content-Type": "application/json",
"X-Organization-Id": self.organization_id,
"X-Request-Id": str(uuid.uuid4()),
}
async with httpx.AsyncClient() as client:
client.headers = headers
url = NICEHASH_API_URL + path
if query:
url += f"?{query}"
_LOGGER.debug(url)
if data:
response = await client.request(method, url, data=data)
else:
response = await client.request(method, url)
if response.status_code == 200:
return response.json()
else:
                err_messages = [str(response.status_code), response.reason_phrase]
if response.content:
err_messages.append(str(response.content))
raise Exception(": ".join(err_messages))
def get_epoch_ms_from_now(self):
now = datetime.now()
now_ec_since_epoch = mktime(
now.timetuple()) + now.microsecond / 1000000.0
return int(now_ec_since_epoch * 1000)
| 32
| 118
| 0.576106
|
1175b116ca844b90ff9ad6bcdce62a70c87ac173
| 521
|
py
|
Python
|
Algorithms and Data Structures/old/bubble_sort.py
|
ioyy900205/PyTorch_mess-around
|
90d255e17158699fd7902f7746b35fa18975112e
|
[
"MIT"
] | null | null | null |
Algorithms and Data Structures/old/bubble_sort.py
|
ioyy900205/PyTorch_mess-around
|
90d255e17158699fd7902f7746b35fa18975112e
|
[
"MIT"
] | null | null | null |
Algorithms and Data Structures/old/bubble_sort.py
|
ioyy900205/PyTorch_mess-around
|
90d255e17158699fd7902f7746b35fa18975112e
|
[
"MIT"
] | null | null | null |
'''
Date: 2021-06-22 09:59:40
LastEditors: Liuliang
LastEditTime: 2021-06-22 10:24:49
Description: bubble_sort
'''
import random
# from random_int_list import *
from bacic_module.random_int_list import *
import numpy as np
def bubble_sort(li):
    length = len(li)
    for i in range(length - 1):
        for j in range(length - 1 - i):
            if li[j] > li[j + 1]:
                li[j], li[j + 1] = li[j + 1], li[j]
if __name__ == "__main__":
list = random_int_list(5,1000,20)
print(list)
bubble_sort(list)
print(list)
| 18.607143
| 62
| 0.642994
|
963ee09542fe0d3266b44a4bfea45ad516209877
| 22,486
|
py
|
Python
|
checkers-reference/checkers_P2.py
|
cjsproject/Draughts-Checkers
|
76928822692c3daeb87e2f75db1e2ca825abec29
|
[
"MIT"
] | null | null | null |
checkers-reference/checkers_P2.py
|
cjsproject/Draughts-Checkers
|
76928822692c3daeb87e2f75db1e2ca825abec29
|
[
"MIT"
] | null | null | null |
checkers-reference/checkers_P2.py
|
cjsproject/Draughts-Checkers
|
76928822692c3daeb87e2f75db1e2ca825abec29
|
[
"MIT"
] | null | null | null |
from __future__ import nested_scopes
import string
import random
#======================== Class GameEngine =======================================
class GameEngine:
def __init__(self, str_name):
self.str = str_name
def __str__(self):
return self.str
#The return value should be a move that is denoted by a list
def nextMove(self,state):
global PLAYER
curNode = Node(Board(state))
PLAYER = self.str
result = maxv(curNode,Thresh(-2000),Thresh(2000),0,"",self.str)
return result.bestSucc.state.move
#==================== Class Node ============================================
class Node:
def __init__ (self,state,parent=None,depth=0,gval=0,hval=None):
self.state = state
self.parent = parent
self.depth = depth
self.gval = gval
self.hval = hval
def printNode(self):
print "state: ", self.state, " Parent: ", self.parent.state,
print " Gval=",self.gval," Hval=",self.hval
def printPathToNode(self):
if self:
self.printNode()
if self.parent:
self.parent.printPathToNode()
#================== Class Thresh =======================================
class Thresh:
def __init__(self,initVal,node=None):
self.val = initVal
self.bestSucc = node
def __repr__(self):
return str(self.val)
#============== A New Class: Board ============================================
# This class is used to represent board and move
# Class Members:
# board : a list of lists that represents the 8*8 board
# move : is also a list, e.g move = [(1,1),(3,3),(5,5)]
class Board:
def __init__(self,board,move=[]):
self.board = board
self.move = move
#This function outputs the current Board
def PrintBoard(self,name="====== The current board is: =========",parent = None ):
if parent:
print "Move = ",self.move
print "The Parent board is:", name
for i in [7,6,5,4,3,2,1,0]:
print i,":",
for j in range(8):
print parent.board[i][j],
print "\t|\t",i,":",
for j in range(8):
print self.board[i][j],
print
print " ",0,1,2,3,4,5,6,7,"\t|\t ",0,1,2,3,4,5,6,7
else:
print name
print "move = ",self.move
for i in [7,6,5,4,3,2,1,0]:
print i,":",
for j in range(8):
print self.board[i][j],
print
print " ",0,1,2,3,4,5,6,7
#This function has not been finished (to be completed later, or just use PrintBoard)
def __str__(self):
return "Board"
#=======================================================
#Please only modify code in the following two functions
#=======================================================
#Heuristic function.
#Input: a search Node, an indentation string used for trace output, and the player symbol ('b' or 'r')
#Type of return value: a real number
def evalFun(node,space,player):
cur_board = node.state.board
val = 0
opponent = OppDic1[player]
for i in range(8):
for j in range(8):
#number of the king and man
if cur_board[i][j] == player:
val = val + 20
elif cur_board[i][j] == PieceToKingDic[player]:
val = val + 40
elif cur_board[i][j] == opponent:
val = val - 20
elif cur_board[i][j] == PieceToKingDic[opponent]:
val = val - 40
return val
def cutoff(state,depth,space,player):
if depth >= 5 or not successors(state,player):
return 1
else:
return 0
#======================================================
#Please don't change anything below this point
#======================================================
def edgecost (state1, state2):
return 1
def expand (n, succboards):
if succboards == []:
return []
else:
x = map(lambda s: Node(s,parent=n,depth=1+n.depth,\
gval=n.gval+edgecost(n.state,s)),succboards)
return x
#This function returns the reconstructed move list. It has not been tested and is not used yet
def GetMoveList(cur_board,suc_board,player):
for i in range(8):
for j in range(8):
if suc_board[i][j] == '.' and cur_board[i][j] in PlayerDic[player]:
s,r = i,j
if cur_board[i][j] == '.' and suc_board[i][j] in PlayerDic[player]:
a,b = i,j
if abs(s-a) == 1:
move = [(s,r),(a,b)]
else:
move = [(s,r)]
while s != a and r != b:
if s >= 2 and r >= 2 and cur_board[s-1][r-1] in OppDic[player] and suc_board[s-1][r-1] == '.':
s,r = s-2,r-2
move = move + [(s,r)]
elif s >= 2 and r <= 5 and cur_board[s-1][r+1] in OppDic[player] and suc_board[s-1][r+1] == '.':
s,r = s-2,r+2
move = move + [(s,r)]
elif s <= 5 and r >= 2 and cur_board[s+1][r-1] in OppDic[player] and suc_board[s+1][r-1] == '.':
s,r = s+2,r-2
move = move + [(s,r)]
elif s <= 5 and r <= 5 and cur_board[s+1][r+1] in OppDic[player] and suc_board[s+1][r+1] == '.':
s,r = s+2,r+2
move = move + [(s,r)]
return move
def Jump(board, a,b, jstep, player):
result = []
if player == 'b':
#Jump: upper right
if a <= 5 and b <= 5 and (board[a+1][b+1] == 'r' or board[a+1][b+1] == 'R') and board[a+2][b+2] == '.':
new_board = Copyboard(board)
new_board[a][b], new_board[a+1][b+1] = '.','.'
if a == 5:
new_board[a+2][b+2] = 'B'
else:
new_board[a+2][b+2] = 'b'
tlist = Jump(new_board,a+2,b+2,jstep+1,'b')
for state in tlist:
state.move = [(a,b)]+ state.move
result = result + tlist
#Jump: upper left
if a <= 5 and b >= 2 and (board[a+1][b-1] == 'r' or board[a+1][b-1] == 'R') and board[a+2][b-2] == '.':
new_board = Copyboard(board)
new_board[a][b], new_board[a+1][b-1] = '.','.'
if a == 5:
new_board[a+2][b-2] = 'B'
else:
new_board[a+2][b-2] = 'b'
tlist = Jump(new_board,a+2,b-2,jstep+1,'b')
for state in tlist:
state.move = [(a,b)]+ state.move
result = result + tlist
if not result and jstep >= 1:
result = [Board(board,move = [(a,b)])]
elif player == 'r':
#Jump: down right
if a >= 2 and b <= 5 and (board[a-1][b+1] == 'b' or board[a-1][b+1] == 'B') and board[a-2][b+2] == '.':
new_board = Copyboard(board)
new_board[a][b], new_board[a-1][b+1] = '.','.'
if a == 2:
new_board[a-2][b+2] = 'R'
else:
new_board[a-2][b+2] = 'r'
tlist = Jump(new_board,a-2,b+2,jstep+1,'r')
for state in tlist:
state.move = [(a,b)]+ state.move
result = result + tlist
#Jump: down left
if a >= 2 and b >= 2 and (board[a-1][b-1] == 'b' or board[a-1][b-1] == 'B') and board[a-2][b-2] == '.':
new_board = Copyboard(board)
new_board[a][b], new_board[a-1][b-1] = '.','.'
if a == 2:
new_board[a-2][b-2] = 'R'
else:
new_board[a-2][b-2] = 'r'
tlist = Jump(new_board,a-2,b-2,jstep+1,'r')
for state in tlist:
state.move = [(a,b)]+ state.move
result = result + tlist
if not result and jstep >= 1:
result = [Board(board,move = [(a,b)])]
elif player == 'B' or player == 'R':
#Jump: upper right
if a <= 5 and b <= 5 and (board[a+1][b+1] in OppDic[player]) and board[a+2][b+2] == '.':
new_board = Copyboard(board)
new_board[a][b], new_board[a+1][b+1] = '.','.'
new_board[a+2][b+2] = player
tlist = Jump(new_board,a+2,b+2,jstep+1,player)
for state in tlist:
state.move = [(a,b)]+ state.move
result = result + tlist
#Jump: upper left
if a <= 5 and b >= 2 and (board[a+1][b-1] in OppDic[player]) and board[a+2][b-2] == '.':
new_board = Copyboard(board)
new_board[a][b], new_board[a+1][b-1] = '.','.'
new_board[a+2][b-2] = player
tlist = Jump(new_board,a+2,b-2,jstep+1,player)
for state in tlist:
state.move = [(a,b)]+ state.move
result = result + tlist
#Jump: down right
if a >= 2 and b <= 5 and (board[a-1][b+1] in OppDic[player]) and board[a-2][b+2] == '.':
new_board = Copyboard(board)
new_board[a][b], new_board[a-1][b+1] = '.','.'
new_board[a-2][b+2] = player
tlist = Jump(new_board,a-2,b+2,jstep+1,player)
for state in tlist:
state.move = [(a,b)]+ state.move
result = result + tlist
#Jump: down left
if a >= 2 and b >= 2 and (board[a-1][b-1] in OppDic[player]) and board[a-2][b-2] == '.':
new_board = Copyboard(board)
new_board[a][b], new_board[a-1][b-1] = '.','.'
new_board[a-2][b-2] = player
tlist = Jump(new_board,a-2,b-2,jstep+1,player)
for state in tlist:
state.move = [(a,b)]+ state.move
result = result + tlist
if not result and jstep >= 1:
result = [Board(board,move = [(a,b)])]
return result
def Copyboard(board):
new_board = [[]]*8
for i in range(8):
new_board[i] = [] + board[i]
return new_board
def successors(state,player):
cur_board = state.board
suc_result = []
if player == 'b':
#Test jump: If a piece can jump, it must jump
piece_list = []
for i in range(8):
for j in range(8):
if cur_board[i][j] == 'b' or cur_board[i][j] == 'B':
suc_result = suc_result + Jump(cur_board, i,j, 0, cur_board[i][j])
piece_list = piece_list + [[i,j]]
#Move the piece one step
if not suc_result:
for x in piece_list:
i,j = x[0],x[1]
if cur_board[i][j] == 'b':
#(1)The piece is not in the rightmost column, move to upperright
if j <= 6 and cur_board[i+1][j+1] == '.':
suc_board = Copyboard(cur_board)
suc_board[i][j] = '.'
if i<=5:
suc_board[i+1][j+1] = 'b'
else:
suc_board[i+1][j+1] = 'B'
move = [(i,j),(i+1,j+1)]
suc_result = suc_result + [Board(suc_board,move)]
#(2)The piece is not in the leftmost column, move to the upperleft
if j >= 1 and cur_board[i+1][j-1] == '.':
suc_board = Copyboard(cur_board)
suc_board[i][j] = '.'
if i<= 5:
suc_board[i+1][j-1] = 'b'
else:
suc_board[i+1][j-1] = 'B'
move = [(i,j),(i+1,j-1)]
suc_result = suc_result + [Board(suc_board,move)]
elif cur_board[i][j] == 'B':
#Move the king one step
#(1)The king is not in top and the rightmost column, move to upperright
if i <= 6 and j <= 6 and cur_board[i+1][j+1] == '.':
suc_board = Copyboard(cur_board)
suc_board[i][j] = '.'
suc_board[i+1][j+1] = 'B'
move = [(i,j),(i+1,j+1)]
suc_result = suc_result + [Board(suc_board,move)]
#(2)The king is not in top and the leftmost column, move to the upperleft
if i <= 6 and j >= 1 and cur_board[i+1][j-1] == '.':
suc_board = Copyboard(cur_board)
suc_board[i][j] = '.'
suc_board[i+1][j-1] = 'B'
move = [(i,j),(i+1,j-1)]
suc_result = suc_result + [Board(suc_board,move)]
#(3)The king is not in bottom and the rightmost column, move to the downright
if i >= 1 and j <= 6 and cur_board[i-1][j+1] == '.':
suc_board = Copyboard(cur_board)
suc_board[i][j] = '.'
suc_board[i-1][j+1] = 'B'
move = [(i,j),(i-1,j+1)]
suc_result = suc_result + [Board(suc_board,move)]
#(4)The king is not in the leftmost column, move to the downleft
if i >= 1 and j >= 1 and cur_board[i-1][j-1] == '.':
suc_board = Copyboard(cur_board)
suc_board[i][j] = '.'
suc_board[i-1][j-1] = 'B'
move = [(i,j),(i-1,j-1)]
suc_result = suc_result + [Board(suc_board,move)]
else:
#Test jump: If a piece can jump, it must jump
piece_list = []
for i in range(8):
for j in range(8):
if cur_board[i][j] == 'r' or cur_board[i][j] == 'R':
suc_result = suc_result + Jump(cur_board, i,j, 0, cur_board[i][j])
piece_list = piece_list + [[i,j]]
#If jump is not available, move the piece one step
if not suc_result:
for x in piece_list:
i,j = x[0],x[1]
if cur_board[i][j] == 'r':
#move the piece one step
#(1)the piece is not in the rightmost column, move to downright
if j <= 6 and cur_board[i-1][j+1] == '.':
suc_board = Copyboard(cur_board)
suc_board[i][j] = '.'
if i >= 2:
suc_board[i-1][j+1] = 'r'
else:
suc_board[i-1][j+1] = 'R'
move = [(i,j),(i-1,j+1)]
suc_result = suc_result + [Board(suc_board,move)]
#(2)the piece is not in the leftmost column, move to the downleft
if j >= 1 and cur_board[i-1][j-1] == '.':
suc_board = Copyboard(cur_board)
suc_board[i][j] = '.'
if i >= 2:
suc_board[i-1][j-1] = 'r'
else:
suc_board[i-1][j-1] = 'R'
move = [(i,j),(i-1,j-1)]
suc_result = suc_result + [Board(suc_board,move)]
elif cur_board[i][j] == 'R':
#move the king one step
#(1)the king is not in top and the rightmost column, move to upperright
if i <= 6 and j <= 6 and cur_board[i+1][j+1] == '.':
suc_board = Copyboard(cur_board)
suc_board[i][j] = '.'
suc_board[i+1][j+1] = 'R'
move = [(i,j),(i+1,j+1)]
suc_result = suc_result + [Board(suc_board,move)]
#(2)the king is not in top and the leftmost column, move to the upperleft
if i <= 6 and j >= 1 and cur_board[i+1][j-1] == '.':
suc_board = Copyboard(cur_board)
suc_board[i][j] = '.'
suc_board[i+1][j-1] = 'R'
move = [(i,j),(i+1,j-1)]
suc_result = suc_result + [Board(suc_board,move)]
#(3)the king is not in bottom and the rightmost column, move to the downright
if i >= 1 and j <= 6 and cur_board[i-1][j+1] == '.':
suc_board = Copyboard(cur_board)
suc_board[i][j] = '.'
suc_board[i-1][j+1] = 'R'
move = [(i,j),(i-1,j+1)]
suc_result = suc_result + [Board(suc_board,move)]
#(4)the king is not in bottom and the leftmost column, move to the downleft
if i >= 1 and j >= 1 and cur_board[i-1][j-1] == '.':
suc_board = Copyboard(cur_board)
suc_board[i][j] = '.'
suc_board[i-1][j-1] = 'R'
move = [(i,j),(i-1,j-1)]
suc_result = suc_result + [Board(suc_board,move)]
return suc_result
#=============================================================================
def maxv (node, parentalpha, parentbeta, depth,space,player):
alpha = Thresh(parentalpha.val,parentalpha.bestSucc)
beta = Thresh(parentbeta.val,parentbeta.bestSucc)
if PrintFlag:
#print "player =",player
print space, "maxv", node.state, " alpha:", alpha.val, " beta:", beta.val,"-->"
if cutoff(node.state,depth,space,player):
#t = Thresh(evalFun(node.state,space,PLAYER),node)
t = Thresh(evalFun(node,space,PLAYER),node)
if PrintFlag:
print space,"returning",t,"<--"
return t
else:
for s in expand(node,successors(node.state,player)):
newspace = space + " "
minval = minv(s, alpha, beta, depth+1,newspace,OppDic1[player])
if minval.val > alpha.val:
alpha.val = minval.val
alpha.bestSucc = s
if PrintFlag:
print space, "alpha updated to ", alpha.val
if alpha.val >= beta.val:
if PrintFlag:
print space, "alpha >= beta so returning beta, which is ", beta.val,"<--"
return beta
if PrintFlag:
print space, "returning alpha ", alpha,"<--"
return alpha
def minv (node, parentalpha, parentbeta, depth, space,player):
alpha = Thresh(parentalpha.val,parentalpha.bestSucc)
beta = Thresh(parentbeta.val,parentbeta.bestSucc)
if PrintFlag:
#print "player =",player
print space, "minv",node.state, " alpha:", alpha.val, " beta:", beta.val,"-->"
if cutoff(node.state,depth,space,player):
#t = Thresh(evalFun(node.state,space,PLAYER),node)
t = Thresh(evalFun(node,space,PLAYER),node)
if PrintFlag:
print space,"returning",t,"<--"
return t
else:
for s in expand(node,successors(node.state,player)):
newspace = space + " "
maxval = maxv(s, alpha, beta, depth+1,newspace,OppDic1[player])
if maxval.val < beta.val:
beta.val = maxval.val
beta.bestSucc = s
if PrintFlag:
print space, "beta updated to ", beta.val
if beta.val <= alpha.val:
if PrintFlag:
print space, "beta <= alpha so returning alpha, which is ", alpha.val,"<--"
return alpha
if PrintFlag:
print space, "returning beta ", beta
return beta
#============= The Checkers Problem =========================
Initial_Board = [ ['b','.','b','.','b','.','b','.'],\
['.','b','.','b','.','b','.','b'],\
['b','.','b','.','b','.','b','.'],\
['.','.','.','.','.','.','.','.'],\
['.','.','.','.','.','.','.','.'],\
['.','r','.','r','.','r','.','r'],\
['r','.','r','.','r','.','r','.'],\
['.','r','.','r','.','r','.','r'] \
]
#This board is used to test one-step move
Test_Board1 = [ ['b','.','b','.','.','.','.','.'],\
['.','b','.','.','.','r','.','b'],\
['.','.','.','.','b','.','.','.'],\
['.','B','.','.','.','.','.','R'],\
['B','.','.','.','.','.','R','.'],\
['.','.','.','r','.','.','.','.'],\
['r','.','b','.','.','.','r','.'],\
['.','.','.','.','.','r','.','r'] \
]
#These boards are used to test jump
Test_Board2 = [ ['.','.','.','.','.','.','.','.'],\
['r','.','R','.','R','.','R','.'],\
['.','.','.','.','.','.','.','.'],\
['r','.','r','.','R','.','r','.'],\
['.','.','.','.','.','.','.','.'],\
['R','.','R','.','r','.','R','.'],\
['.','.','.','B','.','.','.','.'],\
['.','.','.','.','.','.','b','.'] \
]
Test_Board3 = [ ['.','.','.','.','.','.','.','.'],\
['b','.','b','.','b','.','B','.'],\
['.','.','.','.','.','.','.','.'],\
['b','.','b','.','B','.','b','.'],\
['.','.','.','.','.','.','.','.'],\
['B','.','b','.','b','.','B','.'],\
['.','.','.','r','.','.','.','.'],\
['.','.','.','.','.','.','R','.'] \
]
PieceToKingDic = {'r':'R', 'b':'B'}
OppDic = {'B':['r','R'],'R':['b','B'],'b':['r','R'],'r':['b','B']}
PlayerDic = {'r':['r','R'],'b':['b','B'],'R':['r','R'],'B':['b','B']}
OppDic1 = {'b':'r','r':'b'}
PrintFlag = 0
#PLAYER = 'r'
#The following code is used to test the successors function
#Board(Test_Board1).PrintBoard(name = "Test_Board1 for one step move:")
#for board in successors(Board(Test_Board1),'r'):
# board.PrintBoard(parent = Board(Test_Board1))
# print "";
#Board(Test_Board2).PrintBoard(name = "Test_Board2 for jump:")
#for board in successors(Board(Test_Board2),'b'):
# board.PrintBoard(parent = Board(Test_Board2))
# print "";
#Board(Test_Board3).PrintBoard(name = "Test_Board3 for jump:")
#for board in successors(Board(Test_Board3),'r'):
# board.PrintBoard(parent = Board(Test_Board3))
# print "";
| 42.994264
| 111
| 0.42373
|
90a03e48bff9d125781226a18da32cb2fa337ac4
| 14,217
|
py
|
Python
|
tests/sdc/test_populate.py
|
beda-software/adibox-sdc
|
0c2da41ac5ddbe0e33657fe51c6ee7d5aeb382ee
|
[
"MIT"
] | 7
|
2020-05-14T21:00:42.000Z
|
2021-10-17T09:10:18.000Z
|
tests/sdc/test_populate.py
|
beda-software/adibox-sdc
|
0c2da41ac5ddbe0e33657fe51c6ee7d5aeb382ee
|
[
"MIT"
] | 27
|
2020-07-16T06:59:04.000Z
|
2021-11-18T13:22:35.000Z
|
tests/sdc/test_populate.py
|
beda-software/adibox-sdc
|
0c2da41ac5ddbe0e33657fe51c6ee7d5aeb382ee
|
[
"MIT"
] | 2
|
2020-11-06T09:30:09.000Z
|
2021-03-03T09:48:27.000Z
|
from tests.utils import create_parameters
async def test_initial_expression_populate(sdk, safe_db):
q = sdk.client.resource(
"Questionnaire",
**{
"status": "active",
"launchContext": [{"name": "LaunchPatient", "type": "Patient",},],
"item": [
{
"type": "string",
"linkId": "patientId",
"initialExpression": {
"language": "text/fhirpath",
"expression": "%LaunchPatient.id",
},
},
],
},
)
await q.save()
assert q.id is not None
launch_patient = {"resourceType": "Patient", "id": "patienit-id"}
p = await q.execute("$populate", data=create_parameters(LaunchPatient=launch_patient))
assert p == {
"resourceType": "QuestionnaireResponse",
"questionnaire": q.id,
"item": [{"linkId": "patientId", "answer": [{"value": {"string": launch_patient["id"]}}],}],
}
async def test_initial_expression_populate_using_list_endpoint(sdk, safe_db):
q = {
"id": "virtual-id",
"resourceType": "Questionnaire",
"status": "active",
"launchContext": [{"name": "LaunchPatient", "type": "Patient",},],
"item": [
{
"type": "string",
"linkId": "patientId",
"initialExpression": {
"language": "text/fhirpath",
"expression": "%LaunchPatient.id",
},
},
],
}
launch_patient = {"resourceType": "Patient", "id": "patient-id"}
p = await sdk.client.execute(
"Questionnaire/$populate",
data=create_parameters(Questionnaire=q, LaunchPatient=launch_patient),
)
assert p == {
"resourceType": "QuestionnaireResponse",
"questionnaire": q["id"],
"item": [{"linkId": "patientId", "answer": [{"value": {"string": launch_patient["id"]}}],}],
}
async def test_item_context_with_repeats_populate(sdk, safe_db):
q = sdk.client.resource(
"Questionnaire",
**{
"status": "active",
"launchContext": [{"name": "LaunchPatient", "type": "Patient",},],
"item": [
{
"type": "group",
"linkId": "names",
"itemContext": {
"language": "text/fhirpath",
"expression": "%LaunchPatient.name",
},
"item": [
{
"repeats": True,
"type": "string",
"linkId": "firstName",
"initialExpression": {
"language": "text/fhirpath",
"expression": "given",
},
},
],
},
],
},
)
await q.save()
assert q.id is not None
launch_patient = {
"resourceType": "Patient",
"id": "patienit-id",
"name": [{"given": ["Peter", "Middlename"]}, {"given": ["Pit"]}, {"given": ["Little Pitty"]},],
}
p = await q.execute("$populate", data=create_parameters(LaunchPatient=launch_patient))
assert p == {
"item": [
{
"item": [
{
"linkId": "firstName",
"answer": [
{"value": {"string": "Peter"}},
{"value": {"string": "Middlename"}},
{"value": {"string": "Pit"}},
{"value": {"string": "Little Pitty"}},
],
}
],
"linkId": "names",
}
],
"questionnaire": q.id,
"resourceType": "QuestionnaireResponse",
}
async def test_item_context_with_repeating_group_populate(sdk, safe_db):
q = sdk.client.resource(
"Questionnaire",
**{
"status": "active",
"launchContext": [{"name": "LaunchPatient", "type": "Patient",},],
"item": [
{
"type": "group",
"linkId": "addresses",
"repeats": True,
"itemContext": {
"language": "text/fhirpath",
"expression": "%LaunchPatient.address",
},
"item": [
{
"type": "string",
"linkId": "city",
"initialExpression": {
"language": "text/fhirpath",
"expression": "city.first()",
},
},
],
},
],
},
)
await q.save()
assert q.id is not None
launch_patient = {
"resourceType": "Patient",
"id": "patienit-id",
"address": [{"city": "San Francisco"}, {"city": "San Diego"}],
}
p = await q.execute("$populate", data=create_parameters(LaunchPatient=launch_patient))
assert p == {
"item": [
{
"item": [
{
"linkId": "city",
"answer": [
{"value": {"string": "San Francisco"}},
],
}
],
"linkId": "addresses",
},
{
"item": [
{
"linkId": "city",
"answer": [
{"value": {"string": "San Diego"}},
],
}
],
"linkId": "addresses",
}
],
"questionnaire": q.id,
"resourceType": "QuestionnaireResponse",
}
async def test_item_context_without_repeats_populate(sdk, safe_db):
q = sdk.client.resource(
"Questionnaire",
**{
"status": "active",
"launchContext": [{"name": "LaunchPatient", "type": "Patient",},],
"item": [
{
"text": "Address",
"type": "group",
"linkId": "address",
"itemContext": {
"language": "text/fhirpath",
"expression": "%LaunchPatient.address",
},
"item": [
{
"text": "City",
"linkId": "city",
"type": "string",
"initialExpression": {
"language": "text/fhirpath",
"expression": "city",
},
},
{
"text": "Line 1",
"linkId": "line-1",
"type": "string",
"initialExpression": {
"language": "text/fhirpath",
"expression": "line[0]",
},
},
{
"text": "Line 2",
"linkId": "line-2",
"type": "string",
"initialExpression": {
"language": "text/fhirpath",
"expression": "line[1]",
},
},
{
"text": "Country",
"linkId": "Country",
"type": "string",
"initialExpression": {
"language": "text/fhirpath",
"expression": "country",
},
},
],
}
],
},
)
await q.save()
launch_patient = {
"resourceType": "Patient",
"id": "patienit-id",
"address": [
{
"city": "Sydney",
"line": ["Central park", "near metro station museum"],
"country": "Australia",
}
],
}
p = await q.execute("$populate", data=create_parameters(LaunchPatient=launch_patient))
assert p == {
"item": [
{
"item": [
{
"answer": [{"value": {"string": "Sydney"}}],
"linkId": "city",
"text": "City",
},
{
"answer": [{"value": {"string": "Central park"}}],
"linkId": "line-1",
"text": "Line 1",
},
{
"answer": [{"value": {"string": "near metro station " "museum"}}],
"linkId": "line-2",
"text": "Line 2",
},
{
"answer": [{"value": {"string": "Australia"}}],
"linkId": "Country",
"text": "Country",
},
],
"linkId": "address",
"text": "Address",
}
],
"questionnaire": q.id,
"resourceType": "QuestionnaireResponse",
}
async def test_source_queries_populate(sdk, safe_db):
p = sdk.client.resource("Patient")
await p.save()
a = sdk.client.resource(
"Appointment",
**{
"status": "booked",
"start": "2020-01-01T00:00",
"participant": [{"status": "accepted", "actor": p}],
},
)
await a.save()
q = sdk.client.resource(
"Questionnaire",
**{
"status": "active",
"contained": [
{
"resourceType": "Bundle",
"id": "PrePopQuery",
"type": "batch",
"entry": [
{
"request": {
"method": "GET",
"url": "Appointment?patient={{%LaunchPatient.id}}",
},
},
],
}
],
"launchContext": [{"name": "LaunchPatient", "type": "Patient",},],
"sourceQueries": [{"localRef": "Bundle#PrePopQuery"},],
"item": [
{
"type": "string",
"linkId": "last-appointment",
"initialExpression": {
"language": "text/fhirpath",
"expression": "%PrePopQuery.entry.resource.entry.resource.start",
},
},
],
},
)
await q.save()
p = await q.execute("$populate", data=create_parameters(LaunchPatient=p))
assert p == {
"resourceType": "QuestionnaireResponse",
"questionnaire": q.id,
"item": [{"linkId": "last-appointment", "answer": [{"value": {"string": a["start"]}}],}],
}
async def test_multiple_answers_populate(sdk, safe_db):
q = sdk.client.resource(
"Questionnaire",
**{
"status": "active",
"launchContext": [{"name": "Diet", "type": "Bundle",},],
"item": [
{
"type": "choice",
"linkId": "diet",
"repeats": True,
"initialExpression": {
"language": "text/fhirpath",
"expression": "%Diet.entry.resource.oralDiet.type.coding.where(system = 'http://snomed.info/sct')",
},
},
],
},
)
await q.save()
assert q.id is not None
diet = {
"resourceType": "Bundle",
"entry": [
{
"resource": {
"resourceType": "NutritionOrder",
"oralDiet": {
"type": {
"coding": [{"system": "http://snomed.info/sct", "code": "160671006",},]
},
},
},
},
{
"resource": {
"resourceType": "NutritionOrder",
"oralDiet": {"type": {"coding": [{"system": "UNKNOWN", "code": "ABC",},]},},
},
},
{
"resource": {
"resourceType": "NutritionOrder",
"oralDiet": {
"type": {
"coding": [{"system": "http://snomed.info/sct", "code": "302320003",},]
},
},
},
},
],
}
p = await q.execute("$populate", data=create_parameters(Diet=diet))
assert p == {
"resourceType": "QuestionnaireResponse",
"questionnaire": q.id,
"item": [
{
"linkId": "diet",
"answer": [
{
"value": {
"Coding": {"system": "http://snomed.info/sct", "code": "160671006",}
}
},
{
"value": {
"Coding": {"system": "http://snomed.info/sct", "code": "302320003",}
}
},
],
}
],
}
| 31.805369
| 123
| 0.346627
|
2bcacca1fd20eb7692188f2edfb3dd40e2cc8b93
| 27,224
|
py
|
Python
|
tests/sagemaker/test_deployment.py
|
Kublai-Jing/mlflow
|
ad60bf1f6d4496573f8f278efd192fd282dfe5aa
|
[
"Apache-2.0"
] | null | null | null |
tests/sagemaker/test_deployment.py
|
Kublai-Jing/mlflow
|
ad60bf1f6d4496573f8f278efd192fd282dfe5aa
|
[
"Apache-2.0"
] | null | null | null |
tests/sagemaker/test_deployment.py
|
Kublai-Jing/mlflow
|
ad60bf1f6d4496573f8f278efd192fd282dfe5aa
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import
import os
import pytest
import time
import mock
from collections import namedtuple
import boto3
import botocore
import numpy as np
from click.testing import CliRunner
from sklearn.linear_model import LogisticRegression
import mlflow
import mlflow.pyfunc
import mlflow.sklearn
import mlflow.sagemaker as mfs
import mlflow.sagemaker.cli as mfscli
from mlflow.exceptions import MlflowException
from mlflow.models import Model
from mlflow.protos.databricks_pb2 import ErrorCode, RESOURCE_DOES_NOT_EXIST, \
INVALID_PARAMETER_VALUE, INTERNAL_ERROR
from mlflow.store.s3_artifact_repo import S3ArtifactRepository
from mlflow.tracking.artifact_utils import _download_artifact_from_uri
from tests.helper_functions import set_boto_credentials # pylint: disable=unused-import
from tests.sagemaker.mock import mock_sagemaker, Endpoint, EndpointOperation
TrainedModel = namedtuple("TrainedModel", ["model_path", "run_id", "model_uri"])
@pytest.fixture
def pretrained_model():
model_path = "model"
with mlflow.start_run():
X = np.array([-2, -1, 0, 1, 2, 1]).reshape(-1, 1)
y = np.array([0, 0, 1, 1, 1, 0])
lr = LogisticRegression(solver='lbfgs')
lr.fit(X, y)
mlflow.sklearn.log_model(lr, model_path)
run_id = mlflow.active_run().info.run_id
model_uri = "runs:/" + run_id + "/" + model_path
return TrainedModel(model_path, run_id, model_uri)
@pytest.fixture
def sagemaker_client():
return boto3.client("sagemaker", region_name="us-west-2")
def get_sagemaker_backend(region_name):
return mock_sagemaker.backends[region_name]
def mock_sagemaker_aws_services(fn):
# Import `wraps` from `six` instead of `functools` to properly set the
# wrapped function's `__wrapped__` attribute to the required value
# in Python 2
from six import wraps
from moto import mock_s3, mock_ecr, mock_sts, mock_iam
@mock_ecr
@mock_iam
@mock_s3
@mock_sagemaker
@mock_sts
@wraps(fn)
def mock_wrapper(*args, **kwargs):
# Create an ECR repository for the `mlflow-pyfunc` SageMaker docker image
ecr_client = boto3.client("ecr", region_name="us-west-2")
ecr_client.create_repository(repositoryName=mfs.DEFAULT_IMAGE_NAME)
# Create the moto IAM role
role_policy = """
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": "*",
"Resource": "*"
}
]
}
"""
iam_client = boto3.client("iam", region_name="us-west-2")
iam_client.create_role(RoleName="moto", AssumeRolePolicyDocument=role_policy)
return fn(*args, **kwargs)
return mock_wrapper
@pytest.mark.large
def test_deployment_with_unsupported_flavor_raises_exception(pretrained_model):
unsupported_flavor = "this is not a valid flavor"
with pytest.raises(MlflowException) as exc:
mfs.deploy(app_name="bad_flavor",
model_uri=pretrained_model.model_uri,
flavor=unsupported_flavor)
assert exc.value.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
@pytest.mark.large
def test_deployment_with_missing_flavor_raises_exception(pretrained_model):
missing_flavor = "mleap"
with pytest.raises(MlflowException) as exc:
mfs.deploy(app_name="missing-flavor",
model_uri=pretrained_model.model_uri,
flavor=missing_flavor)
assert exc.value.error_code == ErrorCode.Name(RESOURCE_DOES_NOT_EXIST)
@pytest.mark.large
def test_deployment_of_model_with_no_supported_flavors_raises_exception(pretrained_model):
logged_model_path = _download_artifact_from_uri(pretrained_model.model_uri)
model_config_path = os.path.join(logged_model_path, "MLmodel")
model_config = Model.load(model_config_path)
del model_config.flavors[mlflow.pyfunc.FLAVOR_NAME]
model_config.save(path=model_config_path)
with pytest.raises(MlflowException) as exc:
mfs.deploy(app_name="missing-flavor",
model_uri=logged_model_path,
flavor=None)
assert exc.value.error_code == ErrorCode.Name(RESOURCE_DOES_NOT_EXIST)
@pytest.mark.large
def test_validate_deployment_flavor_validates_python_function_flavor_successfully(
pretrained_model):
model_config_path = os.path.join(
_download_artifact_from_uri(pretrained_model.model_uri), "MLmodel")
model_config = Model.load(model_config_path)
mfs._validate_deployment_flavor(
model_config=model_config, flavor=mlflow.pyfunc.FLAVOR_NAME)
@pytest.mark.large
def test_get_preferred_deployment_flavor_obtains_valid_flavor_from_model(pretrained_model):
model_config_path = os.path.join(
_download_artifact_from_uri(pretrained_model.model_uri), "MLmodel")
model_config = Model.load(model_config_path)
selected_flavor = mfs._get_preferred_deployment_flavor(model_config=model_config)
assert selected_flavor in mfs.SUPPORTED_DEPLOYMENT_FLAVORS
assert selected_flavor in model_config.flavors
@pytest.mark.large
def test_attempting_to_deploy_in_asynchronous_mode_without_archiving_throws_exception(
pretrained_model):
with pytest.raises(MlflowException) as exc:
mfs.deploy(app_name="test-app",
model_uri=pretrained_model.model_uri,
mode=mfs.DEPLOYMENT_MODE_CREATE,
archive=False,
synchronous=False)
assert "Resources must be archived" in exc.value.message
assert exc.value.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
@pytest.mark.large
@mock_sagemaker_aws_services
def test_deploy_creates_sagemaker_and_s3_resources_with_expected_names_from_local(
pretrained_model, sagemaker_client):
app_name = "test-app"
mfs.deploy(app_name=app_name,
model_uri=pretrained_model.model_uri,
mode=mfs.DEPLOYMENT_MODE_CREATE)
region_name = sagemaker_client.meta.region_name
s3_client = boto3.client("s3", region_name=region_name)
default_bucket = mfs._get_default_s3_bucket(region_name)
endpoint_description = sagemaker_client.describe_endpoint(EndpointName=app_name)
endpoint_production_variants = endpoint_description["ProductionVariants"]
assert len(endpoint_production_variants) == 1
model_name = endpoint_production_variants[0]["VariantName"]
assert model_name in [
model["ModelName"] for model in sagemaker_client.list_models()["Models"]
]
object_names = [
entry["Key"] for entry in s3_client.list_objects(Bucket=default_bucket)["Contents"]
]
assert any([model_name in object_name for object_name in object_names])
assert any([app_name in config["EndpointConfigName"]
for config in sagemaker_client.list_endpoint_configs()["EndpointConfigs"]])
assert app_name in [endpoint["EndpointName"]
for endpoint in sagemaker_client.list_endpoints()["Endpoints"]]
@pytest.mark.large
@mock_sagemaker_aws_services
def test_deploy_cli_creates_sagemaker_and_s3_resources_with_expected_names_from_local(
pretrained_model, sagemaker_client):
app_name = "test-app"
result = CliRunner(env={"LC_ALL": "en_US.UTF-8", "LANG": "en_US.UTF-8"}).invoke(
mfscli.commands,
[
'deploy',
'-a', app_name,
'-m', pretrained_model.model_uri,
'--mode', mfs.DEPLOYMENT_MODE_CREATE,
])
assert result.exit_code == 0
region_name = sagemaker_client.meta.region_name
s3_client = boto3.client("s3", region_name=region_name)
default_bucket = mfs._get_default_s3_bucket(region_name)
endpoint_description = sagemaker_client.describe_endpoint(EndpointName=app_name)
endpoint_production_variants = endpoint_description["ProductionVariants"]
assert len(endpoint_production_variants) == 1
model_name = endpoint_production_variants[0]["VariantName"]
assert model_name in [
model["ModelName"] for model in sagemaker_client.list_models()["Models"]
]
object_names = [
entry["Key"] for entry in s3_client.list_objects(Bucket=default_bucket)["Contents"]
]
assert any([model_name in object_name for object_name in object_names])
assert any([app_name in config["EndpointConfigName"]
for config in sagemaker_client.list_endpoint_configs()["EndpointConfigs"]])
assert app_name in [endpoint["EndpointName"]
for endpoint in sagemaker_client.list_endpoints()["Endpoints"]]
@pytest.mark.large
@mock_sagemaker_aws_services
def test_deploy_creates_sagemaker_and_s3_resources_with_expected_names_from_s3(
pretrained_model, sagemaker_client):
local_model_path = _download_artifact_from_uri(pretrained_model.model_uri)
artifact_path = "model"
region_name = sagemaker_client.meta.region_name
default_bucket = mfs._get_default_s3_bucket(region_name)
s3_artifact_repo = S3ArtifactRepository('s3://{}'.format(default_bucket))
s3_artifact_repo.log_artifacts(local_model_path, artifact_path=artifact_path)
model_s3_uri = 's3://{bucket_name}/{artifact_path}'.format(
bucket_name=default_bucket, artifact_path=pretrained_model.model_path)
app_name = "test-app"
mfs.deploy(app_name=app_name,
model_uri=model_s3_uri,
mode=mfs.DEPLOYMENT_MODE_CREATE)
endpoint_description = sagemaker_client.describe_endpoint(EndpointName=app_name)
endpoint_production_variants = endpoint_description["ProductionVariants"]
assert len(endpoint_production_variants) == 1
model_name = endpoint_production_variants[0]["VariantName"]
assert model_name in [
model["ModelName"] for model in sagemaker_client.list_models()["Models"]
]
s3_client = boto3.client("s3", region_name=region_name)
object_names = [
entry["Key"] for entry in s3_client.list_objects(Bucket=default_bucket)["Contents"]
]
assert any([model_name in object_name for object_name in object_names])
assert any([app_name in config["EndpointConfigName"]
for config in sagemaker_client.list_endpoint_configs()["EndpointConfigs"]])
assert app_name in [endpoint["EndpointName"]
for endpoint in sagemaker_client.list_endpoints()["Endpoints"]]
@pytest.mark.large
@mock_sagemaker_aws_services
def test_deploy_cli_creates_sagemaker_and_s3_resources_with_expected_names_from_s3(
pretrained_model, sagemaker_client):
local_model_path = _download_artifact_from_uri(pretrained_model.model_uri)
artifact_path = "model"
region_name = sagemaker_client.meta.region_name
default_bucket = mfs._get_default_s3_bucket(region_name)
s3_artifact_repo = S3ArtifactRepository('s3://{}'.format(default_bucket))
s3_artifact_repo.log_artifacts(local_model_path, artifact_path=artifact_path)
model_s3_uri = 's3://{bucket_name}/{artifact_path}'.format(
bucket_name=default_bucket, artifact_path=pretrained_model.model_path)
app_name = "test-app"
result = CliRunner(env={"LC_ALL": "en_US.UTF-8", "LANG": "en_US.UTF-8"}).invoke(
mfscli.commands,
[
'deploy',
'-a', app_name,
'-m', model_s3_uri,
'--mode', mfs.DEPLOYMENT_MODE_CREATE,
])
assert result.exit_code == 0
region_name = sagemaker_client.meta.region_name
s3_client = boto3.client("s3", region_name=region_name)
default_bucket = mfs._get_default_s3_bucket(region_name)
endpoint_description = sagemaker_client.describe_endpoint(EndpointName=app_name)
endpoint_production_variants = endpoint_description["ProductionVariants"]
assert len(endpoint_production_variants) == 1
model_name = endpoint_production_variants[0]["VariantName"]
assert model_name in [
model["ModelName"] for model in sagemaker_client.list_models()["Models"]
]
object_names = [
entry["Key"] for entry in s3_client.list_objects(Bucket=default_bucket)["Contents"]
]
assert any([model_name in object_name for object_name in object_names])
assert any([app_name in config["EndpointConfigName"]
for config in sagemaker_client.list_endpoint_configs()["EndpointConfigs"]])
assert app_name in [endpoint["EndpointName"]
for endpoint in sagemaker_client.list_endpoints()["Endpoints"]]
@pytest.mark.large
@mock_sagemaker_aws_services
def test_deploying_application_with_preexisting_name_in_create_mode_throws_exception(
pretrained_model):
app_name = "test-app"
mfs.deploy(app_name=app_name,
model_uri=pretrained_model.model_uri,
mode=mfs.DEPLOYMENT_MODE_CREATE)
with pytest.raises(MlflowException) as exc:
mfs.deploy(app_name=app_name,
model_uri=pretrained_model.model_uri,
mode=mfs.DEPLOYMENT_MODE_CREATE)
assert "an application with the same name already exists" in exc.value.message
assert exc.value.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
@pytest.mark.large
@mock_sagemaker_aws_services
def test_deploy_in_synchronous_mode_waits_for_endpoint_creation_to_complete_before_returning(
pretrained_model, sagemaker_client):
endpoint_creation_latency = 10
get_sagemaker_backend(sagemaker_client.meta.region_name).set_endpoint_update_latency(
endpoint_creation_latency)
app_name = "test-app"
deployment_start_time = time.time()
mfs.deploy(app_name=app_name,
model_uri=pretrained_model.model_uri,
mode=mfs.DEPLOYMENT_MODE_CREATE,
synchronous=True)
deployment_end_time = time.time()
assert (deployment_end_time - deployment_start_time) >= endpoint_creation_latency
endpoint_description = sagemaker_client.describe_endpoint(EndpointName=app_name)
assert endpoint_description["EndpointStatus"] == Endpoint.STATUS_IN_SERVICE
@pytest.mark.large
@mock_sagemaker_aws_services
def test_deploy_create_in_asynchronous_mode_returns_before_endpoint_creation_completes(
pretrained_model, sagemaker_client):
endpoint_creation_latency = 10
get_sagemaker_backend(sagemaker_client.meta.region_name).set_endpoint_update_latency(
endpoint_creation_latency)
app_name = "test-app"
deployment_start_time = time.time()
mfs.deploy(app_name=app_name,
model_uri=pretrained_model.model_uri,
mode=mfs.DEPLOYMENT_MODE_CREATE,
synchronous=False,
archive=True)
deployment_end_time = time.time()
assert (deployment_end_time - deployment_start_time) < endpoint_creation_latency
endpoint_description = sagemaker_client.describe_endpoint(EndpointName=app_name)
assert endpoint_description["EndpointStatus"] == Endpoint.STATUS_CREATING
@pytest.mark.large
@mock_sagemaker_aws_services
def test_deploy_replace_in_asynchronous_mode_returns_before_endpoint_creation_completes(
pretrained_model, sagemaker_client):
endpoint_update_latency = 10
get_sagemaker_backend(sagemaker_client.meta.region_name).set_endpoint_update_latency(
endpoint_update_latency)
app_name = "test-app"
mfs.deploy(app_name=app_name,
model_uri=pretrained_model.model_uri,
mode=mfs.DEPLOYMENT_MODE_CREATE,
synchronous=True)
update_start_time = time.time()
mfs.deploy(app_name=app_name,
model_uri=pretrained_model.model_uri,
mode=mfs.DEPLOYMENT_MODE_REPLACE,
synchronous=False,
archive=True)
update_end_time = time.time()
assert (update_end_time - update_start_time) < endpoint_update_latency
endpoint_description = sagemaker_client.describe_endpoint(EndpointName=app_name)
assert endpoint_description["EndpointStatus"] == Endpoint.STATUS_UPDATING
@pytest.mark.large
@mock_sagemaker_aws_services
def test_deploy_in_create_mode_throws_exception_after_endpoint_creation_fails(
pretrained_model, sagemaker_client):
endpoint_creation_latency = 10
sagemaker_backend = get_sagemaker_backend(sagemaker_client.meta.region_name)
sagemaker_backend.set_endpoint_update_latency(endpoint_creation_latency)
boto_caller = botocore.client.BaseClient._make_api_call
def fail_endpoint_creations(self, operation_name, operation_kwargs):
"""
Processes all boto3 client operations according to the following rules:
- If the operation is an endpoint creation, create the endpoint and set its status to
``Endpoint.STATUS_FAILED``.
- Else, execute the client operation as normal
"""
result = boto_caller(self, operation_name, operation_kwargs)
if operation_name == "CreateEndpoint":
endpoint_name = operation_kwargs["EndpointName"]
sagemaker_backend.set_endpoint_latest_operation(
endpoint_name=endpoint_name,
operation=EndpointOperation.create_unsuccessful(
latency_seconds=endpoint_creation_latency))
return result
with mock.patch("botocore.client.BaseClient._make_api_call", new=fail_endpoint_creations),\
pytest.raises(MlflowException) as exc:
mfs.deploy(app_name="test-app",
model_uri=pretrained_model.model_uri,
mode=mfs.DEPLOYMENT_MODE_CREATE)
assert "deployment operation failed" in exc.value.message
assert exc.value.error_code == ErrorCode.Name(INTERNAL_ERROR)
@pytest.mark.large
@mock_sagemaker_aws_services
def test_deploy_in_add_mode_adds_new_model_to_existing_endpoint(pretrained_model, sagemaker_client):
app_name = "test-app"
mfs.deploy(app_name=app_name,
model_uri=pretrained_model.model_uri,
mode=mfs.DEPLOYMENT_MODE_CREATE)
models_added = 1
for _ in range(11):
mfs.deploy(app_name=app_name,
model_uri=pretrained_model.model_uri,
mode=mfs.DEPLOYMENT_MODE_ADD,
archive=True,
synchronous=False)
models_added += 1
endpoint_response = sagemaker_client.describe_endpoint(EndpointName=app_name)
endpoint_config_name = endpoint_response["EndpointConfigName"]
endpoint_config_response = sagemaker_client.describe_endpoint_config(
EndpointConfigName=endpoint_config_name)
production_variants = endpoint_config_response["ProductionVariants"]
assert len(production_variants) == models_added
@pytest.mark.large
@mock_sagemaker_aws_services
def test_deploy_in_replace_model_removes_preexisting_models_from_endpoint(
pretrained_model, sagemaker_client):
app_name = "test-app"
mfs.deploy(app_name=app_name,
model_uri=pretrained_model.model_uri,
mode=mfs.DEPLOYMENT_MODE_ADD)
for _ in range(11):
mfs.deploy(app_name=app_name,
model_uri=pretrained_model.model_uri,
mode=mfs.DEPLOYMENT_MODE_ADD,
archive=True,
synchronous=False)
endpoint_response_before_replacement = sagemaker_client.describe_endpoint(EndpointName=app_name)
endpoint_config_name_before_replacement =\
endpoint_response_before_replacement["EndpointConfigName"]
endpoint_config_response_before_replacement = sagemaker_client.describe_endpoint_config(
EndpointConfigName=endpoint_config_name_before_replacement)
production_variants_before_replacement =\
endpoint_config_response_before_replacement["ProductionVariants"]
deployed_models_before_replacement = [
variant["ModelName"] for variant in production_variants_before_replacement]
mfs.deploy(app_name=app_name,
model_uri=pretrained_model.model_uri,
mode=mfs.DEPLOYMENT_MODE_REPLACE,
archive=True,
synchronous=False)
endpoint_response_after_replacement = sagemaker_client.describe_endpoint(EndpointName=app_name)
endpoint_config_name_after_replacement =\
endpoint_response_after_replacement["EndpointConfigName"]
endpoint_config_response_after_replacement = sagemaker_client.describe_endpoint_config(
EndpointConfigName=endpoint_config_name_after_replacement)
production_variants_after_replacement =\
endpoint_config_response_after_replacement["ProductionVariants"]
deployed_models_after_replacement = [
variant["ModelName"] for variant in production_variants_after_replacement]
assert len(deployed_models_after_replacement) == 1
assert all([model_name not in deployed_models_after_replacement
for model_name in deployed_models_before_replacement])
@pytest.mark.large
@mock_sagemaker_aws_services
def test_deploy_in_replace_mode_throws_exception_after_endpoint_update_fails(
pretrained_model, sagemaker_client):
endpoint_update_latency = 5
sagemaker_backend = get_sagemaker_backend(sagemaker_client.meta.region_name)
sagemaker_backend.set_endpoint_update_latency(endpoint_update_latency)
app_name = "test-app"
mfs.deploy(app_name=app_name,
model_uri=pretrained_model.model_uri,
mode=mfs.DEPLOYMENT_MODE_CREATE)
boto_caller = botocore.client.BaseClient._make_api_call
def fail_endpoint_updates(self, operation_name, operation_kwargs):
"""
Processes all boto3 client operations according to the following rules:
- If the operation is an endpoint update, update the endpoint and set its status to
``Endpoint.STATUS_FAILED``.
- Else, execute the client operation as normal
"""
result = boto_caller(self, operation_name, operation_kwargs)
if operation_name == "UpdateEndpoint":
endpoint_name = operation_kwargs["EndpointName"]
sagemaker_backend.set_endpoint_latest_operation(
endpoint_name=endpoint_name,
operation=EndpointOperation.update_unsuccessful(
latency_seconds=endpoint_update_latency))
return result
with mock.patch("botocore.client.BaseClient._make_api_call", new=fail_endpoint_updates),\
pytest.raises(MlflowException) as exc:
mfs.deploy(app_name="test-app",
model_uri=pretrained_model.model_uri,
mode=mfs.DEPLOYMENT_MODE_REPLACE)
assert "deployment operation failed" in exc.value.message
assert exc.value.error_code == ErrorCode.Name(INTERNAL_ERROR)
@pytest.mark.large
@mock_sagemaker_aws_services
def test_deploy_in_replace_mode_waits_for_endpoint_update_completion_before_deleting_resources(
pretrained_model, sagemaker_client):
endpoint_update_latency = 10
sagemaker_backend = get_sagemaker_backend(sagemaker_client.meta.region_name)
sagemaker_backend.set_endpoint_update_latency(endpoint_update_latency)
app_name = "test-app"
mfs.deploy(app_name=app_name,
model_uri=pretrained_model.model_uri,
mode=mfs.DEPLOYMENT_MODE_CREATE)
endpoint_config_name_before_replacement = sagemaker_client.describe_endpoint(
EndpointName=app_name)["EndpointConfigName"]
boto_caller = botocore.client.BaseClient._make_api_call
update_start_time = time.time()
def validate_deletes(self, operation_name, operation_kwargs):
"""
Processes all boto3 client operations according to the following rules:
- If the operation deletes an S3 or SageMaker resource, ensure that the deletion was
initiated after the completion of the endpoint update
- Else, execute the client operation as normal
"""
result = boto_caller(self, operation_name, operation_kwargs)
if "Delete" in operation_name:
# Confirm that a successful endpoint update occurred prior to the invocation of this
# delete operation
endpoint_info = sagemaker_client.describe_endpoint(EndpointName=app_name)
assert endpoint_info["EndpointStatus"] == Endpoint.STATUS_IN_SERVICE
assert endpoint_info["EndpointConfigName"] != endpoint_config_name_before_replacement
assert time.time() - update_start_time >= endpoint_update_latency
return result
with mock.patch("botocore.client.BaseClient._make_api_call", new=validate_deletes):
mfs.deploy(app_name=app_name,
model_uri=pretrained_model.model_uri,
mode=mfs.DEPLOYMENT_MODE_REPLACE,
archive=False)
@pytest.mark.large
@mock_sagemaker_aws_services
def test_deploy_in_replace_mode_with_archiving_does_not_delete_resources(
pretrained_model, sagemaker_client):
region_name = sagemaker_client.meta.region_name
sagemaker_backend = get_sagemaker_backend(region_name)
sagemaker_backend.set_endpoint_update_latency(5)
app_name = "test-app"
mfs.deploy(app_name=app_name,
model_uri=pretrained_model.model_uri,
mode=mfs.DEPLOYMENT_MODE_CREATE)
s3_client = boto3.client("s3", region_name=region_name)
default_bucket = mfs._get_default_s3_bucket(region_name)
object_names_before_replacement = [
entry["Key"] for entry in s3_client.list_objects(Bucket=default_bucket)["Contents"]]
endpoint_configs_before_replacement = [
config["EndpointConfigName"] for config in
sagemaker_client.list_endpoint_configs()["EndpointConfigs"]]
models_before_replacement = [
model["ModelName"] for model in sagemaker_client.list_models()["Models"]]
sk_model = mlflow.sklearn.load_model(
path=pretrained_model.model_path, run_id=pretrained_model.run_id)
new_artifact_path = "model"
with mlflow.start_run():
mlflow.sklearn.log_model(sk_model=sk_model, artifact_path=new_artifact_path)
new_model_uri = "runs:/{run_id}/{artifact_path}".format(
run_id=mlflow.active_run().info.run_id, artifact_path=new_artifact_path)
mfs.deploy(app_name=app_name,
model_uri=new_model_uri,
mode=mfs.DEPLOYMENT_MODE_REPLACE,
archive=True,
synchronous=True)
object_names_after_replacement = [
entry["Key"] for entry in s3_client.list_objects(Bucket=default_bucket)["Contents"]]
endpoint_configs_after_replacement = [
config["EndpointConfigName"] for config in
sagemaker_client.list_endpoint_configs()["EndpointConfigs"]]
models_after_replacement = [
model["ModelName"] for model in sagemaker_client.list_models()["Models"]]
assert all([object_name in object_names_after_replacement
for object_name in object_names_before_replacement])
assert all([endpoint_config in endpoint_configs_after_replacement
for endpoint_config in endpoint_configs_before_replacement])
assert all([model in models_after_replacement for model in models_before_replacement])
| 42.604069
| 100
| 0.728181
|
68842b90a583a5aea244d6f55aa00a3004c2bb04
| 3,743
|
py
|
Python
|
intel-sds-proto/vsm_configure_guide/packages/vsmclient/python-vsmclient/build/lib.linux-x86_64-2.7/vsmclient/v1/vsm_snapshots.py
|
opensds/proposals
|
03735f5e19203bdff698454f2633ca483c92129d
|
[
"Apache-2.0"
] | 5
|
2017-03-21T09:11:55.000Z
|
2018-11-19T14:44:36.000Z
|
intel-sds-proto/vsm_configure_guide/packages/vsmclient/python-vsmclient/vsmclient/v1/vsm_snapshots.py
|
opensds/proposals
|
03735f5e19203bdff698454f2633ca483c92129d
|
[
"Apache-2.0"
] | 3
|
2018-02-06T06:17:10.000Z
|
2020-07-10T17:29:47.000Z
|
intel-sds-proto/vsm_configure_guide/packages/vsmclient/python-vsmclient/build/lib.linux-x86_64-2.7/vsmclient/v1/vsm_snapshots.py
|
opensds/proposals
|
03735f5e19203bdff698454f2633ca483c92129d
|
[
"Apache-2.0"
] | 7
|
2018-02-06T03:54:13.000Z
|
2021-09-08T10:51:38.000Z
|
# Copyright 2011 Denali Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume snapshot interface (1.1 extension).
"""
import urllib
from vsmclient import base
class Snapshot(base.Resource):
"""
A Snapshot is a point-in-time snapshot of an openstack vsm.
"""
def __repr__(self):
return "<Snapshot: %s>" % self.id
def delete(self):
"""
Delete this snapshot.
"""
self.manager.delete(self)
def update(self, **kwargs):
"""
Update the display_name or display_description for this snapshot.
"""
self.manager.update(self, **kwargs)
@property
def progress(self):
return self._info.get('os-extended-snapshot-attributes:progress')
@property
def project_id(self):
return self._info.get('os-extended-snapshot-attributes:project_id')
class SnapshotManager(base.ManagerWithFind):
"""
Manage :class:`Snapshot` resources.
"""
resource_class = Snapshot
def create(self, vsm_id, force=False,
display_name=None, display_description=None):
"""
Create a snapshot of the given vsm.
:param vsm_id: The ID of the vsm to snapshot.
:param force: If force is True, create a snapshot even if the vsm is
attached to an instance. Default is False.
:param display_name: Name of the snapshot
:param display_description: Description of the snapshot
:rtype: :class:`Snapshot`
"""
body = {'snapshot': {'vsm_id': vsm_id,
'force': force,
'display_name': display_name,
'display_description': display_description}}
return self._create('/snapshots', body, 'snapshot')
def get(self, snapshot_id):
"""
Get a snapshot.
:param snapshot_id: The ID of the snapshot to get.
:rtype: :class:`Snapshot`
"""
return self._get("/snapshots/%s" % snapshot_id, "snapshot")
def list(self, detailed=True, search_opts=None):
"""
Get a list of all snapshots.
:rtype: list of :class:`Snapshot`
"""
if search_opts is None:
search_opts = {}
qparams = {}
for opt, val in search_opts.iteritems():
if val:
qparams[opt] = val
query_string = "?%s" % urllib.urlencode(qparams) if qparams else ""
detail = ""
if detailed:
detail = "/detail"
return self._list("/snapshots%s%s" % (detail, query_string),
"snapshots")
def delete(self, snapshot):
"""
Delete a snapshot.
:param snapshot: The :class:`Snapshot` to delete.
"""
self._delete("/snapshots/%s" % base.getid(snapshot))
def update(self, snapshot, **kwargs):
"""
Update the display_name or display_description for a snapshot.
:param snapshot: The :class:`Snapshot` to update.
"""
if not kwargs:
return
body = {"snapshot": kwargs}
self._update("/snapshots/%s" % base.getid(snapshot), body)
| 28.572519
| 78
| 0.595512
|
d27da2089adcce348ae644644bcb466a4dfb1013
| 773
|
py
|
Python
|
algorithmic_time_complexity_plotter/plotter.py
|
MihaiAC/Algorithms-and-Data-Structures
|
23356090816e1ca53707128d243592a358212e47
|
[
"MIT"
] | null | null | null |
algorithmic_time_complexity_plotter/plotter.py
|
MihaiAC/Algorithms-and-Data-Structures
|
23356090816e1ca53707128d243592a358212e47
|
[
"MIT"
] | null | null | null |
algorithmic_time_complexity_plotter/plotter.py
|
MihaiAC/Algorithms-and-Data-Structures
|
23356090816e1ca53707128d243592a358212e47
|
[
"MIT"
] | null | null | null |
from matplotlib import pyplot
from functools import partial
import timeit
class Plotter:
#generator = generates a list of appropriate arguments for the function, based on the previous list of arguments
#func = function of which the runtime will be measured
#n = number of plot points (executions of function func)
@staticmethod
def plotter(func, generator, n):
x = range(1,n)
y = []
argss = []
for i in x:
argss = generator(argss)
#By increasing "number", the plot gets smoother, but takes longer to generate.
#"number" = number of times the timing is repeated.
y.append(timeit.timeit(partial(func,argss), number = 20))
#Plot the graph:
pyplot.plot(x,y)
pyplot.show()
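# --- Illustrative addition (not in the original file) ---
# Hypothetical driver showing the generator contract described above: the callable
# receives the previous argument list and returns the next, larger one. Here the
# built-in sum() is timed on lists that grow by 100 integers per plot point.
if __name__ == "__main__":
    grow_by_100 = lambda prev: prev + list(range(100))
    Plotter.plotter(sum, grow_by_100, 40)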
| 35.136364
| 115
| 0.637775
|
d6b820669860a875206e6c576d38ec80bebf16e7
| 2,001
|
py
|
Python
|
code/autoencoder.py
|
stjordanis/ml-cheatsheet
|
d34e096032b7ae826868be8808aee01699cec491
|
[
"MIT"
] | 1,031
|
2019-12-04T23:51:59.000Z
|
2022-03-31T10:44:51.000Z
|
code/autoencoder.py
|
stjordanis/ml-cheatsheet
|
d34e096032b7ae826868be8808aee01699cec491
|
[
"MIT"
] | 41
|
2019-12-04T17:21:22.000Z
|
2022-02-26T08:43:01.000Z
|
code/autoencoder.py
|
stjordanis/ml-cheatsheet
|
d34e096032b7ae826868be8808aee01699cec491
|
[
"MIT"
] | 253
|
2019-12-06T06:46:08.000Z
|
2022-03-23T01:24:16.000Z
|
import torch.nn as nn
from torch.autograd import Variable
class Autoencoder(nn.Module):
def __init__(self, in_shape):
super().__init__()
c,h,w = in_shape
self.encoder = nn.Sequential(
nn.Linear(c*h*w, 128),
nn.ReLU(),
nn.Linear(128, 64),
nn.ReLU(),
nn.Linear(64, 12),
nn.ReLU()
)
self.decoder = nn.Sequential(
nn.Linear(12, 64),
nn.ReLU(),
nn.Linear(64, 128),
nn.ReLU(),
nn.Linear(128, c*h*w),
nn.Sigmoid()
)
def forward(self, x):
bs,c,h,w = x.size()
x = x.view(bs, -1)
x = self.encoder(x)
x = self.decoder(x)
x = x.view(bs, c, h, w)
return x
class ConvAutoencoder(nn.Module):
def __init__(self, in_shape):
super().__init__()
c,h,w = in_shape
self.encoder = nn.Sequential(
nn.Conv2d(c, 16, kernel_size=3, stride=1, padding=1), # b, 16, 32, 32
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2), # b, 16, 16, 16
nn.Conv2d(16, 8, kernel_size=3, stride=1, padding=1), # b, 8, 16, 16
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2) # b, 8, 8, 8
)
self.decoder = nn.Sequential(
nn.ConvTranspose2d(8, 16, kernel_size=3, stride=2, padding=0), # 16, 17, 17
nn.ReLU(),
nn.ConvTranspose2d(16, c, kernel_size=3, stride=2, padding=1), # 3, 33, 33
CenterCrop(h, w), # 3, 32, 32
nn.Sigmoid()
)
def forward(self, x):
x = self.encoder(x)
x = self.decoder(x)
return x
def train(net, loader, loss_func, optimizer):
net.train()
for inputs, _ in loader:
inputs = Variable(inputs)
output = net(inputs)
loss = loss_func(output, inputs)
optimizer.zero_grad()
loss.backward()
optimizer.step()
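# --- Illustrative addition (not in the original file) ---
# Minimal wiring for the fully connected Autoencoder with the train() helper above,
# using random 3x32x32 images; the DataLoader, batch size, learning rate and MSE loss
# are demo assumptions. ConvAutoencoder is skipped here because it relies on a
# CenterCrop module that is not defined in this file.
import torch
from torch.utils.data import DataLoader, TensorDataset

if __name__ == "__main__":
    images = torch.rand(64, 3, 32, 32)    # fake dataset in [0, 1)
    labels = torch.zeros(64)              # unused by the autoencoder
    loader = DataLoader(TensorDataset(images, labels), batch_size=16)
    net = Autoencoder(in_shape=(3, 32, 32))
    optimizer = torch.optim.Adam(net.parameters(), lr=1e-3)
    train(net, loader, nn.MSELoss(), optimizer)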
| 27.410959
| 88
| 0.50075
|
82f889092288f46a74d16aabc0c4571dad4852ce
| 1,757
|
py
|
Python
|
RRT.py
|
garymullen/python-visualization-of-the-RRT-algorithm-with-pygame
|
deeb7122ffc247982463e1358004a51c1886307a
|
[
"MIT"
] | 4
|
2021-05-24T00:56:53.000Z
|
2022-01-23T19:17:09.000Z
|
RRT.py
|
garymullen/python-visualization-of-the-RRT-algorithm-with-pygame
|
deeb7122ffc247982463e1358004a51c1886307a
|
[
"MIT"
] | 1
|
2021-08-07T11:20:26.000Z
|
2021-08-07T11:20:26.000Z
|
RRT.py
|
garymullen/python-visualization-of-the-RRT-algorithm-with-pygame
|
deeb7122ffc247982463e1358004a51c1886307a
|
[
"MIT"
] | 4
|
2021-07-24T15:19:32.000Z
|
2022-02-19T12:57:10.000Z
|
import pygame
from RRTbasePy import RRTGraph
from RRTbasePy import RRTMap
import time
def main():
dimensions =(512,512)
start=(50,50)
goal=(300,300)
obsdim=30
obsnum=50
iteration=0
t1=0
pygame.init()
map=RRTMap(start,goal,dimensions,obsdim,obsnum)
graph=RRTGraph(start,goal,dimensions,obsdim,obsnum)
obstacles=graph.makeobs()
map.drawMap(obstacles)
t1=time.time()
while (not graph.path_to_goal()):
time.sleep(0.005)
elapsed=time.time()-t1
t1=time.time()
#raise an exception on timeout so the outer retry loop restarts the search
if elapsed > 10:
print('timeout re-initiating the calculations')
raise TimeoutError('timeout while searching for a path to the goal')
if iteration % 10 == 0:
X, Y, Parent = graph.bias(goal)
pygame.draw.circle(map.map, map.grey, (X[-1], Y[-1]), map.nodeRad*2, 0)
pygame.draw.line(map.map, map.Blue, (X[-1], Y[-1]), (X[Parent[-1]], Y[Parent[-1]]),
map.edgeThickness)
else:
X, Y, Parent = graph.expand()
pygame.draw.circle(map.map, map.grey, (X[-1], Y[-1]), map.nodeRad*2, 0)
pygame.draw.line(map.map, map.Blue, (X[-1], Y[-1]), (X[Parent[-1]], Y[Parent[-1]]),
map.edgeThickness)
if iteration % 5 == 0:
pygame.display.update()
iteration += 1
map.drawPath(graph.getPathCoords())
pygame.display.update()
pygame.event.clear()
pygame.event.wait(0)
if __name__ == '__main__':
result=False
while not result:
try:
main()
result=True
        except Exception:
result=False
| 19.741573
| 96
| 0.515083
|
34cc61dcdea320c9c0bbb06dcf12fe8d2dacee45
| 1,255
|
py
|
Python
|
modules/splunk_sdk.py
|
julianwieg/attack_range_local
|
b32de256726033de144790ad8640899f3b80cc48
|
[
"Apache-2.0"
] | 101
|
2020-09-20T23:21:24.000Z
|
2022-03-14T20:57:28.000Z
|
modules/splunk_sdk.py
|
julianwieg/attack_range_local
|
b32de256726033de144790ad8640899f3b80cc48
|
[
"Apache-2.0"
] | 34
|
2020-09-17T21:33:12.000Z
|
2022-03-09T09:40:31.000Z
|
modules/splunk_sdk.py
|
julianwieg/attack_range_local
|
b32de256726033de144790ad8640899f3b80cc48
|
[
"Apache-2.0"
] | 36
|
2020-09-20T03:29:21.000Z
|
2022-03-29T14:11:24.000Z
|
import sys
import time
from time import sleep
import splunklib.results as results
import splunklib.client as client
import requests
from xml.etree import ElementTree
def export_search(host, s, password, export_mode="raw", out=sys.stdout, username="admin", port=8089):
"""
Exports events from a search using Splunk REST API to a local file.
This is faster than performing a search/export from Splunk Python SDK.
@param host: splunk server address
@param s: search that matches events
@param password: Splunk server password
@param export_mode: default `raw`. `csv`, `xml`, or `json`
@param out: local file pointer to write the results
@param username: Splunk server username
@param port: Splunk server port
"""
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
r = requests.post("https://%s:%d/servicesNS/admin/search/search/jobs/export" % (host, port),
auth=(username, password),
data={'output_mode': export_mode,
'search': s,
'max_count': 1000000},
verify=False)
out.write(r.text.encode('utf-8'))
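# Hedged usage sketch (illustrative only): export the last hour of events to a local
# JSON file. The host, credentials and search string are placeholders; also note that
# export_search writes bytes, so `out` should be opened in binary mode rather than
# left as the default sys.stdout.
if __name__ == '__main__':
    with open('export.json', 'wb') as out_file:
        export_search(host='splunk.example.com',           # placeholder host
                      s='search index=main earliest=-1h',  # placeholder search
                      password='changeme',                 # placeholder credential
                      export_mode='json',
                      out=out_file,
                      username='admin')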
| 34.861111
| 101
| 0.661355
|
40be783019bc9221eb5d49fa012e23aeefd6872e
| 262
|
py
|
Python
|
mathgenerator/funcs/hcfFunc.py
|
furins/mathgenerator
|
3ba50015ef4d9abaa404b5d3b9bb272cfdcf2deb
|
[
"MIT"
] | null | null | null |
mathgenerator/funcs/hcfFunc.py
|
furins/mathgenerator
|
3ba50015ef4d9abaa404b5d3b9bb272cfdcf2deb
|
[
"MIT"
] | null | null | null |
mathgenerator/funcs/hcfFunc.py
|
furins/mathgenerator
|
3ba50015ef4d9abaa404b5d3b9bb272cfdcf2deb
|
[
"MIT"
] | null | null | null |
from .__init__ import *
def hcfFunc(maxVal=20):
a = random.randint(1, maxVal)
b = random.randint(1, maxVal)
x, y = a, b
while(y):
x, y = y, x % y
problem = f"HCF of {a} and {b} = "
solution = str(x)
return problem, solution
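# Hedged sanity-check sketch (illustrative; because of the relative import above this
# module is normally imported from the package rather than run as a script). It checks
# one generated problem against math.gcd.
if __name__ == '__main__':
    import math
    problem, solution = hcfFunc(maxVal=20)
    print(problem + solution)  # e.g. "HCF of 12 and 18 = 6"
    a, b = [int(tok) for tok in problem.split() if tok.isdigit()]
    assert int(solution) == math.gcd(a, b)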
| 21.833333
| 38
| 0.549618
|
e0e35c7f7ad169d750cb4d1183be2e57bcc335fa
| 847
|
py
|
Python
|
Scripts/rst2latex.py
|
OlcaytoKorcan/project-management-system
|
5544f7abcd6549a7a6f2dd1f75ebbcbae5a1a09f
|
[
"MIT"
] | null | null | null |
Scripts/rst2latex.py
|
OlcaytoKorcan/project-management-system
|
5544f7abcd6549a7a6f2dd1f75ebbcbae5a1a09f
|
[
"MIT"
] | null | null | null |
Scripts/rst2latex.py
|
OlcaytoKorcan/project-management-system
|
5544f7abcd6549a7a6f2dd1f75ebbcbae5a1a09f
|
[
"MIT"
] | null | null | null |
#!c:\users\o\documents\github\project-management-system\scripts\python.exe
# $Id: rst2latex.py 5905 2009-04-16 12:04:49Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing LaTeX.
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline
description = ('Generates LaTeX documents from standalone reStructuredText '
'sources. '
'Reads from <source> (default is stdin) and writes to '
'<destination> (default is stdout). See '
'<http://docutils.sourceforge.net/docs/user/latex.html> for '
'the full reference.')
publish_cmdline(writer_name='latex', description=description)
| 31.37037
| 76
| 0.681228
|
32a006cecf21432a2d1e5264b4f4fa9c538f647f
| 7,778
|
py
|
Python
|
python/ray/serve/http_util.py
|
kifarid/ray
|
43c97c2afb979987be82fa50048674e9b6776d5d
|
[
"Apache-2.0"
] | 3
|
2021-08-29T20:41:21.000Z
|
2022-01-31T18:47:51.000Z
|
python/ray/serve/http_util.py
|
QPC-database/amazon-ray
|
55aa4cac02a412b96252aea4e8c3f177a28324a1
|
[
"Apache-2.0"
] | 64
|
2021-06-19T07:06:15.000Z
|
2022-03-26T07:13:16.000Z
|
python/ray/serve/http_util.py
|
majacQ/ray
|
bc08c6cdcc7ddf4da751ca2a972defd3db509061
|
[
"Apache-2.0"
] | null | null | null |
import asyncio
from dataclasses import dataclass
import inspect
import json
from typing import Any, Dict, List, Optional, Type
import starlette.requests
import starlette.responses
from ray.serve.exceptions import RayServeException
@dataclass
class HTTPRequestWrapper:
scope: Dict[Any, Any]
body: bytes
def build_starlette_request(scope, serialized_body: bytes):
"""Build and return a Starlette Request from ASGI payload.
This function is intended to be used immediately before task invocation
happens.
"""
# Simulates receiving HTTP body from TCP socket. In reality, the body has
# already been streamed in chunks and stored in serialized_body.
received = False
async def mock_receive():
nonlocal received
# If the request has already been received, starlette will keep polling
# for HTTP disconnect. We will pause forever. The coroutine should be
# cancelled by starlette after the response has been sent.
if received:
block_forever = asyncio.Event()
await block_forever.wait()
received = True
return {
"body": serialized_body,
"type": "http.request",
"more_body": False
}
return starlette.requests.Request(scope, mock_receive)
class Response:
"""ASGI compliant response class.
It is expected to be called in async context and pass along
`scope, receive, send` as in ASGI spec.
>>> await Response({"k": "v"}).send(scope, receive, send)
"""
def __init__(self, content=None, status_code=200):
"""Construct a HTTP Response based on input type.
Args:
content (optional): Any JSON serializable object.
status_code (int, optional): Default status code is 200.
"""
self.status_code = status_code
self.raw_headers = []
if content is None:
self.body = b""
self.set_content_type("text")
elif isinstance(content, bytes):
self.body = content
self.set_content_type("text")
elif isinstance(content, str):
self.body = content.encode("utf-8")
self.set_content_type("text-utf8")
else:
# Delayed import since utils depends on http_util
from ray.serve.utils import ServeEncoder
self.body = json.dumps(
content, cls=ServeEncoder, indent=2).encode()
self.set_content_type("json")
def set_content_type(self, content_type):
if content_type == "text":
self.raw_headers.append([b"content-type", b"text/plain"])
elif content_type == "text-utf8":
self.raw_headers.append(
[b"content-type", b"text/plain; charset=utf-8"])
elif content_type == "json":
self.raw_headers.append([b"content-type", b"application/json"])
else:
raise ValueError("Invalid content type {}".format(content_type))
async def send(self, scope, receive, send):
await send({
"type": "http.response.start",
"status": self.status_code,
"headers": self.raw_headers,
})
await send({"type": "http.response.body", "body": self.body})
async def receive_http_body(scope, receive, send):
body_buffer = []
more_body = True
while more_body:
message = await receive()
assert message["type"] == "http.request"
more_body = message["more_body"]
body_buffer.append(message["body"])
return b"".join(body_buffer)
class ASGIHTTPSender:
"""Implement the interface for ASGI sender, build Starlette Response"""
def __init__(self) -> None:
self.status_code: Optional[int] = 200
self.header: Dict[str, str] = {}
self.buffer: List[bytes] = []
async def __call__(self, message):
if (message["type"] == "http.response.start"):
self.status_code = message["status"]
for key, value in message["headers"]:
self.header[key.decode()] = value.decode()
elif (message["type"] == "http.response.body"):
self.buffer.append(message["body"])
else:
            raise ValueError("ASGI type must be one of "
                             "http.response.{body,start}.")
def build_starlette_response(self) -> starlette.responses.Response:
return starlette.responses.Response(
b"".join(self.buffer),
status_code=self.status_code,
headers=dict(self.header))
def make_fastapi_class_based_view(fastapi_app, cls: Type) -> None:
"""Transform the `cls`'s methods and class annotations to FastAPI routes.
Modified from
https://github.com/dmontagu/fastapi-utils/blob/master/fastapi_utils/cbv.py
Usage:
>>> app = FastAPI()
>>> class A:
@app.route("/{i}")
def func(self, i: int) -> str:
return self.dep + i
>>> # just running the app won't work, here.
>>> make_fastapi_class_based_view(app, A)
>>> # now app can be run properly
"""
    # Delayed import to prevent circular imports in workers.
from fastapi import Depends, APIRouter
from fastapi.routing import APIRoute
def get_current_servable_instance():
from ray import serve
return serve.get_replica_context().servable_object
# Find all the class method routes
member_methods = {
func
for _, func in inspect.getmembers(cls, inspect.isfunction)
}
class_method_routes = [
route for route in fastapi_app.routes
if isinstance(route, APIRoute) and route.endpoint in member_methods
]
    # Modify these routes and mount them on a new APIRouter.
    # We need to do this (instead of modifying in place) because we want the later
    # fastapi_app.include_router call to re-run the dependency analysis
    # for each route.
new_router = APIRouter()
for route in class_method_routes:
fastapi_app.routes.remove(route)
        # This block just adds a default value to the self parameter so that
# FastAPI knows to inject the object when calling the route.
# Before: def method(self, i): ...
# After: def method(self=Depends(...), *, i):...
old_endpoint = route.endpoint
old_signature = inspect.signature(old_endpoint)
old_parameters = list(old_signature.parameters.values())
if len(old_parameters) == 0:
# TODO(simon): make it more flexible to support no arguments.
raise RayServeException(
"Methods in FastAPI class-based view must have ``self`` as "
"their first argument.")
old_self_parameter = old_parameters[0]
new_self_parameter = old_self_parameter.replace(
default=Depends(get_current_servable_instance))
new_parameters = [new_self_parameter] + [
# Make the rest of the parameters keyword only because
# the first argument is no longer positional.
parameter.replace(kind=inspect.Parameter.KEYWORD_ONLY)
for parameter in old_parameters[1:]
]
new_signature = old_signature.replace(parameters=new_parameters)
setattr(route.endpoint, "__signature__", new_signature)
setattr(route.endpoint, "_serve_cls", cls)
new_router.routes.append(route)
fastapi_app.include_router(new_router)
# Remove endpoints that belong to other class based views.
routes = fastapi_app.routes
for route in routes:
if not isinstance(route, APIRoute):
continue
serve_cls = getattr(route.endpoint, "_serve_cls", None)
if serve_cls is not None and serve_cls != cls:
routes.remove(route)
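# Hedged usage sketch (illustrative; not part of the Ray Serve module): drive
# ASGIHTTPSender with the two ASGI messages a framework would emit, then build a
# Starlette response from the buffered data.
if __name__ == "__main__":
    async def _demo():
        sender = ASGIHTTPSender()
        await sender({
            "type": "http.response.start",
            "status": 200,
            "headers": [(b"content-type", b"text/plain")],
        })
        await sender({"type": "http.response.body", "body": b"hello"})
        response = sender.build_starlette_response()
        print(response.status_code, response.body)
    asyncio.run(_demo())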
| 35.354545
| 79
| 0.63371
|
24960a7047743d71bfcf4e5c16062a85246a5a5d
| 2,871
|
py
|
Python
|
data_generator.py
|
foamliu/Super-Resolution-Net
|
684a59f12ed0a0a89f7067c81bfd6fba8090f618
|
[
"MIT"
] | 14
|
2018-06-08T19:14:11.000Z
|
2019-07-23T06:09:10.000Z
|
data_generator.py
|
foamliu/Super-Resolution-Net
|
684a59f12ed0a0a89f7067c81bfd6fba8090f618
|
[
"MIT"
] | 1
|
2019-02-28T02:50:26.000Z
|
2019-12-18T07:28:11.000Z
|
data_generator.py
|
foamliu/Super-Resolution-Net
|
684a59f12ed0a0a89f7067c81bfd6fba8090f618
|
[
"MIT"
] | 6
|
2018-07-21T17:12:23.000Z
|
2019-12-09T11:08:23.000Z
|
import os
import random
from random import shuffle
import cv2 as cv
import imutils
import numpy as np
from keras.utils import Sequence
from config import batch_size, img_size, channel, image_folder
from utils import random_crop, preprocess_input
class DataGenSequence(Sequence):
def __init__(self, usage, scale):
self.usage = usage
self.scale = scale
if usage == 'train':
names_file = 'train_names.txt'
else:
names_file = 'valid_names.txt'
with open(names_file, 'r') as f:
self.names = f.read().splitlines()
np.random.shuffle(self.names)
def __len__(self):
return int(np.ceil(len(self.names) / float(batch_size)))
def __getitem__(self, idx):
i = idx * batch_size
out_img_rows, out_img_cols = img_size * self.scale, img_size * self.scale
length = min(batch_size, (len(self.names) - i))
batch_x = np.empty((length, img_size, img_size, channel), dtype=np.float32)
batch_y = np.empty((length, out_img_rows, out_img_cols, channel), dtype=np.float32)
for i_batch in range(length):
name = self.names[i + i_batch]
filename = os.path.join(image_folder, name)
# b: 0 <=b<=255, g: 0 <=g<=255, r: 0 <=r<=255.
image_bgr = cv.imread(filename)
gt = random_crop(image_bgr, self.scale)
if np.random.random_sample() > 0.5:
gt = np.fliplr(gt)
angle = random.choice((0, 90, 180, 270))
gt = imutils.rotate_bound(gt, angle)
            x = cv.resize(gt, (img_size, img_size), interpolation=cv.INTER_CUBIC)
batch_x[i_batch, :, :] = preprocess_input(x)
batch_y[i_batch, :, :] = gt
return batch_x, batch_y
def on_epoch_end(self):
np.random.shuffle(self.names)
def train_gen(scale):
return DataGenSequence('train', scale)
def valid_gen(scale):
return DataGenSequence('valid', scale)
def split_data():
names = [f for f in os.listdir(image_folder) if f.lower().endswith('.jpg')]
num_samples = len(names) # 1341430
print('num_samples: ' + str(num_samples))
num_train_samples = int(num_samples * 0.992)
print('num_train_samples: ' + str(num_train_samples))
num_valid_samples = num_samples - num_train_samples
print('num_valid_samples: ' + str(num_valid_samples))
valid_names = random.sample(names, num_valid_samples)
train_names = [n for n in names if n not in valid_names]
shuffle(valid_names)
shuffle(train_names)
# with open('names.txt', 'w') as file:
# file.write('\n'.join(names))
with open('valid_names.txt', 'w') as file:
file.write('\n'.join(valid_names))
with open('train_names.txt', 'w') as file:
file.write('\n'.join(train_names))
if __name__ == '__main__':
split_data()
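# Hedged usage sketch (illustrative): pull a single batch from the Sequence defined
# above. It assumes split_data() has already written train_names.txt/valid_names.txt
# and that `image_folder` from config contains the JPEG images; scale=2 is arbitrary.
def _demo_one_batch(scale=2):
    gen = DataGenSequence('train', scale)
    lr_batch, hr_batch = gen[0]  # low-resolution inputs and high-resolution targets
    print(lr_batch.shape, hr_batch.shape)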
| 28.425743
| 91
| 0.626959
|
d11a8dd7792b9a361db59e31f2a1ab13ddde4539
| 591
|
py
|
Python
|
run.py
|
kairos03/NetsPresso-ModelSearch-Dataset-Validator
|
b88b844867a6cb6e65962847c37cf196dcbcbc2d
|
[
"Apache-2.0"
] | null | null | null |
run.py
|
kairos03/NetsPresso-ModelSearch-Dataset-Validator
|
b88b844867a6cb6e65962847c37cf196dcbcbc2d
|
[
"Apache-2.0"
] | null | null | null |
run.py
|
kairos03/NetsPresso-ModelSearch-Dataset-Validator
|
b88b844867a6cb6e65962847c37cf196dcbcbc2d
|
[
"Apache-2.0"
] | null | null | null |
import argparse
import importlib
from src.utils import validate
if __name__=="__main__":
parser = argparse.ArgumentParser(description='Dataset validator.')
parser.add_argument('--dir', type=str, required=True, help='dataset path.')
parser.add_argument('--format', type=str, required=True, help='dataset format')
parser.add_argument('--yaml_path', type=str, required=True, help='yaml file path')
args = parser.parse_args()
dir_path, dataset_type, yaml_path = args.dir, args.format.lower(), args.yaml_path
validate(dir_path, dataset_type, yaml_path, online=False)
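# Hedged usage example (illustrative): a typical command-line invocation. The paths
# and the format name are placeholders; the accepted format strings depend on what
# src.utils.validate supports.
#
#   python run.py --dir ./my_dataset --format yolo --yaml_path ./data.yaml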
| 45.461538
| 86
| 0.739425
|
6f39112dfd59cfca7bc7ac085d42e398c7994856
| 12,928
|
py
|
Python
|
lstchain/io/lstcontainers.py
|
yrenier/cta-lstchain
|
4af91ebb37a33b5af5ea91dd53e29cdfc6b2f9a0
|
[
"BSD-3-Clause"
] | 1
|
2020-05-12T09:00:55.000Z
|
2020-05-12T09:00:55.000Z
|
lstchain/io/lstcontainers.py
|
yrenier/cta-lstchain
|
4af91ebb37a33b5af5ea91dd53e29cdfc6b2f9a0
|
[
"BSD-3-Clause"
] | 10
|
2020-11-18T10:12:46.000Z
|
2021-01-28T15:54:04.000Z
|
lstchain/io/lstcontainers.py
|
yrenier/cta-lstchain
|
4af91ebb37a33b5af5ea91dd53e29cdfc6b2f9a0
|
[
"BSD-3-Clause"
] | 1
|
2020-11-05T09:45:43.000Z
|
2020-11-05T09:45:43.000Z
|
"""
Functions to handle custom containers for the mono reconstruction of LST1
"""
import astropy.units as u
import numpy as np
from astropy.units import Quantity
from astropy.coordinates import Angle
from ctapipe.core import Container, Field
from ctapipe.image import leakage, concentration
from ctapipe.image import timing_parameters
from ctapipe.image.morphology import number_of_islands
from numpy import nan
from ..reco import utils
__all__ = [
'DL1MonitoringEventIndexContainer',
'DL1ParametersContainer',
'DispContainer',
'ExtraImageInfo',
    'ExtraMCInfo',
'LSTEventType',
'MetaData',
'ThrownEventsHistogram',
]
class DL1ParametersContainer(Container):
"""
TODO: maybe fields could be inherited from ctapipe containers definition
For now I have not found an elegant way to do so
"""
intensity = Field(np.float64(np.nan), 'total intensity (size)')
log_intensity = Field(np.float64(np.nan), 'log of total intensity (size)')
x = Field(u.Quantity(np.nan, u.m), 'centroid x coordinate', unit=u.m)
    y = Field(u.Quantity(np.nan, u.m), 'centroid y coordinate', unit=u.m)
r = Field(u.Quantity(np.nan, u.m), 'radial coordinate of centroid', unit=u.m)
phi = Field(Angle(np.nan, u.rad), 'polar coordinate of centroid',
unit=u.rad)
length = Field(u.Quantity(np.nan, u.deg), 'RMS spread along the major-axis',
unit=u.deg)
width = Field(u.Quantity(np.nan, u.deg), 'RMS spread along the minor-axis',
unit=u.deg)
psi = Field(Angle(np.nan, u.rad), 'rotation angle of ellipse', unit=u.rad)
skewness = Field(np.nan, 'measure of the asymmetry')
kurtosis = Field(np.nan, 'measure of the tailedness')
disp_norm = Field(None, 'disp_norm [m]', unit=u.m)
disp_dx = Field(None, 'disp_dx [m]', unit=u.m)
disp_dy = Field(None, 'disp_dy [m]', unit=u.m)
disp_angle = Field(None, 'disp_angle [rad]', unit=u.rad)
disp_sign = Field(None, 'disp_sign')
disp_miss = Field(None, 'disp_miss [m]', unit=u.m)
src_x = Field(None, 'source x coordinate in camera frame', unit=u.m)
src_y = Field(None, 'source y coordinate in camera frame', unit=u.m)
time_gradient = Field(np.nan, 'Time gradient in the camera')
intercept = Field(np.nan, 'Intercept')
leakage_intensity_width_1 = \
Field(np.float32(np.nan), 'Fraction of intensity in outermost pixels',
dtype=np.float32)
leakage_intensity_width_2 = \
Field(np.float32(np.nan), 'Fraction of intensity in two outermost '
'rings of pixels', dtype=np.float32)
leakage_pixels_width_1 = Field(np.nan, 'Fraction of signal pixels that are '
'border pixels')
leakage_pixels_width_2 = Field(np.nan, 'Fraction of signal pixels that are '
'in the two outermost rings of pixels')
n_pixels = Field(0, 'Number of pixels after cleaning')
concentration_cog = Field(np.nan, 'Fraction of intensity in three pixels '
'closest to the cog')
concentration_core = Field(np.nan, 'Fraction of intensity inside hillas '
'ellipse')
concentration_pixel = Field(np.nan, 'Fraction of intensity in brightest '
'pixel')
n_islands = Field(0, 'Number of Islands')
alt_tel = Field(None, 'Telescope altitude pointing',
unit=u.rad)
az_tel = Field(None, 'Telescope azimuth pointing',
unit=u.rad)
obs_id = Field(-1, 'Observation ID')
event_id = Field(-1, 'Event ID')
calibration_id = Field(-1, 'ID of the employed calibration event')
dragon_time = Field(None, 'Dragon time event trigger')
ucts_time = Field(None, 'UCTS time event trigger')
tib_time = Field(None, 'TIB time event trigger')
mc_energy = Field(None, 'Simulated Energy', unit=u.TeV)
log_mc_energy = Field(None, 'log of simulated energy/TeV')
mc_alt = Field(None, 'Simulated altitude', unit=u.rad)
mc_az = Field(None, 'Simulated azimuth', unit=u.rad)
mc_core_x = Field(None, 'Simulated impact point x position', unit=u.m)
mc_core_y = Field(None, 'Simulated impact point y position', unit=u.m)
mc_h_first_int = Field(None, 'Simulated first interaction height', unit=u.m)
mc_type = Field(-1, 'Simulated particle type')
mc_az_tel = Field(None, 'Telescope MC azimuth pointing', unit=u.rad)
mc_alt_tel = Field(None, 'Telescope MC altitude pointing', unit=u.rad)
mc_x_max = Field(None, "MC Xmax value", unit=u.g / u.cm**2)
mc_core_distance = Field(None, "Distance from the impact point to the telescope", unit=u.m)
mc_shower_primary_id = Field(None, "MC shower primary ID 0 (gamma), 1(e-),"
"2(mu-), 100*A+Z for nucleons and nuclei,"
"negative for antimatter.")
hadroness = Field(None, "Hadroness")
wl = Field(u.Quantity(np.nan), "width/length")
tel_id = Field(None, "Telescope Id")
tel_pos_x = Field(None, "Telescope x position in the ground")
tel_pos_y = Field(None, "Telescope y position in the ground")
tel_pos_z = Field(None, "Telescope z position in the ground")
trigger_type = Field(None, "trigger type")
ucts_trigger_type = Field(None, "UCTS trigger type")
trigger_time = Field(None, "trigger time")
lhfit_call_status = Field(None, "Status of the processing of the event "
"by the LH fit method")
# info not available in data
#num_trig_pix = Field(None, "Number of trigger groups (sectors) listed")
#trig_pix_id = Field(None, "pixels involved in the camera trigger")
def fill_hillas(self, hillas):
"""
fill Hillas parameters
hillas: HillasParametersContainer
# TODO : parameters should not be simply copied but inherited
(e.g. conserving unit definition)
"""
for key in hillas.keys():
self[key] = hillas[key]
def fill_mc(self, event, tel_pos):
"""
fill from mc
"""
try:
self.mc_energy = event.mc.energy
self.log_mc_energy = np.log10(self.mc_energy.to_value(u.TeV))
self.mc_alt = event.mc.alt
self.mc_az = event.mc.az
self.mc_core_x = event.mc.core_x
self.mc_core_y = event.mc.core_y
self.mc_h_first_int = event.mc.h_first_int
self.mc_x_max = event.mc.x_max
self.mc_alt_tel = event.mcheader.run_array_direction[1]
self.mc_az_tel = event.mcheader.run_array_direction[0]
self.mc_type = event.mc.shower_primary_id
distance = np.sqrt(
(event.mc.core_x - tel_pos[0]) ** 2 +
(event.mc.core_y - tel_pos[1]) ** 2
)
if np.isfinite(distance):
self.mc_core_distance = distance
except IndexError:
print("mc information not filled")
def fill_event_info(self, event):
self.obs_id = event.index.obs_id
self.event_id = event.index.event_id
def get_features(self, features_names):
return np.array([
self[k].value
if isinstance(self[k], Quantity)
else self[k]
for k in features_names
])
def set_disp(self, source_pos, hillas):
disp = utils.disp_parameters(hillas, source_pos[0], source_pos[1])
self.disp_norm = disp.norm
self.disp_dx = disp.dx
self.disp_dy = disp.dy
self.disp_angle = disp.angle
self.disp_sign = disp.sign
self.disp_miss = disp.miss
def set_timing_features(self, geom, image, peak_time, hillas):
try: # if np.polyfit fails (e.g. len(image) < deg + 1)
timepars = timing_parameters(geom, image, peak_time, hillas)
self.time_gradient = timepars.slope.value
self.intercept = timepars.intercept
except ValueError:
self.time_gradient = np.nan
self.intercept = np.nan
def set_leakage(self, geom, image, clean):
leakage_c = leakage(geom, image, clean)
self.leakage_intensity_width_1 = leakage_c.intensity_width_1
self.leakage_intensity_width_2 = leakage_c.intensity_width_2
self.leakage_pixels_width_1 = leakage_c.pixels_width_1
self.leakage_pixels_width_2 = leakage_c.pixels_width_2
def set_concentration(self, geom, image, hillas_parameters):
conc = concentration(geom, image, hillas_parameters)
self.concentration_cog = conc.cog
self.concentration_core = conc.core
self.concentration_pixel = conc.pixel
def set_n_islands(self, geom, clean):
n_islands, islands_mask = number_of_islands(geom, clean)
self.n_islands = n_islands
def set_telescope_info(self, subarray, telescope_id):
self.tel_id = telescope_id
tel_pos = subarray.positions[telescope_id]
self.tel_pos_x = tel_pos[0]
self.tel_pos_y = tel_pos[1]
self.tel_pos_z = tel_pos[2]
def set_source_camera_position(self, event, telescope):
source_pos = utils.get_event_pos_in_camera(event, telescope)
self.src_x = source_pos[0]
self.src_y = source_pos[1]
class DispContainer(Container):
"""
Disp vector container
"""
dx = Field(nan, 'x coordinate of the disp_norm vector')
dy = Field(nan, 'y coordinate of the disp_norm vector')
angle = Field(nan, 'Angle between the X axis and the disp_norm vector')
norm = Field(nan, 'Norm of the disp_norm vector')
sign = Field(nan, 'Sign of the disp_norm')
miss = Field(nan, 'miss parameter norm')
class ExtraMCInfo(Container):
obs_id = Field(0, "MC Run Identifier")
class ExtraImageInfo(Container):
""" attach the tel_id """
tel_id = Field(0, "Telescope ID")
selected_gain_channel = Field(None, "Selected gain channel")
class ThrownEventsHistogram(Container):
""" 2D histogram from SimTel files """
obs_id = Field(-1, 'MC run ID')
hist_id = Field(-1, 'Histogram ID')
num_entries = Field(-1, 'Number of entries in the histogram')
bins_energy = Field(None, 'array of energy bin lower edges, as in np.histogram')
bins_core_dist = Field(None, 'array of core-distance bin lower edges, as in np.histogram')
histogram = Field(None, "array of histogram entries, size (n_bins_x, n_bins_y)")
def fill_from_simtel(self, hist):
""" fill from a SimTel Histogram entry"""
self.hist_id = hist['id']
self.num_entries = hist['entries']
xbins = np.linspace(hist['lower_x'], hist['upper_x'], hist['n_bins_x'] + 1)
ybins = np.linspace(hist['lower_y'], hist['upper_y'], hist['n_bins_y'] + 1)
self.bins_core_dist = xbins
self.bins_energy = 10 ** ybins
self.histogram = hist['data']
self.meta['hist_title'] = hist['title']
self.meta['x_label'] = 'Log10 E (TeV)'
self.meta['y_label'] = '3D Core Distance (m)'
class MetaData(Container):
"""
Some metadata
"""
SOURCE_FILENAMES = Field([], "filename of the source file")
LSTCHAIN_VERSION = Field(None, "version of lstchain")
CTAPIPE_VERSION = Field(None, "version of ctapipe")
CONTACT = Field(None, "Person or institution responsible for this data product")
class DL1MonitoringEventIndexContainer(Container):
"""
Container with the calibration coefficients
"""
tel_id = Field(1, 'Index of telescope')
calibration_id = Field(-1, 'Index of calibration event for DL1 file')
pedestal_id = Field(-1, 'Index of pedestal event for DL1 file')
flatfield_id = Field(-1, 'Index of flat-field event for DL1 file')
class LSTEventType:
"""
Class to recognize event type from trigger bits
bit 0: Mono
bit 1: stereo
bit 2: Calibration
bit 3: Single Phe
    bit 4: Softrig (from the UCTS)
bit 5: Pedestal
bit 6: slow control
bit 7: busy
"""
@staticmethod
def is_mono(trigger_type):
return trigger_type >> 0 & 1
@staticmethod
def is_stereo(trigger_type):
return trigger_type >> 1 & 1
@staticmethod
def is_calibration(trigger_type):
return trigger_type >> 2 & 1
@staticmethod
def is_single_pe(trigger_type):
return trigger_type >> 3 & 1
@staticmethod
def is_soft_trig(trigger_type):
return trigger_type >> 4 & 1
@staticmethod
def is_pedestal(trigger_type):
return trigger_type >> 5 & 1
@staticmethod
def is_slow_control(trigger_type):
return trigger_type >> 6 & 1
@staticmethod
def is_busy(trigger_type):
return trigger_type >> 7 & 1
@staticmethod
def is_unknown(trigger_type):
return trigger_type == -1
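# Hedged worked example (illustrative): for a hypothetical trigger word
# trigger_type = 0b00100101 (bits 0, 2 and 5 set),
#   LSTEventType.is_mono(trigger_type)        -> 1
#   LSTEventType.is_calibration(trigger_type) -> 1
#   LSTEventType.is_pedestal(trigger_type)    -> 1
#   LSTEventType.is_stereo(trigger_type)      -> 0
# i.e. a single event can carry several trigger flags at once.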
| 38.248521
| 95
| 0.639465
|
9540941d7e77d51d8827e7b5bc0756e42d6b9d07
| 2,768
|
py
|
Python
|
post_gnome/tests/test_nc_reader.py
|
rsignell-usgs/post_gnome
|
e24492751458570e00d07e7dd1958881f6dfa51b
|
[
"MIT",
"Unlicense"
] | 2
|
2017-02-15T20:45:42.000Z
|
2020-10-09T16:00:00.000Z
|
post_gnome/tests/test_nc_reader.py
|
rsignell-usgs/post_gnome
|
e24492751458570e00d07e7dd1958881f6dfa51b
|
[
"MIT",
"Unlicense"
] | 10
|
2015-06-25T23:42:11.000Z
|
2021-06-22T16:19:19.000Z
|
post_gnome/tests/test_nc_reader.py
|
rsignell-usgs/post_gnome
|
e24492751458570e00d07e7dd1958881f6dfa51b
|
[
"MIT",
"Unlicense"
] | 15
|
2016-01-11T20:49:10.000Z
|
2020-10-15T18:02:20.000Z
|
# for py2/3 compatibility
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import datetime
import pytest
import numpy as np
import netCDF4
from post_gnome import nc_particles
## test the Reader
def test_read_required():
"""
Does it find the required variables and attributes
Should be able to set up data_index
"""
r = nc_particles.Reader('sample.nc')
assert len(r.times) == 3
assert np.array_equal(r.data_index, np.array([0, 3, 7, 9]))
def test_read_existing_dataset():
nc = netCDF4.Dataset('sample.nc')
r = nc_particles.Reader(nc)
assert len(r.times) == 3
def test_str():
r = nc_particles.Reader('sample.nc')
print(r)
r.close()
assert True
## other tests fail (E RuntimeError: NetCDF: Not a valid ID)
## if this test is here -- no idea why, but I think NetCDF4 isn't cleaning up after itself well
def test_read_variables():
"""
does it find the data variables ?
"""
r = nc_particles.Reader('sample.nc')
# set(), because order doesn't matter
varnames = set(r.variables)
assert varnames == set(['latitude', 'depth', 'mass', 'id', 'longitude'])
def test_get_all_timesteps():
r = nc_particles.Reader('sample.nc')
data = r.get_all_timesteps(variables=['depth', 'mass', 'id'])
print(data)
assert 'depth' in data
assert 'mass' in data
assert 'id' in data
## better to check actual data, but what can you do?
def test_get_timestep():
r = nc_particles.Reader('sample.nc')
data = r.get_timestep(2, variables=['latitude', 'depth', 'mass', 'id', 'longitude'])
# specific results from the sample file
assert np.array_equal(data['longitude'], [-88.3, -88.1])
assert np.array_equal(data['latitude'], [28.1, 28.0])
assert np.array_equal(data['depth'], [0.0, 0.1])
assert np.array_equal(data['mass'], [0.05, 0.06])
assert np.array_equal(data['id'], [1, 3])
def test_get_individual_trajectory():
r = nc_particles.Reader('sample.nc')
path = r.get_individual_trajectory(1)
assert np.array_equal(path['latitude'], [28.0, 28.05, 28.1])
assert np.array_equal(path['longitude'], [-88.1, -88.2, -88.3])
def test_get_units():
r = nc_particles.Reader('sample.nc')
assert r.get_units('depth') == 'meters'
assert r.get_units('longitude') == 'degrees_east'
def test_get_attributes():
r = nc_particles.Reader('sample.nc')
assert r.get_attributes('depth') == {'units' : "meters",
'long_name' : "particle depth below sea surface",
'standard_name' : "depth",
'axis' : "z positive down",
}
| 32.564706
| 95
| 0.627529
|
1abb36793fe016be519eb96eb75623172a21c09a
| 287
|
py
|
Python
|
.binder/streamlit_call.py
|
op07n/minimal-streamlit-example
|
1af747769dee3a5414da57ca3b66ea915d0fd06a
|
[
"MIT"
] | null | null | null |
.binder/streamlit_call.py
|
op07n/minimal-streamlit-example
|
1af747769dee3a5414da57ca3b66ea915d0fd06a
|
[
"MIT"
] | null | null | null |
.binder/streamlit_call.py
|
op07n/minimal-streamlit-example
|
1af747769dee3a5414da57ca3b66ea915d0fd06a
|
[
"MIT"
] | null | null | null |
from subprocess import Popen
def load_jupyter_server_extension(nbapp):
    """Launch the Streamlit app in a subprocess when the Jupyter server extension is loaded."""
Popen(["streamlit", "run", "st_runner.py", "apps", "--browser.serverAddress=0.0.0.0", "--server.enableCORS=False", "--browser.gatherUsageStats=False"])
| 41
| 155
| 0.721254
|
6c419b9401838ce31e68eea4acee8e44f393d97e
| 14,474
|
py
|
Python
|
flytekit/bin/entrypoint.py
|
cosmicBboy/flytek
|
84c6b52c7a9673326bc7d1c9e84ebcb0f00c61d1
|
[
"Apache-2.0"
] | null | null | null |
flytekit/bin/entrypoint.py
|
cosmicBboy/flytek
|
84c6b52c7a9673326bc7d1c9e84ebcb0f00c61d1
|
[
"Apache-2.0"
] | 1
|
2021-02-05T02:57:25.000Z
|
2021-02-05T02:57:25.000Z
|
flytekit/bin/entrypoint.py
|
cosmicBboy/flytekit
|
84c6b52c7a9673326bc7d1c9e84ebcb0f00c61d1
|
[
"Apache-2.0"
] | null | null | null |
import datetime as _datetime
import importlib as _importlib
import logging as _logging
import os as _os
import pathlib
import random as _random
import click as _click
from flyteidl.core import literals_pb2 as _literals_pb2
from flytekit.annotated.base_task import IgnoreOutputs, PythonTask
from flytekit.annotated.context_manager import ExecutionState, FlyteContext, SerializationSettings, get_image_config
from flytekit.annotated.promise import VoidPromise
from flytekit.common import constants as _constants
from flytekit.common import utils as _common_utils
from flytekit.common import utils as _utils
from flytekit.common.exceptions import scopes as _scopes
from flytekit.common.exceptions import system as _system_exceptions
from flytekit.common.tasks.sdk_runnable import ExecutionParameters
from flytekit.configuration import TemporaryConfiguration as _TemporaryConfiguration
from flytekit.configuration import internal as _internal_config
from flytekit.configuration import platform as _platform_config
from flytekit.configuration import sdk as _sdk_config
from flytekit.engines import loader as _engine_loader
from flytekit.interfaces import random as _flyte_random
from flytekit.interfaces.data import data_proxy as _data_proxy
from flytekit.interfaces.data.gcs import gcs_proxy as _gcs_proxy
from flytekit.interfaces.data.s3 import s3proxy as _s3proxy
from flytekit.interfaces.stats.taggable import get_stats as _get_stats
from flytekit.models import dynamic_job as _dynamic_job
from flytekit.models import literals as _literal_models
from flytekit.models.core import identifier as _identifier
from flytekit.tools.fast_registration import download_distribution as _download_distribution
def _compute_array_job_index():
# type () -> int
"""
Computes the absolute index of the current array job. This is determined by summing the compute-environment-specific
environment variable and the offset (if one's set). The offset will be set and used when the user request that the
job runs in a number of slots less than the size of the input.
:rtype: int
"""
offset = 0
if _os.environ.get("BATCH_JOB_ARRAY_INDEX_OFFSET"):
offset = int(_os.environ.get("BATCH_JOB_ARRAY_INDEX_OFFSET"))
return offset + int(_os.environ.get(_os.environ.get("BATCH_JOB_ARRAY_INDEX_VAR_NAME")))
def _map_job_index_to_child_index(local_input_dir, datadir, index):
local_lookup_file = local_input_dir.get_named_tempfile("indexlookup.pb")
idx_lookup_file = _os.path.join(datadir, "indexlookup.pb")
# if the indexlookup.pb does not exist, then just return the index
if not _data_proxy.Data.data_exists(idx_lookup_file):
return index
_data_proxy.Data.get_data(idx_lookup_file, local_lookup_file)
mapping_proto = _utils.load_proto_from_file(_literals_pb2.LiteralCollection, local_lookup_file)
if len(mapping_proto.literals) < index:
raise _system_exceptions.FlyteSystemAssertion(
"dynamic task index lookup array size: {} is smaller than lookup index {}".format(
len(mapping_proto.literals), index
)
)
return mapping_proto.literals[index].scalar.primitive.integer
def _dispatch_execute(ctx: FlyteContext, task_def: PythonTask, inputs_path: str, output_prefix: str):
"""
Dispatches execute to PythonTask
Step1: Download inputs and load into a literal map
Step2: Invoke task - dispatch_execute
Step3:
a: [Optional] Record outputs to output_prefix
b: OR if IgnoreOutputs is raised, then ignore uploading outputs
c: OR if an unhandled exception is retrieved - record it as an errors.pb
"""
try:
# Step1
local_inputs_file = _os.path.join(ctx.execution_state.working_dir, "inputs.pb")
ctx.file_access.get_data(inputs_path, local_inputs_file)
input_proto = _utils.load_proto_from_file(_literals_pb2.LiteralMap, local_inputs_file)
idl_input_literals = _literal_models.LiteralMap.from_flyte_idl(input_proto)
# Step2
outputs = task_def.dispatch_execute(ctx, idl_input_literals)
if isinstance(outputs, VoidPromise):
_logging.getLogger().warning("Task produces no outputs")
output_file_dict = {_constants.OUTPUT_FILE_NAME: _literal_models.LiteralMap(literals={})}
elif isinstance(outputs, _literal_models.LiteralMap):
output_file_dict = {_constants.OUTPUT_FILE_NAME: outputs}
elif isinstance(outputs, _dynamic_job.DynamicJobSpec):
output_file_dict = {_constants.FUTURES_FILE_NAME: outputs}
else:
_logging.getLogger().error(f"SystemError: received unknown outputs from task {outputs}")
# TODO This should probably cause an error file
return
for k, v in output_file_dict.items():
_common_utils.write_proto_to_file(v.to_flyte_idl(), _os.path.join(ctx.execution_state.engine_dir, k))
# Step3a
ctx.file_access.upload_directory(ctx.execution_state.engine_dir, output_prefix)
_logging.info(f"Outputs written successful the the output prefix {output_prefix}")
except Exception as e:
if isinstance(e, IgnoreOutputs):
# Step 3b
_logging.warning(f"IgnoreOutputs received! Outputs.pb will not be uploaded. reason {e}")
return
# Step 3c
_logging.error(f"Exception when executing task {task_def.name}, reason {str(e)}")
raise e
def _handle_annotated_task(task_def: PythonTask, inputs: str, output_prefix: str, raw_output_data_prefix: str):
"""
Entrypoint for all PythonTask extensions
"""
_click.echo("Running native-typed task")
cloud_provider = _platform_config.CLOUD_PROVIDER.get()
log_level = _internal_config.LOGGING_LEVEL.get() or _sdk_config.LOGGING_LEVEL.get()
_logging.getLogger().setLevel(log_level)
ctx = FlyteContext.current_context()
# Create directories
user_workspace_dir = ctx.file_access.local_access.get_random_directory()
_click.echo(f"Using user directory {user_workspace_dir}")
pathlib.Path(user_workspace_dir).mkdir(parents=True, exist_ok=True)
from flytekit import __version__ as _api_version
execution_parameters = ExecutionParameters(
execution_id=_identifier.WorkflowExecutionIdentifier(
project=_internal_config.EXECUTION_PROJECT.get(),
domain=_internal_config.EXECUTION_DOMAIN.get(),
name=_internal_config.EXECUTION_NAME.get(),
),
execution_date=_datetime.datetime.utcnow(),
stats=_get_stats(
# Stats metric path will be:
# registration_project.registration_domain.app.module.task_name.user_stats
# and it will be tagged with execution-level values for project/domain/wf/lp
"{}.{}.{}.user_stats".format(
_internal_config.TASK_PROJECT.get() or _internal_config.PROJECT.get(),
_internal_config.TASK_DOMAIN.get() or _internal_config.DOMAIN.get(),
_internal_config.TASK_NAME.get() or _internal_config.NAME.get(),
),
tags={
"exec_project": _internal_config.EXECUTION_PROJECT.get(),
"exec_domain": _internal_config.EXECUTION_DOMAIN.get(),
"exec_workflow": _internal_config.EXECUTION_WORKFLOW.get(),
"exec_launchplan": _internal_config.EXECUTION_LAUNCHPLAN.get(),
"api_version": _api_version,
},
),
logging=_logging,
tmp_dir=user_workspace_dir,
)
if cloud_provider == _constants.CloudProvider.AWS:
file_access = _data_proxy.FileAccessProvider(
local_sandbox_dir=_sdk_config.LOCAL_SANDBOX.get(), remote_proxy=_s3proxy.AwsS3Proxy(raw_output_data_prefix),
)
elif cloud_provider == _constants.CloudProvider.GCP:
file_access = _data_proxy.FileAccessProvider(
local_sandbox_dir=_sdk_config.LOCAL_SANDBOX.get(), remote_proxy=_gcs_proxy.GCSProxy(raw_output_data_prefix),
)
elif cloud_provider == _constants.CloudProvider.LOCAL:
# A fake remote using the local disk will automatically be created
file_access = _data_proxy.FileAccessProvider(local_sandbox_dir=_sdk_config.LOCAL_SANDBOX.get())
else:
raise Exception(f"Bad cloud provider {cloud_provider}")
with ctx.new_file_access_context(file_access_provider=file_access) as ctx:
# TODO: This is copied from serialize, which means there's a similarity here I'm not seeing.
env = {
_internal_config.CONFIGURATION_PATH.env_var: _internal_config.CONFIGURATION_PATH.get(),
_internal_config.IMAGE.env_var: _internal_config.IMAGE.get(),
}
serialization_settings = SerializationSettings(
project=_internal_config.TASK_PROJECT.get(),
domain=_internal_config.TASK_DOMAIN.get(),
version=_internal_config.TASK_VERSION.get(),
image_config=get_image_config(),
env=env,
)
# The reason we need this is because of dynamic tasks. Even if we move compilation all to Admin,
# if a dynamic task calls some task, t1, we have to write to the DJ Spec the correct task
# identifier for t1.
with ctx.new_serialization_settings(serialization_settings=serialization_settings) as ctx:
# Because execution states do not look up the context chain, it has to be made last
with ctx.new_execution_context(
mode=ExecutionState.Mode.TASK_EXECUTION, execution_params=execution_parameters
) as ctx:
_dispatch_execute(ctx, task_def, inputs, output_prefix)
@_scopes.system_entry_point
def _execute_task(task_module, task_name, inputs, output_prefix, raw_output_data_prefix, test):
with _TemporaryConfiguration(_internal_config.CONFIGURATION_PATH.get()):
with _utils.AutoDeletingTempDir("input_dir") as input_dir:
# Load user code
task_module = _importlib.import_module(task_module)
task_def = getattr(task_module, task_name)
# Everything else
if not test and not isinstance(task_def, PythonTask):
local_inputs_file = input_dir.get_named_tempfile("inputs.pb")
# Handle inputs/outputs for array job.
if _os.environ.get("BATCH_JOB_ARRAY_INDEX_VAR_NAME"):
job_index = _compute_array_job_index()
# TODO: Perhaps remove. This is a workaround to an issue we perceived with limited entropy in
# TODO: AWS batch array jobs.
_flyte_random.seed_flyte_random(
"{} {} {}".format(_random.random(), _datetime.datetime.utcnow(), job_index)
)
# If an ArrayTask is discoverable, the original job index may be different than the one specified in
# the environment variable. Look up the correct input/outputs in the index lookup mapping file.
job_index = _map_job_index_to_child_index(input_dir, inputs, job_index)
inputs = _os.path.join(inputs, str(job_index), "inputs.pb")
output_prefix = _os.path.join(output_prefix, str(job_index))
_data_proxy.Data.get_data(inputs, local_inputs_file)
input_proto = _utils.load_proto_from_file(_literals_pb2.LiteralMap, local_inputs_file)
_engine_loader.get_engine().get_task(task_def).execute(
_literal_models.LiteralMap.from_flyte_idl(input_proto),
context={"output_prefix": output_prefix, "raw_output_data_prefix": raw_output_data_prefix},
)
# New annotated style task
elif not test and isinstance(task_def, PythonTask):
_handle_annotated_task(task_def, inputs, output_prefix, raw_output_data_prefix)
@_click.group()
def _pass_through():
pass
_task_module_option = _click.option("--task-module", required=True)
_task_name_option = _click.option("--task-name", required=True)
_inputs_option = _click.option("--inputs", required=True)
_output_prefix_option = _click.option("--output-prefix", required=True)
_raw_output_date_prefix_option = _click.option("--raw-output-data-prefix", required=False)
_test = _click.option("--test", is_flag=True)
@_pass_through.command("pyflyte-execute")
@_click.option("--task-module", required=True)
@_click.option("--task-name", required=True)
@_click.option("--inputs", required=True)
@_click.option("--output-prefix", required=True)
@_click.option("--raw-output-data-prefix", required=False)
@_click.option("--test", is_flag=True)
def execute_task_cmd(task_module, task_name, inputs, output_prefix, raw_output_data_prefix, test):
_click.echo(_utils.get_version_message())
# Backwards compatibility - if Propeller hasn't filled this in, then it'll come through here as the original
# template string, so let's explicitly set it to None so that the downstream functions will know to fall back
# to the original shard formatter/prefix config.
if raw_output_data_prefix == "{{.rawOutputDataPrefix}}":
raw_output_data_prefix = None
_execute_task(task_module, task_name, inputs, output_prefix, raw_output_data_prefix, test)
@_pass_through.command("pyflyte-fast-execute")
@_click.option("--additional-distribution", required=False)
@_click.option("--dest-dir", required=False)
@_click.argument("task-execute-cmd", nargs=-1, type=_click.UNPROCESSED)
def fast_execute_task_cmd(additional_distribution, dest_dir, task_execute_cmd):
"""
Downloads a compressed code distribution specified by additional-distribution and then calls the underlying
task execute command for the updated code.
:param Text additional_distribution:
:param Text dest_dir:
:param task_execute_cmd:
:return:
"""
if additional_distribution is not None:
if not dest_dir:
dest_dir = _os.getcwd()
_download_distribution(additional_distribution, dest_dir)
# Use the commandline to run the task execute command rather than calling it directly in python code
# since the current runtime bytecode references the older user code, rather than the downloaded distribution.
_os.system(" ".join(task_execute_cmd))
if __name__ == "__main__":
_pass_through()
| 48.408027
| 120
| 0.717632
|
6e1f4432892e88fb8964a7870a1086f59cdb59d5
| 1,916
|
py
|
Python
|
consumers/venv/lib/python3.7/site-packages/mode/loop/__init__.py
|
spencerpomme/Public-Transit-Status-with-Apache-Kafka
|
2c85d7daadf4614fe7ce2eabcd13ff87236b1c7e
|
[
"MIT"
] | null | null | null |
consumers/venv/lib/python3.7/site-packages/mode/loop/__init__.py
|
spencerpomme/Public-Transit-Status-with-Apache-Kafka
|
2c85d7daadf4614fe7ce2eabcd13ff87236b1c7e
|
[
"MIT"
] | null | null | null |
consumers/venv/lib/python3.7/site-packages/mode/loop/__init__.py
|
spencerpomme/Public-Transit-Status-with-Apache-Kafka
|
2c85d7daadf4614fe7ce2eabcd13ff87236b1c7e
|
[
"MIT"
] | null | null | null |
"""AsyncIO event loop implementations.
This contains a registry of different AsyncIO loop implementations
to be used with Mode.
The choices available are:
aio **default**
Normal :mod:`asyncio` event loop policy.
eventlet
Use :pypi:`eventlet` as the event loop.
This uses :pypi:`aioeventlet` and will apply the
:pypi:`eventlet` monkey-patches.
    To enable it, execute the following as the first thing that happens
when your program starts (e.g. add it as the top import of your
entrypoint module)::
>>> import mode.loop
>>> mode.loop.use('eventlet')
gevent
Use :pypi:`gevent` as the event loop.
This uses :pypi:`aiogevent` (+modifications) and will apply the
:pypi:`gevent` monkey-patches.
    This choice enables you to run blocking Python code as if it had
    invisible `async/await` syntax around it (NOTE: C extensions are
not usually gevent compatible).
    To enable it, execute the following as the first thing that happens
when your program starts (e.g. add it as the top import of your
entrypoint module)::
>>> import mode.loop
>>> mode.loop.use('gevent')
uvloop
Event loop using :pypi:`uvloop`.
    To enable it, execute the following as the first thing that happens
when your program starts (e.g. add it as the top import of your
entrypoint module)::
>>> import mode.loop
>>> mode.loop.use('uvloop')
"""
import importlib
from typing import Mapping, Optional
__all__ = ['LOOPS', 'use']
LOOPS: Mapping[str, Optional[str]] = {
'aio': None,
'eventlet': 'mode.loop.eventlet',
'gevent': 'mode.loop.gevent',
'uvloop': 'mode.loop.uvloop',
}
def use(loop: str) -> None:
"""Specify the event loop to use as a string.
Loop must be one of: aio, eventlet, gevent, uvloop.
"""
mod = LOOPS.get(loop, loop)
if mod is not None:
importlib.import_module(mod)
| 26.611111
| 73
| 0.670668
|
925846305232b47b95688c11f7dc0f7fd4a50264
| 4,759
|
py
|
Python
|
stem_cell_hypothesis/en_electra_base/joint/dep_srl.py
|
emorynlp/stem-cell-hypothesis
|
48a628093d93d653865fbac6409d179cddd99293
|
[
"Apache-2.0"
] | 4
|
2021-09-17T15:23:31.000Z
|
2022-02-28T10:18:04.000Z
|
stem_cell_hypothesis/en_electra_base/joint/dep_srl.py
|
emorynlp/stem-cell-hypothesis
|
48a628093d93d653865fbac6409d179cddd99293
|
[
"Apache-2.0"
] | null | null | null |
stem_cell_hypothesis/en_electra_base/joint/dep_srl.py
|
emorynlp/stem-cell-hypothesis
|
48a628093d93d653865fbac6409d179cddd99293
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2021-01-06 16:12
from typing import List
from elit.common.dataset import SortingSamplerBuilder
from elit.common.transform import NormalizeToken
from elit.components.mtl.loss_balancer import MovingAverageBalancer
from elit.components.mtl.multi_task_learning import MultiTaskLearning
from elit.components.mtl.tasks.constituency import CRFConstituencyParsing
from elit.components.mtl.tasks.dep import BiaffineDependencyParsing
from elit.components.mtl.tasks.ner.biaffine_ner import BiaffineNamedEntityRecognition
from elit.components.mtl.tasks.pos import TransformerTagging
from elit.components.mtl.tasks.srl.rank_srl import SpanRankingSemanticRoleLabeling
from elit.datasets.parsing.ptb import PTB_TOKEN_MAPPING
from elit.datasets.srl.ontonotes5.english import ONTONOTES5_POS_ENGLISH_TRAIN, ONTONOTES5_POS_ENGLISH_TEST, \
ONTONOTES5_POS_ENGLISH_DEV, ONTONOTES5_ENGLISH_TRAIN, ONTONOTES5_ENGLISH_TEST, ONTONOTES5_ENGLISH_DEV, \
ONTONOTES5_CON_ENGLISH_TRAIN, ONTONOTES5_CON_ENGLISH_DEV, ONTONOTES5_CON_ENGLISH_TEST, ONTONOTES5_DEP_ENGLISH_TEST, \
ONTONOTES5_DEP_ENGLISH_DEV, ONTONOTES5_DEP_ENGLISH_TRAIN, ONTONOTES5_SRL_ENGLISH_TRAIN, ONTONOTES5_SRL_ENGLISH_DEV, \
ONTONOTES5_SRL_ENGLISH_TEST
from elit.layers.embeddings.contextual_word_embedding import ContextualWordEmbedding
from elit.metrics.mtl import MetricDict
from elit.utils.log_util import cprint
from stem_cell_hypothesis import cdroot
def main():
cdroot()
scores: List[MetricDict] = []
for i in range(3):
tasks = {
# 'pos': TransformerTagging(
# ONTONOTES5_POS_ENGLISH_TRAIN,
# ONTONOTES5_POS_ENGLISH_DEV,
# ONTONOTES5_POS_ENGLISH_TEST,
# SortingSamplerBuilder(batch_size=64, batch_max_tokens=6400),
# lr=1e-3,
# ),
# 'ner': BiaffineNamedEntityRecognition(
# ONTONOTES5_ENGLISH_TRAIN,
# ONTONOTES5_ENGLISH_DEV,
# ONTONOTES5_ENGLISH_TEST,
# SortingSamplerBuilder(batch_size=64, batch_max_tokens=6400),
# lr=1e-3,
# doc_level_offset=True,
# ),
'srl': SpanRankingSemanticRoleLabeling(
ONTONOTES5_SRL_ENGLISH_TRAIN,
ONTONOTES5_SRL_ENGLISH_DEV,
ONTONOTES5_SRL_ENGLISH_TEST,
SortingSamplerBuilder(batch_size=64, batch_max_tokens=6400),
lr=1e-3,
doc_level_offset=True,
),
'dep': BiaffineDependencyParsing(
ONTONOTES5_DEP_ENGLISH_TRAIN,
ONTONOTES5_DEP_ENGLISH_DEV,
ONTONOTES5_DEP_ENGLISH_TEST,
SortingSamplerBuilder(batch_size=64, batch_max_tokens=6400),
lr=1e-3,
),
# 'con': CRFConstituencyParsing(
# ONTONOTES5_CON_ENGLISH_TRAIN,
# ONTONOTES5_CON_ENGLISH_DEV,
# ONTONOTES5_CON_ENGLISH_TEST,
# SortingSamplerBuilder(batch_size=64, batch_max_tokens=6400),
# lr=1e-3,
# ),
}
mtl = MultiTaskLearning()
save_dir = f'data/model/mtl/ontonotes_electra_base_en/mtl/no-gate/dep_srl/{i}'
cprint(f'Model will be saved in [cyan]{save_dir}[/cyan]')
mtl.fit(
ContextualWordEmbedding(
'token',
'google/electra-base-discriminator',
average_subwords=True,
max_sequence_length=512,
word_dropout=.2,
),
tasks,
save_dir,
30,
lr=1e-3,
encoder_lr=5e-5,
grad_norm=1,
gradient_accumulation=4,
eval_trn=False,
transform=NormalizeToken(PTB_TOKEN_MAPPING, 'token'),
loss_balancer=MovingAverageBalancer(5, intrinsic_weighting=False),
# prefetch=10,
# cache='data/tmp'
)
cprint(f'Model saved in [cyan]{save_dir}[/cyan]')
mtl.load(save_dir)
if 'dep' in mtl.tasks:
mtl['dep'].config.tree = True
mtl['dep'].config.proj = True
mtl.save_config(save_dir)
for k, v in mtl.tasks.items():
v.trn = tasks[k].trn
v.dev = tasks[k].dev
v.tst = tasks[k].tst
metric = mtl.evaluate(save_dir)[0]
scores.append(metric)
print(f'{"-".join(tasks.keys())} {len(scores)} runs scores:')
for each in scores:
cprint(each.cstr())
if __name__ == '__main__':
import torch
# torch.multiprocessing.set_start_method('spawn') # See https://github.com/pytorch/pytorch/issues/40403
main()
| 40.675214
| 121
| 0.63921
|
22459a396d7b0a0eb35e7719c951c7eaa43f63c6
| 6,939
|
py
|
Python
|
backend/nameless_smoke_29209/settings.py
|
crowdbotics-apps/nameless-smoke-29209
|
0310cd6965c9578c28fbc5f097cfb3f5b13be412
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/nameless_smoke_29209/settings.py
|
crowdbotics-apps/nameless-smoke-29209
|
0310cd6965c9578c28fbc5f097cfb3f5b13be412
|
[
"FTL",
"AML",
"RSA-MD"
] | 9
|
2021-07-26T20:22:32.000Z
|
2021-07-26T20:22:37.000Z
|
backend/nameless_smoke_29209/settings.py
|
crowdbotics-apps/nameless-smoke-29209
|
0310cd6965c9578c28fbc5f097cfb3f5b13be412
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
"""
Django settings for nameless_smoke_29209 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
import logging
from modules.manifest import get_modules
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
LOCAL_APPS = [
'home',
'users.apps.UsersConfig',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'rest_auth.registration',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'django_extensions',
'drf_yasg',
'storages',
]
MODULES_APPS = get_modules()
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS + MODULES_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'nameless_smoke_29209.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'web_build')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'nameless_smoke_29209.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {
'default': env.db()
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static'), os.path.join(BASE_DIR, 'web_build/static')]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# AWS S3 config
AWS_ACCESS_KEY_ID = env.str("AWS_ACCESS_KEY_ID", "")
AWS_SECRET_ACCESS_KEY = env.str("AWS_SECRET_ACCESS_KEY", "")
AWS_STORAGE_BUCKET_NAME = env.str("AWS_STORAGE_BUCKET_NAME", "")
AWS_STORAGE_REGION = env.str("AWS_STORAGE_REGION", "")
USE_S3 = (
AWS_ACCESS_KEY_ID and
AWS_SECRET_ACCESS_KEY and
AWS_STORAGE_BUCKET_NAME and
AWS_STORAGE_REGION
)
if USE_S3:
AWS_S3_CUSTOM_DOMAIN = env.str("AWS_S3_CUSTOM_DOMAIN", "")
AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"}
AWS_DEFAULT_ACL = env.str("AWS_DEFAULT_ACL", "public-read")
AWS_MEDIA_LOCATION = env.str("AWS_MEDIA_LOCATION", "media")
AWS_AUTO_CREATE_BUCKET = env.bool("AWS_AUTO_CREATE_BUCKET", True)
DEFAULT_FILE_STORAGE = env.str(
"DEFAULT_FILE_STORAGE", "home.storage_backends.MediaStorage"
)
MEDIA_URL = '/mediafiles/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'mediafiles')
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
# output email to console instead of sending
if not DEBUG:
logging.warning("You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails.")
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
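# The env.* helpers used throughout these settings assume an environment
# reader initialised near the top of this module. A minimal sketch, assuming
# the django-environ package (the environ/env names and defaults below are
# illustrative, not taken from this file):
#
#   import os
#   import environ
#
#   env = environ.Env()  # reads values from os.environ
#   BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
#   DEBUG = env.bool("DEBUG", default=False)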
| 29.653846 | 112 | 0.731373 |
3144af2f76110b235b8d7a9595088b101ae780ad | 66 | py | Python
repositorytools/cli/commands/__init__.py | MaciejKucia/repositorytools | b9c6b2c3404e0395609dec05cae329b3576aa9a7 | ["Apache-2.0"] | 32 | 2017-09-25T08:01:27.000Z | 2022-03-16T21:53:37.000Z
repositorytools/cli/commands/__init__.py | MaciejKucia/repositorytools | b9c6b2c3404e0395609dec05cae329b3576aa9a7 | ["Apache-2.0"] | 30 | 2015-05-28T11:02:31.000Z | 2017-08-22T12:40:27.000Z
repositorytools/cli/commands/__init__.py | MaciejKucia/repositorytools | b9c6b2c3404e0395609dec05cae329b3576aa9a7 | ["Apache-2.0"] | 16 | 2017-12-11T06:17:22.000Z | 2021-10-09T06:35:33.000Z
__author__ = 'msamia'
from .artifact import *
from .repo import *
| 16.5 | 23 | 0.727273 |
bdea1feb316e9dc7ab85fa5f868681217edde391 | 5,508 | py | Python
dist-packages/reportlab/lib/attrmap.py | Jianwei-Wang/python2.7_lib | 911b8e81512e5ac5f13e669ab46f7693ed897378 | ["PSF-2.0"] | 51 | 2015-01-20T19:50:34.000Z | 2022-03-05T21:23:32.000Z
dist-packages/reportlab/lib/attrmap.py | Jianwei-Wang/python2.7_lib | 911b8e81512e5ac5f13e669ab46f7693ed897378 | ["PSF-2.0"] | 16 | 2015-11-15T04:23:43.000Z | 2021-09-27T14:14:20.000Z
dist-packages/reportlab/lib/attrmap.py | Jianwei-Wang/python2.7_lib | 911b8e81512e5ac5f13e669ab46f7693ed897378 | ["PSF-2.0"] | 46 | 2015-03-28T10:18:14.000Z | 2021-12-16T15:57:47.000Z
#Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/lib/attrmap.py
__version__=''' $Id$ '''
__doc__='''Framework for objects whose assignments are checked. Used by graphics.
We developed reportlab/graphics prior to Python 2 and metaclasses. For the
graphics, we wanted to be able to declare the attributes of a class, check
them on assignment, and convert from string arguments. Examples of
attrmap-based objects can be found in reportlab/graphics/shapes. It lets
us define structures like the one below, which are seen in a more modern form
in Django models and other frameworks.
We'll probably replace this one day soon, hopefully with no impact on client
code.
class Rect(SolidShape):
"""Rectangle, possibly with rounded corners."""
_attrMap = AttrMap(BASE=SolidShape,
x = AttrMapValue(isNumber),
y = AttrMapValue(isNumber),
width = AttrMapValue(isNumber),
height = AttrMapValue(isNumber),
rx = AttrMapValue(isNumber),
ry = AttrMapValue(isNumber),
)
'''
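# A short sketch of how checking behaves for an attrmap-based class such as
# the Rect declared above (r stands for an existing Rect instance; this is
# illustrative, not part of the module):
#
#   r.x = 10        # accepted, passes the isNumber validator
#   r.x = 'spam'    # AttributeError: illegal assignment (fails isNumber)
#   r.z = 1         # AttributeError: 'z' is not declared in _attrMap
#
# Both checks are applied by validateSetattr below, and only when
# rl_config.shapeChecking is enabled.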
from reportlab.lib.validators import isAnything, DerivedValue
from reportlab.lib.utils import isSeq
from reportlab import rl_config
class CallableValue:
'''a class to allow callable initial values'''
def __init__(self,func,*args,**kw):
#assert iscallable(func)
self.func = func
self.args = args
self.kw = kw
def __call__(self):
return self.func(*self.args,**self.kw)
class AttrMapValue:
'''Simple multi-value holder for attribute maps'''
def __init__(self,validate=None,desc=None,initial=None, advancedUsage=0, **kw):
self.validate = validate or isAnything
self.desc = desc
self._initial = initial
self._advancedUsage = advancedUsage
for k,v in kw.items():
setattr(self,k,v)
def __getattr__(self,name):
#hack to allow callable initial values
if name=='initial':
if isinstance(self._initial,CallableValue): return self._initial()
return self._initial
elif name=='hidden':
return 0
raise AttributeError(name)
def __repr__(self):
return 'AttrMapValue(%s)' % ', '.join(['%s=%r' % i for i in self.__dict__.items()])
class AttrMap(dict):
def __init__(self,BASE=None,UNWANTED=[],**kw):
data = {}
if BASE:
if isinstance(BASE,AttrMap):
data = BASE
else:
if not isSeq(BASE): BASE = (BASE,)
for B in BASE:
am = getattr(B,'_attrMap',self)
if am is not self:
if am: data.update(am)
else:
raise ValueError('BASE=%s has wrong kind of value' % ascii(B))
dict.__init__(self,data)
self.remove(UNWANTED)
self.update(kw)
def remove(self,unwanted):
for k in unwanted:
try:
del self[k]
except KeyError:
pass
def clone(self,UNWANTED=[],**kw):
c = AttrMap(BASE=self,UNWANTED=UNWANTED)
c.update(kw)
return c
def validateSetattr(obj,name,value):
'''validate setattr(obj,name,value)'''
if rl_config.shapeChecking:
map = obj._attrMap
if map and name[0]!= '_':
#we always allow the inherited values; they cannot
#be checked until draw time.
if isinstance(value, DerivedValue):
#let it through
pass
else:
try:
validate = map[name].validate
if not validate(value):
raise AttributeError("Illegal assignment of '%s' to '%s' in class %s" % (value, name, obj.__class__.__name__))
except KeyError:
raise AttributeError("Illegal attribute '%s' in class %s" % (name, obj.__class__.__name__))
obj.__dict__[name] = value
def _privateAttrMap(obj,ret=0):
'''clone obj._attrMap if required'''
A = obj._attrMap
oA = getattr(obj.__class__,'_attrMap',None)
if ret:
if oA is A:
return A.clone(), oA
else:
return A, None
else:
if oA is A:
obj._attrMap = A.clone()
def _findObjectAndAttr(src, P):
'''Locate the object src.P for P a string, return parent and name of attribute
'''
P = P.split('.')
if len(P) == 0:
return None, None
else:
for p in P[0:-1]:
src = getattr(src, p)
return src, P[-1]
def hook__setattr__(obj):
if not hasattr(obj,'__attrproxy__'):
C = obj.__class__
import new
obj.__class__=new.classobj(C.__name__,(C,)+C.__bases__,
{'__attrproxy__':[],
'__setattr__':lambda self,k,v,osa=getattr(obj,'__setattr__',None),hook=hook: hook(self,k,v,osa)})
def addProxyAttribute(src,name,validate=None,desc=None,initial=None,dst=None):
'''
Add a proxy attribute 'name' to src with targets dst
'''
#sanity
assert hasattr(src,'_attrMap'), 'src object has no _attrMap'
A, oA = _privateAttrMap(src,1)
if not isSeq(dst): dst = dst,
D = []
DV = []
for d in dst:
if isSeq(d):
d, e = d[0], d[1:]
obj, attr = _findObjectAndAttr(src,d)
if obj:
dA = getattr(obj,'_attrMap',None)
| 33.180723 | 134 | 0.593319 |
574ea42f6af5980b3c62319e2f0f1ba1e9c5761d | 26,727 | py | Python
sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_08_01/aio/operations/_route_filter_rules_operations.py | vbarbaresi/azure-sdk-for-python | 397ba46c51d001ff89c66b170f5576cf8f49c05f | ["MIT"] | 8 | 2021-01-13T23:44:08.000Z | 2021-03-17T10:13:36.000Z
sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_08_01/aio/operations/_route_filter_rules_operations.py | vbarbaresi/azure-sdk-for-python | 397ba46c51d001ff89c66b170f5576cf8f49c05f | ["MIT"] | null | null | null
sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_08_01/aio/operations/_route_filter_rules_operations.py | vbarbaresi/azure-sdk-for-python | 397ba46c51d001ff89c66b170f5576cf8f49c05f | ["MIT"] | null | null | null
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class RouteFilterRulesOperations:
"""RouteFilterRulesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
route_filter_name: str,
rule_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-08-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
route_filter_name: str,
rule_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Deletes the specified rule from a route filter.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_filter_name: The name of the route filter.
:type route_filter_name: str
:param rule_name: The name of the rule.
:type rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
route_filter_name=route_filter_name,
rule_name=rule_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'} # type: ignore
async def get(
self,
resource_group_name: str,
route_filter_name: str,
rule_name: str,
**kwargs
) -> "models.RouteFilterRule":
"""Gets the specified rule from a route filter.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_filter_name: The name of the route filter.
:type route_filter_name: str
:param rule_name: The name of the rule.
:type rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RouteFilterRule, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_08_01.models.RouteFilterRule
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.RouteFilterRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-08-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RouteFilterRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
route_filter_name: str,
rule_name: str,
route_filter_rule_parameters: "models.RouteFilterRule",
**kwargs
) -> "models.RouteFilterRule":
cls = kwargs.pop('cls', None) # type: ClsType["models.RouteFilterRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-08-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(route_filter_rule_parameters, 'RouteFilterRule')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('RouteFilterRule', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('RouteFilterRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
route_filter_name: str,
rule_name: str,
route_filter_rule_parameters: "models.RouteFilterRule",
**kwargs
) -> AsyncLROPoller["models.RouteFilterRule"]:
"""Creates or updates a route in the specified route filter.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_filter_name: The name of the route filter.
:type route_filter_name: str
:param rule_name: The name of the route filter rule.
:type rule_name: str
:param route_filter_rule_parameters: Parameters supplied to the create or update route filter
rule operation.
:type route_filter_rule_parameters: ~azure.mgmt.network.v2018_08_01.models.RouteFilterRule
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either RouteFilterRule or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_08_01.models.RouteFilterRule]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.RouteFilterRule"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
route_filter_name=route_filter_name,
rule_name=rule_name,
route_filter_rule_parameters=route_filter_rule_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('RouteFilterRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'} # type: ignore
async def _update_initial(
self,
resource_group_name: str,
route_filter_name: str,
rule_name: str,
route_filter_rule_parameters: "models.PatchRouteFilterRule",
**kwargs
) -> "models.RouteFilterRule":
cls = kwargs.pop('cls', None) # type: ClsType["models.RouteFilterRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-08-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(route_filter_rule_parameters, 'PatchRouteFilterRule')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RouteFilterRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'} # type: ignore
async def begin_update(
self,
resource_group_name: str,
route_filter_name: str,
rule_name: str,
route_filter_rule_parameters: "models.PatchRouteFilterRule",
**kwargs
) -> AsyncLROPoller["models.RouteFilterRule"]:
"""Updates a route in the specified route filter.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_filter_name: The name of the route filter.
:type route_filter_name: str
:param rule_name: The name of the route filter rule.
:type rule_name: str
:param route_filter_rule_parameters: Parameters supplied to the update route filter rule
operation.
:type route_filter_rule_parameters: ~azure.mgmt.network.v2018_08_01.models.PatchRouteFilterRule
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either RouteFilterRule or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_08_01.models.RouteFilterRule]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.RouteFilterRule"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_initial(
resource_group_name=resource_group_name,
route_filter_name=route_filter_name,
rule_name=rule_name,
route_filter_rule_parameters=route_filter_rule_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('RouteFilterRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'} # type: ignore
def list_by_route_filter(
self,
resource_group_name: str,
route_filter_name: str,
**kwargs
) -> AsyncIterable["models.RouteFilterRuleListResult"]:
"""Gets all RouteFilterRules in a route filter.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_filter_name: The name of the route filter.
:type route_filter_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RouteFilterRuleListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_08_01.models.RouteFilterRuleListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.RouteFilterRuleListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-08-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_route_filter.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('RouteFilterRuleListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_route_filter.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules'} # type: ignore
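# Usage sketch: these operations are normally reached through the versioned
# async management client rather than by instantiating this class directly.
# Assuming azure-identity and the matching aio NetworkManagementClient
# (resource group and filter names below are placeholders):
#
#   from azure.identity.aio import DefaultAzureCredential
#   from azure.mgmt.network.v2018_08_01.aio import NetworkManagementClient
#
#   async def list_rules(subscription_id: str) -> None:
#       async with NetworkManagementClient(DefaultAzureCredential(), subscription_id) as client:
#           async for rule in client.route_filter_rules.list_by_route_filter("my-rg", "my-filter"):
#               print(rule.name)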
| 49.402957 | 221 | 0.671755 |
cd4b22b05346ba150ae1e3d72287c4250f2f87fd | 360 | py | Python
python_socks/async_/curio/__init__.py | sjamgade/python-socks | e9c7718b104c382ff381f059705d1e929ec63f22 | ["Apache-2.0"] | null | null | null
python_socks/async_/curio/__init__.py | sjamgade/python-socks | e9c7718b104c382ff381f059705d1e929ec63f22 | ["Apache-2.0"] | null | null | null
python_socks/async_/curio/__init__.py | sjamgade/python-socks | e9c7718b104c382ff381f059705d1e929ec63f22 | ["Apache-2.0"] | null | null | null
from ..._types import ProxyType
from ..._proxy_factory import ProxyFactory
from ._proxy import (
CurioProxy,
Socks5Proxy,
Socks4Proxy,
HttpProxy
)
class Proxy(ProxyFactory[CurioProxy]):
types = {
ProxyType.SOCKS4: Socks4Proxy,
ProxyType.SOCKS5: Socks5Proxy,
ProxyType.HTTP: HttpProxy,
}
__all__ = ('Proxy',)
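# Usage sketch, assuming python-socks' documented factory API under a curio
# event loop (proxy URL, host and port are placeholders):
#
#   proxy = Proxy.from_url('socks5://user:password@127.0.0.1:1080')
#   sock = await proxy.connect(dest_host='example.com', dest_port=443)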
| 18 | 42 | 0.669444 |
1cd969d6bc7ca029dbf1576d8bafdcee9960f59a | 1,813 | py | Python
Metodos_numericos/MetodosDeGauss/GaussJacobi.py | iOsnaaente/Faculdade_ECA-UFSM | aea8b8d66169b073c439b47ad990e45695cbe953 | ["MIT"] | null | null | null
Metodos_numericos/MetodosDeGauss/GaussJacobi.py | iOsnaaente/Faculdade_ECA-UFSM | aea8b8d66169b073c439b47ad990e45695cbe953 | ["MIT"] | null | null | null
Metodos_numericos/MetodosDeGauss/GaussJacobi.py | iOsnaaente/Faculdade_ECA-UFSM | aea8b8d66169b073c439b47ad990e45695cbe953 | ["MIT"] | null | null | null
from numpy import zeros, linspace, array
from numpy.linalg import norm
def gaussJacobi(A, B, Ap, e):
    C = zeros((len(A), len(A)))
    g = zeros(len(B))
    # Build the iteration matrix C and the vector g
    for i in range(len(A)):
        g[i] = B[i]/A[i][i]
        for j in range(len(A)):
            if i != j:
                C[i][j] = -A[i][j]/A[i][i]
    # Sufficient convergence test: iterate only if ||C||_1 < 1
    if norm(C, 1) < 1:
        erro = 1
        # If we want to know how many iterations were performed
        #n = 0
        while erro > e:
            An = C.dot(Ap) + g
            erro = norm(An - Ap)/norm(An)
            Ap = An
            #n = n + 1
        return Ap
    else:
        return None
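# The loop above implements the fixed-point iteration
#   x^(k+1) = C x^(k) + g,  with  C = -D^(-1)(L + U)  and  g = D^(-1) b,
# where D, L and U are the diagonal, strictly lower and strictly upper parts
# of A; the sufficient convergence test used here is ||C||_1 < 1.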
if __name__ == '__main__':
    '''
    t = int(input("Size M of the matrix A[MxM]: "))
    A = array([list(map(float, input("row " + str(i + 1) + ": ").split())) for i in range(t)])
    B = array(list(map(float, input('Enter the values of the vector B [Mx1]: ').split())))
    Ap = array(list(map(float, input('Enter the values of the initial guess X [Mx1]: ').split())))
    e = float(input("Enter the maximum tolerated error: "))
    '''
    t = 8
    A = array([[-10,1,-2,0], [-1,11,-1,3], [2,-1,10,-1], [0,3,-1,8]])
    B = array([-6,25,-11,15])
    Ap = array([0,0,0,0])
    e = 0.001
    valores = gaussJacobi(A, B, Ap, e)
    if valores is not None:
        print('The values converge at the point ', end='')
        str_append = ''
        soma = 0
        for i in range(len(valores)):
            str_append = str_append + "x%i: %10.8f " % (i, A[0][i]*valores[i])
            soma = soma + A[0][i]*valores[i]
        print(' y(x)=%10.8f where:' % soma)
        print(str_append)
    else:
        print('The method does not converge!')
| 25.9 | 102 | 0.472697 |
465dd63a290c6ff839bdc8d8fca6fefb889aa9bf | 70,309 | py | Python
binance/client.py | fzxa/python-binance | 78a64a67cad02ca6397b0b95486b2bc830ae5ef0 | ["MIT"] | 2 | 2018-12-26T12:09:30.000Z | 2020-09-27T08:21:05.000Z
binance/client.py | fzxa/python-binance | 78a64a67cad02ca6397b0b95486b2bc830ae5ef0 | ["MIT"] | null | null | null
binance/client.py | fzxa/python-binance | 78a64a67cad02ca6397b0b95486b2bc830ae5ef0 | ["MIT"] | 2 | 2018-08-19T16:30:15.000Z | 2018-12-09T10:20:15.000Z
# coding=utf-8
import hashlib
import hmac
import requests
import time
from operator import itemgetter
from .helpers import date_to_milliseconds, interval_to_milliseconds
from .exceptions import BinanceAPIException, BinanceRequestException, BinanceWithdrawException
class Client(object):
API_URL = 'https://api.binance.com/api'
WITHDRAW_API_URL = 'https://api.binance.com/wapi'
WEBSITE_URL = 'https://www.binance.com'
PUBLIC_API_VERSION = 'v1'
PRIVATE_API_VERSION = 'v3'
WITHDRAW_API_VERSION = 'v3'
SYMBOL_TYPE_SPOT = 'SPOT'
ORDER_STATUS_NEW = 'NEW'
ORDER_STATUS_PARTIALLY_FILLED = 'PARTIALLY_FILLED'
ORDER_STATUS_FILLED = 'FILLED'
ORDER_STATUS_CANCELED = 'CANCELED'
ORDER_STATUS_PENDING_CANCEL = 'PENDING_CANCEL'
ORDER_STATUS_REJECTED = 'REJECTED'
ORDER_STATUS_EXPIRED = 'EXPIRED'
KLINE_INTERVAL_1MINUTE = '1m'
KLINE_INTERVAL_3MINUTE = '3m'
KLINE_INTERVAL_5MINUTE = '5m'
KLINE_INTERVAL_15MINUTE = '15m'
KLINE_INTERVAL_30MINUTE = '30m'
KLINE_INTERVAL_1HOUR = '1h'
KLINE_INTERVAL_2HOUR = '2h'
KLINE_INTERVAL_4HOUR = '4h'
KLINE_INTERVAL_6HOUR = '6h'
KLINE_INTERVAL_8HOUR = '8h'
KLINE_INTERVAL_12HOUR = '12h'
KLINE_INTERVAL_1DAY = '1d'
KLINE_INTERVAL_3DAY = '3d'
KLINE_INTERVAL_1WEEK = '1w'
KLINE_INTERVAL_1MONTH = '1M'
SIDE_BUY = 'BUY'
SIDE_SELL = 'SELL'
ORDER_TYPE_LIMIT = 'LIMIT'
ORDER_TYPE_MARKET = 'MARKET'
ORDER_TYPE_STOP_LOSS = 'STOP_LOSS'
ORDER_TYPE_STOP_LOSS_LIMIT = 'STOP_LOSS_LIMIT'
ORDER_TYPE_TAKE_PROFIT = 'TAKE_PROFIT'
ORDER_TYPE_TAKE_PROFIT_LIMIT = 'TAKE_PROFIT_LIMIT'
ORDER_TYPE_LIMIT_MAKER = 'LIMIT_MAKER'
TIME_IN_FORCE_GTC = 'GTC' # Good till cancelled
TIME_IN_FORCE_IOC = 'IOC' # Immediate or cancel
TIME_IN_FORCE_FOK = 'FOK' # Fill or kill
ORDER_RESP_TYPE_ACK = 'ACK'
ORDER_RESP_TYPE_RESULT = 'RESULT'
ORDER_RESP_TYPE_FULL = 'FULL'
# For accessing the data returned by Client.aggregate_trades().
AGG_ID = 'a'
AGG_PRICE = 'p'
AGG_QUANTITY = 'q'
AGG_FIRST_TRADE_ID = 'f'
AGG_LAST_TRADE_ID = 'l'
AGG_TIME = 'T'
AGG_BUYER_MAKES = 'm'
AGG_BEST_MATCH = 'M'
def __init__(self, api_key, api_secret, requests_params=None):
"""Binance API Client constructor
:param api_key: Api Key
:type api_key: str.
:param api_secret: Api Secret
:type api_secret: str.
:param requests_params: optional - Dictionary of requests params to use for all calls
:type requests_params: dict.
"""
self.API_KEY = api_key
self.API_SECRET = api_secret
self.session = self._init_session()
self._requests_params = requests_params
# init DNS and SSL cert
self.ping()
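# Typical construction (key and secret values are placeholders):
#   client = Client('api_key', 'api_secret')
#   client = Client('api_key', 'api_secret', requests_params={'timeout': 20})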
def _init_session(self):
session = requests.session()
session.headers.update({'Accept': 'application/json',
'User-Agent': 'binance/python',
'X-MBX-APIKEY': self.API_KEY})
return session
def _create_api_uri(self, path, signed=True, version=PUBLIC_API_VERSION):
v = self.PRIVATE_API_VERSION if signed else version
return self.API_URL + '/' + v + '/' + path
def _create_withdraw_api_uri(self, path):
return self.WITHDRAW_API_URL + '/' + self.WITHDRAW_API_VERSION + '/' + path
def _create_website_uri(self, path):
return self.WEBSITE_URL + '/' + path
def _generate_signature(self, data):
ordered_data = self._order_params(data)
query_string = '&'.join(["{}={}".format(d[0], d[1]) for d in ordered_data])
m = hmac.new(self.API_SECRET.encode('utf-8'), query_string.encode('utf-8'), hashlib.sha256)
return m.hexdigest()
def _order_params(self, data):
"""Convert params to list with signature as last element
:param data:
:return:
"""
has_signature = False
params = []
for key, value in data.items():
if key == 'signature':
has_signature = True
else:
params.append((key, value))
# sort parameters by key
params.sort(key=itemgetter(0))
if has_signature:
params.append(('signature', data['signature']))
return params
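# Signing sketch (illustrative values): for a signed endpoint the client adds
# a millisecond timestamp, sorts the parameters by key, and signs the query
# string with HMAC-SHA256 using the API secret, e.g.
#   query = 'symbol=LTCBTC&timestamp=1499827319559'
#   signature = hmac.new(secret.encode('utf-8'), query.encode('utf-8'), hashlib.sha256).hexdigest()
# The hex digest is then appended as the final 'signature' parameter.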
def _request(self, method, uri, signed, force_params=False, **kwargs):
# set default requests timeout
kwargs['timeout'] = 10
# add our global requests params
if self._requests_params:
kwargs.update(self._requests_params)
data = kwargs.get('data', None)
if data and isinstance(data, dict):
kwargs['data'] = data
if signed:
# generate signature
kwargs['data']['timestamp'] = int(time.time() * 1000)
kwargs['data']['signature'] = self._generate_signature(kwargs['data'])
# sort get and post params to match signature order
if data:
# find any requests params passed and apply them
if 'requests_params' in kwargs['data']:
# merge requests params into kwargs
kwargs.update(kwargs['data']['requests_params'])
del(kwargs['data']['requests_params'])
# sort post params
kwargs['data'] = self._order_params(kwargs['data'])
# if get request assign data array to params value for requests lib
if data and (method == 'get' or force_params):
kwargs['params'] = kwargs['data']
del(kwargs['data'])
response = getattr(self.session, method)(uri, **kwargs)
return self._handle_response(response)
def _request_api(self, method, path, signed=False, version=PUBLIC_API_VERSION, **kwargs):
uri = self._create_api_uri(path, signed, version)
return self._request(method, uri, signed, **kwargs)
def _request_withdraw_api(self, method, path, signed=False, **kwargs):
uri = self._create_withdraw_api_uri(path)
return self._request(method, uri, signed, True, **kwargs)
def _request_website(self, method, path, signed=False, **kwargs):
uri = self._create_website_uri(path)
return self._request(method, uri, signed, **kwargs)
def _handle_response(self, response):
"""Internal helper for handling API responses from the Binance server.
Raises the appropriate exceptions when necessary; otherwise, returns the
response.
"""
if not str(response.status_code).startswith('2'):
raise BinanceAPIException(response)
try:
return response.json()
except ValueError:
raise BinanceRequestException('Invalid Response: %s' % response.text)
def _get(self, path, signed=False, version=PUBLIC_API_VERSION, **kwargs):
return self._request_api('get', path, signed, version, **kwargs)
def _post(self, path, signed=False, version=PUBLIC_API_VERSION, **kwargs):
return self._request_api('post', path, signed, version, **kwargs)
def _put(self, path, signed=False, version=PUBLIC_API_VERSION, **kwargs):
return self._request_api('put', path, signed, version, **kwargs)
def _delete(self, path, signed=False, version=PUBLIC_API_VERSION, **kwargs):
return self._request_api('delete', path, signed, version, **kwargs)
# Exchange Endpoints
def get_products(self):
"""Return list of products currently listed on Binance
Use get_exchange_info() call instead
:returns: list - List of product dictionaries
:raises: BinanceRequestException, BinanceAPIException
"""
products = self._request_website('get', 'exchange/public/product')
return products
def get_exchange_info(self):
"""Return rate limits and list of symbols
:returns: list - List of product dictionaries
.. code-block:: python
{
"timezone": "UTC",
"serverTime": 1508631584636,
"rateLimits": [
{
"rateLimitType": "REQUESTS",
"interval": "MINUTE",
"limit": 1200
},
{
"rateLimitType": "ORDERS",
"interval": "SECOND",
"limit": 10
},
{
"rateLimitType": "ORDERS",
"interval": "DAY",
"limit": 100000
}
],
"exchangeFilters": [],
"symbols": [
{
"symbol": "ETHBTC",
"status": "TRADING",
"baseAsset": "ETH",
"baseAssetPrecision": 8,
"quoteAsset": "BTC",
"quotePrecision": 8,
"orderTypes": ["LIMIT", "MARKET"],
"icebergAllowed": false,
"filters": [
{
"filterType": "PRICE_FILTER",
"minPrice": "0.00000100",
"maxPrice": "100000.00000000",
"tickSize": "0.00000100"
}, {
"filterType": "LOT_SIZE",
"minQty": "0.00100000",
"maxQty": "100000.00000000",
"stepSize": "0.00100000"
}, {
"filterType": "MIN_NOTIONAL",
"minNotional": "0.00100000"
}
]
}
]
}
:raises: BinanceRequestException, BinanceAPIException
"""
return self._get('exchangeInfo')
def get_symbol_info(self, symbol):
"""Return information about a symbol
:param symbol: required e.g BNBBTC
:type symbol: str
:returns: Dict if found, None if not
.. code-block:: python
{
"symbol": "ETHBTC",
"status": "TRADING",
"baseAsset": "ETH",
"baseAssetPrecision": 8,
"quoteAsset": "BTC",
"quotePrecision": 8,
"orderTypes": ["LIMIT", "MARKET"],
"icebergAllowed": false,
"filters": [
{
"filterType": "PRICE_FILTER",
"minPrice": "0.00000100",
"maxPrice": "100000.00000000",
"tickSize": "0.00000100"
}, {
"filterType": "LOT_SIZE",
"minQty": "0.00100000",
"maxQty": "100000.00000000",
"stepSize": "0.00100000"
}, {
"filterType": "MIN_NOTIONAL",
"minNotional": "0.00100000"
}
]
}
:raises: BinanceRequestException, BinanceAPIException
"""
res = self._get('exchangeInfo')
for item in res['symbols']:
if item['symbol'] == symbol.upper():
return item
return None
# General Endpoints
def ping(self):
"""Test connectivity to the Rest API.
https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#test-connectivity
:returns: Empty array
.. code-block:: python
{}
:raises: BinanceRequestException, BinanceAPIException
"""
return self._get('ping')
def get_server_time(self):
"""Test connectivity to the Rest API and get the current server time.
https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#check-server-time
:returns: Current server time
.. code-block:: python
{
"serverTime": 1499827319559
}
:raises: BinanceRequestException, BinanceAPIException
"""
return self._get('time')
# Market Data Endpoints
def get_all_tickers(self):
"""Latest price for all symbols.
https://www.binance.com/restapipub.html#symbols-price-ticker
:returns: List of market tickers
.. code-block:: python
[
{
"symbol": "LTCBTC",
"price": "4.00000200"
},
{
"symbol": "ETHBTC",
"price": "0.07946600"
}
]
:raises: BinanceRequestException, BinanceAPIException
"""
return self._get('ticker/allPrices')
def get_orderbook_tickers(self):
"""Best price/qty on the order book for all symbols.
https://www.binance.com/restapipub.html#symbols-order-book-ticker
:returns: List of order book market entries
.. code-block:: python
[
{
"symbol": "LTCBTC",
"bidPrice": "4.00000000",
"bidQty": "431.00000000",
"askPrice": "4.00000200",
"askQty": "9.00000000"
},
{
"symbol": "ETHBTC",
"bidPrice": "0.07946700",
"bidQty": "9.00000000",
"askPrice": "100000.00000000",
"askQty": "1000.00000000"
}
]
:raises: BinanceRequestException, BinanceAPIException
"""
return self._get('ticker/allBookTickers')
def get_order_book(self, **params):
"""Get the Order Book for the market
https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#order-book
:param symbol: required
:type symbol: str
:param limit: Default 100; max 1000
:type limit: int
:returns: API response
.. code-block:: python
{
"lastUpdateId": 1027024,
"bids": [
[
"4.00000000", # PRICE
"431.00000000", # QTY
[] # Can be ignored
]
],
"asks": [
[
"4.00000200",
"12.00000000",
[]
]
]
}
:raises: BinanceRequestException, BinanceAPIException
"""
return self._get('depth', data=params)
def get_recent_trades(self, **params):
"""Get recent trades (up to last 500).
https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#recent-trades-list
:param symbol: required
:type symbol: str
:param limit: Default 500; max 500.
:type limit: int
:returns: API response
.. code-block:: python
[
{
"id": 28457,
"price": "4.00000100",
"qty": "12.00000000",
"time": 1499865549590,
"isBuyerMaker": true,
"isBestMatch": true
}
]
:raises: BinanceRequestException, BinanceAPIException
"""
return self._get('trades', data=params)
def get_historical_trades(self, **params):
"""Get older trades.
https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#recent-trades-list
:param symbol: required
:type symbol: str
:param limit: Default 500; max 500.
:type limit: int
:param fromId: TradeId to fetch from. Default gets most recent trades.
:type fromId: str
:returns: API response
.. code-block:: python
[
{
"id": 28457,
"price": "4.00000100",
"qty": "12.00000000",
"time": 1499865549590,
"isBuyerMaker": true,
"isBestMatch": true
}
]
:raises: BinanceRequestException, BinanceAPIException
"""
return self._get('historicalTrades', data=params)
def get_aggregate_trades(self, **params):
"""Get compressed, aggregate trades. Trades that fill at the time,
from the same order, with the same price will have the quantity aggregated.
https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#compressedaggregate-trades-list
:param symbol: required
:type symbol: str
:param fromId: ID to get aggregate trades from INCLUSIVE.
:type fromId: str
:param startTime: Timestamp in ms to get aggregate trades from INCLUSIVE.
:type startTime: int
:param endTime: Timestamp in ms to get aggregate trades until INCLUSIVE.
:type endTime: int
:param limit: Default 500; max 500.
:type limit: int
:returns: API response
.. code-block:: python
[
{
"a": 26129, # Aggregate tradeId
"p": "0.01633102", # Price
"q": "4.70443515", # Quantity
"f": 27781, # First tradeId
"l": 27781, # Last tradeId
"T": 1498793709153, # Timestamp
"m": true, # Was the buyer the maker?
"M": true # Was the trade the best price match?
}
]
:raises: BinanceRequestException, BinanceAPIException
"""
return self._get('aggTrades', data=params)
def aggregate_trade_iter(self, symbol, start_str=None, last_id=None):
"""Iterate over aggregate trade data from (start_time or last_id) to
the end of the history so far.
If start_time is specified, start with the first trade after
start_time. Meant to initialise a local cache of trade data.
If last_id is specified, start with the trade after it. This is meant
for updating a pre-existing local trade data cache.
Only allows start_str or last_id—not both. Not guaranteed to work
right if you're running more than one of these simultaneously. You
will probably hit your rate limit.
See dateparser docs for valid start and end string formats http://dateparser.readthedocs.io/en/latest/
If using offset strings for dates add "UTC" to date string e.g. "now UTC", "11 hours ago UTC"
:param symbol: Symbol string e.g. ETHBTC
:type symbol: str
:param start_str: Start date string in UTC format or timestamp in milliseconds. The iterator will
return the first trade occurring later than this time.
:type start_str: str|int
:param last_id: aggregate trade ID of the last known aggregate trade.
Not a regular trade ID. See https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#compressedaggregate-trades-list.
:returns: an iterator of JSON objects, one per trade. The format of
each object is identical to Client.aggregate_trades().
:type last_id: int
"""
if start_str is not None and last_id is not None:
raise ValueError(
'start_time and last_id may not be simultaneously specified.')
# If there's no last_id, get one.
if last_id is None:
# Without a last_id, we actually need the first trade. Normally,
# we'd get rid of it. See the next loop.
if start_str is None:
trades = self.get_aggregate_trades(symbol=symbol, fromId=0)
else:
# The difference between startTime and endTime should be less
# or equal than an hour and the result set should contain at
# least one trade.
if type(start_str) == int:
start_ts = start_str
else:
start_ts = date_to_milliseconds(start_str)
trades = self.get_aggregate_trades(
symbol=symbol,
startTime=start_ts,
endTime=start_ts + (60 * 60 * 1000))
for t in trades:
yield t
last_id = trades[-1][self.AGG_ID]
while True:
# There is no need to wait between queries, to avoid hitting the
# rate limit. We're using blocking IO, and as long as we're the
# only thread running calls like this, Binance will automatically
# add the right delay time on their end, forcing us to wait for
# data. That really simplifies this function's job.
trades = self.get_aggregate_trades(symbol=symbol, fromId=last_id)
# fromId=n returns a set starting with id n, but we already have
# that one. So get rid of the first item in the result set.
trades = trades[1:]
if len(trades) == 0:
return
for t in trades:
yield t
last_id = trades[-1][self.AGG_ID]
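# Usage sketch (symbol and start string are placeholders):
#   for trade in client.aggregate_trade_iter('ETHBTC', start_str='1 hour ago UTC'):
#       print(trade[Client.AGG_PRICE], trade[Client.AGG_QUANTITY])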
def get_klines(self, **params):
"""Kline/candlestick bars for a symbol. Klines are uniquely identified by their open time.
https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#klinecandlestick-data
:param symbol: required
:type symbol: str
:param interval: Binance Kline interval (e.g. Client.KLINE_INTERVAL_1MINUTE)
:type interval: str
:param limit: Default 500; max 500.
:type limit: int
:param startTime: optional - start time in milliseconds
:type startTime: int
:param endTime: optional - end time in milliseconds
:type endTime: int
:returns: API response
.. code-block:: python
[
[
1499040000000, # Open time
"0.01634790", # Open
"0.80000000", # High
"0.01575800", # Low
"0.01577100", # Close
"148976.11427815", # Volume
1499644799999, # Close time
"2434.19055334", # Quote asset volume
308, # Number of trades
"1756.87402397", # Taker buy base asset volume
"28.46694368", # Taker buy quote asset volume
"17928899.62484339" # Can be ignored
]
]
:raises: BinanceRequestException, BinanceAPIException
"""
return self._get('klines', data=params)
def _get_earliest_valid_timestamp(self, symbol, interval):
"""Get earliest valid open timestamp from Binance
:param symbol: Name of symbol pair e.g BNBBTC
:type symbol: str
:param interval: Binance Kline interval
:type interval: str
:return: first valid timestamp
"""
kline = self.get_klines(
symbol=symbol,
interval=interval,
limit=1,
startTime=0,
endTime=None
)
return kline[0][0]
def get_historical_klines(self, symbol, interval, start_str, end_str=None):
"""Get Historical Klines from Binance
See dateparser docs for valid start and end string formats http://dateparser.readthedocs.io/en/latest/
If using offset strings for dates add "UTC" to date string e.g. "now UTC", "11 hours ago UTC"
:param symbol: Name of symbol pair e.g BNBBTC
:type symbol: str
:param interval: Binance Kline interval
:type interval: str
:param start_str: Start date string in UTC format or timestamp in milliseconds
:type start_str: str|int
:param end_str: optional - end date string in UTC format or timestamp in milliseconds (default will fetch everything up to now)
:type end_str: str|int
:return: list of OHLCV values
"""
# init our list
output_data = []
# setup the max limit
limit = 500
# convert interval to useful value in seconds
timeframe = interval_to_milliseconds(interval)
# convert our date strings to milliseconds
if type(start_str) == int:
start_ts = start_str
else:
start_ts = date_to_milliseconds(start_str)
# establish first available start timestamp
first_valid_ts = self._get_earliest_valid_timestamp(symbol, interval)
start_ts = max(start_ts, first_valid_ts)
# if an end time was passed convert it
end_ts = None
if end_str:
if type(end_str) == int:
end_ts = end_str
else:
end_ts = date_to_milliseconds(end_str)
idx = 0
while True:
# fetch the klines from start_ts up to max 500 entries or the end_ts if set
temp_data = self.get_klines(
symbol=symbol,
interval=interval,
limit=limit,
startTime=start_ts,
endTime=end_ts
)
# handle the case where exactly the limit amount of data was returned last loop
if not len(temp_data):
break
# append this loops data to our output data
output_data += temp_data
# set our start timestamp using the last value in the array
start_ts = temp_data[-1][0]
idx += 1
# check if we received less than the required limit and exit the loop
if len(temp_data) < limit:
# exit the while loop
break
# increment next call by our timeframe
start_ts += timeframe
# sleep after every 3rd call to be kind to the API
if idx % 3 == 0:
time.sleep(1)
return output_data
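# Usage sketch (symbol and date strings are placeholders):
#   klines = client.get_historical_klines('BNBBTC', Client.KLINE_INTERVAL_1HOUR,
#                                         '1 Dec 2017', '1 Jan 2018')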
def get_historical_klines_generator(self, symbol, interval, start_str, end_str=None):
"""Get Historical Klines from Binance
See dateparser docs for valid start and end string formats http://dateparser.readthedocs.io/en/latest/
If using offset strings for dates add "UTC" to date string e.g. "now UTC", "11 hours ago UTC"
:param symbol: Name of symbol pair e.g BNBBTC
:type symbol: str
:param interval: Binance Kline interval
:type interval: str
:param start_str: Start date string in UTC format or timestamp in milliseconds
:type start_str: str|int
:param end_str: optional - end date string in UTC format or timestamp in milliseconds (default will fetch everything up to now)
:type end_str: str|int
:return: generator of OHLCV values
"""
# setup the max limit
limit = 500
# convert interval to useful value in seconds
timeframe = interval_to_milliseconds(interval)
# convert our date strings to milliseconds
if type(start_str) == int:
start_ts = start_str
else:
start_ts = date_to_milliseconds(start_str)
# establish first available start timestamp
first_valid_ts = self._get_earliest_valid_timestamp(symbol, interval)
start_ts = max(start_ts, first_valid_ts)
# if an end time was passed convert it
end_ts = None
if end_str:
if type(end_str) == int:
end_ts = end_str
else:
end_ts = date_to_milliseconds(end_str)
idx = 0
while True:
# fetch the klines from start_ts up to max 500 entries or the end_ts if set
output_data = self.get_klines(
symbol=symbol,
interval=interval,
limit=limit,
startTime=start_ts,
endTime=end_ts
)
# handle the case where exactly the limit amount of data was returned last loop
if not len(output_data):
break
# yield data
for o in output_data:
yield o
# set our start timestamp using the last value in the array
start_ts = output_data[-1][0]
idx += 1
# check if we received less than the required limit and exit the loop
if len(output_data) < limit:
# exit the while loop
break
# increment next call by our timeframe
start_ts += timeframe
# sleep after every 3rd call to be kind to the API
if idx % 3 == 0:
time.sleep(1)
def get_ticker(self, **params):
"""24 hour price change statistics.
https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#24hr-ticker-price-change-statistics
:param symbol:
:type symbol: str
:returns: API response
.. code-block:: python
{
"priceChange": "-94.99999800",
"priceChangePercent": "-95.960",
"weightedAvgPrice": "0.29628482",
"prevClosePrice": "0.10002000",
"lastPrice": "4.00000200",
"bidPrice": "4.00000000",
"askPrice": "4.00000200",
"openPrice": "99.00000000",
"highPrice": "100.00000000",
"lowPrice": "0.10000000",
"volume": "8913.30000000",
"openTime": 1499783499040,
"closeTime": 1499869899040,
"fristId": 28385, # First tradeId
"lastId": 28460, # Last tradeId
"count": 76 # Trade count
}
OR
.. code-block:: python
[
{
"priceChange": "-94.99999800",
"priceChangePercent": "-95.960",
"weightedAvgPrice": "0.29628482",
"prevClosePrice": "0.10002000",
"lastPrice": "4.00000200",
"bidPrice": "4.00000000",
"askPrice": "4.00000200",
"openPrice": "99.00000000",
"highPrice": "100.00000000",
"lowPrice": "0.10000000",
"volume": "8913.30000000",
"openTime": 1499783499040,
"closeTime": 1499869899040,
"fristId": 28385, # First tradeId
"lastId": 28460, # Last tradeId
"count": 76 # Trade count
}
]
:raises: BinanceRequestException, BinanceAPIException
"""
return self._get('ticker/24hr', data=params)
def get_symbol_ticker(self, **params):
"""Latest price for a symbol or symbols.
https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#24hr-ticker-price-change-statistics
:param symbol:
:type symbol: str
:returns: API response
.. code-block:: python
{
"symbol": "LTCBTC",
"price": "4.00000200"
}
OR
.. code-block:: python
[
{
"symbol": "LTCBTC",
"price": "4.00000200"
},
{
"symbol": "ETHBTC",
"price": "0.07946600"
}
]
:raises: BinanceRequestException, BinanceAPIException
"""
return self._get('ticker/price', data=params, version=self.PRIVATE_API_VERSION)
def get_orderbook_ticker(self, **params):
"""Latest price for a symbol or symbols.
https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#symbol-order-book-ticker
:param symbol:
:type symbol: str
:returns: API response
.. code-block:: python
{
"symbol": "LTCBTC",
"bidPrice": "4.00000000",
"bidQty": "431.00000000",
"askPrice": "4.00000200",
"askQty": "9.00000000"
}
OR
.. code-block:: python
[
{
"symbol": "LTCBTC",
"bidPrice": "4.00000000",
"bidQty": "431.00000000",
"askPrice": "4.00000200",
"askQty": "9.00000000"
},
{
"symbol": "ETHBTC",
"bidPrice": "0.07946700",
"bidQty": "9.00000000",
"askPrice": "100000.00000000",
"askQty": "1000.00000000"
}
]
:raises: BinanceRequestException, BinanceAPIException
"""
return self._get('ticker/bookTicker', data=params, version=self.PRIVATE_API_VERSION)
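    # A hedged usage sketch (assuming the enclosing class is instantiated as
    # client = Client(api_key, api_secret)): per the docstrings above, omitting
    # the symbol returns a list covering all symbols, while passing one returns
    # a single dict.
    #
    #     stats = client.get_ticker(symbol='BNBBTC')           # 24hr statistics
    #     price = client.get_symbol_ticker(symbol='BNBBTC')    # latest price
    #     book = client.get_orderbook_ticker(symbol='BNBBTC')  # best bid/ask + qty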
# Account Endpoints
def create_order(self, **params):
"""Send in a new order
Any order with an icebergQty MUST have timeInForce set to GTC.
https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#new-order--trade
:param symbol: required
:type symbol: str
:param side: required
:type side: str
:param type: required
:type type: str
:param timeInForce: required if limit order
:type timeInForce: str
:param quantity: required
:type quantity: decimal
:param price: required
:type price: str
:param newClientOrderId: A unique id for the order. Automatically generated if not sent.
:type newClientOrderId: str
:param icebergQty: Used with LIMIT, STOP_LOSS_LIMIT, and TAKE_PROFIT_LIMIT to create an iceberg order.
:type icebergQty: decimal
:param newOrderRespType: Set the response JSON. ACK, RESULT, or FULL; default: RESULT.
:type newOrderRespType: str
:param recvWindow: the number of milliseconds the request is valid for
:type recvWindow: int
:returns: API response
Response ACK:
.. code-block:: python
{
"symbol":"LTCBTC",
"orderId": 1,
"clientOrderId": "myOrder1" # Will be newClientOrderId
"transactTime": 1499827319559
}
Response RESULT:
.. code-block:: python
{
"symbol": "BTCUSDT",
"orderId": 28,
"clientOrderId": "6gCrw2kRUAF9CvJDGP16IP",
"transactTime": 1507725176595,
"price": "0.00000000",
"origQty": "10.00000000",
"executedQty": "10.00000000",
"status": "FILLED",
"timeInForce": "GTC",
"type": "MARKET",
"side": "SELL"
}
Response FULL:
.. code-block:: python
{
"symbol": "BTCUSDT",
"orderId": 28,
"clientOrderId": "6gCrw2kRUAF9CvJDGP16IP",
"transactTime": 1507725176595,
"price": "0.00000000",
"origQty": "10.00000000",
"executedQty": "10.00000000",
"status": "FILLED",
"timeInForce": "GTC",
"type": "MARKET",
"side": "SELL",
"fills": [
{
"price": "4000.00000000",
"qty": "1.00000000",
"commission": "4.00000000",
"commissionAsset": "USDT"
},
{
"price": "3999.00000000",
"qty": "5.00000000",
"commission": "19.99500000",
"commissionAsset": "USDT"
},
{
"price": "3998.00000000",
"qty": "2.00000000",
"commission": "7.99600000",
"commissionAsset": "USDT"
},
{
"price": "3997.00000000",
"qty": "1.00000000",
"commission": "3.99700000",
"commissionAsset": "USDT"
},
{
"price": "3995.00000000",
"qty": "1.00000000",
"commission": "3.99500000",
"commissionAsset": "USDT"
}
]
}
:raises: BinanceRequestException, BinanceAPIException, BinanceOrderException, BinanceOrderMinAmountException, BinanceOrderMinPriceException, BinanceOrderMinTotalException, BinanceOrderUnknownSymbolException, BinanceOrderInactiveSymbolException
"""
return self._post('order', True, data=params)
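    # A hedged usage sketch, not part of the original client: placing a limit
    # order through create_order using the side/type/timeInForce constants that
    # the helper methods below reference (SIDE_BUY, ORDER_TYPE_LIMIT,
    # TIME_IN_FORCE_GTC). Symbol, quantity and price are illustrative.
    #
    #     order = client.create_order(
    #         symbol='BNBBTC',
    #         side=Client.SIDE_BUY,
    #         type=Client.ORDER_TYPE_LIMIT,
    #         timeInForce=Client.TIME_IN_FORCE_GTC,
    #         quantity=100,
    #         price='0.00001000')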
def order_limit(self, timeInForce=TIME_IN_FORCE_GTC, **params):
"""Send in a new limit order
Any order with an icebergQty MUST have timeInForce set to GTC.
:param symbol: required
:type symbol: str
:param side: required
:type side: str
:param quantity: required
:type quantity: decimal
:param price: required
:type price: str
:param timeInForce: default Good till cancelled
:type timeInForce: str
:param newClientOrderId: A unique id for the order. Automatically generated if not sent.
:type newClientOrderId: str
:param icebergQty: Used with LIMIT, STOP_LOSS_LIMIT, and TAKE_PROFIT_LIMIT to create an iceberg order.
:type icebergQty: decimal
:param newOrderRespType: Set the response JSON. ACK, RESULT, or FULL; default: RESULT.
:type newOrderRespType: str
:param recvWindow: the number of milliseconds the request is valid for
:type recvWindow: int
:returns: API response
See order endpoint for full response options
:raises: BinanceRequestException, BinanceAPIException, BinanceOrderException, BinanceOrderMinAmountException, BinanceOrderMinPriceException, BinanceOrderMinTotalException, BinanceOrderUnknownSymbolException, BinanceOrderInactiveSymbolException
"""
params.update({
'type': self.ORDER_TYPE_LIMIT,
'timeInForce': timeInForce
})
return self.create_order(**params)
def order_limit_buy(self, timeInForce=TIME_IN_FORCE_GTC, **params):
"""Send in a new limit buy order
Any order with an icebergQty MUST have timeInForce set to GTC.
:param symbol: required
:type symbol: str
:param quantity: required
:type quantity: decimal
:param price: required
:type price: str
:param timeInForce: default Good till cancelled
:type timeInForce: str
:param newClientOrderId: A unique id for the order. Automatically generated if not sent.
:type newClientOrderId: str
:param stopPrice: Used with stop orders
:type stopPrice: decimal
:param icebergQty: Used with iceberg orders
:type icebergQty: decimal
:param newOrderRespType: Set the response JSON. ACK, RESULT, or FULL; default: RESULT.
:type newOrderRespType: str
:param recvWindow: the number of milliseconds the request is valid for
:type recvWindow: int
:returns: API response
See order endpoint for full response options
:raises: BinanceRequestException, BinanceAPIException, BinanceOrderException, BinanceOrderMinAmountException, BinanceOrderMinPriceException, BinanceOrderMinTotalException, BinanceOrderUnknownSymbolException, BinanceOrderInactiveSymbolException
"""
params.update({
'side': self.SIDE_BUY,
})
return self.order_limit(timeInForce=timeInForce, **params)
def order_limit_sell(self, timeInForce=TIME_IN_FORCE_GTC, **params):
"""Send in a new limit sell order
:param symbol: required
:type symbol: str
:param quantity: required
:type quantity: decimal
:param price: required
:type price: str
:param timeInForce: default Good till cancelled
:type timeInForce: str
:param newClientOrderId: A unique id for the order. Automatically generated if not sent.
:type newClientOrderId: str
:param stopPrice: Used with stop orders
:type stopPrice: decimal
:param icebergQty: Used with iceberg orders
:type icebergQty: decimal
:param newOrderRespType: Set the response JSON. ACK, RESULT, or FULL; default: RESULT.
:type newOrderRespType: str
:param recvWindow: the number of milliseconds the request is valid for
:type recvWindow: int
:returns: API response
See order endpoint for full response options
:raises: BinanceRequestException, BinanceAPIException, BinanceOrderException, BinanceOrderMinAmountException, BinanceOrderMinPriceException, BinanceOrderMinTotalException, BinanceOrderUnknownSymbolException, BinanceOrderInactiveSymbolException
"""
params.update({
'side': self.SIDE_SELL
})
return self.order_limit(timeInForce=timeInForce, **params)
def order_market(self, **params):
"""Send in a new market order
:param symbol: required
:type symbol: str
:param side: required
:type side: str
:param quantity: required
:type quantity: decimal
:param newClientOrderId: A unique id for the order. Automatically generated if not sent.
:type newClientOrderId: str
:param newOrderRespType: Set the response JSON. ACK, RESULT, or FULL; default: RESULT.
:type newOrderRespType: str
:param recvWindow: the number of milliseconds the request is valid for
:type recvWindow: int
:returns: API response
See order endpoint for full response options
:raises: BinanceRequestException, BinanceAPIException, BinanceOrderException, BinanceOrderMinAmountException, BinanceOrderMinPriceException, BinanceOrderMinTotalException, BinanceOrderUnknownSymbolException, BinanceOrderInactiveSymbolException
"""
params.update({
'type': self.ORDER_TYPE_MARKET
})
return self.create_order(**params)
def order_market_buy(self, **params):
"""Send in a new market buy order
:param symbol: required
:type symbol: str
:param quantity: required
:type quantity: decimal
:param newClientOrderId: A unique id for the order. Automatically generated if not sent.
:type newClientOrderId: str
:param newOrderRespType: Set the response JSON. ACK, RESULT, or FULL; default: RESULT.
:type newOrderRespType: str
:param recvWindow: the number of milliseconds the request is valid for
:type recvWindow: int
:returns: API response
See order endpoint for full response options
:raises: BinanceRequestException, BinanceAPIException, BinanceOrderException, BinanceOrderMinAmountException, BinanceOrderMinPriceException, BinanceOrderMinTotalException, BinanceOrderUnknownSymbolException, BinanceOrderInactiveSymbolException
"""
params.update({
'side': self.SIDE_BUY
})
return self.order_market(**params)
def order_market_sell(self, **params):
"""Send in a new market sell order
:param symbol: required
:type symbol: str
:param quantity: required
:type quantity: decimal
:param newClientOrderId: A unique id for the order. Automatically generated if not sent.
:type newClientOrderId: str
:param newOrderRespType: Set the response JSON. ACK, RESULT, or FULL; default: RESULT.
:type newOrderRespType: str
:param recvWindow: the number of milliseconds the request is valid for
:type recvWindow: int
:returns: API response
See order endpoint for full response options
:raises: BinanceRequestException, BinanceAPIException, BinanceOrderException, BinanceOrderMinAmountException, BinanceOrderMinPriceException, BinanceOrderMinTotalException, BinanceOrderUnknownSymbolException, BinanceOrderInactiveSymbolException
"""
params.update({
'side': self.SIDE_SELL
})
return self.order_market(**params)
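    # A hedged usage sketch: the order_* helpers above only merge in the
    # 'side'/'type'/'timeInForce' fields and delegate to create_order, so a
    # market sell reduces to a single call (values are illustrative):
    #
    #     order = client.order_market_sell(symbol='BNBBTC', quantity=100)
    #
    # which is equivalent to create_order(symbol='BNBBTC', side=SIDE_SELL,
    # type=ORDER_TYPE_MARKET, quantity=100).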
def create_test_order(self, **params):
"""Test new order creation and signature/recvWindow long. Creates and validates a new order but does not send it into the matching engine.
https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#test-new-order-trade
:param symbol: required
:type symbol: str
:param side: required
:type side: str
:param type: required
:type type: str
:param timeInForce: required if limit order
:type timeInForce: str
:param quantity: required
:type quantity: decimal
:param price: required
:type price: str
:param newClientOrderId: A unique id for the order. Automatically generated if not sent.
:type newClientOrderId: str
:param icebergQty: Used with iceberg orders
:type icebergQty: decimal
:param newOrderRespType: Set the response JSON. ACK, RESULT, or FULL; default: RESULT.
:type newOrderRespType: str
:param recvWindow: The number of milliseconds the request is valid for
:type recvWindow: int
:returns: API response
.. code-block:: python
{}
:raises: BinanceRequestException, BinanceAPIException, BinanceOrderException, BinanceOrderMinAmountException, BinanceOrderMinPriceException, BinanceOrderMinTotalException, BinanceOrderUnknownSymbolException, BinanceOrderInactiveSymbolException
"""
return self._post('order/test', True, data=params)
def get_order(self, **params):
"""Check an order's status. Either orderId or origClientOrderId must be sent.
https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#query-order-user_data
:param symbol: required
:type symbol: str
:param orderId: The unique order id
:type orderId: int
:param origClientOrderId: optional
:type origClientOrderId: str
:param recvWindow: the number of milliseconds the request is valid for
:type recvWindow: int
:returns: API response
.. code-block:: python
{
"symbol": "LTCBTC",
"orderId": 1,
"clientOrderId": "myOrder1",
"price": "0.1",
"origQty": "1.0",
"executedQty": "0.0",
"status": "NEW",
"timeInForce": "GTC",
"type": "LIMIT",
"side": "BUY",
"stopPrice": "0.0",
"icebergQty": "0.0",
"time": 1499827319559
}
:raises: BinanceRequestException, BinanceAPIException
"""
return self._get('order', True, data=params)
def get_all_orders(self, **params):
"""Get all account orders; active, canceled, or filled.
https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#all-orders-user_data
:param symbol: required
:type symbol: str
:param orderId: The unique order id
:type orderId: int
:param limit: Default 500; max 500.
:type limit: int
:param recvWindow: the number of milliseconds the request is valid for
:type recvWindow: int
:returns: API response
.. code-block:: python
[
{
"symbol": "LTCBTC",
"orderId": 1,
"clientOrderId": "myOrder1",
"price": "0.1",
"origQty": "1.0",
"executedQty": "0.0",
"status": "NEW",
"timeInForce": "GTC",
"type": "LIMIT",
"side": "BUY",
"stopPrice": "0.0",
"icebergQty": "0.0",
"time": 1499827319559
}
]
:raises: BinanceRequestException, BinanceAPIException
"""
return self._get('allOrders', True, data=params)
def cancel_order(self, **params):
"""Cancel an active order. Either orderId or origClientOrderId must be sent.
https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#cancel-order-trade
:param symbol: required
:type symbol: str
:param orderId: The unique order id
:type orderId: int
:param origClientOrderId: optional
:type origClientOrderId: str
:param newClientOrderId: Used to uniquely identify this cancel. Automatically generated by default.
:type newClientOrderId: str
:param recvWindow: the number of milliseconds the request is valid for
:type recvWindow: int
:returns: API response
.. code-block:: python
{
"symbol": "LTCBTC",
"origClientOrderId": "myOrder1",
"orderId": 1,
"clientOrderId": "cancelMyOrder1"
}
:raises: BinanceRequestException, BinanceAPIException
"""
return self._delete('order', True, data=params)
def get_open_orders(self, **params):
"""Get all open orders on a symbol.
https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#current-open-orders-user_data
:param symbol: optional
:type symbol: str
:param recvWindow: the number of milliseconds the request is valid for
:type recvWindow: int
:returns: API response
.. code-block:: python
[
{
"symbol": "LTCBTC",
"orderId": 1,
"clientOrderId": "myOrder1",
"price": "0.1",
"origQty": "1.0",
"executedQty": "0.0",
"status": "NEW",
"timeInForce": "GTC",
"type": "LIMIT",
"side": "BUY",
"stopPrice": "0.0",
"icebergQty": "0.0",
"time": 1499827319559
}
]
:raises: BinanceRequestException, BinanceAPIException
"""
return self._get('openOrders', True, data=params)
    # Account Endpoints (continued)
def get_account(self, **params):
"""Get current account information.
https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#account-information-user_data
:param recvWindow: the number of milliseconds the request is valid for
:type recvWindow: int
:returns: API response
.. code-block:: python
{
"makerCommission": 15,
"takerCommission": 15,
"buyerCommission": 0,
"sellerCommission": 0,
"canTrade": true,
"canWithdraw": true,
"canDeposit": true,
"balances": [
{
"asset": "BTC",
"free": "4723846.89208129",
"locked": "0.00000000"
},
{
"asset": "LTC",
"free": "4763368.68006011",
"locked": "0.00000000"
}
]
}
:raises: BinanceRequestException, BinanceAPIException
"""
return self._get('account', True, data=params)
def get_asset_balance(self, asset, **params):
"""Get current asset balance.
https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#account-information-user_data
:param asset: required
:type asset: str
:param recvWindow: the number of milliseconds the request is valid for
:type recvWindow: int
:returns: dictionary or None if not found
.. code-block:: python
{
"asset": "BTC",
"free": "4723846.89208129",
"locked": "0.00000000"
}
:raises: BinanceRequestException, BinanceAPIException
"""
res = self.get_account(**params)
# find asset balance in list of balances
if "balances" in res:
for bal in res['balances']:
if bal['asset'].lower() == asset.lower():
return bal
return None
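    # A hedged usage sketch: get_asset_balance returns a single balance dict or
    # None, so the free and locked amounts can be read and summed directly
    # (the asset name is illustrative).
    #
    #     bal = client.get_asset_balance(asset='BTC')
    #     if bal is not None:
    #         total = float(bal['free']) + float(bal['locked'])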
def get_my_trades(self, **params):
"""Get trades for a specific symbol.
https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#account-trade-list-user_data
:param symbol: required
:type symbol: str
:param limit: Default 500; max 500.
:type limit: int
:param fromId: TradeId to fetch from. Default gets most recent trades.
:type fromId: int
:param recvWindow: the number of milliseconds the request is valid for
:type recvWindow: int
:returns: API response
.. code-block:: python
[
{
"id": 28457,
"price": "4.00000100",
"qty": "12.00000000",
"commission": "10.10000000",
"commissionAsset": "BNB",
"time": 1499865549590,
"isBuyer": true,
"isMaker": false,
"isBestMatch": true
}
]
:raises: BinanceRequestException, BinanceAPIException
"""
return self._get('myTrades', True, data=params)
def get_system_status(self):
"""Get system status detail.
https://github.com/binance-exchange/binance-official-api-docs/blob/master/wapi-api.md#system-status-system
:returns: API response
.. code-block:: python
{
"status": 0, # 0: normal,1:system maintenance
"msg": "normal" # normal or System maintenance.
}
:raises: BinanceAPIException
"""
return self._request_withdraw_api('get', 'systemStatus.html')
def get_account_status(self, **params):
"""Get account status detail.
https://github.com/binance-exchange/binance-official-api-docs/blob/master/wapi-api.md#account-status-user_data
:param recvWindow: the number of milliseconds the request is valid for
:type recvWindow: int
:returns: API response
.. code-block:: python
{
"msg": "Order failed:Low Order fill rate! Will be reactivated after 5 minutes.",
"success": true,
"objs": [
"5"
]
}
:raises: BinanceWithdrawException
"""
res = self._request_withdraw_api('get', 'accountStatus.html', True, data=params)
if not res['success']:
raise BinanceWithdrawException(res['msg'])
return res
def get_dust_log(self, **params):
"""Get log of small amounts exchanged for BNB.
https://github.com/binance-exchange/binance-official-api-docs/blob/master/wapi-api.md#dustlog-user_data
:param recvWindow: the number of milliseconds the request is valid for
:type recvWindow: int
:returns: API response
.. code-block:: python
{
"success": true,
"results": {
"total": 2, //Total counts of exchange
"rows": [
{
"transfered_total": "0.00132256", # Total transfered BNB amount for this exchange.
"service_charge_total": "0.00002699", # Total service charge amount for this exchange.
"tran_id": 4359321,
"logs": [ # Details of this exchange.
{
"tranId": 4359321,
"serviceChargeAmount": "0.000009",
"uid": "10000015",
"amount": "0.0009",
"operateTime": "2018-05-03 17:07:04",
"transferedAmount": "0.000441",
"fromAsset": "USDT"
},
{
"tranId": 4359321,
"serviceChargeAmount": "0.00001799",
"uid": "10000015",
"amount": "0.0009",
"operateTime": "2018-05-03 17:07:04",
"transferedAmount": "0.00088156",
"fromAsset": "ETH"
}
],
"operate_time": "2018-05-03 17:07:04" //The time of this exchange.
},
{
"transfered_total": "0.00058795",
"service_charge_total": "0.000012",
"tran_id": 4357015,
"logs": [ // Details of this exchange.
{
"tranId": 4357015,
"serviceChargeAmount": "0.00001",
"uid": "10000015",
"amount": "0.001",
"operateTime": "2018-05-02 13:52:24",
"transferedAmount": "0.00049",
"fromAsset": "USDT"
},
{
"tranId": 4357015,
"serviceChargeAmount": "0.000002",
"uid": "10000015",
"amount": "0.0001",
"operateTime": "2018-05-02 13:51:11",
"transferedAmount": "0.00009795",
"fromAsset": "ETH"
}
],
"operate_time": "2018-05-02 13:51:11"
}
]
}
}
:raises: BinanceWithdrawException
"""
res = self._request_withdraw_api('get', 'userAssetDribbletLog.html', True, data=params)
if not res['success']:
raise BinanceWithdrawException(res['msg'])
return res
def get_trade_fee(self, **params):
"""Get trade fee.
https://github.com/binance-exchange/binance-official-api-docs/blob/master/wapi-api.md#trade-fee-user_data
:param symbol: optional
:type symbol: str
:param recvWindow: the number of milliseconds the request is valid for
:type recvWindow: int
:returns: API response
.. code-block:: python
{
"tradeFee": [
{
"symbol": "ADABNB",
"maker": 0.9000,
"taker": 1.0000
}, {
"symbol": "BNBBTC",
"maker": 0.3000,
"taker": 0.3000
}
],
"success": true
}
:raises: BinanceWithdrawException
"""
res = self._request_withdraw_api('get', 'tradeFee.html', True, data=params)
if not res['success']:
raise BinanceWithdrawException(res['msg'])
return res
def get_asset_details(self, **params):
"""Fetch details on assets.
https://github.com/binance-exchange/binance-official-api-docs/blob/master/wapi-api.md#asset-detail-user_data
:param recvWindow: the number of milliseconds the request is valid for
:type recvWindow: int
:returns: API response
.. code-block:: python
{
"success": true,
"assetDetail": {
"CTR": {
"minWithdrawAmount": "70.00000000", //min withdraw amount
"depositStatus": false,//deposit status
"withdrawFee": 35, // withdraw fee
"withdrawStatus": true, //withdraw status
"depositTip": "Delisted, Deposit Suspended" //reason
},
"SKY": {
"minWithdrawAmount": "0.02000000",
"depositStatus": true,
"withdrawFee": 0.01,
"withdrawStatus": true
}
}
}
:raises: BinanceWithdrawException
"""
res = self._request_withdraw_api('get', 'assetDetail.html', True, data=params)
if not res['success']:
raise BinanceWithdrawException(res['msg'])
return res
# Withdraw Endpoints
def withdraw(self, **params):
"""Submit a withdraw request.
https://www.binance.com/restapipub.html
Assumptions:
- You must have Withdraw permissions enabled on your API key
- You must have withdrawn to the address specified through the website and approved the transaction via email
:param asset: required
:type asset: str
        :param address: required
        :type address: str
        :param addressTag: optional - Secondary address identifier for coins like XRP, XMR etc.
        :type addressTag: str
:param amount: required
:type amount: decimal
        :param name: optional - Description of the address; defaults to the asset value if not set
:type name: str
:param recvWindow: the number of milliseconds the request is valid for
:type recvWindow: int
:returns: API response
.. code-block:: python
{
"msg": "success",
"success": true,
"id":"7213fea8e94b4a5593d507237e5a555b"
}
:raises: BinanceRequestException, BinanceAPIException, BinanceWithdrawException
"""
# force a name for the withdrawal if one not set
if 'asset' in params and 'name' not in params:
params['name'] = params['asset']
res = self._request_withdraw_api('post', 'withdraw.html', True, data=params)
if not res['success']:
raise BinanceWithdrawException(res['msg'])
return res
def get_deposit_history(self, **params):
"""Fetch deposit history.
https://www.binance.com/restapipub.html
:param asset: optional
:type asset: str
        :param status: 0(0:pending, 1:success) optional
        :type status: int
:param startTime: optional
:type startTime: long
:param endTime: optional
:type endTime: long
:param recvWindow: the number of milliseconds the request is valid for
:type recvWindow: int
:returns: API response
.. code-block:: python
{
"depositList": [
{
"insertTime": 1508198532000,
"amount": 0.04670582,
"asset": "ETH",
"status": 1
}
],
"success": true
}
:raises: BinanceRequestException, BinanceAPIException
"""
return self._request_withdraw_api('get', 'depositHistory.html', True, data=params)
def get_withdraw_history(self, **params):
"""Fetch withdraw history.
https://www.binance.com/restapipub.html
:param asset: optional
:type asset: str
        :param status: 0(0:Email Sent, 1:Cancelled, 2:Awaiting Approval, 3:Rejected, 4:Processing, 5:Failure, 6:Completed) optional
        :type status: int
:param startTime: optional
:type startTime: long
:param endTime: optional
:type endTime: long
:param recvWindow: the number of milliseconds the request is valid for
:type recvWindow: int
:returns: API response
.. code-block:: python
{
"withdrawList": [
{
"amount": 1,
"address": "0x6915f16f8791d0a1cc2bf47c13a6b2a92000504b",
"asset": "ETH",
"applyTime": 1508198532000
"status": 4
},
{
"amount": 0.005,
"address": "0x6915f16f8791d0a1cc2bf47c13a6b2a92000504b",
"txId": "0x80aaabed54bdab3f6de5868f89929a2371ad21d666f20f7393d1a3389fad95a1",
"asset": "ETH",
"applyTime": 1508198532000,
"status": 4
}
],
"success": true
}
:raises: BinanceRequestException, BinanceAPIException
"""
return self._request_withdraw_api('get', 'withdrawHistory.html', True, data=params)
def get_deposit_address(self, **params):
"""Fetch a deposit address for a symbol
https://www.binance.com/restapipub.html
:param asset: required
:type asset: str
:param recvWindow: the number of milliseconds the request is valid for
:type recvWindow: int
:returns: API response
.. code-block:: python
{
"address": "0x6915f16f8791d0a1cc2bf47c13a6b2a92000504b",
"success": true,
"addressTag": "1231212",
"asset": "BNB"
}
:raises: BinanceRequestException, BinanceAPIException
"""
return self._request_withdraw_api('get', 'depositAddress.html', True, data=params)
def get_withdraw_fee(self, **params):
"""Fetch the withdrawal fee for an asset
:param asset: required
:type asset: str
:param recvWindow: the number of milliseconds the request is valid for
:type recvWindow: int
:returns: API response
.. code-block:: python
{
"withdrawFee": "0.0005",
"success": true
}
:raises: BinanceRequestException, BinanceAPIException
"""
return self._request_withdraw_api('get', 'withdrawFee.html', True, data=params)
# User Stream Endpoints
def stream_get_listen_key(self):
"""Start a new user data stream and return the listen key
        If a stream already exists, it should return the same key.
        If the stream becomes invalid, a new key is returned.
Can be used to keep the user stream alive.
https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#start-user-data-stream-user_stream
:returns: API response
.. code-block:: python
{
"listenKey": "pqia91ma19a5s61cv6a81va65sdf19v8a65a1a5s61cv6a81va65sdf19v8a65a1"
}
:raises: BinanceRequestException, BinanceAPIException
"""
res = self._post('userDataStream', False, data={})
return res['listenKey']
def stream_keepalive(self, listenKey):
"""PING a user data stream to prevent a time out.
https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#keepalive-user-data-stream-user_stream
:param listenKey: required
:type listenKey: str
:returns: API response
.. code-block:: python
{}
:raises: BinanceRequestException, BinanceAPIException
"""
params = {
'listenKey': listenKey
}
return self._put('userDataStream', False, data=params)
def stream_close(self, listenKey):
"""Close out a user data stream.
https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#close-user-data-stream-user_stream
:param listenKey: required
:type listenKey: str
:returns: API response
.. code-block:: python
{}
:raises: BinanceRequestException, BinanceAPIException
"""
params = {
'listenKey': listenKey
}
return self._delete('userDataStream', False, data=params)
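    # A hedged usage sketch of the user data stream lifecycle built from the
    # three methods above: start a stream, keep it alive periodically, then
    # close it. How the websocket itself is consumed is outside this client.
    #
    #     listen_key = client.stream_get_listen_key()
    #     # ... consume the user data websocket elsewhere ...
    #     client.stream_keepalive(listen_key)   # repeat well before the key expires
    #     client.stream_close(listen_key)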
| 34.031462
| 251
| 0.538722
|
670e229496f29510882f893323b38836fd781192
| 6,212
|
py
|
Python
|
dohproxy/client_protocol.py
|
rfinnie/doh-proxy
|
3340626e7c2ee1d094f3c81feb693be712f337a2
|
[
"BSD-3-Clause"
] | null | null | null |
dohproxy/client_protocol.py
|
rfinnie/doh-proxy
|
3340626e7c2ee1d094f3c81feb693be712f337a2
|
[
"BSD-3-Clause"
] | null | null | null |
dohproxy/client_protocol.py
|
rfinnie/doh-proxy
|
3340626e7c2ee1d094f3c81feb693be712f337a2
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
#
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
#
import aioh2
import asyncio
import dns.message
import priority
import struct
import urllib.parse
from dohproxy import constants, utils
class StubServerProtocol:
def __init__(self, args, logger=None, client_store=None):
self.logger = logger
self.args = args
self._lock = asyncio.Lock()
if logger is None:
self.logger = utils.configure_logger('StubServerProtocol')
# The client is wrapped in a mutable dictionary, so it may be shared
# across multiple contexts if passed from higher in the chain.
if client_store is None:
self.client_store = {'client': None}
else:
self.client_store = client_store
async def get_client(self):
if self.client_store['client'] is not None:
if self.client_store['client']._conn is not None:
return self.client_store['client']
# Open client connection
self.logger.debug('Opening connection to {}'.format(self.args.domain))
sslctx = utils.create_custom_ssl_context(
insecure=self.args.insecure,
cafile=self.args.cafile
)
remote_addr = self.args.remote_address \
if self.args.remote_address else self.args.domain
client = await aioh2.open_connection(
remote_addr,
self.args.port,
functional_timeout=0.1,
ssl=sslctx,
server_hostname=self.args.domain)
rtt = await client.wait_functional()
if rtt:
self.logger.debug('Round-trip time: %.1fms' % (rtt * 1000))
self.client_store['client'] = client
return client
def connection_made(self, transport):
pass
def connection_lost(self, exc):
pass
def on_answer(self, addr, msg):
pass
def on_message_received(self, stream_id, msg):
"""
        Take a wire-format message returned from a DoH server and convert it
        to a python dns message.
"""
return dns.message.from_wire(msg)
async def on_start_request(self, client, headers, end_stream):
return await client.start_request(headers, end_stream=end_stream)
async def on_send_data(self, client, stream_id, body):
return await client.send_data(stream_id, body, end_stream=True)
def on_recv_response(self, stream_id, headers):
self.logger.debug('Response headers: {}'.format(headers))
def _make_get_path(self, content):
params = utils.build_query_params(content)
self.logger.debug('Query parameters: {}'.format(params))
params_str = urllib.parse.urlencode(params)
if self.args.debug:
url = utils.make_url(self.args.domain, self.args.uri)
self.logger.debug('Sending {}?{}'.format(url, params_str))
return self.args.uri + '?' + params_str
async def make_request(self, addr, dnsq):
# FIXME: maybe aioh2 should allow registering to connection_lost event
        # so we can find out when the connection gets disconnected.
with await self._lock:
client = await self.get_client()
headers = {'Accept': constants.DOH_MEDIA_TYPE}
path = self.args.uri
qid = dnsq.id
dnsq.id = 0
body = b''
headers = [
(':authority', self.args.domain),
            (':method', 'POST' if self.args.post else 'GET'),
(':scheme', 'https'),
]
if self.args.post:
headers.append(('content-type', constants.DOH_MEDIA_TYPE))
body = dnsq.to_wire()
else:
path = self._make_get_path(dnsq.to_wire())
headers.insert(0, (':path', path))
headers.extend([
('content-length', str(len(body))),
])
# Start request with headers
# FIXME: Find a better way to close old streams. See GH#11
try:
stream_id = await self.on_start_request(client, headers, not body)
except priority.priority.TooManyStreamsError:
client = await self.get_client()
stream_id = await self.on_start_request(client, headers, not body)
self.logger.debug(
'Stream ID: {} / Total streams: {}'.format(
stream_id, len(client._streams)
)
)
# Send my name "world" as whole request body
if body:
await self.on_send_data(client, stream_id, body)
# Receive response headers
headers = await client.recv_response(stream_id)
self.on_recv_response(stream_id, headers)
        # FIXME: handle errors by answering with SERVFAIL
# Read all response body
resp = await client.read_stream(stream_id, -1)
dnsr = self.on_message_received(stream_id, resp)
dnsr.id = qid
self.on_answer(addr, dnsr.to_wire())
# Read response trailers
trailers = await client.recv_trailers(stream_id)
self.logger.debug('Response trailers: {}'.format(trailers))
class StubServerProtocolUDP(StubServerProtocol):
def connection_made(self, transport):
self.transport = transport
def datagram_received(self, data, addr):
dnsq = dns.message.from_wire(data)
asyncio.ensure_future(self.make_request(addr, dnsq))
def on_answer(self, addr, msg):
self.transport.sendto(msg, addr)
class StubServerProtocolTCP(StubServerProtocol):
def connection_made(self, transport):
self.transport = transport
self.addr = transport.get_extra_info('peername')
self.buffer = b''
def data_received(self, data):
self.buffer = utils.handle_dns_tcp_data(
self.buffer + data, self.receive_helper
)
def receive_helper(self, dnsq):
asyncio.ensure_future(self.make_request(self.addr, dnsq))
def on_answer(self, addr, msg):
self.transport.write(struct.pack('!H', len(msg)) + msg)
def eof_received(self):
self.transport.close()
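# A hedged sketch, not part of this module: on_answer in StubServerProtocolTCP
# uses standard DNS-over-TCP framing (RFC 1035), prefixing each message with its
# length as a 2-byte big-endian integer. Decoding one message from a TCP buffer
# is roughly the inverse; utils.handle_dns_tcp_data is assumed to do the
# equivalent for inbound data.
#
#     import struct
#
#     def read_one_dns_message(buf: bytes):
#         """Return (message, remaining_buffer); message is None if incomplete."""
#         if len(buf) < 2:
#             return None, buf
#         (length,) = struct.unpack('!H', buf[:2])
#         if len(buf) < 2 + length:
#             return None, buf
#         return buf[2:2 + length], buf[2 + length:]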
| 33.042553
| 78
| 0.629105
|
ce72e0c8e22fdbdfd00bc6eb0596c2eeb03207ec
| 4,163
|
py
|
Python
|
mobidziennik.py
|
JakubKoralewski/mobidziennik-do-kalendarza
|
bab06b843e1b5d011625549c56551dabff079787
|
[
"MIT"
] | null | null | null |
mobidziennik.py
|
JakubKoralewski/mobidziennik-do-kalendarza
|
bab06b843e1b5d011625549c56551dabff079787
|
[
"MIT"
] | null | null | null |
mobidziennik.py
|
JakubKoralewski/mobidziennik-do-kalendarza
|
bab06b843e1b5d011625549c56551dabff079787
|
[
"MIT"
] | null | null | null |
from robobrowser import RoboBrowser
#from bs4 import BeautifulSoup
import re
br = RoboBrowser()
br.open('https://lo2kalisz.mobidziennik.pl/dziennik/')
form = br.get_form()
form['login'] = input("Podaj login: ")
form['haslo'] = input("Podaj haslo: ")
br.submit_form(form)
br.open('https://lo2kalisz.mobidziennik.pl/dziennik/planzajec/?bez-zastepstw=1')
def determineDay(percent):
percent = str(percent)
if percent == '0.5':
return 0
elif percent == '20.5':
return 1
elif percent == '40.5':
return 2
elif percent == '60.5':
return 3
elif percent == '80.5':
return 4
else:
return 'error'
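# A hedged alternative sketch (equivalent behaviour, not a change to the script):
# the CSS 'left' percentages map one-to-one onto weekday indexes, so the chain of
# elif branches above could also be written as a dict lookup.
#
#     DAY_BY_LEFT_PERCENT = {'0.5': 0, '20.5': 1, '40.5': 2, '60.5': 3, '80.5': 4}
#
#     def determineDay(percent):
#         return DAY_BY_LEFT_PERCENT.get(str(percent), 'error')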
numOfLessons = i = numOfMatches = 0
calendar = {}
for bigDiv in br.find_all(class_='plansc_cnt_w'):
numOfLessons += 1
bigDiv = str(bigDiv)
    # Regex out the CSS 'left' value, e.g. style="width:19%;left: one or two digits + optional dot + optional digit %;"
percent = re.search(r'style="width:\d\d%;left:(\d{1,2}.?\d?)%;', bigDiv)[1]
title = re.search(
r'title="(?P<startTime>\d\d:\d\d) - (?P<endTime>\d\d:\d\d)<br />(?P<name>.*)<br />(?P<info>.*) (?P<classroom>\(.*\))"', bigDiv)
# get start, end, name, classroom, which day it is and additional info
try:
i += 1
dayNum = determineDay(percent)
startTime = title[1]
endTime = title[2]
name = title[3]
info = title[4]
classroom = title[5]
except TypeError:
pass
# this is done for comparing number of tries against succeeded results
try:
title[0]
numOfMatches += 1
except:
pass
# save to dict
calendar[numOfMatches] = {
'name': name,
'dayNum': dayNum,
'startTime': startTime,
'endTime': endTime,
'classroom': classroom,
'info': info,
}
from icalendar import Calendar, Event
from datetime import datetime, date, timedelta
#import pytz
import os
import random
import string
c = Calendar()
e = Event()
c.add('prodid', '-//JakubKoralewski//github.com//')
c.add('version', '2.0')
def randomWord(length):
letters = string.ascii_letters
return ''.join((random.choice(letters)) for i in range(length))
for i in calendar.keys():
e = Event()
name = calendar[i]['name']
dayNum = int(calendar[i]['dayNum'])
startTime = calendar[i]['startTime']
endTime = calendar[i]['endTime']
classroom = calendar[i]['classroom']
info = calendar[i]['info']
todaysDate = datetime.today()
todaysDay = date.isoweekday(todaysDate)
uid = str(todaysDate).replace(" ", "") + \
str(randomWord(8))+'@github.com'
# split 14:36 into 14 and 36
startHour = int(startTime[0:2])
startMinutes = int(startTime[3:6])
endHour = int(endTime[0:2])
endMinutes = int(endTime[3:6])
# get the day from which to start adding
mondayDelta = todaysDay - 1
firstMonday = todaysDate - timedelta(days=mondayDelta)
# print(firstMonday)
summary = '{} - {}'.format(name, classroom)
crlf = chr(13)+chr(10)
description = '{}\r\nLekcja: {}\r\nKlasa: {}'.format(
info, name, classroom)
year = date.today().year
month = date.today().month
day = firstMonday + timedelta(days=dayNum)
day = day.day
#print('day: {}'.format(day))
e.add('summary', summary)
e.add('description', description)
e.add('dtstart', datetime(year, month, day, startHour, startMinutes))
e.add('dtend', datetime(year, month, day, endHour, endMinutes))
e.add('uid', uid)
e.add('dtstamp', todaysDate)
if month >= 9:
e.add('rrule', {'freq': 'weekly', 'until': datetime(year+1, 6, 30)})
else:
e.add('rrule', {'freq': 'weekly', 'until': datetime(year, 6, 30)})
c.add_component(e)
print(summary)
with open('calendar.ics', 'wb') as calendar:
print(
f'writing your calendar to {os.getcwd() + chr(92) + "calendar.ics"}.')
calendar.write(c.to_ical())
input('Press anything to close.')
| 28.319728
| 148
| 0.582513
|