hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
921d87ebd3a06e264af37b57c15f3b426169fe5a | 5,667 | py | Python | novaclient/tests/unit/v2/contrib/fakes.py | alvarolopez/python-novaclient | ef7cb1d44d47a1273810603fd96d982d7f0bd7d6 | [
"Apache-1.1"
] | null | null | null | novaclient/tests/unit/v2/contrib/fakes.py | alvarolopez/python-novaclient | ef7cb1d44d47a1273810603fd96d982d7f0bd7d6 | [
"Apache-1.1"
] | null | null | null | novaclient/tests/unit/v2/contrib/fakes.py | alvarolopez/python-novaclient | ef7cb1d44d47a1273810603fd96d982d7f0bd7d6 | [
"Apache-1.1"
] | null | null | null | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from novaclient.tests.unit.v2 import fakes
from novaclient.v2 import client
# Re-export the shared fake fixtures so contrib tests can import them from
# this module directly instead of reaching into the parent fakes module.
FAKE_REQUEST_ID_LIST = fakes.FAKE_REQUEST_ID_LIST
FAKE_RESPONSE_HEADERS = fakes.FAKE_RESPONSE_HEADERS
class FakeClient(fakes.FakeClient):
    """Fake novaclient Client whose transport is the canned FakeHTTPClient."""

    def __init__(self, *args, **kwargs):
        # Initialise the real Client with dummy credentials, then swap the
        # HTTP layer for the stubbed one defined in this module.
        extensions = kwargs.get('extensions')
        client.Client.__init__(self, 'username', 'password',
                               'project_id', 'auth_url',
                               extensions=extensions,
                               direct_use=False)
        self.client = FakeHTTPClient(**kwargs)
class FakeHTTPClient(fakes.FakeHTTPClient):
    """Fake HTTP transport serving canned responses for contrib extensions.

    Each method name encodes an HTTP verb plus URL path (for example
    ``get_os_tenant_networks`` answers ``GET /os-tenant-networks``) and
    returns a ``(status_code, headers, body)`` tuple.
    """

    def get_os_tenant_networks(self):
        network = {"label": "1", "cidr": "10.0.0.0/24",
                   'project_id': '4ffc664c198e435e9853f2538fbcd7a7',
                   'id': '1'}
        return (200, FAKE_RESPONSE_HEADERS, {'networks': [network]})

    def get_os_tenant_networks_1(self, **kw):
        network = {"label": "1", "cidr": "10.0.0.0/24",
                   'project_id': '4ffc664c198e435e9853f2538fbcd7a7',
                   'id': '1'}
        return (200, FAKE_RESPONSE_HEADERS, {'network': network})

    def post_os_tenant_networks(self, **kw):
        network = {"label": "1", "cidr": "10.0.0.0/24",
                   'project_id': '4ffc664c198e435e9853f2538fbcd7a7',
                   'id': '1'}
        return (201, FAKE_RESPONSE_HEADERS, {'network': network})

    def delete_os_tenant_networks_1(self, **kw):
        return (204, FAKE_RESPONSE_HEADERS, None)

    def get_os_baremetal_nodes(self, **kw):
        node = {
            "id": 1,
            "instance_uuid": None,
            "interfaces": [],
            "cpus": 2,
            "local_gb": 10,
            "memory_mb": 5,
            "pm_address": "2.3.4.5",
            "pm_user": "pmuser",
            "pm_password": "pmpass",
            "prov_mac_address": "aa:bb:cc:dd:ee:ff",
            "prov_vlan_id": 1,
            "service_host": "somehost",
            "terminal_port": 8080,
        }
        return (200, FAKE_RESPONSE_HEADERS, {'nodes': [node]})

    def get_os_baremetal_nodes_1(self, **kw):
        node = {
            "id": 1,
            "instance_uuid": None,
            "pm_address": "1.2.3.4",
            "interfaces": [],
            "cpus": 2,
            "local_gb": 10,
            "memory_mb": 5,
            "pm_user": "pmuser",
            "pm_password": "pmpass",
            "prov_mac_address": "aa:bb:cc:dd:ee:ff",
            "prov_vlan_id": 1,
            "service_host": "somehost",
            "terminal_port": 8080,
        }
        return (200, FAKE_RESPONSE_HEADERS, {'node': node})

    def post_os_baremetal_nodes(self, **kw):
        node = {
            "id": 1,
            "instance_uuid": None,
            "cpus": 2,
            "local_gb": 10,
            "memory_mb": 5,
            "pm_address": "2.3.4.5",
            "pm_user": "pmuser",
            "pm_password": "pmpass",
            "prov_mac_address": "aa:bb:cc:dd:ee:ff",
            "prov_vlan_id": 1,
            "service_host": "somehost",
            "terminal_port": 8080,
        }
        return (200, FAKE_RESPONSE_HEADERS, {'node': node})

    def delete_os_baremetal_nodes_1(self, **kw):
        return (202, FAKE_RESPONSE_HEADERS, {})

    def post_os_baremetal_nodes_1_action(self, **kw):
        # The request body is a single-key dict whose key names the action.
        body = kw['body']
        action = list(body)[0]
        if action == "add_interface":
            interface = {
                "id": 2,
                "address": "bb:cc:dd:ee:ff:aa",
                "datapath_id": 1,
                "port_no": 2,
            }
            return (200, FAKE_RESPONSE_HEADERS, {'interface': interface})
        if action == "remove_interface":
            return (202, FAKE_RESPONSE_HEADERS, {})
        # Unknown actions are answered with a server error.
        return (500, {}, {})

    def post_os_assisted_volume_snapshots(self, **kw):
        snapshot = {'id': 'blah', 'volumeId': '1'}
        return (202, FAKE_RESPONSE_HEADERS, {'snapshot': snapshot})

    def delete_os_assisted_volume_snapshots_x(self, **kw):
        return (202, FAKE_RESPONSE_HEADERS, {})

    def post_os_server_external_events(self, **kw):
        events = [
            {'name': 'test-event',
             'status': 'completed',
             'tag': 'tag',
             'server_uuid': 'fake-uuid1'},
            {'name': 'test-event',
             'status': 'completed',
             'tag': 'tag',
             'server_uuid': 'fake-uuid2'},
        ]
        return (200, FAKE_RESPONSE_HEADERS, {'events': events})
| 36.095541 | 75 | 0.470796 |
1fbcee968bba97eac0cc9b13eadb31d2449e671d | 1,005 | py | Python | taiga/events/backends/__init__.py | threefoldtech/Threefold-Circles | cbc433796b25cf7af9a295af65d665a4a279e2d6 | [
"Apache-2.0"
] | null | null | null | taiga/events/backends/__init__.py | threefoldtech/Threefold-Circles | cbc433796b25cf7af9a295af65d665a4a279e2d6 | [
"Apache-2.0"
] | 12 | 2019-11-25T14:08:32.000Z | 2021-06-24T10:35:51.000Z | taiga/events/backends/__init__.py | threefoldtech/Threefold-Circles | cbc433796b25cf7af9a295af65d665a4a279e2d6 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (C) 2014-2017 Andrey Antukh <niwi@niwi.nz>
# Copyright (C) 2014-2017 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014-2017 David Barragán <bameda@dbarragan.com>
# Copyright (C) 2014-2017 Alejandro Alonso <alejandro.alonso@kaleidos.net>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from .base import get_events_backend
__all__ = ["get_events_backend"]
| 45.681818 | 74 | 0.760199 |
d779c3961dd291860b45df97f1726d3f0fea09a9 | 7,960 | py | Python | src/pytkdocs/serializer.py | shashankrnr32/pytkdocs | bf04764f1608970643932329c9f6c8c63a0c5632 | [
"0BSD"
] | 1 | 2021-04-30T23:34:03.000Z | 2021-04-30T23:34:03.000Z | src/pytkdocs/serializer.py | shashankrnr32/pytkdocs | bf04764f1608970643932329c9f6c8c63a0c5632 | [
"0BSD"
] | null | null | null | src/pytkdocs/serializer.py | shashankrnr32/pytkdocs | bf04764f1608970643932329c9f6c8c63a0c5632 | [
"0BSD"
] | null | null | null | """
This module defines function to serialize objects.
These functions simply take objects as parameters and return dictionaries that can be dumped by `json.dumps`.
"""
import inspect
import re
from typing import Any, Match, Optional, Pattern
from pytkdocs.objects import Object, Source
from pytkdocs.parsers.docstrings.base import AnnotatedObject, Attribute, Parameter, Section
# `typing.GenericMeta` was removed in Python 3.7; fall back to a placeholder
# metaclass so the `isinstance` check in `annotation_to_string` still works.
try:
    from typing import GenericMeta # type: ignore
except ImportError:
    # in 3.7, GenericMeta doesn't exist but we don't need it
    class GenericMeta(type): # type: ignore # noqa: WPS440 (variable overlap)
        """GenericMeta type."""
RE_OPTIONAL: Pattern = re.compile(r"Union\[(.+), NoneType\]")
"""Regular expression to match optional annotations of the form `Union[T, NoneType]`."""
RE_FORWARD_REF: Pattern = re.compile(r"_?ForwardRef\('([^']+)'\)")
"""Regular expression to match forward-reference annotations of the form `_ForwardRef('T')`."""
def rebuild_optional(match: Match) -> str:
    """
    Rewrite a matched `Union[T, NoneType]` annotation as `Optional[T]`.

    Arguments:
        match: A regex match whose first group is the text between
            `Union[` and `, NoneType]`.

    Returns:
        `Optional[T]` when the group holds a single top-level type,
        otherwise the group wrapped back into `Union[...]`.
    """
    inner = match.group(1)
    depth = 0
    for char in inner:
        if char == "[":
            depth += 1
        elif char == "]":
            depth -= 1
        elif char == "," and depth == 0:
            # Several top-level members: keep the Union form as-is.
            return f"Union[{inner}]"
    return f"Optional[{inner}]"
def annotation_to_string(annotation: Any) -> str:
    """
    Render a type annotation as a human-readable string.

    Arguments:
        annotation: Any annotation object (a class, typing construct,
            or `inspect.Signature.empty`).

    Returns:
        The annotation as a string; empty for a missing annotation.
    """
    if annotation is inspect.Signature.empty:
        return ""

    if inspect.isclass(annotation) and not isinstance(annotation, GenericMeta):
        text = annotation.__name__
    else:
        # Strip the "typing." prefix from typing constructs.
        text = str(annotation).replace("typing.", "")

    # Unwrap forward references and collapse Union[T, None] to Optional[T].
    text = RE_FORWARD_REF.sub(lambda mobj: mobj.group(1), text)
    return RE_OPTIONAL.sub(rebuild_optional, text)
def serialize_annotated_object(obj: AnnotatedObject) -> dict:
    """
    Serialize an [`AnnotatedObject`][pytkdocs.parsers.docstrings.base.AnnotatedObject].

    Arguments:
        obj: The annotated object to serialize.

    Returns:
        A JSON-serializable dictionary with `description` and `annotation` keys.
    """
    annotation = annotation_to_string(obj.annotation)
    return {"description": obj.description, "annotation": annotation}
def serialize_attribute(attribute: Attribute) -> dict:
    """
    Serialize an [`Attribute`][pytkdocs.parsers.docstrings.base.Attribute].

    Arguments:
        attribute: The attribute to serialize.

    Returns:
        A JSON-serializable dictionary with name, description and annotation.
    """
    serialized = {"name": attribute.name}
    serialized["description"] = attribute.description
    serialized["annotation"] = annotation_to_string(attribute.annotation)
    return serialized
def serialize_parameter(parameter: Parameter) -> dict:
    """
    Serialize a [`Parameter`][pytkdocs.parsers.docstrings.base.Parameter].

    Arguments:
        parameter: The parameter to serialize.

    Returns:
        A JSON-serializable dictionary describing the parameter's
        annotation, kind, default and flags.
    """
    # Start from the annotated-object payload, then layer parameter data on top.
    serialized = serialize_annotated_object(parameter)
    serialized["name"] = parameter.name
    serialized["kind"] = str(parameter.kind)
    serialized["default"] = parameter.default_string
    serialized["is_optional"] = parameter.is_optional
    serialized["is_required"] = parameter.is_required
    serialized["is_args"] = parameter.is_args
    serialized["is_kwargs"] = parameter.is_kwargs
    return serialized
def serialize_signature_parameter(parameter: inspect.Parameter) -> dict:
    """
    Serialize an `inspect.Parameter`.

    Arguments:
        parameter: The signature parameter to serialize.

    Returns:
        A JSON-serializable dictionary; `annotation`/`default` keys are
        only present when the parameter actually declares them.
    """
    result = {"kind": str(parameter.kind), "name": parameter.name}
    empty = parameter.empty
    if parameter.annotation is not empty:
        result["annotation"] = annotation_to_string(parameter.annotation)
    if parameter.default is not empty:
        result["default"] = repr(parameter.default)
    return result
def serialize_signature(signature: inspect.Signature) -> dict:
    """
    Serialize an `inspect.Signature`.

    Arguments:
        signature: The signature to serialize (may be None).

    Returns:
        A JSON-serializable dictionary; empty when no signature is given.
    """
    if signature is None:
        return {}

    params = [serialize_signature_parameter(param) for param in signature.parameters.values()]
    serialized: dict = {"parameters": params}
    if signature.return_annotation is not inspect.Signature.empty:
        serialized["return_annotation"] = annotation_to_string(signature.return_annotation)
    return serialized
def serialize_docstring_section(section: Section) -> dict:  # noqa: WPS231 (not complex)
    """
    Serialize a docstring [`Section`][pytkdocs.parsers.docstrings.base.Section].

    Arguments:
        section: The docstring section to serialize.

    Returns:
        A JSON-serializable dictionary with the section type and, for known
        section kinds, a serialized `value`.
    """
    serialized = {"type": section.type}
    stype = section.type
    if stype == section.Type.MARKDOWN:
        serialized["value"] = section.value  # type: ignore
    elif stype == section.Type.RETURN:
        serialized["value"] = serialize_annotated_object(section.value)  # type: ignore
    elif stype == section.Type.EXCEPTIONS:
        serialized["value"] = [serialize_annotated_object(error) for error in section.value]  # type: ignore
    elif stype == section.Type.PARAMETERS:
        serialized["value"] = [serialize_parameter(param) for param in section.value]  # type: ignore
    elif stype == section.Type.ATTRIBUTES:
        serialized["value"] = [serialize_attribute(attr) for attr in section.value]  # type: ignore
    elif stype == section.Type.EXAMPLES:
        serialized["value"] = section.value  # type: ignore
    return serialized
def serialize_source(source: Optional[Source]) -> dict:
    """
    Serialize a [`Source`][pytkdocs.objects.Source].

    Arguments:
        source: The source to serialize, or None.

    Returns:
        A JSON-serializable dictionary; empty when no source is given.
    """
    if not source:
        return {}
    return {"code": source.code, "line_start": source.line_start}
def serialize_object(obj: Object) -> dict:
    """
    Serialize a subclass instance of [`Object`][pytkdocs.objects.Object].

    Arguments:
        obj: The object to serialize.

    Returns:
        A JSON-serializable dictionary describing the object tree
        (children are serialized recursively).
    """
    serialized: dict = {}
    serialized["name"] = obj.name
    serialized["path"] = obj.path
    serialized["category"] = obj.category
    serialized["file_path"] = obj.file_path
    serialized["relative_file_path"] = obj.relative_file_path
    # Deduplicate and sort for deterministic output.
    serialized["properties"] = sorted(set(obj.properties + obj.name_properties))
    serialized["parent_path"] = obj.parent_path
    serialized["has_contents"] = obj.has_contents()
    serialized["docstring"] = obj.docstring
    serialized["docstring_sections"] = [serialize_docstring_section(section) for section in obj.docstring_sections]
    serialized["source"] = serialize_source(obj.source)
    serialized["children"] = {child.path: serialize_object(child) for child in obj.children}
    serialized["attributes"] = [attr.path for attr in obj.attributes]
    serialized["methods"] = [method.path for method in obj.methods]
    serialized["functions"] = [function.path for function in obj.functions]
    serialized["modules"] = [module.path for module in obj.modules]
    serialized["classes"] = [klass.path for klass in obj.classes]
    # `type` and `signature` only exist on some object categories.
    if hasattr(obj, "type"):  # noqa: WPS421 (hasattr)
        serialized["type"] = annotation_to_string(obj.type)  # type: ignore
    if hasattr(obj, "signature"):  # noqa: WPS421 (hasattr)
        serialized["signature"] = serialize_signature(obj.signature)  # type: ignore
    return serialized
| 32.757202 | 112 | 0.66407 |
d66f088619ab3c18978d4063fc454570ddc6728e | 984 | py | Python | timus/1013.py | nurseiit/compete | 7e36898860a279e397bcb51308d0aef0860cc448 | [
"MIT"
] | 3 | 2019-06-28T17:12:44.000Z | 2021-07-22T01:16:01.000Z | timus/1013.py | nurseiit/compete | 7e36898860a279e397bcb51308d0aef0860cc448 | [
"MIT"
] | null | null | null | timus/1013.py | nurseiit/compete | 7e36898860a279e397bcb51308d0aef0860cc448 | [
"MIT"
] | null | null | null | def matmulmod(a, b, mod):
n = len(a)
m = len(a[0])
p = len(b[0])
c = [[0 for i in range(p)] for j in range(n)]
for i in range(n):
for j in range(m):
for k in range(p):
c[i][k] = (c[i][k] + a[i][j] * b[j][k]) % mod
return c
def matpowmod(a, n, mod):
    """Return the 2x2 matrix a raised to the n-th power modulo mod.

    Uses binary (square-and-multiply) exponentiation; n must be >= 0.
    """
    acc = [[1, 0], [0, 1]]  # 2x2 identity
    base = a
    while n > 0:
        if n % 2 == 1:
            acc = matmulmod(acc, base, mod)
        base = matmulmod(base, base, mod)
        n = n // 2
    return acc
def fast(n, k, mod):
    """Compute the answer via matrix exponentiation (see note at end of file)."""
    # Transition matrix encoding the linear map (a, b) -> (k*a - b, a - b).
    transition = [[k, 1], [-1, -1]]
    start = [[k - 1, 0]]
    powered = matpowmod(transition, n - 1, mod)
    return matmulmod(start, powered, mod)[0][0]
def main():
    """Read n, k and mod from stdin (one per line) and print fast(n, k, mod)."""
    n = int(input())
    k = int(input())
    mod = int(input())
    print(fast(n, k, mod))
# Script entry point.
if __name__ == '__main__':
    main()
# Author's derivation note (bare module-level string; no runtime effect).
"""
C is const = k
(a, b) * M = (a * C - b, a - b)
M is 2x2 =>
w | x
-----
y | z
(a, b) * M = (w * a + y * b, x * a + z * b)
= (C * a + (-1) * b, 1 * a + (-1) * b)
M = C | 1
-1 | -1
"""
cf82c2b7848a3dc07ff73d7ae8929ad3ed1d8bc4 | 10,378 | py | Python | dataloader/loader.py | Jvictor97/AWR-Adaptive-Weighting-Regression | 2c29f8ac3d824edfff07465232ffed8e4d837ebf | [
"MIT"
] | 90 | 2020-03-16T15:18:57.000Z | 2022-03-16T10:02:52.000Z | dataloader/loader.py | Jvictor97/AWR-Adaptive-Weighting-Regression | 2c29f8ac3d824edfff07465232ffed8e4d837ebf | [
"MIT"
] | 16 | 2020-05-01T03:11:44.000Z | 2021-12-14T13:03:38.000Z | dataloader/loader.py | Jvictor97/AWR-Adaptive-Weighting-Regression | 2c29f8ac3d824edfff07465232ffed8e4d837ebf | [
"MIT"
] | 16 | 2020-05-21T09:07:04.000Z | 2022-02-22T13:00:19.000Z | from torch.utils.data import Dataset
import numpy as np
import cv2
from util.util import uvd2xyz, xyz2uvd
from scipy.sparse import coo_matrix
class Loader(Dataset):
    """Base dataset loader for depth-image hand-pose estimation.

    Provides crop, normalization and augmentation (translate / scale /
    rotate) utilities shared by dataset-specific subclasses.

    NOTE(review): several methods read ``self.paras`` (camera intrinsics)
    and ``self.flip``, which are never assigned here — presumably set by
    subclasses; confirm before reusing this class standalone.
    """
    def __init__(self, root, phase, img_size, dataset_name):
        # root: dataset root dir; phase: 'train' or 'test';
        # img_size: resolution of the cropped patch; dataset_name: identifier.
        assert phase in ['train', 'test']
        # Fixed seed so augmentation sampling is reproducible across runs.
        self.seed = np.random.RandomState(23455)
        self.root = root
        self.phase = phase
        self.img_size = img_size
        self.dataset_name = dataset_name
        # randomly choose one of the augment options
        self.aug_ops = ['trans', 'scale', 'rot', None]
    def crop(self, img, center, csize, dsize):
        '''
        Crop hand region out of depth images, scales inverse to the distance of hand to camera
        :param center: center of mass, in image coordinates (u,v,d), d in mm
        :param csize: cube size, 3D crop volume in mm
        :param dsize: depth image size, resolution of cropped image, (w,h)
        :return: cropped hand depth image, transformation matrix for joints, center of mass in image coordinates
        '''
        assert len(csize) == 3
        assert len(dsize) == 2
        # calculate boundaries according to cube size and center
        # crop hand out of original depth image
        ustart, uend, vstart, vend, zstart, zend = self.center2bounds(center, csize)
        cropped = self.bounds2crop(img, ustart, uend, vstart, vend, zstart, zend)
        # resize depth image to same resolution
        w, h= (uend - ustart), (vend - vstart)
        # scale the longer side to corresponding dsize
        scale = min(dsize[0] / w, dsize[1] / h)
        size = (int(w * scale), int(h * scale))
        # nearest-neighbour keeps raw depth values (no interpolated depths)
        cropped = cv2.resize(cropped, size, interpolation=cv2.INTER_NEAREST)
        # pad another side to corresponding dsize
        res = np.zeros(dsize, dtype = np.float32)
        ustart, vstart = (dsize - size)/2.
        uend, vend = ustart+size[0], vstart + size[1]
        res[int(vstart):int(vend), int(ustart):int(uend)] = cropped
        transmat = self.center2transmat(center, csize, dsize)
        return res, transmat
    def random_aug(self, sigma_trans=None, sigma_scale=None, sigma_rot=None):
        # create random augmentation paras
        # choose one of [trans, scale, rot, None] to augment
        # Returns (aug_op, trans, scale, rot); only the parameter matching
        # aug_op is used downstream by augment().
        if sigma_trans is None:
            sigma_trans = 35.
        if sigma_scale is None:
            sigma_scale = 0.05
        if sigma_rot is None:
            sigma_rot = 180.
        aug_idx = self.seed.randint(0, len(self.aug_ops))
        aug_op = self.aug_ops[aug_idx]
        # trans parameters for x,y,z axis
        # normalization distribution of N(0, sigma_trans)
        trans = self.seed.randn(3) * sigma_trans
        # normalization distribution of N(1, sigma_scale)
        scale = abs(1. + self.seed.randn() * sigma_scale)
        # rotation angle in degrees, uniform in [-sigma_rot, sigma_rot)
        rot = self.seed.uniform(-sigma_rot, sigma_rot)
        return aug_op, trans, scale, rot
    def augment(self, img, jt_xyz, center, cube, M, aug_op, trans, scale, rot):
        # Apply the selected augmentation, then normalize depths to [-1, 1].
        depth_max = img.max()
        if aug_op == 'trans':
            img, jt_xyz, center, M = self.translate(img, jt_xyz, center, cube, M, trans, pad_value=0)
        elif aug_op == 'rot':
            # NOTE(review): rotate() returns 3 values on its zero-rotation
            # early-exit but 2 values otherwise — this unpacking would raise
            # for rot ~ 0; confirm intended behavior.
            img, jt_xyz = self.rotate(img, jt_xyz, center, rot, pad_value=0)
        elif aug_op == 'scale':
            img, cube, M = self.scale(img, center, cube, M, scale, pad_value=0)
        img = self.normalize(depth_max, img, center, cube)
        return img, jt_xyz, cube, center, M
    def normalize(self, depth_max, img, center, cube):
        """Clip depths to the crop cube and scale them to [-1, 1] around center depth."""
        img[img == depth_max] = center[2] + (cube[2] / 2.)
        # invalid points are assigned as bg
        img[img == 0] = center[2] + (cube[2] / 2.)
        img_min = center[2] - (cube[2] / 2.) # foreground, normalise to -1, should not be to much
        img_max = center[2] + (cube[2] / 2.)
        img = np.clip(img, img_min, img_max)
        # print('sum:', (img==img_min).sum())
        # scale depth 'sum:', values to [-1, 1]
        img -= center[2]
        img /= (cube[2] / 2.)
        return img # normalize to [-1, 1]
    def translate(self, img, jt_xyz, center, cube, M, trans, pad_value=0):
        '''
        Translate center.
        :param center: center of mass in image coordinates, (u,v,d)
        '''
        if np.allclose(trans, 0.):
            return img, jt_xyz, center, M
        # Shift the center in camera (xyz) space, then project back to uvd.
        new_center = xyz2uvd(uvd2xyz(center, self.paras, self.flip) + trans, self.paras, self.flip)
        # NOTE(review): this reads as (not close(center_z, 0)) or
        # close(new_center_z, 0) — the second clause looks like it was meant
        # to be negated as well; confirm against upstream.
        if not (np.allclose(center[2], 0.)) or np.allclose(new_center[2], 0.):
            new_M = self.center2transmat(new_center, cube, np.array(img.shape))
            # print(img[img>0].min()-1)
            img = self.recrop(img, new_center, cube, new_M, np.linalg.inv(M), img.shape, thresh_z=True, bg=pad_value, nv_val=np.min(img[img>0])-1)
        else:
            new_M = M
        # Keep joints relative to the (new) center of mass.
        jt_xyz = jt_xyz + uvd2xyz(center, self.paras, self.flip) - uvd2xyz(new_center, self.paras, self.flip)
        return img, jt_xyz, new_center, new_M
    def recrop(self, img, center, cube, M, M_inv, dsize, thresh_z=True, bg=0., nv_val=0.):
        """Re-crop img under a new transform (M o M_inv), thresholding depths to the cube."""
        img = cv2.warpPerspective(img, np.dot(M, M_inv), dsize, flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT, borderValue=float(bg))
        # img[np.isclose(img, 32000.)] = bg # outliers will appear on the edge
        img[img < nv_val] = bg # let 0 < depth < depth.min()-1 be background, avoiding outliers around hand
        if thresh_z:
            # Clamp near depths to zstart; drop far depths to background.
            _, _, _, _, zstart, zend = self.center2bounds(center, cube)
            mask1 = np.logical_and(img < zstart, img != 0)
            mask2 = np.logical_and(img > zend, img != 0)
            img[mask1] = zstart
            img[mask2] = 0.
        return img.astype(np.float32)
    def rotate(self, img, jt_xyz, center, rot, pad_value=0):
        '''
        Rotate hand in image coordinates.
        :return: rotated img, new jt_xyz, rotation angle in degree
        '''
        # NOTE(review): this early exit returns 3 values while the main path
        # below returns 2 — callers unpacking 2 would fail here; confirm.
        if np.allclose(rot, 0.):
            return img, jt_xyz, rot
        rot = np.mod(rot, 360)
        # -rot means rotate clockwisely
        rotM = cv2.getRotationMatrix2D((img.shape[1] // 2, img.shape[0] // 2), -rot, 1)
        img = cv2.warpAffine(img, rotM, (img.shape[1], img.shape[0]), flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT, borderValue=pad_value)
        # Rotate joints in image (uvd) space around the crop center.
        center_xyz = uvd2xyz(center, self.paras, self.flip)
        jt_uvd = xyz2uvd(jt_xyz + center_xyz, self.paras, self.flip)
        jt_uvd = self.rotate_pts(jt_uvd, center, rot)
        jt_xyz = uvd2xyz(jt_uvd, self.paras, self.flip) - center_xyz
        return img, jt_xyz
    def scale(self, img, center, cube, M, scale, pad_value=0):
        '''
        Scale hand by applying different cube size.
        '''
        if np.allclose(scale, 1.):
            return img, cube, M
        new_cube = cube * scale
        if not np.allclose(center[2], 0.):
            new_M = self.center2transmat(center, new_cube, np.array(img.shape))
            img = self.recrop(img, center, new_cube, new_M, np.linalg.inv(M), img.shape, bg=pad_value, nv_val=np.min(img[img>0])-1)
            # new_img = self.recrop(img, center, cube, new_M, np.linalg.inv(M), img.shape, bg=pad_value, nv_val=32000.)
        else:
            new_M = M
        return img, new_cube, new_M
    def center2bounds(self, center, csize):
        """Project the crop cube around center into (u, v, z) image-space bounds."""
        ustart, vstart = center[:2] - (csize[:2] / 2.) / center[2] * self.paras[:2] + 0.5
        uend, vend= center[:2] + (csize[:2] / 2.) / center[2] * self.paras[:2] + 0.5
        zstart = center[2] - csize[2] / 2.
        zend = center[2] + csize[2] / 2.
        return int(ustart), int(uend), int(vstart), int(vend), zstart, zend
    def bounds2crop(self, img, ustart, uend, vstart, vend, zstart, zend, thresh_z=True, bg=0):
        '''
        Use boundaries to crop hand out of original depth image.
        :return: cropped image
        '''
        h, w = img.shape[:2]
        bbox = [max(vstart,0), min(vend,h), max(ustart,0), min(uend,w)]
        img = img[bbox[0]:bbox[1], bbox[2]:bbox[3]]
        # add pixels that are out of the image in order to keep aspect ratio
        img = np.pad(img, ((abs(vstart)-bbox[0], abs(vend)-bbox[1]),(abs(ustart)-bbox[2], abs(uend)-bbox[3])), mode='constant', constant_values=bg)
        if thresh_z:
            # Clamp near depths to zstart; drop far depths to background.
            mask1 = np.logical_and(img < zstart, img != 0)
            mask2 = np.logical_and(img > zend, img != 0)
            img[mask1] = zstart
            img[mask2] = 0
        return img
    def center2transmat(self, center, csize, dsize):
        '''
        Calculate affine transform matrix for scale and translate from crop.
        :param dsize: organized as (w,h), cv2 img.shape (h,w,c)
        '''
        assert len(csize) == 3
        assert len(dsize) == 2
        # calculate boundaries according to cube size and center
        # crop hand out of original depth image
        ustart, uend, vstart, vend, _, _ = self.center2bounds(center, csize)
        # trans1: move crop origin to (0, 0)
        trans1 = np.eye(3)
        trans1[0][2] = -ustart
        trans1[1][2] = -vstart
        w = (uend - ustart)
        h = (vend - vstart)
        # scale the longer side to corresponding dsize
        scale = min(dsize[0] / w, dsize[1] / h)
        size = (int(w * scale), int(h * scale))
        scale *= np.eye(3)
        scale[2][2] = 1
        # pad another side to corresponding dsize
        trans2 = np.eye(3)
        trans2[0][2] = int(np.floor(dsize[0] / 2. - size[0] / 2.))
        trans2[1][2] = int(np.floor(dsize[1] / 2. - size[1] / 2.))
        # Full transform: translate-to-origin, scale, then center in dsize.
        return np.dot(trans2, np.dot(scale, trans1)).astype(np.float32)
    def rotate_pts(self, pt, center, angle):
        '''
        Rotate single point clockwisely.
        '''
        # angle is in degrees; points are (N, >=2) arrays rotated about center.
        alpha = angle * np.pi / 180.
        pt_rot = pt.copy()
        pt_rot[:, 0] = (pt[:, 0]-center[0]) * np.cos(alpha) - (pt[:, 1]-center[1]) * np.sin(alpha)
        pt_rot[:, 1] = (pt[:, 0]-center[0]) * np.sin(alpha) + (pt[:, 1]-center[1]) * np.cos(alpha)
        pt_rot[:, :2] += center[:2]
        return pt_rot.astype(np.float32)
    def transform_jt_uvd(self, jt_uvd, M):
        """Apply homography M to the (u, v) part of joints, keeping depth unchanged."""
        pts_trans = np.hstack([jt_uvd[:,:2], np.ones((jt_uvd.shape[0], 1))])
        pts_trans = np.dot(M, pts_trans.T).T
        # Perspective divide by the homogeneous coordinate.
        pts_trans[:, :2] /= pts_trans[:, 2:]
        return np.hstack([pts_trans[:, :2], jt_uvd[:, 2:]]).astype(np.float32)
5892dca5bef621824f52a62ad9f5d1dc476200bd | 1,071 | py | Python | tests/math/unary/test_scipy_mirror.py | kw-0/MyGrad | 307f1bb5f2391e7f4df49fe43a7acf9d1e8ea141 | [
"MIT"
] | 147 | 2018-07-14T01:37:35.000Z | 2022-03-29T06:37:58.000Z | tests/math/unary/test_scipy_mirror.py | kw-0/MyGrad | 307f1bb5f2391e7f4df49fe43a7acf9d1e8ea141 | [
"MIT"
] | 223 | 2018-05-31T14:13:18.000Z | 2022-02-27T18:53:49.000Z | tests/math/unary/test_scipy_mirror.py | kw-0/MyGrad | 307f1bb5f2391e7f4df49fe43a7acf9d1e8ea141 | [
"MIT"
] | 27 | 2018-06-17T14:42:05.000Z | 2021-10-31T00:21:09.000Z | import hypothesis.extra.numpy as hnp
import hypothesis.strategies as st
import numpy as np
import pytest
from hypothesis import given, settings
from numpy.testing import assert_array_equal
from scipy import special
from mygrad.math._special import logsumexp
from tests.custom_strategies import valid_axes
@settings(deadline=None, max_examples=500)
@given(
    data=st.data(),
    x=hnp.arrays(
        shape=hnp.array_shapes(min_dims=0),
        dtype=np.float64,
        elements=st.floats(),
    ),
    keepdims=st.booleans(),
)
@pytest.mark.filterwarnings("ignore: overflow")
@pytest.mark.filterwarnings("ignore: invalid")
def test_logsumexp(data: st.SearchStrategy, x: np.ndarray, keepdims: bool):
    """mygrad's logsumexp must match scipy's over arbitrary arrays/axes."""
    reduction_axes = data.draw(valid_axes(ndim=x.ndim), label="axes")
    via_mygrad = logsumexp(x, axis=reduction_axes, keepdims=keepdims)
    via_scipy = special.logsumexp(x, axis=reduction_axes, keepdims=keepdims)
    assert_array_equal(
        via_mygrad,
        via_scipy,
        err_msg="mygrad's implementation of logsumexp does "
        "not match that of scipy's",
    )
| 30.6 | 75 | 0.727358 |
f1faca6c9b464f026432ddbb22e7958eeb485dd3 | 7,529 | py | Python | tests/test_optic.py | dkirkby/batoid | 734dccc289eb7abab77a62cdc14563ed5981753b | [
"BSD-2-Clause"
] | null | null | null | tests/test_optic.py | dkirkby/batoid | 734dccc289eb7abab77a62cdc14563ed5981753b | [
"BSD-2-Clause"
] | null | null | null | tests/test_optic.py | dkirkby/batoid | 734dccc289eb7abab77a62cdc14563ed5981753b | [
"BSD-2-Clause"
] | null | null | null | import batoid
import numpy as np
import os
from test_helpers import timer, do_pickle, all_obj_diff
import time
import yaml
@timer
def test_optic():
    """Fast (vectorized) trace must agree with tracing rays one at a time."""
    if __name__ == '__main__':
        nside = 128
    else:
        nside = 32

    rays = batoid.rayGrid(20, 12.0, 0.005, 0.005, -1.0, nside, 500e-9, 1.0, batoid.ConstMedium(1.0))
    nrays = len(rays)
    print("Tracing {} rays.".format(nrays))

    fn = os.path.join(batoid.datadir, "HSC", "HSC.yaml")
    # yaml.load without a Loader is removed in PyYAML 6; safe_load is the
    # equivalent for plain config data.  The context manager also closes the
    # file handle, which the old `yaml.load(open(fn))` form leaked.
    with open(fn) as f:
        config = yaml.safe_load(f)
    telescope = batoid.parse.parse_optic(config['opticalSystem'])
    do_pickle(telescope)

    t0 = time.time()
    rays_fast, _ = telescope.trace(rays)
    t1 = time.time()
    rays_slow = batoid.RayVector([telescope.trace(r)[0] for r in rays])
    t2 = time.time()
    assert rays_fast == rays_slow

    # (Removed the dead `t_fast = 0.0` / `t_slow = 0.0` initializers.)
    t_fast = t1 - t0
    t_slow = t2 - t1
    print("Fast trace: {:5.3f} s".format(t_fast))
    print("            {} rays per second".format(int(nrays/t_fast)))
    print("Slow trace: {:5.3f} s".format(t_slow))
    print("            {} rays per second".format(int(nrays/t_slow)))
@timer
def test_traceFull():
    """traceFull's final surface output must equal the result of a plain trace."""
    if __name__ == '__main__':
        nside = 128
    else:
        nside = 32

    rays = batoid.rayGrid(20, 12.0, 0.005, 0.005, -1.0, nside, 500e-9, 1.0, batoid.ConstMedium(1.0))
    nrays = len(rays)
    print("Tracing {} rays.".format(nrays))

    fn = os.path.join(batoid.datadir, "HSC", "HSC.yaml")
    # safe_load + context manager: yaml.load(open(fn)) leaked the file handle
    # and breaks on PyYAML >= 6 (no default Loader).
    with open(fn) as f:
        config = yaml.safe_load(f)
    telescope = batoid.parse.parse_optic(config['opticalSystem'])

    tf = telescope.traceFull(rays)
    rays, _ = telescope.trace(rays)
    assert rays == tf[-1]['out']
@timer
def test_traceReverse():
    """Tracing forward, reversing velocities, and tracing back must recover the input rays."""
    if __name__ == '__main__':
        nside = 128
    else:
        nside = 32

    fn = os.path.join(batoid.datadir, "HSC", "HSC.yaml")
    # safe_load + context manager: yaml.load(open(fn)) leaked the file handle
    # and breaks on PyYAML >= 6 (no default Loader).
    with open(fn) as f:
        config = yaml.safe_load(f)
    telescope = batoid.parse.parse_optic(config['opticalSystem'])

    init_rays = batoid.rayGrid(20, 12.0, 0.005, 0.005, -1.0, nside, 500e-9, 1.0, batoid.ConstMedium(1.0))
    forward_rays, _ = telescope.trace(init_rays, outCoordSys=batoid.CoordSys())

    # Now, turn the result rays around and trace backwards
    forward_rays = forward_rays.propagatedToTime(40.0)
    reverse_rays = batoid.RayVector(
        [batoid.Ray(r.r, -r.v, -r.t, r.wavelength) for r in forward_rays]
    )

    final_rays, _ = telescope.traceReverse(reverse_rays, outCoordSys=batoid.CoordSys())
    # propagate all the way to t=0
    final_rays = final_rays.propagatedToTime(0.0)

    # Only unvignetted rays should round-trip exactly.
    w = np.where(np.logical_not(final_rays.vignetted))[0]
    for idx in w:
        np.testing.assert_allclose(init_rays[idx].x, final_rays[idx].x)
        np.testing.assert_allclose(init_rays[idx].y, final_rays[idx].y)
        np.testing.assert_allclose(init_rays[idx].z, final_rays[idx].z)
        np.testing.assert_allclose(init_rays[idx].vx, -final_rays[idx].vx)
        np.testing.assert_allclose(init_rays[idx].vy, -final_rays[idx].vy)
        np.testing.assert_allclose(init_rays[idx].vz, -final_rays[idx].vz)
        np.testing.assert_allclose(final_rays[idx].t, 0)
@timer
def test_shift():
    """Shifting an optic (globally or per-item) and shifting back must be a no-op."""
    np.random.seed(5)

    fn = os.path.join(batoid.datadir, "HSC", "HSC.yaml")
    # safe_load + context manager: yaml.load(open(fn)) leaked the file handle
    # and breaks on PyYAML >= 6 (no default Loader).
    with open(fn) as f:
        config = yaml.safe_load(f)
    telescope = batoid.parse.parse_optic(config['opticalSystem'])

    shift = np.random.uniform(low=-1, high=1, size=3)
    assert telescope.withGlobalShift(shift).withGlobalShift(-shift) == telescope
    for item in telescope.itemDict:
        shifted = telescope.withGloballyShiftedOptic(item, shift)
        shifted = shifted.withGloballyShiftedOptic(item, -shift)
        assert telescope == shifted
@timer
def test_rotation():
    """Rotating an optic and rotating back must leave wavefront zernikes unchanged."""
    try:
        import galsim
    except ImportError:
        print("optic rotation test requires GalSim")
        return

    np.random.seed(57)

    fn = os.path.join(batoid.datadir, "HSC", "HSC.yaml")
    # safe_load + context manager: yaml.load(open(fn)) leaked the file handle
    # and breaks on PyYAML >= 6 (no default Loader).
    with open(fn) as f:
        config = yaml.safe_load(f)
    telescope = batoid.parse.parse_optic(config['opticalSystem'])

    rot = batoid.RotX(np.random.uniform(low=0.0, high=2*np.pi))
    rot = rot.dot(batoid.RotY(np.random.uniform(low=0.0, high=2*np.pi)))
    rot = rot.dot(batoid.RotZ(np.random.uniform(low=0.0, high=2*np.pi)))
    rotInv = np.linalg.inv(rot)

    # It's hard to test the two telescopes for equality due to rounding errors, so we test by
    # comparing zernikes
    rotTel = telescope.withLocalRotation(rot).withLocalRotation(rotInv)

    theta_x = np.random.uniform(-0.005, 0.005)
    theta_y = np.random.uniform(-0.005, 0.005)
    wavelength = 750e-9

    np.testing.assert_allclose(
        batoid.psf.zernike(telescope, theta_x, theta_y, wavelength),
        batoid.psf.zernike(rotTel, theta_x, theta_y, wavelength),
        atol=1e-5
    )

    for item in telescope.itemDict:
        rotTel = telescope.withLocallyRotatedOptic(item, rot)
        rotTel = rotTel.withLocallyRotatedOptic(item, rotInv)
        rotTel2 = telescope.withLocallyRotatedOptic(item, np.eye(3))
        theta_x = np.random.uniform(-0.005, 0.005)
        theta_y = np.random.uniform(-0.005, 0.005)
        np.testing.assert_allclose(
            batoid.psf.zernike(telescope, theta_x, theta_y, wavelength),
            batoid.psf.zernike(rotTel, theta_x, theta_y, wavelength),
            atol=1e-5
        )
        np.testing.assert_allclose(
            batoid.psf.zernike(telescope, theta_x, theta_y, wavelength),
            batoid.psf.zernike(rotTel2, theta_x, theta_y, wavelength),
            atol=1e-5
        )
@timer
def test_thread():
    """Verify that ray tracing is independent of the C++ thread count:
    results for 4, 2, and 1 threads must be exactly equal."""
    fn = os.path.join(batoid.datadir, "HSC", "HSC.yaml")
    # Context-managed open avoids leaking the file handle, and safe_load is
    # required for PyYAML >= 6 where plain load() needs an explicit Loader.
    with open(fn) as f:
        config = yaml.safe_load(f)
    telescope = batoid.parse.parse_optic(config['opticalSystem'])
    rayGrid = batoid.rayGrid(
        telescope.dist, telescope.pupilSize,
        0.0, 0.0, -1.0,
        32, 750e-9, 1.0, telescope.inMedium)
    batoid._batoid.setNThread(4)
    assert batoid._batoid.getNThread() == 4
    rays4, _ = telescope.trace(rayGrid)
    batoid._batoid.setNThread(2)
    assert batoid._batoid.getNThread() == 2
    rays2, _ = telescope.trace(rayGrid)
    batoid._batoid.setNThread(1)
    assert batoid._batoid.getNThread() == 1
    rays1, _ = telescope.trace(rayGrid)
    assert rays1 == rays2 == rays4
@timer
def test_ne():
    """Exercise equality/inequality by collecting pairwise-distinct optics
    and checking that they all compare unequal via all_obj_diff."""
    objs = []
    interface_types = (batoid.Mirror, batoid.Detector, batoid.Baffle,
                       batoid.RefractiveInterface)
    # Each interface type wrapping a plane, then wrapping a paraboloid.
    for wrap in interface_types:
        objs.append(wrap(batoid.Plane()))
    for wrap in interface_types:
        objs.append(wrap(batoid.Paraboloid(0.1)))
    # Mirrors distinguished only by a single optional attribute.
    objs.append(batoid.Mirror(batoid.Plane(), obscuration=batoid.ObscCircle(0.1)))
    objs.append(batoid.Mirror(batoid.Plane(), inMedium=batoid.ConstMedium(1.1)))
    objs.append(batoid.Mirror(batoid.Plane(), outMedium=batoid.ConstMedium(1.1)))
    objs.append(batoid.Mirror(batoid.Plane(), coordSys=batoid.CoordSys([0, 0, 1])))
    # Compound optics differing only in their contents.
    objs.append(batoid.CompoundOptic(
        [batoid.Mirror(batoid.Plane()), batoid.Mirror(batoid.Plane())]))
    objs.append(batoid.CompoundOptic(
        [batoid.Mirror(batoid.Plane()), batoid.Baffle(batoid.Plane())]))
    objs.append(batoid.Lens(
        [batoid.RefractiveInterface(batoid.Plane()),
         batoid.RefractiveInterface(batoid.Plane())],
        batoid.ConstMedium(1.1)))
    all_obj_diff(objs)
# Allow running the full test suite directly (outside of pytest).
if __name__ == '__main__':
    test_optic()
    test_traceFull()
    test_traceReverse()
    test_shift()
    test_rotation()
    test_thread()
    test_ne()
26e8e46ad104701dba3bb3458d78f98f21b1be6d | 1,584 | py | Python | profiles_PROJECT/profiles_api/serializers.py | TheKinng96/rest-api | 7835651547d82504e9e764eaa44ff00c73b9dbf3 | [
"MIT"
] | null | null | null | profiles_PROJECT/profiles_api/serializers.py | TheKinng96/rest-api | 7835651547d82504e9e764eaa44ff00c73b9dbf3 | [
"MIT"
] | 1 | 2020-04-28T10:35:49.000Z | 2020-04-28T10:35:49.000Z | profiles_PROJECT/profiles_api/serializers.py | TheKinng96/rest-api | 7835651547d82504e9e764eaa44ff00c73b9dbf3 | [
"MIT"
] | null | null | null | from rest_framework import serializers
from profiles_api.models import UserProfile
from profiles_api.models import ProfileFeedItem
class HelloSerializer(serializers.Serializer):
    """Serializes a name field for testing our APIView."""
    # Plain (non-model) serializer; validates the POSTed name field.
    name = serializers.CharField(max_length=10) #serializer field used by the demo POST handler
class UserProfileSerializer(serializers.ModelSerializer):
    """ModelSerializer for UserProfile accounts.

    Exposes id/email/name/password.  The password is accepted on input only
    (never echoed back) and is always routed through the model's hashing
    helpers, never stored raw.
    """

    class Meta:
        model = UserProfile
        fields = ('id', 'email', 'name', 'password')
        # write_only: never serialize the password back to the client;
        # style: render it as a masked input in the browsable API.
        extra_kwargs = {
            'password': {'write_only': True,
                         'style': {'input_type': 'password'}},
        }

    def create(self, validated_data):
        """Create and return a new user via the manager so the password gets hashed."""
        return UserProfile.objects.create_user(
            email=validated_data['email'],
            name=validated_data['name'],
            password=validated_data['password'],
        )

    def update(self, instance, validated_data):
        """Apply account updates, routing any password change through set_password."""
        if 'password' in validated_data:
            # Pop it so the parent update() never writes the raw value.
            instance.set_password(validated_data.pop('password'))
        return super().update(instance, validated_data)
class ProfileFeedItemSerializer(serializers.ModelSerializer):
    """Serializes profile feed items."""
    class Meta:
        model = ProfileFeedItem
        # 'id' and 'created_on' are model-generated and read-only by default.
        fields = ('id', 'user_profile', 'status_text', 'created_on')
        # The owning profile is set from the authenticated request by the
        # view, never from client-supplied input.
        extra_kwargs = {'user_profile': {'read_only': True}}
eb789a0d8e6489c75467d86f35fa2b311ed7fdbf | 18,580 | py | Python | mmt/featureExtractor.py | Montimage/acas | 49c345cee5eabbda4833119de5403316139031b5 | [
"Apache-2.0"
] | null | null | null | mmt/featureExtractor.py | Montimage/acas | 49c345cee5eabbda4833119de5403316139031b5 | [
"Apache-2.0"
] | null | null | null | mmt/featureExtractor.py | Montimage/acas | 49c345cee5eabbda4833119de5403316139031b5 | [
"Apache-2.0"
] | null | null | null | import numpy
import pandas as pd
from scipy.stats import entropy
"""
Deals with calculation of actual ML features.
feature_names - predefined col names for the final ML feature dataframe
"""
# Canonical column set for the per-flow feature matrix produced by
# calculateFeatures().  Interval-style names such as '(-0.001, 50.0]' are
# histogram bins: inter-packet times in ms and TCP payload lengths in bytes.
feature_names = ['ip.pkts_per_flow', 'duration', 'ip.header_len',
       'ip.payload_len', 'ip.avg_bytes_tot_len', 'time_between_pkts_sum',
       'time_between_pkts_avg', 'time_between_pkts_max',
       'time_between_pkts_min', 'time_between_pkts_std', '(-0.001, 50.0]',
       '(50.0, 100.0]', '(100.0, 150.0]', '(150.0, 200.0]', '(200.0, 250.0]',
       '(250.0, 300.0]', '(300.0, 350.0]', '(350.0, 400.0]', '(400.0, 450.0]',
       '(450.0, 500.0]', '(500.0, 550.0]', 'tcp_pkts_per_flow', 'pkts_rate',
       'tcp_bytes_per_flow', 'byte_rate', 'tcp.tcp_session_payload_up_len',
       'tcp.tcp_session_payload_down_len', '(-0.001, 150.0]',
       '(150.0, 300.0]', '(300.0, 450.0]', '(450.0, 600.0]', '(600.0, 750.0]',
       '(750.0, 900.0]', '(900.0, 1050.0]', '(1050.0, 1200.0]',
       '(1200.0, 1350.0]', '(1350.0, 1500.0]', '(1500.0, 10000.0]', 'tcp.fin',
       'tcp.syn', 'tcp.rst', 'tcp.psh', 'tcp.ack', 'tcp.urg', 'sport_g', 'sport_le', 'dport_g',
       'dport_le', 'mean_tcp_pkts', 'std_tcp_pkts', 'min_tcp_pkts',
       'max_tcp_pkts', 'entropy_tcp_pkts', 'mean_tcp_len', 'std_tcp_len',
       'min_tcp_len', 'max_tcp_len', 'entropy_tcp_len', 'ssl.tls_version']
def calculateFeatures(ip_traffic, tcp_traffic, tls_traffic):
"""
Calculates ML features based on traffic extracted from mmt-probe .csv. Features are calculated per flow and direction
where direction is identified by mmt-probe. Remark: features are calculated and returned including the direction
and session id, both columns should be dropped before feeding them into ML model
:param ip_traffic:
:param tcp_traffic:
:param tls_traffic:
:return: Ip of flows and dataframe with ML features (per flow+direction)
"""
print("Extracting features")
# Bins of packet lengths and time between packets based on
# "MalDetect: A Structure of Encrypted Malware Traffic Detection" by Jiyuan Liu et al.
bins_len = [0, 150, 300, 450, 600, 750, 900, 1050, 1200, 1350, 1500, 10000]
bins_time = [0, 50, 100, 150, 200, 250, 300, 350, 400, 450, 500, 550]
## saving unique ips based on ip_traffic
ips = ip_traffic.groupby(["ip.session_id", "meta.direction"])[["ip.src", "ip.dst"]].apply(
lambda x: list(numpy.unique(x)))
ips = ips.to_frame().reset_index()
ips.columns = ["ip.session_id", "meta.direction", "ip"]
ips["ip.session_id"] = ips["ip.session_id"].astype(int)
ips["meta.direction"] = ips["meta.direction"].astype(int)
ip_traffic.drop(columns=["ip.src", "ip.dst"], inplace=True)
ip_traffic = ip_traffic.apply(pd.to_numeric)
tcp_traffic = tcp_traffic.apply(pd.to_numeric)
tls_traffic = tls_traffic.apply(pd.to_numeric)
ip_traffic['meta.direction'] = ip_traffic['meta.direction'].astype(int)
tcp_traffic['meta.direction'] = tcp_traffic['meta.direction'].astype(int)
tcp_traffic['tcp.src_port'] = tcp_traffic['tcp.src_port'].astype(int)
tcp_traffic['tcp.dest_port'] = tcp_traffic['tcp.dest_port'].astype(int)
tls_traffic['meta.direction'] = tls_traffic['meta.direction'].astype(int)
## deleting tcp and tls samples that have ip.session_id that was not present in ip_traffic (means that ip.session_id is wrongly assigned?)
ids_tcp = tcp_traffic["ip.session_id"].unique().tolist()
ids_ip = ip_traffic["ip.session_id"].unique().tolist()
ids_tls = tls_traffic["ip.session_id"].unique().tolist()
diff_tcp = set(ids_tcp) - set(ids_ip)
diff_tls = set(ids_tls) - set(ids_ip)
tcp_traffic = tcp_traffic[~tcp_traffic['ip.session_id'].isin(diff_tcp)]
tls_traffic = tls_traffic[~tls_traffic['ip.session_id'].isin(diff_tls)]
ip_traffic.set_index(["ip.session_id", "meta.direction"], inplace=True)
tcp_traffic.set_index(["ip.session_id", "meta.direction"], inplace=True)
## Overall counters
# total_traffic_nb = any_traffic['time'].count() ## total number of any packets in csv
# ip_total_nb = ip_traffic.groupby("ip.session_id")['time'].count().sum() ## total number of ip packets in csv
ip_pkts_per_flow = ip_traffic.groupby(["ip.session_id", "meta.direction"])['time'].count().reset_index().rename(
columns={"time": "ip.pkts_per_flow"}) ## number of ip packets per session id
## Duration of flow: time between first and last received packet in one flow (i.e. in one direction per one session id)
duration = ip_traffic.groupby(["ip.session_id", "meta.direction"])[
['ip.first_packet_time']].min().reset_index().merge(
ip_traffic.groupby(["ip.session_id", "meta.direction"])[['ip.last_packet_time']].max().reset_index())
duration['duration'] = duration['ip.last_packet_time'] - duration['ip.first_packet_time']
features = ip_pkts_per_flow.merge(duration).drop(columns=['ip.first_packet_time', 'ip.last_packet_time'])
duration = duration.iloc[0:0]
ip_total_per_session = ip_pkts_per_flow.iloc[0:0]
#####
ip_header_len = ip_traffic.groupby(["ip.session_id", "meta.direction"])["ip.header_len"].sum().reset_index()
features = features.merge(ip_header_len)
ip_tot_len = ip_traffic.groupby(["ip.session_id", "meta.direction"])["ip.tot_len"].sum().reset_index().rename(
columns={"ip.tot_len": "ip.bytes_tot_len"}) ### ?? TODO
ip_tot_len["ip.payload_len"] = ip_tot_len["ip.bytes_tot_len"] - ip_header_len["ip.header_len"]
ip_tot_len = ip_tot_len.drop(columns='ip.bytes_tot_len')
features = features.merge(ip_tot_len)
ip_header_len = ip_header_len.iloc[0:0]
ip_tot_len = ip_tot_len.iloc[0:0]
ip_avg_len = ip_traffic.groupby(["ip.session_id"])["ip.tot_len"].mean().reset_index().rename(
columns={"ip.tot_len": "ip.avg_bytes_tot_len"})
features = features.merge(ip_avg_len)
ip_avg_len = ip_avg_len.iloc[0:0]
# Packet Time
ip_traffic['delta'] = (ip_traffic['time'] - ip_traffic['time'].shift()).fillna(0)
ip_traffic['delta'] = ip_traffic['delta'] * 1000 # seconds to ms
# df = ip_traffic.copy()
# df = ip_traffic[['ip.session_id', 'meta.direction', 'delta']].copy()
df = ip_traffic[['delta']].copy()
#####
print("Times between packets")
time_between_pkts_sum = df.groupby(['ip.session_id', 'meta.direction'])['delta'].sum().reset_index().rename(
columns={"delta": "time_between_pkts_sum"})
time_between_pkts_avg = df.groupby(['ip.session_id', 'meta.direction'])['delta'].mean().reset_index().rename(
columns={"delta": "time_between_pkts_avg"})
time_between_pkts_max = df.groupby(['ip.session_id', 'meta.direction'])['delta'].max().reset_index().rename(
columns={"delta": "time_between_pkts_max"})
time_between_pkts_min = df.groupby(['ip.session_id', 'meta.direction'])['delta'].min().reset_index().rename(
columns={"delta": "time_between_pkts_min"})
time_between_pkts_std = df.groupby(['ip.session_id', 'meta.direction'])['delta'].std().reset_index().rename(
columns={"delta": "time_between_pkts_std"})
features = features.merge(time_between_pkts_sum)
features = features.merge(time_between_pkts_avg)
features = features.merge(time_between_pkts_max)
features = features.merge(time_between_pkts_min)
features = features.merge(time_between_pkts_std)
time_between_pkts_sum = time_between_pkts_sum[0:0]
time_between_pkts_avg = time_between_pkts_avg[0:0]
time_between_pkts_max = time_between_pkts_max[0:0]
time_between_pkts_min = time_between_pkts_min[0:0]
time_between_pkts_std = time_between_pkts_std[0:0]
print("SPTime Sequence")
time = df.groupby(['ip.session_id', 'meta.direction'])['delta'].value_counts(bins=bins_time, sort=False).to_frame()
df = df.iloc[0:0]
time = time.rename(columns={'delta': 'county'}).reset_index()
sptime = time.pivot_table(index=['ip.session_id', 'meta.direction'], columns='delta',
values='county') # ,fill_value=0)
sptime.columns = sptime.columns.astype(str)
sptime = sptime.reset_index()
features = features.merge(sptime)
time = time.iloc[0:0]
sptime = sptime.iloc[0:0]
if not tcp_traffic.empty:
print("TCP features")
# TCP packets number per flow
tcp_pkts_per_flow = tcp_traffic.groupby(["ip.session_id", "meta.direction"])[
['tcp.src_port']].count().reset_index().rename(
columns={
"tcp.src_port": "tcp_pkts_per_flow"}) ## number of tcp packets per flow, and per direction (0 = client->server)
features = pd.merge(features, tcp_pkts_per_flow, how='outer', left_on=["ip.session_id", "meta.direction"],
right_on=["ip.session_id", "meta.direction"])
features['pkts_rate'] = features['tcp_pkts_per_flow'] / features['duration']
tcp_pkts_per_flow = tcp_pkts_per_flow.iloc[0:0]
# # TCP bytes sum per flow
tcp_bytes_per_flow = tcp_traffic.groupby(["ip.session_id", "meta.direction"])[
['tcp.payload_len']].sum().reset_index().rename(
columns={
"tcp.payload_len": "tcp_bytes_per_flow"}) ## sum of tcp bytes per flow per direction (0 = client->server)
features = pd.merge(features, tcp_bytes_per_flow, how='outer', on=["ip.session_id", "meta.direction"])
features['byte_rate'] = features['tcp_pkts_per_flow'] / features['duration']
tcp_bytes_per_flow = tcp_bytes_per_flow.iloc[0:0]
features = features.merge(tcp_traffic.groupby(['ip.session_id', 'meta.direction'])[
'tcp.tcp_session_payload_up_len'].count().reset_index(), how='outer', on=["ip.session_id", "meta.direction"])
features = features.merge(tcp_traffic.groupby(['ip.session_id', 'meta.direction'])[
'tcp.tcp_session_payload_down_len'].count().reset_index(), how='outer', on=["ip.session_id", "meta.direction"])
## Sequence: Packet length and time sequences counted in bins, each bin stored as separate column
# Packet length
print("SPL Sequence")
len = tcp_traffic.groupby(['ip.session_id', 'meta.direction'])['tcp.payload_len'].value_counts(bins=bins_len,
sort=False).to_frame()
len = len.rename(columns={'tcp.payload_len': 'county'}).reset_index()
# pivot_table to get columns out of segregated and divided packet lengths
spl = len.pivot_table(index=['ip.session_id', 'meta.direction'], columns='tcp.payload_len',
values='county') # ,fill_value=0)
len = len.iloc[0:0]
spl.columns = spl.columns.astype(str)
spl = spl.reset_index()
features = pd.merge(features, spl, how='outer', left_on=["ip.session_id", "meta.direction"],
right_on=["ip.session_id", "meta.direction"])
spl = spl.iloc[0:0]
print("Flags")
# Flags: counts the number of turned on flags for each session and direction
flag_list = ['tcp.fin', 'tcp.syn', 'tcp.rst', 'tcp.psh', 'tcp.ack', 'tcp.urg']
tcp_flags_cnt_flow = tcp_traffic.groupby(['ip.session_id', 'meta.direction'])[flag_list].aggregate(
lambda g: g.eq(
1.0).sum()).reset_index() # .drop(columns=['tcp.src_port', 'tcp.dest_port', 'tcp.payload_len','tcp.tcp_session_payload_up_len', 'tcp.tcp_session_payload_down_len'])
features = pd.merge(features, tcp_flags_cnt_flow, how='outer', left_on=["ip.session_id", "meta.direction"],
right_on=["ip.session_id", "meta.direction"])
tcp_flags_cnt_flow = tcp_flags_cnt_flow.iloc[0:0]
## Source and destination ports greater/less or equal to 1024 ( > ephemeral ports)
# src ports
print("Ports")
# tcp_traffic.groupby(['ip.session_id', 'meta.direction'])[['tcp.src_port']].apply(lambda x: len(x[x>3])/len(x) )
sports = tcp_traffic.groupby(['ip.session_id', 'meta.direction'])[['tcp.src_port']].apply(
lambda x: (x > 1024).sum()).reset_index().rename(columns={'tcp.src_port': 'sport_g'}).merge(
tcp_traffic.groupby(['ip.session_id', 'meta.direction'])[['tcp.src_port']].agg(
lambda x: (x <= 1024).sum()).reset_index().rename(columns={'tcp.src_port': 'sport_le'})
)
features = pd.merge(features, sports, how='outer', left_on=["ip.session_id", "meta.direction"],
right_on=["ip.session_id", "meta.direction"])
sports = sports.iloc[0:0]
# dest port
dports = tcp_traffic.groupby(['ip.session_id', 'meta.direction'])[['tcp.dest_port']].apply(
lambda x: (x > 1024).sum()).reset_index().rename(columns={'tcp.dest_port': 'dport_g'}).merge(
tcp_traffic.groupby(['ip.session_id', 'meta.direction'])[['tcp.dest_port']].agg(
lambda x: (x <= 1024).sum()).reset_index().rename(columns={'tcp.dest_port': 'dport_le'})
)
features = pd.merge(features, dports, how='outer', left_on=["ip.session_id", "meta.direction"],
right_on=["ip.session_id", "meta.direction"])
dports = dports.iloc[0:0]
print("Min/max pkts")
mean_tcp_pkts = tcp_traffic.groupby(['ip.session_id', 'meta.direction'])[
'tcp.src_port'].mean().reset_index().rename(
columns={"tcp.src_port": "mean_tcp_pkts"})
std_tcp_pkts = tcp_traffic.groupby(['ip.session_id', 'meta.direction'])[
'tcp.src_port'].std().reset_index().rename(
columns={"tcp.src_port": "std_tcp_pkts"})
min_tcp_pkts = tcp_traffic.groupby(['ip.session_id', 'meta.direction'])[
'tcp.src_port'].min().reset_index().rename(
columns={"tcp.src_port": "min_tcp_pkts"})
max_tcp_pkts = tcp_traffic.groupby(['ip.session_id', 'meta.direction'])[
'tcp.src_port'].max().reset_index().rename(
columns={"tcp.src_port": "max_tcp_pkts"})
features = features.merge(mean_tcp_pkts, how='outer', on=["ip.session_id", "meta.direction"])
features = features.merge(std_tcp_pkts, how='outer', on=["ip.session_id", "meta.direction"])
features = features.merge(min_tcp_pkts, how='outer', on=["ip.session_id", "meta.direction"])
features = features.merge(max_tcp_pkts, how='outer', on=["ip.session_id", "meta.direction"])
mean_tcp_pkts = mean_tcp_pkts[0:0]
std_tcp_pkts = std_tcp_pkts[0:0]
min_tcp_pkts = min_tcp_pkts[0:0]
max_tcp_pkts = max_tcp_pkts[0:0]
print("Entropy pkts")
entropy_tcp_pkts = tcp_traffic.groupby(['ip.session_id', 'meta.direction'])['tcp.src_port'].apply(
lambda x: entropy(
x.value_counts(), base=2)).to_frame().reset_index().rename(columns={'tcp.src_port': 'entropy_tcp_pkts'})
features = pd.merge(features, entropy_tcp_pkts, how='outer', left_on=["ip.session_id", "meta.direction"],
right_on=["ip.session_id", "meta.direction"])
entropy_tcp_pkts = entropy_tcp_pkts.iloc[0:0]
# Min, max, std and mean of packet length in each session+direction
print("Min/max pkts")
mean_tcp_len = tcp_traffic.groupby(['ip.session_id', 'meta.direction'])[
'tcp.payload_len'].mean().reset_index().rename(
columns={"tcp.payload_len": "mean_tcp_len"})
std_tcp_len = tcp_traffic.groupby(['ip.session_id', 'meta.direction'])[
'tcp.payload_len'].std().reset_index().rename(
columns={"tcp.payload_len": "std_tcp_len"})
min_tcp_len = tcp_traffic.groupby(['ip.session_id', 'meta.direction'])[
'tcp.payload_len'].min().reset_index().rename(
columns={"tcp.payload_len": "min_tcp_len"})
max_tcp_len = tcp_traffic.groupby(['ip.session_id', 'meta.direction'])[
'tcp.payload_len'].max().reset_index().rename(
columns={"tcp.payload_len": "max_tcp_len"})
features = features.merge(mean_tcp_len, how='outer', left_on=["ip.session_id", "meta.direction"],
right_on=["ip.session_id", "meta.direction"])
features = features.merge(std_tcp_len, how='outer', left_on=["ip.session_id", "meta.direction"],
right_on=["ip.session_id", "meta.direction"])
features = features.merge(min_tcp_len, how='outer', left_on=["ip.session_id", "meta.direction"],
right_on=["ip.session_id", "meta.direction"])
features = features.merge(max_tcp_len, how='outer', left_on=["ip.session_id", "meta.direction"],
right_on=["ip.session_id", "meta.direction"])
mean_tcp_len = mean_tcp_len[0:0]
std_tcp_len = std_tcp_len[0:0]
min_tcp_len = min_tcp_len[0:0]
max_tcp_len = max_tcp_len[0:0]
print("Entropy len")
#TODO: if MMT-probe will be able to provide any other attributes of TLS traffic they should be processed here
if not tls_traffic.empty:
print("TLS features")
# TLS packets number per flow
tls_pkts_per_flow = tls_traffic.groupby(["ip.session_id", "meta.direction"])[
['ssl.tls_version']].count().reset_index().rename(
columns={"time": "tls_pkts_per_flow"})
features = pd.merge(features, tls_pkts_per_flow, how='outer', on=["ip.session_id", "meta.direction"])
tls_pkts_per_flow = tls_pkts_per_flow.iloc[0:0]
#Features should have always same columns (as predefined), hence in case some features were not calculated due to
# the lack of data (e.g. no TCP packets) the columns should be added anyway filled with 0 values
features = features.reindex(features.columns.union(feature_names, sort=False), axis=1, fill_value=0)
# ips = features['ip.session_id', 'meta.direction']
# features.drop(columns=['ip.session_id', 'meta.direction'], inplace=True)
# features.reset_index(inplace=True)
# features.drop(columns=['delta'], inplace=True)
print("Created {} feature samples".format(features.shape[0]))
return ips, features
| 56.132931 | 181 | 0.636598 |
a76c81ca157c4e88a20827feb9460ccada22e47b | 5,572 | py | Python | tests/test_precision_recall_metrics.py | michaelwang123/PaddleRec | 4feb0a7f962e918bdfa4f7289a9ddfd08d459824 | [
"Apache-2.0"
] | 2,739 | 2020-04-28T05:12:48.000Z | 2022-03-31T16:01:49.000Z | tests/test_precision_recall_metrics.py | michaelwang123/PaddleRec | 4feb0a7f962e918bdfa4f7289a9ddfd08d459824 | [
"Apache-2.0"
] | 205 | 2020-05-14T13:29:14.000Z | 2022-03-31T13:01:50.000Z | tests/test_precision_recall_metrics.py | michaelwang123/PaddleRec | 4feb0a7f962e918bdfa4f7289a9ddfd08d459824 | [
"Apache-2.0"
] | 545 | 2020-05-14T13:19:13.000Z | 2022-03-24T07:53:05.000Z | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
from paddlerec.core.metrics import PrecisionRecall
import paddle
import paddle.fluid as fluid
def calc_precision(tp_count, fp_count):
    """Precision = TP / (TP + FP); defined as 1.0 when nothing was predicted positive."""
    if tp_count <= 0.0 and fp_count <= 0.0:
        return 1.0
    return tp_count / (tp_count + fp_count)
def calc_recall(tp_count, fn_count):
    """Recall = TP / (TP + FN); defined as 1.0 when there are no actual positives."""
    if tp_count <= 0.0 and fn_count <= 0.0:
        return 1.0
    return tp_count / (tp_count + fn_count)
def calc_f1_score(precision, recall):
    """Harmonic mean of precision and recall; 0.0 when both inputs are zero."""
    if precision <= 0.0 and recall <= 0.0:
        return 0.0
    return 2 * precision * recall / (precision + recall)
def get_states(idxs, labels, cls_num, weights=None, batch_nums=1):
    """Accumulate per-class [TP, FP, TN, FN] counts for a batch.

    idxs / labels are (N, 1) int arrays of predicted and true class ids;
    weights optionally weights each instance (default 1.0).  Returns a
    float32 array of shape (cls_num, 4).
    """
    states = np.zeros((cls_num, 4)).astype('float32')
    for row in range(idxs.shape[0]):
        w = 1.0 if weights is None else weights[row]
        pred = idxs[row][0]
        truth = labels[row][0]
        # Start by crediting every class a true negative for this instance...
        states[:, 2] += w
        if pred == truth:
            # ...then reclassify the hit class as a true positive.
            states[pred][0] += w
            states[pred][2] -= w
        else:
            # ...then reclassify: FP for the predicted class, FN for the truth.
            states[pred][1] += w
            states[truth][3] += w
            states[pred][2] -= w
            states[truth][2] -= w
    return states
def compute_metrics(states, cls_num):
    """Derive [macro-P, macro-R, macro-F1, micro-P, micro-R, micro-F1]
    (float32) from per-class [TP, FP, TN, FN] counts.

    The nested helpers implement the same definitions as calc_precision,
    calc_recall and calc_f1_score.
    """
    def _ratio(hit, miss):
        # Shared precision/recall form: hit / (hit + miss), 1.0 if undefined.
        if hit > 0.0 or miss > 0.0:
            return hit / (hit + miss)
        return 1.0

    def _f1(p, r):
        if p > 0.0 or r > 0.0:
            return 2 * p * r / (p + r)
        return 0.0

    total_tp = total_fp = total_fn = 0.0
    macro_p = 0.0
    macro_r = 0.0
    for c in range(cls_num):
        tp, fp, _, fn = states[c]
        total_tp += tp
        total_fp += fp
        total_fn += fn
        macro_p += _ratio(tp, fp)
        macro_r += _ratio(tp, fn)
    macro_p /= cls_num
    macro_r /= cls_num
    micro_p = _ratio(total_tp, total_fp)
    micro_r = _ratio(total_tp, total_fn)
    metrics = [macro_p, macro_r, _f1(macro_p, macro_r),
               micro_p, micro_r, _f1(micro_p, micro_r)]
    return np.array(metrics).astype('float32')
class TestPrecisionRecall(unittest.TestCase):
    """End-to-end check of the PrecisionRecall metric op against NumPy references."""

    def setUp(self):
        """Generate random batches and pre-compute reference states/metrics."""
        self.ins_num = 64      # instances per batch
        self.cls_num = 10      # number of classes
        self.batch_nums = 3    # batches fed through the op
        self.datas = []
        self.states = np.zeros((self.cls_num, 4)).astype('float32')
        for i in range(self.batch_nums):
            probs = np.random.uniform(0, 1.0, (self.ins_num,
                                               self.cls_num)).astype('float32')
            # Predicted class = argmax over the random scores.
            idxs = np.array(np.argmax(
                probs, axis=1)).reshape(self.ins_num, 1).astype('int32')
            labels = np.random.choice(range(self.cls_num),
                                      self.ins_num).reshape(
                                          (self.ins_num, 1)).astype('int32')
            self.datas.append((probs, labels))
            states = get_states(idxs, labels, self.cls_num)
            # Accumulate states over batches, mirroring the op's accumulation.
            self.states = np.add(self.states, states)
        self.metrics = compute_metrics(self.states, self.cls_num)
        self.place = fluid.core.CPUPlace()

    def build_network(self):
        """Declare predict/label inputs and attach the PrecisionRecall metric."""
        predict = fluid.data(
            name="predict",
            shape=[-1, self.cls_num],
            dtype='float32',
            lod_level=0)
        label = fluid.data(
            name="label", shape=[-1, 1], dtype='int32', lod_level=0)
        precision_recall = PrecisionRecall(
            input=predict, label=label, class_num=self.cls_num)
        return precision_recall

    def test_forward(self):
        """Feed all batches, then compare accumulated states and final metrics."""
        precision_recall = self.build_network()
        metrics = precision_recall.get_result()
        fetch_vars = []
        metric_keys = []
        for item in metrics.items():
            fetch_vars.append(item[1])
            metric_keys.append(item[0])
        exe = fluid.Executor(self.place)
        exe.run(fluid.default_startup_program())
        for i in range(self.batch_nums):
            outs = exe.run(
                fluid.default_main_program(),
                feed={'predict': self.datas[i][0],
                      'label': self.datas[i][1]},
                fetch_list=fetch_vars,
                return_numpy=True)
        # Only the outputs after the last batch (full accumulation) are checked.
        outs = dict(zip(metric_keys, outs))
        self.assertTrue(np.allclose(outs['[TP FP TN FN]'], self.states))
        self.assertTrue(np.allclose(outs['precision_recall_f1'], self.metrics))

    def test_exception(self):
        """The metric must reject missing or invalid constructor arguments."""
        self.assertRaises(Exception, PrecisionRecall)
        self.assertRaises(
            Exception,
            PrecisionRecall,
            input=self.datas[0][0],
            label=self.datas[0][1],
            class_num=self.cls_num)
# Standard unittest entry point for direct invocation.
if __name__ == '__main__':
    unittest.main()
92c9e1100f6ea8f99947cec4a0929343375fdd9a | 6,447 | py | Python | solvers/lasso_utils/celebA_estimators.py | mjsong32/GlowRED | 5189b299452e39b99e63ad2d024d2754d66cf3d5 | [
"MIT"
] | 2 | 2019-08-30T12:10:55.000Z | 2021-07-08T20:50:44.000Z | solvers/lasso_utils/celebA_estimators.py | mjsong32/GlowRED | 5189b299452e39b99e63ad2d024d2754d66cf3d5 | [
"MIT"
] | null | null | null | solvers/lasso_utils/celebA_estimators.py | mjsong32/GlowRED | 5189b299452e39b99e63ad2d024d2754d66cf3d5 | [
"MIT"
] | 1 | 2021-05-12T17:15:31.000Z | 2021-05-12T17:15:31.000Z | """Estimators for compressed sensing"""
# pylint: disable = C0301, C0103, C0111, R0914
import copy
import heapq
import numpy as np
from . import utils
#import utils
import scipy.fftpack as fftpack
import pywt
def dct2(image_channel):
    """Orthonormal 2-D DCT, built from a 1-D DCT along each axis in turn."""
    rows_transformed = fftpack.dct(image_channel.T, norm='ortho')
    return fftpack.dct(rows_transformed.T, norm='ortho')
def idct2(image_channel):
    """Inverse of dct2: orthonormal 2-D inverse DCT applied axis by axis."""
    rows_restored = fftpack.idct(image_channel.T, norm='ortho')
    return fftpack.idct(rows_restored.T, norm='ortho')
def vec(channels):
    """Interleave per-channel 64x64 planes into one flat length-12288 vector
    (HWC layout: the channel index varies fastest)."""
    image = np.zeros((64, 64, 3))
    for idx, plane in enumerate(channels):
        image[..., idx] = plane
    return image.reshape([-1])
def devec(vector):
    """Split a flat length-12288 vector back into its three 64x64 channels."""
    cube = np.reshape(vector, [64, 64, 3])
    return [cube[..., band] for band in range(3)]
def wavelet_basis(path='./solvers/lasso_utils/wavelet_basis.npy'):
    """Load the single-channel wavelet basis from disk and expand it to act
    independently on each of the three interleaved RGB channels.

    The stored array has shape (4096, 64, 64), i.e. 4096 64x64 images; it is
    reshaped to 4096x4096 (one image per row) and copied onto the three
    channel strides of a 12288x12288 block matrix.
    """
    W_ = np.load(path).reshape((4096, 4096))
    W = np.zeros((12288, 12288))
    for channel in range(3):
        W[channel::3, channel::3] = W_
    return W
def lasso_dct_estimator(hparams):  # pylint: disable = W0613
    """LASSO in the 2-D DCT domain.

    One can prove that taking the 2D DCT of each row of A, then solving the
    usual LASSO, and finally taking the 2D inverse DCT gives the correct
    answer.
    """
    def estimator(A_val, y_batch_val, hparams):
        # Transform each column of the measurement matrix into the DCT domain.
        A_dct = copy.deepcopy(A_val)
        for col in range(A_val.shape[1]):
            A_dct[:, col] = vec([dct2(ch) for ch in devec(A_dct[:, col])])
        x_hat_batch = []
        for idx in range(hparams.batch_size):
            z_hat = utils.solve_lasso(A_dct, y_batch_val[idx], hparams)
            # Back to pixel space, then clip into the valid [-1, 1] range.
            x_hat = vec([idct2(ch) for ch in devec(z_hat)]).T
            x_hat_batch.append(np.clip(x_hat, -1, 1))
        return x_hat_batch
    return estimator
def lasso_wavelet_estimator(hparams):  # pylint: disable = W0613
    """LASSO in the (RGB-interleaved) wavelet domain."""
    def estimator(A_val, y_batch_val, hparams):
        W = wavelet_basis()
        WA = np.dot(W, A_val)
        x_hat_batch = []
        for idx in range(hparams.batch_size):
            z_hat = utils.solve_lasso(WA, y_batch_val[idx], hparams)
            x_hat = np.dot(z_hat, W)
            # Normalize by the largest-magnitude entry.
            x_hat_batch.append(x_hat / (1.0 * np.abs(x_hat).max()))
        return np.asarray(x_hat_batch)
    return estimator
def lasso_wavelet_ycbcr_estimator(hparams): #pylint: disable = W0613
    """LASSO with Wavelet in YCbCr.

    Solves LASSO in a wavelet basis expressed in YCbCr color space; the
    measurement residual is adjusted by the constant offset V of the color
    conversion before solving.
    """
    def estimator(A_val, y_batch_val, hparams):
        x_hat_batch = []
        W = wavelet_basis()
        # Original dense-matrix formulation, kept for reference:
        # U, V = utils.RGB_matrix()
        # V = (V/127.5) - 1.0
        # U = U/127.5
        def convert(W):
            # Convert W from YCbCr to RGB.  The scale factors (255/219, 112,
            # 0.701, 0.299, 0.114, 0.587, offsets -16/-128) appear to be the
            # standard ITU-R BT.601 studio-swing constants — TODO confirm
            # against utils.RGB_matrix().
            W_ = W.copy()
            V = np.zeros((12288, 1))  # constant offset vector, channel-interleaved
            # R
            V[0::3] = ((255.0/219.0)*(-16.0)) + ((255.0*0.701/112.0)*(-128.0))
            W_[:, 0::3] = (255.0/219.0)*W[:, 0::3] + (0.0)*W[:, 1::3] + (255.0*0.701/112.0)*W[:, 2::3]
            # G
            V[1::3] = ((255.0/219.0)*(-16.0)) - ((0.886*0.114*255.0/(112.0*0.587)) *(-128.0)) - ((255.0*0.701*0.299/(112.0*0.587))*(-128.0))
            W_[:, 1::3] = (255.0/219.0)*W[:, 0::3] - (0.886*0.114*255.0/(112.0*0.587))*W[:, 1::3] - (255.0*0.701*0.299/(112.0*0.587))*W[:, 2::3]
            # B
            V[2::3] = ((255.0/219.0)*(-16.0)) + ((0.886*255.0/(112.0))*(-128.0))
            W_[:, 2::3] = (255.0/219.0)*W[:, 0::3] + (0.886*255.0/(112.0))*W[:, 1::3] + 0.0*W[:, 2::3]
            return W_, V
        # WU = np.dot(W, U.T)
        WU, V = convert(W)
        # Rescale basis and offset into the [-1, 1] pixel convention.
        WU = WU/127.5
        V = (V/127.5) - 1.0
        WA = np.dot(WU, A_val)
        # Remove the constant color-offset contribution from the measurements.
        y_batch_val_temp = y_batch_val - np.dot(V.T, A_val)
        for j in range(hparams.batch_size):
            y_val = y_batch_val_temp[j]
            z_hat = utils.solve_lasso(WA, y_val, hparams)
            # Reconstruct in pixel space and add the offset back.
            x_hat = np.dot(z_hat, WU) + V.ravel()
            x_hat_max = np.abs(x_hat).max()
            x_hat = x_hat / (1.0 * x_hat_max)
            x_hat_batch.append(x_hat)
        x_hat_batch = np.asarray(x_hat_batch)
        return x_hat_batch
    return estimator
def k_sparse_wavelet_estimator(hparams):  # pylint: disable = W0613
    """Projector onto the best k-sparse wavelet approximation.

    Only valid for 'project' measurements.  NOTE: the returned estimator
    rescales y_batch_val in place (mutating the caller's array), as before.
    """
    def estimator(A_val, y_batch_val, hparams):  # pylint: disable = W0613
        if hparams.measurement_type != 'project':
            raise RuntimeError
        # In-place rescale, matching the original side effect.
        y_batch_val /= np.sqrt(hparams.n_input)
        recons = []
        for y_val in y_batch_val:
            image = np.reshape(y_val, [64, 64, 3])
            sparse_image = k_sparse_reconstr(image, hparams.sparsity)
            recons.append(np.reshape(sparse_image, [-1]))
        # Clip into the valid [-1, 1] pixel range.
        return np.clip(np.asarray(recons), -1, 1)
    return estimator
def get_wavelet(x):
    """Per-channel 2-D Daubechies-1 wavelet decomposition of an HWC image."""
    return [pywt.wavedec2(x[:, :, band], 'db1') for band in range(3)]
def get_image(coefs_list):
    """Inverse of get_wavelet: reassemble a 64x64x3 image from per-channel
    'db1' wavelet coefficients."""
    x = np.zeros((64, 64, 3))
    for band in range(3):
        x[:, :, band] = pywt.waverec2(coefs_list[band], 'db1')
    return x
def get_heap(coefs_list):
    """Flatten nested wavelet coefficients into a min-heap keyed by -|value|
    (so the largest magnitudes pop first).

    Each heap entry is (-abs(val), path) where path records the indices
    needed to write the value back:
      [t, i, j, m, val]     for scalar leaves (rows of the 2-D approximation
                            array), and
      [t, i, j, m, n, val]  for entries of 1-D rows inside detail tuples.
    """
    heap = []
    for t, coefs in enumerate(coefs_list):
        for i, a in enumerate(coefs):
            for j, b in enumerate(a):
                for m, c in enumerate(b):
                    # 'c' is either a 1-D row (detail arrays) or a scalar
                    # (approximation array rows); only the former is iterable.
                    # BUGFIX: was a bare `except:` which also swallowed
                    # KeyboardInterrupt/SystemExit — narrowed to the actual
                    # "not iterable" failure.
                    try:
                        for n, val in enumerate(c):
                            heapq.heappush(heap, (-abs(val), [t, i, j, m, n, val]))
                    except TypeError:
                        val = c
                        heapq.heappush(heap, (-abs(val), [t, i, j, m, val]))
    return heap
def k_sparse_reconstr(x, k):
    """Return the best k-term wavelet approximation of image x: keep the k
    largest-magnitude 'db1' coefficients and zero out all others."""
    heap = get_heap(get_wavelet(x))
    # All-zero coefficient scaffold with the same nested structure as x's.
    sparse_coefs = get_wavelet(0 * x)
    for _ in range(k):
        _, entry = heapq.heappop(heap)
        if len(entry) == 5:
            t, i, j, m, val = entry
            sparse_coefs[t][i][j][m] = val
        else:
            t, i, j, m, n, val = entry
            sparse_coefs[t][i][j][m][n] = val
    return get_image(sparse_coefs)
| 33.578125 | 144 | 0.557934 |
e98a4c2646cab1799d4d936415b4fc3d15e7c1b1 | 9,978 | py | Python | config/scratch/mask_r50v1b_fpn_gn_scratch_2x.py | happywu/simpledet-1 | 5d1de1edfbe745b05b49d9c19eca1e496ded11b7 | [
"Apache-2.0"
] | 3,195 | 2019-01-29T09:08:46.000Z | 2022-03-29T08:20:44.000Z | config/scratch/mask_r50v1b_fpn_gn_scratch_2x.py | happywu/simpledet-1 | 5d1de1edfbe745b05b49d9c19eca1e496ded11b7 | [
"Apache-2.0"
] | 275 | 2019-01-29T10:16:12.000Z | 2022-03-15T17:56:39.000Z | config/scratch/mask_r50v1b_fpn_gn_scratch_2x.py | happywu/simpledet-1 | 5d1de1edfbe745b05b49d9c19eca1e496ded11b7 | [
"Apache-2.0"
] | 563 | 2019-01-29T09:32:07.000Z | 2022-03-22T06:58:01.000Z | from symbol.builder import add_anchor_to_arg
from symbol.builder import ResNetV1bFPN as Backbone
from models.FPN.builder import FPNNeck as Neck
from models.FPN.builder import FPNRoiAlign as RoiExtractor
from models.FPN.builder import FPNBbox2fcHead as BboxHead
from mxnext.complicate import normalizer_factory
from models.maskrcnn.builder import MaskFasterRcnn as Detector
from models.maskrcnn.builder import MaskFPNRpnHead as RpnHead
from models.maskrcnn.builder import MaskFasterRcnn4ConvHead as MaskHead
from models.maskrcnn.builder import BboxPostProcessor
from models.maskrcnn.process_output import process_output
def get_config(is_train):
    """Build the SimpleDet config for Mask R-CNN (ResNet50-v1b FPN, GroupNorm,
    trained from scratch, 2x schedule).

    Returns the tuple of parameter namespaces, symbols, input transforms,
    blob names and metrics that the SimpleDet driver expects.
    """

    class General:
        log_frequency = 10
        # Experiment name derived from this module's name.
        name = __name__.rsplit("/")[-1].rsplit(".")[-1]
        batch_image = 2 if is_train else 1  # images per GPU
        fp16 = False
        loader_worker = 8

    class KvstoreParam:
        kvstore = "nccl"
        batch_image = General.batch_image
        gpus = [0, 1, 2, 3, 4, 5, 6, 7]
        fp16 = General.fp16

    class NormalizeParam:
        # GroupNorm everywhere (required for training from scratch).
        normalizer = normalizer_factory(type="gn")

    class BackboneParam:
        fp16 = General.fp16
        normalizer = NormalizeParam.normalizer
        depth = 50

    class NeckParam:
        fp16 = General.fp16
        normalizer = NormalizeParam.normalizer

    class RpnParam:
        fp16 = General.fp16
        normalizer = NormalizeParam.normalizer
        batch_image = General.batch_image
        nnvm_proposal = True
        nnvm_rpn_target = False

        class anchor_generate:
            scale = (8,)
            ratio = (0.5, 1.0, 2.0)
            stride = (4, 8, 16, 32, 64)  # one anchor set per FPN level
            image_anchor = 256
            max_side = 1400

        class anchor_assign:
            allowed_border = 0
            pos_thr = 0.7
            neg_thr = 0.3
            min_pos_thr = 0.0
            image_anchor = 256
            pos_fraction = 0.5

        class head:
            conv_channel = 256
            mean = (0, 0, 0, 0)
            std = (1, 1, 1, 1)

        class proposal:
            pre_nms_top_n = 2000 if is_train else 1000
            post_nms_top_n = 2000 if is_train else 1000
            nms_thr = 0.7
            min_bbox_side = 0

        class subsample_proposal:
            proposal_wo_gt = False
            image_roi = 512
            fg_fraction = 0.25
            fg_thr = 0.5
            bg_thr_hi = 0.5
            bg_thr_lo = 0.0

        class bbox_target:
            num_reg_class = 81  # 80 COCO classes + background
            class_agnostic = False
            weight = (1.0, 1.0, 1.0, 1.0)
            mean = (0.0, 0.0, 0.0, 0.0)
            std = (0.1, 0.1, 0.2, 0.2)

    class BboxParam:
        fp16 = General.fp16
        normalizer = NormalizeParam.normalizer
        num_class = 1 + 80
        image_roi = 512
        batch_image = General.batch_image

        class regress_target:
            class_agnostic = False
            mean = (0.0, 0.0, 0.0, 0.0)
            std = (0.1, 0.1, 0.2, 0.2)

    class MaskParam:
        fp16 = General.fp16
        normalizer = NormalizeParam.normalizer
        resolution = 28
        dim_reduced = 256
        # Mask loss is only computed on foreground RoIs.
        num_fg_roi = int(RpnParam.subsample_proposal.image_roi * RpnParam.subsample_proposal.fg_fraction)

    class RoiParam:
        fp16 = General.fp16
        normalizer = NormalizeParam.normalizer
        out_size = 7
        stride = (4, 8, 16, 32)
        roi_canonical_scale = 224
        roi_canonical_level = 4

    class MaskRoiParam:
        fp16 = General.fp16
        normalizer = NormalizeParam.normalizer
        out_size = 14  # larger pooled features for the mask branch
        stride = (4, 8, 16, 32)
        roi_canonical_scale = 224
        roi_canonical_level = 4

    class DatasetParam:
        if is_train:
            image_set = ("coco_train2017", )
        else:
            image_set = ("coco_val2017", )

    class OptimizeParam:
        class optimizer:
            type = "sgd"
            # Linear-scaling rule: base lr 0.02 for 8 GPUs x 2 images.
            lr = 0.02 / 8 * len(KvstoreParam.gpus) * KvstoreParam.batch_image
            momentum = 0.9
            wd = 0.0001
            clip_gradient = None

        class schedule:
            mult = 2  # 2x schedule
            begin_epoch = 0
            end_epoch = 6 * mult
            # Step iterations rescaled to the effective global batch size.
            lr_iter = [60000 * mult * 16 // (len(KvstoreParam.gpus) * KvstoreParam.batch_image),
                       80000 * mult * 16 // (len(KvstoreParam.gpus) * KvstoreParam.batch_image)]

        class warmup:
            type = "gradual"
            lr = 0.01 / 8 * len(KvstoreParam.gpus) * KvstoreParam.batch_image / 3.0
            iter = 500

    class TestParam:
        min_det_score = 0.05
        max_det_per_image = 100

        process_roidb = lambda x: x
        # NOTE: inside the lambda, `process_output` resolves to the
        # module-level imported function, not this class attribute.
        process_output = lambda x, y: process_output(x, y)

        class model:
            prefix = "experiments/{}/checkpoint".format(General.name)
            epoch = OptimizeParam.schedule.end_epoch

        class nms:
            type = "nms"
            thr = 0.5

        class coco:
            annotation = "data/coco/annotations/instances_minival2014.json"

    # Assemble the detector graph from the configured components.
    backbone = Backbone(BackboneParam)
    neck = Neck(NeckParam)
    rpn_head = RpnHead(RpnParam, MaskParam)
    roi_extractor = RoiExtractor(RoiParam)
    mask_roi_extractor = RoiExtractor(MaskRoiParam)
    bbox_head = BboxHead(BboxParam)
    mask_head = MaskHead(BboxParam, MaskParam, MaskRoiParam)
    bbox_post_processer = BboxPostProcessor(TestParam)
    detector = Detector()
    if is_train:
        train_sym = detector.get_train_symbol(backbone, neck, rpn_head, roi_extractor, mask_roi_extractor, bbox_head, mask_head)
        test_sym = None
    else:
        train_sym = None
        test_sym = detector.get_test_symbol(backbone, neck, rpn_head, roi_extractor, mask_roi_extractor, bbox_head, mask_head, bbox_post_processer)

    class ModelParam:
        train_symbol = train_sym
        test_symbol = test_sym

        from_scratch = True  # no ImageNet pretraining
        random = True
        memonger = False
        memonger_until = "stage3_unit21_plus"

        class pretrain:
            prefix = "pretrain_model/resnet%s_v1b" % BackboneParam.depth
            epoch = 0
            fixed_param = []

        def process_weight(sym, arg, aux):
            # Pre-generate anchors for each FPN stride into the arg dict.
            for stride in RpnParam.anchor_generate.stride:
                add_anchor_to_arg(
                    sym, arg, aux, RpnParam.anchor_generate.max_side,
                    stride, RpnParam.anchor_generate.scale,
                    RpnParam.anchor_generate.ratio)

    # data processing
    class NormParam:
        mean = tuple(i * 255 for i in (0.485, 0.456, 0.406))  # RGB order
        std = tuple(i * 255 for i in (0.229, 0.224, 0.225))

    # data processing
    class ResizeParam:
        short = 800
        long = 1333

    class PadParam:
        short = 800
        long = 1333
        max_num_gt = 100
        max_len_gt_poly = 2500

    class AnchorTarget2DParam:
        def __init__(self):
            self.generate = self._generate()

        class _generate:
            def __init__(self):
                self.stride = (4, 8, 16, 32, 64)
                self.short = (200, 100, 50, 25, 13)
                self.long = (334, 167, 84, 42, 21)

            # NOTE(review): `(8)` is the int 8, not a 1-tuple — unlike
            # RpnParam.anchor_generate.scale which is `(8,)`.  Confirm the
            # consumer accepts a bare int before changing.
            scales = (8)
            aspects = (0.5, 1.0, 2.0)

        class assign:
            allowed_border = 0
            pos_thr = 0.7
            neg_thr = 0.3
            min_pos_thr = 0.0

        class sample:
            image_anchor = 256
            pos_fraction = 0.5

    class RenameParam:
        mapping = dict(image="data")

    from core.detection_input import ReadRoiRecord, Resize2DImageBbox, \
        ConvertImageFromHwcToChw, Flip2DImageBbox, Pad2DImageBbox, \
        RenameRecord, Norm2DImage
    from models.maskrcnn.input import PreprocessGtPoly, EncodeGtPoly, \
        Resize2DImageBboxMask, Flip2DImageBboxMask, Pad2DImageBboxMask
    from models.FPN.input import PyramidAnchorTarget2D

    if is_train:
        transform = [
            ReadRoiRecord(None),
            Norm2DImage(NormParam),
            PreprocessGtPoly(),
            Resize2DImageBboxMask(ResizeParam),
            Flip2DImageBboxMask(),
            EncodeGtPoly(PadParam),
            Pad2DImageBboxMask(PadParam),
            ConvertImageFromHwcToChw(),
            RenameRecord(RenameParam.mapping)
        ]
        data_name = ["data"]
        label_name = ["im_info", "gt_bbox", "gt_poly"]
        # If targets are not produced inside the network graph, generate
        # them in the input pipeline instead.
        if not RpnParam.nnvm_rpn_target:
            transform.append(PyramidAnchorTarget2D(AnchorTarget2DParam()))
            label_name += ["rpn_cls_label", "rpn_reg_target", "rpn_reg_weight"]
    else:
        transform = [
            ReadRoiRecord(None),
            Norm2DImage(NormParam),
            Resize2DImageBbox(ResizeParam),
            ConvertImageFromHwcToChw(),
            RenameRecord(RenameParam.mapping)
        ]
        data_name = ["data", "im_info", "im_id", "rec_id"]
        label_name = []

    import core.detection_metric as metric
    from models.maskrcnn.metric import SigmoidCELossMetric

    rpn_acc_metric = metric.AccWithIgnore(
        "RpnAcc",
        ["rpn_cls_loss_output", "rpn_cls_label_blockgrad_output"],
        []
    )
    rpn_l1_metric = metric.L1(
        "RpnL1",
        ["rpn_reg_loss_output", "rpn_cls_label_blockgrad_output"],
        []
    )
    # for bbox, the label is generated in network so it is an output
    box_acc_metric = metric.AccWithIgnore(
        "RcnnAcc",
        ["bbox_cls_loss_output", "bbox_label_blockgrad_output"],
        []
    )
    box_l1_metric = metric.L1(
        "RcnnL1",
        ["bbox_reg_loss_output", "bbox_label_blockgrad_output"],
        []
    )
    mask_cls_metric = SigmoidCELossMetric(
        "MaskCE",
        ["mask_loss_output"],
        []
    )

    # NOTE(review): mask_cls_metric is constructed but not included in
    # metric_list — confirm whether this omission is intentional.
    metric_list = [rpn_acc_metric, rpn_l1_metric, box_acc_metric, box_l1_metric,]

    return General, KvstoreParam, RpnParam, RoiParam, BboxParam, DatasetParam, \
        ModelParam, OptimizeParam, TestParam, \
        transform, data_name, label_name, metric_list
| 29.696429 | 147 | 0.592704 |
a71c78de72a2115f3c0cc23e020102d4420b8309 | 3,912 | py | Python | blobDetectorParameters.py | qenops/dDisplay | 3a7846378733d95c17b6274cc3ebe775bbd8f758 | [
"Apache-2.0"
] | null | null | null | blobDetectorParameters.py | qenops/dDisplay | 3a7846378733d95c17b6274cc3ebe775bbd8f758 | [
"Apache-2.0"
] | null | null | null | blobDetectorParameters.py | qenops/dDisplay | 3a7846378733d95c17b6274cc3ebe775bbd8f758 | [
"Apache-2.0"
] | null | null | null | import cv2
import numpy as np
# Generate and display the images
def update(dummy=None):
    """Trackbar callback: build SimpleBlobDetector params from current sliders.

    The `dummy` argument absorbs the slider position cv2 passes to callbacks.
    Returns a populated cv2.SimpleBlobDetector_Params.
    """
    def pos(name):
        # Every slider lives in the single 'Blob Detector' window.
        return cv2.getTrackbarPos(name, 'Blob Detector')

    params = cv2.SimpleBlobDetector_Params()
    params.minThreshold = max(pos('Min Threshold'), 1)
    params.maxThreshold = pos('Max Threshold')
    params.thresholdStep = pos('Threshold Step')
    params.minDistBetweenBlobs = pos('Min Distance')
    params.minRepeatability = int(pos('Min Repeatability'))
    params.filterByArea = pos('Area')
    params.minArea = max(pos('Min Area'), 1)
    params.maxArea = pos('Max Area')
    params.filterByCircularity = pos('Circularity')
    params.minCircularity = pos('Min Circularity') / 100.
    params.maxCircularity = pos('Max Circularity') / 100.
    params.filterByInertia = pos('Inertia')
    params.minInertiaRatio = pos('Min Inertia') / 100.
    params.maxInertiaRatio = pos('Max Inertia') / 100.
    params.filterByConvexity = pos('Convexity')
    params.minConvexity = pos('Min Convexity') / 100.
    params.maxConvexity = pos('Max Convexity') / 100.
    return params
def setup(minArea=16, maxArea=75, minThreshold=0, maxThreshold=150):
    """Create the 'Blob Detector' window and all its tuning trackbars.

    The keyword arguments seed the initial positions of the corresponding
    sliders; every slider fires `update` on change.
    """
    # (name, initial value, max value) for each slider, in creation order.
    sliders = [
        ('Min Threshold', minThreshold, 255),
        ('Max Threshold', maxThreshold, 255),
        ('Threshold Step', 10, 30),
        ('Min Distance', 10, 300),
        ('Min Repeatability', 2, 20),
        ('Area', 1, 1),
        ('Min Area', minArea, 300),
        ('Max Area', maxArea, 1000),
        ('Circularity', 0, 1),
        ('Min Circularity', 90, 100),
        ('Max Circularity', 100, 100),
        ('Inertia', 0, 1),
        ('Min Inertia', 90, 100),
        ('Max Inertia', 100, 100),
        ('Convexity', 0, 1),
        ('Min Convexity', 90, 100),
        ('Max Convexity', 100, 100),
    ]
    cv2.namedWindow('Blob Detector')
    for name, value, max_value in sliders:
        cv2.createTrackbar(name, 'Blob Detector', value, max_value, update)
def blobDetectorParameterTune(image):
    """Interactive loop for tuning SimpleBlobDetector parameters on `image`.

    Opens the slider window, then repeatedly re-runs detection with the
    current slider values and shows the keypoints until ESC is pressed.
    """
    setup()
    img = image
    while True:
        # waitKey returns -1 on timeout; masking with 0xFF maps that to 255,
        # so the `ch == -1` branch below can never fire — only ESC (27)
        # actually exits the loop.  NOTE(review): dead condition.
        ch = 0xFF & cv2.waitKey(100)
        if ch == 27 or ch == -1:
            break
        params = update()
        detector = cv2.SimpleBlobDetector_create(params)
        frame = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # inverse the image (detector looks for dark blobs on light ground)
        frame2 = np.invert(frame)
        # Clamp bright values so thresholding sweeps stay within range.
        ret,thresh = cv2.threshold(frame2,params.maxThreshold,255,cv2.THRESH_TRUNC)
        # Detect blobs.
        keypoints = detector.detect(thresh)
        output = img.copy()
        # Rich keypoints draw circles sized to the detected blob.
        output = cv2.drawKeypoints(output, keypoints, np.array([]), (0,0,255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
        cv2.imshow('Blob Detector Output', output)
    cv2.destroyAllWindows()
'''
import sys, cv2
sys.path.append(r'../python')
from dDisplay import blobDetectorParameters as blob
img = cv2.imread('blobShared.png')
blob.blobDetectorParameterTune(img)
'''
| 49.518987 | 122 | 0.704499 |
de704ed20294488b0219ab22b985451e51b62bc7 | 7,956 | py | Python | examples/catalyst_rl/dqn.py | Thiefwerty/catalyst | 58c4e0e3ca3928f7402cfc750fbc9a77e44a2b66 | [
"Apache-2.0"
] | 2,693 | 2019-01-23T19:16:12.000Z | 2022-03-31T02:12:42.000Z | examples/catalyst_rl/dqn.py | Thiefwerty/catalyst | 58c4e0e3ca3928f7402cfc750fbc9a77e44a2b66 | [
"Apache-2.0"
] | 763 | 2019-01-22T20:12:56.000Z | 2022-03-27T18:36:10.000Z | examples/catalyst_rl/dqn.py | Thiefwerty/catalyst | 58c4e0e3ca3928f7402cfc750fbc9a77e44a2b66 | [
"Apache-2.0"
] | 445 | 2019-01-23T17:07:09.000Z | 2022-03-30T05:38:45.000Z | # flake8: noqa
from typing import Sequence
import os
from buffer import OffpolicyReplayBuffer
from db import RedisDB
from misc import GameCallback, soft_update, Trajectory
import numpy as np
from sampler import ISampler
import gym
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from catalyst import dl, metrics, utils
os.environ["OMP_NUM_THREADS"] = "1"
os.environ["MKL_NUM_THREADS"] = "1"
# DQN
class Sampler(ISampler):
    """Collects trajectories from a gym env with an epsilon-greedy DQN actor."""

    def get_action(self, env, actor: nn.Module, state: np.array, epsilon: float = -1) -> int:
        """Return an epsilon-greedy action for `state`.

        With probability `epsilon` a random action is sampled from the env;
        otherwise the argmax over the actor's Q-values is taken.  `epsilon`
        of None (or any value <= 0) acts greedily.
        """
        # Fix: epsilon is None on the greedy path (sampler_index is None in
        # get_trajectory); the original `np.random.random() < None`
        # comparison raises TypeError on Python 3.
        if epsilon is not None and np.random.random() < epsilon:
            action = env.action_space.sample()
        else:
            state_t = torch.tensor(state[None], dtype=torch.float32)
            q_values = actor(state_t).detach().cpu().numpy()[0]
            action = np.argmax(q_values)
        return int(action)

    def get_trajectory(
        self,
        env: gym.Env,
        actor: nn.Module,
        device,
        sampler_index: int = None,
        trajectory_index: int = None,
        t_max: int = 1000,
    ) -> Trajectory:
        """Roll out one episode (at most `t_max` steps) and return it.

        When `sampler_index` is given, exploration decays with the
        trajectory count and is scaled down per sampler; otherwise the
        policy acts greedily.
        """
        if sampler_index is not None:
            # Per-sampler exploration schedule, decaying geometrically.
            epsilon = float(pow(0.9996, trajectory_index + 1) / (sampler_index + 1))
        else:
            epsilon = None  # greedy

        state = env.reset()
        observations, actions, rewards, dones = [], [], [], []

        for t in range(t_max):
            action = self.get_action(env, actor, state=state, epsilon=epsilon)
            next_state, reward, done, _ = env.step(action)

            observations.append(state)
            actions.append(action)
            rewards.append(reward)
            dones.append(done)
            state = next_state
            if done:
                break

        trajectory = Trajectory(observations, actions, rewards, dones)
        return trajectory
def get_network(env, num_hidden: int = 128):
    """Build a two-hidden-layer MLP Q-network for the given gym env.

    The body maps observations to `num_hidden` features; the head maps
    features to one Q-value per discrete action.
    """
    inner_fn = utils.get_optimal_inner_init(nn.ReLU)
    outer_fn = utils.outer_init

    obs_dim = env.observation_space.shape[0]
    body = torch.nn.Sequential(
        nn.Linear(obs_dim, num_hidden),
        nn.ReLU(),
        nn.Linear(num_hidden, num_hidden),
        nn.ReLU(),
    )
    head = nn.Linear(num_hidden, env.action_space.n)

    # Catalyst initialization: inner layers vs. output layer get
    # different schemes.
    body.apply(inner_fn)
    head.apply(outer_fn)

    return torch.nn.Sequential(body, head)
# Catalyst.RL
class CustomRunner(dl.Runner):
    """Catalyst runner implementing the DQN TD update with a soft-updated
    target network.

    `origin` is the trained network; `target` provides stable bootstrap
    Q-values and is moved toward `origin` by factor `tau` every
    `tau_period` batches.
    """

    def __init__(
        self,
        *,
        gamma: float,
        tau: float,
        tau_period: int = 1,
        origin_key: str = "origin",
        target_key: str = "target",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.gamma: float = gamma            # discount factor
        self.tau: float = tau                # soft-update interpolation
        self.tau_period: int = tau_period    # batches between soft updates
        self.origin_key: str = origin_key    # key of trained net in self.model
        self.target_key: str = target_key    # key of target net in self.model
        self.origin_network: nn.Module = None
        self.target_network: nn.Module = None

    def on_stage_start(self, runner: dl.IRunner):
        """Resolve the two networks and hard-copy origin -> target."""
        super().on_stage_start(runner)
        self.origin_network = self.model[self.origin_key]
        self.target_network = self.model[self.target_key]
        # tau=1.0 makes the target an exact copy at stage start.
        soft_update(self.target_network, self.origin_network, 1.0)

    def on_loader_start(self, runner: dl.IRunner):
        """Reset the running loss meter for the new loader epoch."""
        super().on_loader_start(runner)
        self.meters = {key: metrics.AdditiveMetric(compute_on_call=False) for key in ["loss"]}

    def handle_batch(self, batch: Sequence[np.array]):
        """One DQN train/valid step on a replay-buffer batch."""
        # model train/valid step
        # NOTE: squeeze_ is in-place on the batch tensors.
        states, actions, rewards, next_states, dones = (
            batch["state"].squeeze_(1).to(torch.float32),
            batch["action"].to(torch.int64),
            batch["reward"].to(torch.float32),
            batch["next_state"].squeeze_(1).to(torch.float32),
            batch["done"].to(torch.bool),
        )

        # get q-values for all actions in current states
        state_qvalues = self.origin_network(states)
        # select q-values for chosen actions
        state_action_qvalues = state_qvalues.gather(1, actions.unsqueeze(-1)).squeeze(-1)

        # compute q-values for all actions in next states
        # compute V*(next_states) using predicted next q-values
        # at the last state we shall use simplified formula:
        # Q(s,a) = r(s,a) since s' doesn't exist
        with torch.no_grad():
            next_state_qvalues = self.target_network(next_states)
            next_state_values = next_state_qvalues.max(1)[0]
            next_state_values[dones] = 0.0
            next_state_values = next_state_values.detach()

        # compute "target q-values" for loss,
        # it's what's inside square parentheses in the above formula.
        target_state_action_qvalues = next_state_values * self.gamma + rewards

        # mean squared error loss to minimize
        loss = self.criterion(state_action_qvalues, target_state_action_qvalues.detach())
        self.batch_metrics.update({"loss": loss})
        for key in ["loss"]:
            self.meters[key].update(self.batch_metrics[key].item(), self.batch_size)

        if self.is_train_loader:
            loss.backward()
            self.optimizer.step()
            self.optimizer.zero_grad()

            # Periodically nudge the target network toward the origin.
            if self.global_batch_step % self.tau_period == 0:
                soft_update(self.target_network, self.origin_network, self.tau)

    def on_loader_end(self, runner: dl.IRunner):
        """Publish the epoch-averaged loss to the loader metrics."""
        for key in ["loss"]:
            self.loader_metrics[key] = self.meters[key].compute()[0]
        super().on_loader_end(runner)
if __name__ == "__main__":
    # ---- hyperparameters -------------------------------------------------
    # data
    num_samplers = 2
    batch_size = 256
    epoch_size = int(1e2) * batch_size
    buffer_size = int(1e5)
    # runner settings, ~training
    gamma = 0.99
    tau = 0.01
    tau_period = 1  # in batches
    # optimization
    lr = 3e-4

    # Redis-backed DB used to exchange weights/trajectories with samplers.
    db_server = RedisDB()

    # You can change game
    # env_name = "LunarLander-v2"
    env_name = "CartPole-v1"
    env = gym.make(env_name)

    replay_buffer = OffpolicyReplayBuffer(
        observation_space=env.observation_space,
        action_space=env.action_space,
        epoch_len=epoch_size,
        capacity=buffer_size,
        n_step=1,
        gamma=gamma,
        history_len=1,
    )

    # Origin (trained) and target (frozen, soft-updated) networks.
    network, target_network = get_network(env), get_network(env)
    utils.set_requires_grad(target_network, requires_grad=False)
    models = nn.ModuleDict({"origin": network, "target": target_network})
    criterion = torch.nn.MSELoss()
    optimizer = torch.optim.Adam(network.parameters(), lr=lr)
    loaders = {"train_game": DataLoader(replay_buffer, batch_size=batch_size)}

    runner = CustomRunner(gamma=gamma, tau=tau, tau_period=tau_period)
    runner.train(
        # for simplicity reasons, let's run everything on single gpu
        engine=dl.DeviceEngine("cuda"),
        model=models,
        criterion=criterion,
        optimizer=optimizer,
        loaders=loaders,
        logdir="./logs_dqn",
        num_epochs=50,
        verbose=True,
        valid_loader="_epoch_",
        valid_metric="reward",
        minimize_valid_metric=False,
        load_best_on_end=True,
        callbacks=[
            GameCallback(
                sampler_fn=Sampler,
                env=env,
                replay_buffer=replay_buffer,
                db_server=db_server,
                actor_key="origin",
                num_samplers=num_samplers,
                min_transactions_num=epoch_size,
            )
        ],
    )

    # env = gym.wrappers.Monitor(gym.make(env_name), directory="videos_dqn", force=True)
    # generate_sessions(env=env, network=runner.model["origin"], num_sessions=100)
    # env.close()

    # # show video
    # from IPython.display import HTML
    # import os
    #
    # video_names = list(filter(lambda s: s.endswith(".mp4"), os.listdir("./videos_dqn/")))
    #
    # HTML("""
    # <video width="640" height="480" controls>
    # <source src="{}" type="video/mp4">
    # </video>
    # """.format("./videos/" + video_names[-1]))
    # # this may or may not be _last_ video. Try other indices
b777e28f1a899ec4019e29d7680545dec9e50a1b | 924 | py | Python | geometry/ib3shapes.py | matinraayai/ibex | 7792d1299a04da360faa1cd8a16a4c5a3990b48c | [
"MIT"
] | 3 | 2018-08-10T21:11:09.000Z | 2019-07-26T13:47:24.000Z | geometry/ib3shapes.py | matinraayai/ibex | 7792d1299a04da360faa1cd8a16a4c5a3990b48c | [
"MIT"
] | null | null | null | geometry/ib3shapes.py | matinraayai/ibex | 7792d1299a04da360faa1cd8a16a4c5a3990b48c | [
"MIT"
] | 6 | 2018-03-05T20:14:11.000Z | 2020-07-23T18:39:16.000Z | from copy import deepcopy
from ibex.utilities.constants import *
class IBBox:
    """Axis-aligned 3-D bounding box stored as per-axis min/max corners."""

    def __init__(self, mins, maxs):
        # Copy the inputs so the box owns (and may mutate) its corners.
        self.mins = list(mins)
        self.maxs = list(maxs)

    def __str__(self):
        return '({},{},{})-({},{},{})'.format(self.mins[IB_X], self.mins[IB_Y], self.mins[IB_Z], self.maxs[IB_X], self.maxs[IB_Y], self.maxs[IB_Z])

    def Intersection(self, other):
        """Shrink this box in place to its overlap with `other`.

        Along each axis the intersection keeps the larger minimum and the
        smaller maximum; no check is made for an empty (inverted) result.
        """
        for axis in (IB_X, IB_Y, IB_Z):
            self.mins[axis] = max(self.mins[axis], other.mins[axis])
            self.maxs[axis] = min(self.maxs[axis], other.maxs[axis])
410b9567103e14643c532178552c0b61c56e2b73 | 16,843 | py | Python | sysroot/usr/lib/python3/dist-packages/urllib3/poolmanager.py | 219-design/sysroot_qt5.15.0_binaries_armv6zk_rpizero | c3ad917b65b970c451148391ef1c2483593702ed | [
"MIT"
] | null | null | null | sysroot/usr/lib/python3/dist-packages/urllib3/poolmanager.py | 219-design/sysroot_qt5.15.0_binaries_armv6zk_rpizero | c3ad917b65b970c451148391ef1c2483593702ed | [
"MIT"
] | null | null | null | sysroot/usr/lib/python3/dist-packages/urllib3/poolmanager.py | 219-design/sysroot_qt5.15.0_binaries_armv6zk_rpizero | c3ad917b65b970c451148391ef1c2483593702ed | [
"MIT"
] | null | null | null | from __future__ import absolute_import
import collections
import functools
import logging
from ._collections import RecentlyUsedContainer
from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool
from .connectionpool import port_by_scheme
from .exceptions import LocationValueError, MaxRetryError, ProxySchemeUnknown
from six.moves.urllib.parse import urljoin
from .request import RequestMethods
from .util.url import parse_url
from .util.retry import Retry
# Public API of this module.
__all__ = ['PoolManager', 'ProxyManager', 'proxy_from_url']


log = logging.getLogger(__name__)

# Keyword arguments that only make sense for HTTPS pools; stripped from the
# context when building a plain-HTTP pool.
SSL_KEYWORDS = ('key_file', 'cert_file', 'cert_reqs', 'ca_certs',
                'ssl_version', 'ca_cert_dir', 'ssl_context')

# All known keyword arguments that could be provided to the pool manager, its
# pools, or the underlying connections. This is used to construct a pool key.
_key_fields = (
    'key_scheme',  # str
    'key_host',  # str
    'key_port',  # int
    'key_timeout',  # int or float or Timeout
    'key_retries',  # int or Retry
    'key_strict',  # bool
    'key_block',  # bool
    'key_source_address',  # str
    'key_key_file',  # str
    'key_cert_file',  # str
    'key_cert_reqs',  # str
    'key_ca_certs',  # str
    'key_ssl_version',  # str
    'key_ca_cert_dir',  # str
    'key_ssl_context',  # instance of ssl.SSLContext or urllib3.util.ssl_.SSLContext
    'key_maxsize',  # int
    'key_headers',  # dict
    'key__proxy',  # parsed proxy url
    'key__proxy_headers',  # dict
    'key_socket_options',  # list of (level (int), optname (int), value (int or str)) tuples
    'key__socks_options',  # dict
    'key_assert_hostname',  # bool or string
    'key_assert_fingerprint',  # str
    'key_server_hostname',  # str
)

#: The namedtuple class used to construct keys for the connection pool.
#: All custom key schemes should include the fields in this key at a minimum.
PoolKey = collections.namedtuple('PoolKey', _key_fields)
def _default_key_normalizer(key_class, request_context):
    """
    Create a pool key out of a request context dictionary.

    According to RFC 3986, both the scheme and host are case-insensitive.
    Therefore, this function normalizes both before constructing the pool
    key for an HTTPS request. If you wish to change this behaviour, provide
    alternate callables to ``key_fn_by_scheme``.

    :param key_class:
        The class to use when constructing the key. This should be a namedtuple
        with the ``scheme`` and ``host`` keys at a minimum.
    :type key_class: namedtuple
    :param request_context:
        A dictionary-like object that contain the context for a request.
    :type request_context: dict

    :return: A namedtuple that can be used as a connection pool key.
    :rtype: PoolKey
    """
    # Work on a copy: we normalize values and rename keys below.
    context = request_context.copy()

    # Scheme and host compare case-insensitively (RFC 3986).
    context['scheme'] = context['scheme'].lower()
    context['host'] = context['host'].lower()

    # dict-valued entries must become hashable frozensets of items.
    for dict_key in ('headers', '_proxy_headers', '_socks_options'):
        value = context.get(dict_key)
        if value is not None:
            context[dict_key] = frozenset(value.items())

    # socket_options may be a list; replace it with a hashable tuple.
    socket_opts = context.get('socket_options')
    if socket_opts is not None:
        context['socket_options'] = tuple(socket_opts)

    # namedtuple fields cannot start with '_', so every key gets a
    # 'key_' prefix to match the PoolKey field names.
    for name in list(context):
        context['key_' + name] = context.pop(name)

    # Any field the context did not provide defaults to None.
    for field in key_class._fields:
        context.setdefault(field, None)

    return key_class(**context)
#: A dictionary that maps a scheme to a callable that creates a pool key.
#: This can be used to alter the way pool keys are constructed, if desired.
#: Each PoolManager makes a copy of this dictionary so they can be configured
#: globally here, or individually on the instance.
key_fn_by_scheme = {
    'http': functools.partial(_default_key_normalizer, PoolKey),
    'https': functools.partial(_default_key_normalizer, PoolKey),
}

# Concrete pool class to instantiate for each URL scheme.
pool_classes_by_scheme = {
    'http': HTTPConnectionPool,
    'https': HTTPSConnectionPool,
}
class PoolManager(RequestMethods):
    """
    Allows for arbitrary requests while transparently keeping track of
    necessary connection pools for you.

    :param num_pools:
        Number of connection pools to cache before discarding the least
        recently used pool.

    :param headers:
        Headers to include with all requests, unless other headers are given
        explicitly.

    :param \\**connection_pool_kw:
        Additional parameters are used to create fresh
        :class:`urllib3.connectionpool.ConnectionPool` instances.

    Example::

        >>> manager = PoolManager(num_pools=2)
        >>> r = manager.request('GET', 'http://google.com/')
        >>> r = manager.request('GET', 'http://google.com/mail')
        >>> r = manager.request('GET', 'http://yahoo.com/')
        >>> len(manager.pools)
        2

    """

    # Overridden by ProxyManager; None means direct connections.
    proxy = None

    def __init__(self, num_pools=10, headers=None, **connection_pool_kw):
        RequestMethods.__init__(self, headers)
        self.connection_pool_kw = connection_pool_kw
        # LRU container: evicted pools are closed via dispose_func.
        self.pools = RecentlyUsedContainer(num_pools,
                                           dispose_func=lambda p: p.close())

        # Locally set the pool classes and keys so other PoolManagers can
        # override them.
        self.pool_classes_by_scheme = pool_classes_by_scheme
        self.key_fn_by_scheme = key_fn_by_scheme.copy()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.clear()
        # Return False to re-raise any potential exceptions
        return False

    def _new_pool(self, scheme, host, port, request_context=None):
        """
        Create a new :class:`ConnectionPool` based on host, port, scheme, and
        any additional pool keyword arguments.

        If ``request_context`` is provided, it is provided as keyword arguments
        to the pool class used. This method is used to actually create the
        connection pools handed out by :meth:`connection_from_url` and
        companion methods. It is intended to be overridden for customization.
        """
        pool_cls = self.pool_classes_by_scheme[scheme]
        if request_context is None:
            request_context = self.connection_pool_kw.copy()

        # Although the context has everything necessary to create the pool,
        # this function has historically only used the scheme, host, and port
        # in the positional args. When an API change is acceptable these can
        # be removed.
        for key in ('scheme', 'host', 'port'):
            request_context.pop(key, None)

        if scheme == 'http':
            # SSL-only kwargs are meaningless for plain HTTP pools.
            for kw in SSL_KEYWORDS:
                request_context.pop(kw, None)

        return pool_cls(host, port, **request_context)

    def clear(self):
        """
        Empty our store of pools and direct them all to close.

        This will not affect in-flight connections, but they will not be
        re-used after completion.
        """
        self.pools.clear()

    def connection_from_host(self, host, port=None, scheme='http', pool_kwargs=None):
        """
        Get a :class:`ConnectionPool` based on the host, port, and scheme.

        If ``port`` isn't given, it will be derived from the ``scheme`` using
        ``urllib3.connectionpool.port_by_scheme``. If ``pool_kwargs`` is
        provided, it is merged with the instance's ``connection_pool_kw``
        variable and used to create the new connection pool, if one is
        needed.
        """

        if not host:
            raise LocationValueError("No host specified.")

        request_context = self._merge_pool_kwargs(pool_kwargs)
        request_context['scheme'] = scheme or 'http'
        if not port:
            port = port_by_scheme.get(request_context['scheme'].lower(), 80)
        request_context['port'] = port
        request_context['host'] = host

        return self.connection_from_context(request_context)

    def connection_from_context(self, request_context):
        """
        Get a :class:`ConnectionPool` based on the request context.

        ``request_context`` must at least contain the ``scheme`` key and its
        value must be a key in ``key_fn_by_scheme`` instance variable.
        """
        scheme = request_context['scheme'].lower()
        pool_key_constructor = self.key_fn_by_scheme[scheme]
        pool_key = pool_key_constructor(request_context)

        return self.connection_from_pool_key(pool_key, request_context=request_context)

    def connection_from_pool_key(self, pool_key, request_context=None):
        """
        Get a :class:`ConnectionPool` based on the provided pool key.

        ``pool_key`` should be a namedtuple that only contains immutable
        objects. At a minimum it must have the ``scheme``, ``host``, and
        ``port`` fields.
        """
        with self.pools.lock:
            # If the scheme, host, or port doesn't match existing open
            # connections, open a new ConnectionPool.
            pool = self.pools.get(pool_key)
            if pool:
                return pool

            # Make a fresh ConnectionPool of the desired type
            scheme = request_context['scheme']
            host = request_context['host']
            port = request_context['port']
            pool = self._new_pool(scheme, host, port, request_context=request_context)
            self.pools[pool_key] = pool

        return pool

    def connection_from_url(self, url, pool_kwargs=None):
        """
        Similar to :func:`urllib3.connectionpool.connection_from_url`.

        If ``pool_kwargs`` is not provided and a new pool needs to be
        constructed, ``self.connection_pool_kw`` is used to initialize
        the :class:`urllib3.connectionpool.ConnectionPool`. If ``pool_kwargs``
        is provided, it is used instead. Note that if a new pool does not
        need to be created for the request, the provided ``pool_kwargs`` are
        not used.
        """
        u = parse_url(url)
        return self.connection_from_host(u.host, port=u.port, scheme=u.scheme,
                                         pool_kwargs=pool_kwargs)

    def _merge_pool_kwargs(self, override):
        """
        Merge a dictionary of override values for self.connection_pool_kw.

        This does not modify self.connection_pool_kw and returns a new dict.
        Any keys in the override dictionary with a value of ``None`` are
        removed from the merged dictionary.
        """
        base_pool_kwargs = self.connection_pool_kw.copy()
        if override:
            for key, value in override.items():
                if value is None:
                    # None acts as a deletion marker for the base kwargs.
                    try:
                        del base_pool_kwargs[key]
                    except KeyError:
                        pass
                else:
                    base_pool_kwargs[key] = value
        return base_pool_kwargs

    def urlopen(self, method, url, redirect=True, **kw):
        """
        Same as :meth:`urllib3.connectionpool.HTTPConnectionPool.urlopen`
        with custom cross-host redirect logic and only sends the request-uri
        portion of the ``url``.

        The given ``url`` parameter must be absolute, such that an appropriate
        :class:`urllib3.connectionpool.ConnectionPool` can be chosen for it.
        """
        u = parse_url(url)
        conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme)

        # Redirects are handled here (cross-host aware), not by the pool.
        kw['assert_same_host'] = False
        kw['redirect'] = False

        if 'headers' not in kw:
            kw['headers'] = self.headers.copy()

        if self.proxy is not None and u.scheme == "http":
            # Plain-HTTP proxying requires the absolute URL on the wire.
            response = conn.urlopen(method, url, **kw)
        else:
            response = conn.urlopen(method, u.request_uri, **kw)

        redirect_location = redirect and response.get_redirect_location()
        if not redirect_location:
            return response

        # Support relative URLs for redirecting.
        redirect_location = urljoin(url, redirect_location)

        # RFC 7231, Section 6.4.4
        if response.status == 303:
            method = 'GET'

        retries = kw.get('retries')
        if not isinstance(retries, Retry):
            retries = Retry.from_int(retries, redirect=redirect)

        # Strip headers marked as unsafe to forward to the redirected location.
        # Check remove_headers_on_redirect to avoid a potential network call within
        # conn.is_same_host() which may use socket.gethostbyname() in the future.
        if (retries.remove_headers_on_redirect
                and not conn.is_same_host(redirect_location)):
            for header in retries.remove_headers_on_redirect:
                kw['headers'].pop(header, None)

        try:
            retries = retries.increment(method, url, response=response, _pool=conn)
        except MaxRetryError:
            if retries.raise_on_redirect:
                raise
            return response

        kw['retries'] = retries
        kw['redirect'] = redirect

        log.info("Redirecting %s -> %s", url, redirect_location)
        # Recurse to follow the redirect chain until exhausted or done.
        return self.urlopen(method, redirect_location, **kw)
class ProxyManager(PoolManager):
    """
    Behaves just like :class:`PoolManager`, but sends all requests through
    the defined proxy, using the CONNECT method for HTTPS URLs.

    :param proxy_url:
        The URL of the proxy to be used.

    :param proxy_headers:
        A dictionary containing headers that will be sent to the proxy. In case
        of HTTP they are being sent with each request, while in the
        HTTPS/CONNECT case they are sent only once. Could be used for proxy
        authentication.

    Example:
        >>> proxy = urllib3.ProxyManager('http://localhost:3128/')
        >>> r1 = proxy.request('GET', 'http://google.com/')
        >>> r2 = proxy.request('GET', 'http://httpbin.org/')
        >>> len(proxy.pools)
        1
        >>> r3 = proxy.request('GET', 'https://httpbin.org/')
        >>> r4 = proxy.request('GET', 'https://twitter.com/')
        >>> len(proxy.pools)
        3
    """

    def __init__(self, proxy_url, num_pools=10, headers=None,
                 proxy_headers=None, **connection_pool_kw):

        # Accept an existing pool object in place of a URL string.
        if isinstance(proxy_url, HTTPConnectionPool):
            proxy_url = '%s://%s:%i' % (proxy_url.scheme, proxy_url.host,
                                        proxy_url.port)
        proxy = parse_url(proxy_url)
        if not proxy.port:
            # Default the port from the proxy scheme (80 if unknown).
            port = port_by_scheme.get(proxy.scheme, 80)
            proxy = proxy._replace(port=port)

        if proxy.scheme not in ("http", "https"):
            raise ProxySchemeUnknown(proxy.scheme)

        self.proxy = proxy
        self.proxy_headers = proxy_headers or {}

        # Forward the proxy settings into every pool this manager creates.
        connection_pool_kw['_proxy'] = self.proxy
        connection_pool_kw['_proxy_headers'] = self.proxy_headers

        super(ProxyManager, self).__init__(
            num_pools, headers, **connection_pool_kw)

    def connection_from_host(self, host, port=None, scheme='http', pool_kwargs=None):
        # HTTPS tunnels through CONNECT, so pools are keyed on the target
        # host; plain HTTP goes straight to the proxy, so pools are keyed
        # on the proxy itself.
        if scheme == "https":
            return super(ProxyManager, self).connection_from_host(
                host, port, scheme, pool_kwargs=pool_kwargs)

        return super(ProxyManager, self).connection_from_host(
            self.proxy.host, self.proxy.port, self.proxy.scheme, pool_kwargs=pool_kwargs)

    def _set_proxy_headers(self, url, headers=None):
        """
        Sets headers needed by proxies: specifically, the Accept and Host
        headers. Only sets headers not provided by the user.
        """
        headers_ = {'Accept': '*/*'}

        netloc = parse_url(url).netloc
        if netloc:
            headers_['Host'] = netloc

        if headers:
            headers_.update(headers)
        return headers_

    def urlopen(self, method, url, redirect=True, **kw):
        "Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute."
        u = parse_url(url)
        if u.scheme == "http":
            # For proxied HTTPS requests, httplib sets the necessary headers
            # on the CONNECT to the proxy. For HTTP, we'll definitely
            # need to set 'Host' at the very least.
            headers = kw.get('headers', self.headers)
            kw['headers'] = self._set_proxy_headers(url, headers)

        return super(ProxyManager, self).urlopen(method, url, redirect=redirect, **kw)
def proxy_from_url(url, **kw):
    # Convenience constructor: build a ProxyManager from a proxy URL string,
    # passing any extra keyword arguments straight through.
    return ProxyManager(proxy_url=url, **kw)
| 37.345898 | 92 | 0.643531 |
63bc39c2475de1798a07474cae5eca4c30949561 | 4,228 | py | Python | robot/Player.py | 1509098778/wukong-robot | 0c80bdc885a04a2e33051ddf4be119e5f8a9c194 | [
"MIT"
] | 1 | 2019-02-25T06:05:22.000Z | 2019-02-25T06:05:22.000Z | robot/Player.py | 1509098778/wukong-robot | 0c80bdc885a04a2e33051ddf4be119e5f8a9c194 | [
"MIT"
] | null | null | null | robot/Player.py | 1509098778/wukong-robot | 0c80bdc885a04a2e33051ddf4be119e5f8a9c194 | [
"MIT"
] | 1 | 2019-02-20T15:21:18.000Z | 2019-02-20T15:21:18.000Z | # -*- coding: utf-8-*-
import subprocess
import tempfile
import threading
import os
import wave
from . import utils
import pyaudio
from robot import logging
from ctypes import CFUNCTYPE, c_char_p, c_int, cdll
from contextlib import contextmanager
logger = logging.getLogger(__name__)
def py_error_handler(filename, line, function, err, fmt):
    """No-op ALSA error handler: silently discards every reported error."""
    return None
# C function-pointer type matching ALSA's error-handler callback signature:
# void handler(const char *file, int line, const char *function,
#              int err, const char *fmt, ...).
ERROR_HANDLER_FUNC = CFUNCTYPE(None, c_char_p, c_int, c_char_p, c_int, c_char_p)
# Wrap the no-op Python handler so it can be installed via ctypes.
c_error_handler = ERROR_HANDLER_FUNC(py_error_handler)
@contextmanager
def no_alsa_error():
    # Context manager that silences ALSA's noisy stderr diagnostics by
    # installing the no-op error handler for the duration of the block.
    # If libasound cannot be loaded (e.g. on non-Linux systems), the body
    # simply runs unchanged.
    try:
        asound = cdll.LoadLibrary('libasound.so')
        asound.snd_lib_error_set_handler(c_error_handler)
        yield
        # Restore ALSA's default handler on the success path.
        # NOTE(review): if the with-body raises, control re-enters the
        # except clause below and the default handler is never restored —
        # confirm whether that is acceptable.
        asound.snd_lib_error_set_handler(None)
    except:
        yield
        pass
def play(fname, onCompleted=None):
    """Play *fname* asynchronously via sox, calling *onCompleted* when done."""
    # WavPlayer does not work well on my Macbook,
    # henceforce I choose SoxPlayer
    # player = getPlayerByFileName(fname)
    sox_player = SoxPlayer()
    sox_player.play(fname, onCompleted)
def getPlayerByFileName(fname):
    """Return a player suited to *fname*'s extension.

    '.mp3' maps to SoxPlayer, '.wav' to WavPlayer; any other extension
    yields None (implicitly), matching the historical behavior.
    """
    extension = os.path.splitext(fname)[1]
    if extension == '.mp3':
        return SoxPlayer()
    if extension == '.wav':
        return WavPlayer()
class AbstractSoundPlayer(threading.Thread):
    """Base interface for asynchronous sound players.

    Every hook is a no-op here; concrete players override what they need.
    """

    def __init__(self, **kwargs):
        super(AbstractSoundPlayer, self).__init__()

    def play(self):
        """Start asynchronous playback."""

    def play_block(self):
        """Play synchronously, returning only once playback finishes."""

    def stop(self):
        """Abort any in-progress playback."""

    def is_playing(self):
        """Report whether playback is currently in progress."""
        return False
class SoxPlayer(AbstractSoundPlayer):
    # Plays audio by shelling out to sox's `play` command in a worker thread.
    SLUG = 'SoxPlayer'

    def __init__(self, **kwargs):
        super(SoxPlayer, self).__init__(**kwargs)
        self.playing = False  # True while the sox subprocess is running.
        self.pipe = None      # Handle to the spawned `play` subprocess.
        self.delete = False   # Whether to delete the source file afterwards.
        self.volume = 1       # Passed to `play -v`.

    def run(self):
        # Thread body: spawn `play` and block until it exits.
        cmd = ['play', '-v', str(self.volume), str(self.src)]
        logger.debug('Executing %s', ' '.join(cmd))
        with tempfile.TemporaryFile() as f:
            # Capture stdout and stderr into one temp file for logging.
            self.pipe = subprocess.Popen(cmd, stdout=f, stderr=f)
            self.playing = True
            self.pipe.wait()
            self.playing = False
            f.seek(0)
            output = f.read()
            if output:
                logger.debug("play Output was: '%s'", output)
        if self.delete:
            utils.check_and_delete(self.src)
        if self.onCompleted:
            self.onCompleted()

    def play(self, src, delete=False, onCompleted=None, volume=1):
        # Store the playback parameters and kick off the worker thread.
        self.src = src
        self.delete = delete
        self.onCompleted = onCompleted
        self.volume = volume
        self.start()

    def play_block(self):
        # Synchronous playback: run the thread body in the caller's thread.
        self.run()

    def stop(self):
        if self.pipe:
            # Drop the completion callback so the killed run() won't fire it.
            self.onCompleted = None
            self.pipe.kill()
            if self.delete:
                utils.check_and_delete(self.src)

    def is_playing(self):
        return self.playing
class WavPlayer(AbstractSoundPlayer):
    # Plays .wav files through PyAudio in a worker thread.
    SLUG = 'WavPlayer'

    def __init__(self, **kwargs):
        super(WavPlayer, self).__init__(**kwargs)
        self.playing = False
        # BUGFIX: this flag was previously stored as ``self.stop``, which
        # shadowed the stop() method and made ``instance.stop()`` raise
        # TypeError ('bool' object is not callable). Renamed so the method
        # stays callable.
        self._stop_requested = False

    def run(self):
        """Thread body: stream the wav file to the default output device."""
        CHUNK = 1024
        logger.debug("playing wave %s", self.src)
        f = wave.open(self.src, "rb")
        with no_alsa_error():
            audio = pyaudio.PyAudio()
        stream = audio.open(
            format=audio.get_format_from_width(f.getsampwidth()),
            channels=f.getnchannels(),
            rate=f.getframerate(),
            input=False,
            output=True)
        self.playing = True
        stream.start_stream()
        data = f.readframes(CHUNK)
        # BUGFIX: wave.readframes() returns bytes, so the old
        # ``data != ''`` comparison was always True and the loop could
        # spin forever writing empty buffers once the file was exhausted.
        # Testing truthiness ends the loop at end-of-file. (A leftover
        # debug print() was also removed here.)
        while data and not self._stop_requested:
            stream.write(data)
            data = f.readframes(CHUNK)
        self.playing = False
        stream.stop_stream()
        stream.close()
        audio.terminate()
        if self.onCompleted:
            self.onCompleted()

    def play(self, src, onCompleted=None):
        # Store the playback parameters and kick off the worker thread.
        self.src = src
        self.onCompleted = onCompleted
        self.start()

    def play_block(self):
        # Synchronous playback: run the thread body in the caller's thread.
        self.run()

    def stop(self):
        # Signal run() to exit its write loop, then clean up the source file.
        self._stop_requested = True
        utils.check_and_delete(self.src)

    def is_playing(self):
        return self.playing
3f6a047327235f152897369d97cf2e4fc8e4c61d | 348 | py | Python | 2018/01 - chronical calibration/01.py | Stannislav/Advent-of-Code | b9571e71e1acedf74be3bb32f03c9877f57c48dd | [
"MIT"
] | 2 | 2020-12-12T17:06:47.000Z | 2021-12-15T04:06:57.000Z | 2018/01 - chronical calibration/01.py | Stannislav/Advent-of-Code | b9571e71e1acedf74be3bb32f03c9877f57c48dd | [
"MIT"
] | null | null | null | 2018/01 - chronical calibration/01.py | Stannislav/Advent-of-Code | b9571e71e1acedf74be3bb32f03c9877f57c48dd | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Read the frequency changes, one integer per line.
changes = [int(line) for line in open('01_input.txt', 'r')]

# Part 1: the resulting frequency is just the sum of all changes.
print(f"Part 1: {sum(changes)}")

# Part 2: replay the change list repeatedly until a frequency repeats.
frequency = 0
visited = {frequency}
done = False
while not done:
    for change in changes:
        frequency += change
        if frequency in visited:
            print(f"Part 2: {frequency}")
            done = True
            break
        visited.add(frequency)
63bc715cc386e7ebb4a38dbe65276152c6662832 | 40,551 | py | Python | schema_utils_test.py | prayutsu/oppia | e82da7653f7bbfb9ded0e1ba16cd9f481ff5a786 | [
"Apache-2.0"
] | 2 | 2020-03-28T18:32:45.000Z | 2021-02-07T18:29:31.000Z | schema_utils_test.py | prayutsu/oppia | e82da7653f7bbfb9ded0e1ba16cd9f481ff5a786 | [
"Apache-2.0"
] | 35 | 2019-02-23T20:31:21.000Z | 2019-08-19T12:32:13.000Z | schema_utils_test.py | prayutsu/oppia | e82da7653f7bbfb9ded0e1ba16cd9f481ff5a786 | [
"Apache-2.0"
] | 1 | 2021-01-28T05:20:56.000Z | 2021-01-28T05:20:56.000Z | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for object schema definitions."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import inspect
from core.domain import email_manager
from core.tests import test_utils
import feconf
import python_utils
import schema_utils
# Local aliases for the schema dictionary keys defined in schema_utils, so
# the assertions in this module stay short.
SCHEMA_KEY_ITEMS = schema_utils.SCHEMA_KEY_ITEMS
SCHEMA_KEY_LEN = schema_utils.SCHEMA_KEY_LEN
SCHEMA_KEY_PROPERTIES = schema_utils.SCHEMA_KEY_PROPERTIES
SCHEMA_KEY_TYPE = schema_utils.SCHEMA_KEY_TYPE
SCHEMA_KEY_POST_NORMALIZERS = schema_utils.SCHEMA_KEY_POST_NORMALIZERS
SCHEMA_KEY_CHOICES = schema_utils.SCHEMA_KEY_CHOICES
SCHEMA_KEY_NAME = schema_utils.SCHEMA_KEY_NAME
SCHEMA_KEY_SCHEMA = schema_utils.SCHEMA_KEY_SCHEMA
SCHEMA_KEY_OBJ_TYPE = schema_utils.SCHEMA_KEY_OBJ_TYPE
SCHEMA_KEY_VALIDATORS = schema_utils.SCHEMA_KEY_VALIDATORS
# Keys that are only recognized by this test module (not re-exported from
# schema_utils).
SCHEMA_KEY_DESCRIPTION = 'description'
SCHEMA_KEY_UI_CONFIG = 'ui_config'
# This key is used for 'type: custom' objects, as a way of indicating how
# default ui_config values defined in objects.py should be replaced. The value
# is a dictionary mapping the accessor of the object value to the ui_config.
# For example, for SubtitledHtml (defined as a dict), to replace the ui_config
# of the inner html schema, the accessor/key would be 'html'. Note that the
# existing ui_config is not replaced or deleted - the frontend needs to handle
# the override of the ui_config, usually in a custom object editor.
SCHEMA_KEY_REPLACEMENT_UI_CONFIG = 'replacement_ui_config'

# The following keys are always accepted as optional keys in any schema.
OPTIONAL_SCHEMA_KEYS = [
    SCHEMA_KEY_CHOICES, SCHEMA_KEY_POST_NORMALIZERS, SCHEMA_KEY_UI_CONFIG,
    SCHEMA_KEY_VALIDATORS]

SCHEMA_TYPE_BOOL = schema_utils.SCHEMA_TYPE_BOOL
# 'Custom' objects undergo an entirely separate normalization process, defined
# in the relevant extensions/objects/models/objects.py class.
SCHEMA_TYPE_CUSTOM = schema_utils.SCHEMA_TYPE_CUSTOM
SCHEMA_TYPE_DICT = schema_utils.SCHEMA_TYPE_DICT
SCHEMA_TYPE_FLOAT = schema_utils.SCHEMA_TYPE_FLOAT
SCHEMA_TYPE_HTML = schema_utils.SCHEMA_TYPE_HTML
SCHEMA_TYPE_INT = schema_utils.SCHEMA_TYPE_INT
SCHEMA_TYPE_LIST = schema_utils.SCHEMA_TYPE_LIST
SCHEMA_TYPE_UNICODE = schema_utils.SCHEMA_TYPE_UNICODE
SCHEMA_TYPE_UNICODE_OR_NONE = schema_utils.SCHEMA_TYPE_UNICODE_OR_NONE
# Every schema 'type' value accepted by validate_schema().
ALLOWED_SCHEMA_TYPES = [
    SCHEMA_TYPE_BOOL, SCHEMA_TYPE_CUSTOM, SCHEMA_TYPE_DICT, SCHEMA_TYPE_FLOAT,
    SCHEMA_TYPE_HTML, SCHEMA_TYPE_INT, SCHEMA_TYPE_LIST,
    SCHEMA_TYPE_UNICODE, SCHEMA_TYPE_UNICODE_OR_NONE]
# Object types that a 'type: custom' schema may declare via 'obj_type'.
ALLOWED_CUSTOM_OBJ_TYPES = [
    'Filepath', 'LogicQuestion', 'MathExpressionContent', 'MusicPhrase',
    'ParameterName', 'SanitizedUrl', 'Graph', 'ImageWithRegions',
    'ListOfTabs', 'SkillSelector', 'SubtitledHtml', 'SubtitledUnicode',
    'SvgFilename', 'CustomOskLetters']
# Schemas for the UI config for the various types. All of these configuration
# options are optional additions to the schema, and, if omitted, should not
# result in any errors.
# Note to developers: please keep this in sync with
# https://github.com/oppia/oppia/wiki/Schema-Based-Forms
UI_CONFIG_SPECS = {
    SCHEMA_TYPE_BOOL: {},
    SCHEMA_TYPE_DICT: {},
    SCHEMA_TYPE_FLOAT: {},
    SCHEMA_TYPE_HTML: {
        'hide_complex_extensions': {
            'type': SCHEMA_TYPE_BOOL,
        },
        'placeholder': {
            'type': SCHEMA_TYPE_UNICODE,
        }
    },
    SCHEMA_TYPE_INT: {},
    SCHEMA_TYPE_LIST: {
        'add_element_text': {
            'type': SCHEMA_TYPE_UNICODE
        }
    },
    SCHEMA_TYPE_UNICODE: {
        'rows': {
            'type': SCHEMA_TYPE_INT,
            'validators': [{
                'id': 'is_at_least',
                'min_value': 1,
            }]
        },
        'coding_mode': {
            'type': SCHEMA_TYPE_UNICODE,
            'choices': ['none', 'python', 'coffeescript'],
        },
        'placeholder': {
            'type': SCHEMA_TYPE_UNICODE,
        },
    },
}

# Schemas for validators for the various types. Maps each schema type to the
# validator ids it accepts, and each validator id to the schema of its
# customization arguments (an empty dict means the validator takes none).
VALIDATOR_SPECS = {
    SCHEMA_TYPE_BOOL: {},
    SCHEMA_TYPE_DICT: {},
    SCHEMA_TYPE_FLOAT: {
        'is_at_least': {
            'min_value': {
                'type': SCHEMA_TYPE_FLOAT
            }
        },
        'is_at_most': {
            'max_value': {
                'type': SCHEMA_TYPE_FLOAT
            }
        },
    },
    SCHEMA_TYPE_HTML: {},
    SCHEMA_TYPE_INT: {
        'is_at_least': {
            'min_value': {
                'type': SCHEMA_TYPE_INT
            }
        },
        'is_at_most': {
            'max_value': {
                'type': SCHEMA_TYPE_INT
            }
        },
    },
    SCHEMA_TYPE_LIST: {
        'has_length_at_least': {
            'min_value': {
                'type': SCHEMA_TYPE_INT,
                'validators': [{
                    'id': 'is_at_least',
                    'min_value': 1,
                }],
            }
        },
        'has_length_at_most': {
            'max_value': {
                'type': SCHEMA_TYPE_INT,
                'validators': [{
                    'id': 'is_at_least',
                    'min_value': 1,
                }],
            }
        },
        'is_uniquified': {}
    },
    SCHEMA_TYPE_UNICODE: {
        'matches_regex': {
            'regex': {
                'type': SCHEMA_TYPE_UNICODE,
                'validators': [{
                    'id': 'is_regex',
                }]
            }
        },
        'is_nonempty': {},
        'is_regex': {},
        'is_valid_email': {},
        'is_valid_user_id': {},
        'is_valid_math_expression': {
            'algebraic': {
                'type': SCHEMA_TYPE_BOOL
            }
        },
        'is_valid_algebraic_expression': {},
        'is_valid_numeric_expression': {},
        'is_valid_math_equation': {},
        'is_supported_audio_language_code': {},
        'is_url_fragment': {},
        'has_length_at_most': {
            'max_value': {
                'type': SCHEMA_TYPE_INT
            }
        }
    },
}
def _validate_ui_config(obj_type, ui_config):
    """Validates the value of a UI configuration."""
    spec = UI_CONFIG_SPECS[obj_type]
    given_keys = set(ui_config.keys())
    allowed_keys = set(spec.keys())
    # Every supplied key must be one the spec knows about.
    assert given_keys <= allowed_keys, (
        'Missing keys: %s, Extra keys: %s' % (
            list(allowed_keys - given_keys),
            list(given_keys - allowed_keys)))
    # Each supplied value must normalize cleanly against its spec schema.
    for key in ui_config:
        schema_utils.normalize_against_schema(ui_config[key], spec[key])
def _validate_validator(obj_type, validator):
    """Validates the value of a 'validator' field."""
    reference_dict = VALIDATOR_SPECS[obj_type]
    assert 'id' in validator, 'id is not present in validator'
    assert validator['id'] in reference_dict, (
        '%s is not present in reference_dict' % validator['id'])

    # The remaining keys are the validator's customization arguments; they
    # must exactly match the keys declared in VALIDATOR_SPECS.
    customization_keys = list(validator.keys())
    customization_keys.remove('id')
    assert (
        set(customization_keys) ==
        set(reference_dict[validator['id']].keys())), (
            'Missing keys: %s, Extra keys: %s' % (
                list(
                    set(reference_dict[validator['id']].keys()) -
                    set(customization_keys)),
                list(
                    set(customization_keys) -
                    set(reference_dict[validator['id']].keys()))))
    # Each customization argument must normalize against its own schema.
    for key in customization_keys:
        value = validator[key]
        schema = reference_dict[validator['id']][key]
        try:
            schema_utils.normalize_against_schema(value, schema)
        except Exception as e:
            raise AssertionError(e)

    # Check that the id corresponds to a valid normalizer function.
    # The validator function must accept exactly the customization keys
    # plus the 'obj' being validated.
    # NOTE(review): inspect.getargspec is deprecated and removed in
    # Python 3.11+; getfullargspec would be the modern equivalent — confirm
    # against the project's supported Python versions before changing.
    validator_fn = schema_utils.get_validator(validator['id'])
    assert set(inspect.getargspec(validator_fn).args) == set(
        customization_keys + ['obj']), (
            'Missing keys: %s, Extra keys: %s' % (
                list(
                    set(customization_keys + ['obj']) -
                    set(inspect.getargspec(validator_fn).args)),
                list(
                    set(inspect.getargspec(validator_fn).args) -
                    set(customization_keys + ['obj']))))
def _validate_dict_keys(dict_to_check, required_keys, optional_keys):
"""Checks that all of the required keys, and possibly some of the optional
keys, are in the given dict.
Raises:
AssertionError. The validation fails.
"""
assert set(required_keys) <= set(dict_to_check.keys()), (
'Missing keys: %s' % dict_to_check)
assert set(dict_to_check.keys()) <= set(required_keys + optional_keys), (
'Extra keys: %s' % dict_to_check)
def validate_schema(schema):
    """Validates a schema.

    This is meant to be a utility function that should be used by tests to
    ensure that all schema definitions in the codebase are valid.

    Each schema is a dict with at least a key called 'type'. The 'type' can
    take one of the SCHEMA_TYPE_* values declared above. In addition, there
    may be additional keys for specific types:
    - 'list' requires an additional 'items' property, which specifies the type
      of the elements in the list. It also allows for an optional 'len'
      property which specifies the len of the list.
    - 'dict' requires an additional 'properties' property, which specifies the
      names of the keys in the dict, and schema definitions for their values.
    There may also be an optional 'post_normalizers' key whose value is a list
    of normalizers.

    Raises:
        AssertionError. The schema is not valid.
    """
    # Basic shape: a dict with an allowed 'type'.
    assert isinstance(schema, dict), ('Expected dict, got %s' % schema)
    assert SCHEMA_KEY_TYPE in schema, (
        '%s is not present in schema key types' % SCHEMA_KEY_TYPE)
    assert schema[SCHEMA_KEY_TYPE] in ALLOWED_SCHEMA_TYPES, (
        '%s is not an allowed schema type' % schema[SCHEMA_KEY_TYPE])
    if schema[SCHEMA_KEY_TYPE] == SCHEMA_TYPE_CUSTOM:
        # Custom schemas must name one of the known object types.
        _validate_dict_keys(
            schema,
            [SCHEMA_KEY_TYPE, SCHEMA_KEY_OBJ_TYPE],
            [SCHEMA_KEY_REPLACEMENT_UI_CONFIG])
        assert schema[SCHEMA_KEY_OBJ_TYPE] in ALLOWED_CUSTOM_OBJ_TYPES, schema
    elif schema[SCHEMA_KEY_TYPE] == SCHEMA_TYPE_LIST:
        # Lists must describe their item schema (validated recursively) and
        # may fix an exact positive length.
        _validate_dict_keys(
            schema,
            [SCHEMA_KEY_ITEMS, SCHEMA_KEY_TYPE],
            OPTIONAL_SCHEMA_KEYS + [SCHEMA_KEY_LEN])

        validate_schema(schema[SCHEMA_KEY_ITEMS])
        if SCHEMA_KEY_LEN in schema:
            assert isinstance(schema[SCHEMA_KEY_LEN], int), (
                'Expected int, got %s' % schema[SCHEMA_KEY_LEN])
            assert schema[SCHEMA_KEY_LEN] > 0, (
                'Expected length greater than 0, got %s' % (
                    schema[SCHEMA_KEY_LEN]))
    elif schema[SCHEMA_KEY_TYPE] == SCHEMA_TYPE_DICT:
        # Dicts must list their properties; each property has a name, a
        # (recursively validated) schema, and an optional description.
        _validate_dict_keys(
            schema,
            [SCHEMA_KEY_PROPERTIES, SCHEMA_KEY_TYPE],
            OPTIONAL_SCHEMA_KEYS)

        assert isinstance(schema[SCHEMA_KEY_PROPERTIES], list), (
            'Expected list, got %s' % schema[SCHEMA_KEY_LEN])
        for prop in schema[SCHEMA_KEY_PROPERTIES]:
            _validate_dict_keys(
                prop,
                [SCHEMA_KEY_NAME, SCHEMA_KEY_SCHEMA],
                [SCHEMA_KEY_DESCRIPTION])
            assert isinstance(prop[SCHEMA_KEY_NAME], python_utils.BASESTRING), (
                'Expected %s, got %s' % (
                    python_utils.BASESTRING, prop[SCHEMA_KEY_NAME]))
            validate_schema(prop[SCHEMA_KEY_SCHEMA])
            if SCHEMA_KEY_DESCRIPTION in prop:
                assert isinstance(
                    prop[SCHEMA_KEY_DESCRIPTION], python_utils.BASESTRING), (
                        'Expected %s, got %s' % (
                            python_utils.BASESTRING,
                            prop[SCHEMA_KEY_DESCRIPTION]))
    else:
        # All other (scalar) types take only the optional keys.
        _validate_dict_keys(schema, [SCHEMA_KEY_TYPE], OPTIONAL_SCHEMA_KEYS)

    if SCHEMA_KEY_UI_CONFIG in schema:
        _validate_ui_config(
            schema[SCHEMA_KEY_TYPE], schema[SCHEMA_KEY_UI_CONFIG])

    if SCHEMA_KEY_POST_NORMALIZERS in schema:
        assert isinstance(schema[SCHEMA_KEY_POST_NORMALIZERS], list), (
            'Expected list, got %s' % schema[SCHEMA_KEY_POST_NORMALIZERS])
        for post_normalizer in schema[SCHEMA_KEY_POST_NORMALIZERS]:
            assert isinstance(post_normalizer, dict), (
                'Expected dict, got %s' % post_normalizer)
            assert 'id' in post_normalizer, (
                'id is not present in %s' % post_normalizer)
            # Check that the id corresponds to a valid normalizer function.
            schema_utils.Normalizers.get(post_normalizer['id'])
            # TODO(sll): Check the arguments too.

    if SCHEMA_KEY_VALIDATORS in schema:
        assert isinstance(schema[SCHEMA_KEY_VALIDATORS], list), (
            'Expected list, got %s' % schema[SCHEMA_KEY_VALIDATORS])
        for validator in schema[SCHEMA_KEY_VALIDATORS]:
            assert isinstance(validator, dict), (
                'Expected dict, got %s' % schema[SCHEMA_KEY_VALIDATORS])
            assert 'id' in validator, (
                'id is not present in %s' % validator)
            _validate_validator(schema[SCHEMA_KEY_TYPE], validator)
class SchemaValidationUnitTests(test_utils.GenericTestBase):
    """Test validation of schemas."""

    # Schema used by the global-validator tests below: a dict holding a list
    # of unicode strings and a single unicode string.
    GLOBAL_VALIDATORS_SCHEMA = {
        'type': schema_utils.SCHEMA_TYPE_DICT,
        'properties': [{
            'name': 'unicodeListProp',
            'schema': {
                'type': schema_utils.SCHEMA_TYPE_LIST,
                'items': {
                    'type': schema_utils.SCHEMA_TYPE_UNICODE
                }
            },
        }, {
            'name': 'unicodeProp',
            'schema': {
                'type': schema_utils.SCHEMA_TYPE_UNICODE
            },
        }]
    }

    # Validators applied to every normalized value in the tests below.
    GLOBAL_VALIDATORS = [{
        'id': 'does_not_contain_email'
    }]

    def test_schemas_are_correctly_validated(self):
        """Test validation of schemas."""
        # Each entry pairs an invalid schema with a regex that the raised
        # error message must match.
        invalid_schemas_with_error_messages = [
            ([
                'type'
            ], r'Expected dict, got \[u\'type\'\]'),
            ({
                'type': 'invalid'
            }, 'invalid is not an allowed schema type'),
            ({
                'type': 'dict',
            }, 'Missing keys: {u\'type\': u\'dict\'}'),
            ({
                'type': 'list',
                'items': {}
            }, 'type is not present in schema key types'),
            ({
                'type': 'list',
                'items': {
                    'type': 'unicode'
                },
                'len': -1
            }, 'Expected length greater than 0, got -1'),
            ({
                'type': 'list',
                'items': {
                    'type': 'unicode'
                },
                'len': 0
            }, 'Expected length greater than 0, got 0'),
            ({
                'type': 'list',
                'items': {
                    'type': 'unicode'
                },
                'validators': [{
                    'id': 'has_length_at_most',
                    'max_value': 0
                }]
            },
             r'Validation failed: is_at_least \({u\'min_value\': 1}\) for '
             r'object 0'),
            ({
                'type': 'dict',
                'items': {
                    'type': 'float'
                }
            },
             r'Missing keys: {u\'items\': {u\'type\': u\'float\'}, '
             r'u\'type\': u\'dict\'}'),
            ({
                'type': 'dict',
                'properties': {
                    123: {
                        'type': 'unicode'
                    }
                }
            }, 'u\'len\''),
            ({
                'type': 'unicode',
                'validators': [{
                    'id': 'fake_validator',
                }]
            }, 'fake_validator is not present in reference_dict'),
            ({
                'type': 'unicode',
                'validators': [{
                    'id': 'is_nonempty',
                    'fake_arg': 'unused_value',
                }]
            }, r'Missing keys: \[\], Extra keys: \[u\'fake_arg\'\]'),
            ({
                'type': 'unicode',
                'validators': [{
                    'id': 'matches_regex',
                }]
            }, r'Missing keys: \[u\'regex\'\], Extra keys: \[\]'),
            ({
                'type': 'float',
                'validators': [{
                    'id': 'is_at_least',
                    'min_value': 'value_of_wrong_type',
                }]
            }, 'Could not convert unicode to float: value_of_wrong_type'),
            ({
                'type': 'unicode',
                'ui_config': {
                    'rows': -1,
                }
            },
             r'Validation failed: is_at_least \({u\'min_value\': 1}\) for '
             r'object -1'),
            ({
                'type': 'unicode',
                'ui_config': {
                    'coding_mode': 'invalid_mode',
                }
            },
             r'Received invalid_mode which is not in the allowed range of '
             r'choices: \[u\'none\', u\'python\', u\'coffeescript\'\]')]

        # Representative well-formed schemas that must validate cleanly.
        valid_schemas = [{
            'type': 'float'
        }, {
            'type': 'bool'
        }, {
            'type': 'dict',
            'properties': [{
                'name': 'str_property',
                'schema': {
                    'type': 'unicode'
                }
            }]
        }, {
            'type': 'list',
            'items': {
                'type': 'list',
                'items': {
                    'type': 'list',
                    'items': {
                        'type': 'bool'
                    },
                    'len': 100
                }
            }
        }, {
            'type': 'list',
            'items': {
                'type': 'unicode'
            },
            'validators': [{
                'id': 'has_length_at_most',
                'max_value': 3
            }]
        }, {
            'type': 'float',
            'validators': [{
                'id': 'is_at_least',
                'min_value': 3.0,
            }]
        }, {
            'type': 'unicode',
            'ui_config': {
                'rows': 5,
            }
        }, {
            'type': 'unicode',
            'ui_config': {
                'coding_mode': 'python',
            }
        }]

        for schema in valid_schemas:
            validate_schema(schema)
        for schema, error_msg in invalid_schemas_with_error_messages:
            with self.assertRaisesRegexp((AssertionError, KeyError), error_msg):
                validate_schema(schema)

    def test_normalize_against_schema_raises_exception(self):
        """Tests if normalize against schema raises exception
        for invalid key.
        """
        with self.assertRaisesRegexp(Exception, 'Invalid schema type: invalid'):
            schema = {SCHEMA_KEY_TYPE: 'invalid'}
            schema_utils.normalize_against_schema('obj', schema)

    def test_is_nonempty_validator(self):
        """Tests if static method is_nonempty returns true iff obj
        is not an empty str.
        """
        is_nonempty = schema_utils.get_validator('is_nonempty')
        self.assertTrue(is_nonempty('non-empty string'))
        self.assertTrue(is_nonempty(' '))
        self.assertTrue(is_nonempty('    '))
        self.assertFalse(is_nonempty(''))

    def test_is_at_most_validator(self):
        """Tests if static method is_at_most returns true iff obj
        is at most a value.
        """
        is_at_most = schema_utils.get_validator('is_at_most')
        self.assertTrue(is_at_most(2, 3))
        self.assertTrue(is_at_most(2, 2))  # boundary
        self.assertFalse(is_at_most(2, 1))

    def test_has_length_at_least_validator(self):
        """Tests if static method has_length_at_least returns true iff
        given list has length of at least the given value.
        """
        has_len_at_least = schema_utils.get_validator('has_length_at_least')
        self.assertTrue(has_len_at_least(['elem'], 0))
        self.assertTrue(has_len_at_least(['elem'], 1))  # boundary
        self.assertFalse(has_len_at_least(['elem'], 2))

    def test_get_raises_invalid_validator_id(self):
        """Tests if class method 'get' in _Validator raises exception
        for invalid validator id.
        """
        with self.assertRaisesRegexp(
            Exception,
            'Invalid validator id: some invalid validator method name'):
            schema_utils.get_validator('some invalid validator method name')

    def test_is_valid_algebraic_expression_validator(self):
        """Tests for the is_valid_algebraic_expression static method with
        algebraic type.
        """
        is_valid_algebraic_expression = schema_utils.get_validator(
            'is_valid_algebraic_expression')

        self.assertTrue(is_valid_algebraic_expression('a+b*2'))
        self.assertFalse(is_valid_algebraic_expression('3+4/2'))

    def test_is_valid_numeric_expression_validator(self):
        """Tests for the is_valid_numeric_expression static method with
        numeric type.
        """
        is_valid_numeric_expression = schema_utils.get_validator(
            'is_valid_numeric_expression')

        self.assertFalse(is_valid_numeric_expression('a+b*2'))
        self.assertTrue(is_valid_numeric_expression('3+4/2'))

    def test_is_valid_math_equation_validator(self):
        """Tests for the is_valid_math_equation static method."""
        is_valid_math_equation = schema_utils.get_validator(
            'is_valid_math_equation')

        # Cases that must be accepted as equations.
        self.assertTrue(is_valid_math_equation('a+b=c'))
        self.assertTrue(is_valid_math_equation('x^2+y^2=z^2'))
        self.assertTrue(is_valid_math_equation('y = m*x + b'))
        self.assertTrue(is_valid_math_equation('alpha^a + beta^b = gamma^(-c)'))
        self.assertTrue(is_valid_math_equation('a+b=0'))
        self.assertTrue(is_valid_math_equation('0=a+b'))
        self.assertTrue(is_valid_math_equation('(a/b)+c=(4^3)*a'))
        self.assertTrue(is_valid_math_equation('2^alpha-(-3) = 3'))
        self.assertTrue(is_valid_math_equation('(a+b)^2 = a^2 + b^2 + 2*a*b'))
        self.assertTrue(is_valid_math_equation('(a+b)^2 = a^2 + b^2 + 2ab'))
        self.assertTrue(is_valid_math_equation('x/a + y/b = 1'))
        self.assertTrue(is_valid_math_equation('3 = -5 + pi^x'))
        self.assertTrue(is_valid_math_equation('0.4 + 0.5 = alpha * 4'))
        self.assertTrue(is_valid_math_equation('sqrt(a+b)=c - gamma/2.4'))
        self.assertTrue(is_valid_math_equation('abs(35 - x) = 22.3'))

        # Cases that must be rejected (malformed, inequalities, purely
        # numeric equations, etc.).
        self.assertFalse(is_valid_math_equation('3 -= 2/a'))
        self.assertFalse(is_valid_math_equation('3 == 2/a'))
        self.assertFalse(is_valid_math_equation('x + y = '))
        self.assertFalse(is_valid_math_equation('(a+b = 0)'))
        self.assertFalse(is_valid_math_equation('pi = 3.1415'))
        self.assertFalse(is_valid_math_equation('a+b=0=a-b'))
        self.assertFalse(is_valid_math_equation('alpha - beta/c'))
        self.assertFalse(is_valid_math_equation('2^alpha-(-3*) = 3'))
        self.assertFalse(is_valid_math_equation('a~b = 0'))
        self.assertFalse(is_valid_math_equation('a+b<=0'))
        self.assertFalse(is_valid_math_equation('a+b>=0'))
        self.assertFalse(is_valid_math_equation('a+b<0'))
        self.assertFalse(is_valid_math_equation('a+b>0'))
        self.assertFalse(is_valid_math_equation('5+3=8'))
        self.assertFalse(is_valid_math_equation('(a+(b)=0'))
        self.assertFalse(is_valid_math_equation('a+b=c:)'))

    def test_is_supported_audio_language_code(self):
        is_supported_audio_language_code = schema_utils.get_validator(
            'is_supported_audio_language_code')

        self.assertTrue(is_supported_audio_language_code('en'))
        self.assertTrue(is_supported_audio_language_code('fr'))
        self.assertTrue(is_supported_audio_language_code('de'))

        self.assertFalse(is_supported_audio_language_code(''))
        self.assertFalse(is_supported_audio_language_code('zz'))
        self.assertFalse(is_supported_audio_language_code('test'))

    def test_is_url_fragment(self):
        validate_url_fragment = schema_utils.get_validator(
            'is_url_fragment')

        self.assertTrue(validate_url_fragment('math'))
        self.assertTrue(validate_url_fragment('computer-science'))
        self.assertTrue(validate_url_fragment('bio-tech'))

        self.assertFalse(validate_url_fragment(''))
        self.assertFalse(validate_url_fragment('Abc'))
        self.assertFalse(validate_url_fragment('!@#$%^&*()_+='))

    def test_global_validators_raise_exception_when_error_in_dict(self):
        # The offending email lives in the scalar property.
        with self.assertRaisesRegexp(
            AssertionError,
            r'^Validation failed: does_not_contain_email .* email@email.com$'
        ):
            obj = {
                'unicodeListProp': ['not email', 'not email 2'],
                'unicodeProp': 'email@email.com'
            }
            schema_utils.normalize_against_schema(
                obj, self.GLOBAL_VALIDATORS_SCHEMA,
                global_validators=self.GLOBAL_VALIDATORS
            )

    def test_global_validators_raise_exception_when_error_in_list(self):
        # The offending email lives inside the list property.
        with self.assertRaisesRegexp(
            AssertionError,
            r'^Validation failed: does_not_contain_email .* email2@email.com$'
        ):
            obj = {
                'unicodeListProp': ['email2@email.com', 'not email 2'],
                'unicodeProp': 'not email'
            }
            schema_utils.normalize_against_schema(
                obj, self.GLOBAL_VALIDATORS_SCHEMA,
                global_validators=self.GLOBAL_VALIDATORS
            )

    def test_global_validators_pass_when_no_error(self):
        obj = {
            'unicodeListProp': ['not email', 'not email 2'],
            'unicodeProp': 'not email'
        }
        normalized_obj = schema_utils.normalize_against_schema(
            obj, self.GLOBAL_VALIDATORS_SCHEMA,
            global_validators=self.GLOBAL_VALIDATORS
        )
        self.assertEqual(obj, normalized_obj)
class SchemaNormalizationUnitTests(test_utils.GenericTestBase):
"""Test schema-based normalization of objects."""
def check_normalization(
self, schema, mappings, invalid_items_with_error_messages):
"""Validates the schema and tests that values are normalized correctly.
Args:
schema: dict. The schema to normalize the value
against. Each schema is a dict with at least a key called
'type'. The 'type' can take one of the SCHEMA_TYPE_* values
declared above.
mappings: list(tuple). A list of 2-element tuples.
The first element of each item is expected to be normalized to
the second.
invalid_items_with_error_messages: list(tuple(str, str)). A list of
values with their corresponding messages. Each value is expected
to raise an AssertionError when normalized.
"""
validate_schema(schema)
for raw_value, expected_value in mappings:
self.assertEqual(
schema_utils.normalize_against_schema(raw_value, schema),
expected_value)
for value, error_msg in invalid_items_with_error_messages:
with self.assertRaisesRegexp(Exception, error_msg):
schema_utils.normalize_against_schema(value, schema)
def test_float_schema(self):
schema = {
'type': schema_utils.SCHEMA_TYPE_FLOAT,
}
mappings = [(1.2, 1.2), (3, 3.0), (-1, -1.0), ('1', 1.0)]
invalid_values_with_error_messages = [
([13], r'Could not convert list to float: \[13\]'),
('abc', 'Could not convert unicode to float: abc'),
(None, 'Could not convert NoneType to float: None')]
self.check_normalization(
schema, mappings, invalid_values_with_error_messages)
def test_int_schema(self):
schema = {
'type': schema_utils.SCHEMA_TYPE_INT,
}
mappings = [(1.2, 1), (3.7, 3), (-1, -1), ('1', 1)]
invalid_values_with_error_messages = [
([13], r'Could not convert list to int: \[13\]'),
('abc', 'Could not convert unicode to int: abc'),
(None, 'Could not convert NoneType to int: None')]
self.check_normalization(
schema, mappings, invalid_values_with_error_messages)
def test_unicode_or_none_schema(self):
schema = {
'type': schema_utils.SCHEMA_TYPE_UNICODE_OR_NONE,
}
mappings = [('a', 'a'), ('', ''), (b'bytes', 'bytes'), (None, None)]
invalid_values_with_error_messages = [
([], r'Expected unicode string or None, received'),
]
self.check_normalization(
schema, mappings, invalid_values_with_error_messages)
def test_list_schema_with_len(self):
schema = {
'type': schema_utils.SCHEMA_TYPE_LIST,
'items': {
'type': schema_utils.SCHEMA_TYPE_UNICODE,
},
'len': 2,
}
mappings = [
(['a', 'b'], ['a', 'b']),
(['abc', ''], ['abc', '']),
(['adaA13', '13'], ['adaA13', '13'])]
invalid_values_with_error_messages = [
(['1', 13], 'Expected unicode string, received 13'),
({'a': 'b'}, r'Expected list, received {u\'a\': u\'b\'}'),
({}, 'Expected list, received {}'),
(None, 'Expected list, received None'),
(123, 'Expected list, received 123'),
('abc', 'Expected list, received abc'),
(['c'], 'Expected length of 2 got 1'),
([], 'Expected length of 2 got 0')]
self.check_normalization(
schema, mappings, invalid_values_with_error_messages)
def test_html_schema(self):
"""Tests for valid html schema, an html string. Note that
html.cleaner() is called in normalize_against_schema.
"""
schema = {
'type': schema_utils.SCHEMA_TYPE_HTML,
}
mappings = [
('<script></script>', ''),
(b'<script></script>', ''),
(
'<a class="webLink" href="https'
'://www.oppia.com/"><img src="images/oppia.png"></a>',
'<a href="https://www.oppia.com/"></a>')]
invalid_values_with_error_messages = [
(
['<script></script>', '<script></script>'],
r'Expected unicode HTML string, received \[u\'<script></script>'
r'\', u\'<script></script>\'\]')]
self.check_normalization(
schema, mappings, invalid_values_with_error_messages)
    def test_schema_key_post_normalizers(self):
        """Test post normalizers in schema using basic html schema."""
        # normalize_spaces collapses duplicated whitespace after cleaning.
        schema_1 = {
            'type': schema_utils.SCHEMA_TYPE_HTML,
            'post_normalizers': [
                {'id': 'normalize_spaces'}, # html strings with no extra spaces
            ]
        }
        obj_1 = 'a a'
        normalize_obj_1 = schema_utils.normalize_against_schema(obj_1, schema_1)
        self.assertEqual(u'a a', normalize_obj_1)
        # sanitize_url strips unsafe trailing fragments from the URL.
        schema_2 = {
            'type': schema_utils.SCHEMA_TYPE_HTML,
            'post_normalizers': [
                {'id': 'sanitize_url'}
            ]
        }
        obj_2 = 'http://www.oppia.org/splash/<script>'
        normalize_obj_2 = schema_utils.normalize_against_schema(obj_2, schema_2)
        self.assertEqual(u'http://www.oppia.org/splash/', normalize_obj_2)
    def test_list_schema(self):
        """Plain list schemas normalize every item with the item sub-schema
        and reject non-list values outright.
        """
        schema = {
            'type': schema_utils.SCHEMA_TYPE_LIST,
            'items': {
                'type': schema_utils.SCHEMA_TYPE_UNICODE,
            }
        }
        # Without a 'len' key, any length (including empty) is allowed.
        mappings = [
            (['a', 'b'], ['a', 'b']),
            (['c'], ['c']),
            (['abc', ''], ['abc', '']),
            ([], []),
            (['adaA13', '13'], ['adaA13', '13'])]
        invalid_values_with_error_messages = [
            (['1', 13], 'Expected unicode string, received 13'),
            ({'a': 'b'}, r'Expected list, received {u\'a\': u\'b\'}'),
            ({}, 'Expected list, received {}'),
            (None, 'Expected list, received None'),
            (123, 'Expected list, received 123'),
            ('abc', 'Expected list, received abc')]
        self.check_normalization(
            schema, mappings, invalid_values_with_error_messages)
    def test_dict_schema(self):
        """Dict schemas normalize each named property against its own
        sub-schema; missing or extra keys are reported explicitly.
        """
        schema = {
            'type': schema_utils.SCHEMA_TYPE_DICT,
            'properties': [{
                'name': 'unicodeListProp',
                'schema': {
                    'type': schema_utils.SCHEMA_TYPE_LIST,
                    'items': {
                        'type': schema_utils.SCHEMA_TYPE_UNICODE
                    }
                },
            }, {
                'name': 'intProp',
                'schema': {
                    'type': schema_utils.SCHEMA_TYPE_INT
                },
            }, {
                'name': 'dictProp',
                'schema': {
                    'type': schema_utils.SCHEMA_TYPE_DICT,
                    'properties': [{
                        'name': 'floatProp',
                        'schema': {
                            'type': schema_utils.SCHEMA_TYPE_FLOAT
                        }
                    }]
                }
            }]
        }
        # Values are normalized recursively (e.g. int 3 -> float 3.0 for
        # the nested floatProp).
        mappings = [({
            'unicodeListProp': [],
            'intProp': 1,
            'dictProp': {
                'floatProp': 3
            }
        }, {
            'unicodeListProp': [],
            'intProp': 1,
            'dictProp': {
                'floatProp': 3.0
            }
        }), ({
            'intProp': 10,
            'unicodeListProp': ['abc', 'def'],
            'dictProp': {
                'floatProp': -1.0
            }
        }, {
            'intProp': 10,
            'unicodeListProp': ['abc', 'def'],
            'dictProp': {
                'floatProp': -1.0
            }
        })]
        # Key mismatches are reported as missing/extra key lists; non-dict
        # values are rejected outright.
        invalid_values_with_error_messages = [
            ({
                'unicodeListProp': [],
                'intPROP': 1,
                'dictProp': {
                    'floatProp': 3.0
                }
            }, r'Missing keys: \[u\'intProp\'\], Extra keys: \[u\'intPROP\'\]'),
            ({
                'unicodeListProp': ['aaa'],
                'intProp': 1,
            }, r'Missing keys: \[u\'dictProp\'\], Extra keys: \[\]'),
            ({
                'unicodeListProp': [],
                'intProp': 3,
                'dictProp': {},
            }, r'Missing keys: \[u\'floatProp\'\], Extra keys: \[\]'),
            ([
                'unicodeListProp', 'intProp', 'dictProp'
            ],
            r'Expected dict, received \[u\'unicodeListProp\', u\'intProp\', '
            r'u\'dictProp\'\]'),
            (None, 'Expected dict, received None'),
            (123, 'Expected dict, received 123'),
            ('abc', 'Expected dict, received abc')]
        self.check_normalization(
            schema, mappings, invalid_values_with_error_messages)
    def test_notification_user_ids_list_validator(self):
        """The notification user-id list schema accepts well-formed user
        ids up to the maximum list length; malformed ids or longer lists
        fail validation.
        """
        schema = email_manager.NOTIFICATION_USER_IDS_LIST_SCHEMA
        # Five distinct, correctly sized user ids (within the allowed max).
        valid_user_id_list = [
            'uid_%s' % (chr(97 + i) * feconf.USER_ID_RANDOM_PART_LENGTH)
            for i in python_utils.RANGE(0, 5)
        ]
        # Seven ids: exceeds the schema's maximum length of 5.
        big_user_id_list = [
            'uid_%s' % (chr(97 + i) * feconf.USER_ID_RANDOM_PART_LENGTH)
            for i in python_utils.RANGE(0, 7)
        ]
        mappings = [
            (
                ['uid_%s' % ('a' * feconf.USER_ID_RANDOM_PART_LENGTH)],
                ['uid_%s' % ('a' * feconf.USER_ID_RANDOM_PART_LENGTH)]
            ),
            (valid_user_id_list, valid_user_id_list)]
        invalid_values_with_error_messages = [
            (
                # A random part of the wrong length fails is_valid_user_id.
                [u'uid_%s' % ('a' * 28)],
                r'Validation failed: is_valid_user_id \({}\) for object '
                r'%s' % 'uid_%s' % ('a' * 28)),
            (
                big_user_id_list,
                r'Validation failed: has_length_at_most \({u\'max_value\': 5}\)'
                r' for object \[.*\]'),
        ]
        self.check_normalization(
            schema, mappings, invalid_values_with_error_messages)
    def test_normalize_spaces(self):
        """Test static method normalize_spaces; should collapse multiple
        spaces.
        """
        normalize_spaces = schema_utils.Normalizers.get('normalize_spaces')
        # Leading/trailing/duplicated whitespace collapses to single spaces.
        self.assertEqual('dog cat', normalize_spaces('dog cat'))
        self.assertEqual('dog cat', normalize_spaces(' dog cat'))
        self.assertEqual('dog cat', normalize_spaces(' dog cat '))
        # The normalizer never inserts spaces that were not there.
        self.assertNotEqual('dog cat', normalize_spaces('dogcat'))
    def test_normalizer_get(self):
        """Tests the class method 'get' of Normalizers, should return the
        normalizer method corresponding to the given normalizer id.
        """
        normalize_spaces = schema_utils.Normalizers.get('normalize_spaces')
        # The returned callable is the normalizer function itself.
        self.assertEqual('normalize_spaces', normalize_spaces.__name__)
    def test_normalizer_get_raises_exception_for_invalid_id(self):
        """Tests if class method get of Normalizers raises exception when given
        an invalid normalizer id.
        """
        with self.assertRaisesRegexp(
            Exception,
            'Invalid normalizer id: some invalid normalizer method name'):
            schema_utils.Normalizers.get('some invalid normalizer method name')
        # Lookup must be an exact match, not a prefix/substring match.
        with self.assertRaisesRegexp(
            Exception, 'Invalid normalizer id: normalize_space'):
            # Test substring of an actual id.
            schema_utils.Normalizers.get('normalize_space')
    def test_normalizer_sanitize_url(self):
        """Tests if static method sanitize_url of Normalizers correctly
        sanitizes a URL when given its string representation and raises
        error for invalid URLs.
        """
        sanitize_url = schema_utils.Normalizers.get('sanitize_url')
        # Well-formed http/https URLs pass through unchanged.
        self.assertEqual(
            'https://www.oppia.org/splash/',
            sanitize_url('https://www.oppia.org/splash/'))
        self.assertEqual(
            'http://www.oppia.org/splash/',
            sanitize_url('http://www.oppia.org/splash/'))
        # Reserved characters in the path/query are percent-encoded.
        self.assertEqual(
            sanitize_url('http://example.com/~path;parameters?q=arg#fragment'),
            'http://example.com/%7Epath%3Bparameters?q%3Darg#fragment')
        # Embedded markup is fully escaped rather than stripped.
        self.assertEqual(
            'https://www.web.com/%3Cscript%20type%3D%22text/javascript%22%'
            '3Ealert%28%27rm%20-rf%27%29%3B%3C/script%3E',
            sanitize_url(
                'https://www.web.com/<script type="text/javascript">alert(\'rm'
                ' -rf\');</script>'))
        self.assertEqual('', sanitize_url(''))
        # Raise AssertionError if string does not start with http:// or
        # https://.
        with self.assertRaisesRegexp(
            AssertionError,
            'Invalid URL: Sanitized URL should start with \'http://\' or'
            ' \'https://\'; received oppia.org'):
            sanitize_url('oppia.org')
        with self.assertRaisesRegexp(
            AssertionError,
            'Invalid URL: Sanitized URL should start with \'http://\' or'
            ' \'https://\'; received www.oppia.org'):
            sanitize_url('www.oppia.org')
| 38.546578 | 80 | 0.56445 |
4f527119f3a8b5389a67b1a79de362ebfe605725 | 3,879 | py | Python | golem/report/utils.py | Racerinorbit/golem | b02c7acaed6e84ff565e34e8626e835ec451e2e4 | [
"MIT"
] | null | null | null | golem/report/utils.py | Racerinorbit/golem | b02c7acaed6e84ff565e34e8626e835ec451e2e4 | [
"MIT"
] | null | null | null | golem/report/utils.py | Racerinorbit/golem | b02c7acaed6e84ff565e34e8626e835ec451e2e4 | [
"MIT"
] | null | null | null | import os
from io import BytesIO
from golem import execution
from golem.browser import get_browser
def save_screenshot(reportdir, image_name, format='PNG', quality=None, width=None,
                    height=None, resize=None):
    """Modify screenshot format, size and quality before saving.

    Pillow must be installed.
    - format must be 'PNG' or 'JPEG'
    - quality must be an int in 1..95 range.
      Default is 75. Only applies to JPEG.
    - width and height must be int greater than 0
    - resize must be an int greater than 0.
      Str in the format '55' or '55%' is also allowed.

    Returns the saved file name, or None when Pillow is not installed
    (a warning is logged in that case instead of raising).
    """
    try:
        from PIL import Image
    except ModuleNotFoundError:
        execution.logger.warning('Pillow must be installed in order to modify'
                                 ' screenshot format, size or quality')
        return
    extension = 'png'
    resample_filter = Image.BOX  # for PNG
    # validate format
    if format not in ['JPEG', 'PNG']:
        raise ValueError("settings screenshots format should be 'jpg' or 'png'")
    # validate quality (only meaningful for JPEG output)
    if quality is not None:
        try:
            quality = int(quality)
        except ValueError:
            raise ValueError('settings screenshots quality should be int')
        if format == 'JPEG' and not 1 <= quality <= 95:
            raise ValueError('settings screenshots quality should be in 1..95 range for jpg files')
    # validate width
    if width is not None:
        try:
            width = int(width)
        except ValueError:
            raise ValueError('settings screenshots width should be int')
        if width < 0:
            raise ValueError('settings screenshots width should be greater than 0')
    # validate height
    if height is not None:
        try:
            height = int(height)
        except ValueError:
            raise ValueError('settings screenshots height should be int')
        if height < 0:
            raise ValueError('settings screenshots height should be greater than 0')
    # validate resize
    if resize is not None:
        # BUG FIX: the original tested `resize is str`, which is always False
        # for a string *value* (identity vs the str type), so the '%' suffix
        # was never stripped and int('55%') below raised ValueError.
        if isinstance(resize, str):
            resize = resize.replace('%', '')
        try:
            resize = int(resize)
        except ValueError:
            raise ValueError('settings screenshots resize should be int')
        if resize < 0:
            raise ValueError('settings screenshots resize should be greater than 0')
    base_png = get_browser().get_screenshot_as_png()
    pil_image = Image.open(BytesIO(base_png))
    if format == 'JPEG':
        # JPEG has no alpha channel, so drop it; use BICUBIC resampling.
        pil_image = pil_image.convert('RGB')
        extension = 'jpg'
        resample_filter = Image.BICUBIC
    if any([width, height, resize]):
        img_width, img_height = pil_image.size
        if width and height:
            new_width = width
            new_height = height
        elif width:
            new_width = width
            # maintain aspect ratio
            new_height = round(new_width * img_height / img_width)
        elif height:
            new_height = height
            # maintain aspect ratio
            new_width = round(new_height * img_width / img_height)
        else:  # resize by %
            new_width = round(pil_image.size[0] * resize / 100)
            new_height = round(pil_image.size[1] * resize / 100)
        pil_image = pil_image.resize((new_width, new_height), resample=resample_filter)
    screenshot_filename = '{}.{}'.format(image_name, extension)
    screenshot_path = os.path.join(reportdir, screenshot_filename)
    if format == 'PNG':
        pil_image.save(screenshot_path, format=format, optimize=True)
    elif format == 'JPEG':
        if quality is None:
            # Let Pillow use its default JPEG quality.
            pil_image.save(screenshot_path, format=format, optimize=True)
        else:
            pil_image.save(screenshot_path, format=format, optimize=True,
                           quality=quality)
    return screenshot_filename
| 36.59434 | 99 | 0.617685 |
4ba9ce0a53ea794bfd769ba779bce030e089a3e4 | 3,085 | py | Python | pontoon/checks/migrations/0001_initial.py | Tratty/pontoon | ecb903d72f9274f02137b16669cc3c5859f6329c | [
"BSD-3-Clause"
] | 3 | 2020-01-27T12:26:20.000Z | 2022-02-03T09:56:02.000Z | pontoon/checks/migrations/0001_initial.py | texnoman/pontoon-src | 6b40ac229605e99966c3bdd1510b772c89d4de24 | [
"BSD-3-Clause"
] | 1 | 2021-03-24T12:33:03.000Z | 2021-03-24T12:50:19.000Z | pontoon/checks/migrations/0001_initial.py | texnoman/pontoon-src | 6b40ac229605e99966c3bdd1510b772c89d4de24 | [
"BSD-3-Clause"
] | 4 | 2020-01-26T21:28:43.000Z | 2021-06-10T15:25:19.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-05-23 10:25
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create the ``Error`` and ``Warning`` check-result tables.

    Both models share the same shape: a check-library discriminator, a
    message, and a foreign key to the checked translation. Uniqueness per
    (translation, library, message) is enforced at the end.
    """

    initial = True

    dependencies = [
        ("base", "0121_bug_1453999_disable_sync"),
    ]

    operations = [
        migrations.CreateModel(
            name="Error",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                (
                    # Which checking library produced this entry.
                    "library",
                    models.CharField(
                        choices=[
                            (b"p", b"pontoon"),
                            (b"tt", b"translate-toolkit"),
                            (b"cl", b"compare-locales"),
                        ],
                        db_index=True,
                        max_length=20,
                    ),
                ),
                ("message", models.TextField()),
                (
                    "translation",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="errors",
                        to="base.Translation",
                    ),
                ),
            ],
            options={"abstract": False},
        ),
        migrations.CreateModel(
            name="Warning",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                (
                    "library",
                    models.CharField(
                        choices=[
                            (b"p", b"pontoon"),
                            (b"tt", b"translate-toolkit"),
                            (b"cl", b"compare-locales"),
                        ],
                        db_index=True,
                        max_length=20,
                    ),
                ),
                ("message", models.TextField()),
                (
                    "translation",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="warnings",
                        to="base.Translation",
                    ),
                ),
            ],
            options={"abstract": False},
        ),
        # Prevent duplicate check results for the same translation.
        migrations.AlterUniqueTogether(
            name="warning",
            unique_together=set([("translation", "library", "message")]),
        ),
        migrations.AlterUniqueTogether(
            name="error", unique_together=set([("translation", "library", "message")]),
        ),
    ]
| 31.479592 | 87 | 0.363371 |
632601df1f2de2c100a4faeeae3b47a71261b520 | 2,026 | py | Python | towel/templatetags/modelview_list.py | enterstudio/towel | 6892788527b8a111cbf5963e909964aabc96d740 | [
"BSD-3-Clause"
] | null | null | null | towel/templatetags/modelview_list.py | enterstudio/towel | 6892788527b8a111cbf5963e909964aabc96d740 | [
"BSD-3-Clause"
] | null | null | null | towel/templatetags/modelview_list.py | enterstudio/towel | 6892788527b8a111cbf5963e909964aabc96d740 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import, unicode_literals
from django import template
from django.db import models
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from towel.templatetags import towel_resources
# Template tag library for this module; the pagination/ordering helpers
# are shared implementations re-registered from towel_resources so that
# templates can load them from this library as well.
register = template.Library()
register.inclusion_tag('towel/_pagination.html', takes_context=True)(
    towel_resources.pagination)
register.inclusion_tag('towel/_ordering_link.html', takes_context=True)(
    towel_resources.ordering_link)
register.filter(towel_resources.querystring)
@register.filter
def model_row(instance, fields):
    """
    Shows a row in a modelview object list:
    ::
        {% for object in object_list %}
        <tr>
        {% for verbose_name, field in object|model_row:"name,url" %}
        <td>{{ field }}</td>
        {% endfor %}
        </tr>
        {% endfor %}
    """
    for field_name in fields.split(','):
        try:
            field = instance._meta.get_field(field_name)
        except models.FieldDoesNotExist:
            # Not a model field -- fall back to a plain attribute or a
            # zero-argument callable on the instance.
            attribute = getattr(instance, field_name)
            if hasattr(attribute, '__call__'):
                yield (field_name, attribute())
            else:
                yield (field_name, attribute)
            continue
        if isinstance(field, models.ForeignKey):
            related = getattr(instance, field.name)
            cell = related
            if hasattr(related, 'get_absolute_url'):
                # Link to the related object's detail page.
                cell = mark_safe('<a href="%s">%s</a>' % (
                    related.get_absolute_url(),
                    related))
        elif field.choices:
            # Use Django's generated display accessor for choice fields.
            display_method = getattr(instance, 'get_%s_display' % field.name)
            cell = display_method()
        elif isinstance(field, (models.BooleanField, models.NullBooleanField)):
            raw_value = getattr(instance, field.name)
            translations = {
                True: _('yes'),
                False: _('no'),
                None: _('unknown'),
            }
            cell = translations.get(raw_value, raw_value)
        else:
            cell = getattr(instance, field.name)
        yield (field.verbose_name, cell)
| 27.753425 | 76 | 0.563179 |
1fb4a2b6a285fe430c120cba4cd1dc6973abb739 | 1,464 | py | Python | submissions/Hess/myLegos.py | WhittKinley/Legos | 861a2651f481d0463003007694c895d3df3b0fee | [
"MIT"
] | null | null | null | submissions/Hess/myLegos.py | WhittKinley/Legos | 861a2651f481d0463003007694c895d3df3b0fee | [
"MIT"
] | null | null | null | submissions/Hess/myLegos.py | WhittKinley/Legos | 861a2651f481d0463003007694c895d3df3b0fee | [
"MIT"
] | null | null | null | from ev3dev.auto import OUTPUT_A, OUTPUT_B, OUTPUT_C, LargeMotor, MediumMotor
from ev3dev.auto import INPUT_1, INPUT_2, INPUT_3, TouchSensor, ColorSensor
import time
import ev3dev.ev3 as ev3
# Device setup: two touch sensors, a colour sensor and three motors
# driving the claw (medium motor), the arm and the rotating base.
# The sensors are wired but not read in this sequence.
baseTouch = TouchSensor(INPUT_1)
armTouch = TouchSensor(INPUT_2)
colorSensor = ColorSensor(INPUT_3)
clawMotor = MediumMotor(OUTPUT_A)
armMotor = LargeMotor(OUTPUT_B)
baseMotor = LargeMotor(OUTPUT_C)
# All durations and speeds below are hand-tuned for this particular build.
# Claw Opening
ev3.Sound.speak("Now opening claw.").wait()
clawMotor.run_forever(speed_sp = 100)
time.sleep(.5)
clawMotor.stop()
# Arm Moving to down position
ev3.Sound.speak("Now moving arm down.").wait()
armMotor.run_forever(speed_sp = 120)
time.sleep(2.5)
armMotor.stop()
# Claw closing on object to pick up; the motor keeps running (gripping)
# while the arm lifts -- there is deliberately no stop() here.
ev3.Sound.speak("Now closing claw.").wait()
clawMotor.run_forever(speed_sp = -100)
time.sleep(2)
# Arm moving to the up position
ev3.Sound.speak("Now moving arm up.").wait()
armMotor.run_forever(speed_sp = -360)
time.sleep(.85)
armMotor.stop()
# Base moving to center position
ev3.Sound.speak("Now moving base.").wait()
baseMotor.run_forever(speed_sp = -130)
time.sleep(2.25)
baseMotor.stop()
# Claw dropping object.
# NOTE(review): the claw is "released" by stopping the gripping motor;
# whether this actually opens the claw depends on the motor's stop
# behaviour (coast vs. brake) -- confirm on hardware.
ev3.Sound.speak("Now releasing claw.").wait()
clawMotor.stop()
# Base moving to its starting point
ev3.Sound.speak("Now returning to start point.").wait()
baseMotor.run_forever(speed_sp = 130)
time.sleep(2.25)
baseMotor.stop()
# :)
ev3.Sound.speak("Task complete!").wait()
ev3.Sound.speak("Dr. Hooper, Please Give Us an Aye Plus!").wait() | 27.111111 | 77 | 0.757514 |
d0e9379344716925dbfcf863a0bf6be249146425 | 139 | py | Python | test_data/parse/unexpected/method_definitions/argument_with_final/meta_model.py | gillistephan/aas-core-codegen | 5b89ea2ee35aecaca9a1bed7ac81d420cc560f29 | [
"MIT"
] | 5 | 2021-12-29T12:55:34.000Z | 2022-03-01T17:57:21.000Z | test_data/parse/unexpected/method_definitions/argument_with_final/meta_model.py | gillistephan/aas-core-codegen | 5b89ea2ee35aecaca9a1bed7ac81d420cc560f29 | [
"MIT"
] | 10 | 2021-12-29T02:15:55.000Z | 2022-03-09T11:04:22.000Z | test_data/parse/unexpected/method_definitions/argument_with_final/meta_model.py | gillistephan/aas-core-codegen | 5b89ea2ee35aecaca9a1bed7ac81d420cc560f29 | [
"MIT"
] | 2 | 2021-12-29T01:42:12.000Z | 2022-02-15T13:46:33.000Z | class Something:
def do_something(self, x: List[Final[int]]) -> None:
pass
__book_url__ = "dummy"
__book_version__ = "dummy"
| 17.375 | 56 | 0.661871 |
cabce6916c464edf3b8767e6a6282db43ddd035f | 3,056 | py | Python | scripts/tests/py_onnx/keras/export_scripts/upsample2D_enc_dec_mnist_keras_export.py | lauracanalini/eddl | c5efac642e8e1f99b31dfaaacd0a5a058b09923b | [
"MIT"
] | 30 | 2019-10-11T21:03:43.000Z | 2022-02-17T19:56:15.000Z | scripts/tests/py_onnx/keras/export_scripts/upsample2D_enc_dec_mnist_keras_export.py | lauracanalini/eddl | c5efac642e8e1f99b31dfaaacd0a5a058b09923b | [
"MIT"
] | 151 | 2019-10-16T06:47:23.000Z | 2022-03-07T15:15:58.000Z | scripts/tests/py_onnx/keras/export_scripts/upsample2D_enc_dec_mnist_keras_export.py | lauracanalini/eddl | c5efac642e8e1f99b31dfaaacd0a5a058b09923b | [
"MIT"
] | 20 | 2019-10-16T09:37:37.000Z | 2022-02-22T09:47:14.000Z | import argparse
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Input, Conv2D, UpSampling2D, MaxPooling2D
from tensorflow.keras.datasets import mnist
import keras2onnx
# Training settings
parser = argparse.ArgumentParser(description='Keras Conv+Upsample encoder decoder MNIST Example')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
                    help='input batch size for training (default: 64)')
parser.add_argument('--epochs', type=int, default=5, metavar='N',
                    help='number of epochs to train (default: 5)')
parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
                    help='learning rate (default: 0.01)')
parser.add_argument('--no-cuda', action='store_true', default=False,
                    help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
                    help='random seed (default: 1)')
parser.add_argument('--output-path', type=str, default="onnx_models/upsample2D_enc_dec_mnist.onnx",
                    help='Output path to store the onnx file')
parser.add_argument('--output-metric', type=str, default="",
                    help='Output file path to store the metric value obtained in test set')
args = parser.parse_args()
# Load MNIST data
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Scale images to the [0, 1] range
x_train = x_train.astype("float32") / 255
x_test = x_test.astype("float32") / 255
# Prepare images (bs, 28, 28, 1)
x_train = x_train.reshape((x_train.shape[0], 28, 28, 1))
x_test = x_test.reshape((x_test.shape[0], 28, 28, 1))
# Use the input images as target outputs: this is an autoencoder
# reconstruction task, so the class labels are discarded.
y_train = x_train
y_test = x_test
print("Train data shape:", x_train.shape)
print("Train labels shape:", y_train.shape)
print("Test data shape:", x_test.shape)
print("Test labels shape:", y_test.shape)
# Define the convolutional autoencoder
model = Sequential()
model.add(Input(shape=(28, 28, 1)))
# Encoder: two conv + maxpool stages (spatial size 28 -> 14 -> 7)
model.add(Conv2D(32, 3, padding="same", activation="relu"))
model.add(MaxPooling2D(2, 2))
model.add(Conv2D(64, 3, padding="same", activation="relu"))
model.add(MaxPooling2D(2, 2))
# Decoder: two conv + upsample stages back to 28x28; sigmoid keeps the
# reconstructed pixels in [0, 1] to match the scaled inputs.
model.add(Conv2D(64, 3, padding="same", activation="relu"))
model.add(UpSampling2D((2, 2)))
model.add(Conv2D(32, 3, padding="same", activation="relu"))
model.add(UpSampling2D((2, 2)))
model.add(Conv2D(1, 1, padding="same", activation="sigmoid"))
model.compile(loss='mse',
              optimizer="adam",
              metrics=[])
model.summary()
# Training
model.fit(x_train, y_train, batch_size=args.batch_size, epochs=args.epochs)
# Evaluation
eval_loss = model.evaluate(x_test, y_test)
print("Evaluation result: Loss:", eval_loss)
# In case of providing output metric file, store the test mse value
if args.output_metric != "":
    with open(args.output_metric, 'w') as ofile:
        ofile.write(str(eval_loss))
# Convert to ONNX
onnx_model = keras2onnx.convert_keras(model, "upsample2D_mnist", debug_mode=1)
# Save ONNX to file
keras2onnx.save_model(onnx_model, args.output_path)
| 36.819277 | 99 | 0.697644 |
e1d8dd311c2168f5e230816b35a1a02b960900d6 | 4,284 | py | Python | client.py | mikepage/salto | 84161e417d406d10ead0977786553f202def8896 | [
"MIT"
] | 3 | 2021-02-02T15:46:56.000Z | 2021-04-28T06:55:33.000Z | client.py | mikepage/salto | 84161e417d406d10ead0977786553f202def8896 | [
"MIT"
] | null | null | null | client.py | mikepage/salto | 84161e417d406d10ead0977786553f202def8896 | [
"MIT"
] | 1 | 2021-01-18T11:57:38.000Z | 2021-01-18T11:57:38.000Z | import io
import socket
from logging import Logger
from time import sleep
from typing import Optional
from salto import common
from salto.message import Message
from salto.response import Response
class Client:
    """TCP client speaking the SALTO wire protocol: requests are framed as
    STX + payload + ETX + LRC, and the peer answers each write with a
    one-byte ACK/NAK acknowledgement (ENQ is used as a readiness probe).
    """

    MAX_RETRIES = 3
    CONNECT_TIMEOUT = 10  # seconds to connect the server
    WRITE_TIMEOUT = 10  # seconds to write a request
    READ_TIMEOUT = 30  # seconds to read some bytes. Must including waiting time to place the card

    class InvalidAcknowledgement(Exception):
        # Raised when the peer answers with a byte that is neither ACK nor NAK.
        pass

    # client = Client("192.168.1.120:8090")
    def __init__(self, endpoint: str, logger: Optional[Logger] = None, lrc_skip: bool = False):
        """endpoint is a "host:port" string; lrc_skip sends the protocol's
        LRC-skip byte instead of computing the checksum.
        """
        host, _, port = endpoint.partition(":")
        self.host: str = host
        self.port: int = int(port)
        self.logger = logger
        self.lrc_skip = lrc_skip

    @property
    def is_ready(self) -> bool:
        """True when the peer answers an ENQ probe with ACK."""
        return self.send_request(common.ENQ).is_ack

    def create_connection(self) -> socket.socket:
        """Open a fresh TCP connection to the configured endpoint."""
        return socket.create_connection((self.host, self.port), Client.CONNECT_TIMEOUT)

    def send_request(self, request: bytes) -> Response:
        """Send raw request bytes over a new connection and return the reply."""
        with self.create_connection() as conn:
            return self._send_request(conn, request)

    def send_message(self, message: Message) -> Response:
        """Frame a high-level Message and send it."""
        return self.send_request(self.encode_message(message))

    def encode_message(self, message: Message) -> bytes:
        """Wrap the message payload in STX/ETX framing and append the LRC."""
        message_bytes = bytes(message)
        lrc = common.LRC_SKIP if self.lrc_skip else common.lrc(message_bytes)
        return common.STX + message_bytes + common.ETX + lrc

    def _send_request(self, conn: socket.socket, request: bytes, attempt: int = 1) -> Response:
        """Write the request and interpret the one-byte acknowledgement,
        retrying (up to MAX_RETRIES attempts) after a NAK.
        """
        self._debug("out", request)
        conn.settimeout(Client.WRITE_TIMEOUT)
        conn.sendall(request)
        conn.settimeout(Client.READ_TIMEOUT)
        acknowledgement = conn.recv(1)
        self._debug("in", acknowledgement)
        if request == common.ENQ and acknowledgement in [common.ACK, common.NAK]:
            # A bare ENQ probe is answered by the ack byte itself.
            return Response(acknowledgement)
        elif acknowledgement == common.ACK:
            # Request accepted; a framed reply follows on the same socket.
            return self.read_stx(conn)
        elif acknowledgement == common.NAK:
            if attempt < Client.MAX_RETRIES:
                # Wait for the peer to become ready again before retrying.
                self.await_ready(conn)
                return self._send_request(conn, request, attempt + 1)
            else:
                return Response(acknowledgement)
        else:
            raise Client.InvalidAcknowledgement(f"Invalid SALTO acknowledgement: {acknowledgement!r}")

    def read_stx(self, conn: socket.socket) -> Response:
        """Read one framed reply: all bytes up to and including ETX, plus
        the trailing LRC byte.
        """
        with io.BytesIO() as buffer:
            while True:
                conn.settimeout(Client.READ_TIMEOUT)
                current_control_char = conn.recv(1)
                buffer.write(current_control_char)
                # Read until ETX
                if current_control_char == common.ETX:
                    break
            # Read the LCR char
            conn.settimeout(Client.READ_TIMEOUT)
            buffer.write(conn.recv(1))
            response = buffer.getvalue()
        self._debug("in", response)
        return Response(response)

    def await_ready(self, conn: socket.socket) -> None:
        """Poll the peer with ENQ until it ACKs or MAX_RETRIES is reached."""
        attempt = 1
        while True:
            self._debug("out", common.ENQ)
            conn.settimeout(Client.WRITE_TIMEOUT)
            conn.sendall(common.ENQ)
            conn.settimeout(Client.READ_TIMEOUT)
            acknowledgement = conn.recv(1)
            self._debug("in", acknowledgement)
            if acknowledgement == common.ACK or attempt >= Client.MAX_RETRIES:
                break
            attempt += 1
            sleep(0.2)

    def _debug(self, direction: str, message: bytes) -> None:
        """Log the wire traffic (when a logger is set) with the control
        bytes rendered as readable mnemonics.
        """
        if self.logger is None:
            return
        message = message.replace(common.STX, b"STX ")
        message = message.replace(common.ETX, b" ETX")
        message = message.replace(common.ENQ, b"ENQ")
        message = message.replace(common.ACK, b"ACK")
        message = message.replace(common.NAK, b"NAK")
        message = message.replace(common.LRC_SKIP, b"LRC_SKIP")
        message = message.replace(Message.FIELD_DELIMITER, b"|")
        self.logger.debug(f"[SALTO][{self.host}:{self.port}] {'->' if direction == 'out' else '<-'} {message!r}")
| 35.404959 | 113 | 0.626984 |
b59cd576dcadf57bebff93646287cb059ac4c492 | 2,141 | py | Python | main.py | igorplyukhin/Cloudy | d4acb2fe6e992a9ec95518ab2a3d240c610043a8 | [
"MIT"
] | null | null | null | main.py | igorplyukhin/Cloudy | d4acb2fe6e992a9ec95518ab2a3d240c610043a8 | [
"MIT"
] | null | null | null | main.py | igorplyukhin/Cloudy | d4acb2fe6e992a9ec95518ab2a3d240c610043a8 | [
"MIT"
] | null | null | null | import click
from config import STORAGE
from ApiError import ApiError
# TODO UnitTests
# Root command group; the subcommands below attach via @cli.command().
@click.group()
def cli():
    pass
@cli.command()
@click.option('-lp', '--local_path', default='.', help='Path to save file')
@click.argument('cloud_path')
def download_file(local_path, cloud_path):
    # Download cloud_path from the configured STORAGE backend into
    # local_path (current directory by default).
    click.secho('Downloading...', fg='green')
    STORAGE.download_file(local_path, cloud_path)
    click.secho('Success', fg='green')
@cli.command()
@click.option('-z', '--is_zipped', is_flag=True, help='Specify if you want to compress file')
@click.argument('local_path')
@click.argument('cloud_path')
def upload_file(local_path, cloud_path, is_zipped):
    """
    LOCALPATH is the path of a file to upload
    CLOUDPATH is the path with the name of a new file in your cloud
    """
    # With -z the file is compressed by the storage backend before upload.
    if is_zipped:
        STORAGE.upload_zip_file(local_path, cloud_path)
    else:
        STORAGE.upload_file(local_path, cloud_path)
@cli.command()
# Typo fix in user-facing help text: "eou" -> "you".
@click.option('-z', '--is_zipped', is_flag=True, help='Specify if you want to compress dir')
@click.argument('local_path')
@click.argument('cloud_path')
def upload_dir(local_path, cloud_path, is_zipped):
    """
    LOCALPATH is the path of a directory to upload
    CLOUDPATH is the path with the name of a new directory in your cloud
    """
    # With -z the directory is compressed by the storage backend before
    # upload; mirrors upload_file above.
    if is_zipped:
        STORAGE.upload_zip_dir(local_path, cloud_path)
    else:
        STORAGE.upload_dir(local_path, cloud_path)
@cli.command()
@click.argument('cloud_path')
def get_dir(cloud_path):
    """
    Prints cloud folder contents
    :param cloud_path: Specify folder to show contents
    Yandex root folder = '/'; Dropbox root folder ''
    """
    resp = STORAGE.get_dir(cloud_path)
    # The two supported backends return different JSON shapes: one carries
    # an 'entries' list (presumably Dropbox), the other nests items under
    # '_embedded' (presumably Yandex) -- dispatch by KeyError fallback.
    # TODO(review): confirm the backend-to-shape mapping.
    try:
        for item in resp.json()['entries']:
            click.secho(item['path_display'])
    except KeyError:
        for item in resp.json()['_embedded']['items']:
            click.secho(item['path'])
@cli.command()
@click.argument('cloud_path')
def create_dir(cloud_path):
    # Print the backend's raw response so the user sees the API result.
    print(STORAGE.create_cloud_dir(cloud_path))
if __name__ == '__main__':
    # Entry point: run the click group and turn expected failures into
    # red console messages instead of tracebacks.
    try:
        cli()
    except ApiError as e:
        click.secho(f'ApiError: {e.description}', fg='red', bold=True)
    except FileNotFoundError:
        click.secho('File or directory does not exist', fg='red', bold=True)
| 26.432099 | 93 | 0.677254 |
456142d237684f67a52f370e2720a880665b4f25 | 1,844 | py | Python | smooth_edge.py | Derek-Wds/training_CartoonGAN | fb7f75cc389833fd5eb08bdb56f9d6e44ecc5dac | [
"MIT"
] | 2 | 2019-07-07T14:57:23.000Z | 2019-07-14T04:11:15.000Z | smooth_edge.py | Derek-Wds/training_CartoonGAN | fb7f75cc389833fd5eb08bdb56f9d6e44ecc5dac | [
"MIT"
] | 10 | 2019-10-23T05:44:51.000Z | 2022-03-11T23:52:00.000Z | smooth_edge.py | Derek-Wds/training_CartoonGAN | fb7f75cc389833fd5eb08bdb56f9d6e44ecc5dac | [
"MIT"
] | null | null | null | import numpy as np
import cv2, os
from tqdm import tqdm
# edge smoothing
def smooth_edge(path, img_size=256):
    """Gaussian-smooth the edge regions of every cartoon image in *path*.

    For each image: detect edges on a grayscale copy (Canny), dilate them,
    then replace each pixel inside the dilated edge mask with a Gaussian-
    weighted average of its 5x5 neighbourhood. Results are written to
    'dataset/smooth_cartoon_imgs' under the original file names.

    :param path: directory containing the input images
    :param img_size: side length the images are resized to before smoothing
    """
    file_list = os.listdir(path)
    save_path = 'dataset/smooth_cartoon_imgs'
    # BUG FIX: the original called os.path.abspath() with no argument,
    # which raises TypeError. makedirs(..., exist_ok=True) also removes
    # the check-then-create race of the old exists()/mkdir() pair.
    os.makedirs(save_path, exist_ok=True)
    kernel_size = 5
    kernel = np.ones((kernel_size, kernel_size), np.uint8)
    # Separable Gaussian kernel turned into a 2D weighting matrix.
    gauss = cv2.getGaussianKernel(kernel_size, 0)
    gauss = gauss * gauss.transpose(1, 0)
    for f in tqdm(file_list):
        file_name = os.path.basename(f)
        # RGB image, resized to the working resolution; reflection padding
        # of 2 (= kernel_size // 2) lets the 5x5 window slide over borders.
        rgb_img = cv2.imread(path + '/' + f)
        rgb_img = cv2.resize(rgb_img, (img_size, img_size), interpolation=cv2.INTER_CUBIC)
        pad_img = np.pad(rgb_img, ((2, 2), (2, 2), (0, 0)), mode='reflect')
        # Grayscale copy used only for edge detection.
        gray_img = cv2.imread(path + '/' + f, 0)
        gray_img = cv2.resize(gray_img, (img_size, img_size), interpolation=cv2.INTER_CUBIC)
        # Detect and dilate the edges to get the region to smooth.
        edges = cv2.Canny(gray_img, 100, 200)
        dilation = cv2.dilate(edges, kernel)
        # Gaussian smoothing in dilated edge areas only.
        result = np.copy(rgb_img)
        idx = np.where(dilation != 0)
        for i in range(np.sum(dilation != 0)):
            for channel in range(3):
                result[idx[0][i], idx[1][i], channel] = np.sum(np.multiply(
                    pad_img[idx[0][i]:idx[0][i] + kernel_size,
                            idx[1][i]:idx[1][i] + kernel_size, channel], gauss))
        cv2.imwrite(os.path.join(save_path, file_name), result)
if __name__ == "__main__":
    # Process the default cartoon dataset directory when run as a script.
    smooth_edge('dataset/cartoon_imgs')
| 40.086957 | 154 | 0.61551 |
99ef11f3111487434dba651fe48e4c0e623de25d | 5,269 | py | Python | setup.py | QihongL/brainiak | cdc53fa15eb779c7b3ac7c3e237e403b3237c73e | [
"Apache-2.0"
] | 2 | 2019-07-04T20:10:26.000Z | 2020-09-23T13:22:06.000Z | setup.py | QihongL/brainiak | cdc53fa15eb779c7b3ac7c3e237e403b3237c73e | [
"Apache-2.0"
] | 1 | 2021-04-09T15:31:27.000Z | 2021-04-09T15:31:27.000Z | setup.py | QihongL/brainiak | cdc53fa15eb779c7b3ac7c3e237e403b3237c73e | [
"Apache-2.0"
] | 1 | 2018-07-11T13:04:22.000Z | 2018-07-11T13:04:22.000Z | from distutils import sysconfig
from setuptools import setup, Extension, find_packages
from setuptools.command.build_ext import build_ext
import os
import sys
import setuptools
from copy import deepcopy
# Fail fast on unsupported interpreters rather than partway through install.
assert sys.version_info >= (3, 5), (
    "Please use Python version 3.5 or higher, "
    "lower versions are not supported"
)

here = os.path.abspath(os.path.dirname(__file__))

# Get the long description from the README file
with open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:
    long_description = f.read()
# Native extension modules (C++ and Cython sources) compiled at build time
# by the BuildExt command defined below.
ext_modules = [
    Extension(
        'brainiak.factoranalysis.tfa_extension',
        ['brainiak/factoranalysis/tfa_extension.cpp'],
    ),
    Extension(
        'brainiak.fcma.fcma_extension',
        ['brainiak/fcma/src/fcma_extension.cc'],
    ),
    Extension(
        'brainiak.fcma.cython_blas',
        ['brainiak/fcma/cython_blas.pyx'],
    ),
    Extension(
        'brainiak.eventseg._utils',
        ['brainiak/eventseg/_utils.pyx'],
    ),
]
# As of Python 3.6, CCompiler has a `has_flag` method.
# cf http://bugs.python.org/issue26689
def has_flag(compiler, flagname):
    """Return a boolean indicating whether a flag name is supported on
    the specified compiler.
    """
    import tempfile
    # Compile a trivial translation unit with the candidate flag; a
    # CompileError means the compiler rejected the flag.
    with tempfile.NamedTemporaryFile('w', suffix='.cpp') as f:
        f.write('int main (int argc, char **argv) { return 0; }')
        # NOTE(review): the temp file is still open while the compiler
        # reads it, which works on POSIX but may fail on Windows --
        # confirm whether Windows builds are supported.
        try:
            compiler.compile([f.name], extra_postargs=[flagname])
        except setuptools.distutils.errors.CompileError:
            return False
    return True
def cpp_flag(compiler):
    """Return the -std=c++[11/14] compiler flag.
    The c++14 is prefered over c++11 (when it is available).
    """
    # Probe the newest standard first and fall back to the older one.
    for candidate in ('-std=c++14', '-std=c++11'):
        if has_flag(compiler, candidate):
            return candidate
    raise RuntimeError('Unsupported compiler -- at least C++11 support '
                       'is needed!')
class BuildExt(build_ext):
    """A custom build extension for adding compiler-specific options."""
    # Base flags per compiler type; extended at class-definition time
    # depending on the host compiler and platform.
    c_opts = {
        'unix': ['-g0', '-fopenmp'],
    }
    # FIXME Workaround for using the Intel compiler by setting the CC env var
    # Other uses of ICC (e.g., cc binary linked to icc) are not supported
    if (('CC' in os.environ and 'icc' in os.environ['CC'])
            or 'icc' in sysconfig.get_config_var('CC')):
        c_opts['unix'] += ['-lirc', '-lintlc']
    if sys.platform == 'darwin':
        c_opts['unix'] += ['-stdlib=libc++', '-mmacosx-version-min=10.9',
                           '-ftemplate-depth-1024']

    def build_extensions(self):
        """Apply the per-compiler flags (plus the C++ standard flag for
        C++ sources) to every extension, then delegate to setuptools.
        """
        ct = self.compiler.compiler_type
        opts = self.c_opts.get(ct, [])
        if ct == 'unix':
            opts.append('-DVERSION_INFO="%s"' %
                        self.distribution.get_version())
        for ext in self.extensions:
            # deepcopy so the per-extension appends below do not leak
            # into the shared opts list.
            ext.extra_compile_args = deepcopy(opts)
            ext.extra_link_args = deepcopy(opts)
            lang = ext.language or self.compiler.detect_language(ext.sources)
            if lang == 'c++':
                ext.extra_compile_args.append(cpp_flag(self.compiler))
                ext.extra_link_args.append(cpp_flag(self.compiler))
        build_ext.build_extensions(self)

    def finalize_options(self):
        """Add the NumPy and pybind11 include directories; imported lazily
        so setup_requires can install them before they are needed.
        """
        super().finalize_options()
        import numpy
        import pybind11
        self.include_dirs.extend([
            numpy.get_include(),
            pybind11.get_include(user=True),
            pybind11.get_include(),
        ])
# Package metadata and build configuration for BrainIAK.
setup(
    name='brainiak',
    # Version is derived from the git tag by setuptools_scm.
    use_scm_version=True,
    setup_requires=[
        'cython',
        # https://github.com/numpy/numpy/issues/14189
        # https://github.com/brainiak/brainiak/issues/493
        'numpy!=1.17.*,<1.20',
        'pybind11>=1.7',
        'scipy!=1.0.0',
        'setuptools_scm',
    ],
    install_requires=[
        'cython',
        # Previous versions of the Anaconda package fail on MacOS:
        # https://travis-ci.org/brainiak/brainiak/jobs/545838666
        'mpi4py>=3',
        'nitime',
        # https://github.com/numpy/numpy/issues/14189
        # https://github.com/brainiak/brainiak/issues/493
        'numpy!=1.17.*,<1.20',
        'scikit-learn[alldeps]>=0.18',
        # See https://github.com/scipy/scipy/pull/8082
        'scipy!=1.0.0',
        'statsmodels',
        'pymanopt',
        'theano>=1.0.4',  # See https://github.com/Theano/Theano/pull/6671
        'pybind11>=1.7',
        'psutil',
        'nibabel',
        'joblib',
        'wheel',  # See https://github.com/astropy/astropy-helpers/issues/501
        'pydicom',
    ],
    extras_require={
        'matnormal': [
            'tensorflow',
            'tensorflow_probability',
        ],
    },
    author='Princeton Neuroscience Institute and Intel Corporation',
    author_email='mihai.capota@intel.com',
    url='http://brainiak.org',
    description='Brain Imaging Analysis Kit',
    license='Apache 2',
    keywords='neuroscience, algorithm, fMRI, distributed, scalable',
    long_description=long_description,
    ext_modules=ext_modules,
    # Use the flag-probing builder defined above for the C++/Cython extensions.
    cmdclass={'build_ext': BuildExt},
    packages=find_packages(),
    include_package_data=True,
    python_requires='>=3.5',
    zip_safe=False,
)
| 31.363095 | 77 | 0.607705 |
0929653f3d7fdf2e2ae8981d87d12144242b7828 | 6,444 | py | Python | qa/rpc-tests/bip65-cltv-p2p.py | danielaTorrezE/BCH | 704a5115998b68c662440b9fdd232579dd884d20 | [
"MIT"
] | null | null | null | qa/rpc-tests/bip65-cltv-p2p.py | danielaTorrezE/BCH | 704a5115998b68c662440b9fdd232579dd884d20 | [
"MIT"
] | null | null | null | qa/rpc-tests/bip65-cltv-p2p.py | danielaTorrezE/BCH | 704a5115998b68c662440b9fdd232579dd884d20 | [
"MIT"
] | null | null | null | #!/usr/bin/env python2
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from test_framework.test_framework import ComparisonTestFramework
from test_framework.util import *
from test_framework.mininode import CTransaction, NetworkThread
from test_framework.blocktools import create_coinbase, create_block
from test_framework.comptool import TestInstance, TestManager
from test_framework.script import CScript, OP_1NEGATE, OP_CHECKLOCKTIMEVERIFY, OP_DROP
from binascii import hexlify, unhexlify
import cStringIO
import time
def cltv_invalidate(tx):
    '''Modify the signature in vin 0 of the tx to fail CLTV

    Prepends -1 CLTV DROP in the scriptSig itself.
    '''
    # Decompose the existing scriptSig, then rebuild it with a failing
    # "-1 CHECKLOCKTIMEVERIFY DROP" prefix in front of the original ops.
    original_ops = list(CScript(tx.vin[0].scriptSig))
    failing_prefix = [OP_1NEGATE, OP_CHECKLOCKTIMEVERIFY, OP_DROP]
    tx.vin[0].scriptSig = CScript(failing_prefix + original_ops)
'''
This test is meant to exercise BIP65 (CHECKLOCKTIMEVERIFY)
Connect to a single node.
Mine 2 (version 3) blocks (save the coinbases for later).
Generate 98 more version 3 blocks, verify the node accepts.
Mine 749 version 4 blocks, verify the node accepts.
Check that the new CLTV rules are not enforced on the 750th version 4 block.
Check that the new CLTV rules are enforced on the 751st version 4 block.
Mine 199 new version blocks.
Mine 1 old-version block.
Mine 1 new version block.
Mine 1 old version block, see that the node rejects.
'''
class BIP65Test(ComparisonTestFramework):
    """Exercise BIP65 (CHECKLOCKTIMEVERIFY) activation via the comparison
    test framework; see the module docstring for the block schedule."""

    def __init__(self):
        # Single node is enough: we only compare its acceptance decisions.
        self.num_nodes = 1

    def setup_network(self):
        # Must set the blockversion for this test
        self.nodes = start_nodes(1, self.options.tmpdir,
                                 extra_args=[['-debug', '-whitelist=127.0.0.1', '-blockversion=3']],
                                 binary=[self.options.testbinary])

    def run_test(self):
        # Drive the yielded TestInstances from get_tests() against the node.
        test = TestManager(self, self.options.tmpdir)
        test.add_all_connections(self.nodes)
        NetworkThread().start() # Start up network handling in another thread
        test.run()

    def create_transaction(self, node, coinbase, to_address, amount):
        # Spend the coinbase of the given block to `to_address`, returning
        # the signed transaction deserialized into a CTransaction object.
        from_txid = node.getblock(coinbase)['tx'][0]
        inputs = [{ "txid" : from_txid, "vout" : 0}]
        outputs = { to_address : amount }
        rawtx = node.createrawtransaction(inputs, outputs)
        signresult = node.signrawtransaction(rawtx, None, None, "ALL")
        tx = CTransaction()
        f = cStringIO.StringIO(unhexlify(signresult['hex']))
        tx.deserialize(f)
        return tx

    def get_tests(self):
        # Generator of TestInstances; each yield hands a batch of blocks plus
        # the expected accept/reject verdict to the TestManager.
        self.coinbase_blocks = self.nodes[0].generate(2)
        # The trailing "L" marks a Python 2 long literal embedded in the hash string.
        self.tip = int ("0x" + self.nodes[0].getbestblockhash() + "L", 0)
        self.nodeaddress = self.nodes[0].getnewaddress()
        self.last_block_time = time.time()

        ''' 98 more version 3 blocks '''
        test_blocks = []
        for i in xrange(98):
            block = create_block(self.tip, create_coinbase(2), self.last_block_time + 1)
            block.nVersion = 3
            block.rehash()
            block.solve()
            test_blocks.append([block, True])
            self.last_block_time += 1
            self.tip = block.sha256
        yield TestInstance(test_blocks, sync_every_block=False)

        ''' Mine 74 version 4 blocks '''
        test_blocks = []
        for i in xrange(74):
            block = create_block(self.tip, create_coinbase(2), self.last_block_time + 1)
            block.nVersion = 4
            block.rehash()
            block.solve()
            test_blocks.append([block, True])
            self.last_block_time += 1
            self.tip = block.sha256
        yield TestInstance(test_blocks, sync_every_block=False)

        '''
        Check that the new CLTV rules are not enforced in the 75th
        version 3 block.
        '''
        # A CLTV-violating spend must still be accepted before activation.
        spendtx = self.create_transaction(self.nodes[0],
                self.coinbase_blocks[0], self.nodeaddress, 1.0)
        cltv_invalidate(spendtx)
        spendtx.rehash()
        block = create_block(self.tip, create_coinbase(2), self.last_block_time + 1)
        block.nVersion = 4
        block.vtx.append(spendtx)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.rehash()
        block.solve()
        self.last_block_time += 1
        self.tip = block.sha256
        yield TestInstance([[block, True]])

        '''
        Check that the new CLTV rules are enforced in the 76th version 4
        block.
        '''
        # After activation the same CLTV-violating spend must be rejected.
        spendtx = self.create_transaction(self.nodes[0],
                self.coinbase_blocks[1], self.nodeaddress, 1.0)
        cltv_invalidate(spendtx)
        spendtx.rehash()
        block = create_block(self.tip, create_coinbase(1), self.last_block_time + 1)
        block.nVersion = 4
        block.vtx.append(spendtx)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.rehash()
        block.solve()
        self.last_block_time += 1
        yield TestInstance([[block, False]])

        ''' Mine 19 new version blocks on last valid tip '''
        test_blocks = []
        for i in xrange(19):
            block = create_block(self.tip, create_coinbase(1), self.last_block_time + 1)
            block.nVersion = 4
            block.rehash()
            block.solve()
            test_blocks.append([block, True])
            self.last_block_time += 1
            self.tip = block.sha256
        yield TestInstance(test_blocks, sync_every_block=False)

        ''' Mine 1 old version block '''
        block = create_block(self.tip, create_coinbase(1), self.last_block_time + 1)
        block.nVersion = 3
        block.rehash()
        block.solve()
        self.last_block_time += 1
        self.tip = block.sha256
        yield TestInstance([[block, True]])

        ''' Mine 1 new version block '''
        block = create_block(self.tip, create_coinbase(1), self.last_block_time + 1)
        block.nVersion = 4
        block.rehash()
        block.solve()
        self.last_block_time += 1
        self.tip = block.sha256
        yield TestInstance([[block, True]])

        ''' Mine 1 old version block, should be invalid '''
        # 951 of the last 1000 blocks are version >= 4, so version 3 is now rejected.
        block = create_block(self.tip, create_coinbase(1), self.last_block_time + 1)
        block.nVersion = 3
        block.rehash()
        block.solve()
        self.last_block_time += 1
        yield TestInstance([[block, False]])
# Script entry point: run the BIP65 comparison test when executed directly.
if __name__ == '__main__':
    BIP65Test().main()
| 36.822857 | 100 | 0.630975 |
313143a66ae5548ce4c264a7489e3ee3a41d570a | 11,642 | py | Python | MetamorphicTests/all_mutants/sales_forecasting_file/142.py | anuragbms/Sales-forecasting-with-RNNs | 22b4639ecbb48381af53326ace94a3538201b586 | [
"Apache-2.0"
] | null | null | null | MetamorphicTests/all_mutants/sales_forecasting_file/142.py | anuragbms/Sales-forecasting-with-RNNs | 22b4639ecbb48381af53326ace94a3538201b586 | [
"Apache-2.0"
] | null | null | null | MetamorphicTests/all_mutants/sales_forecasting_file/142.py | anuragbms/Sales-forecasting-with-RNNs | 22b4639ecbb48381af53326ace94a3538201b586 | [
"Apache-2.0"
] | 1 | 2022-02-06T14:59:43.000Z | 2022-02-06T14:59:43.000Z | def gen_mutants():
    # Deliberately mutated LSTM sales-forecasting script (metamorphic-testing
    # mutant); do not "fix" logic differences from the reference version.
    import tensorflow as tf
    import pandas
    import numpy as np
    # --- Input files, model output path and hyper-parameters ---
    DATAFILE_TRAIN = 'mock_kaggle_edit_train.csv'
    DATAFILE_VALIDATE = 'mock_kaggle_edit_validate.csv'
    TRAINED_MODEL_PATH = 'savedModel'
    TIME_STEPS = 10
    NUMBER_OF_DAYS_TO_FORECAST = 1
    BATCH_SIZE = 100
    NUM_EPOCHS = 100
    LSTM_UNITS = 250
    TENSORBOARD_LOGDIR = 'tensorboard_log'
    # --- Load train/validation CSVs and pull out the columns used below ---
    data_train = pandas.read_csv(DATAFILE_TRAIN)
    data_validate = pandas.read_csv(DATAFILE_VALIDATE)
    data_train.head()
    numTrainingData = len(data_train)
    numValidationData = len(data_validate)
    trainingData_date = data_train['date'][0:numTrainingData]
    trainingData_sales = data_train['sales'][0:numTrainingData]
    trainindData_price = data_train['price'][0:numTrainingData]
    validationData_date = data_validate['date'][0:numValidationData]
    validationData_sales = data_validate['sales'][0:numValidationData]
    validationData_price = data_validate['price'][0:numValidationData]
    trainingData_sales.head()
    print(len(trainingData_sales))
    print(len(validationData_sales))
    # --- Min/range normalisation; validation uses the training statistics ---
    trainingData_sales_min = min(trainingData_sales)
    trainingData_sales_max = max(trainingData_sales)
    trainingData_sales_range = trainingData_sales_max - trainingData_sales_min
    trainingData_sales_normalised = [(i - trainingData_sales_min) / trainingData_sales_range for i in trainingData_sales]
    validationData_sales_normalised = [(i - trainingData_sales_min) / trainingData_sales_range for i in validationData_sales]
    print('Min:', trainingData_sales_min)
    print('Range:', trainingData_sales_max - trainingData_sales_min)
    # --- Slice the series into (TIME_STEPS -> NUMBER_OF_DAYS_TO_FORECAST) windows ---
    trainingDataSequence_sales = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, TIME_STEPS, 1))
    targetDataSequence_sales = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, NUMBER_OF_DAYS_TO_FORECAST))
    start = 0
    for i in range(TIME_STEPS, (len(trainingData_sales) - NUMBER_OF_DAYS_TO_FORECAST) + 1):
        trainingDataSequence_sales[start,:,0] = trainingData_sales_normalised[start:i]
        targetDataSequence_sales[start] = trainingData_sales_normalised[i:i + NUMBER_OF_DAYS_TO_FORECAST]
        start = start + 1
    [trainingDataSequence_sales[i,:,0] for i in range(3)]
    [targetDataSequence_sales[i] for i in range(3)]
    # --- Shuffle the training windows (inputs and targets with the same permutation) ---
    a = np.arange(len(targetDataSequence_sales))
    np.random.shuffle(a)
    trainingDataSequence_sales_shuffle = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, TIME_STEPS, 1))
    targetDataSequence_sales_shuffle = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, NUMBER_OF_DAYS_TO_FORECAST))
    loc = 0
    for i in a:
        trainingDataSequence_sales_shuffle[loc] = trainingDataSequence_sales[i]
        targetDataSequence_sales_shuffle[loc] = targetDataSequence_sales[i]
        loc += 1
    trainingDataSequence_sales = trainingDataSequence_sales_shuffle
    targetDataSequence_sales = targetDataSequence_sales_shuffle
    # --- Build the validation windows the same way (no shuffling) ---
    validationDataSequence_sales = np.zeros(shape=(((len(validationData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, TIME_STEPS, 1))
    validationDataSequence_sales_target = np.zeros(shape=(((len(validationData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, NUMBER_OF_DAYS_TO_FORECAST))
    start = 0
    for i in range(TIME_STEPS, (len(validationData_sales) - NUMBER_OF_DAYS_TO_FORECAST) + 1):
        validationDataSequence_sales[start,:,0] = validationData_sales_normalised[start:i]
        validationDataSequence_sales_target[start] = validationData_sales_normalised[i:i + NUMBER_OF_DAYS_TO_FORECAST]
        start += 1
    # --- TF1 graph: LSTM -> affine readout, MSE loss, Adam optimiser ---
    tf.reset_default_graph()
    inputSequencePlaceholder = tf.placeholder(dtype=tf.float32, shape=(None, TIME_STEPS, 1), name='inputSequencePlaceholder')
    targetPlaceholder = tf.placeholder(dtype=tf.float32, shape=(None, NUMBER_OF_DAYS_TO_FORECAST), name='targetPlaceholder')
    cell = tf.nn.rnn_cell.LSTMCell(num_units=LSTM_UNITS, name='LSTM_cell')
    (output, state) = tf.nn.dynamic_rnn(cell=cell, inputs=inputSequencePlaceholder, dtype=tf.float32)
    # Only the last time step's output feeds the readout layer.
    lastCellOutput = output[:,-1,:]
    print('', output)
    print('state:', state)
    print('lastCellOutput:', lastCellOutput)
    weights = tf.Variable(initial_value=tf.truncated_normal(shape=(LSTM_UNITS, NUMBER_OF_DAYS_TO_FORECAST)))
    bias = tf.Variable(initial_value=tf.ones(shape=NUMBER_OF_DAYS_TO_FORECAST))
    forecast = tf.add(x=tf.matmul(a=lastCellOutput, b=weights), y=bias, name='forecast_normalised_scale')
    # Undo the min/range normalisation to report forecasts in sales units.
    forecast_originalScale = tf.add(x=forecast * trainingData_sales_range, y=trainingData_sales_min, name='forecast_original_scale')
    print(forecast)
    print(forecast_originalScale)
    loss = tf.reduce_mean(tf.squared_difference(x=forecast, y=targetPlaceholder), name='loss_comp')
    tf.summary.scalar(tensor=loss, name='loss')
    optimizer = tf.train.AdamOptimizer(learning_rate=0.1)
    minimize_step = optimizer.minimize(loss)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        tensorboard_writer = tf.summary.FileWriter(TENSORBOARD_LOGDIR, sess.graph)
        all_summary_ops = tf.summary.merge_all()
        numSteps = 0
        for e in range(NUM_EPOCHS):
            print('starting training for epoch:', e + 1)
            startLocation = 0
            iteration = 0
            # Full-batch passes over the shuffled training windows.
            for iteration in range(int(len(targetDataSequence_sales) / BATCH_SIZE)):
                print('epoch:', e + 1, ' iteration:', iteration + 1)
                trainingBatchInput = trainingDataSequence_sales[startLocation:startLocation + BATCH_SIZE,:,:]
                trainingBatchTarget = targetDataSequence_sales[startLocation:startLocation + BATCH_SIZE]
                (_, lsBatch, forecastBatch, forecastBatch_originalScale, summary_values) = sess.run([minimize_step, loss, forecast, forecast_originalScale, all_summary_ops], feed_dict={inputSequencePlaceholder: trainingBatchInput, \
                    targetPlaceholder: trainingBatchTarget})
                tensorboard_writer.add_summary(summary_values, numSteps)
                numSteps += 1
                if (iteration + 1) % 1 == 0:
                    print('got a loss of:', lsBatch)
                    print('the forecast of first 5 normalised are:', forecastBatch[0:5])
                    print('while the actuals were normalised :', trainingBatchTarget[0:5])
                    print('the forecast of first 5 orignal scale are:', forecastBatch_originalScale[0:5])
                    print('while the actuals were original scale :', (trainingBatchTarget[0:5] * trainingData_sales_range) + trainingData_sales_min)
                startLocation += BATCH_SIZE
            # Trailing partial batch, if the data size is not a multiple of BATCH_SIZE.
            if len(targetDataSequence_sales) > startLocation:
                print('epoch:', e + 1, ' iteration:', iteration + 1)
                trainingBatchInput = trainingDataSequence_sales[startLocation:len(targetDataSequence_sales),:,:]
                trainingBatchTarget = targetDataSequence_sales[startLocation:len(targetDataSequence_sales)]
                (_, lsBatch, forecastBatch, forecastBatch_originalScale) = sess.run([minimize_step, loss, forecast, forecast_originalScale], feed_dict={inputSequencePlaceholder: trainingBatchInput, \
                    targetPlaceholder: trainingBatchTarget})
                print('got a loss of:', lsBatch)
                print('the forecast of first 5 normalised are:', forecastBatch[0:5])
                print('while the actuals were normalised :', trainingBatchTarget[0:5])
                print('the forecast of first 5 orignal scale are:', forecastBatch_originalScale[0:5])
                print('while the actuals were original scale :', (trainingBatchTarget[0:5] * trainingData_sales_range) + trainingData_sales_min)
            # --- Validation pass after every epoch (no optimizer step) ---
            totalValidationLoss = 0
            startLocation = 0
            print('starting validation')
            for iter in range(len(validationDataSequence_sales) // BATCH_SIZE):
                validationBatchInput = validationDataSequence_sales[startLocation:startLocation + BATCH_SIZE,:,:]
                validationBatchTarget = validationDataSequence_sales_target[startLocation:startLocation + BATCH_SIZE]
                (validationLsBatch, validationForecastBatch, validationForecastBatch_originalScale) = sess.run([loss, forecast, forecast_originalScale], feed_dict={inputSequencePlaceholder: validationBatchInput, \
                    targetPlaceholder: validationBatchTarget})
                startLocation += BATCH_SIZE
                totalValidationLoss += validationLsBatch
                print('first five predictions:', validationForecastBatch[0:5])
                print('first five actuals :', validationBatchTarget[0:5])
                print('the forecast of first 5 orignal scale are:', validationForecastBatch_originalScale[0:5])
                print('while the actuals were original scale :', (validationBatchTarget[0:5] * trainingData_sales_range) + trainingData_sales_min)
            if startLocation < len(validationDataSequence_sales):
                validationBatchInput = validationDataSequence_sales[startLocation:len(validationDataSequence_sales)]
                validationBatchTarget = validationDataSequence_sales_target[startLocation:len(validationDataSequence_sales)]
                (validationLsBatch, validationForecastBatch) = sess.run([loss, forecast], feed_dict={inputSequencePlaceholder: validationBatchInput, \
                    targetPlaceholder: validationBatchTarget})
                totalValidationLoss += validationLsBatch
            print('Validation completed after epoch:', e + 1, '. Total validation loss:', totalValidationLoss)
        # --- Persist the trained graph in SavedModel format ---
        print('----------- Saving Model')
        tf.saved_model.simple_save(sess, export_dir=TRAINED_MODEL_PATH, inputs=\
            {'inputSequencePlaceholder': inputSequencePlaceholder, 'targetPlaceholder': targetPlaceholder}, outputs=\
            {'loss': loss, 'forecast_originalScale': forecast_originalScale})
        print('saved model to:', TRAINED_MODEL_PATH)
        print('----------- Finis')
474ce80e19de8d37e960e7b121c0439e28a01a30 | 325 | py | Python | mpfmonitor/_version.py | kylenahas/mpf-monitor | e3cc22064ebb709788c770a8940d0b0f742a8741 | [
"MIT"
] | null | null | null | mpfmonitor/_version.py | kylenahas/mpf-monitor | e3cc22064ebb709788c770a8940d0b0f742a8741 | [
"MIT"
] | null | null | null | mpfmonitor/_version.py | kylenahas/mpf-monitor | e3cc22064ebb709788c770a8940d0b0f742a8741 | [
"MIT"
] | null | null | null | # mpf-monitor
# Version metadata for MPF Monitor.
__version__ = '0.54.0-dev.1'            # full package version
__short_version__ = '0.54'              # major.minor only
__bcp_version__ = '1.1'                 # BCP protocol version spoken
__config_version__ = '5'                # config file format version
__mpf_version_required__ = '0.54.0-dev.0'  # minimum compatible MPF version

# Human-readable banner assembled from the constants above.
version = "MPF Monitor v{} (config_version={}, BCP v{}, Requires MPF v{})".format(
    __version__, __config_version__, __bcp_version__, __mpf_version_required__)
| 32.5 | 82 | 0.735385 |
25aba7a39e6450ac047bd8cf5fbd9b2c23753a23 | 3,189 | py | Python | tests/pytests/unit/states/test_composer.py | markgras/salt | d66cd3c935533c63870b83228b978ce43e0ef70d | [
"Apache-2.0"
] | 1 | 2015-04-01T21:38:46.000Z | 2015-04-01T21:38:46.000Z | tests/pytests/unit/states/test_composer.py | markgras/salt | d66cd3c935533c63870b83228b978ce43e0ef70d | [
"Apache-2.0"
] | 9 | 2021-03-31T20:25:25.000Z | 2021-07-04T05:33:46.000Z | tests/pytests/unit/states/test_composer.py | markgras/salt | d66cd3c935533c63870b83228b978ce43e0ef70d | [
"Apache-2.0"
] | null | null | null | """
:codeauthor: Jayesh Kariya <jayeshk@saltstack.com>
"""
import pytest
import salt.states.composer as composer
from salt.exceptions import SaltException
from tests.support.mock import MagicMock, patch
@pytest.fixture
def configure_loader_modules():
    """Map the composer state module to an empty loader config for the tests."""
    return {composer: {}}
def test_installed():
    """
    Test to verify that the correct versions of composer
    dependencies are present.
    """
    name = "CURL"
    ret = {"name": name, "result": True, "comment": "", "changes": {}}

    # Pretend composer install has already been run in this directory.
    mock = MagicMock(return_value=True)
    with patch.dict(composer.__salt__, {"composer.did_composer_install": mock}):
        comt = "Composer already installed this directory"
        ret.update({"comment": comt})
        assert composer.installed(name, always_check=False) == ret

        # In test mode the state only reports the pending change.
        with patch.dict(composer.__opts__, {"test": True}):
            comt = 'The state of "CURL" will be changed.'
            changes = {
                "new": "composer install will be run in CURL",
                "old": "composer install has been run in CURL",
            }
            ret.update({"comment": comt, "result": None, "changes": changes})
            assert composer.installed(name) == ret

        # Outside test mode: first call raises (error path), second succeeds.
        with patch.dict(composer.__opts__, {"test": False}):
            mock = MagicMock(side_effect=[SaltException, {}])
            with patch.dict(composer.__salt__, {"composer.install": mock}):
                comt = "Error executing composer in 'CURL': "
                ret.update({"comment": comt, "result": False, "changes": {}})
                assert composer.installed(name) == ret

                comt = "Composer install completed successfully, output silenced by quiet flag"
                ret.update({"comment": comt, "result": True})
                assert composer.installed(name, quiet=True) == ret
def test_update():
    """
    Test to composer update the directory to ensure we have
    the latest versions of all project dependencies.
    """
    name = "CURL"
    ret = {"name": name, "result": True, "comment": "", "changes": {}}

    changes = {
        "new": "composer install/update will be run in CURL",
        "old": "composer install has not yet been run in CURL",
    }

    # Pretend composer install has already been run in this directory.
    mock = MagicMock(return_value=True)
    with patch.dict(composer.__salt__, {"composer.did_composer_install": mock}):
        # In test mode the state only reports the pending change.
        with patch.dict(composer.__opts__, {"test": True}):
            comt = 'The state of "CURL" will be changed.'
            ret.update({"comment": comt, "result": None, "changes": changes})
            assert composer.update(name) == ret

        # Outside test mode: first call raises (error path), second succeeds.
        with patch.dict(composer.__opts__, {"test": False}):
            mock = MagicMock(side_effect=[SaltException, {}])
            with patch.dict(composer.__salt__, {"composer.update": mock}):
                comt = "Error executing composer in 'CURL': "
                ret.update({"comment": comt, "result": False, "changes": {}})
                assert composer.update(name) == ret

                comt = "Composer update completed successfully, output silenced by quiet flag"
                ret.update({"comment": comt, "result": True})
                assert composer.update(name, quiet=True) == ret
| 38.421687 | 95 | 0.60301 |
05292a09b53bbabdd8aef52620515b8e69c2e1b3 | 2,156 | py | Python | assets/ctfFiles/2021/idek2021/crypto/nameless/nameless.py | Angmar2722/Angmar2722.github.io | 6d79cfeeb6681b32d62caba167e14c2f4eeb2569 | [
"MIT"
] | null | null | null | assets/ctfFiles/2021/idek2021/crypto/nameless/nameless.py | Angmar2722/Angmar2722.github.io | 6d79cfeeb6681b32d62caba167e14c2f4eeb2569 | [
"MIT"
] | null | null | null | assets/ctfFiles/2021/idek2021/crypto/nameless/nameless.py | Angmar2722/Angmar2722.github.io | 6d79cfeeb6681b32d62caba167e14c2f4eeb2569 | [
"MIT"
] | 9 | 2021-06-30T07:57:08.000Z | 2022-01-03T09:22:35.000Z | #!/usr/bin/env python3
from Crypto.Util.number import getPrime, bytes_to_long

# Interpret the flag's raw bytes as one big integer (the RSA plaintext).
flag = bytes_to_long(open("flag.txt", "rb").read())

p = getPrime(1024)   # two random 1024-bit primes
q = getPrime(1024)
n = p*q              # RSA modulus
e = 65537            # standard public exponent
c = pow(flag, e, n)  # textbook RSA encryption of the flag

# Published challenge output: the modulus, the hint p^2 + q^2 (which,
# together with n, lets (p, q) be recovered), and the ciphertext.
print(f"{n = }")
print(f"{p**2 + q**2 = }")
print(f"{c = }")
# n = 17039353907577304435335064263404014554877715060984532599266619880863167873378099082282744647069063737519071263364836126585022715467571812084451441982393173641398961475914685815327955647115633127041896154455593434072255425400800779717723399468604805292082232853055652824142503280033249169812067036520117578584094798348819948005306782099055133323817492597665553443090585282100292603079932759878536941929823231580881942192749039900111873581375554659251791337260557811529597205007196563571790350676229812320194120553090511341491088451472118285832059742983329898372623700182290118257197824687682775782009980169859003817731
# p**2 + q**2 = 34254734236141177160574679812056859631858427160408786991475995766265871545173190051194038767461225382849521482292062983459474860288453334280315736001800236347672807900333594896297515619502911996316514299218938831378736595562870019767614772735193898275208842936903810908125651716713945099823849942766283224215669363078687494444967371294251548767512167452469907361824731739495988324619487099803563636546009036759134670516039262088500254966964852889263176272377467365967151127628965809347292638988052064278479647751273833336918088826074446862207626964731876317800211831559603043730904022957158490478667914769698472788362
# c = 12870370380105677159569686874593314643716517767455659912764832987663831817849402722874771360315463499459803247514426078866675686952348433836656840934671927466173330528381359767745015167610939855705805470288376941237662107279159556248387485524451540986787953598577323572841487131458590546170321983597795128547549803960136942090569419458036728363613060710384550676895546741408072019046530957103700345379626982758919062223712005709765751343132802610106335253368313457365776378662756844353849622352138042802036310704545247436297860319183507369367717753569233726139626694256257605892684852784606001755037052492614845787835
| 98 | 633 | 0.938312 |
7fb0034c9e297c6866730de21fccd50f239e2da7 | 2,046 | py | Python | symphony/cli/pyinventory/graphql/mutation/add_customer.py | idoshveki/magma | 8022267bd8b8d94913fbb9a0836880361d785446 | [
"BSD-3-Clause"
] | null | null | null | symphony/cli/pyinventory/graphql/mutation/add_customer.py | idoshveki/magma | 8022267bd8b8d94913fbb9a0836880361d785446 | [
"BSD-3-Clause"
] | null | null | null | symphony/cli/pyinventory/graphql/mutation/add_customer.py | idoshveki/magma | 8022267bd8b8d94913fbb9a0836880361d785446 | [
"BSD-3-Clause"
] | 1 | 2021-05-18T06:54:58.000Z | 2021-05-18T06:54:58.000Z | #!/usr/bin/env python3
# @generated AUTOGENERATED file. Do not Change!
from dataclasses import dataclass
from datetime import datetime
from gql.gql.datetime_utils import DATETIME_FIELD
from gql.gql.graphql_client import GraphqlClient
from gql.gql.client import OperationException
from gql.gql.reporter import FailedOperationException
from functools import partial
from numbers import Number
from typing import Any, Callable, List, Mapping, Optional
from time import perf_counter
from dataclasses_json import DataClassJsonMixin
from ..fragment.customer import CustomerFragment, QUERY as CustomerFragmentQuery
from ..input.add_customer import AddCustomerInput
# GraphQL document for this operation: the customer fragment followed by
# the addCustomer mutation. String contents kept verbatim (GraphQL ignores
# insignificant whitespace).
QUERY: List[str] = CustomerFragmentQuery + ["""
mutation AddCustomerMutation($input: AddCustomerInput!) {
addCustomer(input: $input) {
...CustomerFragment
}
}
"""]
@dataclass
class AddCustomerMutation(DataClassJsonMixin):
    """Generated wrapper for the AddCustomerMutation GraphQL operation."""

    @dataclass
    class AddCustomerMutationData(DataClassJsonMixin):
        # Shape of the "data" object in the GraphQL response.

        @dataclass
        class Customer(CustomerFragment):
            pass

        addCustomer: Customer

    data: AddCustomerMutationData

    @classmethod
    # fmt: off
    def execute(cls, client: GraphqlClient, input: AddCustomerInput) -> AddCustomerMutationData.Customer:
        # fmt: off
        # Send the mutation, decode the JSON response, and report timings.
        variables = {"input": input}
        try:
            network_start = perf_counter()
            # NOTE(review): set() deduplicates the query parts but does not
            # preserve their order — presumably the GraphQL server tolerates
            # fragment/operation order; confirm against the generator.
            response_text = client.call(''.join(set(QUERY)), variables=variables)
            decode_start = perf_counter()
            res = cls.from_json(response_text).data
            decode_time = perf_counter() - decode_start
            network_time = decode_start - network_start
            client.reporter.log_successful_operation("AddCustomerMutation", variables, network_time, decode_time)
            return res.addCustomer
        except OperationException as e:
            # Re-raise with operation context so failures are attributable.
            raise FailedOperationException(
                client.reporter,
                e.err_msg,
                e.err_id,
                "AddCustomerMutation",
                variables,
            )
8f625d4e5e84840037d548b8b541b3093dab8cbd | 3,219 | py | Python | example1/automata_example1.py | sebras/berkeman-accelerator-project_skeleton | ec71dee81bc511ee78cd3158847797537bf1c6c5 | [
"Apache-2.0"
] | 15 | 2018-04-20T18:49:33.000Z | 2020-07-31T04:50:16.000Z | example1/automata_example1.py | sebras/berkeman-accelerator-project_skeleton | ec71dee81bc511ee78cd3158847797537bf1c6c5 | [
"Apache-2.0"
] | 1 | 2018-05-25T06:56:00.000Z | 2018-06-05T17:56:47.000Z | example1/automata_example1.py | sebras/berkeman-accelerator-project_skeleton | ec71dee81bc511ee78cd3158847797537bf1c6c5 | [
"Apache-2.0"
] | 11 | 2018-04-20T18:49:41.000Z | 2021-01-15T10:58:36.000Z | ############################################################################
# #
# Copyright (c) 2018 eBay Inc. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# #
############################################################################
from dataset import Dataset
from jobid import resolve_jobid_filename
import blob
def main(urd):
    """Build the example1 job chain: create datasets, export, import,
    type, aggregate and extend them via the accelerator's urd builder."""

    # Example 1. Create a chain of datasets containing random data.
    jid_prev = None
    for n in range(5):
        jid_ds = urd.build('example1_create_dataset',
            datasets=dict(previous=jid_prev),
            options=dict(approx_rows=100000, seed=n),
            name='Created_number_%s' % (n,),
        )
        jid_prev = jid_ds

    # Example 2. Export the last dataset in the chain to a tab
    #            separated textfile.
    jid_exp = urd.build('csvexport',
        datasets=dict(source=jid_ds),
        options=dict(filename='random.tsv', separator='\t'),
    )
    filename = resolve_jobid_filename(jid_exp, 'random.tsv')
    print('Exported file stored in \"%s\"' % (filename,))

    # Example 3. Import the tab separated textfile and type it
    jid_imp = urd.build('csvimport',
        options=dict(filename=filename, separator='\t', labelsonfirstline=True),
    )
    jid_typ = urd.build('dataset_type',
        datasets=dict(source=jid_imp),
        options=dict(column2type=dict(rflt='number', rint='number')),
    )

    # Example 4. Run a method computing the average of a column, in a
    #            loop, one column at a time.  The column name is an
    #            input parameter.
    for column in Dataset(jid_typ).columns:
        jid_avg = urd.build('example1_calc_average',
            datasets=dict(source=jid_typ),
            options=dict(column=column),
        )
        (s, n) = blob.load(jobid=jid_avg)
        print("Column %s: sum=%f, length=%d, average=%f" % (column, s, n, s/n))

    # Example 5. Create a new column that is the product of two
    #            existing columns.
    jid_add = urd.build('example1_add_column',
        datasets=dict(source=jid_typ),
    )

    # Example 6. Export a dataset with named columns in specified
    #            order.
    jid_add_exp = urd.build('csvexport',
        datasets=dict(source=jid_add),
        options=dict(filename='prod.csv', labels=('prod', 'rflt', 'rint',)),
    )

    print(urd.joblist.pretty)
| 40.746835 | 76 | 0.543026 |
3e111acecf3a12432b238613f021f393b294523a | 959 | py | Python | tasks/__init__.py | ssfdust/smorest-sfs | 139f6817989ab041c81761d183169de20a26597e | [
"Apache-2.0"
] | 8 | 2020-05-11T07:11:03.000Z | 2022-03-25T01:58:18.000Z | tasks/__init__.py | ssfdust/smorest-sfs | 139f6817989ab041c81761d183169de20a26597e | [
"Apache-2.0"
] | null | null | null | tasks/__init__.py | ssfdust/smorest-sfs | 139f6817989ab041c81761d183169de20a26597e | [
"Apache-2.0"
] | 2 | 2020-05-11T03:53:38.000Z | 2021-03-25T01:11:15.000Z | # encoding: utf-8
# pylint: disable=invalid-name,wrong-import-position
"""
本项目Invoke Task的入口
基本上是frol/flask-restplus-server-example的一个翻版,
很多都是沿用的frol的配置,然后做了优化与汉化。
出处:https://github.com/frol/flask-restplus-server-example
"""
import os
import platform
from invoke import Collection
from invoke.executor import Executor
from . import app, logger
# NOTE: `namespace` or `ns` name is required!
# Root task collection picked up by Invoke; currently only the `app` tasks.
namespace = Collection(app,)
def invoke_execute(context, command_name, **kwargs):
    """Helper for running an Invoke task programmatically.

    Executes ``command_name`` with ``kwargs`` through an Executor bound
    to this package's namespace and returns that task's result.
    """
    executor = Executor(namespace, config=context.config)
    all_results = executor.execute((command_name, kwargs))
    requested_task = context.root_namespace[command_name]
    return all_results[requested_task]
# Shared Invoke configuration applied to every task in the namespace.
namespace.configure(
    {
        "run": {
            # Use a POSIX shell everywhere except on Windows, where the
            # system command interpreter (COMSPEC, usually cmd.exe) is used.
            "shell": "/bin/sh"
            if platform.system() != "Windows"
            else os.environ.get("COMSPEC"),
        },
        # Expose the root namespace and the execute helper to tasks.
        "root_namespace": namespace,
        "invoke_execute": invoke_execute,
    }
)
| 22.302326 | 88 | 0.68196 |
366a799ddfcc2bf8bb387e248143f17a87bbf047 | 9,655 | py | Python | pypy/jit/tool/traceviewer.py | benoitc/pypy | a3e1b12d1d01dc29056b7badc051ffc034297658 | [
"MIT"
] | 1 | 2020-01-21T11:10:51.000Z | 2020-01-21T11:10:51.000Z | pypy/jit/tool/traceviewer.py | benoitc/pypy | a3e1b12d1d01dc29056b7badc051ffc034297658 | [
"MIT"
] | null | null | null | pypy/jit/tool/traceviewer.py | benoitc/pypy | a3e1b12d1d01dc29056b7badc051ffc034297658 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
""" Usage: traceviewer.py [--use-threshold] loopfile
"""
import optparse
import sys
import re
import math
import py
import autopath
from pypy.translator.tool.graphpage import GraphPage
from pypy.translator.tool.make_dot import DotGen
from pypy.tool import logparser
from pypy.tool import progressbar
class SubPage(GraphPage):
    """Detail page for one trace loop: one dot node per
    debug_merge_point-delimited segment of the loop's operations."""

    def compute(self, graph):
        self.links = {}
        dotgen = DotGen(str(graph.no))
        # split over debug_merge_points
        counter = 0
        lines = graph.content.split("\n")
        lines_so_far = []
        for line in lines:
            # Strip descr-style annotations like ".[...]" from the line.
            line = re.sub('.\[.*\]', '', line)
            # Register every box name (p/i/f + digits) as a clickable link.
            boxes = re.findall('([pif]\d+)', line)
            for box in boxes:
                self.links[box] = box
            if 'debug_merge_point' in line:
                # Close the current segment and chain it to the previous one.
                dotgen.emit_node('node%d' % counter, shape="box",
                                 label="\n".join(lines_so_far))
                if counter != 0:
                    dotgen.emit_edge('node%d' % (counter - 1), 'node%d' % counter)
                counter += 1
                lines_so_far = []
            lines_so_far.append(line)
        # Emit the trailing segment and connect it.
        dotgen.emit_node('node%d' % counter, shape="box",
                         label="\n".join(lines_so_far))
        dotgen.emit_edge('node%d' % (counter - 1), 'node%d' % counter)
        self.source = dotgen.generate(target=None)
class Page(GraphPage):
    """Top-level page: every loop/bridge block rendered as a graph node;
    clicking a loop opens a :class:`SubPage` with the full trace."""

    def compute(self, graphs, counts):
        dotgen = DotGen('trace')
        self.loops = graphs
        self.links = {}
        # Cache blocks by their link label so followlink() can resolve them.
        self.cache = {}
        for loop in self.loops:
            loop.generate(dotgen, counts)
            loop.getlinks(self.links)
            self.cache["loop" + str(loop.no)] = loop
        self.source = dotgen.generate(target=None)

    def followlink(self, label):
        # Invoked by the graph viewer when a loop node is clicked.
        return SubPage(self.cache[label])
BOX_COLOR = (128, 0, 96)  # (R, G, B) highlight color for boxes; not referenced in this chunk
class BasicBlock(object):
    """One node of the trace graph: a chunk of trace operations.

    The ``content`` property tracks the first and last guard numbers that
    appear in the chunk, so bridges can later be attached to the block
    that actually contains their guard.
    """

    counter = 0       # class-level counter handing out unique node numbers
    startlineno = 0   # position of this chunk in the original logfile

    def __init__(self, content):
        self.content = content
        self.no = self.counter
        # Incremented on the *class*, so numbering is global across blocks.
        self.__class__.counter += 1

    def name(self):
        # Identifier used for the dot node.
        return 'node' + str(self.no)

    def getlinks(self, links):
        links[self.linksource] = self.name()

    def generate(self, dotgen, counts):
        val = counts.get(self.key, 0)
        if False: #val > counts.threshold:   (heat-coloring disabled for now)
            fillcolor = get_gradient_color(self.ratio)
        else:
            fillcolor = "white"
        dotgen.emit_node(self.name(), label=self.header,
                         shape='box', fillcolor=fillcolor)

    def get_content(self):
        return self._content

    def set_content(self, content):
        self._content = content
        # Record the range of guard numbers present in this chunk;
        # -1/-1 means "no guards at all".
        groups = re.findall('Guard(\d+)', content)
        if not groups:
            self.first_guard = -1
            self.last_guard = -1
        else:
            self.first_guard = int(groups[0])
            self.last_guard = int(groups[-1])

    content = property(get_content, set_content)
def get_gradient_color(ratio):
    """Map an ops-per-bytecode ratio onto a green->yellow->red hex color.

    A ratio of 0 renders as white.  Otherwise log(ratio) is centered on
    1.8 (pure yellow) and squashed through atan into (-1, 1): negative
    values shade towards green, positive ones towards red.
    """
    if ratio == 0:
        return 'white'
    # Log scale, centered so log(ratio) == 1.8 maps to pure yellow.
    centered = math.log(ratio) - 1.8
    # Squash into the open interval (-1, 1).
    squashed = math.atan(centered * 5) / (math.pi / 2)
    if squashed >= 0.0:
        # Towards red: the green channel fades out as squashed -> 1.
        green_channel = int((1.0 - squashed) * 255.5)
        return '#FF%02X00' % (green_channel,)
    # Towards green: the red channel fades out as squashed -> -1.
    red_channel = int((1.0 + squashed) * 255.5)
    return '#%02XFF00' % (red_channel,)
class FinalBlock(BasicBlock):
    """Leaf block: a trace chunk with at most one successor (the loop it
    jumps to at the end, or nothing)."""

    def __init__(self, content, target):
        # target: the block jumped to at the end, or None.
        self.target = target
        BasicBlock.__init__(self, content)

    def postprocess(self, loops, memo, counts):
        postprocess_loop(self.target, loops, memo, counts)

    def generate(self, dotgen, counts):
        BasicBlock.generate(self, dotgen, counts)
        if self.target is not None:
            dotgen.emit_edge(self.name(), self.target.name())
class Block(BasicBlock):
    """Interior block: a trace chunk that was split at a guard, so it has
    two successors (fall-through and bridge)."""

    def __init__(self, content, left, right):
        # left/right start out as *indices* into `allloops` and are
        # replaced by the actual block objects in postprocess().
        self.left = left
        self.right = right
        BasicBlock.__init__(self, content)

    def postprocess(self, loops, memo, counts):
        postprocess_loop(self.left, loops, memo, counts)
        postprocess_loop(self.right, loops, memo, counts)

    def generate(self, dotgen, counts):
        BasicBlock.generate(self, dotgen, counts)
        dotgen.emit_edge(self.name(), self.left.name())
        dotgen.emit_edge(self.name(), self.right.name())
def split_one_loop(real_loops, guard_s, guard_content, lineno, no, allloops):
    """Attach a bridge (``guard_content``, from Guard number ``no``) to the
    block that contains that guard, splitting the block in two.

    Scans ``allloops`` backwards for a block whose guard-number range
    covers ``no`` and whose text actually contains ``guard_s`` (e.g.
    ``"Guard12"``).  That block becomes a ``Block`` holding the text up to
    the guard; its children are the remainder of the original block and
    the bridge.  Children are appended to ``allloops`` and referenced *by
    index* (resolved later in ``postprocess``).  Raises if no matching
    block is found.
    """
    for i in range(len(allloops) - 1, -1, -1):
        loop = allloops[i]
        if no < loop.first_guard or no > loop.last_guard:
            continue  # guard number cannot be inside this block
        content = loop.content
        # '>' avoids matching e.g. Guard12 while looking for Guard1.
        pos = content.find(guard_s + '>')
        if pos != -1:
            newpos = content.rfind("\n", 0, pos)   # start of the guard's line
            oldpos = content.find("\n", pos)       # end of the guard's line
            assert newpos != -1
            if oldpos == -1:
                oldpos = len(content)
            # The tail of the original block keeps its successors, if any.
            if isinstance(loop, Block):
                left = Block(content[oldpos:], loop.left, loop.right)
            else:
                left = FinalBlock(content[oldpos:], None)
            right = FinalBlock(guard_content, None)
            # Children stored as indices into allloops for now.
            mother = Block(content[:oldpos], len(allloops), len(allloops) + 1)
            allloops[i] = mother
            allloops.append(left)
            allloops.append(right)
            if hasattr(loop, 'loop_no'):
                # Keep real_loops pointing at the head block of the loop.
                real_loops[loop.loop_no] = mother
                mother.loop_no = loop.loop_no
            mother.guard_s = guard_s
            mother.startlineno = loop.startlineno
            left.startlineno = loop.startlineno + content.count("\n", 0, pos)
            right.startlineno = lineno
            return
    else:
        raise Exception("Did not find")
MAX_LOOPS = 300    # hard cap on how many loop chunks are processed
LINE_CUTOFF = 300  # NOTE(review): not referenced in this file chunk -- confirm use
def splitloops(loops):
    """Parse raw trace texts into block graphs.

    Returns ``(real_loops, allloops)``: ``real_loops[i]`` is the head
    block of loop *i*; ``allloops`` is the flat list of every block, with
    bridges already spliced into their owning block via
    ``split_one_loop``.  Processing stops after ``MAX_LOOPS`` entries.
    """
    real_loops = []
    counter = 1   # running logfile line number of the current chunk
    bar = progressbar.ProgressBar(color='blue')
    allloops = []
    for i, loop in enumerate(loops):
        if i > MAX_LOOPS:
            return real_loops, allloops
        bar.render((i * 100) / len(loops))
        firstline = loop[:loop.find("\n")]
        m = re.match('# Loop (\d+)', firstline)
        if m:
            # A top-level loop: numbering must follow insertion order.
            no = int(m.group(1))
            assert len(real_loops) == no
            _loop = FinalBlock(loop, None)
            real_loops.append(_loop)
            _loop.startlineno = counter
            _loop.loop_no = no
            allloops.append(_loop)
        else:
            # A bridge: splice it into the block that owns its guard.
            m = re.search("bridge out of Guard (\d+)", firstline)
            assert m
            guard_s = 'Guard' + m.group(1)
            split_one_loop(real_loops, guard_s, loop, counter,
                           int(m.group(1)), allloops)
        # +2 accounts for the blank separator between logged chunks.
        counter += loop.count("\n") + 2
    return real_loops, allloops
def postprocess_loop(loop, loops, memo, counts):
    """Decorate one block (and, recursively, its successors) with display
    metadata.

    Fills in ``key`` (the debug_merge_point identifier used to look up run
    counts), ``header`` (node label), ``ratio`` (ops per bytecode), links a
    trailing ``descr=<LoopN`` jump to its target loop, and prefixes the
    content with its logfile position.  ``memo`` guards against revisiting
    shared blocks.
    """
    if loop in memo:
        return
    memo.add(loop)
    if loop is None:
        return
    m = re.search("debug_merge_point\('(<code object (.*?)> (.*?))'", loop.content)
    if m is None:
        name = '?'
        loop.key = '?'
    else:
        name = m.group(2) + " " + m.group(3)
        loop.key = m.group(1)
    opsno = loop.content.count("\n")
    lastline = loop.content[loop.content.rfind("\n", 0, len(loop.content) - 2):]
    m = re.search('descr=<Loop(\d+)', lastline)
    if m is not None:
        # The block ends with a jump to another loop: resolve the target.
        assert isinstance(loop, FinalBlock)
        loop.target = loops[int(m.group(1))]
    bcodes = loop.content.count('debug_merge_point')
    loop.linksource = "loop" + str(loop.no)
    loop.header = ("%s loop%d\nrun %s times\n%d operations\n%d opcodes" %
                   (name, loop.no, counts.get(loop.key, '?'), opsno, bcodes))
    # Grow tall nodes a bit so huge traces stand out visually
    # (Python 2 integer division).
    loop.header += "\n" * (opsno / 100)
    if bcodes == 0:
        loop.ratio = opsno
    else:
        loop.ratio = float(opsno) / bcodes
    content = loop.content
    loop.content = "Logfile at %d\n" % loop.startlineno + content
    loop.postprocess(loops, memo, counts)
def postprocess(loops, allloops, counts):
    """Resolve child indices into block objects, then decorate every loop.

    ``Block`` children are stored as indices into ``allloops`` while the
    graph is being built; turn them into real references before walking
    the loop heads with ``postprocess_loop``.
    """
    for node in allloops:
        if not isinstance(node, Block):
            continue
        node.left = allloops[node.left]
        node.right = allloops[node.right]
    visited = set()
    for head in loops:
        postprocess_loop(head, loops, visited, counts)
class Counts(dict):
    """Run-count mapping (trace key -> times executed); main() may also
    attach a ``threshold`` attribute to instances."""
    pass
def main(loopfile, use_threshold, view=True):
    """Parse ``loopfile`` (a PyPy JIT log) and display the trace graph.

    If a ``<loopfile>.count`` file exists, run counts are nominally loaded
    from it -- note the actual parsing is commented out, so counts end up
    empty either way.  With ``use_threshold`` only the 20 hottest loops
    would be heat-colored.
    """
    countname = py.path.local(loopfile + '.count')
    if countname.check():
        #counts = [line.split(':', 1) for line in countname.readlines()]
        #counts = Counts([('<code' + k.strip("\n"), int(v.strip('\n').strip()))
        #                 for v, k in counts])
        counts = Counts([])
        l = list(sorted(counts.values()))
        if len(l) > 20 and use_threshold:
            counts.threshold = l[-20]
        else:
            counts.threshold = 0
        # NOTE(review): for_print is computed but never used.
        for_print = [(v, k) for k, v in counts.iteritems()]
        for_print.sort()
    else:
        counts = {}
    log = logparser.parse_log_file(loopfile)
    loops = logparser.extract_category(log, "jit-log-opt-")
    real_loops, allloops = splitloops(loops)
    postprocess(real_loops, allloops, counts)
    if view:
        Page(allloops, counts).display()
if __name__ == '__main__':
    # Command-line entry point (Python 2 syntax: bare print statement).
    parser = optparse.OptionParser(usage=__doc__)
    parser.add_option('--use-threshold', dest='use_threshold',
                      action="store_true", default=False)
    options, args = parser.parse_args(sys.argv)
    if len(args) != 2:
        print __doc__
        sys.exit(1)
    main(args[1], options.use_threshold)
6f2a0109512e0716106e805c16b2fd3114fcfac7 | 2,681 | py | Python | day04/day04.py | imrehg/AdventOfCode2020 | 6aa5bf122fdff33f2ff97b1652ecea7f3fbb679c | [
"MIT"
] | null | null | null | day04/day04.py | imrehg/AdventOfCode2020 | 6aa5bf122fdff33f2ff97b1652ecea7f3fbb679c | [
"MIT"
] | null | null | null | day04/day04.py | imrehg/AdventOfCode2020 | 6aa5bf122fdff33f2ff97b1652ecea7f3fbb679c | [
"MIT"
] | null | null | null | import sys
import re
# Keys every passport must contain ("cid" is deliberately optional).
required_fields = {"byr", "iyr", "eyr", "hgt", "hcl", "ecl", "pid"}
def basic_validate(passports, required_fields):
    """Count passports (dicts) containing every key in *required_fields*.

    Part 1 of AoC 2020 day 4: only the presence of the keys matters;
    their values are not inspected.
    """
    return sum(1 for passport in passports
               if required_fields <= set(passport))
def _valid_year(value, lo, hi):
    """True if *value* parses as an int within [lo, hi]."""
    try:
        return lo <= int(value) <= hi
    except (ValueError, TypeError):
        return False


def _valid_height(value):
    """True for '<n>cm' with 150 <= n <= 193 or '<n>in' with 59 <= n <= 76."""
    try:
        if value.endswith("cm"):
            return 150 <= int(value[:-2]) <= 193
        if value.endswith("in"):
            return 59 <= int(value[:-2]) <= 76
    except (ValueError, AttributeError):
        pass
    return False


def extended_validate(passports, required_fields):
    """Count passports whose required fields are present AND valid.

    Part 2 of AoC 2020 day 4.  Field presence is enforced implicitly: a
    missing key raises KeyError and the passport is rejected (so the
    *required_fields* argument is kept only for interface symmetry with
    basic_validate).  Value rules: byr 1920-2002, iyr 2010-2020,
    eyr 2020-2030, hgt in cm/in ranges, hcl '#' + 6 hex digits,
    ecl one of seven colors, pid exactly 9 digits.
    """
    eye_colors = {"amb", "blu", "brn", "gry", "grn", "hzl", "oth"}
    count = 0
    for passport in passports:
        try:
            valid = (
                _valid_year(passport["byr"], 1920, 2002)
                and _valid_year(passport["iyr"], 2010, 2020)
                and _valid_year(passport["eyr"], 2020, 2030)
                and _valid_height(passport["hgt"])
                and re.match(r"^#[0-9a-f]{6}$", passport["hcl"]) is not None
                and passport["ecl"] in eye_colors
                and re.match(r"^\d{9}$", passport["pid"]) is not None
            )
        except (KeyError, TypeError):
            # Missing field, or a non-string where a string was expected.
            valid = False
        if valid:
            count += 1
    return count
def read_passports_file(filename):
    """Read an AoC day-4 input file into a list of passport dicts.

    Passports are blocks of whitespace-separated 'key:value' pairs; a
    block may span several lines and blocks are separated by blank lines.
    """
    blocks = []
    current = []
    with open(filename, "r") as handle:
        for raw in handle:
            line = raw.strip()
            if not line:
                # Blank line: close the current passport block, if any.
                if current:
                    blocks.append(" ".join(current))
                    current = []
            else:
                current.append(line)
        if current:
            blocks.append(" ".join(current))
    parsed = []
    for block in blocks:
        record = {}
        for field in block.split(" "):
            pieces = field.split(":")
            record[pieces[0]] = pieces[1]
        parsed.append(record)
    return parsed
if __name__ == "__main__":
    # Usage: day04.py <input-file>
    input_file = sys.argv[1]
    passports = read_passports_file(input_file)
    res1 = basic_validate(passports, required_fields)
    print(f"Result 1: {res1}")
    res2 = extended_validate(passports, required_fields)
    print(f"Result 2: {res2}")
ca156ef5783f48914def1592feaf5a7219aa8baf | 1,972 | py | Python | Rust/Hangperson/h04.py | lerina/drills | d7f374adec683a699c4dfd100985482e6717fda2 | [
"MIT"
] | null | null | null | Rust/Hangperson/h04.py | lerina/drills | d7f374adec683a699c4dfd100985482e6717fda2 | [
"MIT"
] | null | null | null | Rust/Hangperson/h04.py | lerina/drills | d7f374adec683a699c4dfd100985482e6717fda2 | [
"MIT"
] | null | null | null | #Step 4
import random
stages = ['''
+---+
| |
O |
/|\ |
/ \ |
|
=========
''', '''
+---+
| |
O |
/|\ |
/ |
|
=========
''', '''
+---+
| |
O |
/|\ |
|
|
=========
''', '''
+---+
| |
O |
/| |
|
|
=========''', '''
+---+
| |
O |
| |
|
|
=========
''', '''
+---+
| |
O |
|
|
|
=========
''', '''
+---+
| |
|
|
|
|
=========
''']
# Hangman game loop: guess letters until the word is revealed or the
# six lives are used up.
end_of_game = False
word_list = ["ardvark", "baboon", "camel"]
chosen_word = random.choice(word_list)
word_length = len(chosen_word)

#TODO-1: - Create a variable called 'lives' to keep track of the number of lives left.
#Set 'lives' to equal 6.
lives = len(stages) - 1

#Testing code
print("\033c")  # ANSI escape: clear the terminal
print(f'Pssst, the solution is {chosen_word}.')

#Create blanks
display = []
for _ in range(word_length):
    display += "_"

while not end_of_game:
    guess = input("Guess a letter: ").lower()

    #Check guessed letter
    for position in range(word_length):
        letter = chosen_word[position]
        # print(f"Current position: {position}\n Current letter: {letter}\n Guessed letter: {guess}")
        if letter == guess:
            display[position] = letter

    #TODO-2: - If guess is not a letter in the chosen_word,
    #Then reduce 'lives' by 1.
    if guess not in chosen_word:
        lives -=1
        #If lives goes down to 0 then the game should stop and it should print "You lose."
        if lives <= 0:
            print("You lose")
            end_of_game = True

    #Join all the elements in the list and turn it into a String.
    #print("\033c")
    print(f"{' '.join(display)}")

    #Check if user has got all letters.
    if "_" not in display:
        end_of_game = True
        print("You win.")

    #TODO-3: - print the ASCII art from 'stages' that corresponds to the current number of 'lives' the user has remaining.
    print(stages[lives])
98eab368f575087ab5b51209fedc08e55ca61e58 | 837 | py | Python | 30_day_leetcoding_challenge/2021_09/30-Partition_to_K_Equal_Sum_Subsets.py | QuenLo/leecode | ce861103949510dc54fd5cb336bd992c40748de2 | [
"MIT"
] | 6 | 2018-06-13T06:48:42.000Z | 2020-11-25T10:48:13.000Z | 30_day_leetcoding_challenge/2021_09/30-Partition_to_K_Equal_Sum_Subsets.py | QuenLo/leecode | ce861103949510dc54fd5cb336bd992c40748de2 | [
"MIT"
] | null | null | null | 30_day_leetcoding_challenge/2021_09/30-Partition_to_K_Equal_Sum_Subsets.py | QuenLo/leecode | ce861103949510dc54fd5cb336bd992c40748de2 | [
"MIT"
] | null | null | null | class Solution:
def canPartitionKSubsets(self, nums: List[int], k: int) -> bool:
total = sum(nums)
if (total % k) > 0: return False
sub_total = total // 4
visited = [False]*len(nums)
nums = sorted(nums)
return self.calling( nums, visited, 0, k, sub_total )
def calling( self, nums, visited, cur_sum, k, sub_total ):
if k == 1: return True
if (sub_total < cur_sum): return False
if cur_sum == sub_total: return self.calling( nums, visited, 0, k-1, sub_total )
for i in range( len(nums) ):
if visited[i]: continue
visited[i] = True
if ( self.calling( nums, visited, cur_sum+nums[i], k, sub_total ) ): return True
visited[i] = False
return False
| 33.48 | 92 | 0.53644 |
d5a77f40a3cd7488e91f28933388bc257e634e1a | 4,687 | py | Python | ShoeStore/authen/forms.py | TrongPhuCBL49/Shoe-Store-Django | 6dff4cf34783fe3504d06504a00022ccce06cb88 | [
"bzip2-1.0.6"
] | null | null | null | ShoeStore/authen/forms.py | TrongPhuCBL49/Shoe-Store-Django | 6dff4cf34783fe3504d06504a00022ccce06cb88 | [
"bzip2-1.0.6"
] | null | null | null | ShoeStore/authen/forms.py | TrongPhuCBL49/Shoe-Store-Django | 6dff4cf34783fe3504d06504a00022ccce06cb88 | [
"bzip2-1.0.6"
] | null | null | null | from django import forms
from django.forms import ModelForm
from django.contrib.auth import get_user_model
from .models import CustomerUser
from django.utils.translation import gettext_lazy as _
from django.core.exceptions import NON_FIELD_ERRORS
User = get_user_model()
class LoginForm(forms.Form):
    """Username/password login form (plain Form, not a ModelForm).

    Each widget carries inline JS that clears its placeholder on focus
    and restores it on blur.
    """

    username = forms.CharField(
        widget=forms.TextInput(
            attrs={
                'class': 'form-control',
                'placeholder': 'Username',
                'onfocus': "this.placeholder = ''",
                'onblur': "this.placeholder = 'Username'",
            }
        ),
        error_messages={'required': 'Username is required!'}
    )
    password = forms.CharField(
        widget=forms.PasswordInput(
            attrs={
                'class': 'form-control',
                'placeholder': 'Password',
                'onfocus': "this.placeholder = ''",
                'onblur': "this.placeholder = 'Password'",
            }
        ),
        error_messages={'required': 'Password is required!'}
    )
class RegisterForm(forms.Form):
    """Sign-up form: required credentials plus optional profile/shipping
    details.

    Username/email uniqueness is enforced against the project's user
    model in the clean_* hooks; password equality is checked in clean().
    """

    # --- required account fields -------------------------------------
    username = forms.CharField(
        widget=forms.TextInput(
            attrs={
                'class': 'form-control',
                'placeholder': 'Username',
            }
        ),
        error_messages={'required': 'Username is required!'}
    )
    email = forms.CharField(
        widget=forms.EmailInput(
            attrs={
                'class': 'form-control',
                'placeholder': 'Email Address',
            }
        ),
        error_messages={'required': 'Email Address is required!'}
    )
    password = forms.CharField(
        widget=forms.PasswordInput(
            attrs={
                'class': 'form-control',
                'placeholder': 'Password',
            }
        ),
        error_messages={'required': 'Password is required!'}
    )
    confirm_password = forms.CharField(
        widget=forms.PasswordInput(
            attrs={
                'class': 'form-control',
                'placeholder': 'Confirm Password',
            }
        ),
        error_messages={'required': 'Confirm Password is required!'}
    )
    # --- optional profile / shipping fields --------------------------
    first_name = forms.CharField(required=False, widget=forms.TextInput(
        attrs={
            'class': 'form-control',
            'placeholder': 'First name',
        }
    ))
    last_name = forms.CharField(required=False, widget=forms.TextInput(
        attrs={
            'class': 'form-control',
            'placeholder': 'Last name',
        }
    ))
    country = forms.CharField(required=False, widget=forms.TextInput(
        attrs={
            'class': 'form-control',
            'placeholder': 'Country',
        }
    ))
    phone_number = forms.CharField(required=False, widget=forms.TextInput(
        attrs={
            'class': 'form-control',
            'placeholder': 'Phone number',
        }
    ))
    province = forms.CharField(required=False, widget=forms.TextInput(
        attrs={
            'class': 'form-control',
            'placeholder': 'Province',
        }
    ))
    district = forms.CharField(required=False, widget=forms.TextInput(
        attrs={
            'class': 'form-control',
            'placeholder': 'District',
        }
    ))
    address = forms.CharField(required=False, widget=forms.TextInput(
        attrs={
            'class': 'form-control',
            'placeholder': 'Address',
        }
    ))
    postcode = forms.CharField(required=False, widget=forms.TextInput(
        attrs={
            'class': 'form-control',
            'placeholder': 'Postcode/ZIP',
        }
    ))

    def clean_username(self):
        # Reject usernames already present in the user table.
        username = self.cleaned_data.get("username")
        qs = User.objects.filter(username=username)
        if qs.exists():
            raise forms.ValidationError("Username is taken!")
        return username

    def clean_email(self):
        # Reject email addresses already present in the user table.
        email = self.cleaned_data.get("email")
        qs = User.objects.filter(email=email)
        if qs.exists():
            raise forms.ValidationError("Email is taken!")
        return email

    def clean(self):
        # NOTE(review): Django convention is `data = super().clean()` here;
        # this implementation reads self.cleaned_data directly -- confirm
        # intended.
        data = self.cleaned_data
        password = self.cleaned_data.get("password")
        confirm_password = self.cleaned_data.get("confirm_password")
        if password != confirm_password:
            raise forms.ValidationError("Password must match!")
        return data
class GuestForm(forms.Form):
    """Minimal checkout-as-guest form: just an email address."""

    email = forms.CharField(
        widget=forms.EmailInput(
            attrs={
                'class': 'form-control',
                'placeholder': 'Email Address',
            }
        ),
        error_messages={'required': 'Email Address is required!'}
    )
089138ad89274d7bc2778c69157ced27ae8329f4 | 3,255 | py | Python | pytorch_toolbelt/modules/agn.py | ternaus/pytorch-toolbelt | a952882bae1ded4f9a583cbdf87ba6f335ef3abf | [
"MIT"
] | 3 | 2019-10-02T04:05:57.000Z | 2020-01-13T02:26:09.000Z | pytorch_toolbelt/modules/agn.py | ternaus/pytorch-toolbelt | a952882bae1ded4f9a583cbdf87ba6f335ef3abf | [
"MIT"
] | null | null | null | pytorch_toolbelt/modules/agn.py | ternaus/pytorch-toolbelt | a952882bae1ded4f9a583cbdf87ba6f335ef3abf | [
"MIT"
] | 1 | 2019-12-02T05:40:03.000Z | 2019-12-02T05:40:03.000Z | import torch
import torch.nn as nn
import torch.nn.functional as functional
from pytorch_toolbelt.modules.activations import ACT_LEAKY_RELU, ACT_NONE, \
ACT_HARD_SIGMOID, ACT_HARD_SWISH, ACT_SWISH, ACT_SELU, ACT_ELU, ACT_RELU6, \
ACT_RELU, hard_swish, hard_sigmoid, swish
__all__ = ['AGN']
class AGN(nn.Module):
    """Activated Group Normalization

    This gathers a group normalization and an activation function in a
    single module.
    """

    def __init__(self, num_features: int, num_groups: int,
                 eps=1e-5,
                 momentum=0.1,
                 activation=ACT_LEAKY_RELU,
                 slope=0.01):
        """Create an Activated Group Normalization module

        Parameters
        ----------
        num_features : int
            Number of feature channels in the input and output.
        num_groups : int
            Number of groups the channels are split into; must divide
            num_features evenly.
        eps : float
            Small constant to prevent numerical issues.
        momentum : float
            Stored for API symmetry with batch-norm variants; group norm
            computes no running statistics, so it is unused here.
        activation : str
            Name of the activation function, e.g. `leaky_relu`, `elu` or `none`.
        slope : float
            Negative slope for the `leaky_relu` activation.
        """
        super(AGN, self).__init__()
        assert num_features % num_groups == 0
        self.num_features = num_features
        self.num_groups = num_groups
        self.eps = eps
        self.momentum = momentum
        self.activation = activation
        self.slope = slope
        # Learnable per-channel affine parameters (gamma/beta).
        self.weight = nn.Parameter(torch.ones(num_features))
        self.bias = nn.Parameter(torch.zeros(num_features))
        self.reset_parameters()

    def reset_parameters(self):
        # gamma = 1, beta = 0: start as the identity affine transform.
        nn.init.ones_(self.weight)
        nn.init.zeros_(self.bias)

    def forward(self, x):
        # Normalize first, then dispatch on the configured activation name.
        x = functional.group_norm(x, self.num_groups,
                                  self.weight, self.bias, self.eps)

        if self.activation == ACT_RELU:
            return functional.relu(x, inplace=True)
        elif self.activation == ACT_RELU6:
            return functional.relu6(x, inplace=True)
        elif self.activation == ACT_LEAKY_RELU:
            return functional.leaky_relu(x, negative_slope=self.slope,
                                         inplace=True)
        elif self.activation == ACT_ELU:
            return functional.elu(x, inplace=True)
        elif self.activation == ACT_SELU:
            return functional.selu(x, inplace=True)
        elif self.activation == ACT_SWISH:
            return swish(x)
        elif self.activation == ACT_HARD_SWISH:
            return hard_swish(x, inplace=True)
        elif self.activation == ACT_HARD_SIGMOID:
            return hard_sigmoid(x, inplace=True)
        elif self.activation == ACT_NONE:
            return x
        else:
            # Unknown activation name: fail loudly.
            raise KeyError(self.activation)

    def __repr__(self):
        rep = '{name}({num_features},{num_groups}, eps={eps}' \
              ', activation={activation}'
        if self.activation == "leaky_relu":
            rep += ', slope={slope})'
        else:
            rep += ')'
        return rep.format(name=self.__class__.__name__, **self.__dict__)
| 36.573034 | 87 | 0.603379 |
fa531f150f2d30304c34da181a2b2bdd277f8f39 | 5,446 | py | Python | wrapper/gf.py | sniemi/SamPy | e048756feca67197cf5f995afd7d75d8286e017b | [
"BSD-2-Clause"
] | 5 | 2016-05-28T14:12:28.000Z | 2021-04-22T10:23:12.000Z | wrapper/gf.py | sniemi/SamPy | e048756feca67197cf5f995afd7d75d8286e017b | [
"BSD-2-Clause"
] | null | null | null | wrapper/gf.py | sniemi/SamPy | e048756feca67197cf5f995afd7d75d8286e017b | [
"BSD-2-Clause"
] | 2 | 2015-07-13T10:04:10.000Z | 2021-04-22T10:23:23.000Z | """
This little wrapper can be used to run GF (R. Somerville's SAMs program) using threading.
:author: Sami-Matias Niemi
:contact: niemi@stsci.edu
:note: Threading is not really the best way to do this.
Instead, one should use multiprocessing to launch
several processes.
"""
import glob as g
import os
import time
import threading as t
import Queue as Q
class Run_GF_Threaded(t.Thread):
    """
    Threaded way of running GF.

    Each worker thread pulls an input-file path from a shared queue,
    prepares a per-file run directory (hard link to the gf binary plus a
    patched parameter file) and runs gf there through the shell.
    """

    def __init__(self,
                 queue,
                 out_path='/Users/niemi/Desktop/Research/run/',
                 param_template='/Users/niemi/Desktop/Research/orig_param_file',
                 gf_binary='/Users/niemi/Desktop/Research/gf_bolshoi/gf'):
        # queue: Queue of input file paths to be processed.
        # out_path: root directory under which per-file run dirs are made.
        # param_template: template parameter file that gets patched.
        # gf_binary: gf executable hard-linked into every run directory.
        t.Thread.__init__(self)
        self.queue = queue
        self.out_path = out_path
        self.param_template = param_template
        self.gf_binary = gf_binary

    #Redefines subprocess.call -function
    def _call(self, command, echo=False, wait=False):
        """
        Actual command line call method.

        With echo=True the command is only printed (dry run).  With
        wait=True the call blocks until the child exits; otherwise the
        Popen handle is returned immediately.
        """
        import subprocess
        if echo:
            print command
        else:
            #return subprocess.call(arg.split())
            #Shell = True --> no arg.split()
            #return subprocess.call(command, shell=True)
            #call will do Popen().wait() so it will wait to be ready!
            if wait:
                return subprocess.Popen(command, shell=True).wait()
            else:
                return subprocess.Popen(command, shell=True)

    def _remove_ending(self, string, ending='.dat'):
        """
        Remove an ending from a filename. Filename must be
        without a path.
        """
        # NOTE(review): uses find(), so it truncates at the *first*
        # occurrence of `ending` inside the name.
        stop = string.find(ending)
        return string[:stop]

    def _modify_gf_param_file(self, new_file, out_path_line, path):
        """
        Modifies the GF parameter file to point to a right folder.
        Only modifies the date line and the path name of input and
        output line.
        """
        #read in a template
        fh = open(self.param_template, 'r')
        data = fh.readlines()
        fh.close()
        pos = -999  # sentinel: index of the line following the path header
        #write modified output
        out = open(path + '/' + new_file, 'w')
        for x, line in enumerate(data):
            if line.startswith('#25 july 2010'):
                # Replace the hard-coded date stamp with today's date.
                today = time.strftime("%a, %d %b %Y %H:%M:%S", time.gmtime())
                out.write('#' + today + '\n')
            elif line.startswith('#pathname of input and output'):
                pos = x + 1
            else:
                if x == pos:
                    # This is the line right after the path header:
                    # substitute the per-run output path.
                    out.write(out_path_line + '\n')
                else:
                    out.write(line)
        out.close()

    def _check_dir(self, filename, new_file):
        """
        Checks wheather the output directory exists or not.
        Will create a new directory if required and skip
        if the directory is already present. Will also
        call the _modify_gf_param_file method.

        :param filename: full path + filename of the input file
        :param new_file: name of the new parameter file

        :return: a list containing a boolean stating whether
                 the directory was present or not and the path
                 to the folder that was created.
        """
        base = os.path.basename(filename)
        path = self.out_path + self._remove_ending(base)
        #create output directory
        if not os.path.isdir(path):
            #make new directory
            os.mkdir(path)
            print 'Path %s created' % path
            #make a hard link to gf inside the run directory
            os.link(self.gf_binary, path + '/gf')
            #make a new param file and modify the template
            out_path_line = '\"' + path + '/' + '\"'
            self._modify_gf_param_file(new_file, out_path_line, path)
            return True, path
        else:
            return False, path

    def run(self):
        """
        Method threading will call.

        Loops forever: take a file from the queue, set up its run
        directory and, if it did not exist yet, run gf there (blocking),
        then mark the queue item done.
        """
        while True:
            new_file = 'param_file'
            #grabs a file from queue
            filename = self.queue.get()
            x = self._check_dir(filename, new_file)
            path = x[1]
            if x[0]:
                #run gf
                print 'Started running gf in %s' % path
                start_time = time.time()
                command = path + '/gf ' + path + '/%s > /dev/null' % new_file
                print 'Running command %s' % command
                self._call(command, wait=True)
                print 'Finished %s:\ngf took about %.0f minutes to run' % (path, -(start_time - time.time()) / 60.)
            else:
                print 'Path %s exists, skipping' % path
            #signals to queue job is done
            self.queue.task_done()
def main(input_files, cores=6):
    """Main driver function of the wrapper.

    Spawns *cores* daemon worker threads, feeds every path in
    *input_files* through a queue, and blocks until all are processed.
    """
    queue = Q.Queue()
    #spawn a pool of threads, and pass them queue instance
    for i in range(cores):
        th = Run_GF_Threaded(queue)
        th.setDaemon(True)
        th.start()
    #populate queue with data
    for file in input_files:
        queue.put(file)
    #wait on the queue until everything has been processed
    queue.join()
if __name__ == '__main__':
    # Process every Bolshoi isotree file with a pool of 6 workers
    # (Python 2 print statements).
    cores = 6
    inputs = g.glob('/Users/niemi/Desktop/Research/Bolshoi/bolshoi_isotrees/*.dat')
    #call the main function
    main(inputs, cores)
    print 'All done, check the output'
092a144b7b05d6b74a74b7b4551e7dd59dbe7d13 | 2,295 | py | Python | phantasy/tests/test_miscutils.py | archman/phantasy | ac362cd3a80f7d1cfc68c0722f8a4aad504d1edd | [
"BSD-3-Clause"
] | null | null | null | phantasy/tests/test_miscutils.py | archman/phantasy | ac362cd3a80f7d1cfc68c0722f8a4aad504d1edd | [
"BSD-3-Clause"
] | 1 | 2017-07-11T12:30:58.000Z | 2018-01-04T19:39:59.000Z | phantasy/tests/test_miscutils.py | archman/phantasy | ac362cd3a80f7d1cfc68c0722f8a4aad504d1edd | [
"BSD-3-Clause"
] | 1 | 2018-09-27T17:03:32.000Z | 2018-09-27T17:03:32.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Unittest for miscutils module
:author: Tong Zhang <zhangt@frib.msu.edu>
:date: 2016-11-22 11:52:21 AM EST
"""
import unittest
import numpy as np
from phantasy.library.misc import miscutils
import os
curdir = os.path.abspath(os.path.dirname(__file__))
class TestMiscUtils(unittest.TestCase):
    """Unit tests for phantasy.library.misc.miscutils flatten helpers."""

    def test_flatten_gen(self):
        # _flatten is the generator variant: compare via list().
        l0 = [1,2,3]
        self.assertEqual(list(miscutils._flatten(l0)), [1,2,3])
        l0 = [1,2,[3]]
        self.assertEqual(list(miscutils._flatten(l0)), [1,2,3])
        l0 = (1,2,[3])
        self.assertEqual(list(miscutils._flatten(l0)), [1,2,3])
        l0 = (1,2,(3,[4,5]))
        self.assertEqual(list(miscutils._flatten(l0)), [1,2,3,4,5])
        # Deeply nested mixed types flatten in order.
        l0 = [1,2,3,[4,5],[6,[7,8,[9,10,['x',['y']]]]]]
        l1 = list(miscutils._flatten(l0))
        l2 = [1,2,3,4,5,6,7,8,9,10,'x','y']
        self.assertEqual(l1, l2)

    def test_flatten_list(self):
        # flatten is the eager (list-returning) variant.
        l0 = [1,2,3]
        self.assertEqual(miscutils.flatten(l0), [1,2,3])
        l0 = [1,2,[3]]
        self.assertEqual(miscutils.flatten(l0), [1,2,3])
        l0 = (1,2,[3])
        self.assertEqual(miscutils.flatten(l0), [1,2,3])
        l0 = (1,2,(3,[4,5]))
        self.assertEqual(miscutils.flatten(l0), [1,2,3,4,5])
        l0 = [1,2,3,[4,5],[6,[7,8,[9,10,['x',['y']]]]]]
        l1 = miscutils.flatten(l0)
        l2 = [1,2,3,4,5,6,7,8,9,10,'x','y']
        self.assertEqual(l1, l2)

    def test_get_interset(self):
        # Intersection of several lists; empty when any input is empty
        # or nothing is shared by all.
        a, b, c = [], [], []
        self.assertEqual(miscutils.get_intersection(a, b, c), [])
        a, b, c = [1], [2], []
        self.assertEqual(miscutils.get_intersection(a, b, c), [])
        a, b, c = [1,2], [2], []
        self.assertEqual(miscutils.get_intersection(a, b, c), [2])
        a, b, c = [1,2], [2], [2,3]
        self.assertEqual(miscutils.get_intersection(a, b, c), [2])
        a, b, c = [1,2], [3,4], [2,3]
        self.assertEqual(miscutils.get_intersection(a, b, c), [])
def t_1():
    """Ad-hoc manual check of miscutils.flatten (prints, no asserts)."""
    l0 = [1,2,3]
    l1 = miscutils.flatten(l0)
    print(l1)
    l0 = [1,2,3, (4,5)]
    l1 = miscutils.flatten(l0)
    print(l1)
    l0 = [1,2,3,[4,5],[6,[7,8,[9,10,['x',['y']]]]]]
    l1 = miscutils.flatten(l0)
    print(l1)
if __name__ == '__main__':
    # Runs the ad-hoc demo, NOT the unittest suite
    # (use `python -m unittest` to run TestMiscUtils).
    t_1()
ba0685f4f2ec8ce113818b63f2b2d5e92653ab63 | 19,368 | py | Python | utils.py | Neukiru/spell-checkr | 6a3892d9d8a0268005d3f82c288e460fbb15f035 | [
"MIT"
] | null | null | null | utils.py | Neukiru/spell-checkr | 6a3892d9d8a0268005d3f82c288e460fbb15f035 | [
"MIT"
] | null | null | null | utils.py | Neukiru/spell-checkr | 6a3892d9d8a0268005d3f82c288e460fbb15f035 | [
"MIT"
] | null | null | null | import re
import io
import os
import regex as re
from itertools import islice
import numpy as np
from keras.models import Model, load_model
from keras.layers import Input
from model import truncated_acc, truncated_loss
np.random.seed(1234)
SOS = '\t' # start of sequence.
EOS = '*' # end of sequence.
# Georgian alphabet plus hyphen and space: the model's character set.
CHARS = list('აბგდევზთიკლმნოპჟრსტუფქღყშჩცძწხჰ- ')
# Characters kept when cleaning raw text (same set as CHARS).
whitelist = set('აბგდევზთიკლმნოპჟრსტუფქღყშჩცძწხჰ- ')
class CharacterTable(object):
    """Bidirectional character <-> one-hot codec over a fixed alphabet.

    Supports:
    + one-hot encoding of strings, padded to a fixed number of rows,
    + decoding one-hot / probability arrays back to text,
    + temperature-controlled multinomial sampling from a softmax output.
    """

    def __init__(self, chars):
        """Build the lookup tables from the characters in *chars*."""
        self.chars = sorted(set(chars))
        self.char2index = {c: i for i, c in enumerate(self.chars)}
        self.index2char = {i: c for i, c in enumerate(self.chars)}
        self.size = len(self.chars)

    def encode(self, C, nb_rows):
        """One-hot encode string *C* into an (nb_rows, alphabet) array.

        Rows beyond len(C) stay all-zero, which acts as padding so every
        sample has the same number of rows.
        """
        encoded = np.zeros((nb_rows, len(self.chars)), dtype=np.float32)
        for row, char in enumerate(C):
            encoded[row, self.char2index[char]] = 1.0
        return encoded

    def decode(self, x, calc_argmax=True):
        """Decode *x* back to ``(indices, string)``.

        *x* is a 2D array of probabilities or one-hot rows (default), or,
        with ``calc_argmax=False``, already a vector of character indices.
        """
        indices = x.argmax(axis=-1) if calc_argmax else x
        decoded = ''.join(self.index2char[i] for i in indices)
        return indices, decoded

    def sample_multinomial(self, preds, temperature=1.0):
        """Sample one ``(index, char)`` from softmax output *preds*
        (shape (1, 1, nb_chars)), sharpened/flattened by *temperature*."""
        # Flatten to a 1D vector of per-character probabilities.
        flat = np.reshape(preds, len(self.chars)).astype(np.float64)
        scaled = np.log(flat) / temperature
        expd = np.exp(scaled)
        normalized = expd / np.sum(expd)
        draws = np.random.multinomial(1, normalized, 1)
        index = np.argmax(draws)
        return index, self.index2char[index]
def read_text(data_path, list_of_books):
    """Concatenate the contents of every file in *list_of_books* (found
    under *data_path*) into one string, separated by single spaces."""
    text = ''
    for book in list_of_books:
        file_path = os.path.join(data_path, book)
        strings = io.open(file_path,mode="r", encoding="utf-8").read()
        text += strings + ' '
    return text
def tokenize(text):
    """Split raw text into cleaned word tokens.

    NOTE: ``re`` here is the third-party ``regex`` module (imported as
    ``re`` at the top of the file) -- the ``\P{P}`` (non-punctuation)
    class is not supported by the stdlib ``re``.
    """
    # Replace every punctuation run (except '-') with a space, collapse
    # whitespace, then split on hyphen/newline/space and drop any
    # character outside the Georgian whitelist.
    tokens = re.sub('[^\P{P}-]+', ' ', text)
    tokens = re.sub('\s+',' ', tokens)
    tokens = [''.join(filter(whitelist.__contains__, token))
              for token in re.split("[-\n ]", tokens)]
    return tokens
def add_segmentation(tokens, error_rate, n_gramms=2):
    """Build parallel (target, input) n-gram pairs for segmentation data.

    For every window of *n_gramms* consecutive tokens the target side is
    the space-joined window; with probability *error_rate* the input side
    is the same window joined WITHOUT spaces (a simulated segmentation
    error), otherwise it equals the target.  The RNG is reseeded on entry,
    so output is deterministic for a given input.
    """
    np.random.seed(1234)
    targets = []
    corrupted = []
    last_start = len(tokens) - (n_gramms - 1)
    for position in range(len(tokens)):
        # Draw first so RNG consumption is one value per index, even for
        # trailing positions that cannot form a full window.
        drop_space = np.random.rand() <= error_rate
        if position >= last_start:
            continue
        window = tokens[position:position + n_gramms]
        targets.append(" ".join(window))
        corrupted.append("".join(window) if drop_space else " ".join(window))
    return targets, corrupted
def add_segmentation_errors(tokens, error_rate, n_gramms=2):
    """Like add_segmentation, but over NON-overlapping windows.

    The ``prohibited`` flag skips the index immediately after an emitted
    window so consecutive windows never overlap; with probability
    *error_rate* a window is joined without spaces on the corrupted side.
    NOTE(review): block indentation was reconstructed from a
    whitespace-mangled source -- confirm the toggle placement against
    the upstream repository.
    """
    prohibited = False
    np.random.seed(1234)
    corrected_tokens = []
    corrupted_tokens = []
    for index, obj in enumerate(tokens):
        rand = np.random.rand()
        if rand <= error_rate and not prohibited:
            if index < len(tokens) - (n_gramms - 1):
                # Emit the window merged (segmentation error) and mark the
                # next index to be skipped so windows stay disjoint.
                corrected_tokens.append(" ".join(tokens[index:index + n_gramms]))
                corrupted_tokens.append("".join(tokens[index:index + n_gramms]))
                prohibited = not prohibited
            else:
                # Tail token: no full window left, keep it as-is.
                corrected_tokens.append(tokens[index])
                corrupted_tokens.append(tokens[index])
        else:
            if not prohibited:
                if index < len(tokens) - (n_gramms - 1):
                    corrected_tokens.append(" ".join(tokens[index:index + n_gramms]))
                    corrupted_tokens.append(" ".join(tokens[index:index + n_gramms]))
                else:
                    corrected_tokens.append(tokens[index])
                    corrupted_tokens.append(tokens[index])
            # Toggle: just emitted -> skip next index; just skipped -> resume.
            prohibited = not prohibited
    return corrected_tokens, corrupted_tokens
def add_speling_erors(token, error_rate):
    """Return *token* with (possibly) one artificial typo injected.

    With total probability *error_rate* one of four edits -- substitution,
    deletion, insertion, adjacent transposition -- is applied, each being
    equally likely.  Otherwise the token is returned untouched.  Tokens
    shorter than 3 characters are never altered.
    """
    assert 0.0 <= error_rate < 1.0
    if len(token) < 3:
        return token
    roll = np.random.rand()
    # Each of the four corruption kinds owns a slice of width error_rate/4.
    slot = error_rate / 4.0
    if roll < slot:
        # Substitution: overwrite one character with a random one.
        at = np.random.randint(len(token))
        return token[:at] + np.random.choice(CHARS) + token[at + 1:]
    if slot < roll < slot * 2:
        # Deletion: drop one character.
        at = np.random.randint(len(token))
        return token[:at] + token[at + 1:]
    if slot * 2 < roll < slot * 3:
        # Insertion: add a random character.
        at = np.random.randint(len(token))
        return token[:at] + np.random.choice(CHARS) + token[at:]
    if slot * 3 < roll < slot * 4:
        # Transposition: swap two adjacent characters.
        at = np.random.randint(len(token) - 1)
        return token[:at] + token[at + 1] + token[at] + token[at + 2:]
    # No spelling error this time.
    return token
def transform(tokens, corrupted_tokens, maxlen, error_rate=0.3, shuffle=True):
    """Transform tokens into model inputs and targets.

    All inputs and targets are padded to ``maxlen`` with the EOS character.

    Args:
        tokens: Clean (target-side) tokens.
        corrupted_tokens: Segmentation-corrupted counterparts of ``tokens``.
        maxlen: Length every sequence is padded to.
        error_rate: Probability of injecting a spelling error per token.
        shuffle: Whether to shuffle the (token, corrupted_token) pairs.

    Returns:
        A tuple ``(encoder_tokens, decoder_tokens, target_tokens)`` of
        equal-length lists of padded strings.
    """
    # Guard on ``tokens``: the original ``zip(*[])`` unpack raised a
    # ValueError when called with empty input and shuffle=True.
    if shuffle and tokens:
        print('Shuffling data.')
        paired = list(zip(tokens, corrupted_tokens))
        np.random.shuffle(paired)
        tokens, corrupted_tokens = [list(pack) for pack in zip(*paired)]
    encoder_tokens = []
    decoder_tokens = []
    target_tokens = []
    for corrupted_token, token in zip(corrupted_tokens, tokens):
        # Encoder input: corrupted token with extra spelling noise.
        encoder = add_speling_erors(corrupted_token, error_rate=error_rate)
        encoder += EOS * (maxlen - len(encoder))  # Padded to maxlen.
        encoder_tokens.append(encoder)
        # Decoder input: clean token prefixed with the start-of-sequence
        # character (teacher forcing).
        decoder = SOS + token
        decoder += EOS * (maxlen - len(decoder))
        decoder_tokens.append(decoder)
        # Target: decoder input shifted left by one character.
        target = decoder[1:]
        target += EOS * (maxlen - len(target))
        target_tokens.append(target)
        # Validate every example; the original assert sat outside the loop
        # and therefore only checked the final triple.
        assert len(encoder) == len(decoder) == len(target)
    return encoder_tokens, decoder_tokens, target_tokens
def batch(tokens, maxlen, ctable, batch_size=128, reverse=False):
    """Split data into chunks of `batch_size` examples.

    Yields an infinite stream of ``(batch_size, maxlen, ctable.size)``
    float32 arrays, cycling over ``tokens`` forever.  Each token is assumed
    to carry a trailing newline, which is stripped before encoding.

    Args:
        tokens: Iterable of newline-terminated token strings.
        maxlen: Sequence length each token is encoded to.
        ctable: Character table exposing ``size`` and ``encode(token, maxlen)``.
        batch_size: Number of examples per yielded batch.
        reverse: If True, feed each token to the encoder reversed.
    """
    def generate(tokens, reverse):
        while (True):  # This flag yields an infinite generator.
            for token in tokens:
                if reverse:
                    # Strip the trailing newline, then reverse.  The original
                    # yielded ``token[:-1]`` AFTER already reversing the
                    # stripped token, chopping an extra character off every
                    # reversed token (cf. the correct batch_from_file()).
                    yield token[:-1][::-1]
                else:
                    yield token[:-1]
    token_iterator = generate(tokens, reverse)
    data_batch = np.zeros((batch_size, maxlen, ctable.size),
                          dtype=np.float32)
    while (True):
        # NOTE: the same ndarray object is yielded (and overwritten) each
        # iteration; consumers must copy if they need to keep a batch.
        for i in range(batch_size):
            token = next(token_iterator)
            data_batch[i] = ctable.encode(token, maxlen)
        yield data_batch
def get_nth_line(file, n):
    """Return line ``n`` (0-based) of an open text ``file``, without its
    trailing newline.

    The whole file is scanned on every call to rebuild the per-line
    character offsets, so this is O(file size) per lookup.  ``file.read(k)``
    (rather than ``seek``) is used to advance to the target line because the
    offsets are counted in *characters*, which are not valid text-mode seek
    cookies for non-trivial encodings.

    Args:
        file: An open, seekable text file object.
        n: 0-based index of the line to fetch.

    Returns:
        The requested line as a string, newline stripped.
    """
    file.seek(0, 0)
    line_offset = []
    offset = 0
    for line in file:
        line_offset.append(offset)
        offset += len(line)
    file.seek(0, 0)
    file.read(line_offset[n])
    string = file.readline()
    # Strip the newline only if present: the last line of a file may not end
    # with one, and the original unconditional ``string[:-1]`` then ate its
    # final character.
    if string.endswith('\n'):
        string = string[:-1]
    return string
def batch_from_file(token_stream, maxlen, ctable, batch_size=128, reverse=False):
    """Split data into chunks of `batch_size` examples.

    Endlessly replays ``token_stream`` (one newline-terminated token per
    line) and yields encoded batches of shape
    ``(batch_size, maxlen, ctable.size)``.
    """
    def endless_tokens(stream, flip):
        # Rewind and replay the stream forever so the generator never ends.
        while True:
            stream.seek(0, 0)
            for raw in stream:
                stripped = raw[:-1]  # drop the trailing newline
                yield stripped[::-1] if flip else stripped
    feed = endless_tokens(token_stream, reverse)
    encoded = np.zeros((batch_size, maxlen, ctable.size),
                       dtype=np.float32)
    while True:
        for slot in range(batch_size):
            encoded[slot] = ctable.encode(next(feed), maxlen)
        yield encoded
def preprocess_in_chuncks(data_path, list_of_books, num_lines, train_val_flag=0,
                          segmentation_rate=0.5, n_grammes=3):
    """Tokenize ``list_of_books`` in chunks of ``num_lines`` lines and append
    the (clean, corrupted) token streams to per-split text files.

    Args:
        data_path: Directory containing the book files.
        list_of_books: File names (relative to ``data_path``) to process.
        num_lines: Number of lines read per chunk.
        train_val_flag: 0 for the 'train' split, anything else for 'val'.
        segmentation_rate: Passed through to ``add_segmentation``.
        n_grammes: Passed through to ``add_segmentation``.

    Returns:
        Tuple ``(tokenized_path, corr_tokenized_path, maxlen, data_count)``
        where ``maxlen`` is the longest token length seen plus 2 (room for
        SOS/EOS) and ``data_count`` sums per-chunk unique token counts.
    """
    prefix = 'train' if train_val_flag == 0 else 'val'
    tokenized_path = '{}_tokenized_file.txt'.format(prefix)
    corr_tokenized_path = '{}_corr_tokenized_file.txt'.format(prefix)
    data_count = 0
    maxlen = 0
    # Open the two output files once in append mode and guarantee they are
    # closed: the original opened a fresh pair of handles per book inside the
    # loop and never closed any of them.
    with open(tokenized_path, 'a') as tokenized_file, \
            open(corr_tokenized_path, 'a') as corr_tokenized_file:
        for book in list_of_books:
            file_path = os.path.join(data_path, book)
            with open(file_path, mode='r', encoding='utf-8') as f:
                while True:
                    next_n_lines = "".join(islice(f, num_lines))
                    if not next_n_lines:
                        break
                    tokenized = list(filter(None, tokenize(next_n_lines)))
                    tokenized, corr_tokenized = add_segmentation(
                        tokenized, segmentation_rate, n_grammes)
                    # ``default=-2`` guards against an empty chunk, where the
                    # original ``max([...])`` raised a ValueError; -2 + 2 = 0
                    # leaves ``maxlen`` unchanged.
                    tmp_len = max(
                        (len(token) for token in tokenized), default=-2) + 2
                    if maxlen < tmp_len:
                        maxlen = tmp_len
                    data_count += len(set(tokenized))
                    corr_tokenized_file.writelines(
                        t + '\n' for t in corr_tokenized)
                    tokenized_file.writelines(t + '\n' for t in tokenized)
    print('preprocessing complete',)
    return tokenized_path, corr_tokenized_path, maxlen, data_count
def transform_in_chunks(tokenized_file,corr_tokenized_file,chunk_size,maxlen,train_val_flag = 0,error_rate = 0.5,shuffle = False):
    """Stream (clean, corrupted) token files through spelling-error injection
    and write padded encoder/decoder/target files for the chosen split.

    Reads ``chunk_size`` lines at a time so the whole corpus never has to fit
    in memory.  Returns the three output file names.

    NOTE(review): none of the five file handles opened here is ever closed —
    a resource leak; consider ``with`` blocks.
    """
    if train_val_flag == 0:
        train_val_flag = 'train'
    else:
        train_val_flag = 'val'
    tokenized_stream = open(tokenized_file,'r')
    corr_tokenized_stream = open(corr_tokenized_file,'r')
    # If output files from a previous run exist, truncate them first so this
    # run starts from scratch; otherwise create them ('a+').
    encoder_file = os.path.exists(os.path.join(os.getcwd(), '{}_encoder.txt'.format(train_val_flag)))
    if encoder_file:
        open('{}_encoder.txt'.format(train_val_flag), 'w').close()
        open('{}_decoder.txt'.format(train_val_flag), 'w').close()
        open('{}_target.txt'.format(train_val_flag), 'w').close()
        encoder_tokens = open('{}_encoder.txt'.format(train_val_flag),'a')
        decoder_tokens = open('{}_decoder.txt'.format(train_val_flag),'a')
        target_tokens = open('{}_target.txt'.format(train_val_flag),'a')
    else:
        encoder_tokens = open('{}_encoder.txt'.format(train_val_flag),'a+')
        decoder_tokens = open('{}_decoder.txt'.format(train_val_flag),'a+')
        target_tokens = open('{}_target.txt'.format(train_val_flag),'a+')
    eof = False
    while not eof:
        # Pull up to chunk_size parallel (clean, corrupted) line pairs.
        tokens = []
        corrupted_tokens = []
        counter = 0
        while counter < chunk_size:
            counter += 1
            next_token = tokenized_stream.readline()[:-1]
            next_corr_token = corr_tokenized_stream.readline()[:-1]
            if not next_token:
                # Empty read means end of the clean-token file.
                eof = True
                break
            tokens.append(next_token)
            corrupted_tokens.append(next_corr_token)
        if shuffle:
            shuffle_tokens = list(zip(tokens, corrupted_tokens))
            np.random.shuffle(shuffle_tokens)
            tokens,corrupted_tokens = [list(pack) for pack in zip(*shuffle_tokens)]
        for corrupted_token,token in zip(corrupted_tokens,tokens):
            # Encoder line: corrupted token + spelling noise, EOS-padded.
            # NOTE: ``writelines`` is called with a single string here; that
            # works (a str is an iterable of chars) but is effectively write().
            encoder = add_speling_erors( corrupted_token, error_rate)
            encoder += EOS * (maxlen - len(encoder)) + '\n' # Padded to maxlen.
            encoder_tokens.writelines(encoder)
            decoder = SOS + token
            decoder += EOS * (maxlen - len(decoder))+'\n'
            decoder_tokens.writelines(decoder)
            # Target is the decoder line shifted left by one; the slice keeps
            # the decoder's trailing '\n', so no newline is appended and the
            # padding expression below adds nothing.
            target = decoder[1:]
            target += EOS * (maxlen - len(target))
            target_tokens.writelines(target)
            assert(len(encoder[:-1]) == len(decoder[:-1]) == len(target))
    return '{}_encoder.txt'.format(train_val_flag),'{}_decoder.txt'.format(train_val_flag),'{}_target.txt'.format(train_val_flag)
def datagen(encoder_iter, decoder_iter, target_iter):
    """Utility function to load data into required model format.

    Pairs each (encoder, decoder) batch with its target batch and yields
    ``([encoder_batch, decoder_batch], target_batch)`` tuples forever.
    """
    paired_inputs = zip(encoder_iter, decoder_iter)
    while True:
        enc_batch, dec_batch = next(paired_inputs)
        tgt_batch = next(target_iter)
        yield ([enc_batch, dec_batch], tgt_batch)
def decode_sequences(input_length,inputs, targets, input_ctable, target_ctable,
                     maxlen, reverse, encoder_model, decoder_model,
                     nb_examples, sample_mode='argmax', random=True):
    """Greedily decode ``nb_examples`` sequences with the attention seq2seq
    inference models and return (input, target, decoded) token lists.

    ``inputs``/``targets`` are open text files indexed via get_nth_line();
    ``input_ctable``/``target_ctable`` are character tables providing
    encode/decode.  ``sample_mode`` selects argmax or multinomial sampling.
    """
    input_tokens = []
    target_tokens = []
    # Pick which examples to decode: random rows or simply the first n.
    if random:
        indices = np.random.randint(0, input_length, nb_examples)
    else:
        indices = range(nb_examples)
    for index in indices:
        input_tokens.append(get_nth_line(inputs,index))
        target_tokens.append(get_nth_line(targets,index))
    # Encode all picked tokens as a single batch (batch() is an infinite
    # generator; we only need its first yield).
    input_sequences = batch(input_tokens, maxlen, input_ctable,
                            nb_examples, reverse)
    input_sequences = next(input_sequences)
    # Procedure for inference mode (sampling):
    # 1) Encode input and retrieve initial decoder state.
    # 2) Run one step of decoder with this initial state
    # and a start-of-sequence character as target.
    # Output will be the next target character.
    # 3) Repeat with the current target character and current states.
    # Encode the input as state vectors.
    enc_output, enc_h, enc_c = encoder_model.predict(input_sequences)
    states_value = [enc_h,enc_c]
    # Create batch of empty target sequences of length 1 character.
    target_sequences = np.zeros((nb_examples, 1, target_ctable.size))
    # Populate the first element of target sequence
    # with the start-of-sequence character.
    target_sequences[:, 0, target_ctable.char2index[SOS]] = 1.0
    # Sampling loop for a batch of sequences.
    # Exit condition: either hit max character limit
    # or encounter end-of-sequence character.
    decoded_tokens = [''] * nb_examples
    for _ in range(maxlen):
        # `char_probs` has shape
        # (nb_examples, 1, nb_target_chars)
        char_probs, h, c = decoder_model.predict(
            [enc_output,target_sequences] + states_value)
        # Reset the target sequences.
        target_sequences = np.zeros((nb_examples, 1, target_ctable.size))
        # Sample next character using argmax or multinomial mode.
        sampled_chars = []
        for i in range(nb_examples):
            if sample_mode == 'argmax':
                next_index, next_char = target_ctable.decode(
                    char_probs[i], calc_argmax=True)
            elif sample_mode == 'multinomial':
                next_index, next_char = target_ctable.sample_multinomial(
                    char_probs[i], temperature=0.5)
            else:
                raise Exception(
                    "`sample_mode` accepts `argmax` or `multinomial`.")
            decoded_tokens[i] += next_char
            sampled_chars.append(next_char)
            # Update target sequence with index of next character.
            target_sequences[i, 0, next_index] = 1.0
        # Stop early only when EVERY sequence in the batch emitted EOS in
        # this step.
        stop_char = set(sampled_chars)
        if len(stop_char) == 1 and stop_char.pop() == EOS:
            break
        # Update states.
        states_value = [h, c]
    # Sampling finished. Strip the EOS padding from all three token lists.
    input_tokens = [re.sub('[%s]' % EOS, '', token)
                    for token in input_tokens]
    target_tokens = [re.sub('[%s]' % EOS, '', token)
                     for token in target_tokens]
    decoded_tokens = [re.sub('[%s]' % EOS, '', token)
                      for token in decoded_tokens]
    return input_tokens, target_tokens, decoded_tokens
def restore_model(path_to_full_model, hidden_size):
    """Restore model to construct the encoder and decoder.

    Loads the full trained seq2seq model from disk and rewires its trained
    layers into two standalone inference models:
      * encoder_model: input sequence -> (lstm outputs, h, c)
      * decoder_model: (encoder outputs, decoder input, h, c) ->
        (softmax char probabilities, new h, new c)
    ``hidden_size`` must match the LSTM width used at training time.
    """
    model = load_model(path_to_full_model, custom_objects={
        'truncated_acc': truncated_acc, 'truncated_loss': truncated_loss})
    # --- Encoder: reuse the two trained encoder LSTM layers. ---
    encoder_inputs = model.input[0] # encoder_data
    encoder_lstm1 = model.get_layer('encoder_lstm_1')
    encoder_lstm2 = model.get_layer('encoder_lstm_2')
    encoder_outputs = encoder_lstm1(encoder_inputs)
    encoder_outputs, state_h, state_c = encoder_lstm2(encoder_outputs)
    encoder_states = [state_h, state_c]
    encoder_model = Model(inputs=encoder_inputs, outputs=[encoder_outputs] + encoder_states)
    # --- Decoder: fresh Inputs for the recurrent state and the encoder
    # outputs, so one step can be run at a time during sampling. ---
    decoder_inputs = model.input[1] # decoder_data
    decoder_state_input_h = Input(shape=(hidden_size,))
    decoder_state_input_c = Input(shape=(hidden_size,))
    decoder_enc_output = Input(shape=(None,hidden_size))
    decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]
    decoder_lstm = model.get_layer('decoder_lstm')
    decoder_outputs, state_h, state_c = decoder_lstm(
        decoder_inputs, initial_state=decoder_states_inputs)
    decoder_states = [state_h, state_c]
    # Luong-style attention: score decoder outputs against encoder outputs,
    # softmax, then build the context vector and concatenate.
    dot_layer1 = model.get_layer('dot_layer1')
    attention = dot_layer1([decoder_outputs, decoder_enc_output])
    activation_layer = model.get_layer('activation_layer')
    attention = activation_layer(attention)
    dot_layer2 = model.get_layer('dot_layer2')
    context = dot_layer2([attention, decoder_enc_output])
    conc_layer = model.get_layer('conc_layer')
    decoder_outputs = conc_layer([context, decoder_outputs])
    decoder_softmax = model.get_layer('decoder_softmax')
    decoder_outputs = decoder_softmax(decoder_outputs)
    decoder_model = Model(inputs=[decoder_enc_output,decoder_inputs] + decoder_states_inputs,
                          outputs=[decoder_outputs] + decoder_states)
    return encoder_model, decoder_model
c66d0b1d0d2b245e6f10b5df4f61336b26f1d1dc | 5,402 | py | Python | src/dashboard/app_animation.py | drblahdblah/covid-19-analysis | abd6ed23a49bd1277708163cad0741c5b9f9698a | [
"MIT"
] | 3 | 2020-04-20T11:10:23.000Z | 2020-04-22T08:06:04.000Z | src/dashboard/app_animation.py | drblahdblah/covid-19-analysis | abd6ed23a49bd1277708163cad0741c5b9f9698a | [
"MIT"
] | null | null | null | src/dashboard/app_animation.py | drblahdblah/covid-19-analysis | abd6ed23a49bd1277708163cad0741c5b9f9698a | [
"MIT"
] | null | null | null | import plotly.graph_objects as go
from datetime import datetime
import pandas as pd
# Widen pandas console output so .head() prints are readable while debugging.
pd.set_option("display.max_columns", 500)
pd.set_option("display.max_rows", 1000)
pd.set_option("display.width", 1000)
# NOTE(review): this gapminder sample dataset is loaded and printed but never
# used below — it looks like leftover scaffolding from the plotly animation
# example this script was adapted from.
url = "https://raw.githubusercontent.com/plotly/datasets/master/gapminderDataFiveYear.csv"
dataset = pd.read_csv(url)
print(dataset.head())
# Load today's stacked COVID indicator dataframe (long format: one row per
# Date/Country/indicator).
date_today = datetime.strftime(datetime.today(), '%d-%m-%Y')
stacked_df_path = f'../../data/output/complete_df/stacked/{date_today}/result.csv'
df = pd.read_csv(stacked_df_path, header=0)
days = df.Days.unique()
# print(f"Days: {df.Days.unique()}")
continents = df.Continent.unique()
# print(f"continents: {continents}")
# make figure
fig_dict = {
    "data": [],
    "layout": {},
    "frames": []
}
# Keep only the two indicators we plot, then pivot long -> wide so
# 'total_cases' and 'new_cases' become columns.
df_total_new_cases = df.loc[(df.indicator == 'total_cases') | (df.indicator == 'new_cases')]
df_total_new_cases = df_total_new_cases.drop(labels=['Unnamed: 0'], axis=1)
# print(df_total_new_cases.head())
# blerg = df_total_new_cases[['Date', 'indicator', 'value']]
pivoted = (df_total_new_cases
           .set_index(['Date', 'Country/Region', 'Continent', 'Days'])
           .pivot_table(values='value',
                        index=['Date', 'Country/Region', 'Continent', 'Days'],
                        columns='indicator',
                        aggfunc='mean',
                        fill_value=0)
           .reset_index()
           )
print(pivoted.loc[pivoted.Continent == 'Australia'].head())
# fill in most of layout
# Log-log axes: ranges are exponents (10^0..10^6 total, 10^0..10^5 new).
fig_dict["layout"]["xaxis"] = {"range": [0, 6],
                               "title": "Total Cases",
                               'type': 'log'}
fig_dict["layout"]["yaxis"] = {"range": [0, 5],
                               "title": "New Cases",
                               "type": "log"}
fig_dict["layout"]["hovermode"] = "closest"
# NOTE(review): this sliders value is overwritten by the
# ``fig_dict["layout"]["sliders"] = [sliders_dict]`` assignment at the bottom
# of the script; only the final assignment takes effect.
fig_dict["layout"]["sliders"] = {
    "args": [
        "transition", {
            "duration": 400,
            "easing": "cubic-in-out"
        }
    ],
    "initialValue": "0",
    "plotlycommand": "animate",
    "values": days,
    "visible": True
}
# Play/Pause buttons driving the frame animation.
fig_dict["layout"]["updatemenus"] = [
    {
        "buttons": [
            {
                "args": [None, {"frame": {"duration": 500,
                                          "redraw": False},
                                "fromcurrent": True,
                                "transition": {"duration": 300,
                                               "easing": "quadratic-in-out"}}
                         ],
                "label": "Play",
                "method": "animate"
            },
            {
                "args": [[None], {"frame": {"duration": 0,
                                            "redraw": False},
                                  "mode": "immediate",
                                  "transition": {"duration": 0}}
                         ],
                "label": "Pause",
                "method": "animate"
            }
        ],
        "direction": "left",
        "pad": {"r": 10, "t": 87},
        "showactive": False,
        "type": "buttons",
        "x": 0.1,
        "xanchor": "right",
        "y": 0,
        "yanchor": "top"
    }
]
# Slider skeleton; one step per day is appended in the frame loop below.
sliders_dict = {
    "active": 0,
    "yanchor": "top",
    "xanchor": "left",
    "currentvalue": {
        "font": {"size": 20},
        "prefix": "Day:",
        "visible": True,
        "xanchor": "right"
    },
    "transition": {"duration": 300, "easing": "cubic-in-out"},
    "pad": {"b": 10, "t": 50},
    "len": 0.9,
    "x": 0.1,
    "y": 0,
    "steps": []
}
# make data
# Initial (day 0) scatter trace per continent: total vs. new cases, marker
# size proportional to total cases.
day = 0
for Continent in continents:
    dataset_by_day = pivoted[pivoted["Days"] == day]
    dataset_by_year_and_cont = dataset_by_day[
        dataset_by_day["Continent"] == Continent]
    data_dict = {
        "x": list(dataset_by_year_and_cont["total_cases"]),
        "y": list(dataset_by_year_and_cont["new_cases"]),
        "mode": "markers",
        "text": list(dataset_by_year_and_cont["Country/Region"]),
        "marker": {
            "sizemode": "area",
            "sizeref": 100,
            "size": list(dataset_by_year_and_cont["total_cases"])
        },
        "name": Continent
    }
    fig_dict["data"].append(data_dict)
# make frames
# One animation frame per day, each holding one trace per continent, plus a
# matching slider step.
for day in days:
    frame = {"data": [], "name": str(day)}
    for continent in continents:
        dataset_by_year = pivoted[pivoted["Days"] == int(day)]
        dataset_by_year_and_cont = dataset_by_year[
            dataset_by_year["Continent"] == continent]
        data_dict = {
            "x": list(dataset_by_year_and_cont["total_cases"]),
            "y": list(dataset_by_year_and_cont["new_cases"]),
            "mode": "markers",
            "text": list(dataset_by_year_and_cont["Country/Region"]),
            "marker": {
                "sizemode": "area",
                "sizeref": 100,
                "size": list(dataset_by_year_and_cont["total_cases"])
            },
            "name": continent
        }
        frame["data"].append(data_dict)
    fig_dict["frames"].append(frame)
    slider_step = {"args": [
        [str(day)],
        {"frame": {"duration": 300, "redraw": False},
         "mode": "immediate",
         "transition": {"duration": 300}}
    ],
        "label": str(day),
        "method": "animate"}
    sliders_dict["steps"].append(slider_step)
fig_dict["layout"]["sliders"] = [sliders_dict]
fig = go.Figure(fig_dict)
fig.show()
| 30.178771 | 92 | 0.501851 |
5178ca2dd39c4543a89f3c0d8d72afd987d5ad22 | 10,618 | py | Python | graphslam/pose/se2.py | Golbstein/python-graphslam | cccc022b2f5d797f6511bda9e7dd3a24af403016 | [
"MIT"
] | 97 | 2020-02-24T00:34:56.000Z | 2022-03-23T11:43:19.000Z | graphslam/pose/se2.py | Golbstein/python-graphslam | cccc022b2f5d797f6511bda9e7dd3a24af403016 | [
"MIT"
] | 3 | 2020-02-18T15:46:40.000Z | 2022-03-17T02:01:51.000Z | graphslam/pose/se2.py | Golbstein/python-graphslam | cccc022b2f5d797f6511bda9e7dd3a24af403016 | [
"MIT"
] | 13 | 2020-06-09T08:27:27.000Z | 2021-11-23T14:05:14.000Z | # Copyright (c) 2020 Jeff Irion and contributors
r"""Representation of a pose in :math:`SE(2)`.
"""
import math
import numpy as np
from .base_pose import BasePose
from ..util import neg_pi_to_pi
class PoseSE2(BasePose):
    r"""A representation of a pose in :math:`SE(2)`.

    Parameters
    ----------
    position : np.ndarray, list
        The position in :math:`\mathbb{R}^2`
    orientation : float
        The angle of the pose (in radians)

    """
    def __new__(cls, position, orientation):
        # Store the pose as the ndarray [x, y, theta]; the angle is
        # normalized into (-pi, pi] by neg_pi_to_pi.
        obj = np.array([position[0], position[1], neg_pi_to_pi(orientation)],
                       dtype=np.float64).view(cls)
        return obj

    def copy(self):
        """Return a copy of the pose.

        Returns
        -------
        PoseSE2
            A copy of the pose

        """
        return PoseSE2(self[:2], self[2])

    def to_array(self):
        """Return the pose as a numpy array.

        Returns
        -------
        np.ndarray
            The pose as a numpy array

        """
        return np.array(self)

    def to_compact(self):
        """Return the pose as a compact numpy array.

        For :math:`SE(2)` the compact representation is the same
        ``[x, y, theta]`` array as ``to_array()``.

        Returns
        -------
        np.ndarray
            The pose as a compact numpy array

        """
        return np.array(self)

    def to_matrix(self):
        """Return the pose as an :math:`SE(2)` matrix.

        Returns
        -------
        np.ndarray
            The pose as an :math:`SE(2)` matrix

        """
        return np.array([[np.cos(self[2]), -np.sin(self[2]), self[0]],
                         [np.sin(self[2]), np.cos(self[2]), self[1]],
                         [0., 0., 1.]], dtype=np.float64)

    @classmethod
    def from_matrix(cls, matrix):
        """Return the pose as an :math:`SE(2)` matrix.

        Parameters
        ----------
        matrix : np.ndarray
            The :math:`SE(2)` matrix that will be converted to a `PoseSE2` instance

        Returns
        -------
        PoseSE2
            The matrix as a `PoseSE2` object

        """
        return cls([matrix[0, 2], matrix[1, 2]],
                   math.atan2(matrix[1, 0], matrix[0, 0]))

    # ======================================================================= #
    #                                                                         #
    #                                Properties                               #
    #                                                                         #
    # ======================================================================= #
    @property
    def position(self):
        """Return the pose's position.

        Returns
        -------
        np.ndarray
            The position portion of the pose

        """
        return np.array(self[:2])

    @property
    def orientation(self):
        """Return the pose's orientation.

        Returns
        -------
        float
            The angle of the pose

        """
        return self[2]

    @property
    def inverse(self):
        """Return the pose's inverse.

        Returns
        -------
        PoseSE2
            The pose's inverse

        """
        # Bug fix: the original passed the *list* ``[self[2]]`` to the final
        # ``np.cos`` call, which made the y component a 1-element array
        # instead of a scalar.
        return PoseSE2([-self[0] * np.cos(self[2]) - self[1] * np.sin(self[2]),
                        self[0] * np.sin(self[2]) - self[1] * np.cos(self[2])],
                       -self[2])

    # ======================================================================= #
    #                                                                         #
    #                              Magic Methods                              #
    #                                                                         #
    # ======================================================================= #
    def __add__(self, other):
        r"""Add poses (i.e., pose composition): :math:`p_1 \oplus p_2`.

        Parameters
        ----------
        other : PoseSE2
            The other pose

        Returns
        -------
        PoseSE2
            The result of pose composition

        """
        return PoseSE2(
            [self[0] + other[0] * np.cos(self[2]) - other[1] * np.sin(self[2]),
             self[1] + other[0] * np.sin(self[2]) + other[1] * np.cos(self[2])],
            neg_pi_to_pi(self[2] + other[2]))

    def __sub__(self, other):
        r"""Subtract poses (i.e., inverse pose composition): :math:`p_1 \ominus p_2`.

        Parameters
        ----------
        other : PoseSE2
            The other pose

        Returns
        -------
        PoseSE2
            The result of inverse pose composition

        """
        return PoseSE2(
            [(self[0] - other[0]) * np.cos(other[2]) + (self[1] - other[1]) * np.sin(other[2]),
             (other[0] - self[0]) * np.sin(other[2]) + (self[1] - other[1]) * np.cos(other[2])],
            neg_pi_to_pi(self[2] - other[2]))

    # ======================================================================= #
    #                                                                         #
    #                                Jacobians                                #
    #                                                                         #
    # ======================================================================= #
    # For SE(2), poses and their "compact" forms are identical, so each
    # ``*_compact`` Jacobian simply delegates to its full counterpart
    # (the originals were duplicated verbatim).
    def jacobian_self_oplus_other_wrt_self(self, other):
        r"""Compute the Jacobian of :math:`p_1 \oplus p_2` w.r.t. :math:`p_1`.

        Parameters
        ----------
        other : BasePose
            The pose that is being added to ``self``

        Returns
        -------
        np.ndarray
            The Jacobian of :math:`p_1 \oplus p_2` w.r.t. :math:`p_1`.

        """
        return np.array(
            [[1., 0., -other[0] * np.sin(self[2]) - other[1] * np.cos(self[2])],
             [0., 1., other[0] * np.cos(self[2]) - other[1] * np.sin(self[2])],
             [0., 0., 1.]])

    def jacobian_self_oplus_other_wrt_self_compact(self, other):
        r"""Compute the Jacobian of :math:`p_1 \oplus p_2` w.r.t. :math:`p_1`.

        Identical to ``jacobian_self_oplus_other_wrt_self`` for SE(2).

        """
        return self.jacobian_self_oplus_other_wrt_self(other)

    def jacobian_self_oplus_other_wrt_other(self, other):
        r"""Compute the Jacobian of :math:`p_1 \oplus p_2` w.r.t. :math:`p_2`.

        Parameters
        ----------
        other : BasePose
            The pose that is being added to ``self``

        Returns
        -------
        np.ndarray
            The Jacobian of :math:`p_1 \oplus p_2` w.r.t. :math:`p_2`.

        """
        return np.array([[np.cos(self[2]), -np.sin(self[2]), 0.],
                         [np.sin(self[2]), np.cos(self[2]), 0.],
                         [0., 0., 1.]])

    def jacobian_self_oplus_other_wrt_other_compact(self, other):
        r"""Compute the Jacobian of :math:`p_1 \oplus p_2` w.r.t. :math:`p_2`.

        Identical to ``jacobian_self_oplus_other_wrt_other`` for SE(2).

        """
        return self.jacobian_self_oplus_other_wrt_other(other)

    def jacobian_self_ominus_other_wrt_self(self, other):
        r"""Compute the Jacobian of :math:`p_1 \ominus p_2` w.r.t. :math:`p_1`.

        Parameters
        ----------
        other : BasePose
            The pose that is being subtracted from ``self``

        Returns
        -------
        np.ndarray
            The Jacobian of :math:`p_1 \ominus p_2` w.r.t. :math:`p_1`.

        """
        return np.array([[np.cos(other[2]), np.sin(other[2]), 0.],
                         [-np.sin(other[2]), np.cos(other[2]), 0.],
                         [0., 0., 1.]])

    def jacobian_self_ominus_other_wrt_self_compact(self, other):
        r"""Compute the Jacobian of :math:`p_1 \ominus p_2` w.r.t. :math:`p_1`.

        Identical to ``jacobian_self_ominus_other_wrt_self`` for SE(2).

        """
        return self.jacobian_self_ominus_other_wrt_self(other)

    def jacobian_self_ominus_other_wrt_other(self, other):
        r"""Compute the Jacobian of :math:`p_1 \ominus p_2` w.r.t. :math:`p_2`.

        Parameters
        ----------
        other : BasePose
            The pose that is being subtracted from ``self``

        Returns
        -------
        np.ndarray
            The Jacobian of :math:`p_1 \ominus p_2` w.r.t. :math:`p_2`.

        """
        return np.array(
            [[-np.cos(other[2]), -np.sin(other[2]),
              (other[0] - self[0]) * np.sin(other[2]) + (self[1] - other[1]) * np.cos(other[2])],
             [np.sin(other[2]), -np.cos(other[2]),
              (other[0] - self[0]) * np.cos(other[2]) + (other[1] - self[1]) * np.sin(other[2])],
             [0., 0., -1.]])

    def jacobian_self_ominus_other_wrt_other_compact(self, other):
        r"""Compute the Jacobian of :math:`p_1 \ominus p_2` w.r.t. :math:`p_2`.

        Identical to ``jacobian_self_ominus_other_wrt_other`` for SE(2).

        """
        return self.jacobian_self_ominus_other_wrt_other(other)

    def jacobian_boxplus(self):
        r"""Compute the Jacobian of :math:`p_1 \boxplus \Delta \mathbf{x}` w.r.t. :math:`\Delta \mathbf{x}` evaluated at :math:`\Delta \mathbf{x} = \mathbf{0}`.

        Returns
        -------
        np.ndarray
            The Jacobian of :math:`p_1 \boxplus \Delta \mathbf{x}` w.r.t. :math:`\Delta \mathbf{x}` evaluated at :math:`\Delta \mathbf{x} = \mathbf{0}`

        """
        return np.array([[np.cos(self[2]), -np.sin(self[2]), 0.],
                         [np.sin(self[2]), np.cos(self[2]), 0.],
                         [0., 0., 1.]])
| 32.07855 | 224 | 0.437559 |
6a48cbd9786f026192a575a75f3b5e933e8faeae | 110,112 | py | Python | python/ray/data/dataset.py | spillai/ray | 8b4cb45088a170e37d7d92c2e8a3dacddf67e1bc | [
"Apache-2.0"
] | 1 | 2021-09-20T15:47:07.000Z | 2021-09-20T15:47:07.000Z | python/ray/data/dataset.py | spillai/ray | 8b4cb45088a170e37d7d92c2e8a3dacddf67e1bc | [
"Apache-2.0"
] | 52 | 2021-06-12T07:06:44.000Z | 2022-03-26T07:09:10.000Z | python/ray/data/dataset.py | spillai/ray | 8b4cb45088a170e37d7d92c2e8a3dacddf67e1bc | [
"Apache-2.0"
] | null | null | null | import logging
import time
from typing import List, Any, Callable, Iterator, Iterable, Generic, \
Dict, Optional, Union, TYPE_CHECKING, Tuple
from uuid import uuid4
if TYPE_CHECKING:
import pyarrow
import pandas
import mars
import modin
import dask
import pyspark
import ray.util.sgd
import torch
import tensorflow as tf
from ray.data.dataset_pipeline import DatasetPipeline
from ray.data.grouped_dataset import GroupedDataset, GroupKeyT, \
AggregateOnTs
import collections
import itertools
import numpy as np
import ray
from ray.types import ObjectRef
from ray.util.annotations import DeveloperAPI, PublicAPI
from ray.data.block import Block, BlockAccessor, BlockMetadata, T, U, \
BlockPartition, BlockPartitionMetadata, BlockExecStats
from ray.data.context import DatasetContext
from ray.data.datasource import (
Datasource, CSVDatasource, JSONDatasource, NumpyDatasource,
ParquetDatasource, BlockWritePathProvider, DefaultBlockWritePathProvider)
from ray.data.aggregate import AggregateFn, Sum, Max, Min, \
Mean, Std
from ray.data.impl.remote_fn import cached_remote_fn
from ray.data.impl.batcher import Batcher
from ray.data.impl.stats import DatasetStats
from ray.data.impl.compute import get_compute, cache_wrapper, \
CallableClass
from ray.data.impl.output_buffer import BlockOutputBuffer
from ray.data.impl.progress_bar import ProgressBar
from ray.data.impl.shuffle import simple_shuffle, _shuffle_reduce
from ray.data.impl.sort import sort_impl
from ray.data.impl.block_list import BlockList
from ray.data.impl.lazy_block_list import LazyBlockList
from ray.data.impl.delegating_block_builder import DelegatingBlockBuilder
# An output type of iter_batches() determined by the batch_format parameter.
BatchType = Union["pandas.DataFrame", "pyarrow.Table", np.ndarray, list]
# Module-level logger for this file.
logger = logging.getLogger(__name__)
# Whether we have warned of Datasets containing multiple epochs of data.
_epoch_warned = False
@PublicAPI(stability="beta")
class Dataset(Generic[T]):
"""Implements a distributed Arrow dataset.
Datasets are implemented as a list of ``ObjectRef[Block]``. The block
also determines the unit of parallelism. The default block type is the
``pyarrow.Table``. Arrow-incompatible objects are held in ``list`` blocks.
Since Datasets are just lists of Ray object refs, they can be passed
between Ray tasks and actors just like any other object. Datasets support
conversion to/from several more featureful dataframe libraries
(e.g., Spark, Dask, Modin, MARS), and are also compatible with distributed
TensorFlow / PyTorch.
Dataset supports parallel transformations such as .map(), .map_batches(),
and simple repartition, but currently not aggregations and joins.
"""
    def __init__(self, blocks: BlockList, epoch: int, stats: DatasetStats):
        """Construct a Dataset (internal API).
        The constructor is not part of the Dataset API. Use the ``ray.data.*``
        read methods to construct a dataset.
        """
        self._blocks: BlockList = blocks
        # Unique id; also used to key this dataset's stats (set below).
        self._uuid = uuid4().hex
        # Epoch counter carried through transformations (each derived Dataset
        # is constructed with its parent's ``self._epoch``).
        self._epoch = epoch
        self._stats = stats
        self._stats.dataset_uuid = self._uuid
        assert isinstance(self._blocks, BlockList), self._blocks
    def map(self,
            fn: Union[CallableClass, Callable[[T], U]],
            *,
            compute: Optional[str] = None,
            **ray_remote_args) -> "Dataset[U]":
        """Apply the given function to each record of this dataset.
        This is a blocking operation. Note that mapping individual records
        can be quite slow. Consider using `.map_batches()` for performance.
        Examples:
            >>> # Transform python objects.
            >>> ds.map(lambda x: x * 2)
            >>> # Transform Arrow records.
            >>> ds.map(lambda record: {"v2": record["value"] * 2})
            >>> # Define a callable class that persists state across
            >>> # function invocations for efficiency.
            >>> class CachedModel:
            ...    def __init__(self):
            ...        self.model = init_model()
            ...    def __call__(self, batch):
            ...        return self.model(batch)
            >>> # Apply the transform in parallel on GPUs. Since
            >>> # compute="actors", the transform will be applied on an
            >>> # autoscaling pool of Ray actors, each allocated 1 GPU by Ray.
            >>> ds.map(CachedModel, compute="actors", num_gpus=1)
        Time complexity: O(dataset size / parallelism)
        Args:
            fn: The function to apply to each record, or a class type
                that can be instantiated to create such a callable.
            compute: The compute strategy, either "tasks" (default) to use Ray
                tasks, or "actors" to use an autoscaling Ray actor pool.
            ray_remote_args: Additional resource requirements to request from
                ray (e.g., num_gpus=1 to request GPUs for the map tasks).
        """
        # Wrap class-type UDFs (see ray.data.impl.compute.cache_wrapper).
        fn = cache_wrapper(fn)
        # Capture the current context so it can be re-installed inside the
        # transform closure, which may run in a different worker process.
        context = DatasetContext.get_current()
        stats_builder = self._stats.child_builder("map")
        def transform(block: Block) -> Iterable[Block]:
            DatasetContext._set_current(context)
            block = BlockAccessor.for_block(block)
            # Rows are streamed through ``fn`` and re-buffered into output
            # blocks bounded by context.target_max_block_size.
            output_buffer = BlockOutputBuffer(None,
                                              context.target_max_block_size)
            for row in block.iter_rows():
                output_buffer.add(fn(row))
                if output_buffer.has_next():
                    yield output_buffer.next()
            # Flush whatever is left after the final row.
            output_buffer.finalize()
            if output_buffer.has_next():
                yield output_buffer.next()
        compute = get_compute(compute)
        blocks = compute.apply(transform, ray_remote_args, self._blocks)
        return Dataset(blocks, self._epoch, stats_builder.build(blocks))
def map_batches(self,
fn: Union[CallableClass, Callable[[BatchType], BatchType]],
*,
batch_size: Optional[int] = 4096,
compute: Optional[str] = None,
batch_format: str = "native",
**ray_remote_args) -> "Dataset[Any]":
"""Apply the given function to batches of records of this dataset.
This is a blocking operation.
Examples:
>>> # Transform batches in parallel.
>>> ds.map_batches(lambda batch: [v * 2 for v in batch])
>>> # Define a callable class that persists state across
>>> # function invocations for efficiency.
>>> class CachedModel:
... def __init__(self):
... self.model = init_model()
... def __call__(self, item):
... return self.model(item)
>>> # Apply the transform in parallel on GPUs. Since
>>> # compute="actors", the transform will be applied on an
>>> # autoscaling pool of Ray actors, each allocated 1 GPU by Ray.
>>> ds.map_batches(
... CachedModel,
... batch_size=256, compute="actors", num_gpus=1)
Time complexity: O(dataset size / parallelism)
Args:
fn: The function to apply to each record batch, or a class type
that can be instantiated to create such a callable.
batch_size: Request a specific batch size, or None to use entire
blocks as batches. Defaults to a system-chosen batch size.
compute: The compute strategy, either "tasks" (default) to use Ray
tasks, or "actors" to use an autoscaling Ray actor pool.
batch_format: Specify "native" to use the native block format,
"pandas" to select ``pandas.DataFrame`` as the batch format,
or "pyarrow" to select ``pyarrow.Table``.
ray_remote_args: Additional resource requirements to request from
ray (e.g., num_gpus=1 to request GPUs for the map tasks).
"""
if batch_size is not None and batch_size < 1:
raise ValueError("Batch size cannot be negative or 0")
import pyarrow as pa
import pandas as pd
fn = cache_wrapper(fn)
context = DatasetContext.get_current()
stats_builder = self._stats.child_builder("map_batches")
def transform(block: Block) -> Iterable[Block]:
DatasetContext._set_current(context)
output_buffer = BlockOutputBuffer(None,
context.target_max_block_size)
block = BlockAccessor.for_block(block)
total_rows = block.num_rows()
max_batch_size = batch_size
if max_batch_size is None:
max_batch_size = max(total_rows, 1)
for start in range(0, total_rows, max_batch_size):
# Build a block for each batch.
end = min(total_rows, start + max_batch_size)
# Make sure to copy if slicing to avoid the Arrow serialization
# bug where we include the entire base view on serialization.
view = block.slice(start, end, copy=batch_size is not None)
if batch_format == "native":
pass
elif batch_format == "pandas":
view = BlockAccessor.for_block(view).to_pandas()
elif batch_format == "pyarrow":
view = BlockAccessor.for_block(view).to_arrow()
else:
raise ValueError(
"The batch format must be one of 'native', 'pandas', "
"or 'pyarrow', got: {}".format(batch_format))
applied = fn(view)
if isinstance(applied, list) or isinstance(applied, pa.Table):
applied = applied
elif isinstance(applied, pd.core.frame.DataFrame):
applied = pa.Table.from_pandas(applied)
else:
raise ValueError("The map batches UDF returned the value "
f"{applied}, which is not allowed. "
"The return type must be either list, "
"pandas.DataFrame, or pyarrow.Table")
output_buffer.add_block(applied)
if output_buffer.has_next():
yield output_buffer.next()
output_buffer.finalize()
if output_buffer.has_next():
yield output_buffer.next()
compute = get_compute(compute)
blocks = compute.apply(transform, ray_remote_args, self._blocks)
return Dataset(blocks, self._epoch, stats_builder.build(blocks))
def flat_map(self,
fn: Union[CallableClass, Callable[[T], Iterable[U]]],
*,
compute: Optional[str] = None,
**ray_remote_args) -> "Dataset[U]":
"""Apply the given function to each record and then flatten results.
This is a blocking operation. Consider using ``.map_batches()`` for
better performance (the batch size can be altered in map_batches).
Examples:
>>> ds.flat_map(lambda x: [x, x ** 2, x ** 3])
Time complexity: O(dataset size / parallelism)
Args:
fn: The function to apply to each record, or a class type
that can be instantiated to create such a callable.
compute: The compute strategy, either "tasks" (default) to use Ray
tasks, or "actors" to use an autoscaling Ray actor pool.
ray_remote_args: Additional resource requirements to request from
ray (e.g., num_gpus=1 to request GPUs for the map tasks).
"""
fn = cache_wrapper(fn)
context = DatasetContext.get_current()
stats_builder = self._stats.child_builder("map")
def transform(block: Block) -> Iterable[Block]:
DatasetContext._set_current(context)
output_buffer = BlockOutputBuffer(None,
context.target_max_block_size)
block = BlockAccessor.for_block(block)
for row in block.iter_rows():
for r2 in fn(row):
output_buffer.add(r2)
if output_buffer.has_next():
yield output_buffer.next()
output_buffer.finalize()
if output_buffer.has_next():
yield output_buffer.next()
compute = get_compute(compute)
blocks = compute.apply(transform, ray_remote_args, self._blocks)
return Dataset(blocks, self._epoch, stats_builder.build(blocks))
def filter(self,
fn: Union[CallableClass, Callable[[T], bool]],
*,
compute: Optional[str] = None,
**ray_remote_args) -> "Dataset[T]":
"""Filter out records that do not satisfy the given predicate.
This is a blocking operation. Consider using ``.map_batches()`` for
better performance (you can implement filter by dropping records).
Examples:
>>> ds.filter(lambda x: x % 2 == 0)
Time complexity: O(dataset size / parallelism)
Args:
fn: The predicate to apply to each record, or a class type
that can be instantiated to create such a callable.
compute: The compute strategy, either "tasks" (default) to use Ray
tasks, or "actors" to use an autoscaling Ray actor pool.
ray_remote_args: Additional resource requirements to request from
ray (e.g., num_gpus=1 to request GPUs for the map tasks).
"""
fn = cache_wrapper(fn)
context = DatasetContext.get_current()
stats_builder = self._stats.child_builder("filter")
def transform(block: Block) -> Iterable[Block]:
DatasetContext._set_current(context)
block = BlockAccessor.for_block(block)
builder = block.builder()
for row in block.iter_rows():
if fn(row):
builder.add(row)
return [builder.build()]
compute = get_compute(compute)
blocks = compute.apply(transform, ray_remote_args, self._blocks)
return Dataset(blocks, self._epoch, stats_builder.build(blocks))
    def repartition(self, num_blocks: int, *,
                    shuffle: bool = False) -> "Dataset[T]":
        """Repartition the dataset into exactly this number of blocks.

        This is a blocking operation. After repartitioning, all blocks in the
        returned dataset will have approximately the same number of rows.

        Examples:
            >>> # Set the number of output partitions to write to disk.
            >>> ds.repartition(10).write_parquet(...)

        Time complexity: O(dataset size / parallelism)

        Args:
            num_blocks: The number of blocks. Assumed to be positive; a
                non-positive value leads to a ZeroDivisionError or degenerate
                splits below.
            shuffle: Whether to perform a distributed shuffle during the
                repartition. When shuffle is enabled, each output block
                contains a subset of data rows from each input block, which
                requires all-to-all data movement. When shuffle is disabled,
                output blocks are created from adjacent input blocks,
                minimizing data movement.

        Returns:
            The repartitioned dataset.
        """
        # NOTE(review): `stats` is only consumed on the shuffle path; the
        # non-shuffle return below still uses a child_TODO placeholder.
        stats = self._stats.child_builder("repartition")
        if shuffle:
            # All-to-all repartition via the shuffle implementation.
            new_blocks, stage_info = simple_shuffle(self._blocks, num_blocks)
            return Dataset(new_blocks, self._epoch,
                           stats.build_multistage(stage_info))
        # Compute the (n-1) indices needed for an equal split of the data.
        # Float accumulation spreads the remainder rows evenly across splits.
        count = self.count()
        indices = []
        cur_idx = 0
        for _ in range(num_blocks - 1):
            cur_idx += count / num_blocks
            indices.append(int(cur_idx))
        assert len(indices) < num_blocks, (indices, num_blocks)
        if indices:
            splits = self.split_at_indices(indices)
            # TODO this saves memory: self._blocks.clear()
        else:
            # num_blocks == 1: the whole dataset is a single split.
            splits = [self]
        # Coalesce each split into a single block.
        reduce_task = cached_remote_fn(_shuffle_reduce).options(num_returns=2)
        reduce_bar = ProgressBar("Repartition", position=0, total=len(splits))
        # Empty splits are dropped here and backfilled with empty blocks below.
        reduce_out = [
            reduce_task.remote(*s.get_internal_block_refs()) for s in splits
            if s.num_blocks() > 0
        ]
        del splits  # Early-release memory.
        new_blocks, new_metadata = zip(*reduce_out)
        new_blocks, new_metadata = list(new_blocks), list(new_metadata)
        # Blocks until all reduce tasks finish, updating the progress bar.
        new_metadata = reduce_bar.fetch_until_complete(new_metadata)
        reduce_bar.close()
        # Handle empty blocks: pad the output up to exactly num_blocks with
        # empty blocks of the matching format (Arrow vs. simple).
        if len(new_blocks) < num_blocks:
            from ray.data.impl.arrow_block import ArrowBlockBuilder
            from ray.data.impl.simple_block import SimpleBlockBuilder
            num_empties = num_blocks - len(new_blocks)
            dataset_format = self._dataset_format()
            if dataset_format == "arrow":
                builder = ArrowBlockBuilder()
            else:
                builder = SimpleBlockBuilder()
            empty_block = builder.build()
            empty_meta = BlockAccessor.for_block(empty_block).get_metadata(
                input_files=None, exec_stats=BlockExecStats.TODO)
            empty_blocks, empty_metadata = zip(*[(ray.put(empty_block),
                                                  empty_meta)
                                                 for _ in range(num_empties)])
            new_blocks += empty_blocks
            new_metadata += empty_metadata
        return Dataset(
            BlockList(new_blocks, new_metadata), self._epoch,
            self._stats.child_TODO("repartition"))
def random_shuffle(
self,
*,
seed: Optional[int] = None,
num_blocks: Optional[int] = None,
_move: Optional[bool] = False,
_spread_resource_prefix: Optional[str] = None) -> "Dataset[T]":
"""Randomly shuffle the elements of this dataset.
This is a blocking operation similar to repartition().
Examples:
>>> # Shuffle this dataset randomly.
>>> ds.random_shuffle()
>>> # Shuffle this dataset with a fixed random seed.
>>> ds.random_shuffle(seed=12345)
Time complexity: O(dataset size / parallelism)
Args:
seed: Fix the random seed to use, otherwise one will be chosen
based on system randomness.
num_blocks: The number of output blocks after the shuffle, or None
to retain the number of blocks.
Returns:
The shuffled dataset.
"""
# Handle empty dataset.
if self.num_blocks() == 0:
return self
stats = self._stats.child_builder("random_shuffle")
if num_blocks is None:
num_blocks = self._blocks.executed_num_blocks() # Blocking.
new_blocks, stage_info = simple_shuffle(
self._move_blocks() if _move else self._blocks,
num_blocks,
random_shuffle=True,
random_seed=seed,
_spread_resource_prefix=_spread_resource_prefix)
return Dataset(new_blocks, self._epoch,
stats.build_multistage(stage_info))
def split(self,
n: int,
*,
equal: bool = False,
locality_hints: List[Any] = None) -> List["Dataset[T]"]:
"""Split the dataset into ``n`` disjoint pieces.
This returns a list of sub-datasets that can be passed to Ray tasks
and actors and used to read the dataset records in parallel.
Examples:
>>> # Split up a dataset to process over `n` worker actors.
>>> shards = ds.split(len(workers), locality_hints=workers)
>>> for shard, worker in zip(shards, workers):
... worker.consume.remote(shard)
Time complexity: O(1)
See also: ``Dataset.split_at_indices``
Args:
n: Number of child datasets to return.
equal: Whether to guarantee each split has an equal
number of records. This may drop records if they cannot be
divided equally among the splits.
locality_hints: A list of Ray actor handles of size ``n``. The
system will try to co-locate the blocks of the ith dataset
with the ith actor to maximize data locality.
Returns:
A list of ``n`` disjoint dataset splits.
"""
if n <= 0:
raise ValueError(f"The number of splits {n} is not positive.")
if locality_hints and len(locality_hints) != n:
raise ValueError(
f"The length of locality_hints {len(locality_hints)} "
"doesn't equal the number of splits {n}.")
def _partition_splits(splits: List[Dataset[T]], part_size: int,
counts_cache: Dict[str, int]):
"""Partition splits into two sets: splits that are smaller than the
target size and splits that are larger than the target size.
"""
splits = sorted(splits, key=lambda s: counts_cache[s._get_uuid()])
idx = next(i for i, split in enumerate(splits)
if counts_cache[split._get_uuid()] >= part_size)
return splits[:idx], splits[idx:]
def _equalize_larger_splits(splits: List[Dataset[T]], target_size: int,
counts_cache: Dict[str, int],
num_splits_required: int):
"""Split each split into one or more subsplits that are each the
target size, with at most one leftover split that's smaller
than the target size.
This assume that the given splits are sorted in ascending order.
"""
new_splits = []
leftovers = []
for split in splits:
size = counts_cache[split._get_uuid()]
if size == target_size:
new_splits.append(split)
continue
split_indices = list(range(target_size, size, target_size))
split_splits = split.split_at_indices(split_indices)
last_split_size = split_splits[-1].count()
if last_split_size < target_size:
# Last split is smaller than the target size, save it for
# our unioning of small splits.
leftover = split_splits.pop()
leftovers.append(leftover)
counts_cache[leftover._get_uuid()] = leftover.count()
if len(new_splits) + len(split_splits) >= num_splits_required:
# Short-circuit if the new splits will make us reach the
# desired number of splits.
new_splits.extend(
split_splits[:num_splits_required - len(new_splits)])
break
new_splits.extend(split_splits)
return new_splits, leftovers
def _equalize_smaller_splits(
splits: List[Dataset[T]], target_size: int,
counts_cache: Dict[str, int], num_splits_required: int):
"""Union small splits up to the target split size.
This assume that the given splits are sorted in ascending order.
"""
new_splits = []
union_buffer = []
union_buffer_size = 0
low = 0
high = len(splits) - 1
while low <= high:
# Union small splits up to the target split size.
low_split = splits[low]
low_count = counts_cache[low_split._get_uuid()]
high_split = splits[high]
high_count = counts_cache[high_split._get_uuid()]
if union_buffer_size + high_count <= target_size:
# Try to add the larger split to the union buffer first.
union_buffer.append(high_split)
union_buffer_size += high_count
high -= 1
elif union_buffer_size + low_count <= target_size:
union_buffer.append(low_split)
union_buffer_size += low_count
low += 1
else:
# Neither the larger nor smaller split fit in the union
# buffer, so we split the smaller split into a subsplit
# that will fit into the union buffer and a leftover
# subsplit that we add back into the candidate split list.
diff = target_size - union_buffer_size
diff_split, new_low_split = low_split.split_at_indices(
[diff])
union_buffer.append(diff_split)
union_buffer_size += diff
# We overwrite the old low split and don't advance the low
# pointer since (1) the old low split can be discarded,
# (2) the leftover subsplit is guaranteed to be smaller
# than the old low split, and (3) the low split should be
# the smallest split in the candidate split list, which is
# this subsplit.
splits[low] = new_low_split
counts_cache[new_low_split._get_uuid()] = low_count - diff
if union_buffer_size == target_size:
# Once the union buffer is full, we union together the
# splits.
assert len(union_buffer) > 1, union_buffer
first_ds = union_buffer[0]
new_split = first_ds.union(*union_buffer[1:])
new_splits.append(new_split)
# Clear the union buffer.
union_buffer = []
union_buffer_size = 0
if len(new_splits) == num_splits_required:
# Short-circuit if we've reached the desired number of
# splits.
break
return new_splits
def equalize(splits: List[Dataset[T]],
num_splits: int) -> List[Dataset[T]]:
if not equal:
return splits
counts = {s._get_uuid(): s.count() for s in splits}
total_rows = sum(counts.values())
# Number of rows for each split.
target_size = total_rows // num_splits
# Partition splits.
smaller_splits, larger_splits = _partition_splits(
splits, target_size, counts)
if len(smaller_splits) == 0 and num_splits < len(splits):
# All splits are already equal.
return splits
# Split larger splits.
new_splits, leftovers = _equalize_larger_splits(
larger_splits, target_size, counts, num_splits)
# Short-circuit if we've already reached the desired number of
# splits.
if len(new_splits) == num_splits:
return new_splits
# Add leftovers to small splits and re-sort.
smaller_splits += leftovers
smaller_splits = sorted(
smaller_splits, key=lambda s: counts[s._get_uuid()])
# Union smaller splits.
new_splits_small = _equalize_smaller_splits(
smaller_splits, target_size, counts,
num_splits - len(new_splits))
new_splits.extend(new_splits_small)
return new_splits
block_refs, metadata = zip(*self._blocks.get_blocks_with_metadata())
metadata_mapping = {b: m for b, m in zip(block_refs, metadata)}
if locality_hints is None:
return equalize([
Dataset(
BlockList(
list(blocks), [metadata_mapping[b] for b in blocks]),
self._epoch, self._stats)
for blocks in np.array_split(block_refs, n)
if not equal or len(blocks) > 0
], n)
# If the locality_hints is set, we use a two-round greedy algorithm
# to co-locate the blocks with the actors based on block
# and actor's location (node_id).
#
# The split algorithm tries to allocate equally-sized blocks regardless
# of locality. Thus we first calculate the expected number of blocks
# for each split.
#
# In the first round, for each actor, we look for all blocks that
# match the actor's node_id, then allocate those matched blocks to
# this actor until we reach the limit(expected number).
#
# In the second round: fill each actor's allocation with
# remaining unallocated blocks until we reach the limit.
def build_allocation_size_map(num_blocks: int,
actors: List[Any]) -> Dict[Any, int]:
"""Given the total number of blocks and a list of actors, calcuate
the expected number of blocks to allocate for each actor.
"""
num_actors = len(actors)
num_blocks_per_actor = num_blocks // num_actors
num_blocks_left = num_blocks - num_blocks_per_actor * n
num_blocks_by_actor = {}
for i, actor in enumerate(actors):
num_blocks_by_actor[actor] = num_blocks_per_actor
if i < num_blocks_left:
num_blocks_by_actor[actor] += 1
return num_blocks_by_actor
def build_block_refs_by_node_id(blocks: List[ObjectRef[Block]]
) -> Dict[str, List[ObjectRef[Block]]]:
"""Build the reverse index from node_id to block_refs. For
simplicity, if the block is stored on multiple nodes we
only pick the first one.
"""
block_ref_locations = ray.experimental.get_object_locations(blocks)
block_refs_by_node_id = collections.defaultdict(list)
for block_ref in blocks:
node_ids = block_ref_locations.get(block_ref, {}).get(
"node_ids", [])
node_id = node_ids[0] if node_ids else None
block_refs_by_node_id[node_id].append(block_ref)
return block_refs_by_node_id
def build_node_id_by_actor(actors: List[Any]) -> Dict[Any, str]:
"""Build a map from a actor to its node_id.
"""
actors_state = ray.state.actors()
return {
actor: actors_state.get(actor._actor_id.hex(), {}).get(
"Address", {}).get("NodeID")
for actor in actors
}
# expected number of blocks to be allocated for each actor
expected_block_count_by_actor = build_allocation_size_map(
len(block_refs), locality_hints)
# the reverse index from node_id to block_refs
block_refs_by_node_id = build_block_refs_by_node_id(block_refs)
# the map from actor to its node_id
node_id_by_actor = build_node_id_by_actor(locality_hints)
allocation_per_actor = collections.defaultdict(list)
# In the first round, for each actor, we look for all blocks that
# match the actor's node_id, then allocate those matched blocks to
# this actor until we reach the limit(expected number)
for actor in locality_hints:
node_id = node_id_by_actor[actor]
matching_blocks = block_refs_by_node_id[node_id]
expected_block_count = expected_block_count_by_actor[actor]
allocation = []
while matching_blocks and len(allocation) < expected_block_count:
allocation.append(matching_blocks.pop())
allocation_per_actor[actor] = allocation
# In the second round: fill each actor's allocation with
# remaining unallocated blocks until we reach the limit
remaining_block_refs = list(
itertools.chain.from_iterable(block_refs_by_node_id.values()))
for actor in locality_hints:
while len(allocation_per_actor[actor]
) < expected_block_count_by_actor[actor]:
allocation_per_actor[actor].append(remaining_block_refs.pop())
assert len(remaining_block_refs) == 0, len(remaining_block_refs)
return equalize([
Dataset(
BlockList(
allocation_per_actor[actor],
[metadata_mapping[b]
for b in allocation_per_actor[actor]]), self._epoch,
self._stats) for actor in locality_hints
], n)
def split_at_indices(self, indices: List[int]) -> List["Dataset[T]"]:
"""Split the dataset at the given indices (like np.split).
Examples:
>>> d1, d2, d3 = ray.data.range(10).split_at_indices([2, 5])
>>> d1.take()
[0, 1]
>>> d2.take()
[2, 3, 4]
>>> d3.take()
[5, 6, 7, 8, 9]
Time complexity: O(num splits)
See also: ``Dataset.split``
Args:
indices: List of sorted integers which indicate where the dataset
will be split. If an index exceeds the length of the dataset,
an empty dataset will be returned.
Returns:
The dataset splits.
"""
if len(indices) < 1:
raise ValueError("indices must be at least of length 1")
if sorted(indices) != indices:
raise ValueError("indices must be sorted")
if indices[0] < 0:
raise ValueError("indices must be positive")
rest = self
splits = []
prev = 0
for i in indices:
first, rest = rest._split(i - prev, return_right_half=True)
prev = i
splits.append(first)
splits.append(rest)
return splits
def union(self, *other: List["Dataset[T]"]) -> "Dataset[T]":
"""Combine this dataset with others of the same type.
The order of the blocks in the datasets is preserved, as is the
relative ordering between the datasets passed in the argument list.
Args:
other: List of datasets to combine with this one. The datasets
must have the same schema as this dataset, otherwise the
behavior is undefined.
Returns:
A new dataset holding the union of their data.
"""
context = DatasetContext.get_current()
calls: List[Callable[[], ObjectRef[BlockPartition]]] = []
metadata: List[BlockPartitionMetadata] = []
block_partitions: List[ObjectRef[BlockPartition]] = []
datasets = [self] + list(other)
for ds in datasets:
bl = ds._blocks
if isinstance(bl, LazyBlockList):
calls.extend(bl._calls)
metadata.extend(bl._metadata)
block_partitions.extend(bl._block_partitions)
else:
calls.extend([None] * bl.initial_num_blocks())
metadata.extend(bl._metadata)
if context.block_splitting_enabled:
block_partitions.extend([
ray.put([(b, m)])
for b, m in bl.get_blocks_with_metadata()
])
else:
block_partitions.extend(bl.get_blocks())
epochs = [ds._get_epoch() for ds in datasets]
max_epoch = max(*epochs)
if len(set(epochs)) > 1:
global _epoch_warned
if not _epoch_warned:
logger.warning(
"Dataset contains data from multiple epochs: {}, "
"likely due to a `rewindow()` call. The higher epoch "
"number {} will be used. This warning will not "
"be shown again.".format(set(epochs), max_epoch))
_epoch_warned = True
return Dataset(
LazyBlockList(calls, metadata, block_partitions), max_epoch,
self._stats.child_TODO("union"))
def groupby(self, key: "GroupKeyT") -> "GroupedDataset[T]":
"""Group the dataset by the key function or column name (Experimental).
This is a lazy operation.
Examples:
>>> # Group by a key function and aggregate.
>>> ray.data.range(100).groupby(lambda x: x % 3).count()
>>> # Group by an Arrow table column and aggregate.
>>> ray.data.from_items([
... {"A": x % 3, "B": x} for x in range(100)]).groupby(
... "A").count()
Time complexity: O(dataset size * log(dataset size / parallelism))
Args:
key: A key function or Arrow column name.
Returns:
A lazy GroupedDataset that can be aggregated later.
"""
from ray.data.grouped_dataset import GroupedDataset
return GroupedDataset(self, key)
def aggregate(self, *aggs: AggregateFn) -> U:
"""Aggregate the entire dataset as one group.
This is a blocking operation.
Examples:
>>> ray.data.range(100).aggregate(Max())
>>> ray.data.range_arrow(100).aggregate(
Max("value"), Mean("value"))
Time complexity: O(dataset size / parallelism)
Args:
aggs: Aggregations to do.
Returns:
If the input dataset is a simple dataset then the output is
a tuple of ``(agg1, agg2, ...)`` where each tuple element is
the corresponding aggregation result.
If the input dataset is an Arrow dataset then the output is
an ``ArrowRow`` where each column is the corresponding
aggregation result.
If the dataset is empty, return ``None``.
"""
ret = self.groupby(None).aggregate(*aggs).take(1)
return ret[0] if len(ret) > 0 else None
    def _check_and_normalize_agg_on(self,
                                    on: Optional["AggregateOnTs"],
                                    skip_cols: Optional[List[str]] = None
                                    ) -> Optional["AggregateOnTs"]:
        """Checks whether the provided aggregation `on` arg is valid for this
        type of dataset, and normalizes the value based on the Dataset type and
        any provided columns to skip.

        Raises:
            TypeError: If ``on`` is not None, a str, a callable, or a
                homogeneous list of strs or of callables.
            ValueError: If ``on`` is an empty list, names an unknown Arrow
                column, or names a column on a simple dataset.
        """
        # `on` must be None, a str, a callable, or a homogeneous list of strs
        # or of callables; a list mixing the two kinds is rejected.
        if (on is not None
                and (not isinstance(on, (str, Callable, list)) or
                     (isinstance(on, list)
                      and not (all(isinstance(on_, str) for on_ in on)
                               or all(isinstance(on_, Callable)
                                      for on_ in on))))):
            # Imported lazily to avoid a circular module dependency.
            from ray.data.grouped_dataset import AggregateOnTs
            raise TypeError(
                f"`on` must be of type {AggregateOnTs}, but got {type(on)}")
        if isinstance(on, list) and len(on) == 0:
            raise ValueError(
                "When giving a list for `on`, it must be nonempty.")
        try:
            dataset_format = self._dataset_format()
        except ValueError:
            # Dataset is empty/cleared, let downstream ops handle this.
            return on
        if dataset_format == "arrow":
            # This should be cached from the ._dataset_format() check, so we
            # don't fetch and we assert that the schema is not None.
            schema = self.schema(fetch_if_missing=False)
            assert schema is not None
            if len(schema.names) == 0:
                # Empty dataset, don't validate `on` since we generically
                # handle empty datasets downstream.
                return on
            if on is None:
                # If a null `on` is given for a table Dataset, coerce it to
                # all columns sans any that we want to skip.
                if skip_cols is None:
                    skip_cols = []
                elif not isinstance(skip_cols, list):
                    skip_cols = [skip_cols]
                on = [col for col in schema.names if col not in skip_cols]
            # Check that column names refer to valid columns.
            elif isinstance(on, str) and on not in schema.names:
                raise ValueError(
                    f"on={on} is not a valid column name: {schema.names}")
            elif isinstance(on, list) and isinstance(on[0], str):
                # Homogeneity checked above, so on[0] being a str implies
                # every element is a str.
                for on_ in on:
                    if on_ not in schema.names:
                        raise ValueError(
                            f"on={on_} is not a valid column name: "
                            f"{schema.names}")
        else:
            # Simple datasets have no columns; only callables make sense.
            if isinstance(on, str) or (isinstance(on, list)
                                       and isinstance(on[0], str)):
                raise ValueError(
                    "Can't aggregate on a column when using a simple Dataset; "
                    "use a callable `on` argument or use an Arrow Dataset "
                    "instead of a simple Dataset.")
        return on
def _dataset_format(self) -> str:
"""Determine the format of the dataset. Possible values are: "arrow",
"simple".
This may block; if the schema is unknown, this will synchronously fetch
the schema for the first block.
"""
try:
import pyarrow as pa
except ModuleNotFoundError:
return "simple"
else:
# We need schema to properly validate, so synchronously
# fetch it if necessary.
schema = self.schema(fetch_if_missing=True)
if schema is None:
raise ValueError(
"Dataset is empty or cleared, can't determine the format"
" of the dataset")
if isinstance(schema, pa.Schema):
return "arrow"
return "simple"
def _aggregate_on(self, agg_cls: type, on: Optional["AggregateOnTs"],
*args, **kwargs):
"""Helper for aggregating on a particular subset of the dataset.
This validates the `on` argument, and converts a list of column names
or lambdas to a multi-aggregation. A null `on` results in a
multi-aggregation on all columns for an Arrow Dataset, and a single
aggregation on the entire row for a simple Dataset.
"""
aggs = self._build_multicolumn_aggs(
agg_cls, on, *args, skip_cols=None, **kwargs)
return self.aggregate(*aggs)
def _build_multicolumn_aggs(self,
agg_cls: type,
on: Optional["AggregateOnTs"],
*args,
skip_cols: Optional[List[str]] = None,
**kwargs):
"""Build set of aggregations for applying a single aggregation to
multiple columns.
"""
on = self._check_and_normalize_agg_on(on, skip_cols=skip_cols)
if not isinstance(on, list):
on = [on]
return [agg_cls(on_, *args, **kwargs) for on_ in on]
def sum(self, on: Optional["AggregateOnTs"] = None) -> U:
"""Compute sum over entire dataset.
This is a blocking operation.
Examples:
>>> ray.data.range(100).sum()
>>> ray.data.from_items([
... (i, i**2)
... for i in range(100)]).sum(lambda x: x[1])
>>> ray.data.range_arrow(100).sum("value")
>>> ray.data.from_items([
... {"A": i, "B": i**2}
... for i in range(100)]).sum(["A", "B"])
Args:
on: The data subset on which to compute the sum.
- For a simple dataset: it can be a callable or a list thereof,
and the default is to return a scalar sum of all rows.
- For an Arrow dataset: it can be a column name or a list
thereof, and the default is to return an ``ArrowRow``
containing the column-wise sum of all columns.
Returns:
The sum result.
For a simple dataset, the output is:
- ``on=None``: a scalar representing the sum of all rows,
- ``on=callable``: a scalar representing the sum of the outputs of
the callable called on each row,
- ``on=[callable_1, ..., calalble_n]``: a tuple of
``(sum_1, ..., sum_n)`` representing the sum of the outputs of
the corresponding callables called on each row.
For an Arrow dataset, the output is:
- ``on=None``: an ArrowRow containing the column-wise sum of all
columns,
- ``on="col"``: a scalar representing the sum of all items in
column ``"col"``,
- ``on=["col_1", ..., "col_n"]``: an n-column ``ArrowRow``
containing the column-wise sum of the provided columns.
If the dataset is empty, then the output is 0.
"""
ret = self._aggregate_on(Sum, on)
if ret is None:
return 0
elif len(ret) == 1:
return ret[0]
else:
return ret
def min(self, on: Optional["AggregateOnTs"] = None) -> U:
"""Compute minimum over entire dataset.
This is a blocking operation.
Examples:
>>> ray.data.range(100).min()
>>> ray.data.from_items([
... (i, i**2)
... for i in range(100)]).min(lambda x: x[1])
>>> ray.data.range_arrow(100).min("value")
>>> ray.data.from_items([
... {"A": i, "B": i**2}
... for i in range(100)]).min(["A", "B"])
Args:
on: The data subset on which to compute the min.
- For a simple dataset: it can be a callable or a list thereof,
and the default is to return a scalar min of all rows.
- For an Arrow dataset: it can be a column name or a list
thereof, and the default is to return an ``ArrowRow``
containing the column-wise min of all columns.
Returns:
The min result.
For a simple dataset, the output is:
- ``on=None``: a scalar representing the min of all rows,
- ``on=callable``: a scalar representing the min of the outputs
of the callable called on each row,
- ``on=[callable_1, ..., calalble_n]``: a tuple of
``(min_1, ..., min_n)`` representing the min of the outputs
of the corresponding callables called on each row.
For an Arrow dataset, the output is:
- ``on=None``: an ``ArrowRow`` containing the column-wise min of
all columns,
- ``on="col"``: a scalar representing the min of all items in
column ``"col"``,
- ``on=["col_1", ..., "col_n"]``: an n-column ``ArrowRow``
containing the column-wise min of the provided columns.
If the dataset is empty, then a ``ValueError`` is raised.
"""
ret = self._aggregate_on(Min, on)
if ret is None:
raise ValueError("Cannot compute min on an empty dataset")
elif len(ret) == 1:
return ret[0]
else:
return ret
def max(self, on: Optional["AggregateOnTs"] = None) -> U:
"""Compute maximum over entire dataset.
This is a blocking operation.
Examples:
>>> ray.data.range(100).max()
>>> ray.data.from_items([
... (i, i**2)
... for i in range(100)]).max(lambda x: x[1])
>>> ray.data.range_arrow(100).max("value")
>>> ray.data.from_items([
... {"A": i, "B": i**2}
... for i in range(100)]).max(["A", "B"])
Args:
on: The data subset on which to compute the max.
- For a simple dataset: it can be a callable or a list thereof,
and the default is to return a scalar max of all rows.
- For an Arrow dataset: it can be a column name or a list
thereof, and the default is to return an ``ArrowRow``
containing the column-wise max of all columns.
Returns:
The max result.
For a simple dataset, the output is:
- ``on=None``: a scalar representing the max of all rows,
- ``on=callable``: a scalar representing the max of the outputs of
the callable called on each row,
- ``on=[callable_1, ..., calalble_n]``: a tuple of
``(max_1, ..., max_n)`` representing the max of the outputs of
the corresponding callables called on each row.
For an Arrow dataset, the output is:
- ``on=None``: an ``ArrowRow`` containing the column-wise max of
all columns,
- ``on="col"``: a scalar representing the max of all items in
column ``"col"``,
- ``on=["col_1", ..., "col_n"]``: an n-column ``ArrowRow``
containing the column-wise max of the provided columns.
If the dataset is empty, then a ``ValueError`` is raised.
"""
ret = self._aggregate_on(Max, on)
if ret is None:
raise ValueError("Cannot compute max on an empty dataset")
elif len(ret) == 1:
return ret[0]
else:
return ret
def mean(self, on: Optional["AggregateOnTs"] = None) -> U:
"""Compute mean over entire dataset.
This is a blocking operation.
Examples:
>>> ray.data.range(100).mean()
>>> ray.data.from_items([
... (i, i**2)
... for i in range(100)]).mean(lambda x: x[1])
>>> ray.data.range_arrow(100).mean("value")
>>> ray.data.from_items([
... {"A": i, "B": i**2}
... for i in range(100)]).mean(["A", "B"])
Args:
on: The data subset on which to compute the mean.
- For a simple dataset: it can be a callable or a list thereof,
and the default is to return a scalar mean of all rows.
- For an Arrow dataset: it can be a column name or a list
thereof, and the default is to return an ``ArrowRow``
containing the column-wise mean of all columns.
Returns:
The mean result.
For a simple dataset, the output is:
- ``on=None``: a scalar representing the mean of all rows,
- ``on=callable``: a scalar representing the mean of the outputs
of the callable called on each row,
- ``on=[callable_1, ..., calalble_n]``: a tuple of
``(mean_1, ..., mean_n)`` representing the mean of the outputs
of the corresponding callables called on each row.
For an Arrow dataset, the output is:
- ``on=None``: an ``ArrowRow`` containing the column-wise mean of
all columns,
- ``on="col"``: a scalar representing the mean of all items in
column ``"col"``,
- ``on=["col_1", ..., "col_n"]``: an n-column ``ArrowRow``
containing the column-wise mean of the provided columns.
If the dataset is empty, then a ``ValueError`` is raised.
"""
ret = self._aggregate_on(Mean, on)
if ret is None:
raise ValueError("Cannot compute mean on an empty dataset")
elif len(ret) == 1:
return ret[0]
else:
return ret
def std(self, on: Optional["AggregateOnTs"] = None, ddof: int = 1) -> U:
"""Compute standard deviation over entire dataset.
This is a blocking operation.
Examples:
>>> ray.data.range(100).std()
>>> ray.data.from_items([
... (i, i**2)
... for i in range(100)]).std(lambda x: x[1])
>>> ray.data.range_arrow(100).std("value", ddof=0)
>>> ray.data.from_items([
... {"A": i, "B": i**2}
... for i in range(100)]).std(["A", "B"])
NOTE: This uses Welford's online method for an accumulator-style
computation of the standard deviation. This method was chosen due to
it's numerical stability, and it being computable in a single pass.
This may give different (but more accurate) results than NumPy, Pandas,
and sklearn, which use a less numerically stable two-pass algorithm.
See
https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm
Args:
on: The data subset on which to compute the std.
- For a simple dataset: it can be a callable or a list thereof,
and the default is to return a scalar std of all rows.
- For an Arrow dataset: it can be a column name or a list
thereof, and the default is to return an ``ArrowRow``
containing the column-wise std of all columns.
ddof: Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
Returns:
The standard deviation result.
For a simple dataset, the output is:
- ``on=None``: a scalar representing the std of all rows,
- ``on=callable``: a scalar representing the std of the outputs of
the callable called on each row,
- ``on=[callable_1, ..., calalble_n]``: a tuple of
``(std_1, ..., std_n)`` representing the std of the outputs of
the corresponding callables called on each row.
For an Arrow dataset, the output is:
- ``on=None``: an ``ArrowRow`` containing the column-wise std of
all columns,
- ``on="col"``: a scalar representing the std of all items in
column ``"col"``,
- ``on=["col_1", ..., "col_n"]``: an n-column ``ArrowRow``
containing the column-wise std of the provided columns.
If the dataset is empty, then a ``ValueError`` is raised.
"""
ret = self._aggregate_on(Std, on, ddof=ddof)
if ret is None:
raise ValueError("Cannot compute std on an empty dataset")
elif len(ret) == 1:
return ret[0]
else:
return ret
def sort(self,
key: Union[None, str, List[str], Callable[[T], Any]] = None,
descending: bool = False) -> "Dataset[T]":
"""Sort the dataset by the specified key column or key function.
(experimental support)
This is a blocking operation.
Examples:
>>> # Sort using the entire record as the key.
>>> ds.sort()
>>> # Sort by a single column in descending order.
>>> ds.sort("field1", descending=True)
>>> # Sort by a key function.
>>> ds.sort(lambda record: record["field1"] % 100)
>>> # Sort by multiple columns (not yet supported).
>>> ds.sort([("field1", "ascending"), ("field2", "descending")])
Time complexity: O(dataset size * log(dataset size / parallelism))
Args:
key:
- For Arrow tables, key must be a single column name.
- For datasets of Python objects, key can be either a lambda
function that returns a comparison key to sort by, or None
to sort by the original value.
descending: Whether to sort in descending order.
Returns:
A new, sorted dataset.
"""
# Handle empty dataset.
if self.num_blocks() == 0:
return self
return Dataset(
sort_impl(self._blocks, key, descending), self._epoch,
self._stats.child_TODO("sort"))
    def zip(self, other: "Dataset[U]") -> "Dataset[(T, U)]":
        """Zip this dataset with the elements of another.

        The datasets must have identical num rows, block types, and block
        sizes (e.g., one was produced from a ``.map()`` of another). For
        Arrow blocks, the schema will be concatenated, and any duplicate
        column names disambiguated with _1, _2, etc. suffixes.

        Time complexity: O(dataset size / parallelism)

        Args:
            other: The dataset to zip with on the right hand side.

        Examples:
            >>> ds = ray.data.range(5)
            >>> ds.zip(ds).take()
            [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4)]

        Returns:
            A Dataset with (k, v) pairs (or concatenated Arrow schema) where k
            comes from the first dataset and v comes from the second.
        """
        blocks1 = self.get_internal_block_refs()
        blocks2 = other.get_internal_block_refs()
        # Blocks are zipped pairwise, so the block counts must line up.
        if len(blocks1) != len(blocks2):
            # TODO(ekl) consider supporting if num_rows are equal.
            raise ValueError(
                "Cannot zip dataset of different num blocks: {} vs {}".format(
                    len(blocks1), len(blocks2)))

        # Remote task: zip one block pair and return the result block plus
        # its (recomputed) metadata as two separate object refs.
        def do_zip(block1: Block, block2: Block) -> (Block, BlockMetadata):
            b1 = BlockAccessor.for_block(block1)
            result = b1.zip(block2)
            br = BlockAccessor.for_block(result)
            return result, br.get_metadata(
                input_files=[], exec_stats=BlockExecStats.TODO)

        do_zip_fn = cached_remote_fn(do_zip, num_returns=2)

        blocks = []
        metadata = []
        for b1, b2 in zip(blocks1, blocks2):
            res, meta = do_zip_fn.remote(b1, b2)
            blocks.append(res)
            metadata.append(meta)
        # TODO(ekl) it might be nice to have a progress bar here.
        # Only the metadata is fetched eagerly; result blocks stay remote.
        metadata = ray.get(metadata)
        return Dataset(
            BlockList(blocks, metadata), self._epoch,
            self._stats.child_TODO("zip"))
def limit(self, limit: int) -> "Dataset[T]":
"""Limit the dataset to the first number of records specified.
Examples:
>>> ds.limit(100).map(lambda x: x * 2).take()
Time complexity: O(limit specified)
Args:
limit: The size of the dataset to truncate to.
Returns:
The truncated dataset.
"""
left, _ = self._split(limit, return_right_half=False)
return left
def take(self, limit: int = 20) -> List[T]:
"""Take up to the given number of records from the dataset.
Time complexity: O(limit specified)
Args:
limit: The max number of records to return.
Returns:
A list of up to ``limit`` records from the dataset.
"""
output = []
for row in self.iter_rows():
output.append(row)
if len(output) >= limit:
break
return output
def take_all(self, limit: int = 100000) -> List[T]:
"""Take all the records in the dataset.
Time complexity: O(dataset size)
Args:
limit: Raise an error if the size exceeds the specified limit.
Returns:
A list of all the records in the dataset.
"""
output = []
for row in self.iter_rows():
output.append(row)
if len(output) > limit:
raise ValueError(
"The dataset has more than the given limit of {} records.".
format(limit))
return output
def show(self, limit: int = 20) -> None:
"""Print up to the given number of records from the dataset.
Time complexity: O(limit specified)
Args:
limit: The max number of records to print.
"""
for row in self.take(limit):
print(row)
def count(self) -> int:
"""Count the number of records in the dataset.
Time complexity: O(dataset size / parallelism), O(1) for parquet
Returns:
The number of records in the dataset.
"""
# Handle empty dataset.
if self.num_blocks() == 0:
return 0
# For parquet, we can return the count directly from metadata.
meta_count = self._meta_count()
if meta_count is not None:
return meta_count
get_num_rows = cached_remote_fn(_get_num_rows)
return sum(
ray.get([
get_num_rows.remote(block)
for block in self._blocks.get_blocks()
]))
def schema(self, fetch_if_missing: bool = False
) -> Union[type, "pyarrow.lib.Schema"]:
"""Return the schema of the dataset.
For datasets of Arrow records, this will return the Arrow schema.
For datasets of Python objects, this returns their Python type.
Time complexity: O(1)
Args:
fetch_if_missing: If True, synchronously fetch the schema if it's
not known. Default is False, where None is returned if the
schema is not known.
Returns:
The Python type or Arrow schema of the records, or None if the
schema is not known and fetch_if_missing is False.
"""
metadata = self._blocks.get_metadata()
# Some blocks could be empty, in which case we cannot get their schema.
# TODO(ekl) validate schema is the same across different blocks.
for m in metadata:
if m.schema is not None:
return m.schema
if not fetch_if_missing:
return None
# Need to synchronously fetch schema.
return self._blocks.ensure_schema_for_first_block()
    def num_blocks(self) -> int:
        """Return the number of blocks of this dataset.

        Note that during read and transform operations, the number of blocks
        may be dynamically adjusted to respect memory limits, increasing the
        number of blocks at runtime.

        Time complexity: O(1)

        Returns:
            The number of blocks of this dataset.
        """
        # Delegates to the block list; "initial" because splitting at
        # runtime may change the effective block count (see note above).
        return self._blocks.initial_num_blocks()
def size_bytes(self) -> int:
"""Return the in-memory size of the dataset.
Time complexity: O(1)
Returns:
The in-memory size of the dataset in bytes, or None if the
in-memory size is not known.
"""
metadata = self._blocks.get_metadata()
if not metadata or metadata[0].size_bytes is None:
return None
return sum(m.size_bytes for m in metadata)
def input_files(self) -> List[str]:
"""Return the list of input files for the dataset.
Time complexity: O(num input files)
Returns:
The list of input files used to create the dataset, or an empty
list if the input files is not known.
"""
metadata = self._blocks.get_metadata()
files = set()
for m in metadata:
for f in m.input_files:
files.add(f)
return list(files)
    def write_parquet(
            self,
            path: str,
            *,
            filesystem: Optional["pyarrow.fs.FileSystem"] = None,
            try_create_dir: bool = True,
            arrow_open_stream_args: Optional[Dict[str, Any]] = None,
            block_path_provider:
            BlockWritePathProvider = DefaultBlockWritePathProvider(),
            arrow_parquet_args_fn: Callable[[], Dict[str, Any]] = lambda: {},
            **arrow_parquet_args) -> None:
        """Write the dataset to parquet.

        This is only supported for datasets convertible to Arrow records.
        To control the number of files, use ``.repartition()``.

        Unless a custom block path provider is given, the format of the output
        files will be {uuid}_{block_idx}.parquet, where ``uuid`` is an unique
        id for the dataset.

        Examples:
            >>> ds.write_parquet("s3://bucket/path")

        Time complexity: O(dataset size / parallelism)

        Args:
            path: The path to the destination root directory, where Parquet
                files will be written to.
            filesystem: The filesystem implementation to write to.
            try_create_dir: Try to create all directories in destination path
                if True. Does nothing if all directories already exist.
            arrow_open_stream_args: kwargs passed to
                pyarrow.fs.FileSystem.open_output_stream
            block_path_provider: BlockWritePathProvider implementation to
                write each dataset block to a custom output path.
            arrow_parquet_args_fn: Callable that returns a dictionary of write
                arguments to use when writing each block to a file. Overrides
                any duplicate keys from arrow_parquet_args. This should be used
                instead of arrow_parquet_args if any of your write arguments
                cannot be pickled, or if you'd like to lazily resolve the write
                arguments for each dataset block.
            arrow_parquet_args: Options to pass to
                pyarrow.parquet.write_table(), which is used to write out each
                block to a file.
        """
        # Thin wrapper: all work is delegated to the generic datasource
        # write path with a Parquet-specific datasource.
        self.write_datasource(
            ParquetDatasource(),
            path=path,
            dataset_uuid=self._uuid,
            filesystem=filesystem,
            try_create_dir=try_create_dir,
            open_stream_args=arrow_open_stream_args,
            block_path_provider=block_path_provider,
            write_args_fn=arrow_parquet_args_fn,
            **arrow_parquet_args)
    def write_json(
            self,
            path: str,
            *,
            filesystem: Optional["pyarrow.fs.FileSystem"] = None,
            try_create_dir: bool = True,
            arrow_open_stream_args: Optional[Dict[str, Any]] = None,
            block_path_provider:
            BlockWritePathProvider = DefaultBlockWritePathProvider(),
            pandas_json_args_fn: Callable[[], Dict[str, Any]] = lambda: {},
            **pandas_json_args) -> None:
        """Write the dataset to json.

        This is only supported for datasets convertible to Arrow records.
        To control the number of files, use ``.repartition()``.

        Unless a custom block path provider is given, the format of the output
        files will be {self._uuid}_{block_idx}.json, where ``uuid`` is an
        unique id for the dataset.

        Examples:
            >>> ds.write_json("s3://bucket/path")

        Time complexity: O(dataset size / parallelism)

        Args:
            path: The path to the destination root directory, where json
                files will be written to.
            filesystem: The filesystem implementation to write to.
            try_create_dir: Try to create all directories in destination path
                if True. Does nothing if all directories already exist.
            arrow_open_stream_args: kwargs passed to
                pyarrow.fs.FileSystem.open_output_stream
            block_path_provider: BlockWritePathProvider implementation to
                write each dataset block to a custom output path.
            pandas_json_args_fn: Callable that returns a dictionary of write
                arguments to use when writing each block to a file. Overrides
                any duplicate keys from pandas_json_args. This should be used
                instead of pandas_json_args if any of your write arguments
                cannot be pickled, or if you'd like to lazily resolve the write
                arguments for each dataset block.
            pandas_json_args: These args will be passed to
                pandas.DataFrame.to_json(), which we use under the hood to
                write out each Datasets block. These
                are dict(orient="records", lines=True) by default.
        """
        # Thin wrapper: all work is delegated to the generic datasource
        # write path with a JSON-specific datasource.
        self.write_datasource(
            JSONDatasource(),
            path=path,
            dataset_uuid=self._uuid,
            filesystem=filesystem,
            try_create_dir=try_create_dir,
            open_stream_args=arrow_open_stream_args,
            block_path_provider=block_path_provider,
            write_args_fn=pandas_json_args_fn,
            **pandas_json_args)
    def write_csv(self,
                  path: str,
                  *,
                  filesystem: Optional["pyarrow.fs.FileSystem"] = None,
                  try_create_dir: bool = True,
                  arrow_open_stream_args: Optional[Dict[str, Any]] = None,
                  block_path_provider:
                  BlockWritePathProvider = DefaultBlockWritePathProvider(),
                  arrow_csv_args_fn: Callable[[], Dict[str, Any]] = lambda: {},
                  **arrow_csv_args) -> None:
        """Write the dataset to csv.

        This is only supported for datasets convertible to Arrow records.
        To control the number of files, use ``.repartition()``.

        Unless a custom block path provider is given, the format of the output
        files will be {uuid}_{block_idx}.csv, where ``uuid`` is an unique id
        for the dataset.

        Examples:
            >>> ds.write_csv("s3://bucket/path")

        Time complexity: O(dataset size / parallelism)

        Args:
            path: The path to the destination root directory, where csv
                files will be written to.
            filesystem: The filesystem implementation to write to.
            try_create_dir: Try to create all directories in destination path
                if True. Does nothing if all directories already exist.
            arrow_open_stream_args: kwargs passed to
                pyarrow.fs.FileSystem.open_output_stream
            block_path_provider: BlockWritePathProvider implementation to
                write each dataset block to a custom output path.
            arrow_csv_args_fn: Callable that returns a dictionary of write
                arguments to use when writing each block to a file. Overrides
                any duplicate keys from arrow_csv_args. This should be used
                instead of arrow_csv_args if any of your write arguments
                cannot be pickled, or if you'd like to lazily resolve the write
                arguments for each dataset block.
            arrow_csv_args: Other CSV write options to pass to pyarrow.
        """
        # Thin wrapper: all work is delegated to the generic datasource
        # write path with a CSV-specific datasource.
        self.write_datasource(
            CSVDatasource(),
            path=path,
            dataset_uuid=self._uuid,
            filesystem=filesystem,
            try_create_dir=try_create_dir,
            open_stream_args=arrow_open_stream_args,
            block_path_provider=block_path_provider,
            write_args_fn=arrow_csv_args_fn,
            **arrow_csv_args)
    def write_numpy(
            self,
            path: str,
            *,
            column: str = "value",
            filesystem: Optional["pyarrow.fs.FileSystem"] = None,
            try_create_dir: bool = True,
            arrow_open_stream_args: Optional[Dict[str, Any]] = None,
            block_path_provider:
            BlockWritePathProvider = DefaultBlockWritePathProvider()) -> None:
        """Write a tensor column of the dataset to npy files.

        This is only supported for datasets convertible to Arrow records that
        contain a TensorArray column. To control the number of files, use
        ``.repartition()``.

        Unless a custom block path provider is given, the format of the output
        files will be {self._uuid}_{block_idx}.npy, where ``uuid`` is an unique
        id for the dataset.

        Examples:
            >>> ds.write_numpy("s3://bucket/path")

        Time complexity: O(dataset size / parallelism)

        Args:
            path: The path to the destination root directory, where npy
                files will be written to.
            column: The name of the table column that contains the tensor to
                be written. This defaults to "value".
            filesystem: The filesystem implementation to write to.
            try_create_dir: Try to create all directories in destination path
                if True. Does nothing if all directories already exist.
            arrow_open_stream_args: kwargs passed to
                pyarrow.fs.FileSystem.open_output_stream
            block_path_provider: BlockWritePathProvider implementation to
                write each dataset block to a custom output path.
        """
        # Thin wrapper: all work is delegated to the generic datasource
        # write path with a NumPy-specific datasource.
        self.write_datasource(
            NumpyDatasource(),
            path=path,
            dataset_uuid=self._uuid,
            column=column,
            filesystem=filesystem,
            try_create_dir=try_create_dir,
            open_stream_args=arrow_open_stream_args,
            block_path_provider=block_path_provider)
    def write_datasource(self, datasource: Datasource[T],
                         **write_args) -> None:
        """Write the dataset to a custom datasource.

        Examples:
            >>> ds.write_datasource(CustomDatasourceImpl(...))

        Time complexity: O(dataset size / parallelism)

        Args:
            datasource: The datasource to write to.
            write_args: Additional write args to pass to the datasource.
        """
        blocks, metadata = zip(*self._blocks.get_blocks_with_metadata())
        # The datasource returns one write-result ref per launched task.
        write_results = datasource.do_write(blocks, metadata, **write_args)
        progress = ProgressBar("Write Progress", len(write_results))
        try:
            # Block until all write tasks finish, then notify the datasource
            # of success with the resolved results.
            progress.block_until_complete(write_results)
            datasource.on_write_complete(ray.get(write_results))
        except Exception as e:
            # Give the datasource a chance to clean up partial output, then
            # re-raise the original failure to the caller.
            datasource.on_write_failed(write_results, e)
            raise
        finally:
            progress.close()
def iter_rows(self, *, prefetch_blocks: int = 0) -> Iterator[T]:
"""Return a local row iterator over the dataset.
Examples:
>>> for i in ray.data.range(1000000).iter_rows():
... print(i)
Time complexity: O(1)
Args:
prefetch_blocks: The number of blocks to prefetch ahead of the
current block during the scan.
Returns:
A local iterator over the entire dataset.
"""
for batch in self.iter_batches(
prefetch_blocks=prefetch_blocks, batch_format="native"):
batch = BlockAccessor.for_block(batch)
for row in batch.iter_rows():
yield row
def iter_batches(self,
*,
prefetch_blocks: int = 0,
batch_size: int = None,
batch_format: str = "native",
drop_last: bool = False) -> Iterator[BatchType]:
"""Return a local batched iterator over the dataset.
Examples:
>>> for batch in ray.data.range(1000000).iter_batches():
... print(batch)
Time complexity: O(1)
Args:
prefetch_blocks: The number of blocks to prefetch ahead of the
current block during the scan.
batch_size: Record batch size, or None to let the system pick.
batch_format: The format in which to return each batch.
Specify "native" to use the current block format, "pandas" to
select ``pandas.DataFrame`` or "pyarrow" to select
``pyarrow.Table``. Default is "native".
drop_last: Whether to drop the last batch if it's incomplete.
Returns:
A list of iterators over record batches.
"""
time_start = time.perf_counter()
def format_batch(batch: Block, format: str) -> BatchType:
if batch_format == "native":
return batch
elif batch_format == "pandas":
batch = BlockAccessor.for_block(batch)
return batch.to_pandas()
elif batch_format == "pyarrow":
batch = BlockAccessor.for_block(batch)
return batch.to_arrow()
else:
raise ValueError(
f"The given batch format: {batch_format} "
f"is invalid. Supported batch type: {BatchType}")
batcher = Batcher(batch_size=batch_size)
def batch_block(block: ObjectRef[Block]):
with self._stats.iter_get_s.timer():
block = ray.get(block)
batcher.add(block)
while batcher.has_batch():
with self._stats.iter_format_batch_s.timer():
result = format_batch(batcher.next_batch(), batch_format)
with self._stats.iter_user_s.timer():
yield result
block_window = [] # Handle empty sliding window gracefully.
for block_window in _sliding_window(self._blocks.iter_blocks(),
prefetch_blocks + 1):
block_window = list(block_window)
with self._stats.iter_wait_s.timer():
ray.wait(block_window, num_returns=1, fetch_local=True)
yield from batch_block(block_window[0])
# Consume remainder of final block window.
for block in block_window[1:]:
yield from batch_block(block)
# Yield any remainder batches.
if batcher.has_any() and not drop_last:
with self._stats.iter_format_batch_s.timer():
result = format_batch(batcher.next_batch(), batch_format)
with self._stats.iter_user_s.timer():
yield result
self._stats.iter_total_s.add(time.perf_counter() - time_start)
    def to_torch(self,
                 *,
                 label_column: Optional[str] = None,
                 feature_columns: Union[None, List[str],
                                        List[List[str]],
                                        Dict[str, List[str]]] = None,
                 label_column_dtype: Optional["torch.dtype"] = None,
                 feature_column_dtypes: Union[None, "torch.dtype",
                                              List["torch.dtype"],
                                              Dict[str, "torch.dtype"]] = None,
                 batch_size: int = 1,
                 prefetch_blocks: int = 0,
                 drop_last: bool = False,
                 unsqueeze_label_tensor: bool = True) -> \
            "torch.utils.data.IterableDataset":
        """Return a Torch IterableDataset over this dataset.

        This is only supported for datasets convertible to Arrow records.

        It is recommended to use the returned ``IterableDataset`` directly
        instead of passing it into a torch ``DataLoader``.

        Each element in IterableDataset will be a tuple consisting of 2
        elements. The first item contains the feature tensor(s), and the
        second item is the label tensor. Those can take on different
        forms, depending on the specified arguments.

        For the features tensor (N is the ``batch_size`` and n, m, k
        are the number of features per tensor):

        * If ``feature_columns`` is a ``List[str]``, the features will be
          a tensor of shape (N, n), with columns corresponding to
          ``feature_columns``
        * If ``feature_columns`` is a ``List[List[str]]``, the features will be
          a list of tensors of shape [(N, m),...,(N, k)], with columns of each
          tensor corresponding to the elements of ``feature_columns``
        * If ``feature_columns`` is a ``Dict[str, List[str]]``, the features
          will be a dict of key-tensor pairs of shape
          {key1: (N, m),..., keyN: (N, k)}, with columns of each
          tensor corresponding to the value of ``feature_columns`` under the
          key.

        If ``unsqueeze_label_tensor=True`` (default), the label tensor will be
        of shape (N, 1). Otherwise, it will be of shape (N,).
        If ``label_column`` is specified as ``None``, then no column from the
        ``Dataset`` will be treated as the label, and the output label tensor
        will be ``None``.

        Note that you probably want to call ``.split()`` on this dataset if
        there are to be multiple Torch workers consuming the data.

        Time complexity: O(1)

        Args:
            label_column (Optional[str]): The name of the column used as the
                label (second element of the output list). Can be None for
                prediction, in which case the second element of returned
                tuple will also be None.
            feature_columns (Union[None, List[str], List[List[str]], \
Dict[str, List[str]]]): The names of the columns
                to use as the features. Can be a list of lists or
                a dict of string-list pairs for multi-tensor output.
                If None, then use all columns except the label columns as
                the features.
            label_column_dtype (Optional[torch.dtype]): The torch dtype to
                use for the label column. If None, then automatically infer
                the dtype.
            feature_column_dtypes (Union[None, torch.dtype, List[torch.dtype],\
Dict[str, torch.dtype]]): The dtypes to use for the feature
                tensors. This should match the format of ``feature_columns``,
                or be a single dtype, in which case it will be applied to
                all tensors. If None, then automatically infer the dtype.
            batch_size (int): How many samples per batch to yield at a time.
                Defaults to 1.
            prefetch_blocks (int): The number of blocks to prefetch ahead of
                the current block during the scan.
            drop_last (bool): Set to True to drop the last incomplete batch,
                if the dataset size is not divisible by the batch size. If
                False and the size of dataset is not divisible by the batch
                size, then the last batch will be smaller. Defaults to False.
            unsqueeze_label_tensor (bool): If set to True, the label tensor
                will be unsqueezed (reshaped to (N, 1)). Otherwise, it will
                be left as is, that is (N, ). In general, regression loss
                functions expect an unsqueezed tensor, while classification
                loss functions expect a squeezed one. Defaults to True.

        Returns:
            A torch IterableDataset.
        """
        import torch

        from ray.data.impl.torch_iterable_dataset import \
            TorchIterableDataset

        # Multi-tensor output is requested either via a dict of column lists
        # or a list whose first element is itself a list/tuple.
        multi_input = feature_columns and (isinstance(feature_columns, dict) or
                                           isinstance(feature_columns[0],
                                                      (list, tuple)))

        # If an empty collection is passed in, treat it the same as None
        if not feature_columns:
            feature_columns = None

        # Validate that feature_column_dtypes structurally matches
        # feature_columns (dict keys or sequence length), unless it is a
        # single dtype applied uniformly.
        if (feature_column_dtypes
                and not isinstance(feature_column_dtypes, torch.dtype)):
            if isinstance(feature_columns, dict):
                if not isinstance(feature_column_dtypes, dict):
                    raise TypeError(
                        "If `feature_columns` is a dict, "
                        "`feature_column_dtypes` must be None, `torch.dtype`,"
                        f" or dict, got {type(feature_column_dtypes)}.")
                if set(feature_columns) != set(feature_column_dtypes):
                    raise ValueError(
                        "`feature_columns` and `feature_column_dtypes` "
                        "must have the same keys.")
            elif isinstance(feature_columns[0], (list, tuple)):
                if not isinstance(feature_column_dtypes, (list, tuple)):
                    raise TypeError(
                        "If `feature_columns` is a list of lists, "
                        "`feature_column_dtypes` must be None, `torch.dtype`,"
                        f" or a sequence, got {type(feature_column_dtypes)}.")
                if len(feature_columns) != len(feature_column_dtypes):
                    raise ValueError(
                        "`feature_columns` and `feature_column_dtypes` "
                        "must have the same length.")

        def make_generator():
            # Lazily iterate pandas batches and convert each to tensors.
            for batch in self.iter_batches(
                    batch_size=batch_size,
                    batch_format="pandas",
                    prefetch_blocks=prefetch_blocks,
                    drop_last=drop_last):
                if label_column:
                    # pop() also removes the label from the feature frame.
                    label_vals = batch.pop(label_column).values
                    label_tensor = torch.as_tensor(
                        label_vals, dtype=label_column_dtype)
                    if unsqueeze_label_tensor:
                        label_tensor = label_tensor.view(-1, 1)
                else:
                    label_tensor = None

                def get_feature_tensors(
                        batch,
                        feature_columns: List[str],
                        feature_column_dtype: "torch.dtype",
                        assert_feature_columns_not_empty: bool = False
                ) -> torch.Tensor:
                    # Build a single (N, n) tensor by concatenating one
                    # (N, 1) column tensor per selected feature column.
                    feature_tensors = []

                    if (assert_feature_columns_not_empty
                            and not feature_columns):
                        raise ValueError("`feature_columns` may not be empty")
                    if feature_columns:
                        batch = batch[feature_columns]

                    for col in batch.columns:
                        col_vals = batch[col].values
                        t = torch.as_tensor(
                            col_vals, dtype=feature_column_dtype)
                        t = t.view(-1, 1)
                        feature_tensors.append(t)

                    return torch.cat(feature_tensors, dim=1)

                if not multi_input:
                    features_tensor = get_feature_tensors(
                        batch, feature_columns, feature_column_dtypes)
                else:
                    if isinstance(feature_columns, dict):
                        # One tensor per dict key; dtype is per-key when
                        # feature_column_dtypes is a dict, else uniform.
                        features_tensor = {
                            key: get_feature_tensors(
                                batch,
                                feature_columns[key],
                                feature_column_dtypes[key]
                                if isinstance(feature_column_dtypes, dict) else
                                feature_column_dtypes,
                                assert_feature_columns_not_empty=True)
                            for key in feature_columns
                        }
                    else:
                        # One tensor per sub-list, in order.
                        features_tensor = [
                            get_feature_tensors(
                                batch,
                                feature_columns[idx],
                                feature_column_dtypes[idx] if isinstance(
                                    feature_column_dtypes, (list, tuple)) else
                                feature_column_dtypes,
                                assert_feature_columns_not_empty=True)
                            for idx in range(len(feature_columns))
                        ]

                yield (features_tensor, label_tensor)

        return TorchIterableDataset(make_generator)
def to_tf(self,
*,
label_column: str,
output_signature: Tuple["tf.TypeSpec", "tf.TypeSpec"],
feature_columns: Optional[List[str]] = None,
prefetch_blocks: int = 0,
batch_size: int = 1) -> "tf.data.Dataset":
"""Return a TF Dataset over this dataset.
The TF Dataset will be created from the generator returned by the
``iter_batches`` method. ``prefetch_blocks`` and ``batch_size``
arguments will be passed to that method.
This is only supported for datasets convertible to Arrow records.
Requires all datasets to have the same columns.
It is recommended to call ``.split()`` on this dataset if
there are to be multiple TensorFlow workers consuming the data.
The elements generated must be compatible with the given
``output_signature`` argument (same as in
``tf.data.Dataset.from_generator``).
Time complexity: O(1)
Args:
label_column (str): The name of the column used as the label
(second element of the output tuple).
output_signature (Tuple[tf.TypeSpec, tf.TypeSpec]): A 2-element
tuple of `tf.TypeSpec` objects corresponding to
(features, label).
feature_columns (Optional[List[str]]): List of columns in datasets
to use. If None, all columns will be used.
prefetch_blocks: The number of blocks to prefetch ahead of the
current block during the scan.
batch_size: Record batch size. Defaults to 1.
Returns:
A tf.data.Dataset.
"""
# argument exception checking is done in from_generator
try:
import tensorflow as tf
except ImportError:
raise ValueError("tensorflow must be installed!")
def make_generator():
for batch in self.iter_batches(
prefetch_blocks=prefetch_blocks,
batch_size=batch_size,
batch_format="pandas"):
target_col = batch.pop(label_column)
if feature_columns:
batch = batch[feature_columns]
# TODO(Clark): Support batches containing our extension array
# TensorArray.
yield batch.values, target_col.values
dataset = tf.data.Dataset.from_generator(
make_generator, output_signature=output_signature)
return dataset
def to_dask(self) -> "dask.DataFrame":
"""Convert this dataset into a Dask DataFrame.
This is only supported for datasets convertible to Arrow records.
Note that this function will set the Dask scheduler to Dask-on-Ray
globally, via the config.
Time complexity: O(dataset size / parallelism)
Returns:
A Dask DataFrame created from this dataset.
"""
import dask
import dask.dataframe as dd
from ray.util.client.common import ClientObjectRef
from ray.util.dask import ray_dask_get
dask.config.set(scheduler=ray_dask_get)
@dask.delayed
def block_to_df(block: Block):
block = BlockAccessor.for_block(block)
if isinstance(block, (ray.ObjectRef, ClientObjectRef)):
raise ValueError(
"Dataset.to_dask() must be used with Dask-on-Ray, please "
"set the Dask scheduler to ray_dask_get (located in "
"ray.util.dask).")
return block.to_pandas()
# TODO(Clark): Give Dask a Pandas-esque schema via the Pyarrow schema,
# once that's implemented.
ddf = dd.from_delayed(
[block_to_df(block) for block in self._blocks.get_blocks()])
return ddf
    def to_mars(self) -> "mars.DataFrame":
        """Convert this dataset into a MARS dataframe.

        Time complexity: O(dataset size / parallelism)

        Returns:
            A MARS dataframe created from this dataset.
        """
        # Not yet implemented; "P1" marks the implementation priority.
        raise NotImplementedError  # P1
def to_modin(self) -> "modin.DataFrame":
"""Convert this dataset into a Modin dataframe.
This works by first converting this dataset into a distributed set of
Pandas dataframes (using ``.to_pandas_refs()``). Please see caveats
there. Then the individual dataframes are used to create the modin
DataFrame using
``modin.distributed.dataframe.pandas.partitions.from_partitions()``.
This is only supported for datasets convertible to Arrow records.
This function induces a copy of the data. For zero-copy access to the
underlying data, consider using ``.to_arrow()`` or
``.get_internal_block_refs()``.
Time complexity: O(dataset size / parallelism)
Returns:
A Modin dataframe created from this dataset.
"""
from modin.distributed.dataframe.pandas.partitions import (
from_partitions)
pd_objs = self.to_pandas_refs()
return from_partitions(pd_objs, axis=0)
    def to_spark(self,
                 spark: "pyspark.sql.SparkSession") -> "pyspark.sql.DataFrame":
        """Convert this dataset into a Spark dataframe.

        Time complexity: O(dataset size / parallelism)

        Returns:
            A Spark dataframe created from this dataset.
        """
        import raydp

        # Resolve the owner address of each block so raydp can schedule
        # Spark partitions with locality.
        core_worker = ray.worker.global_worker.core_worker
        locations = [
            core_worker.get_owner_address(block)
            for block in self.get_internal_block_refs()
        ]
        # NOTE(review): get_internal_block_refs() is called twice here —
        # presumably it returns the same list both times; confirm, or hoist
        # into a local to avoid a redundant call.
        return raydp.spark.ray_dataset_to_spark_dataframe(
            spark, self.schema(), self.get_internal_block_refs(), locations)
def to_pandas(self, limit: int = 100000) -> "pandas.DataFrame":
"""Convert this dataset into a single Pandas DataFrame.
This is only supported for datasets convertible to Arrow records. An
error is raised if the number of records exceeds the provided limit.
Note that you can use ``.limit()`` on the dataset beforehand to
truncate the dataset manually.
Time complexity: O(dataset size)
Args:
limit: The maximum number of records to return. An error will be
raised if the limit is exceeded.
Returns:
A Pandas DataFrame created from this dataset, containing a limited
number of records.
"""
if self.count() > limit:
raise ValueError(
"The dataset has more than the given limit of {} records. "
"Use ds.limit(N).to_pandas().".format(limit))
blocks = self.get_internal_block_refs()
output = DelegatingBlockBuilder()
for block in blocks:
output.add_block(ray.get(block))
return BlockAccessor.for_block(output.build()).to_pandas()
def to_pandas_refs(self) -> List[ObjectRef["pandas.DataFrame"]]:
"""Convert this dataset into a distributed set of Pandas dataframes.
This is only supported for datasets convertible to Arrow records.
This function induces a copy of the data. For zero-copy access to the
underlying data, consider using ``.to_arrow()`` or
``.get_internal_block_refs()``.
Time complexity: O(dataset size / parallelism)
Returns:
A list of remote Pandas dataframes created from this dataset.
"""
block_to_df = cached_remote_fn(_block_to_df)
return [
block_to_df.remote(block) for block in self._blocks.get_blocks()
]
def to_numpy_refs(self, *, column: Optional[str] = None
) -> List[ObjectRef[np.ndarray]]:
"""Convert this dataset into a distributed set of NumPy ndarrays.
This is only supported for datasets convertible to NumPy ndarrays.
This function induces a copy of the data. For zero-copy access to the
underlying data, consider using ``.to_arrow()`` or
``.get_internal_block_refs()``.
Time complexity: O(dataset size / parallelism)
Args:
column: The name of the column to convert to numpy, or None to
specify the entire row. Required for Arrow tables.
Returns:
A list of remote NumPy ndarrays created from this dataset.
"""
block_to_ndarray = cached_remote_fn(_block_to_ndarray)
return [
block_to_ndarray.remote(block, column=column)
for block in self._blocks.get_blocks()
]
def to_arrow_refs(self) -> List[ObjectRef["pyarrow.Table"]]:
"""Convert this dataset into a distributed set of Arrow tables.
This is only supported for datasets convertible to Arrow records.
This function is zero-copy if the existing data is already in Arrow
format. Otherwise, the data will be converted to Arrow format.
Time complexity: O(1) unless conversion is required.
Returns:
A list of remote Arrow tables created from this dataset.
"""
blocks: List[ObjectRef[Block]] = self._blocks.get_blocks()
if self._dataset_format() == "arrow":
# Zero-copy path.
return blocks
block_to_arrow = cached_remote_fn(_block_to_arrow)
return [block_to_arrow.remote(block) for block in blocks]
    def repeat(self, times: int = None) -> "DatasetPipeline[T]":
        """Convert this into a DatasetPipeline by looping over this dataset.

        Transformations prior to the call to ``repeat()`` are evaluated once.
        Transformations done on the returned pipeline are evaluated on each
        loop of the pipeline over the base dataset.

        Note that every repeat of the dataset is considered an "epoch" for
        the purposes of ``DatasetPipeline.iter_epochs()``.

        Examples:
            >>> # Infinite pipeline of numbers [0, 5)
            >>> ray.data.range(5).repeat().take()
            [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, ...]
            >>> # Can apply transformations to the pipeline.
            >>> ray.data.range(5).repeat().map(lambda x: -x).take()
            [0, -1, -2, -3, -4, 0, -1, -2, -3, -4, ...]
            >>> # Can shuffle each epoch (dataset) in the pipeline.
            >>> ray.data.range(5).repeat().random_shuffle().take()
            [2, 3, 0, 4, 1, 4, 0, 2, 1, 3, ...]

        Args:
            times: The number of times to loop over this dataset, or None
                to repeat indefinitely.
        """
        # Deferred import; NOTE(review): presumably avoids a circular
        # import between dataset and dataset_pipeline -- confirm.
        from ray.data.dataset_pipeline import DatasetPipeline
        if times is not None and times < 1:
            raise ValueError("`times` must be >= 1, got {}".format(times))
        class Iterator:
            # Stateful iterator producing one "epoch" per __next__ call.
            def __init__(self, ds: "Dataset[T]"):
                self._ds = ds
                self._i = 0
            def __next__(self) -> "Dataset[T]":
                # When ``times`` is None this condition is always falsy,
                # so iteration never terminates (infinite repeat).
                if times and self._i >= times:
                    raise StopIteration
                self._ds._set_epoch(self._i)
                self._i += 1
                # Returns a zero-arg factory rather than the dataset itself;
                # window() below follows the same factory convention.
                return lambda: self._ds
        class Iterable:
            def __init__(self, ds: "Dataset[T]"):
                self._ds = ds
            def __iter__(self):
                return Iterator(self._ds)
        return DatasetPipeline(Iterable(self), length=times or float("inf"))
    def pipeline(self, *, parallelism: int = 10) -> "DatasetPipeline[T]":
        # Hard-deprecated: this *raises* DeprecationWarning (it does not use
        # warnings.warn), so any call fails immediately with migration advice.
        raise DeprecationWarning("Use .window(blocks_per_window=n) instead of "
                                 ".pipeline(parallelism=n)")
    def window(self, *, blocks_per_window: int = 10) -> "DatasetPipeline[T]":
        """Convert this into a DatasetPipeline by windowing over data blocks.

        Transformations prior to the call to ``window()`` are evaluated in
        bulk on the entire dataset. Transformations done on the returned
        pipeline are evaluated incrementally per window of blocks as data is
        read from the output of the pipeline.

        Windowing execution allows for output to be read sooner without
        waiting for all transformations to fully execute, and can also improve
        efficiency if transforms use different resources (e.g., GPUs).

        Without windowing::

            [preprocessing......]
            [inference.......]
            [write........]
            Time ----------------------------------------------------------->

        With windowing::

            [prep1] [prep2] [prep3]
            [infer1] [infer2] [infer3]
            [write1] [write2] [write3]
            Time ----------------------------------------------------------->

        Examples:
            >>> # Create an inference pipeline.
            >>> ds = ray.data.read_binary_files(dir)
            >>> pipe = ds.window(blocks_per_window=10).map(infer)
            DatasetPipeline(num_windows=40, num_stages=2)
            >>> # The higher the stage parallelism, the shorter the pipeline.
            >>> pipe = ds.window(blocks_per_window=20).map(infer)
            DatasetPipeline(num_windows=20, num_stages=2)
            >>> # Outputs can be incrementally read from the pipeline.
            >>> for item in pipe.iter_rows():
            ...     print(item)

        Args:
            blocks_per_window: The window size (parallelism) in blocks.
                Increasing window size increases pipeline throughput, but also
                increases the latency to initial output, since it decreases the
                length of the pipeline. Setting this to infinity effectively
                disables pipelining.
        """
        # Deferred import; NOTE(review): presumably avoids a circular
        # import between dataset and dataset_pipeline -- confirm.
        from ray.data.dataset_pipeline import DatasetPipeline
        # Captured by the nested Iterator closure below so every windowed
        # Dataset shares this dataset's stats object.
        outer_stats = self._stats
        class Iterator:
            def __init__(self, splits, epoch):
                # Copy so pop(0) below does not mutate the caller's list.
                self._splits = splits.copy()
                self._epoch = epoch
            def __next__(self) -> "Dataset[T]":
                if not self._splits:
                    raise StopIteration
                blocks = self._splits.pop(0)
                # Yield a zero-arg factory (same convention as repeat()).
                def gen():
                    return Dataset(blocks, self._epoch, outer_stats)
                return gen
        class Iterable:
            def __init__(self, blocks, epoch):
                self._splits = blocks.split(split_size=blocks_per_window)
                self._epoch = epoch
            def __iter__(self):
                return Iterator(self._splits, self._epoch)
        it = Iterable(self._blocks, self._epoch)
        # Pipeline length equals the number of block windows.
        return DatasetPipeline(it, length=len(it._splits))
    @DeveloperAPI
    def get_internal_block_refs(self) -> List[ObjectRef[Block]]:
        """Get a list of references to the underlying blocks of this dataset.

        This function can be used for zero-copy access to the data. It blocks
        until the underlying blocks are computed.

        Time complexity: O(1)

        Returns:
            A list of references to this dataset's blocks.
        """
        return self._blocks.get_blocks()
    @DeveloperAPI
    def stats(self) -> str:
        """Returns a string containing execution timing information.

        Returns:
            A human-readable summary produced by this dataset's stats object.
        """
        return self._stats.summary_string()
def _move_blocks(self):
blocks = self._blocks.copy()
self._blocks.clear()
return blocks
    def _split(self, index: int,
               return_right_half: bool) -> ("Dataset[T]", "Dataset[T]"):
        """Split this dataset at row ``index``.

        Returns ``(left, right)`` where ``left`` holds rows [0, index) and
        ``right`` holds the remaining rows, or ``None`` when
        ``return_right_half`` is False.
        """
        get_num_rows = cached_remote_fn(_get_num_rows)
        # split_block returns 4 objects: (left block, left meta,
        # right block, right meta).
        split_block = cached_remote_fn(_split_block, num_returns=4)
        count = 0  # rows accounted for so far
        left_blocks = []
        left_metadata = []
        right_blocks = []
        right_metadata = []
        it = self._blocks.get_blocks_with_metadata()
        for b, m in it:
            if m.num_rows is None:
                # Metadata doesn't know the row count; fetch it remotely.
                num_rows = ray.get(get_num_rows.remote(b))
            else:
                num_rows = m.num_rows
            if count >= index:
                # Entire block lies at/after the split point.
                if not return_right_half:
                    break
                right_blocks.append(b)
                right_metadata.append(m)
            elif count + num_rows < index:
                # Entire block lies before the split point.
                left_blocks.append(b)
                left_metadata.append(m)
            elif count + num_rows == index:
                # Block ends exactly at the split point -- no slicing needed.
                left_blocks.append(b)
                left_metadata.append(m)
            else:
                # Split point falls inside this block; slice it remotely.
                b0, m0, b1, m1 = split_block.remote(b, m, index - count,
                                                    return_right_half)
                left_blocks.append(b0)
                left_metadata.append(ray.get(m0))
                right_blocks.append(b1)
                right_metadata.append(ray.get(m1))
            count += num_rows
        # child_TODO is a placeholder stats-lineage hook on the stats object.
        left = Dataset(
            BlockList(left_blocks, left_metadata), self._epoch,
            self._stats.child_TODO("split"))
        if return_right_half:
            right = Dataset(
                BlockList(right_blocks, right_metadata), self._epoch,
                self._stats.child_TODO("split"))
        else:
            right = None
        return left, right
def _divide(self, block_idx: int) -> ("Dataset[T]", "Dataset[T]"):
left, right = self._blocks.divide(block_idx)
return Dataset(left, self._epoch, self._stats), Dataset(
right, self._epoch, self._stats)
def __repr__(self) -> str:
schema = self.schema()
if schema is None:
schema_str = "Unknown schema"
elif isinstance(schema, type):
schema_str = str(schema)
else:
schema_str = []
for n, t in zip(schema.names, schema.types):
if hasattr(t, "__name__"):
t = t.__name__
schema_str.append("{}: {}".format(n, t))
schema_str = ", ".join(schema_str)
schema_str = "{" + schema_str + "}"
count = self._meta_count()
return "Dataset(num_blocks={}, num_rows={}, schema={})".format(
self._blocks.initial_num_blocks(), count, schema_str)
def __str__(self) -> str:
return repr(self)
def _block_num_rows(self) -> List[int]:
get_num_rows = cached_remote_fn(_get_num_rows)
return ray.get(
[get_num_rows.remote(b) for b in self._blocks.get_blocks()])
def _block_size_bytes(self) -> List[int]:
get_size_bytes = cached_remote_fn(_get_size_bytes)
return ray.get(
[get_size_bytes.remote(b) for b in self._blocks.get_blocks()])
def _meta_count(self) -> Optional[int]:
metadata = self._blocks.get_metadata()
if metadata and metadata[0].num_rows is not None:
return sum(m.num_rows for m in metadata)
else:
return None
    def _get_uuid(self) -> str:
        """Return this dataset's unique identifier string."""
        return self._uuid
    def _set_uuid(self, uuid: str) -> None:
        """Set this dataset's unique identifier string."""
        self._uuid = uuid
    def _get_epoch(self) -> int:
        """Return the epoch number this dataset belongs to (see repeat())."""
        return self._epoch
    def _set_epoch(self, epoch: int) -> None:
        """Set the epoch number this dataset belongs to (see repeat())."""
        self._epoch = epoch
def _get_num_rows(block: Block) -> int:
    """Count rows in one block (module-level helper run as a remote task)."""
    accessor = BlockAccessor.for_block(block)
    return accessor.num_rows()
def _get_size_bytes(block: Block) -> int:
    """Measure one block's size in bytes (helper run as a remote task)."""
    accessor = BlockAccessor.for_block(block)
    return accessor.size_bytes()
def _block_to_df(block: Block):
    """Convert one block to a Pandas dataframe (remote-task helper)."""
    accessor = BlockAccessor.for_block(block)
    return accessor.to_pandas()
def _block_to_ndarray(block: Block, column: Optional[str]):
    """Convert one block (or one of its columns) to a NumPy ndarray."""
    accessor = BlockAccessor.for_block(block)
    return accessor.to_numpy(column)
def _block_to_arrow(block: Block):
    """Convert one block to an Arrow table (remote-task helper)."""
    accessor = BlockAccessor.for_block(block)
    return accessor.to_arrow()
def _sliding_window(iterable: Iterable, n: int):
"""Creates an iterator consisting of n-width sliding windows over
iterable. The sliding windows are constructed lazily such that an
element on the base iterator (iterable) isn't consumed until the
first sliding window containing that element is reached.
If n > len(iterable), then a single len(iterable) window is
returned.
Args:
iterable: The iterable on which the sliding window will be
created.
n: The width of the sliding window.
Returns:
An iterator of n-width windows over iterable.
If n > len(iterable), then a single len(iterable) window is
returned.
"""
it = iter(iterable)
window = collections.deque(itertools.islice(it, n), maxlen=n)
if len(window) > 0:
yield tuple(window)
for elem in it:
window.append(elem)
yield tuple(window)
def _split_block(
        block: Block, meta: BlockMetadata, count: int, return_right_half: bool
) -> (Block, BlockMetadata, Optional[Block], Optional[BlockMetadata]):
    """Slice one block into rows [0, count) and, optionally, [count, end).

    Returns (left block, left metadata, right block, right metadata); the
    right pair is (None, None) when ``return_right_half`` is False.
    Fresh metadata is computed for each slice; schema and input_files are
    carried over from the original block's metadata.
    """
    block = BlockAccessor.for_block(block)
    logger.debug("Truncating last block to size: {}".format(count))
    # copy=True materializes an independent block for each slice.
    b0 = block.slice(0, count, copy=True)
    a0 = BlockAccessor.for_block(b0)
    m0 = BlockMetadata(
        num_rows=a0.num_rows(),
        size_bytes=a0.size_bytes(),
        schema=meta.schema,
        input_files=meta.input_files,
        exec_stats=BlockExecStats.TODO)
    if return_right_half:
        b1 = block.slice(count, block.num_rows(), copy=True)
        a1 = BlockAccessor.for_block(b1)
        m1 = BlockMetadata(
            num_rows=a1.num_rows(),
            size_bytes=a1.size_bytes(),
            schema=meta.schema,
            input_files=meta.input_files,
            exec_stats=BlockExecStats.TODO)
    else:
        b1 = None
        m1 = None
    return b0, m0, b1, m1
| 40.964286 | 100 | 0.572308 |
b3e830905ac98e64a2097d47f31529f05cdea50f | 1,772 | py | Python | preprocess.py | Dumbledore-on-Strive/goodreads | 0de57174de4ee88399e71c4061d3dec3dd6480ae | [
"MIT"
] | null | null | null | preprocess.py | Dumbledore-on-Strive/goodreads | 0de57174de4ee88399e71c4061d3dec3dd6480ae | [
"MIT"
] | null | null | null | preprocess.py | Dumbledore-on-Strive/goodreads | 0de57174de4ee88399e71c4061d3dec3dd6480ae | [
"MIT"
] | 2 | 2021-04-13T07:57:57.000Z | 2021-04-14T16:39:00.000Z |
import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler
# Load the scraped book data; NOTE(review): "testing.csv" is presumably the
# Goodreads scrape output -- confirm against the scraper script.
df = pd.read_csv("testing.csv")
# Bare expression: a no-op outside a notebook (leftover from nb export).
df.columns
# Drop the CSV index column and fields not used downstream.
df = df.drop(['Unnamed: 0', 'place', 'url'], axis=1)
df = df.dropna()
df['num_pages'] = pd.to_numeric(df['num_pages'], downcast='integer')
# Bare expression: a no-op outside a notebook.
df.head()
# Step 1: find min(avg_rating)
minValue = df['avg_rating'].min()
print('Minimum rating: ', minValue)
# Step 2: find max(avg_rating)
maxValue = df['avg_rating'].max()
print('Maximum rating: ', maxValue)
# Step 3: min-max normalization of the average rating (see minmax_norm below)
def minmax_norm(data_column_name, new_min=1.0, new_max=10.0):
    """Min-max scale a numeric Series into [new_min, new_max].

    Defaults reproduce the original behavior of mapping onto [1, 10]
    via ``1 + ((x - min) / (max - min)) * 9``.

    Args:
        data_column_name: pandas Series (or similar object supporting
            ``.min()``/``.max()`` and elementwise arithmetic) to rescale.
        new_min: Lower bound of the target range.
        new_max: Upper bound of the target range.

    Returns:
        The rescaled values. If the input is constant the range is zero
        and the result is undefined (inf/NaN from division by zero).
    """
    values = data_column_name
    span = values.max() - values.min()
    return new_min + ((values - values.min()) / span) * (new_max - new_min)
# Add the [1, 10] min-max normalized rating, rounded to 2 decimals.
df['minmax_norm_ratings'] = minmax_norm(
    df['avg_rating']).round(decimals=2)
# Bare expression: a no-op outside a notebook.
df.head()
# # Mean normalization
# Step 1: average of avg_rating
df_mean = df[["avg_rating"]].mean()
# Bare expression: a no-op outside a notebook.
df_mean
def mean_norm(data_column_name):
    """Mean-normalize a numeric Series onto a 9-wide scale centered at 1.

    Computes ``1 + ((x - mean) / (max - min)) * 9``: values below the mean
    map below 1 and values above it map above 1.
    """
    values = data_column_name
    spread = values.max() - values.min()
    return 1 + ((values - values.mean()) / spread) * 9
a = mean_norm(df["avg_rating"])
# Bare expression: a no-op outside a notebook.
a.to_frame()
# Add the mean-normalized rating, rounded to 2 decimals.
df['mean_norm_ratings'] = mean_norm(df['avg_rating']).round(decimals=2)
# Bare expression: a no-op outside a notebook.
df.head()
# Reorder columns: keep the first 8, then move the two new normalized-rating
# columns (appended last) right after them.
cols = df.columns.tolist()
cols = cols[:8]+cols[-2:]+cols[8:-2]
df = df[cols]
# Bare expression: a no-op outside a notebook.
df
df.to_csv('preprocess_data.csv')
# # Analyse
# Mean min-max-normalized rating per original publication year.
anay_goup = df.groupby("original_publish_year")[
    'minmax_norm_ratings'].mean().round(decimals=2)
# In[63]:
# Bare expression: a no-op outside a notebook.
anay_goup.to_frame()
# In[61]:
# Bare expression (duplicate of the cell above): a no-op outside a notebook.
anay_goup.to_frame()
# # The Best Book Author
def get_book_author(name, df):
    """Return the author's best (highest) min-max normalized book rating.

    Args:
        name: Exact author name to match against ``df['author']``.
        df: DataFrame containing 'author' and 'minmax_norm_ratings' columns.

    Returns:
        The maximum 'minmax_norm_ratings' among the author's books, or NaN
        when the author has no rows in ``df`` (pandas max of empty Series).
    """
    author_books = df[df['author'] == name]
    return author_books['minmax_norm_ratings'].max()
| 20.604651 | 86 | 0.638826 |
460f379ce9164b30f2cda4beffdc1a65bec47df0 | 14,494 | py | Python | test/test_librosa_compatibility.py | f0k/audio | b457cb711a9a849b1226ede93ae247cd1e06eddf | [
"BSD-2-Clause"
] | 2 | 2021-03-13T08:24:36.000Z | 2021-05-06T15:50:00.000Z | test/test_librosa_compatibility.py | f0k/audio | b457cb711a9a849b1226ede93ae247cd1e06eddf | [
"BSD-2-Clause"
] | null | null | null | test/test_librosa_compatibility.py | f0k/audio | b457cb711a9a849b1226ede93ae247cd1e06eddf | [
"BSD-2-Clause"
] | 1 | 2021-08-03T20:41:15.000Z | 2021-08-03T20:41:15.000Z | """Test suites for numerical compatibility with librosa"""
import os
import unittest
import torch
import torchaudio
import torchaudio.functional as F
from torchaudio.common_utils import IMPORT_LIBROSA
if IMPORT_LIBROSA:
import numpy as np
import librosa
import scipy
import pytest
import common_utils
class _LibrosaMixin:
    """Automatically skip tests if librosa is not available"""
    def setUp(self):
        # Runs before every test in subclasses; SkipTest marks the test as
        # skipped (not failed) when the librosa import was unavailable.
        super().setUp()
        if not IMPORT_LIBROSA:
            raise unittest.SkipTest('Librosa not available')
class TestFunctional(_LibrosaMixin, unittest.TestCase):
    """Test suite for functions in `functional` module."""
    def test_griffinlim(self):
        # Griffin-Lim reconstruction must numerically match librosa's.
        # NOTE: This test is flaky without a fixed random seed
        # See https://github.com/pytorch/audio/issues/382
        torch.random.manual_seed(42)
        tensor = torch.rand((1, 1000))
        n_fft = 400
        ws = 400
        hop = 100
        window = torch.hann_window(ws)
        normalize = False
        momentum = 0.99
        n_iter = 8
        length = 1000
        rand_init = False
        # librosa expects init='random' or None; None mirrors rand_init=False.
        init = 'random' if rand_init else None
        # .sqrt() turns the power spectrogram into a magnitude spectrogram.
        specgram = F.spectrogram(tensor, 0, window, n_fft, hop, ws, 2, normalize).sqrt()
        ta_out = F.griffinlim(specgram, window, n_fft, hop, ws, 1, normalize,
                              n_iter, momentum, length, rand_init)
        lr_out = librosa.griffinlim(specgram.squeeze(0).numpy(), n_iter=n_iter, hop_length=hop,
                                    momentum=momentum, init=init, length=length)
        lr_out = torch.from_numpy(lr_out).unsqueeze(0)
        torch.testing.assert_allclose(ta_out, lr_out, atol=5e-5, rtol=1e-5)
    def _test_create_fb(self, n_mels=40, sample_rate=22050, n_fft=2048, fmin=0.0, fmax=8000.0):
        # Helper: compare torchaudio's mel filter bank against librosa's
        # (htk=True, norm=None to match torchaudio's conventions).
        librosa_fb = librosa.filters.mel(sr=sample_rate,
                                         n_fft=n_fft,
                                         n_mels=n_mels,
                                         fmax=fmax,
                                         fmin=fmin,
                                         htk=True,
                                         norm=None)
        fb = F.create_fb_matrix(sample_rate=sample_rate,
                                n_mels=n_mels,
                                f_max=fmax,
                                f_min=fmin,
                                n_freqs=(n_fft // 2 + 1))
        # librosa lays out (n_mels, n_freqs); torchaudio (n_freqs, n_mels).
        for i_mel_bank in range(n_mels):
            torch.testing.assert_allclose(fb[:, i_mel_bank], torch.tensor(librosa_fb[i_mel_bank]),
                                          atol=1e-4, rtol=1e-5)
    def test_create_fb(self):
        # Sweep several parameter combinations, including degenerate
        # fmin > fmax ranges.
        self._test_create_fb()
        self._test_create_fb(n_mels=128, sample_rate=44100)
        self._test_create_fb(n_mels=128, fmin=2000.0, fmax=5000.0)
        self._test_create_fb(n_mels=56, fmin=100.0, fmax=9000.0)
        self._test_create_fb(n_mels=56, fmin=800.0, fmax=900.0)
        self._test_create_fb(n_mels=56, fmin=1900.0, fmax=900.0)
        self._test_create_fb(n_mels=10, fmin=1900.0, fmax=900.0)
    def test_amplitude_to_DB(self):
        spec = torch.rand((6, 201))
        amin = 1e-10
        db_multiplier = 0.0
        top_db = 80.0
        # Power to DB (multiplier 10 == 10 * log10)
        multiplier = 10.0
        ta_out = F.amplitude_to_DB(spec, multiplier, amin, db_multiplier, top_db)
        lr_out = librosa.core.power_to_db(spec.numpy())
        lr_out = torch.from_numpy(lr_out)
        torch.testing.assert_allclose(ta_out, lr_out, atol=5e-5, rtol=1e-5)
        # Amplitude to DB (multiplier 20 == 20 * log10)
        multiplier = 20.0
        ta_out = F.amplitude_to_DB(spec, multiplier, amin, db_multiplier, top_db)
        lr_out = librosa.core.amplitude_to_db(spec.numpy())
        lr_out = torch.from_numpy(lr_out)
        torch.testing.assert_allclose(ta_out, lr_out, atol=5e-5, rtol=1e-5)
@pytest.mark.parametrize('complex_specgrams', [
    torch.randn(2, 1025, 400, 2)
])
@pytest.mark.parametrize('rate', [0.5, 1.01, 1.3])
@pytest.mark.parametrize('hop_length', [256])
def test_phase_vocoder(complex_specgrams, rate, hop_length):
    """phase_vocoder output must match librosa's in shape and values."""
    # Using a decorator here causes parametrize to fail on Python 2
    if not IMPORT_LIBROSA:
        raise unittest.SkipTest('Librosa is not available')
    # Due to cumulative sum, numerical error when using torch.float32 would
    # cause the bottom-right values of the stretched spectrogram to not
    # match librosa's, so compute in float64.
    complex_specgrams = complex_specgrams.type(torch.float64)
    phase_advance = torch.linspace(0, np.pi * hop_length, complex_specgrams.shape[-3], dtype=torch.float64)[..., None]
    complex_specgrams_stretch = F.phase_vocoder(complex_specgrams, rate=rate, phase_advance=phase_advance)
    # == Test shape
    expected_size = list(complex_specgrams.size())
    expected_size[-2] = int(np.ceil(expected_size[-2] / rate))
    assert complex_specgrams.dim() == complex_specgrams_stretch.dim()
    assert complex_specgrams_stretch.size() == torch.Size(expected_size)
    # == Test values
    # Select the first spectrogram of the batch, keeping the last 3 dims.
    index = [0] * (complex_specgrams.dim() - 3) + [slice(None)] * 3
    # Rebuild a complex ndarray from the (real, imag) trailing dimension.
    mono_complex_specgram = complex_specgrams[index].numpy()
    mono_complex_specgram = mono_complex_specgram[..., 0] + \
        mono_complex_specgram[..., 1] * 1j
    expected_complex_stretch = librosa.phase_vocoder(mono_complex_specgram,
                                                     rate=rate,
                                                     hop_length=hop_length)
    complex_stretch = complex_specgrams_stretch[index].numpy()
    complex_stretch = complex_stretch[..., 0] + 1j * complex_stretch[..., 1]
    assert np.allclose(complex_stretch, expected_complex_stretch, atol=1e-5)
def _load_audio_asset(*asset_paths, **kwargs):
    """Load a test asset with torchaudio; returns (waveform, sample_rate)."""
    path = common_utils.get_asset_path(*asset_paths)
    return torchaudio.load(path, **kwargs)
def _test_compatibilities(n_fft, hop_length, power, n_mels, n_mfcc, sample_rate):
    """Compare torchaudio transforms against librosa on a sine-wave asset.

    Covers Spectrogram, MelSpectrogram, AmplitudeToDB (power and magnitude)
    and MFCC with the supplied STFT/mel parameters.
    """
    # NOTE(review): the sample_rate parameter is immediately overwritten by
    # the asset's actual sample rate here -- the argument is effectively
    # only used before this line never; confirm that is intended.
    sound, sample_rate = _load_audio_asset('sinewave.wav')
    sound_librosa = sound.cpu().numpy().squeeze()  # (64000)
    # test core spectrogram
    spect_transform = torchaudio.transforms.Spectrogram(
        n_fft=n_fft, hop_length=hop_length, power=power)
    out_librosa, _ = librosa.core.spectrum._spectrogram(
        y=sound_librosa, n_fft=n_fft, hop_length=hop_length, power=power)
    out_torch = spect_transform(sound).squeeze().cpu()
    torch.testing.assert_allclose(out_torch, torch.from_numpy(out_librosa), atol=1e-5, rtol=1e-5)
    # test mel spectrogram (htk=True, norm=None match torchaudio's mel scale)
    melspect_transform = torchaudio.transforms.MelSpectrogram(
        sample_rate=sample_rate, window_fn=torch.hann_window,
        hop_length=hop_length, n_mels=n_mels, n_fft=n_fft)
    librosa_mel = librosa.feature.melspectrogram(
        y=sound_librosa, sr=sample_rate, n_fft=n_fft,
        hop_length=hop_length, n_mels=n_mels, htk=True, norm=None)
    librosa_mel_tensor = torch.from_numpy(librosa_mel)
    torch_mel = melspect_transform(sound).squeeze().cpu()
    torch.testing.assert_allclose(
        torch_mel.type(librosa_mel_tensor.dtype), librosa_mel_tensor, atol=5e-3, rtol=1e-5)
    # test spectrogram-to-DB conversions
    power_to_db_transform = torchaudio.transforms.AmplitudeToDB('power', 80.)
    power_to_db_torch = power_to_db_transform(spect_transform(sound)).squeeze().cpu()
    power_to_db_librosa = librosa.core.spectrum.power_to_db(out_librosa)
    torch.testing.assert_allclose(power_to_db_torch, torch.from_numpy(power_to_db_librosa), atol=5e-3, rtol=1e-5)
    mag_to_db_transform = torchaudio.transforms.AmplitudeToDB('magnitude', 80.)
    mag_to_db_torch = mag_to_db_transform(torch.abs(sound)).squeeze().cpu()
    mag_to_db_librosa = librosa.core.spectrum.amplitude_to_db(sound_librosa)
    torch.testing.assert_allclose(mag_to_db_torch, torch.from_numpy(mag_to_db_librosa), atol=5e-3, rtol=1e-5)
    power_to_db_torch = power_to_db_transform(melspect_transform(sound)).squeeze().cpu()
    db_librosa = librosa.core.spectrum.power_to_db(librosa_mel)
    db_librosa_tensor = torch.from_numpy(db_librosa)
    torch.testing.assert_allclose(
        power_to_db_torch.type(db_librosa_tensor.dtype), db_librosa_tensor, atol=5e-3, rtol=1e-5)
    # test MFCC
    melkwargs = {'hop_length': hop_length, 'n_fft': n_fft}
    mfcc_transform = torchaudio.transforms.MFCC(
        sample_rate=sample_rate, n_mfcc=n_mfcc, norm='ortho', melkwargs=melkwargs)
    # librosa.feature.mfcc doesn't pass kwargs properly since some of the
    # kwargs for melspectrogram and mfcc are the same. We just follow the
    # function body in
    # https://librosa.github.io/librosa/_modules/librosa/feature/spectral.html#melspectrogram
    # to mirror this function call with correct args:
    #
    # librosa_mfcc = librosa.feature.mfcc(
    #     y=sound_librosa, sr=sample_rate, n_mfcc=n_mfcc,
    #     hop_length=hop_length, n_fft=n_fft, htk=True, norm=None, n_mels=n_mels)
    librosa_mfcc = scipy.fftpack.dct(db_librosa, axis=0, type=2, norm='ortho')[:n_mfcc]
    librosa_mfcc_tensor = torch.from_numpy(librosa_mfcc)
    torch_mfcc = mfcc_transform(sound).squeeze().cpu()
    torch.testing.assert_allclose(
        torch_mfcc.type(librosa_mfcc_tensor.dtype), librosa_mfcc_tensor, atol=5e-3, rtol=1e-5)
class TestTransforms(_LibrosaMixin, unittest.TestCase):
    """Test suite for functions in `transforms` module."""
    def test_basics1(self):
        kwargs = {
            'n_fft': 400,
            'hop_length': 200,
            'power': 2.0,
            'n_mels': 128,
            'n_mfcc': 40,
            'sample_rate': 16000
        }
        _test_compatibilities(**kwargs)
    def test_basics2(self):
        kwargs = {
            'n_fft': 600,
            'hop_length': 100,
            'power': 2.0,
            'n_mels': 128,
            'n_mfcc': 20,
            'sample_rate': 16000
        }
        _test_compatibilities(**kwargs)
    # NOTE: Test passes offline, but fails on TravisCI (and CircleCI), see #372.
    @unittest.skipIf('CI' in os.environ, 'Test is known to fail on CI')
    def test_basics3(self):
        kwargs = {
            'n_fft': 200,
            'hop_length': 50,
            'power': 2.0,
            'n_mels': 128,
            'n_mfcc': 50,
            'sample_rate': 24000
        }
        _test_compatibilities(**kwargs)
    def test_basics4(self):
        kwargs = {
            'n_fft': 400,
            'hop_length': 200,
            'power': 3.0,
            'n_mels': 128,
            'n_mfcc': 40,
            'sample_rate': 16000
        }
        _test_compatibilities(**kwargs)
    @unittest.skipIf("sox" not in common_utils.BACKENDS, "sox not available")
    @common_utils.AudioBackendScope("sox")
    def test_MelScale(self):
        """MelScale transform is comparable to that of librosa"""
        n_fft = 2048
        n_mels = 256
        hop_length = n_fft // 4
        # Prepare spectrogram input. We use torchaudio to compute one.
        sound, sample_rate = _load_audio_asset('whitenoise_1min.mp3')
        # Downmix to mono, keeping the channel dimension.
        sound = sound.mean(dim=0, keepdim=True)
        spec_ta = F.spectrogram(
            sound, pad=0, window=torch.hann_window(n_fft), n_fft=n_fft,
            hop_length=hop_length, win_length=n_fft, power=2, normalized=False)
        spec_lr = spec_ta.cpu().numpy().squeeze()
        # Perform MelScale with torchaudio and librosa
        melspec_ta = torchaudio.transforms.MelScale(n_mels=n_mels, sample_rate=sample_rate)(spec_ta)
        melspec_lr = librosa.feature.melspectrogram(
            S=spec_lr, sr=sample_rate, n_fft=n_fft, hop_length=hop_length,
            win_length=n_fft, center=True, window='hann', n_mels=n_mels, htk=True, norm=None)
        # Note: Using relaxed rtol instead of atol
        torch.testing.assert_allclose(melspec_ta, torch.from_numpy(melspec_lr[None, ...]), atol=1e-8, rtol=1e-3)
    def test_InverseMelScale(self):
        """InverseMelScale transform is comparable to that of librosa"""
        n_fft = 2048
        n_mels = 256
        n_stft = n_fft // 2 + 1
        hop_length = n_fft // 4
        # Prepare mel spectrogram input. We use torchaudio to compute one.
        sound, sample_rate = _load_audio_asset(
            'steam-train-whistle-daniel_simon.wav', offset=2**10, num_frames=2**14)
        sound = sound.mean(dim=0, keepdim=True)
        spec_orig = F.spectrogram(
            sound, pad=0, window=torch.hann_window(n_fft), n_fft=n_fft,
            hop_length=hop_length, win_length=n_fft, power=2, normalized=False)
        melspec_ta = torchaudio.transforms.MelScale(n_mels=n_mels, sample_rate=sample_rate)(spec_orig)
        melspec_lr = melspec_ta.cpu().numpy().squeeze()
        # Perform InverseMelScale with torch audio and librosa
        spec_ta = torchaudio.transforms.InverseMelScale(
            n_stft, n_mels=n_mels, sample_rate=sample_rate)(melspec_ta)
        spec_lr = librosa.feature.inverse.mel_to_stft(
            melspec_lr, sr=sample_rate, n_fft=n_fft, power=2.0, htk=True, norm=None)
        spec_lr = torch.from_numpy(spec_lr[None, ...])
        # Align dimensions
        # librosa does not return power spectrogram while torchaudio returns power spectrogram
        spec_orig = spec_orig.sqrt()
        spec_ta = spec_ta.sqrt()
        threshold = 2.0
        # This threshold was chosen empirically, based on the following observation
        #
        # torch.dist(spec_lr, spec_ta, p=float('inf'))
        # >>> tensor(1.9666)
        #
        # The spectrograms reconstructed by librosa and torchaudio are not comparable elementwise.
        # This is because they use different approximation algorithms and resulting values can live
        # in different magnitude. (although most of them are very close)
        # See
        # https://github.com/pytorch/audio/pull/366 for the discussion of the choice of algorithm
        # https://github.com/pytorch/audio/pull/448/files#r385747021 for the distribution of P-inf
        # distance over frequencies.
        torch.testing.assert_allclose(spec_ta, spec_lr, atol=threshold, rtol=1e-5)
        threshold = 1700.0
        # This threshold was chosen empirically, based on the following observations
        #
        # torch.dist(spec_orig, spec_ta, p=1)
        # >>> tensor(1644.3516)
        # torch.dist(spec_orig, spec_lr, p=1)
        # >>> tensor(1420.7103)
        # torch.dist(spec_lr, spec_ta, p=1)
        # >>> tensor(943.2759)
        assert torch.dist(spec_orig, spec_ta, p=1) < threshold
# Allow running this suite directly (python test_librosa_compatibility.py).
if __name__ == '__main__':
    unittest.main()
| 41.411429 | 118 | 0.647854 |
71c23fb66112c537460c6b6d208dbe089a22542d | 3,582 | py | Python | opentmi_client/api/event/Event.py | OpenTMI/opentmi-pyclient | 034c539d36fe13a2d6538ea421e4c01f00f5687d | [
"MIT"
] | null | null | null | opentmi_client/api/event/Event.py | OpenTMI/opentmi-pyclient | 034c539d36fe13a2d6538ea421e4c01f00f5687d | [
"MIT"
] | 36 | 2018-06-18T10:03:58.000Z | 2022-03-30T00:16:31.000Z | opentmi_client/api/event/Event.py | OpenTMI/opentmi-pyclient | 034c539d36fe13a2d6538ea421e4c01f00f5687d | [
"MIT"
] | 1 | 2019-04-17T08:49:24.000Z | 2019-04-17T08:49:24.000Z | """
OpenTMI module for Event
"""
from opentmi_client.utils.Base import BaseApi
from opentmi_client.utils.decorators import setter_rules
from opentmi_client.api.event.Priority import Priority
from opentmi_client.api.event.Ref import Ref
class Event(BaseApi):
    """
    Event Class.

    Thin data holder for OpenTMI events on top of BaseApi's get/set store.
    All setters are wrapped with the project's ``setter_rules`` decorator,
    which (per its usage here) enforces value types/enums; ``spare`` is the
    one deliberately unvalidated field.
    """
    def __init__(self):
        """
        Constructor for Event
        """
        super(Event, self).__init__()
        # Nested sub-documents with their own accessors.
        self.priority = Priority()
        self.ref = Ref()
    def __str__(self):
        # NOTE(review): BaseApi.get appears to accept a default value here;
        # falls back to "unknown" when no event id is set -- confirm.
        return "{}".format(self.get("id", "unknown"))
    @property
    def traceid(self):
        """
        Getter for traceid
        :return: String
        """
        return self.get("traceid")
    @traceid.setter
    @setter_rules()
    def traceid(self, value):
        """
        Setter for traceid
        :param value: string
        """
        self.set("traceid", value)
    @property
    def eid(self):
        """
        Getter for event id (stored under the "id" key)
        :return: String
        """
        return self.get("id")
    @eid.setter
    @setter_rules()
    def eid(self, value):
        """
        Setter for event id
        :param value: string
        """
        self.set("id", value)
    @property
    def msgid(self):
        """
        Getter for msgid
        :return: String
        """
        return self.get("msgid")
    @msgid.setter
    @setter_rules(
        enum=['ALLOCATED', 'RELEASED', 'ENTER_MAINTENANCE',
              'EXIT_MAINTENANCE', 'CREATED', 'DELETED', 'FLASHED'])
    def msgid(self, value):
        """
        Setter for msgid; value must be one of the enum strings above.
        :param value: string
        """
        self.set("msgid", value)
    @property
    def msg(self):
        """
        Getter for msg
        :return: String
        """
        return self.get("msg")
    @msg.setter
    @setter_rules()
    def msg(self, value):
        """
        Setter for msg
        :param value: string
        """
        self.set("msg", value)
    @property
    def tag(self):
        """
        Getter for tag
        :return: String
        """
        return self.get("tag")
    @tag.setter
    @setter_rules()
    def tag(self, value):
        """
        Setter for tag
        :param value: string
        """
        self.set("tag", value)
    @property
    def duration(self):
        """
        Getter for duration
        :return: float
        """
        return self.get("duration")
    @duration.setter
    @setter_rules(value_type=float)
    def duration(self, value):
        """
        Setter for duration; validated as float by setter_rules.
        :param value: float
        """
        self.set("duration", value)
    @property
    def spare(self):
        """
        Getter for spare
        :return: String
        """
        return self.get("spare")
    @spare.setter
    def spare(self, value):
        """
        Setter for spare. Intentionally not wrapped in setter_rules:
        accepts any value type.
        :param value: *
        """
        self.set("spare", value)
    @property
    def priority(self):
        """
        Getter for priority
        :return: Priority
        """
        return self.get("priority")
    @priority.setter
    @setter_rules(value_type=Priority)
    def priority(self, value):
        """
        Setter for priority
        :param value: Priority
        """
        self.set("priority", value)
    @property
    def ref(self):
        """
        Getter for ref
        :return: Ref
        """
        return self.get("ref")
    @ref.setter
    @setter_rules(value_type=Ref)
    def ref(self, value):
        """
        Setter for ref
        :param value: Ref
        """
        self.set("ref", value)
| 20.011173 | 67 | 0.507259 |
0b56825f95d65b1e0278a1819cae0c9fe7baee3b | 76 | py | Python | test/really_simple_test.py | Ark-kun/strip-hints | aa2e217506f16999872439b03aaadf9a517e7389 | [
"MIT"
] | 29 | 2017-09-29T15:51:51.000Z | 2022-03-18T16:47:04.000Z | test/really_simple_test.py | Ark-kun/strip-hints | aa2e217506f16999872439b03aaadf9a517e7389 | [
"MIT"
] | 16 | 2018-06-18T19:54:36.000Z | 2021-12-04T01:37:52.000Z | test/really_simple_test.py | Ark-kun/strip-hints | aa2e217506f16999872439b03aaadf9a517e7389 | [
"MIT"
] | 4 | 2019-06-17T19:32:50.000Z | 2022-02-08T13:11:52.000Z |
# Fixture from the strip-hints test suite: exercises hint stripping across a
# backslash-continued "def" header (return annotation and ":" on separate
# physical lines) plus default-valued and variable annotations.
def simp(x, y) -> int \
    :
    def q(x: int=4):
        z: list=[3]
| 8.444444 | 23 | 0.355263 |
a4f27e0d4290ba6755a747beb08abc3695fc25e9 | 15,385 | py | Python | wtServer.py | jimboca/udi-WirelessSensorTags-poly | 0232c5df5b957b7675141965ebb732aa745a5dd5 | [
"MIT"
] | null | null | null | wtServer.py | jimboca/udi-WirelessSensorTags-poly | 0232c5df5b957b7675141965ebb732aa745a5dd5 | [
"MIT"
] | 25 | 2022-02-13T17:00:59.000Z | 2022-03-22T14:22:48.000Z | wtServer.py | jimboca/udi-WirelessSensorTags-poly | 0232c5df5b957b7675141965ebb732aa745a5dd5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
Simple GET handler with BaseHTTPServer
"""
from http.server import HTTPServer,BaseHTTPRequestHandler
from urllib import parse
from urllib.parse import parse_qsl
import socket, threading, sys, requests, json, time
import netifaces as ni
class wtHandler(BaseHTTPRequestHandler):
    """GET handler that forwards requests to a parent controller.

    The ``parent`` class attribute is injected by wtREST.start() before the
    server is constructed; it must expose get_handler(path, query) returning
    a dict with 'code' and 'message' keys, and a ``logger``.
    """
    def do_GET(self):
        parsed_path = parse.urlparse(self.path)
        # Flatten the query string into a plain dict (last value wins).
        self.query = dict(parse_qsl(parsed_path.query))
        if 'debug' in self.query:
            # Echo full request/server diagnostics back to the caller.
            message_parts = [
                'CLIENT VALUES:',
                'client_address={} ({})'.format(
                    self.client_address,
                    self.address_string()),
                'command={}'.format(self.command),
                'path={}'.format(self.path),
                'real path={}'.format(parsed_path.path),
                'query={}'.format(parsed_path.query),
                'query_dict={}'.format(self.query),
                'request_version={}'.format(self.request_version),
                '',
                'SERVER VALUES:',
                'server_version={}'.format(self.server_version),
                'sys_version={}'.format(self.sys_version),
                'protocol_version={}'.format(self.protocol_version),
                '',
                'HEADERS RECEIVED:',
            ]
            for name, value in sorted(self.headers.items()):
                message_parts.append(
                    '{}={}'.format(name, value.rstrip())
                )
        else:
            message_parts = ["Received: {0} {1}. ".format(parsed_path.path,self.query)]
        # We send back a response quickly cause the TAG Manager doesn't wait very long?
        hrt = self.parent.get_handler(parsed_path.path,self.query)
        message_parts.append("Code: {0}".format(int(hrt['code'])))
        message_parts.append(hrt['message'])
        self.send_response(int(hrt['code']))
        self.send_header('Content-Type',
                         'text/plain; charset=utf-8')
        self.end_headers()
        # Trailing '' yields a final CRLF at the end of the body.
        message_parts.append('')
        message = '\r\n'.join(message_parts)
        message += '\r\n'
        self.wfile.write(message.encode('utf-8'))
    def log_message(self, fmt, *args):
        # Stop log messages going to stdout; route through the parent logger.
        self.parent.logger.info('wtHandler:log_message' + fmt % args)
class wtREST():
    """Threaded HTTP server used to receive callback GET requests.

    ``start()`` binds to an OS-assigned port on the host's outbound IP,
    points the wtHandler class back at this instance, and serves requests
    on a daemon thread. Incoming requests are forwarded to the ``parent``
    object's get_handler(path, query).
    """

    def __init__(self, parent, logger):
        self.parent = parent
        self.logger = logger

    def start(self):
        """Start the HTTP server thread. Returns True on success."""
        port = 0
        self.myip = self.get_network_ip_rhost('8.8.8.8')
        if self.myip is False:
            self.logger.error("wtREST: Can not start on IP={0}".format(self.myip))
            return False
        self.logger.info("wtREST: Running on IP={0}".format(self.myip))
        self.address = (self.myip, port)  # let the kernel give us a port
        self.logger.debug("wtREST: address={0}".format(self.address))
        # Get a handler and set parent to myself, so we can process the requests.
        eh = wtHandler
        eh.parent = self
        self.server = HTTPServer(self.address, wtHandler)
        self.url = 'http://{0}:{1}'.format(self.server.server_address[0], self.server.server_address[1])
        self.listen_port = self.server.server_address[1]
        self.logger.info("wtREST: Running on: {0}".format(self.url))
        self.thread = threading.Thread(target=self.server.serve_forever)
        # Need this so the thread will die when the main process dies
        self.thread.daemon = True
        self.thread.start()
        return True

    def stop(self):
        """Stop serve_forever(); safe to call from another thread."""
        return self.server.shutdown()

    def get_handler(self, path, query):
        """Forward an incoming request to the owning parent object."""
        return self.parent.get_handler(path, query)

    def get_network_ip_rhost(self, rhost):
        """Determine the local IP that routes to rhost, or False on failure.

        A UDP connect() never sends packets; it only selects the local
        interface, whose address we read back from getsockname().
        """
        self.logger.info("wtREST:get_network_ip: {0}".format(rhost))
        # Create the socket outside the try block: if construction itself
        # failed inside it, the finally clause would hit an unbound 's'.
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        try:
            s.connect((rhost, 80))
            rt = s.getsockname()[0]
        except Exception as err:
            self.logger.error('wtREST:get_network_id: failed: {0}'.format(err), exc_info=True)
            rt = False
        finally:
            s.close()
        self.logger.info("wtREST:get_network_ip: Returning {0}".format(rt))
        return rt

    def get_network_ip(self):
        """Fallback: scan interfaces for a non-loopback IPv4 address.

        Fixed from the original, which referenced undefined names ``iface``
        and ``rt`` and raised NameError on every call. Returns the first
        non-127.0.0.1 IPv4 address found, or False.
        """
        for iface in ni.interfaces():
            try:
                ifaddr = ni.ifaddresses(iface)[ni.AF_INET][0]
            except (KeyError, ValueError):
                # Interface has no IPv4 address configured.
                continue
            addr = ifaddr.get('addr')
            if addr and addr != '127.0.0.1':
                self.logger.info("wtREST:get_network_ip: Got {0}".format(addr))
                return addr
        self.logger.info("wtREST:get_network_ip: Failed")
        return False
class wtServer():
    """Top-level client for the wirelesstag.net cloud API.

    Owns a local wtREST HTTP listener (used to receive the OAuth2
    redirect carrying the authorization code) and a wtSession that
    performs the authenticated HTTPS calls.  ``ghandler`` is an optional
    callback invoked for any request the server itself does not handle.
    """
    def __init__(self,logger,client_id,client_secret,ghandler=None,oauth2_code=False):
        # l_name prefixes log lines emitted via wtSession's helpers.
        self.l_name = "wtServer"
        self.logger = logger
        self.client_id = client_id
        self.client_secret = client_secret
        self.ghandler=ghandler
        # False until an OAuth2 authorization code is known.
        self.oauth2_code = oauth2_code
        # False until pull_access_token() succeeds.
        self.access_token = False
        self.token_type = None
    def start(self):
        """Start the REST listener and the API session.

        Returns True on success, False if the listener failed to start.
        If an OAuth2 code was supplied up front, immediately exchanges
        it for an access token.
        """
        self.rest = wtREST(self,self.logger)
        self.st = self.rest.start()
        if self.st is False:
            self.l_error('wtServer:start','REST server not started {}'.format(self.st))
            return False
        self.listen_url = self.rest.url
        self.listen_port = self.rest.listen_port
        self.url = self.rest.url
        # Start the session for talking to wirelesstag server
        self.session = wtSession(self,self.logger,self)
        if self.oauth2_code != False:
            self.pull_access_token()
        self._slock = False
        return True
    def stop(self):
        """Shut down the local REST listener."""
        self.rest.stop()
    def get_handler(self,command,params):
        """
        Process an incoming HTTP GET forwarded by the REST listener.

        Returns a dict with 'code' (HTTP status) and 'message' (body text).
        """
        self.l_debug('get_handler','command={}'.format(command))
        # This is from the oauth2 redirect with our code.
        if command == "/code":
            code = 200
            message = "\nGot code {}, asking for access token\n".format(params['code'])
            self.oauth2_code = params['code']
            self.l_info('get_handler','Got code: {}'.format(self.oauth2_code))
            tr = self.pull_access_token()
            if tr == False:
                code = 500
                message += "ERROR: Unable to get access token from code, see log"
            else:
                message += "SUCCESS, received our token, will save in Polyglot database for the future"
                # Let the owner persist the new code (e.g. to a database).
                if self.ghandler is not None:
                    self.ghandler(command,{'oauth2_code': self.oauth2_code})
        elif command == "/favicon.ico":
            # Ignore this, where does it come from?
            code = 200
            message = "Ignored {0}".format(command)
        else:
            # Anything else is delegated to the optional generic handler.
            if self.ghandler is None:
                code = 500
                message = "Unknown command, no ghandler specified '{}'".format(command)
            else:
                ret = self.ghandler(command,params)
                if ret:
                    code = 200
                    message = 'Command {0} success'.format(command)
                else:
                    code = 500
                    message = 'Command {0} failed'.format(command)
        if code == 200:
            self.l_debug('get_handler','code={0} message={1}'.format(code,message))
        else:
            self.l_error('get_handler','code={0} message={1}'.format(code,message))
        return { 'code': code, 'message': message }
    def pull_access_token(self,code=None):
        """Exchange the OAuth2 authorization code for an access token.

        Stores access_token/token_type on self.  Returns the raw response
        dict, or False (also stored as access_token) on failure.
        """
        if code is not None:
            self.oauth2_code = code
        aret = self.session.post('oauth2/access_token.aspx',
                                 {
                                     'client_id': self.client_id,
                                     'client_secret': self.client_secret,
                                     'code': self.oauth2_code
                                 }, use_token=False)
        # This gives us:
        # {'token_type': 'Bearer', 'access_token': '...', 'expires_in': 9999999}
        if aret == False:
            self.l_error('pull_access_token','Failed')
            self.access_token = aret
            return aret
        self.access_token = aret['access_token']
        self.token_type = aret['token_type']
        self.l_debug('start',"token_type={} access_token={}".format(self.token_type,self.access_token))
    def get_access_token(self):
        # False when no token has been obtained yet.
        return self.access_token
    def get_token_type(self):
        return self.token_type
    # Logging helpers: prefix each message with the given context name.
    def l_info(self, name, string):
        self.logger.info("%s: %s" % (name,string))
    def l_error(self, name, string, exc_info=False):
        self.logger.error("%s: %s" % (name,string), exc_info=exc_info)
    def l_warning(self, name, string):
        self.logger.warning("%s: %s" % (name,string))
    def l_debug(self, name, string):
        self.logger.debug("%s: %s" % (name,string))
    # These match the names used in the API
    # http://wirelesstag.net/ethAccount.asmx?op=IsSignedIn
    def IsSignedIn(self):
        return self.session.api_post_d('ethAccount.asmx/IsSignedIn',{})
    # These match the names used in the API:
    # http://wirelesstag.net/ethAccount.asmx?op=GetTagManagers
    def GetTagManagers(self):
        return self.session.api_post_d('ethAccount.asmx/GetTagManagers',{})
    # http://wirelesstag.net/ethClient.asmx?op=GetServerTime
    def GetServerTime(self):
        return self.session.api_post_d('ethClient.asmx/GetServerTime',{})
class wtSession():
    """HTTP session wrapper for the wirelesstag.net (mytaglist.com) API.

    Holds a requests.Session, injects the Bearer token obtained by the
    owning wtServer, and optionally pins requests to a specific tag
    manager via the X-Set-Mac header.
    """
    def __init__(self,parent,logger,wtServer,tmgr_mac=None):
        self.parent = parent
        self.logger = logger
        self.wtServer = wtServer
        self.tmgr_mac = tmgr_mac
        # True once SelectTagManager has succeeded for tmgr_mac.
        self.tmgr_mac_st = False
        self.session = requests.Session()
        self.select_tag_manager()
    def select_tag_manager(self,force=False):
        """Select the configured tag manager on the server side.

        No-op (returns True) when no tag-manager MAC was configured;
        otherwise returns the cached/refreshed selection status.
        """
        if self.tmgr_mac is None:
            return True
        if not self.tmgr_mac_st or force:
            mgd = self.api_post_dn('ethAccount.asmx/SelectTagManager',{'mac':self.tmgr_mac})
            self.tmgr_mac_st = mgd['st']
            # Route all subsequent requests to this tag manager.
            self.session.headers.update({'X-Set-Mac': self.tmgr_mac})
        return self.tmgr_mac_st
    def post(self,path,payload,use_token=True):
        """POST *payload* to API *path* at mytaglist.com.

        Returns the decoded JSON response on HTTP 200, or False on any
        connection, authorization, decode or HTTP error.
        """
        url = "https://www.mytaglist.com/{}".format(path)
        self.l_debug('post',"Sending: url={0} payload={1}".format(url,payload))
        if use_token:
            access_token = self.wtServer.get_access_token()
            if access_token is False:
                self.l_error('post',"No authorization for url={0} payload={1}".format(url,payload))
                return False
            token_type = self.wtServer.get_token_type()
            self.session.headers.update(
                {
                    "Authorization": "{0} {1}".format(token_type,access_token),
                    "Content-Type": "application/json"
                }
            )
        else:
            # NOTE(review): update({}) does NOT clear previously set
            # headers; presumably this was meant to drop them -- confirm.
            self.session.headers.update({})
        try:
            response = self.session.post(
                url,
                data=payload,
                timeout=60
            )
        # This is supposed to catch all request exceptions.
        except requests.exceptions.RequestException as e:
            self.l_error('post',"Connection error for %s: %s" % (url, e))
            return False
        self.l_debug('post',' Got: code=%s' % (response.status_code))
        if response.status_code == 200:
            #self.l_debug('http_post',"Got: text=%s" % response.text)
            try:
                d = json.loads(response.text)
            except (Exception) as err:
                self.l_error('http_post','Failed to convert to json {0}: {1}'.format(response.text,err), exc_info=True)
                return False
            return d
        elif response.status_code == 400:
            self.l_error('post',"Bad request: %s" % (url) )
        elif response.status_code == 404:
            self.l_error('post',"Not Found: %s" % (url) )
        elif response.status_code == 401:
            # Authentication error
            self.l_error('post',
                         "Failed to authenticate, please check your username and password")
        else:
            self.l_error('post',"Unknown response %s: %s %s" % (response.status_code, url, response.text) )
        return False
    def api_post_d(self,path,payload,dump=True):
        """
        Call the API path with payload, expecting data in the 'd' entry.

        Ensures the tag manager is selected first, then returns a dict
        with 'st' (status) and, on success, 'result'.
        """
        # If we failed to set tag manager, try again
        if not self.select_tag_manager(): return { 'st': False }
        return self.api_post_dn(path,payload,dump)
    def api_post_dn(self,path,payload,dump=True):
        """
        Just do the post, don't select tag manager.

        When *dump* is True the payload dict is JSON-encoded first.
        Returns {'st': False} on failure or a missing 'd' key, otherwise
        {'st': True, 'result': <d>}.
        """
        if dump:
            payload = json.dumps(payload)
        aret = self.post(path,payload)
        self.l_debug('post','path={0} got={1}'.format(path,aret))
        if aret == False or not 'd' in aret:
            mret = { 'st': False }
        else:
            mret = { 'st': True, 'result': aret['d'] }
        self.l_debug('post','ret={0}'.format(mret))
        return mret
    # Logging helpers: prefix with the parent's l_name and context name.
    def l_info(self, name, string):
        self.logger.info("%s:%s: %s" % (self.parent.l_name,name,string))
    def l_error(self, name, string, exc_info=False):
        self.logger.error("%s:%s: %s" % (self.parent.l_name,name,string), exc_info=exc_info)
    def l_warning(self, name, string):
        self.logger.warning("%s:%s: %s" % (self.parent.l_name,name,string))
    def l_debug(self, name, string):
        self.logger.debug("%s:%s: %s" % (self.parent.l_name,name,string))
def my_ghandler(command, params):
    """Demo GET handler for the __main__ test harness; accepts every request."""
    return True
if __name__ == '__main__':
    # Manual test harness: start the local REST listener, exchange a
    # hard-coded OAuth2 code for a token, then idle until interrupted.
    # BUGFIX: sys was used below (sys.exit) but never imported.
    import logging, time, sys
    logging.basicConfig(
        level=10,
        format='%(levelname)s:\t%(name)s\t%(message)s'
    )
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.DEBUG)
    # Sample OAuth2 application credentials for manual testing.
    # NOTE(review): real secrets should not be hard-coded in source.
    client_id = "3b08b242-f0f8-41c0-ba29-6b0478cd0b77"
    client_secret = "0b947853-1676-4a63-a384-72769c88f3b1"
    code = "d967868a-144e-49ed-921f-c27b65dda06a"
    obj = wtServer(logger,client_id,client_secret,ghandler=my_ghandler)
    try:
        obj.start()
    except KeyboardInterrupt:
        logger.info('Exiting from keyboard interupt')
        sys.exit()
    # Manually get the access token
    obj.pull_access_token(code)
    mgrs = obj.GetTagManagers()
    if mgrs['st']:
        # NOTE(review): wtServer defines neither SelectTagManager() nor
        # GetTagList(); these look like they belong on wtSession and will
        # raise AttributeError as written -- confirm intent.
        obj.SelectTagManager(mgrs['result'][0]['mac'])
        obj.GetTagList()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        pass
    except Exception as err:
        logger.error('wtREST: failed: {0}'.format(err), exc_info=True)
    sys.exit()
| 38.4625 | 119 | 0.578941 |
d73a51efc45fefc2e09c5662c7c6238c54372085 | 2,632 | py | Python | experiments/shakespeare_experiments/shakespeare_fedavg_hs.py | M4rukku/impact_of_non_iid_data_in_federated_learning | c818db03699c82e42217d56f8ddd4cc2081c8bb1 | [
"MIT"
] | null | null | null | experiments/shakespeare_experiments/shakespeare_fedavg_hs.py | M4rukku/impact_of_non_iid_data_in_federated_learning | c818db03699c82e42217d56f8ddd4cc2081c8bb1 | [
"MIT"
] | null | null | null | experiments/shakespeare_experiments/shakespeare_fedavg_hs.py | M4rukku/impact_of_non_iid_data_in_federated_learning | c818db03699c82e42217d56f8ddd4cc2081c8bb1 | [
"MIT"
] | null | null | null | import functools
import math
from pathlib import Path
from sources.simulators.serial_execution_simulator import \
SerialExecutionSimulator
from experiments.shakespeare_experiments.shakespeare_metadata_providers import \
SHAKESPEARE_BASE_METADATA_HYPERPARAMETER_SEARCH_PROVIDER
from sources.datasets.shakespeare.shakespeare_client_dataset_factory import \
ShakespeareClientDatasetFactory
from sources.datasets.shakespeare.shakespeare_client_dataset_processor import \
ShakespeareClientDatasetProcessor
from sources.models.shakespeare.shakespeare_model_template import ShakespeareKerasModelTemplate
from sources.dataset_creation_utils.get_iid_dataset_utils import get_default_iid_dataset
from sources.metrics.central_evaluation_keras import \
create_central_evaluation_function_from_dataset_processor_keras
from sources.flwr.flwr_strategies.full_evaluation_strategy_providers import \
full_eval_fed_avg_strategy_provider
from sources.experiments.simulate_experiment import SimulateExperiment
from sources.utils.set_random_seeds import DEFAULT_SEED
def e(exp):
    """Return 10 raised to the power *exp*, as a float."""
    return 10.0 ** exp
def shakespeare_fedavg():
    """Run a FedAvg local-learning-rate sweep on the Shakespeare dataset.

    Builds the Keras model template and client dataset factory, creates a
    central evaluation function over the default IID Shakespeare dataset,
    then launches one simulated FedAvg experiment per candidate learning
    rate (2 runs each, serial execution).
    """
    # Repository root; data/ lives alongside the experiments package.
    base_dir = Path(__file__).parent.parent.parent
    root_data_dir = base_dir / "data"
    model_template = ShakespeareKerasModelTemplate(DEFAULT_SEED)
    dataset_factory = ShakespeareClientDatasetFactory(root_data_dir)
    central_dataset = get_default_iid_dataset("shakespeare")
    # Central (server-side) evaluation callback used by the strategy.
    eval_fn = create_central_evaluation_function_from_dataset_processor_keras(
        model_template,
        central_dataset,
        ShakespeareClientDatasetProcessor())
    # All runs start from the same initial weights for comparability.
    initial_model = model_template.get_model()
    initial_parameters = initial_model.get_weights()
    # Local learning rates to sweep: 1e-3, 1e-2, 1e-1, 1e0.
    local_learning_rates = [e(-3.0), e(-2.0), e(-1.0), e(0.0)]
    fed_avg = functools.partial(
        full_eval_fed_avg_strategy_provider,
        eval_fn,
        initial_parameters=initial_parameters
    )
    SimulateExperiment.start_experiment(
        f"Shakespeare_Fedavg",
        model_template,
        dataset_factory,
        strategy_provider=None,
        strategy_provider_list=[fed_avg for _ in local_learning_rates],
        optimizer_list=[model_template.get_optimizer(lr) for lr in local_learning_rates],
        experiment_metadata_list=[SHAKESPEARE_BASE_METADATA_HYPERPARAMETER_SEARCH_PROVIDER() for
                                  _ in local_learning_rates],
        base_dir=base_dir,
        runs_per_experiment=2,
        centralised_evaluation=True,
        aggregated_evaluation=True,
        rounds_between_centralised_evaluations=2,
        simulator_provider=SerialExecutionSimulator
    )
| 37.070423 | 96 | 0.789894 |
6b8466ed14255cedad0298cdedffb7eec0da4db7 | 2,416 | py | Python | test.py | ygs1985ygs/pyalgotrade3 | 5481c214891b7bd8ca76fee4e3f5cfcbb9f9de4e | [
"Apache-2.0"
] | 23 | 2017-06-07T04:36:38.000Z | 2021-11-09T10:39:54.000Z | test.py | ygs1985ygs/pyalgotrade3 | 5481c214891b7bd8ca76fee4e3f5cfcbb9f9de4e | [
"Apache-2.0"
] | 1 | 2018-03-29T16:25:54.000Z | 2018-03-29T16:25:54.000Z | test.py | ygs1985ygs/pyalgotrade3 | 5481c214891b7bd8ca76fee4e3f5cfcbb9f9de4e | [
"Apache-2.0"
] | 12 | 2017-03-29T08:35:32.000Z | 2020-02-08T04:01:03.000Z | from pyalgotrade import strategy
from pyalgotrade.barfeed import yahoofeed
from pyalgotrade.technical import ma
class MyStrategy(strategy.BacktestingStrategy):
def __init__(self, feed, instrument, smaPeriod):
super(MyStrategy, self).__init__(feed, 1000)
self.__position = None
self.__instrument = instrument
# We'll use adjusted close values instead of regular close values.
self.setUseAdjustedValues(True)
self.__sma = ma.SMA(feed[instrument].getPriceDataSeries(), smaPeriod)
def onEnterOk(self, position):
execInfo = position.getEntryOrder().getExecutionInfo()
self.info("BUY at $%.2f" % (execInfo.getPrice()))
def onEnterCanceled(self, position):
self.__position = None
def onExitOk(self, position):
execInfo = position.getExitOrder().getExecutionInfo()
self.info("SELL at $%.2f" % (execInfo.getPrice()))
self.__position = None
def onExitCanceled(self, position):
# If the exit was canceled, re-submit it.
self.__position.exitMarket()
def onBars(self, bars):
# Wait for enough bars to be available to calculate a SMA.
if self.__sma[-1] is None:
return
bar = bars[self.__instrument]
# If a position was not opened, check if we should enter a long position.
if self.__position is None:
if bar.getPrice() > self.__sma[-1]:
# Enter a buy market order for 10 shares. The order is good till canceled.
self.__position = self.enterLong(self.__instrument, 10, True)
# Check if we have to exit the position.
elif bar.getPrice() < self.__sma[-1] and not self.__position.exitActive():
self.__position.exitMarket()
def run_strategy(smaPeriod):
    """Backtest MyStrategy over the bundled ORCL-2000 CSV and print the result."""
    # Load the historical bars from the Yahoo-format CSV file.
    bar_feed = yahoofeed.Feed()
    bar_feed.addBarsFromCSV("orcl", "orcl-2000.csv")
    # Run the strategy against the feed and report final equity.
    strat = MyStrategy(bar_feed, "orcl", smaPeriod)
    strat.run()
    final_equity = strat.getBroker().getEquity()
    print("Final portfolio value: $%.2f" % final_equity)
# run_strategy(15)
import datetime
# Date range for the historical download URL built below.
begin=datetime.date(2008, 1, 1)
end=datetime.date(2015,12,31)
# Build a Yahoo Finance "ichart" CSV download URL for daily ORCL bars.
# NOTE(review): ichart.finance.yahoo.com has been discontinued, and its
# a/d month parameters were historically zero-based -- confirm before reuse.
url = "http://ichart.finance.yahoo.com/table.csv?s=%s&a=%d&b=%d&c=%d&d=%d&e=%d&f=%d&g=%s&ignore=.csv" % ('orcl', begin.month, begin.day, begin.year, end.month, end.day, end.year, 'd')
print(url) | 38.967742 | 183 | 0.662252 |
afaf2ff40dc4ae1f06e92e447db61ea01724ac36 | 6,183 | py | Python | tests/contrib/hooks/test_azure_container_instance_hook.py | FlyrInc/airflow-1 | 74b22337b45a1eb25585d52e35694e6b0eb81f03 | [
"Apache-2.0"
] | 2 | 2019-01-26T06:04:11.000Z | 2019-01-26T12:54:21.000Z | tests/contrib/hooks/test_azure_container_instance_hook.py | FlyrInc/airflow-1 | 74b22337b45a1eb25585d52e35694e6b0eb81f03 | [
"Apache-2.0"
] | 6 | 2020-07-07T20:21:26.000Z | 2021-09-29T17:29:29.000Z | tests/contrib/hooks/test_azure_container_instance_hook.py | FlyrInc/airflow-1 | 74b22337b45a1eb25585d52e35694e6b0eb81f03 | [
"Apache-2.0"
] | 2 | 2020-04-24T10:51:17.000Z | 2020-05-26T01:50:29.000Z | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import unittest
from collections import namedtuple
from mock import patch
from airflow import configuration
from airflow.models.connection import Connection
from airflow.contrib.hooks.azure_container_instance_hook import AzureContainerInstanceHook
from airflow.utils import db
from azure.mgmt.containerinstance.models import (Container,
ContainerGroup,
ContainerState,
Event,
Logs,
ResourceRequests,
ResourceRequirements)
class TestAzureContainerInstanceHook(unittest.TestCase):
    """Unit tests for AzureContainerInstanceHook with the Azure SDK mocked out."""
    def setUp(self):
        # Register a fake Azure connection, then construct the hook with
        # the service-principal credential and management client patched
        # so no real Azure calls are made.
        configuration.load_test_config()
        db.merge_conn(
            Connection(
                conn_id='azure_container_instance_test',
                conn_type='azure_container_instances',
                login='login',
                password='key',
                extra=json.dumps({'tenantId': 'tenant_id',
                                  'subscriptionId': 'subscription_id'})
            )
        )
        # Shared resource spec used by the ContainerGroup fixtures below.
        self.resources = ResourceRequirements(requests=ResourceRequests(
            memory_in_gb='4',
            cpu='1'))
        with patch('azure.common.credentials.ServicePrincipalCredentials.__init__',
                   autospec=True, return_value=None):
            with patch('azure.mgmt.containerinstance.ContainerInstanceManagementClient'):
                self.testHook = AzureContainerInstanceHook(conn_id='azure_container_instance_test')
    @patch('azure.mgmt.containerinstance.models.ContainerGroup')
    @patch('azure.mgmt.containerinstance.operations.ContainerGroupsOperations.create_or_update')
    def test_create_or_update(self, create_or_update_mock, container_group_mock):
        # The hook should pass resource group, name and group straight through.
        self.testHook.create_or_update('resource_group', 'aci-test', container_group_mock)
        create_or_update_mock.assert_called_with('resource_group', 'aci-test', container_group_mock)
    @patch('airflow.contrib.hooks.azure_container_instance_hook'
           '.AzureContainerInstanceHook._get_instance_view')
    def test_get_state_exitcode_details(self, get_instance_view_mock):
        # The hook should unpack (state, exit_code, detail_status) from
        # the instance view's current_state.
        expected_state = ContainerState(state='testing', exit_code=1, detail_status='details')
        instance_view = {"current_state": expected_state}
        named_instance = namedtuple("InstanceView", instance_view.keys())(*instance_view.values())
        get_instance_view_mock.return_value = named_instance
        state, exit_code, details = self.testHook.get_state_exitcode_details('resource-group', 'test')
        self.assertEqual(state, expected_state.state)
        self.assertEqual(exit_code, expected_state.exit_code)
        self.assertEqual(details, expected_state.detail_status)
    @patch('airflow.contrib.hooks.azure_container_instance_hook'
           '.AzureContainerInstanceHook._get_instance_view')
    def test_get_messages(self, get_instance_view_mock):
        # Event messages from the instance view should be returned in order.
        expected_messages = ['test1', 'test2']
        events = [Event(message=m) for m in expected_messages]
        instance_view = {"events": events}
        named_instance = namedtuple("Events", instance_view.keys())(*instance_view.values())
        get_instance_view_mock.return_value = named_instance
        messages = self.testHook.get_messages('resource-group', 'test')
        self.assertSequenceEqual(messages, expected_messages)
    @patch('azure.mgmt.containerinstance.operations.ContainerOperations.list_logs')
    def test_get_logs(self, list_logs_mock):
        # The hook should split the raw log content back into lines.
        expected_messages = ['log line 1\n', 'log line 2\n', 'log line 3\n']
        logs = Logs(content=''.join(expected_messages))
        list_logs_mock.return_value = logs
        logs = self.testHook.get_logs('resource_group', 'name', 'name')
        self.assertSequenceEqual(logs, expected_messages)
    @patch('azure.mgmt.containerinstance.operations.ContainerGroupsOperations.delete')
    def test_delete(self, delete_mock):
        self.testHook.delete('resource_group', 'aci-test')
        delete_mock.assert_called_with('resource_group', 'aci-test')
    @patch('azure.mgmt.containerinstance.operations.ContainerGroupsOperations.list_by_resource_group')
    def test_exists_with_existing(self, list_mock):
        # NOTE(review): despite the method name this asserts False -- the
        # listed ContainerGroup has no name set, so exists('test', 'test1')
        # cannot match it; presumably the group should be named 'test1'
        # and this should be assertTrue. Confirm against the hook.
        list_mock.return_value = [ContainerGroup(os_type='Linux',
                                                 containers=[Container(name='test1',
                                                                       image='hello-world',
                                                                       resources=self.resources)])]
        self.assertFalse(self.testHook.exists('test', 'test1'))
    @patch('azure.mgmt.containerinstance.operations.ContainerGroupsOperations.list_by_resource_group')
    def test_exists_with_not_existing(self, list_mock):
        # No group named 'not found' exists, so exists() must be False.
        list_mock.return_value = [ContainerGroup(os_type='Linux',
                                                 containers=[Container(name='test1',
                                                                       image='hello-world',
                                                                       resources=self.resources)])]
        self.assertFalse(self.testHook.exists('test', 'not found'))
| 49.464 | 102 | 0.652434 |
ad466882c542ecefeda831dfda72cf230172ee42 | 65 | py | Python | pychord/constants/__init__.py | hejops/pychord | ae5d0585c5df95c98517c1fb5351b8b2815c8b8c | [
"MIT"
] | 1 | 2021-10-08T00:18:38.000Z | 2021-10-08T00:18:38.000Z | pychord/constants/__init__.py | hejops/pychord | ae5d0585c5df95c98517c1fb5351b8b2815c8b8c | [
"MIT"
] | null | null | null | pychord/constants/__init__.py | hejops/pychord | ae5d0585c5df95c98517c1fb5351b8b2815c8b8c | [
"MIT"
] | null | null | null | from .scales import NOTE_VAL_DICT, VAL_NOTE_DICT, SCALE_VAL_DICT
| 32.5 | 64 | 0.861538 |
3fee8f27ad05c3a2477364cdf051b81be65e05f7 | 3,177 | py | Python | src/primaires/joueur/masques/encodage/__init__.py | stormi/tsunami | bdc853229834b52b2ee8ed54a3161a1a3133d926 | [
"BSD-3-Clause"
] | null | null | null | src/primaires/joueur/masques/encodage/__init__.py | stormi/tsunami | bdc853229834b52b2ee8ed54a3161a1a3133d926 | [
"BSD-3-Clause"
] | null | null | null | src/primaires/joueur/masques/encodage/__init__.py | stormi/tsunami | bdc853229834b52b2ee8ed54a3161a1a3133d926 | [
"BSD-3-Clause"
] | null | null | null | # -*-coding:Utf-8 -*
# Copyright (c) 2010 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant le masque <nom_encodage>."""
from primaires.interpreteur.masque.masque import Masque
from primaires.interpreteur.masque.fonctions import *
from primaires.interpreteur.masque.exceptions.erreur_validation \
import ErreurValidation
from reseau.connexions.client_connecte import ENCODAGES
class Encodage(Masque):
    """Mask <nom_encodage>.

    Expects the name of an available encoding as its parameter.
    """

    nom = "nom_encodage"
    nom_complet = "encodage disponible"

    def init(self):
        """Initialize the mask's attributes."""
        self.encodage = ""

    def repartir(self, personnage, masques, commande):
        """Consume the encoding name token from the command.

        Raises ErreurValidation when no token is present; otherwise
        stores the lowercased first word in a_interpreter and removes it
        from the command buffer.
        """
        encodage = liste_vers_chaine(commande)

        if not encodage:
            raise ErreurValidation( \
                "Précisez un encodage disponible.")

        encodage = encodage.split(" ")[0].lower()
        self.a_interpreter = encodage
        commande[:] = commande[len(encodage):]
        masques.append(self)
        return True

    def valider(self, personnage, dic_masques):
        """Validate the mask: the name must be 'aucun' or a known encoding."""
        Masque.valider(self, personnage, dic_masques)
        encodage = self.a_interpreter
        # "aucun" (none) is accepted in addition to the supported encodings.
        encodages = ["aucun"] + ENCODAGES
        msg_enc = "\n\nEncodages disponibles : |ent|" + "|ff|, |ent|".join(
                encodages) + "."
        if not encodage in encodages:
            raise ErreurValidation(
                "|err|L'encodage précisé n'est pas disponible.|ff|" + msg_enc)

        self.encodage = encodage
        return True
| 38.743902 | 79 | 0.696569 |
d42f95971fc1011f089b30b114160d78763e0e10 | 5,627 | py | Python | hs_geographic_feature_resource/forms.py | hydroshare/hydroshare | bf9888bbe61507aff070b1dfcec2fdec1921468d | [
"BSD-3-Clause"
] | 178 | 2015-01-08T23:03:36.000Z | 2022-03-03T13:56:45.000Z | hs_geographic_feature_resource/forms.py | hydroshare/hydroshare | bf9888bbe61507aff070b1dfcec2fdec1921468d | [
"BSD-3-Clause"
] | 4,125 | 2015-01-01T14:26:15.000Z | 2022-03-31T16:38:55.000Z | hs_geographic_feature_resource/forms.py | hydroshare/hydroshare | bf9888bbe61507aff070b1dfcec2fdec1921468d | [
"BSD-3-Clause"
] | 53 | 2015-03-15T17:56:51.000Z | 2022-03-17T00:32:16.000Z | from django import forms
from crispy_forms.layout import Layout
from hs_core.forms import BaseFormHelper, get_crispy_form_fields
class OriginalCoverageFormHelper(BaseFormHelper):
    """Crispy-forms helper laying out the spatial-reference (original coverage) fields."""

    # Display order of the fields inside the rendered FieldSet.
    _FIELD_ORDER = ['projection_name', 'datum', 'unit', 'projection_string',
                    'northlimit', 'eastlimit', 'southlimit', 'westlimit']

    def __init__(self, allow_edit=True, res_short_id=None,
                 element_id=None, element_name=None, *args, **kwargs):
        is_file_type = kwargs.pop('file_type', False)
        crispy_fields = get_crispy_form_fields(self._FIELD_ORDER, file_type=is_file_type)
        super(OriginalCoverageFormHelper, self).__init__(
            allow_edit, res_short_id, element_id, element_name,
            Layout(*crispy_fields),
            element_name_label='Spatial Reference', *args, **kwargs)
class OriginalCoverageForm(forms.Form):
    """Display/edit form for a resource's spatial reference (original coverage)."""
    # Free-form coordinate reference description fields (all optional).
    projection_string = forms.CharField(required=False, label='Coordinate String',
                                        widget=forms.Textarea())
    projection_name = forms.CharField(max_length=256,
                                      required=False,
                                      label='Coordinate Reference System')
    datum = forms.CharField(max_length=256, required=False, label='Datum')
    unit = forms.CharField(max_length=256, required=False, label='Unit')
    # Bounding-box extents (required by default).
    northlimit = forms.FloatField(label='North Extent', widget=forms.TextInput())
    eastlimit = forms.FloatField(label='East Extent', widget=forms.TextInput())
    southlimit = forms.FloatField(label='South Extent', widget=forms.TextInput())
    westlimit = forms.FloatField(label='West Extent', widget=forms.TextInput())

    def __init__(self, allow_edit=True, res_short_id=None, element_id=None, *args, **kwargs):
        file_type = kwargs.pop('file_type', False)
        super(OriginalCoverageForm, self).__init__(*args, **kwargs)
        self.helper = OriginalCoverageFormHelper(allow_edit,
                                                 res_short_id,
                                                 element_id,
                                                 element_name='OriginalCoverage',
                                                 file_type=file_type)
        self.delete_modal_form = None
        self.number = 0
        self.allow_edit = allow_edit
        self.errors.clear()

        # Render every field read-only when editing is not allowed.
        if not allow_edit:
            for field in list(self.fields.values()):
                field.widget.attrs['readonly'] = True
class OriginalCoverageValidationForm(forms.Form):
    """Server-side validation of submitted original-coverage values."""
    # Bounding-box extents are mandatory; descriptive fields are optional.
    northlimit = forms.FloatField(required=True)
    eastlimit = forms.FloatField(required=True)
    southlimit = forms.FloatField(required=True)
    westlimit = forms.FloatField(required=True)
    projection_string = forms.CharField(required=False)
    projection_name = forms.CharField(max_length=256, required=False)
    datum = forms.CharField(max_length=256, required=False)
    unit = forms.CharField(max_length=256, required=False)
class GeometryInformationFormHelper(BaseFormHelper):
    """Crispy-forms helper laying out the geometry-information fields."""

    # Display order of the fields inside the rendered FieldSet.
    _FIELD_ORDER = ['geometryType', 'featureCount']

    def __init__(self, allow_edit=True, res_short_id=None, element_id=None,
                 element_name=None, *args, **kwargs):
        is_file_type = kwargs.pop('file_type', False)
        crispy_fields = get_crispy_form_fields(self._FIELD_ORDER, file_type=is_file_type)
        super(GeometryInformationFormHelper, self).__init__(
            allow_edit, res_short_id, element_id, element_name,
            Layout(*crispy_fields),
            element_name_label='Geometry Information', *args, **kwargs)
class GeometryInformationForm(forms.Form):
    """Display/edit form for a geographic feature's geometry information."""
    geometryType = forms.CharField(max_length=128, required=True, label='Geometry Type')
    featureCount = forms.IntegerField(label='Feature Count',
                                      required=True,
                                      widget=forms.TextInput())

    def __init__(self, allow_edit=True, res_short_id=None, element_id=None, *args, **kwargs):
        file_type = kwargs.pop('file_type', False)
        super(GeometryInformationForm, self).__init__(*args, **kwargs)
        self.helper = GeometryInformationFormHelper(allow_edit,
                                                    res_short_id,
                                                    element_id,
                                                    element_name='GeometryInformation',
                                                    file_type=file_type)
        self.delete_modal_form = None
        self.number = 0
        self.allow_edit = allow_edit
        self.errors.clear()

        # Render every field read-only when editing is not allowed.
        if not allow_edit:
            for field in list(self.fields.values()):
                field.widget.attrs['readonly'] = True
class GeometryInformationValidationForm(forms.Form):
    """Server-side validation of submitted geometry-information values."""
    featureCount = forms.IntegerField(required=True)
    geometryType = forms.CharField(max_length=128, required=True)
class FieldInformationValidationForm(forms.Form):
    """Server-side validation of a shapefile attribute field's metadata."""
    fieldName = forms.CharField(required=True, max_length=128)
    fieldType = forms.CharField(required=True, max_length=128)
    fieldTypeCode = forms.CharField(required=False, max_length=50)
    fieldWidth = forms.DecimalField(required=False)
    fieldPrecision = forms.DecimalField(required=False)
| 48.508621 | 98 | 0.638884 |
53c4f4ceb71c01a3fdf458233c57b5b7ba00823f | 7,027 | py | Python | src/GrainGenerator_noInterseptions.py | dkagramanyan/dkagramanyan-WC-Co_computer_vision | 74eece7ded37d33ff6cb38cf50d1c99ea27d7468 | [
"MIT"
] | null | null | null | src/GrainGenerator_noInterseptions.py | dkagramanyan/dkagramanyan-WC-Co_computer_vision | 74eece7ded37d33ff6cb38cf50d1c99ea27d7468 | [
"MIT"
] | null | null | null | src/GrainGenerator_noInterseptions.py | dkagramanyan/dkagramanyan-WC-Co_computer_vision | 74eece7ded37d33ff6cb38cf50d1c99ea27d7468 | [
"MIT"
] | null | null | null | import bpy
from random import random
from mathutils import Vector, Euler
import math
#единицы измерения - метры
#checks if 2 cubes are intersepting using their coordinates and rotations
def ifIntersept(loc1, loc2, rot1, rot2, size):
    """Bounding-box overlap test between two rotated cubes.

    Args:
        loc1, loc2: (x, y, z) centres of the two cubes.
        rot1, rot2: XYZ Euler rotations in radians of each cube.
        size: edge length shared by both cubes.

    Returns True when the rotated cubes' axis-aligned bounding boxes do
    NOT overlap (i.e. the placement is acceptable), False when they do.
    NOTE: this preserves the original return convention of this
    "no interseptions" generator, despite the function's name.

    BUGFIX: the original rotated both vertex sets using a mix of the
    module-level ``rot`` and ``rot1`` (never ``rot2``), mutated vertex
    components in place mid-rotation so later axes read already-rotated
    values, and rotated absolute coordinates about the global origin
    instead of each cube's own centre.  Each cube's corner offsets are
    now rotated about its centre with its own rotation.
    """
    half = size / 2.0

    def rotated_corners(loc, rot):
        # The eight corner offsets of the cube, rotated about its centre
        # (X axis, then Y, then Z -- same axis order as the original),
        # then translated to the cube's location.
        corners = []
        for sx in (1.0, -1.0):
            for sy in (1.0, -1.0):
                for sz in (1.0, -1.0):
                    x, y, z = sx * half, sy * half, sz * half
                    # Rotate about X (temporaries keep components consistent).
                    y, z = (y * math.cos(rot[0]) + z * math.sin(rot[0]),
                            -y * math.sin(rot[0]) + z * math.cos(rot[0]))
                    # Rotate about Y.
                    x, z = (x * math.cos(rot[1]) + z * math.sin(rot[1]),
                            -x * math.sin(rot[1]) + z * math.cos(rot[1]))
                    # Rotate about Z.
                    x, y = (x * math.cos(rot[2]) - y * math.sin(rot[2]),
                            -x * math.sin(rot[2]) + y * math.cos(rot[2]))
                    corners.append((loc[0] + x, loc[1] + y, loc[2] + z))
        return corners

    corners1 = rotated_corners(loc1, rot1)
    corners2 = rotated_corners(loc2, rot2)

    xs1, ys1, zs1 = zip(*corners1)
    xs2, ys2, zs2 = zip(*corners2)

    # Standard separating-interval test on each axis of the AABBs.
    overlap = (max(xs1) >= min(xs2) and min(xs1) <= max(xs2)
               and max(ys1) >= min(ys2) and min(ys1) <= max(ys2)
               and max(zs1) >= min(zs2) and min(zs1) <= max(zs2))
    # Preserve original convention: False when the boxes overlap.
    return not overlap
# ---------------------------------------------------------------------------
# Blender scene-generation script: scatters non-overlapping, randomly rotated
# cubes ("grains") inside a thin slab volume and renders one image per camera
# location.  Depends on names defined earlier in this file: bpy, math,
# random(), Vector, Euler and the ifIntersept() overlap test.
# ---------------------------------------------------------------------------
# min and max values for each axis for the random grain positions
ranges = {
    'x' : { 'min' : -0.5, 'max' : 0.5 },
    'y' : { 'min' : -0.5, 'max' : 0.5 },
    'z' : { 'min' : -0.025, 'max' : 0.025 }
}
# Change the camera properties
cam = bpy.data.cameras["Camera"]
cam.lens = 67
cam.sensor_width = 10
cam.display_size = 0.1
# Create a list of camera locations
'''
#для генерации на харизме
camera_locations = []
y = 45
while y > -45:
    x = -45
    while x < 45:
        camera_locations.append((x/100.0, y/100.0, 0.7))
        x+=10
    y-=10
'''
camera_locations = [(-0.45, -0.45, 0.7), (-0.35, -0.45, 0.7), (-0.25, -0.45, 0.7)] # for local generation (the commented block above is for generation on the "Harizma" cluster)
'''
#для генерации на харизме
grain_sizes = [0.005, 0.003, 0.001]
'''
grain_sizes = [0.05] # for local generation
sample_number = 0
for cubeRadius in grain_sizes:
    # Generates a random number within the axis minmax range
    randLocInRange = lambda axis: ranges[axis]['min'] + random() * ( ranges[axis]['max'] - ranges[axis]['min'] )
    size = (0.05/cubeRadius**3)*(1/7) # Number of cubes; grows as grain volume shrinks
    cubes = [] # Cube coordinates list
    rotations = [] # cube rotations list
    sample_number += 1
    loopIterations = 0 # NOTE(review): appears unused below
    # Generate a random 3D coordinate for the first cube
    loc = Vector([ randLocInRange( axis ) for axis in ranges.keys() ])
    # Add coordinate to cube list
    cubes.append( loc )
    # Generate a random rotation
    rot = Euler((random(), random(), random()),'XYZ')
    # Add rotation to rotations list
    rotations.append (rot)
    # Add the first cube (others will be duplicated from it)
    # Note: in blender 2.8 size arg is used instead of radius
    bpy.ops.mesh.primitive_cube_add( size = cubeRadius, location = cubes[0], rotation = rotations[0] )
    cube = bpy.context.scene.objects['Cube']
    mat = bpy.data.materials.get("Material")
    cube.data.materials.append(mat)
    # Add all other cubes: rejection-sample a position/rotation until it does
    # not intersect any already placed cube (per ifIntersept)
    c = 0
    while c <= size: # NOTE(review): size is a float, so this places int(size)+1 extra cubes
        while True:
            flag = True
            # Generate a random 3D coordinate
            loc = Vector([ randLocInRange( axis ) for axis in ranges.keys() ])
            # Generate a random rotation
            rot = Euler((random(), random(), random()),'XYZ')
            for l in range (len(cubes)):
                if ifIntersept(loc, cubes[l], rot, rotations[l], cubeRadius) is False:
                    flag = False
            if flag is True:
                break
        # Add coordinate to cube list
        cubes.append( loc )
        rotations.append(rot)
        dupliCube = cube.copy()
        dupliCube.location = loc
        dupliCube.rotation_euler = rot
        dupliCube.scale = (1, 1, 1)
        # bpy.context.scene.objects.link( dupliCube )
        # in blender 2.8 an api change requires to use the collection instead of the scene
        bpy.context.collection.objects.link(dupliCube)
        c+=1
    # render one image per camera location
    photo_number = 0
    for cam_location in camera_locations:
        photo_number+=1
        # change the location of camera
        camobj = bpy.data.objects["_cam"]
        camobj.location = cam_location
        # render and save the image
        scene = bpy.context.scene
        scene.camera = camobj
        scene.render.image_settings.file_format='PNG'
        scene.render.filepath='C:/Users/HOME/Desktop/Pobedit/GrainGenerator/blender'+str(sample_number)+'_'+str(photo_number)+'.png'
        bpy.ops.render.render(write_still=1)
    '''
    #delete all the grains
    bpy.ops.object.select_all(action='SELECT')
    bpy.data.objects['_cam'].select_set(False)
    bpy.ops.object.delete()
    '''
| 34.787129 | 132 | 0.59556 |
985e5025870be87e21427b8e8be741ea2078eb6e | 3,916 | py | Python | recipes/h5pp/all/conanfile.py | dyndrite/conan-center-index | 106b5c2f532d5129e7ca1997e29e4e105bb3018c | [
"MIT"
] | null | null | null | recipes/h5pp/all/conanfile.py | dyndrite/conan-center-index | 106b5c2f532d5129e7ca1997e29e4e105bb3018c | [
"MIT"
] | null | null | null | recipes/h5pp/all/conanfile.py | dyndrite/conan-center-index | 106b5c2f532d5129e7ca1997e29e4e105bb3018c | [
"MIT"
] | null | null | null | from conan.tools.microsoft import is_msvc
from conans import ConanFile, tools
from conans.errors import ConanInvalidConfiguration
import os
required_conan_version = ">=1.45.0"
class H5ppConan(ConanFile):
name = "h5pp"
description = "A C++17 wrapper for HDF5 with focus on simplicity"
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://github.com/DavidAce/h5pp"
topics = ("h5pp", "hdf5", "binary", "storage")
license = "MIT"
settings = "os", "arch", "compiler", "build_type"
no_copy_source = True
@property
def _source_subfolder(self):
return "source_subfolder"
@property
def _compilers_minimum_version(self):
return {
"gcc": "7.4",
"Visual Studio": "15.7",
"clang": "6",
"apple-clang": "10",
}
def requirements(self):
self.requires("eigen/3.4.0")
self.requires("hdf5/1.12.1")
self.requires("spdlog/1.10.0")
def package_id(self):
self.info.header_only()
def validate(self):
if self.settings.compiler.get_safe("cppstd"):
tools.check_min_cppstd(self, 17)
minimum_version = self._compilers_minimum_version.get(str(self.settings.compiler), False)
if minimum_version:
if tools.Version(self.settings.compiler.version) < minimum_version:
raise ConanInvalidConfiguration("h5pp requires C++17, which your compiler does not support.")
else:
self.output.warn("h5pp requires C++17. Your compiler is unknown. Assuming it supports C++17.")
def source(self):
tools.get(**self.conan_data["sources"][self.version],
destination=self._source_subfolder, strip_root=True)
def package(self):
self.copy("LICENSE", src=self._source_subfolder, dst="licenses")
if tools.Version(self.version) < "1.9.0":
includedir = os.path.join(self._source_subfolder, "h5pp", "include")
else:
includedir = os.path.join(self._source_subfolder, "include")
self.copy("*", src=includedir, dst="include")
def package_info(self):
self.cpp_info.set_property("cmake_file_name", "h5pp")
self.cpp_info.set_property("cmake_target_name", "h5pp::h5pp")
self.cpp_info.components["h5pp_headers"].set_property("cmake_target_name", "h5pp::headers")
self.cpp_info.components["h5pp_deps"].set_property("cmake_target_name", "h5pp::deps")
self.cpp_info.components["h5pp_deps"].requires = ["eigen::eigen", "spdlog::spdlog", "hdf5::hdf5"]
self.cpp_info.components["h5pp_flags"].set_property("cmake_target_name", "h5pp::flags")
if (self.settings.compiler == "gcc" and tools.Version(self.settings.compiler.version) < "9") or \
(self.settings.compiler == "clang" and self.settings.compiler.get_safe("libcxx") in ["libstdc++", "libstdc++11"]):
self.cpp_info.components["h5pp_flags"].system_libs = ["stdc++fs"]
if is_msvc(self):
self.cpp_info.components["h5pp_flags"].defines = ["NOMINMAX"]
self.cpp_info.components["h5pp_flags"].cxxflags = ["/permissive-"]
# TODO: to remove in conan v2 once cmake_find_package_* generators removed
self.cpp_info.names["cmake_find_package"] = "h5pp"
self.cpp_info.names["cmake_find_package_multi"] = "h5pp"
self.cpp_info.components["h5pp_headers"].names["cmake_find_package"] = "headers"
self.cpp_info.components["h5pp_headers"].names["cmake_find_package_multi"] = "headers"
self.cpp_info.components["h5pp_deps"].names["cmake_find_package"] = "deps"
self.cpp_info.components["h5pp_deps"].names["cmake_find_package_multi"] = "deps"
self.cpp_info.components["h5pp_flags"].names["cmake_find_package"] = "flags"
self.cpp_info.components["h5pp_flags"].names["cmake_find_package_multi"] = "flags"
| 46.070588 | 125 | 0.658069 |
0216067a03dfff688458571df3f4aec893a04b4c | 209 | py | Python | totall/totall/doctype/caja/test_caja.py | dacosta2213/totall | c64420bb8d35ccf423fe8ea66321b34431f79660 | [
"MIT"
] | null | null | null | totall/totall/doctype/caja/test_caja.py | dacosta2213/totall | c64420bb8d35ccf423fe8ea66321b34431f79660 | [
"MIT"
] | null | null | null | totall/totall/doctype/caja/test_caja.py | dacosta2213/totall | c64420bb8d35ccf423fe8ea66321b34431f79660 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2017, C0D1G0 B1NAR10 and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
class TestCaja(unittest.TestCase):
	"""Placeholder test case for the Caja doctype; no tests implemented yet."""
	pass
| 19 | 53 | 0.76555 |
0952b004ec00f1f61b90453daa249ca61209696e | 264,328 | py | Python | lib/GUI/ScrolledPanels/Management.py | Connor22/hydrus | d0ae4a8898742a0d13601e1167d5ba697b31c395 | [
"WTFPL"
] | null | null | null | lib/GUI/ScrolledPanels/Management.py | Connor22/hydrus | d0ae4a8898742a0d13601e1167d5ba697b31c395 | [
"WTFPL"
] | null | null | null | lib/GUI/ScrolledPanels/Management.py | Connor22/hydrus | d0ae4a8898742a0d13601e1167d5ba697b31c395 | [
"WTFPL"
] | null | null | null | from . import ClientCaches
from . import ClientConstants as CC
from . import ClientData
from . import ClientDefaults
from . import ClientDownloading
from . import ClientGUIACDropdown
from . import ClientGUICommon
from . import ClientGUIControls
from . import ClientGUIDialogs
from . import ClientGUIDialogsQuick
from . import ClientGUIImport
from . import ClientGUIListBoxes
from . import ClientGUIListCtrl
from . import ClientGUIPredicates
from . import ClientGUIScrolledPanels
from . import ClientGUIScrolledPanelsEdit
from . import ClientGUIScrolledPanelsReview
from . import ClientGUISerialisable
from . import ClientGUIShortcuts
from . import ClientGUITagSuggestions
from . import ClientGUITopLevelWindows
from . import ClientNetworkingContexts
from . import ClientNetworkingJobs
from . import ClientNetworkingSessions
from . import ClientImporting
from . import ClientMedia
from . import ClientRatings
from . import ClientSerialisable
from . import ClientServices
from . import ClientGUITime
import collections
from . import HydrusConstants as HC
from . import HydrusData
from . import HydrusExceptions
from . import HydrusGlobals as HG
from . import HydrusNetwork
from . import HydrusNetworking
from . import HydrusPaths
from . import HydrusSerialisable
from . import HydrusTagArchive
from . import HydrusTags
from . import HydrusText
import itertools
import os
import random
import traceback
import urllib.parse
import wx
class ManageAccountTypesPanel( ClientGUIScrolledPanels.ManagePanel ):
    """Admin panel that lists, adds, edits and deletes a hydrus server's account types.

    Changes are collected locally and only sent to the server in CommitChanges.
    """
    def __init__( self, parent, service_key ):
        # service_key identifies the admin service whose account types we manage.
        self._admin_service = HG.client_controller.services_manager.GetService( service_key )
        ClientGUIScrolledPanels.ManagePanel.__init__( self, parent )
        # maps deleted account type key -> account type key its accounts migrate to
        self._deletee_account_type_keys_to_new_account_type_keys = {}
        self._account_types_listctrl = ClientGUIListCtrl.SaneListCtrlForSingleObject( self, 200, [ ( 'title', -1 ) ], delete_key_callback = self._Delete, activation_callback = self._Edit )
        self._add_button = ClientGUICommon.BetterButton( self, 'add', self._Add )
        self._edit_button = ClientGUICommon.BetterButton( self, 'edit', self._Edit )
        self._delete_button = ClientGUICommon.BetterButton( self, 'delete', self._Delete )
        # fetch the current account types from the server synchronously
        response = self._admin_service.Request( HC.GET, 'account_types' )
        account_types = response[ 'account_types' ]
        for account_type in account_types:
            ( display_tuple, sort_tuple ) = self._ConvertAccountTypeToTuples( account_type )
            self._account_types_listctrl.Append( display_tuple, sort_tuple, account_type )
        # layout
        hbox = wx.BoxSizer( wx.HORIZONTAL )
        hbox.Add( self._add_button, CC.FLAGS_VCENTER )
        hbox.Add( self._edit_button, CC.FLAGS_VCENTER )
        hbox.Add( self._delete_button, CC.FLAGS_VCENTER )
        vbox = wx.BoxSizer( wx.VERTICAL )
        vbox.Add( self._account_types_listctrl, CC.FLAGS_EXPAND_BOTH_WAYS )
        vbox.Add( hbox, CC.FLAGS_BUTTON_SIZER )
        self.SetSizer( vbox )
    def _Add( self ):
        """Open the edit dialog for a fresh account type and append it on OK."""
        title = 'new account type'
        permissions = {}
        bandwidth_rules = HydrusNetworking.BandwidthRules()
        account_type = HydrusNetwork.AccountType.GenerateNewAccountTypeFromParameters( title, permissions, bandwidth_rules )
        with ClientGUITopLevelWindows.DialogEdit( self, 'edit account type' ) as dlg_edit:
            panel = ClientGUIScrolledPanelsEdit.EditAccountTypePanel( dlg_edit, self._admin_service.GetServiceType(), account_type )
            dlg_edit.SetPanel( panel )
            if dlg_edit.ShowModal() == wx.ID_OK:
                new_account_type = panel.GetValue()
                ( display_tuple, sort_tuple ) = self._ConvertAccountTypeToTuples( new_account_type )
                self._account_types_listctrl.Append( display_tuple, sort_tuple, new_account_type )
    def _ConvertAccountTypeToTuples( self, account_type ):
        """Return the ( display, sort ) tuples for a listctrl row; both are just the title."""
        title = account_type.GetTitle()
        display_tuple = ( title, )
        sort_tuple = ( title, )
        return ( display_tuple, sort_tuple )
    def _Delete( self ):
        """Remove the selected account types, asking where their accounts should migrate."""
        with ClientGUIDialogs.DialogYesNo( self, 'Remove all selected?' ) as dlg:
            if dlg.ShowModal() == wx.ID_YES:
                indices = self._account_types_listctrl.GetAllSelected()
                account_types_about_to_delete = { self._account_types_listctrl.GetObject( index ) for index in indices }
                all_account_types = set( self._account_types_listctrl.GetObjects() )
                account_types_can_move_to = list( all_account_types - account_types_about_to_delete )
                # the server must always keep at least one account type
                if len( account_types_can_move_to ) == 0:
                    wx.MessageBox( 'You cannot delete every account type!' )
                    return
                for deletee_account_type in account_types_about_to_delete:
                    if len( account_types_can_move_to ) > 1:
                        # several candidates: let the user pick the migration target
                        deletee_title = deletee_account_type.GetTitle()
                        choice_tuples = [ ( account_type.GetTitle(), account_type ) for account_type in account_types_can_move_to ]
                        try:
                            new_account_type = ClientGUIDialogsQuick.SelectFromList( self, 'what should deleted ' + deletee_title + ' accounts become?', choice_tuples )
                        except HydrusExceptions.CancelledException:
                            return
                    else:
                        # exactly one candidate: it is the only possible target
                        ( new_account_type, ) = account_types_can_move_to
                    deletee_account_type_key = deletee_account_type.GetAccountTypeKey()
                    new_account_type_key = new_account_type.GetAccountTypeKey()
                    self._deletee_account_type_keys_to_new_account_type_keys[ deletee_account_type_key ] = new_account_type_key
                self._account_types_listctrl.RemoveAllSelected()
    def _Edit( self ):
        """Open the edit dialog for each selected account type; stop on cancel."""
        indices = self._account_types_listctrl.GetAllSelected()
        for index in indices:
            account_type = self._account_types_listctrl.GetObject( index )
            with ClientGUITopLevelWindows.DialogEdit( self, 'edit account type' ) as dlg_edit:
                panel = ClientGUIScrolledPanelsEdit.EditAccountTypePanel( dlg_edit, self._admin_service.GetServiceType(), account_type )
                dlg_edit.SetPanel( panel )
                if dlg_edit.ShowModal() == wx.ID_OK:
                    edited_account_type = panel.GetValue()
                    ( display_tuple, sort_tuple ) = self._ConvertAccountTypeToTuples( edited_account_type )
                    self._account_types_listctrl.UpdateRow( index, display_tuple, sort_tuple, edited_account_type )
                else:
                    return
    def CommitChanges( self ):
        """Collapse chained deletions and push the new account types to the server."""
        account_types = self._account_types_listctrl.GetObjects()
        def key_transfer_not_collapsed():
            # true while some deletee still maps to another deletee
            keys = set( self._deletee_account_type_keys_to_new_account_type_keys.keys() )
            values = set( self._deletee_account_type_keys_to_new_account_type_keys.values() )
            return len( keys.intersection( values ) ) > 0
        while key_transfer_not_collapsed():
            # some deletees are going to other deletees, so lets collapse
            deletee_account_type_keys = set( self._deletee_account_type_keys_to_new_account_type_keys.keys() )
            account_type_keys_tuples = list(self._deletee_account_type_keys_to_new_account_type_keys.items())
            for ( deletee_account_type_key, new_account_type_key ) in account_type_keys_tuples:
                if new_account_type_key in deletee_account_type_keys:
                    better_new_account_type_key = self._deletee_account_type_keys_to_new_account_type_keys[ new_account_type_key ]
                    self._deletee_account_type_keys_to_new_account_type_keys[ deletee_account_type_key ] = better_new_account_type_key
        serialisable_deletee_account_type_keys_to_new_account_type_keys = HydrusSerialisable.SerialisableBytesDictionary( self._deletee_account_type_keys_to_new_account_type_keys )
        self._admin_service.Request( HC.POST, 'account_types', { 'account_types' : account_types, 'deletee_account_type_keys_to_new_account_type_keys' : serialisable_deletee_account_type_keys_to_new_account_type_keys } )
class ManageClientServicesPanel( ClientGUIScrolledPanels.ManagePanel ):
    def __init__( self, parent ):
        """Build the services list (populated from the services manager) and its add/edit/delete buttons."""
        ClientGUIScrolledPanels.ManagePanel.__init__( self, parent )
        columns = [ ( 'type', 20 ), ( 'name', -1 ), ( 'deletable', 12 ) ]
        self._listctrl = ClientGUIListCtrl.BetterListCtrl( self, 'manage_services', 25, 20, columns, self._ConvertServiceToListCtrlTuples, delete_key_callback = self._Delete, activation_callback = self._Edit)
        # one 'add' submenu entry per user-addable service type
        menu_items = []
        for service_type in HC.ADDREMOVABLE_SERVICES:
            service_string = HC.service_string_lookup[ service_type ]
            menu_items.append( ( 'normal', service_string, 'Add a new ' + service_string + '.', HydrusData.Call( self._Add, service_type ) ) )
        self._add_button = ClientGUICommon.MenuButton( self, 'add', menu_items = menu_items )
        self._edit_button = ClientGUICommon.BetterButton( self, 'edit', self._Edit )
        self._delete_button = ClientGUICommon.BetterButton( self, 'delete', self._Delete )
        # keep the original services so CommitChanges can detect deletions
        self._original_services = HG.client_controller.services_manager.GetServices()
        self._listctrl.AddDatas( self._original_services )
        self._listctrl.Sort( 0 )
        # layout
        add_remove_hbox = wx.BoxSizer( wx.HORIZONTAL )
        add_remove_hbox.Add( self._add_button, CC.FLAGS_VCENTER )
        add_remove_hbox.Add( self._edit_button, CC.FLAGS_VCENTER )
        add_remove_hbox.Add( self._delete_button, CC.FLAGS_VCENTER )
        vbox = wx.BoxSizer( wx.VERTICAL )
        vbox.Add( self._listctrl, CC.FLAGS_EXPAND_BOTH_WAYS )
        vbox.Add( add_remove_hbox, CC.FLAGS_BUTTON_SIZER )
        self.SetSizer( vbox )
    def _Add( self, service_type ):
        """Create a blank service of the given type and open the edit dialog; append it on OK."""
        service_key = HydrusData.GenerateKey()
        name = 'new service'
        service = ClientServices.GenerateService( service_key, service_type, name )
        with ClientGUITopLevelWindows.DialogEdit( self, 'edit service' ) as dlg:
            panel = self._EditPanel( dlg, service )
            dlg.SetPanel( panel )
            if dlg.ShowModal() == wx.ID_OK:
                new_service = panel.GetValue()
                # de-dupe the name against the services already in the list
                HydrusSerialisable.SetNonDupeName( new_service, self._GetExistingNames() )
                self._listctrl.AddDatas( ( new_service, ) )
                self._listctrl.Sort()
def _ConvertServiceToListCtrlTuples( self, service ):
service_type = service.GetServiceType()
name = service.GetName()
deletable = service_type in HC.ADDREMOVABLE_SERVICES
pretty_service_type = HC.service_string_lookup[ service_type ]
if deletable:
pretty_deletable = 'yes'
else:
pretty_deletable = ''
return ( ( pretty_service_type, name, pretty_deletable ), ( pretty_service_type, name, deletable ) )
def _GetExistingNames( self ):
services = self._listctrl.GetData()
names = { service.GetName() for service in services }
return names
    def _Delete( self ):
        """After confirmation, remove the selected services that are user-deletable."""
        selected_services = self._listctrl.GetData( only_selected = True )
        # core services (e.g. local files) are filtered out and cannot be deleted
        deletable_services = [ service for service in selected_services if service.GetServiceType() in HC.ADDREMOVABLE_SERVICES ]
        if len( deletable_services ) > 0:
            with ClientGUIDialogs.DialogYesNo( self, 'Delete the selected services?' ) as dlg:
                if dlg.ShowModal() == wx.ID_YES:
                    self._listctrl.DeleteDatas( deletable_services )
    def _Edit( self ):
        """Open the edit dialog for each selected service in turn; stop on cancel."""
        selected_services = self._listctrl.GetData( only_selected = True )
        try:
            for service in selected_services:
                with ClientGUITopLevelWindows.DialogEdit( self, 'edit service' ) as dlg:
                    panel = self._EditPanel( dlg, service )
                    dlg.SetPanel( panel )
                    if dlg.ShowModal() == wx.ID_OK:
                        # replace the old row with the edited service
                        self._listctrl.DeleteDatas( ( service, ) )
                        edited_service = panel.GetValue()
                        HydrusSerialisable.SetNonDupeName( edited_service, self._GetExistingNames() )
                        self._listctrl.AddDatas( ( edited_service, ) )
                    else:
                        return
        finally:
            # re-sort even on early return so already-edited rows land correctly
            self._listctrl.Sort()
    def CommitChanges( self ):
        """Push the edited service list to the controller, double-checking any deletions with the user.

        Raises VetoException (keeping the dialog open) if the user does not confirm deletions.
        """
        services = self._listctrl.GetData()
        new_service_keys = { service.GetServiceKey() for service in services }
        # any original service whose key is no longer present is about to be deleted
        deletee_service_names = [ service.GetName() for service in self._original_services if service.GetServiceKey() not in new_service_keys ]
        if len( deletee_service_names ) > 0:
            message = 'You are about to delete the following services:'
            message += os.linesep * 2
            message += os.linesep.join( deletee_service_names )
            message += os.linesep * 2
            message += 'Are you absolutely sure this is correct?'
            with ClientGUIDialogs.DialogYesNo( self, message ) as dlg:
                if dlg.ShowModal() != wx.ID_YES:
                    raise HydrusExceptions.VetoException( 'Commit cancelled by user! If you do not believe you meant to delete any services (i.e the code accidentally intended to delete them all by itself), please inform hydrus dev immediately.' )
        HG.client_controller.SetServices( services )
class _EditPanel( ClientGUIScrolledPanels.EditPanel ):
        def __init__( self, parent, service ):
            """Build the name panel plus whichever type-specific sub-panels this service needs."""
            ClientGUIScrolledPanels.EditPanel.__init__( self, parent )
            # work on a copy so cancelling the dialog leaves the original untouched
            duplicate_service = service.Duplicate()
            ( self._service_key, self._service_type, name, self._dictionary ) = duplicate_service.ToTuple()
            self._service_panel = self._ServicePanel( self, name )
            self._panels = []
            if self._service_type in HC.REMOTE_SERVICES:
                remote_panel = self._ServiceRemotePanel( self, self._service_type, self._dictionary )
                self._panels.append( remote_panel )
            if self._service_type in HC.RESTRICTED_SERVICES:
                # NOTE(review): relies on every restricted service also being remote,
                # otherwise remote_panel would be unbound here -- confirm against HC
                self._panels.append( self._ServiceRestrictedPanel( self, self._service_key, remote_panel, self._service_type, self._dictionary ) )
            if self._service_type in HC.TAG_SERVICES:
                self._panels.append( self._ServiceTagPanel( self, self._dictionary ) )
            if self._service_type in ( HC.CLIENT_API_SERVICE, HC.LOCAL_BOORU ):
                self._panels.append( self._ServiceClientServerPanel( self, self._service_type, self._dictionary ) )
            if self._service_type in HC.RATINGS_SERVICES:
                self._panels.append( self._ServiceRatingsPanel( self, self._dictionary ) )
            if self._service_type == HC.LOCAL_RATING_NUMERICAL:
                self._panels.append( self._ServiceRatingsNumericalPanel( self, self._dictionary ) )
            if self._service_type == HC.IPFS:
                self._panels.append( self._ServiceIPFSPanel( self, self._dictionary ) )
            # layout: name panel on top, then each sub-panel
            vbox = wx.BoxSizer( wx.VERTICAL )
            vbox.Add( self._service_panel, CC.FLAGS_EXPAND_PERPENDICULAR )
            for panel in self._panels:
                vbox.Add( panel, CC.FLAGS_EXPAND_PERPENDICULAR )
            self.SetSizer( vbox )
def _GetArchiveNameToDisplay( self, portable_hta_path, namespaces ):
hta_path = HydrusPaths.ConvertPortablePathToAbsPath( portable_hta_path )
if len( namespaces ) == 0: name_to_display = hta_path
else: name_to_display = hta_path + ' (' + ', '.join( HydrusData.ConvertUglyNamespacesToPrettyStrings( namespaces ) ) + ')'
return name_to_display
        def EventArchiveAdd( self, event ):
            """Let the user pick a Hydrus Tag Archive file and the namespaces to sync from it."""
            if self._archive_sync.GetCount() == 0:
                # warn on the first archive only
                wx.MessageBox( 'Be careful with this tool! Syncing a lot of files to a large archive can take a very long time to initialise.' )
            text = 'Select the Hydrus Tag Archive\'s location.'
            with wx.FileDialog( self, message = text, style = wx.FD_OPEN ) as dlg_file:
                if dlg_file.ShowModal() == wx.ID_OK:
                    hta_path = dlg_file.GetPath()
                    # store the portable form so the entry survives a client move
                    portable_hta_path = HydrusPaths.ConvertAbsPathToPortablePath( hta_path )
                    hta = HydrusTagArchive.HydrusTagArchive( hta_path )
                    archive_namespaces = list( hta.GetNamespaces() )
                    archive_namespaces.sort()
                    # all namespaces start unchecked for a new archive
                    choice_tuples = [ ( HydrusData.ConvertUglyNamespaceToPrettyString( namespace ), namespace, False ) for namespace in archive_namespaces ]
                    with ClientGUITopLevelWindows.DialogEdit( self, 'select namespaces' ) as dlg:
                        panel = ClientGUIScrolledPanelsEdit.EditChooseMultiple( dlg, choice_tuples )
                        dlg.SetPanel( panel )
                        if dlg.ShowModal() == wx.ID_OK:
                            namespaces = panel.GetValue()
                        else:
                            return
                    name_to_display = self._GetArchiveNameToDisplay( portable_hta_path, namespaces )
                    self._archive_sync.Append( name_to_display, ( portable_hta_path, namespaces ) )
        def EventArchiveEdit( self, event ):
            """Re-open the namespace chooser for the selected archive sync entry."""
            selection = self._archive_sync.GetSelection()
            if selection != wx.NOT_FOUND:
                ( portable_hta_path, existing_namespaces ) = self._archive_sync.GetClientData( selection )
                hta_path = HydrusPaths.ConvertPortablePathToAbsPath( portable_hta_path )
                if not os.path.exists( hta_path ):
                    wx.MessageBox( 'This archive does not seem to exist any longer!' )
                    return
                hta = HydrusTagArchive.HydrusTagArchive( hta_path )
                archive_namespaces = list( hta.GetNamespaces() )
                archive_namespaces.sort()
                # pre-check the namespaces that were already selected
                choice_tuples = [ ( HydrusData.ConvertUglyNamespaceToPrettyString( namespace ), namespace, namespace in existing_namespaces ) for namespace in archive_namespaces ]
                with ClientGUITopLevelWindows.DialogEdit( self, 'select namespaces' ) as dlg:
                    panel = ClientGUIScrolledPanelsEdit.EditChooseMultiple( dlg, choice_tuples )
                    dlg.SetPanel( panel )
                    if dlg.ShowModal() == wx.ID_OK:
                        namespaces = panel.GetValue()
                    else:
                        return
                name_to_display = self._GetArchiveNameToDisplay( portable_hta_path, namespaces )
                self._archive_sync.SetString( selection, name_to_display )
                self._archive_sync.SetClientData( selection, ( portable_hta_path, namespaces ) )
def EventArchiveRemove( self, event ):
selection = self._archive_sync.GetSelection()
if selection != wx.NOT_FOUND:
self._archive_sync.Delete( selection )
def GetValue( self ):
name = self._service_panel.GetValue()
dictionary = self._dictionary.Duplicate()
for panel in self._panels:
dictionary_part = panel.GetValue()
dictionary.update( dictionary_part )
return ClientServices.GenerateService( self._service_key, self._service_type, name, dictionary )
        class _ServicePanel( ClientGUICommon.StaticBox ):
            """Sub-panel holding the service's name text box."""
            def __init__( self, parent, name ):
                ClientGUICommon.StaticBox.__init__( self, parent, 'name' )
                self._name = wx.TextCtrl( self )
                #
                self._name.SetValue( name )
                #
                self.Add( self._name, CC.FLAGS_EXPAND_PERPENDICULAR )
            def GetValue( self ):
                """Return the entered name; raises VetoException if it is empty."""
                name = self._name.GetValue()
                if name == '':
                    raise HydrusExceptions.VetoException( 'Please enter a name!' )
                return name
        class _ServiceRemotePanel( ClientGUICommon.StaticBox ):
            """Sub-panel for a remote service's host:port address, with a connectivity test."""
            def __init__( self, parent, service_type, dictionary ):
                ClientGUICommon.StaticBox.__init__( self, parent, 'network connection' )
                self._service_type = service_type
                credentials = dictionary[ 'credentials' ]
                self._host = wx.TextCtrl( self )
                self._port = wx.SpinCtrl( self, min = 1, max = 65535, size = ( 80, -1 ) )
                self._test_address_button = ClientGUICommon.BetterButton( self, 'test address', self._TestAddress )
                # populate from the existing credentials
                ( host, port ) = credentials.GetAddress()
                self._host.SetValue( host )
                self._port.SetValue( port )
                # layout: host : port on one row, test button below
                hbox = wx.BoxSizer( wx.HORIZONTAL )
                hbox.Add( self._host, CC.FLAGS_EXPAND_BOTH_WAYS )
                hbox.Add( ClientGUICommon.BetterStaticText( self, ':' ), CC.FLAGS_VCENTER )
                hbox.Add( self._port, CC.FLAGS_VCENTER )
                wrapped_hbox = ClientGUICommon.WrapInText( hbox, self, 'address: ' )
                self.Add( wrapped_hbox, CC.FLAGS_EXPAND_PERPENDICULAR )
                self.Add( self._test_address_button, CC.FLAGS_LONE_BUTTON )
            def _TestAddress( self ):
                """Fire a throwaway request at the entered address and report the result."""
                try:
                    credentials = self.GetCredentials()
                except HydrusExceptions.VetoException as e:
                    message = str( e )
                    if len( message ) > 0:
                        wx.MessageBox( message )
                    return
                ( host, port ) = credentials.GetAddress()
                # IPFS daemons speak plain http and have a version endpoint;
                # hydrus services expect https on the root
                if self._service_type == HC.IPFS:
                    scheme = 'http://'
                    request = 'api/v0/version'
                else:
                    scheme = 'https://'
                    request = ''
                url = scheme + host + ':' + str( port ) + '/' + request
                network_job = ClientNetworkingJobs.NetworkJobHydrus( CC.TEST_SERVICE_KEY, 'GET', url )
                network_job.OverrideBandwidth()
                network_job.SetForLogin( True )
                HG.client_controller.network_engine.AddJob( network_job )
                try:
                    # NOTE(review): this blocks the UI thread until the job finishes
                    network_job.WaitUntilDone()
                    wx.MessageBox( 'Got an ok response!' )
                except HydrusExceptions.NetworkException as e:
                    wx.MessageBox( 'Problem with that address: ' + str( e ) )
            def GetCredentials( self ):
                """Return Credentials for the entered address; raises VetoException on empty host."""
                host = self._host.GetValue()
                if host == '':
                    raise HydrusExceptions.VetoException( 'Please enter a host!' )
                port = self._port.GetValue()
                return HydrusNetwork.Credentials( host, port )
            def GetValue( self ):
                """Return this panel's contribution to the service dictionary."""
                dictionary_part = {}
                credentials = self.GetCredentials()
                dictionary_part[ 'credentials' ] = credentials
                return dictionary_part
        class _ServiceRestrictedPanel( ClientGUICommon.StaticBox ):
            """Sub-panel for a hydrus-network service's access key, with fetch and test helpers."""
            def __init__( self, parent, service_key, remote_panel, service_type, dictionary ):
                ClientGUICommon.StaticBox.__init__( self, parent, 'hydrus network' )
                self._service_key = service_key
                # the address panel is consulted whenever we need host:port
                self._remote_panel = remote_panel
                self._service_type = service_type
                self._original_credentials = dictionary[ 'credentials' ]
                self._access_key = wx.TextCtrl( self, size = ( 400, -1 ) )
                self._test_credentials_button = ClientGUICommon.BetterButton( self, 'test access key', self._TestCredentials )
                self._register = ClientGUICommon.BetterButton( self, 'fetch an access key with a registration key', self._GetAccessKeyFromRegistrationKey )
                #
                if self._original_credentials.HasAccessKey():
                    self._access_key.SetValue( self._original_credentials.GetAccessKey().hex() )
                #
                hbox = wx.BoxSizer( wx.HORIZONTAL )
                hbox.Add( self._register, CC.FLAGS_VCENTER )
                hbox.Add( self._test_credentials_button, CC.FLAGS_VCENTER )
                wrapped_access_key = ClientGUICommon.WrapInText( self._access_key, self, 'access key: ' )
                self.Add( wrapped_access_key, CC.FLAGS_EXPAND_PERPENDICULAR )
                self.Add( hbox, CC.FLAGS_BUTTON_SIZER )
            def _GetAccessKeyFromRegistrationKey( self ):
                """Exchange a user-entered registration key for an access key on a worker thread."""
                def wx_done():
                    # restore the button once the background fetch finishes
                    if not self:
                        return
                    self._register.Enable()
                    self._register.SetLabel( 'fetch an access key with a registration key' )
                def wx_setkey( access_key_encoded ):
                    if not self:
                        return
                    self._access_key.SetValue( access_key_encoded )
                def do_it( credentials, registration_key ):
                    # runs on a non-UI thread; all UI work goes through wx.CallAfter
                    try:
                        ( host, port ) = credentials.GetAddress()
                        url = 'https://' + host + ':' + str( port ) + '/access_key?registration_key=' + registration_key.hex()
                        network_job = ClientNetworkingJobs.NetworkJobHydrus( CC.TEST_SERVICE_KEY, 'GET', url )
                        network_job.OverrideBandwidth()
                        network_job.SetForLogin( True )
                        HG.client_controller.network_engine.AddJob( network_job )
                        try:
                            network_job.WaitUntilDone()
                            network_bytes = network_job.GetContentBytes()
                            parsed_request_args = HydrusNetwork.ParseNetworkBytesToParsedHydrusArgs( network_bytes )
                            access_key_encoded = parsed_request_args[ 'access_key' ].hex()
                            wx.CallAfter( wx_setkey, access_key_encoded )
                            wx.CallAfter( wx.MessageBox, 'Looks good!' )
                        except Exception as e:
                            HydrusData.PrintException( e )
                            wx.CallAfter( wx.MessageBox, 'Had a problem: ' + str( e ) )
                    finally:
                        wx.CallAfter( wx_done )
                try:
                    credentials = self._remote_panel.GetCredentials()
                except HydrusExceptions.VetoException as e:
                    message = str( e )
                    if len( message ) > 0:
                        wx.MessageBox( message )
                    return
                with ClientGUIDialogs.DialogTextEntry( self, 'Enter the registration key.' ) as dlg:
                    if dlg.ShowModal() == wx.ID_OK:
                        registration_key_encoded = dlg.GetValue()
                    else:
                        return
                # registration keys are often shared with a leading 'r' prefix
                if registration_key_encoded[0] == 'r':
                    registration_key_encoded = registration_key_encoded[1:]
                if registration_key_encoded == 'init':
                    # 'init' is the special first-admin bootstrap key
                    registration_key = b'init'
                else:
                    try:
                        registration_key = bytes.fromhex( registration_key_encoded )
                    except:
                        wx.MessageBox( 'Could not parse that registration key!' )
                        return
                self._register.Disable()
                self._register.SetLabel( 'fetching\u2026' )
                HG.client_controller.CallToThread( do_it, credentials, registration_key )
            def _TestCredentials( self ):
                """Verify the entered access key against the server on a worker thread."""
                def do_it( credentials ):
                    # runs on a non-UI thread; build a throwaway service to make the request
                    service = ClientServices.GenerateService( CC.TEST_SERVICE_KEY, self._service_type, 'test service' )
                    service.SetCredentials( credentials )
                    try:
                        if self._service_type in HC.RESTRICTED_SERVICES:
                            response = service.Request( HC.GET, 'access_key_verification' )
                            if not response[ 'verified' ]:
                                wx.CallAfter( wx.MessageBox, 'That access key was not recognised!' )
                            else:
                                wx.CallAfter( wx.MessageBox, 'Everything looks ok!' )
                    except HydrusExceptions.WrongServiceTypeException:
                        wx.CallAfter( wx.MessageBox, 'Connection was made, but the service was not a ' + HC.service_string_lookup[ self._service_type ] + '.' )
                        return
                    except HydrusExceptions.NetworkException as e:
                        wx.CallAfter( wx.MessageBox, 'Network problem: ' + str( e ) )
                        return
                    finally:
                        # NOTE(review): these wx calls happen off the UI thread,
                        # unlike the wx.CallAfter wrapping used above -- confirm intended
                        self._test_credentials_button.Enable()
                        self._test_credentials_button.SetLabel( 'test access key' )
                try:
                    credentials = self.GetCredentials()
                except HydrusExceptions.VetoException as e:
                    message = str( e )
                    if len( message ) > 0:
                        wx.MessageBox( message )
                    return
                self._test_credentials_button.Disable()
                self._test_credentials_button.SetLabel( 'fetching\u2026' )
                HG.client_controller.CallToThread( do_it, credentials )
            def GetCredentials( self ):
                """Return address credentials with the entered access key attached (if any)."""
                credentials = self._remote_panel.GetCredentials()
                try:
                    access_key = bytes.fromhex( self._access_key.GetValue() )
                except:
                    raise HydrusExceptions.VetoException( 'Could not understand that access key!')
                if len( access_key ) > 0:
                    credentials.SetAccessKey( access_key )
                return credentials
            def GetValue( self ):
                """Return this panel's contribution to the service dictionary.

                If the credentials changed, also resets the cached account and
                clears the old network session for this service.
                """
                dictionary_part = {}
                credentials = self.GetCredentials()
                if credentials != self._original_credentials:
                    account = HydrusNetwork.Account.GenerateUnknownAccount()
                    dictionary_part[ 'account' ] = HydrusNetwork.Account.GenerateSerialisableTupleFromAccount( account )
                    network_context = ClientNetworkingContexts.NetworkContext( CC.NETWORK_CONTEXT_HYDRUS, self._service_key )
                    HG.client_controller.network_engine.session_manager.ClearSession( network_context )
                dictionary_part[ 'credentials' ] = credentials
                return dictionary_part
class _ServiceClientServerPanel( ClientGUICommon.StaticBox ):
    """Options panel for the client's locally hosted services (local booru / Client API).
    
    Presents port, non-local access, CORS, request logging, UPnP, and bandwidth
    settings and serialises them back out of GetValue as a dictionary fragment.
    """
    
    def __init__( self, parent, service_type, dictionary ):
        
        ClientGUICommon.StaticBox.__init__( self, parent, 'client api' )
        
        self._client_server_options_panel = ClientGUICommon.StaticBox( self, 'options' )
        
        # NOTE(review): if service_type is neither of these two, 'name' and
        # 'default_port' are never bound and the format calls below will raise —
        # presumably callers only construct this panel for these service types
        if service_type == HC.LOCAL_BOORU:
            
            name = 'local booru'
            default_port = 45868
            
        elif service_type == HC.CLIENT_API_SERVICE:
            
            name = 'client api'
            default_port = 45869
            
        
        port_name = '{} local port'.format( name )
        none_phrase = 'do not run {} service'.format( name )
        
        self._port = ClientGUICommon.NoneableSpinCtrl( self._client_server_options_panel, port_name, none_phrase = none_phrase, min = 1, max = 65535 )
        
        self._allow_non_local_connections = wx.CheckBox( self._client_server_options_panel, label = 'allow non-local connections' )
        
        self._support_cors = wx.CheckBox( self._client_server_options_panel, label = 'support CORS headers' )
        self._support_cors.SetToolTip( 'Have this server support Cross-Origin Resource Sharing, which allows web browsers to access it off other domains. Turn this on if you want to access this service through a web-based wrapper (e.g. a booru wrapper) hosted on another domain.' )
        
        self._log_requests = wx.CheckBox( self._client_server_options_panel, label = 'log requests' )
        self._log_requests.SetToolTip( 'Hydrus server services will write a brief anonymous line to the log for every request made, but for the client services this tends to be a bit spammy. You probably want this off unless you are testing something.' )
        
        self._upnp = ClientGUICommon.NoneableSpinCtrl( self._client_server_options_panel, 'upnp port', none_phrase = 'do not forward port', max = 65535 )
        
        self._bandwidth_rules = ClientGUIControls.BandwidthRulesCtrl( self._client_server_options_panel, dictionary[ 'bandwidth_rules' ] )
        
        #
        
        # seed the noneable port ctrls with the service default first, so that
        # if the stored value below is None, toggling the ctrl back on starts
        # from a sensible port rather than the spin minimum
        self._port.SetValue( default_port )
        self._upnp.SetValue( default_port )
        
        # then apply the actual stored settings
        self._port.SetValue( dictionary[ 'port' ] )
        self._upnp.SetValue( dictionary[ 'upnp_port' ] )
        
        self._allow_non_local_connections.SetValue( dictionary[ 'allow_non_local_connections' ] )
        self._support_cors.SetValue( dictionary[ 'support_cors' ] )
        self._log_requests.SetValue( dictionary[ 'log_requests' ] )
        
        #
        
        self._client_server_options_panel.Add( self._port, CC.FLAGS_EXPAND_PERPENDICULAR )
        self._client_server_options_panel.Add( self._allow_non_local_connections, CC.FLAGS_EXPAND_PERPENDICULAR )
        self._client_server_options_panel.Add( self._support_cors, CC.FLAGS_EXPAND_PERPENDICULAR )
        self._client_server_options_panel.Add( self._log_requests, CC.FLAGS_EXPAND_PERPENDICULAR )
        self._client_server_options_panel.Add( self._upnp, CC.FLAGS_EXPAND_PERPENDICULAR )
        self._client_server_options_panel.Add( self._bandwidth_rules, CC.FLAGS_EXPAND_BOTH_WAYS )
        
        self.Add( self._client_server_options_panel, CC.FLAGS_EXPAND_BOTH_WAYS )
        
        self._allow_non_local_connections.Bind( wx.EVT_CHECKBOX, self.EventCheckBox )
        
        # NOTE(review): _UpdateControls is not called here, so the upnp ctrl's
        # enabled state is not synced with the checkbox until it is first toggled
    
    def _UpdateControls( self ):
        # direct non-local access and UPnP port-forwarding are treated as
        # mutually exclusive: allowing non-local connections clears the upnp port
        
        if self._allow_non_local_connections.GetValue():
            
            self._upnp.SetValue( None )
            self._upnp.Disable()
            
        else:
            
            self._upnp.Enable()
            
        
    
    def EventCheckBox( self, event ):
        
        self._UpdateControls()
        
    
    def GetValue( self ):
        """Return the dictionary fragment holding the panel's current settings."""
        
        dictionary_part = {}
        
        dictionary_part[ 'port' ] = self._port.GetValue()
        dictionary_part[ 'upnp_port' ] = self._upnp.GetValue()
        dictionary_part[ 'allow_non_local_connections' ] = self._allow_non_local_connections.GetValue()
        dictionary_part[ 'support_cors' ] = self._support_cors.GetValue()
        dictionary_part[ 'log_requests' ] = self._log_requests.GetValue()
        dictionary_part[ 'bandwidth_rules' ] = self._bandwidth_rules.GetValue()
        
        return dictionary_part
class _ServiceTagPanel( ClientGUICommon.StaticBox ):
    """Placeholder panel for tag services: currently just an explanatory label.
    
    GetValue returns an empty dictionary fragment — there is nothing to edit yet.
    """
    
    def __init__( self, parent, dictionary ):
        
        ClientGUICommon.StaticBox.__init__( self, parent, 'tags' )
        
        self._st = ClientGUICommon.BetterStaticText( self )
        
        # the following triple-quoted literal is inert at runtime: it is disabled
        # legacy tag-archive-sync code, kept for a planned future rework
        '''
        if service_type in HC.TAG_SERVICES:
            self._archive_panel = ClientGUICommon.StaticBox( self, 'archive synchronisation' )
            self._archive_sync = wx.ListBox( self._archive_panel, size = ( -1, 100 ) )
            self._archive_sync_add = wx.Button( self._archive_panel, label = 'add' )
            self._archive_sync_add.Bind( wx.EVT_BUTTON, self.EventArchiveAdd )
            self._archive_sync_edit = wx.Button( self._archive_panel, label = 'edit' )
            self._archive_sync_edit.Bind( wx.EVT_BUTTON, self.EventArchiveEdit )
            self._archive_sync_remove = wx.Button( self._archive_panel, label = 'remove' )
            self._archive_sync_remove.Bind( wx.EVT_BUTTON, self.EventArchiveRemove )
        if service_type in HC.TAG_SERVICES:
            for ( portable_hta_path, namespaces ) in info[ 'tag_archive_sync' ].items():
                name_to_display = self._GetArchiveNameToDisplay( portable_hta_path, namespaces )
                self._archive_sync.Append( name_to_display, ( portable_hta_path, namespaces ) )
        if service_type in HC.TAG_SERVICES:
            tag_archives = {}
            for i in range( self._archive_sync.GetCount() ):
                ( portable_hta_path, namespaces ) = self._archive_sync.GetClientData( i )
                tag_archives[ portable_hta_path ] = namespaces
            info[ 'tag_archive_sync' ] = tag_archives
        '''
        #
        
        self._st.SetLabelText( 'This is a tag service. This box will get regain tag archive options in a future update.' )
        
        #
        
        self.Add( self._st, CC.FLAGS_EXPAND_PERPENDICULAR )
    
    def GetValue( self ):
        # nothing to serialise yet
        
        dictionary_part = {}
        
        return dictionary_part
class _ServiceRatingsPanel( ClientGUICommon.StaticBox ):
    """Edit panel for rating service display options: star shape and the four state colours.
    
    Each of LIKE/DISLIKE/NULL/MIXED gets a border and a fill colour picker.
    """
    
    def __init__( self, parent, dictionary ):
        
        ClientGUICommon.StaticBox.__init__( self, parent, 'ratings' )
        
        self._shape = ClientGUICommon.BetterChoice( self )
        
        self._shape.Append( 'circle', ClientRatings.CIRCLE )
        self._shape.Append( 'square', ClientRatings.SQUARE )
        self._shape.Append( 'star', ClientRatings.STAR )
        
        # colour_type -> ( border picker, fill picker )
        self._colour_ctrls = {}
        
        for colour_type in [ ClientRatings.LIKE, ClientRatings.DISLIKE, ClientRatings.NULL, ClientRatings.MIXED ]:
            
            border_ctrl = ClientGUICommon.BetterColourControl( self )
            fill_ctrl = ClientGUICommon.BetterColourControl( self )
            
            border_ctrl.SetMaxSize( ( 20, -1 ) )
            fill_ctrl.SetMaxSize( ( 20, -1 ) )
            
            self._colour_ctrls[ colour_type ] = ( border_ctrl, fill_ctrl )
            
        
        #
        
        self._shape.SelectClientData( dictionary[ 'shape' ] )
        
        # NOTE(review): this iterates dictionary[ 'colours' ] directly and unpacks
        # pairs, so it assumes an iterable of ( colour_type, ( border, fill ) )
        # tuples rather than a plain dict (a plain dict would yield bare keys
        # here) — confirm against the service dictionary's serialised form
        for ( colour_type, ( border_rgb, fill_rgb ) ) in dictionary[ 'colours' ]:
            
            ( border_ctrl, fill_ctrl ) = self._colour_ctrls[ colour_type ]
            
            border_ctrl.SetColour( wx.Colour( *border_rgb ) )
            fill_ctrl.SetColour( wx.Colour( *fill_rgb ) )
            
        
        #
        
        rows = []
        
        rows.append( ( 'shape: ', self._shape ) )
        
        for colour_type in [ ClientRatings.LIKE, ClientRatings.DISLIKE, ClientRatings.NULL, ClientRatings.MIXED ]:
            
            ( border_ctrl, fill_ctrl ) = self._colour_ctrls[ colour_type ]
            
            hbox = wx.BoxSizer( wx.HORIZONTAL )
            
            hbox.Add( border_ctrl, CC.FLAGS_VCENTER )
            hbox.Add( fill_ctrl, CC.FLAGS_VCENTER )
            
            if colour_type == ClientRatings.LIKE: colour_text = 'liked'
            elif colour_type == ClientRatings.DISLIKE: colour_text = 'disliked'
            elif colour_type == ClientRatings.NULL: colour_text = 'not rated'
            elif colour_type == ClientRatings.MIXED: colour_text = 'a mixture of ratings'
            
            rows.append( ( 'border/fill for ' + colour_text + ': ', hbox ) )
            
        
        gridbox = ClientGUICommon.WrapInGrid( self, rows )
        
        self.Add( gridbox, CC.FLAGS_EXPAND_PERPENDICULAR )
    
    def GetValue( self ):
        """Return the dictionary fragment with the chosen shape and rgb colour pairs."""
        
        dictionary_part = {}
        
        dictionary_part[ 'shape' ] = self._shape.GetChoice()
        
        # note this serialises 'colours' as a plain dict of
        # colour_type -> ( border_rgb, fill_rgb ) tuples
        dictionary_part[ 'colours' ] = {}
        
        for ( colour_type, ( border_ctrl, fill_ctrl ) ) in list(self._colour_ctrls.items()):
            
            border_colour = border_ctrl.GetColour()
            
            border_rgb = ( border_colour.Red(), border_colour.Green(), border_colour.Blue() )
            
            fill_colour = fill_ctrl.GetColour()
            
            fill_rgb = ( fill_colour.Red(), fill_colour.Green(), fill_colour.Blue() )
            
            dictionary_part[ 'colours' ][ colour_type ] = ( border_rgb, fill_rgb )
            
        
        return dictionary_part
class _ServiceRatingsNumericalPanel( ClientGUICommon.StaticBox ):
    """Edit panel for numerical rating service options: star count and zero-rating."""
    
    def __init__( self, parent, dictionary ):
        
        ClientGUICommon.StaticBox.__init__( self, parent, 'numerical ratings' )
        
        self._num_stars = wx.SpinCtrl( self, min = 1, max = 20 )
        self._allow_zero = wx.CheckBox( self )
        
        #
        
        self._num_stars.SetValue( dictionary[ 'num_stars' ] )
        self._allow_zero.SetValue( dictionary[ 'allow_zero' ] )
        
        #
        
        rows = [
            ( 'number of \'stars\': ', self._num_stars ),
            ( 'allow a zero rating: ', self._allow_zero )
        ]
        
        gridbox = ClientGUICommon.WrapInGrid( self, rows )
        
        self.Add( gridbox, CC.FLAGS_EXPAND_PERPENDICULAR )
    
    def GetValue( self ):
        """Return the dictionary fragment with the current star count and zero-rating flag."""
        
        num_stars = self._num_stars.GetValue()
        allow_zero = self._allow_zero.GetValue()
        
        # a one-star service that forbade zero would only ever have one possible
        # rating, so force zero on in that degenerate case
        if num_stars == 1:
            
            allow_zero = True
            
        
        return { 'num_stars': num_stars, 'allow_zero': allow_zero }
class _ServiceIPFSPanel( ClientGUICommon.StaticBox ):
    """Edit panel for IPFS service options (currently just the multihash prefix)."""
    
    def __init__( self, parent, dictionary ):
        
        ClientGUICommon.StaticBox.__init__( self, parent, 'ipfs' )
        
        self._multihash_prefix = wx.TextCtrl( self )
        
        gateway_examples = os.linesep.join( ( 'http://127.0.0.1:8080/ipfs/', 'http://ipfs.io/ipfs/' ) )
        
        # explain that the prefix lets copy-multihash produce a full gateway url
        tooltip = ( os.linesep * 2 ).join( (
            'When you tell the client to copy the ipfs multihash to your clipboard, it will prefix it with this.',
            'Use this if you would rather copy a full gateway url with that action. For instance, you could put here:',
            gateway_examples
        ) )
        
        self._multihash_prefix.SetToolTip( tooltip )
        
        #
        
        self._multihash_prefix.SetValue( dictionary[ 'multihash_prefix' ] )
        
        #
        
        self.Add( ClientGUICommon.WrapInText( self._multihash_prefix, self, 'multihash prefix: ' ), CC.FLAGS_EXPAND_PERPENDICULAR )
    
    def GetValue( self ):
        """Return the dictionary fragment holding the current prefix text."""
        
        return { 'multihash_prefix': self._multihash_prefix.GetValue() }
class ManageOptionsPanel( ClientGUIScrolledPanels.ManagePanel ):
def __init__( self, parent ):
    """Build the manage-options dialog panel: one listbook page per options category.
    
    Each page is an inner panel class that exposes UpdateOptions for save-time.
    """
    
    ClientGUIScrolledPanels.ManagePanel.__init__( self, parent )
    
    self._new_options = HG.client_controller.new_options
    
    self._listbook = ClientGUICommon.ListBook( self )
    
    self._listbook.AddPage( 'gui', 'gui', self._GUIPanel( self._listbook ) ) # leave this at the top, to make it default page
    self._listbook.AddPage( 'gui pages', 'gui pages', self._GUIPagesPanel( self._listbook, self._new_options ) )
    self._listbook.AddPage( 'connection', 'connection', self._ConnectionPanel( self._listbook ) )
    self._listbook.AddPage( 'files and trash', 'files and trash', self._FilesAndTrashPanel( self._listbook ) )
    self._listbook.AddPage( 'speed and memory', 'speed and memory', self._SpeedAndMemoryPanel( self._listbook, self._new_options ) )
    self._listbook.AddPage( 'maintenance and processing', 'maintenance and processing', self._MaintenanceAndProcessingPanel( self._listbook ) )
    self._listbook.AddPage( 'media', 'media', self._MediaPanel( self._listbook ) )
    #self._listbook.AddPage( 'sound', 'sound', self._SoundPanel( self._listbook ) )
    self._listbook.AddPage( 'default system predicates', 'default system predicates', self._DefaultFileSystemPredicatesPanel( self._listbook, self._new_options ) )
    self._listbook.AddPage( 'colours', 'colours', self._ColoursPanel( self._listbook ) )
    self._listbook.AddPage( 'regex favourites', 'regex favourites', self._RegexPanel( self._listbook ) )
    self._listbook.AddPage( 'sort/collect', 'sort/collect', self._SortCollectPanel( self._listbook ) )
    self._listbook.AddPage( 'downloading', 'downloading', self._DownloadingPanel( self._listbook, self._new_options ) )
    self._listbook.AddPage( 'duplicates', 'duplicates', self._DuplicatesPanel( self._listbook, self._new_options ) )
    self._listbook.AddPage( 'importing', 'importing', self._ImportingPanel( self._listbook, self._new_options ) )
    self._listbook.AddPage( 'tag presentation', 'tag presentation', self._TagPresentationPanel( self._listbook, self._new_options ) )
    self._listbook.AddPage( 'tag suggestions', 'tag suggestions', self._TagSuggestionsPanel( self._listbook, self._new_options ) )
    self._listbook.AddPage( 'tags', 'tags', self._TagsPanel( self._listbook, self._new_options ) )
    self._listbook.AddPage( 'thumbnails', 'thumbnails', self._ThumbnailsPanel( self._listbook, self._new_options ) )
    
    #
    
    vbox = wx.BoxSizer( wx.VERTICAL )
    
    vbox.Add( self._listbook, CC.FLAGS_EXPAND_BOTH_WAYS )
    
    self.SetSizer( vbox )
class _ColoursPanel( wx.Panel ):
    """Options page for the two coloursets ('default'/'darkmode') and which is active.
    
    Builds one notebook tab of colour pickers per colourset; UpdateOptions writes
    every picker's colour and the chosen colourset back to the options object.
    """
    
    def __init__( self, parent ):
        
        wx.Panel.__init__( self, parent )
        
        self._new_options = HG.client_controller.new_options
        
        coloursets_panel = ClientGUICommon.StaticBox( self, 'coloursets' )
        
        self._current_colourset = ClientGUICommon.BetterChoice( coloursets_panel )
        
        self._current_colourset.Append( 'default', 'default' )
        self._current_colourset.Append( 'darkmode', 'darkmode' )
        
        self._current_colourset.SelectClientData( self._new_options.GetString( 'current_colourset' ) )
        
        self._notebook = wx.Notebook( coloursets_panel )
        
        # colourset name -> { colour_type : colour picker ctrl }
        self._gui_colours = {}
        
        for colourset in ( 'default', 'darkmode' ):
            
            self._gui_colours[ colourset ] = {}
            
            colour_panel = wx.Panel( self._notebook )
            
            colour_types = []
            
            colour_types.append( CC.COLOUR_THUMB_BACKGROUND )
            colour_types.append( CC.COLOUR_THUMB_BACKGROUND_SELECTED )
            colour_types.append( CC.COLOUR_THUMB_BACKGROUND_REMOTE )
            colour_types.append( CC.COLOUR_THUMB_BACKGROUND_REMOTE_SELECTED )
            colour_types.append( CC.COLOUR_THUMB_BORDER )
            colour_types.append( CC.COLOUR_THUMB_BORDER_SELECTED )
            colour_types.append( CC.COLOUR_THUMB_BORDER_REMOTE )
            colour_types.append( CC.COLOUR_THUMB_BORDER_REMOTE_SELECTED )
            colour_types.append( CC.COLOUR_THUMBGRID_BACKGROUND )
            colour_types.append( CC.COLOUR_AUTOCOMPLETE_BACKGROUND )
            colour_types.append( CC.COLOUR_MEDIA_BACKGROUND )
            colour_types.append( CC.COLOUR_MEDIA_TEXT )
            colour_types.append( CC.COLOUR_TAGS_BOX )
            
            for colour_type in colour_types:
                
                ctrl = ClientGUICommon.BetterColourControl( colour_panel )
                
                ctrl.SetMaxSize( ( 20, -1 ) )
                
                ctrl.SetColour( self._new_options.GetColour( colour_type, colourset ) )
                
                self._gui_colours[ colourset ][ colour_type ] = ctrl
                
            
            #
            
            rows = []
            
            # thumbnail background pickers grouped on one row
            hbox = wx.BoxSizer( wx.HORIZONTAL )
            
            hbox.Add( self._gui_colours[ colourset ][ CC.COLOUR_THUMB_BACKGROUND ], CC.FLAGS_VCENTER )
            hbox.Add( self._gui_colours[ colourset ][ CC.COLOUR_THUMB_BACKGROUND_SELECTED ], CC.FLAGS_VCENTER )
            hbox.Add( self._gui_colours[ colourset ][ CC.COLOUR_THUMB_BACKGROUND_REMOTE ], CC.FLAGS_VCENTER )
            hbox.Add( self._gui_colours[ colourset ][ CC.COLOUR_THUMB_BACKGROUND_REMOTE_SELECTED ], CC.FLAGS_VCENTER )
            
            rows.append( ( 'thumbnail background (local: normal/selected, remote: normal/selected): ', hbox ) )
            
            # thumbnail border pickers grouped on one row
            hbox = wx.BoxSizer( wx.HORIZONTAL )
            
            hbox.Add( self._gui_colours[ colourset ][ CC.COLOUR_THUMB_BORDER ], CC.FLAGS_VCENTER )
            hbox.Add( self._gui_colours[ colourset ][ CC.COLOUR_THUMB_BORDER_SELECTED ], CC.FLAGS_VCENTER )
            hbox.Add( self._gui_colours[ colourset ][ CC.COLOUR_THUMB_BORDER_REMOTE ], CC.FLAGS_VCENTER )
            hbox.Add( self._gui_colours[ colourset ][ CC.COLOUR_THUMB_BORDER_REMOTE_SELECTED ], CC.FLAGS_VCENTER )
            
            rows.append( ( 'thumbnail border (local: normal/selected, remote: normal/selected): ', hbox ) )
            
            rows.append( ( 'thumbnail grid background: ', self._gui_colours[ colourset ][ CC.COLOUR_THUMBGRID_BACKGROUND ] ) )
            rows.append( ( 'autocomplete background: ', self._gui_colours[ colourset ][ CC.COLOUR_AUTOCOMPLETE_BACKGROUND ] ) )
            rows.append( ( 'media viewer background: ', self._gui_colours[ colourset ][ CC.COLOUR_MEDIA_BACKGROUND ] ) )
            rows.append( ( 'media viewer text: ', self._gui_colours[ colourset ][ CC.COLOUR_MEDIA_TEXT ] ) )
            rows.append( ( 'tags box background: ', self._gui_colours[ colourset ][ CC.COLOUR_TAGS_BOX ] ) )
            
            gridbox = ClientGUICommon.WrapInGrid( colour_panel, rows )
            
            colour_panel.SetSizer( gridbox )
            
            # the 'default' tab is shown initially, regardless of active colourset
            select = colourset == 'default'
            
            self._notebook.AddPage( colour_panel, colourset, select = select )
            
        
        #
        
        coloursets_panel.Add( ClientGUICommon.WrapInText( self._current_colourset, coloursets_panel, 'current colourset: ' ), CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
        coloursets_panel.Add( self._notebook, CC.FLAGS_EXPAND_BOTH_WAYS )
        
        vbox = wx.BoxSizer( wx.VERTICAL )
        
        vbox.Add( coloursets_panel, CC.FLAGS_EXPAND_BOTH_WAYS )
        
        self.SetSizer( vbox )
    
    def UpdateOptions( self ):
        """Write every colour picker's colour and the chosen colourset back to the options."""
        
        for colourset in self._gui_colours:
            
            for ( colour_type, ctrl ) in list(self._gui_colours[ colourset ].items()):
                
                colour = ctrl.GetColour()
                
                self._new_options.SetColour( colour_type, colourset, colour )
                
            
        
        self._new_options.SetString( 'current_colourset', self._current_colourset.GetChoice() )
class _ConnectionPanel( wx.Panel ):
    """Options page for network settings: https verification, proxies, timeouts, job limits."""
    
    def __init__( self, parent ):
        
        wx.Panel.__init__( self, parent )
        
        general = ClientGUICommon.StaticBox( self, 'general' )
        
        self._verify_regular_https = wx.CheckBox( general )
        
        self._external_host = wx.TextCtrl( self )
        self._external_host.SetToolTip( 'If you have trouble parsing your external ip using UPnP, you can force it to be this.' )
        
        self._network_timeout = wx.SpinCtrl( self, min = 3, max = 300 )
        self._network_timeout.SetToolTip( 'If a network connection cannot be made in this duration or, if once started, it experiences uninterrupted inactivity for six times this duration, it will be abandoned.' )
        
        self._max_network_jobs = wx.SpinCtrl( self, min = 1, max = 30 )
        self._max_network_jobs_per_domain = wx.SpinCtrl( self, min = 1, max = 5 )
        
        proxy_panel = ClientGUICommon.StaticBox( self, 'proxy settings' )
        
        self._http_proxy = ClientGUICommon.NoneableTextCtrl( proxy_panel )
        self._https_proxy = ClientGUICommon.NoneableTextCtrl( proxy_panel )
        
        #
        
        self._new_options = HG.client_controller.new_options
        
        self._verify_regular_https.SetValue( self._new_options.GetBoolean( 'verify_regular_https' ) )
        
        self._http_proxy.SetValue( self._new_options.GetNoneableString( 'http_proxy' ) )
        self._https_proxy.SetValue( self._new_options.GetNoneableString( 'https_proxy' ) )
        
        self._network_timeout.SetValue( self._new_options.GetInteger( 'network_timeout' ) )
        self._max_network_jobs.SetValue( self._new_options.GetInteger( 'max_network_jobs' ) )
        self._max_network_jobs_per_domain.SetValue( self._new_options.GetInteger( 'max_network_jobs_per_domain' ) )
        
        # external_host lives in the old-style options dict; None means no override
        if HC.options[ 'external_host' ] is not None:
            
            self._external_host.SetValue( HC.options[ 'external_host' ] )
            
        
        #
        
        rows = []
        rows.append( ( 'BUGFIX: verify regular https traffic:', self._verify_regular_https ) )
        
        gridbox = ClientGUICommon.WrapInGrid( general, rows )
        
        general.Add( gridbox, CC.FLAGS_EXPAND_SIZER_BOTH_WAYS )
        
        # NOTE(review): 'take affect' in this user-facing string looks like a typo
        # for 'take effect' — left as-is here since this is a doc-only pass
        text = 'Enter strings such as "http://ip:port" or "http://user:pass@ip:port". It should take affect immediately on dialog ok.'
        text += os.linesep * 2
        
        if ClientNetworkingSessions.SOCKS_PROXY_OK:
            
            text += 'It looks like you have socks support! You should also be able to enter (socks4 or) "socks5://ip:port".'
            
        else:
            
            text += 'It does not look like you have socks support! If you want it, try adding "pysocks" (or "requests[socks]")!'
            
        
        proxy_panel.Add( wx.StaticText( proxy_panel, label = text ), CC.FLAGS_EXPAND_PERPENDICULAR )
        
        rows = []
        rows.append( ( 'http: ', self._http_proxy ) )
        rows.append( ( 'https: ', self._https_proxy ) )
        
        gridbox = ClientGUICommon.WrapInGrid( proxy_panel, rows )
        
        proxy_panel.Add( gridbox, CC.FLAGS_EXPAND_SIZER_BOTH_WAYS )
        
        #
        
        rows = []
        rows.append( ( 'network timeout (seconds): ', self._network_timeout ) )
        rows.append( ( 'max number of simultaneous active network jobs: ', self._max_network_jobs ) )
        rows.append( ( 'max number of simultaneous active network jobs per domain: ', self._max_network_jobs_per_domain ) )
        rows.append( ( 'external ip/host override: ', self._external_host ) )
        
        gridbox = ClientGUICommon.WrapInGrid( self, rows )
        
        vbox = wx.BoxSizer( wx.VERTICAL )
        
        vbox.Add( general, CC.FLAGS_EXPAND_PERPENDICULAR )
        vbox.Add( gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
        vbox.Add( proxy_panel, CC.FLAGS_EXPAND_PERPENDICULAR )
        
        self.SetSizer( vbox )
    
    def UpdateOptions( self ):
        """Write the panel's controls back into the options objects."""
        
        self._new_options.SetBoolean( 'verify_regular_https', self._verify_regular_https.GetValue() )
        
        self._new_options.SetNoneableString( 'http_proxy', self._http_proxy.GetValue() )
        self._new_options.SetNoneableString( 'https_proxy', self._https_proxy.GetValue() )
        
        external_host = self._external_host.GetValue()
        
        # an empty textctrl means 'no override', stored as None
        if external_host == '':
            
            external_host = None
            
        
        HC.options[ 'external_host' ] = external_host
        
        self._new_options.SetInteger( 'network_timeout', self._network_timeout.GetValue() )
        self._new_options.SetInteger( 'max_network_jobs', self._max_network_jobs.GetValue() )
        self._new_options.SetInteger( 'max_network_jobs_per_domain', self._max_network_jobs_per_domain.GetValue() )
class _DownloadingPanel( wx.Panel ):
    """Options page for gallery downloader, subscription, watcher and misc download settings."""
    
    def __init__( self, parent, new_options ):
        
        wx.Panel.__init__( self, parent )
        
        self._new_options = new_options
        
        #
        
        gallery_downloader = ClientGUICommon.StaticBox( self, 'gallery downloader' )
        
        gug_key_and_name = HG.client_controller.network_engine.domain_manager.GetDefaultGUGKeyAndName()
        
        self._default_gug = ClientGUIImport.GUGKeyAndNameSelector( gallery_downloader, gug_key_and_name )
        
        self._gallery_page_wait_period_pages = wx.SpinCtrl( gallery_downloader, min = 1, max = 120 )
        self._gallery_file_limit = ClientGUICommon.NoneableSpinCtrl( gallery_downloader, none_phrase = 'no limit', min = 1, max = 1000000 )
        
        self._highlight_new_query = wx.CheckBox( gallery_downloader )
        
        #
        
        subscriptions = ClientGUICommon.StaticBox( self, 'subscriptions' )
        
        self._gallery_page_wait_period_subscriptions = wx.SpinCtrl( subscriptions, min = 1, max = 30 )
        self._max_simultaneous_subscriptions = wx.SpinCtrl( subscriptions, min = 1, max = 100 )
        
        self._process_subs_in_random_order = wx.CheckBox( subscriptions )
        self._process_subs_in_random_order.SetToolTip( 'Processing in random order is useful whenever bandwidth is tight, as it stops an \'aardvark\' subscription from always getting first whack at what is available. Otherwise, they will be processed in alphabetical order.' )
        
        checker_options = self._new_options.GetDefaultSubscriptionCheckerOptions()
        
        self._subscription_checker_options = ClientGUIImport.CheckerOptionsButton( subscriptions, checker_options )
        
        #
        
        watchers = ClientGUICommon.StaticBox( self, 'watchers' )
        
        self._watcher_page_wait_period = wx.SpinCtrl( watchers, min = 1, max = 120 )
        self._highlight_new_watcher = wx.CheckBox( watchers )
        
        # note: 'checker_options' local is reused here for the watcher defaults
        checker_options = self._new_options.GetDefaultWatcherCheckerOptions()
        
        self._watcher_checker_options = ClientGUIImport.CheckerOptionsButton( watchers, checker_options )
        
        #
        
        misc = ClientGUICommon.StaticBox( self, 'misc' )
        
        self._pause_character = wx.TextCtrl( misc )
        self._stop_character = wx.TextCtrl( misc )
        self._show_new_on_file_seed_short_summary = wx.CheckBox( misc )
        self._show_deleted_on_file_seed_short_summary = wx.CheckBox( misc )
        
        self._subscription_network_error_delay = ClientGUITime.TimeDeltaButton( misc, min = 600, days = True, hours = True, minutes = True )
        self._subscription_other_error_delay = ClientGUITime.TimeDeltaButton( misc, min = 600, days = True, hours = True, minutes = True )
        self._downloader_network_error_delay = ClientGUITime.TimeDeltaButton( misc, min = 600, days = True, hours = True, minutes = True )
        
        #
        
        # shared tooltip for all three gallery-page wait-period spinctrls
        gallery_page_tt = 'Gallery page fetches are heavy requests with unusual fetch-time requirements. It is important they not wait too long, but it is also useful to throttle them:'
        gallery_page_tt += os.linesep * 2
        gallery_page_tt += '- So they do not compete with file downloads for bandwidth, leading to very unbalanced 20/4400-type queues.'
        gallery_page_tt += os.linesep
        gallery_page_tt += '- So you do not get 1000 items in your queue before realising you did not like that tag anyway.'
        gallery_page_tt += os.linesep
        gallery_page_tt += '- To give servers a break (some gallery pages can be CPU-expensive to generate).'
        gallery_page_tt += os.linesep * 2
        gallery_page_tt += 'These delays/lots are per-domain.'
        gallery_page_tt += os.linesep * 2
        gallery_page_tt += 'If you do not understand this stuff, you can just leave it alone.'
        
        self._gallery_page_wait_period_pages.SetValue( self._new_options.GetInteger( 'gallery_page_wait_period_pages' ) )
        self._gallery_page_wait_period_pages.SetToolTip( gallery_page_tt )
        # gallery_file_limit is one of the legacy options stored in HC.options
        self._gallery_file_limit.SetValue( HC.options[ 'gallery_file_limit' ] )
        self._highlight_new_query.SetValue( self._new_options.GetBoolean( 'highlight_new_query' ) )
        
        self._gallery_page_wait_period_subscriptions.SetValue( self._new_options.GetInteger( 'gallery_page_wait_period_subscriptions' ) )
        self._gallery_page_wait_period_subscriptions.SetToolTip( gallery_page_tt )
        self._max_simultaneous_subscriptions.SetValue( self._new_options.GetInteger( 'max_simultaneous_subscriptions' ) )
        self._process_subs_in_random_order.SetValue( self._new_options.GetBoolean( 'process_subs_in_random_order' ) )
        
        self._pause_character.SetValue( self._new_options.GetString( 'pause_character' ) )
        self._stop_character.SetValue( self._new_options.GetString( 'stop_character' ) )
        self._show_new_on_file_seed_short_summary.SetValue( self._new_options.GetBoolean( 'show_new_on_file_seed_short_summary' ) )
        self._show_deleted_on_file_seed_short_summary.SetValue( self._new_options.GetBoolean( 'show_deleted_on_file_seed_short_summary' ) )
        
        self._watcher_page_wait_period.SetValue( self._new_options.GetInteger( 'watcher_page_wait_period' ) )
        self._watcher_page_wait_period.SetToolTip( gallery_page_tt )
        self._highlight_new_watcher.SetValue( self._new_options.GetBoolean( 'highlight_new_watcher' ) )
        
        self._subscription_network_error_delay.SetValue( self._new_options.GetInteger( 'subscription_network_error_delay' ) )
        self._subscription_other_error_delay.SetValue( self._new_options.GetInteger( 'subscription_other_error_delay' ) )
        self._downloader_network_error_delay.SetValue( self._new_options.GetInteger( 'downloader_network_error_delay' ) )
        
        #
        
        rows = []
        
        rows.append( ( 'Default download source:', self._default_gug ) )
        rows.append( ( 'If new query entered and no current highlight, highlight the new query:', self._highlight_new_query ) )
        rows.append( ( 'Additional fixed time (in seconds) to wait between gallery page fetches:', self._gallery_page_wait_period_pages ) )
        rows.append( ( 'By default, stop searching once this many files are found:', self._gallery_file_limit ) )
        
        gridbox = ClientGUICommon.WrapInGrid( gallery_downloader, rows )
        
        gallery_downloader.Add( gridbox, CC.FLAGS_EXPAND_SIZER_BOTH_WAYS )
        
        #
        
        rows = []
        
        rows.append( ( 'Additional fixed time (in seconds) to wait between gallery page fetches:', self._gallery_page_wait_period_subscriptions ) )
        rows.append( ( 'Maximum number of subscriptions that can sync simultaneously:', self._max_simultaneous_subscriptions ) )
        rows.append( ( 'Sync subscriptions in random order:', self._process_subs_in_random_order ) )
        
        gridbox = ClientGUICommon.WrapInGrid( subscriptions, rows )
        
        subscriptions.Add( gridbox, CC.FLAGS_EXPAND_SIZER_BOTH_WAYS )
        subscriptions.Add( self._subscription_checker_options, CC.FLAGS_EXPAND_PERPENDICULAR )
        
        #
        
        rows = []
        
        rows.append( ( 'Additional fixed time (in seconds) to wait between watcher checks:', self._watcher_page_wait_period ) )
        rows.append( ( 'If new watcher entered and no current highlight, highlight the new watcher:', self._highlight_new_watcher ) )
        
        gridbox = ClientGUICommon.WrapInGrid( watchers, rows )
        
        watchers.Add( gridbox, CC.FLAGS_EXPAND_SIZER_BOTH_WAYS )
        watchers.Add( self._watcher_checker_options, CC.FLAGS_EXPAND_PERPENDICULAR )
        
        #
        
        rows = []
        
        rows.append( ( 'Pause character:', self._pause_character ) )
        rows.append( ( 'Stop character:', self._stop_character ) )
        rows.append( ( 'Show a \'N\' (for \'new\') count on short file import summaries:', self._show_new_on_file_seed_short_summary ) )
        rows.append( ( 'Show a \'D\' (for \'deleted\') count on short file import summaries:', self._show_deleted_on_file_seed_short_summary ) )
        rows.append( ( 'Delay time on a gallery/watcher network error:', self._downloader_network_error_delay ) )
        rows.append( ( 'Delay time on a subscription network error:', self._subscription_network_error_delay ) )
        rows.append( ( 'Delay time on a subscription other error:', self._subscription_other_error_delay ) )
        
        gridbox = ClientGUICommon.WrapInGrid( misc, rows )
        
        misc.Add( gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
        
        #
        
        vbox = wx.BoxSizer( wx.VERTICAL )
        
        vbox.Add( gallery_downloader, CC.FLAGS_EXPAND_PERPENDICULAR )
        vbox.Add( subscriptions, CC.FLAGS_EXPAND_PERPENDICULAR )
        vbox.Add( watchers, CC.FLAGS_EXPAND_PERPENDICULAR )
        vbox.Add( misc, CC.FLAGS_EXPAND_PERPENDICULAR )
        
        self.SetSizer( vbox )
    
    def UpdateOptions( self ):
        """Write every control's value back into the options objects and the domain manager."""
        
        HG.client_controller.network_engine.domain_manager.SetDefaultGUGKeyAndName( self._default_gug.GetValue() )
        
        self._new_options.SetInteger( 'gallery_page_wait_period_pages', self._gallery_page_wait_period_pages.GetValue() )
        HC.options[ 'gallery_file_limit' ] = self._gallery_file_limit.GetValue()
        self._new_options.SetBoolean( 'highlight_new_query', self._highlight_new_query.GetValue() )
        
        self._new_options.SetInteger( 'gallery_page_wait_period_subscriptions', self._gallery_page_wait_period_subscriptions.GetValue() )
        self._new_options.SetInteger( 'max_simultaneous_subscriptions', self._max_simultaneous_subscriptions.GetValue() )
        self._new_options.SetBoolean( 'process_subs_in_random_order', self._process_subs_in_random_order.GetValue() )
        
        self._new_options.SetInteger( 'watcher_page_wait_period', self._watcher_page_wait_period.GetValue() )
        self._new_options.SetBoolean( 'highlight_new_watcher', self._highlight_new_watcher.GetValue() )
        
        self._new_options.SetDefaultWatcherCheckerOptions( self._watcher_checker_options.GetValue() )
        self._new_options.SetDefaultSubscriptionCheckerOptions( self._subscription_checker_options.GetValue() )
        
        self._new_options.SetString( 'pause_character', self._pause_character.GetValue() )
        self._new_options.SetString( 'stop_character', self._stop_character.GetValue() )
        self._new_options.SetBoolean( 'show_new_on_file_seed_short_summary', self._show_new_on_file_seed_short_summary.GetValue() )
        self._new_options.SetBoolean( 'show_deleted_on_file_seed_short_summary', self._show_deleted_on_file_seed_short_summary.GetValue() )
        
        self._new_options.SetInteger( 'subscription_network_error_delay', self._subscription_network_error_delay.GetValue() )
        self._new_options.SetInteger( 'subscription_other_error_delay', self._subscription_other_error_delay.GetValue() )
        self._new_options.SetInteger( 'downloader_network_error_delay', self._downloader_network_error_delay.GetValue() )
class _DuplicatesPanel( wx.Panel ):
def __init__( self, parent, new_options ):
wx.Panel.__init__( self, parent )
self._new_options = new_options
#
weights_panel = ClientGUICommon.StaticBox( self, 'duplicate filter comparison score weights' )
self._duplicate_comparison_score_higher_filesize = wx.SpinCtrl( weights_panel, min = 0, max = 100 )
self._duplicate_comparison_score_much_higher_filesize = wx.SpinCtrl( weights_panel, min = 0, max = 100 )
self._duplicate_comparison_score_higher_resolution = wx.SpinCtrl( weights_panel, min = 0, max = 100 )
self._duplicate_comparison_score_much_higher_resolution = wx.SpinCtrl( weights_panel, min = 0, max = 100 )
self._duplicate_comparison_score_more_tags = wx.SpinCtrl( weights_panel, min = 0, max = 100 )
self._duplicate_comparison_score_older = wx.SpinCtrl( weights_panel, min = 0, max = 100 )
#
self._duplicate_comparison_score_higher_filesize.SetValue( self._new_options.GetInteger( 'duplicate_comparison_score_higher_filesize' ) )
self._duplicate_comparison_score_much_higher_filesize.SetValue( self._new_options.GetInteger( 'duplicate_comparison_score_much_higher_filesize' ) )
self._duplicate_comparison_score_higher_resolution.SetValue( self._new_options.GetInteger( 'duplicate_comparison_score_higher_resolution' ) )
self._duplicate_comparison_score_much_higher_resolution.SetValue( self._new_options.GetInteger( 'duplicate_comparison_score_much_higher_resolution' ) )
self._duplicate_comparison_score_more_tags.SetValue( self._new_options.GetInteger( 'duplicate_comparison_score_more_tags' ) )
self._duplicate_comparison_score_older.SetValue( self._new_options.GetInteger( 'duplicate_comparison_score_older' ) )
#
rows = []
rows.append( ( 'Score for file with non-trivially higher filesize:', self._duplicate_comparison_score_higher_filesize ) )
rows.append( ( 'Score for file with more than double the filesize:', self._duplicate_comparison_score_much_higher_filesize ) )
rows.append( ( 'Score for file with higher resolution (as num pixels):', self._duplicate_comparison_score_higher_resolution ) )
rows.append( ( 'Score for file with more than double the resolution (as num pixels):', self._duplicate_comparison_score_much_higher_resolution ) )
rows.append( ( 'Score for file with more tags:', self._duplicate_comparison_score_more_tags ) )
rows.append( ( 'Score for file with non-trivially earlier import time:', self._duplicate_comparison_score_older ) )
gridbox = ClientGUICommon.WrapInGrid( weights_panel, rows )
label = 'When processing potential duplicate pairs in the duplicate filter, the client tries to present the \'best\' file first. It judges the two files on a variety of potential differences, each with a score. The file with the greatest total score is presented first. Here you can tinker with these scores.'
st = ClientGUICommon.BetterStaticText( weights_panel, label )
st.SetWrapWidth( 640 )
weights_panel.Add( st, CC.FLAGS_EXPAND_PERPENDICULAR )
weights_panel.Add( gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
#
vbox = wx.BoxSizer( wx.VERTICAL )
vbox.Add( weights_panel, CC.FLAGS_EXPAND_BOTH_WAYS )
self.SetSizer( vbox )
def UpdateOptions( self ):
    """Write the duplicate comparison score spinner values back into the options object."""
    
    # each spinner widget is stored on self under '_' + its option key, so we
    # can drive all six writes from one tuple of option names
    score_option_names = (
        'duplicate_comparison_score_higher_filesize',
        'duplicate_comparison_score_much_higher_filesize',
        'duplicate_comparison_score_higher_resolution',
        'duplicate_comparison_score_much_higher_resolution',
        'duplicate_comparison_score_more_tags',
        'duplicate_comparison_score_older'
    )
    
    for option_name in score_option_names:
        
        widget = getattr( self, '_' + option_name )
        
        self._new_options.SetInteger( option_name, widget.GetValue() )
        
    
class _ImportingPanel( wx.Panel ):
    """Options page holding the two default FileImportOptions buttons, one for
    'quiet' import contexts (import folders, subscriptions) and one for 'loud'
    page-based import contexts."""
    
    def __init__( self, parent, new_options ):
        
        wx.Panel.__init__( self, parent )
        
        self._new_options = new_options
        
        #
        
        options_box = ClientGUICommon.StaticBox( self, 'default file import options' )
        
        # imported locally, matching the original's deferred import
        from . import ClientGUIImport
        
        show_downloader_options = True
        
        self._quiet_fios = ClientGUIImport.FileImportOptionsButton( options_box, self._new_options.GetDefaultFileImportOptions( 'quiet' ), show_downloader_options )
        self._loud_fios = ClientGUIImport.FileImportOptionsButton( options_box, self._new_options.GetDefaultFileImportOptions( 'loud' ), show_downloader_options )
        
        #
        
        grid_rows = [
            ( 'For \'quiet\' import contexts like import folders and subscriptions:', self._quiet_fios ),
            ( 'For import contexts that work on pages:', self._loud_fios )
        ]
        
        grid_sizer = ClientGUICommon.WrapInGrid( options_box, grid_rows )
        
        options_box.Add( grid_sizer, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
        
        #
        
        panel_sizer = wx.BoxSizer( wx.VERTICAL )
        
        panel_sizer.Add( options_box, CC.FLAGS_EXPAND_PERPENDICULAR )
        
        self.SetSizer( panel_sizer )
        
    def UpdateOptions( self ):
        """Push both buttons' current FileImportOptions back into new_options."""
        
        for ( context, button ) in ( ( 'quiet', self._quiet_fios ), ( 'loud', self._loud_fios ) ):
            
            self._new_options.SetDefaultFileImportOptions( context, button.GetValue() )
            
        
class _MaintenanceAndProcessingPanel( wx.Panel ):
    """Options page controlling when the client runs high-cpu maintenance jobs
    (during idle time and/or on shutdown) and how often the database is vacuumed.
    
    Values are read from and written back to HC.options and
    HG.client_controller.new_options via UpdateOptions.
    """
    
    def __init__( self, parent ):
        
        wx.Panel.__init__( self, parent )
        
        self._new_options = HG.client_controller.new_options
        
        self._jobs_panel = ClientGUICommon.StaticBox( self, 'when to run high cpu jobs' )
        self._maintenance_panel = ClientGUICommon.StaticBox( self, 'maintenance period' )
        self._idle_panel = ClientGUICommon.StaticBox( self._jobs_panel, 'idle' )
        self._shutdown_panel = ClientGUICommon.StaticBox( self._jobs_panel, 'shutdown' )
        
        # idle-time controls
        
        self._idle_normal = wx.CheckBox( self._idle_panel )
        self._idle_normal.Bind( wx.EVT_CHECKBOX, self.EventIdleNormal )
        
        self._idle_period = ClientGUICommon.NoneableSpinCtrl( self._idle_panel, '', min = 1, max = 1000, multiplier = 60, unit = 'minutes', none_phrase = 'ignore normal browsing' )
        self._idle_mouse_period = ClientGUICommon.NoneableSpinCtrl( self._idle_panel, '', min = 1, max = 1000, multiplier = 60, unit = 'minutes', none_phrase = 'ignore mouse movements' )
        self._idle_cpu_max = ClientGUICommon.NoneableSpinCtrl( self._idle_panel, '', min = 5, max = 99, unit = '%', none_phrase = 'ignore cpu usage' )
        
        # shutdown controls
        
        self._idle_shutdown = ClientGUICommon.BetterChoice( self._shutdown_panel )
        
        for idle_id in ( CC.IDLE_NOT_ON_SHUTDOWN, CC.IDLE_ON_SHUTDOWN, CC.IDLE_ON_SHUTDOWN_ASK_FIRST ):
            
            self._idle_shutdown.Append( CC.idle_string_lookup[ idle_id ], idle_id )
            
        
        self._idle_shutdown.Bind( wx.EVT_CHOICE, self.EventIdleShutdown )
        
        self._idle_shutdown_max_minutes = wx.SpinCtrl( self._shutdown_panel, min = 1, max = 1440 )
        self._shutdown_work_period = ClientGUITime.TimeDeltaButton( self._shutdown_panel, min = 3600, days = True, hours = True )
        
        # vacuum control
        
        self._maintenance_vacuum_period_days = ClientGUICommon.NoneableSpinCtrl( self._maintenance_panel, '', min = 28, max = 365, none_phrase = 'do not automatically vacuum' )
        
        tts = 'Vacuuming is a kind of full defrag of the database\'s internal page table. It can take a long time (1MB/s) on a slow drive and does not need to be done often, so feel free to set this at 90 days+.'
        
        self._maintenance_vacuum_period_days.SetToolTip( tts )
        
        # initialise widget values from the stored options
        
        self._idle_normal.SetValue( HC.options[ 'idle_normal' ] )
        self._idle_period.SetValue( HC.options[ 'idle_period' ] )
        self._idle_mouse_period.SetValue( HC.options[ 'idle_mouse_period' ] )
        self._idle_cpu_max.SetValue( HC.options[ 'idle_cpu_max' ] )
        self._idle_shutdown.SelectClientData( HC.options[ 'idle_shutdown' ] )
        self._idle_shutdown_max_minutes.SetValue( HC.options[ 'idle_shutdown_max_minutes' ] )
        self._shutdown_work_period.SetValue( self._new_options.GetInteger( 'shutdown_work_period' ) )
        self._maintenance_vacuum_period_days.SetValue( self._new_options.GetNoneableInteger( 'maintenance_vacuum_period_days' ) )
        
        # layout: idle panel
        
        rows = []
        
        rows.append( ( 'Run maintenance jobs when the client is idle and the system is not otherwise busy: ', self._idle_normal ) )
        rows.append( ( 'Assume the client is idle if no general browsing activity has occurred in the past: ', self._idle_period ) )
        rows.append( ( 'Assume the client is idle if the mouse has not been moved in the past: ', self._idle_mouse_period ) )
        rows.append( ( 'Assume the system is busy if any CPU core has recent average usage above: ', self._idle_cpu_max ) )
        
        gridbox = ClientGUICommon.WrapInGrid( self._idle_panel, rows )
        
        self._idle_panel.Add( gridbox, CC.FLAGS_EXPAND_PERPENDICULAR )
        
        # layout: shutdown panel
        
        rows = []
        
        rows.append( ( 'Run jobs on shutdown: ', self._idle_shutdown ) )
        rows.append( ( 'Only run shutdown jobs once per: ', self._shutdown_work_period ) )
        rows.append( ( 'Max number of minutes to run shutdown jobs: ', self._idle_shutdown_max_minutes ) )
        
        gridbox = ClientGUICommon.WrapInGrid( self._shutdown_panel, rows )
        
        self._shutdown_panel.Add( gridbox, CC.FLAGS_EXPAND_PERPENDICULAR )
        
        # long explanatory text at the top of the jobs panel
        
        text = '***'
        text += os.linesep
        text += 'If you are a new user or do not completely understand these options, please do not touch them! Do not set the client to be idle all the time unless you know what you are doing or are testing something and are prepared for potential problems!'
        text += os.linesep
        text += '***'
        text += os.linesep * 2
        text += 'Sometimes, the client needs to do some heavy maintenance. This could be reformatting the database to keep it running fast or processing a large number of tags from a repository. Typically, these jobs will not allow you to use the gui while they run, and on slower computers--or those with not much memory--they can take a long time to complete.'
        text += os.linesep * 2
        text += 'You can set these jobs to run only when the client is idle, or only during shutdown, or neither, or both. If you leave the client on all the time in the background, focusing on \'idle time\' processing is often ideal. If you have a slow computer, relying on \'shutdown\' processing (which you can manually start when convenient), is often better.'
        text += os.linesep * 2
        text += 'If the client switches from idle to not idle during a job, it will try to abandon it and give you back control. This is not always possible, and even when it is, it will sometimes take several minutes, particularly on slower machines or those on HDDs rather than SSDs.'
        text += os.linesep * 2
        text += 'If the client believes the system is busy, it will generally not start jobs.'
        
        st = ClientGUICommon.BetterStaticText( self._jobs_panel, label = text )
        st.SetWrapWidth( 550 )
        
        self._jobs_panel.Add( st, CC.FLAGS_EXPAND_PERPENDICULAR )
        self._jobs_panel.Add( self._idle_panel, CC.FLAGS_EXPAND_PERPENDICULAR )
        self._jobs_panel.Add( self._shutdown_panel, CC.FLAGS_EXPAND_PERPENDICULAR )
        
        # layout: maintenance panel
        
        rows = []
        
        rows.append( ( 'Number of days to wait between vacuums: ', self._maintenance_vacuum_period_days ) )
        
        gridbox = ClientGUICommon.WrapInGrid( self._maintenance_panel, rows )
        
        self._maintenance_panel.Add( gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
        
        #
        
        vbox = wx.BoxSizer( wx.VERTICAL )
        
        vbox.Add( self._jobs_panel, CC.FLAGS_EXPAND_PERPENDICULAR )
        vbox.Add( self._maintenance_panel, CC.FLAGS_EXPAND_PERPENDICULAR )
        
        self.SetSizer( vbox )
        
        # sync initial enabled/disabled state with the checkbox/choice values
        
        self._EnableDisableIdleNormal()
        self._EnableDisableIdleShutdown()
        
    def _EnableDisableIdleNormal( self ):
        """Grey out the idle-detection controls when idle processing is disabled."""
        
        idle_controls = ( self._idle_period, self._idle_mouse_period, self._idle_cpu_max )
        
        # fixed: was 'GetValue() == True' -- plain truthiness is the idiomatic equivalent
        if self._idle_normal.GetValue():
            
            for control in idle_controls:
                
                control.Enable()
                
            
        else:
            
            for control in idle_controls:
                
                control.Disable()
                
            
        
    def _EnableDisableIdleShutdown( self ):
        """Grey out the shutdown-job controls when shutdown jobs are turned off."""
        
        shutdown_controls = ( self._shutdown_work_period, self._idle_shutdown_max_minutes )
        
        if self._idle_shutdown.GetChoice() == CC.IDLE_NOT_ON_SHUTDOWN:
            
            for control in shutdown_controls:
                
                control.Disable()
                
            
        else:
            
            for control in shutdown_controls:
                
                control.Enable()
                
            
        
    def EventIdleNormal( self, event ):
        """Checkbox handler: re-sync enabled state of the idle controls."""
        
        self._EnableDisableIdleNormal()
        
    def EventIdleShutdown( self, event ):
        """Choice handler: re-sync enabled state of the shutdown controls."""
        
        self._EnableDisableIdleShutdown()
        
    def UpdateOptions( self ):
        """Write all widget values back into HC.options and new_options."""
        
        HC.options[ 'idle_normal' ] = self._idle_normal.GetValue()
        HC.options[ 'idle_period' ] = self._idle_period.GetValue()
        HC.options[ 'idle_mouse_period' ] = self._idle_mouse_period.GetValue()
        HC.options[ 'idle_cpu_max' ] = self._idle_cpu_max.GetValue()
        HC.options[ 'idle_shutdown' ] = self._idle_shutdown.GetChoice()
        HC.options[ 'idle_shutdown_max_minutes' ] = self._idle_shutdown_max_minutes.GetValue()
        
        self._new_options.SetInteger( 'shutdown_work_period', self._shutdown_work_period.GetValue() )
        self._new_options.SetNoneableInteger( 'maintenance_vacuum_period_days', self._maintenance_vacuum_period_days.GetValue() )
class _DefaultFileSystemPredicatesPanel( wx.Panel ):
    """Options page editing the default values for the file search system predicates
    (system:age, system:duration, etc.) plus two search display toggles."""
    
    def __init__( self, parent, new_options ):
        
        wx.Panel.__init__( self, parent )
        
        self._new_options = new_options
        
        self._always_show_system_everything = wx.CheckBox( self, label = 'show system:everything even if total files is over 10,000' )
        self._always_show_system_everything.SetValue( self._new_options.GetBoolean( 'always_show_system_everything' ) )
        
        self._filter_inbox_and_archive_predicates = wx.CheckBox( self, label = 'hide inbox and archive system predicates if either has no files' )
        self._filter_inbox_and_archive_predicates.SetValue( self._new_options.GetBoolean( 'filter_inbox_and_archive_predicates' ) )
        
        # one editing sub-panel per system predicate type
        self._file_system_predicate_age = ClientGUIPredicates.PanelPredicateSystemAgeDelta( self )
        self._file_system_predicate_duration = ClientGUIPredicates.PanelPredicateSystemDuration( self )
        self._file_system_predicate_height = ClientGUIPredicates.PanelPredicateSystemHeight( self )
        self._file_system_predicate_limit = ClientGUIPredicates.PanelPredicateSystemLimit( self )
        self._file_system_predicate_mime = ClientGUIPredicates.PanelPredicateSystemMime( self )
        self._file_system_predicate_num_pixels = ClientGUIPredicates.PanelPredicateSystemNumPixels( self )
        self._file_system_predicate_num_tags = ClientGUIPredicates.PanelPredicateSystemNumTags( self )
        self._file_system_predicate_num_words = ClientGUIPredicates.PanelPredicateSystemNumWords( self )
        self._file_system_predicate_ratio = ClientGUIPredicates.PanelPredicateSystemRatio( self )
        self._file_system_predicate_similar_to = ClientGUIPredicates.PanelPredicateSystemSimilarTo( self )
        self._file_system_predicate_size = ClientGUIPredicates.PanelPredicateSystemSize( self )
        self._file_system_predicate_width = ClientGUIPredicates.PanelPredicateSystemWidth( self )
        
        #
        
        vbox = wx.BoxSizer( wx.VERTICAL )
        
        vbox.Add( self._always_show_system_everything, CC.FLAGS_VCENTER )
        vbox.Add( self._filter_inbox_and_archive_predicates, CC.FLAGS_VCENTER )
        vbox.Add( ( 20, 20 ), CC.FLAGS_EXPAND_PERPENDICULAR ) # fixed-size spacer between checkboxes and predicate panels
        vbox.Add( self._file_system_predicate_age, CC.FLAGS_EXPAND_PERPENDICULAR )
        vbox.Add( self._file_system_predicate_duration, CC.FLAGS_EXPAND_PERPENDICULAR )
        vbox.Add( self._file_system_predicate_height, CC.FLAGS_EXPAND_PERPENDICULAR )
        vbox.Add( self._file_system_predicate_limit, CC.FLAGS_EXPAND_PERPENDICULAR )
        vbox.Add( self._file_system_predicate_mime, CC.FLAGS_EXPAND_PERPENDICULAR )
        vbox.Add( self._file_system_predicate_num_pixels, CC.FLAGS_EXPAND_PERPENDICULAR )
        vbox.Add( self._file_system_predicate_num_tags, CC.FLAGS_EXPAND_PERPENDICULAR )
        vbox.Add( self._file_system_predicate_num_words, CC.FLAGS_EXPAND_PERPENDICULAR )
        vbox.Add( self._file_system_predicate_ratio, CC.FLAGS_EXPAND_PERPENDICULAR )
        vbox.Add( self._file_system_predicate_similar_to, CC.FLAGS_EXPAND_PERPENDICULAR )
        vbox.Add( self._file_system_predicate_size, CC.FLAGS_EXPAND_PERPENDICULAR )
        vbox.Add( self._file_system_predicate_width, CC.FLAGS_EXPAND_PERPENDICULAR )
        
        self.SetSizer( vbox )
        
    def UpdateOptions( self ):
        """Write the two booleans into new_options and each predicate panel's current
        value into the HC.options[ 'file_system_predicates' ] dict."""
        
        self._new_options.SetBoolean( 'always_show_system_everything', self._always_show_system_everything.GetValue() )
        self._new_options.SetBoolean( 'filter_inbox_and_archive_predicates', self._filter_inbox_and_archive_predicates.GetValue() )
        
        system_predicates = HC.options[ 'file_system_predicates' ]
        
        system_predicates[ 'age' ] = self._file_system_predicate_age.GetInfo()
        system_predicates[ 'duration' ] = self._file_system_predicate_duration.GetInfo()
        # NOTE(review): only index [1] of the similar-to info is stored here --
        # presumably GetInfo() returns a pair and [1] is the hamming distance; confirm
        # against PanelPredicateSystemSimilarTo.GetInfo
        system_predicates[ 'hamming_distance' ] = self._file_system_predicate_similar_to.GetInfo()[1]
        system_predicates[ 'height' ] = self._file_system_predicate_height.GetInfo()
        system_predicates[ 'limit' ] = self._file_system_predicate_limit.GetInfo()
        system_predicates[ 'mime' ] = self._file_system_predicate_mime.GetInfo()
        system_predicates[ 'num_pixels' ] = self._file_system_predicate_num_pixels.GetInfo()
        system_predicates[ 'num_tags' ] = self._file_system_predicate_num_tags.GetInfo()
        system_predicates[ 'num_words' ] = self._file_system_predicate_num_words.GetInfo()
        system_predicates[ 'ratio' ] = self._file_system_predicate_ratio.GetInfo()
        system_predicates[ 'size' ] = self._file_system_predicate_size.GetInfo()
        system_predicates[ 'width' ] = self._file_system_predicate_width.GetInfo()
        
        HC.options[ 'file_system_predicates' ] = system_predicates
class _FilesAndTrashPanel( wx.Panel ):
    """Options page for file handling: export path, trash limits, recycle bin,
    temp folder override, and per-mime 'open externally' launch paths."""
    
    def __init__( self, parent ):
        
        wx.Panel.__init__( self, parent )
        
        self._new_options = HG.client_controller.new_options
        
        self._export_location = wx.DirPickerCtrl( self, style = wx.DIRP_USE_TEXTCTRL )
        
        self._file_system_waits_on_wakeup = wx.CheckBox( self, label = '' )
        self._file_system_waits_on_wakeup.SetToolTip( 'This is useful if your hydrus is stored on a NAS that takes a few seconds to get going after your machine resumes from sleep.' )
        
        self._delete_to_recycle_bin = wx.CheckBox( self, label = '' )
        self._confirm_trash = wx.CheckBox( self )
        self._confirm_archive = wx.CheckBox( self )
        self._remove_filtered_files = wx.CheckBox( self, label = '' )
        self._remove_trashed_files = wx.CheckBox( self, label = '' )
        
        self._trash_max_age = ClientGUICommon.NoneableSpinCtrl( self, '', none_phrase = 'no age limit', min = 0, max = 8640 )
        self._trash_max_size = ClientGUICommon.NoneableSpinCtrl( self, '', none_phrase = 'no size limit', min = 0, max = 20480 )
        
        self._temp_path_override = wx.DirPickerCtrl( self, style = wx.DIRP_USE_TEXTCTRL )
        
        mime_panel = ClientGUICommon.StaticBox( self, '\'open externally\' launch paths' )
        
        self._web_browser_path = wx.TextCtrl( mime_panel )
        
        columns = [ ( 'mime', 20 ), ( 'launch path', -1 ) ]
        
        # rows are ( mime, launch_path ) tuples; launch_path None means 'use OS default'
        self._mime_launch_listctrl = ClientGUIListCtrl.BetterListCtrl( mime_panel, 'mime_launch', 15, 30, columns, self._ConvertMimeToListCtrlTuples, activation_callback = self._EditMimeLaunch )
        
        # initialise widget values from the stored options
        
        if HC.options[ 'export_path' ] is not None:
            
            abs_path = HydrusPaths.ConvertPortablePathToAbsPath( HC.options[ 'export_path' ] )
            
            if abs_path is not None:
                
                self._export_location.SetPath( abs_path )
                
            
        
        self._file_system_waits_on_wakeup.SetValue( self._new_options.GetBoolean( 'file_system_waits_on_wakeup' ) )
        self._delete_to_recycle_bin.SetValue( HC.options[ 'delete_to_recycle_bin' ] )
        self._confirm_trash.SetValue( HC.options[ 'confirm_trash' ] )
        self._confirm_archive.SetValue( HC.options[ 'confirm_archive' ] )
        self._remove_filtered_files.SetValue( HC.options[ 'remove_filtered_files' ] )
        self._remove_trashed_files.SetValue( HC.options[ 'remove_trashed_files' ] )
        self._trash_max_age.SetValue( HC.options[ 'trash_max_age' ] )
        self._trash_max_size.SetValue( HC.options[ 'trash_max_size' ] )
        
        temp_path_override = self._new_options.GetNoneableString( 'temp_path_override' )
        
        if temp_path_override is not None:
            
            self._temp_path_override.SetPath( temp_path_override )
            
        
        web_browser_path = self._new_options.GetNoneableString( 'web_browser_path' )
        
        if web_browser_path is not None:
            
            self._web_browser_path.SetValue( web_browser_path )
            
        
        for mime in HC.SEARCHABLE_MIMES:
            
            launch_path = self._new_options.GetMimeLaunch( mime )
            
            self._mime_launch_listctrl.AddDatas( [ ( mime, launch_path ) ] )
            
        
        self._mime_launch_listctrl.Sort( 0 )
        
        # layout
        
        vbox = wx.BoxSizer( wx.VERTICAL )
        
        text = 'If you set the default export directory blank, the client will use \'hydrus_export\' under the current user\'s home directory.'
        
        vbox.Add( ClientGUICommon.BetterStaticText( self, text ), CC.FLAGS_CENTER )
        
        rows = []
        
        rows.append( ( 'Confirm sending files to trash: ', self._confirm_trash ) )
        rows.append( ( 'Confirm sending more than one file to archive or inbox: ', self._confirm_archive ) )
        rows.append( ( 'Wait 15s after computer resume before accessing files: ', self._file_system_waits_on_wakeup ) )
        rows.append( ( 'When deleting files or folders, send them to the OS\'s recycle bin: ', self._delete_to_recycle_bin ) )
        rows.append( ( 'Remove files from view when they are filtered: ', self._remove_filtered_files ) )
        rows.append( ( 'Remove files from view when they are sent to the trash: ', self._remove_trashed_files ) )
        rows.append( ( 'Number of hours a file can be in the trash before being deleted: ', self._trash_max_age ) )
        rows.append( ( 'Maximum size of trash (MB): ', self._trash_max_size ) )
        rows.append( ( 'Default export directory: ', self._export_location ) )
        rows.append( ( 'BUGFIX: Temp folder override (set blank for OS default): ', self._temp_path_override ) )
        
        gridbox = ClientGUICommon.WrapInGrid( self, rows )
        
        vbox.Add( gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
        
        text = 'Setting a specific web browser path here--like \'C:\\program files\\firefox\\firefox.exe "%path%"\'--can help with the \'share->open->in web browser\' command, which is buggy working with OS defaults, particularly on Windows. It also fixes #anchors, which are dropped in some OSes using default means. Use the same %path% format as the \'open externally\' commands below.'
        
        st = ClientGUICommon.BetterStaticText( mime_panel, text )
        st.SetWrapWidth( 800 )
        
        mime_panel.Add( st, CC.FLAGS_EXPAND_PERPENDICULAR )
        
        rows = []
        
        rows.append( ( 'Manual web browser launch path: ', self._web_browser_path ) )
        
        gridbox = ClientGUICommon.WrapInGrid( mime_panel, rows )
        
        mime_panel.Add( gridbox, CC.FLAGS_EXPAND_PERPENDICULAR )
        mime_panel.Add( self._mime_launch_listctrl, CC.FLAGS_EXPAND_BOTH_WAYS )
        
        vbox.Add( mime_panel, CC.FLAGS_EXPAND_BOTH_WAYS )
        
        self.SetSizer( vbox )
        
    def _ConvertMimeToListCtrlTuples( self, data ):
        """Render one ( mime, launch_path ) row for the listctrl.
        
        Returns ( display_tuple, sort_tuple ); a None launch_path is shown as
        'default: <OS default launch path>'.
        """
        
        ( mime, launch_path ) = data
        
        pretty_mime = HC.mime_string_lookup[ mime ]
        
        if launch_path is None:
            
            pretty_launch_path = 'default: ' + HydrusPaths.GetDefaultLaunchPath()
            
        else:
            
            pretty_launch_path = launch_path
            
        
        display_tuple = ( pretty_mime, pretty_launch_path )
        sort_tuple = display_tuple
        
        return ( display_tuple, sort_tuple )
        
    def _EditMimeLaunch( self ):
        """Prompt for a new launch path for each selected mime row, one dialog per row.
        
        Cancelling any dialog breaks out of the loop, leaving remaining selected
        rows unedited. An empty entry resets the row to the OS default (None).
        """
        
        for ( mime, launch_path ) in self._mime_launch_listctrl.GetData( only_selected = True ):
            
            message = 'Enter the new launch path for ' + HC.mime_string_lookup[ mime ]
            message += os.linesep * 2
            message += 'Hydrus will insert the file\'s full path wherever you put %path%, even multiple times!'
            message += os.linesep * 2
            message += 'Set as blank to reset to default.'
            
            if launch_path is None:
                
                default = 'program "%path%"'
                
            else:
                
                default = launch_path
                
            
            with ClientGUIDialogs.DialogTextEntry( self, message, default = default, allow_blank = True ) as dlg:
                
                if dlg.ShowModal() == wx.ID_OK:
                    
                    new_launch_path = dlg.GetValue()
                    
                    if new_launch_path == '':
                        
                        new_launch_path = None
                        
                    
                    # only rewrite the row if the user actually changed something;
                    # 'default' is also excluded so accepting the placeholder text is a no-op
                    if new_launch_path not in ( launch_path, default ):
                        
                        self._mime_launch_listctrl.DeleteDatas( [ ( mime, launch_path ) ] )
                        self._mime_launch_listctrl.AddDatas( [ ( mime, new_launch_path ) ] )
                        
                    
                else:
                    
                    break
                    
                
            
        
        self._mime_launch_listctrl.Sort()
        
    def UpdateOptions( self ):
        """Write widget values back into HC.options and new_options.
        
        Raises HydrusExceptions.VetoException if the temp folder override is set
        but does not exist or is not writeable.
        """
        
        HC.options[ 'export_path' ] = HydrusPaths.ConvertAbsPathToPortablePath( self._export_location.GetPath() )
        
        self._new_options.SetBoolean( 'file_system_waits_on_wakeup', self._file_system_waits_on_wakeup.GetValue() )
        
        HC.options[ 'delete_to_recycle_bin' ] = self._delete_to_recycle_bin.GetValue()
        HC.options[ 'confirm_trash' ] = self._confirm_trash.GetValue()
        HC.options[ 'confirm_archive' ] = self._confirm_archive.GetValue()
        HC.options[ 'remove_filtered_files' ] = self._remove_filtered_files.GetValue()
        HC.options[ 'remove_trashed_files' ] = self._remove_trashed_files.GetValue()
        HC.options[ 'trash_max_age' ] = self._trash_max_age.GetValue()
        HC.options[ 'trash_max_size' ] = self._trash_max_size.GetValue()
        
        temp_path_override = self._temp_path_override.GetPath()
        
        if temp_path_override == '':
            
            temp_path_override = None # blank means 'use OS default temp dir'
            
        else:
            
            if not HydrusPaths.DirectoryIsWritable( temp_path_override ):
                
                raise HydrusExceptions.VetoException( 'The temporary path override either did not exist or was not writeable-to! Please change it or fix its permissions!' )
                
            
        
        self._new_options.SetNoneableString( 'temp_path_override', temp_path_override )
        
        web_browser_path = self._web_browser_path.GetValue()
        
        if web_browser_path == '':
            
            web_browser_path = None
            
        
        self._new_options.SetNoneableString( 'web_browser_path', web_browser_path )
        
        for ( mime, launch_path ) in self._mime_launch_listctrl.GetData():
            
            self._new_options.SetMimeLaunch( mime, launch_path )
            
        
class _GUIPanel( wx.Panel ):
    """Options page for general gui behaviour: window title, popup messages,
    several bugfix toggles, and per-frame size/position overrides."""
    
    def __init__( self, parent ):
        
        wx.Panel.__init__( self, parent )
        
        self._main_gui_title = wx.TextCtrl( self )
        
        self._confirm_client_exit = wx.CheckBox( self )
        
        self._always_show_iso_time = wx.CheckBox( self )
        tt = 'In many places across the program (typically import status lists), the client will state a timestamp as "5 days ago". If you would prefer a standard ISO string, like "2018-03-01 12:40:23", check this.'
        self._always_show_iso_time.SetToolTip( tt )
        
        self._always_embed_autocompletes = wx.CheckBox( self )
        self._hide_preview = wx.CheckBox( self )
        
        self._popup_message_character_width = wx.SpinCtrl( self, min = 16, max = 256 )
        self._popup_message_force_min_width = wx.CheckBox( self )
        
        self._discord_dnd_fix = wx.CheckBox( self )
        self._discord_dnd_fix.SetToolTip( 'This makes small file drag-and-drops a little laggier in exchange for discord support.' )
        
        self._secret_discord_dnd_fix = wx.CheckBox( self )
        self._secret_discord_dnd_fix.SetToolTip( 'This saves the lag but is potentially dangerous, as it (may) treat the from-db-files-drag as a move rather than a copy and hence only works when the drop destination will not consume the files. It requires an additional secret Alternate key to unlock.' )
        
        self._always_show_hover_windows = wx.CheckBox( self )
        self._always_show_hover_windows.SetToolTip( 'If your window manager doesn\'t like showing the hover windows on mouse-over (typically on some Linux flavours), please try this out and give the dev feedback on this forced size and position accuracy!' )
        
        self._hide_message_manager_on_gui_iconise = wx.CheckBox( self )
        self._hide_message_manager_on_gui_iconise.SetToolTip( 'If your message manager does not automatically minimise with your main gui, try this. It can lead to unusual show and positioning behaviour on window managers that do not support it, however.' )
        
        self._hide_message_manager_on_gui_deactive = wx.CheckBox( self )
        self._hide_message_manager_on_gui_deactive.SetToolTip( 'If your message manager stays up after you minimise the program to the system tray using a custom window manager, try this out! It hides the popup messages as soon as the main gui loses focus.' )
        
        frame_locations_panel = ClientGUICommon.StaticBox( self, 'frame locations' )
        
        # each row's client data is [ name, remember_size, remember_position, last_size,
        # last_position, default_gravity, default_position, maximised, fullscreen ]
        self._frame_locations = ClientGUIListCtrl.SaneListCtrl( frame_locations_panel, 200, [ ( 'name', -1 ), ( 'remember size', 90 ), ( 'remember position', 90 ), ( 'last size', 90 ), ( 'last position', 90 ), ( 'default gravity', 90 ), ( 'default position', 90 ), ( 'maximised', 90 ), ( 'fullscreen', 90 ) ], activation_callback = self.EditFrameLocations )
        
        self._frame_locations_edit_button = wx.Button( frame_locations_panel, label = 'edit' )
        self._frame_locations_edit_button.Bind( wx.EVT_BUTTON, self.EventEditFrameLocation )
        
        # initialise widget values from the stored options
        
        self._new_options = HG.client_controller.new_options
        
        self._main_gui_title.SetValue( self._new_options.GetString( 'main_gui_title' ) )
        
        self._confirm_client_exit.SetValue( HC.options[ 'confirm_client_exit' ] )
        self._always_show_iso_time.SetValue( self._new_options.GetBoolean( 'always_show_iso_time' ) )
        self._always_embed_autocompletes.SetValue( HC.options[ 'always_embed_autocompletes' ] )
        self._hide_preview.SetValue( HC.options[ 'hide_preview' ] )
        self._popup_message_character_width.SetValue( self._new_options.GetInteger( 'popup_message_character_width' ) )
        self._popup_message_force_min_width.SetValue( self._new_options.GetBoolean( 'popup_message_force_min_width' ) )
        self._discord_dnd_fix.SetValue( self._new_options.GetBoolean( 'discord_dnd_fix' ) )
        self._secret_discord_dnd_fix.SetValue( self._new_options.GetBoolean( 'secret_discord_dnd_fix' ) )
        self._always_show_hover_windows.SetValue( self._new_options.GetBoolean( 'always_show_hover_windows' ) )
        self._hide_message_manager_on_gui_iconise.SetValue( self._new_options.GetBoolean( 'hide_message_manager_on_gui_iconise' ) )
        self._hide_message_manager_on_gui_deactive.SetValue( self._new_options.GetBoolean( 'hide_message_manager_on_gui_deactive' ) )
        
        for ( name, info ) in self._new_options.GetFrameLocations():
            
            listctrl_list = [ name ] + list( info )
            
            pretty_listctrl_list = self._GetPrettyFrameLocationInfo( listctrl_list )
            
            self._frame_locations.Append( pretty_listctrl_list, listctrl_list )
            
        
        #self._frame_locations.SortListItems( col = 0 )
        
        # layout
        
        rows = []
        
        rows.append( ( 'Main gui title: ', self._main_gui_title ) )
        rows.append( ( 'Confirm client exit: ', self._confirm_client_exit ) )
        rows.append( ( 'Prefer ISO time ("2018-03-01 12:40:23") to "5 days ago": ', self._always_show_iso_time ) )
        rows.append( ( 'Always embed autocomplete dropdown results window: ', self._always_embed_autocompletes ) )
        rows.append( ( 'Hide the preview window: ', self._hide_preview ) )
        rows.append( ( 'Approximate max width of popup messages (in characters): ', self._popup_message_character_width ) )
        rows.append( ( 'BUGFIX: Force this width as the minimum width for all popup messages: ', self._popup_message_force_min_width ) )
        rows.append( ( 'BUGFIX: Discord file drag-and-drop fix (works for <=25, <200MB file DnDs): ', self._discord_dnd_fix ) )
        rows.append( ( 'EXPERIMENTAL BUGFIX: Secret discord file drag-and-drop fix: ', self._secret_discord_dnd_fix ) )
        rows.append( ( 'BUGFIX: Always show media viewer hover windows: ', self._always_show_hover_windows ) )
        rows.append( ( 'BUGFIX: Hide the popup message manager when the main gui is minimised: ', self._hide_message_manager_on_gui_iconise ) )
        rows.append( ( 'BUGFIX: Hide the popup message manager when the main gui loses focus: ', self._hide_message_manager_on_gui_deactive ) )
        
        gridbox = ClientGUICommon.WrapInGrid( self, rows )
        
        text = 'Here you can override the current and default values for many frame and dialog sizing and positioning variables.'
        text += os.linesep
        text += 'This is an advanced control. If you aren\'t confident of what you are doing here, come back later!'
        
        frame_locations_panel.Add( wx.StaticText( frame_locations_panel, label = text ), CC.FLAGS_EXPAND_PERPENDICULAR )
        frame_locations_panel.Add( self._frame_locations, CC.FLAGS_EXPAND_BOTH_WAYS )
        frame_locations_panel.Add( self._frame_locations_edit_button, CC.FLAGS_LONE_BUTTON )
        
        vbox = wx.BoxSizer( wx.VERTICAL )
        
        vbox.Add( gridbox, CC.FLAGS_EXPAND_PERPENDICULAR )
        vbox.Add( frame_locations_panel, CC.FLAGS_EXPAND_BOTH_WAYS )
        
        self.SetSizer( vbox )
        
    def _GetPrettyFrameLocationInfo( self, listctrl_list ):
        """Return the display strings for one frame-location row (str() of every field)."""
        
        pretty_listctrl_list = []
        
        for item in listctrl_list:
            
            pretty_listctrl_list.append( str( item ) )
            
        
        return pretty_listctrl_list
        
    def EditFrameLocations( self ):
        """Open an edit dialog for each selected frame-location row and apply accepted changes."""
        
        for i in self._frame_locations.GetAllSelected():
            
            listctrl_list = self._frame_locations.GetClientData( i )
            
            title = 'set frame location information'
            
            with ClientGUITopLevelWindows.DialogEdit( self, title ) as dlg:
                
                panel = ClientGUIScrolledPanelsEdit.EditFrameLocationPanel( dlg, listctrl_list )
                
                dlg.SetPanel( panel )
                
                if dlg.ShowModal() == wx.ID_OK:
                    
                    new_listctrl_list = panel.GetValue()
                    
                    pretty_new_listctrl_list = self._GetPrettyFrameLocationInfo( new_listctrl_list )
                    
                    self._frame_locations.UpdateRow( i, pretty_new_listctrl_list, new_listctrl_list )
                    
                
            
        
    def EventEditFrameLocation( self, event ):
        """Button handler: forward to EditFrameLocations."""
        
        self.EditFrameLocations()
        
    def UpdateOptions( self ):
        """Write widget values back into HC.options and new_options, publish the new
        main gui title, and store every frame-location row."""
        
        HC.options[ 'confirm_client_exit' ] = self._confirm_client_exit.GetValue()
        
        self._new_options.SetBoolean( 'always_show_iso_time', self._always_show_iso_time.GetValue() )
        
        HC.options[ 'always_embed_autocompletes' ] = self._always_embed_autocompletes.GetValue()
        HC.options[ 'hide_preview' ] = self._hide_preview.GetValue()
        
        self._new_options.SetInteger( 'popup_message_character_width', self._popup_message_character_width.GetValue() )
        self._new_options.SetBoolean( 'popup_message_force_min_width', self._popup_message_force_min_width.GetValue() )
        
        title = self._main_gui_title.GetValue()
        
        self._new_options.SetString( 'main_gui_title', title )
        
        # broadcast so the main frame retitles itself immediately
        HG.client_controller.pub( 'main_gui_title', title )
        
        self._new_options.SetBoolean( 'discord_dnd_fix', self._discord_dnd_fix.GetValue() )
        self._new_options.SetBoolean( 'secret_discord_dnd_fix', self._secret_discord_dnd_fix.GetValue() )
        self._new_options.SetBoolean( 'always_show_hover_windows', self._always_show_hover_windows.GetValue() )
        self._new_options.SetBoolean( 'hide_message_manager_on_gui_iconise', self._hide_message_manager_on_gui_iconise.GetValue() )
        self._new_options.SetBoolean( 'hide_message_manager_on_gui_deactive', self._hide_message_manager_on_gui_deactive.GetValue() )
        
        for listctrl_list in self._frame_locations.GetClientData():
            
            ( name, remember_size, remember_position, last_size, last_position, default_gravity, default_position, maximised, fullscreen ) = listctrl_list
            
            self._new_options.SetFrameLocation( name, remember_size, remember_position, last_size, last_position, default_gravity, default_position, maximised, fullscreen )
            
        
class _GUIPagesPanel( wx.Panel ):
    """Options-dialog page for page tab behaviour and gui session load/save settings.
    
    Reads current values from HC.options and the given ClientOptions object on
    construction; UpdateOptions writes the widget values back.
    """
    
    def __init__( self, parent, new_options ):
        
        wx.Panel.__init__( self, parent )
        
        self._new_options = new_options
        
        # session widgets
        
        self._default_gui_session = wx.Choice( self )
        
        self._last_session_save_period_minutes = wx.SpinCtrl( self, min = 1, max = 1440 )
        
        self._only_save_last_session_during_idle = wx.CheckBox( self )
        
        self._only_save_last_session_during_idle.SetToolTip( 'This is useful if you usually have a very large session (200,000+ files/import items open) and a client that is always on.' )
        
        self._number_of_gui_session_backups = wx.SpinCtrl( self, min = 1, max = 32 )
        
        self._number_of_gui_session_backups.SetToolTip( 'The client keeps multiple rolling backups of your gui sessions. If you have very large sessions, you might like to reduce this number.' )
        
        # page tab widgets
        
        self._default_new_page_goes = ClientGUICommon.BetterChoice( self )
        
        for value in [ CC.NEW_PAGE_GOES_FAR_LEFT, CC.NEW_PAGE_GOES_LEFT_OF_CURRENT, CC.NEW_PAGE_GOES_RIGHT_OF_CURRENT, CC.NEW_PAGE_GOES_FAR_RIGHT ]:
            
            self._default_new_page_goes.Append( CC.new_page_goes_string_lookup[ value ], value )
            
        
        self._notebook_tabs_on_left = wx.CheckBox( self )
        
        self._max_page_name_chars = wx.SpinCtrl( self, min = 1, max = 256 )
        
        self._page_file_count_display = ClientGUICommon.BetterChoice( self )
        
        for display_type in ( CC.PAGE_FILE_COUNT_DISPLAY_ALL, CC.PAGE_FILE_COUNT_DISPLAY_ONLY_IMPORTERS, CC.PAGE_FILE_COUNT_DISPLAY_NONE ):
            
            self._page_file_count_display.Append( CC.page_file_count_display_string_lookup[ display_type ], display_type )
            
        
        self._import_page_progress_display = wx.CheckBox( self )
        
        self._total_pages_warning = wx.SpinCtrl( self, min = 5, max = 200 )
        
        self._reverse_page_shift_drag_behaviour = wx.CheckBox( self )
        self._reverse_page_shift_drag_behaviour.SetToolTip( 'By default, holding down shift when you drop off a page tab means the client will not \'chase\' the page tab. This makes this behaviour default, with shift-drop meaning to chase.' )
        
        self._set_search_focus_on_page_change = wx.CheckBox( self )
        
        #
        
        # populate the session choice from the db's saved session names
        gui_session_names = HG.client_controller.Read( 'serialisable_names', HydrusSerialisable.SERIALISABLE_TYPE_GUI_SESSION )
        
        if 'last session' not in gui_session_names:
            
            gui_session_names.insert( 0, 'last session' )
            
        
        self._default_gui_session.Append( 'just a blank page', None )
        
        for name in gui_session_names:
            
            self._default_gui_session.Append( name, name )
            
        
        # the saved default may name a session that no longer exists; fall back
        # to the first entry in that case (bare except is pre-existing)
        try:
            
            self._default_gui_session.SetStringSelection( HC.options[ 'default_gui_session' ] )
            
        except:
            
            self._default_gui_session.SetSelection( 0 )
            
        
        self._last_session_save_period_minutes.SetValue( self._new_options.GetInteger( 'last_session_save_period_minutes' ) )
        
        self._only_save_last_session_during_idle.SetValue( self._new_options.GetBoolean( 'only_save_last_session_during_idle' ) )
        
        self._number_of_gui_session_backups.SetValue( self._new_options.GetInteger( 'number_of_gui_session_backups' ) )
        
        self._default_new_page_goes.SelectClientData( self._new_options.GetInteger( 'default_new_page_goes' ) )
        
        self._notebook_tabs_on_left.SetValue( self._new_options.GetBoolean( 'notebook_tabs_on_left' ) )
        
        self._max_page_name_chars.SetValue( self._new_options.GetInteger( 'max_page_name_chars' ) )
        
        self._page_file_count_display.SelectClientData( self._new_options.GetInteger( 'page_file_count_display' ) )
        
        self._import_page_progress_display.SetValue( self._new_options.GetBoolean( 'import_page_progress_display' ) )
        
        self._total_pages_warning.SetValue( self._new_options.GetInteger( 'total_pages_warning' ) )
        
        self._reverse_page_shift_drag_behaviour.SetValue( self._new_options.GetBoolean( 'reverse_page_shift_drag_behaviour' ) )
        
        self._set_search_focus_on_page_change.SetValue( self._new_options.GetBoolean( 'set_search_focus_on_page_change' ) )
        
        # layout
        
        rows = []
        
        rows.append( ( 'Default session on startup: ', self._default_gui_session ) )
        rows.append( ( 'If \'last session\' above, autosave it how often (minutes)?', self._last_session_save_period_minutes ) )
        rows.append( ( 'If \'last session\' above, only autosave during idle time?', self._only_save_last_session_during_idle ) )
        rows.append( ( 'Number of session backups to keep: ', self._number_of_gui_session_backups ) )
        rows.append( ( 'By default, put new page tabs on (requires restart): ', self._default_new_page_goes ) )
        rows.append( ( 'When switching to a page, focus its input field (if any): ', self._set_search_focus_on_page_change ) )
        rows.append( ( 'Line notebook tabs down the left: ', self._notebook_tabs_on_left ) )
        rows.append( ( 'Max characters to display in a page name: ', self._max_page_name_chars ) )
        rows.append( ( 'Show page file count after its name: ', self._page_file_count_display ) )
        rows.append( ( 'Show import page x/y progress after its name: ', self._import_page_progress_display ) )
        rows.append( ( 'Warn at this many total pages: ', self._total_pages_warning ) )
        rows.append( ( 'Reverse page tab shift-drag behaviour: ', self._reverse_page_shift_drag_behaviour ) )
        
        gridbox = ClientGUICommon.WrapInGrid( self, rows )
        
        vbox = wx.BoxSizer( wx.VERTICAL )
        
        vbox.Add( gridbox, CC.FLAGS_EXPAND_PERPENDICULAR )
        
        self.SetSizer( vbox )
        
    
    def UpdateOptions( self ):
        """Commit this panel's widget values back into HC.options and new_options."""
        
        HC.options[ 'default_gui_session' ] = self._default_gui_session.GetStringSelection()
        
        self._new_options.SetBoolean( 'notebook_tabs_on_left', self._notebook_tabs_on_left.GetValue() )
        
        self._new_options.SetInteger( 'last_session_save_period_minutes', self._last_session_save_period_minutes.GetValue() )
        
        self._new_options.SetInteger( 'number_of_gui_session_backups', self._number_of_gui_session_backups.GetValue() )
        
        self._new_options.SetBoolean( 'only_save_last_session_during_idle', self._only_save_last_session_during_idle.GetValue() )
        
        self._new_options.SetInteger( 'default_new_page_goes', self._default_new_page_goes.GetChoice() )
        
        self._new_options.SetInteger( 'max_page_name_chars', self._max_page_name_chars.GetValue() )
        
        self._new_options.SetInteger( 'page_file_count_display', self._page_file_count_display.GetChoice() )
        
        self._new_options.SetBoolean( 'import_page_progress_display', self._import_page_progress_display.GetValue() )
        
        self._new_options.SetInteger( 'total_pages_warning', self._total_pages_warning.GetValue() )
        
        self._new_options.SetBoolean( 'reverse_page_shift_drag_behaviour', self._reverse_page_shift_drag_behaviour.GetValue() )
        
        self._new_options.SetBoolean( 'set_search_focus_on_page_change', self._set_search_focus_on_page_change.GetValue() )
        
    
class _MediaPanel( wx.Panel ):
    """Options-dialog page for media playback, zoom, and per-mime viewer settings.
    
    Fix vs previous revision: UpdateOptions now warns the user when the zooms
    string parses but contains no positive values; previously that case was
    silently ignored (only the unparseable case warned), so the user's input
    vanished without feedback.
    """
    
    def __init__( self, parent ):
        
        wx.Panel.__init__( self, parent )
        
        self._new_options = HG.client_controller.new_options
        
        self._animation_start_position = wx.SpinCtrl( self, min = 0, max = 100 )
        
        self._disable_cv_for_gifs = wx.CheckBox( self )
        self._disable_cv_for_gifs.SetToolTip( 'OpenCV is good at rendering gifs, but if you have problems with it and your graphics card, check this and the less reliable and slower PIL will be used instead. EDIT: OpenCV is much better these days--this is mostly not needed.' )
        
        self._load_images_with_pil = wx.CheckBox( self )
        self._load_images_with_pil.SetToolTip( 'OpenCV is much faster than PIL, but it is sometimes less reliable. Switch this on if you experience crashes or other unusual problems while importing or viewing certain images. EDIT: OpenCV is much better these days--this is mostly not needed.' )
        
        self._use_system_ffmpeg = wx.CheckBox( self )
        self._use_system_ffmpeg.SetToolTip( 'Check this to always default to the system ffmpeg in your path, rather than using the static ffmpeg in hydrus\'s bin directory. (requires restart)' )
        
        self._file_viewing_stats_menu_display = ClientGUICommon.BetterChoice( self )
        
        self._file_viewing_stats_menu_display.Append( 'do not show', CC.FILE_VIEWING_STATS_MENU_DISPLAY_NONE )
        self._file_viewing_stats_menu_display.Append( 'show media', CC.FILE_VIEWING_STATS_MENU_DISPLAY_MEDIA_ONLY )
        self._file_viewing_stats_menu_display.Append( 'show media, and put preview in a submenu', CC.FILE_VIEWING_STATS_MENU_DISPLAY_MEDIA_AND_PREVIEW_IN_SUBMENU )
        self._file_viewing_stats_menu_display.Append( 'show media and preview in two lines', CC.FILE_VIEWING_STATS_MENU_DISPLAY_MEDIA_AND_PREVIEW_STACKED )
        self._file_viewing_stats_menu_display.Append( 'show media and preview combined', CC.FILE_VIEWING_STATS_MENU_DISPLAY_MEDIA_AND_PREVIEW_SUMMED )
        
        self._anchor_and_hide_canvas_drags = wx.CheckBox( self )
        
        # comma-separated floats; validated live in EventZoomsChanged
        self._media_zooms = wx.TextCtrl( self )
        self._media_zooms.Bind( wx.EVT_TEXT, self.EventZoomsChanged )
        
        self._media_viewer_panel = ClientGUICommon.StaticBox( self, 'media viewer mime handling' )
        
        self._media_viewer_options = ClientGUIListCtrl.SaneListCtrlForSingleObject( self._media_viewer_panel, 300, [ ( 'mime', 150 ), ( 'media show action', 140 ), ( 'preview show action', 140 ), ( 'zoom info', -1 ) ], activation_callback = self.EditMediaViewerOptions )
        
        self._media_viewer_edit_button = wx.Button( self._media_viewer_panel, label = 'edit' )
        self._media_viewer_edit_button.Bind( wx.EVT_BUTTON, self.EventEditMediaViewerOptions )
        
        #
        
        # animation_start_position is stored as a 0.0-1.0 fraction; shown as a %
        self._animation_start_position.SetValue( int( HC.options[ 'animation_start_position' ] * 100.0 ) )
        self._disable_cv_for_gifs.SetValue( self._new_options.GetBoolean( 'disable_cv_for_gifs' ) )
        self._load_images_with_pil.SetValue( self._new_options.GetBoolean( 'load_images_with_pil' ) )
        self._use_system_ffmpeg.SetValue( self._new_options.GetBoolean( 'use_system_ffmpeg' ) )
        self._file_viewing_stats_menu_display.SelectClientData( self._new_options.GetInteger( 'file_viewing_stats_menu_display' ) )
        self._anchor_and_hide_canvas_drags.SetValue( self._new_options.GetBoolean( 'anchor_and_hide_canvas_drags' ) )
        
        media_zooms = self._new_options.GetMediaZooms()
        
        self._media_zooms.SetValue( ','.join( ( str( media_zoom ) for media_zoom in media_zooms ) ) )
        
        mimes_in_correct_order = ( HC.IMAGE_JPEG, HC.IMAGE_PNG, HC.IMAGE_APNG, HC.IMAGE_GIF, HC.IMAGE_WEBP, HC.IMAGE_TIFF, HC.APPLICATION_FLASH, HC.APPLICATION_PDF, HC.APPLICATION_PSD, HC.APPLICATION_ZIP, HC.APPLICATION_RAR, HC.APPLICATION_7Z, HC.APPLICATION_HYDRUS_UPDATE_CONTENT, HC.APPLICATION_HYDRUS_UPDATE_DEFINITIONS, HC.VIDEO_AVI, HC.VIDEO_FLV, HC.VIDEO_MOV, HC.VIDEO_MP4, HC.VIDEO_MKV, HC.VIDEO_MPEG, HC.VIDEO_WEBM, HC.VIDEO_WMV, HC.AUDIO_MP3, HC.AUDIO_OGG, HC.AUDIO_FLAC, HC.AUDIO_WMA )
        
        for mime in mimes_in_correct_order:
            
            items = self._new_options.GetMediaViewOptions( mime )
            
            data = [ mime ] + list( items )
            
            ( display_tuple, sort_tuple, data ) = self._GetListCtrlData( data )
            
            self._media_viewer_options.Append( display_tuple, sort_tuple, data )
            
        
        #self._media_viewer_options.SortListItems( col = 0 )
        
        #
        
        vbox = wx.BoxSizer( wx.VERTICAL )
        
        rows = []
        
        rows.append( ( 'Start animations this % in: ', self._animation_start_position ) )
        rows.append( ( 'Prefer system FFMPEG: ', self._use_system_ffmpeg ) )
        rows.append( ( 'Media zooms: ', self._media_zooms ) )
        rows.append( ( 'Show media/preview viewing stats or media right-click menus?: ', self._file_viewing_stats_menu_display ) )
        rows.append( ( 'WINDOWS ONLY: Hide and anchor mouse cursor on slow canvas drags: ', self._anchor_and_hide_canvas_drags ) )
        rows.append( ( 'BUGFIX: Load images with PIL (slower): ', self._load_images_with_pil ) )
        rows.append( ( 'BUGFIX: Load gifs with PIL instead of OpenCV (slower, bad transparency): ', self._disable_cv_for_gifs ) )
        
        gridbox = ClientGUICommon.WrapInGrid( self, rows )
        
        vbox.Add( gridbox, CC.FLAGS_EXPAND_PERPENDICULAR )
        
        self._media_viewer_panel.Add( self._media_viewer_options, CC.FLAGS_EXPAND_BOTH_WAYS )
        self._media_viewer_panel.Add( self._media_viewer_edit_button, CC.FLAGS_LONE_BUTTON )
        
        vbox.Add( self._media_viewer_panel, CC.FLAGS_EXPAND_BOTH_WAYS )
        
        self.SetSizer( vbox )
        
    
    def _GetListCtrlData( self, data ):
        """Convert one media-view-options row into ( display_tuple, sort_tuple, data ).
        
        The zoom_info list is converted to a tuple because the listctrl stores
        the data in an object space that requires hashable values.
        """
        
        ( mime, media_show_action, preview_show_action, zoom_info ) = data
        
        # can't store a list in the listctrl obj space, as it is unhashable
        data = ( mime, media_show_action, preview_show_action, tuple( zoom_info ) )
        
        pretty_mime = HC.mime_string_lookup[ mime ]
        pretty_media_show_action = CC.media_viewer_action_string_lookup[ media_show_action ]
        pretty_preview_show_action = CC.media_viewer_action_string_lookup[ preview_show_action ]
        
        # zoom info is meaningless when the mime is shown nowhere
        no_show = media_show_action in CC.no_support and preview_show_action in CC.no_support
        
        if no_show:
            
            pretty_zoom_info = ''
            
        else:
            
            pretty_zoom_info = str( zoom_info )
            
        
        display_tuple = ( pretty_mime, pretty_media_show_action, pretty_preview_show_action, pretty_zoom_info )
        sort_tuple = ( pretty_mime, pretty_media_show_action, pretty_preview_show_action, pretty_zoom_info )
        
        return ( display_tuple, sort_tuple, data )
        
    
    def EditMediaViewerOptions( self ):
        """Open an edit dialog for each selected media-viewer-options row."""
        
        for i in self._media_viewer_options.GetAllSelected():
            
            data = self._media_viewer_options.GetObject( i )
            
            title = 'set media view options information'
            
            with ClientGUITopLevelWindows.DialogEdit( self, title ) as dlg:
                
                panel = ClientGUIScrolledPanelsEdit.EditMediaViewOptionsPanel( dlg, data )
                
                dlg.SetPanel( panel )
                
                if dlg.ShowModal() == wx.ID_OK:
                    
                    new_data = panel.GetValue()
                    
                    ( display_tuple, sort_tuple, new_data ) = self._GetListCtrlData( new_data )
                    
                    self._media_viewer_options.UpdateRow( i, display_tuple, sort_tuple, new_data )
                    
                
            
        
    
    def EventEditMediaViewerOptions( self, event ):
        """wx event handler: forwards the 'edit' button click to EditMediaViewerOptions."""
        
        self.EditMediaViewerOptions()
        
    
    def EventZoomsChanged( self, event ):
        """Live-validate the zooms textbox, tinting it red when it does not parse."""
        
        try:
            
            media_zooms = [ float( media_zoom ) for media_zoom in self._media_zooms.GetValue().split( ',' ) ]
            
            self._media_zooms.SetBackgroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_WINDOW ) )
            
        except ValueError:
            
            self._media_zooms.SetBackgroundColour( wx.Colour( 255, 127, 127 ) )
            
        
        self._media_zooms.Refresh()
        
    
    def UpdateOptions( self ):
        """Commit this panel's widget values back into HC.options and new_options."""
        
        HC.options[ 'animation_start_position' ] = self._animation_start_position.GetValue() / 100.0
        
        self._new_options.SetInteger( 'file_viewing_stats_menu_display', self._file_viewing_stats_menu_display.GetChoice() )
        
        self._new_options.SetBoolean( 'disable_cv_for_gifs', self._disable_cv_for_gifs.GetValue() )
        self._new_options.SetBoolean( 'load_images_with_pil', self._load_images_with_pil.GetValue() )
        self._new_options.SetBoolean( 'use_system_ffmpeg', self._use_system_ffmpeg.GetValue() )
        self._new_options.SetBoolean( 'anchor_and_hide_canvas_drags', self._anchor_and_hide_canvas_drags.GetValue() )
        
        try:
            
            media_zooms = [ float( media_zoom ) for media_zoom in self._media_zooms.GetValue().split( ',' ) ]
            
            media_zooms = [ media_zoom for media_zoom in media_zooms if media_zoom > 0.0 ]
            
            if len( media_zooms ) > 0:
                
                self._new_options.SetMediaZooms( media_zooms )
                
            else:
                
                # previously this case (parseable but no positive zooms) was
                # silently dropped--now warn, matching the unparseable case
                HydrusData.ShowText( 'Could not parse those zooms, so they were not saved!' )
                
            
        except ValueError:
            
            HydrusData.ShowText( 'Could not parse those zooms, so they were not saved!' )
            
        
        for data in self._media_viewer_options.GetObjects():
            
            data = list( data )
            
            mime = data[0]
            
            value = data[1:]
            
            self._new_options.SetMediaViewOptions( mime, value )
            
        
    
class _RegexPanel( wx.Panel ):
    """Options-dialog page that hosts the favourite-regexes editor."""
    
    def __init__( self, parent ):
        
        wx.Panel.__init__( self, parent )
        
        current_favourites = HC.options[ 'regex_favourites' ]
        
        self._regex_panel = ClientGUIScrolledPanelsEdit.EditRegexFavourites( self, current_favourites )
        
        outer_sizer = wx.BoxSizer( wx.VERTICAL )
        
        outer_sizer.Add( self._regex_panel, CC.FLAGS_EXPAND_BOTH_WAYS )
        
        self.SetSizer( outer_sizer )
        
    
    def UpdateOptions( self ):
        """Write the edited favourites list back into the global options."""
        
        HC.options[ 'regex_favourites' ] = self._regex_panel.GetValue()
        
    
class _SortCollectPanel( wx.Panel ):
    """Options-dialog page for default file sort/collect and namespace sort schemes.
    
    Fixes vs previous revision:
      * EventKeyDownSortBy wrapped str.split in a try/except whose error branch
        was unreachable (str.split never raises for a str argument)--removed.
      * Bare 'except:' clauses narrowed to 'except Exception:' so they no longer
        swallow KeyboardInterrupt/SystemExit.
    """
    
    def __init__( self, parent ):
        
        wx.Panel.__init__( self, parent )
        
        self._default_sort = ClientGUICommon.ChoiceSort( self )
        
        self._fallback_sort = ClientGUICommon.ChoiceSort( self )
        
        self._default_collect = ClientGUICommon.CheckboxCollect( self )
        
        # double-click removes a scheme; enter in the textbox below adds one
        self._sort_by = wx.ListBox( self )
        self._sort_by.Bind( wx.EVT_LEFT_DCLICK, self.EventRemoveSortBy )
        
        self._new_sort_by = wx.TextCtrl( self, style = wx.TE_PROCESS_ENTER )
        self._new_sort_by.Bind( wx.EVT_KEY_DOWN, self.EventKeyDownSortBy )
        
        #
        
        self._new_options = HG.client_controller.new_options
        
        # the saved sorts may reference sort types that no longer exist; fall
        # back to safe defaults in that case
        try:
            
            self._default_sort.SetSort( self._new_options.GetDefaultSort() )
            
        except Exception:
            
            media_sort = ClientMedia.MediaSort( ( 'system', CC.SORT_FILES_BY_FILESIZE ), CC.SORT_ASC )
            
            self._default_sort.SetSort( media_sort )
            
        
        try:
            
            self._fallback_sort.SetSort( self._new_options.GetFallbackSort() )
            
        except Exception:
            
            media_sort = ClientMedia.MediaSort( ( 'system', CC.SORT_FILES_BY_IMPORT_TIME ), CC.SORT_ASC )
            
            self._fallback_sort.SetSort( media_sort )
            
        
        for ( sort_by_type, sort_by ) in HC.options[ 'sort_by' ]:
            
            self._sort_by.Append( '-'.join( sort_by ), sort_by )
            
        
        #
        
        rows = []
        
        rows.append( ( 'Default sort: ', self._default_sort ) )
        rows.append( ( 'Secondary sort (when primary gives two equal values): ', self._fallback_sort ) )
        rows.append( ( 'Default collect: ', self._default_collect ) )
        
        gridbox = ClientGUICommon.WrapInGrid( self, rows )
        
        vbox = wx.BoxSizer( wx.VERTICAL )
        
        sort_by_text = 'You can manage new namespace sorting schemes here.'
        sort_by_text += os.linesep
        sort_by_text += 'The client will sort media by comparing their namespaces, moving from left to right until an inequality is found.'
        sort_by_text += os.linesep
        sort_by_text += 'Any changes will be shown in the sort-by dropdowns of any new pages you open.'
        
        vbox.Add( gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
        vbox.Add( ClientGUICommon.BetterStaticText( self, sort_by_text ), CC.FLAGS_VCENTER )
        vbox.Add( self._sort_by, CC.FLAGS_EXPAND_BOTH_WAYS )
        vbox.Add( self._new_sort_by, CC.FLAGS_EXPAND_PERPENDICULAR )
        
        self.SetSizer( vbox )
        
    
    def EventKeyDownSortBy( self, event ):
        """On enter, add the typed namespace scheme to the list; otherwise pass the key on."""
        
        ( modifier, key ) = ClientGUIShortcuts.ConvertKeyEventToSimpleTuple( event )
        
        if key in ( wx.WXK_RETURN, wx.WXK_NUMPAD_ENTER ):
            
            sort_by_string = self._new_sort_by.GetValue()
            
            if sort_by_string != '':
                
                # str.split cannot raise here, so no error handling is needed
                sort_by = sort_by_string.split( '-' )
                
                self._sort_by.Append( sort_by_string, sort_by )
                
                self._new_sort_by.SetValue( '' )
                
            
        else:
            
            event.Skip()
            
        
    
    def EventRemoveSortBy( self, event ):
        """Remove the double-clicked namespace scheme from the list, if any."""
        
        selection = self._sort_by.GetSelection()
        
        if selection != wx.NOT_FOUND:
            
            self._sort_by.Delete( selection )
            
        
    
    def UpdateOptions( self ):
        """Commit this panel's widget values back into HC.options and new_options."""
        
        self._new_options.SetDefaultSort( self._default_sort.GetSort() )
        self._new_options.SetFallbackSort( self._fallback_sort.GetSort() )
        
        HC.options[ 'default_collect' ] = self._default_collect.GetChoice()
        
        sort_by_choices = [ ( 'namespaces', self._sort_by.GetClientData( i ) ) for i in range( self._sort_by.GetCount() ) ]
        
        HC.options[ 'sort_by' ] = sort_by_choices
        
    
class _SoundPanel( wx.Panel ):
    """Options-dialog page for sound settings."""
    
    def __init__( self, parent ):
        
        wx.Panel.__init__( self, parent )
        
        # single checkbox, initialised straight from the global options
        self._play_dumper_noises = wx.CheckBox( self, label = 'play success/fail noises when dumping' )
        self._play_dumper_noises.SetValue( HC.options[ 'play_dumper_noises' ] )
        
        layout = wx.BoxSizer( wx.VERTICAL )
        layout.Add( self._play_dumper_noises, CC.FLAGS_EXPAND_PERPENDICULAR )
        
        self.SetSizer( layout )
        
    
    def UpdateOptions( self ):
        """Write the checkbox state back into the global options."""
        
        HC.options[ 'play_dumper_noises' ] = self._play_dumper_noises.GetValue()
        
    
class _SpeedAndMemoryPanel( wx.Panel ):
    """Options-dialog page for disk cache, media caches, video buffer and autocomplete timings.
    
    Fixes vs previous revision:
      * EventFetchAuto compared a checkbox value with '== True' and duplicated
        four Enable()/Disable() calls per branch; now uses the boolean directly
        with wx.Window.Enable( flag ).
    """
    
    def __init__( self, parent, new_options ):
        
        wx.Panel.__init__( self, parent )
        
        self._new_options = new_options
        
        # disk cache
        
        disk_panel = ClientGUICommon.StaticBox( self, 'disk cache' )
        
        disk_cache_help_button = ClientGUICommon.BetterBitmapButton( disk_panel, CC.GlobalBMPs.help, self._ShowDiskCacheHelp )
        disk_cache_help_button.SetToolTip( 'Show help regarding the disk cache.' )
        
        help_hbox = ClientGUICommon.WrapInText( disk_cache_help_button, disk_panel, 'help for this panel -->', wx.Colour( 0, 0, 255 ) )
        
        self._disk_cache_init_period = ClientGUICommon.NoneableSpinCtrl( disk_panel, unit = 's', none_phrase = 'do not run', min = 1, max = 120 )
        self._disk_cache_init_period.SetToolTip( 'When the client boots, it can speed up operation (particularly loading your session pages) by reading the front of its database into memory. This sets the max number of seconds it can spend doing that.' )
        
        self._disk_cache_maintenance = ClientGUIControls.NoneableBytesControl( disk_panel, initial_value = 256 * 1024 * 1024, none_label = 'do not keep db cached' )
        self._disk_cache_maintenance.SetToolTip( 'The client can regularly ensure the front of its database is cached in your OS\'s disk cache. This represents how many megabytes it will ensure are cached in memory.' )
        
        # thumbnail and image caches
        
        media_panel = ClientGUICommon.StaticBox( self, 'thumbnail size and media cache' )
        
        self._thumbnail_cache_size = wx.SpinCtrl( media_panel, min = 5, max = 3000 )
        self._thumbnail_cache_size.Bind( wx.EVT_SPINCTRL, self.EventThumbnailsUpdate )
        
        self._estimated_number_thumbnails = wx.StaticText( media_panel, label = '' )
        
        self._fullscreen_cache_size = wx.SpinCtrl( media_panel, min = 25, max = 8192 )
        self._fullscreen_cache_size.Bind( wx.EVT_SPINCTRL, self.EventFullscreensUpdate )
        
        self._estimated_number_fullscreens = wx.StaticText( media_panel, label = '' )
        
        self._thumbnail_cache_timeout = ClientGUITime.TimeDeltaButton( media_panel, min = 300, days = True, hours = True, minutes = True )
        self._thumbnail_cache_timeout.SetToolTip( 'The amount of time after which a thumbnail in the cache will naturally be removed, if it is not shunted out due to a new member exceeding the size limit. Requires restart to kick in.' )
        
        self._image_cache_timeout = ClientGUITime.TimeDeltaButton( media_panel, min = 300, days = True, hours = True, minutes = True )
        self._image_cache_timeout.SetToolTip( 'The amount of time after which a rendered image in the cache will naturally be removed, if it is not shunted out due to a new member exceeding the size limit. Requires restart to kick in.' )
        
        # video buffer
        
        buffer_panel = ClientGUICommon.StaticBox( self, 'video buffer' )
        
        self._video_buffer_size_mb = wx.SpinCtrl( buffer_panel, min = 48, max = 16 * 1024 )
        self._video_buffer_size_mb.Bind( wx.EVT_SPINCTRL, self.EventVideoBufferUpdate )
        
        self._estimated_number_video_frames = wx.StaticText( buffer_panel, label = '' )
        
        # autocomplete timings
        
        ac_panel = ClientGUICommon.StaticBox( self, 'tag autocomplete' )
        
        self._num_autocomplete_chars = wx.SpinCtrl( ac_panel, min = 1, max = 100 )
        self._num_autocomplete_chars.SetToolTip( 'how many characters you enter before the gui fetches autocomplete results from the db. (otherwise, it will only fetch exact matches)' + os.linesep + 'increase this if you find autocomplete results are slow' )
        
        self._fetch_ac_results_automatically = wx.CheckBox( ac_panel )
        self._fetch_ac_results_automatically.Bind( wx.EVT_CHECKBOX, self.EventFetchAuto )
        
        self._autocomplete_long_wait = wx.SpinCtrl( ac_panel, min = 0, max = 10000 )
        self._autocomplete_long_wait.SetToolTip( 'how long the gui will typically wait, after you enter a character, before it queries the db with what you have entered so far' )
        
        self._autocomplete_short_wait_chars = wx.SpinCtrl( ac_panel, min = 1, max = 100 )
        self._autocomplete_short_wait_chars.SetToolTip( 'how many characters you enter before the gui starts waiting the short time before querying the db' )
        
        self._autocomplete_short_wait = wx.SpinCtrl( ac_panel, min = 0, max = 10000 )
        self._autocomplete_short_wait.SetToolTip( 'how long the gui will typically wait, after you enter a lot of characters, before it queries the db with what you have entered so far' )
        
        # misc
        
        misc_panel = ClientGUICommon.StaticBox( self, 'misc' )
        
        self._forced_search_limit = ClientGUICommon.NoneableSpinCtrl( misc_panel, '', min = 1, max = 100000 )
        
        #
        
        self._disk_cache_init_period.SetValue( self._new_options.GetNoneableInteger( 'disk_cache_init_period' ) )
        
        # option is stored in MB; the bytes control wants bytes (or None)
        disk_cache_maintenance_mb = self._new_options.GetNoneableInteger( 'disk_cache_maintenance_mb' )
        
        if disk_cache_maintenance_mb is None:
            
            disk_cache_maintenance = disk_cache_maintenance_mb
            
        else:
            
            disk_cache_maintenance = disk_cache_maintenance_mb * 1024 * 1024
            
        
        self._disk_cache_maintenance.SetValue( disk_cache_maintenance )
        
        # cache sizes are stored in bytes; spinctrls show MB
        self._thumbnail_cache_size.SetValue( int( HC.options[ 'thumbnail_cache_size' ] // 1048576 ) )
        self._fullscreen_cache_size.SetValue( int( HC.options[ 'fullscreen_cache_size' ] // 1048576 ) )
        
        self._thumbnail_cache_timeout.SetValue( self._new_options.GetInteger( 'thumbnail_cache_timeout' ) )
        self._image_cache_timeout.SetValue( self._new_options.GetInteger( 'image_cache_timeout' ) )
        
        self._video_buffer_size_mb.SetValue( self._new_options.GetInteger( 'video_buffer_size_mb' ) )
        
        self._num_autocomplete_chars.SetValue( HC.options[ 'num_autocomplete_chars' ] )
        
        self._fetch_ac_results_automatically.SetValue( HC.options[ 'fetch_ac_results_automatically' ] )
        
        ( char_limit, long_wait, short_wait ) = HC.options[ 'ac_timings' ]
        
        self._autocomplete_long_wait.SetValue( long_wait )
        self._autocomplete_short_wait_chars.SetValue( char_limit )
        self._autocomplete_short_wait.SetValue( short_wait )
        
        self._forced_search_limit.SetValue( self._new_options.GetNoneableInteger( 'forced_search_limit' ) )
        
        # layout
        
        rows = []
        
        rows.append( ( 'run disk cache on boot for this long: ', self._disk_cache_init_period ) )
        rows.append( ( 'regularly ensure this much of the db is in OS\'s disk cache: ', self._disk_cache_maintenance ) )
        
        gridbox = ClientGUICommon.WrapInGrid( disk_panel, rows )
        
        vbox = wx.BoxSizer( wx.VERTICAL )
        
        disk_panel.Add( help_hbox, CC.FLAGS_BUTTON_SIZER )
        disk_panel.Add( gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
        
        vbox.Add( disk_panel, CC.FLAGS_EXPAND_PERPENDICULAR )
        
        #
        
        thumbnails_sizer = wx.BoxSizer( wx.HORIZONTAL )
        
        thumbnails_sizer.Add( self._thumbnail_cache_size, CC.FLAGS_VCENTER )
        thumbnails_sizer.Add( self._estimated_number_thumbnails, CC.FLAGS_VCENTER )
        
        fullscreens_sizer = wx.BoxSizer( wx.HORIZONTAL )
        
        fullscreens_sizer.Add( self._fullscreen_cache_size, CC.FLAGS_VCENTER )
        fullscreens_sizer.Add( self._estimated_number_fullscreens, CC.FLAGS_VCENTER )
        
        video_buffer_sizer = wx.BoxSizer( wx.HORIZONTAL )
        
        video_buffer_sizer.Add( self._video_buffer_size_mb, CC.FLAGS_VCENTER )
        video_buffer_sizer.Add( self._estimated_number_video_frames, CC.FLAGS_VCENTER )
        
        rows = []
        
        rows.append( ( 'MB memory reserved for thumbnail cache: ', thumbnails_sizer ) )
        rows.append( ( 'MB memory reserved for image cache: ', fullscreens_sizer ) )
        rows.append( ( 'Thumbnail cache timeout: ', self._thumbnail_cache_timeout ) )
        rows.append( ( 'Image cache timeout: ', self._image_cache_timeout ) )
        
        gridbox = ClientGUICommon.WrapInGrid( media_panel, rows )
        
        media_panel.Add( gridbox, CC.FLAGS_EXPAND_PERPENDICULAR )
        
        vbox.Add( media_panel, CC.FLAGS_EXPAND_PERPENDICULAR )
        
        #
        
        text = 'Hydrus video rendering is CPU intensive.'
        text += os.linesep
        text += 'If you have a lot of memory, you can set a generous potential video buffer to compensate.'
        text += os.linesep
        text += 'If the video buffer can hold an entire video, it only needs to be rendered once and will play and loop very smoothly.'
        text += os.linesep
        text += 'PROTIP: Do not go crazy here.'
        
        buffer_panel.Add( wx.StaticText( buffer_panel, label = text ), CC.FLAGS_VCENTER )
        
        rows = []
        
        rows.append( ( 'MB memory for video buffer: ', video_buffer_sizer ) )
        
        gridbox = ClientGUICommon.WrapInGrid( buffer_panel, rows )
        
        buffer_panel.Add( gridbox, CC.FLAGS_EXPAND_PERPENDICULAR )
        
        vbox.Add( buffer_panel, CC.FLAGS_EXPAND_PERPENDICULAR )
        
        #
        
        text = 'If you disable automatic autocomplete results fetching, use Ctrl+Space to fetch results manually.'
        
        ac_panel.Add( wx.StaticText( ac_panel, label = text ), CC.FLAGS_EXPAND_PERPENDICULAR )
        
        rows = []
        
        rows.append( ( 'Automatically fetch autocomplete results after a short delay: ', self._fetch_ac_results_automatically ) )
        rows.append( ( 'Autocomplete long wait character threshold: ', self._num_autocomplete_chars ) )
        rows.append( ( 'Autocomplete long wait (ms): ', self._autocomplete_long_wait ) )
        rows.append( ( 'Autocomplete short wait character threshold: ', self._autocomplete_short_wait_chars ) )
        rows.append( ( 'Autocomplete short wait (ms): ', self._autocomplete_short_wait ) )
        
        gridbox = ClientGUICommon.WrapInGrid( ac_panel, rows )
        
        ac_panel.Add( gridbox, CC.FLAGS_EXPAND_PERPENDICULAR )
        
        vbox.Add( ac_panel, CC.FLAGS_EXPAND_PERPENDICULAR )
        
        #
        
        rows = []
        
        rows.append( ( 'Forced system:limit for all searches: ', self._forced_search_limit ) )
        
        gridbox = ClientGUICommon.WrapInGrid( misc_panel, rows )
        
        misc_panel.Add( gridbox, CC.FLAGS_EXPAND_PERPENDICULAR )
        
        vbox.Add( misc_panel, CC.FLAGS_EXPAND_PERPENDICULAR )
        
        #
        
        self.SetSizer( vbox )
        
        # initialise the estimate labels and enable states
        
        self.EventFetchAuto( None )
        self.EventFullscreensUpdate( None )
        self.EventThumbnailsUpdate( None )
        self.EventVideoBufferUpdate( None )
        
        wx.CallAfter( self.Layout ) # draws the static texts correctly
        
    
    def _ShowDiskCacheHelp( self ):
        """Pop up an explanatory message about the pre-boot/ongoing disk cache."""
        
        message = 'The hydrus database runs best on a drive with fast random access latency. Certain important operations can function up to 100 times faster when started raw from an SSD rather than an HDD.'
        message += os.linesep * 2
        message += 'To get around this, the client populates a pre-boot and ongoing disk cache. By contiguously frontloading the database into memory, the most important functions do not need to wait on your disk for most of their work.'
        message += os.linesep * 2
        message += 'If you tend to leave your client on in the background and have a slow drive but a lot of ram, you might like to pump these numbers up. 15s boot cache and 2048MB ongoing can really make a difference on, for instance, a slow laptop drive.'
        message += os.linesep * 2
        message += 'If you run the database from an SSD, you can reduce or entirely eliminate these values, as the benefit is not so stark. 2s and 256MB is fine.'
        message += os.linesep * 2
        message += 'Unless you are testing, do not go crazy with this stuff. You can set 8192MB if you like, but there are diminishing (and potentially negative) returns.'
        
        wx.MessageBox( message )
        
    
    def EventFetchAuto( self, event ):
        """Enable/disable the autocomplete timing controls to match the auto-fetch checkbox."""
        
        should_enable = self._fetch_ac_results_automatically.GetValue()
        
        for control in ( self._num_autocomplete_chars, self._autocomplete_long_wait, self._autocomplete_short_wait_chars, self._autocomplete_short_wait ):
            
            control.Enable( should_enable )
            
        
    
    def EventFullscreensUpdate( self, event ):
        """Refresh the 'about N images' estimate next to the image cache size spinner."""
        
        ( width, height ) = ClientGUITopLevelWindows.GetDisplaySize( self )
        
        # 3 bytes per pixel at the current display resolution
        estimated_bytes_per_fullscreen = 3 * width * height
        
        self._estimated_number_fullscreens.SetLabelText( '(about ' + HydrusData.ToHumanInt( ( self._fullscreen_cache_size.GetValue() * 1048576 ) // estimated_bytes_per_fullscreen ) + '-' + HydrusData.ToHumanInt( ( self._fullscreen_cache_size.GetValue() * 1048576 ) // ( estimated_bytes_per_fullscreen // 4 ) ) + ' images)' )
        
    
    def EventThumbnailsUpdate( self, event ):
        """Refresh the 'about N thumbnails' estimate next to the thumbnail cache size spinner."""
        
        ( thumbnail_width, thumbnail_height ) = HC.options[ 'thumbnail_dimensions' ]
        
        res_string = HydrusData.ConvertResolutionToPrettyString( ( thumbnail_width, thumbnail_height ) )
        
        # 3 bytes per pixel at the configured thumbnail resolution
        estimated_bytes_per_thumb = 3 * thumbnail_width * thumbnail_height
        
        estimated_thumbs = ( self._thumbnail_cache_size.GetValue() * 1048576 ) // estimated_bytes_per_thumb
        
        self._estimated_number_thumbnails.SetLabelText( '(at ' + res_string + ', about ' + HydrusData.ToHumanInt( estimated_thumbs ) + ' thumbnails)' )
        
    
    def EventVideoBufferUpdate( self, event ):
        """Refresh the 'about N frames' estimate next to the video buffer size spinner."""
        
        estimated_720p_frames = int( ( self._video_buffer_size_mb.GetValue() * 1024 * 1024 ) // ( 1280 * 720 * 3 ) )
        
        self._estimated_number_video_frames.SetLabelText( '(about ' + HydrusData.ToHumanInt( estimated_720p_frames ) + ' frames of 720p video)' )
        
    
    def UpdateOptions( self ):
        """Commit this panel's widget values back into HC.options and new_options."""
        
        self._new_options.SetNoneableInteger( 'disk_cache_init_period', self._disk_cache_init_period.GetValue() )
        
        # the bytes control reports bytes (or None); the option is stored in MB
        disk_cache_maintenance = self._disk_cache_maintenance.GetValue()
        
        if disk_cache_maintenance is None:
            
            disk_cache_maintenance_mb = disk_cache_maintenance
            
        else:
            
            disk_cache_maintenance_mb = disk_cache_maintenance // ( 1024 * 1024 )
            
        
        self._new_options.SetNoneableInteger( 'disk_cache_maintenance_mb', disk_cache_maintenance_mb )
        
        # spinctrls show MB; the options are stored in bytes
        HC.options[ 'thumbnail_cache_size' ] = self._thumbnail_cache_size.GetValue() * 1048576
        HC.options[ 'fullscreen_cache_size' ] = self._fullscreen_cache_size.GetValue() * 1048576
        
        self._new_options.SetInteger( 'thumbnail_cache_timeout', self._thumbnail_cache_timeout.GetValue() )
        self._new_options.SetInteger( 'image_cache_timeout', self._image_cache_timeout.GetValue() )
        
        self._new_options.SetInteger( 'video_buffer_size_mb', self._video_buffer_size_mb.GetValue() )
        
        self._new_options.SetNoneableInteger( 'forced_search_limit', self._forced_search_limit.GetValue() )
        
        HC.options[ 'num_autocomplete_chars' ] = self._num_autocomplete_chars.GetValue()
        
        HC.options[ 'fetch_ac_results_automatically' ] = self._fetch_ac_results_automatically.GetValue()
        
        long_wait = self._autocomplete_long_wait.GetValue()
        
        char_limit = self._autocomplete_short_wait_chars.GetValue()
        
        short_wait = self._autocomplete_short_wait.GetValue()
        
        HC.options[ 'ac_timings' ] = ( char_limit, long_wait, short_wait )
        
    
class _TagsPanel( wx.Panel ):
    """Options page covering general tag behaviour and the user's favourite tags."""
    
    def __init__( self, parent, new_options ):
        """Build the widgets, load their current values, and lay out the page.
        
        new_options is the shared client options object; values are written back
        in UpdateOptions.
        """
        
        wx.Panel.__init__( self, parent )
        
        self._new_options = new_options
        
        #
        
        general_panel = ClientGUICommon.StaticBox( self, 'general tag options' )
        
        # each Append attaches the sort constant as client data; the Select( 0-7 )
        # calls further down rely on this exact insertion order
        self._default_tag_sort = wx.Choice( general_panel )
        
        self._default_tag_sort.Append( 'lexicographic (a-z)', CC.SORT_BY_LEXICOGRAPHIC_ASC )
        self._default_tag_sort.Append( 'lexicographic (z-a)', CC.SORT_BY_LEXICOGRAPHIC_DESC )
        self._default_tag_sort.Append( 'lexicographic (a-z) (grouped by namespace)', CC.SORT_BY_LEXICOGRAPHIC_NAMESPACE_ASC )
        self._default_tag_sort.Append( 'lexicographic (z-a) (grouped by namespace)', CC.SORT_BY_LEXICOGRAPHIC_NAMESPACE_DESC )
        self._default_tag_sort.Append( 'incidence (desc)', CC.SORT_BY_INCIDENCE_DESC )
        self._default_tag_sort.Append( 'incidence (asc)', CC.SORT_BY_INCIDENCE_ASC )
        self._default_tag_sort.Append( 'incidence (desc) (grouped by namespace)', CC.SORT_BY_INCIDENCE_NAMESPACE_DESC )
        self._default_tag_sort.Append( 'incidence (asc) (grouped by namespace)', CC.SORT_BY_INCIDENCE_NAMESPACE_ASC )
        
        self._default_tag_repository = ClientGUICommon.BetterChoice( general_panel )
        
        self._default_tag_service_search_page = ClientGUICommon.BetterChoice( general_panel )
        
        self._show_all_tags_in_autocomplete = wx.CheckBox( general_panel )
        self._ac_select_first_with_count = wx.CheckBox( general_panel )
        
        self._apply_all_parents_to_all_services = wx.CheckBox( general_panel )
        self._apply_all_siblings_to_all_services = wx.CheckBox( general_panel )
        
        #
        
        favourites_panel = ClientGUICommon.StaticBox( self, 'favourite tags' )
        
        desc = 'These tags will appear in your tag autocomplete results area, under the \'favourites\' tab.'
        
        favourites_st = ClientGUICommon.BetterStaticText( favourites_panel, desc )
        
        favourites_st.SetWrapWidth( 400 )
        
        expand_parents = False
        
        self._favourites = ClientGUIListBoxes.ListBoxTagsStringsAddRemove( favourites_panel )
        
        # the dropdown feeds new tags straight into the favourites listbox
        self._favourites_input = ClientGUIACDropdown.AutoCompleteDropdownTagsWrite( favourites_panel, self._favourites.AddTags, expand_parents, CC.LOCAL_FILE_SERVICE_KEY, CC.COMBINED_TAG_SERVICE_KEY, tag_service_key_changed_callable = self._favourites.SetTagServiceKey )
        
        #
        
        # map the stored sort constant back to the matching choice index from the
        # Append block above
        if HC.options[ 'default_tag_sort' ] == CC.SORT_BY_LEXICOGRAPHIC_ASC: self._default_tag_sort.Select( 0 )
        elif HC.options[ 'default_tag_sort' ] == CC.SORT_BY_LEXICOGRAPHIC_DESC: self._default_tag_sort.Select( 1 )
        elif HC.options[ 'default_tag_sort' ] == CC.SORT_BY_LEXICOGRAPHIC_NAMESPACE_ASC: self._default_tag_sort.Select( 2 )
        elif HC.options[ 'default_tag_sort' ] == CC.SORT_BY_LEXICOGRAPHIC_NAMESPACE_DESC: self._default_tag_sort.Select( 3 )
        elif HC.options[ 'default_tag_sort' ] == CC.SORT_BY_INCIDENCE_DESC: self._default_tag_sort.Select( 4 )
        elif HC.options[ 'default_tag_sort' ] == CC.SORT_BY_INCIDENCE_ASC: self._default_tag_sort.Select( 5 )
        elif HC.options[ 'default_tag_sort' ] == CC.SORT_BY_INCIDENCE_NAMESPACE_DESC: self._default_tag_sort.Select( 6 )
        elif HC.options[ 'default_tag_sort' ] == CC.SORT_BY_INCIDENCE_NAMESPACE_ASC: self._default_tag_sort.Select( 7 )
        
        # the search-page choice gets the combined service on top; the repository
        # choice only lists real tag services
        self._default_tag_service_search_page.Append( 'all known tags', CC.COMBINED_TAG_SERVICE_KEY )
        
        services = HG.client_controller.services_manager.GetServices( HC.TAG_SERVICES )
        
        for service in services:
            
            self._default_tag_repository.Append( service.GetName(), service.GetServiceKey() )
            
            self._default_tag_service_search_page.Append( service.GetName(), service.GetServiceKey() )
            
        
        default_tag_repository_key = HC.options[ 'default_tag_repository' ]
        
        self._default_tag_repository.SelectClientData( default_tag_repository_key )
        
        self._default_tag_service_search_page.SelectClientData( new_options.GetKey( 'default_tag_service_search_page' ) )
        
        self._show_all_tags_in_autocomplete.SetValue( HC.options[ 'show_all_tags_in_autocomplete' ] )
        self._ac_select_first_with_count.SetValue( self._new_options.GetBoolean( 'ac_select_first_with_count' ) )
        
        self._apply_all_parents_to_all_services.SetValue( self._new_options.GetBoolean( 'apply_all_parents_to_all_services' ) )
        self._apply_all_siblings_to_all_services.SetValue( self._new_options.GetBoolean( 'apply_all_siblings_to_all_services' ) )
        
        #
        
        self._favourites.SetTags( new_options.GetStringList( 'favourite_tags' ) )
        
        #
        
        vbox = wx.BoxSizer( wx.VERTICAL )
        
        rows = []
        
        rows.append( ( 'Default tag service in manage tag dialogs: ', self._default_tag_repository ) )
        rows.append( ( 'Default tag service in search pages: ', self._default_tag_service_search_page ) )
        rows.append( ( 'Default tag sort: ', self._default_tag_sort ) )
        rows.append( ( 'By default, search non-local tags in write-autocomplete: ', self._show_all_tags_in_autocomplete ) )
        rows.append( ( 'By default, select the first tag result with actual count in write-autocomplete: ', self._ac_select_first_with_count ) )
        rows.append( ( 'Suggest all parents for all services: ', self._apply_all_parents_to_all_services ) )
        rows.append( ( 'Apply all siblings to all services (local siblings have precedence): ', self._apply_all_siblings_to_all_services ) )
        
        gridbox = ClientGUICommon.WrapInGrid( general_panel, rows )
        
        general_panel.Add( gridbox, CC.FLAGS_EXPAND_PERPENDICULAR )
        
        vbox.Add( general_panel, CC.FLAGS_EXPAND_PERPENDICULAR )
        
        #
        
        favourites_panel.Add( favourites_st, CC.FLAGS_EXPAND_PERPENDICULAR )
        favourites_panel.Add( self._favourites, CC.FLAGS_EXPAND_BOTH_WAYS )
        favourites_panel.Add( self._favourites_input, CC.FLAGS_EXPAND_BOTH_WAYS )
        
        vbox.Add( favourites_panel, CC.FLAGS_EXPAND_BOTH_WAYS )
        
        #
        
        self.SetSizer( vbox )
        
    
    def UpdateOptions( self ):
        """Write the page's widget values back into HC.options and the new options object."""
        
        HC.options[ 'default_tag_repository' ] = self._default_tag_repository.GetChoice()
        HC.options[ 'default_tag_sort' ] = self._default_tag_sort.GetClientData( self._default_tag_sort.GetSelection() )
        HC.options[ 'show_all_tags_in_autocomplete' ] = self._show_all_tags_in_autocomplete.GetValue()
        
        self._new_options.SetBoolean( 'ac_select_first_with_count', self._ac_select_first_with_count.GetValue() )
        
        self._new_options.SetKey( 'default_tag_service_search_page', self._default_tag_service_search_page.GetChoice() )
        
        self._new_options.SetBoolean( 'apply_all_parents_to_all_services', self._apply_all_parents_to_all_services.GetValue() )
        self._new_options.SetBoolean( 'apply_all_siblings_to_all_services', self._apply_all_siblings_to_all_services.GetValue() )
        
        #
        
        self._new_options.SetStringList( 'favourite_tags', list( self._favourites.GetTags() ) )
class _TagPresentationPanel( wx.Panel ):
    """Options page for how tags are rendered: summaries, namespace display, and namespace colours."""
    
    def __init__( self, parent, new_options ):
        """Build the widgets, load their current values, and lay out the page."""
        
        wx.Panel.__init__( self, parent )
        
        self._new_options = new_options
        
        #
        
        # one summary-generator button per presentation location
        tag_summary_generator = self._new_options.GetTagSummaryGenerator( 'thumbnail_top' )
        
        self._thumbnail_top = ClientGUIScrolledPanelsEdit.TagSummaryGeneratorButton( self, tag_summary_generator )
        
        tag_summary_generator = self._new_options.GetTagSummaryGenerator( 'thumbnail_bottom_right' )
        
        self._thumbnail_bottom_right = ClientGUIScrolledPanelsEdit.TagSummaryGeneratorButton( self, tag_summary_generator )
        
        tag_summary_generator = self._new_options.GetTagSummaryGenerator( 'media_viewer_top' )
        
        self._media_viewer_top = ClientGUIScrolledPanelsEdit.TagSummaryGeneratorButton( self, tag_summary_generator )
        
        #
        
        render_panel = ClientGUICommon.StaticBox( self, 'namespace rendering' )
        
        render_st = ClientGUICommon.BetterStaticText( render_panel, label = 'Namespaced tags are stored and directly edited in hydrus as "namespace:subtag", but most presentation windows can display them differently.' )
        
        render_st.SetWrapWidth( 400 )
        
        self._show_namespaces = wx.CheckBox( render_panel )
        self._namespace_connector = wx.TextCtrl( render_panel )
        
        #
        
        namespace_colours_panel = ClientGUICommon.StaticBox( self, 'namespace colours' )
        
        self._namespace_colours = ClientGUIListBoxes.ListBoxTagsColourOptions( namespace_colours_panel, HC.options[ 'namespace_colours' ] )
        
        self._edit_namespace_colour = wx.Button( namespace_colours_panel, label = 'edit selected' )
        self._edit_namespace_colour.Bind( wx.EVT_BUTTON, self.EventEditNamespaceColour )
        
        # typing a namespace and hitting enter adds it with a random colour
        self._new_namespace_colour = wx.TextCtrl( namespace_colours_panel, style = wx.TE_PROCESS_ENTER )
        self._new_namespace_colour.Bind( wx.EVT_KEY_DOWN, self.EventKeyDownNamespace )
        
        #
        
        self._show_namespaces.SetValue( new_options.GetBoolean( 'show_namespaces' ) )
        self._namespace_connector.SetValue( new_options.GetString( 'namespace_connector' ) )
        
        #
        
        namespace_colours_panel.Add( self._namespace_colours, CC.FLAGS_EXPAND_BOTH_WAYS )
        namespace_colours_panel.Add( self._new_namespace_colour, CC.FLAGS_EXPAND_PERPENDICULAR )
        namespace_colours_panel.Add( self._edit_namespace_colour, CC.FLAGS_EXPAND_PERPENDICULAR )
        
        #
        
        vbox = wx.BoxSizer( wx.VERTICAL )
        
        #
        
        rows = []
        
        rows.append( ( 'On thumbnail top:', self._thumbnail_top ) )
        rows.append( ( 'On thumbnail bottom-right:', self._thumbnail_bottom_right ) )
        rows.append( ( 'On media viewer top:', self._media_viewer_top ) )
        
        gridbox = ClientGUICommon.WrapInGrid( self, rows )
        
        vbox.Add( gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
        
        #
        
        rows = []
        
        rows.append( ( 'Show namespaces: ', self._show_namespaces ) )
        rows.append( ( 'If shown, namespace connecting string: ', self._namespace_connector ) )
        
        gridbox = ClientGUICommon.WrapInGrid( render_panel, rows )
        
        render_panel.Add( render_st, CC.FLAGS_EXPAND_PERPENDICULAR )
        render_panel.Add( gridbox, CC.FLAGS_EXPAND_PERPENDICULAR )
        
        vbox.Add( render_panel, CC.FLAGS_EXPAND_PERPENDICULAR )
        
        #
        
        vbox.Add( namespace_colours_panel, CC.FLAGS_EXPAND_BOTH_WAYS )
        
        #
        
        self.SetSizer( vbox )
        
    
    def EventEditNamespaceColour( self, event ):
        """Open a colour dialog for each selected namespace and store the chosen colour."""
        
        results = self._namespace_colours.GetSelectedNamespaceColours()
        
        for ( namespace, colour ) in list(results.items()):
            
            colour_data = wx.ColourData()
            
            colour_data.SetColour( colour )
            colour_data.SetChooseFull( True )
            
            with wx.ColourDialog( self, data = colour_data ) as dlg:
                
                if dlg.ShowModal() == wx.ID_OK:
                    
                    colour_data = dlg.GetColourData()
                    
                    colour = colour_data.GetColour()
                    
                    self._namespace_colours.SetNamespaceColour( namespace, colour )
                    
                
            
        
    
    def EventKeyDownNamespace( self, event ):
        """On enter, add the typed namespace with a random colour; otherwise pass the key on."""
        
        ( modifier, key ) = ClientGUIShortcuts.ConvertKeyEventToSimpleTuple( event )
        
        if key in ( wx.WXK_RETURN, wx.WXK_NUMPAD_ENTER ):
            
            namespace = self._new_namespace_colour.GetValue()
            
            if namespace != '':
                
                self._namespace_colours.SetNamespaceColour( namespace, wx.Colour( random.randint( 0, 255 ), random.randint( 0, 255 ), random.randint( 0, 255 ) ) )
                
                self._new_namespace_colour.SetValue( '' )
                
            
        else:
            
            event.Skip()
            
        
    
    def UpdateOptions( self ):
        """Write the page's widget values back into HC.options and the new options object."""
        
        self._new_options.SetTagSummaryGenerator( 'thumbnail_top', self._thumbnail_top.GetValue() )
        self._new_options.SetTagSummaryGenerator( 'thumbnail_bottom_right', self._thumbnail_bottom_right.GetValue() )
        self._new_options.SetTagSummaryGenerator( 'media_viewer_top', self._media_viewer_top.GetValue() )
        
        self._new_options.SetBoolean( 'show_namespaces', self._show_namespaces.GetValue() )
        self._new_options.SetString( 'namespace_connector', self._namespace_connector.GetValue() )
        
        HC.options[ 'namespace_colours' ] = self._namespace_colours.GetNamespaceColours()
class _TagSuggestionsPanel( wx.Panel ):
    """Options page for the 'suggested tags' column in manage tags dialogs:
    favourites, related tags, file lookup scripts, and recent tags."""
    
    def __init__( self, parent, new_options ):
        """Build the widgets, load their current values, and lay out the page."""
        
        wx.Panel.__init__( self, parent )
        
        self._new_options = new_options
        
        suggested_tags_panel = ClientGUICommon.StaticBox( self, 'suggested tags' )
        
        self._suggested_tags_width = wx.SpinCtrl( suggested_tags_panel, min = 20, max = 65535 )
        
        self._suggested_tags_layout = ClientGUICommon.BetterChoice( suggested_tags_panel )
        
        self._suggested_tags_layout.Append( 'notebook', 'notebook' )
        self._suggested_tags_layout.Append( 'side-by-side', 'columns' )
        
        suggest_tags_panel_notebook = wx.Notebook( suggested_tags_panel )
        
        #
        
        suggested_tags_favourites_panel = wx.Panel( suggest_tags_panel_notebook )
        
        suggested_tags_favourites_panel.SetMinSize( ( 400, -1 ) )
        
        self._suggested_favourites_services = ClientGUICommon.BetterChoice( suggested_tags_favourites_panel )
        
        # NOTE(review): BetterChoice.Append takes ( display_string, client_data ) everywhere
        # else in this file; passing the raw service key as the display string looks
        # suspicious -- confirm whether a 'local tags' label was intended here
        self._suggested_favourites_services.Append( CC.LOCAL_TAG_SERVICE_KEY, CC.LOCAL_TAG_SERVICE_KEY )
        
        tag_services = HG.client_controller.services_manager.GetServices( ( HC.TAG_REPOSITORY, ) )
        
        for tag_service in tag_services:
            
            self._suggested_favourites_services.Append( tag_service.GetName(), tag_service.GetServiceKey() )
            
        
        self._suggested_favourites = ClientGUIListBoxes.ListBoxTagsStringsAddRemove( suggested_tags_favourites_panel )
        
        # per-service favourites are cached here while the user switches services
        self._current_suggested_favourites_service = None
        
        self._suggested_favourites_dict = {}
        
        expand_parents = False
        
        self._suggested_favourites_input = ClientGUIACDropdown.AutoCompleteDropdownTagsWrite( suggested_tags_favourites_panel, self._suggested_favourites.AddTags, expand_parents, CC.LOCAL_FILE_SERVICE_KEY, CC.LOCAL_TAG_SERVICE_KEY, tag_service_key_changed_callable = self._suggested_favourites.SetTagServiceKey )
        
        #
        
        suggested_tags_related_panel = wx.Panel( suggest_tags_panel_notebook )
        
        self._show_related_tags = wx.CheckBox( suggested_tags_related_panel )
        
        self._related_tags_search_1_duration_ms = wx.SpinCtrl( suggested_tags_related_panel, min = 50, max = 60000 )
        self._related_tags_search_2_duration_ms = wx.SpinCtrl( suggested_tags_related_panel, min = 50, max = 60000 )
        self._related_tags_search_3_duration_ms = wx.SpinCtrl( suggested_tags_related_panel, min = 50, max = 60000 )
        
        #
        
        suggested_tags_file_lookup_script_panel = wx.Panel( suggest_tags_panel_notebook )
        
        self._show_file_lookup_script_tags = wx.CheckBox( suggested_tags_file_lookup_script_panel )
        
        self._favourite_file_lookup_script = ClientGUICommon.BetterChoice( suggested_tags_file_lookup_script_panel )
        
        script_names = list( HG.client_controller.Read( 'serialisable_names', HydrusSerialisable.SERIALISABLE_TYPE_PARSE_ROOT_FILE_LOOKUP ) )
        
        script_names.sort()
        
        for name in script_names:
            
            self._favourite_file_lookup_script.Append( name, name )
            
        
        #
        
        suggested_tags_recent_panel = wx.Panel( suggest_tags_panel_notebook )
        
        self._num_recent_tags = ClientGUICommon.NoneableSpinCtrl( suggested_tags_recent_panel, 'number of recent tags to show', min = 1, none_phrase = 'do not show' )
        
        #
        
        self._suggested_tags_width.SetValue( self._new_options.GetInteger( 'suggested_tags_width' ) )
        
        self._suggested_tags_layout.SelectClientData( self._new_options.GetNoneableString( 'suggested_tags_layout' ) )
        
        self._suggested_favourites_services.SelectClientData( CC.LOCAL_TAG_SERVICE_KEY )
        
        self._show_related_tags.SetValue( self._new_options.GetBoolean( 'show_related_tags' ) )
        
        self._related_tags_search_1_duration_ms.SetValue( self._new_options.GetInteger( 'related_tags_search_1_duration_ms' ) )
        self._related_tags_search_2_duration_ms.SetValue( self._new_options.GetInteger( 'related_tags_search_2_duration_ms' ) )
        self._related_tags_search_3_duration_ms.SetValue( self._new_options.GetInteger( 'related_tags_search_3_duration_ms' ) )
        
        self._show_file_lookup_script_tags.SetValue( self._new_options.GetBoolean( 'show_file_lookup_script_tags' ) )
        
        self._favourite_file_lookup_script.SelectClientData( self._new_options.GetNoneableString( 'favourite_file_lookup_script' ) )
        
        self._num_recent_tags.SetValue( self._new_options.GetNoneableInteger( 'num_recent_tags' ) )
        
        #
        
        panel_vbox = wx.BoxSizer( wx.VERTICAL )
        
        panel_vbox.Add( self._suggested_favourites_services, CC.FLAGS_EXPAND_PERPENDICULAR )
        panel_vbox.Add( self._suggested_favourites, CC.FLAGS_EXPAND_BOTH_WAYS )
        panel_vbox.Add( self._suggested_favourites_input, CC.FLAGS_EXPAND_PERPENDICULAR )
        
        suggested_tags_favourites_panel.SetSizer( panel_vbox )
        
        #
        
        panel_vbox = wx.BoxSizer( wx.VERTICAL )
        
        rows = []
        
        rows.append( ( 'Show related tags on single-file manage tags windows: ', self._show_related_tags ) )
        rows.append( ( 'Initial search duration (ms): ', self._related_tags_search_1_duration_ms ) )
        rows.append( ( 'Medium search duration (ms): ', self._related_tags_search_2_duration_ms ) )
        rows.append( ( 'Thorough search duration (ms): ', self._related_tags_search_3_duration_ms ) )
        
        gridbox = ClientGUICommon.WrapInGrid( suggested_tags_related_panel, rows )
        
        desc = 'This will search the database for statistically related tags based on what your focused file already has.'
        
        panel_vbox.Add( ClientGUICommon.BetterStaticText( suggested_tags_related_panel, desc ), CC.FLAGS_EXPAND_PERPENDICULAR )
        panel_vbox.Add( gridbox, CC.FLAGS_EXPAND_PERPENDICULAR )
        
        suggested_tags_related_panel.SetSizer( panel_vbox )
        
        #
        
        panel_vbox = wx.BoxSizer( wx.VERTICAL )
        
        rows = []
        
        rows.append( ( 'Show file lookup scripts on single-file manage tags windows: ', self._show_file_lookup_script_tags ) )
        rows.append( ( 'Favourite file lookup script: ', self._favourite_file_lookup_script ) )
        
        gridbox = ClientGUICommon.WrapInGrid( suggested_tags_file_lookup_script_panel, rows )
        
        panel_vbox.Add( gridbox, CC.FLAGS_EXPAND_PERPENDICULAR )
        
        suggested_tags_file_lookup_script_panel.SetSizer( panel_vbox )
        
        #
        
        panel_vbox = wx.BoxSizer( wx.VERTICAL )
        
        panel_vbox.Add( self._num_recent_tags, CC.FLAGS_EXPAND_PERPENDICULAR )
        
        suggested_tags_recent_panel.SetSizer( panel_vbox )
        
        #
        
        suggest_tags_panel_notebook.AddPage( suggested_tags_favourites_panel, 'favourites' )
        suggest_tags_panel_notebook.AddPage( suggested_tags_related_panel, 'related' )
        suggest_tags_panel_notebook.AddPage( suggested_tags_file_lookup_script_panel, 'file lookup scripts' )
        suggest_tags_panel_notebook.AddPage( suggested_tags_recent_panel, 'recent' )
        
        #
        
        rows = []
        
        rows.append( ( 'Width of suggested tags columns: ', self._suggested_tags_width ) )
        rows.append( ( 'Column layout: ', self._suggested_tags_layout ) )
        
        gridbox = ClientGUICommon.WrapInGrid( suggested_tags_panel, rows )
        
        desc = 'The manage tags dialog can provide several kinds of tag suggestions. For simplicity, most are turned off by default.'
        
        suggested_tags_panel.Add( ClientGUICommon.BetterStaticText( suggested_tags_panel, desc ), CC.FLAGS_EXPAND_PERPENDICULAR )
        suggested_tags_panel.Add( gridbox, CC.FLAGS_EXPAND_PERPENDICULAR )
        suggested_tags_panel.Add( suggest_tags_panel_notebook, CC.FLAGS_EXPAND_BOTH_WAYS )
        
        #
        
        vbox = wx.BoxSizer( wx.VERTICAL )
        
        vbox.Add( suggested_tags_panel, CC.FLAGS_EXPAND_BOTH_WAYS )
        
        self.SetSizer( vbox )
        
        #
        
        self._suggested_favourites_services.Bind( wx.EVT_CHOICE, self.EventSuggestedFavouritesService )
        
        # initialise the favourites listbox for the initially selected service
        self.EventSuggestedFavouritesService( None )
        
    
    def _SaveCurrentSuggestedFavourites( self ):
        """Stash the currently displayed favourites into the per-service cache dict."""
        
        if self._current_suggested_favourites_service is not None:
            
            self._suggested_favourites_dict[ self._current_suggested_favourites_service ] = self._suggested_favourites.GetTags()
            
        
    
    def EventSuggestedFavouritesService( self, event ):
        """Swap the favourites listbox over to the newly selected tag service.
        
        Saves the outgoing service's tags first, then loads the incoming service's
        tags from the cache or from the options object.
        """
        
        self._SaveCurrentSuggestedFavourites()
        
        self._current_suggested_favourites_service = self._suggested_favourites_services.GetChoice()
        
        if self._current_suggested_favourites_service in self._suggested_favourites_dict:
            
            favourites = self._suggested_favourites_dict[ self._current_suggested_favourites_service ]
            
        else:
            
            favourites = self._new_options.GetSuggestedTagsFavourites( self._current_suggested_favourites_service )
            
        
        self._suggested_favourites.SetTags( favourites )
        
        self._suggested_favourites_input.SetTagService( self._current_suggested_favourites_service )
        
    
    def UpdateOptions( self ):
        """Write the page's widget values back into the new options object."""
        
        self._new_options.SetInteger( 'suggested_tags_width', self._suggested_tags_width.GetValue() )
        
        self._new_options.SetNoneableString( 'suggested_tags_layout', self._suggested_tags_layout.GetChoice() )
        
        # flush the visible listbox into the cache before saving all services
        self._SaveCurrentSuggestedFavourites()
        
        for ( service_key, favourites ) in list(self._suggested_favourites_dict.items()):
            
            self._new_options.SetSuggestedTagsFavourites( service_key, favourites )
            
        
        self._new_options.SetBoolean( 'show_related_tags', self._show_related_tags.GetValue() )
        
        self._new_options.SetInteger( 'related_tags_search_1_duration_ms', self._related_tags_search_1_duration_ms.GetValue() )
        self._new_options.SetInteger( 'related_tags_search_2_duration_ms', self._related_tags_search_2_duration_ms.GetValue() )
        self._new_options.SetInteger( 'related_tags_search_3_duration_ms', self._related_tags_search_3_duration_ms.GetValue() )
        
        self._new_options.SetBoolean( 'show_file_lookup_script_tags', self._show_file_lookup_script_tags.GetValue() )
        
        self._new_options.SetNoneableString( 'favourite_file_lookup_script', self._favourite_file_lookup_script.GetChoice() )
        
        self._new_options.SetNoneableInteger( 'num_recent_tags', self._num_recent_tags.GetValue() )
class _ThumbnailsPanel( wx.Panel ):
    """Options page for thumbnail dimensions, margins, scrolling, and background."""
    
    def __init__( self, parent, new_options ):
        """Build the widgets, load their current values, and lay out the page."""
        
        wx.Panel.__init__( self, parent )
        
        self._new_options = new_options
        
        self._thumbnail_width = wx.SpinCtrl( self, min = 20, max = 2048 )
        self._thumbnail_height = wx.SpinCtrl( self, min = 20, max = 2048 )
        
        self._thumbnail_border = wx.SpinCtrl( self, min = 0, max = 20 )
        self._thumbnail_margin = wx.SpinCtrl( self, min = 0, max = 20 )
        
        self._video_thumbnail_percentage_in = wx.SpinCtrl( self, min = 0, max = 100 )
        
        # free-text; validated as a float in UpdateOptions
        self._thumbnail_scroll_rate = wx.TextCtrl( self )
        
        self._thumbnail_fill = wx.CheckBox( self )
        
        self._thumbnail_visibility_scroll_percent = wx.SpinCtrl( self, min = 1, max = 99 )
        self._thumbnail_visibility_scroll_percent.SetToolTip( 'Lower numbers will cause fewer scrolls, higher numbers more.' )
        
        # hidden experimental control; kept so UpdateOptions can still read it
        self._media_background_bmp_path = wx.FilePickerCtrl( self )
        
        #
        
        ( thumbnail_width, thumbnail_height ) = HC.options[ 'thumbnail_dimensions' ]
        
        self._thumbnail_width.SetValue( thumbnail_width )
        self._thumbnail_height.SetValue( thumbnail_height )
        
        self._thumbnail_border.SetValue( self._new_options.GetInteger( 'thumbnail_border' ) )
        self._thumbnail_margin.SetValue( self._new_options.GetInteger( 'thumbnail_margin' ) )
        
        self._video_thumbnail_percentage_in.SetValue( self._new_options.GetInteger( 'video_thumbnail_percentage_in' ) )
        
        self._thumbnail_scroll_rate.SetValue( self._new_options.GetString( 'thumbnail_scroll_rate' ) )
        
        self._thumbnail_fill.SetValue( self._new_options.GetBoolean( 'thumbnail_fill' ) )
        
        self._thumbnail_visibility_scroll_percent.SetValue( self._new_options.GetInteger( 'thumbnail_visibility_scroll_percent' ) )
        
        media_background_bmp_path = self._new_options.GetNoneableString( 'media_background_bmp_path' )
        
        if media_background_bmp_path is not None:
            
            self._media_background_bmp_path.SetPath( media_background_bmp_path )
            
        
        self._media_background_bmp_path.Hide()
        
        #
        
        rows = []
        
        rows.append( ( 'Thumbnail width: ', self._thumbnail_width ) )
        rows.append( ( 'Thumbnail height: ', self._thumbnail_height ) )
        rows.append( ( 'Thumbnail border: ', self._thumbnail_border ) )
        rows.append( ( 'Thumbnail margin: ', self._thumbnail_margin ) )
        rows.append( ( 'Generate video thumbnails this % in: ', self._video_thumbnail_percentage_in ) )
        rows.append( ( 'Do not scroll down on key navigation if thumbnail at least this % visible: ', self._thumbnail_visibility_scroll_percent ) )
        rows.append( ( 'EXPERIMENTAL: Scroll thumbnails at this rate per scroll tick: ', self._thumbnail_scroll_rate ) )
        rows.append( ( 'EXPERIMENTAL: Zoom thumbnails so they \'fill\' their space: ', self._thumbnail_fill ) )
        #rows.append( ( 'EXPERIMENTAL: Image path for thumbnail panel background image (set blank to clear): ', self._media_background_bmp_path ) )
        
        gridbox = ClientGUICommon.WrapInGrid( self, rows )
        
        vbox = wx.BoxSizer( wx.VERTICAL )
        
        vbox.Add( gridbox, CC.FLAGS_EXPAND_PERPENDICULAR )
        
        self.SetSizer( vbox )
        
    
    def UpdateOptions( self ):
        """Write the page's widget values back into HC.options and the new options object.
        
        A non-numeric scroll rate is silently discarded, leaving the stored
        option unchanged.
        """
        
        new_thumbnail_dimensions = [ self._thumbnail_width.GetValue(), self._thumbnail_height.GetValue() ]
        
        HC.options[ 'thumbnail_dimensions' ] = new_thumbnail_dimensions
        
        self._new_options.SetInteger( 'thumbnail_border', self._thumbnail_border.GetValue() )
        self._new_options.SetInteger( 'thumbnail_margin', self._thumbnail_margin.GetValue() )
        
        self._new_options.SetInteger( 'video_thumbnail_percentage_in', self._video_thumbnail_percentage_in.GetValue() )
        
        thumbnail_scroll_rate = self._thumbnail_scroll_rate.GetValue()
        
        try:
            
            # validate only--the option is stored as a string
            float( thumbnail_scroll_rate )
            
            self._new_options.SetString( 'thumbnail_scroll_rate', thumbnail_scroll_rate )
            
        except ValueError:
            
            # was a bare 'except:'; float() on a non-numeric string raises ValueError,
            # so catch exactly that and keep the previously stored value
            pass
            
        
        self._new_options.SetBoolean( 'thumbnail_fill', self._thumbnail_fill.GetValue() )
        
        self._new_options.SetInteger( 'thumbnail_visibility_scroll_percent', self._thumbnail_visibility_scroll_percent.GetValue() )
        
        media_background_bmp_path = self._media_background_bmp_path.GetPath()
        
        if media_background_bmp_path == '':
            
            media_background_bmp_path = None
            
        
        self._new_options.SetNoneableString( 'media_background_bmp_path', media_background_bmp_path )
def CommitChanges( self ):
    """Push every active options page's values into the options structures and persist them.
    
    Any error during the synchronous writes is shown to the user in a message box.
    """
    
    for page in self._listbook.GetActivePages():
        
        page.UpdateOptions()
        
    
    try:
        
        HG.client_controller.WriteSynchronous( 'save_options', HC.options )
        
        HG.client_controller.WriteSynchronous( 'serialisable', self._new_options )
        
    except Exception:
        
        # was a bare 'except:', which also swallowed KeyboardInterrupt/SystemExit;
        # surface the traceback to the user instead of failing silently
        wx.MessageBox( traceback.format_exc() )
class ManageServerServicesPanel( ClientGUIScrolledPanels.ManagePanel ):
    """Admin panel for creating, editing, and deleting services on a hydrus server.
    
    Talks to the server through the clientside admin service; changes are only
    pushed in CommitChanges.
    """
    
    def __init__( self, parent, service_key ):
        """Fetch the server's current services and populate the list control.
        
        service_key identifies the clientside admin service used for requests.
        """
        
        self._clientside_admin_service = HG.client_controller.services_manager.GetService( service_key )
        
        ClientGUIScrolledPanels.ManagePanel.__init__( self, parent )
        
        # service keys queued for deletion; applied on CommitChanges
        self._deletee_service_keys = []
        
        columns = [ ( 'port', 80 ), ( 'name', -1 ), ( 'type', 220 ) ]
        
        self._services_listctrl = ClientGUIListCtrl.SaneListCtrlForSingleObject( self, 120, columns, delete_key_callback = self._Delete, activation_callback = self._Edit )
        
        menu_items = []
        
        menu_items.append( ( 'normal', 'tag repository', 'Create a new tag repository.', self._AddTagRepository ) )
        menu_items.append( ( 'normal', 'file repository', 'Create a new file repository.', self._AddFileRepository ) )
        
        self._add_button = ClientGUICommon.MenuButton( self, 'add', menu_items )
        
        self._edit_button = ClientGUICommon.BetterButton( self, 'edit', self._Edit )
        
        self._delete_button = ClientGUICommon.BetterButton( self, 'delete', self._Delete )
        
        #
        
        response = self._clientside_admin_service.Request( HC.GET, 'services' )
        
        serverside_services = response[ 'services' ]
        
        for serverside_service in serverside_services:
            
            ( display_tuple, sort_tuple ) = self._ConvertServiceToTuples( serverside_service )
            
            self._services_listctrl.Append( display_tuple, sort_tuple, serverside_service )
            
        
        #self._services_listctrl.SortListItems( 0 )
        
        #
        
        hbox = wx.BoxSizer( wx.HORIZONTAL )
        
        hbox.Add( self._add_button, CC.FLAGS_VCENTER )
        hbox.Add( self._edit_button, CC.FLAGS_VCENTER )
        hbox.Add( self._delete_button, CC.FLAGS_VCENTER )
        
        vbox = wx.BoxSizer( wx.VERTICAL )
        
        vbox.Add( self._services_listctrl, CC.FLAGS_EXPAND_BOTH_WAYS )
        vbox.Add( hbox, CC.FLAGS_SMALL_INDENT )
        
        self.SetSizer( vbox )
        
    
    def _ConvertServiceToTuples( self, service ):
        """Return ( display_tuple, sort_tuple ) for a service's listctrl row."""
        
        port = service.GetPort()
        name = service.GetName()
        service_type = service.GetServiceType()
        
        pretty_port = str( port )
        pretty_name = name
        pretty_service_type = HC.service_string_lookup[ service_type ]
        
        return ( ( pretty_port, pretty_name, pretty_service_type ), ( port, name, service_type ) )
        
    
    def _Add( self, service_type ):
        """Create a default service of the given type, edit it in a dialog, and add it to the list."""
        
        service_key = HydrusData.GenerateKey()
        
        port = self._GetNextPort()
        
        name = 'new service'
        
        dictionary = HydrusNetwork.GenerateDefaultServiceDictionary( service_type )
        
        service = HydrusNetwork.GenerateService( service_key, service_type, name, port, dictionary )
        
        with ClientGUITopLevelWindows.DialogEdit( self, 'edit serverside service' ) as dlg_edit:
            
            panel = ClientGUIScrolledPanelsEdit.EditServersideService( dlg_edit, service )
            
            dlg_edit.SetPanel( panel )
            
            if dlg_edit.ShowModal() == wx.ID_OK:
                
                new_service = panel.GetValue()
                
                # deduplicate name and port against the existing rows
                self._services_listctrl.SetNonDupeName( new_service )
                
                self._SetNonDupePort( new_service )
                
                ( display_tuple, sort_tuple ) = self._ConvertServiceToTuples( new_service )
                
                self._services_listctrl.Append( display_tuple, sort_tuple, new_service )
                
            
        
    
    def _AddFileRepository( self ):
        """Menu callback: add a new file repository."""
        
        self._Add( HC.FILE_REPOSITORY )
        
    
    def _AddTagRepository( self ):
        """Menu callback: add a new tag repository."""
        
        self._Add( HC.TAG_REPOSITORY )
        
    
    def _Delete( self ):
        """Confirm and then remove selected services, queuing their keys for server-side deletion."""
        
        with ClientGUIDialogs.DialogYesNo( self, 'Remove all selected?' ) as dlg:
            
            if dlg.ShowModal() == wx.ID_YES:
                
                for service in self._services_listctrl.GetObjects( only_selected = True ):
                    
                    self._deletee_service_keys.append( service.GetServiceKey() )
                    
                
                self._services_listctrl.RemoveAllSelected()
                
            
        
    
    def _Edit( self ):
        """Edit each selected service in a dialog; a cancel stops the remaining edits."""
        
        for index in self._services_listctrl.GetAllSelected():
            
            service = self._services_listctrl.GetObject( index )
            
            original_name = service.GetName()
            
            with ClientGUITopLevelWindows.DialogEdit( self, 'edit serverside service' ) as dlg_edit:
                
                panel = ClientGUIScrolledPanelsEdit.EditServersideService( dlg_edit, service )
                
                dlg_edit.SetPanel( panel )
                
                result = dlg_edit.ShowModal()
                
                if result == wx.ID_OK:
                    
                    edited_service = panel.GetValue()
                    
                    # only re-deduplicate the name if the user actually changed it
                    if edited_service.GetName() != original_name:
                        
                        self._services_listctrl.SetNonDupeName( edited_service )
                        
                    
                    self._SetNonDupePort( edited_service )
                    
                    ( display_tuple, sort_tuple ) = self._ConvertServiceToTuples( edited_service )
                    
                    self._services_listctrl.UpdateRow( index, display_tuple, sort_tuple, edited_service )
                    
                elif result == wx.ID_CANCEL:
                    
                    break
                    
                
            
        
    
    def _GetNextPort( self ):
        """Return a port number not used by any service currently in the list."""
        
        existing_ports = [ service.GetPort() for service in self._services_listctrl.GetObjects() ]
        
        # NOTE(review): max() raises ValueError if the list is empty -- presumably
        # a server always exposes at least its admin service here; confirm
        largest_port = max( existing_ports )
        
        next_port = largest_port
        
        while next_port in existing_ports:
            
            # wrap within the valid port range, skipping 0
            next_port = max( 1, ( next_port + 1 ) % 65536 )
            
        
        return next_port
        
    
    def _SetNonDupePort( self, new_service ):
        """If new_service's port collides with another listed service, move it to a free port."""
        
        existing_ports = [ service.GetPort() for service in self._services_listctrl.GetObjects() if service.GetServiceKey() != new_service.GetServiceKey() ]
        
        new_port = new_service.GetPort()
        
        if new_port in existing_ports:
            
            next_port = self._GetNextPort()
            
            new_service.SetPort( next_port )
            
        
    
    def CommitChanges( self ):
        """Push the edited service list to the server and update the client's records.
        
        Raises HydrusExceptions.VetoException if two services share a port.
        """
        
        services = self._services_listctrl.GetObjects()
        
        unique_ports = { service.GetPort() for service in services }
        
        if len( unique_ports ) < len( services ):
            
            raise HydrusExceptions.VetoException( 'It looks like some of those services share ports! Please give them unique ports!' )
            
        
        response = self._clientside_admin_service.Request( HC.POST, 'services', { 'services' : services } )
        
        service_keys_to_access_keys = dict( response[ 'service_keys_to_access_keys' ] )
        
        admin_service_key = self._clientside_admin_service.GetServiceKey()
        
        with HG.dirty_object_lock:
            
            HG.client_controller.WriteSynchronous( 'update_server_services', admin_service_key, services, service_keys_to_access_keys, self._deletee_service_keys )
            
            HG.client_controller.RefreshServices()
class ManageShortcutsPanel( ClientGUIScrolledPanels.ManagePanel ):
def __init__( self, parent ):
    """Build the shortcut-set management UI: a reserved sets list and a custom sets list.
    
    Reserved sets (names in CC.SHORTCUTS_RESERVED_NAMES) can only be edited;
    custom sets can be added, edited, and deleted.
    """
    
    ClientGUIScrolledPanels.ManagePanel.__init__( self, parent )
    
    help_button = ClientGUICommon.BetterBitmapButton( self, CC.GlobalBMPs.help, self._ShowHelp )
    help_button.SetToolTip( 'Show help regarding editing shortcuts.' )
    
    reserved_panel = ClientGUICommon.StaticBox( self, 'reserved' )
    
    self._reserved_shortcuts = ClientGUIListCtrl.SaneListCtrlForSingleObject( reserved_panel, 180, [ ( 'name', -1 ), ( 'size', 100 ) ], activation_callback = self._EditReserved )
    
    self._reserved_shortcuts.SetMinSize( ( 320, 200 ) )
    
    self._edit_reserved_button = ClientGUICommon.BetterButton( reserved_panel, 'edit', self._EditReserved )
    
    #
    
    custom_panel = ClientGUICommon.StaticBox( self, 'custom' )
    
    self._custom_shortcuts = ClientGUIListCtrl.SaneListCtrlForSingleObject( custom_panel, 120, [ ( 'name', -1 ), ( 'size', 100 ) ], delete_key_callback = self._Delete, activation_callback = self._EditCustom )
    
    self._add_button = ClientGUICommon.BetterButton( custom_panel, 'add', self._Add )
    
    self._edit_custom_button = ClientGUICommon.BetterButton( custom_panel, 'edit', self._EditCustom )
    
    self._delete_button = ClientGUICommon.BetterButton( custom_panel, 'delete', self._Delete )
    
    #
    
    all_shortcuts = HG.client_controller.Read( 'serialisable_named', HydrusSerialisable.SERIALISABLE_TYPE_SHORTCUTS )
    
    # split the stored sets into reserved (well-known names) and custom
    reserved_shortcuts = [ shortcuts for shortcuts in all_shortcuts if shortcuts.GetName() in CC.SHORTCUTS_RESERVED_NAMES ]
    custom_shortcuts = [ shortcuts for shortcuts in all_shortcuts if shortcuts.GetName() not in CC.SHORTCUTS_RESERVED_NAMES ]
    
    for shortcuts in reserved_shortcuts:
        
        ( display_tuple, sort_tuple ) = self._GetTuples( shortcuts )
        
        self._reserved_shortcuts.Append( display_tuple, sort_tuple, shortcuts )
        
    
    # remembered so deleted/renamed custom sets can be reconciled on save
    self._original_custom_names = set()
    
    for shortcuts in custom_shortcuts:
        
        ( display_tuple, sort_tuple ) = self._GetTuples( shortcuts )
        
        self._custom_shortcuts.Append( display_tuple, sort_tuple, shortcuts )
        
        self._original_custom_names.add( shortcuts.GetName() )
        
    
    #
    
    reserved_panel.Add( self._reserved_shortcuts, CC.FLAGS_EXPAND_SIZER_BOTH_WAYS )
    reserved_panel.Add( self._edit_reserved_button, CC.FLAGS_LONE_BUTTON )
    
    #
    
    button_hbox = wx.BoxSizer( wx.HORIZONTAL )
    
    button_hbox.Add( self._add_button, CC.FLAGS_VCENTER )
    button_hbox.Add( self._edit_custom_button, CC.FLAGS_VCENTER )
    button_hbox.Add( self._delete_button, CC.FLAGS_VCENTER )
    
    custom_panel.Add( self._custom_shortcuts, CC.FLAGS_EXPAND_SIZER_BOTH_WAYS )
    custom_panel.Add( button_hbox, CC.FLAGS_BUTTON_SIZER )
    
    #
    
    vbox = wx.BoxSizer( wx.VERTICAL )
    
    vbox.Add( help_button, CC.FLAGS_LONE_BUTTON )
    vbox.Add( reserved_panel, CC.FLAGS_EXPAND_BOTH_WAYS )
    
    # custom sets are an advanced feature, so warn non-advanced users
    if not HG.client_controller.new_options.GetBoolean( 'advanced_mode' ):
        
        vbox.Add( ClientGUICommon.BetterStaticText( self, 'Careful--custom sets are advanced!' ), CC.FLAGS_CENTER )
        
    
    vbox.Add( custom_panel, CC.FLAGS_EXPAND_BOTH_WAYS )
    
    self.SetSizer( vbox )
def _Add( self ):
    """Let the user create a brand-new custom shortcut set and append it to the custom list."""
    empty_set = ClientGUIShortcuts.Shortcuts( 'new shortcuts' )
    with ClientGUITopLevelWindows.DialogEdit( self, 'edit shortcuts' ) as dlg:
        edit_panel = self._EditPanel( dlg, empty_set )
        dlg.SetPanel( edit_panel )
        if dlg.ShowModal() != wx.ID_OK:
            return
        created_set = edit_panel.GetValue()
        ( display_tuple, sort_tuple ) = self._GetTuples( created_set )
        self._custom_shortcuts.Append( display_tuple, sort_tuple, created_set )
def _Delete( self ):
    """Delete the selected custom shortcut sets after a yes/no confirmation."""
    with ClientGUIDialogs.DialogYesNo( self, 'Remove all selected?' ) as dlg:
        if dlg.ShowModal() == wx.ID_YES:
            self._custom_shortcuts.RemoveAllSelected()
def _EditCustom( self ):
    """Open an edit dialog for each selected custom shortcut set in turn.

    A cancelled dialog aborts the remaining edits.
    """
    all_selected = self._custom_shortcuts.GetAllSelected()
    for index in all_selected:
        shortcuts = self._custom_shortcuts.GetObject( index )
        with ClientGUITopLevelWindows.DialogEdit( self, 'edit shortcuts' ) as dlg:
            panel = self._EditPanel( dlg, shortcuts )
            dlg.SetPanel( panel )
            if dlg.ShowModal() == wx.ID_OK:
                edited_shortcuts = panel.GetValue()
                ( display_tuple, sort_tuple ) = self._GetTuples( edited_shortcuts )
                self._custom_shortcuts.UpdateRow( index, display_tuple, sort_tuple, edited_shortcuts )
            else:
                # user cancelled -- stop walking the selection
                break
def _EditReserved( self ):
    """Open an edit dialog for each selected reserved shortcut set in turn.

    Mirrors _EditCustom but works on the reserved list; a cancel aborts the rest.
    """
    all_selected = self._reserved_shortcuts.GetAllSelected()
    for index in all_selected:
        shortcuts = self._reserved_shortcuts.GetObject( index )
        with ClientGUITopLevelWindows.DialogEdit( self, 'edit shortcuts' ) as dlg:
            panel = self._EditPanel( dlg, shortcuts )
            dlg.SetPanel( panel )
            if dlg.ShowModal() == wx.ID_OK:
                edited_shortcuts = panel.GetValue()
                ( display_tuple, sort_tuple ) = self._GetTuples( edited_shortcuts )
                self._reserved_shortcuts.UpdateRow( index, display_tuple, sort_tuple, edited_shortcuts )
            else:
                # user cancelled -- stop walking the selection
                break
def _GetTuples( self, shortcuts ):
    """Return ( display_tuple, sort_tuple ) for one shortcut-set row: ( name, shortcut count )."""
    set_name = shortcuts.GetName()
    num_shortcuts = len( shortcuts )
    display_tuple = ( set_name, HydrusData.ToHumanInt( num_shortcuts ) )
    return ( display_tuple, ( set_name, num_shortcuts ) )
def _ShowHelp( self ):
    """Show an explanatory message box about reserved vs custom shortcut sets."""
    paragraphs = [
        'I am in the process of converting the multiple old messy shortcut systems to this single unified engine. Many actions are not yet available here, and mouse support is very limited. I expect to overwrite the reserved shortcut sets back to (new and expanded) defaults at least once more, so don\'t remap everything yet unless you are ok with doing it again.',
        '---',
        'In hydrus, shortcuts are split into different sets that are active in different contexts. Depending on where the program focus is, multiple sets can be active at the same time. On a keyboard or mouse event, the active sets will be consulted one after another (typically from the smallest and most precise focus to the largest and broadest parent) until an action match is found.',
        'There are two kinds--\'reserved\' and \'custom\':',
        'Reserved shortcuts are always active in their contexts--the \'main_gui\' one is always consulted when you hit a key on the main gui window, for instance. They have limited actions to choose from, appropriate to their context. If you would prefer to, say, open the manage tags dialog with Ctrl+F3, edit or add that entry in the \'media\' set and that new shortcut will apply anywhere you are focused on some particular media.',
        'Custom shortcuts sets are those you can create and rename at will. They are only ever active in the media viewer window, and only when you set them so from the top hover-window\'s keyboard icon. They are primarily meant for setting tags and ratings with shortcuts, and are intended to be turned on and off as you perform different \'filtering\' jobs--for instance, you might like to set the 1-5 keys to the different values of a five-star rating system, or assign a few simple keystrokes to a number of common tags.',
        'The reserved \'media\' set also supports tag and rating actions, if you would like some of those to always be active.'
    ]
    # joining with a double line-break reproduces the original message exactly
    message = ( os.linesep * 2 ).join( paragraphs )
    wx.MessageBox( message )
def CommitChanges( self ):
    """Persist every shortcut set, delete removed custom sets, and announce the change."""
    for shortcuts in self._reserved_shortcuts.GetObjects():
        HG.client_controller.Write( 'serialisable', shortcuts )
    good_names = set()
    for shortcuts in self._custom_shortcuts.GetObjects():
        good_names.add( shortcuts.GetName() )
        HG.client_controller.Write( 'serialisable', shortcuts )
    # anything that existed when the dialog opened but is no longer listed gets deleted
    deletees = self._original_custom_names.difference( good_names )
    for name in deletees:
        HG.client_controller.Write( 'delete_serialisable_named', HydrusSerialisable.SERIALISABLE_TYPE_SHORTCUTS, name )
    HG.client_controller.pub( 'new_shortcuts' )
class _EditPanel( ClientGUIScrolledPanels.EditPanel ):
    """Edit panel for a single shortcut set: its name and its shortcut->command rows."""
    
    def __init__( self, parent, shortcuts ):
        """Build the name field, the shortcut list, and the add/edit/remove buttons.
        
        shortcuts -- the Shortcuts object being edited; its rows seed the list.
        """
        ClientGUIScrolledPanels.EditPanel.__init__( self, parent )
        self._name = wx.TextCtrl( self )
        self._shortcuts = ClientGUIListCtrl.SaneListCtrl( self, 480, [ ( 'shortcut', 150 ), ( 'command', -1 ) ], delete_key_callback = self.RemoveShortcuts, activation_callback = self.EditShortcuts )
        self._shortcuts.SetMinSize( ( 360, 480 ) )
        self._add = wx.Button( self, label = 'add' )
        self._add.Bind( wx.EVT_BUTTON, self.EventAdd )
        self._edit = wx.Button( self, label = 'edit' )
        self._edit.Bind( wx.EVT_BUTTON, self.EventEdit )
        self._remove = wx.Button( self, label = 'remove' )
        self._remove.Bind( wx.EVT_BUTTON, self.EventRemove )
        #
        name = shortcuts.GetName()
        self._name.SetValue( name )
        # reserved sets keep their fixed names, so the name field is locked for them
        self._this_is_custom = True
        if name in CC.SHORTCUTS_RESERVED_NAMES:
            self._this_is_custom = False
            self._name.Disable()
        for ( shortcut, command ) in shortcuts:
            sort_tuple = ( shortcut, command )
            pretty_tuple = self._ConvertSortTupleToPrettyTuple( sort_tuple )
            self._shortcuts.Append( pretty_tuple, sort_tuple )
        #self._shortcuts.SortListItems( 1 )
        #
        action_buttons = wx.BoxSizer( wx.HORIZONTAL )
        action_buttons.Add( self._add, CC.FLAGS_VCENTER )
        action_buttons.Add( self._edit, CC.FLAGS_VCENTER )
        action_buttons.Add( self._remove, CC.FLAGS_VCENTER )
        vbox = wx.BoxSizer( wx.VERTICAL )
        vbox.Add( ClientGUICommon.WrapInText( self._name, self, 'name: ' ), CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
        vbox.Add( self._shortcuts, CC.FLAGS_EXPAND_BOTH_WAYS )
        vbox.Add( action_buttons, CC.FLAGS_BUTTON_SIZER )
        self.SetSizer( vbox )
def _ConvertSortTupleToPrettyTuple( self, shortcut_tuple ):
    """Render a ( shortcut, command ) pair as its two display strings."""
    return tuple( item.ToString() for item in shortcut_tuple )
def EditShortcuts( self ):
    """Open an edit dialog for each selected shortcut row; a cancel aborts the rest."""
    name = self._name.GetValue()
    selected_indices = self._shortcuts.GetAllSelected()
    for index in selected_indices:
        ( shortcut, command ) = self._shortcuts.GetClientData( index )
        with ClientGUITopLevelWindows.DialogEdit( self, 'edit shortcut command' ) as dlg:
            # name tells the inner panel which action choices to offer
            panel = self._EditPanel( dlg, shortcut, command, name )
            dlg.SetPanel( panel )
            if dlg.ShowModal() == wx.ID_OK:
                ( shortcut, command ) = panel.GetValue()
                sort_tuple = ( shortcut, command )
                pretty_tuple = self._ConvertSortTupleToPrettyTuple( sort_tuple )
                self._shortcuts.UpdateRow( index, pretty_tuple, sort_tuple )
            else:
                # user cancelled -- stop the multi-edit
                break
def EventAdd( self, event ):
    """Button handler: add a new shortcut->command row via the edit dialog."""
    shortcut = ClientGUIShortcuts.Shortcut()
    command = ClientData.ApplicationCommand()
    name = self._name.GetValue()
    with ClientGUITopLevelWindows.DialogEdit( self, 'edit shortcut command' ) as dlg:
        panel = self._EditPanel( dlg, shortcut, command, name )
        dlg.SetPanel( panel )
        if dlg.ShowModal() == wx.ID_OK:
            ( shortcut, command ) = panel.GetValue()
            sort_tuple = ( shortcut, command )
            pretty_tuple = self._ConvertSortTupleToPrettyTuple( sort_tuple )
            self._shortcuts.Append( pretty_tuple, sort_tuple )
def EventEdit( self, event ):
    """Button handler: delegate to EditShortcuts."""
    self.EditShortcuts()
def EventRemove( self, event ):
    """Button handler: delegate to RemoveShortcuts."""
    self.RemoveShortcuts()
def GetValue( self ):
    """Build and return the edited Shortcuts object.

    Raises HydrusExceptions.VetoException if a custom set tries to use a reserved name.
    """
    name = self._name.GetValue()
    if self._this_is_custom and name in CC.SHORTCUTS_RESERVED_NAMES:
        raise HydrusExceptions.VetoException( 'That name is reserved--please pick another!' )
    shortcuts = ClientGUIShortcuts.Shortcuts( name )
    for ( shortcut, command ) in self._shortcuts.GetClientData():
        shortcuts.SetCommand( shortcut, command )
    return shortcuts
def RemoveShortcuts( self ):
    """Remove the selected shortcut rows after a yes/no confirmation."""
    with ClientGUIDialogs.DialogYesNo( self, 'Remove all selected?' ) as dlg:
        if dlg.ShowModal() == wx.ID_YES:
            self._shortcuts.RemoveAllSelected()
class _EditPanel( ClientGUIScrolledPanels.EditPanel ):
    """Edit panel for a single shortcut->command binding.

    Offers a 'simple' action dropdown plus tag / like-rating / numerical-rating
    content-command sub-panels; the last sub-panel whose 'set command' button was
    pressed (tracked in self._final_command) is the one whose value GetValue returns.
    """
    
    def __init__( self, parent, shortcut, command, shortcuts_name ):
        """Build the shortcut capture widget and every command sub-panel, then seed them from command.
        
        shortcuts_name -- name of the enclosing shortcut set; reserved names restrict
        the available simple actions, and only custom sets or 'media' see content commands.
        """
        ClientGUIScrolledPanels.EditPanel.__init__( self, parent )
        # which sub-panel's value GetValue should return: 'simple', 'ratings_like', 'ratings_numerical' or 'tag'
        self._final_command = 'simple'
        self._current_ratings_like_service = None
        self._current_ratings_numerical_service = None
        #
        self._shortcut_panel = ClientGUICommon.StaticBox( self, 'shortcut' )
        self._shortcut = ClientGUIShortcuts.ShortcutPanel( self._shortcut_panel )
        #
        # -- simple actions --
        self._none_panel = ClientGUICommon.StaticBox( self, 'simple actions' )
        if shortcuts_name in CC.SHORTCUTS_RESERVED_NAMES:
            choices = CC.simple_shortcut_name_to_action_lookup[ shortcuts_name ]
        else:
            choices = CC.simple_shortcut_name_to_action_lookup[ 'custom' ]
        choices = list( choices )
        choices.sort()
        self._simple_actions = wx.Choice( self._none_panel, choices = choices )
        self._set_simple = ClientGUICommon.BetterButton( self._none_panel, 'set command', self._SetSimple )
        #
        # -- content actions (tags, ratings) --
        self._content_panel = ClientGUICommon.StaticBox( self, 'content actions' )
        self._flip_or_set_action = ClientGUICommon.BetterChoice( self._content_panel )
        self._flip_or_set_action.Append( 'set', HC.CONTENT_UPDATE_SET )
        self._flip_or_set_action.Append( 'flip on and off', HC.CONTENT_UPDATE_FLIP )
        self._flip_or_set_action.SelectClientData( HC.CONTENT_UPDATE_SET )
        self._tag_panel = ClientGUICommon.StaticBox( self._content_panel, 'tag service actions' )
        self._tag_service_keys = wx.Choice( self._tag_panel )
        self._tag_value = wx.TextCtrl( self._tag_panel, style = wx.TE_READONLY )
        expand_parents = False
        self._tag_input = ClientGUIACDropdown.AutoCompleteDropdownTagsWrite( self._tag_panel, self.SetTags, expand_parents, CC.LOCAL_FILE_SERVICE_KEY, CC.COMBINED_TAG_SERVICE_KEY )
        self._set_tag = ClientGUICommon.BetterButton( self._tag_panel, 'set command', self._SetTag )
        #
        self._ratings_like_panel = ClientGUICommon.StaticBox( self._content_panel, 'like/dislike ratings service actions' )
        self._ratings_like_service_keys = wx.Choice( self._ratings_like_panel )
        self._ratings_like_service_keys.Bind( wx.EVT_CHOICE, self.EventRecalcActions )
        self._ratings_like_like = wx.RadioButton( self._ratings_like_panel, style = wx.RB_GROUP, label = 'like' )
        self._ratings_like_dislike = wx.RadioButton( self._ratings_like_panel, label = 'dislike' )
        self._ratings_like_remove = wx.RadioButton( self._ratings_like_panel, label = 'remove rating' )
        self._set_ratings_like = ClientGUICommon.BetterButton( self._ratings_like_panel, 'set command', self._SetRatingsLike )
        #
        self._ratings_numerical_panel = ClientGUICommon.StaticBox( self._content_panel, 'numerical ratings service actions' )
        self._ratings_numerical_service_keys = wx.Choice( self._ratings_numerical_panel )
        self._ratings_numerical_service_keys.Bind( wx.EVT_CHOICE, self.EventRecalcActions )
        self._ratings_numerical_slider = wx.Slider( self._ratings_numerical_panel, style = wx.SL_AUTOTICKS | wx.SL_LABELS )
        self._ratings_numerical_remove = wx.CheckBox( self._ratings_numerical_panel, label = 'remove rating' )
        self._set_ratings_numerical = ClientGUICommon.BetterButton( self._ratings_numerical_panel, 'set command', self._SetRatingsNumerical )
        #
        # populate the service dropdowns from the client's tag and rating services
        services = HG.client_controller.services_manager.GetServices( ( HC.LOCAL_TAG, HC.TAG_REPOSITORY, HC.LOCAL_RATING_LIKE, HC.LOCAL_RATING_NUMERICAL ) )
        for service in services:
            service_type = service.GetServiceType()
            if service_type in HC.TAG_SERVICES: choice = self._tag_service_keys
            elif service_type == HC.LOCAL_RATING_LIKE: choice = self._ratings_like_service_keys
            elif service_type == HC.LOCAL_RATING_NUMERICAL: choice = self._ratings_numerical_service_keys
            choice.Append( service.GetName(), service.GetServiceKey() )
        self._SetActions()
        #
        # seed the widgets from the incoming command
        self._shortcut.SetValue( shortcut )
        command_type = command.GetCommandType()
        data = command.GetData()
        if command_type == CC.APPLICATION_COMMAND_TYPE_SIMPLE:
            action = data
            self._simple_actions.SetStringSelection( action )
            self._SetSimple()
        else:
            ( service_key, content_type, action, value ) = data
            self._service = HG.client_controller.services_manager.GetService( service_key )
            service_name = self._service.GetName()
            service_type = self._service.GetServiceType()
            self._flip_or_set_action.SelectClientData( action )
            if service_type in HC.TAG_SERVICES:
                self._tag_service_keys.SetStringSelection( service_name )
                self._tag_value.SetValue( value )
                self._SetTag()
            elif service_type == HC.LOCAL_RATING_LIKE:
                self._ratings_like_service_keys.SetStringSelection( service_name )
                self._SetActions()
                if value is None:
                    self._ratings_like_remove.SetValue( True )
                elif value == True:
                    self._ratings_like_like.SetValue( True )
                elif value == False:
                    self._ratings_like_dislike.SetValue( True )
                self._SetRatingsLike()
            elif service_type == HC.LOCAL_RATING_NUMERICAL:
                self._ratings_numerical_service_keys.SetStringSelection( service_name )
                self._SetActions()
                if value is None:
                    self._ratings_numerical_remove.SetValue( True )
                else:
                    # stored value is a 0..1 fraction; the slider works in whole stars
                    num_stars = self._current_ratings_numerical_service.GetNumStars()
                    slider_value = int( round( value * num_stars ) )
                    self._ratings_numerical_slider.SetValue( slider_value )
                self._SetRatingsNumerical()
            # NOTE(review): _final_command is initialised to 'simple' and never None,
            # so this fallback looks unreachable -- confirm original intent
            if self._final_command is None:
                self._SetSimple()
        #
        # -- layout --
        self._shortcut_panel.Add( self._shortcut, CC.FLAGS_EXPAND_PERPENDICULAR )
        none_hbox = wx.BoxSizer( wx.HORIZONTAL )
        none_hbox.Add( self._simple_actions, CC.FLAGS_EXPAND_DEPTH_ONLY )
        none_hbox.Add( self._set_simple, CC.FLAGS_VCENTER )
        self._none_panel.Add( none_hbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
        tag_sub_vbox = wx.BoxSizer( wx.VERTICAL )
        tag_sub_vbox.Add( self._tag_value, CC.FLAGS_EXPAND_PERPENDICULAR )
        tag_sub_vbox.Add( self._tag_input, CC.FLAGS_EXPAND_BOTH_WAYS )
        tag_hbox = wx.BoxSizer( wx.HORIZONTAL )
        tag_hbox.Add( self._tag_service_keys, CC.FLAGS_EXPAND_DEPTH_ONLY )
        tag_hbox.Add( tag_sub_vbox, CC.FLAGS_EXPAND_SIZER_BOTH_WAYS )
        tag_hbox.Add( self._set_tag, CC.FLAGS_VCENTER )
        self._tag_panel.Add( tag_hbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
        ratings_like_hbox = wx.BoxSizer( wx.HORIZONTAL )
        ratings_like_hbox.Add( self._ratings_like_service_keys, CC.FLAGS_EXPAND_DEPTH_ONLY )
        ratings_like_hbox.Add( self._ratings_like_like, CC.FLAGS_VCENTER )
        ratings_like_hbox.Add( self._ratings_like_dislike, CC.FLAGS_VCENTER )
        ratings_like_hbox.Add( self._ratings_like_remove, CC.FLAGS_VCENTER )
        ratings_like_hbox.Add( self._set_ratings_like, CC.FLAGS_VCENTER )
        self._ratings_like_panel.Add( ratings_like_hbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
        ratings_numerical_hbox = wx.BoxSizer( wx.HORIZONTAL )
        ratings_numerical_hbox.Add( self._ratings_numerical_service_keys, CC.FLAGS_EXPAND_DEPTH_ONLY )
        ratings_numerical_hbox.Add( self._ratings_numerical_slider, CC.FLAGS_VCENTER )
        ratings_numerical_hbox.Add( self._ratings_numerical_remove, CC.FLAGS_VCENTER )
        ratings_numerical_hbox.Add( self._set_ratings_numerical, CC.FLAGS_VCENTER )
        self._ratings_numerical_panel.Add( ratings_numerical_hbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
        self._content_panel.Add( self._flip_or_set_action, CC.FLAGS_EXPAND_PERPENDICULAR )
        self._content_panel.Add( self._tag_panel, CC.FLAGS_EXPAND_PERPENDICULAR )
        self._content_panel.Add( self._ratings_like_panel, CC.FLAGS_EXPAND_PERPENDICULAR )
        self._content_panel.Add( self._ratings_numerical_panel, CC.FLAGS_EXPAND_PERPENDICULAR )
        vbox = wx.BoxSizer( wx.VERTICAL )
        vbox.Add( self._none_panel, CC.FLAGS_EXPAND_PERPENDICULAR )
        vbox.Add( self._content_panel, CC.FLAGS_EXPAND_PERPENDICULAR )
        # content commands only make sense for custom sets and the reserved 'media' set
        is_custom_or_media = shortcuts_name not in CC.SHORTCUTS_RESERVED_NAMES or shortcuts_name == 'media'
        if not is_custom_or_media:
            self._set_simple.Hide()
            self._content_panel.Hide()
        hbox = wx.BoxSizer( wx.HORIZONTAL )
        hbox.Add( self._shortcut_panel, CC.FLAGS_VCENTER )
        hbox.Add( ClientGUICommon.BetterStaticText( self, '\u2192' ), CC.FLAGS_VCENTER )
        hbox.Add( vbox, CC.FLAGS_EXPAND_SIZER_BOTH_WAYS )
        self.SetSizer( hbox )
def _EnableButtons( self ):
    """Re-enable every 'set command' button; the caller then disables the active one."""
    self._set_simple.Enable()
    self._set_ratings_like.Enable()
    self._set_ratings_numerical.Enable()
    self._set_tag.Enable()
def _GetCommand( self ):
    """Return the ApplicationCommand for whichever sub-panel was last 'set'.

    self._final_command is one of 'simple', 'ratings_like', 'ratings_numerical'
    or 'tag'. The original mixed `elif` and bare `if` in this chain; since every
    branch returns, behaviour was the same, but a uniform elif chain is less
    fragile. Returns None for an unrecognised value, preserving the original
    fall-through behaviour.
    """
    if self._final_command == 'simple':
        return self._GetSimple()
    elif self._final_command == 'ratings_like':
        return self._GetRatingsLike()
    elif self._final_command == 'ratings_numerical':
        return self._GetRatingsNumerical()
    elif self._final_command == 'tag':
        return self._GetTag()
    return None
def _GetSimple( self ):
    """Build a simple ApplicationCommand from the action dropdown; veto if nothing is selected."""
    chosen_action = self._simple_actions.GetStringSelection()
    if chosen_action == '':
        raise HydrusExceptions.VetoException( 'Please select an action!' )
    return ClientData.ApplicationCommand( CC.APPLICATION_COMMAND_TYPE_SIMPLE, chosen_action )
def _GetRatingsLike( self ):
    """Build a like/dislike rating content command; veto if no service is selected."""
    selection = self._ratings_like_service_keys.GetSelection()
    if selection == wx.NOT_FOUND:
        raise HydrusExceptions.VetoException( 'Please select a rating service!' )
    service_key = self._ratings_like_service_keys.GetClientData( selection )
    action = self._flip_or_set_action.GetChoice()
    # radio buttons: like -> 1.0, dislike -> 0.0, remove -> None
    if self._ratings_like_like.GetValue():
        value = 1.0
    elif self._ratings_like_dislike.GetValue():
        value = 0.0
    else:
        value = None
    return ClientData.ApplicationCommand( CC.APPLICATION_COMMAND_TYPE_CONTENT, ( service_key, HC.CONTENT_TYPE_RATINGS, action, value ) )
def _GetRatingsNumerical( self ):
    """Build a numerical rating content command; veto if no service is selected."""
    selection = self._ratings_numerical_service_keys.GetSelection()
    if selection == wx.NOT_FOUND:
        raise HydrusExceptions.VetoException( 'Please select a rating service!' )
    service_key = self._ratings_numerical_service_keys.GetClientData( selection )
    action = self._flip_or_set_action.GetChoice()
    if self._ratings_numerical_remove.GetValue():
        value = None
    else:
        # convert the slider's whole-star value to the stored 0..1 fraction
        stars = self._ratings_numerical_slider.GetValue()
        num_stars = self._current_ratings_numerical_service.GetNumStars()
        allow_zero = self._current_ratings_numerical_service.AllowZero()
        value = stars / num_stars if allow_zero else ( stars - 1 ) / ( num_stars - 1 )
    return ClientData.ApplicationCommand( CC.APPLICATION_COMMAND_TYPE_CONTENT, ( service_key, HC.CONTENT_TYPE_RATINGS, action, value ) )
def _GetTag( self ):
    """Build a tag mapping content command; veto if no service or tag is chosen."""
    selection = self._tag_service_keys.GetSelection()
    if selection == wx.NOT_FOUND:
        raise HydrusExceptions.VetoException( 'Please select a tag service!' )
    service_key = self._tag_service_keys.GetClientData( selection )
    action = self._flip_or_set_action.GetChoice()
    value = self._tag_value.GetValue()
    if value == '':
        raise HydrusExceptions.VetoException( 'Please enter a tag!' )
    return ClientData.ApplicationCommand( CC.APPLICATION_COMMAND_TYPE_CONTENT, ( service_key, HC.CONTENT_TYPE_MAPPINGS, action, value ) )
def _SetActions( self ):
    """Sync the cached like/numerical rating services with the current dropdown selections.

    Also reconfigures the numerical slider's range from the selected service's
    star count and allow-zero setting. Fix: the original bound a local named
    `min`, shadowing the builtin; renamed to slider_min.
    """
    if self._ratings_like_service_keys.GetCount() > 0:
        selection = self._ratings_like_service_keys.GetSelection()
        if selection != wx.NOT_FOUND:
            service_key = self._ratings_like_service_keys.GetClientData( selection )
            service = HG.client_controller.services_manager.GetService( service_key )
            self._current_ratings_like_service = service
    if self._ratings_numerical_service_keys.GetCount() > 0:
        selection = self._ratings_numerical_service_keys.GetSelection()
        if selection != wx.NOT_FOUND:
            service_key = self._ratings_numerical_service_keys.GetClientData( selection )
            service = HG.client_controller.services_manager.GetService( service_key )
            self._current_ratings_numerical_service = service
            num_stars = service.GetNumStars()
            # a service that allows zero rates 0..n stars, otherwise 1..n
            if service.AllowZero():
                slider_min = 0
            else:
                slider_min = 1
            self._ratings_numerical_slider.SetRange( slider_min, num_stars )
def _SetSimple( self ):
    """Mark the simple action as this panel's final command."""
    self._EnableButtons()
    # the disabled button indicates the active choice
    self._set_simple.Disable()
    self._final_command = 'simple'
def _SetRatingsLike( self ):
    """Mark the like/dislike rating action as this panel's final command."""
    self._EnableButtons()
    # the disabled button indicates the active choice
    self._set_ratings_like.Disable()
    self._final_command = 'ratings_like'
def _SetRatingsNumerical( self ):
    """Mark the numerical rating action as this panel's final command."""
    self._EnableButtons()
    # the disabled button indicates the active choice
    self._set_ratings_numerical.Disable()
    self._final_command = 'ratings_numerical'
def _SetTag( self ):
    """Mark the tag action as this panel's final command."""
    self._EnableButtons()
    # the disabled button indicates the active choice
    self._set_tag.Disable()
    self._final_command = 'tag'
def EventRecalcActions( self, event ):
    """Service dropdown changed: refresh the cached services and slider range."""
    self._SetActions()
    event.Skip()
def GetValue( self ):
    """Return the edited ( shortcut, command ) pair; command getters may veto."""
    shortcut = self._shortcut.GetValue()
    command = self._GetCommand()
    return ( shortcut, command )
def SetTags( self, tags ):
    """Autocomplete callback: copy the first entered tag into the read-only tag value box.

    Only one tag is used per shortcut command, so any extras are ignored.
    Fix: use next(iter(...)) instead of list(tags)[0], which copied the whole
    collection just to take one element.
    """
    if len( tags ) > 0:
        tag = next( iter( tags ) )
        self._tag_value.SetValue( tag )
class ManageURLsPanel( ClientGUIScrolledPanels.ManagePanel ):
    """Panel for viewing, adding, and removing known urls on one or more media files."""
    
    def __init__( self, parent, media ):
        """Build the url list, input box, and copy/paste buttons, seeded from media's url data."""
        ClientGUIScrolledPanels.ManagePanel.__init__( self, parent )
        media = ClientMedia.FlattenMedia( media )
        # work on copies so in-dialog edits do not touch the live media until commit
        self._current_media = [ m.Duplicate() for m in media ]
        self._multiple_files_warning = ClientGUICommon.BetterStaticText( self, label = 'Warning: you are editing urls for multiple files!\nBe very careful about adding URLs here, as they will apply to everything.\nAdding the same URL to multiple files is only appropriate for gallery-type URLs!' )
        self._multiple_files_warning.SetForegroundColour( ( 128, 0, 0 ) )
        if len( self._current_media ) == 1:
            self._multiple_files_warning.Hide()
        self._urls_listbox = wx.ListBox( self, style = wx.LB_SORT | wx.LB_EXTENDED )
        self._urls_listbox.Bind( wx.EVT_LISTBOX_DCLICK, self.EventListDoubleClick )
        self._urls_listbox.Bind( wx.EVT_KEY_DOWN, self.EventListKeyDown )
        ( width, height ) = ClientGUICommon.ConvertTextToPixels( self._urls_listbox, ( 120, 10 ) )
        self._urls_listbox.SetInitialSize( ( width, height ) )
        self._url_input = wx.TextCtrl( self, style = wx.TE_PROCESS_ENTER )
        self._url_input.Bind( wx.EVT_CHAR_HOOK, self.EventInputCharHook )
        self._copy_button = ClientGUICommon.BetterButton( self, 'copy all', self._Copy )
        self._paste_button = ClientGUICommon.BetterButton( self, 'paste', self._Paste )
        # NOTE(review): these two sets appear unused in the visible methods -- confirm
        self._urls_to_add = set()
        self._urls_to_remove = set()
        #
        self._pending_content_updates = []
        self._current_urls_count = collections.Counter()
        self._UpdateList()
        #
        hbox = wx.BoxSizer( wx.HORIZONTAL )
        hbox.Add( self._copy_button, CC.FLAGS_VCENTER )
        hbox.Add( self._paste_button, CC.FLAGS_VCENTER )
        vbox = wx.BoxSizer( wx.VERTICAL )
        vbox.Add( self._multiple_files_warning, CC.FLAGS_EXPAND_PERPENDICULAR )
        vbox.Add( self._urls_listbox, CC.FLAGS_EXPAND_BOTH_WAYS )
        vbox.Add( self._url_input, CC.FLAGS_EXPAND_PERPENDICULAR )
        vbox.Add( hbox, CC.FLAGS_BUTTON_SIZER )
        self.SetSizer( vbox )
        self._my_shortcut_handler = ClientGUIShortcuts.ShortcutsHandler( self, [ 'media', 'main_gui' ] )
        wx.CallAfter( self._SetSearchFocus )
def _Copy( self ):
    """Copy every known url, sorted and newline-separated, to the clipboard."""
    text = os.linesep.join( sorted( self._current_urls_count.keys() ) )
    HG.client_controller.pub( 'clipboard', 'text', text )
def _EnterURL( self, url, only_add = False ):
    """Add url to every current media file that does not already have it, then refresh the list.

    NOTE(review): only_add is accepted but never read in this body -- confirm
    whether an add/flip distinction was intended.
    """
    normalised_url = HG.client_controller.network_engine.domain_manager.NormaliseURL( url )
    addee_media = set()
    for m in self._current_media:
        locations_manager = m.GetLocationsManager()
        # NOTE(review): membership is tested against the normalised url but the raw
        # url is what gets added below -- confirm this mismatch is intentional
        if normalised_url not in locations_manager.GetURLs():
            addee_media.add( m )
    if len( addee_media ) > 0:
        addee_hashes = { m.GetHash() for m in addee_media }
        content_update = HydrusData.ContentUpdate( HC.CONTENT_TYPE_URLS, HC.CONTENT_UPDATE_ADD, ( ( url, ), addee_hashes ) )
        # apply to our working copies immediately; persistence happens in CommitChanges
        for m in addee_media:
            m.GetMediaResult().ProcessContentUpdate( CC.COMBINED_LOCAL_FILE_SERVICE_KEY, content_update )
        self._pending_content_updates.append( content_update )
    #
    self._UpdateList()
def _Paste( self ):
    """Add every non-empty line of clipboard text as a url.

    Fix: the original used a bare `except:`, which also swallowed SystemExit
    and KeyboardInterrupt; narrowed to Exception.
    """
    raw_text = HG.client_controller.GetClipboardText()
    try:
        for url in HydrusText.DeserialiseNewlinedTexts( raw_text ):
            if url != '':
                self._EnterURL( url, only_add = True )
    except Exception:
        wx.MessageBox( 'I could not understand what was in the clipboard' )
def _RemoveURL( self, url ):
    """Remove url from every current media file that has it, then refresh the list."""
    removee_media = set()
    for m in self._current_media:
        locations_manager = m.GetLocationsManager()
        if url in locations_manager.GetURLs():
            removee_media.add( m )
    if len( removee_media ) > 0:
        removee_hashes = { m.GetHash() for m in removee_media }
        content_update = HydrusData.ContentUpdate( HC.CONTENT_TYPE_URLS, HC.CONTENT_UPDATE_DELETE, ( ( url, ), removee_hashes ) )
        # apply to our working copies immediately; persistence happens in CommitChanges
        for m in removee_media:
            m.GetMediaResult().ProcessContentUpdate( CC.COMBINED_LOCAL_FILE_SERVICE_KEY, content_update )
        self._pending_content_updates.append( content_update )
    #
    self._UpdateList()
def _SetSearchFocus( self ):
    """Put keyboard focus on the url input box."""
    self._url_input.SetFocus()
def _UpdateList( self ):
    """Rebuild the url listbox and the url -> file-count counter from the current media."""
    self._urls_listbox.Clear()
    url_counter = collections.Counter()
    for m in self._current_media:
        url_counter.update( m.GetLocationsManager().GetURLs() )
    self._current_urls_count = url_counter
    show_counts = len( self._current_media ) > 1
    for ( url, count ) in self._current_urls_count.items():
        label = '{} ({})'.format( url, count ) if show_counts else url
        self._urls_listbox.Append( label, url )
def EventListDoubleClick( self, event ):
    """Double-click: remove the selected url(s); a single one is copied to the input for re-editing."""
    urls = [ self._urls_listbox.GetClientData( selection ) for selection in list( self._urls_listbox.GetSelections() ) ]
    for url in urls:
        self._RemoveURL( url )
    if len( urls ) == 1:
        url = urls[0]
        # convenience: put it back in the input so a typo can be fixed and re-added
        self._url_input.SetValue( url )
def EventListKeyDown( self, event ):
    """Delete key removes the selected urls; everything else propagates."""
    ( modifier, key ) = ClientGUIShortcuts.ConvertKeyEventToSimpleTuple( event )
    if key in CC.DELETE_KEYS:
        urls = [ self._urls_listbox.GetClientData( selection ) for selection in list( self._urls_listbox.GetSelections() ) ]
        for url in urls:
            self._RemoveURL( url )
    else:
        event.Skip()
def EventInputCharHook( self, event ):
    """Enter in the input box adds the typed url; Enter on an empty box OKs the dialog."""
    ( modifier, key ) = ClientGUIShortcuts.ConvertKeyEventToSimpleTuple( event )
    if key in ( wx.WXK_RETURN, wx.WXK_NUMPAD_ENTER ):
        url = self._url_input.GetValue()
        if url == '':
            self.GetParent().DoOK()
        else:
            # require an explicit scheme so relative strings are rejected early
            parse_result = urllib.parse.urlparse( url )
            if parse_result.scheme == '':
                wx.MessageBox( 'Could not parse that URL! Please make sure you include http:// or https://.' )
                return
            self._EnterURL( url )
            self._url_input.SetValue( '' )
    else:
        event.Skip()
def CommitChanges( self ):
    """Write any pending url add/delete content updates to the database synchronously."""
    if len( self._pending_content_updates ) > 0:
        service_keys_to_content_updates = { CC.COMBINED_LOCAL_FILE_SERVICE_KEY : self._pending_content_updates }
        HG.client_controller.WriteSynchronous( 'content_updates', service_keys_to_content_updates )
def ProcessApplicationCommand( self, command ):
    """Handle a shortcut command aimed at this panel; return True if it was consumed."""
    command_processed = True
    command_type = command.GetCommandType()
    data = command.GetData()
    if command_type == CC.APPLICATION_COMMAND_TYPE_SIMPLE:
        action = data
        if action == 'manage_file_urls':
            # the same shortcut that opened the dialog closes it with an ok
            self._OKParent()
        elif action == 'set_search_focus':
            self._SetSearchFocus()
        else:
            command_processed = False
    else:
        command_processed = False
    return command_processed
class RepairFileSystemPanel( ClientGUIScrolledPanels.ManagePanel ):
    """Boot-time panel for remapping missing client file-storage subdirectories."""
    
    def __init__( self, parent, missing_locations ):
        """Build the explanation text and the prefix-remapping list.
        
        missing_locations -- iterable of ( incorrect_location, prefix ) pairs;
        prefixes starting 'f' are original files, 't'/'r' are thumbnails.
        """
        ClientGUIScrolledPanels.ManagePanel.__init__( self, parent )
        self._only_thumbs = True
        self._incorrect_locations = {}
        self._correct_locations = {}
        for ( incorrect_location, prefix ) in missing_locations:
            self._incorrect_locations[ prefix ] = incorrect_location
            if prefix.startswith( 'f' ):
                # at least one missing directory holds original files, not just thumbs
                self._only_thumbs = False
        text = 'This dialog has launched because some expected file storage directories were not found. This is a serious error. You have two options:'
        text += os.linesep * 2
        text += '1) If you know what these should be (e.g. you recently remapped their external drive to another location), update the paths here manually. For most users, this will be clicking _add a possibly correct location_ and then select the new folder where the subdirectories all went. You can repeat this if your folders are missing in multiple locations. Check everything reports _ok!_'
        text += os.linesep * 2
        text += 'Although it is best if you can find everything, you only _have_ to fix the subdirectories starting with \'f\', which store your original files. Those starting \'t\' and \'r\' are for your thumbnails, which can be regenerated with a bit of work.'
        text += os.linesep * 2
        text += 'Then hit \'apply\', and the client will launch. You should double-check all your locations under database->migrate database immediately.'
        text += os.linesep * 2
        text += '2) If the locations are not available, or you do not know what they should be, or you wish to fix this outside of the program, hit \'cancel\' to gracefully cancel client boot. Feel free to contact hydrus dev for help.'
        if self._only_thumbs:
            text += os.linesep * 2
            text += 'SPECIAL NOTE FOR YOUR SITUATION: The only paths missing are thumbnail paths. If you cannot recover these folders, you can hit apply to create empty paths at the original or corrected locations and then run a maintenance routine to regenerate the thumbnails from their originals.'
        st = ClientGUICommon.BetterStaticText( self, text )
        st.SetWrapWidth( 640 )
        columns = [ ( 'missing location', -1 ), ( 'expected subdirectory', 23 ), ( 'correct location', 36 ), ( 'now ok?', 9 ) ]
        self._locations = ClientGUIListCtrl.BetterListCtrl( self, 'repair_locations', 12, 36, columns, self._ConvertPrefixToListCtrlTuples, activation_callback = self._SetLocations )
        self._set_button = ClientGUICommon.BetterButton( self, 'set correct location', self._SetLocations )
        self._add_button = ClientGUICommon.BetterButton( self, 'add a possibly correct location (let the client figure out what it contains)', self._AddLocation )
        # add a button here for 'try to fill them in for me'. you give it a dir, and it tries to figure out and fill in the prefixes for you
        #
        self._locations.AddDatas( [ prefix for ( incorrect_location, prefix ) in missing_locations ] )
        self._locations.Sort( 0 )
        #
        vbox = wx.BoxSizer( wx.VERTICAL )
        vbox.Add( st, CC.FLAGS_EXPAND_PERPENDICULAR )
        vbox.Add( self._locations, CC.FLAGS_EXPAND_PERPENDICULAR )
        vbox.Add( self._set_button, CC.FLAGS_LONE_BUTTON )
        vbox.Add( self._add_button, CC.FLAGS_LONE_BUTTON )
        self.SetSizer( vbox )
def _AddLocation( self ):
    """Ask for a candidate folder and mark every prefix it actually contains as corrected.

    NOTE(review): unlike _SetLocations, this only records prefixes whose
    subdirectory exists under the chosen path -- presumably intentional, since
    the user is offering a guess rather than an explicit mapping.
    """
    with wx.DirDialog( self, 'Select the potential correct location.' ) as dlg:
        if dlg.ShowModal() == wx.ID_OK:
            path = dlg.GetPath()
            for prefix in self._locations.GetData():
                ok = os.path.exists( os.path.join( path, prefix ) )
                if ok:
                    self._correct_locations[ prefix ] = ( path, ok )
            self._locations.UpdateDatas()
def _ConvertPrefixToListCtrlTuples( self, prefix ):
    """Produce ( display_tuple, sort_tuple ) for one subdirectory-prefix row."""
    incorrect_location = self._incorrect_locations[ prefix ]
    if prefix in self._correct_locations:
        ( correct_location, ok ) = self._correct_locations[ prefix ]
        pretty_ok = 'ok!' if ok else 'not found'
    else:
        ( correct_location, ok, pretty_ok ) = ( '', None, '' )
    display_tuple = ( incorrect_location, prefix, correct_location, pretty_ok )
    sort_tuple = ( incorrect_location, prefix, correct_location, ok )
    return ( display_tuple, sort_tuple )
def _SetLocations( self ):
    """Ask for the correct folder for the selected prefixes and record whether each exists there."""
    prefixes = self._locations.GetData( only_selected = True )
    if len( prefixes ) > 0:
        with wx.DirDialog( self, 'Select correct location.' ) as dlg:
            if dlg.ShowModal() == wx.ID_OK:
                path = dlg.GetPath()
                for prefix in prefixes:
                    # store the mapping even if it does not exist yet; 'ok' records the check result
                    ok = os.path.exists( os.path.join( path, prefix ) )
                    self._correct_locations[ prefix ] = ( path, ok )
                self._locations.UpdateDatas()
def CommitChanges( self ):
    """Validate the remappings and write them to the database.

    File ('f') prefixes must all be corrected and found, or a VetoException
    blocks the apply. Uncorrected/unfound thumbnail prefixes can optionally be
    recreated empty after a confirmation.
    """
    correct_rows = []
    thumb_problems = False
    for prefix in self._locations.GetData():
        incorrect_location = self._incorrect_locations[ prefix ]
        if prefix not in self._correct_locations:
            if prefix.startswith( 'f' ):
                raise HydrusExceptions.VetoException( 'You did not correct all the file locations!' )
            else:
                # thumbnail prefix with no correction: fall back to the original path
                thumb_problems = True
                correct_location = incorrect_location
        else:
            ( correct_location, ok ) = self._correct_locations[ prefix ]
            if not ok:
                if prefix.startswith( 'f' ):
                    raise HydrusExceptions.VetoException( 'You did not find all the correct file locations!' )
                else:
                    thumb_problems = True
        correct_rows.append( ( incorrect_location, prefix, correct_location ) )
    if thumb_problems:
        message = 'Some or all of your incorrect paths have not been corrected, but they are all thumbnail paths.'
        message += os.linesep * 2
        message += 'Would you like instead to create new empty subdirectories at the previous (or corrected, if you have entered them) locations?'
        message += os.linesep * 2
        message += 'You can run database->regenerate->thumbnails to fill them up again.'
        with ClientGUIDialogs.DialogYesNo( self, message ) as dlg:
            if dlg.ShowModal() != wx.ID_YES:
                raise HydrusExceptions.VetoException()
    HG.client_controller.WriteSynchronous( 'repair_client_files', correct_rows )
| 45.184274 | 536 | 0.555526 |
4b5c4d092833d03034b30338774f18b90ce1f956 | 3,592 | py | Python | data_handler/LLD.py | demetoir/ALLGANS | 2f972db5e9a65f18aee0695d817f4acc221e54da | [
"MIT"
] | 11 | 2018-01-25T15:31:54.000Z | 2019-12-17T06:12:57.000Z | data_handler/LLD.py | demetoir/ALLGANs | 2f972db5e9a65f18aee0695d817f4acc221e54da | [
"MIT"
] | 19 | 2018-01-29T16:12:09.000Z | 2018-02-15T15:56:48.000Z | data_handler/LLD.py | demetoir/ALLGANs | 2f972db5e9a65f18aee0695d817f4acc221e54da | [
"MIT"
] | 2 | 2018-02-06T00:31:55.000Z | 2018-02-19T15:29:10.000Z | from __future__ import division
from glob import glob
from PIL import Image
from data_handler.AbstractDataset import AbstractDataset, DownloadInfo, AbstractDatasetHelper
from dict_keys.dataset_batch_keys import *
from env_settting import LLD_PATH
from dict_keys.input_shape_keys import *
import _pickle as cPickle
import os
import numpy as np
class LLD(AbstractDataset):
    """Loader for the Large Logo Dataset (LLD) clean favicon pickles.

    Registers the download info for the clean favicon archive and loads the
    pickled image batches into ``self.data[BATCH_KEY_TRAIN_X]``.
    """

    LLD_CLEAN = 'CLEAN'
    LLD_FULL = 'FULL'
    # Glob pattern matching the extracted pickle shards.
    PATTERN = 'LLD_favicon_data*.pkl'

    def __init__(self, preprocess=None, batch_after_task=None):
        """Register batch keys and download info.

        *preprocess* and *batch_after_task* are forwarded to AbstractDataset.
        """
        super().__init__(preprocess, batch_after_task)
        # NOTE(review): LLD_CLEAN is a bare name here, presumably provided by
        # the star import from dict_keys.dataset_batch_keys -- confirm.
        self.batch_keys = [
            LLD_CLEAN,
        ]
        self.download_infos = [
            DownloadInfo(
                url="https://data.vision.ee.ethz.ch/cvl/lld_data/LLD_favicons_clean.zip",
                is_zipped=True,
                download_file_name="LLD_favicons_clean.zip",
                extracted_file_names=[
                    "LLD_favicon_data_0.pkl",
                    "LLD_favicon_data_1.pkl",
                    "LLD_favicon_data_2.pkl",
                    "LLD_favicon_data_3.pkl",
                    "LLD_favicon_data_4.pkl"
                ]
            )
        ]

    def load(self, path, limit=None):
        """Load every pickle shard matching PATTERN under *path*.

        All shards are concatenated into one array under BATCH_KEY_TRAIN_X.
        *limit*, if given, truncates the data to the first *limit* images.
        """
        files = glob(os.path.join(path, self.PATTERN))
        files.sort()
        self.data[BATCH_KEY_TRAIN_X] = None
        for file in files:
            with open(file, 'rb') as f:
                # latin1 encoding is needed to read python2-era pickles.
                data = cPickle.load(f, encoding='latin1')
                self.log('pickle load :%s' % file)
            if self.data[BATCH_KEY_TRAIN_X] is None:
                self.data[BATCH_KEY_TRAIN_X] = data
            else:
                self.data[BATCH_KEY_TRAIN_X] = np.concatenate((self.data[BATCH_KEY_TRAIN_X], data))

        if limit is not None:
            self.data[BATCH_KEY_TRAIN_X] = self.data[BATCH_KEY_TRAIN_X][:limit]

        self.cursor[BATCH_KEY_TRAIN_X] = 0
        self.data_size = len(self.data[BATCH_KEY_TRAIN_X])
        self.log('data set fully loaded')

    def save(self):
        """Not implemented; a sketch of the shard writer is kept below."""
        # def save_icon_data(icons, data_path, package_size=100000):
        #     if not os.path.exists(data_path):
        #         os.makedirs(data_path)
        #     num_packages = int(math.ceil(len(icons) / package_size))
        #     num_len = len(str(num_packages))
        #     for p in range(num_packages):
        #         with open(os.path.join(data_path, 'icon_data_' + str(p).zfill(num_len) + '.pkl'), 'wb') as f:
        #             cPickle.dump(icons[p * package_size:(p + 1) * package_size], f, protocol=cPickle.HIGHEST_PROTOCOL)
        raise NotImplementedError

    @staticmethod
    def load_sample(path):
        """Load the sample PNG logos from *path*/5klogos as a (N, H, W, 3) uint8 array."""
        files = glob(os.path.join(path, '5klogos', '*.png'))
        files.sort()
        imgs = []
        for file in files:
            img = Image.open(file)
            # Fixed: PIL.Image exposes load(), not load_model_instance() --
            # the original call was a botched project-wide rename (the same
            # rename left 'os.instance_path' in the commented-out code above).
            img.load()
            # np.frombuffer replaces the deprecated np.fromstring.
            im_arr = np.frombuffer(img.tobytes(), dtype=np.uint8)
            im_arr = im_arr.reshape((img.size[1], img.size[0], 3))
            imgs += [im_arr]
        return np.array(imgs)
class LLDHelper(AbstractDatasetHelper):
    """Convenience helpers for wiring the LLD dataset into a model."""

    @staticmethod
    def next_batch_task(batch):
        """Extract only the image tensor from a raw batch tuple."""
        return batch[0]

    @staticmethod
    def load_dataset(limit=None):
        """Load the clean LLD favicons and report the per-sample input shape."""
        lld_data = LLD(batch_after_task=LLDHelper.next_batch_task)
        lld_data.load(LLD_PATH, limit=limit)
        input_shapes = {INPUT_SHAPE_KEY_DATA_X: [32, 32, 3]}
        return lld_data, input_shapes
| 34.873786 | 121 | 0.583241 |
4f1e69a286802c88a746ec1fc519c38b59bb6b16 | 2,363 | py | Python | basic/list2.py | tyagow/google-python-exercises | 7036293c5f2b1740ce646b300267b71e6d3d642a | [
"Apache-2.0"
] | 1 | 2019-07-05T12:24:15.000Z | 2019-07-05T12:24:15.000Z | basic/list2.py | tyagow/google-python-exercises | 7036293c5f2b1740ce646b300267b71e6d3d642a | [
"Apache-2.0"
] | null | null | null | basic/list2.py | tyagow/google-python-exercises | 7036293c5f2b1740ce646b300267b71e6d3d642a | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Additional basic list exercises
# D. Given a list of numbers, return a list where
# all adjacent == elements have been reduced to a single element,
# so [1, 2, 2, 3] returns [1, 2, 3]. You may create a new list or
# modify the passed in list.
# https://www.peterbe.com/plog/uniqifiers-benchmark
def remove_adjacent(seq):
    """Collapse runs of equal adjacent elements to a single element.

    [1, 2, 2, 3] -> [1, 2, 3].  The previous implementation removed *all*
    duplicates (e.g. [1, 2, 1] -> [1, 2]), which is stronger than the
    exercise's stated contract; this version only collapses adjacent runs.
    Returns a new list; *seq* is not modified.
    """
    result = []
    for item in seq:
        # Keep the item unless it equals the last element we emitted.
        if not result or result[-1] != item:
            result.append(item)
    return result
# E. Given two lists sorted in increasing order, create and return a merged
# list of all the elements in sorted order. You may modify the passed in lists.
# Ideally, the solution should work in "linear" time, making a single
# pass of both lists.
def linear_merge(list1, list2):
    """Merge two sorted lists into one sorted list in O(len1 + len2).

    The previous implementation used sorted(list1 + list2), which is
    O(n log n); this two-pointer merge makes a single pass of both lists,
    as the exercise asks.  On ties, list1's element comes first, matching
    the stable order sorted() would have produced.
    """
    merged = []
    i = j = 0
    while i < len(list1) and j < len(list2):
        if list2[j] < list1[i]:
            merged.append(list2[j])
            j += 1
        else:
            merged.append(list1[i])
            i += 1
    # At most one of the lists still has a (sorted) tail left.
    merged.extend(list1[i:])
    merged.extend(list2[j:])
    return merged
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
if got == expected:
prefix = ' OK '
else:
prefix = ' X '
print('%s got: %s expected: %s' % (prefix, repr(got), repr(expected)))
# Calls the above functions with interesting inputs.
def main():
    """Exercise remove_adjacent and linear_merge with the sample inputs."""
    print('remove_adjacent')
    adjacent_cases = [
        ([1, 2, 2, 3], [1, 2, 3]),
        ([2, 2, 3, 3, 3], [2, 3]),
        ([], []),
    ]
    for arg, expected in adjacent_cases:
        test(remove_adjacent(arg), expected)

    print()
    print('linear_merge')
    merge_cases = [
        ((['aa', 'xx', 'zz'], ['bb', 'cc']), ['aa', 'bb', 'cc', 'xx', 'zz']),
        ((['aa', 'xx'], ['bb', 'cc', 'zz']), ['aa', 'bb', 'cc', 'xx', 'zz']),
        ((['aa', 'aa'], ['aa', 'bb', 'bb']), ['aa', 'aa', 'aa', 'bb', 'bb']),
    ]
    for (left, right), expected in merge_cases:
        test(linear_merge(left, right), expected)


if __name__ == '__main__':
    main()
03427b2e5d016a10246c195dbab951874618b4aa | 2,177 | py | Python | twint/storage/write.py | recurrence/twint | c434115d21fd244f8fcfd987ae048a1a4fac6f74 | [
"MIT"
] | null | null | null | twint/storage/write.py | recurrence/twint | c434115d21fd244f8fcfd987ae048a1a4fac6f74 | [
"MIT"
] | null | null | null | twint/storage/write.py | recurrence/twint | c434115d21fd244f8fcfd987ae048a1a4fac6f74 | [
"MIT"
] | null | null | null | from . import write_meta as meta
import csv
import json
import os
def outputExt(objType, fType):
    """Build the default output file suffix, e.g. "/tweets.csv".

    A "str" object type denotes a plain username string.
    """
    kind = "username" if objType == "str" else objType
    return f"/{kind}s.{fType}"
def addExt(base, objType, fType):
    """Append the default extension when *base* carries no file extension.

    When *base* has no '.', it is treated as a directory: the directory is
    created if missing and the default "/<type>s.<ext>" suffix is appended.
    """
    if '.' not in base:
        createDirIfMissing(base)
        base += outputExt(objType, fType)
    return base
def Text(entry, f):
    """Append *entry* (newlines flattened to spaces) as one line of file *f*.

    Fixed a resource leak: the original passed an open() handle straight to
    print() and never closed it; a context manager now releases the handle
    deterministically.
    """
    with open(f, "a", encoding="utf-8") as out:
        print(entry.replace('\n', ' '), file=out)
def Type(config):
    """Classify which object type this run produces: user, username, or tweet."""
    if config.User_full:
        return "user"
    if config.Followers or config.Following:
        return "username"
    return "tweet"
def struct(obj, custom, _type):
    """Return (fieldnames, row) for *obj*, honoring a custom field list.

    With *custom*, only the requested fields are extracted from the full
    metadata row; otherwise the default fieldnames and full row are used.
    """
    if custom:
        fieldnames = custom
        row = {field: meta.Data(obj, _type)[field] for field in fieldnames}
    else:
        fieldnames = meta.Fieldnames(_type)
        row = meta.Data(obj, _type)
    return fieldnames, row
def createDirIfMissing(dirname):
    """Create *dirname* (including parents) if it does not exist.

    os.makedirs(..., exist_ok=True) avoids the check-then-create race of the
    original exists()/makedirs() pair (another process could create the
    directory between the two calls and trigger FileExistsError).
    """
    os.makedirs(dirname, exist_ok=True)
def Csv(obj, config):
    """Append *obj* as one row of a CSV file, writing a header on first use."""
    obj_type = obj.__class__.__name__
    if obj_type == "str":
        obj_type = "username"
    fieldnames, row = struct(obj, config.Custom[obj_type], obj_type)
    base = addExt(config.Output, obj_type, "csv")
    # Tab-separated output only when the config explicitly carries Tabs=True.
    dialect = 'excel-tab' if 'Tabs' in config.__dict__ and config.Tabs else 'excel'

    if not os.path.exists(base):
        with open(base, "w", newline='', encoding="utf-8") as csv_file:
            csv.DictWriter(csv_file, fieldnames=fieldnames, dialect=dialect).writeheader()

    with open(base, "a", newline='', encoding="utf-8") as csv_file:
        csv.DictWriter(csv_file, fieldnames=fieldnames, dialect=dialect).writerow(row)
def Json(obj, config):
    """Append *obj* as one JSON object per line (JSON Lines format)."""
    obj_type = obj.__class__.__name__
    if obj_type == "str":
        obj_type = "username"
    _, data = struct(obj, config.Custom[obj_type], obj_type)
    base = addExt(config.Output, obj_type, "json")
    with open(base, "a", newline='', encoding="utf-8") as json_file:
        json.dump(data, json_file, ensure_ascii=False)
        json_file.write("\n")
| 27.910256 | 85 | 0.625172 |
7bfc51bceb2edf6f217cb2066adea045231daf11 | 5,738 | py | Python | PythonAPI/carissma_project/lib/python3.5/site-packages/matplotlib/backends/backend_mixed.py | AbdulHoffmann/carla_carissma | 8d382769ffa02a6c61a22c57160285505f5ff0a4 | [
"MIT"
] | 445 | 2019-01-26T13:50:26.000Z | 2022-03-18T05:17:38.000Z | venv/lib/python3.7/site-packages/matplotlib/backends/backend_mixed.py | John1001Song/Big-Data-Robo-Adviser | 9444dce96954c546333d5aecc92a06c3bfd19aa5 | [
"MIT"
] | 242 | 2019-01-29T15:48:27.000Z | 2022-03-31T22:09:21.000Z | venv/lib/python3.7/site-packages/matplotlib/backends/backend_mixed.py | John1001Song/Big-Data-Robo-Adviser | 9444dce96954c546333d5aecc92a06c3bfd19aa5 | [
"MIT"
] | 64 | 2018-04-25T08:51:57.000Z | 2022-01-29T14:13:57.000Z | import numpy as np
from matplotlib.backends.backend_agg import RendererAgg
from matplotlib.tight_bbox import process_figure_for_rasterizing
class MixedModeRenderer(object):
    """
    A helper class to implement a renderer that switches between
    vector and raster drawing.  An example may be a PDF writer, where
    most things are drawn with PDF vector commands, but some very
    complex objects, such as quad meshes, are rasterised and then
    output as images.
    """
    def __init__(self, figure, width, height, dpi, vector_renderer,
                 raster_renderer_class=None,
                 bbox_inches_restore=None):
        """
        Parameters
        ----------
        figure : `matplotlib.figure.Figure`
            The figure instance.
        width : scalar
            The width of the canvas in logical units
        height : scalar
            The height of the canvas in logical units
        dpi : scalar
            The dpi of the canvas
        vector_renderer : `matplotlib.backend_bases.RendererBase`
            An instance of a subclass of
            `~matplotlib.backend_bases.RendererBase` that will be used for the
            vector drawing.
        raster_renderer_class : `matplotlib.backend_bases.RendererBase`
            The renderer class to use for the raster drawing. If not provided,
            this will use the Agg backend (which is currently the only viable
            option anyway.)
        """
        if raster_renderer_class is None:
            raster_renderer_class = RendererAgg

        self._raster_renderer_class = raster_renderer_class
        self._width = width
        self._height = height
        self.dpi = dpi

        self._vector_renderer = vector_renderer

        self._raster_renderer = None
        # Nesting counter: start/stop_rasterizing may be called re-entrantly;
        # only the outermost pair actually switches renderers.
        self._rasterizing = 0

        # A reference to the figure is needed as we need to change
        # the figure dpi before and after the rasterization. Although
        # this looks ugly, I couldn't find a better solution. -JJL
        self.figure = figure
        self._figdpi = figure.get_dpi()

        self._bbox_inches_restore = bbox_inches_restore

        self._set_current_renderer(vector_renderer)

    # Names of renderer methods/attributes that are re-bound on self whenever
    # the active renderer is switched (see _set_current_renderer).
    _methods = """
        close_group draw_image draw_markers draw_path
        draw_path_collection draw_quad_mesh draw_tex draw_text
        finalize flipy get_canvas_width_height get_image_magnification
        get_texmanager get_text_width_height_descent new_gc open_group
        option_image_nocomposite points_to_pixels strip_math
        start_filter stop_filter draw_gouraud_triangle
        draw_gouraud_triangles option_scale_image
        _text2path _get_text_path_transform height width
        """.split()

    def _set_current_renderer(self, renderer):
        # Make *renderer* the active backend by delegating every known
        # method/attribute to it, and expose the rasterizing toggles on the
        # renderer itself.
        self._renderer = renderer

        for method in self._methods:
            if hasattr(renderer, method):
                setattr(self, method, getattr(renderer, method))
        renderer.start_rasterizing = self.start_rasterizing
        renderer.stop_rasterizing = self.stop_rasterizing

    def start_rasterizing(self):
        """
        Enter "raster" mode.  All subsequent drawing commands (until
        stop_rasterizing is called) will be drawn with the raster
        backend.

        If start_rasterizing is called multiple times before
        stop_rasterizing is called, this method has no effect.
        """
        # change the dpi of the figure temporarily.
        self.figure.set_dpi(self.dpi)
        if self._bbox_inches_restore:  # when tight bbox is used
            r = process_figure_for_rasterizing(self.figure,
                                               self._bbox_inches_restore)
            self._bbox_inches_restore = r

        if self._rasterizing == 0:
            # Outermost call: create a fresh raster canvas and switch to it.
            self._raster_renderer = self._raster_renderer_class(
                self._width*self.dpi, self._height*self.dpi, self.dpi)
            self._set_current_renderer(self._raster_renderer)
        self._rasterizing += 1

    def stop_rasterizing(self):
        """
        Exit "raster" mode.  All of the drawing that was done since
        the last start_rasterizing command will be copied to the
        vector backend by calling draw_image.

        If stop_rasterizing is called multiple times before
        start_rasterizing is called, this method has no effect.
        """
        self._rasterizing -= 1
        if self._rasterizing == 0:
            # Outermost call: flush the rasterized pixels into the vector
            # backend as a single image, then switch back.
            self._set_current_renderer(self._vector_renderer)

            height = self._height * self.dpi
            buffer, bounds = self._raster_renderer.tostring_rgba_minimized()
            l, b, w, h = bounds
            if w > 0 and h > 0:
                image = np.frombuffer(buffer, dtype=np.uint8)
                image = image.reshape((h, w, 4))
                # Flip vertically: Agg's buffer is top-down, draw_image is
                # bottom-up.
                image = image[::-1]
                gc = self._renderer.new_gc()
                # TODO: If the mixedmode resolution differs from the figure's
                #       dpi, the image must be scaled (dpi->_figdpi). Not all
                #       backends support this.
                self._renderer.draw_image(
                    gc,
                    l * self._figdpi / self.dpi,
                    (height-b-h) * self._figdpi / self.dpi,
                    image)
            self._raster_renderer = None
            # NOTE(review): counter was initialized as int 0 but is reset to
            # False here; falsy-equivalent, but inconsistent typing.
            self._rasterizing = False

            # restore the figure dpi.
            self.figure.set_dpi(self._figdpi)

        if self._bbox_inches_restore:  # when tight bbox is used
            r = process_figure_for_rasterizing(self.figure,
                                               self._bbox_inches_restore,
                                               self._figdpi)
            self._bbox_inches_restore = r
98436138ed4e5cd91b990b7c928fa465393531af | 863 | py | Python | p_library/admin.py | StalingradTeam/my_library | ec9f43acb548a1552c8f64785d3a040390e91ae1 | [
"MIT"
] | null | null | null | p_library/admin.py | StalingradTeam/my_library | ec9f43acb548a1552c8f64785d3a040390e91ae1 | [
"MIT"
] | null | null | null | p_library/admin.py | StalingradTeam/my_library | ec9f43acb548a1552c8f64785d3a040390e91ae1 | [
"MIT"
] | null | null | null | from django.contrib import admin
# Register your models here.
from django.contrib import admin
from p_library.models import Book, Author, Publisher
@admin.register(Author)
class AuthorAdmin(admin.ModelAdmin):
    """Admin configuration for Author.

    Renamed from the duplicate ``BookAdmin``: three admin classes in this
    module shared that name, so the first two were shadowed and unreachable
    by name -- the rename is therefore backward compatible.
    """
    fields = ('full_name', 'birth_year', 'country', )

    # NOTE(review): Author has no visible ``author`` attribute; this looks
    # copy-pasted from the Book admin and is unused here -- confirm/remove.
    @staticmethod
    def author_full_name(obj):
        return obj.author.full_name
@admin.register(Publisher)
class PublisherAdmin(admin.ModelAdmin):
    """Admin configuration for Publisher: list id and name.

    Renamed from the duplicate ``BookAdmin`` (it was shadowed by the later
    class of the same name, so the rename is backward compatible).
    """
    list_display = ('id', 'name')
@admin.register(Book)
class BookAdmin(admin.ModelAdmin):
    """Admin configuration for Book."""

    @staticmethod
    def author_full_name(obj):
        # Used by list_display below; shows the related author's full name.
        return obj.author.full_name

    @staticmethod
    def author_name(obj):
        # NOTE(review): the Author admin uses full_name; whether Author also
        # defines ``name`` is not visible here -- confirm.
        return obj.author.name

    list_display = ('title', 'author_full_name',)
    fields = ('ISBN', 'title', 'description', 'year_release', 'author', 'Publisher_id', 'copy_count', 'price')
9a24a278b13ec83fa9bdba924f62afa00a9dd766 | 11,478 | py | Python | waveorder/io/multipagetiff.py | mehta-lab/waveorder | 9892c20955d3487778fd440a0d7f4f86334e7b8e | [
"Unlicense"
] | 2 | 2020-12-19T02:55:09.000Z | 2022-02-24T19:40:26.000Z | waveorder/io/multipagetiff.py | mehta-lab/waveorder | 9892c20955d3487778fd440a0d7f4f86334e7b8e | [
"Unlicense"
] | 42 | 2021-01-20T22:34:14.000Z | 2022-03-31T00:13:37.000Z | waveorder/io/multipagetiff.py | mehta-lab/waveorder | 9892c20955d3487778fd440a0d7f4f86334e7b8e | [
"Unlicense"
] | null | null | null | import numpy as np
import os
import zarr
from tifffile import TiffFile
import tifffile as tiff
from copy import copy
import glob
import warnings
from waveorder.io.reader_interface import ReaderInterface
class MicromanagerOmeTiffReader(ReaderInterface):
    """Reader for Micro-Manager ome-tiff datasets.

    Scans every *.ome.tif in a folder, builds a map from (position, time,
    channel, slice) coordinates to (file, page, byte offset), and exposes
    the data as memory-mapped images or lazily-built zarr arrays.
    """

    def __init__(self, folder: str, extract_data: bool = False):
        """
        Parameters
        ----------
        folder: (str) folder or file containing all ome-tiff files
        extract_data: (bool) True if ome_series should be extracted immediately

        """
        # Add Initial Checks
        if len(glob.glob(os.path.join(folder, '*.ome.tif'))) == 0:
            raise ValueError('Specific input contains no ome.tif files, please specify a valid input directory')

        # ignore tiffile warnings, doesn't work
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', tiff)

        # Grab all image files
        self.data_directory = folder
        self.files = glob.glob(os.path.join(self.data_directory, '*.ome.tif'))

        # Generate Data Specific Properties
        self.coords = None
        self.coord_map = dict()  # {(p, t, c, z): (file, page, byte_offset)}
        self.pos_names = []
        self.position_arrays = dict()  # lazily-built zarr store per position
        self.positions = 0
        self.frames = 0
        self.channels = 0
        self.slices = 0
        self.height = 0
        self.width = 0
        self._set_dtype()

        # Initialize MM attributes
        self.mm_meta = None
        self.stage_positions = 0
        self.z_step_size = None
        self.channel_names = []

        # Read MM data
        self._set_mm_meta()

        # Gather index map of file, page, byte offset
        self._gather_index_maps()

        # if extract data, create all of the virtual zarr stores up front
        if extract_data:
            for i in range(self.positions):
                self._create_position_array(i)

    def _gather_index_maps(self):
        """
        Will return a dictionary of {coord: (filepath, page, byte_offset)} of
        length(N_Images) to later query.  Also derives the overall dataset
        dimensions from the largest coordinate seen, which is robust to
        incomplete acquisitions.

        Returns
        -------

        """
        positions = 0
        frames = 0
        channels = 0
        slices = 0
        for file in self.files:
            tf = TiffFile(file)
            meta = tf.micromanager_metadata['IndexMap']
            offsets = list(meta['Offset'])

            for page in range(len(meta['Channel'])):
                coord = [0, 0, 0, 0]
                coord[0] = meta['Position'][page]
                coord[1] = meta['Frame'][page]
                coord[2] = meta['Channel'][page]
                coord[3] = meta['Slice'][page]
                offset = self._get_byte_offset(offsets, page)
                self.coord_map[tuple(coord)] = (file, page, offset)

                # update dimensions as we go along, helps with incomplete datasets
                if coord[0]+1 > positions:
                    positions = coord[0]+1

                if coord[1]+1 > frames:
                    frames = coord[1]+1

                if coord[2]+1 > channels:
                    channels = coord[2]+1

                if coord[3]+1 > slices:
                    slices = coord[3]+1

        # update dimensions to the largest dimensions present in the saved data
        self.positions = positions
        self.frames = frames
        self.channels = channels
        self.slices = slices

    def _get_byte_offset(self, offsets, page):
        """
        Gets the byte offset of the image array from the IFD offset.

        Parameters
        ----------
        offsets: (list) per-page IFD offsets from the MM IndexMap metadata
        page: (int) Page to look at for the tag

        Returns
        -------
        byte offset: (int) byte offset for the image array
        """
        # NOTE(review): 210/162 are presumably the fixed header sizes of the
        # first vs. subsequent IFDs as written by Micro-Manager -- confirm
        # against the MM tiff layout before changing.
        if page == 0:
            array_offset = offsets[page] + 210
        else:
            array_offset = offsets[page] + 162

        return array_offset

    def _set_mm_meta(self):
        """
        assign image metadata from summary metadata

        Returns
        -------

        """
        with TiffFile(self.files[0]) as tif:
            self.mm_meta = tif.micromanager_metadata

        mm_version = self.mm_meta['Summary']['MicroManagerVersion']
        if 'beta' in mm_version:
            # MM 2.0 beta stores stage positions in a different layout.
            if self.mm_meta['Summary']['Positions'] > 1:
                self.stage_positions = []

                for p in range(len(self.mm_meta['Summary']['StagePositions'])):
                    pos = self._simplify_stage_position_beta(self.mm_meta['Summary']['StagePositions'][p])
                    self.stage_positions.append(pos)

            # self.channel_names = 'Not Listed'

        elif mm_version == '1.4.22':
            for ch in self.mm_meta['Summary']['ChNames']:
                self.channel_names.append(ch)

        else:
            if self.mm_meta['Summary']['Positions'] > 1:
                self.stage_positions = []

                for p in range(self.mm_meta['Summary']['Positions']):
                    pos = self._simplify_stage_position(self.mm_meta['Summary']['StagePositions'][p])
                    self.stage_positions.append(pos)

            for ch in self.mm_meta['Summary']['ChNames']:
                self.channel_names.append(ch)

        # dimensions based on mm metadata do not reflect final written dimensions
        # these will change after data is loaded
        self.z_step_size = self.mm_meta['Summary']['z-step_um']
        self.height = self.mm_meta['Summary']['Height']
        self.width = self.mm_meta['Summary']['Width']
        self.frames = self.mm_meta['Summary']['Frames']
        self.slices = self.mm_meta['Summary']['Slices']
        self.channels = self.mm_meta['Summary']['Channels']

    def _simplify_stage_position(self, stage_pos: dict):
        """
        flattens the nested dictionary structure of stage_pos and removes superfluous keys

        Parameters
        ----------
        stage_pos: (dict) dictionary containing a single position's device info

        Returns
        -------
        out: (dict) flattened dictionary
        """
        out = copy(stage_pos)
        out.pop('DevicePositions')
        for dev_pos in stage_pos['DevicePositions']:
            out.update({dev_pos['Device']: dev_pos['Position_um']})
        return out

    def _simplify_stage_position_beta(self, stage_pos: dict):
        """
        flattens the nested dictionary structure of stage_pos and removes superfluous keys
        for MM2.0 Beta versions

        Parameters
        ----------
        stage_pos: (dict) dictionary containing a single position's device info

        Returns
        -------
        new_dict: (dict) flattened dictionary
        """
        new_dict = {}
        new_dict['Label'] = stage_pos['label']
        new_dict['GridRow'] = stage_pos['gridRow']
        new_dict['GridCol'] = stage_pos['gridCol']

        for sub in stage_pos['subpositions']:
            values = []
            for field in ['x', 'y', 'z']:
                # Only keep axes the sub-stage actually moved on.
                if sub[field] != 0:
                    values.append(sub[field])
            if len(values) == 1:
                new_dict[sub['stageName']] = values[0]
            else:
                new_dict[sub['stageName']] = values

        return new_dict

    def _create_position_array(self, pos):
        """
        maps all of the tiff data into a virtual zarr store in memory for a given position

        Parameters
        ----------
        pos: (int) index of the position to create array under

        Returns
        -------

        """
        # initialize virtual zarr store and save it under positions
        timepoints, channels, slices = self._get_dimensions(pos)
        self.position_arrays[pos] = zarr.empty(shape=(timepoints, channels, slices, self.height, self.width),
                                               chunks=(1, 1, 1, self.height, self.width),
                                               dtype=self.dtype)
        # add all the images with this specific dimension. Will be blank images if dataset
        # is incomplete
        for t in range(timepoints):
            for c in range(channels):
                for z in range(slices):
                    self.position_arrays[pos][t, c, z, :, :] = self.get_image(pos, t, c, z)

    def _set_dtype(self):
        """
        gets the datatype from any image plane metadata

        Returns
        -------

        """
        tf = tiff.TiffFile(self.files[0])
        self.dtype = tf.pages[0].dtype

    def _get_dimensions(self, position):
        """
        Gets the max dimensions from the current position in case of incomplete datasets

        Parameters
        ----------
        position: (int) Position index to grab dimensions from

        Returns
        -------

        """
        t = 0
        c = 0
        z = 0

        # dimension size = index + 1
        for tup in self.coord_map.keys():
            if position != tup[0]:
                continue
            else:
                if tup[1]+1 > t:
                    t = tup[1]+1
                if tup[2]+1 > c:
                    c = tup[2]+1
                if tup[3]+1 > z:
                    z = tup[3]+1

        return t, c, z

    def get_image(self, p, t, c, z):
        """
        get the image at a specific coordinate through memory mapping

        Parameters
        ----------
        p: (int) position index
        t: (int) time index
        c: (int) channel index
        z: (int) slice/z index

        Returns
        -------
        image: (np-array) numpy array of shape (Y, X) at given coordinate
        """
        coord_key = (p, t, c, z)
        coord = self.coord_map[coord_key]  # (file, page, offset)
        return np.memmap(coord[0], dtype=self.dtype, mode='r', offset=coord[2], shape=(self.height, self.width))

    def get_zarr(self, position):
        """
        return a zarr array for a given position

        Parameters
        ----------
        position: (int) position (aka ome-tiff scene)

        Returns
        -------
        position: (zarr.array)
        """
        if position not in self.position_arrays.keys():
            self._create_position_array(position)
        return self.position_arrays[position]

    def get_array(self, position):
        """
        return a numpy array for a given position

        Parameters
        ----------
        position: (int) position (aka ome-tiff scene)

        Returns
        -------
        position: (np.ndarray)
        """
        # if position hasn't been initialized in memory, do that.
        if position not in self.position_arrays.keys():
            self._create_position_array(position)

        return np.array(self.position_arrays[position])

    def get_num_positions(self):
        """
        get total number of scenes referenced in ome-tiff metadata

        Returns
        -------
        number of positions (int)
        """
        return self.positions

    @property
    def shape(self):
        """
        return the underlying data shape as a tuple

        Returns
        -------
        (tuple) five elements of (frames, slices, channels, height, width)
        """
        return self.frames, self.channels, self.slices, self.height, self.width
d3dde661949bde9817a393e00bab0a0cf7acfe41 | 779 | py | Python | cws/node.py | mattianeroni/clarke-wright-savings | b418bfbef060f662b09f73b70b8a5e7d03612bc2 | [
"MIT"
] | 1 | 2022-03-31T07:16:14.000Z | 2022-03-31T07:16:14.000Z | cws/node.py | mattianeroni/clarke-wright-savings | b418bfbef060f662b09f73b70b8a5e7d03612bc2 | [
"MIT"
] | null | null | null | cws/node.py | mattianeroni/clarke-wright-savings | b418bfbef060f662b09f73b70b8a5e7d03612bc2 | [
"MIT"
] | null | null | null | import abc
class Node(abc.ABC):
    """Base node of the graph on which the Clarke & Wright savings
    heuristic operates.

    Subclasses inherit the attributes and behaviour required to act as a
    graph node during the savings computation.
    """

    def __init__(self, id, dn_edge, nd_edge):
        """Initialise a node.

        :param id: Unique identifier of the node (uniqueness is NOT checked).
        :param dn_edge: Depot-to-node edge.
        :param nd_edge: Node-to-depot edge.
        :attr route: Cluster/route the node currently belongs to (None at start).
        """
        self.id = id
        self.dn_edge = dn_edge
        self.nd_edge = nd_edge
        self.route = None

    def __repr__(self):
        return str(self.id)
5d720a24663bfe1abbe39f12e5e54e0d8cbcf606 | 261 | py | Python | pibackbone/__init__.py | anarkiwi/pibackbone | 8677da0a7fb93fe86cd4459b0ad8ce3c5d5af90f | [
"Apache-2.0"
] | null | null | null | pibackbone/__init__.py | anarkiwi/pibackbone | 8677da0a7fb93fe86cd4459b0ad8ce3c5d5af90f | [
"Apache-2.0"
] | 29 | 2022-03-22T20:20:50.000Z | 2022-03-31T23:12:51.000Z | pibackbone/__init__.py | anarkiwi/pibackbone | 8677da0a7fb93fe86cd4459b0ad8ce3c5d5af90f | [
"Apache-2.0"
] | 2 | 2022-03-22T16:24:59.000Z | 2022-03-31T20:25:27.000Z | import pkg_resources
from pbr.version import VersionInfo

# Declare this package as a pkg_resources-style namespace package so other
# distributions can contribute modules under the same top-level name.
pkg_resources.declare_namespace(__name__)

# TODO: 3.8+ and later, use importlib: https://pypi.org/project/importlib-metadata/
# Version string is resolved by pbr from the installed package metadata.
__version__ = VersionInfo('pibackbone').semantic_version().release_string()
| 32.625 | 83 | 0.812261 |
edd5f00d3d06e4d8d3b626fd9279d39c271e964c | 10,881 | py | Python | mmdet3d/core/bbox/structures/coord_3d_mode.py | gopi231091/mmdetection3d | 1b2e64cd75c8d1c238c61a3bc1e3c62a7d403b53 | [
"Apache-2.0"
] | 217 | 2021-12-10T09:44:33.000Z | 2022-03-31T16:17:35.000Z | mmdet3d/core/bbox/structures/coord_3d_mode.py | gopi231091/mmdetection3d | 1b2e64cd75c8d1c238c61a3bc1e3c62a7d403b53 | [
"Apache-2.0"
] | 22 | 2021-12-29T08:57:31.000Z | 2022-03-31T11:21:53.000Z | mmdet3d/core/bbox/structures/coord_3d_mode.py | gopi231091/mmdetection3d | 1b2e64cd75c8d1c238c61a3bc1e3c62a7d403b53 | [
"Apache-2.0"
] | 23 | 2021-12-13T06:56:38.000Z | 2022-03-28T02:02:13.000Z | import numpy as np
import torch
from enum import IntEnum, unique
from mmdet3d.core.points import (BasePoints, CameraPoints, DepthPoints,
LiDARPoints)
from .base_box3d import BaseInstance3DBoxes
from .cam_box3d import CameraInstance3DBoxes
from .depth_box3d import DepthInstance3DBoxes
from .lidar_box3d import LiDARInstance3DBoxes
@unique
class Coord3DMode(IntEnum):
r"""Enum of different ways to represent a box
and point cloud.
Coordinates in LiDAR:
.. code-block:: none
up z
^ x front
| /
| /
left y <------ 0
The relative coordinate of bottom center in a LiDAR box is (0.5, 0.5, 0),
and the yaw is around the z axis, thus the rotation axis=2.
Coordinates in camera:
.. code-block:: none
z front
/
/
0 ------> x right
|
|
v
down y
The relative coordinate of bottom center in a CAM box is [0.5, 1.0, 0.5],
and the yaw is around the y axis, thus the rotation axis=1.
Coordinates in Depth mode:
.. code-block:: none
up z
^ y front
| /
| /
0 ------> x right
The relative coordinate of bottom center in a DEPTH box is (0.5, 0.5, 0),
and the yaw is around the z axis, thus the rotation axis=2.
"""
LIDAR = 0
CAM = 1
DEPTH = 2
@staticmethod
def convert(input, src, dst, rt_mat=None):
    """Convert boxes or points from `src` mode to `dst` mode.

    Dispatches to convert_box or convert_point based on the input type.
    """
    if isinstance(input, BaseInstance3DBoxes):
        return Coord3DMode.convert_box(input, src, dst, rt_mat=rt_mat)
    if isinstance(input, BasePoints):
        return Coord3DMode.convert_point(input, src, dst, rt_mat=rt_mat)
    raise NotImplementedError
@staticmethod
def convert_box(box, src, dst, rt_mat=None):
    """Convert boxes from `src` mode to `dst` mode.

    Args:
        box (tuple | list | np.ndarray |
            torch.Tensor | BaseInstance3DBoxes):
            Can be a k-tuple, k-list or an Nxk array/tensor, where k = 7.
        src (:obj:`CoordMode`): The src Box mode.
        dst (:obj:`CoordMode`): The target Box mode.
        rt_mat (np.ndarray | torch.Tensor): The rotation and translation
            matrix between different coordinates. Defaults to None.
            The conversion from `src` coordinates to `dst` coordinates
            usually comes along the change of sensors, e.g., from camera
            to LiDAR. This requires a transformation matrix.

    Returns:
        (tuple | list | np.ndarray | torch.Tensor | BaseInstance3DBoxes): \
            The converted box of the same type.
    """
    if src == dst:
        return box

    is_numpy = isinstance(box, np.ndarray)
    is_Instance3DBoxes = isinstance(box, BaseInstance3DBoxes)
    single_box = isinstance(box, (list, tuple))
    if single_box:
        assert len(box) >= 7, (
            'CoordMode.convert takes either a k-tuple/list or '
            'an Nxk array/tensor, where k >= 7')
        # Promote the single box to a 1xk tensor for uniform handling.
        arr = torch.tensor(box)[None, :]
    else:
        # avoid modifying the input box
        if is_numpy:
            arr = torch.from_numpy(np.asarray(box)).clone()
        elif is_Instance3DBoxes:
            arr = box.tensor.clone()
        else:
            arr = box.clone()

    # convert box from `src` mode to `dst` mode.
    # Sizes (columns 3:5) must be permuted to follow the axis swap of the
    # default rotation below; a user-supplied rt_mat only affects centers.
    x_size, y_size, z_size = arr[..., 3:4], arr[..., 4:5], arr[..., 5:6]
    if src == Coord3DMode.LIDAR and dst == Coord3DMode.CAM:
        if rt_mat is None:
            rt_mat = arr.new_tensor([[0, -1, 0], [0, 0, -1], [1, 0, 0]])
        xyz_size = torch.cat([y_size, z_size, x_size], dim=-1)
    elif src == Coord3DMode.CAM and dst == Coord3DMode.LIDAR:
        if rt_mat is None:
            rt_mat = arr.new_tensor([[0, 0, 1], [-1, 0, 0], [0, -1, 0]])
        xyz_size = torch.cat([z_size, x_size, y_size], dim=-1)
    elif src == Coord3DMode.DEPTH and dst == Coord3DMode.CAM:
        if rt_mat is None:
            rt_mat = arr.new_tensor([[1, 0, 0], [0, 0, 1], [0, -1, 0]])
        xyz_size = torch.cat([x_size, z_size, y_size], dim=-1)
    elif src == Coord3DMode.CAM and dst == Coord3DMode.DEPTH:
        if rt_mat is None:
            rt_mat = arr.new_tensor([[1, 0, 0], [0, 0, -1], [0, 1, 0]])
        xyz_size = torch.cat([x_size, z_size, y_size], dim=-1)
    elif src == Coord3DMode.LIDAR and dst == Coord3DMode.DEPTH:
        if rt_mat is None:
            rt_mat = arr.new_tensor([[0, -1, 0], [1, 0, 0], [0, 0, 1]])
        xyz_size = torch.cat([y_size, x_size, z_size], dim=-1)
    elif src == Coord3DMode.DEPTH and dst == Coord3DMode.LIDAR:
        if rt_mat is None:
            rt_mat = arr.new_tensor([[0, 1, 0], [-1, 0, 0], [0, 0, 1]])
        xyz_size = torch.cat([y_size, x_size, z_size], dim=-1)
    else:
        raise NotImplementedError(
            f'Conversion from Coord3DMode {src} to {dst} '
            'is not supported yet')

    if not isinstance(rt_mat, torch.Tensor):
        rt_mat = arr.new_tensor(rt_mat)
    if rt_mat.size(1) == 4:
        # 3x4 matrix: homogeneous transform (rotation + translation).
        extended_xyz = torch.cat(
            [arr[:, :3], arr.new_ones(arr.size(0), 1)], dim=-1)
        xyz = extended_xyz @ rt_mat.t()
    else:
        # 3x3 matrix: pure rotation of the box centers.
        xyz = arr[:, :3] @ rt_mat.t()

    # Columns 6+ (yaw and any extra dims) are carried over unchanged.
    remains = arr[..., 6:]
    arr = torch.cat([xyz[:, :3], xyz_size, remains], dim=-1)

    # convert arr to the original type
    original_type = type(box)
    if single_box:
        return original_type(arr.flatten().tolist())
    if is_numpy:
        return arr.numpy()
    elif is_Instance3DBoxes:
        if dst == Coord3DMode.CAM:
            target_type = CameraInstance3DBoxes
        elif dst == Coord3DMode.LIDAR:
            target_type = LiDARInstance3DBoxes
        elif dst == Coord3DMode.DEPTH:
            target_type = DepthInstance3DBoxes
        else:
            raise NotImplementedError(
                f'Conversion to {dst} through {original_type}'
                ' is not supported yet')
        return target_type(
            arr, box_dim=arr.size(-1), with_yaw=box.with_yaw)
    else:
        return arr
@staticmethod
def convert_point(point, src, dst, rt_mat=None):
"""Convert points from `src` mode to `dst` mode.
Args:
point (tuple | list | np.ndarray |
torch.Tensor | BasePoints):
Can be a k-tuple, k-list or an Nxk array/tensor.
src (:obj:`CoordMode`): The src Point mode.
dst (:obj:`CoordMode`): The target Point mode.
rt_mat (np.ndarray | torch.Tensor): The rotation and translation
matrix between different coordinates. Defaults to None.
The conversion from `src` coordinates to `dst` coordinates
usually comes along the change of sensors, e.g., from camera
to LiDAR. This requires a transformation matrix.
Returns:
(tuple | list | np.ndarray | torch.Tensor | BasePoints): \
The converted point of the same type.
"""
if src == dst:
return point
is_numpy = isinstance(point, np.ndarray)
is_InstancePoints = isinstance(point, BasePoints)
single_point = isinstance(point, (list, tuple))
if single_point:
assert len(point) >= 3, (
'CoordMode.convert takes either a k-tuple/list or '
'an Nxk array/tensor, where k >= 3')
arr = torch.tensor(point)[None, :]
else:
# avoid modifying the input point
if is_numpy:
arr = torch.from_numpy(np.asarray(point)).clone()
elif is_InstancePoints:
arr = point.tensor.clone()
else:
arr = point.clone()
# convert point from `src` mode to `dst` mode.
# TODO: LIDAR
# only implemented provided Rt matrix in cam-depth conversion
if src == Coord3DMode.LIDAR and dst == Coord3DMode.CAM:
if rt_mat is None:
rt_mat = arr.new_tensor([[0, -1, 0], [0, 0, -1], [1, 0, 0]])
elif src == Coord3DMode.CAM and dst == Coord3DMode.LIDAR:
if rt_mat is None:
rt_mat = arr.new_tensor([[0, 0, 1], [-1, 0, 0], [0, -1, 0]])
elif src == Coord3DMode.DEPTH and dst == Coord3DMode.CAM:
if rt_mat is None:
rt_mat = arr.new_tensor([[1, 0, 0], [0, 0, -1], [0, 1, 0]])
elif src == Coord3DMode.CAM and dst == Coord3DMode.DEPTH:
if rt_mat is None:
rt_mat = arr.new_tensor([[1, 0, 0], [0, 0, 1], [0, -1, 0]])
elif src == Coord3DMode.LIDAR and dst == Coord3DMode.DEPTH:
if rt_mat is None:
rt_mat = arr.new_tensor([[0, -1, 0], [1, 0, 0], [0, 0, 1]])
elif src == Coord3DMode.DEPTH and dst == Coord3DMode.LIDAR:
if rt_mat is None:
rt_mat = arr.new_tensor([[0, 1, 0], [-1, 0, 0], [0, 0, 1]])
else:
raise NotImplementedError(
f'Conversion from Coord3DMode {src} to {dst} '
'is not supported yet')
if not isinstance(rt_mat, torch.Tensor):
rt_mat = arr.new_tensor(rt_mat)
if rt_mat.size(1) == 4:
extended_xyz = torch.cat(
[arr[:, :3], arr.new_ones(arr.size(0), 1)], dim=-1)
xyz = extended_xyz @ rt_mat.t()
else:
xyz = arr[:, :3] @ rt_mat.t()
remains = arr[:, 3:]
arr = torch.cat([xyz[:, :3], remains], dim=-1)
# convert arr to the original type
original_type = type(point)
if single_point:
return original_type(arr.flatten().tolist())
if is_numpy:
return arr.numpy()
elif is_InstancePoints:
if dst == Coord3DMode.CAM:
target_type = CameraPoints
elif dst == Coord3DMode.LIDAR:
target_type = LiDARPoints
elif dst == Coord3DMode.DEPTH:
target_type = DepthPoints
else:
raise NotImplementedError(
f'Conversion to {dst} through {original_type}'
' is not supported yet')
return target_type(
arr,
points_dim=arr.size(-1),
attribute_dims=point.attribute_dims)
else:
return arr
| 38.72242 | 79 | 0.531201 |
6a93f9fa560ce7397320678a35a7325cfd3731dd | 362 | py | Python | unittests/__init__.py | xinranduan/mpc-forest | e93bee948daeda80d956df210b2057db41698ac9 | [
"BSD-3-Clause"
] | null | null | null | unittests/__init__.py | xinranduan/mpc-forest | e93bee948daeda80d956df210b2057db41698ac9 | [
"BSD-3-Clause"
] | null | null | null | unittests/__init__.py | xinranduan/mpc-forest | e93bee948daeda80d956df210b2057db41698ac9 | [
"BSD-3-Clause"
] | null | null | null | """
Copyright (c) 2017 Eric Shook. All rights reserved.
Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
@author: eshook (Eric Shook, eshook@gmail.edu)
@contributors: <Contribute and add your name here!>
"""
# Import unittests
from .test_Bob import *
from .test_Bobs import *
from .test_PrimitivesRaster import * | 32.909091 | 97 | 0.759669 |
56145a865723ea952cb0e4370da40229117d4716 | 11,607 | py | Python | magenta/models/performance_rnn/performance_rnn_generate.py | fanzhiyan/magenta | 622c47c19bb84c6f57b286ed03b738516b2f27d6 | [
"Apache-2.0"
] | 16 | 2016-09-02T04:59:30.000Z | 2022-01-11T10:38:29.000Z | magenta/models/performance_rnn/performance_rnn_generate.py | fanzhiyan/magenta | 622c47c19bb84c6f57b286ed03b738516b2f27d6 | [
"Apache-2.0"
] | 2 | 2016-09-25T16:39:59.000Z | 2016-11-18T17:43:41.000Z | magenta/models/performance_rnn/performance_rnn_generate.py | fanzhiyan/magenta | 622c47c19bb84c6f57b286ed03b738516b2f27d6 | [
"Apache-2.0"
] | 10 | 2016-09-02T04:59:32.000Z | 2021-09-29T06:57:24.000Z | # Copyright 2019 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate polyphonic performances from a trained checkpoint.
Uses flags to define operation.
"""
import ast
import os
import time
import magenta
from magenta.models.performance_rnn import performance_model
from magenta.models.performance_rnn import performance_sequence_generator
from magenta.music import constants
from magenta.protobuf import generator_pb2
from magenta.protobuf import music_pb2
import tensorflow as tf
FLAGS = tf.app.flags.FLAGS

# --- Model source: either a training run directory or a frozen bundle. ---
tf.app.flags.DEFINE_string(
    'run_dir', None,
    'Path to the directory where the latest checkpoint will be loaded from.')
tf.app.flags.DEFINE_string(
    'bundle_file', None,
    'Path to the bundle file. If specified, this will take priority over '
    'run_dir, unless save_generator_bundle is True, in which case both this '
    'flag and run_dir are required')
tf.app.flags.DEFINE_boolean(
    'save_generator_bundle', False,
    'If true, instead of generating a sequence, will save this generator as a '
    'bundle file in the location specified by the bundle_file flag')
tf.app.flags.DEFINE_string(
    'bundle_description', None,
    'A short, human-readable text description of the bundle (e.g., training '
    'data, hyper parameters, etc.).')
tf.app.flags.DEFINE_string(
    'config', 'performance', 'Config to use.')

# --- Output options. ---
tf.app.flags.DEFINE_string(
    'output_dir', '/tmp/performance_rnn/generated',
    'The directory where MIDI files will be saved to.')
tf.app.flags.DEFINE_integer(
    'num_outputs', 10,
    'The number of tracks to generate. One MIDI file will be created for '
    'each.')
tf.app.flags.DEFINE_integer(
    'num_steps', 3000,
    'The total number of steps the generated track should be, priming '
    'track length + generated steps. Each step is 10 milliseconds.')

# --- Priming options; run_with_flags() tries primer_pitches, then
# primer_melody, then primer_midi, and falls back to an empty sequence. ---
tf.app.flags.DEFINE_string(
    'primer_pitches', '',
    'A string representation of a Python list of pitches that will be used as '
    'a starting chord with a quarter note duration. For example: '
    '"[60, 64, 67]"')
tf.app.flags.DEFINE_string(
    'primer_melody', '',
    'A string representation of a Python list of '
    'magenta.music.Melody event values. For example: '
    '"[60, -2, 60, -2, 67, -2, 67, -2]". The primer melody will be played at '
    'a fixed tempo of 120 QPM with 4 steps per quarter note.')
tf.app.flags.DEFINE_string(
    'primer_midi', '',
    'The path to a MIDI file containing a polyphonic track that will be used '
    'as a priming track.')

# --- Generation / conditioning options. ---
tf.app.flags.DEFINE_string(
    'disable_conditioning', None,
    'When optional conditioning is available, a string representation of a '
    'Boolean indicating whether or not to disable conditioning. Similar to '
    'control signals, this can also be a list of Booleans; when it is a list, '
    'the other conditioning variables will be ignored for segments where '
    'conditioning is disabled.')
tf.app.flags.DEFINE_float(
    'temperature', 1.0,
    'The randomness of the generated tracks. 1.0 uses the unaltered '
    'softmax probabilities, greater than 1.0 makes tracks more random, less '
    'than 1.0 makes tracks less random.')
tf.app.flags.DEFINE_integer(
    'beam_size', 1,
    'The beam size to use for beam search when generating tracks.')
tf.app.flags.DEFINE_integer(
    'branch_factor', 1,
    'The branch factor to use for beam search when generating tracks.')
tf.app.flags.DEFINE_integer(
    'steps_per_iteration', 1,
    'The number of steps to take per beam search iteration.')
tf.app.flags.DEFINE_string(
    'log', 'INFO',
    'The threshold for what messages will be logged DEBUG, INFO, WARN, ERROR, '
    'or FATAL.')
tf.app.flags.DEFINE_string(
    'hparams', '',
    'Comma-separated list of `name=value` pairs. For each pair, the value of '
    'the hyperparameter named `name` is set to `value`. This mapping is merged '
    'with the default hyperparameters.')
# Add flags for all performance control signals.
for control_signal_cls in magenta.music.all_performance_control_signals:
  tf.app.flags.DEFINE_string(
      control_signal_cls.name, None, control_signal_cls.description)
def get_checkpoint():
  """Return the training directory checkpoints should be loaded from, if any.

  Raises:
    magenta.music.SequenceGeneratorError: if both --run_dir and --bundle_file
        were supplied without --save_generator_bundle.
  """
  conflicting_sources = (FLAGS.run_dir and FLAGS.bundle_file and
                         not FLAGS.save_generator_bundle)
  if conflicting_sources:
    raise magenta.music.SequenceGeneratorError(
        'Cannot specify both bundle_file and run_dir')
  if not FLAGS.run_dir:
    return None
  return os.path.join(os.path.expanduser(FLAGS.run_dir), 'train')
def get_bundle():
  """Read and return the generator bundle named by --bundle_file.

  Returns:
    A generator_pb2.GeneratorBundle, or None when --save_generator_bundle is
    set or no --bundle_file was given.
  """
  if FLAGS.save_generator_bundle or FLAGS.bundle_file is None:
    return None
  return magenta.music.read_bundle_file(os.path.expanduser(FLAGS.bundle_file))
def run_with_flags(generator):
  """Generates performance tracks and saves them as MIDI files.

  Uses the options specified by the flags defined in this module.

  Args:
    generator: The PerformanceRnnSequenceGenerator to use for generation.
  """
  if not FLAGS.output_dir:
    tf.logging.fatal('--output_dir required')
    return
  output_dir = os.path.expanduser(FLAGS.output_dir)
  primer_midi = None
  if FLAGS.primer_midi:
    primer_midi = os.path.expanduser(FLAGS.primer_midi)
  if not tf.gfile.Exists(output_dir):
    tf.gfile.MakeDirs(output_dir)
  # Build the priming sequence: pitches flag takes precedence over the melody
  # flag, which takes precedence over a primer MIDI file.
  primer_sequence = None
  if FLAGS.primer_pitches:
    primer_sequence = music_pb2.NoteSequence()
    primer_sequence.ticks_per_quarter = constants.STANDARD_PPQ
    for pitch in ast.literal_eval(FLAGS.primer_pitches):
      note = primer_sequence.notes.add()
      note.start_time = 0
      # One quarter note at the default QPM; all primer pitches share it,
      # forming a chord.
      note.end_time = 60.0 / magenta.music.DEFAULT_QUARTERS_PER_MINUTE
      note.pitch = pitch
      note.velocity = 100
      primer_sequence.total_time = note.end_time
  elif FLAGS.primer_melody:
    primer_melody = magenta.music.Melody(ast.literal_eval(FLAGS.primer_melody))
    primer_sequence = primer_melody.to_sequence()
  elif primer_midi:
    primer_sequence = magenta.music.midi_file_to_sequence_proto(primer_midi)
  else:
    tf.logging.warning(
        'No priming sequence specified. Defaulting to empty sequence.')
    primer_sequence = music_pb2.NoteSequence()
    primer_sequence.ticks_per_quarter = constants.STANDARD_PPQ
  # Derive the total number of seconds to generate.
  seconds_per_step = 1.0 / generator.steps_per_second
  generate_end_time = FLAGS.num_steps * seconds_per_step
  # Specify start/stop time for generation based on starting generation at the
  # end of the priming sequence and continuing until the sequence is num_steps
  # long.
  generator_options = generator_pb2.GeneratorOptions()
  # Set the start time to begin when the last note ends.
  generate_section = generator_options.generate_sections.add(
      start_time=primer_sequence.total_time,
      end_time=generate_end_time)
  if generate_section.start_time >= generate_section.end_time:
    tf.logging.fatal(
        'Priming sequence is longer than the total number of steps '
        'requested: Priming sequence length: %s, Total length '
        'requested: %s',
        generate_section.start_time, generate_end_time)
    return
  # Warn about control-signal flags this generator cannot condition on; they
  # are simply not forwarded below.
  for control_cls in magenta.music.all_performance_control_signals:
    if FLAGS[control_cls.name].value is not None and (
        generator.control_signals is None or not any(
            control.name == control_cls.name
            for control in generator.control_signals)):
      tf.logging.warning(
          'Control signal requested via flag, but generator is not set up to '
          'condition on this control signal. Request will be ignored: %s = %s',
          control_cls.name, FLAGS[control_cls.name].value)
  if (FLAGS.disable_conditioning is not None and
      not generator.optional_conditioning):
    tf.logging.warning(
        'Disable conditioning flag set, but generator is not set up for '
        'optional conditioning. Requested disable conditioning flag will be '
        'ignored: %s', FLAGS.disable_conditioning)
  # Forward the supported control-signal flag values (as strings) into the
  # generator options.
  if generator.control_signals:
    for control in generator.control_signals:
      if FLAGS[control.name].value is not None:
        generator_options.args[control.name].string_value = (
            FLAGS[control.name].value)
    if FLAGS.disable_conditioning is not None:
      generator_options.args['disable_conditioning'].string_value = (
          FLAGS.disable_conditioning)
  generator_options.args['temperature'].float_value = FLAGS.temperature
  generator_options.args['beam_size'].int_value = FLAGS.beam_size
  generator_options.args['branch_factor'].int_value = FLAGS.branch_factor
  generator_options.args[
      'steps_per_iteration'].int_value = FLAGS.steps_per_iteration
  tf.logging.debug('primer_sequence: %s', primer_sequence)
  tf.logging.debug('generator_options: %s', generator_options)
  # Make the generate request num_outputs times and save the output as midi
  # files.
  date_and_time = time.strftime('%Y-%m-%d_%H%M%S')
  digits = len(str(FLAGS.num_outputs))
  for i in range(FLAGS.num_outputs):
    generated_sequence = generator.generate(primer_sequence, generator_options)
    midi_filename = '%s_%s.mid' % (date_and_time, str(i + 1).zfill(digits))
    midi_path = os.path.join(output_dir, midi_filename)
    magenta.music.sequence_proto_to_midi_file(generated_sequence, midi_path)
  tf.logging.info('Wrote %d MIDI files to %s',
                  FLAGS.num_outputs, output_dir)
def main(unused_argv):
  """Saves bundle or runs generator based on flags."""
  tf.logging.set_verbosity(FLAGS.log)
  bundle = get_bundle()
  # A loaded bundle names its own config; otherwise use the --config flag.
  config_id = bundle.generator_details.id if bundle else FLAGS.config
  config = performance_model.default_configs[config_id]
  config.hparams.parse(FLAGS.hparams)
  # Having too large of a batch size will slow generation down unnecessarily.
  config.hparams.batch_size = min(
      config.hparams.batch_size, FLAGS.beam_size * FLAGS.branch_factor)
  generator = performance_sequence_generator.PerformanceRnnSequenceGenerator(
      model=performance_model.PerformanceRnnModel(config),
      details=config.details,
      steps_per_second=config.steps_per_second,
      num_velocity_bins=config.num_velocity_bins,
      control_signals=config.control_signals,
      optional_conditioning=config.optional_conditioning,
      checkpoint=get_checkpoint(),
      bundle=bundle,
      note_performance=config.note_performance)
  # Either freeze the generator into a bundle file, or actually generate.
  if FLAGS.save_generator_bundle:
    bundle_filename = os.path.expanduser(FLAGS.bundle_file)
    if FLAGS.bundle_description is None:
      tf.logging.warning('No bundle description provided.')
    tf.logging.info('Saving generator bundle to %s', bundle_filename)
    generator.create_bundle_file(bundle_filename, FLAGS.bundle_description)
  else:
    run_with_flags(generator)
def console_entry_point():
  """Console-script entry point; hands control to tf.app.run with main()."""
  tf.app.run(main)
if __name__ == '__main__':
  console_entry_point()
| 39.479592 | 80 | 0.740329 |
fd481f2c805f7187ebc0b5e0ca4c28fa8d5ca5f9 | 13,343 | py | Python | processes.py | GNUBrinux/pilot | 18d0ef3ea9822df7877fbe39b191f56a6288ada5 | [
"Apache-2.0"
] | 13 | 2015-02-19T17:17:10.000Z | 2021-12-22T06:48:02.000Z | processes.py | GNUBrinux/pilot | 18d0ef3ea9822df7877fbe39b191f56a6288ada5 | [
"Apache-2.0"
] | 85 | 2015-01-06T15:01:51.000Z | 2018-11-29T09:03:35.000Z | processes.py | GNUBrinux/pilot | 18d0ef3ea9822df7877fbe39b191f56a6288ada5 | [
"Apache-2.0"
] | 22 | 2015-06-09T12:08:29.000Z | 2018-11-20T10:07:01.000Z | import commands
import os
import signal
import time
import re
import pUtil
def findProcessesInGroup(cpids, pid):
    """Recursively collect pid and the pids of all of its descendants.

    cpids must be initialized by the caller; on return it contains the
    parent pid itself plus every (grand-)child pid found via ps.
    """
    cpids.append(pid)
    ps_lines = commands.getoutput("ps -eo pid,ppid -m | grep %d" % pid).split("\n")
    if ps_lines == ['']:
        return
    for line in ps_lines:
        try:
            fields = line.split()
            child_pid = int(fields[0])
            parent_pid = int(fields[1])
        except Exception as e:
            pUtil.tolog('exception caught: %s (lines[1]=%s)' % (e, ps_lines[1]))
        else:
            # recurse only into direct children; their own children are
            # picked up by the recursive call
            if parent_pid == pid:
                findProcessesInGroup(cpids, child_pid)
def isZombie(pid):
    """Return True if pid shows up as a zombie (<defunct>) in ps output."""
    ps_output = commands.getoutput("ps aux | grep %d" % (pid))
    return "<defunct>" in ps_output
def getProcessCommands(euid, pids):
    """ return a list of process commands corresponding to a pid list for user euid """

    _cmd = 'ps u -u %d' % (euid)
    processCommands = []
    ec, rs = commands.getstatusoutput(_cmd)
    if ec != 0:
        pUtil.tolog("Command failed: %s" % (rs))
    else:
        # extract the relevant processes
        pCommands = rs.split('\n')
        first = True
        for pCmd in pCommands:
            if first:
                # get the header info line
                processCommands.append(pCmd)
                first = False
            else:
                # Bug fix: the previous whitespace normalization looped
                # "while ' ' in _pCmd: _pCmd = _pCmd.replace(' ', ' ')",
                # which replaces a space with a space and therefore never
                # terminates. str.split() with no separator already collapses
                # runs of whitespace, which is what was intended.
                items = pCmd.split()
                # guard against short/empty ps lines before indexing
                if len(items) < 2:
                    continue
                for pid in pids:
                    # items = username pid ...
                    if items[1] == str(pid):
                        processCommands.append(pCmd)
                        break

    return processCommands
def dumpStackTrace(pid):
    """Log a pstack stack trace of pid; skipped for zombie processes."""
    # pstack is pointless on a defunct process
    if isZombie(pid):
        pUtil.tolog("Skipping pstack dump for zombie process")
        return
    pUtil.tolog("Running stack trace command on pid=%d:" % (pid))
    exitcode, output = pUtil.timedCommand("pstack %d" % (pid), timeout=60)
    pUtil.tolog(output or "(pstack returned empty string)")
def killProcesses(pid, pgrp):
    """ kill a job upon request

    First tries to terminate the whole process group pgrp (SIGTERM, grace
    period, then SIGKILL). Only if that group kill did not succeed does it
    fall back to walking the process tree below pid and killing each process
    individually (dumping a pstack trace first). Finally sweeps any
    remaining orphans via killOrphans().
    """
    pUtil.tolog("killProcesses() called")

    # if there is a known subprocess pgrp, then it should be enough to kill the group in one go
    status = False

    _sleep = True
    if pgrp != 0:
        # kill the process gracefully
        pUtil.tolog("Killing group process %d" % (pgrp))
        try:
            os.killpg(pgrp, signal.SIGTERM)
        except Exception,e:
            pUtil.tolog("WARNING: Exception thrown when killing the child group process with SIGTERM: %s" % (e))
            # SIGTERM failed, so no point in waiting before the SIGKILL
            _sleep = False
        else:
            pUtil.tolog("(SIGTERM sent)")

        if _sleep:
            # grace period so processes can flush output and exit cleanly
            _t = 30
            pUtil.tolog("Sleeping %d s to allow processes to exit" % (_t))
            time.sleep(_t)

        try:
            os.killpg(pgrp, signal.SIGKILL)
        except Exception,e:
            pUtil.tolog("WARNING: Exception thrown when killing the child group process with SIGKILL: %s" % (e))
        else:
            pUtil.tolog("(SIGKILL sent)")
            # group kill succeeded; skip the per-process fallback below
            status = True

    if not status:
        # firstly find all the children process IDs to be killed
        children = []
        findProcessesInGroup(children, pid)

        # reverse the process order so that the athena process is killed first (otherwise the stdout will be truncated)
        children.reverse()
        pUtil.tolog("Process IDs to be killed: %s (in reverse order)" % str(children))

        # find which commands are still running
        try:
            cmds = getProcessCommands(os.geteuid(), children)
        except Exception, e:
            pUtil.tolog("getProcessCommands() threw an exception: %s" % str(e))
        else:
            # cmds always contains the ps header line, hence <= 1 means empty
            if len(cmds) <= 1:
                pUtil.tolog("Found no corresponding commands to process id(s)")
            else:
                pUtil.tolog("Found commands still running:")
                for cmd in cmds:
                    pUtil.tolog(cmd)

        # loop over all child processes
        for i in children:
            # dump the stack trace before killing it
            dumpStackTrace(i)

            # kill the process gracefully
            try:
                os.kill(i, signal.SIGTERM)
            except Exception,e:
                pUtil.tolog("WARNING: Exception thrown when killing the child process %d under SIGTERM, wait for kill -9 later: %s" % (i, str(e)))
                pass
            else:
                pUtil.tolog("Killed pid: %d (SIGTERM)" % (i))

            _t = 10
            pUtil.tolog("Sleeping %d s to allow process to exit" % (_t))
            time.sleep(_t)

            # now do a hardkill just in case some processes haven't gone away
            try:
                os.kill(i, signal.SIGKILL)
            except Exception,e:
                pUtil.tolog("WARNING: Exception thrown when killing the child process %d under SIGKILL, ignore this if it is already killed by previous SIGTERM: %s" % (i, str(e)))
                pass
            else:
                pUtil.tolog("Killed pid: %d (SIGKILL)" % (i))

    pUtil.tolog("Killing any remaining orphan processes")
    killOrphans()
def checkProcesses(pid):
    """Log and return how many processes belong to pid's process tree."""
    group_pids = []
    count = 0
    try:
        findProcessesInGroup(group_pids, pid)
    except Exception as e:
        pUtil.tolog("!!WARNING!!2888!! Caught exception in findProcessesInGroup: %s" % (e))
    else:
        count = len(group_pids)
    pUtil.tolog("Number of running processes: %d" % (count))
    return count
def killOrphans():
    """ Find and kill all orphan processes belonging to current pilot user

    An orphan is any process of the current user whose parent pid is 1.
    cvmfs2, the pilot launcher and bash processes are deliberately spared.
    """
    # volunteer (BOINC) resources must not have their processes killed
    if 'BOINC' in pUtil.env['sitename']:
        pUtil.tolog("BOINC job, not looking for orphan processes")
        return
    # escape hatch: skip all killing when PILOT_NOKILL is set
    if 'PILOT_NOKILL' in os.environ:
        return

    pUtil.tolog("Searching for orphan processes")
    cmd = "ps -o pid,ppid,args -u %s" % (commands.getoutput("whoami"))
    processes = commands.getoutput(cmd)
    # each matching ps line: "<pid> <ppid> <first-word-of-command>"
    pattern = re.compile('(\d+)\s+(\d+)\s+(\S+)')

    count = 0
    for line in processes.split('\n'):
        ids = pattern.search(line)
        if ids:
            pid = ids.group(1)
            ppid = ids.group(2)
            args = ids.group(3)
            if 'cvmfs2' in args:
                pUtil.tolog("Ignoring possible orphan process running cvmfs2: pid=%s, ppid=%s, args='%s'" % (pid, ppid, args))
            elif 'pilots_starter.py' in args:
                pUtil.tolog("Ignoring Pilot Launcher: pid=%s, ppid=%s, args='%s'" % (pid, ppid, args))
            elif ppid == '1':
                # parent pid 1 (init) marks the process as an orphan
                count += 1
                pUtil.tolog("Found orphan process: pid=%s, ppid=%s, args='%s'" % (pid, ppid, args))
                if args.endswith('bash'):
                    pUtil.tolog("Will not kill bash process")
                else:
                    try:
                        os.killpg(int(pid), signal.SIGKILL)
                    except Exception as e:
                        pUtil.tolog("!!WARNING!!2323!! Failed to send SIGKILL: %s" % e)
                        # group kill failed; fall back to killing the single pid
                        cmd = 'kill -9 %s' % (pid)
                        ec, rs = commands.getstatusoutput(cmd)
                        if ec != 0:
                            pUtil.tolog("!!WARNING!!2999!! %s" % (rs))
                        else:
                            pUtil.tolog("Killed orphaned process %s (%s)" % (pid, args))
                    pUtil.tolog("Killed orphaned process group %s (%s)" % (pid, args))

    if count == 0:
        pUtil.tolog("Did not find any orphan processes")
    else:
        pUtil.tolog("Found %d orphan process(es)" % (count))
def getMaxMemoryUsageFromCGroups():
    """ Read the max_memory from CGROUPS file memory.max_usage_in_bytes

    Returns the raw string content of memory.max_usage_in_bytes for the
    pilot's cgroup, or None when the site does not use cgroups or the file
    cannot be read.
    """
    max_memory = None

    # Get the CGroups max memory using the pilot pid
    pid = os.getpid()
    path = "/proc/%d/cgroup" % (pid)
    if os.path.exists(path):
        cmd = "grep memory %s" % (path)
        pUtil.tolog("Executing command: %s" % (cmd))
        out = commands.getoutput(cmd)
        if out == "":
            pUtil.tolog("(Command did not return anything)")
        else:
            pUtil.tolog(out)
            if ":memory:" in out:
                # the memory line looks like "N:memory:/path"; keep the path
                pos = out.find('/')
                path = out[pos:]
                pUtil.tolog("Extracted path = %s" % (path))

                pre = getCGROUPSBasePath()
                if pre != "":
                    path = pre + os.path.join(path, "memory.max_usage_in_bytes")
                    pUtil.tolog("Path to CGROUPS memory info: %s" % (path))
                    # Use a context manager so the file handle is released
                    # even if read() raises (the old open/read/close pattern
                    # leaked the handle on a failed read).
                    try:
                        with open(path, 'r') as f:
                            max_memory = f.read()
                    except IOError as e:
                        pUtil.tolog("!!WARNING!!2212!! Could not open file %s: %s" % (path, e))
                else:
                    pUtil.tolog("CGROUPS base path could not be extracted - not a CGROUPS site")
            else:
                pUtil.tolog("!!WARNING!!2211!! Invalid format: %s (expected ..:memory:[path])" % (out))
    else:
        pUtil.tolog("Path %s does not exist (not a CGROUPS site)" % path)

    return max_memory
def getCGROUPSBasePath():
    """Return the mount point of the cgroup memory controller ("" if none)."""
    mount_query = "grep \'^cgroup\' /proc/mounts|grep memory| awk \'{print $2}\'"
    return commands.getoutput(mount_query)
def isCGROUPSSite():
    """Return True if the site mounts a cgroup memory controller."""
    # a non-empty base path means the memory controller is mounted
    return getCGROUPSBasePath() != ""
def get_cpu_consumption_time(t0):
    """Return the children's user+system CPU time accumulated since t0.

    os.times() indices 2 and 3 hold the children's user and system times;
    t0 is the os.times() tuple captured before the measured work started.
    """
    now = os.times()
    child_user_time = now[2] - t0[2]
    child_system_time = now[3] - t0[3]
    pUtil.tolog('user time=%d' % child_user_time)
    pUtil.tolog('system time=%d' % child_system_time)
    return child_user_time + child_system_time
def get_instant_cpu_consumption_time(pid):
    """
    Return the CPU consumption time (system+user time) for a given process, by parsing /proc/pid/stat.
    Note 1: the function returns 0.0 if the pid is not set.
    Note 2: the function must sum up all the user+system times for both the main process (pid) and the child
    processes, since the main process is most likely spawning new processes.

    :param pid: process id (int).
    :return: system+user time for a given pid (float).
    """
    utime = stime = cutime = cstime = None

    hz = os.sysconf(os.sysconf_names['SC_CLK_TCK'])
    # isinstance instead of the old "type(hz) != int" comparison
    if not isinstance(hz, int):
        pUtil.tolog('Unknown SC_CLK_TCK: %s' % str(hz))
        return 0.0

    if pid and hz and hz > 0:
        path = "/proc/%d/stat" % pid
        if os.path.exists(path):
            with open(path) as fp:
                # fields 14-17 (1-based) of /proc/<pid>/stat are utime,
                # stime, cutime, cstime measured in clock ticks
                fields = fp.read().split(' ')[13:17]
                utime, stime, cutime, cstime = [(float(f) / hz) for f in fields]

    # Bug fix: the previous truthiness test ("if utime and stime and cutime
    # and cstime") returned 0.0 whenever any component was exactly 0.0 -
    # cutime/cstime are frequently zero for processes without reaped
    # children - discarding valid measurements. Test for "not parsed"
    # (None) instead.
    if utime is None:
        return 0.0

    # sum up all the user+system times for both the main process (pid) and
    # the child processes
    cpu_consumption_time = utime + stime + cutime + cstime
    pUtil.tolog('CPU consumption time for pid=%d' % pid)
    pUtil.tolog('user time=%d' % utime)
    pUtil.tolog('system time=%d' % stime)
    pUtil.tolog('user time child=%d' % cutime)
    pUtil.tolog('system time child=%d' % cstime)

    return cpu_consumption_time
def get_current_cpu_consumption_time(pid):
    """Return the summed user+system CPU time of pid and all its descendants."""
    # collect pid itself plus every process in its tree
    group_pids = []
    findProcessesInGroup(group_pids, pid)
    # sum the per-process readings, skipping falsy (zero/empty) values as
    # the original accumulation loop did
    return sum(t for t in (get_instant_cpu_consumption_time(p)
                           for p in group_pids) if t)
| 36.159892 | 187 | 0.548452 |
d0229a51c8b27cbf4a71fbf5779655d0d139d539 | 32,451 | py | Python | lib/flows/general/collectors.py | scudette/grr | d4257c5259af881e28a7d62e9837fa13352e2bf6 | [
"Apache-2.0"
] | 6 | 2015-04-03T02:25:28.000Z | 2021-11-17T21:42:59.000Z | lib/flows/general/collectors.py | scudette/grr | d4257c5259af881e28a7d62e9837fa13352e2bf6 | [
"Apache-2.0"
] | null | null | null | lib/flows/general/collectors.py | scudette/grr | d4257c5259af881e28a7d62e9837fa13352e2bf6 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""Flows for handling the collection for artifacts."""
import logging
from grr.lib import aff4
from grr.lib import artifact
from grr.lib import artifact_lib
from grr.lib import config_lib
from grr.lib import flow
from grr.lib import parsers
from grr.lib import rdfvalue
class ArtifactCollectorFlow(flow.GRRFlow):
"""Flow that takes a list of artifacts and collects them.
This flow is the core of the Artifact implementation for GRR. Artifacts are
defined using a standardized data format that includes what to collect and
how to process the things collected. This flow takes that data driven format
and makes it useful.
The core functionality of Artifacts is split into Collectors and Processors.
An Artifact defines a set of Collectors that are used to retrieve data from
the client. These can specify collection of files, registry keys, command
output and others. The first part of this flow "Collect" handles running those
collections by issuing GRR flows and client actions.
The results of those are then collected and GRR searches for Processors that
know how to process the output of the Collectors. The Processors all inherit
from the Parser class, and each Parser specifies which Artifacts it knows how
to process.
So this flow hands off the collected rdfvalue results to the Processors which
then return modified or different rdfvalues. These final results are then
either:
1. Sent to the calling flow.
2. Written to a collection.
3. Stored in AFF4 based on a special mapping called the GRRArtifactMappings.
4. A combination of the above.
This is controlled by the flow parameters.
"""
category = "/Collectors/"
args_type = rdfvalue.ArtifactCollectorFlowArgs
behaviours = flow.GRRFlow.behaviours + "BASIC"
  @flow.StateHandler(next_state=["StartCollection"])
  def Start(self):
    """For each artifact, create subflows for each collector.

    Registers the flow's bookkeeping state, then makes sure a knowledge base
    is available (fetching or initializing one via a subflow if required)
    before handing over to the StartCollection state.
    """
    self.client = aff4.FACTORY.Open(self.client_id, token=self.token)
    # Bookkeeping used for progress/status reporting by later states.
    self.state.Register("artifacts_skipped_due_to_condition", [])
    self.state.Register("response_count", 0)
    self.state.Register("failed_count", 0)
    self.state.Register("artifacts_failed", [])
    self.state.Register("knowledge_base", self.args.knowledge_base)
    self.state.Register("called_fallbacks", set())
    self.state.Register("client_anomaly_store", None)
    # Choose the filesystem access method for file-based collectors.
    if self.args.use_tsk:
      self.state.Register("path_type", rdfvalue.PathSpec.PathType.TSK)
    else:
      self.state.Register("path_type", rdfvalue.PathSpec.PathType.OS)
    if (self.args.dependencies ==
        rdfvalue.ArtifactCollectorFlowArgs.Dependency.FETCH_NOW):
      # Don't retrieve a full knowledgebase, just get the dependencies we
      # need. CollectArtifactDependencies calls back to this flow to retrieve
      # the necessary dependencies. We avoid a loop because
      # dependencies defaults to USE_CACHED set and a knowledgebase is
      # provided.
      self.CallFlow("CollectArtifactDependencies",
                    artifact_list=self.args.artifact_list,
                    next_state="StartCollection")
      return
    elif (self.args.dependencies ==
          rdfvalue.ArtifactCollectorFlowArgs.Dependency.USE_CACHED) and (
              not self.state.knowledge_base):
      # If not provided, get a knowledge base from the client.
      try:
        self.state.knowledge_base = artifact.GetArtifactKnowledgeBase(
            self.client)
      except artifact_lib.KnowledgeBaseUninitializedError:
        # If no-one has ever initialized the knowledge base, we should do so
        # now.
        if not self._AreArtifactsKnowledgeBaseArtifacts():
          self.CallFlow("KnowledgeBaseInitializationFlow",
                        next_state="StartCollection")
          return
    # In all other cases start the collection state.
    self.CallState(next_state="StartCollection")
  @flow.StateHandler(next_state=["ProcessCollected",
                                 "ProcessCollectedArtifactFiles",
                                 "ProcessFileFinderResults"])
  def StartCollection(self, responses):
    """Start collecting.

    Entered from Start(), either directly via CallState or as the callback of
    a knowledge-base/dependency subflow; `responses` carries that subflow's
    outcome. Validates each requested artifact and kicks off its collectors.

    Raises:
      artifact_lib.KnowledgeBaseUninitializedError: if the knowledge base
          initialization subflow reported failure.
    """
    if not responses.success:
      raise artifact_lib.KnowledgeBaseUninitializedError(
          "Attempt to initialize Knowledge Base failed.")
    if not self.state.knowledge_base:
      self.client = aff4.FACTORY.Open(self.client_id, token=self.token)
      # If we are processing the knowledge base, it still won't exist yet.
      self.state.knowledge_base = artifact.GetArtifactKnowledgeBase(
          self.client, allow_uninitialized=True)
    for artifact_name in self.args.artifact_list:
      artifact_obj = self._GetArtifactFromName(artifact_name)
      # Ensure artifact has been written sanely. Note that this could be
      # removed if it turns out to be expensive. Artifact tests should catch
      # these.
      artifact_obj.Validate()
      self.Collect(artifact_obj)
def ConvertSupportedOSToConditions(self, src_object, filter_list):
"""Turn supported_os into a condition."""
if src_object.supported_os:
filter_str = " OR ".join("os == '%s'" % o for o in
src_object.supported_os)
return filter_list.append(filter_str)
def Collect(self, artifact_obj):
  """Collect the raw data from the client for this artifact.

  First evaluates the artifact's own conditions against the knowledge base
  (skipping the whole artifact on any failure), then dispatches each of its
  collectors to the handler matching the collector's type.

  Args:
    artifact_obj: The artifact definition whose collectors will be run.

  Raises:
    RuntimeError: If a collector has an unrecognized collector_type.
  """
  artifact_name = artifact_obj.name

  test_conditions = list(artifact_obj.conditions)
  self.ConvertSupportedOSToConditions(artifact_obj, test_conditions)

  # Check each of the conditions match our target.
  for condition in test_conditions:
    if not artifact_lib.CheckCondition(condition, self.state.knowledge_base):
      logging.debug("Artifact %s condition %s failed on %s",
                    artifact_name, condition, self.client_id)
      self.state.artifacts_skipped_due_to_condition.append(
          (artifact_name, condition))
      return

  # Call the collector defined action for each collector.
  for collector in artifact_obj.collectors:

    # Check conditions on the collector.
    collector_conditions_met = True
    self.ConvertSupportedOSToConditions(collector, collector.conditions)
    if collector.conditions:
      for condition in collector.conditions:
        if not artifact_lib.CheckCondition(condition,
                                           self.state.knowledge_base):
          collector_conditions_met = False

    if collector_conditions_met:
      type_name = collector.collector_type
      self.current_artifact_name = artifact_name
      # Dispatch on collector type; each branch issues client calls or
      # sub-flows that report back via the ProcessCollected* states.
      if type_name == rdfvalue.Collector.CollectorType.COMMAND:
        self.RunCommand(collector)
      elif type_name == rdfvalue.Collector.CollectorType.FILE:
        self.GetFiles(collector, self.state.path_type,
                      self.args.max_file_size)
      elif type_name == rdfvalue.Collector.CollectorType.GREP:
        self.Grep(collector, self.state.path_type)
      elif type_name == rdfvalue.Collector.CollectorType.LIST_FILES:
        self.Glob(collector, self.state.path_type)
      elif type_name == rdfvalue.Collector.CollectorType.REGISTRY_KEY:
        self.Glob(collector, rdfvalue.PathSpec.PathType.REGISTRY)
      elif type_name == rdfvalue.Collector.CollectorType.REGISTRY_VALUE:
        self.GetRegistryValue(collector)
      elif type_name == rdfvalue.Collector.CollectorType.WMI:
        self.WMIQuery(collector)
      elif type_name == rdfvalue.Collector.CollectorType.REKALL_PLUGIN:
        self.RekallPlugin(collector)
      elif type_name == rdfvalue.Collector.CollectorType.ARTIFACT:
        self.CollectArtifacts(collector)
      elif type_name == rdfvalue.Collector.CollectorType.ARTIFACT_FILES:
        self.CollectArtifactFiles(collector)
      elif type_name == rdfvalue.Collector.CollectorType.GRR_CLIENT_ACTION:
        self.RunGrrClientAction(collector)
      else:
        raise RuntimeError("Invalid type %s in %s" % (type_name,
                                                      artifact_name))

    else:
      logging.debug("Artifact %s no collectors run due to all collectors "
                    "having failing conditons on %s", artifact_name,
                    self.client_id)
def _AreArtifactsKnowledgeBaseArtifacts(self):
  """Return True iff every requested artifact is a knowledge base artifact."""
  knowledgebase_list = config_lib.CONFIG["Artifacts.knowledge_base"]
  return all(artifact_name in knowledgebase_list
             for artifact_name in self.args.artifact_list)
def GetFiles(self, collector, path_type, max_size):
  """Get a set of files.

  Args:
    collector: Collector whose args contain a "path_list" of path patterns.
    path_type: PathSpec path type to use for the FileFinder flow.
    max_size: Maximum size (bytes) of each file to download.
  """
  new_path_list = []
  for path in collector.args["path_list"]:
    # Interpolate any attributes from the knowledgebase.
    new_path_list.extend(artifact_lib.InterpolateKbAttributes(
        path, self.state.knowledge_base))

  action = rdfvalue.FileFinderAction(
      action_type=rdfvalue.FileFinderAction.Action.DOWNLOAD,
      download=rdfvalue.FileFinderDownloadActionOptions(max_size=max_size))

  self.CallFlow(
      "FileFinder", paths=new_path_list, pathtype=path_type, action=action,
      request_data={"artifact_name": self.current_artifact_name,
                    "collector": collector.ToPrimitiveDict()},
      next_state="ProcessFileFinderResults")
@flow.StateHandler(next_state=["ProcessCollected"])
def ProcessFileFinderResults(self, responses):
  """Forward FileFinder stat entries to the common ProcessCollected state.

  Args:
    responses: FileFinder results; each carries a stat_entry attribute.
  """
  if not responses.success:
    self.Log("Failed to fetch files %s" %
             responses.request_data["artifact_name"])
  else:
    # Unwrap to plain stat entries so ProcessCollected sees the same shape
    # as the other collectors' results.
    self.CallStateInline(next_state="ProcessCollected",
                         request_data=responses.request_data,
                         messages=[r.stat_entry for r in responses])
def Glob(self, collector, pathtype):
  """Glob paths, return StatEntry objects.

  Args:
    collector: Collector whose args may contain a "path_list".
    pathtype: PathSpec path type to glob with.
  """
  self.CallFlow(
      "Glob", paths=self.InterpolateList(collector.args.get("path_list", [])),
      pathtype=pathtype,
      request_data={"artifact_name": self.current_artifact_name,
                    "collector": collector.ToPrimitiveDict()},
      next_state="ProcessCollected"
  )
def _CombineRegex(self, regex_list):
if len(regex_list) == 1:
return regex_list[0]
regex_combined = ""
for regex in regex_list:
if regex_combined:
regex_combined = "%s|(%s)" % (regex_combined, regex)
else:
regex_combined = "(%s)" % regex
return regex_combined
def Grep(self, collector, pathtype):
  """Grep files in path_list for any matches to content_regex_list.

  Args:
    collector: artifact collector
    pathtype: pathspec path type

  When multiple regexes are supplied, combine them into a single regex as an
  OR match so that we check all regexes at once.
  """
  path_list = self.InterpolateList(collector.args.get("path_list", []))
  content_regex_list = self.InterpolateList(
      collector.args.get("content_regex_list", []))

  regex_condition = rdfvalue.FileFinderContentsRegexMatchCondition(
      regex=self._CombineRegex(content_regex_list), bytes_before=0,
      bytes_after=0)

  file_finder_condition = rdfvalue.FileFinderCondition(
      condition_type=rdfvalue.FileFinderCondition.Type.CONTENTS_REGEX_MATCH,
      contents_regex_match=regex_condition)

  # Default FileFinderAction (stat) -- we only want the match metadata, not
  # the file contents downloaded.
  self.CallFlow("FileFinder", paths=path_list,
                conditions=[file_finder_condition],
                action=rdfvalue.FileFinderAction(), pathtype=pathtype,
                request_data={"artifact_name": self.current_artifact_name,
                              "collector": collector.ToPrimitiveDict()},
                next_state="ProcessCollected")
def GetRegistryValue(self, collector):
  """Retrieve directly specified registry values, returning Stat objects.

  Args:
    collector: Collector whose args contain a "path_list" of registry value
      paths (knowledge base attributes are interpolated).
  """
  # Use a set to de-duplicate paths that interpolate to the same value.
  new_paths = set()
  for path in collector.args["path_list"]:
    expanded_paths = artifact_lib.InterpolateKbAttributes(
        path, self.state.knowledge_base)
    new_paths.update(expanded_paths)

  for new_path in new_paths:
    pathspec = rdfvalue.PathSpec(path=new_path,
                                 pathtype=rdfvalue.PathSpec.PathType.REGISTRY)
    self.CallClient(
        "StatFile", pathspec=pathspec,
        request_data={"artifact_name": self.current_artifact_name,
                      "collector": collector.ToPrimitiveDict()},
        next_state="ProcessCollected"
    )
def CollectArtifacts(self, collector):
  """Recursively collect the artifacts named in the collector's args.

  Results come back through the common ProcessCollected state.
  """
  self.CallFlow(
      "ArtifactCollectorFlow", artifact_list=collector.args["artifact_list"],
      use_tsk=self.args.use_tsk,
      store_results_in_aff4=False,
      request_data={"artifact_name": self.current_artifact_name,
                    "collector": collector.ToPrimitiveDict()},
      next_state="ProcessCollected"
  )
def CollectArtifactFiles(self, collector):
  """Collect files from artifact pathspecs.

  Same recursion as CollectArtifacts, but results are routed to
  ProcessCollectedArtifactFiles, which downloads the files the sub-flow's
  responses point at.
  """
  self.CallFlow(
      "ArtifactCollectorFlow", artifact_list=collector.args["artifact_list"],
      use_tsk=self.args.use_tsk,
      store_results_in_aff4=False,
      request_data={"artifact_name": self.current_artifact_name,
                    "collector": collector.ToPrimitiveDict()},
      next_state="ProcessCollectedArtifactFiles"
  )
def RunCommand(self, collector):
  """Run a command.

  Args:
    collector: Collector whose args contain "cmd" and optionally "args".
  """
  self.CallClient("ExecuteCommand", cmd=collector.args["cmd"],
                  args=collector.args.get("args", {}),
                  request_data={"artifact_name": self.current_artifact_name,
                                "collector": collector.ToPrimitiveDict()},
                  next_state="ProcessCollected")
def WMIQuery(self, collector):
  """Run a Windows WMI Query.

  The query template may interpolate into multiple concrete queries (one
  per knowledge base attribute expansion); each is issued separately.
  """
  query = collector.args["query"]
  queries = artifact_lib.InterpolateKbAttributes(query,
                                                 self.state.knowledge_base)
  for query in queries:
    self.CallClient(
        "WmiQuery", query=query,
        request_data={"artifact_name": self.current_artifact_name,
                      "collector": collector.ToPrimitiveDict()},
        next_state="ProcessCollected"
    )
def RekallPlugin(self, collector):
  """Run the Rekall memory-analysis plugin named in the collector's args."""
  request = rdfvalue.RekallRequest()
  request.plugins = [
      # Single plugin request built from the collector's plugin name/args.
      rdfvalue.PluginRequest(
          plugin=collector.args["plugin"],
          args=collector.args.get("args", {}))]

  self.CallFlow(
      "AnalyzeClientMemory", request=request,
      request_data={"artifact_name": self.current_artifact_name,
                    "rekall_plugin": collector.args["plugin"],
                    "collector": collector.ToPrimitiveDict()},
      next_state="ProcessCollected"
  )
def _GetSingleExpansion(self, value):
  """Interpolate value and require exactly one expansion result.

  Args:
    value: String that may contain knowledge base attribute references.

  Returns:
    The single interpolated string.

  Raises:
    ValueError: If interpolation yields more than one result.
  """
  results = list(artifact_lib.InterpolateKbAttributes(
      value, self.state.knowledge_base))
  if len(results) > 1:
    raise ValueError("Interpolation generated multiple results, use a"
                     " list for multi-value expansions. %s yielded: %s" %
                     (value, results))
  # NOTE(review): an empty expansion would raise IndexError here -- confirm
  # InterpolateKbAttributes always yields at least one result.
  return results[0]
def InterpolateDict(self, input_dict):
  """Interpolate all items from a dict.

  Args:
    input_dict: dict to interpolate

  Returns:
    original dict with all string values interpolated
  """
  new_args = {}
  for key, value in input_dict.items():
    if isinstance(value, basestring):
      new_args[key] = self._GetSingleExpansion(value)
    elif isinstance(value, list):
      new_args[key] = self.InterpolateList(value)
    else:
      # Non-string, non-list values pass through untouched.
      new_args[key] = value
  return new_args
def InterpolateList(self, input_list):
  """Interpolate all items from a given collector array.

  Args:
    input_list: list of values to interpolate

  Returns:
    original list of values extended with strings interpolated
  """
  new_args = []
  for value in input_list:
    if isinstance(value, basestring):
      # A single template can expand to several concrete values.
      results = list(artifact_lib.InterpolateKbAttributes(
          value, self.state.knowledge_base))
      new_args.extend(results)
    else:
      # NOTE(review): extend() flattens iterable non-string values and
      # would raise TypeError on non-iterables -- confirm callers only pass
      # strings or iterables here.
      new_args.extend(value)
  return new_args
def RunGrrClientAction(self, collector):
  """Call a GRR Client Action.

  The action name comes from collector.args["client_action"]; any
  "action_args" are interpolated against the knowledge base and passed as
  keyword arguments.
  """
  self.CallClient(
      collector.args["client_action"],
      request_data={"artifact_name": self.current_artifact_name,
                    "collector": collector.ToPrimitiveDict()},
      next_state="ProcessCollected",
      **self.InterpolateDict(collector.args.get("action_args", {})))
def CallFallback(self, artifact_name, request_data):
  """Invoke a registered fallback flow for a failed artifact, at most once.

  Args:
    artifact_name: Name of the artifact whose collection failed.
    request_data: The request_data from the failed responses.

  Returns:
    True if a fallback flow was (or had already been) scheduled for this
    artifact, False if no fallback class covers it.
  """
  classes = artifact.ArtifactFallbackCollector.classes.items()
  for clsname, fallback_class in classes:

    # NOTE(review): aff4.issubclass appears to be a project wrapper around
    # the builtin issubclass -- confirm; it filters out unrelated entries
    # in the class registry.
    if not aff4.issubclass(fallback_class,
                           artifact.ArtifactFallbackCollector):
      continue

    if artifact_name in fallback_class.artifacts:
      if artifact_name in self.state.called_fallbacks:
        self.Log("Already called fallback class %s for artifact: %s",
                 clsname, artifact_name)
      else:
        self.Log("Calling fallback class %s for artifact: %s",
                 clsname, artifact_name)

        self.CallFlow(clsname, request_data=request_data.ToDict(),
                      artifact_name=artifact_name,
                      next_state="ProcessCollected")

        # Make sure we only try this once
        self.state.called_fallbacks.add(artifact_name)
        return True
  return False
@flow.StateHandler(next_state=["ProcessCollected", "End"])
def ProcessCollected(self, responses):
  """Each individual collector will call back into here.

  Args:
    responses: Responses from the collection.

  Raises:
    artifact_lib.ArtifactDefinitionError: On bad definition.
    artifact_lib.ArtifactProcessingError: On failure to process.
  """
  flow_name = self.__class__.__name__
  artifact_name = responses.request_data["artifact_name"]
  collector = responses.request_data.GetItem("collector", None)

  if responses.success:
    self.Log("Artifact data collection %s completed successfully in flow %s "
             "with %d responses", artifact_name, flow_name,
             len(responses))
  else:
    self.Log("Artifact %s data collection failed. Status: %s.",
             artifact_name, responses.status)
    # Give a registered fallback flow a chance before recording failure.
    if not self.CallFallback(artifact_name, responses.request_data):
      self.state.failed_count += 1
      self.state.artifacts_failed.append(artifact_name)
    return

  # Initialize some local non-state saved variables for processing.
  if not hasattr(self, "output_collection_map"):
    self.output_collection_map = {}

  if not hasattr(self, "aff4_output_map"):
    self.aff4_output_map = {}

  # Now process the responses.
  processors = parsers.Parser.GetClassesByArtifact(artifact_name)
  saved_responses = {}
  for response in responses:
    if processors and self.args.apply_parsers:
      for processor in processors:
        processor_obj = processor()
        if processor_obj.process_together:
          # Store the response until we have them all.
          saved_responses.setdefault(processor.__name__, []).append(response)
        else:
          # Process the response immediately
          self._ParseResponses(processor_obj, response, responses,
                               artifact_name, collector)
    else:
      # We don't have any defined processors for this artifact.
      self._ParseResponses(None, response, responses, artifact_name,
                           collector)

  # If we were saving responses, process them now:
  for processor_name, responses_list in saved_responses.items():
    processor_obj = parsers.Parser.classes[processor_name]()
    self._ParseResponses(processor_obj, responses_list, responses,
                         artifact_name, collector)

  # Flush the results to the objects.
  if self.args.split_output_by_artifact:
    self._FinalizeSplitCollection()
  if self.args.store_results_in_aff4:
    self._FinalizeMappedAFF4Locations(artifact_name)
  if self.state.client_anomaly_store:
    self.state.client_anomaly_store.Flush()
@flow.StateHandler(next_state="ProcessCollected")
def ProcessCollectedArtifactFiles(self, responses):
  """Schedule files for download based on pathspec attribute.

  Args:
    responses: Response objects from the artifact collector.

  Raises:
    RuntimeError: if pathspec value is not a PathSpec instance and not
                  a basestring.
  """
  self.download_list = []
  collector = responses.request_data.GetItem("collector")
  # Optional name of the response field that carries the pathspec.
  pathspec_attribute = collector["args"].get("pathspec_attribute", None)

  for response in responses:
    if pathspec_attribute:
      if response.HasField(pathspec_attribute):
        pathspec = response.Get(pathspec_attribute)
      else:
        self.Log("Missing pathspec field %s: %s", pathspec_attribute,
                 response)
        continue
    else:
      pathspec = response

    # Check the default .pathspec attribute.
    if not isinstance(pathspec, rdfvalue.PathSpec):
      try:
        pathspec = response.pathspec
      except AttributeError:
        pass

    if isinstance(pathspec, basestring):
      pathspec = rdfvalue.PathSpec(path=pathspec)
      if self.args.use_tsk:
        pathspec.pathtype = rdfvalue.PathSpec.PathType.TSK
      else:
        pathspec.pathtype = rdfvalue.PathSpec.PathType.OS
      self.download_list.append(pathspec)

    elif isinstance(pathspec, rdfvalue.PathSpec):
      self.download_list.append(pathspec)

    else:
      raise RuntimeError(
          "Response must be a string path, a pathspec, or have "
          "pathspec_attribute set. Got: %s" % pathspec)

  if self.download_list:
    request_data = responses.request_data.ToDict()
    self.CallFlow("MultiGetFile", pathspecs=self.download_list,
                  request_data=request_data,
                  next_state="ProcessCollected")
  else:
    self.Log("No files to download")
def _GetArtifactReturnTypes(self, collector):
"""Get a list of types we expect to handle from our responses."""
if collector:
return collector["returned_types"]
def _ProcessAnomaly(self, anomaly_value):
  """Write anomalies to the client in the data store."""
  # Lazily create the per-client anomaly collection on first anomaly.
  if not self.state.client_anomaly_store:
    self.state.client_anomaly_store = aff4.FACTORY.Create(
        self.client_id.Add("anomalies"), "RDFValueCollection",
        token=self.token, mode="rw")
  self.state.client_anomaly_store.Add(anomaly_value)
def _ParseResponses(self, processor_obj, responses, responses_obj,
                    artifact_name, collector):
  """Create a result parser sending different arguments for diff parsers.

  Args:
    processor_obj: A Processor object that inherits from Parser.
    responses: A list of, or single response depending on the processors
       process_together setting.
    responses_obj: The responses object itself.
    artifact_name: Name of the artifact that generated the responses.
    collector: The collector responsible for producing the responses.

  Raises:
    RuntimeError: On bad parser.
  """
  _ = responses_obj
  if not processor_obj:
    # We don't do any parsing, the results are raw as they came back.
    # If this is an RDFValue we don't want to unpack it further
    if isinstance(responses, rdfvalue.RDFValue):
      result_iterator = [responses]
    else:
      result_iterator = responses

  else:
    # We have some processors to run.
    if processor_obj.process_together:
      # We are processing things in a group which requires specialized
      # handling by the parser. This is used when multiple responses need to
      # be combined to parse successfully. E.g parsing passwd and shadow files
      # together.
      parse_method = processor_obj.ParseMultiple
    else:
      parse_method = processor_obj.Parse

    # Each parser family takes a different argument signature; dispatch on
    # the processor's concrete type.
    if isinstance(processor_obj, parsers.CommandParser):
      # Command processor only supports one response at a time.
      response = responses
      result_iterator = parse_method(
          cmd=response.request.cmd,
          args=response.request.args,
          stdout=response.stdout,
          stderr=response.stderr,
          return_val=response.exit_status,
          time_taken=response.time_used,
          knowledge_base=self.state.knowledge_base)

    elif isinstance(processor_obj, parsers.WMIQueryParser):
      query = collector["args"]["query"]
      result_iterator = parse_method(query, responses,
                                     self.state.knowledge_base)

    elif isinstance(processor_obj, parsers.FileParser):
      if processor_obj.process_together:
        file_objects = [aff4.FACTORY.Open(r.aff4path, token=self.token)
                        for r in responses]
        result_iterator = parse_method(responses, file_objects,
                                       self.state.knowledge_base)
      else:
        fd = aff4.FACTORY.Open(responses.aff4path,
                               token=self.token)
        result_iterator = parse_method(responses, fd,
                                       self.state.knowledge_base)

    elif isinstance(processor_obj, (parsers.RegistryParser,
                                    parsers.RekallPluginParser,
                                    parsers.RegistryValueParser,
                                    parsers.GenericResponseParser,
                                    parsers.GrepParser)):
      result_iterator = parse_method(responses, self.state.knowledge_base)

    elif isinstance(processor_obj, (parsers.ArtifactFilesParser)):
      result_iterator = parse_method(responses, self.state.knowledge_base,
                                     self.state.path_type)

    else:
      raise RuntimeError("Unsupported parser detected %s" % processor_obj)

  artifact_return_types = self._GetArtifactReturnTypes(collector)

  if result_iterator:
    # If we have a parser, do something with the results it produces.
    for result in result_iterator:
      result_type = result.__class__.__name__
      if result_type == "Anomaly":
        # Anomalies are special results and get handled separately.
        self._ProcessAnomaly(result)
      elif not artifact_return_types or result_type in artifact_return_types:
        self.state.response_count += 1
        self.SendReply(result)
        self._WriteResultToSplitCollection(result, artifact_name)
        if self.args.store_results_in_aff4:
          # Write our result back to a mapped location in AFF4 space.
          self._WriteResultToMappedAFF4Location(result)
def _WriteResultToSplitCollection(self, result, artifact_name):
  """Write any results to the collection if we are splitting by artifact.

  If not splitting, SendReply will handle writing to the collection.

  Args:
    result: result to write
    artifact_name: artifact name string
  """
  if self.args.split_output_by_artifact:
    if (self.runner.output is not None and
        artifact_name not in self.output_collection_map):

      # Create the new collections in the same directory but not as children,
      # so they are visible in the GUI
      urn = "_".join((str(self.runner.output.urn), artifact_name))
      collection = aff4.FACTORY.Create(urn, "RDFValueCollection", mode="rw",
                                       token=self.token)
      # Cache the opened object.
      self.output_collection_map[artifact_name] = collection
    self.output_collection_map[artifact_name].Add(result)
def _FinalizeSplitCollection(self):
"""Finalize writes to the Collection."""
total = 0
for artifact_name, collection in self.output_collection_map.iteritems():
total += len(collection)
collection.Flush()
self.Log("Wrote results from Artifact %s to %s. Collection size %d.",
artifact_name, collection.urn, total)
def _WriteResultToMappedAFF4Location(self, result):
  """If we have a mapping for this result type, write it there."""
  result_type = result.__class__.__name__
  if result_type not in self.aff4_output_map:
    aff4_obj, aff4_attr, operator = (
        self.GetAFF4PathForArtifactResponses(result_type))
    # Cache the opened object.
    self.aff4_output_map[result_type] = (aff4_obj, aff4_attr, operator)
  else:
    aff4_obj, aff4_attr, operator = self.aff4_output_map[result_type]

  if operator == "Append":
    aff4_attr.Append(result)
  elif operator == "Overwrite":
    # We set for each new value, overwriting older ones.
    # NOTE(review): this branch never incorporates `result` into aff4_attr
    # before Set() -- confirm whether the result value is meant to be
    # written here.
    aff4_obj.Set(aff4_attr)
  else:
    raise RuntimeError("Bad RDFMap writing method")
def _FinalizeMappedAFF4Locations(self, artifact_name):
  """Flush every cached AFF4 object that received mapped results."""
  for aff4_obj, aff4_attr, operator in self.aff4_output_map.values():
    if operator == "Append":
      # For any objects we appended to, we need to do the set now as the new
      # attributes aren't assigned to the AFF4 object yet.
      aff4_obj.Set(aff4_attr)
    aff4_obj.Flush()
    self.Log("Wrote Artifact %s results to %s on %s", artifact_name,
             aff4_obj.urn, aff4_attr.__class__.__name__)
def GetAFF4PathForArtifactResponses(self, output_type):
  """Use the RDFValue type to find where in AFF4 space to write results.

  Args:
    output_type: The name of a SemanticValue type.

  Returns:
    A tuple of (aff4 object, attribute, operator)

  Raises:
    ArtifactProcessingError: If there is no defined mapping.
  """

  rdf_type = artifact.GRRArtifactMappings.rdf_map.get(output_type)
  if rdf_type is None:
    raise artifact_lib.ArtifactProcessingError(
        "No defined RDF type for %s. See the description for "
        " the store_results_in_aff4 option, you probably want it set to "
        "false. Supported types are: %s" %
        (output_type, artifact.GRRArtifactMappings.rdf_map.keys()))

  # Example mapping entry:
  # "info/software", "InstalledSoftwarePackages", "INSTALLED_PACKAGES",
  # "Append"
  relative_path, aff4_type, aff4_attribute, operator = rdf_type

  urn = self.client_id.Add(relative_path)
  try:
    result_object = aff4.FACTORY.Open(urn, aff4_type=aff4_type, mode="w",
                                      token=self.token)
  except IOError as e:
    raise artifact_lib.ArtifactProcessingError(
        "Failed to open result object for type %s. %s" % (output_type, e))

  result_attr = getattr(result_object.Schema, aff4_attribute)()
  if not isinstance(result_attr, rdfvalue.RDFValue):
    raise artifact_lib.ArtifactProcessingError(
        "Failed to get attribute %s for output type %s" %
        (aff4_attribute, output_type))

  return result_object, result_attr, operator
def _GetArtifactFromName(self, name):
  """Get an artifact class from the cache in the flow.

  If the name is unknown, the registry is refreshed once from the
  datastore before giving up.
  """
  if name not in artifact_lib.ArtifactRegistry.artifacts:
    # If we don't have an artifact, things shouldn't have passed validation
    # so we assume its a new one in the datastore.
    artifact.LoadArtifactsFromDatastore(token=self.token)
    if name not in artifact_lib.ArtifactRegistry.artifacts:
      raise RuntimeError("ArtifactCollectorFlow failed due to unknown "
                         "Artifact %s" % name)
  return artifact_lib.ArtifactRegistry.artifacts[name]
@flow.StateHandler()
def End(self):
  """Final state: fail on zero responses if requested, then notify."""
  # If we got no responses, and user asked for it, we error out.
  if self.args.on_no_results_error and self.state.response_count == 0:
    raise artifact_lib.ArtifactProcessingError("Artifact collector returned "
                                               "0 responses.")
  # Prefer linking the notification to the output collection when one
  # exists, otherwise to the client itself.
  if self.runner.output is not None:
    urn = self.runner.output.urn
  else:
    urn = self.client_id
  self.Notify("ViewObject", urn,
              "Completed artifact collection of %s. Collected %d. Errors %d."
              % (self.args.artifact_list, self.state.response_count,
                 self.state.failed_count))
| 40.921816 | 80 | 0.669286 |
38843947bb127403d90cd881f92aded85f9c6458 | 1,394 | py | Python | renmin/renmin/pipelines.py | dumin199101/Scrapy-Project | b4135beb73a2c5aad1728f747c1856266af649dd | [
"Apache-2.0"
] | 1 | 2019-07-11T03:28:25.000Z | 2019-07-11T03:28:25.000Z | renmin/renmin/pipelines.py | dumin199101/Scrapy-Project | b4135beb73a2c5aad1728f747c1856266af649dd | [
"Apache-2.0"
] | null | null | null | renmin/renmin/pipelines.py | dumin199101/Scrapy-Project | b4135beb73a2c5aad1728f747c1856266af649dd | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import codecs
import json
import pymysql
class RenminPipeline(object):
    """Scrapy item pipeline that stores scraped news items in MySQL.

    An earlier revision (kept commented out below) wrote items to a JSON
    file instead.
    """

    # 1. Write data to a JSON file (previous implementation):
    # def __init__(self):
    #     # Open a write-only file using utf-8 text encoding.
    #     self.filename = codecs.open('renmin.json', 'w', encoding='utf-8')
    #
    # def process_item(self, item, spider):
    #     content = json.dumps(dict(item), ensure_ascii=False) + "\n"
    #     self.filename.write(content)
    #     return item
    #
    # def spider_closed(self, spider):
    #     self.filename.close()

    # 2. Write data to a MySQL database.
    def __init__(self):
        # SECURITY: credentials are hard-coded; move them into Scrapy
        # settings or environment variables before deploying.
        self.conn = pymysql.connect(host='192.168.1.129', user='root',
                                    passwd='2e31685493', port=3306,
                                    db='maocuhui')
        # Guard so close_spider works even if no item was ever processed.
        self.cur = None

    def process_item(self, item, spider):
        """Insert one news item into tb_comp_news and pass the item on."""
        self.cur = self.conn.cursor()
        sql = (
            "INSERT INTO `tb_comp_news`(`v_title`,`v_source`,`v_publish_time`,`v_desc`,`n_nav_id`,`v_editor`,`v_link`)"
            "VALUES (%s,%s,%s,%s,%s,%s,%s)")
        lis = (item['title'], item['source'], item['pubdate'], item['content'],
               '20', item['editor'], item['link'])
        self.cur.execute(sql, lis)
        self.conn.commit()
        return item

    def close_spider(self, spider):
        """Release the cursor and connection when the spider finishes.

        FIX: Scrapy invokes ``close_spider`` on item pipelines; the original
        method was named ``spider_closed`` (a signal name) and was therefore
        never called, leaking the database connection.
        """
        if self.cur is not None:
            self.cur.close()
        self.conn.close()
d1142e294c2da2f32fde9900f5b85be9ec93cfe4 | 1,576 | py | Python | scripts/show_states.py | andrewjunyoung/symboard | 51591c0aaac8b5394fde8d97470bf584437a3842 | [
"MIT"
] | 5 | 2020-02-09T21:56:26.000Z | 2021-11-17T10:41:03.000Z | scripts/show_states.py | andrewjunyoung/symboard | 51591c0aaac8b5394fde8d97470bf584437a3842 | [
"MIT"
] | 2 | 2020-03-24T18:11:14.000Z | 2020-03-31T10:54:02.000Z | scripts/show_states.py | andrewjunyoung/symboard | 51591c0aaac8b5394fde8d97470bf584437a3842 | [
"MIT"
] | 1 | 2021-11-27T14:02:01.000Z | 2021-11-27T14:02:01.000Z | # Imports from third party packages.
from argparse import ArgumentParser
from tabulate import tabulate
from typing import List
import logging
# Imports from the local package.
from symboard.states import load_yaml
def get_arg_parser() -> ArgumentParser:
    """Build the command line argument parser for this script.

    Returns:
        ArgumentParser: An ArgumentParser instance which will parse the
            arguments provided to Symboard when executed from the command
            line.
    """
    # FIX: the original adjacent string literals were missing a space,
    # producing "...inside thestates directory" in the --help text.
    parser = ArgumentParser(
        description='Show all the states available to your program inside '
                    'the states directory, and some info about each of '
                    'these states.'
    )
    return parser
def format_outputs(state):
    """Return all outputs of *state* joined into one string.

    Returns:
        str: The concatenation of every value in the given state's
            action_to_output_map, in mapping order.
    """
    output_map = state.action_to_output_map
    return ''.join(output_map[action] for action in output_map)
def main() -> None:
    """ The main method (entry point) for the script. This function parses the
    input arguments, and manages the core code logic using these arguments.
    """
    # FIX: dropped pointless f-string prefixes (the messages contain no
    # placeholders) and the unused `args` local.
    logging.info('Parsing command line arguments.')
    arg_parser: ArgumentParser = get_arg_parser()
    # No flags are defined yet; parsing still provides --help and rejects
    # unexpected arguments.
    arg_parser.parse_args()

    logging.info('Collecting states from states directory.')
    states: dict = load_yaml()

    headers: List[str] = ['Name', 'Terminator', 'Outputs']
    data: List[list] = [
        [state.name, state.terminator, format_outputs(state)]
        for state in states.values()
    ]

    print(tabulate(data, headers=headers, tablefmt='orgtbl'))
# Script entry point: run only when executed directly, not when imported.
if __name__ == '__main__':
    main()
| 26.711864 | 80 | 0.686548 |
fc462e14150472acf9ae11e4322cacb78c89baf2 | 10,835 | py | Python | marketplaces/apps/market/migrations/0013_auto__add_field_marketsubcategory_image__add_field_marketcategory_imag.py | diassor/CollectorCity-Market-Place | 892ad220b8cf1c0fc7433f625213fe61729522b2 | [
"Apache-2.0"
] | 135 | 2015-03-19T13:28:18.000Z | 2022-03-27T06:41:42.000Z | marketplaces/apps/market/migrations/0013_auto__add_field_marketsubcategory_image__add_field_marketcategory_imag.py | dfcoding/CollectorCity-Market-Place | e59acec3d600c049323397b17cae14fdcaaaec07 | [
"Apache-2.0"
] | null | null | null | marketplaces/apps/market/migrations/0013_auto__add_field_marketsubcategory_image__add_field_marketcategory_imag.py | dfcoding/CollectorCity-Market-Place | e59acec3d600c049323397b17cae14fdcaaaec07 | [
"Apache-2.0"
] | 83 | 2015-01-30T01:00:15.000Z | 2022-03-08T17:25:10.000Z | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
    """Apply the migration: add nullable `image` columns to both tables."""
    # Adding field 'MarketSubCategory.image'
    db.add_column('market_marketsubcategory', 'image', self.gf('core.thumbs.ImageWithThumbsField')(max_length=100, null=True, blank=True), keep_default=False)

    # Adding field 'MarketCategory.image'
    db.add_column('market_marketcategory', 'image', self.gf('core.thumbs.ImageWithThumbsField')(max_length=100, null=True, blank=True), keep_default=False)
# Deleting field 'MarketSubCategory.image'
db.delete_column('market_marketsubcategory', 'image')
# Deleting field 'MarketCategory.image'
db.delete_column('market_marketcategory', 'image')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'market.contactforminfo': {
'Meta': {'object_name': 'ContactFormInfo'},
'datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '64', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'market.marketblogpost': {
'Meta': {'object_name': 'MarketBlogPost'},
'allow_comments': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'body': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'marketplace': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['market.MarketPlace']"}),
'posted_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'posted_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '80', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'views': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'market.marketcategory': {
'Meta': {'object_name': 'MarketCategory'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('core.thumbs.ImageWithThumbsField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'marketplace': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['market.MarketPlace']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '60', 'db_index': 'True'})
},
'market.marketmailinglistmember': {
'Meta': {'object_name': 'MarketMailingListMember'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'marketplace': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['market.MarketPlace']"})
},
'market.marketplace': {
'Meta': {'object_name': 'MarketPlace'},
'base_domain': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'charge_on_card_as': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255'}),
'contact_email': ('django.db.models.fields.EmailField', [], {'default': "'contact@yourstore.com'", 'max_length': '75'}),
'contact_phone': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '92'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '92', 'db_index': 'True'}),
'template_prefix': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '92', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '92'})
},
'market.marketpostcategory': {
'Meta': {'object_name': 'MarketPostCategory'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '80', 'db_index': 'True'}),
'tag': ('django.db.models.fields.CharField', [], {'max_length': '80'})
},
'market.marketpostcomment': {
'Meta': {'object_name': 'MarketPostComment'},
'comment': ('django.db.models.fields.TextField', [], {}),
'commented_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'post': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['market.MarketBlogPost']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'market.marketpostpick': {
'Meta': {'object_name': 'MarketPostPick'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'marketplace': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['market.MarketPlace']"}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
'post': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['market.MarketBlogPost']"})
},
'market.marketsubcategory': {
'Meta': {'unique_together': "(('parent', 'slug'),)", 'object_name': 'MarketSubCategory'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('core.thumbs.ImageWithThumbsField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'marketplace': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['market.MarketPlace']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '255'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'subcategories'", 'null': 'True', 'to': "orm['market.MarketCategory']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '60', 'db_index': 'True'})
},
'market.privacypolicy': {
'Meta': {'object_name': 'PrivacyPolicy'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'marketplace': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['market.MarketPlace']"}),
'text': ('django.db.models.fields.TextField', [], {'default': "''"})
},
'market.termsandconditions': {
'Meta': {'object_name': 'TermsAndConditions'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'marketplace': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['market.MarketPlace']"}),
'text': ('django.db.models.fields.TextField', [], {'default': "''"})
}
}
complete_apps = ['market']
| 69.455128 | 181 | 0.563359 |
2a1b63f0a2a9a6e0c4058ada0a02adbf1ae1e953 | 957 | py | Python | partnerships/forms.py | cforlando/intake | a5233d5c0f862f28ee265b9b4831405aabeec7e2 | [
"MIT"
] | 51 | 2016-07-20T02:26:57.000Z | 2021-07-07T14:45:06.000Z | partnerships/forms.py | enterstudio/intake | 793a8935914fdc8356321ec46e54d9ae1eeeee04 | [
"MIT"
] | 1,091 | 2016-04-29T18:07:45.000Z | 2021-04-19T18:39:39.000Z | partnerships/forms.py | enterstudio/intake | 793a8935914fdc8356321ec46e54d9ae1eeeee04 | [
"MIT"
] | 24 | 2016-06-14T18:10:43.000Z | 2021-11-14T20:26:39.000Z | from django.forms import ModelForm, widgets
from partnerships.models import PartnershipLead
def with_classes(widget_class, *classes, **kwargs):
    """Instantiate *widget_class*, merging *classes* into ``attrs['class']``.

    Any CSS classes already present in ``kwargs['attrs']['class']`` are
    preserved and the new classes are appended, space-separated.

    Returns the constructed widget instance.
    """
    attrs = kwargs.get('attrs', {})
    existing = attrs.get('class', '')
    # Join with a single space. The original code concatenated the new
    # classes directly onto the existing value ("xtext-input ..."),
    # corrupting the class string whenever one was already set.
    attrs['class'] = ' '.join(filter(None, [existing, ' '.join(classes)]))
    kwargs['attrs'] = attrs
    return widget_class(**kwargs)
class PotentialPartnerLeadForm(ModelForm):
    """ModelForm capturing inbound partnership-interest leads."""

    class Meta:
        model = PartnershipLead
        fields = ['name', 'email', 'organization_name', 'message']
        # Attach the site's CSS classes to each bound widget.
        widgets = {
            field_name: with_classes(widget_cls, *css_classes)
            for field_name, widget_cls, css_classes in [
                ('email', widgets.EmailInput,
                 ('text-input', 'form-width--long')),
                ('name', widgets.TextInput,
                 ('text-input', 'form-width--long')),
                ('organization_name', widgets.TextInput,
                 ('text-input', 'form-width--long')),
                ('message', widgets.Textarea, ('textarea',)),
            ]
        }
| 33 | 70 | 0.600836 |
657e45fe30c9a949aee6ae7d09eb7bd7c4b7c70d | 1,934 | py | Python | saver.py | Kirkados/DeepCube | cc6b8d402a47ea0c7ecde8e4c5ed2d361c539ad5 | [
"MIT"
] | 3 | 2019-01-10T01:06:04.000Z | 2021-10-31T18:19:50.000Z | saver.py | Kirkados/DeepCube | cc6b8d402a47ea0c7ecde8e4c5ed2d361c539ad5 | [
"MIT"
] | null | null | null | saver.py | Kirkados/DeepCube | cc6b8d402a47ea0c7ecde8e4c5ed2d361c539ad5 | [
"MIT"
] | null | null | null | """
This script saves and loads neural network parameters
"""
import os
import tensorflow as tf
from settings import Settings
class Saver:
    """Thin wrapper around ``tf.train.Saver`` for checkpointing a session.

    Checkpoints are written under ``Settings.MODEL_SAVE_DIRECTORY``, using
    the run's filename to namespace the checkpoint files.
    """

    def __init__(self, sess, filename):
        self.sess = sess
        self.filename = filename

    def save(self, n_iteration):
        """Write all session variables to a checkpoint tagged with n_iteration."""
        print(f"Saving neural networks at iteration number {n_iteration}...")
        checkpoint_prefix = Settings.MODEL_SAVE_DIRECTORY + self.filename
        os.makedirs(os.path.dirname(checkpoint_prefix), exist_ok=True)
        checkpoint_path = checkpoint_prefix + "/Episode_" + str(n_iteration) + ".ckpt"
        self.saver.save(self.sess, checkpoint_path)

    def load(self):
        """Create the Saver and, if configured, restore the latest checkpoint.

        Returns True when a previously-trained model was restored,
        False otherwise (including when resuming is disabled).
        """
        self.saver = tf.train.Saver()  # initialize the tensorflow Saver()
        if not Settings.RESUME_TRAINING:
            return False
        print("Attempting to load in previously-trained model")
        try:
            ckpt = tf.train.get_checkpoint_state(Settings.MODEL_SAVE_DIRECTORY + Settings.RUN_NAME)
            self.saver.restore(self.sess, ckpt.model_checkpoint_path)
        except (ValueError, AttributeError):
            # get_checkpoint_state returns None when no checkpoint exists
            # (AttributeError on .model_checkpoint_path); restore raises
            # ValueError when the checkpoint is unusable.
            print("No model found... :(")
            return False
        print("Model successfully loaded!")
        return True

    def initialize(self):
        """Create a fresh Saver without attempting to restore any weights."""
        self.saver = tf.train.Saver()
11d430a65df017caf3e08db3c96ef3215a6e801a | 11,736 | py | Python | virtual/lib/python3.6/site-packages/astroid/arguments.py | edithamadi/pitch_one | 40c8d1c67c77e483b29bd326721dde7f4a20120d | [
"Unlicense"
] | 3 | 2018-10-21T14:01:01.000Z | 2018-10-22T14:42:22.000Z | virtual/lib/python3.6/site-packages/astroid/arguments.py | edithamadi/pitch_one | 40c8d1c67c77e483b29bd326721dde7f4a20120d | [
"Unlicense"
] | 12 | 2018-10-03T19:45:36.000Z | 2022-03-11T23:54:25.000Z | virtual/lib/python3.6/site-packages/astroid/arguments.py | edithamadi/pitch_one | 40c8d1c67c77e483b29bd326721dde7f4a20120d | [
"Unlicense"
] | 3 | 2020-01-19T21:26:14.000Z | 2020-11-04T08:37:38.000Z | # Copyright (c) 2015-2016, 2018 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2015-2016 Ceridwen <ceridwenv@gmail.com>
# Copyright (c) 2018 Bryce Guinta <bryce.paul.guinta@gmail.com>
# Copyright (c) 2018 Nick Drozd <nicholasdrozd@gmail.com>
# Copyright (c) 2018 Anthony Sottile <asottile@umich.edu>
# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER
from astroid import bases
from astroid import context as contextmod
from astroid import exceptions
from astroid import nodes
from astroid import util
class CallSite:
    """Class for understanding arguments passed into a call site

    It needs a call context, which contains the arguments and the
    keyword arguments that were passed into a given call site.
    In order to infer what an argument represents, call
    :meth:`infer_argument` with the corresponding function node
    and the argument name.
    """

    def __init__(self, callcontext, argument_context_map=None):
        if argument_context_map is None:
            argument_context_map = {}
        self.argument_context_map = argument_context_map
        args = callcontext.args
        keywords = callcontext.keywords
        # Keyword names that appeared more than once across **-unpackings;
        # inferring such an argument is an error (see infer_argument).
        self.duplicated_keywords = set()
        # Raw unpacked arguments; entries that could not be inferred are
        # kept as util.Uninferable markers.
        self._unpacked_args = self._unpack_args(args)
        self._unpacked_kwargs = self._unpack_keywords(keywords)
        # Filtered views with the Uninferable markers removed.
        self.positional_arguments = [
            arg for arg in self._unpacked_args
            if arg is not util.Uninferable
        ]
        self.keyword_arguments = {
            key: value for key, value in self._unpacked_kwargs.items()
            if value is not util.Uninferable
        }

    @classmethod
    def from_call(cls, call_node):
        """Get a CallSite object from the given Call node."""
        callcontext = contextmod.CallContext(call_node.args,
                                             call_node.keywords)
        return cls(callcontext)

    def has_invalid_arguments(self):
        """Check if in the current CallSite were passed *invalid* arguments

        This can mean multiple things. For instance, if an unpacking
        of an invalid object was passed, then this method will return True.
        Other cases can be when the arguments can't be inferred by astroid,
        for example, by passing objects which aren't known statically.
        """
        # Any Uninferable entry was filtered out of positional_arguments,
        # so a length mismatch means at least one argument was invalid.
        return len(self.positional_arguments) != len(self._unpacked_args)

    def has_invalid_keywords(self):
        """Check if in the current CallSite were passed *invalid* keyword arguments

        For instance, unpacking a dictionary with integer keys is invalid
        (**{1:2}), because the keys must be strings, which will make this
        method to return True. Other cases where this might return True if
        objects which can't be inferred were passed.
        """
        return len(self.keyword_arguments) != len(self._unpacked_kwargs)

    def _unpack_keywords(self, keywords):
        # Map keyword name -> value node, resolving **-unpackings.
        # Invalid entries are recorded as util.Uninferable.
        values = {}
        context = contextmod.InferenceContext()
        context.extra_context = self.argument_context_map
        for name, value in keywords:
            if name is None:
                # Then it's an unpacking operation (**)
                try:
                    inferred = next(value.infer(context=context))
                except exceptions.InferenceError:
                    values[name] = util.Uninferable
                    continue

                if not isinstance(inferred, nodes.Dict):
                    # Not something we can work with.
                    values[name] = util.Uninferable
                    continue

                for dict_key, dict_value in inferred.items:
                    try:
                        dict_key = next(dict_key.infer(context=context))
                    except exceptions.InferenceError:
                        values[name] = util.Uninferable
                        continue
                    if not isinstance(dict_key, nodes.Const):
                        values[name] = util.Uninferable
                        continue
                    if not isinstance(dict_key.value, str):
                        # **-unpacked keys must be strings to be valid
                        # keyword names.
                        values[name] = util.Uninferable
                        continue
                    if dict_key.value in values:
                        # The name is already in the dictionary
                        values[dict_key.value] = util.Uninferable
                        self.duplicated_keywords.add(dict_key.value)
                        continue
                    values[dict_key.value] = dict_value
            else:
                values[name] = value
        return values

    def _unpack_args(self, args):
        # Flatten *-unpackings into a single positional list; entries that
        # cannot be resolved to a sequence become util.Uninferable.
        values = []
        context = contextmod.InferenceContext()
        context.extra_context = self.argument_context_map
        for arg in args:
            if isinstance(arg, nodes.Starred):
                try:
                    inferred = next(arg.value.infer(context=context))
                except exceptions.InferenceError:
                    values.append(util.Uninferable)
                    continue

                if inferred is util.Uninferable:
                    values.append(util.Uninferable)
                    continue
                if not hasattr(inferred, 'elts'):
                    # Not a literal sequence node we can splice in.
                    values.append(util.Uninferable)
                    continue
                values.extend(inferred.elts)
            else:
                values.append(arg)
        return values

    def infer_argument(self, funcnode, name, context):
        """infer a function argument value according to the call context

        Arguments:
            funcnode: The function being called.
            name: The name of the argument whose value is being inferred.
            context: Inference context object
        """
        if name in self.duplicated_keywords:
            raise exceptions.InferenceError('The arguments passed to {func!r} '
                                            ' have duplicate keywords.',
                                            call_site=self, func=funcnode,
                                            arg=name, context=context)

        # Look into the keywords first, maybe it's already there.
        try:
            return self.keyword_arguments[name].infer(context)
        except KeyError:
            pass

        # Too many arguments given and no variable arguments.
        if len(self.positional_arguments) > len(funcnode.args.args):
            if not funcnode.args.vararg:
                raise exceptions.InferenceError('Too many positional arguments '
                                                'passed to {func!r} that does '
                                                'not have *args.',
                                                call_site=self, func=funcnode,
                                                arg=name, context=context)

        # Split the call's positionals into those consumed by named
        # parameters and the overflow captured by *args (if any).
        positional = self.positional_arguments[:len(funcnode.args.args)]
        vararg = self.positional_arguments[len(funcnode.args.args):]
        argindex = funcnode.args.find_argname(name)[0]
        kwonlyargs = {arg.name for arg in funcnode.args.kwonlyargs}
        kwargs = {
            key: value for key, value in self.keyword_arguments.items()
            if key not in kwonlyargs
        }
        # If there are too few positionals compared to
        # what the function expects to receive, check to see
        # if the missing positional arguments were passed
        # as keyword arguments and if so, place them into the
        # positional args list.
        if len(positional) < len(funcnode.args.args):
            for func_arg in funcnode.args.args:
                if func_arg.name in kwargs:
                    arg = kwargs.pop(func_arg.name)
                    positional.append(arg)

        if argindex is not None:
            # 2. first argument of instance/class method
            if argindex == 0 and funcnode.type in ('method', 'classmethod'):
                if context.boundnode is not None:
                    boundnode = context.boundnode
                else:
                    # XXX can do better ?
                    boundnode = funcnode.parent.frame()

                if isinstance(boundnode, nodes.ClassDef):
                    # Verify that we're accessing a method
                    # of the metaclass through a class, as in
                    # `cls.metaclass_method`. In this case, the
                    # first argument is always the class.
                    method_scope = funcnode.parent.scope()
                    if method_scope is boundnode.metaclass():
                        return iter((boundnode, ))

                if funcnode.type == 'method':
                    if not isinstance(boundnode, bases.Instance):
                        boundnode = bases.Instance(boundnode)
                    return iter((boundnode,))
                if funcnode.type == 'classmethod':
                    return iter((boundnode,))
            # if we have a method, extract one position
            # from the index, so we'll take in account
            # the extra parameter represented by `self` or `cls`
            if funcnode.type in ('method', 'classmethod'):
                argindex -= 1
            # 2. search arg index
            try:
                return self.positional_arguments[argindex].infer(context)
            except IndexError:
                pass

        if funcnode.args.kwarg == name:
            # It wants all the keywords that were passed into
            # the call site.
            if self.has_invalid_keywords():
                raise exceptions.InferenceError(
                    "Inference failed to find values for all keyword arguments "
                    "to {func!r}: {unpacked_kwargs!r} doesn't correspond to "
                    "{keyword_arguments!r}.",
                    keyword_arguments=self.keyword_arguments,
                    unpacked_kwargs=self._unpacked_kwargs,
                    call_site=self, func=funcnode, arg=name, context=context)
            # Synthesize a Dict node holding the remaining keywords.
            kwarg = nodes.Dict(lineno=funcnode.args.lineno,
                               col_offset=funcnode.args.col_offset,
                               parent=funcnode.args)
            kwarg.postinit([(nodes.const_factory(key), value)
                            for key, value in kwargs.items()])
            return iter((kwarg, ))
        if funcnode.args.vararg == name:
            # It wants all the args that were passed into
            # the call site.
            if self.has_invalid_arguments():
                raise exceptions.InferenceError(
                    "Inference failed to find values for all positional "
                    "arguments to {func!r}: {unpacked_args!r} doesn't "
                    "correspond to {positional_arguments!r}.",
                    positional_arguments=self.positional_arguments,
                    unpacked_args=self._unpacked_args,
                    call_site=self, func=funcnode, arg=name, context=context)
            # Synthesize a Tuple node holding the *args overflow.
            args = nodes.Tuple(lineno=funcnode.args.lineno,
                               col_offset=funcnode.args.col_offset,
                               parent=funcnode.args)
            args.postinit(vararg)
            return iter((args, ))

        # Check if it's a default parameter.
        try:
            return funcnode.args.default_value(name).infer(context)
        except exceptions.NoDefault:
            pass
        raise exceptions.InferenceError('No value found for argument {name} to '
                                        '{func!r}', call_site=self,
                                        func=funcnode, arg=name, context=context)
| 44.793893 | 85 | 0.565099 |
36d563f984bf57abb771f8be3c8694da44a53597 | 1,783 | py | Python | wbia_pie_v2/losses/hard_mine_triplet_loss.py | dylanirion/wbia-plugin-pie-v2 | 8ae37c2ad218e5e888bb1aea039f1b04a3fe9d8d | [
"Apache-2.0"
] | null | null | null | wbia_pie_v2/losses/hard_mine_triplet_loss.py | dylanirion/wbia-plugin-pie-v2 | 8ae37c2ad218e5e888bb1aea039f1b04a3fe9d8d | [
"Apache-2.0"
] | null | null | null | wbia_pie_v2/losses/hard_mine_triplet_loss.py | dylanirion/wbia-plugin-pie-v2 | 8ae37c2ad218e5e888bb1aea039f1b04a3fe9d8d | [
"Apache-2.0"
] | 1 | 2021-04-05T23:46:11.000Z | 2021-04-05T23:46:11.000Z | # -*- coding: utf-8 -*-
from __future__ import division, absolute_import
import torch
import torch.nn as nn
class TripletLoss(nn.Module):
    """Batch-hard triplet loss with hard positive/negative mining.

    For every anchor in the batch, the hardest (farthest) positive and the
    hardest (closest) negative are mined from the pairwise Euclidean
    distance matrix, then a margin ranking loss is applied.

    Reference:
        Hermans et al. In Defense of the Triplet Loss for Person
        Re-Identification. arXiv:1703.07737.

    Imported from `<https://github.com/Cysu/open-reid/blob/master/reid/loss/triplet.py>`_.

    Args:
        margin (float, optional): margin for triplet. Default is 0.3.
    """

    def __init__(self, margin=0.3):
        super(TripletLoss, self).__init__()
        self.margin = margin
        self.ranking_loss = nn.MarginRankingLoss(margin=margin)

    def forward(self, inputs, targets):
        """
        Args:
            inputs (torch.Tensor): feature matrix with shape (batch_size, feat_dim).
            targets (torch.LongTensor): ground truth labels with shape (num_classes).
        """
        batch = inputs.size(0)

        # Pairwise Euclidean distances via the expansion
        # ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b, with a clamp before the
        # sqrt for numerical stability.
        sq_norms = torch.pow(inputs, 2).sum(dim=1, keepdim=True).expand(batch, batch)
        pairwise = sq_norms + sq_norms.t()
        pairwise.addmm_(inputs, inputs.t(), beta=1, alpha=-2)
        pairwise = pairwise.clamp(min=1e-12).sqrt()

        # same_identity[i][j] is True when samples i and j share a label.
        same_identity = targets.expand(batch, batch).eq(targets.expand(batch, batch).t())
        hardest_pos = torch.stack(
            [pairwise[i][same_identity[i]].max() for i in range(batch)])
        hardest_neg = torch.stack(
            [pairwise[i][same_identity[i] == 0].min() for i in range(batch)])

        # Target of 1 asks the ranking loss to push the negative distance
        # above the positive distance by at least the margin.
        ones = torch.ones_like(hardest_neg)
        return self.ranking_loss(hardest_neg, hardest_pos, ones)
| 35.66 | 101 | 0.626472 |
3d57792202f8057f39aa2a13fb39ca53efbb673b | 366 | py | Python | americas.py | 4bic-attic/data_viz | db1ef07712d7deaccea6d5509d5a281a5484ac0b | [
"MIT"
] | null | null | null | americas.py | 4bic-attic/data_viz | db1ef07712d7deaccea6d5509d5a281a5484ac0b | [
"MIT"
] | null | null | null | americas.py | 4bic-attic/data_viz | db1ef07712d7deaccea6d5509d5a281a5484ac0b | [
"MIT"
] | null | null | null | import pygal
from pygal.maps.world import World
# Build a Pygal world map highlighting the countries of the Americas and
# write it out as an SVG file.
wm = World()
wm.title = 'North, Central and South America'
wm.add('North America', ['ca', 'mx', 'us'])
# Fixed: 'bz' is pygal's ISO 3166-1 alpha-2 code for Belize — the original
# 'bx' is not a valid code and the country was silently dropped from the
# map; the legend label was also misspelled 'Cental America'.
wm.add('Central America', ['bz', 'cr', 'gt', 'hn', 'ni', 'pa', 'sv'])
wm.add('South America', ['ar', 'bo', 'br', 'cl', 'co', 'ec', 'gf', 'gy',
                         'pe', 'py', 'sr', 'uy', 've'])
wm.render_to_file('americas.svg')
| 24.4 | 72 | 0.562842 |
76bb9ed328bc72991b306d0e4e0ebeaba1c7cb4f | 3,655 | py | Python | Scripts/simulation/drama_scheduler/club_gathering_drama_node.py | velocist/TS4CheatsInfo | b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | [
"Apache-2.0"
] | null | null | null | Scripts/simulation/drama_scheduler/club_gathering_drama_node.py | velocist/TS4CheatsInfo | b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | [
"Apache-2.0"
] | null | null | null | Scripts/simulation/drama_scheduler/club_gathering_drama_node.py | velocist/TS4CheatsInfo | b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | [
"Apache-2.0"
] | null | null | null | # uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\drama_scheduler\club_gathering_drama_node.py
# Compiled at: 2020-02-07 22:50:40
# Size of source mod 2**32: 4129 bytes
import random
from clubs.club_tuning import ClubTunables
from drama_scheduler.drama_node import BaseDramaNode, CooldownOption, DramaNodeRunOutcome
from drama_scheduler.drama_node_types import DramaNodeType
from event_testing.results import TestResult
from gsi_handlers.drama_handlers import GSIRejectedDramaNodeScoringData
from sims4.tuning.instances import lock_instance_tunables
from sims4.utils import classproperty
import services
class ClubGatheringDramaNode(BaseDramaNode):
    """Drama node that prompts a club member to start a club gathering."""

    @classproperty
    def drama_node_type(cls):
        return DramaNodeType.CLUB

    def _run(self):
        # Show the gathering invitation dialog for the chosen club.
        club_service = services.get_club_service()
        if club_service is None:
            return DramaNodeRunOutcome.FAILURE
        gathering_club = club_service.get_club_by_id(self._club_id)
        gathering_club.show_club_gathering_dialog(
            self._receiver_sim_info,
            flavor_text=ClubTunables.CLUB_GATHERING_DIALOG_TEXT_DRAMA_NODE,
            sender_sim_info=self._sender_sim_info)
        return DramaNodeRunOutcome.SUCCESS_NODE_COMPLETE

    def _test(self, resolver, skip_run_tests=False):
        # Guard clauses: reject the node as soon as any precondition fails.
        if self._club_id is None:
            return TestResult(False, 'Cannot run because there is no chosen node.')
        if self._sender_sim_info is None:
            return TestResult(False, 'Cannot run because there is no sender sim info.')
        if not skip_run_tests:
            club_service = services.get_club_service()
            if club_service is None:
                return TestResult(False, 'Club Service is None')
            club = club_service.get_club_by_id(self._club_id)
            if club is None:
                return TestResult(False, 'Cannot run because the club no longer exists.')
            if club in club_service.clubs_to_gatherings_map:
                return TestResult(False, 'Cannot run because the Club is already gathering')
            if self._sender_sim_info not in club.members:
                return TestResult(False, 'Cannot run because the sender sim info is no longer in the chosen club.')
            if self._receiver_sim_info not in club.members:
                return TestResult(False, 'Cannot run because the receiver sim info is no longer in the chosen club.')
        return super()._test(resolver, skip_run_tests=skip_run_tests)

    def _setup(self, *args, gsi_data=None, **kwargs):
        # NOTE(review): this decompiled source forwards *args* to the parent
        # as a single tuple; the original upstream likely called
        # super()._setup(*args, ...) — confirm before relying on it.
        result = super()._setup(args, gsi_data=gsi_data, **kwargs)
        if not result:
            return result
        club_service = services.get_club_service()
        if club_service is None:
            if gsi_data is not None:
                gsi_data.rejected_nodes.append(GSIRejectedDramaNodeScoringData(type(self), 'Club service is None.'))
            return False
        # Only clubs shared by both sender and receiver are eligible.
        shared_clubs = set(club_service.get_clubs_for_sim_info(self._receiver_sim_info))
        shared_clubs &= set(club_service.get_clubs_for_sim_info(self._sender_sim_info))
        if not shared_clubs:
            if gsi_data is not None:
                gsi_data.rejected_nodes.append(GSIRejectedDramaNodeScoringData(type(self), 'No available clubs.'))
            return False
        self._club_id = random.choice(tuple(shared_clubs)).club_id
        return True
f8f83137f20a0abfac4233c1350174e3bc30dfeb | 4,219 | py | Python | ibmsecurity/isam/base/network/felb/attributes/advanced_tuning.py | mtrinh20/ibmsecurity | c084ac14322a03f582a31984ecfa9b4d6433f252 | [
"Apache-2.0"
] | 2 | 2019-12-05T13:51:10.000Z | 2019-12-20T08:02:35.000Z | ibmsecurity/isam/base/network/felb/attributes/advanced_tuning.py | harshalchemate/ibmsecurity | e3dd3f930bcd9f151e30eff0a394c50f98d3eccb | [
"Apache-2.0"
] | null | null | null | ibmsecurity/isam/base/network/felb/attributes/advanced_tuning.py | harshalchemate/ibmsecurity | e3dd3f930bcd9f151e30eff0a394c50f98d3eccb | [
"Apache-2.0"
] | null | null | null | import logging
from ibmsecurity.utilities import tools
logger = logging.getLogger(__name__)
module_uri = "/isam/felb/configuration/attributes"
requires_modules = None
requires_version = None
def add(isamAppliance, name, value, check_mode=False, force=False):
    """
    Create a FELB configuration attribute.

    Returns an unchanged result object when the attribute already exists
    with the same value, unless ``force`` is set.
    """
    if not (force is True or _check(isamAppliance, name, value) is True):
        return isamAppliance.create_return_object()
    if check_mode is True:
        return isamAppliance.create_return_object(changed=True)
    return isamAppliance.invoke_post(
        "Creating Attribute", module_uri,
        {"name": name, "value": value},
        requires_modules=requires_modules, requires_version=requires_version)
def delete(isamAppliance, attribute_name, check_mode=False, force=False):
    """
    Delete the named FELB configuration attribute.

    Returns an unchanged result object when the attribute does not exist
    and ``force`` is not set.
    """
    if not (force or search(isamAppliance, attribute_name)):
        return isamAppliance.create_return_object()
    if check_mode is True:
        return isamAppliance.create_return_object(changed=True)
    uri = "{0}/{1}".format(module_uri, attribute_name)
    return isamAppliance.invoke_delete("Deleting Attribute", uri,
                                       requires_modules=requires_modules,
                                       requires_version=requires_version)
def get(isamAppliance, attribute_name):
    """
    Retrieve a single FELB configuration attribute.
    """
    uri = "{0}/{1}".format(module_uri, attribute_name)
    return isamAppliance.invoke_get("Retrieving Attribute", uri,
                                    requires_modules=requires_modules,
                                    requires_version=requires_version)
def get_all(isamAppliance):
    """
    Retrieve every FELB configuration attribute.
    """
    return isamAppliance.invoke_get("Retrieving Attributes",
                                    module_uri + "/",
                                    requires_modules=requires_modules,
                                    requires_version=requires_version)
def update(isamAppliance, attribute_name, value, check_mode=False, force=False):
    """
    Update the named FELB configuration attribute.

    Returns an unchanged result object when the attribute already has the
    requested value, unless ``force`` is set.
    """
    if not (force is True or _check(isamAppliance, attribute_name, value) is True):
        return isamAppliance.create_return_object(changed=False)
    if check_mode is True:
        return isamAppliance.create_return_object(changed=True)
    uri = "{0}/{1}".format(module_uri, attribute_name)
    return isamAppliance.invoke_put("Updating Attribute", uri,
                                    {"value": value},
                                    requires_version=requires_version,
                                    requires_modules=requires_modules)
def _check(isamAppliance, attribute_name, attribute_value):
    """
    Check for idempotency.

    Returns True when a create/update is needed: the attribute does not
    exist, or exists with a different value.
    """
    # Error handling to see if attribute exists; treat a failed lookup as
    # "attribute does not exist". Narrowed from a bare except, which would
    # also swallow KeyboardInterrupt/SystemExit.
    try:
        temp_obj = get(isamAppliance, attribute_name)
    except Exception:
        return True

    return temp_obj['data']['value'] != attribute_value
def search(isamAppliance, attribute_name):
    """
    Return the attribute's get() response when it exists, else False.
    """
    # Narrowed from a bare except, which would also swallow
    # KeyboardInterrupt/SystemExit.
    try:
        return get(isamAppliance, attribute_name)
    except Exception:
        return False
def compare(isamAppliance1, isamAppliance2):
    """
    Compare FELB configuration attributes between two appliances.

    Returns the result of tools.json_compare over the name/value pairs
    present on each appliance.
    """
    ret_obj1 = get_all(isamAppliance1)
    ret_obj2 = get_all(isamAppliance2)

    obj1 = {'rc': 0, 'data': []}
    obj2 = {'rc': 0, 'data': []}

    # search() already returns the attribute's get() payload (or False), so
    # reuse it directly instead of issuing a second GET per attribute as the
    # original code did — halves the REST calls with identical output.
    for attr in ret_obj1["data"]:
        found = search(isamAppliance=isamAppliance1, attribute_name=attr["name"])
        if found:
            obj1['data'].append({attr["name"]: found["data"]["value"]})

    for attr in ret_obj2["data"]:
        found = search(isamAppliance=isamAppliance2, attribute_name=attr["name"])
        if found:
            obj2['data'].append({attr["name"]: found["data"]["value"]})

    return tools.json_compare(obj1, obj2)
| 34.024194 | 117 | 0.624556 |
2def34c6b84cfe64dc6c7a5a2c3bda2a704467c2 | 3,642 | py | Python | searchs/RealBestTeams.py | iamtrex/TeamGenerator | fbfadb4ed78efb26e0358443a08d38353c2d7392 | [
"MIT"
] | 1 | 2020-07-21T06:37:45.000Z | 2020-07-21T06:37:45.000Z | searchs/RealBestTeams.py | iamtrex/TeamGenerator | fbfadb4ed78efb26e0358443a08d38353c2d7392 | [
"MIT"
] | 1 | 2020-07-21T06:43:07.000Z | 2020-07-21T06:43:07.000Z | searchs/RealBestTeams.py | iamtrex/TeamGenerator | fbfadb4ed78efb26e0358443a08d38353c2d7392 | [
"MIT"
] | null | null | null | from data import Const as C
from model import TeamSet as TS, Team as T
import itertools
import time
'''Class Def Player - Represents a Player with name, preferred roles, and rating at each role.'''
def get_score(obj):
    """Sort/key helper: return the precomputed ``score`` attribute of *obj*."""
    return obj.score
def calc_best_n_team_sets(n):
    """Exhaustively score every ordered 10-player pick from C.OG and keep the top n TeamSets.

    Returns the retained TeamSets sorted best-to-worst.
    NOTE(review): on a score tie with the current last place the set is appended
    without evicting anything, so the list can grow beyond n; and the inner
    `while` only pops while len > n -- confirm this "keep ties" behavior is intended.
    """
    # Track Time execution
    time_start = time.time()
    team_sets = [] # List of Sets, keeps top n
    counter = 0
    score_total = 0  # running sum of scores of the retained sets (for the progress average)
    # Create all permutations
    all_combinations = itertools.permutations(C.OG, 10)
    for i in all_combinations:
        counter += 1
        if counter % 1000000 == 0: # Print every million combinations
            print("Evaluated " + str(counter) + " combinations")
            if len(team_sets) > 0:
                print("Current Average " + str(score_total / len(team_sets))) # Average over time... Should only improve
                print("Current Best " + team_sets[0].to_string())
        # Create a set with the permutation.
        team_set = TS.TeamSet(C.players[i[0]], C.players[i[1]], C.players[i[2]], C.players[i[3]], C.players[i[4]],
                              C.players[i[5]], C.players[i[6]], C.players[i[7]], C.players[i[8]], C.players[i[9]])
        score = get_score(team_set)
        if len(team_sets) < n:
            # Still filling up to capacity: accept unconditionally.
            team_sets.append(team_set)
            score_total += score
        else:
            # At capacity: sort descending so the worst retained set is last.
            team_sets.sort(key=get_score, reverse=True)
            last_place = get_score(team_sets[-1])
            if last_place < score:
                # Pop Lowest score and replace with a new score...
                while last_place < score and len(team_sets) > n:
                    s = team_sets.pop()
                    score_total -= get_score(s)
                    last_place = get_score(team_sets[-1])
                team_sets.append(team_set)
                score_total += score
            elif last_place == score:
                # Append to the end.
                score_total += score
                team_sets.append(team_set)
    # Print Team from Best to Worst
    team_sets.sort(key=get_score, reverse=True)
    print("Time Taken " + str(time.time() - time_start) + "s")
    return team_sets
def calc_best_n_teams(n):
    """Exhaustively score every ordered 5-player pick from C.OG and keep the top n Teams.

    Returns the retained Teams sorted best-to-worst.
    NOTE(review): a score tying the current last place is appended without
    evicting, so the list can grow beyond n -- confirm "keep ties" is intended.
    """
    time_start = time.time()
    counter = 1
    all_combinations = itertools.permutations(C.OG, 5)
    score_total = 0  # running sum of scores of the retained teams (for the progress average)
    teams = []
    for i in all_combinations:
        counter += 1
        if counter % 10000 == 0: # Print every 1000 combinations
            print("Evaluated " + str(counter) + " combinations")
            if len(teams) > 0:
                print("Current Average " + str(score_total / len(teams))) # Average over time... Should only improve
                print("Current Best\n" + teams[0].to_string())
        # Create a set with the permutation.
        team = T.Team(C.players[i[0]], C.players[i[1]], C.players[i[2]], C.players[i[3]], C.players[i[4]])
        score = get_score(team)
        if len(teams) < n:
            # Still filling up to capacity: accept unconditionally.
            teams.append(team)
            score_total += score
        else:
            # At capacity: sort descending so the worst retained team is last.
            teams.sort(key=get_score, reverse=True)
            last_place = get_score(teams[-1])
            if last_place < score:
                # Pop Lowest score and replace with a new score...
                s = teams.pop()
                score_total -= last_place
                teams.append(team)
                score_total += score
            elif last_place == score:
                # Append to the end.
                score_total += score
                teams.append(team)
    teams.sort(key=get_score, reverse=True)
    print("Time Taken " + str(time.time() - time_start) + "s")
    return teams
3032b27d6768cd20ea4bc8270216bb29471113ce | 2,896 | py | Python | openmdao/test_suite/test_examples/beam_optimization/beam_group.py | naylor-b/blue | d7d7e8d63212c047a7a9b0625da98aa29ddc39b4 | [
"Apache-2.0"
] | null | null | null | openmdao/test_suite/test_examples/beam_optimization/beam_group.py | naylor-b/blue | d7d7e8d63212c047a7a9b0625da98aa29ddc39b4 | [
"Apache-2.0"
] | null | null | null | openmdao/test_suite/test_examples/beam_optimization/beam_group.py | naylor-b/blue | d7d7e8d63212c047a7a9b0625da98aa29ddc39b4 | [
"Apache-2.0"
] | null | null | null | from __future__ import division
import numpy as np
import openmdao.api as om
from openmdao.test_suite.test_examples.beam_optimization.components.moment_comp import MomentOfInertiaComp
from openmdao.test_suite.test_examples.beam_optimization.components.local_stiffness_matrix_comp import LocalStiffnessMatrixComp
from openmdao.test_suite.test_examples.beam_optimization.components.states_comp import StatesComp
from openmdao.test_suite.test_examples.beam_optimization.components.displacements_comp import DisplacementsComp
from openmdao.test_suite.test_examples.beam_optimization.components.compliance_comp import ComplianceComp
from openmdao.test_suite.test_examples.beam_optimization.components.volume_comp import VolumeComp
class BeamGroup(om.Group):
    """Cantilever-beam sizing model.

    Wires element thickness -> moment of inertia -> local stiffness ->
    displacements -> compliance, plus a volume constraint, and registers
    the optimization problem (minimize compliance subject to fixed volume).
    """

    def initialize(self):
        # Physical constants and discretization supplied by the caller.
        self.options.declare('E')
        self.options.declare('L')
        self.options.declare('b')
        self.options.declare('volume')
        self.options.declare('num_elements', int)

    def setup(self):
        opts = self.options
        E, L, b = opts['E'], opts['L'], opts['b']
        volume = opts['volume']
        num_elements = opts['num_elements']
        num_nodes = num_elements + 1

        # Unit downward load applied at the beam tip (last node's
        # translational degree of freedom).
        force_vector = np.zeros(2 * num_nodes)
        force_vector[-2] = -1.

        indeps = om.IndepVarComp()
        indeps.add_output('h', shape=num_elements)
        self.add_subsystem('inputs_comp', indeps)
        self.add_subsystem('I_comp', MomentOfInertiaComp(num_elements=num_elements, b=b))
        self.add_subsystem('local_stiffness_matrix_comp',
                           LocalStiffnessMatrixComp(num_elements=num_elements, E=E, L=L))
        self.add_subsystem('states_comp',
                           StatesComp(num_elements=num_elements, force_vector=force_vector))
        self.add_subsystem('displacements_comp', DisplacementsComp(num_elements=num_elements))
        self.add_subsystem('compliance_comp',
                           ComplianceComp(num_elements=num_elements, force_vector=force_vector))
        self.add_subsystem('volume_comp', VolumeComp(num_elements=num_elements, b=b, L=L))

        # Data flow between the components above.
        for src, tgt in (
                ('inputs_comp.h', 'I_comp.h'),
                ('I_comp.I', 'local_stiffness_matrix_comp.I'),
                ('local_stiffness_matrix_comp.K_local', 'states_comp.K_local'),
                ('states_comp.d', 'displacements_comp.d'),
                ('displacements_comp.displacements', 'compliance_comp.displacements'),
                ('inputs_comp.h', 'volume_comp.h')):
            self.connect(src, tgt)

        # Optimization problem definition.
        self.add_design_var('inputs_comp.h', lower=1e-2, upper=10.)
        self.add_objective('compliance_comp.compliance')
        self.add_constraint('volume_comp.volume', equals=volume)
23a3bae65bfa7b4b4888b40642f0506cc7173b75 | 1,097 | py | Python | kubernetes/test/test_v2beta1_cross_version_object_reference.py | L3T/python | b6e4ae81a2afb49f668a142eb7d1c6e2571ef478 | [
"Apache-2.0"
] | 2 | 2020-06-21T08:03:18.000Z | 2020-06-21T09:53:29.000Z | kubernetes/test/test_v2beta1_cross_version_object_reference.py | L3T/python | b6e4ae81a2afb49f668a142eb7d1c6e2571ef478 | [
"Apache-2.0"
] | null | null | null | kubernetes/test/test_v2beta1_cross_version_object_reference.py | L3T/python | b6e4ae81a2afb49f668a142eb7d1c6e2571ef478 | [
"Apache-2.0"
] | 1 | 2020-12-10T07:28:08.000Z | 2020-12-10T07:28:08.000Z | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: release-1.16
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import kubernetes.client
from kubernetes.client.models.v2beta1_cross_version_object_reference import V2beta1CrossVersionObjectReference # noqa: E501
from kubernetes.client.rest import ApiException
class TestV2beta1CrossVersionObjectReference(unittest.TestCase):
    """V2beta1CrossVersionObjectReference unit test stubs"""
    # Generated scaffold: setUp/tearDown are intentionally empty because the
    # stub test below constructs no fixtures yet.
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def testV2beta1CrossVersionObjectReference(self):
        """Test V2beta1CrossVersionObjectReference"""
        # FIXME: construct object with mandatory attributes with example values
        # model = kubernetes.client.models.v2beta1_cross_version_object_reference.V2beta1CrossVersionObjectReference()  # noqa: E501
        pass
# Support running this test module directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()
| 27.425 | 132 | 0.758432 |
66c51ba021842b8169287c7f5684d4f32bcea963 | 698 | py | Python | Chapter03/mysite/blog/admin.py | HankSerg/Django-2-by-Example | 15409eda3ba99e53fc4e8d53793398f924d3fefe | [
"MIT"
] | 639 | 2018-05-31T22:28:21.000Z | 2022-03-21T16:49:47.000Z | Chapter03/mysite/blog/admin.py | HankSerg/Django-2-by-Example | 15409eda3ba99e53fc4e8d53793398f924d3fefe | [
"MIT"
] | 43 | 2018-06-26T06:56:28.000Z | 2022-03-31T18:32:57.000Z | Chapter03/mysite/blog/admin.py | HankSerg/Django-2-by-Example | 15409eda3ba99e53fc4e8d53793398f924d3fefe | [
"MIT"
] | 644 | 2018-06-10T22:37:24.000Z | 2022-03-04T18:34:01.000Z | from django.contrib import admin
from .models import Post, Comment
@admin.register(Post)
class PostAdmin(admin.ModelAdmin):
    """Django admin configuration for blog Post objects."""
    # Columns shown on the change-list page.
    list_display = ('title', 'slug', 'author', 'publish',
                    'status')
    # Right-hand sidebar filters.
    list_filter = ('status', 'created', 'publish', 'author')
    # Fields searched by the admin search box.
    search_fields = ('title', 'body')
    # Auto-fill the slug from the title while typing.
    prepopulated_fields = {'slug': ('title',)}
    # Use an ID input (with lookup popup) instead of a select for author.
    raw_id_fields = ('author',)
    # Date drill-down navigation by publish date.
    date_hierarchy = 'publish'
    ordering = ('status', 'publish')
@admin.register(Comment)
class CommentAdmin(admin.ModelAdmin):
    """Django admin configuration for blog Comment objects."""
    # Columns shown on the change-list page.
    list_display = ('name', 'email', 'post', 'created', 'active')
    # Right-hand sidebar filters.
    list_filter = ('active', 'created', 'updated')
    # Fields searched by the admin search box.
    search_fields = ('name', 'email', 'body')
bb8d7b8c63503b755b01ec4d6bd263ac17d97bc0 | 52,813 | py | Python | eucaconsole/views/scalinggroups.py | gholms/eucaconsole | 4629c961c90e3aae27e3a869a7f157bafeda6489 | [
"BSD-2-Clause"
] | null | null | null | eucaconsole/views/scalinggroups.py | gholms/eucaconsole | 4629c961c90e3aae27e3a869a7f157bafeda6489 | [
"BSD-2-Clause"
] | null | null | null | eucaconsole/views/scalinggroups.py | gholms/eucaconsole | 4629c961c90e3aae27e3a869a7f157bafeda6489 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2013-2016 Hewlett Packard Enterprise Development LP
#
# Redistribution and use of this software in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Pyramid views for Eucalyptus and AWS scaling groups
"""
import simplejson as json
import time
from dateutil import parser
from hashlib import md5
from itertools import chain
from markupsafe import escape
from operator import attrgetter, itemgetter
from boto.ec2.autoscale import AutoScalingGroup, ScalingPolicy
from boto.ec2.autoscale.tag import Tag
from pyramid.httpexceptions import HTTPFound, HTTPNotFound
from pyramid.view import view_config
from ..constants.cloudwatch import (
METRIC_TYPES, MONITORING_DURATION_CHOICES, STATISTIC_CHOICES, GRANULARITY_CHOICES,
DURATION_GRANULARITY_CHOICES_MAPPING)
from ..constants.scalinggroups import (
SCALING_GROUP_MONITORING_CHARTS_LIST, SCALING_GROUP_INSTANCE_MONITORING_CHARTS_LIST)
from ..forms.scalinggroups import (
ScalingGroupDeleteForm, ScalingGroupEditForm, ScalingGroupMonitoringForm,
ScalingGroupCreateForm, ScalingGroupInstancesMarkUnhealthyForm,
ScalingGroupInstancesTerminateForm, ScalingGroupPolicyCreateForm,
ScalingGroupPolicyDeleteForm, ScalingGroupsFiltersForm)
from ..i18n import _
from ..models import Notification
from ..models.alarms import Alarm
from ..views import LandingPageView, BaseView, TaggedItemView, JSONResponse, JSONError
from . import boto_error_handler
class DeleteScalingGroupMixin(object):
    """Mixin providing the instance-shutdown polling done before a scaling group is deleted."""

    def wait_for_instances_to_shutdown(self, scaling_group):
        """Poll EC2 until the group's instances are down, or ~150s elapse.

        Polls up to 30 times, 5 seconds apart.  On AWS only a 'terminated'
        state counts as down; on Eucalyptus 'shutting-down' is accepted too.
        Returns None; callers rely only on the blocking behavior.
        """
        if scaling_group.instances:
            ec2_conn = self.get_connection()
            instance_ids = [i.instance_id for i in scaling_group.instances]
            is_all_shutdown = False
            count = 0
            while is_all_shutdown is False and count < 30:
                instances = ec2_conn.get_only_instances(instance_ids)
                if instances:
                    is_all_shutdown = True
                    for instance in instances:
                        # NOTE: reads boto's private _state; str() of it begins
                        # with the state name (e.g. 'terminated(48)').
                        if self.cloud_type == 'aws':
                            if not str(instance._state).startswith('terminated'):
                                is_all_shutdown = False
                        else:
                            if not str(instance._state).startswith('terminated') and \
                                    not str(instance._state).startswith('shutting-down'):
                                is_all_shutdown = False
                # Bug fix: the original slept unconditionally, wasting an
                # extra 5 seconds after the final successful check.
                if not is_all_shutdown:
                    time.sleep(5)
                count += 1
        return
class ScalingGroupsView(LandingPageView, DeleteScalingGroupMixin):
    """Landing page view for the scaling groups list, including bulk delete."""
    TEMPLATE = '../templates/scalinggroups/scalinggroups.pt'
    def __init__(self, request):
        super(ScalingGroupsView, self).__init__(request)
        self.title_parts = [_(u'Scaling Groups')]
        self.initial_sort_key = 'name'
        self.prefix = '/scalinggroups'
        self.delete_form = ScalingGroupDeleteForm(self.request, formdata=self.request.params or None)
        self.json_items_endpoint = self.get_json_endpoint('scalinggroups_json')
        self.ec2_conn = self.get_connection()
        self.autoscale_conn = self.get_connection(conn_type='autoscale')
        self.vpc_conn = self.get_connection(conn_type='vpc')
        self.filters_form = ScalingGroupsFiltersForm(
            self.request, formdata=self.request.params or None,
            ec2_conn=self.ec2_conn, autoscale_conn=self.autoscale_conn, vpc_conn=self.vpc_conn)
        self.filter_keys = [
            'availability_zones', 'launch_config', 'name', 'placement_group', 'vpc_zone_identifier']
        search_facets = self.filters_form.facets
        # sort_keys are passed to sorting drop-down
        self.is_vpc_supported = BaseView.is_vpc_supported(request)
        # Hide the VPC subnet facet entirely when the cloud has no VPC support.
        if not self.is_vpc_supported:
            del self.filters_form.vpc_zone_identifier
        self.render_dict = dict(
            filter_keys=self.filter_keys,
            search_facets=BaseView.escape_json(json.dumps(search_facets)),
            sort_keys=self.get_sort_keys(),
            prefix=self.prefix,
            initial_sort_key=self.initial_sort_key,
            json_items_endpoint=self.json_items_endpoint,
            delete_form=self.delete_form,
        )
    @view_config(route_name='scalinggroups', renderer=TEMPLATE, request_method='GET')
    def scalinggroups_landing(self):
        """Render the scaling groups landing page (items load via JSON endpoint)."""
        return self.render_dict
    @view_config(route_name='scalinggroups_delete', request_method='POST', renderer=TEMPLATE)
    def scalinggroups_delete(self):
        """Delete a scaling group after shutting down and waiting on its instances."""
        if self.delete_form.validate():
            location = self.request.route_path('scalinggroups')
            name = self.request.params.get('name')
            with boto_error_handler(self.request, location):
                self.log_request(_(u"Deleting scaling group {0}").format(name))
                conn = self.get_connection(conn_type='autoscale')
                scaling_group = self.get_scaling_group_by_name(name)
                # Need to shut down instances prior to scaling group deletion
                scaling_group.shutdown_instances()
                self.wait_for_instances_to_shutdown(scaling_group)
                conn.delete_auto_scaling_group(name)
                prefix = _(u'Successfully deleted scaling group')
                msg = u'{0} {1}'.format(prefix, name)
                self.request.session.flash(msg, queue=Notification.SUCCESS)
            return HTTPFound(location=location)
        else:
            self.request.error_messages = self.delete_form.get_errors_list()
        return self.render_dict
    def get_scaling_group_by_name(self, name):
        """Return the first scaling group matching *name*, or [] when absent/no connection."""
        names = [name]
        scaling_groups = self.autoscale_conn.get_all_groups(names) if self.autoscale_conn else []
        if scaling_groups:
            return scaling_groups[0]
        return []
    @staticmethod
    def get_sort_keys():
        """Choices for the landing page sort drop-down ('-' prefix = descending)."""
        return [
            dict(key='name', name=_(u'Name: A to Z')),
            dict(key='-name', name=_(u'Name: Z to A')),
            dict(key='-status', name=_(u'Health status')),
            dict(key='-current_instances_count', name=_(u'Current instances')),
            dict(key='launch_config', name=_(u'Launch configuration')),
            dict(key='availability_zones', name=_(u'Availability zones')),
        ]
class ScalingGroupsJsonView(LandingPageView):
    """JSON endpoints backing the scaling groups landing page."""
    @view_config(route_name='scalinggroups_json', renderer='json', request_method='POST')
    def scalinggroups_json(self):
        """Return the filtered scaling group list, with alarm and health status, as JSON."""
        if not(self.is_csrf_valid()):
            return JSONResponse(status=400, message="missing CSRF token")
        scalinggroups = []
        with boto_error_handler(self.request):
            # Zone/subnet facets need custom matching, so exclude them from the
            # generic filter pass and apply them separately below.
            items = self.filter_items(
                self.get_items(), ignore=['availability_zones', 'vpc_zone_identifier'], autoscale=True)
            if self.request.params.getall('availability_zones'):
                items = self.filter_by_availability_zones(items)
            if self.request.params.getall('vpc_zone_identifier'):
                items = self.filter_by_vpc_zone_identifier(items)
            cw_conn = self.get_connection(conn_type='cloudwatch')
            # Get alarms for ASGs and build a list of resource ids to optimize alarm status fetch
            alarms = [alarm for alarm in cw_conn.describe_alarms() if 'AutoScalingGroupName' in alarm.dimensions]
            alarm_resource_ids = set(list(
                chain.from_iterable([chain.from_iterable(alarm.dimensions.values()) for alarm in alarms])
            ))
            for group in items:
                alarm_status = ''
                if group.name in alarm_resource_ids:
                    alarm_status = Alarm.get_resource_alarm_status(group.name, alarms)
                group_instances = group.instances or []
                # Group is 'Healthy' only when every member instance is healthy.
                all_healthy = all(instance.health_status == 'Healthy' for instance in group_instances)
                scalinggroups.append(dict(
                    availability_zones=', '.join(sorted(group.availability_zones)),
                    load_balancers=', '.join(sorted(group.load_balancers)),
                    desired_capacity=group.desired_capacity,
                    launch_config_name=group.launch_config_name,
                    max_size=group.max_size,
                    min_size=group.min_size,
                    name=group.name,
                    placement_group=group.placement_group,
                    termination_policies=', '.join(group.termination_policies),
                    current_instances_count=len(group_instances),
                    status='Healthy' if all_healthy else 'Unhealthy',
                    alarm_status=alarm_status,
                ))
        return dict(results=scalinggroups)
    @view_config(route_name='scalinggroup_names_json', renderer='json', request_method='GET')
    def scalinggroup_names_json(self):
        """Return just the scaling group names (used by autocomplete/selectors)."""
        items = self.get_items()
        names = [item.name for item in items]
        return dict(
            scalinggroups=names
        )
    def get_items(self):
        """Fetch all scaling groups, or [] when no autoscale connection is available."""
        conn = self.get_connection(conn_type='autoscale')
        return conn.get_all_groups() if conn else []
    def filter_by_availability_zones(self, items):
        """Keep groups having at least one zone matching a selected zone facet."""
        filtered_items = []
        for item in items:
            is_matched = False
            for zone in self.request.params.getall('availability_zones'):
                for selected_zone in item.availability_zones:
                    if selected_zone == zone:
                        is_matched = True
            if is_matched:
                filtered_items.append(item)
        return filtered_items
    def filter_by_vpc_zone_identifier(self, items):
        """Keep groups whose subnet list matches a selected facet ('None' = no subnets)."""
        filtered_items = []
        for item in items:
            is_matched = False
            for vpc_zone in self.request.params.getall('vpc_zone_identifier'):
                if item.vpc_zone_identifier is None or item.vpc_zone_identifier == '':
                    # Handle the 'No subnets' Case
                    if vpc_zone == 'None':
                        is_matched = True
                elif item.vpc_zone_identifier and item.vpc_zone_identifier.find(vpc_zone) != -1:
                    # vpc_zone_identifier is a comma-joined string; substring match.
                    is_matched = True
            if is_matched:
                filtered_items.append(item)
        return filtered_items
class BaseScalingGroupView(BaseView):
    """Shared connections and helpers for the scaling group detail pages."""
    def __init__(self, request):
        super(BaseScalingGroupView, self).__init__(request)
        # One connection per service used across the detail pages.
        self.autoscale_conn = self.get_connection(conn_type='autoscale')
        self.cloudwatch_conn = self.get_connection(conn_type='cloudwatch')
        self.elb_conn = self.get_connection(conn_type='elb')
        self.vpc_conn = self.get_connection(conn_type='vpc')
        self.ec2_conn = self.get_connection()
        self.is_vpc_supported = BaseView.is_vpc_supported(request)
        self.termination_policies_placeholder_text = _(u'Select one or more termination policies...')
    def get_scaling_group(self):
        """Return the scaling group named by the route's id, or None when not found."""
        scalinggroup_param = self.request.matchdict.get('id')  # id = scaling_group.name
        scalinggroups_param = [scalinggroup_param]
        scaling_groups = []
        if self.autoscale_conn:
            scaling_groups = self.autoscale_conn.get_all_groups(names=scalinggroups_param)
        return scaling_groups[0] if scaling_groups else None
    def get_launch_configuration(self, launch_config_name):
        """Return the named launch configuration, or None."""
        if self.autoscale_conn:
            launch_configs = self.autoscale_conn.get_all_launch_configurations(names=[launch_config_name])
            return launch_configs[0] if launch_configs else None
        return None
    def get_alarms(self):
        """Return all CloudWatch alarms, or [] when no connection."""
        if self.cloudwatch_conn:
            return self.cloudwatch_conn.describe_alarms()
        return []
    def get_policies(self, scaling_group):
        """Return the group's scaling policies, sorted."""
        policies = []
        if self.autoscale_conn and scaling_group:
            policies = self.autoscale_conn.get_all_policies(as_group=scaling_group.name)
        return sorted(policies)
    def parse_tags_param(self, scaling_group_name=None):
        """Parse the 'tags' JSON request param into a list of boto Tag objects."""
        tags_json = self.request.params.get('tags')
        tags_list = json.loads(tags_json) if tags_json else []
        tags = []
        for tag in tags_list:
            value = tag.get('value')
            if value is not None:
                value = self.unescape_braces(value.strip())
            tags.append(Tag(
                resource_id=scaling_group_name,
                key=self.unescape_braces(tag.get('name', '').strip()),
                value=value,
                propagate_at_launch=tag.get('propagate_at_launch', False),
            ))
        return tags
class ScalingGroupView(BaseScalingGroupView, DeleteScalingGroupMixin):
    """Views for Scaling Group detail page"""
    TEMPLATE = '../templates/scalinggroups/scalinggroup_view.pt'
    def __init__(self, request):
        super(ScalingGroupView, self).__init__(request)
        self.title_parts = [_(u'Scaling Group'), request.matchdict.get('id'), _(u'General')]
        with boto_error_handler(request):
            self.scaling_group = self.get_scaling_group()
            if not self.scaling_group:
                raise HTTPNotFound()
            self.policies = self.get_policies(self.scaling_group)
            self.vpc = self.get_vpc(self.scaling_group)
            self.vpc_name = TaggedItemView.get_display_name(self.vpc) if self.vpc else ''
            # Only the most recent activity is needed (for the "cause" panel).
            self.activities = self.autoscale_conn.get_all_activities(self.scaling_group.name, max_records=1)
        self.edit_form = ScalingGroupEditForm(
            self.request, scaling_group=self.scaling_group, autoscale_conn=self.autoscale_conn, ec2_conn=self.ec2_conn,
            vpc_conn=self.vpc_conn, elb_conn=self.elb_conn, formdata=self.request.params or None)
        self.delete_form = ScalingGroupDeleteForm(self.request, formdata=self.request.params or None)
        self.is_vpc_supported = BaseView.is_vpc_supported(request)
        tags = [{
            'name': tag.key,
            'value': tag.value,
            'propagate_at_launch': tag.propagate_at_launch
        } for tag in self.scaling_group.tags or []]
        tags = BaseView.escape_json(json.dumps(tags))
        cause = None
        # The activity cause is a single string of "At <ISO date>Z <message>"
        # segments; split it into (date, msg) pairs for display.
        if len(self.activities) > 0 and hasattr(self.activities[0], 'cause'):
            cause = self.activities[0].cause
            causes = cause.split('At')
            causes = causes[1:]
            cause = []
            for c in causes:
                idx = c.find('Z') + 1
                date_string = c[:idx]
                date_obj = parser.parse(date_string)
                cause.append(dict(date=date_obj, msg=c[idx:]))
        self.render_dict = dict(
            scaling_group=self.scaling_group,
            scaling_group_name=self.escape_braces(self.scaling_group.name) if self.scaling_group else '',
            tags=tags,
            activity_cause=cause,
            vpc_network=self.vpc_name,
            policies=self.policies,
            edit_form=self.edit_form,
            delete_form=self.delete_form,
            avail_zone_placeholder_text=_(u'Select one or more availability zones...'),
            termination_policies_placeholder_text=self.termination_policies_placeholder_text,
            controller_options_json=self.get_controller_options_json(),
            is_vpc_supported=self.is_vpc_supported,
        )
    @view_config(route_name='scalinggroup_view', renderer=TEMPLATE)
    def scalinggroup_view(self):
        """Render the scaling group detail page."""
        if self.scaling_group is None:
            raise HTTPNotFound()
        return self.render_dict
    @view_config(route_name='scalinggroup_update', request_method='POST', renderer=TEMPLATE)
    def scalinggroup_update(self):
        """Handle the detail page's edit form: update tags then group properties."""
        if self.scaling_group is None:
            raise HTTPNotFound()
        # Drop VPC fields from validation when VPC isn't in play.
        if not self.is_vpc_supported or self.request.params.get('vpc_network') is None:
            del self.edit_form.vpc_network
            del self.edit_form.vpc_subnet
        if self.edit_form.validate():
            location = self.request.route_path('scalinggroup_view', id=self.scaling_group.name)
            with boto_error_handler(self.request, location):
                self.log_request(_(u"Updating scaling group {0}").format(self.scaling_group.name))
                self.update_tags()
                self.update_properties()
                prefix = _(u'Successfully updated scaling group')
                msg = u'{0} {1}'.format(prefix, self.scaling_group.name)
                self.request.session.flash(msg, queue=Notification.SUCCESS)
            return HTTPFound(location=location)
        else:
            self.request.error_messages = self.edit_form.get_errors_list()
        return self.render_dict
    @view_config(route_name='scalinggroup_delete', request_method='POST', renderer=TEMPLATE)
    def scalinggroup_delete(self):
        """Delete this scaling group after shutting down and waiting on its instances."""
        if self.scaling_group is None:
            raise HTTPNotFound()
        if self.delete_form.validate():
            location = self.request.route_path('scalinggroups')
            name = self.unescape_braces(self.request.params.get('name'))
            with boto_error_handler(self.request, location):
                # Need to shut down instances prior to scaling group deletion
                self.log_request(_(u"Terminating scaling group {0} instances").format(name))
                self.scaling_group.shutdown_instances()
                self.wait_for_instances_to_shutdown(self.scaling_group)
                self.log_request(_(u"Deleting scaling group {0}").format(name))
                self.autoscale_conn.delete_auto_scaling_group(name)
                prefix = _(u'Successfully deleted scaling group')
                msg = u'{0} {1}'.format(prefix, name)
                self.request.session.flash(msg, queue=Notification.SUCCESS)
            return HTTPFound(location=location)
        else:
            self.request.error_messages = self.delete_form.get_errors_list()
        return self.render_dict
    def update_tags(self):
        """Sync the group's tags with the submitted set, touching only what changed."""
        scaling_group_tags = self.scaling_group.tags
        if scaling_group_tags:
            # cull tags that start with aws: or euca:
            scaling_group_tags = [
                tag for tag in self.scaling_group.tags if
                tag.key.find('aws:') == -1 and tag.key.find('euca:') == -1
            ]
        updated_tags_list = self.parse_tags_param(scaling_group_name=self.scaling_group.name)
        (del_tags, update_tags) = self.optimize_tag_update(scaling_group_tags, updated_tags_list)
        # Delete existing tags first
        if del_tags:
            self.autoscale_conn.delete_tags(del_tags)
        if update_tags:
            self.autoscale_conn.create_or_update_tags(update_tags)
    @staticmethod
    def optimize_tag_update(orig_tags, updated_tags):
        """Remove unchanged tags from both lists; returns (tags_to_delete, tags_to_upsert).

        A tag is unchanged only when key, value, and propagate_at_launch all
        match; both input lists are mutated in place.
        """
        # cull tags that haven't changed
        if orig_tags and len(updated_tags) > 0:
            del_tags = []
            for tag in orig_tags:
                # find tags where keys match
                tag_keys = [utag for utag in updated_tags if utag.key == tag.key]
                if tag_keys:
                    # find tags where keys also match
                    tag_values = [utag for utag in tag_keys if utag.value == tag.value]
                    if tag_values:
                        # find tags where prop flag also matches
                        tag_prop = [utag for utag in tag_values if utag.propagate_at_launch == tag.propagate_at_launch]
                        if len(tag_prop) == 1:  # we should never have more than 1 match
                            # save tag from original list to avoid modifying list we are iterating through
                            del_tags.append(tag)
                            # remove from updated list since that will make subsequent searches faster
                            updated_tags.remove(tag_prop[0])
            # finally, delete the tags we found form original list
            for tag in del_tags:
                orig_tags.remove(tag)
        return orig_tags, updated_tags
    def update_properties(self):
        """Apply the edit form's capacity/zone/policy fields to the group and save."""
        self.scaling_group.desired_capacity = self.request.params.get('desired_capacity', 1)
        self.scaling_group.launch_config_name = self.unescape_braces(self.request.params.get('launch_config'))
        vpc_subnets = self.request.params.getall('vpc_subnet')
        if vpc_subnets and vpc_subnets[0] != 'None':
            self.scaling_group.vpc_zone_identifier = ','.join(
                [str(x) for x in vpc_subnets]
            )
        # If VPC subnet exists, do not specify availability zones; the API will figure them out based on the VPC subnets
        if not self.scaling_group.vpc_zone_identifier:
            self.scaling_group.availability_zones = self.request.params.getall('availability_zones')
        else:
            self.scaling_group.availability_zones = ''
        self.scaling_group.termination_policies = self.request.params.getall('termination_policies')
        # on AWS, the option 'Default' must appear at the end of the list
        if 'Default' in self.scaling_group.termination_policies:
            self.scaling_group.termination_policies.remove('Default')
            self.scaling_group.termination_policies.append('Default')
        self.scaling_group.max_size = self.request.params.get('max_size', 1)
        self.scaling_group.min_size = self.request.params.get('min_size', 0)
        self.scaling_group.health_check_type = 'EC2'
        self.scaling_group.health_check_period = self.request.params.get('health_check_period', 120)
        self.scaling_group.default_cooldown = self.request.params.get('default_cooldown', 120)
        self.scaling_group.update()
    def get_vpc(self, scaling_group):
        """Resolve the group's VPC via its first subnet; returns the VPC or None."""
        with boto_error_handler(self.request):
            if self.vpc_conn and scaling_group and scaling_group.vpc_zone_identifier:
                vpc_subnets = scaling_group.vpc_zone_identifier.split(',')
                vpc_subnet = self.vpc_conn.get_all_subnets(subnet_ids=vpc_subnets[0])
                if vpc_subnet:
                    this_subnet = vpc_subnet[0]
                    if this_subnet and this_subnet.vpc_id:
                        this_vpc = self.vpc_conn.get_all_vpcs(vpc_ids=[this_subnet.vpc_id])
                        if this_vpc:
                            return this_vpc[0]
        return None
    def get_controller_options_json(self):
        """JSON options blob consumed by the page's AngularJS controller."""
        if self.scaling_group is None:
            return '{}'
        return BaseView.escape_json(json.dumps({
            'scaling_group_name': self.scaling_group.name,
            'policies_count': len(self.policies),
            'availability_zones': self.scaling_group.availability_zones,
            'termination_policies': self.scaling_group.termination_policies,
        }))
class ScalingGroupInstancesView(BaseScalingGroupView):
    """View for Scaling Group Manage Instances page"""
    TEMPLATE = '../templates/scalinggroups/scalinggroup_instances.pt'
    def __init__(self, request):
        super(ScalingGroupInstancesView, self).__init__(request)
        self.title_parts = [_(u'Scaling Group'), request.matchdict.get('id'), _(u'Instances')]
        self.scaling_group = self.get_scaling_group()
        self.policies = self.get_policies(self.scaling_group)
        self.markunhealthy_form = ScalingGroupInstancesMarkUnhealthyForm(
            self.request, formdata=self.request.params or None)
        self.terminate_form = ScalingGroupInstancesTerminateForm(self.request, formdata=self.request.params or None)
        self.render_dict = dict(
            scaling_group=self.scaling_group,
            scaling_group_name=self.escape_braces(self.scaling_group.name),
            policies=self.policies,
            markunhealthy_form=self.markunhealthy_form,
            terminate_form=self.terminate_form,
            json_items_endpoint=self.request.route_path('scalinggroup_instances_json', id=self.scaling_group.name),
        )
    @view_config(route_name='scalinggroup_instances', renderer=TEMPLATE, request_method='GET')
    def scalinggroup_instances(self):
        """Render the manage-instances page (rows load via the JSON endpoint)."""
        return self.render_dict
    @view_config(route_name='scalinggroup_instances_markunhealthy', renderer=TEMPLATE, request_method='POST')
    def scalinggroup_instances_markunhealthy(self):
        """Mark one instance Unhealthy, optionally respecting the health-check grace period."""
        location = self.request.route_path('scalinggroup_instances', id=self.scaling_group.name)
        if self.markunhealthy_form.validate():
            instance_id = self.request.params.get('instance_id')
            respect_grace_period = self.request.params.get('respect_grace_period') == 'y'
            with boto_error_handler(self.request, location):
                self.log_request(_(u"Marking instance {0} unhealthy").format(instance_id))
                self.autoscale_conn.set_instance_health(
                    instance_id, 'Unhealthy', should_respect_grace_period=respect_grace_period)
                prefix = _(u'Successfully marked the following instance as unhealthy:')
                msg = u'{0} {1}'.format(prefix, instance_id)
                self.request.session.flash(msg, queue=Notification.SUCCESS)
            return HTTPFound(location=location)
        else:
            self.request.error_messages = self.markunhealthy_form.get_errors_list()
        return self.render_dict
    @view_config(route_name='scalinggroup_instances_terminate', renderer=TEMPLATE, request_method='POST')
    def scalinggroup_instances_terminate(self):
        """Terminate one instance, optionally decrementing the group's desired capacity."""
        location = self.request.route_path('scalinggroup_instances', id=self.scaling_group.name)
        if self.terminate_form.validate():
            instance_id = self.request.params.get('instance_id')
            decrement_capacity = self.request.params.get('decrement_capacity') == 'y'
            with boto_error_handler(self.request, location):
                self.log_request(_(u"Terminating scaling group {0} instance {1}").format(
                    self.scaling_group.name, instance_id))
                self.autoscale_conn.terminate_instance(instance_id, decrement_capacity=decrement_capacity)
                prefix = _(u'Successfully sent terminate request for instance')
                msg = u'{0} {1}'.format(prefix, instance_id)
                self.request.session.flash(msg, queue=Notification.SUCCESS)
            return HTTPFound(location=location)
        else:
            self.request.error_messages = self.terminate_form.get_errors_list()
        return self.render_dict
class ScalingGroupInstancesJsonView(BaseScalingGroupView):
    """JSON response for Scaling Group Manage Instances page"""
    def __init__(self, request):
        super(ScalingGroupInstancesJsonView, self).__init__(request)
        self.scaling_group = self.get_scaling_group()
        self.ec2_conn = self.get_connection()
        # EC2 instance objects are pre-fetched once so display names can be
        # resolved per-row without extra API calls.
        self.instance_objects = self.get_instance_objects()
    @view_config(route_name='scalinggroup_instances_json', renderer='json', request_method='GET')
    def scalinggroup_instances_json(self):
        """Return one row per group member instance, flagging transitional states."""
        instances = []
        transitional_states = ['Unhealthy', 'Pending']
        with boto_error_handler(self.request):
            items = self.get_instances()
            for instance in items:
                # Transitional when either lifecycle state or health status
                # indicates the instance is still changing.
                is_transitional = any([
                    instance.lifecycle_state in transitional_states,
                    instance.health_status in transitional_states,
                ])
                instances.append(dict(
                    id=instance.instance_id,
                    name=self.get_display_name(instance.instance_id),
                    status=instance.health_status,
                    availability_zone=instance.availability_zone,
                    launch_config=instance.launch_config_name,
                    lifecycle_state=instance.lifecycle_state,
                    transitional=is_transitional,
                ))
        return dict(results=instances)
    def get_instances(self):
        """Return the group's autoscale Instance records, sorted by instance id."""
        if self.scaling_group.instances is None:
            return []
        return sorted(self.scaling_group.instances, key=attrgetter('instance_id'))
    def get_instance_objects(self):
        """Return the corresponding EC2 instance objects, sorted by id."""
        if self.scaling_group.instances is None:
            return []
        scaling_group_instances_ids = [instance.instance_id for instance in self.scaling_group.instances]
        instances = self.ec2_conn.get_only_instances(instance_ids=scaling_group_instances_ids)
        return sorted(instances, key=attrgetter('id'))
    def get_display_name(self, instance_id):
        """Return the tag-based display name for *instance_id*, falling back to the id."""
        matched_instance = [instance for instance in self.instance_objects if instance.id == instance_id]
        instance = matched_instance[0] if matched_instance else None
        if instance is None:
            return instance_id
        return TaggedItemView.get_display_name(instance)
class ScalingGroupHistoryView(BaseScalingGroupView):
    """View for Scaling Group History page"""
    TEMPLATE = '../templates/scalinggroups/scalinggroup_history.pt'
    def __init__(self, request):
        super(ScalingGroupHistoryView, self).__init__(request)
        with boto_error_handler(request):
            self.scaling_group = self.get_scaling_group()
        # Keys the client-side filter/search widget operates on.
        self.filter_keys = ['status', 'description']
        self.sort_keys = self.get_sort_keys()
        # Facets offered in the search bar; keys mirror activity status codes.
        search_facets = [
            {'name': 'status', 'label': _(u"Status"), 'options': [
                {'key': 'successful', 'label': _("Successful")},
                {'key': 'in-progress', 'label': _("In progress")},
                {'key': 'failed', 'label': _("Failed")},
                {'key': 'not-yet-in-service', 'label': _("Not yet in service")},
                {'key': 'canceled', 'label': _("Canceled")},
                {'key': 'waiting-for-launch', 'label': _("Waiting for launch")},
                {'key': 'waiting-for-terminate', 'label': _("Waiting for terminate")}
            ]}
        ]
        self.delete_form = ScalingGroupPolicyDeleteForm(self.request, formdata=self.request.params or None)
        self.render_dict = dict(
            scaling_group=self.scaling_group,
            scaling_group_name=self.escape_braces(self.scaling_group.name),
            filter_keys=self.filter_keys,
            search_facets=BaseView.escape_json(json.dumps(search_facets)),
            sort_keys=self.sort_keys,
            initial_sort_key='-end_time',
            delete_form=self.delete_form,
        )
    @view_config(route_name='scalinggroup_history', renderer=TEMPLATE, request_method='GET')
    def scalinggroup_history(self):
        """Render the history page template."""
        return self.render_dict
    @view_config(route_name='scalinggroup_history_json', renderer='json', request_method='GET')
    def scalinggroup_history_json(self):
        """Return up to RECORD_LIMIT scaling activities as JSON."""
        RECORD_LIMIT = 1000
        with boto_error_handler(self.request):
            items = self.autoscale_conn.get_all_activities(self.scaling_group.name)
        # Cap the payload so huge histories don't blow up the response.
        if len(items) > RECORD_LIMIT:
            items = items[:RECORD_LIMIT]
        activities = []
        for activity in items:
            activities.append(dict(
                activity_id=activity.activity_id,
                status=activity.status_code,
                description=activity.description,
                start_time=activity.start_time.isoformat(),
                # end_time is empty while the activity is still running.
                end_time=activity.end_time.isoformat() if activity.end_time else '',
            ))
        return dict(results=activities)
    @view_config(route_name='scalinggroup_history_details_json', renderer='json', request_method='GET')
    def scalinggroup_history_details_json(self):
        """Return details for one activity (looked up by the 'activity' URL segment)."""
        with boto_error_handler(self.request):
            activity_id = self.request.matchdict.get('activity')
            items = self.autoscale_conn.get_all_activities(self.scaling_group.name, [activity_id])
            if len(items) > 0:
                activity = items[0]
                cause = None
                if hasattr(activity, 'cause'):
                    # The cause attribute is a single string of sentences each
                    # beginning "At <ISO-8601 timestamp>Z ..."; split it into
                    # (date, msg) pairs for display.
                    cause = activity.cause
                    causes = cause.split('At')
                    causes = causes[1:]
                    cause = []
                    for c in causes:
                        # Timestamp ends at the trailing 'Z'; the rest is the message.
                        idx = c.find('Z') + 1
                        date_string = c[:idx]
                        date_obj = parser.parse(date_string)
                        cause.append(dict(date=date_obj.isoformat(), msg=c[idx:]))
                details = dict(
                    activity_id=activity.activity_id,
                    status=activity.status_code,
                    description=activity.status_message,
                    cause=cause
                )
                return dict(results=details)
            else:
                # NOTE(review): 401 (unauthorized) looks wrong for a missing
                # activity — 404 seems intended; confirm before changing.
                raise JSONError(message=_(u'Activity ID not found ') + activity_id, status=401)
    @staticmethod
    def get_sort_keys():
        """Sort options shown in the history page's sort dropdown."""
        return [
            dict(key='-start_time', name=_(u'Start time: most recent')),
            dict(key='-end_time', name=_(u'End time: most recent')),
            dict(key='status', name=_(u'Status: A to Z')),
            dict(key='-status', name=_(u'Status: Z to A')),
            dict(key='description', name=_(u'Description: A to Z')),
            dict(key='-description', name=_(u'Description: Z to A')),
        ]
class ScalingGroupPoliciesView(BaseScalingGroupView):
    """View for Scaling Group Policies page"""
    TEMPLATE = '../templates/scalinggroups/scalinggroup_policies.pt'
    def __init__(self, request):
        super(ScalingGroupPoliciesView, self).__init__(request)
        self.title_parts = [_(u'Scaling Group'), request.matchdict.get('id'), _(u'Policies')]
        # policy_ids maps encoded policy name -> short md5 id used as a DOM-safe handle.
        policy_ids = {}
        scaling_policies = []
        with boto_error_handler(request):
            self.alarms = self.get_alarms()
            self.scaling_group = self.get_scaling_group()
            policies = self.get_policies(self.scaling_group)
            for policy in policies:
                # Collect the full alarm objects that reference this policy.
                policy_alarms = []
                if hasattr(policy, 'alarms'):
                    for alarm in self.alarms:
                        for policy_alarm in policy.alarms:
                            if alarm.name == policy_alarm.name and alarm not in policy_alarms:
                                policy_alarms.append(alarm)
                encoded_policy_name = policy.name.encode('UTF-8')
                # First 8 hex chars of the md5 are enough for a unique-ish page id.
                policy_ids[encoded_policy_name] = md5(encoded_policy_name).hexdigest()[:8]
                # Build a human-readable summary, e.g. "Add 2 instances" or "Remove 10 %".
                scale_text_prefix = _('Remove') if policy.scaling_adjustment < 0 else _('Add')
                scale_type = _('instances') if policy.adjustment_type == 'ChangeInCapacity' else '%'
                if policy.adjustment_type == 'ChangeInCapacity' and abs(policy.scaling_adjustment) == 1:
                    scale_type = _('instance')
                scale_text = '{0} {1} {2}'.format(scale_text_prefix, abs(policy.scaling_adjustment), scale_type)
                scaling_policies.append(dict(
                    name=policy.name,
                    encoded_name=encoded_policy_name,
                    alarms=policy_alarms,
                    cooldown=policy.cooldown,
                    scale_text=scale_text,
                ))
        self.create_form = ScalingGroupPolicyCreateForm(
            self.request, scaling_group=self.scaling_group, alarms=self.alarms, formdata=self.request.params or None)
        self.delete_form = ScalingGroupPolicyDeleteForm(self.request, formdata=self.request.params or None)
        self.render_dict = dict(
            scaling_group=self.scaling_group,
            scaling_group_name=self.escape_braces(self.scaling_group.name),
            create_form=self.create_form,
            delete_form=self.delete_form,
            policies=sorted(scaling_policies, key=itemgetter('name')),
            policy_ids=policy_ids,
            scale_down_text=_(u'Scale down by'),
            scale_up_text=_(u'Scale up by'),
        )
    @view_config(route_name='scalinggroup_policies', renderer=TEMPLATE, request_method='GET')
    def scalinggroup_policies(self):
        """Render the policies page template."""
        return self.render_dict
    @view_config(route_name='scalinggroup_policy_delete', renderer=TEMPLATE, request_method='POST')
    def scalinggroup_policy_delete(self):
        """POST handler: delete the named policy from this scaling group."""
        if self.delete_form.validate():
            location = self.request.route_path('scalinggroup_policies', id=self.scaling_group.name)
            policy_name = self.request.params.get('name')
            with boto_error_handler(self.request, location):
                self.log_request(_(u"Deleting scaling group {0} policy {1}").format(
                    self.scaling_group.name, policy_name))
                self.autoscale_conn.delete_policy(policy_name, autoscale_group=self.scaling_group.name)
                prefix = _(u'Successfully deleted scaling group policy')
                msg = u'{0} {1}'.format(prefix, policy_name)
                self.request.session.flash(msg, queue=Notification.SUCCESS)
            return HTTPFound(location=location)
        else:
            # Validation failed: surface the form errors and re-render the page.
            self.request.error_messages = self.delete_form.get_errors_list()
        return self.render_dict
class ScalingGroupPolicyView(BaseScalingGroupView):
    """View for creating a Scaling Group policy"""
    TEMPLATE = '../templates/scalinggroups/scalinggroup_policy.pt'
    def __init__(self, request):
        super(ScalingGroupPolicyView, self).__init__(request)
        with boto_error_handler(request):
            self.scaling_group = self.get_scaling_group()
            self.alarms = self.get_alarms()
        self.policy_form = ScalingGroupPolicyCreateForm(
            self.request, scaling_group=self.scaling_group, alarms=self.alarms, formdata=self.request.params or None)
        self.render_dict = dict(
            scaling_group=self.scaling_group,
            scaling_group_name=self.escape_braces(self.scaling_group.name),
            alarm_choices=json.dumps(dict(self.policy_form.alarm.choices)),
            policy_form=self.policy_form,
            has_elb=bool(self.scaling_group.load_balancers),
            load_balancers_json=json.dumps(self.scaling_group.load_balancers),
            metric_unit_mapping=self.get_metric_unit_mapping(),
            scale_down_text=_(u'Scale down by'),
            scale_up_text=_(u'Scale up by'),
        )
    @view_config(route_name='scalinggroup_policy_new', renderer=TEMPLATE, request_method='GET')
    def scalinggroup_policy_new(self):
        """Render the 'create policy' page template."""
        return self.render_dict
    @view_config(route_name='scalinggroup_policy_create', renderer=TEMPLATE, request_method='POST')
    def scalinggroup_policy_create(self):
        """POST handler: create a scaling policy and attach it to the chosen alarm."""
        location = self.request.route_path('scalinggroup_policies', id=self.scaling_group.name)
        if self.policy_form.validate():
            adjustment_amount = self.request.params.get('adjustment_amount')
            adjustment_direction = self.request.params.get('adjustment_direction', 'up')
            # The form posts a magnitude plus a direction; 'down' means negative adjustment.
            scaling_adjustment = int(adjustment_amount)
            if adjustment_direction == 'down':
                scaling_adjustment = -scaling_adjustment
            scaling_policy = ScalingPolicy(
                name=self.request.params.get('name'),
                as_name=self.scaling_group.name,
                adjustment_type=self.request.params.get('adjustment_type'),
                scaling_adjustment=scaling_adjustment,
                cooldown=self.request.params.get('cooldown'),
            )
            with boto_error_handler(self.request, location):
                self.log_request(_(u"Creating scaling group {0} policy {1}").format(
                    self.scaling_group.name, scaling_policy.name))
                # Create scaling policy
                self.autoscale_conn.create_scaling_policy(scaling_policy)
                # Re-fetch so we have the ARN assigned by the service.
                created_scaling_policy = self.autoscale_conn.get_all_policies(
                    as_group=self.scaling_group.name, policy_names=[scaling_policy.name])[0]
                # Attach policy to alarm
                alarm_name = self.request.params.get('alarm')
                alarm = self.cloudwatch_conn.describe_alarms(alarm_names=[alarm_name])[0]
                alarm.comparison = alarm._cmp_map.get(alarm.comparison)  # See https://github.com/boto/boto/issues/1311
                # TODO: Detect if an alarm has 5 scaling policies attached to it and abort accordingly
                if created_scaling_policy.policy_arn not in alarm.alarm_actions:
                    alarm.alarm_actions.append(created_scaling_policy.policy_arn)
                    alarm.update()
                prefix = _(u'Successfully created scaling group policy')
                msg = u'{0} {1}'.format(prefix, scaling_policy.name)
                self.request.session.flash(msg, queue=Notification.SUCCESS)
            return HTTPFound(location=location)
        else:
            # Validation failed: surface the form errors and re-render the page.
            self.request.error_messages = self.policy_form.get_errors_list()
        return self.render_dict
    @staticmethod
    def get_metric_unit_mapping():
        """Return {metric name: unit} built from the METRIC_TYPES constant."""
        metric_units = {}
        for mtype in METRIC_TYPES:
            metric_units[mtype.get('name')] = mtype.get('unit')
        return metric_units
class ScalingGroupPolicyJsonView(BaseScalingGroupView):
    """JSON endpoint exposing the scaling policies attached to a scaling group."""
    def __init__(self, request):
        super(ScalingGroupPolicyJsonView, self).__init__(request)
        # Scaling group name comes from the {id} segment of the matched route.
        self.scaling_group_name = request.matchdict.get('id')
    @view_config(route_name='scalinggroup_policies_json', renderer='json', request_method='GET')
    def get_policies_for_scaling_group(self):
        """Return {'policies': {name: {'arn': ..., 'scaling_adjustment': ...}}}."""
        group = self.get_scaling_group()
        by_name = {
            policy.name: dict(arn=policy.policy_arn, scaling_adjustment=policy.scaling_adjustment)
            for policy in self.get_policies(group)
        }
        return dict(policies=by_name)
class ScalingGroupWizardView(BaseScalingGroupView):
    """View for Create Scaling Group wizard"""
    TEMPLATE = '../templates/scalinggroups/scalinggroup_wizard.pt'
    def __init__(self, request):
        super(ScalingGroupWizardView, self).__init__(request)
        self.title_parts = [_(u'Scaling Group'), _(u'Create')]
        with boto_error_handler(self.request):
            self.create_form = ScalingGroupCreateForm(
                self.request, autoscale_conn=self.autoscale_conn, ec2_conn=self.ec2_conn,
                vpc_conn=self.vpc_conn, elb_conn=self.elb_conn, formdata=self.request.params or None)
            self.vpc_subnet_choices_json = self.get_vpc_subnets_list()
        self.render_dict = dict(
            create_form=self.create_form,
            launch_config_param=escape(self.request.params.get('launch_config', '')),
            avail_zones_placeholder_text=_(u'Select availability zones...'),
            elb_placeholder_text=_(u'Select load balancers...'),
            vpc_subnet_placeholder_text=_(u'Select VPC subnets...'),
            controller_options_json=self.get_controller_options_json(),
            is_vpc_supported=self.is_vpc_supported,
            termination_policies_placeholder_text=self.termination_policies_placeholder_text,
        )
    def get_controller_options_json(self):
        """Return escaped JSON options consumed by the wizard's JS controller."""
        return BaseView.escape_json(json.dumps({
            'launchconfigs_count': len(self.create_form.launch_config.choices) - 1,  # Ignore blank choice
            'vpc_subnet_choices_json': self.vpc_subnet_choices_json,
            'default_vpc_network': self.get_default_vpc_network(),
        }))
    def get_default_vpc_network(self):
        """Return the VPC id to preselect in the wizard, or 'None' for EC2-Classic."""
        default_vpc = self.request.session.get('default_vpc', [])
        if self.is_vpc_supported:
            if 'none' in default_vpc or 'None' in default_vpc:
                if self.cloud_type == 'aws':
                    return 'None'
                # for euca, return the first vpc on the list
                if self.vpc_conn:
                    with boto_error_handler(self.request):
                        vpc_networks = self.vpc_conn.get_all_vpcs()
                        if vpc_networks:
                            return vpc_networks[0].id
            else:
                # NOTE(review): if the session stored an empty list this
                # indexes into []; presumably the session always holds at
                # least one entry — verify against the login code path.
                return default_vpc[0]
        return 'None'
    @view_config(route_name='scalinggroup_new', renderer=TEMPLATE, request_method='GET')
    def scalinggroup_new(self):
        """Displays the Launch Instance wizard"""
        return self.render_dict
    @view_config(route_name='scalinggroup_create', renderer=TEMPLATE, request_method='POST')
    def scalinggroup_create(self):
        """Handles the POST from the Create Scaling Group wizard"""
        if not self.is_vpc_supported:
            # Drop VPC fields entirely so validation ignores them on EC2-Classic clouds.
            del self.create_form.vpc_network
            del self.create_form.vpc_subnet
        if self.create_form.validate():
            with boto_error_handler(self.request, self.request.route_path('scalinggroups')):
                scaling_group_name = self.request.params.get('name')
                self.log_request(_(u"Creating scaling group {0}").format(scaling_group_name))
                launch_config_name = self.unescape_braces(self.request.params.get('launch_config'))
                # Normalize the posted network: missing/empty or the literal 'None'
                # both mean EC2-Classic (no VPC).
                vpc_network = self.request.params.get('vpc_network') or None
                if vpc_network == 'None':
                    vpc_network = None
                vpc_subnets = self.request.params.getall('vpc_subnet')
                params = dict(
                    name=scaling_group_name,
                    launch_config=launch_config_name,
                    load_balancers=self.request.params.getall('load_balancers'),
                    health_check_type='EC2',
                    health_check_period=self.request.params.get('health_check_period'),
                    desired_capacity=self.request.params.get('desired_capacity'),
                    min_size=self.request.params.get('min_size'),
                    max_size=self.request.params.get('max_size'),
                    tags=self.parse_tags_param(scaling_group_name=scaling_group_name),
                )
                if vpc_network is None:
                    # EC2-Classic case
                    params.update(dict(
                        availability_zones=self.request.params.getall('availability_zones'),
                    ))
                    scaling_group = AutoScalingGroup(**params)
                else:
                    # EC2-VPC case
                    params.update(dict(
                        vpc_zone_identifier=vpc_subnets,
                    ))
                    scaling_group = AutoScalingGroup(**params)
                scaling_group.termination_policies = self.request.params.getall('termination_policies')
                # The 'Default' option must appear at the end of the list
                if 'Default' in scaling_group.termination_policies:
                    scaling_group.termination_policies.remove('Default')
                    scaling_group.termination_policies.append('Default')
                self.autoscale_conn.create_auto_scaling_group(scaling_group)
                msg = _(u'Successfully created scaling group')
                msg += u' {0}'.format(scaling_group.name)
                self.request.session.flash(msg, queue=Notification.SUCCESS)
                location = self.request.route_path('scalinggroup_view', id=scaling_group.name)
                return HTTPFound(location=location)
        else:
            # Validation failed: surface the form errors and re-render the wizard.
            self.request.error_messages = self.create_form.get_errors_list()
        return self.render_dict
    def get_vpc_subnets_list(self):
        """Return a list of dicts describing every VPC subnet (for the subnet picker)."""
        subnets = []
        if self.vpc_conn:
            with boto_error_handler(self.request):
                vpc_subnets = self.vpc_conn.get_all_subnets()
                for vpc_subnet in vpc_subnets:
                    subnets.append(dict(
                        id=vpc_subnet.id,
                        vpc_id=vpc_subnet.vpc_id,
                        availability_zone=vpc_subnet.availability_zone,
                        state=vpc_subnet.state,
                        cidr_block=vpc_subnet.cidr_block,
                    ))
        return subnets
class ScalingGroupMonitoringView(BaseScalingGroupView):
    """View for the Scaling Group Monitoring page (CloudWatch charts and the
    metrics-collection enable/disable toggle)."""
    VIEW_TEMPLATE = '../templates/scalinggroups/scalinggroup_monitoring.pt'
    def __init__(self, request):
        super(ScalingGroupMonitoringView, self).__init__(request)
        self.title_parts = [_(u'Scaling group'), request.matchdict.get('id'), _(u'Monitoring')]
        self.cw_conn = self.get_connection(conn_type='cloudwatch')
        self.monitoring_form = ScalingGroupMonitoringForm(self.request, formdata=self.request.params or None)
        with boto_error_handler(self.request):
            self.scaling_group = self.get_scaling_group()
            self.launch_configuration = self.get_launch_configuration(self.scaling_group.launch_config_name)
        # Group metrics collection counts as enabled when any metrics are enabled.
        metrics_collection_enabled = bool(self.scaling_group.enabled_metrics)
        # boto reports instance monitoring as the string 'true'/'false', not a bool.
        launchconfig_monitoring_enabled = self.launch_configuration.instance_monitoring.enabled == 'true'
        duration_help_text = _(u'Changing the time will update charts for both instance and scaling group metrics.')
        self.render_dict = dict(
            scaling_group=self.scaling_group,
            scaling_group_name=self.scaling_group.name,
            launch_config_name=self.launch_configuration.name,
            has_elb=bool(self.scaling_group.load_balancers),
            load_balancers_json=json.dumps(self.scaling_group.load_balancers),
            monitoring_form=self.monitoring_form,
            metrics_collection_enabled=metrics_collection_enabled,
            launchconfig_monitoring_enabled=launchconfig_monitoring_enabled,
            duration_help_text=duration_help_text,
            duration_choices=MONITORING_DURATION_CHOICES,
            statistic_choices=STATISTIC_CHOICES,
            controller_options_json=self.get_controller_options_json()
        )
    @view_config(route_name='scalinggroup_monitoring', renderer=VIEW_TEMPLATE, request_method='GET')
    def scalinggroup_monitoring(self):
        """Render the monitoring page; 404 when the scaling group is missing.

        (Renamed from the misspelled ``scallinggroup_monitoring``; the route is
        bound via @view_config, so the rename is invisible to callers.)
        """
        if self.scaling_group is None:
            raise HTTPNotFound()
        return self.render_dict
    @view_config(route_name='scalinggroup_monitoring_update', renderer=VIEW_TEMPLATE, request_method='POST')
    def scalinggroup_monitoring_update(self):
        """Enable or disable metrics collection for the scaling group"""
        if self.monitoring_form.validate():
            if self.scaling_group:
                enabled_metrics = self.scaling_group.enabled_metrics
                action = 'disabled' if enabled_metrics else 'enabled'
                location = self.request.route_path('scalinggroup_monitoring', id=self.scaling_group.name)
                with boto_error_handler(self.request, location):
                    self.log_request(_(u"Metrics collection for scaling group {0} {1}").format(
                        self.scaling_group.name, action))
                    if enabled_metrics:
                        self.autoscale_conn.disable_metrics_collection(self.scaling_group.name)
                    else:
                        # TODO: The GroupStandbyInstances metric is not included by default, so include it here
                        # when Eucalyptus supports Standby instances
                        self.autoscale_conn.enable_metrics_collection(self.scaling_group.name, '1Minute')
                    msg = _(
                        u'Request successfully submitted. It may take a moment for metrics collection to update.')
                    self.request.session.flash(msg, queue=Notification.SUCCESS)
                return HTTPFound(location=location)
        else:
            # Bug fix: this handler previously fell through with an implicit
            # None return when validation failed; surface the form errors and
            # re-render, consistent with every other POST handler in this file.
            self.request.error_messages = self.monitoring_form.get_errors_list()
        return self.render_dict
    def get_controller_options_json(self):
        """Return escaped JSON options consumed by the monitoring page's JS controller."""
        charts_list = SCALING_GROUP_INSTANCE_MONITORING_CHARTS_LIST + SCALING_GROUP_MONITORING_CHARTS_LIST
        if not self.scaling_group:
            return ''
        return BaseView.escape_json(json.dumps({
            'metric_title_mapping': {},
            'charts_list': charts_list,
            'granularity_choices': GRANULARITY_CHOICES,
            'duration_granularities_mapping': DURATION_GRANULARITY_CHOICES_MAPPING,
        }))
| 49.823585 | 120 | 0.650143 |
d0bb662aa320551d72fe0100b8a030fc72b47934 | 2,701 | py | Python | calm/fix-missing-src-hint.py | jon-turney/calm | a9db642904aa406f08dddb06798b576ec14949b8 | [
"MIT"
] | 5 | 2016-03-18T22:12:31.000Z | 2022-02-02T16:01:52.000Z | calm/fix-missing-src-hint.py | jon-turney/calm | a9db642904aa406f08dddb06798b576ec14949b8 | [
"MIT"
] | null | null | null | calm/fix-missing-src-hint.py | jon-turney/calm | a9db642904aa406f08dddb06798b576ec14949b8 | [
"MIT"
] | 3 | 2016-03-21T12:54:16.000Z | 2022-03-06T09:41:06.000Z | #!/usr/bin/env python3
#
# Copyright (c) 2020 Jon Turney
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import argparse
import logging
import os
import re
import shutil
import sys
from . import common_constants
#
#
#
def fix_hints(relarea):
    """Walk the release area and create any missing '-src.hint' files.

    For every source tarball 'PVR-src.tar.XZ' that has a 'PVR.hint' but no
    'PVR-src.hint' in the same directory, copy the hint to the -src name.
    When no corresponding binary tarball exists, the original hint is then
    renamed aside to 'PVR.hint.bak'.
    """
    src_tar_pattern = r'^(.*)-src\.tar' + common_constants.PACKAGE_COMPRESSIONS_RE + r'$'
    for dirpath, _subdirs, files in os.walk(relarea):
        for fn in files:
            m = re.match(src_tar_pattern, fn)
            if not m:
                continue
            pvr = m.group(1)
            old_hint = pvr + '.hint'
            src_hint = pvr + '-src.hint'
            if (old_hint not in files) or (src_hint in files):
                continue
            logging.info("copying '%s' to '%s'" % (old_hint, src_hint))
            shutil.copy2(os.path.join(dirpath, old_hint), os.path.join(dirpath, src_hint))
            # No matching binary tarball means the hint was source-only,
            # so the un-suffixed hint is now redundant: move it aside.
            if fn.replace('-src', '') not in files:
                logging.info("removing '%s'" % (old_hint))
                os.rename(os.path.join(dirpath, old_hint), os.path.join(dirpath, old_hint + '.bak'))
#
#
#
def main():
    """Command-line entry point: parse options and fix hints in the release area."""
    default_relarea = common_constants.FTP
    parser = argparse.ArgumentParser(description='src hint creator')
    parser.add_argument('-v', '--verbose', action='count', dest='verbose', help='verbose output', default=0)
    parser.add_argument('--releasearea', action='store', metavar='DIR',
                        help="release directory (default: " + default_relarea + ")",
                        default=default_relarea, dest='relarea')
    args = parser.parse_args()
    # -v raises verbosity from the default WARNING level to INFO.
    if args.verbose:
        logging.getLogger().setLevel(logging.INFO)
    # Prefix log lines with the program name, unix-tool style.
    logging.basicConfig(format=os.path.basename(sys.argv[0]) + ': %(message)s')
    fix_hints(args.relarea)
#
#
#
# When run as a script, exit with main()'s return code.
if __name__ == "__main__":
    sys.exit(main())
| 33.7625 | 173 | 0.664198 |
6fb2783b26fe7b38c3e1cd535a7dff15f5df1f97 | 971 | py | Python | recipe_scrapers/foodrepublic.py | huskywhale/recipe-scraper | d5570d1d5203cfd8dca29f4f6e5f81e10997fdd2 | [
"MIT"
] | 14 | 2017-04-06T18:30:04.000Z | 2021-09-04T16:10:34.000Z | recipe_scrapers/foodrepublic.py | huskywhale/recipe-scraper | d5570d1d5203cfd8dca29f4f6e5f81e10997fdd2 | [
"MIT"
] | null | null | null | recipe_scrapers/foodrepublic.py | huskywhale/recipe-scraper | d5570d1d5203cfd8dca29f4f6e5f81e10997fdd2 | [
"MIT"
] | 12 | 2018-02-21T00:53:31.000Z | 2021-07-20T02:44:50.000Z | from ._abstract import AbstractScraper
from ._utils import get_minutes, normalize_string
class FoodRepublic(AbstractScraper):
    """Scraper for recipes hosted on foodrepublic.com."""

    @classmethod
    def host(cls):
        """Domain this scraper handles.

        (Fixed: the first parameter of a classmethod is conventionally
        ``cls``, not ``self``.)
        """
        return 'foodrepublic.com'

    def title(self):
        """Return the recipe title text."""
        return self.soup.find('h3', {'class': 'recipe-title'}).get_text()

    def total_time(self):
        """Return total minutes: prep time plus cook time."""
        return get_minutes(self.soup.find('li', {'class': 'prep-time'})) +\
            get_minutes(self.soup.find('li', {'class': 'cook-time'}))

    def ingredients(self):
        """Return the list of normalized ingredient strings."""
        ingredients_html = self.soup.findAll('li', {'itemprop': "recipeIngredient"})
        return [
            normalize_string(ingredient.get_text())
            for ingredient in ingredients_html
        ]

    def instructions(self):
        """Return the instruction steps, normalized and newline-joined."""
        instructions_html = self.soup.find('div', {'class': 'directions'}).findAll('li')
        return '\n'.join([
            normalize_string(instruction.get_text())
            for instruction in instructions_html
        ])
| 29.424242 | 88 | 0.625129 |
047798f6dc1450c40817218fe921e97a486f6e59 | 58,736 | py | Python | Lib/logging/handlers.py | CraftSpider/cpython | 0f2f35e15f9fbee44ce042b724348419d8136bc5 | [
"CNRI-Python-GPL-Compatible"
] | 2 | 2020-10-23T02:42:10.000Z | 2020-10-23T02:50:48.000Z | Lib/logging/handlers.py | CraftSpider/cpython | 0f2f35e15f9fbee44ce042b724348419d8136bc5 | [
"CNRI-Python-GPL-Compatible"
] | 2 | 2021-05-31T13:39:43.000Z | 2021-06-01T12:00:13.000Z | Lib/logging/handlers.py | CraftSpider/cpython | 0f2f35e15f9fbee44ce042b724348419d8136bc5 | [
"CNRI-Python-GPL-Compatible"
] | 2 | 2020-02-24T03:23:47.000Z | 2020-02-24T07:45:37.000Z | # Copyright 2001-2016 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Additional handlers for the logging package for Python. The core package is
based on PEP 282 and comments thereto in comp.lang.python.
Copyright (C) 2001-2016 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging.handlers' and log away!
"""
import logging, socket, os, pickle, struct, time, re
from stat import ST_DEV, ST_INO, ST_MTIME
import queue
import threading
import copy
#
# Some constants...
#
# Default ports used by the socket/datagram/HTTP logging server examples.
DEFAULT_TCP_LOGGING_PORT = 9020
DEFAULT_UDP_LOGGING_PORT = 9021
DEFAULT_HTTP_LOGGING_PORT = 9022
DEFAULT_SOAP_LOGGING_PORT = 9023
# Conventional syslog ports.
SYSLOG_UDP_PORT = 514
SYSLOG_TCP_PORT = 514
_MIDNIGHT = 24 * 60 * 60 # number of seconds in a day
class BaseRotatingHandler(logging.FileHandler):
    """
    Shared machinery for file handlers that roll their log file over.

    Subclasses such as RotatingFileHandler and TimedRotatingFileHandler
    decide *when* rollover happens (via shouldRollover/doRollover); this
    base class contributes the emit() glue plus two user-pluggable hooks:
    ``namer`` (rename rotated files) and ``rotator`` (perform the actual
    rotation).  Not meant to be instantiated directly.
    """
    # Optional callable mapping a default rotated filename to the name to use.
    namer = None
    # Optional callable performing the actual (source, dest) rotation.
    rotator = None

    def __init__(self, filename, mode, encoding=None, delay=False, errors=None):
        """
        Use the specified filename for streamed logging.
        """
        logging.FileHandler.__init__(self, filename, mode=mode,
                                     encoding=encoding, delay=delay,
                                     errors=errors)
        # Remember the open parameters so subclasses can reopen the stream
        # after a rollover.
        self.mode = mode
        self.encoding = encoding
        self.errors = errors

    def emit(self, record):
        """
        Emit a record, performing a rollover first if the subclass's
        shouldRollover() asks for one.
        """
        try:
            if self.shouldRollover(record):
                self.doRollover()
            logging.FileHandler.emit(self, record)
        except Exception:
            self.handleError(record)

    def rotation_filename(self, default_name):
        """
        Return the filename to use for a rotated log file.

        The handler's ``namer`` attribute, when callable, is applied to
        *default_name* and its result used; otherwise *default_name* is
        returned unchanged.

        :param default_name: The default name for the log file.
        """
        namer = self.namer
        return namer(default_name) if callable(namer) else default_name

    def rotate(self, source, dest):
        """
        Rotate the current log from *source* to *dest*.

        The handler's ``rotator`` attribute, when callable, performs the
        rotation; otherwise *source* is simply renamed to *dest*.

        :param source: The source filename. This is normally the base
                       filename, e.g. 'test.log'
        :param dest: The destination filename. This is normally
                     what the source is rotated to, e.g. 'test.log.1'.
        """
        if callable(self.rotator):
            self.rotator(source, dest)
        elif os.path.exists(source):
            # Issue 18940: with delay=True the file may never have been created.
            os.rename(source, dest)
class RotatingFileHandler(BaseRotatingHandler):
    """
    Handler for logging to a set of files, which switches from one file
    to the next when the current file reaches a certain size.
    """
    def __init__(self, filename, mode='a', maxBytes=0, backupCount=0,
                 encoding=None, delay=False, errors=None):
        """
        Open the specified file and use it as the stream for logging.

        By default, the file grows indefinitely. You can specify particular
        values of maxBytes and backupCount to allow the file to rollover at
        a predetermined size.

        Rollover occurs whenever the current log file is nearly maxBytes in
        length. If backupCount is >= 1, the system will successively create
        new files with the same pathname as the base file, but with extensions
        ".1", ".2" etc. appended to it. For example, with a backupCount of 5
        and a base file name of "app.log", you would get "app.log",
        "app.log.1", "app.log.2", ... through to "app.log.5". The file being
        written to is always "app.log" - when it gets filled up, it is closed
        and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc.
        exist, then they are renamed to "app.log.2", "app.log.3" etc.
        respectively.

        If maxBytes is zero, rollover never occurs.
        """
        # If rotation/rollover is wanted, it doesn't make sense to use another
        # mode. If for example 'w' were specified, then if there were multiple
        # runs of the calling application, the logs from previous runs would be
        # lost if the 'w' is respected, because the log file would be truncated
        # on each run.
        if maxBytes > 0:
            mode = 'a'
        BaseRotatingHandler.__init__(self, filename, mode, encoding=encoding,
                                     delay=delay, errors=errors)
        self.maxBytes = maxBytes
        self.backupCount = backupCount
    def doRollover(self):
        """
        Do a rollover, as described in __init__().
        """
        if self.stream:
            self.stream.close()
            self.stream = None
        if self.backupCount > 0:
            # Shift existing backups up by one (app.log.1 -> app.log.2, ...),
            # iterating highest-first so nothing is overwritten; the oldest
            # backup (.backupCount) is discarded.
            for i in range(self.backupCount - 1, 0, -1):
                sfn = self.rotation_filename("%s.%d" % (self.baseFilename, i))
                dfn = self.rotation_filename("%s.%d" % (self.baseFilename,
                                                        i + 1))
                if os.path.exists(sfn):
                    if os.path.exists(dfn):
                        os.remove(dfn)
                    os.rename(sfn, dfn)
            # The live log becomes backup .1 (via the rotate/rotator hook).
            dfn = self.rotation_filename(self.baseFilename + ".1")
            if os.path.exists(dfn):
                os.remove(dfn)
            self.rotate(self.baseFilename, dfn)
        if not self.delay:
            # Reopen the (now empty) base file unless opening is deferred.
            self.stream = self._open()
    def shouldRollover(self, record):
        """
        Determine if rollover should occur.

        Basically, see if the supplied record would cause the file to exceed
        the size limit we have.  Returns 1/0 rather than True/False for
        historical reasons.
        """
        if self.stream is None:                 # delay was set...
            self.stream = self._open()
        if self.maxBytes > 0:                   # are we rolling over?
            msg = "%s\n" % self.format(record)
            self.stream.seek(0, 2)  #due to non-posix-compliant Windows feature
            if self.stream.tell() + len(msg) >= self.maxBytes:
                return 1
        return 0
class TimedRotatingFileHandler(BaseRotatingHandler):
    """
    Handler for logging to a file, rotating the log file at certain timed
    intervals.

    If backupCount is > 0, when rollover is done, no more than backupCount
    files are kept - the oldest ones are deleted.  Rotated files are named
    with a date/time suffix appended to the base filename.
    """
    def __init__(self, filename, when='h', interval=1, backupCount=0,
                 encoding=None, delay=False, utc=False, atTime=None,
                 errors=None):
        BaseRotatingHandler.__init__(self, filename, 'a', encoding=encoding,
                                     delay=delay, errors=errors)
        # Normalised to upper case so 'h' and 'H' behave identically.
        self.when = when.upper()
        self.backupCount = backupCount
        self.utc = utc
        # atTime (a datetime.time) only matters for MIDNIGHT/weekly rollover;
        # it gives the wall-clock time of day at which to rotate.
        self.atTime = atTime
        # Calculate the real rollover interval, which is just the number of
        # seconds between rollovers. Also set the filename suffix used when
        # a rollover occurs. Current 'when' events supported:
        # S - Seconds
        # M - Minutes
        # H - Hours
        # D - Days
        # midnight - roll over at midnight
        # W{0-6} - roll over on a certain day; 0 - Monday
        #
        # Case of the 'when' specifier is not important; lower or upper case
        # will work.
        if self.when == 'S':
            self.interval = 1 # one second
            self.suffix = "%Y-%m-%d_%H-%M-%S"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}(\.\w+)?$"
        elif self.when == 'M':
            self.interval = 60 # one minute
            self.suffix = "%Y-%m-%d_%H-%M"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}(\.\w+)?$"
        elif self.when == 'H':
            self.interval = 60 * 60 # one hour
            self.suffix = "%Y-%m-%d_%H"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}(\.\w+)?$"
        elif self.when == 'D' or self.when == 'MIDNIGHT':
            self.interval = 60 * 60 * 24 # one day
            self.suffix = "%Y-%m-%d"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}(\.\w+)?$"
        elif self.when.startswith('W'):
            self.interval = 60 * 60 * 24 * 7 # one week
            if len(self.when) != 2:
                raise ValueError("You must specify a day for weekly rollover from 0 to 6 (0 is Monday): %s" % self.when)
            if self.when[1] < '0' or self.when[1] > '6':
                raise ValueError("Invalid day specified for weekly rollover: %s" % self.when)
            self.dayOfWeek = int(self.when[1])
            self.suffix = "%Y-%m-%d"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}(\.\w+)?$"
        else:
            raise ValueError("Invalid rollover interval specified: %s" % self.when)
        # extMatch is used by getFilesToDelete() to recognise rotated files;
        # the optional (\.\w+)? tail allows for a namer-added extension.
        self.extMatch = re.compile(self.extMatch, re.ASCII)
        self.interval = self.interval * interval # multiply by units requested
        # The following line added because the filename passed in could be a
        # path object (see Issue #27493), but self.baseFilename will be a string
        filename = self.baseFilename
        # Base the first rollover on the file's mtime if it already exists,
        # so restarts don't reset the rotation schedule.
        if os.path.exists(filename):
            t = os.stat(filename)[ST_MTIME]
        else:
            t = int(time.time())
        self.rolloverAt = self.computeRollover(t)
    def computeRollover(self, currentTime):
        """
        Work out the rollover time based on the specified time.

        Returns an epoch timestamp for the next scheduled rollover.
        """
        result = currentTime + self.interval
        # If we are rolling over at midnight or weekly, then the interval is already known.
        # What we need to figure out is WHEN the next interval is. In other words,
        # if you are rolling over at midnight, then your base interval is 1 day,
        # but you want to start that one day clock at midnight, not now. So, we
        # have to fudge the rolloverAt value in order to trigger the first rollover
        # at the right time. After that, the regular interval will take care of
        # the rest. Note that this code doesn't care about leap seconds. :)
        if self.when == 'MIDNIGHT' or self.when.startswith('W'):
            # This could be done with less code, but I wanted it to be clear
            if self.utc:
                t = time.gmtime(currentTime)
            else:
                t = time.localtime(currentTime)
            currentHour = t[3]
            currentMinute = t[4]
            currentSecond = t[5]
            currentDay = t[6]
            # r is the number of seconds left between now and the next rotation
            if self.atTime is None:
                rotate_ts = _MIDNIGHT
            else:
                rotate_ts = ((self.atTime.hour * 60 + self.atTime.minute)*60 +
                    self.atTime.second)
            r = rotate_ts - ((currentHour * 60 + currentMinute) * 60 +
                currentSecond)
            if r < 0:
                # Rotate time is before the current time (for example when
                # self.rotateAt is 13:45 and it now 14:15), rotation is
                # tomorrow.
                r += _MIDNIGHT
                currentDay = (currentDay + 1) % 7
            result = currentTime + r
            # If we are rolling over on a certain day, add in the number of days until
            # the next rollover, but offset by 1 since we just calculated the time
            # until the next day starts. There are three cases:
            # Case 1) The day to rollover is today; in this case, do nothing
            # Case 2) The day to rollover is further in the interval (i.e., today is
            #         day 2 (Wednesday) and rollover is on day 6 (Sunday). Days to
            #         next rollover is simply 6 - 2 - 1, or 3.
            # Case 3) The day to rollover is behind us in the interval (i.e., today
            #         is day 5 (Saturday) and rollover is on day 3 (Thursday).
            #         Days to rollover is 6 - 5 + 3, or 4. In this case, it's the
            #         number of days left in the current week (1) plus the number
            #         of days in the next week until the rollover day (3).
            # The calculations described in 2) and 3) above need to have a day added.
            # This is because the above time calculation takes us to midnight on this
            # day, i.e. the start of the next day.
            if self.when.startswith('W'):
                day = currentDay # 0 is Monday
                if day != self.dayOfWeek:
                    if day < self.dayOfWeek:
                        daysToWait = self.dayOfWeek - day
                    else:
                        daysToWait = 6 - day + self.dayOfWeek + 1
                    newRolloverAt = result + (daysToWait * (60 * 60 * 24))
                    if not self.utc:
                        dstNow = t[-1]
                        dstAtRollover = time.localtime(newRolloverAt)[-1]
                        if dstNow != dstAtRollover:
                            if not dstNow:  # DST kicks in before next rollover, so we need to deduct an hour
                                addend = -3600
                            else:           # DST bows out before next rollover, so we need to add an hour
                                addend = 3600
                            newRolloverAt += addend
                    result = newRolloverAt
        return result
    def shouldRollover(self, record):
        """
        Determine if rollover should occur.

        record is not used, as we are just comparing times, but it is needed so
        the method signatures are the same
        """
        t = int(time.time())
        if t >= self.rolloverAt:
            return 1
        return 0
    def getFilesToDelete(self):
        """
        Determine the files to delete when rolling over.

        Scans the log directory for files matching "<base>.<suffix>" (as
        recognised by extMatch) and returns the oldest ones beyond
        backupCount, sorted, as full paths.
        """
        dirName, baseName = os.path.split(self.baseFilename)
        fileNames = os.listdir(dirName)
        result = []
        prefix = baseName + "."
        plen = len(prefix)
        for fileName in fileNames:
            if fileName[:plen] == prefix:
                suffix = fileName[plen:]
                if self.extMatch.match(suffix):
                    result.append(os.path.join(dirName, fileName))
        if len(result) < self.backupCount:
            result = []
        else:
            result.sort()
            result = result[:len(result) - self.backupCount]
        return result
    def doRollover(self):
        """
        do a rollover; in this case, a date/time stamp is appended to the filename
        when the rollover happens.  However, you want the file to be named for the
        start of the interval, not the current time.  If there is a backup count,
        then we have to get a list of matching filenames, sort them and remove
        the one with the oldest suffix.
        """
        if self.stream:
            self.stream.close()
            self.stream = None
        # get the time that this sequence started at and make it a TimeTuple
        currentTime = int(time.time())
        dstNow = time.localtime(currentTime)[-1]
        t = self.rolloverAt - self.interval
        if self.utc:
            timeTuple = time.gmtime(t)
        else:
            timeTuple = time.localtime(t)
            dstThen = timeTuple[-1]
            # If DST changed between the interval start and now, shift the
            # suffix timestamp so it reflects wall-clock time at the start.
            if dstNow != dstThen:
                if dstNow:
                    addend = 3600
                else:
                    addend = -3600
                timeTuple = time.localtime(t + addend)
        dfn = self.rotation_filename(self.baseFilename + "." +
                                     time.strftime(self.suffix, timeTuple))
        if os.path.exists(dfn):
            os.remove(dfn)
        self.rotate(self.baseFilename, dfn)
        if self.backupCount > 0:
            for s in self.getFilesToDelete():
                os.remove(s)
        if not self.delay:
            self.stream = self._open()
        newRolloverAt = self.computeRollover(currentTime)
        # Guard against a rollover time still in the past (e.g. after the
        # machine was suspended): advance by whole intervals until it's ahead.
        while newRolloverAt <= currentTime:
            newRolloverAt = newRolloverAt + self.interval
        #If DST changes and midnight or weekly rollover, adjust for this.
        if (self.when == 'MIDNIGHT' or self.when.startswith('W')) and not self.utc:
            dstAtRollover = time.localtime(newRolloverAt)[-1]
            if dstNow != dstAtRollover:
                if not dstNow:  # DST kicks in before next rollover, so we need to deduct an hour
                    addend = -3600
                else:           # DST bows out before next rollover, so we need to add an hour
                    addend = 3600
                newRolloverAt += addend
        self.rolloverAt = newRolloverAt
class WatchedFileHandler(logging.FileHandler):
    """
    A FileHandler variant that watches the file it writes to, reopening it
    if an external program (newsyslog, logrotate, ...) moves or recreates
    the file underneath us.  A change is detected by comparing the device
    and inode of the open stream against a fresh stat of the path.

    Intended for Unix.  On Windows, logging holds the file open with an
    exclusive lock so it cannot be moved or renamed - this handler is not
    needed (and ST_INO is always reported as zero there).

    Based on a suggestion and patch by Chad J. Schroeder.
    """

    def __init__(self, filename, mode='a', encoding=None, delay=False,
                 errors=None):
        logging.FileHandler.__init__(self, filename, mode=mode,
                                     encoding=encoding, delay=delay,
                                     errors=errors)
        # Sentinel values until a real stream has been stat'ed.
        self.dev, self.ino = -1, -1
        self._statstream()

    def _statstream(self):
        # Record the device/inode of the currently open stream so a later
        # rename or recreation of the path can be detected.
        if self.stream:
            stream_stat = os.fstat(self.stream.fileno())
            self.dev, self.ino = stream_stat[ST_DEV], stream_stat[ST_INO]

    def reopenIfNeeded(self):
        """
        Reopen log file if needed.

        Stat the path once; if it is gone or no longer refers to the same
        device/inode as our open stream, close the old stream and open a
        fresh one.  (Stat-by-path once, then fstat the new fd, reduces the
        race window - see issue #14632, thanks to John Mulligan.)
        """
        try:
            path_stat = os.stat(self.baseFilename)
        except FileNotFoundError:
            path_stat = None
        same_file = (path_stat is not None and
                     path_stat[ST_DEV] == self.dev and
                     path_stat[ST_INO] == self.ino)
        if same_file:
            return
        if self.stream is not None:
            # Clean up the stale handle before reopening.
            self.stream.flush()
            self.stream.close()
            self.stream = None  # See Issue #21742: _open() might fail.
        # Open a new handle and refresh our dev/ino from its fd.
        self.stream = self._open()
        self._statstream()

    def emit(self, record):
        """
        Emit a record, reopening the underlying file first if it changed.
        """
        self.reopenIfNeeded()
        logging.FileHandler.emit(self, record)
class SocketHandler(logging.Handler):
    """
    A handler class which writes logging records, in pickle format, to
    a streaming socket. The socket is kept open across logging calls.
    If the peer resets it, an attempt is made to reconnect on the next call.
    The pickle which is sent is that of the LogRecord's attribute dictionary
    (__dict__), so that the receiver does not need to have the logging module
    installed in order to process the logging event.
    To unpickle the record at the receiving end into a LogRecord, use the
    makeLogRecord function.
    """
    def __init__(self, host, port):
        """
        Initializes the handler with a specific host address and port.
        When the attribute *closeOnError* is set to True - if a socket error
        occurs, the socket is silently closed and then reopened on the next
        logging call.
        """
        logging.Handler.__init__(self)
        self.host = host
        self.port = port
        # port=None means `host` is a Unix domain socket path.
        if port is None:
            self.address = host
        else:
            self.address = (host, port)
        self.sock = None
        self.closeOnError = False
        # retryTime is None until a connection attempt fails; afterwards it
        # holds the earliest time at which another attempt will be made.
        self.retryTime = None
        #
        # Exponential backoff parameters.
        #
        self.retryStart = 1.0
        self.retryMax = 30.0
        self.retryFactor = 2.0
    def makeSocket(self, timeout=1):
        """
        A factory method which allows subclasses to define the precise
        type of socket they want.
        """
        if self.port is not None:
            result = socket.create_connection(self.address, timeout=timeout)
        else:
            result = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            result.settimeout(timeout)
            try:
                result.connect(self.address)
            except OSError:
                result.close()  # Issue 19182
                raise
        return result
    def createSocket(self):
        """
        Try to create a socket, using an exponential backoff with
        a max retry time. Thanks to Robert Olson for the original patch
        (SF #815911) which has been slightly refactored.
        """
        now = time.time()
        # Either retryTime is None, in which case this
        # is the first time back after a disconnect, or
        # we've waited long enough.
        if self.retryTime is None:
            attempt = True
        else:
            attempt = (now >= self.retryTime)
        if attempt:
            try:
                self.sock = self.makeSocket()
                self.retryTime = None # next time, no delay before trying
            except OSError:
                #Creation failed, so set the retry time and return.
                if self.retryTime is None:
                    self.retryPeriod = self.retryStart
                else:
                    self.retryPeriod = self.retryPeriod * self.retryFactor
                    if self.retryPeriod > self.retryMax:
                        self.retryPeriod = self.retryMax
                self.retryTime = now + self.retryPeriod
    def send(self, s):
        """
        Send a pickled string to the socket.
        This function allows for partial sends which can happen when the
        network is busy.
        """
        if self.sock is None:
            self.createSocket()
        #self.sock can be None either because we haven't reached the retry
        #time yet, or because we have reached the retry time and retried,
        #but are still unable to connect.
        if self.sock:
            try:
                self.sock.sendall(s)
            except OSError: #pragma: no cover
                self.sock.close()
                self.sock = None  # so we can call createSocket next time
    def makePickle(self, record):
        """
        Pickles the record in binary format with a length prefix, and
        returns it ready for transmission across the socket.
        """
        ei = record.exc_info
        if ei:
            # just to get traceback text into record.exc_text ...
            dummy = self.format(record)
        # See issue #14436: If msg or args are objects, they may not be
        # available on the receiving end. So we convert the msg % args
        # to a string, save it as msg and zap the args.
        d = dict(record.__dict__)
        d['msg'] = record.getMessage()
        d['args'] = None
        d['exc_info'] = None
        # Issue #25685: delete 'message' if present: redundant with 'msg'
        d.pop('message', None)
        # Protocol 1 keeps the payload readable by older Python receivers.
        s = pickle.dumps(d, 1)
        # 4-byte big-endian length prefix lets the receiver frame the stream.
        slen = struct.pack(">L", len(s))
        return slen + s
    def handleError(self, record):
        """
        Handle an error during logging.
        An error has occurred during logging. Most likely cause -
        connection lost. Close the socket so that we can retry on the
        next event.
        """
        if self.closeOnError and self.sock:
            self.sock.close()
            self.sock = None        #try to reconnect next time
        else:
            logging.Handler.handleError(self, record)
    def emit(self, record):
        """
        Emit a record.
        Pickles the record and writes it to the socket in binary format.
        If there is an error with the socket, silently drop the packet.
        If there was a problem with the socket, re-establishes the
        socket.
        """
        try:
            s = self.makePickle(record)
            self.send(s)
        except Exception:
            self.handleError(record)
    def close(self):
        """
        Closes the socket.
        """
        self.acquire()
        try:
            sock = self.sock
            if sock:
                self.sock = None
                sock.close()
            logging.Handler.close(self)
        finally:
            self.release()
class DatagramHandler(SocketHandler):
    """
    A SocketHandler variant that writes pickled logging records to a
    datagram (UDP) socket rather than a stream socket.  As with the parent
    class, the payload is the pickle of the LogRecord's __dict__, so the
    receiver does not need the logging module to process the event; use
    makeLogRecord to reconstitute a LogRecord on the receiving end.
    """

    def __init__(self, host, port):
        """
        Initializes the handler with a specific host address and port.
        """
        SocketHandler.__init__(self, host, port)
        # Datagram sockets have no connection to lose, so there is never a
        # reason to close on error.
        self.closeOnError = False

    def makeSocket(self):
        """
        Override the parent factory to produce a UDP socket (SOCK_DGRAM).
        A port of None selects a Unix domain datagram socket instead.
        """
        family = socket.AF_UNIX if self.port is None else socket.AF_INET
        return socket.socket(family, socket.SOCK_DGRAM)

    def send(self, s):
        """
        Send a pickled string to a socket.

        Partial sends are not handled here: UDP neither guarantees delivery
        nor ordering, so there is nothing useful to retry.
        """
        if self.sock is None:
            self.createSocket()
        self.sock.sendto(s, self.address)
class SysLogHandler(logging.Handler):
    """
    A handler class which sends formatted logging records to a syslog
    server. Based on Sam Rushing's syslog module:
    http://www.nightmare.com/squirl/python-ext/misc/syslog.py
    Contributed by Nicolas Untz (after which minor refactoring changes
    have been made).
    """
    # from <linux/sys/syslog.h>:
    # ======================================================================
    # priorities/facilities are encoded into a single 32-bit quantity, where
    # the bottom 3 bits are the priority (0-7) and the top 28 bits are the
    # facility (0-big number). Both the priorities and the facilities map
    # roughly one-to-one to strings in the syslogd(8) source code. This
    # mapping is included in this file.
    #
    # priorities (these are ordered)
    LOG_EMERG     = 0       #  system is unusable
    LOG_ALERT     = 1       #  action must be taken immediately
    LOG_CRIT      = 2       #  critical conditions
    LOG_ERR       = 3       #  error conditions
    LOG_WARNING   = 4       #  warning conditions
    LOG_NOTICE    = 5       #  normal but significant condition
    LOG_INFO      = 6       #  informational
    LOG_DEBUG     = 7       #  debug-level messages
    #  facility codes
    LOG_KERN      = 0       #  kernel messages
    LOG_USER      = 1       #  random user-level messages
    LOG_MAIL      = 2       #  mail system
    LOG_DAEMON    = 3       #  system daemons
    LOG_AUTH      = 4       #  security/authorization messages
    LOG_SYSLOG    = 5       #  messages generated internally by syslogd
    LOG_LPR       = 6       #  line printer subsystem
    LOG_NEWS      = 7       #  network news subsystem
    LOG_UUCP      = 8       #  UUCP subsystem
    LOG_CRON      = 9       #  clock daemon
    LOG_AUTHPRIV  = 10      #  security/authorization messages (private)
    LOG_FTP       = 11      #  FTP daemon
    LOG_NTP       = 12      #  NTP subsystem
    LOG_SECURITY  = 13      #  Log audit
    LOG_CONSOLE   = 14      #  Log alert
    LOG_SOLCRON   = 15      #  Scheduling daemon (Solaris)
    #  other codes through 15 reserved for system use
    LOG_LOCAL0    = 16      #  reserved for local use
    LOG_LOCAL1    = 17      #  reserved for local use
    LOG_LOCAL2    = 18      #  reserved for local use
    LOG_LOCAL3    = 19      #  reserved for local use
    LOG_LOCAL4    = 20      #  reserved for local use
    LOG_LOCAL5    = 21      #  reserved for local use
    LOG_LOCAL6    = 22      #  reserved for local use
    LOG_LOCAL7    = 23      #  reserved for local use
    # Maps string priority names (as used by callers) to the numeric codes.
    priority_names = {
        "alert":    LOG_ALERT,
        "crit":     LOG_CRIT,
        "critical": LOG_CRIT,
        "debug":    LOG_DEBUG,
        "emerg":    LOG_EMERG,
        "err":      LOG_ERR,
        "error":    LOG_ERR,        #  DEPRECATED
        "info":     LOG_INFO,
        "notice":   LOG_NOTICE,
        "panic":    LOG_EMERG,      #  DEPRECATED
        "warn":     LOG_WARNING,    #  DEPRECATED
        "warning":  LOG_WARNING,
        }
    # Maps string facility names to the numeric codes.
    facility_names = {
        "auth":     LOG_AUTH,
        "authpriv": LOG_AUTHPRIV,
        "console":  LOG_CONSOLE,
        "cron":     LOG_CRON,
        "daemon":   LOG_DAEMON,
        "ftp":      LOG_FTP,
        "kern":     LOG_KERN,
        "lpr":      LOG_LPR,
        "mail":     LOG_MAIL,
        "news":     LOG_NEWS,
        "ntp":      LOG_NTP,
        "security": LOG_SECURITY,
        "solaris-cron": LOG_SOLCRON,
        "syslog":   LOG_SYSLOG,
        "user":     LOG_USER,
        "uucp":     LOG_UUCP,
        "local0":   LOG_LOCAL0,
        "local1":   LOG_LOCAL1,
        "local2":   LOG_LOCAL2,
        "local3":   LOG_LOCAL3,
        "local4":   LOG_LOCAL4,
        "local5":   LOG_LOCAL5,
        "local6":   LOG_LOCAL6,
        "local7":   LOG_LOCAL7,
        }
    #The map below appears to be trivially lowercasing the key. However,
    #there's more to it than meets the eye - in some locales, lowercasing
    #gives unexpected results. See SF #1524081: in the Turkish locale,
    #"INFO".lower() != "info"
    priority_map = {
        "DEBUG" : "debug",
        "INFO" : "info",
        "WARNING" : "warning",
        "ERROR" : "error",
        "CRITICAL" : "critical"
    }
    def __init__(self, address=('localhost', SYSLOG_UDP_PORT),
                 facility=LOG_USER, socktype=None):
        """
        Initialize a handler.
        If address is specified as a string, a UNIX socket is used. To log to a
        local syslogd, "SysLogHandler(address="/dev/log")" can be used.
        If facility is not specified, LOG_USER is used. If socktype is
        specified as socket.SOCK_DGRAM or socket.SOCK_STREAM, that specific
        socket type will be used. For Unix sockets, you can also specify a
        socktype of None, in which case socket.SOCK_DGRAM will be used, falling
        back to socket.SOCK_STREAM.
        """
        logging.Handler.__init__(self)
        self.address = address
        self.facility = facility
        self.socktype = socktype
        if isinstance(address, str):
            self.unixsocket = True
            # Syslog server may be unavailable during handler initialisation.
            # C's openlog() function also ignores connection errors.
            # Moreover, we ignore these errors while logging, so it not worse
            # to ignore it also here.
            try:
                self._connect_unixsocket(address)
            except OSError:
                pass
        else:
            self.unixsocket = False
            if socktype is None:
                socktype = socket.SOCK_DGRAM
            host, port = address
            ress = socket.getaddrinfo(host, port, 0, socktype)
            if not ress:
                raise OSError("getaddrinfo returns an empty list")
            # Try each resolved address in turn; keep the first socket that
            # can be created (and, for TCP, connected).
            for res in ress:
                af, socktype, proto, _, sa = res
                err = sock = None
                try:
                    sock = socket.socket(af, socktype, proto)
                    if socktype == socket.SOCK_STREAM:
                        sock.connect(sa)
                    break
                except OSError as exc:
                    err = exc
                    if sock is not None:
                        sock.close()
            if err is not None:
                raise err
            self.socket = sock
            self.socktype = socktype
    def _connect_unixsocket(self, address):
        # Connect to a Unix domain socket, preferring SOCK_DGRAM when the
        # caller left socktype unspecified, falling back to SOCK_STREAM.
        use_socktype = self.socktype
        if use_socktype is None:
            use_socktype = socket.SOCK_DGRAM
        self.socket = socket.socket(socket.AF_UNIX, use_socktype)
        try:
            self.socket.connect(address)
            # it worked, so set self.socktype to the used type
            self.socktype = use_socktype
        except OSError:
            self.socket.close()
            if self.socktype is not None:
                # user didn't specify falling back, so fail
                raise
            use_socktype = socket.SOCK_STREAM
            self.socket = socket.socket(socket.AF_UNIX, use_socktype)
            try:
                self.socket.connect(address)
                # it worked, so set self.socktype to the used type
                self.socktype = use_socktype
            except OSError:
                self.socket.close()
                raise
    def encodePriority(self, facility, priority):
        """
        Encode the facility and priority. You can pass in strings or
        integers - if strings are passed, the facility_names and
        priority_names mapping dictionaries are used to convert them to
        integers.
        """
        if isinstance(facility, str):
            facility = self.facility_names[facility]
        if isinstance(priority, str):
            priority = self.priority_names[priority]
        # Syslog PRI value: facility in the high bits, priority in the low 3.
        return (facility << 3) | priority
    def close(self):
        """
        Closes the socket.
        """
        self.acquire()
        try:
            self.socket.close()
            logging.Handler.close(self)
        finally:
            self.release()
    def mapPriority(self, levelName):
        """
        Map a logging level name to a key in the priority_names map.
        This is useful in two scenarios: when custom levels are being
        used, and in the case where you can't do a straightforward
        mapping by lowercasing the logging level name because of locale-
        specific issues (see SF #1524081).
        """
        return self.priority_map.get(levelName, "warning")
    ident = ''          # prepended to all messages
    append_nul = True   # some old syslog daemons expect a NUL terminator
    def emit(self, record):
        """
        Emit a record.
        The record is formatted, and then sent to the syslog server. If
        exception information is present, it is NOT sent to the server.
        """
        try:
            msg = self.format(record)
            if self.ident:
                msg = self.ident + msg
            if self.append_nul:
                msg += '\000'
            # We need to convert record level to lowercase, maybe this will
            # change in the future.
            prio = '<%d>' % self.encodePriority(self.facility,
                                                self.mapPriority(record.levelname))
            prio = prio.encode('utf-8')
            # Message is a string. Convert to bytes as required by RFC 5424
            msg = msg.encode('utf-8')
            msg = prio + msg
            if self.unixsocket:
                try:
                    self.socket.send(msg)
                except OSError:
                    # The local syslog daemon may have been restarted; try to
                    # reconnect once and resend.
                    self.socket.close()
                    self._connect_unixsocket(self.address)
                    self.socket.send(msg)
            elif self.socktype == socket.SOCK_DGRAM:
                self.socket.sendto(msg, self.address)
            else:
                self.socket.sendall(msg)
        except Exception:
            self.handleError(record)
class SMTPHandler(logging.Handler):
    """
    A handler class which sends an SMTP email for each logging event.
    """

    def __init__(self, mailhost, fromaddr, toaddrs, subject,
                 credentials=None, secure=None, timeout=5.0):
        """
        Initialize the handler.

        Initialize the instance with the from and to addresses and subject
        line of the email. To specify a non-standard SMTP port, use the
        (host, port) tuple format for the mailhost argument. To specify
        authentication credentials, supply a (username, password) tuple
        for the credentials argument. To specify the use of a secure
        protocol (TLS), pass in a tuple for the secure argument. This will
        only be used when authentication credentials are supplied. The tuple
        will be either an empty tuple, or a single-value tuple with the name
        of a keyfile, or a 2-value tuple with the names of the keyfile and
        certificate file. (This tuple is passed to the `starttls` method).
        A timeout in seconds can be specified for the SMTP connection (the
        default is one second).
        """
        logging.Handler.__init__(self)
        if isinstance(mailhost, (list, tuple)):
            self.mailhost, self.mailport = mailhost
        else:
            self.mailhost, self.mailport = mailhost, None
        if isinstance(credentials, (list, tuple)):
            self.username, self.password = credentials
        else:
            self.username = None
        self.fromaddr = fromaddr
        # Accept a single address string as shorthand for a one-element list.
        self.toaddrs = [toaddrs] if isinstance(toaddrs, str) else toaddrs
        self.subject = subject
        self.secure = secure
        self.timeout = timeout

    def getSubject(self, record):
        """
        Determine the subject for the email.

        Override this method to compute a record-dependent subject line.
        """
        return self.subject

    def emit(self, record):
        """
        Emit a record.

        Format the record and send it to the specified addressees.
        """
        try:
            import smtplib
            from email.message import EmailMessage
            import email.utils

            # Fall back to the standard SMTP port when none was given.
            port = self.mailport or smtplib.SMTP_PORT
            smtp = smtplib.SMTP(self.mailhost, port, timeout=self.timeout)
            msg = EmailMessage()
            msg['From'] = self.fromaddr
            msg['To'] = ','.join(self.toaddrs)
            msg['Subject'] = self.getSubject(record)
            msg['Date'] = email.utils.localtime()
            msg.set_content(self.format(record))
            if self.username:
                if self.secure is not None:
                    # STARTTLS handshake; the secure tuple carries optional
                    # keyfile/certfile names.
                    smtp.ehlo()
                    smtp.starttls(*self.secure)
                    smtp.ehlo()
                smtp.login(self.username, self.password)
            smtp.send_message(msg)
            smtp.quit()
        except Exception:
            self.handleError(record)
class NTEventLogHandler(logging.Handler):
    """
    A handler class which sends events to the NT Event Log. Adds a
    registry entry for the specified application name. If no dllname is
    provided, win32service.pyd (which contains some basic message
    placeholders) is used. Note that use of these placeholders will make
    your event logs big, as the entire message source is held in the log.
    If you want slimmer logs, you have to pass in the name of your own DLL
    which contains the message definitions you want to use in the event log.
    """

    def __init__(self, appname, dllname=None, logtype="Application"):
        logging.Handler.__init__(self)
        try:
            import win32evtlogutil, win32evtlog
            self.appname = appname
            self._welu = win32evtlogutil
            if not dllname:
                # Default to win32service.pyd, located two directory levels
                # above the win32evtlogutil module.
                head, _tail = os.path.split(self._welu.__file__)
                head, _tail = os.path.split(head)
                dllname = os.path.join(head, r'win32service.pyd')
            self.dllname = dllname
            self.logtype = logtype
            self._welu.AddSourceToRegistry(appname, dllname, logtype)
            self.deftype = win32evtlog.EVENTLOG_ERROR_TYPE
            self.typemap = {
                logging.DEBUG:    win32evtlog.EVENTLOG_INFORMATION_TYPE,
                logging.INFO:     win32evtlog.EVENTLOG_INFORMATION_TYPE,
                logging.WARNING:  win32evtlog.EVENTLOG_WARNING_TYPE,
                logging.ERROR:    win32evtlog.EVENTLOG_ERROR_TYPE,
                logging.CRITICAL: win32evtlog.EVENTLOG_ERROR_TYPE,
            }
        except ImportError:
            # pywin32 is not installed; the handler becomes a no-op.
            print("The Python Win32 extensions for NT (service, event "
                  "logging) appear not to be available.")
            self._welu = None

    def getMessageID(self, record):
        """
        Return the message ID for the event record. If you are using your
        own messages, you could do this by having the msg passed to the
        logger being an ID rather than a formatting string. Then, in here,
        you could use a dictionary lookup to get the message ID. This
        version returns 1, which is the base message ID in win32service.pyd.
        """
        return 1

    def getEventCategory(self, record):
        """
        Return the event category for the record.

        Override this if you want to specify your own categories. This version
        returns 0.
        """
        return 0

    def getEventType(self, record):
        """
        Return the event type for the record.

        Override this if you want to specify your own types. This version does
        a mapping using the handler's typemap attribute, which is set up in
        __init__() to a dictionary which contains mappings for DEBUG, INFO,
        WARNING, ERROR and CRITICAL. If you are using your own levels you will
        either need to override this method or place a suitable dictionary in
        the handler's typemap attribute.
        """
        return self.typemap.get(record.levelno, self.deftype)

    def emit(self, record):
        """
        Emit a record.

        Determine the message ID, event category and event type, then log
        the message in the NT event log.  Does nothing when pywin32 is
        unavailable.
        """
        if not self._welu:
            return
        try:
            self._welu.ReportEvent(self.appname,
                                   self.getMessageID(record),
                                   self.getEventCategory(record),
                                   self.getEventType(record),
                                   [self.format(record)])
        except Exception:
            self.handleError(record)

    def close(self):
        """
        Clean up this handler.

        You can remove the application name from the registry as a
        source of event log entries. However, if you do this, you will
        not be able to see the events as you intended in the Event Log
        Viewer - it needs to be able to access the registry to get the
        DLL name.
        """
        #self._welu.RemoveSourceFromRegistry(self.appname, self.logtype)
        logging.Handler.close(self)
class HTTPHandler(logging.Handler):
    """
    A class which sends records to a Web server, using either GET or
    POST semantics.
    """

    def __init__(self, host, url, method="GET", secure=False, credentials=None,
                 context=None):
        """
        Initialize the instance with the host, the request URL, and the method
        ("GET" or "POST")
        """
        logging.Handler.__init__(self)
        method = method.upper()
        if method not in ["GET", "POST"]:
            raise ValueError("method must be GET or POST")
        if not secure and context is not None:
            raise ValueError("context parameter only makes sense "
                             "with secure=True")
        self.host = host
        self.url = url
        self.method = method
        self.secure = secure
        self.credentials = credentials
        self.context = context

    def mapLogRecord(self, record):
        """
        Default implementation of mapping the log record into a dict
        that is sent as the CGI data. Overwrite in your class.
        Contributed by Franz Glasner.
        """
        return record.__dict__

    def emit(self, record):
        """
        Emit a record.

        Send the record to the Web server as a percent-encoded dictionary.
        """
        try:
            import http.client, urllib.parse
            host = self.host
            if self.secure:
                conn = http.client.HTTPSConnection(host, context=self.context)
            else:
                conn = http.client.HTTPConnection(host)
            url = self.url
            data = urllib.parse.urlencode(self.mapLogRecord(record))
            if self.method == "GET":
                # Append the payload to the query string.
                sep = '&' if '?' in url else '?'
                url = url + "%c%s" % (sep, data)
            conn.putrequest(self.method, url)
            # support multiple hosts on one IP address...
            # need to strip optional :port from host, if present
            colon = host.find(":")
            if colon >= 0:
                host = host[:colon]
            # See issue #30904: putrequest call above already adds this header
            # on Python 3.x.
            # conn.putheader("Host", host)
            if self.method == "POST":
                conn.putheader("Content-type",
                               "application/x-www-form-urlencoded")
                conn.putheader("Content-length", str(len(data)))
            if self.credentials:
                import base64
                raw = ('%s:%s' % self.credentials).encode('utf-8')
                auth = 'Basic ' + base64.b64encode(raw).strip().decode('ascii')
                conn.putheader('Authorization', auth)
            conn.endheaders()
            if self.method == "POST":
                conn.send(data.encode('utf-8'))
            conn.getresponse()    #can't do anything with the result
        except Exception:
            self.handleError(record)
class BufferingHandler(logging.Handler):
    """
    A handler class which buffers logging records in memory.  Each time a
    record is appended, shouldFlush() is consulted; when it answers true,
    flush() is invoked to dispose of the buffered records.
    """

    def __init__(self, capacity):
        """
        Initialize the handler with the buffer size.
        """
        logging.Handler.__init__(self)
        self.capacity = capacity
        self.buffer = []

    def shouldFlush(self, record):
        """
        Should the handler flush its buffer?

        True once the buffer has reached capacity.  Subclasses may override
        to implement other flushing strategies.
        """
        return len(self.buffer) >= self.capacity

    def emit(self, record):
        """
        Emit a record: buffer it, then flush if shouldFlush() says so.
        """
        self.buffer.append(record)
        if not self.shouldFlush(record):
            return
        self.flush()

    def flush(self):
        """
        Override to implement custom flushing behaviour.

        This version just empties the buffer, discarding the records.
        """
        self.acquire()
        try:
            del self.buffer[:]
        finally:
            self.release()

    def close(self):
        """
        Close the handler: flush, then chain to the parent class' close().
        """
        try:
            self.flush()
        finally:
            logging.Handler.close(self)
class MemoryHandler(BufferingHandler):
    """
    A handler class which buffers logging records in memory, periodically
    flushing them to a target handler. Flushing occurs whenever the buffer
    is full, or when an event of a certain severity or greater is seen.
    """
    def __init__(self, capacity, flushLevel=logging.ERROR, target=None,
                 flushOnClose=True):
        """
        Initialize with a buffer size, the level that forces a flush, and an
        optional target handler.

        Without a target (set here or via setTarget()) the handler is of no
        use to anyone. ``flushOnClose`` defaults to ``True`` for backward
        compatibility: close() flushes even if neither the flush level nor
        the capacity was reached. Pass ``False`` to suppress that.
        """
        super().__init__(capacity)
        self.flushLevel = flushLevel
        self.target = target
        # See Issue #26559 for why this has been added
        self.flushOnClose = flushOnClose

    def shouldFlush(self, record):
        """
        Flush when a record is at flushLevel or higher, or the buffer is full.
        """
        return (record.levelno >= self.flushLevel) or \
            (len(self.buffer) >= self.capacity)

    def setTarget(self, target):
        """
        Set the target handler for this handler.
        """
        self.target = target

    def flush(self):
        """
        Send all buffered records to the target handler, if one is set, and
        clear the buffer. Override for different behaviour.

        Note: with no target, the buffer is deliberately left intact.
        """
        self.acquire()
        try:
            target = self.target
            if target:
                for buffered in self.buffer:
                    target.handle(buffered)
                self.buffer.clear()
        finally:
            self.release()

    def close(self):
        """
        Flush (if configured to), drop the target and lose the buffer.
        """
        try:
            if self.flushOnClose:
                self.flush()
        finally:
            self.acquire()
            try:
                self.target = None
                super().close()
            finally:
                self.release()
class QueueHandler(logging.Handler):
    """
    This handler sends events to a queue. Typically, it would be used together
    with a multiprocessing Queue to centralise logging to file in one process
    (in a multi-process application), so as to avoid file write contention
    between processes.
    This code is new in Python 3.2, but this class can be copy pasted into
    user code for use with earlier Python versions.
    """
    def __init__(self, queue):
        """Initialise an instance around the passed ``queue``."""
        super().__init__()
        self.queue = queue

    def enqueue(self, record):
        """
        Enqueue a record using put_nowait.

        Override to use blocking puts, timeouts or a custom queue type.
        """
        self.queue.put_nowait(record)

    def prepare(self, record):
        """
        Prepare a record for queuing; the returned object is what gets
        enqueued.

        Formatting merges the message and arguments (and renders any
        traceback into ``exc_text``); the merged text then replaces
        msg/args on a *copy* of the record, and exc_info/exc_text are
        cleared, since those attributes are typically unpickleable.
        Override to e.g. convert the record to a dict or JSON string.
        """
        formatted = self.format(record)
        # bpo-35726: copy so other handlers in the chain see the original.
        prepared = copy.copy(record)
        prepared.message = formatted
        prepared.msg = formatted
        prepared.args = None
        prepared.exc_info = None
        prepared.exc_text = None
        return prepared

    def emit(self, record):
        """
        Write the LogRecord to the queue, preparing it for pickling first.
        """
        try:
            self.enqueue(self.prepare(record))
        except Exception:
            self.handleError(record)
class QueueListener(object):
    """
    This class implements an internal threaded listener which watches for
    LogRecords being added to a queue, removes them and passes them to a
    list of handlers for processing.
    """
    # Placed on the queue by stop() to tell the monitor thread to exit.
    _sentinel = None

    def __init__(self, queue, *handlers, respect_handler_level=False):
        """Initialise with the given queue and handlers."""
        self.queue = queue
        self.handlers = handlers
        self._thread = None
        self.respect_handler_level = respect_handler_level

    def dequeue(self, block):
        """
        Remove and return one record, optionally blocking.

        Override to use timeouts or a custom queue implementation.
        """
        return self.queue.get(block)

    def start(self):
        """
        Start a daemonised background thread that monitors the queue for
        LogRecords to process.
        """
        monitor = threading.Thread(target=self._monitor)
        monitor.daemon = True
        self._thread = monitor
        monitor.start()

    def prepare(self, record):
        """
        Return ``record`` unchanged; override for custom marshalling or
        manipulation before the handlers see it.
        """
        return record

    def handle(self, record):
        """
        Offer ``record`` to each handler, honouring handler levels when
        ``respect_handler_level`` is set.
        """
        record = self.prepare(record)
        for handler in self.handlers:
            if not self.respect_handler_level or record.levelno >= handler.level:
                handler.handle(record)

    def _monitor(self):
        """
        Pull records off the queue and hand them to handle().

        Runs on the internal thread; terminates on the sentinel object.
        """
        q = self.queue
        can_mark = hasattr(q, 'task_done')

        def mark_done():
            # Only queue.Queue-style queues track unfinished tasks.
            if can_mark:
                q.task_done()

        while True:
            try:
                item = self.dequeue(True)
                if item is self._sentinel:
                    mark_done()
                    break
                self.handle(item)
                mark_done()
            except queue.Empty:
                break

    def enqueue_sentinel(self):
        """
        Put the sentinel record on the queue (via put_nowait).

        Override to use timeouts or a custom queue implementation.
        """
        self.queue.put_nowait(self._sentinel)

    def stop(self):
        """
        Ask the monitor thread to terminate and wait for it.

        If this is never called, records still on the queue at exit are
        simply not processed.
        """
        self.enqueue_sentinel()
        self._thread.join()
        self._thread = None
| 38.41465 | 120 | 0.578827 |
7cfc7099fe9481fed969b14fb697e14c8cde9d44 | 5,428 | py | Python | gdpr_assist/anonymiser.py | mopitz199/django-gdpr-assist | c7428d64bee85558541eceff1f42ecb44fc14c75 | [
"BSD-3-Clause"
] | null | null | null | gdpr_assist/anonymiser.py | mopitz199/django-gdpr-assist | c7428d64bee85558541eceff1f42ecb44fc14c75 | [
"BSD-3-Clause"
] | null | null | null | gdpr_assist/anonymiser.py | mopitz199/django-gdpr-assist | c7428d64bee85558541eceff1f42ecb44fc14c75 | [
"BSD-3-Clause"
] | 1 | 2019-12-11T13:31:57.000Z | 2019-12-11T13:31:57.000Z | """
Anonymisation functionality
"""
import datetime
import uuid
from django.db import models
from django.utils.timezone import now
from .deletion import ANONYMISE
from .exceptions import AnonymiseError
from .registry import registry
# Global registry mapping model field classes to their anonymiser functions.
anonymisers = {}


def register(*field_classes):
    """Decorator factory: register the decorated function as the anonymiser
    for every class in ``field_classes``.

    Used as ``@register(models.FooField, ...)``; returns the function
    unchanged so it remains callable under its own name.
    """
    def outer(fn):
        for field_class in field_classes:
            anonymisers[field_class] = fn
        return fn
    return outer
@register(
    models.BigIntegerField,
    models.IntegerField,
    models.PositiveIntegerField,
    models.PositiveSmallIntegerField,
    models.SmallIntegerField,
)
def anonymise_int(instance, field_name, field, value):
    """Anonymise an integer field: NULL when permitted, otherwise zero."""
    return None if field.null else 0
@register(
    models.CharField,
    models.SlugField,
    models.TextField,
)
def anonymise_char(instance, field_name, field, value):
    """Anonymise a character-based field.

    Blankable, non-unique fields become an empty string; otherwise the
    instance pk is used, so unique constraints are not violated by
    anonymising multiple rows to the same value.
    """
    # Consistency fix: use the public ``unique`` property rather than the
    # private ``_unique`` attribute, matching anonymise_uuid below. For
    # non-pk fields the two agree, and primary keys are rejected up front
    # in anonymise_field, so behaviour is unchanged on the supported path.
    if field.blank and not field.unique:
        return ''
    return str(instance.pk)
@register(
    models.BinaryField,
)
def anonymise_binary(instance, field_name, field, value):
    """Anonymise a binary field: NULL when permitted, else an empty bytestring."""
    return None if field.null else b''
@register(
    models.BooleanField,
    models.NullBooleanField,
)
def anonymise_boolean(instance, field_name, field, value):
    """Anonymise a boolean field: NULL when permitted, otherwise False."""
    return None if field.null else False
@register(
    models.DateField,
)
def anonymise_date(instance, field_name, field, value):
    """Anonymise a date field: NULL when permitted, otherwise today's date."""
    return None if field.null else datetime.date.today()
@register(
    models.DateTimeField,
)
def anonymise_datetime(instance, field_name, field, value):
    """Anonymise a datetime field: NULL when permitted, otherwise now()."""
    return None if field.null else now()
@register(
    models.TimeField,
)
def anonymise_time(instance, field_name, field, value):
    """Anonymise a time field: NULL when permitted, otherwise midnight (00:00)."""
    return None if field.null else datetime.time()
@register(
    models.DurationField,
)
def anonymise_duration(instance, field_name, field, value):
    """Anonymise a duration field: NULL when permitted, otherwise zero length."""
    return None if field.null else datetime.timedelta(0)
@register(
    models.DecimalField,
    models.FloatField,
)
def anonymise_decimal(instance, field_name, field, value):
    """Anonymise a decimal/float field: NULL when permitted, otherwise zero."""
    return None if field.null else 0
@register(
    models.FileField,
    models.FilePathField,
    models.ImageField,
)
def anonymise_file(instance, field_name, field, value):
    """Anonymise a file field.

    File contents cannot be meaningfully scrambled from here, so the only
    supported action is nulling the field; non-nullable file fields raise.
    """
    if not field.null:
        raise AnonymiseError(
            'Cannot anonymise {} - can only null file fields'.format(
                field_name,
            )
        )
    return None
@register(
    models.EmailField,
)
def anonymise_email(instance, field_name, field, value):
    """Anonymise an email field: NULL when permitted, otherwise a pk-derived
    address on a reserved example domain (keeps unique constraints intact).
    """
    return None if field.null else '{}@anon.example.com'.format(instance.pk)
@register(
    models.GenericIPAddressField,
)
def anonymise_ip(instance, field_name, field, value):
    """Anonymise an IP address field: NULL when permitted, else 0.0.0.0."""
    return None if field.null else '0.0.0.0'
@register(
    models.URLField,
)
def anonymise_url(instance, field_name, field, value):
    """Anonymise a URL field: empty when blankable, otherwise a pk-derived
    URL on a reserved example domain.
    """
    return '' if field.blank else 'http://{}.anon.example.com'.format(instance.pk)
@register(
    models.UUIDField,
)
def anonymise_uuid(instance, field_name, field, value):
    """Anonymise a UUID field.

    NULL when permitted; a fresh random UUID for unique fields (so the
    constraint stays satisfied); otherwise the fixed all-zero UUID.
    """
    if field.null:
        return None
    # uuid.UUID(int=0) is the all-zero ("nil") UUID.
    return uuid.uuid4() if field.unique else uuid.UUID(int=0)
@register(
    models.ForeignKey,
    models.OneToOneField,
)
def anonymise_relationship(instance, field_name, field, value):
    """Anonymise a relational field.

    Related rows cannot be rewritten from here, so the only supported action
    is nulling the relation; non-nullable relations raise.
    """
    if not field.null:
        raise AnonymiseError(
            'Cannot anonymise {} - can only null relationship field'.format(
                field_name,
            )
        )
    return None
@register(
    models.ManyToManyField,
)
def anonymise_manytomany(instance, field_name, field, value):
    """Always an error: a many-to-many link has no scalar value to scramble."""
    raise AnonymiseError(
        'Cannot anonymise {} - cannot anonymise ManyToManyField'.format(field_name)
    )
def anonymise_field(instance, field_name):
    """Anonymise one field of ``instance`` in place.

    Looks up the registered anonymiser for the field's class, calls it with
    the current value, and assigns the result back onto the instance.
    Raises AnonymiseError for primary keys and unregistered field types.
    """
    model = instance.__class__
    # Primary keys identify the row and must never be scrambled.
    if model._meta.pk.name == field_name:
        raise AnonymiseError('Cannot anonymise primary key')
    field = model._meta.get_field(field_name)
    current = getattr(instance, field_name)
    field_cls = field.__class__
    if field_cls not in anonymisers:
        raise AnonymiseError('Unknown field type for anonymiser')
    anonymiser = anonymisers[field_cls]
    setattr(instance, field_name, anonymiser(instance, field_name, field, current))
def anonymise_related_objects(obj, anonymised=None):
    """
    See if there are any related models which need to be anonymised.
    They will be any reverse relations to PrivacyModel subclasses where their
    OneToOneField and ForeignKey on_delete is ANONYMISE.

    ``anonymised`` accumulates already-processed objects to avoid repeats;
    the (possibly extended) list is returned.
    """
    if anonymised is None:
        anonymised = []
    for field in type(obj)._meta.get_fields():
        # Only auto-created reverse relations to registered privacy models
        # whose on_delete behaviour is ANONYMISE are of interest.
        if not (
            (field.one_to_many or field.one_to_one) and
            field.auto_created and
            not field.concrete and
            field.related_model in registry and
            isinstance(field.on_delete, ANONYMISE)
        ):
            continue
        candidates = field.related_model._base_manager.filter(
            **{str(field.field.name): obj}
        )
        for related in candidates:
            if related not in anonymised:
                related.anonymise()
                anonymised.append(related)
    return anonymised
| 21.203125 | 77 | 0.66857 |
02f0f4f5133fe5f5c604e72817e1c6e11c788a8e | 173 | py | Python | tasks.py | vlcinsky/python-fastapi-github-timeline | e066dc5811fde884af9ddf8f01d14675a9fef302 | [
"MIT"
] | 4 | 2021-08-03T09:32:41.000Z | 2022-03-25T23:06:49.000Z | tasks.py | vlcinsky/python-fastapi-github-timeline | e066dc5811fde884af9ddf8f01d14675a9fef302 | [
"MIT"
] | null | null | null | tasks.py | vlcinsky/python-fastapi-github-timeline | e066dc5811fde884af9ddf8f01d14675a9fef302 | [
"MIT"
] | 1 | 2021-11-09T08:57:39.000Z | 2021-11-09T08:57:39.000Z | from invoke import task
@task
def app_run(c):
    """Run web app in development mode"""
    # uvicorn serves the ASGI app from app.py with auto-reload on changes.
    command = "uvicorn app:app --reload --port=8000"
    c.run(command, pty=True)
| 15.727273 | 47 | 0.572254 |
81c96a61ab11f16f369f572e8938e8fa363bfec5 | 4,276 | py | Python | tests/test_cityhash.py | yujiaao/python-cityhash | bc99c439fd642adce0cf336aee7f6e4c4c9ba81e | [
"MIT"
] | 2 | 2021-03-03T08:42:46.000Z | 2021-03-06T18:07:30.000Z | tests/test_cityhash.py | yujiaao/python-cityhash | bc99c439fd642adce0cf336aee7f6e4c4c9ba81e | [
"MIT"
] | null | null | null | tests/test_cityhash.py | yujiaao/python-cityhash | bc99c439fd642adce0cf336aee7f6e4c4c9ba81e | [
"MIT"
] | 1 | 2021-03-03T08:43:53.000Z | 2021-03-03T08:43:53.000Z | import array
import unittest
import random
import string
import sys
from cityhash import (
CityHash32, CityHash64, CityHash64WithSeed, CityHash64WithSeeds,
CityHash128, CityHash128WithSeed,
)
# ``long`` was removed in Python 3; alias it to ``int`` so the type
# assertions below can reference ``long`` under either major version.
if sys.version_info[0] >= 3:
    long = int
def random_string(n, alphabet=string.ascii_lowercase):
    """Return a string of ``n`` characters drawn uniformly from ``alphabet``."""
    chars = [random.choice(alphabet) for _ in range(n)]
    return ''.join(chars)
def random_splits(string, n, nsplits=2):
    """Yield ``nsplits`` consecutive slices of ``string`` covering ``[0, n)``.

    Split points are chosen uniformly at random, so individual pieces may be
    empty. NOTE(review): the parameter name shadows the stdlib ``string``
    module within this function.
    """
    cuts = sorted(random.randint(0, n) for _ in range(nsplits - 1))
    bounds = [0] + cuts + [n]
    for start, end in zip(bounds, bounds[1:]):
        yield string[start:end]
class TestStandalone(unittest.TestCase):

    def test_string_unicode_32(self):
        """Empty byte string hashes like an empty Unicode string."""
        self.assertEqual(CityHash32(""), CityHash32(u""))

    def test_string_unicode_64(self):
        """Empty byte string hashes like an empty Unicode string."""
        self.assertEqual(CityHash64WithSeed(""), CityHash64WithSeed(u""))

    def test_string_unicode_128(self):
        """Empty byte string hashes like an empty Unicode string."""
        self.assertEqual(CityHash128WithSeed(""), CityHash128WithSeed(u""))

    def test_consistent_encoding_32(self):
        """ASCII-range Unicode hashes like its UTF-8 encoding."""
        word = u"abracadabra"
        self.assertEqual(CityHash32(word), CityHash32(word.encode("utf-8")))

    def test_consistent_encoding_64(self):
        """ASCII-range Unicode hashes like its UTF-8 encoding."""
        word = u"abracadabra"
        self.assertEqual(
            CityHash64WithSeed(word),
            CityHash64WithSeed(word.encode("utf-8")),
        )

    def test_consistent_encoding_128(self):
        """ASCII-range Unicode hashes like its UTF-8 encoding."""
        word = u"abracadabra"
        self.assertEqual(
            CityHash128WithSeed(word),
            CityHash128WithSeed(word.encode("utf-8")),
        )

    def test_unicode_1_32(self):
        """Accepts Unicode input."""
        self.assertIsInstance(CityHash32(u"abc"), int)

    def test_unicode_1_64(self):
        """Accepts Unicode input."""
        self.assertIsInstance(CityHash64WithSeed(u"abc"), long)

    def test_unicode_1_128(self):
        """Accepts Unicode input."""
        self.assertIsInstance(CityHash128WithSeed(u"abc"), long)

    def test_unicode_2_32(self):
        """Accepts Unicode input outside of the ASCII range."""
        self.assertIsInstance(CityHash32(u'\u2661'), int)

    def test_unicode_2_64(self):
        """Accepts Unicode input outside of the ASCII range."""
        self.assertIsInstance(CityHash64WithSeed(u'\u2661'), long)

    def test_unicode_2_128(self):
        """Accepts Unicode input outside of the ASCII range."""
        self.assertIsInstance(CityHash128WithSeed(u'\u2661'), long)

    def test_unicode_2_128_seed(self):
        """Accepts Unicode input outside of the ASCII range, with a seed."""
        heart = u'\u2661'
        self.assertIsInstance(
            CityHash128WithSeed(heart, seed=CityHash128WithSeed(heart)),
            long,
        )

    def test_argument_types(self):
        """Each hasher gives one value across buffer-compatible argument types."""
        payload = b'ab\x00c'
        variants = [payload, bytearray(payload), memoryview(payload)]
        for func in (CityHash32, CityHash64, CityHash128,
                     CityHash64WithSeed, CityHash64WithSeeds,
                     CityHash128WithSeed):
            hashes = {func(variant) for variant in variants}
            self.assertEqual(len(hashes), 1, hashes)

    def test_refcounts(self):
        """Hashers do not leak references to their arguments."""
        for func in (CityHash32, CityHash64, CityHash128,
                     CityHash64WithSeed, CityHash64WithSeeds,
                     CityHash128WithSeed):
            for arg in ('abc', b'abc', bytearray(b'def'), memoryview(b'ghi')):
                before = sys.getrefcount(arg)
                func(arg)
                self.assertEqual(sys.getrefcount(arg), before)
c8df3be2dab9921691bd4d2d51525873b99e6cf9 | 427 | py | Python | articles/migrations/old/0007_author_slug.py | JClubb3/johnj.club | a10bb4e15da38a5bf2e265790fe2bf7603e5474c | [
"MIT"
] | null | null | null | articles/migrations/old/0007_author_slug.py | JClubb3/johnj.club | a10bb4e15da38a5bf2e265790fe2bf7603e5474c | [
"MIT"
] | null | null | null | articles/migrations/old/0007_author_slug.py | JClubb3/johnj.club | a10bb4e15da38a5bf2e265790fe2bf7603e5474c | [
"MIT"
] | null | null | null | # Generated by Django 2.0 on 2018-02-16 15:39
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds a ``slug`` field to the ``Author`` model. Depends on the previous
    # articles migration so the table already exists when AddField runs.
    dependencies = [
        ('articles', '0006_auto_20180216_0947'),
    ]
    operations = [
        migrations.AddField(
            model_name='author',
            name='slug',
            # NOTE(review): default='' backfills existing rows with an empty
            # slug; confirm uniqueness is not expected on this field.
            field=models.SlugField(default='', help_text='A no space name to be used for URLs'),
        ),
    ]
| 22.473684 | 96 | 0.604215 |
a6f24509ca1f20fb06b3bcbe4ed5f804a04bc039 | 2,392 | bzl | Python | tools/sass_bundle.bzl | MarkArranz/material2 | 3255cf3c3675037725ea579fcdfa373d06977fb4 | [
"MIT"
] | 4 | 2017-09-12T16:12:28.000Z | 2018-05-02T21:16:55.000Z | tools/sass_bundle.bzl | MarkArranz/material2 | 3255cf3c3675037725ea579fcdfa373d06977fb4 | [
"MIT"
] | 2 | 2018-07-12T23:57:08.000Z | 2018-07-13T00:15:06.000Z | tools/sass_bundle.bzl | MarkArranz/material2 | 3255cf3c3675037725ea579fcdfa373d06977fb4 | [
"MIT"
] | 3 | 2017-11-23T04:20:48.000Z | 2018-05-02T21:17:13.000Z | # Implementation of sass_bundle that performs an action
def _sass_bundle(ctx):
    # Implementation function for the sass_bundle rule: builds the command
    # line for the nodejs bundler and registers the single bundling action.
    # Define arguments that will be passed to the underlying nodejs script.
    args = ctx.actions.args()
    # The entry-point scss file for the bundle.
    # NOTE(review): passing a collection of files (and using join_with= on
    # Args.add below) relies on Bazel's legacy multi-value Args API; newer
    # Bazel releases require args.add_all / args.add_joined for this —
    # confirm against the workspace's pinned Bazel version.
    args.add("--entry")
    args.add(ctx.attr.entry_point.files)
    # The list of files that can be included in the bundle.
    args.add("--srcs")
    args.add(ctx.files.srcs, join_with =",")
    # The generated bundle's filename.
    args.add("--output")
    args.add(ctx.outputs.output_name.path)
    # Define an "action" that will run the nodejs_binary executable. This is
    # the main thing that sass_bundle rule does.
    # `inputs` must list every file the sandboxed action may read; `outputs`
    # is the single bundle file declared via the `output_name` attribute.
    ctx.actions.run(
        inputs = ctx.files.srcs + ctx.files.entry_point,
        executable = ctx.executable._sass_bundle,
        outputs = [ctx.outputs.output_name],
        arguments = [args],
    )
    # The return value describes what the rule is producing.
    # In this case, we can use a `DefaultInfo`, since the rule only produces
    # a single output file.
    return [DefaultInfo(files = depset([ctx.outputs.output_name]))]
# Rule definition for sass_bundle that defines attributes and outputs.
sass_bundle = rule(
    # Point to the function that will execute for this rule.
    implementation = _sass_bundle,
    # The attributes that can be set to this rule.
    attrs = {
        # The source files for this rule. This must include all sass files that
        # *could* be included in the bundle, as only the files that this rule knows
        # about (i.e. the labels) will be available in the bazel sandbox in which
        # the nodejs_binary runs.
        "srcs": attr.label_list(allow_files = True),
        # The name of the file to be output from this rule. The rule will fail if
        # the nodejs_binary does not produce this output file. By using
        # `attr.output()`, we can omit the separate `outputs` declaration a more
        # complicated rule would need.
        "output_name": attr.output(),
        # The scss entry-point. Note that this uses a label and not a string
        # in order to make bazel aware that this file is a *dependency* of the
        # rule (and will thus be available to the nodejs_binary in the sandbox).
        "entry_point": attr.label(mandatory = True, allow_single_file = True),
        # The executable (bundler) for this rule (private).
        # NOTE(review): cfg = "host" is deprecated in favour of cfg = "exec"
        # in newer Bazel versions — confirm against the pinned Bazel version.
        "_sass_bundle": attr.label(
            default = Label("//tools:sass_bundle"),
            executable = True,
            cfg = "host"
        )},
)
| 37.375 | 79 | 0.704013 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.