hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
acebda4ccf3fbb124285799a48de84bd1ad53d4e | 451 | py | Python | Status.py | HengLiEn/App-Development-Final-Assignment- | d717d7ca49ea6e186256516bc177f679bb293fdd | [
"MIT"
] | null | null | null | Status.py | HengLiEn/App-Development-Final-Assignment- | d717d7ca49ea6e186256516bc177f679bb293fdd | [
"MIT"
] | null | null | null | Status.py | HengLiEn/App-Development-Final-Assignment- | d717d7ca49ea6e186256516bc177f679bb293fdd | [
"MIT"
] | null | null | null | class Status:
status_id = 0
def __init__(self, status):
Status.status_id += 1
self.__status_id = Status.status_id
self.__status = status
def get_status_id(self):
return self.__status_id
def get_status(self):
return self.__status
def set_status_id(self, status_id):
self.__status_id = status_id
def set_status(self, status):
self.__status = status
| 22.55 | 44 | 0.611973 |
acebdb47a0268457432b4bb796b0fc1251180006 | 273 | py | Python | main.py | RedCrabVb/Extract_doc_features | fb04fcb9c7e66ae392c31a68f43f6ebc780ec12f | [
"MIT"
] | null | null | null | main.py | RedCrabVb/Extract_doc_features | fb04fcb9c7e66ae392c31a68f43f6ebc780ec12f | [
"MIT"
] | null | null | null | main.py | RedCrabVb/Extract_doc_features | fb04fcb9c7e66ae392c31a68f43f6ebc780ec12f | [
"MIT"
] | null | null | null | import sys
import json
from extract_doc_features import extract_doc_features
sys.stdout.encoding
for param in sys.argv:
try:
d = extract_doc_features(param)
print(json.dumps(d, ensure_ascii=False, indent=4))
except Exception as e:
print(e)
| 22.75 | 58 | 0.714286 |
acebdbd67f20263ed2462b34864d266c6a9d5dc5 | 2,571 | py | Python | zenml/utils/yaml_utils.py | birdiesanders/zenml | fca11c17ccf941aa14920ef4eab470b94b26ccbe | [
"Apache-2.0"
] | 1 | 2021-05-04T17:11:23.000Z | 2021-05-04T17:11:23.000Z | zenml/utils/yaml_utils.py | birdiesanders/zenml | fca11c17ccf941aa14920ef4eab470b94b26ccbe | [
"Apache-2.0"
] | null | null | null | zenml/utils/yaml_utils.py | birdiesanders/zenml | fca11c17ccf941aa14920ef4eab470b94b26ccbe | [
"Apache-2.0"
] | 1 | 2020-12-27T08:16:42.000Z | 2020-12-27T08:16:42.000Z | # Copyright (c) maiot GmbH 2020. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
import json
from pathlib import Path
from typing import Text, Dict
import yaml
from zenml.utils import path_utils
def write_yaml(file_path: Text, contents: Dict):
"""
Write contents as YAML format to file_path.
Args:
file_path (str): Path to YAML file.
contents (dict): Contents of YAML file as dict.
"""
if not path_utils.is_remote(file_path):
dir_ = str(Path(file_path).parent)
if not path_utils.is_dir(dir_):
# If it is a local path and it doesnt exist, raise Exception.
raise Exception(f'Directory {dir_} does not exist.')
path_utils.write_file_contents(file_path, yaml.dump(contents))
def read_yaml(file_path: Text):
"""
Read YAML on file path and returns contents as dict.
Args:
file_path (str): Path to YAML file.
"""
if path_utils.file_exists(file_path):
with open(file_path, 'r') as f:
return yaml.load(f.read(), Loader=yaml.FullLoader)
else:
raise Exception(f'{file_path} does not exist.')
def write_json(file_path: Text, contents: Dict):
"""
Write contents as JSON format to file_path.
Args:
file_path (str): Path to JSON file.
contents (dict): Contents of JSON file as dict.
"""
if not path_utils.is_remote(file_path):
dir_ = str(Path(file_path).parent)
if not path_utils.is_dir(dir_):
# If it is a local path and it doesnt exist, raise Exception.
raise Exception(f'Directory {dir_} does not exist.')
path_utils.write_file_contents(file_path, json.dumps(contents))
def read_json(file_path: Text):
"""
Read JSON on file path and returns contents as dict.
Args:
file_path (str): Path to JSON file.
"""
if path_utils.file_exists(file_path):
with open(file_path, 'r') as f:
return json.loads(f.read())
else:
raise Exception(f'{file_path} does not exist.')
| 31.353659 | 73 | 0.668611 |
acebdbdc463613139b9bb241f412e3e7a4ae4233 | 34,237 | py | Python | Lib/test/test_compile.py | Himanshu-Lakhara/cpython | 3b20d3454e8082e07dba93617793de5dc9237828 | [
"PSF-2.0"
] | 3 | 2018-03-09T06:50:34.000Z | 2021-01-14T04:55:43.000Z | Lib/test/test_compile.py | Himanshu-Lakhara/cpython | 3b20d3454e8082e07dba93617793de5dc9237828 | [
"PSF-2.0"
] | 6 | 2020-01-31T18:04:48.000Z | 2021-06-05T10:53:55.000Z | Lib/test/test_compile.py | Himanshu-Lakhara/cpython | 3b20d3454e8082e07dba93617793de5dc9237828 | [
"PSF-2.0"
] | 2 | 2021-02-14T15:20:06.000Z | 2021-11-13T15:15:58.000Z | import math
import os
import unittest
import sys
import _ast
import tempfile
import types
from test import support
from test.support import script_helper, FakePath
class TestSpecifics(unittest.TestCase):
def compile_single(self, source):
compile(source, "<single>", "single")
def assertInvalidSingle(self, source):
self.assertRaises(SyntaxError, self.compile_single, source)
def test_no_ending_newline(self):
compile("hi", "<test>", "exec")
compile("hi\r", "<test>", "exec")
def test_empty(self):
compile("", "<test>", "exec")
def test_other_newlines(self):
compile("\r\n", "<test>", "exec")
compile("\r", "<test>", "exec")
compile("hi\r\nstuff\r\ndef f():\n pass\r", "<test>", "exec")
compile("this_is\rreally_old_mac\rdef f():\n pass", "<test>", "exec")
def test_debug_assignment(self):
# catch assignments to __debug__
self.assertRaises(SyntaxError, compile, '__debug__ = 1', '?', 'single')
import builtins
prev = builtins.__debug__
setattr(builtins, '__debug__', 'sure')
self.assertEqual(__debug__, prev)
setattr(builtins, '__debug__', prev)
def test_argument_handling(self):
# detect duplicate positional and keyword arguments
self.assertRaises(SyntaxError, eval, 'lambda a,a:0')
self.assertRaises(SyntaxError, eval, 'lambda a,a=1:0')
self.assertRaises(SyntaxError, eval, 'lambda a=1,a=1:0')
self.assertRaises(SyntaxError, exec, 'def f(a, a): pass')
self.assertRaises(SyntaxError, exec, 'def f(a = 0, a = 1): pass')
self.assertRaises(SyntaxError, exec, 'def f(a): global a; a = 1')
def test_syntax_error(self):
self.assertRaises(SyntaxError, compile, "1+*3", "filename", "exec")
def test_none_keyword_arg(self):
self.assertRaises(SyntaxError, compile, "f(None=1)", "<string>", "exec")
def test_duplicate_global_local(self):
self.assertRaises(SyntaxError, exec, 'def f(a): global a; a = 1')
def test_exec_with_general_mapping_for_locals(self):
class M:
"Test mapping interface versus possible calls from eval()."
def __getitem__(self, key):
if key == 'a':
return 12
raise KeyError
def __setitem__(self, key, value):
self.results = (key, value)
def keys(self):
return list('xyz')
m = M()
g = globals()
exec('z = a', g, m)
self.assertEqual(m.results, ('z', 12))
try:
exec('z = b', g, m)
except NameError:
pass
else:
self.fail('Did not detect a KeyError')
exec('z = dir()', g, m)
self.assertEqual(m.results, ('z', list('xyz')))
exec('z = globals()', g, m)
self.assertEqual(m.results, ('z', g))
exec('z = locals()', g, m)
self.assertEqual(m.results, ('z', m))
self.assertRaises(TypeError, exec, 'z = b', m)
class A:
"Non-mapping"
pass
m = A()
self.assertRaises(TypeError, exec, 'z = a', g, m)
# Verify that dict subclasses work as well
class D(dict):
def __getitem__(self, key):
if key == 'a':
return 12
return dict.__getitem__(self, key)
d = D()
exec('z = a', g, d)
self.assertEqual(d['z'], 12)
def test_extended_arg(self):
longexpr = 'x = x or ' + '-x' * 2500
g = {}
code = '''
def f(x):
%s
%s
%s
%s
%s
%s
%s
%s
%s
%s
# the expressions above have no effect, x == argument
while x:
x -= 1
# EXTENDED_ARG/JUMP_ABSOLUTE here
return x
''' % ((longexpr,)*10)
exec(code, g)
self.assertEqual(g['f'](5), 0)
def test_argument_order(self):
self.assertRaises(SyntaxError, exec, 'def f(a=1, b): pass')
def test_float_literals(self):
# testing bad float literals
self.assertRaises(SyntaxError, eval, "2e")
self.assertRaises(SyntaxError, eval, "2.0e+")
self.assertRaises(SyntaxError, eval, "1e-")
self.assertRaises(SyntaxError, eval, "3-4e/21")
def test_indentation(self):
# testing compile() of indented block w/o trailing newline"
s = """
if 1:
if 2:
pass"""
compile(s, "<string>", "exec")
# This test is probably specific to CPython and may not generalize
# to other implementations. We are trying to ensure that when
# the first line of code starts after 256, correct line numbers
# in tracebacks are still produced.
def test_leading_newlines(self):
s256 = "".join(["\n"] * 256 + ["spam"])
co = compile(s256, 'fn', 'exec')
self.assertEqual(co.co_firstlineno, 257)
self.assertEqual(co.co_lnotab, bytes())
def test_literals_with_leading_zeroes(self):
for arg in ["077787", "0xj", "0x.", "0e", "090000000000000",
"080000000000000", "000000000000009", "000000000000008",
"0b42", "0BADCAFE", "0o123456789", "0b1.1", "0o4.2",
"0b101j2", "0o153j2", "0b100e1", "0o777e1", "0777",
"000777", "000000000000007"]:
self.assertRaises(SyntaxError, eval, arg)
self.assertEqual(eval("0xff"), 255)
self.assertEqual(eval("0777."), 777)
self.assertEqual(eval("0777.0"), 777)
self.assertEqual(eval("000000000000000000000000000000000000000000000000000777e0"), 777)
self.assertEqual(eval("0777e1"), 7770)
self.assertEqual(eval("0e0"), 0)
self.assertEqual(eval("0000e-012"), 0)
self.assertEqual(eval("09.5"), 9.5)
self.assertEqual(eval("0777j"), 777j)
self.assertEqual(eval("000"), 0)
self.assertEqual(eval("00j"), 0j)
self.assertEqual(eval("00.0"), 0)
self.assertEqual(eval("0e3"), 0)
self.assertEqual(eval("090000000000000."), 90000000000000.)
self.assertEqual(eval("090000000000000.0000000000000000000000"), 90000000000000.)
self.assertEqual(eval("090000000000000e0"), 90000000000000.)
self.assertEqual(eval("090000000000000e-0"), 90000000000000.)
self.assertEqual(eval("090000000000000j"), 90000000000000j)
self.assertEqual(eval("000000000000008."), 8.)
self.assertEqual(eval("000000000000009."), 9.)
self.assertEqual(eval("0b101010"), 42)
self.assertEqual(eval("-0b000000000010"), -2)
self.assertEqual(eval("0o777"), 511)
self.assertEqual(eval("-0o0000010"), -8)
def test_unary_minus(self):
# Verify treatment of unary minus on negative numbers SF bug #660455
if sys.maxsize == 2147483647:
# 32-bit machine
all_one_bits = '0xffffffff'
self.assertEqual(eval(all_one_bits), 4294967295)
self.assertEqual(eval("-" + all_one_bits), -4294967295)
elif sys.maxsize == 9223372036854775807:
# 64-bit machine
all_one_bits = '0xffffffffffffffff'
self.assertEqual(eval(all_one_bits), 18446744073709551615)
self.assertEqual(eval("-" + all_one_bits), -18446744073709551615)
else:
self.fail("How many bits *does* this machine have???")
# Verify treatment of constant folding on -(sys.maxsize+1)
# i.e. -2147483648 on 32 bit platforms. Should return int.
self.assertIsInstance(eval("%s" % (-sys.maxsize - 1)), int)
self.assertIsInstance(eval("%s" % (-sys.maxsize - 2)), int)
if sys.maxsize == 9223372036854775807:
def test_32_63_bit_values(self):
a = +4294967296 # 1 << 32
b = -4294967296 # 1 << 32
c = +281474976710656 # 1 << 48
d = -281474976710656 # 1 << 48
e = +4611686018427387904 # 1 << 62
f = -4611686018427387904 # 1 << 62
g = +9223372036854775807 # 1 << 63 - 1
h = -9223372036854775807 # 1 << 63 - 1
for variable in self.test_32_63_bit_values.__code__.co_consts:
if variable is not None:
self.assertIsInstance(variable, int)
def test_sequence_unpacking_error(self):
# Verify sequence packing/unpacking with "or". SF bug #757818
i,j = (1, -1) or (-1, 1)
self.assertEqual(i, 1)
self.assertEqual(j, -1)
def test_none_assignment(self):
stmts = [
'None = 0',
'None += 0',
'__builtins__.None = 0',
'def None(): pass',
'class None: pass',
'(a, None) = 0, 0',
'for None in range(10): pass',
'def f(None): pass',
'import None',
'import x as None',
'from x import None',
'from x import y as None'
]
for stmt in stmts:
stmt += "\n"
self.assertRaises(SyntaxError, compile, stmt, 'tmp', 'single')
self.assertRaises(SyntaxError, compile, stmt, 'tmp', 'exec')
def test_import(self):
succeed = [
'import sys',
'import os, sys',
'import os as bar',
'import os.path as bar',
'from __future__ import nested_scopes, generators',
'from __future__ import (nested_scopes,\ngenerators)',
'from __future__ import (nested_scopes,\ngenerators,)',
'from sys import stdin, stderr, stdout',
'from sys import (stdin, stderr,\nstdout)',
'from sys import (stdin, stderr,\nstdout,)',
'from sys import (stdin\n, stderr, stdout)',
'from sys import (stdin\n, stderr, stdout,)',
'from sys import stdin as si, stdout as so, stderr as se',
'from sys import (stdin as si, stdout as so, stderr as se)',
'from sys import (stdin as si, stdout as so, stderr as se,)',
]
fail = [
'import (os, sys)',
'import (os), (sys)',
'import ((os), (sys))',
'import (sys',
'import sys)',
'import (os,)',
'import os As bar',
'import os.path a bar',
'from sys import stdin As stdout',
'from sys import stdin a stdout',
'from (sys) import stdin',
'from __future__ import (nested_scopes',
'from __future__ import nested_scopes)',
'from __future__ import nested_scopes,\ngenerators',
'from sys import (stdin',
'from sys import stdin)',
'from sys import stdin, stdout,\nstderr',
'from sys import stdin si',
'from sys import stdin,'
'from sys import (*)',
'from sys import (stdin,, stdout, stderr)',
'from sys import (stdin, stdout),',
]
for stmt in succeed:
compile(stmt, 'tmp', 'exec')
for stmt in fail:
self.assertRaises(SyntaxError, compile, stmt, 'tmp', 'exec')
def test_for_distinct_code_objects(self):
# SF bug 1048870
def f():
f1 = lambda x=1: x
f2 = lambda x=2: x
return f1, f2
f1, f2 = f()
self.assertNotEqual(id(f1.__code__), id(f2.__code__))
def test_lambda_doc(self):
l = lambda: "foo"
self.assertIsNone(l.__doc__)
def test_encoding(self):
code = b'# -*- coding: badencoding -*-\npass\n'
self.assertRaises(SyntaxError, compile, code, 'tmp', 'exec')
code = '# -*- coding: badencoding -*-\n"\xc2\xa4"\n'
compile(code, 'tmp', 'exec')
self.assertEqual(eval(code), '\xc2\xa4')
code = '"\xc2\xa4"\n'
self.assertEqual(eval(code), '\xc2\xa4')
code = b'"\xc2\xa4"\n'
self.assertEqual(eval(code), '\xa4')
code = b'# -*- coding: latin1 -*-\n"\xc2\xa4"\n'
self.assertEqual(eval(code), '\xc2\xa4')
code = b'# -*- coding: utf-8 -*-\n"\xc2\xa4"\n'
self.assertEqual(eval(code), '\xa4')
code = b'# -*- coding: iso8859-15 -*-\n"\xc2\xa4"\n'
self.assertEqual(eval(code), '\xc2\u20ac')
code = '"""\\\n# -*- coding: iso8859-15 -*-\n\xc2\xa4"""\n'
self.assertEqual(eval(code), '# -*- coding: iso8859-15 -*-\n\xc2\xa4')
code = b'"""\\\n# -*- coding: iso8859-15 -*-\n\xc2\xa4"""\n'
self.assertEqual(eval(code), '# -*- coding: iso8859-15 -*-\n\xa4')
def test_subscripts(self):
# SF bug 1448804
# Class to make testing subscript results easy
class str_map(object):
def __init__(self):
self.data = {}
def __getitem__(self, key):
return self.data[str(key)]
def __setitem__(self, key, value):
self.data[str(key)] = value
def __delitem__(self, key):
del self.data[str(key)]
def __contains__(self, key):
return str(key) in self.data
d = str_map()
# Index
d[1] = 1
self.assertEqual(d[1], 1)
d[1] += 1
self.assertEqual(d[1], 2)
del d[1]
self.assertNotIn(1, d)
# Tuple of indices
d[1, 1] = 1
self.assertEqual(d[1, 1], 1)
d[1, 1] += 1
self.assertEqual(d[1, 1], 2)
del d[1, 1]
self.assertNotIn((1, 1), d)
# Simple slice
d[1:2] = 1
self.assertEqual(d[1:2], 1)
d[1:2] += 1
self.assertEqual(d[1:2], 2)
del d[1:2]
self.assertNotIn(slice(1, 2), d)
# Tuple of simple slices
d[1:2, 1:2] = 1
self.assertEqual(d[1:2, 1:2], 1)
d[1:2, 1:2] += 1
self.assertEqual(d[1:2, 1:2], 2)
del d[1:2, 1:2]
self.assertNotIn((slice(1, 2), slice(1, 2)), d)
# Extended slice
d[1:2:3] = 1
self.assertEqual(d[1:2:3], 1)
d[1:2:3] += 1
self.assertEqual(d[1:2:3], 2)
del d[1:2:3]
self.assertNotIn(slice(1, 2, 3), d)
# Tuple of extended slices
d[1:2:3, 1:2:3] = 1
self.assertEqual(d[1:2:3, 1:2:3], 1)
d[1:2:3, 1:2:3] += 1
self.assertEqual(d[1:2:3, 1:2:3], 2)
del d[1:2:3, 1:2:3]
self.assertNotIn((slice(1, 2, 3), slice(1, 2, 3)), d)
# Ellipsis
d[...] = 1
self.assertEqual(d[...], 1)
d[...] += 1
self.assertEqual(d[...], 2)
del d[...]
self.assertNotIn(Ellipsis, d)
# Tuple of Ellipses
d[..., ...] = 1
self.assertEqual(d[..., ...], 1)
d[..., ...] += 1
self.assertEqual(d[..., ...], 2)
del d[..., ...]
self.assertNotIn((Ellipsis, Ellipsis), d)
def test_annotation_limit(self):
# more than 255 annotations, should compile ok
s = "def f(%s): pass"
s %= ', '.join('a%d:%d' % (i,i) for i in range(300))
compile(s, '?', 'exec')
def test_mangling(self):
class A:
def f():
__mangled = 1
__not_mangled__ = 2
import __mangled_mod
import __package__.module
self.assertIn("_A__mangled", A.f.__code__.co_varnames)
self.assertIn("__not_mangled__", A.f.__code__.co_varnames)
self.assertIn("_A__mangled_mod", A.f.__code__.co_varnames)
self.assertIn("__package__", A.f.__code__.co_varnames)
def test_compile_ast(self):
fname = __file__
if fname.lower().endswith('pyc'):
fname = fname[:-1]
with open(fname, 'r') as f:
fcontents = f.read()
sample_code = [
['<assign>', 'x = 5'],
['<ifblock>', """if True:\n pass\n"""],
['<forblock>', """for n in [1, 2, 3]:\n print(n)\n"""],
['<deffunc>', """def foo():\n pass\nfoo()\n"""],
[fname, fcontents],
]
for fname, code in sample_code:
co1 = compile(code, '%s1' % fname, 'exec')
ast = compile(code, '%s2' % fname, 'exec', _ast.PyCF_ONLY_AST)
self.assertTrue(type(ast) == _ast.Module)
co2 = compile(ast, '%s3' % fname, 'exec')
self.assertEqual(co1, co2)
# the code object's filename comes from the second compilation step
self.assertEqual(co2.co_filename, '%s3' % fname)
# raise exception when node type doesn't match with compile mode
co1 = compile('print(1)', '<string>', 'exec', _ast.PyCF_ONLY_AST)
self.assertRaises(TypeError, compile, co1, '<ast>', 'eval')
# raise exception when node type is no start node
self.assertRaises(TypeError, compile, _ast.If(), '<ast>', 'exec')
# raise exception when node has invalid children
ast = _ast.Module()
ast.body = [_ast.BoolOp()]
self.assertRaises(TypeError, compile, ast, '<ast>', 'exec')
def test_dict_evaluation_order(self):
i = 0
def f():
nonlocal i
i += 1
return i
d = {f(): f(), f(): f()}
self.assertEqual(d, {1: 2, 3: 4})
def test_compile_filename(self):
for filename in 'file.py', b'file.py':
code = compile('pass', filename, 'exec')
self.assertEqual(code.co_filename, 'file.py')
for filename in bytearray(b'file.py'), memoryview(b'file.py'):
with self.assertWarns(DeprecationWarning):
code = compile('pass', filename, 'exec')
self.assertEqual(code.co_filename, 'file.py')
self.assertRaises(TypeError, compile, 'pass', list(b'file.py'), 'exec')
@support.cpython_only
def test_same_filename_used(self):
s = """def f(): pass\ndef g(): pass"""
c = compile(s, "myfile", "exec")
for obj in c.co_consts:
if isinstance(obj, types.CodeType):
self.assertIs(obj.co_filename, c.co_filename)
def test_single_statement(self):
self.compile_single("1 + 2")
self.compile_single("\n1 + 2")
self.compile_single("1 + 2\n")
self.compile_single("1 + 2\n\n")
self.compile_single("1 + 2\t\t\n")
self.compile_single("1 + 2\t\t\n ")
self.compile_single("1 + 2 # one plus two")
self.compile_single("1; 2")
self.compile_single("import sys; sys")
self.compile_single("def f():\n pass")
self.compile_single("while False:\n pass")
self.compile_single("if x:\n f(x)")
self.compile_single("if x:\n f(x)\nelse:\n g(x)")
self.compile_single("class T:\n pass")
def test_bad_single_statement(self):
self.assertInvalidSingle('1\n2')
self.assertInvalidSingle('def f(): pass')
self.assertInvalidSingle('a = 13\nb = 187')
self.assertInvalidSingle('del x\ndel y')
self.assertInvalidSingle('f()\ng()')
self.assertInvalidSingle('f()\n# blah\nblah()')
self.assertInvalidSingle('f()\nxy # blah\nblah()')
self.assertInvalidSingle('x = 5 # comment\nx = 6\n')
def test_particularly_evil_undecodable(self):
# Issue 24022
src = b'0000\x00\n00000000000\n\x00\n\x9e\n'
with tempfile.TemporaryDirectory() as tmpd:
fn = os.path.join(tmpd, "bad.py")
with open(fn, "wb") as fp:
fp.write(src)
res = script_helper.run_python_until_end(fn)[0]
self.assertIn(b"Non-UTF-8", res.err)
def test_yet_more_evil_still_undecodable(self):
# Issue #25388
src = b"#\x00\n#\xfd\n"
with tempfile.TemporaryDirectory() as tmpd:
fn = os.path.join(tmpd, "bad.py")
with open(fn, "wb") as fp:
fp.write(src)
res = script_helper.run_python_until_end(fn)[0]
self.assertIn(b"Non-UTF-8", res.err)
@support.cpython_only
def test_compiler_recursion_limit(self):
# Expected limit is sys.getrecursionlimit() * the scaling factor
# in symtable.c (currently 3)
# We expect to fail *at* that limit, because we use up some of
# the stack depth limit in the test suite code
# So we check the expected limit and 75% of that
# XXX (ncoghlan): duplicating the scaling factor here is a little
# ugly. Perhaps it should be exposed somewhere...
fail_depth = sys.getrecursionlimit() * 3
success_depth = int(fail_depth * 0.75)
def check_limit(prefix, repeated):
expect_ok = prefix + repeated * success_depth
self.compile_single(expect_ok)
broken = prefix + repeated * fail_depth
details = "Compiling ({!r} + {!r} * {})".format(
prefix, repeated, fail_depth)
with self.assertRaises(RecursionError, msg=details):
self.compile_single(broken)
check_limit("a", "()")
check_limit("a", ".b")
check_limit("a", "[0]")
check_limit("a", "*a")
def test_null_terminated(self):
# The source code is null-terminated internally, but bytes-like
# objects are accepted, which could be not terminated.
with self.assertRaisesRegex(ValueError, "cannot contain null"):
compile("123\x00", "<dummy>", "eval")
with self.assertRaisesRegex(ValueError, "cannot contain null"):
compile(memoryview(b"123\x00"), "<dummy>", "eval")
code = compile(memoryview(b"123\x00")[1:-1], "<dummy>", "eval")
self.assertEqual(eval(code), 23)
code = compile(memoryview(b"1234")[1:-1], "<dummy>", "eval")
self.assertEqual(eval(code), 23)
code = compile(memoryview(b"$23$")[1:-1], "<dummy>", "eval")
self.assertEqual(eval(code), 23)
# Also test when eval() and exec() do the compilation step
self.assertEqual(eval(memoryview(b"1234")[1:-1]), 23)
namespace = dict()
exec(memoryview(b"ax = 123")[1:-1], namespace)
self.assertEqual(namespace['x'], 12)
def check_constant(self, func, expected):
for const in func.__code__.co_consts:
if repr(const) == repr(expected):
break
else:
self.fail("unable to find constant %r in %r"
% (expected, func.__code__.co_consts))
# Merging equal constants is not a strict requirement for the Python
# semantics, it's a more an implementation detail.
@support.cpython_only
def test_merge_constants(self):
# Issue #25843: compile() must merge constants which are equal
# and have the same type.
def check_same_constant(const):
ns = {}
code = "f1, f2 = lambda: %r, lambda: %r" % (const, const)
exec(code, ns)
f1 = ns['f1']
f2 = ns['f2']
self.assertIs(f1.__code__, f2.__code__)
self.check_constant(f1, const)
self.assertEqual(repr(f1()), repr(const))
check_same_constant(None)
check_same_constant(0)
check_same_constant(0.0)
check_same_constant(b'abc')
check_same_constant('abc')
# Note: "lambda: ..." emits "LOAD_CONST Ellipsis",
# whereas "lambda: Ellipsis" emits "LOAD_GLOBAL Ellipsis"
f1, f2 = lambda: ..., lambda: ...
self.assertIs(f1.__code__, f2.__code__)
self.check_constant(f1, Ellipsis)
self.assertEqual(repr(f1()), repr(Ellipsis))
# {0} is converted to a constant frozenset({0}) by the peephole
# optimizer
f1, f2 = lambda x: x in {0}, lambda x: x in {0}
self.assertIs(f1.__code__, f2.__code__)
self.check_constant(f1, frozenset({0}))
self.assertTrue(f1(0))
def test_dont_merge_constants(self):
# Issue #25843: compile() must not merge constants which are equal
# but have a different type.
def check_different_constants(const1, const2):
ns = {}
exec("f1, f2 = lambda: %r, lambda: %r" % (const1, const2), ns)
f1 = ns['f1']
f2 = ns['f2']
self.assertIsNot(f1.__code__, f2.__code__)
self.assertNotEqual(f1.__code__, f2.__code__)
self.check_constant(f1, const1)
self.check_constant(f2, const2)
self.assertEqual(repr(f1()), repr(const1))
self.assertEqual(repr(f2()), repr(const2))
check_different_constants(0, 0.0)
check_different_constants(+0.0, -0.0)
check_different_constants((0,), (0.0,))
check_different_constants('a', b'a')
check_different_constants(('a',), (b'a',))
# check_different_constants() cannot be used because repr(-0j) is
# '(-0-0j)', but when '(-0-0j)' is evaluated to 0j: we loose the sign.
f1, f2 = lambda: +0.0j, lambda: -0.0j
self.assertIsNot(f1.__code__, f2.__code__)
self.check_constant(f1, +0.0j)
self.check_constant(f2, -0.0j)
self.assertEqual(repr(f1()), repr(+0.0j))
self.assertEqual(repr(f2()), repr(-0.0j))
# {0} is converted to a constant frozenset({0}) by the peephole
# optimizer
f1, f2 = lambda x: x in {0}, lambda x: x in {0.0}
self.assertIsNot(f1.__code__, f2.__code__)
self.check_constant(f1, frozenset({0}))
self.check_constant(f2, frozenset({0.0}))
self.assertTrue(f1(0))
self.assertTrue(f2(0.0))
def test_path_like_objects(self):
# An implicit test for PyUnicode_FSDecoder().
compile("42", FakePath("test_compile_pathlike"), "single")
def test_stack_overflow(self):
# bpo-31113: Stack overflow when compile a long sequence of
# complex statements.
compile("if a: b\n" * 200000, "<dummy>", "exec")
class TestExpressionStackSize(unittest.TestCase):
# These tests check that the computed stack size for a code object
# stays within reasonable bounds (see issue #21523 for an example
# dysfunction).
N = 100
def check_stack_size(self, code):
# To assert that the alleged stack size is not O(N), we
# check that it is smaller than log(N).
if isinstance(code, str):
code = compile(code, "<foo>", "single")
max_size = math.ceil(math.log(len(code.co_code)))
self.assertLessEqual(code.co_stacksize, max_size)
def test_and(self):
self.check_stack_size("x and " * self.N + "x")
def test_or(self):
self.check_stack_size("x or " * self.N + "x")
def test_and_or(self):
self.check_stack_size("x and x or " * self.N + "x")
def test_chained_comparison(self):
self.check_stack_size("x < " * self.N + "x")
def test_if_else(self):
self.check_stack_size("x if x else " * self.N + "x")
def test_binop(self):
self.check_stack_size("x + " * self.N + "x")
def test_func_and(self):
code = "def f(x):\n"
code += " x and x\n" * self.N
self.check_stack_size(code)
class TestStackSizeStability(unittest.TestCase):
# Check that repeating certain snippets doesn't increase the stack size
# beyond what a single snippet requires.
def check_stack_size(self, snippet, async_=False):
def compile_snippet(i):
ns = {}
script = """def func():\n""" + i * snippet
if async_:
script = "async " + script
code = compile(script, "<script>", "exec")
exec(code, ns, ns)
return ns['func'].__code__
sizes = [compile_snippet(i).co_stacksize for i in range(2, 5)]
if len(set(sizes)) != 1:
import dis, io
out = io.StringIO()
dis.dis(compile_snippet(1), file=out)
self.fail("stack sizes diverge with # of consecutive snippets: "
"%s\n%s\n%s" % (sizes, snippet, out.getvalue()))
def test_if(self):
snippet = """
if x:
a
"""
self.check_stack_size(snippet)
def test_if_else(self):
snippet = """
if x:
a
elif y:
b
else:
c
"""
self.check_stack_size(snippet)
def test_try_except_bare(self):
snippet = """
try:
a
except:
b
"""
self.check_stack_size(snippet)
def test_try_except_qualified(self):
snippet = """
try:
a
except ImportError:
b
except:
c
else:
d
"""
self.check_stack_size(snippet)
def test_try_except_as(self):
snippet = """
try:
a
except ImportError as e:
b
except:
c
else:
d
"""
self.check_stack_size(snippet)
def test_try_finally(self):
snippet = """
try:
a
finally:
b
"""
self.check_stack_size(snippet)
def test_with(self):
snippet = """
with x as y:
a
"""
self.check_stack_size(snippet)
def test_while_else(self):
snippet = """
while x:
a
else:
b
"""
self.check_stack_size(snippet)
def test_for(self):
snippet = """
for x in y:
a
"""
self.check_stack_size(snippet)
def test_for_else(self):
snippet = """
for x in y:
a
else:
b
"""
self.check_stack_size(snippet)
def test_for_break_continue(self):
snippet = """
for x in y:
if z:
break
elif u:
continue
else:
a
else:
b
"""
self.check_stack_size(snippet)
def test_for_break_continue_inside_try_finally_block(self):
snippet = """
for x in y:
try:
if z:
break
elif u:
continue
else:
a
finally:
f
else:
b
"""
self.check_stack_size(snippet)
def test_for_break_inside_finally_block(self):
snippet = """
for x in y:
try:
t
finally:
if z:
break
else:
a
else:
b
"""
self.check_stack_size(snippet)
def test_for_break_continue_inside_except_block(self):
snippet = """
for x in y:
try:
t
except:
if z:
break
elif u:
continue
else:
a
else:
b
"""
self.check_stack_size(snippet)
def test_for_break_continue_inside_with_block(self):
snippet = """
for x in y:
with c:
if z:
break
elif u:
continue
else:
a
else:
b
"""
self.check_stack_size(snippet)
def test_return_inside_try_finally_block(self):
snippet = """
try:
if z:
return
else:
a
finally:
f
"""
self.check_stack_size(snippet)
def test_return_inside_finally_block(self):
snippet = """
try:
t
finally:
if z:
return
else:
a
"""
self.check_stack_size(snippet)
def test_return_inside_except_block(self):
snippet = """
try:
t
except:
if z:
return
else:
a
"""
self.check_stack_size(snippet)
def test_return_inside_with_block(self):
snippet = """
with c:
if z:
return
else:
a
"""
self.check_stack_size(snippet)
def test_async_with(self):
snippet = """
async with x as y:
a
"""
self.check_stack_size(snippet, async_=True)
def test_async_for(self):
snippet = """
async for x in y:
a
"""
self.check_stack_size(snippet, async_=True)
def test_async_for_else(self):
snippet = """
async for x in y:
a
else:
b
"""
self.check_stack_size(snippet, async_=True)
def test_for_break_continue_inside_async_with_block(self):
snippet = """
for x in y:
async with c:
if z:
break
elif u:
continue
else:
a
else:
b
"""
self.check_stack_size(snippet, async_=True)
    def test_return_inside_async_with_block(self):
        """return from inside an async with must pop the async CM."""
        snippet = """
            async with c:
                if z:
                    return
                else:
                    a
            """
        self.check_stack_size(snippet, async_=True)
if __name__ == "__main__":
    # Run the whole module's test suite when executed directly.
    unittest.main()
| 34.134596 | 95 | 0.521921 |
acebdca0d30e3d8c72b3d96bd6f5180e96d5828d | 20 | py | Python | NBX_OLED/__init__.py | xiaokexiang/TFOLED | a24475c800da80a1c36471097690c5fa957ed6de | [
"MIT"
] | 96 | 2020-08-07T08:13:36.000Z | 2022-03-29T07:01:49.000Z | NBX_OLED/__init__.py | xiaokexiang/TFOLED | a24475c800da80a1c36471097690c5fa957ed6de | [
"MIT"
] | 3 | 2020-10-31T02:23:02.000Z | 2021-01-14T12:53:09.000Z | NBX_OLED/__init__.py | xiaokexiang/TFOLED | a24475c800da80a1c36471097690c5fa957ed6de | [
"MIT"
] | 17 | 2020-09-22T02:47:18.000Z | 2022-01-16T05:57:42.000Z | from .OLED import *
| 10 | 19 | 0.7 |
acebdd736a1772ce0455b6f3bf14fc4aa7e95c9f | 4,344 | py | Python | Conteudo das Aulas/068/Adivinha numero - Gabarito.py | cerberus707/lab-python | ebba3c9cde873d70d4bb61084f79ce30b7f9e047 | [
"Apache-2.0"
] | null | null | null | Conteudo das Aulas/068/Adivinha numero - Gabarito.py | cerberus707/lab-python | ebba3c9cde873d70d4bb61084f79ce30b7f9e047 | [
"Apache-2.0"
] | null | null | null | Conteudo das Aulas/068/Adivinha numero - Gabarito.py | cerberus707/lab-python | ebba3c9cde873d70d4bb61084f79ce30b7f9e047 | [
"Apache-2.0"
] | null | null | null | """
Crie o jogo do adivinha número!
Usando os cartões na tupla abaixo, o programa vai pedir para o usuário
pensar em um número entre 1 a 63. Em seguida o programa mostrara um cartão
para o usuário perguntando se o número pensado se encontra nesse cartão.
O usuário deverá digitar sim ou não. Depois de mostrados todos os cartões
o programa deverá mostrar o número que o usuário pensou.
Use o cabeçário da classe jogo para implementar os métodos necessários para rodar o jogo.
Escreve uma excessão SoSimOuNao para verificar a entrada do usuário (coloque-a dentro
de um bloco try).
Um mágico nunca revela seus segredos, por isso veja o método adivinha número na classe jogo
para ver se você consegue descobrir como a adivinhação é feita!
"""
class SoSimOuNao(Exception):
    """Raised when the player's answer is anything other than yes/no.

    The message text (Portuguese, user-facing) is preserved verbatim.
    """

    def __str__(self):
        return 'Você só pode digitar sim ou não, nada mais'
class Jogo(object):
    # Guess-the-number game: the player picks a number between 1 and 63,
    # answers yes/no for six cards, and the program reconstructs the number
    # by summing the first value of every "yes" card (each card encodes one
    # bit of the number, so the first value is a power of two).
    def __init__(self):
        # Six card layouts; card i lists every number whose bit i is set.
        self.__cartoes = ('''
        1 3 5 7 9 11 13 15
        17 19 21 23 25 27 29 31
        33 35 37 39 41 43 45 47
        49 51 53 55 57 59 61 63
        ''','''
        2 3 6 7 10 11 14 15
        18 19 22 23 26 27 30 31
        34 35 38 39 42 43 46 47
        50 51 54 55 58 59 62 63
        ''','''
        4 5 6 7 12 13 14 15
        20 21 22 23 28 29 30 31
        36 37 38 39 44 45 46 47
        52 53 54 55 60 61 62 63
        ''','''
        8 9 10 11 12 13 14 15
        24 25 26 27 28 29 30 31
        40 41 42 43 44 45 46 47
        56 57 58 59 60 61 62 63
        ''','''
        16 17 18 19 20 21 22 23
        24 25 26 27 28 29 30 31
        48 49 50 51 52 53 54 55
        56 57 58 59 60 61 62 63
        ''','''
        32 33 34 35 36 37 38 39
        40 41 42 43 44 45 46 47
        48 49 50 51 52 53 54 55
        56 57 58 59 60 61 62 63
        ''')
        self.__card = 0  # index of the card currently being shown
        self.__num = 0   # running sum that becomes the guessed number
        self.main()      # NOTE: the game starts as a side effect of construction
    ############# Métodos a Serem implementadas ###########
    def apresentação(self):
        """Print a short welcome message explaining how the game works."""
        print('Ola bem-vindo ao jogo de advinhar números')
        print('Para jogar você deve pensar em um número entre 1 e 63')
        print('Depois vou lhe mostrar diversos cartões e você deve me dizer')
        print('se o número que você pensou está dentro desses cartões')
        print('E por fim, vou adivinhar o número que você pensou')
    def recebeEntradaDoUsuário(self):
        """Read and validate a yes/no answer from the player.

        Returns True for "sim"/"s", False for "não"/"n"; keeps prompting
        (raising/catching SoSimOuNao) on any other input.
        """
        while True:
            try:
                resp = input('\nO número está nessa cartela? (s/sim ou n/não)\n').lower()
                if not resp.isalpha():
                    print('Digite apenas letras!!')
                elif resp.startswith('s'):
                    return True
                elif resp.startswith('n'):
                    return False
                else:
                    raise SoSimOuNao
            except SoSimOuNao:
                print(SoSimOuNao())
    def imprimeNumeroSecreto(self):
        """Print the reconstructed secret number (self.__num) with flourish."""
        print('\nDeixa eu adivinhar!')
        print('Você pensou no número', self.__num)
        print('Foi não foi?!!')
        print('HAHAHAHA, eu sou um gênio!')
    ############### Métodos já feitos ###############
    def main(self):
        """Game driver: show every card, collect answers, reveal the number."""
        self.apresentação()
        for i in range(len(self.__cartoes)):
            self.__card = i
            self.mostraCartão()
            self.adicionaNumero(self.recebeEntradaDoUsuário())
        self.imprimeNumeroSecreto()
    def mostraCartão(self):
        """Print the card selected by self.__card."""
        print(self.__cartoes[self.__card])
    def adicionaNumero(self, esta):
        """Accumulate the secret number.

        If `esta` is True the card's first value (a power of two — the bit
        this card represents) is added to the running total.
        """
        if esta:
            self.__num += int(self.__cartoes[self.__card].split()[0])
if __name__ == '__main__':
    # Launch the game when run as a script (Jogo.__init__ starts it).
    x = Jogo()
| 33.160305 | 91 | 0.544199 |
acebdda42a3718b05ab4a4bcb87096c7c245e98e | 2,568 | py | Python | tests/test_number_line.py | joinee0208/manim | 8e1407e7fd5c01258f75748cc947d31d67ffd92b | [
"MIT"
] | 1 | 2021-10-17T15:43:51.000Z | 2021-10-17T15:43:51.000Z | tests/test_number_line.py | joinee0208/manim | 8e1407e7fd5c01258f75748cc947d31d67ffd92b | [
"MIT"
] | null | null | null | tests/test_number_line.py | joinee0208/manim | 8e1407e7fd5c01258f75748cc947d31d67ffd92b | [
"MIT"
] | null | null | null | from __future__ import annotations
import numpy as np
from manim import NumberLine
from manim.mobject.numbers import Integer
def test_unit_vector():
    """Check if the magnitude of unit vector along
    the NumberLine is equal to its unit_size."""
    lines = (
        NumberLine(unit_size=0.4),
        NumberLine(x_range=[-2, 5], length=12),
    )
    for line in lines:
        assert np.linalg.norm(line.get_unit_vector()) == line.unit_size
def test_decimal_determined_by_step():
    """Checks that step size is considered when determining the number of decimal
    places."""
    axis = NumberLine(x_range=[-2, 2, 0.5])
    expected_decimal_places = 1
    actual_decimal_places = axis.decimal_number_config["num_decimal_places"]
    # Bug fix: the original message concatenated str + int, which raises
    # TypeError instead of showing the assertion message on failure.
    assert (
        actual_decimal_places == expected_decimal_places
    ), f"Expected {expected_decimal_places} decimal place(s) but got {actual_decimal_places}"

    axis2 = NumberLine(x_range=[-1, 1, 0.25])
    expected_decimal_places = 2
    actual_decimal_places = axis2.decimal_number_config["num_decimal_places"]
    # Also fixes the message, which previously claimed "Expected 1" here.
    assert (
        actual_decimal_places == expected_decimal_places
    ), f"Expected {expected_decimal_places} decimal place(s) but got {actual_decimal_places}"
def test_decimal_config_overrides_defaults():
    """Checks that ``num_decimal_places`` is determined by step size and gets overridden by ``decimal_number_config``."""
    axis = NumberLine(
        x_range=[-2, 2, 0.5],
        decimal_number_config={"num_decimal_places": 0},
    )
    expected_decimal_places = 0
    actual_decimal_places = axis.decimal_number_config["num_decimal_places"]
    # Bug fix: str + int concatenation in the failure message raised
    # TypeError, and the message wrongly said "Expected 1" (0 is expected).
    assert (
        actual_decimal_places == expected_decimal_places
    ), f"Expected {expected_decimal_places} decimal place(s) but got {actual_decimal_places}"
def test_whole_numbers_step_size_default_to_0_decimal_places():
    """Checks that ``num_decimal_places`` defaults to 0 when a whole number step size is passed."""
    axis = NumberLine(x_range=[-2, 2, 1])
    expected_decimal_places = 0
    actual_decimal_places = axis.decimal_number_config["num_decimal_places"]
    # Bug fix: str + int concatenation in the failure message raised
    # TypeError, and the message wrongly said "Expected 1" (0 is expected).
    assert (
        actual_decimal_places == expected_decimal_places
    ), f"Expected {expected_decimal_places} decimal place(s) but got {actual_decimal_places}"
def test_add_labels():
    """``add_labels`` attaches one label mobject per dictionary entry."""
    expected_label_length = 6
    num_line = NumberLine(x_range=[-4, 4])
    positions = range(-3, 3)
    labels = [Integer(m) for m in range(-1, 5)]
    num_line.add_labels(dict(zip(positions, labels)))
    actual_label_length = len(num_line.labels)
    assert (
        actual_label_length == expected_label_length
    ), f"Expected a VGroup with {expected_label_length} integers but got {actual_label_length}."
| 37.217391 | 121 | 0.721573 |
acebdda863c734061c0c050145bed8e6951bf40e | 4,017 | py | Python | Toolkits/Discovery/meta/searx/tests/unit/engines/test_faroo.py | roscopecoltran/SniperKit-Core | 4600dffe1cddff438b948b6c22f586d052971e04 | [
"MIT"
] | null | null | null | Toolkits/Discovery/meta/searx/tests/unit/engines/test_faroo.py | roscopecoltran/SniperKit-Core | 4600dffe1cddff438b948b6c22f586d052971e04 | [
"MIT"
] | null | null | null | Toolkits/Discovery/meta/searx/tests/unit/engines/test_faroo.py | roscopecoltran/SniperKit-Core | 4600dffe1cddff438b948b6c22f586d052971e04 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from collections import defaultdict
import mock
from searx.engines import faroo
from searx.testing import SearxTestCase
class TestFarooEngine(SearxTestCase):
    # Unit tests for the searx "faroo" engine adapter: request URL building
    # and JSON response parsing.
    def test_request(self):
        """request() embeds query, host, language and category in the URL."""
        query = 'test_query'
        dicto = defaultdict(dict)
        dicto['pageno'] = 1
        dicto['language'] = 'fr_FR'
        dicto['category'] = 'general'
        params = faroo.request(query, dicto)
        self.assertIn('url', params)
        self.assertIn(query, params['url'])
        self.assertIn('faroo.com', params['url'])
        # Unsupported languages (fr) fall back to English.
        self.assertIn('en', params['url'])
        self.assertIn('web', params['url'])
        dicto['language'] = 'all'
        params = faroo.request(query, dicto)
        self.assertIn('en', params['url'])
        dicto['language'] = 'de_DE'
        params = faroo.request(query, dicto)
        self.assertIn('de', params['url'])
    def test_response(self):
        """response() parses results, raises on 401/429, tolerates empties."""
        self.assertRaises(AttributeError, faroo.response, None)
        self.assertRaises(AttributeError, faroo.response, [])
        self.assertRaises(AttributeError, faroo.response, '')
        self.assertRaises(AttributeError, faroo.response, '[]')
        response = mock.Mock(text='{}')
        self.assertEqual(faroo.response(response), [])
        response = mock.Mock(text='{"data": []}')
        self.assertEqual(faroo.response(response), [])
        # HTTP 401 (bad API key) and 429 (rate limited) must raise.
        response = mock.Mock(text='{"data": []}', status_code=401)
        self.assertRaises(Exception, faroo.response, response)
        response = mock.Mock(text='{"data": []}', status_code=429)
        self.assertRaises(Exception, faroo.response, response)
        # Fixture: one news result, one web result, one result with an image.
        json = """
        {
          "results": [
            {
              "title": "This is the title",
              "kwic": "This is the content",
              "content": "",
              "url": "http://this.is.the.url/",
              "iurl": "",
              "domain": "css3test.com",
              "author": "Jim Dalrymple",
              "news": true,
              "votes": "10",
              "date": 1360622563000,
              "related": []
            },
            {
              "title": "This is the title2",
              "kwic": "This is the content2",
              "content": "",
              "url": "http://this.is.the.url2/",
              "iurl": "",
              "domain": "css3test.com",
              "author": "Jim Dalrymple",
              "news": false,
              "votes": "10",
              "related": []
            },
            {
              "title": "This is the title3",
              "kwic": "This is the content3",
              "content": "",
              "url": "http://this.is.the.url3/",
              "iurl": "http://upload.wikimedia.org/optimized.jpg",
              "domain": "css3test.com",
              "author": "Jim Dalrymple",
              "news": false,
              "votes": "10",
              "related": []
            }
          ],
          "query": "test",
          "suggestions": [],
          "count": 100,
          "start": 1,
          "length": 10,
          "time": "15"
        }
        """
        response = mock.Mock(text=json)
        results = faroo.response(response)
        self.assertEqual(type(results), list)
        # 3 fixture entries yield 4 results (the image entry produces an
        # extra image result).
        self.assertEqual(len(results), 4)
        self.assertEqual(results[0]['title'], 'This is the title')
        self.assertEqual(results[0]['url'], 'http://this.is.the.url/')
        self.assertEqual(results[0]['content'], 'This is the content')
        self.assertEqual(results[1]['title'], 'This is the title2')
        self.assertEqual(results[1]['url'], 'http://this.is.the.url2/')
        self.assertEqual(results[1]['content'], 'This is the content2')
        self.assertEqual(results[3]['img_src'], 'http://upload.wikimedia.org/optimized.jpg')
        json = """
        {}
        """
        response = mock.Mock(text=json)
        results = faroo.response(response)
        self.assertEqual(type(results), list)
        self.assertEqual(len(results), 0)
| 34.333333 | 92 | 0.513069 |
acebddcc5378a1a2997a2ec50a27a0bc234111ad | 31,220 | py | Python | sdk/tables/azure-data-tables/azure/data/tables/aio/_table_client_async.py | mirespace/python-azure | bc98fd7949ba6c2d6bc1bd396317e98c50c09d77 | [
"MIT"
] | null | null | null | sdk/tables/azure-data-tables/azure/data/tables/aio/_table_client_async.py | mirespace/python-azure | bc98fd7949ba6c2d6bc1bd396317e98c50c09d77 | [
"MIT"
] | null | null | null | sdk/tables/azure-data-tables/azure/data/tables/aio/_table_client_async.py | mirespace/python-azure | bc98fd7949ba6c2d6bc1bd396317e98c50c09d77 | [
"MIT"
] | null | null | null | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import functools
from typing import AsyncIterable, List, Union, Any, Optional, Mapping, Iterable, Dict, overload, cast, TYPE_CHECKING
try:
from urllib.parse import urlparse, unquote
except ImportError:
from urlparse import urlparse # type: ignore
from urllib2 import unquote # type: ignore
from azure.core import MatchConditions
from azure.core.credentials import AzureNamedKeyCredential, AzureSasCredential
from azure.core.async_paging import AsyncItemPaged
from azure.core.exceptions import HttpResponseError
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from .._base_client import parse_connection_str
from .._entity import TableEntity
from .._generated.models import SignedIdentifier, TableProperties, QueryOptions
from .._models import TableAccessPolicy, TableItem
from .._serialize import serialize_iso, _parameter_filter_substitution, _prepare_key
from .._deserialize import deserialize_iso, _return_headers_and_deserialized
from .._error import (
_process_table_error,
_validate_table_name,
_decode_error,
_reraise_error
)
from .._models import UpdateMode
from .._deserialize import _convert_to_entity, _trim_service_metadata
from .._serialize import _add_entity_properties, _get_match_headers
from .._table_client import EntityType, TransactionOperationType
from ._base_client_async import AsyncTablesBaseClient
from ._models import TableEntityPropertiesPaged
from ._table_batch_async import TableBatchOperations
if TYPE_CHECKING:
from azure.core.credentials_async import AsyncTokenCredential
class TableClient(AsyncTablesBaseClient):
"""A client to interact with a specific Table in an Azure Tables account.
:ivar str account_name: The name of the Tables account.
:ivar str table_name: The name of the table.
:ivar str url: The full URL to the Tables account.
"""
    def __init__( # pylint: disable=missing-client-constructor-parameter-credential
        self,
        endpoint: str,
        table_name: str,
        *,
        credential: Optional[Union[AzureSasCredential, AzureNamedKeyCredential, "AsyncTokenCredential"]] = None,
        **kwargs
    ) -> None:
        """Create TableClient from a Credential.
        :param str endpoint: A URL to an Azure Tables account.
        :param str table_name: The table name.
        :keyword credential:
            The credentials with which to authenticate. This is optional if the
            account URL already has a SAS token. The value can be one of AzureNamedKeyCredential (azure-core),
            AzureSasCredential (azure-core), or TokenCredentials from azure-identity.
        :paramtype credential:
            :class:`~azure.core.credentials.AzureNamedKeyCredential` or
            :class:`~azure.core.credentials.AzureSasCredential` or
            :class:`~azure.core.credentials.TokenCredential`
        :returns: None
        """
        # Validate the table name locally before any pipeline/network setup.
        if not table_name:
            raise ValueError("Please specify a table name.")
        _validate_table_name(table_name)
        self.table_name = table_name
        super(TableClient, self).__init__(endpoint, credential=credential, **kwargs)
def _format_url(self, hostname):
"""Format the endpoint URL according to the current location
mode hostname.
"""
return "{}://{}{}".format(self.scheme, hostname, self._query_str)
    @classmethod
    def from_connection_string(
        cls,
        conn_str: str,
        table_name: str,
        **kwargs
    ) -> 'TableClient':
        """Create TableClient from a Connection string.
        :param str conn_str: A connection string to an Azure Tables account.
        :param str table_name: The table name.
        :returns: A table client.
        :rtype: :class:`~azure.data.tables.TableClient`
        .. admonition:: Example:
            .. literalinclude:: ../samples/async_samples/sample_create_client_async.py
                :start-after: [START create_table_client]
                :end-before: [END create_table_client]
                :language: python
                :dedent: 8
                :caption: Creating the TableClient from a connection string.
        """
        # parse_connection_str extracts the endpoint and builds the right
        # credential type (named key or SAS) from the connection string.
        endpoint, credential = parse_connection_str(
            conn_str=conn_str, credential=None, keyword_args=kwargs
        )
        return cls(endpoint, table_name=table_name, credential=credential, **kwargs)
    @classmethod
    def from_table_url(
        cls,
        table_url: str,
        **kwargs
    ) -> 'TableClient':
        """A client to interact with a specific Table.
        :param str table_url: The full URI to the table, including SAS token if used.
        :keyword credential:
            The credentials with which to authenticate. This is optional if the
            table URL already has a SAS token. The value can be one of AzureNamedKeyCredential
            or AzureSasCredential from azure-core.
        :paramtype credential:
            :class:`~azure.core.credentials.AzureNamedKeyCredential` or
            :class:`~azure.core.credentials.AzureSasCredential`
        :returns: A table client.
        :rtype: :class:`~azure.data.tables.TableClient`
        """
        try:
            # Default to https when no scheme is given.
            if not table_url.lower().startswith("http"):
                table_url = "https://" + table_url
        except AttributeError:
            raise ValueError("Table URL must be a string.")
        parsed_url = urlparse(table_url.rstrip("/"))
        if not parsed_url.netloc:
            raise ValueError("Invalid URL: {}".format(table_url))
        # Everything before the last path segment is the account path
        # (e.g. emulator URLs carry the account name in the path).
        table_path = parsed_url.path.lstrip("/").split("/")
        account_path = ""
        if len(table_path) > 1:
            account_path = "/" + "/".join(table_path[:-1])
        endpoint = "{}://{}{}?{}".format(
            parsed_url.scheme,
            parsed_url.netloc.rstrip("/"),
            account_path,
            parsed_url.query,
        )
        table_name = unquote(table_path[-1])
        # Accept the OData form Tables('name') and strip the wrapper.
        if table_name.lower().startswith("tables('"):
            table_name = table_name[8:-2]
        if not table_name:
            raise ValueError(
                "Invalid URL. Please provide a URL with a valid table name"
            )
        return cls(endpoint, table_name=table_name, **kwargs)
    @distributed_trace_async
    async def get_table_access_policy(self, **kwargs) -> Mapping[str, Optional[TableAccessPolicy]]:
        """
        Retrieves details about any stored access policies specified on the table that may be
        used with Shared Access Signatures.
        :return: Dictionary of SignedIdentifiers
        :rtype: Dict[str, Optional[:class:`~azure.data.tables.TableAccessPolicy`]]
        :raises: :class:`~azure.core.exceptions.HttpResponseError`
        """
        timeout = kwargs.pop("timeout", None)
        try:
            _, identifiers = await self._client.table.get_access_policy(
                table=self.table_name,
                timeout=timeout,
                cls=kwargs.pop("cls", None) or _return_headers_and_deserialized,
                **kwargs
            )
        except HttpResponseError as error:
            _process_table_error(error)
        # Convert generated-model identifiers into public TableAccessPolicy
        # objects; an identifier without a policy maps to None.
        output = {}  # type: Dict[str, Optional[TableAccessPolicy]]
        for identifier in cast(List[SignedIdentifier], identifiers):
            if identifier.access_policy:
                output[identifier.id] = TableAccessPolicy(
                    start=deserialize_iso(identifier.access_policy.start),
                    expiry=deserialize_iso(identifier.access_policy.expiry),
                    permission=identifier.access_policy.permission
                )
            else:
                output[identifier.id] = None
        return output
    @distributed_trace_async
    async def set_table_access_policy(
        self,
        signed_identifiers: Mapping[str, Optional[TableAccessPolicy]],
        **kwargs
    ) -> None:
        """Sets stored access policies for the table that may be used with Shared Access Signatures.
        :param signed_identifiers: Access policies to set for the table
        :type signed_identifiers: Dict[str, :class:`~azure.data.tables.TableAccessPolicy`]
        :return: None
        :rtype: None
        :raises: :class:`~azure.core.exceptions.HttpResponseError`
        """
        # Serialize datetimes to ISO-8601; a None value clears the policy
        # while keeping the identifier.
        identifiers = []
        for key, value in signed_identifiers.items():
            payload = None
            if value:
                payload = TableAccessPolicy(
                    start=serialize_iso(value.start),
                    expiry=serialize_iso(value.expiry),
                    permission=value.permission
                )
            identifiers.append(SignedIdentifier(id=key, access_policy=payload))
        try:
            await self._client.table.set_access_policy(
                table=self.table_name, table_acl=identifiers or None, **kwargs  # type: ignore
            )
        except HttpResponseError as error:
            try:
                _process_table_error(error)
            except HttpResponseError as table_error:
                # The service rejects >5 policies with an opaque XML error;
                # surface a clearer ValueError for that specific case.
                if (table_error.error_code == 'InvalidXmlDocument'  # type: ignore
                and len(identifiers) > 5):
                    raise ValueError(
                        'Too many access policies provided. The server does not support setting '
                        'more than 5 access policies on a single resource.'
                    )
                raise
    @distributed_trace_async
    async def create_table(self, **kwargs) -> TableItem:
        """Creates a new table under the given account.
        :return: A TableItem representing the created table.
        :rtype: :class:`~azure.data.tables.TableItem`
        :raises: :class:`~azure.core.exceptions.ResourceExistsError` If the entity already exists
        .. admonition:: Example:
            .. literalinclude:: ../samples/async_samples/sample_create_delete_table_async.py
                :start-after: [START create_table]
                :end-before: [END create_table]
                :language: python
                :dedent: 8
                :caption: Creating a table from the TableClient object.
        """
        table_properties = TableProperties(table_name=self.table_name)
        try:
            result = await self._client.table.create(table_properties, **kwargs)
        except HttpResponseError as error:
            _process_table_error(error)
        # Wrap the generated response in the public TableItem model.
        return TableItem(name=result.table_name)  # type: ignore
    @distributed_trace_async
    async def delete_table(self, **kwargs) -> None:
        """Deletes the table under the current account. No error will be raised if
        the given table name is not found.
        :return: None
        :rtype: None
        :raises: :class:`~azure.core.exceptions.HttpResponseError`
        .. admonition:: Example:
            .. literalinclude:: ../samples/async_samples/sample_create_delete_table_async.py
                :start-after: [START delete_from_table_client]
                :end-before: [END delete_from_table_client]
                :language: python
                :dedent: 8
                :caption: Deleting a table from the TableClient object.
        """
        try:
            await self._client.table.delete(table=self.table_name, **kwargs)
        except HttpResponseError as error:
            # Deleting a non-existent table is deliberately a no-op.
            if error.status_code == 404:
                return
            _process_table_error(error)
    @overload
    async def delete_entity(self, partition_key: str, row_key: str, **kwargs: Any) -> None:
        ...
    @overload
    async def delete_entity(self, entity: Union[TableEntity, Mapping[str, Any]], **kwargs: Any) -> None:
        ...
    @distributed_trace_async
    async def delete_entity(self, *args: Union[TableEntity, str], **kwargs: Any) -> None:
        """Deletes the specified entity in a table. No error will be raised if
        the entity or PartitionKey-RowKey pairing is not found.
        :param str partition_key: The partition key of the entity.
        :param str row_key: The row key of the entity.
        :param entity: The entity to delete
        :type entity: Union[TableEntity, Mapping[str, str]]
        :keyword str etag: Etag of the entity
        :keyword match_condition: The condition under which to perform the operation.
            Supported values include: MatchConditions.IfNotModified, MatchConditions.Unconditionally.
            The default value is Unconditionally.
        :paramtype match_condition: ~azure.core.MatchConditions
        :return: None
        :rtype: None
        :raises: :class:`~azure.core.exceptions.HttpResponseError`
        .. admonition:: Example:
            .. literalinclude:: ../samples/async_samples/sample_insert_delete_entities_async.py
                :start-after: [START delete_entity]
                :end-before: [END delete_entity]
                :language: python
                :dedent: 8
                :caption: Adding an entity to a Table
        """
        # Two calling conventions (see overloads): either a single entity
        # mapping or positional partition_key/row_key. Indexing a string with
        # ['PartitionKey'] raises TypeError, which routes us to the
        # positional-argument branch.
        try:
            entity = kwargs.pop('entity', None)
            if not entity:
                entity = args[0]
            partition_key = entity['PartitionKey']
            row_key = entity['RowKey']
        except (TypeError, IndexError):
            partition_key = kwargs.pop('partition_key', None)
            if not partition_key:
                partition_key = args[0]
            row_key = kwargs.pop("row_key", None)
            if not row_key:
                row_key = args[1]
        match_condition = kwargs.pop("match_condition", None)
        etag = kwargs.pop("etag", None)
        if match_condition and entity and not etag:
            # Fall back to the etag stashed in the entity's metadata, if any.
            try:
                etag = entity.metadata.get("etag", None)  # type: ignore
            except (AttributeError, TypeError):
                pass
        if_match = _get_match_headers(
            etag=etag,
            match_condition=match_condition or MatchConditions.Unconditionally,
        )
        try:
            await self._client.table.delete_entity(
                table=self.table_name,
                partition_key=_prepare_key(partition_key),
                row_key=_prepare_key(row_key),
                if_match=if_match,
                **kwargs
            )
        except HttpResponseError as error:
            # Deleting a non-existent entity is deliberately a no-op.
            if error.status_code == 404:
                return
            _process_table_error(error)
    @distributed_trace_async
    async def create_entity(
        self,
        entity: EntityType,
        **kwargs
    ) -> Mapping[str, Any]:
        """Insert entity in a table.
        :param entity: The properties for the table entity.
        :type entity: Union[TableEntity, Mapping[str, Any]]
        :return: Dictionary mapping operation metadata returned from the service
        :rtype: Dict[str,str]
        :raises: :class:`~azure.core.exceptions.ResourceExistsError` If the entity already exists
        .. admonition:: Example:
            .. literalinclude:: ../samples/async_samples/sample_insert_delete_entities_async.py
                :start-after: [START create_entity]
                :end-before: [END create_entity]
                :language: python
                :dedent: 8
                :caption: Adding an entity to a Table
        """
        # Annotate property values with their OData edm types for the wire.
        entity = _add_entity_properties(entity)
        try:
            metadata, content = await self._client.table.insert_entity(  # type: ignore
                table=self.table_name,
                table_entity_properties=entity,  # type: ignore
                cls=kwargs.pop("cls", _return_headers_and_deserialized),
                **kwargs
            )
        except HttpResponseError as error:
            # Translate the service's generic "PropertiesNeedValue" into a
            # specific ValueError naming the missing key.
            decoded = _decode_error(error.response, error.message)
            if decoded.error_code == "PropertiesNeedValue":
                if entity.get("PartitionKey") is None:
                    raise ValueError("PartitionKey must be present in an entity")
                if entity.get("RowKey") is None:
                    raise ValueError("RowKey must be present in an entity")
            _reraise_error(error)
        return _trim_service_metadata(metadata, content=content)  # type: ignore
    @distributed_trace_async
    async def update_entity(
        self,
        entity: EntityType,
        mode: Union[str, UpdateMode] = UpdateMode.MERGE,
        **kwargs
    ) -> Mapping[str, Any]:
        """Update entity in a table.
        :param entity: The properties for the table entity.
        :type entity: :class:`~azure.data.tables.TableEntity` or Dict[str,str]
        :param mode: Merge or Replace entity
        :type mode: :class:`~azure.data.tables.UpdateMode`
        :keyword str etag: Etag of the entity
        :keyword match_condition: The condition under which to perform the operation.
            Supported values include: MatchConditions.IfNotModified, MatchConditions.Unconditionally.
            The default value is Unconditionally.
        :paramtype match_condition: ~azure.core.MatchCondition
        :return: Dictionary of operation metadata returned from service
        :rtype: Dict[str,str]
        :raises: :class:`~azure.core.exceptions.HttpResponseError`
        .. admonition:: Example:
            .. literalinclude:: ../samples/async_samples/sample_update_upsert_merge_entities_async.py
                :start-after: [START update_entity]
                :end-before: [END update_entity]
                :language: python
                :dedent: 16
                :caption: Querying entities from a TableClient
        """
        match_condition = kwargs.pop("match_condition", None)
        etag = kwargs.pop("etag", None)
        if match_condition and entity and not etag:
            # Fall back to the etag stashed in the entity's metadata, if any.
            try:
                etag = entity.metadata.get("etag", None)  # type: ignore
            except (AttributeError, TypeError):
                pass
        if_match = _get_match_headers(
            etag=etag,
            match_condition=match_condition or MatchConditions.Unconditionally,
        )
        partition_key = entity["PartitionKey"]
        row_key = entity["RowKey"]
        entity = _add_entity_properties(entity)
        try:
            metadata = None
            content = None
            # REPLACE maps to PUT (update_entity); MERGE maps to
            # MERGE/PATCH (merge_entity) which only touches given properties.
            if mode == UpdateMode.REPLACE:
                metadata, content = await self._client.table.update_entity(  # type: ignore
                    table=self.table_name,
                    partition_key=_prepare_key(partition_key),
                    row_key=_prepare_key(row_key),
                    table_entity_properties=entity,  # type: ignore
                    if_match=if_match,
                    cls=kwargs.pop("cls", _return_headers_and_deserialized),
                    **kwargs
                )
            elif mode == UpdateMode.MERGE:
                metadata, content = await self._client.table.merge_entity(  # type: ignore
                    table=self.table_name,
                    partition_key=_prepare_key(partition_key),
                    row_key=_prepare_key(row_key),
                    if_match=if_match,
                    cls=kwargs.pop("cls", _return_headers_and_deserialized),
                    table_entity_properties=entity,  # type: ignore
                    **kwargs
                )
            else:
                raise ValueError("Mode type '{}' is not supported.".format(mode))
        except HttpResponseError as error:
            _process_table_error(error)
        return _trim_service_metadata(metadata, content=content)  # type: ignore
    @distributed_trace
    def list_entities(self, **kwargs) -> AsyncItemPaged[TableEntity]:
        """Lists entities in a table.
        :keyword int results_per_page: Number of entities returned per service request.
        :keyword select: Specify desired properties of an entity to return.
        :paramtype select: str or List[str]
        :return: AsyncItemPaged[:class:`~azure.data.tables.TableEntity`]
        :rtype: ~azure.core.async_paging.AsyncItemPaged[TableEntity]
        :raises: :class:`~azure.core.exceptions.HttpResponseError`
        .. admonition:: Example:
            .. literalinclude:: ../samples/async_samples/sample_update_upsert_merge_entities_async.py
                :start-after: [START list_entities]
                :end-before: [END list_entities]
                :language: python
                :dedent: 16
                :caption: Querying entities from a TableClient
        """
        # $select accepts a comma-separated string; join list input.
        user_select = kwargs.pop("select", None)
        if user_select and not isinstance(user_select, str):
            user_select = ",".join(user_select)
        top = kwargs.pop("results_per_page", None)
        # Lazy paging: nothing is fetched until the pager is iterated.
        command = functools.partial(self._client.table.query_entities, **kwargs)
        return AsyncItemPaged(
            command,
            table=self.table_name,
            results_per_page=top,
            select=user_select,
            page_iterator_class=TableEntityPropertiesPaged,
        )
    @distributed_trace
    def query_entities(
        self,
        query_filter: str,
        **kwargs
    ) -> AsyncItemPaged[TableEntity]:
        """Lists entities in a table.
        :param str query_filter: Specify a filter to return certain entities
        :keyword int results_per_page: Number of entities returned per service request.
        :keyword select: Specify desired properties of an entity to return.
        :paramtype select: str or List[str]
        :keyword parameters: Dictionary for formatting query with additional, user defined parameters
        :paramtype parameters: Dict[str, Any]
        :return: AsyncItemPaged[:class:`~azure.data.tables.TableEntity`]
        :rtype: ~azure.core.async_paging.AsyncItemPaged[TableEntity]
        :raises: :class:`~azure.core.exceptions.HttpResponseError`
        .. admonition:: Example:
            .. literalinclude:: ../samples/async_samples/sample_query_table_async.py
                :start-after: [START query_entities]
                :end-before: [END query_entities]
                :language: python
                :dedent: 8
                :caption: Querying entities from a TableClient
        """
        # Substitute @param placeholders in the filter with escaped values.
        parameters = kwargs.pop("parameters", None)
        query_filter = _parameter_filter_substitution(
            parameters, query_filter
        )
        top = kwargs.pop("results_per_page", None)
        # $select accepts a comma-separated string; join list input.
        user_select = kwargs.pop("select", None)
        if user_select and not isinstance(user_select, str):
            user_select = ",".join(user_select)
        command = functools.partial(self._client.table.query_entities, **kwargs)
        return AsyncItemPaged(
            command,
            table=self.table_name,
            results_per_page=top,
            filter=query_filter,
            select=user_select,
            page_iterator_class=TableEntityPropertiesPaged,
        )
    @distributed_trace_async
    async def get_entity(
        self,
        partition_key: str,
        row_key: str,
        **kwargs
    ) -> TableEntity:
        """Get a single entity in a table.
        :param partition_key: The partition key of the entity.
        :type partition_key: str
        :param row_key: The row key of the entity.
        :type row_key: str
        :keyword select: Specify desired properties of an entity to return.
        :paramtype select: str or List[str]
        :return: Dictionary mapping operation metadata returned from the service
        :rtype: :class:`~azure.data.tables.TableEntity`
        :raises: :class:`~azure.core.exceptions.HttpResponseError`
        .. admonition:: Example:
            .. literalinclude:: ../samples/async_samples/sample_update_upsert_merge_entities_async.py
                :start-after: [START get_entity]
                :end-before: [END get_entity]
                :language: python
                :dedent: 16
                :caption: Getting an entity from PartitionKey and RowKey
        """
        # $select accepts a comma-separated string; join list input.
        user_select = kwargs.pop("select", None)
        if user_select and not isinstance(user_select, str):
            user_select = ",".join(user_select)
        try:
            entity = await self._client.table.query_entity_with_partition_and_row_key(
                table=self.table_name,
                partition_key=_prepare_key(partition_key),
                row_key=_prepare_key(row_key),
                query_options=QueryOptions(select=user_select),
                **kwargs
            )
            # Deserialize OData-typed properties into a TableEntity.
            properties = _convert_to_entity(entity)
        except HttpResponseError as error:
            _process_table_error(error)
        return properties
    @distributed_trace_async
    async def upsert_entity(
        self,
        entity: EntityType,
        mode: Union[str, UpdateMode] = UpdateMode.MERGE,
        **kwargs
    ) -> Mapping[str, Any]:
        """Update/Merge or Insert entity into table.
        :param entity: The properties for the table entity.
        :type entity: :class:`~azure.data.tables.TableEntity` or Dict[str,str]
        :param mode: Merge or Replace entity
        :type mode: :class:`~azure.data.tables.UpdateMode`
        :return: Dictionary mapping operation metadata returned from the service
        :rtype: Dict[str,str]
        :raises: :class:`~azure.core.exceptions.HttpResponseError`
        .. admonition:: Example:
            .. literalinclude:: ../samples/async_samples/sample_update_upsert_merge_entities_async.py
                :start-after: [START upsert_entity]
                :end-before: [END upsert_entity]
                :language: python
                :dedent: 16
                :caption: Update/Merge or Insert an entity into a table
        """
        partition_key = entity["PartitionKey"]
        row_key = entity["RowKey"]
        entity = _add_entity_properties(entity)
        try:
            metadata = None
            content = None
            # Unlike update_entity, no If-Match header is sent: the service
            # inserts when the entity does not exist, merges/replaces when it
            # does.
            if mode == UpdateMode.MERGE:
                metadata, content = await self._client.table.merge_entity(  # type: ignore
                    table=self.table_name,
                    partition_key=_prepare_key(partition_key),
                    row_key=_prepare_key(row_key),
                    table_entity_properties=entity,  # type: ignore
                    cls=kwargs.pop("cls", _return_headers_and_deserialized),
                    **kwargs
                )
            elif mode == UpdateMode.REPLACE:
                metadata, content = await self._client.table.update_entity(  # type: ignore
                    table=self.table_name,
                    partition_key=_prepare_key(partition_key),
                    row_key=_prepare_key(row_key),
                    table_entity_properties=entity,  # type: ignore
                    cls=kwargs.pop("cls", _return_headers_and_deserialized),
                    **kwargs
                )
            else:
                raise ValueError(
                    """Update mode {} is not supported.
                    For a list of supported modes see the UpdateMode enum""".format(
                        mode
                    )
                )
        except HttpResponseError as error:
            _process_table_error(error)
        return _trim_service_metadata(metadata, content=content)  # type: ignore
@distributed_trace_async
async def submit_transaction(
    self,
    # Resolved an unmerged VCS conflict here: the file contained raw
    # "<<<<<<< HEAD"/"=======">>>>>>>" markers, which made the module
    # unparsable. The more general "main" variant (supporting both sync
    # and async iterables of operations) is kept; it is backward
    # compatible with the plain-Iterable signature of the other branch.
    operations: Union[
        Iterable[TransactionOperationType], AsyncIterable[TransactionOperationType]
    ],
    **kwargs
) -> List[Mapping[str, Any]]:
    """Commit a list of operations as a single transaction.

    If any one of these operations fails, the entire transaction will be rejected.

    :param operations: The list of operations to commit in a transaction. This should be an iterable
        (or async iterable) of tuples containing an operation name, the entity on which to operate,
        and optionally, a dict of additional kwargs for that operation. For example::

            - ('upsert', {'PartitionKey': 'A', 'RowKey': 'B'})
            - ('upsert', {'PartitionKey': 'A', 'RowKey': 'B'}, {'mode': UpdateMode.REPLACE})

    :type operations:
        Union[Iterable[Tuple[str, Entity, Mapping[str, Any]]],AsyncIterable[Tuple[str, Entity, Mapping[str, Any]]]]
    :return: A list of mappings with response metadata for each operation in the transaction.
    :rtype: List[Mapping[str, Any]]
    :raises ~azure.data.tables.TableTransactionError:
    """
    batched_requests = TableBatchOperations(
        self._client,
        self._client._serialize,  # pylint: disable=protected-access
        self._client._deserialize,  # pylint: disable=protected-access
        self._client._config,  # pylint: disable=protected-access
        self.table_name,
        is_cosmos_endpoint=self._cosmos_endpoint,
        **kwargs
    )
    try:
        # First assume a synchronous iterable ...
        for operation in operations:  # type: ignore
            batched_requests.add_operation(operation)
    except TypeError:
        # ... fall back to an async iterable, and raise a descriptive
        # error when the value is neither.
        try:
            async for operation in operations:  # type: ignore
                batched_requests.add_operation(operation)
        except TypeError:
            raise TypeError(
                "The value of 'operations' must be an iterator or async iterator "
                "of Tuples. Please check documentation for correct Tuple format."
            )
    return await self._batch_send(*batched_requests.requests, **kwargs)
| 41.737968 | 116 | 0.61294 |
acebde1cf68ad1333a75c08bbfb6ee4927652b4b | 1,068 | py | Python | parlai/tasks/dbll_movie/build.py | shagunsodhani/ParlAI | 5b634b844807372adfb0f6d6e5c42341ac8138f0 | [
"BSD-3-Clause"
] | 1 | 2017-06-26T07:46:33.000Z | 2017-06-26T07:46:33.000Z | parlai/tasks/dbll_movie/build.py | shagunsodhani/ParlAI | 5b634b844807372adfb0f6d6e5c42341ac8138f0 | [
"BSD-3-Clause"
] | null | null | null | parlai/tasks/dbll_movie/build.py | shagunsodhani/ParlAI | 5b634b844807372adfb0f6d6e5c42341ac8138f0 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
#
# Download and build the data if it does not exist.
import parlai.core.build_data as build_data
import parlai.tasks.wikimovies.build as wikimovies_build
import os
def build(opt):
    """Download and unpack the DBLL data set into ``opt['datapath']``.

    The DBLL task depends on WikiMovies, so that data set is built first.
    A marker file prevents re-downloading on subsequent runs.
    """
    wikimovies_build.build(opt)

    dpath = os.path.join(opt['datapath'], 'DBLL')
    if build_data.built(dpath):
        # Already downloaded and unpacked; nothing to do.
        return

    print('[building data: ' + dpath + ']')
    build_data.remove_dir(dpath)
    build_data.make_dir(dpath)

    # Fetch and unpack the archive.
    fname = 'dbll.tgz'
    url = 'https://s3.amazonaws.com/fair-data/parlai/dbll/' + fname
    build_data.download(url, dpath, fname)
    build_data.untar(dpath, fname)

    # Mark the data as built.
    build_data.mark_done(dpath)
| 33.375 | 77 | 0.692884 |
acebde6f98a7275303497b01fd7f1751ee5e2189 | 139 | py | Python | dmdd/__init__.py | hyounggyu/dmdd | 5d3cdc0b49be452349d0778e3ef4dd6fa4cc9045 | [
"MIT"
] | 13 | 2015-06-16T02:11:23.000Z | 2021-04-22T03:01:01.000Z | dmdd/__init__.py | hyounggyu/dmdd | 5d3cdc0b49be452349d0778e3ef4dd6fa4cc9045 | [
"MIT"
] | 2 | 2015-07-28T14:34:56.000Z | 2016-04-07T02:44:29.000Z | dmdd/__init__.py | hyounggyu/dmdd | 5d3cdc0b49be452349d0778e3ef4dd6fa4cc9045 | [
"MIT"
] | 14 | 2015-06-22T15:00:04.000Z | 2021-07-13T21:13:26.000Z | __version__ = '0.2'
# NOTE(review): presumably setup.py injects __DMDD_SETUP__ into builtins
# before importing this package so the heavy submodule import below is
# skipped during installation -- assumption based on the flag name; confirm
# against the project's setup.py.
try:
    __DMDD_SETUP__
except NameError:
    # Normal import path: the flag was never injected.
    __DMDD_SETUP__ = False

if not __DMDD_SETUP__:
    from .dmdd import *
| 12.636364 | 26 | 0.697842 |
acebde97f33454a9b367ce23487d8c6815e2b0dc | 1,551 | py | Python | mindspore/ops/_op_impl/tbe/unsorted_segment_sum_ds.py | GuoSuiming/mindspore | 48afc4cfa53d970c0b20eedfb46e039db2a133d5 | [
"Apache-2.0"
] | 3,200 | 2020-02-17T12:45:41.000Z | 2022-03-31T20:21:16.000Z | mindspore/python/mindspore/ops/_op_impl/tbe/unsorted_segment_sum_ds.py | zimo-geek/mindspore | 665ec683d4af85c71b2a1f0d6829356f2bc0e1ff | [
"Apache-2.0"
] | 176 | 2020-02-12T02:52:11.000Z | 2022-03-28T22:15:55.000Z | mindspore/python/mindspore/ops/_op_impl/tbe/unsorted_segment_sum_ds.py | zimo-geek/mindspore | 665ec683d4af85c71b2a1f0d6829356f2bc0e1ff | [
"Apache-2.0"
] | 621 | 2020-03-09T01:31:41.000Z | 2022-03-30T03:43:19.000Z | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""UnsortedSegmentSum op"""
from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType
# Registration metadata for the dynamic-shape ("ds") TBE kernel of
# UnsortedSegmentSum: kernel binary name, scheduling flags, and the single
# supported dtype/format combination (float32 data with int32 ids/segments).
unsorted_segment_sum_ds_op_info = TBERegOp("UnsortedSegmentSum") \
    .fusion_type("OPAQUE") \
    .async_flag(False) \
    .binfile_name("unsorted_segment_sum.so") \
    .compute_cost(10) \
    .kernel_name("unsorted_segment_sum") \
    .partial_flag(True) \
    .dynamic_shape(True) \
    .input(0, "x", False, "required", "all") \
    .input(1, "segment_ids", False, "required", "all") \
    .input(2, "num_segments", False, "required", "all") \
    .output(0, "y", False, "required", "all") \
    .dtype_format(DataType.F32_Default, DataType.I32_Default, DataType.I32_Default, DataType.F32_Default) \
    .get_op_info()


# The decorator registers the op info above with the TBE op registry; the
# function body itself is intentionally empty.
@op_info_register(unsorted_segment_sum_ds_op_info)
def _unsorted_segment_sum_ds_tbe():
    """UnsortedSegmentSumUnknown TBE register"""
    return
| 39.769231 | 107 | 0.693746 |
acebdfb1b357ad9e3498dc182160c3928ff7e83b | 1,559 | py | Python | lenstronomy/LightModel/Profiles/nie.py | DarthLazar/lenstronomy | 64f72a89bb08ef19d3641b7e5e048238632e9094 | [
"MIT"
] | 1 | 2021-08-17T12:37:38.000Z | 2021-08-17T12:37:38.000Z | lenstronomy/LightModel/Profiles/nie.py | DarthLazar/lenstronomy | 64f72a89bb08ef19d3641b7e5e048238632e9094 | [
"MIT"
] | 1 | 2022-02-26T21:04:47.000Z | 2022-02-26T21:04:47.000Z | lenstronomy/LightModel/Profiles/nie.py | DarthLazar/lenstronomy | 64f72a89bb08ef19d3641b7e5e048238632e9094 | [
"MIT"
] | 1 | 2022-02-08T20:31:45.000Z | 2022-02-08T20:31:45.000Z | import numpy as np
import lenstronomy.Util.param_util as param_util
from lenstronomy.Util import util
from lenstronomy.LightModel.Profiles.profile_base import LightProfileBase
__all__ = ['NIE']
class NIE(LightProfileBase):
    """
    non-divergent isothermal ellipse (projected)

    This is effectively the convergence profile of the NIE lens model with an amplitude 'amp' rather than an Einstein
    radius 'theta_E'
    """
    # Parameter metadata used by the sampling/fitting machinery.
    param_names = ['amp', 'e1', 'e2', 's_scale', 'center_x', 'center_y']
    lower_limit_default = {'amp': 0, 'e1': -0.5, 'e2': -0.5, 's_scale': 0, 'center_x': -100, 'center_y': -100}
    upper_limit_default = {'amp': 100, 'e1': 0.5, 'e2': 0.5, 's_scale': 100, 'center_x': 100, 'center_y': 100}

    def function(self, x, y, amp, e1, e2, s_scale, center_x=0, center_y=0):
        """
        :param x: x-coordinate
        :param y: y-coordinate
        :param amp: surface brightness normalization
        :param e1: eccentricity component
        :param e2: eccentricity component
        :param s_scale: smoothing scale (square averaged of minor and major axis)
        :param center_x: center of profile
        :param center_y: center of profile
        :return: surface brightness of NIE profile
        """
        # Shift coordinates to the profile centre.
        x_ = x - center_x
        y_ = y - center_y
        phi_G, q = param_util.ellipticity2phi_q(e1, e2)
        # Rotate into the frame aligned with the major axis.
        x__, y__ = util.rotate(x_, y_, phi_G)
        # Convert the square-averaged smoothing scale into the smoothing
        # length used in the elliptical radius below -- see the s_scale
        # definition in the docstring.
        s = s_scale * np.sqrt((1 + q ** 2) / (2 * q ** 2))
        # Isothermal-ellipse surface brightness ~ 1 / elliptical radius.
        f_ = amp/2. * (q**2 * (s**2 + x__**2) + y__**2)**(-1./2)
        return f_
| 38.02439 | 117 | 0.618345 |
acebe039ac554a40c93300e61eb0ac13d0bb90a3 | 3,509 | py | Python | json_ref_dict/uri.py | RangelReale/json-ref-dict | 1ed1c96707359b5d648bafd3062a4446b469b682 | [
"MIT"
] | null | null | null | json_ref_dict/uri.py | RangelReale/json-ref-dict | 1ed1c96707359b5d648bafd3062a4446b469b682 | [
"MIT"
] | null | null | null | json_ref_dict/uri.py | RangelReale/json-ref-dict | 1ed1c96707359b5d648bafd3062a4446b469b682 | [
"MIT"
] | null | null | null | from os import path
import re
from typing import NamedTuple
from urllib.parse import urlparse
from json_ref_dict.exceptions import ReferenceParseError
# Splits "{base}/{name}#{pointer}" into its three parts; uri_base is optional.
JSON_REF_REGEX = r"^((?P<uri_base>.*)\/)?(?P<uri_name>.*)#(?P<pointer>\/.*)"


class URI(NamedTuple):
    """URI for a schema or subschema.

    Decomposed into a directory-like base, a document name, and a JSON
    pointer into that document.
    """

    uri_base: str
    uri_name: str
    pointer: str

    @classmethod
    def from_string(cls, string: str) -> "URI":
        """Construct from string.

        :raises ReferenceParseError: if the string is not of the form
            ``{base_uri}#{json_pointer}``.
        """
        # Normalise so the regex always finds a "#/..." pointer suffix.
        if "#" not in string:
            string += "#/"
        if string.endswith("#"):
            string += "/"
        match = re.match(JSON_REF_REGEX, string, re.DOTALL)
        if not match:
            raise ReferenceParseError(
                f"Couldn't parse '{string}' as a valid reference. "
                "References must be of the format "
                "{base_uri}#{json_pointer}, where 'json_pointer' "
                "begins with '/'"
            )
        return URI(**match.groupdict())

    @property
    def root(self):
        """String representation excluding the JSON pointer."""
        # filter(None, ...) drops uri_base when the regex left it as None.
        return path.join(*filter(None, [self.uri_base, self.uri_name]))

    def _get_relative(self, reference: str) -> "URI":
        """Get a new URI relative to the current root."""
        if not isinstance(reference, str):
            raise TypeError(f"Got invalid value for '$ref': {reference}.")

        if not reference.split("#")[0]:  # Local reference.
            # Same document -- only the pointer changes; "#" alone means "/".
            reference = reference.split("#")[1] or "/"
            return URI(self.uri_base, self.uri_name, reference)
        # Remote reference: resolve relative to this URI's base directory.
        return self.from_string(
            path.join(*filter(None, [self.uri_base, reference]))
        )

    def relative(self, reference: str) -> "URI":
        """Get a new URI relative to the current root.

        :raises ReferenceParseError: if relative reference is equal
            to the current reference.
        :return: The URI of the reference relative to the current URI.
        """
        if is_absolute(reference):
            relative_uri = URI.from_string(reference)
        else:
            relative_uri = self._get_relative(reference)
        # A self-referential "$ref" would loop forever during resolution.
        if relative_uri == self:
            raise ReferenceParseError(
                f"Reference: '{reference}' from context '{self}' is "
                "self-referential. Cannot resolve."
            )
        return relative_uri

    def get(self, *pointer_segments: str) -> "URI":
        """Get a new URI representing a member of the current URI."""
        return self.__class__(
            uri_base=self.uri_base,
            uri_name=self.uri_name,
            pointer=path.join(self.pointer, *pointer_segments),
        )

    def back(self) -> "URI":
        """Pop a segment from the pointer."""
        segments = self.pointer.split("/")
        pointer = path.join("/", *segments[:-1])
        return self.__class__(
            uri_base=self.uri_base, uri_name=self.uri_name, pointer=pointer
        )

    def __repr__(self) -> str:
        """String representation of the URI."""
        return self.root + f"#{self.pointer}"
def is_absolute(ref: str) -> bool:
    """Return ``True`` when *ref* carries a URI scheme (i.e. is absolute)."""
    return bool(urlparse(ref).scheme)
def parse_segment(segment: str) -> str:
    """Escape a JSON pointer segment.

    Per RFC 6901 (https://tools.ietf.org/html/rfc6901), ``~`` becomes
    ``~0`` and ``/`` becomes ``~1``. A single-pass translate avoids any
    ordering pitfalls between the two replacements.
    """
    return segment.translate(str.maketrans({"~": "~0", "/": "~1"}))
| 32.794393 | 76 | 0.589057 |
acebe0c1d12d05d19bf77d8ce173bac6478e98bd | 381 | py | Python | arelle/Version.py | derekgengenbacher-wf/Arelle-1 | 192c72b51997021fec9dbb8b0f0ab23622c174fa | [
"Apache-2.0"
] | null | null | null | arelle/Version.py | derekgengenbacher-wf/Arelle-1 | 192c72b51997021fec9dbb8b0f0ab23622c174fa | [
"Apache-2.0"
] | null | null | null | arelle/Version.py | derekgengenbacher-wf/Arelle-1 | 192c72b51997021fec9dbb8b0f0ab23622c174fa | [
"Apache-2.0"
] | null | null | null | '''
This module represents the time stamp when Arelle was last built
@author: Mark V Systems Limited
(c) Copyright 2021 Mark V Systems Limited, All rights reserved.
'''
# Build-stamp constants, rewritten automatically when Arelle is compiled
# (see the module docstring above).
__version__ = '1.2021.09.02' # number version of code base and date compiled
version = '2021-09-02 04:37 UTC' # string version of date compiled
copyrightLatestYear = '2021' # string version of year compiled
| 34.636364 | 77 | 0.750656 |
acebe0c4611fbca226caced1a3789dfdc53858e1 | 1,571 | py | Python | results/numExp00_NSE/main_NSE_test_InteractionPictureMethod.py | Huzzi-Aliaas/Internship_HB2021 | f33bbc1fcee22b54b55d94eba166c9e409e103f9 | [
"MIT"
] | null | null | null | results/numExp00_NSE/main_NSE_test_InteractionPictureMethod.py | Huzzi-Aliaas/Internship_HB2021 | f33bbc1fcee22b54b55d94eba166c9e409e103f9 | [
"MIT"
] | null | null | null | results/numExp00_NSE/main_NSE_test_InteractionPictureMethod.py | Huzzi-Aliaas/Internship_HB2021 | f33bbc1fcee22b54b55d94eba166c9e409e103f9 | [
"MIT"
] | null | null | null | import sys; sys.path.append('../../')
import numpy as np
from gnse.solver import Interaction_picture_method
from gnse.tools import plot_evolution
from gnse.config import FTFREQ
def main():
# -- SET PARAMETERS FOR COMPUTATIONAL DOMAIN
tMax = 40.0 # (fs) bound for time mesh
Nt = 2 ** 14 # (-) number of sample points: t-axis
zMax = np.pi # (micron) upper limit for propagation routine
Nz = 1000 # (-) number of sample points: z-axis
nSkip = 2 # (-) keep only every nskip-th system state
# -- SET FIBER PARAMETERS
b2 = -1.0 # (fs^2/micron)
beta = lambda w: 0.5 * b2 * w * w # (1/micron)
beta1 = lambda w: b2 * w # (fs/micron)
beta2 = lambda w: b2 # (fs^2/micron)
gamma = 1e-8 # (W/micron)
# -- SET PULSE PARAMETERS
c0 = 0.29979 # (fs/micron) free space speed of light
t0 = 1.0 # (fs) pulse duration
P0 = np.abs(beta2(0)) / t0 / t0 / gamma
u_sol = (
lambda t, z: 2 * np.sqrt(P0) * np.exp(0.5j * gamma * P0 * z) / np.cosh(t / t0)
)
# -- INITIALIZE COMPUTATIONAL DOMAIN
t = np.linspace(-tMax, tMax, Nt, endpoint=False)
w = FTFREQ(t.size, d=t[1] - t[0]) * 2 * np.pi
z = np.linspace(0, zMax, Nz + 1)
# -- INITIALIZE SOLVER
my_solver = Interaction_picture_method(z, t, beta(w), gamma, nSkip=nSkip)
# -- SET INITIAL CONDITION AND RUN
A0_t = u_sol(t, 0)
my_solver.solve(A0_t)
# -- SHOW RESULTS
plot_evolution(
my_solver.z, my_solver.t, my_solver.utz, tLim=(-10, 10), wLim=(-20, 20)
)
if __name__ == "__main__":
main()
| 30.211538 | 86 | 0.596435 |
acebe16dea5bc3e7e6769ca38dff0c0356cd241a | 2,653 | py | Python | salt/utils/preseed.py | jubrad/salt | 7960334fb726cfde45e6409da79a65535c626685 | [
"Apache-2.0"
] | 1 | 2021-08-14T13:48:38.000Z | 2021-08-14T13:48:38.000Z | salt/utils/preseed.py | jubrad/salt | 7960334fb726cfde45e6409da79a65535c626685 | [
"Apache-2.0"
] | 3 | 2015-03-31T14:44:05.000Z | 2015-06-18T19:02:24.000Z | salt/utils/preseed.py | jubrad/salt | 7960334fb726cfde45e6409da79a65535c626685 | [
"Apache-2.0"
] | 1 | 2020-01-02T09:03:24.000Z | 2020-01-02T09:03:24.000Z | # -*- coding: utf-8 -*-
'''
Utilities for managing Debian preseed
.. versionadded:: Beryllium
'''
from __future__ import absolute_import
import yaml
import shlex
import salt.utils.files
def mksls(src, dst=None):
    '''
    Convert a preseed file to an SLS file

    :param src: path to the Debian preseed file to read.
    :param dst: optional destination path; when given the rendered SLS YAML
        is written there, otherwise the YAML string is returned.
    '''
    # Parse the preseed file into a nested dict:
    # owner -> question path segments -> {'type': ..., 'argument': ...}
    ps_opts = {}
    with salt.utils.files.fopen(src, 'r') as fh_:
        for line in fh_:
            if line.startswith('#'):
                continue
            if not line.strip():
                continue
            comps = shlex.split(line)
            if comps[0] not in ps_opts:
                ps_opts[comps[0]] = {}
            cmds = comps[1].split('/')
            pointer = ps_opts[comps[0]]
            for cmd in cmds:
                pointer = pointer.setdefault(cmd, {})
            pointer['type'] = comps[2]
            # The value is optional (e.g. for empty boolean/select answers).
            if len(comps) > 3:
                pointer['argument'] = comps[3]

    sls = {}

    # Set language
    # ( This looks like it maps to something else )
    sls[ps_opts['d-i']['languagechooser']['language-name-fb']['argument']] = {
        'locale': ['system']
    }

    # Set keyboard
    # ( This looks like it maps to something else )
    sls[ps_opts['d-i']['kbd-chooser']['method']['argument']] = {
        'keyboard': ['system']
    }

    # Set timezone
    timezone = ps_opts['d-i']['time']['zone']['argument']
    sls[timezone] = {'timezone': ['system']}
    if ps_opts['d-i']['tzconfig']['gmt']['argument'] == 'true':
        sls[timezone]['timezone'].append('utc')

    # Set network
    if 'netcfg' in ps_opts['d-i']:
        iface = ps_opts['d-i']['netcfg']['choose_interface']['argument']
        sls[iface] = {}
        sls[iface]['enabled'] = True
        # BUG FIX: each parsed entry is a {'type': ..., 'argument': ...}
        # dict, so the previous code compared the dict itself against the
        # strings 'true'/'false' -- a comparison that could never succeed
        # and left 'proto' unset. Compare the 'argument' value instead
        # (.get() tolerates entries that carried no value).
        if ps_opts['d-i']['netcfg']['confirm_static'].get('argument') == 'true':
            sls[iface]['proto'] = 'static'
        elif ps_opts['d-i']['netcfg']['disable_dhcp'].get('argument') == 'false':
            sls[iface]['proto'] = 'dhcp'
        sls[iface]['netmask'] = ps_opts['d-i']['netcfg']['get_netmask']['argument']
        sls[iface]['domain'] = ps_opts['d-i']['netcfg']['get_domain']['argument']
        sls[iface]['gateway'] = ps_opts['d-i']['netcfg']['get_gateway']['argument']
        sls[iface]['hostname'] = ps_opts['d-i']['netcfg']['get_hostname']['argument']
        sls[iface]['ipaddress'] = ps_opts['d-i']['netcfg']['get_ipaddress']['argument']
        sls[iface]['nameservers'] = ps_opts['d-i']['netcfg']['get_nameservers']['argument']

    if dst is not None:
        with salt.utils.files.fopen(dst, 'w') as fh_:
            fh_.write(yaml.safe_dump(sls, default_flow_style=False))
    else:
        return yaml.safe_dump(sls, default_flow_style=False)
| 33.582278 | 91 | 0.549567 |
acebe1eed783dc0b5f509ac40560ca114afb437c | 435 | py | Python | intel_parse.py | m4lwhere/honeypot | 00afaf663a95d4d66d29c4cbbb0f3aac10a33290 | [
"MIT"
] | null | null | null | intel_parse.py | m4lwhere/honeypot | 00afaf663a95d4d66d29c4cbbb0f3aac10a33290 | [
"MIT"
] | null | null | null | intel_parse.py | m4lwhere/honeypot | 00afaf663a95d4d66d29c4cbbb0f3aac10a33290 | [
"MIT"
] | null | null | null | import csv, sys
from datetime import *
def parse(days, new_file):
days = sys.argv[1]
new_file = sys.argv[2]
now = datetime.now()
old = now - timedelta(days = days)
better_now = now.strftime("%F")
better_old = old.strftime("%F")
with open(new_file) as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
if row['Date'] > better_old:
print(row['IP'])
| 21.75 | 40 | 0.581609 |
acebe202ad69cd5b4082135325adabe0546e3dbb | 1,228 | py | Python | metatests/cloudcafe/networking/lbaas/common/client.py | rcbops-qa/cloudcafe | d937f85496aadafbb94a330b9adb8ea18bee79ba | [
"Apache-2.0"
] | null | null | null | metatests/cloudcafe/networking/lbaas/common/client.py | rcbops-qa/cloudcafe | d937f85496aadafbb94a330b9adb8ea18bee79ba | [
"Apache-2.0"
] | null | null | null | metatests/cloudcafe/networking/lbaas/common/client.py | rcbops-qa/cloudcafe | d937f85496aadafbb94a330b9adb8ea18bee79ba | [
"Apache-2.0"
] | null | null | null | """
Copyright 2014 Rackspace
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from lbaascafe.lbaas.lbaas_api.clients.base_load_balancers_client \
import BaseLoadBalancersClient
class BaseLoadBalancersClientFixture(unittest.TestCase):
    """
    @summary: Base Load Balancers Client for Load Balancer Client Tests
    """
    SERIALIZE = None
    DESERIALIZE = None

    @classmethod
    def setUpClass(cls):
        """Set up the shared fake endpoint and identifiers once per class."""
        super(BaseLoadBalancersClientFixture, cls).setUpClass()
        cls.url = "http://fake.url.endpoint"
        cls.auth_token = "fake_auth_token"
        cls.load_balancer_id = "12345"
        # Endpoint path: <base url>/<client suffix>.
        cls.load_balancers_url = "%s/%s" % (
            cls.url, BaseLoadBalancersClient._SUFFIX)
| 31.487179 | 72 | 0.740228 |
acebe259a664bb39407de2a6be5c7d62af73979d | 1,377 | py | Python | eridanus/chart.py | mithrandi/eridanus | 11c80c7024548ce7c41800b077d3d0a738a04875 | [
"MIT"
] | null | null | null | eridanus/chart.py | mithrandi/eridanus | 11c80c7024548ce7c41800b077d3d0a738a04875 | [
"MIT"
] | 5 | 2021-03-18T20:19:01.000Z | 2022-03-11T23:14:44.000Z | eridanus/chart.py | mithrandi/eridanus | 11c80c7024548ce7c41800b077d3d0a738a04875 | [
"MIT"
] | 1 | 2018-01-10T15:15:15.000Z | 2018-01-10T15:15:15.000Z | import cairo
from itertools import izip
from StringIO import StringIO
from pycha.chart import Option
from pycha.pie import PieChart
from pycha.color import DEFAULT_COLOR
def pieChart(fd, width, height, title, data, labels, bgColor=None, labelColor='#000000', colorScheme=DEFAULT_COLOR):
    """Render a pie chart as a PNG into the file-like object ``fd``.

    ``data`` and ``labels`` are parallel sequences: one slice per pair.
    NOTE(review): option semantics (pieRadius, axis ticks as slice labels)
    follow pycha's PieChart conventions -- confirm against pycha docs.
    """
    # pycha expects one dataset per slice, shaped (name, [[x, value]]).
    dataSet = [(name, [[0, value]]) for name, value in izip(labels, data)]
    # Axis tick labels double as the slice labels for a pie chart.
    axisLabels = [dict(v=i, label=label) for i, label in enumerate(labels)]
    options = Option(
        title=title,
        titleFont='Times',
        titleFontSize=24,
        pieRadius=0.35,
        legend=Option(hide=True),
        colorScheme=colorScheme,
        background=Option(baseColor=bgColor),
        axis=Option(labelColor=labelColor,
                    x=Option(ticks=axisLabels)))
    surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, width, height)
    chart = PieChart(surface, options)
    chart.addDataset(dataSet)
    chart.render()
    surface.write_to_png(fd)
def contributors(manager, limit=10, **kw):
    """Build a PNG pie chart of the channel's top contributors.

    Returns a file-like object rewound to the start of the PNG data.
    Extra keyword arguments are forwarded to ``pieChart``.
    """
    # Sort slices by contribution count (ascending) before splitting the
    # (name, count) pairs into parallel sequences.
    ranked = sorted(manager.topContributors(limit=limit),
                    key=lambda pair: pair[1])
    labels, data = zip(*ranked)
    buf = StringIO()
    pieChart(fd=buf,
             width=700,
             height=700,
             title=u'Top %d %s contributors' % (len(data), manager.channel),
             data=data,
             labels=labels,
             **kw)
    buf.seek(0)
    return buf
| 28.102041 | 116 | 0.63907 |
acebe27cb1c947ecc7dd9b5827d39ebc1963cb39 | 306 | py | Python | conbench/tests/entities/test_user.py | jonkeane/conbench | f096cc2f8b7a85d8e9aea32d8310127cf1923212 | [
"MIT"
] | 48 | 2020-03-02T16:55:46.000Z | 2022-02-26T00:35:57.000Z | conbench/tests/entities/test_user.py | jonkeane/conbench | f096cc2f8b7a85d8e9aea32d8310127cf1923212 | [
"MIT"
] | 103 | 2020-03-23T00:22:46.000Z | 2022-03-31T22:34:40.000Z | conbench/tests/entities/test_user.py | jonkeane/conbench | f096cc2f8b7a85d8e9aea32d8310127cf1923212 | [
"MIT"
] | 6 | 2020-03-04T17:52:35.000Z | 2022-03-30T11:53:40.000Z | from ...entities.user import User
def test_user_repr():
user = User(name="Gwen Clarke", email="gwen@example.com")
assert repr(user) == "<User gwen@example.com>"
def test_user_str():
user = User(name="Gwen Clarke", email="gwen@example.com")
assert str(user) == "<User gwen@example.com>"
| 25.5 | 61 | 0.666667 |
acebe27f765752f1ec74098cabc1810160108a36 | 6,553 | py | Python | ixnetwork_restpy/testplatform/sessions/ixnetwork/topology/tlvprofile/object_1ba6063c8cfb61359d0cafa499ed49e4.py | Vibaswan/ixnetwork_restpy | 239fedc7050890746cbabd71ea1e91c68d9e5cad | [
"MIT"
] | null | null | null | ixnetwork_restpy/testplatform/sessions/ixnetwork/topology/tlvprofile/object_1ba6063c8cfb61359d0cafa499ed49e4.py | Vibaswan/ixnetwork_restpy | 239fedc7050890746cbabd71ea1e91c68d9e5cad | [
"MIT"
] | null | null | null | ixnetwork_restpy/testplatform/sessions/ixnetwork/topology/tlvprofile/object_1ba6063c8cfb61359d0cafa499ed49e4.py | Vibaswan/ixnetwork_restpy | 239fedc7050890746cbabd71ea1e91c68d9e5cad | [
"MIT"
] | null | null | null | # MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class Object(Base):
    """Tlv object container which can contain one of a field, sub tlv or container

    The Object class encapsulates a list of object resources that are managed by the system.
    A list of resources can be retrieved from the server using the Object.find() method.
    """

    # NOTE(review): this follows the auto-generated ixnetwork_restpy binding
    # pattern -- each child property lazily constructs (or returns a cached)
    # helper bound to this node of the REST hierarchy.
    __slots__ = ()
    _SDM_NAME = 'object'
    # Maps Python-side attribute names to server-side attribute names.
    _SDM_ATT_MAP = {
        'Name': 'name',
    }

    def __init__(self, parent):
        super(Object, self).__init__(parent)

    @property
    def Container(self):
        """
        Returns
        -------
        - obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.tlvprofile.container_ad14fff79850a810bf70af3c662f313a.Container): An instance of the Container class

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        # Import deferred to avoid circular imports between generated modules.
        from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.tlvprofile.container_ad14fff79850a810bf70af3c662f313a import Container
        if self._properties.get('Container', None) is None:
            return Container(self)
        else:
            return self._properties.get('Container')

    @property
    def Field(self):
        """
        Returns
        -------
        - obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.tlvprofile.field_e196f9febcf3a6c28484d9f1e36ac377.Field): An instance of the Field class

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.tlvprofile.field_e196f9febcf3a6c28484d9f1e36ac377 import Field
        if self._properties.get('Field', None) is None:
            return Field(self)
        else:
            return self._properties.get('Field')

    @property
    def RepeatableContainer(self):
        """
        Returns
        -------
        - obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.tlvprofile.repeatablecontainer_a17d3ce6bb8123640f8dd7d1e6a6435c.RepeatableContainer): An instance of the RepeatableContainer class

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.tlvprofile.repeatablecontainer_a17d3ce6bb8123640f8dd7d1e6a6435c import RepeatableContainer
        if self._properties.get('RepeatableContainer', None) is None:
            return RepeatableContainer(self)
        else:
            return self._properties.get('RepeatableContainer')

    @property
    def SubTlv(self):
        """
        Returns
        -------
        - obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.tlvprofile.subtlv_7c94061598b794f7b720de3bb85f6cdb.SubTlv): An instance of the SubTlv class

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.tlvprofile.subtlv_7c94061598b794f7b720de3bb85f6cdb import SubTlv
        if self._properties.get('SubTlv', None) is None:
            return SubTlv(self)
        else:
            return self._properties.get('SubTlv')

    @property
    def Name(self):
        """
        Returns
        -------
        - str: The name of the object
        """
        return self._get_attribute(self._SDM_ATT_MAP['Name'])

    @Name.setter
    def Name(self, value):
        self._set_attribute(self._SDM_ATT_MAP['Name'], value)

    def update(self, Name=None):
        """Updates object resource on the server.

        Args
        ----
        - Name (str): The name of the object

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))

    def find(self, Name=None):
        """Finds and retrieves object resources from the server.

        All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve object resources from the server.
        To retrieve an exact match ensure the parameter value starts with ^ and ends with $
        By default the find method takes no parameters and will retrieve all object resources from the server.

        Args
        ----
        - Name (str): The name of the object

        Returns
        -------
        - self: This instance with matching object resources retrieved from the server available through an iterator or index

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))

    def read(self, href):
        """Retrieves a single instance of object data from the server.

        Args
        ----
        - href (str): An href to the instance to be retrieved

        Returns
        -------
        - self: This instance with the object resources from the server available through an iterator or index

        Raises
        ------
        - NotFoundError: The requested resource does not exist on the server
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._read(href)
| 38.098837 | 202 | 0.677552 |
acebe3320de404c2a8f8a075c58dac0d86d89226 | 893 | py | Python | RecoLocalTracker/SiPixelRecHits/python/PixelCPETemplateReco_cfi.py | NTrevisani/cmssw | a212a27526f34eb9507cf8b875c93896e6544781 | [
"Apache-2.0"
] | 3 | 2018-08-24T19:10:26.000Z | 2019-02-19T11:45:32.000Z | RecoLocalTracker/SiPixelRecHits/python/PixelCPETemplateReco_cfi.py | NTrevisani/cmssw | a212a27526f34eb9507cf8b875c93896e6544781 | [
"Apache-2.0"
] | 7 | 2016-07-17T02:34:54.000Z | 2019-08-13T07:58:37.000Z | RecoLocalTracker/SiPixelRecHits/python/PixelCPETemplateReco_cfi.py | NTrevisani/cmssw | a212a27526f34eb9507cf8b875c93896e6544781 | [
"Apache-2.0"
] | 5 | 2018-08-21T16:37:52.000Z | 2020-01-09T13:33:17.000Z | import FWCore.ParameterSet.Config as cms
templates = cms.ESProducer("PixelCPETemplateRecoESProducer",
ComponentName = cms.string('PixelCPETemplateReco'),
speed = cms.int32(-2),
#PixelErrorParametrization = cms.string('NOTcmsim'),
Alpha2Order = cms.bool(True),
UseClusterSplitter = cms.bool(False),
# petar, for clusterProbability() from TTRHs
ClusterProbComputationFlag = cms.int32(0),
# gavril
DoCosmics = cms.bool(False),
# The flag to regulate if the LA offset is taken from Alignment
# True in Run II for offline RECO
DoLorentz = cms.bool(True),
LoadTemplatesFromDB = cms.bool(True)
)
# This customization will be removed once we get the templates for phase2 pixel
from Configuration.Eras.Modifier_phase2_tracker_cff import phase2_tracker
phase2_tracker.toModify(templates,
LoadTemplatesFromDB = False,
DoLorentz = False,
)
| 30.793103 | 79 | 0.740202 |
acebe3a23d10ba4ec44d33276149450bf50bb317 | 10,595 | py | Python | test/test_command_monitoring_legacy.py | henrifroese/mongo-python-driver | a37e28b84f539da72e7b0b09b9d9881812bbbe60 | [
"Apache-2.0"
] | null | null | null | test/test_command_monitoring_legacy.py | henrifroese/mongo-python-driver | a37e28b84f539da72e7b0b09b9d9881812bbbe60 | [
"Apache-2.0"
] | null | null | null | test/test_command_monitoring_legacy.py | henrifroese/mongo-python-driver | a37e28b84f539da72e7b0b09b9d9881812bbbe60 | [
"Apache-2.0"
] | null | null | null | # Copyright 2015-present MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run the command monitoring legacy-format spec tests."""
import os
import re
import sys
sys.path[0:0] = [""]
import pymongo
from bson import json_util
from pymongo.errors import OperationFailure
from pymongo.write_concern import WriteConcern
from test import unittest, client_context
from test.utils import (
single_client, wait_until, EventListener, parse_read_preference)
# Location of JSON test specifications.
# Resolved relative to this file so the tests work from any working directory.
_TEST_PATH = os.path.join(
    os.path.dirname(os.path.realpath(__file__)),
    'command_monitoring')
def camel_to_snake(camel):
    """Convert a CamelCase/mixedCase name to snake_case."""
    # First insert an underscore before each capitalized word, then before
    # any capital that directly follows a lowercase letter or digit, and
    # finally lowercase the whole result.
    word_split = re.compile(r'(.)([A-Z][a-z]+)')
    acronym_split = re.compile(r'([a-z0-9])([A-Z])')
    return acronym_split.sub(r'\1_\2', word_split.sub(r'\1_\2', camel)).lower()
class TestAllScenarios(unittest.TestCase):
    """Container for the generated command-monitoring scenario tests.

    Individual test methods are attached to this class dynamically by
    create_tests() at import time.
    """

    @classmethod
    @client_context.require_connection
    def setUpClass(cls):
        # One shared client for all scenarios, with an EventListener that
        # records command started/succeeded/failed events.
        cls.listener = EventListener()
        cls.client = single_client(event_listeners=[cls.listener])

    @classmethod
    def tearDownClass(cls):
        cls.client.close()

    def tearDown(self):
        # Clear recorded events so one scenario cannot leak into the next.
        self.listener.results.clear()
def format_actual_results(results):
    """Summarize the first recorded command events for a failure message.

    Shows the first started command, first succeeded reply and first failed
    failure, using None for any category with no recorded events.
    """
    def _first(events, attr):
        # An empty event list renders as None, matching what callers expect.
        return getattr(events[0], attr) if events else None

    parts = (
        "\nStarted: %r" % (_first(results['started'], 'command'),),
        "\nSucceeded: %r" % (_first(results['succeeded'], 'reply'),),
        "\nFailed: %r" % (_first(results['failed'], 'failure'),),
    )
    return "".join(parts)
def create_test(scenario_def, test):
    """Return a test method that runs one command-monitoring scenario.

    The returned function seeds the scenario's collection with the spec data,
    performs the described operation, then compares the command events the
    listener recorded against the scenario's expectations.
    """

    def run_scenario(self):
        # Reset the target collection to the scenario's fixture data.
        dbname = scenario_def['database_name']
        collname = scenario_def['collection_name']
        coll = self.client[dbname][collname]
        coll.drop()
        coll.insert_many(scenario_def['data'])
        self.listener.results.clear()
        name = camel_to_snake(test['operation']['name'])
        if 'read_preference' in test['operation']:
            coll = coll.with_options(read_preference=parse_read_preference(
                test['operation']['read_preference']))
        if 'collectionOptions' in test['operation']:
            colloptions = test['operation']['collectionOptions']
            if 'writeConcern' in colloptions:
                concern = colloptions['writeConcern']
                coll = coll.with_options(
                    write_concern=WriteConcern(**concern))

        # Convert the spec's camelCase argument names to the snake_case
        # keyword arguments PyMongo expects.
        test_args = test['operation']['arguments']
        if 'options' in test_args:
            options = test_args.pop('options')
            test_args.update(options)
        args = {}
        for arg in test_args:
            args[camel_to_snake(arg)] = test_args[arg]

        if name == 'bulk_write':
            # Build pymongo request objects (InsertOne, UpdateOne, ...) from
            # the spec's request descriptions.
            bulk_args = []
            for request in args['requests']:
                opname = request['name']
                klass = opname[0:1].upper() + opname[1:]
                arg = getattr(pymongo, klass)(**request['arguments'])
                bulk_args.append(arg)
            try:
                coll.bulk_write(bulk_args, args.get('ordered', True))
            except OperationFailure:
                pass
        elif name == 'find':
            if 'sort' in args:
                args['sort'] = list(args['sort'].items())
            for arg in 'skip', 'limit':
                if arg in args:
                    args[arg] = int(args[arg])
            try:
                # Iterate the cursor.
                tuple(coll.find(**args))
            except OperationFailure:
                pass
            # Wait for the killCursors thread to run if necessary.
            if 'limit' in args and client_context.version[:2] < (3, 1):
                self.client._kill_cursors_executor.wake()
                started = self.listener.results['started']
                succeeded = self.listener.results['succeeded']
                wait_until(
                    lambda: started[-1].command_name == 'killCursors',
                    "publish a start event for killCursors.")
                wait_until(
                    lambda: succeeded[-1].command_name == 'killCursors',
                    "publish a succeeded event for killCursors.")
        else:
            # All other operations are dispatched by name on the collection.
            try:
                getattr(coll, name)(**args)
            except OperationFailure:
                pass

        # Match each expectation in order against the recorded events,
        # normalizing values the spec cannot know (cursor ids, error codes).
        res = self.listener.results
        for expectation in test['expectations']:
            event_type = next(iter(expectation))
            if event_type == "command_started_event":
                event = res['started'][0] if len(res['started']) else None
                if event is not None:
                    # The tests substitute 42 for any number other than 0.
                    if (event.command_name == 'getMore'
                            and event.command['getMore']):
                        event.command['getMore'] = 42
                    elif event.command_name == 'killCursors':
                        event.command['cursors'] = [42]
                    elif event.command_name == 'update':
                        # TODO: remove this once PYTHON-1744 is done.
                        # Add upsert and multi fields back into
                        # expectations.
                        updates = expectation[event_type]['command'][
                            'updates']
                        for update in updates:
                            update.setdefault('upsert', False)
                            update.setdefault('multi', False)
            elif event_type == "command_succeeded_event":
                event = (
                    res['succeeded'].pop(0) if len(res['succeeded']) else None)
                if event is not None:
                    reply = event.reply
                    # The tests substitute 42 for any number other than 0,
                    # and "" for any error message.
                    if 'writeErrors' in reply:
                        for doc in reply['writeErrors']:
                            # Remove any new fields the server adds. The tests
                            # only have index, code, and errmsg.
                            diff = set(doc) - set(['index', 'code', 'errmsg'])
                            for field in diff:
                                doc.pop(field)
                            doc['code'] = 42
                            doc['errmsg'] = ""
                    elif 'cursor' in reply:
                        if reply['cursor']['id']:
                            reply['cursor']['id'] = 42
                    elif event.command_name == 'killCursors':
                        # Make the tests continue to pass when the killCursors
                        # command is actually in use.
                        if 'cursorsKilled' in reply:
                            reply.pop('cursorsKilled')
                        reply['cursorsUnknown'] = [42]
                    # Found succeeded event. Pop related started event.
                    res['started'].pop(0)
            elif event_type == "command_failed_event":
                event = res['failed'].pop(0) if len(res['failed']) else None
                if event is not None:
                    # Found failed event. Pop related started event.
                    res['started'].pop(0)
            else:
                self.fail("Unknown event type")

            if event is None:
                event_name = event_type.split('_')[1]
                self.fail(
                    "Expected %s event for %s command. Actual "
                    "results:%s" % (
                        event_name,
                        expectation[event_type]['command_name'],
                        format_actual_results(res)))

            # Compare every expected attribute (command, reply, ...) against
            # the actual event; dict expectations are checked key by key.
            for attr, expected in expectation[event_type].items():
                if 'options' in expected:
                    options = expected.pop('options')
                    expected.update(options)
                actual = getattr(event, attr)
                if isinstance(expected, dict):
                    for key, val in expected.items():
                        self.assertEqual(val, actual[key])
                else:
                    self.assertEqual(actual, expected)

    return run_scenario
def create_tests():
    """Generate one test method per scenario found in the legacy spec files.

    Walks the legacy spec directory, builds a test via create_test() for each
    scenario, applies any server-version/topology gating decorators from the
    spec, and attaches the result to TestAllScenarios.
    """
    for dirpath, _, filenames in os.walk(os.path.join(_TEST_PATH, 'legacy')):
        dirname = os.path.split(dirpath)[-1]
        for filename in filenames:
            with open(os.path.join(dirpath, filename)) as scenario_stream:
                scenario_def = json_util.loads(scenario_stream.read())
            assert bool(scenario_def.get('tests')), "tests cannot be empty"
            # Construct test from scenario.
            for test in scenario_def['tests']:
                new_test = create_test(scenario_def, test)
                if "ignore_if_server_version_greater_than" in test:
                    version = test["ignore_if_server_version_greater_than"]
                    ver = tuple(int(elt) for elt in version.split('.'))
                    new_test = client_context.require_version_max(*ver)(
                        new_test)
                if "ignore_if_server_version_less_than" in test:
                    version = test["ignore_if_server_version_less_than"]
                    ver = tuple(int(elt) for elt in version.split('.'))
                    new_test = client_context.require_version_min(*ver)(
                        new_test)
                if "ignore_if_topology_type" in test:
                    types = set(test["ignore_if_topology_type"])
                    if "sharded" in types:
                        new_test = client_context.require_no_mongos(None)(
                            new_test)
                # Unique, descriptive name: directory + file + description.
                test_name = 'test_%s_%s_%s' % (
                    dirname,
                    os.path.splitext(filename)[0],
                    str(test['description'].replace(" ", "_")))
                new_test.__name__ = test_name
                setattr(TestAllScenarios, new_test.__name__, new_test)
if __name__ == "__main__":
unittest.main()
| 41.54902 | 80 | 0.539122 |
acebe5116f3ad19d3421a318ad4604e89c777125 | 1,126 | py | Python | tests/test_variational_autoencoder.py | helblazer811/ManimMachineLearning | 58aec269cf2dea91484d83646b5a72c477a2e7d8 | [
"MIT"
] | 12 | 2022-02-17T03:44:31.000Z | 2022-03-17T22:05:01.000Z | tests/test_variational_autoencoder.py | helblazer811/ManimMachineLearning | 58aec269cf2dea91484d83646b5a72c477a2e7d8 | [
"MIT"
] | 1 | 2022-02-18T21:40:56.000Z | 2022-02-18T21:40:56.000Z | tests/test_variational_autoencoder.py | helblazer811/ManimMachineLearning | 58aec269cf2dea91484d83646b5a72c477a2e7d8 | [
"MIT"
] | null | null | null | from manim import *
from PIL import Image
from manim_ml.neural_network.layers import EmbeddingLayer, FeedForwardLayer, ImageLayer
from manim_ml.neural_network.neural_network import NeuralNetwork
config.pixel_height = 720
config.pixel_width = 1280
config.frame_height = 8.0
config.frame_width = 8.0
class VariationalAutoencoderScene(Scene):
def construct(self):
embedding_layer = EmbeddingLayer(dist_theme="ellipse").scale(2)
image = Image.open('images/image.jpeg')
numpy_image = np.asarray(image)
# Make nn
neural_network = NeuralNetwork([
ImageLayer(numpy_image, height=1.4),
FeedForwardLayer(5),
FeedForwardLayer(3),
embedding_layer,
FeedForwardLayer(3),
FeedForwardLayer(5),
ImageLayer(numpy_image, height=1.4),
], layer_spacing=0.1)
neural_network.scale(1.3)
self.play(Create(neural_network), run_time=3)
self.play(neural_network.make_forward_pass_animation(), run_time=5)
self.play(neural_network.make_forward_pass_animation(), run_time=5) | 34.121212 | 87 | 0.686501 |
acebe54e6c7ff47691773d4af16138002d3cd03b | 24,792 | py | Python | tensorflow/python/data/ops/multi_device_iterator_ops.py | MathMachado/tensorflow | 56afda20b15f234c23e8393f7e337e7dd2659c2d | [
"Apache-2.0"
] | 848 | 2019-12-03T00:16:17.000Z | 2022-03-31T22:53:17.000Z | tensorflow/python/data/ops/multi_device_iterator_ops.py | MathMachado/tensorflow | 56afda20b15f234c23e8393f7e337e7dd2659c2d | [
"Apache-2.0"
] | 656 | 2019-12-03T00:48:46.000Z | 2022-03-31T18:41:54.000Z | tensorflow/python/data/ops/multi_device_iterator_ops.py | MathMachado/tensorflow | 56afda20b15f234c23e8393f7e337e7dd2659c2d | [
"Apache-2.0"
] | 506 | 2019-12-03T00:46:26.000Z | 2022-03-30T10:34:56.000Z | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python wrapper for prefetching_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.data.util import structure
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import type_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.ops import resource_variable_ops
class _PerDeviceGenerator(dataset_ops.DatasetV2):
  """A `dummy` generator dataset.

  Wraps one shard of a MultiDeviceIterator as a generator-style dataset so a
  per-device pipeline (e.g. prefetching) can be layered on top. The generator's
  init/next/finalize functions are remote-called back onto the source (host)
  device, where the multi-device iterator resource lives.
  """

  def __init__(self, shard_num, multi_device_iterator_resource, incarnation_id,
               source_device, element_spec):
    self._element_spec = element_spec

    # Serialize the host-side iterator resource so device-side functions can
    # refer to it via a string handle.
    multi_device_iterator_string_handle = (
        gen_dataset_ops.multi_device_iterator_to_string_handle(
            multi_device_iterator_resource))

    # TODO(b/124254153): Enable autograph once the overhead is low enough.
    @function.defun(autograph=False)  # Pure graph code.
    def _init_func():
      return multi_device_iterator_string_handle

    init_func_concrete = _init_func._get_concrete_function_internal()  # pylint: disable=protected-access

    # TODO(b/124254153): Enable autograph once the overhead is low enough.
    @function.defun(autograph=False)  # Pure graph code.
    def _remote_init_func():
      return functional_ops.remote_call(
          target=source_device,
          args=init_func_concrete.captured_inputs,
          Tout=[dtypes.string],
          f=init_func_concrete)

    self._init_func = _remote_init_func._get_concrete_function_internal()  # pylint: disable=protected-access
    self._init_captured_args = self._init_func.captured_inputs

    # TODO(b/124254153): Enable autograph once the overhead is low enough.
    @function.defun(
        input_signature=[tensor_spec.TensorSpec([], dtypes.string)],
        autograph=False)  # Pure graph code.
    def _next_func(string_handle):
      # Runs on the source device: re-materialize the iterator from its
      # string handle and pull the next element for this shard.
      # pylint: disable=protected-access
      multi_device_iterator = (
          gen_dataset_ops.multi_device_iterator_from_string_handle(
              string_handle=string_handle,
              output_types=structure.get_flat_tensor_types(self._element_spec),
              output_shapes=structure.get_flat_tensor_shapes(
                  self._element_spec)))
      return gen_dataset_ops.multi_device_iterator_get_next_from_shard(
          multi_device_iterator=multi_device_iterator,
          shard_num=shard_num,
          incarnation_id=incarnation_id,
          output_types=structure.get_flat_tensor_types(self._element_spec),
          output_shapes=structure.get_flat_tensor_shapes(self._element_spec))

    next_func_concrete = _next_func._get_concrete_function_internal()  # pylint: disable=protected-access

    # TODO(b/124254153): Enable autograph once the overhead is low enough.
    @function.defun_with_attributes(
        input_signature=[tensor_spec.TensorSpec([], dtypes.string)],
        attributes={"experimental_ints_on_device": True},
        autograph=False)  # Pure graph code.
    def _remote_next_func(string_handle):
      return functional_ops.remote_call(
          target=source_device,
          args=[string_handle] + next_func_concrete.captured_inputs,
          Tout=structure.get_flat_tensor_types(self._element_spec),
          f=next_func_concrete)

    self._next_func = _remote_next_func._get_concrete_function_internal()  # pylint: disable=protected-access
    self._next_captured_args = self._next_func.captured_inputs

    # Record where the incarnation id sits in the captured args so
    # _ReincarnatedPerDeviceGenerator can swap in a new incarnation later.
    self._incarnation_id_index = -1
    for i, arg in enumerate(self._next_captured_args):
      if arg is incarnation_id:
        self._incarnation_id_index = i

    # TODO(b/124254153): Enable autograph once the overhead is low enough.
    @function.defun(
        input_signature=[tensor_spec.TensorSpec([], dtypes.string)],
        autograph=False)  # Pure graph code.
    def _finalize_func(unused_string_handle):
      return array_ops.constant(0, dtypes.int64)

    finalize_func_concrete = _finalize_func._get_concrete_function_internal()  # pylint: disable=protected-access

    # TODO(b/124254153): Enable autograph once the overhead is low enough.
    @function.defun(
        input_signature=[tensor_spec.TensorSpec([], dtypes.string)],
        autograph=False)  # Pure graph code.
    def _remote_finalize_func(string_handle):
      return functional_ops.remote_call(
          target=source_device,
          args=[string_handle] + finalize_func_concrete.captured_inputs,
          Tout=[dtypes.int64],
          f=finalize_func_concrete)

    self._finalize_func = _remote_finalize_func._get_concrete_function_internal(  # pylint: disable=protected-access
    )
    self._finalize_captured_args = self._finalize_func.captured_inputs

    variant_tensor = gen_dataset_ops.generator_dataset(
        self._init_captured_args,
        self._next_captured_args,
        self._finalize_captured_args,
        init_func=self._init_func,
        next_func=self._next_func,
        finalize_func=self._finalize_func,
        **self._flat_structure)
    super(_PerDeviceGenerator, self).__init__(variant_tensor)

  def _inputs(self):
    # TODO(b/116506223): Determine which datasets should be used as inputs here.
    return []

  @property
  def element_spec(self):
    return self._element_spec
class _ReincarnatedPerDeviceGenerator(dataset_ops.DatasetV2):
  """Creates a _PerDeviceGenerator-like dataset with a new incarnation_id.

  Re-uses the functions from the provided per_device_dataset and just switches
  out the function argument corresponding to the incarnation_id.
  """

  def __init__(self, per_device_dataset, incarnation_id):
    # pylint: disable=protected-access
    self._element_spec = per_device_dataset.element_spec
    self._init_func = per_device_dataset._init_func
    self._init_captured_args = self._init_func.captured_inputs

    self._next_func = per_device_dataset._next_func
    self._next_captured_args = per_device_dataset._next_captured_args
    # The captured arguments to the next_func are string_handle, incarnation_id.
    # We update the incarnation id to the new one.
    self._next_captured_args[
        per_device_dataset._incarnation_id_index] = incarnation_id

    self._finalize_func = per_device_dataset._finalize_func
    self._finalize_captured_args = per_device_dataset._finalize_captured_args

    # Rebuild the generator dataset with the (mostly shared) functions and
    # the patched captured inputs.
    variant_tensor = gen_dataset_ops.generator_dataset(
        self._init_captured_args,
        self._next_captured_args,
        self._finalize_captured_args,
        init_func=self._init_func,
        next_func=self._next_func,
        finalize_func=self._finalize_func,
        **self._flat_structure)
    super(_ReincarnatedPerDeviceGenerator, self).__init__(variant_tensor)

  def _inputs(self):
    # TODO(b/116506223): Determine which datasets should be used as inputs here.
    return []

  @property
  def element_spec(self):
    return self._element_spec
def _create_device_dataset(prototype_ds, incarnation_id, prefetch_buffer_size,
                           experimental_slack):
  """Rebuild a per-device dataset from a prototype with a fresh incarnation.

  Args:
    prototype_ds: A _PerDeviceGenerator to reincarnate.
    incarnation_id: Tensor identifying the current iterator incarnation.
    prefetch_buffer_size: Number of elements to prefetch on the device;
      values <= 0 disable prefetching.
    experimental_slack: If true, use a prefetch with slack_period=1.

  Returns:
    The per-device dataset with default optimizations and autotuning disabled.
  """
  device_ds = _ReincarnatedPerDeviceGenerator(prototype_ds, incarnation_id)
  if prefetch_buffer_size > 0:
    if experimental_slack:
      device_ds = dataset_ops.PrefetchDataset(
          device_ds, prefetch_buffer_size, slack_period=1)
    else:
      device_ds = device_ds.prefetch(prefetch_buffer_size)
  # TODO(jsimsa): Enable auto-tuning and optimizations when supported for
  # non-CPU devices.
  opts = dataset_ops.Options()
  opts.experimental_optimization.apply_default_optimizations = False
  opts.experimental_optimization.autotune = False
  return device_ds.with_options(opts)
class MultiDeviceIterator(object):
  """An iterator over multiple devices."""

  def __init__(self,
               dataset,
               devices,
               max_buffer_size=1,
               prefetch_buffer_size=1,
               source_device="/cpu:0"):
    """Constructs a MultiDeviceIterator.

    Args:
      dataset: The input dataset to be iterated over.
      devices: The list of devices to fetch data to.
      max_buffer_size: Maximum size of the host side per device buffer to keep.
      prefetch_buffer_size: if > 1, then we setup a buffer on each device to
        prefetch into.
      source_device: The host device to place the `dataset` on. In order to
        prevent deadlocks, if the prefetch_buffer_size is greater than the
        max_buffer_size, we set the max_buffer_size to prefetch_buffer_size.
    """
    options = dataset_ops.Options()
    options.experimental_distribute.num_devices = len(devices)
    dataset = dataset.with_options(options)
    self._dataset = dataset._apply_options()  # pylint: disable=protected-access
    self._experimental_slack = dataset.options().experimental_slack
    self._devices = devices
    self._source_device = source_device
    self._source_device_tensor = ops.convert_to_tensor(source_device)
    self._max_buffer_size = max_buffer_size
    self._prefetch_buffer_size = prefetch_buffer_size

    # Keep the host buffer at least as large as the prefetch buffer to
    # prevent deadlocks (see docstring).
    if self._prefetch_buffer_size > self._max_buffer_size:
      self._max_buffer_size = self._prefetch_buffer_size

    # Create the MultiDeviceIterator.
    with ops.device(self._source_device):
      # TODO(b/121378567): Get rid of this shared_name hack.
      shared_name = ""
      if context.executing_eagerly():
        shared_name = context.shared_name()
      self._multi_device_iterator_resource = (
          gen_dataset_ops.multi_device_iterator(
              devices=self._devices,
              shared_name=shared_name,
              container="",
              **self._dataset._flat_structure))  # pylint: disable=protected-access
      if context.executing_eagerly():
        # Delete the resource when this object is deleted
        self._resource_deleter = resource_variable_ops.EagerResourceDeleter(
            handle=self._multi_device_iterator_resource,
            handle_device=self._source_device)

      # The incarnation ID is used to ensure consistency between the per-device
      # iterators and the multi-device iterator.
      self._incarnation_id = gen_dataset_ops.multi_device_iterator_init(
          self._dataset._variant_tensor,  # pylint: disable=protected-access
          self._multi_device_iterator_resource,
          max_buffer_size=self._max_buffer_size)

    # One prototype generator per device; kept so _eager_reset can rebuild
    # the per-device pipelines with a new incarnation id.
    self._prototype_device_datasets = []
    for i, device in enumerate(self._devices):
      with ops.device(device):
        ds = _PerDeviceGenerator(i, self._multi_device_iterator_resource,
                                 self._incarnation_id,
                                 self._source_device_tensor,
                                 self._dataset.element_spec)
        self._prototype_device_datasets.append(ds)

    # TODO(rohanj): Explore the possibility of the MultiDeviceIterator to
    # initialize the device side of the pipeline. This would allow the
    # MultiDeviceIterator to choose, for example, to move some transformations
    # into the device side from its input. It might be useful in rewriting.
    # Create the per device iterators.
    self._device_iterators = []
    for i, device in enumerate(self._devices):
      with ops.device(device):
        ds = _create_device_dataset(self._prototype_device_datasets[i],
                                    self._incarnation_id,
                                    self._prefetch_buffer_size,
                                    self._experimental_slack)
        if context.executing_eagerly():
          self._device_iterators.append(dataset_ops.make_one_shot_iterator(ds))
        else:
          self._device_iterators.append(
              dataset_ops.make_initializable_iterator(ds))

    if not context.executing_eagerly():
      device_iterator_initializers = [
          iterator.initializer for iterator in self._device_iterators
      ]
      self._initializer = control_flow_ops.group(*device_iterator_initializers)

  def _create_device_dataset(self, i):
    """Uses _prototype_device_datasets[i] to build a dataset for the device."""
    # NOTE(review): this method duplicates the module-level
    # _create_device_dataset() and does not appear to be called anywhere in
    # this file — __init__ and _eager_reset use the module-level helper.
    # Candidate for removal; confirm no external callers first.
    ds = self._prototype_device_datasets[i]
    ds = _ReincarnatedPerDeviceGenerator(ds, self._incarnation_id)
    if self._prefetch_buffer_size > 0:
      if self._experimental_slack:
        ds = dataset_ops.PrefetchDataset(
            ds, self._prefetch_buffer_size, slack_period=1)
      else:
        ds = ds.prefetch(self._prefetch_buffer_size)
    # TODO(jsimsa): Enable auto-tuning and optimizations when supported for
    # non-CPU devices.
    options = dataset_ops.Options()
    options.experimental_optimization.apply_default_optimizations = False
    options.experimental_optimization.autotune = False
    ds = ds.with_options(options)
    return ds

  def get_next(self, device=None):
    """Returns the next element given a `device`, else returns all in a list."""
    if device is not None:
      index = self._devices.index(device)
      return self._device_iterators[index].get_next()

    result = []
    for i, device in enumerate(self._devices):
      with ops.device(device):
        result.append(self._device_iterators[i].get_next())
    return result

  def get_next_as_optional(self):
    # Like get_next(), but returns Optional values so end-of-sequence can be
    # handled without raising.
    result = []
    for i, device in enumerate(self._devices):
      with ops.device(device):
        result.append(
            iterator_ops.get_next_as_optional(self._device_iterators[i]))
    return result

  @property
  def initializer(self):
    # In eager mode the per-device iterators are one-shot; nothing to run.
    if context.executing_eagerly():
      return control_flow_ops.no_op()
    return self._initializer

  def _eager_reset(self):
    """Resets the MultiDeviceIterator in eager mode."""
    if not ops.executing_eagerly_outside_functions():
      raise ValueError("Eager reset is only supported in eager mode.")
    # pylint: disable=protected-access
    self._incarnation_id = gen_dataset_ops.multi_device_iterator_init(
        self._dataset._variant_tensor,
        self._multi_device_iterator_resource,
        max_buffer_size=self._max_buffer_size)
    for i, device in enumerate(self._devices):
      with ops.device(device):
        ds = _create_device_dataset(self._prototype_device_datasets[i],
                                    self._incarnation_id,
                                    self._prefetch_buffer_size,
                                    self._experimental_slack)
        # Reset the device iterator resources with the new dataset.
        ds_variant = ds._variant_tensor
        gen_dataset_ops.make_iterator(
            ds_variant, self._device_iterators[i]._iterator_resource)

  @property
  def element_spec(self):
    return self._dataset.element_spec
class MultiDeviceIteratorResourceDeleter(object):
  """An object which cleans up a Multi Device Iterator resource.

  An alternative to defining a __del__ method on an object. Even if the parent
  object is part of a reference cycle, the cycle will be collectible.
  """

  def __init__(self, multi_device_iterator, iterators, device, deleter):
    # Handles/tensors needed to destroy the resource later.
    self._deleter = deleter
    self._multi_device_iterator = multi_device_iterator
    self._iterators = iterators
    self._device = device
    # Record the mode at construction time so deletion runs in the same mode.
    self._eager_mode = context.executing_eagerly()

  def __del__(self):
    with ops.device(self._device):
      # Make sure the resource is deleted in the same mode as it was created in.
      # We pass in the iterator handles as inputs to the op to make sure that
      # this op runs after all the iterators are deleted.
      if self._eager_mode:
        with context.eager_mode():
          gen_dataset_ops.delete_multi_device_iterator(
              multi_device_iterator=self._multi_device_iterator,
              iterators=self._iterators,
              deleter=self._deleter)
      else:
        with context.graph_mode():
          gen_dataset_ops.delete_multi_device_iterator(
              multi_device_iterator=self._multi_device_iterator,
              iterators=self._iterators,
              deleter=self._deleter)
class MultiDeviceIteratorSpec(type_spec.TypeSpec):
  """Type specification for `MultiDeviceIteratorV2`."""

  __slots__ = ["_devices", "_source_device", "_element_spec"]

  def __init__(self, devices, source_device, element_spec):
    """Creates a MultiDeviceIteratorSpec.

    Args:
      devices: The list of devices the iterator fetches data to.
      source_device: The host device the underlying dataset is placed on.
      element_spec: A nested structure of `TypeSpec` objects describing one
        element of the iterator.
    """
    self._devices = devices
    self._source_device = source_device
    self._element_spec = element_spec

  @property
  def value_type(self):
    return MultiDeviceIteratorV2

  def _serialize(self):
    return (tuple(self._devices), self._source_device, self._element_spec)

  @property
  def _component_specs(self):
    # Components are: the multi-device iterator resource handle, its deleter
    # tensor, and one per-device iterator.
    # BUG FIX: the deleter produced by anonymous_multi_device_iterator is a
    # scalar variant tensor; the previous `dtypes.scalar` does not exist and
    # raised AttributeError whenever this property was evaluated.
    specs = [
        tensor_spec.TensorSpec([], dtypes.resource),
        tensor_spec.TensorSpec([], dtypes.variant)
    ]
    for _ in range(len(self._devices)):
      specs.append(iterator_ops.IteratorSpec(self._element_spec))
    return specs

  def _to_components(self, value):
    # Order must match _component_specs: resource, deleter, then iterators.
    # pylint: disable=protected-access
    c = [value._multi_device_iterator_resource, value._deleter]
    c.extend(value._device_iterators)
    return c

  def _from_components(self, components):
    return MultiDeviceIteratorV2(
        dataset=None,
        devices=self._devices,
        source_device=self._source_device,
        components=components,
        element_spec=self._element_spec)

  @staticmethod
  def from_value(value):
    """Builds the spec describing an existing MultiDeviceIteratorV2."""
    # pylint: disable=protected-access
    return MultiDeviceIteratorSpec(
        value._devices,
        value._source_device,
        value.element_spec)
class MultiDeviceIteratorV2(composite_tensor.CompositeTensor):
  """An iterator over multiple devices."""

  def __init__(self,
               dataset=None,
               devices=None,
               max_buffer_size=1,
               prefetch_buffer_size=1,
               source_device="/cpu:0",
               components=None,
               element_spec=None):
    """Constructs a MultiDeviceIteratorV2 object.

    Args:
      dataset: The input dataset to be iterated over.
      devices: The list of devices to fetch data to.
      max_buffer_size: Maximum size of the host side per device buffer to keep.
      prefetch_buffer_size: if > 1, then we setup a buffer on each device to
        prefetch into.
      source_device: The host device to place the `dataset` on. In order to
        prevent deadlocks, if the prefetch_buffer_size is greater than the
        max_buffer_size, we set the max_buffer_size to prefetch_buffer_size.
      components: Tensor components to construct the MultiDeviceIterator from.
      element_spec: A nested structure of `TypeSpec` objects that
        represents the type specification of elements of the iterator.

    Raises:
      RuntimeError: If executed in graph mode or outside of function building
        mode.
    """
    if (not context.executing_eagerly() and
        not ops.get_default_graph()._building_function):  # pylint: disable=protected-access
      raise RuntimeError("MultiDeviceIteratorV2 is only supported inside of "
                         "tf.function or when eager execution is enabled.")
    if devices is None:
      raise ValueError("`devices` must be provided")
    # BUG FIX: the second string fragment used to be a standalone no-op
    # statement, silently truncating the error message; parenthesize the
    # implicit string concatenation so the full message is assigned.
    error_message = ("Either `dataset` or both `components` and "
                     "`element_spec` need to be provided.")

    if dataset is None:
      # Reconstruction path (e.g. MultiDeviceIteratorSpec._from_components):
      # adopt pre-built component tensors instead of building a pipeline.
      if (components is None or element_spec is None):
        raise ValueError(error_message)
      self._element_spec = element_spec
      self._devices = devices
      self._source_device = source_device
      self._multi_device_iterator_resource = components[0]
      self._deleter = components[1]
      self._device_iterators = components[2:]
      iterator_handles = []
      for it in self._device_iterators:
        iterator_handles.append(it._iterator_resource)  # pylint: disable=protected-access
    else:
      if (components is not None or element_spec is not None):
        raise ValueError(error_message)
      options = dataset_ops.Options()
      options.experimental_distribute.num_devices = len(devices)
      dataset = dataset.with_options(options)
      dataset = dataset._apply_options()  # pylint: disable=protected-access
      self._element_spec = dataset.element_spec
      experimental_slack = dataset.options().experimental_slack
      self._devices = devices
      self._source_device = source_device
      source_device_tensor = ops.convert_to_tensor(self._source_device)

      # Keep the host buffer at least as large as the prefetch buffer to
      # prevent deadlocks (see docstring).
      if prefetch_buffer_size > max_buffer_size:
        max_buffer_size = prefetch_buffer_size

      # Create the MultiDeviceIterator.
      with ops.device(self._source_device):
        self._multi_device_iterator_resource, self._deleter = (
            gen_dataset_ops.anonymous_multi_device_iterator(
                devices=self._devices, **dataset._flat_structure))  # pylint: disable=protected-access

        # The incarnation ID is used to ensure consistency between the
        # per-device iterators and the multi-device iterator.
        incarnation_id = gen_dataset_ops.multi_device_iterator_init(
            dataset._variant_tensor,  # pylint: disable=protected-access
            self._multi_device_iterator_resource,
            max_buffer_size=max_buffer_size)

      prototype_device_datasets = []
      for i, device in enumerate(self._devices):
        with ops.device(device):
          ds = _PerDeviceGenerator(i, self._multi_device_iterator_resource,
                                   incarnation_id, source_device_tensor,
                                   dataset.element_spec)
          prototype_device_datasets.append(ds)

      # TODO(rohanj): Explore the possibility of the MultiDeviceIterator to
      # initialize the device side of the pipeline. This would allow the
      # MultiDeviceIterator to choose, for example, to move some
      # transformations into the device side from its input. It might be
      # useful in rewriting.
      # Create the per device iterators.
      self._device_iterators = []
      iterator_handles = []
      for i, device in enumerate(self._devices):
        with ops.device(device):
          ds = _create_device_dataset(prototype_device_datasets[i],
                                      incarnation_id, prefetch_buffer_size,
                                      experimental_slack)
          iterator = iter(ds)
          self._device_iterators.append(iterator)
          iterator_handles.append(iterator._iterator_resource)  # pylint: disable=protected-access

    # Ensure the iterator resources are destroyed (after all per-device
    # iterators) even if this object participates in a reference cycle.
    self._resource_deleter = MultiDeviceIteratorResourceDeleter(
        multi_device_iterator=self._multi_device_iterator_resource,
        iterators=iterator_handles,
        device=self._source_device,
        deleter=self._deleter)

  def get_next(self, device=None):
    """Returns the next element given a `device`, else returns all in a list."""
    if device is not None:
      index = self._devices.index(device)
      return self._device_iterators[index].get_next()

    result = []
    for i, device in enumerate(self._devices):
      with ops.device(device):
        result.append(self._device_iterators[i].get_next())
    return result

  def __iter__(self):
    return self

  def __next__(self):
    return self.next()

  def next(self):
    # Translate end-of-sequence into the Python iterator protocol.
    try:
      return self.get_next()
    except errors.OutOfRangeError:
      raise StopIteration

  def get_next_as_optional(self):
    """Returns per-device Optional values, avoiding OutOfRangeError at EOF."""
    result = []
    for i, device in enumerate(self._devices):
      with ops.device(device):
        result.append(
            iterator_ops.get_next_as_optional(self._device_iterators[i]))
    return result

  @property
  def element_spec(self):
    return self._element_spec

  @property
  def _type_spec(self):
    return MultiDeviceIteratorSpec(self._devices, self._source_device,
                                   self._element_spec)
| 40.843493 | 116 | 0.709463 |
acebe5d1a252f049d946a20f8a9d967ea7002354 | 1,874 | py | Python | scons/scons-local-2.5.0/SCons/Tool/gas.py | emamanto/Soar | 72d2bc095068dd87ac78dad4f48938f6edc0353a | [
"BSD-2-Clause"
] | 72 | 2020-06-12T06:33:41.000Z | 2021-03-22T03:15:56.000Z | scons/scons-local-2.5.0/SCons/Tool/gas.py | emamanto/Soar | 72d2bc095068dd87ac78dad4f48938f6edc0353a | [
"BSD-2-Clause"
] | 9 | 2020-07-02T09:36:49.000Z | 2021-03-25T23:54:00.000Z | scons/scons-local-2.5.0/SCons/Tool/gas.py | emamanto/Soar | 72d2bc095068dd87ac78dad4f48938f6edc0353a | [
"BSD-2-Clause"
] | 14 | 2020-06-12T03:08:03.000Z | 2021-02-03T11:43:09.000Z | """SCons.Tool.gas
Tool-specific initialization for as, the Gnu assembler.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/gas.py rel_2.5.0:3543:937e55cd78f7 2016/04/09 11:29:54 bdbaddog"
as_module = __import__('as', globals(), locals(), [])
assemblers = ['as', 'gas']
def generate(env):
"""Add Builders and construction variables for as to an Environment."""
as_module.generate(env)
env['AS'] = env.Detect(assemblers) or 'as'
def exists(env):
return env.Detect(assemblers)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 34.703704 | 102 | 0.7492 |
acebe6dbb2733c9d3159efad92c63048cf9d8cad | 350 | py | Python | QRSMS/initial/forms.py | Srishti-Ahuja/QRSMS-V1 | 1f2fa82e8ddaeb62e633fcd6a136696355317bba | [
"Apache-2.0"
] | 4 | 2020-06-16T09:42:20.000Z | 2021-11-24T08:18:16.000Z | QRSMS/initial/forms.py | Srishti-Ahuja/QRSMS-V1 | 1f2fa82e8ddaeb62e633fcd6a136696355317bba | [
"Apache-2.0"
] | 7 | 2021-04-08T21:57:34.000Z | 2022-02-27T06:41:15.000Z | QRSMS/initial/forms.py | Srishti-Ahuja/QRSMS-V1 | 1f2fa82e8ddaeb62e633fcd6a136696355317bba | [
"Apache-2.0"
] | 7 | 2020-11-29T09:45:44.000Z | 2022-03-30T15:27:33.000Z | from django import forms
from .models import Course, Semester
class CourseForm(forms.ModelForm):
class Meta:
model = Course
fields = ['course_name', 'course_code']
class SemesterForm(forms.ModelForm):
class Meta:
model = Semester
fields = [ 'teachers_available','semester_year', 'start_date', 'end_date']
| 21.875 | 82 | 0.674286 |
acebe6f24ddee1d104fc0d9e4eb9a78a8fd45193 | 448 | py | Python | nglview/utils/widget_utils.py | stefdoerr/nglview | 671016d4277fb969c9c99cf626dd2db9e2d72514 | [
"MIT"
] | null | null | null | nglview/utils/widget_utils.py | stefdoerr/nglview | 671016d4277fb969c9c99cf626dd2db9e2d72514 | [
"MIT"
] | null | null | null | nglview/utils/widget_utils.py | stefdoerr/nglview | 671016d4277fb969c9c99cf626dd2db9e2d72514 | [
"MIT"
] | 1 | 2021-11-19T02:03:46.000Z | 2021-11-19T02:03:46.000Z | from __future__ import absolute_import
def get_widget_by_name(box, widget_name):
if hasattr(box, '_ngl_children'):
children = box._ngl_children
elif hasattr(box, 'children'):
children = box.children
else:
children = None
if children is not None:
for widget in children:
if hasattr(widget, '_ngl_name') and widget._ngl_name == widget_name:
return widget
return None
| 26.352941 | 80 | 0.645089 |
acebe7196c20f131139ace4eeb38ff4a7aa978c2 | 389 | py | Python | notice/notice/wsgi.py | Smart-Notice-Bot/Smart.Notice.Bot | 6da1e653a537237d89554616e0f5b0b1d7ea683d | [
"Apache-2.0"
] | null | null | null | notice/notice/wsgi.py | Smart-Notice-Bot/Smart.Notice.Bot | 6da1e653a537237d89554616e0f5b0b1d7ea683d | [
"Apache-2.0"
] | null | null | null | notice/notice/wsgi.py | Smart-Notice-Bot/Smart.Notice.Bot | 6da1e653a537237d89554616e0f5b0b1d7ea683d | [
"Apache-2.0"
] | 2 | 2022-01-31T07:47:20.000Z | 2022-02-19T15:30:21.000Z | """
WSGI config for notice project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'notice.settings')
application = get_wsgi_application()
| 22.882353 | 78 | 0.784062 |
acebe795d400e9cb1fb5e45909c7e6b07e45146a | 204 | py | Python | gimmebio/covid/tests/test_covid.py | Chandrima-04/gimmebio | cb3e66380006d5c5c00ff70bfb87317dd252c312 | [
"MIT"
] | 3 | 2020-01-21T23:49:55.000Z | 2020-07-29T17:02:30.000Z | gimmebio/covid/tests/test_covid.py | Chandrima-04/gimmebio | cb3e66380006d5c5c00ff70bfb87317dd252c312 | [
"MIT"
] | null | null | null | gimmebio/covid/tests/test_covid.py | Chandrima-04/gimmebio | cb3e66380006d5c5c00ff70bfb87317dd252c312 | [
"MIT"
] | 4 | 2020-01-21T16:48:17.000Z | 2020-03-13T15:34:52.000Z | """Test suite for COVID."""
from unittest import TestCase
class TestKmers(TestCase):
"""Test suite for wasabi."""
def test_dummy(self):
"""Dummy test to fill in later."""
pass
| 17 | 42 | 0.617647 |
acebe7ace340e57377cb06eac7b9129fa7989afb | 1,983 | py | Python | src/twisted/internet/test/test_abstract.py | clokep/twisted | 79a26b0aa4b1b81b46cc64d203644b35e455e46b | [
"Unlicense",
"MIT"
] | null | null | null | src/twisted/internet/test/test_abstract.py | clokep/twisted | 79a26b0aa4b1b81b46cc64d203644b35e455e46b | [
"Unlicense",
"MIT"
] | 1 | 2019-10-02T18:56:18.000Z | 2019-10-02T18:56:18.000Z | src/twisted/internet/test/test_abstract.py | clokep/twisted | 79a26b0aa4b1b81b46cc64d203644b35e455e46b | [
"Unlicense",
"MIT"
] | 1 | 2019-10-02T18:36:25.000Z | 2019-10-02T18:36:25.000Z | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.internet.abstract}, a collection of APIs for implementing
reactors.
"""
from twisted.trial.unittest import SynchronousTestCase
from twisted.internet.abstract import isIPv6Address
class IPv6AddressTests(SynchronousTestCase):
"""
Tests for L{isIPv6Address}, a function for determining if a particular
string is an IPv6 address literal.
"""
def test_empty(self):
"""
The empty string is not an IPv6 address literal.
"""
self.assertFalse(isIPv6Address(""))
def test_colon(self):
"""
A single C{":"} is not an IPv6 address literal.
"""
self.assertFalse(isIPv6Address(":"))
def test_loopback(self):
"""
C{"::1"} is the IPv6 loopback address literal.
"""
self.assertTrue(isIPv6Address("::1"))
def test_scopeID(self):
"""
An otherwise valid IPv6 address literal may also include a C{"%"}
followed by an arbitrary scope identifier.
"""
self.assertTrue(isIPv6Address("fe80::1%eth0"))
self.assertTrue(isIPv6Address("fe80::2%1"))
self.assertTrue(isIPv6Address("fe80::3%en2"))
def test_invalidWithScopeID(self):
"""
An otherwise invalid IPv6 address literal is still invalid with a
trailing scope identifier.
"""
self.assertFalse(isIPv6Address("%eth0"))
self.assertFalse(isIPv6Address(":%eth0"))
self.assertFalse(isIPv6Address("hello%eth0"))
def test_unicodeAndBytes(self):
"""
L{isIPv6Address} evaluates ASCII-encoded bytes as well as text.
"""
self.assertTrue(isIPv6Address(b"fe80::2%1"))
self.assertTrue(isIPv6Address(u"fe80::2%1"))
self.assertFalse(isIPv6Address(u"\u4321"))
self.assertFalse(isIPv6Address(u"hello%eth0"))
self.assertFalse(isIPv6Address(b"hello%eth0"))
| 28.73913 | 77 | 0.638427 |
acebea9ebed4e42f1a655047152c13cb7dc38f6c | 174 | py | Python | spja_tracker/urls.py | Matej-Chmel/Package_Tracker | 06f510e505ae49b4ed5d8fda68d8daf872d8b4a7 | [
"CC0-1.0"
] | null | null | null | spja_tracker/urls.py | Matej-Chmel/Package_Tracker | 06f510e505ae49b4ed5d8fda68d8daf872d8b4a7 | [
"CC0-1.0"
] | null | null | null | spja_tracker/urls.py | Matej-Chmel/Package_Tracker | 06f510e505ae49b4ed5d8fda68d8daf872d8b4a7 | [
"CC0-1.0"
] | null | null | null | from django.contrib import admin
from django.urls import include, path
urlpatterns = [
path('tracker/', include('tracker.urls')),
path('admin/', admin.site.urls),
]
| 21.75 | 46 | 0.695402 |
acebeaa4d8b4dd615640c817a6083f47c8fdbc11 | 1,869 | py | Python | tests/test_add_update_publication.py | McGill-NLP/mcgill-nlp.github.io | 3344ebfa950c55c17ae8767b79d92c2e40f7ef48 | [
"MIT"
] | null | null | null | tests/test_add_update_publication.py | McGill-NLP/mcgill-nlp.github.io | 3344ebfa950c55c17ae8767b79d92c2e40f7ef48 | [
"MIT"
] | null | null | null | tests/test_add_update_publication.py | McGill-NLP/mcgill-nlp.github.io | 3344ebfa950c55c17ae8767b79d92c2e40f7ef48 | [
"MIT"
] | 1 | 2021-11-06T21:01:48.000Z | 2021-11-06T21:01:48.000Z | from genericpath import exists
import unittest
import os
import json
import shutil
from ruamel.yaml import YAML
import src.python.add_update_publication as mod
class TestAddUpdatePublication(unittest.TestCase):
@classmethod
def setUpClass(cls):
yaml = YAML()
yaml.preserve_quotes = True
with open("_data/authors.yml") as f:
cls.authors = yaml.load(f)
os.makedirs("tests/scratch/assets/images/papers/", exist_ok=True)
cls.save_dir = "tests/scratch/_posts/papers/"
cls.image_dir = "tests/scratch/assets/images/papers/"
@classmethod
def tearDownClass(cls) -> None:
for path in [
cls.save_dir,
cls.image_dir,
]:
if os.path.exists(path):
shutil.rmtree(path)
def test_add_publication(self):
with open("tests/data/add_publication/in.md") as f:
issue_body = f.read()
with open("tests/data/add_publication/out.md") as f:
expected = f.read()
formatted = mod.main(
issue_body, save_dir=self.save_dir, image_dir=self.image_dir
)
self.assertEqual(formatted["content"], expected)
def test_update_publication(self):
with open("tests/data/update_publication/in.md") as f:
issue_body = f.read()
with open("tests/data/update_publication/out.md") as f:
expected = f.read()
formatted = mod.main(
issue_body, save_dir=self.save_dir, image_dir=self.image_dir
)
self.assertEqual(formatted["content"], expected)
path = os.path.join(self.image_dir, "1904.1234.jpg")
self.assertTrue(
os.path.isfile(path),
msg=f"Expected a file to find a file at {path}, but only the following files were there: {os.listdir(self.image_dir)}",
)
| 29.203125 | 131 | 0.621723 |
acebeaccfda3dcc5c64154d500a4cbd9cfa06e81 | 7,050 | py | Python | tests/system/action/topic/test_create.py | MJJojo97/openslides-backend | af0d1edb0070e352d46f285a1ba0bbe3702d49ae | [
"MIT"
] | null | null | null | tests/system/action/topic/test_create.py | MJJojo97/openslides-backend | af0d1edb0070e352d46f285a1ba0bbe3702d49ae | [
"MIT"
] | null | null | null | tests/system/action/topic/test_create.py | MJJojo97/openslides-backend | af0d1edb0070e352d46f285a1ba0bbe3702d49ae | [
"MIT"
] | null | null | null | from openslides_backend.models.models import AgendaItem
from openslides_backend.permissions.permissions import Permissions
from tests.system.action.base import BaseActionTestCase
class TopicCreateSystemTest(BaseActionTestCase):
def test_create(self) -> None:
self.set_models(
{
"topic/41": {},
"meeting/1": {"name": "test", "is_active_in_organization_id": 1},
}
)
response = self.request("topic.create", {"meeting_id": 1, "title": "test"})
self.assert_status_code(response, 200)
self.assert_model_exists("topic/42")
topic = self.get_model("topic/42")
self.assertEqual(topic.get("meeting_id"), 1)
self.assertEqual(topic.get("agenda_item_id"), 1)
self.assertEqual(topic.get("sequential_number"), 1)
self.assert_model_exists("agenda_item/1")
agenda_item = self.get_model("agenda_item/1")
self.assertEqual(agenda_item.get("meeting_id"), 1)
self.assertEqual(agenda_item.get("content_object_id"), "topic/42")
self.assert_model_exists(
"list_of_speakers/1", {"content_object_id": "topic/42"}
)
self.assert_model_exists(
"list_of_speakers/1", {"content_object_id": "topic/42"}
)
self.assertTrue(response.json["success"])
self.assertEqual(response.json["message"], "Actions handled successfully")
self.assertEqual(
response.json["results"], [[{"id": 42, "sequential_number": 1}]]
)
def test_create_multiple_requests(self) -> None:
self.create_model(
"meeting/1", {"name": "test", "is_active_in_organization_id": 1}
)
response = self.request_json(
[
{
"action": "topic.create",
"data": [
{"meeting_id": 1, "title": "test1"},
{"meeting_id": 1, "title": "test2"},
],
},
{
"action": "topic.create",
"data": [
{"meeting_id": 1, "title": "test3"},
{"meeting_id": 1, "title": "test4"},
],
},
],
)
self.assert_status_code(response, 400)
self.assertIn(
"Datastore service sends HTTP 400. The following locks were broken: 'list_of_speakers/meeting_id', 'list_of_speakers/sequential_number', 'topic/meeting_id', 'topic/sequential_number'",
response.json["message"],
)
self.assert_model_not_exists("topic/1")
self.assert_model_not_exists("topic/2")
self.assert_model_not_exists("topic/3")
self.assert_model_not_exists("topic/4")
def test_create_more_fields(self) -> None:
self.create_model(
"meeting/1", {"name": "test", "is_active_in_organization_id": 1}
)
response = self.request(
"topic.create",
{
"meeting_id": 1,
"title": "test",
"agenda_type": AgendaItem.INTERNAL_ITEM,
"agenda_duration": 60,
},
)
self.assert_status_code(response, 200)
self.assert_model_exists("topic/1")
topic = self.get_model("topic/1")
self.assertEqual(topic.get("meeting_id"), 1)
self.assertEqual(topic.get("agenda_item_id"), 1)
self.assertTrue(topic.get("agenda_type") is None)
agenda_item = self.get_model("agenda_item/1")
self.assertEqual(agenda_item.get("meeting_id"), 1)
self.assertEqual(agenda_item.get("content_object_id"), "topic/1")
self.assertEqual(agenda_item["type"], AgendaItem.INTERNAL_ITEM)
self.assertEqual(agenda_item["duration"], 60)
self.assertEqual(agenda_item["weight"], 10000)
def test_create_multiple_in_one_request(self) -> None:
self.create_model("meeting/1", {"is_active_in_organization_id": 1})
response = self.request_multi(
"topic.create",
[
{
"meeting_id": 1,
"title": "A",
"agenda_type": AgendaItem.AGENDA_ITEM,
"agenda_weight": 1000,
},
{
"meeting_id": 1,
"title": "B",
"agenda_type": AgendaItem.AGENDA_ITEM,
"agenda_weight": 1001,
},
],
)
self.assert_status_code(response, 200)
topic = self.get_model("topic/1")
self.assertEqual(topic.get("agenda_item_id"), 1)
self.assertEqual(topic.get("sequential_number"), 1)
agenda_item = self.get_model("agenda_item/1")
self.assertEqual(agenda_item.get("meeting_id"), 1)
self.assertEqual(agenda_item.get("content_object_id"), "topic/1")
self.assertEqual(agenda_item.get("type"), AgendaItem.AGENDA_ITEM)
self.assertEqual(agenda_item.get("weight"), 1000)
topic = self.get_model("topic/2")
self.assertEqual(topic.get("agenda_item_id"), 2)
self.assertEqual(topic.get("sequential_number"), 2)
agenda_item = self.get_model("agenda_item/2")
self.assertEqual(agenda_item.get("meeting_id"), 1)
self.assertEqual(agenda_item.get("content_object_id"), "topic/2")
self.assertEqual(agenda_item.get("type"), AgendaItem.AGENDA_ITEM)
self.assertEqual(agenda_item.get("weight"), 1001)
meeting = self.get_model("meeting/1")
self.assertEqual(meeting.get("topic_ids"), [1, 2])
self.assertEqual(meeting.get("agenda_item_ids"), [1, 2])
self.assertEqual(meeting.get("list_of_speakers_ids"), [1, 2])
def test_create_multiple_with_existing_sequential_number(self) -> None:
self.set_models(
{
"meeting/1": {"is_active_in_organization_id": 1},
"topic/1": {"meeting_id": 1, "sequential_number": 42},
}
)
response = self.request_multi(
"topic.create",
[
{
"meeting_id": 1,
"title": "A",
},
{
"meeting_id": 1,
"title": "B",
},
],
)
self.assert_status_code(response, 200)
topic = self.get_model("topic/2")
self.assertEqual(topic.get("sequential_number"), 43)
topic = self.get_model("topic/3")
self.assertEqual(topic.get("sequential_number"), 44)
def test_create_no_permission(self) -> None:
self.base_permission_test(
{}, "topic.create", {"meeting_id": 1, "title": "test"}
)
def test_create_permission(self) -> None:
self.base_permission_test(
{},
"topic.create",
{"meeting_id": 1, "title": "test"},
Permissions.AgendaItem.CAN_MANAGE,
)
| 40.517241 | 196 | 0.556738 |
acebed613cb6061f8527a3b8486241e904ff39ba | 431 | py | Python | lib_bgp_simulator/tests/yaml_system_tests/__init__.py | jfuruness/lib_bgp_simulator | 77f9d32bf92591a846813a105450654724212d73 | [
"BSD-3-Clause"
] | 1 | 2021-09-27T14:13:06.000Z | 2021-09-27T14:13:06.000Z | lib_bgp_simulator/tests/yaml_system_tests/__init__.py | jfuruness/lib_bgp_simulator | 77f9d32bf92591a846813a105450654724212d73 | [
"BSD-3-Clause"
] | 2 | 2022-03-10T14:19:17.000Z | 2022-03-11T18:43:48.000Z | lib_bgp_simulator/tests/yaml_system_tests/__init__.py | jfuruness/lib_bgp_simulator | 77f9d32bf92591a846813a105450654724212d73 | [
"BSD-3-Clause"
] | null | null | null | from .hidden_hijack import BaseHiddenHijackTester
from .bgp_prop import BaseBGPPropTester
from .fig2 import BaseFig2Tester
from .non_routed_superprefix import BaseNonRoutedSuperprefixTester
from .non_routed_prefix import BaseNonRoutedPrefixTester
__all__ = ["BaseHiddenHijackTester",
"BaseBGPPropTester",
"BaseFig2Tester",
"BaseNonRoutedSuperprefixTester",
"BaseNonRoutedPrefixTester"]
| 35.916667 | 66 | 0.781903 |
acebefe1941312b4035862ba60d58276653e5f4b | 334 | py | Python | config.py | seanthegeek/TweetFreq | 772af42896480f9879bf9fe4366d83bea197c3fe | [
"MIT"
] | null | null | null | config.py | seanthegeek/TweetFreq | 772af42896480f9879bf9fe4366d83bea197c3fe | [
"MIT"
] | null | null | null | config.py | seanthegeek/TweetFreq | 772af42896480f9879bf9fe4366d83bea197c3fe | [
"MIT"
] | null | null | null | """Flask configuration"""
# Ignore "unused" import
# pylint: disable=W0611
from secrets import SECRET_KEY
DEBUG = False
BABEL_DEFAULT_LOCALE = 'en_US'
BABEL_DEFAULT_TIMEZONE = 'UTC'
# Celery settings
CELERY_ACCEPT_CONTENT = [ 'json']
BROKER_URL = "redis://localhost:6379/1"
CELERYD_CONCURRENCY = 3
CELERY_IMPORTS = ('tweetfreq', )
| 20.875 | 39 | 0.754491 |
acebf013052c10489485ff24b31b2f5dd24dd24a | 2,312 | py | Python | examples/viddev2.py | samvanderpoel/PixelGeom | cbe7ae3edd62105d69843f7317c608b16907849f | [
"MIT"
] | null | null | null | examples/viddev2.py | samvanderpoel/PixelGeom | cbe7ae3edd62105d69843f7317c608b16907849f | [
"MIT"
] | null | null | null | examples/viddev2.py | samvanderpoel/PixelGeom | cbe7ae3edd62105d69843f7317c608b16907849f | [
"MIT"
] | null | null | null | import cv2
import math
import os, sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) # add parent dir to sys path
import numpy as np
from grid import *
from shape import *
imgs = []
s = 0.3
a1, b1, c1 = np.random.rand(3)
a2, b2, c2 = np.random.rand(3)
frames = 700
low, high = 0.1, 10
for idx, i in enumerate(np.linspace(0.1, 10, num = frames)):
sys.stdout.write('\rCreating frame ' + str(idx+1) + ' of ' + str(frames))
sys.stdout.flush()
tico = dodecahedron(name = 'tico', center = (s*499, s*499, s*100),
rad = (math.sin(i))**2*s*1500, shade = 1)
mygrid = grid(shapes = [tico], dim = (round(s*1000), round(s*1000)))
mygrid.rotate_shape3d(name = 'tico', axis = [a1, b1, c1], angle = i*math.pi/5)
mygrid.rotate_shape3d(name = 'tico', axis = [a2, b2, c2], angle = i*math.pi/5)
mygrid.paint_canvas(paint = 'gradient')
mygrid.draw_shapes()
cmap = rgb_to_cmap(colors = [[205,92,92], [0,128,128], [221,160,221]], penlow = [0,255,127], penhigh = [205,133,63])
mygrid.plot_grid('pic.png', cmap = cmap, dpi = 200)
imgs.append(cv2.imread('pic.png'))
os.remove('pic.png')
print('\n')
np.save('anim.npy', imgs)
fourcc = cv2.VideoWriter_fourcc(*'MP4V')
video=cv2.VideoWriter('dodecahedron.mp4', fourcc, 50, (1000, 1000)) # img dims must meet or exceed resolution determined by dpi above
n = len(imgs)
m = len(str(n))
for idx, image in enumerate(imgs):
if idx % 10 == 0:
sys.stdout.write('\rAdding frame: ' + str(idx+1) + (m-len(str(idx+1)))*' ' + ' to video ')
sys.stdout.flush()
video.write(image)
for idx, image in enumerate(reversed(imgs)):
if idx % 10 == 0:
sys.stdout.write('\rAdding frame ' + str(n-1-idx) + (m-len(str(n-1-idx)))*' ' + ' to video ')
sys.stdout.flush()
video.write(image)
for idx, image in enumerate(imgs):
if idx % 10 == 0:
sys.stdout.write('\rAdding frame ' + str(idx) + (m-len(str(idx+1)))*' ' + ' to video ')
sys.stdout.flush()
video.write(image)
for idx, image in enumerate(reversed(imgs)):
if idx % 10 == 0:
sys.stdout.write('\rAdding frame ' + str(n-1-idx) + (m-len(str(n-1-idx)))*' ' + ' to video ')
sys.stdout.flush()
video.write(image)
print('\n')
cv2.destroyAllWindows()
video.release()
| 36.125 | 133 | 0.610294 |
acebf180174578a91e89a482496f000ad77adc20 | 15,532 | py | Python | steelscript/appfwk/business_hours/datasource/business_hours_source.py | riverbed/steelscript-appfwk-business-hours | 33f6970fd8eff3863c03da31dcf0618da7e6a968 | [
"MIT"
] | null | null | null | steelscript/appfwk/business_hours/datasource/business_hours_source.py | riverbed/steelscript-appfwk-business-hours | 33f6970fd8eff3863c03da31dcf0618da7e6a968 | [
"MIT"
] | null | null | null | steelscript/appfwk/business_hours/datasource/business_hours_source.py | riverbed/steelscript-appfwk-business-hours | 33f6970fd8eff3863c03da31dcf0618da7e6a968 | [
"MIT"
] | null | null | null | # Copyright (c) 2015 Riverbed Technology, Inc.
#
# This software is licensed under the terms and conditions of the MIT License
# accompanying the software ("License"). This software is distributed "AS IS"
# as set forth in the License.
import re
import copy
import logging
import datetime
import pytz
import pandas
from django import forms
from steelscript.common.timeutils import timedelta_total_seconds
from steelscript.appfwk.apps.datasource.forms import fields_add_time_selection
from steelscript.appfwk.apps.datasource.models import \
DatasourceTable, DatasourceQuery, Table, TableField
from steelscript.appfwk.apps.jobs import \
QueryContinue, QueryComplete
from steelscript.appfwk.apps.jobs.models import Job
from steelscript.appfwk.apps.datasource.modules.analysis import \
AnalysisException, AnalysisTable, AnalysisQuery
logger = logging.getLogger(__name__)
# Hour labels in 12-hour clock order: 12, 1, 2, ... 11
_CLOCK_HOURS = ['12'] + [str(h) for h in range(1, 12)]

# All 24 hour-granularity choices, midnight through 11pm
TIMES = (['%s:00am' % h for h in _CLOCK_HOURS] +
         ['%s:00pm' % h for h in _CLOCK_HOURS])
def parse_time(t_str):
    """Parse a clock string like ``"8:00am"`` into a ``datetime.time``.

    Accepts ``H:MM`` optionally followed by whitespace and an am/pm
    marker (case-insensitive, trailing 'm' optional).  12-hour-clock
    semantics are honored: ``12:00am`` is midnight and ``12:00pm`` is
    noon.

    :raises ValueError: if *t_str* does not match the expected format.
    """
    m = re.match("^([0-9]+):([0-9][0-9]) *([aApP][mM]?)?$", t_str)
    if not m:
        raise ValueError("Could not parse time string: %s" % t_str)

    hours = int(m.group(1))
    minutes = int(m.group(2))
    ampm = m.group(3)

    if ampm:
        # 12-hour clock: "12" is the *first* hour of the half-day, so
        # normalize it to 0 before applying the pm offset.  Without this,
        # "12:00pm" produced hours == 24 (datetime.time raises) and
        # "12:00am" was misread as noon.
        if hours == 12:
            hours = 0
        if ampm.lower()[0] == 'p':
            hours += 12

    return datetime.time(hours, minutes, 0)
def replace_time(dt, t, tz):
    """Return *dt* with its wall-clock time replaced by *t*, localized to *tz*.

    The tzinfo is stripped before re-localizing so that the timezone's
    ``localize`` can pick the correct UTC offset (DST-aware) for the new
    wall-clock time.
    """
    naive = dt.replace(hour=t.hour, minute=t.minute,
                       second=0, microsecond=0, tzinfo=None)
    return tz.localize(naive)
def fields_add_business_hour_fields(obj,
                                    initial_biz_hours_start='8:00am',
                                    initial_biz_hours_end='5:00pm',
                                    initial_biz_hours_tzname='US/Eastern',
                                    initial_biz_hours_weekends=False,
                                    **kwargs):
    """Attach business-hours criteria fields to *obj*.

    Adds the standard time-selection fields (defaulting the duration to
    one week) plus four business-hours fields: start time, end time,
    timezone and a weekends flag.  Extra keyword arguments are passed
    through to ``fields_add_time_selection``.
    """
    kwargs['initial_duration'] = kwargs.get('initial_duration', '1w')
    fields_add_time_selection(obj, show_start=True, **kwargs)

    time_choices = {'choices': zip(TIMES, TIMES)}
    tz_choices = {'choices': zip(pytz.common_timezones,
                                 pytz.common_timezones)}

    # (keyword, label, initial, field_cls, field_kwargs, required)
    field_specs = [
        ('business_hours_start', 'Start Business',
         initial_biz_hours_start, forms.ChoiceField, time_choices, True),
        ('business_hours_end', 'End Business',
         initial_biz_hours_end, forms.ChoiceField, time_choices, True),
        ('business_hours_tzname', 'Business Timezone',
         initial_biz_hours_tzname, forms.ChoiceField, tz_choices, True),
        ('business_hours_weekends', 'Business includes weekends',
         initial_biz_hours_weekends, forms.BooleanField, None, False),
    ]

    for keyword, label, initial, field_cls, field_kwargs, required in \
            field_specs:
        field_args = dict(keyword=keyword,
                          label=label,
                          initial=initial,
                          field_cls=field_cls,
                          required=required)
        # The boolean field takes no extra form kwargs
        if field_kwargs is not None:
            field_args['field_kwargs'] = field_kwargs

        field = TableField(**field_args)
        field.save()
        obj.fields.add(field)
def get_timestable(biztable):
    """Return the 'times' Table associated with a business-hours table."""
    times_ref = biztable.options.tables['times']
    return Table.from_ref(times_ref)
class BusinessHoursTimesTable(DatasourceTable):
    """Table listing the business-hour intervals within the report window.

    Produces one row per overlapping business day with columns
    ``starttime``, ``endtime`` and ``totalsecs``.
    """

    class Meta:
        proxy = True
        app_label = 'steelscript.appfwk.business-hours'

    _query_class = 'BusinessHoursTimesQuery'

    def post_process_table(self, field_options):
        super(BusinessHoursTimesTable, self).post_process_table(field_options)

        # Column definitions: (positional args, keyword args) for add_column
        column_specs = (
            (('starttime', 'Start time'),
             dict(datatype='time', iskey=True, sortasc=True)),
            (('endtime', 'End time'),
             dict(datatype='time', iskey=True)),
            (('totalsecs', 'Total secs'), {}),
        )
        for args, kw in column_specs:
            self.add_column(*args, **kw)

        fields_add_business_hour_fields(self)
class BusinessHoursTimesQuery(DatasourceQuery):
    """Computes the business-hour intervals overlapping the report window."""

    def run(self):
        """Build one (start, end, secs) row per overlapping business day.

        Walks the report window day by day in the configured business
        timezone, clipping each day's business hours against the report
        start/end times.  Returns a QueryComplete whose data has columns
        ``starttime``, ``endtime`` and ``totalsecs``, or ``None`` data
        when no business hours fall inside the window.
        """
        criteria = self.job.criteria
        tzname = criteria.business_hours_tzname
        logger.debug("%s: timezone: %s" % (self.job, tzname))
        tz = pytz.timezone(tzname)
        # Convert report start/end to datetime objects in the requested
        # business timezone
        st = criteria.starttime.astimezone(tz)
        et = criteria.endtime.astimezone(tz)
        logger.debug("%s: times: %s - %s" % (self.job, st, et))
        # Business hours start/end, as string "HH:MMam" like 8:00am
        sb = parse_time(criteria.business_hours_start)
        eb = parse_time(criteria.business_hours_end)
        weekends = criteria.business_hours_weekends
        # Iterate from st to et one day at a time, collecting the clipped
        # business-hour interval for each day
        times = []
        t = st
        while t <= et:
            # Set t0/t1 to date of t but time of sb/eb
            t0 = replace_time(t, sb, tz)
            t1 = replace_time(t, eb, tz)
            # Advance t by 1 day
            t = t + datetime.timedelta(days=1)
            # Strip timezone, then re-add to account for DST transitions
            t = tz.localize(t.replace(tzinfo=None))
            # Skip weekends (weekday() >= 5 is Saturday/Sunday)
            if not weekends and t0.weekday() >= 5:
                continue
            # Now see if we have any overlap of biz hours for today
            if et < t0:
                # Report end time is today before biz hours start, all done
                break
            if et < t1:
                # Report end time is today in the middle of biz hours, adjust
                t1 = et
            if t1 < st:
                # Report start time occurs today *after* biz end, nothing today
                continue
            if t0 < st:
                # Report start time occurs today in the middle of the biz hours
                # Adjust t0
                t0 = st
            logger.debug("%s: start: %s, end: %s, duration: %s" %
                         (self.job, str(t0), str(t1),
                          str(timedelta_total_seconds(t1-t0))))
            times.append((t0, t1, timedelta_total_seconds(t1-t0)))
        if len(times) == 0:
            data = None
        else:
            # manually assign data types to avoid Python 2.6 issues
            # when converting date times
            columns = ['starttime', 'endtime', 'totalsecs']
            s1 = pandas.Series([x[0] for x in times], dtype='datetime64[ns]')
            s2 = pandas.Series([x[1] for x in times], dtype='datetime64[ns]')
            tt = pandas.Series([x[2] for x in times], dtype='int')
            # create dataframe then assign using correct column ordering
            df = pandas.DataFrame(dict(zip(columns, [s1, s2, tt])))
            data = df[columns]
        return QueryComplete(data)
class BusinessHoursTable(AnalysisTable):
    """Analysis table that restricts a base table to business hours.

    Pairs a :class:`BusinessHoursTimesTable` (the intervals) with a base
    table that is run once per interval by :class:`BusinessHoursQuery`.
    """

    class Meta:
        proxy = True
        app_label = 'steelscript.appfwk.business-hours'

    _query_class = 'BusinessHoursQuery'

    TABLE_OPTIONS = {'aggregate': None}

    @classmethod
    def create(cls, name, basetable, aggregate, **kwargs):
        """Create the table plus its companion '<name>-times' table."""
        times = BusinessHoursTimesTable.create(name + '-times')
        kwargs.update(tables={'times': times},
                      related_tables={'basetable': basetable},
                      aggregate=aggregate)
        return super(BusinessHoursTable, cls).create(name, **kwargs)

    def post_process_table(self, field_options):
        super(BusinessHoursTable, self).post_process_table(field_options)

        base_ref = self.options['related_tables']['basetable']
        self.copy_columns(base_ref)

        # Synthetic columns are resolved by the sub-jobs; clear the flag
        # and expression on the copies so they are treated as plain data
        for column in self.get_columns(synthetic=True):
            column.synthetic = False
            column.compute_expression = ''
            column.save()
class BusinessHoursQuery(AnalysisQuery):
    """Runs the base table once per business-hours interval and merges
    the per-interval results into a single response."""

    def analyze(self, jobs):
        """Launch one base-table sub-job per business-hours interval.

        ``jobs['times']`` holds one row per interval (columns
        ``starttime``/``endtime``).  Results are merged in ``collect``.
        Returns QueryComplete(None) when no intervals overlap the window.
        """
        criteria = self.job.criteria

        tzname = criteria.business_hours_tzname
        tz = pytz.timezone(tzname)

        times = jobs['times'].data()

        if times is None or len(times) == 0:
            # No business hours fall within the requested time frame
            return QueryComplete(None)

        basetable = Table.from_ref(
            self.table.options.related_tables['basetable']
        )

        # Create one dependent job per interval, with the criteria
        # narrowed to that interval's start/end times
        depjobs = {}
        for i, row in times.iterrows():
            (t0, t1) = (row['starttime'], row['endtime'])

            sub_criteria = copy.copy(criteria)
            sub_criteria.starttime = t0.astimezone(tz)
            sub_criteria.endtime = t1.astimezone(tz)

            job = Job.create(table=basetable, criteria=sub_criteria,
                             update_progress=False, parent=self.job)

            logger.debug("Created %s: %s - %s" % (job, t0, t1))
            depjobs[job.id] = job

        return QueryContinue(self.collect, depjobs)

    def collect(self, jobs=None):
        """Merge the per-interval results into one DataFrame.

        For time-keyed base tables the rows are simply concatenated.
        Otherwise each row is tagged with its interval length in seconds
        and the rows are grouped by key and aggregated (time-weighted
        averages via ``avg_groupby_aggregate``).

        :raises AnalysisException: if any sub-job finished in error.
        """
        logger.debug("%s: bizhours.collect: %s" % (self, jobs))

        basetable = Table.from_ref(
            self.table.options.related_tables['basetable']
        )

        # Collect all key names, noting whether one of them is a time key
        keynames = []
        istime = False
        for key in basetable.get_columns(iskey=True):
            keynames.append(key.name)
            if key.istime():
                istime = True

        # Now collect the data from each sub-job
        total_secs = 0
        dfs = []
        for job in jobs.values():
            if job.status == Job.ERROR:
                raise AnalysisException("%s for %s-%s failed: %s" %
                                        (job, job.criteria.starttime,
                                         job.criteria.endtime,
                                         job.message))
            subdf = job.data()
            logger.debug("%s: returned %d rows" %
                         (job, len(subdf) if subdf is not None else 0))
            if subdf is None:
                continue

            logger.debug("%s: actual_criteria %s" % (job, job.actual_criteria))
            t0 = job.actual_criteria.starttime
            t1 = job.actual_criteria.endtime

            if not istime:
                # Tag each row with the interval length so averages can
                # be time-weighted during aggregation below
                subdf['__secs__'] = timedelta_total_seconds(t1 - t0)
                total_secs += timedelta_total_seconds(t1 - t0)

            dfs.append(subdf)

        if not dfs:
            return QueryComplete(None)

        df = pandas.concat(dfs, ignore_index=True)

        if not istime:
            # Any value column without an explicit aggregate op gets 'sum'
            if 'aggregate' in self.table.options:
                ops = self.table.options['aggregate']
                for col in basetable.get_columns(iskey=False):
                    if col.name not in ops:
                        ops[col.name] = 'sum'
            else:
                ops = 'sum'

            df = avg_groupby_aggregate(df, keynames, ops,
                                       '__secs__', total_secs)

        return QueryComplete(df)
def avg_groupby_aggregate(df, keys, ops, t_col, total_t):
    """Groupby/aggregate with support for a weighted average column.

    Group the data frame `df` on `keys` using the operation dict
    defined in `ops` just like the standard `df.aggregate(ops)`
    call, but support an `avg` operation that computes a
    weighted average using `t_col` as the weight.

    This is used for aggregating multiple reports over different
    timeframes, where the rows from each report have a weight
    representing the time interval covered by the row in seconds.

    `df`       source pandas DataFrame.  Note: helper columns are added
               to it during the computation and removed again before
               returning.

    `keys`     array of key column names

    `ops`      dict of operations to perform on each column.
               the key is the column, the value is either a
               numpy operation (e.g. sum, mean) or another function.
               Use 'avg' to compute the time weighted average

    `t_col`    the name of the column holding the time interval
               covered by each row

    `total_t`  the total time interval covered by all reports
               in the `df` source data

    For example, consider 3 queries over different intervals, the first
    is 1 hour (3600 seconds), the second and third both cover 8 hours
    (28800 seconds).  The results from each query are all in the
    same DataFrame.

    >>> q1_data = [['tcp', 72000, 20, 3600],
                   ['udp', 3600, 1, 3600],
                   ['icmp', 360, 0.1, 3600]]
    >>> q2_data = [['tcp', 1152000, 40, 28800],
                   ['udp', 57600, 2, 28800]]
    >>> q3_data = [['tcp', 1440000, 50, 28800],
                   ['udp', 201600, 7, 28800],
                   ['icmp', 8640, 0.3, 28800]]
    >>> data = q1_data + q2_data + q3_data
    >>> df = pandas.DataFrame(data,
            columns = ['proto', 'bytes', 'avg_bytes', 'interval'])
    >>> avg_groupby_aggregate(df, ['proto'],
                              {'bytes': sum, 'avg_bytes': 'avg'},
                              'interval', 3600 + 28800 + 28800)
      proto    bytes  avg_bytes
    0  icmp     9000   0.147059
    1   tcp  2664000  43.529412
    2   udp   262800   4.294118
    """
    # The basic idea is to leverage the pandas aggregate() function, but
    # it works most simply column by column, whereas we need to
    # leverage the weight of each row as it may change.
    #
    # We can get this done by instead computing weighted value for each
    # row, using pandas to aggregate using sum on just the weighted sum
    # columns, and then dividing the resulting sums by the total interval
    #
    # Basic algorithm for a column x:
    #   1. Compute x__weighted__ == x * t_col for all rows
    #   2. Group by and aggregate the weighted cols by sum()
    #   3. Compute the resulted x = sum(x__weighted__) / total_t

    # Simple function to define a unique column name
    # for the weighted total column for an existing column name
    def weighted_col(name):
        return name + '__weighted__'

    # The ops dictionary defines the map of column names and the aggregation
    # function.  Iterate through this map.
    # (dict.iteritems() is Python-2 only; items() works on 2 and 3.)
    newops = {}
    for k, v in ops.items():
        if v == 'avg':
            # Per-row weighted value; summed below, normalized afterwards
            df[weighted_col(k)] = df[k] * df[t_col]
            newops[weighted_col(k)] = 'sum'
        else:
            newops[k] = v

    result = df.groupby(keys).aggregate(newops).reset_index()

    for k, v in ops.items():
        if v == 'avg':
            result[k] = result[weighted_col(k)] / total_t
            # Drop the helper column from both the result and the
            # caller's data frame, restoring df to its original columns.
            del result[weighted_col(k)]
            del df[weighted_col(k)]

    return result
| 36.545882 | 79 | 0.564963 |
acebf23371f0e653b95b2125172de17a54af0c5b | 7,630 | py | Python | alameda_api/v1alpha1/datahub/events/events_pb2.py | containers-ai/api | ec0fafc7bfd17c16cff1d1737dbceacac5d09fd8 | [
"Apache-2.0"
] | 1 | 2020-05-18T02:34:29.000Z | 2020-05-18T02:34:29.000Z | alameda_api/v1alpha1/datahub/events/events_pb2.py | containers-ai/api | ec0fafc7bfd17c16cff1d1737dbceacac5d09fd8 | [
"Apache-2.0"
] | 9 | 2018-11-01T09:08:51.000Z | 2019-01-12T07:09:06.000Z | alameda_api/v1alpha1/datahub/events/events_pb2.py | containers-ai/api | ec0fafc7bfd17c16cff1d1737dbceacac5d09fd8 | [
"Apache-2.0"
] | 12 | 2018-10-30T02:46:56.000Z | 2021-04-13T07:55:09.000Z | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: alameda_api/v1alpha1/datahub/events/events.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from alameda_api.v1alpha1.datahub.events import types_pb2 as alameda__api_dot_v1alpha1_dot_datahub_dot_events_dot_types__pb2
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
# File-level descriptor for events.proto.  serialized_pb is the compiled
# FileDescriptorProto blob emitted by protoc; this module is generated code
# (see the "DO NOT EDIT" header) -- regenerate from the .proto instead of
# editing by hand.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='alameda_api/v1alpha1/datahub/events/events.proto',
  package='containersai.alameda.v1alpha1.datahub.events',
  syntax='proto3',
  serialized_options=b'Z@github.com/containers-ai/api/alameda_api/v1alpha1/datahub/events',
  serialized_pb=b'\n0alameda_api/v1alpha1/datahub/events/events.proto\x12,containersai.alameda.v1alpha1.datahub.events\x1a/alameda_api/v1alpha1/datahub/events/types.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"\xeb\x03\n\x05\x45vent\x12(\n\x04time\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\n\n\x02id\x18\x02 \x01(\t\x12\x12\n\ncluster_id\x18\x03 \x01(\t\x12I\n\x06source\x18\x04 \x01(\x0b\x32\x39.containersai.alameda.v1alpha1.datahub.events.EventSource\x12\x45\n\x04type\x18\x05 \x01(\x0e\x32\x37.containersai.alameda.v1alpha1.datahub.events.EventType\x12K\n\x07version\x18\x06 \x01(\x0e\x32:.containersai.alameda.v1alpha1.datahub.events.EventVersion\x12G\n\x05level\x18\x07 \x01(\x0e\x32\x38.containersai.alameda.v1alpha1.datahub.events.EventLevel\x12Q\n\x07subject\x18\x08 \x01(\x0b\x32@.containersai.alameda.v1alpha1.datahub.events.K8SObjectReference\x12\x0f\n\x07message\x18\t \x01(\t\x12\x0c\n\x04\x64\x61ta\x18\n \x01(\tBBZ@github.com/containers-ai/api/alameda_api/v1alpha1/datahub/eventsb\x06proto3'
  ,
  # The two imported .proto files this descriptor depends on.
  dependencies=[alameda__api_dot_v1alpha1_dot_datahub_dot_events_dot_types__pb2.DESCRIPTOR,google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,])
# Descriptor for the Event message: ten fields (numbers 1-10).  The
# message/enum-typed fields are linked to their target descriptors further
# below (fields_by_name assignments).  Generated code -- do not hand-edit.
_EVENT = _descriptor.Descriptor(
  name='Event',
  full_name='containersai.alameda.v1alpha1.datahub.events.Event',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    # Field 1 "time": message type, linked below to google.protobuf.Timestamp
    _descriptor.FieldDescriptor(
      name='time', full_name='containersai.alameda.v1alpha1.datahub.events.Event.time', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    # Field 2 "id": string
    _descriptor.FieldDescriptor(
      name='id', full_name='containersai.alameda.v1alpha1.datahub.events.Event.id', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    # Field 3 "cluster_id": string
    _descriptor.FieldDescriptor(
      name='cluster_id', full_name='containersai.alameda.v1alpha1.datahub.events.Event.cluster_id', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    # Field 4 "source": message type, linked below to EventSource
    _descriptor.FieldDescriptor(
      name='source', full_name='containersai.alameda.v1alpha1.datahub.events.Event.source', index=3,
      number=4, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    # Field 5 "type": enum, linked below to EventType
    _descriptor.FieldDescriptor(
      name='type', full_name='containersai.alameda.v1alpha1.datahub.events.Event.type', index=4,
      number=5, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    # Field 6 "version": enum, linked below to EventVersion
    _descriptor.FieldDescriptor(
      name='version', full_name='containersai.alameda.v1alpha1.datahub.events.Event.version', index=5,
      number=6, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    # Field 7 "level": enum, linked below to EventLevel
    _descriptor.FieldDescriptor(
      name='level', full_name='containersai.alameda.v1alpha1.datahub.events.Event.level', index=6,
      number=7, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    # Field 8 "subject": message type, linked below to K8SObjectReference
    _descriptor.FieldDescriptor(
      name='subject', full_name='containersai.alameda.v1alpha1.datahub.events.Event.subject', index=7,
      number=8, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    # Field 9 "message": string
    _descriptor.FieldDescriptor(
      name='message', full_name='containersai.alameda.v1alpha1.datahub.events.Event.message', index=8,
      number=9, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    # Field 10 "data": string
    _descriptor.FieldDescriptor(
      name='data', full_name='containersai.alameda.v1alpha1.datahub.events.Event.data', index=9,
      number=10, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  # Byte offsets of this message definition within DESCRIPTOR.serialized_pb.
  serialized_start=181,
  serialized_end=672,
)
# Resolve the message- and enum-typed fields of Event to the descriptors
# they reference in types.proto and google/protobuf/timestamp.proto.
_EVENT.fields_by_name['time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_EVENT.fields_by_name['source'].message_type = alameda__api_dot_v1alpha1_dot_datahub_dot_events_dot_types__pb2._EVENTSOURCE
_EVENT.fields_by_name['type'].enum_type = alameda__api_dot_v1alpha1_dot_datahub_dot_events_dot_types__pb2._EVENTTYPE
_EVENT.fields_by_name['version'].enum_type = alameda__api_dot_v1alpha1_dot_datahub_dot_events_dot_types__pb2._EVENTVERSION
_EVENT.fields_by_name['level'].enum_type = alameda__api_dot_v1alpha1_dot_datahub_dot_events_dot_types__pb2._EVENTLEVEL
_EVENT.fields_by_name['subject'].message_type = alameda__api_dot_v1alpha1_dot_datahub_dot_events_dot_types__pb2._K8SOBJECTREFERENCE
DESCRIPTOR.message_types_by_name['Event'] = _EVENT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Concrete Event message class built from the descriptor via the protobuf
# reflection machinery, then registered with the default symbol database.
Event = _reflection.GeneratedProtocolMessageType('Event', (_message.Message,), {
  'DESCRIPTOR' : _EVENT,
  '__module__' : 'alameda_api.v1alpha1.datahub.events.events_pb2'
  # @@protoc_insertion_point(class_scope:containersai.alameda.v1alpha1.datahub.events.Event)
  })
_sym_db.RegisterMessage(Event)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 53.732394 | 1,022 | 0.777588 |
acebf2efa4aed2edb895327fbe618c075485c0a5 | 4,485 | py | Python | setup.py | damienmg/OctoPrint-ControlBox | b91b5eba60741baceccf34aa134cc9f8244c278c | [
"Apache-2.0"
] | null | null | null | setup.py | damienmg/OctoPrint-ControlBox | b91b5eba60741baceccf34aa134cc9f8244c278c | [
"Apache-2.0"
] | null | null | null | setup.py | damienmg/OctoPrint-ControlBox | b91b5eba60741baceccf34aa134cc9f8244c278c | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
########################################################################################################################
### Do not forget to adjust the following variables to your own plugin.

# The plugin's identifier, has to be unique
plugin_identifier = "control_box"

# The plugin's python package, should be "octoprint_<plugin identifier>", has to be unique
plugin_package = "octoprint_control_box"

# The plugin's human readable name. Can be overwritten within OctoPrint's internal data via __plugin_name__ in the
# plugin module
plugin_name = "OctoPrint-ControlBox"

# The plugin's version. Can be overwritten within OctoPrint's internal data via __plugin_version__ in the plugin module
plugin_version = "0.1.0"

# The plugin's description. Can be overwritten within OctoPrint's internal data via __plugin_description__ in the plugin
# module
plugin_description = """A plugin to control various relay switches from the Raspberry Pi. Linked to my own control box."""

# The plugin's author. Can be overwritten within OctoPrint's internal data via __plugin_author__ in the plugin module
plugin_author = "Damien Martin-Guillerez"

# The plugin's author's mail address.
plugin_author_email = "damien.martin.guillerez@gmail.com"

# The plugin's homepage URL. Can be overwritten within OctoPrint's internal data via __plugin_url__ in the plugin module
plugin_url = "https://github.com/damienmg/OctoPrint-ControlBox"

# The plugin's license. Can be overwritten within OctoPrint's internal data via __plugin_license__ in the plugin module
plugin_license = "Apache 2.0"

# Any additional requirements besides OctoPrint should be listed here
plugin_requires = [
    "pyusb>=1.0.0",
    "RPi.GPIO",
]

### --------------------------------------------------------------------------------------------------------------------
### More advanced options that you usually shouldn't have to touch follow after this point
### --------------------------------------------------------------------------------------------------------------------

# Additional package data to install for this plugin. The subfolders "templates", "static" and "translations" will
# already be installed automatically if they exist. Note that if you add something here you'll also need to update
# MANIFEST.in to match to ensure that python setup.py sdist produces a source distribution that contains all your
# files. This is sadly due to how python's setup.py works, see also http://stackoverflow.com/a/14159430/2028598
plugin_additional_data = []

# Any additional python packages you need to install with your plugin that are not contained in <plugin_package>.*
plugin_additional_packages = []

# Any python packages within <plugin_package>.* you do NOT want to install with your plugin
plugin_ignored_packages = []

# Additional parameters for the call to setuptools.setup. If your plugin wants to register additional entry points,
# define dependency links or other things like that, this is the place to go. Will be merged recursively with the
# default setup parameters as provided by octoprint_setuptools.create_plugin_setup_parameters using
# octoprint.util.dict_merge.
#
# Example:
#     plugin_requires = ["someDependency==dev"]
#     additional_setup_parameters = {"dependency_links": ["https://github.com/someUser/someRepo/archive/master.zip#egg=someDependency-dev"]}
additional_setup_parameters = {}

########################################################################################################################

from setuptools import setup

# Catch only the failed import; a bare except would also swallow
# SystemExit/KeyboardInterrupt and hide unrelated errors.
try:
    import octoprint_setuptools
except ImportError:
    print("Could not import OctoPrint's setuptools, are you sure you are running that under "
          "the same python installation that OctoPrint is installed under?")
    import sys
    sys.exit(-1)

# Build the standard OctoPrint plugin setup parameters from the
# configuration variables above.
setup_parameters = octoprint_setuptools.create_plugin_setup_parameters(
    identifier=plugin_identifier,
    package=plugin_package,
    name=plugin_name,
    version=plugin_version,
    description=plugin_description,
    author=plugin_author,
    mail=plugin_author_email,
    url=plugin_url,
    license=plugin_license,
    requires=plugin_requires,
    additional_packages=plugin_additional_packages,
    ignored_packages=plugin_ignored_packages,
    additional_data=plugin_additional_data
)

# Merge in any extra parameters (empty dict is falsy, so this is skipped
# by default).
if additional_setup_parameters:
    from octoprint.util import dict_merge
    setup_parameters = dict_merge(setup_parameters, additional_setup_parameters)

setup(**setup_parameters)
acebf31f6734e9d311ada3ebb6d00107e1ca40cc | 127 | py | Python | pkg/entity/_http.py | shrohilla/kafka-func-core-tool-test | 110815f74bd035758542526c74ebaa55ce51046f | [
"Apache-2.0"
] | null | null | null | pkg/entity/_http.py | shrohilla/kafka-func-core-tool-test | 110815f74bd035758542526c74ebaa55ce51046f | [
"Apache-2.0"
] | null | null | null | pkg/entity/_http.py | shrohilla/kafka-func-core-tool-test | 110815f74bd035758542526c74ebaa55ce51046f | [
"Apache-2.0"
] | null | null | null | from dataclasses import dataclass
@dataclass
class HttpRequest:
    """Plain data container describing an HTTP request.

    No validation, encoding or sending happens here; callers populate the
    fields and interpret them themselves.
    """
    # Target URL of the request
    url: str
    # HTTP method name (e.g. "GET", "POST") -- casing not enforced here
    method: str
    # Request body payload; structure depends on the caller -- TODO confirm
    body: dict
    # Request parameters; key/value semantics depend on the caller
    params: dict
| 12.7 | 33 | 0.692913 |
acebf6a7a632edab4f0641a4e6973c4d9686fc09 | 12,659 | py | Python | python-packages/sra_client/src/zero_ex/sra_client/models/relayer_api_order_config_payload_schema.py | Beovolytics-Inc/0x-monorepo | 8a20cc682cd620cc5bed3df9db7b654aa6e02dbf | [
"Apache-2.0"
] | 1,075 | 2018-03-04T13:18:52.000Z | 2022-03-29T06:33:59.000Z | python-packages/sra_client/src/zero_ex/sra_client/models/relayer_api_order_config_payload_schema.py | Beovolytics-Inc/0x-monorepo | 8a20cc682cd620cc5bed3df9db7b654aa6e02dbf | [
"Apache-2.0"
] | 1,873 | 2018-03-03T14:37:53.000Z | 2021-06-26T03:02:12.000Z | python-packages/sra_client/src/zero_ex/sra_client/models/relayer_api_order_config_payload_schema.py | Beovolytics-Inc/0x-monorepo | 8a20cc682cd620cc5bed3df9db7b654aa6e02dbf | [
"Apache-2.0"
] | 500 | 2018-03-03T20:39:43.000Z | 2022-03-21T21:01:55.000Z | # coding: utf-8
import pprint
import re # noqa: F401
import six
class RelayerApiOrderConfigPayloadSchema(object):
    """OpenAPI model for a relayer order-config request payload.

    Every attribute is a string validated against a regular expression
    (hex addresses, hex asset data, or unsigned decimal amounts) and is
    exposed through a property backed by a private field.  Setting any
    attribute to ``None`` or to a non-matching string raises ``ValueError``.

    Attributes:
        openapi_types (dict): The key is attribute name
            and the value is attribute type.
        attribute_map (dict): The key is attribute name
            and the value is json key in definition.
    """

    # Validation patterns shared by the property setters below.
    _ADDRESS_PATTERN = r"^0x[0-9a-f]{40}$"
    _AMOUNT_PATTERN = r"^\d+$"
    _ASSET_DATA_PATTERN = r"^0x(([0-9a-f][0-9a-f])+)?$"

    openapi_types = {
        "maker_address": "str",
        "taker_address": "str",
        "maker_asset_amount": "str",
        "taker_asset_amount": "str",
        "maker_asset_data": "str",
        "taker_asset_data": "str",
        "exchange_address": "str",
        "expiration_time_seconds": "str",
    }

    attribute_map = {
        "maker_address": "makerAddress",
        "taker_address": "takerAddress",
        "maker_asset_amount": "makerAssetAmount",
        "taker_asset_amount": "takerAssetAmount",
        "maker_asset_data": "makerAssetData",
        "taker_asset_data": "takerAssetData",
        "exchange_address": "exchangeAddress",
        "expiration_time_seconds": "expirationTimeSeconds",
    }

    @staticmethod
    def _checked(name, value, pattern):
        """Validate *value* for attribute *name* against *pattern*.

        Raises ValueError (with the same messages as the generated
        per-attribute setters used) if value is None or does not match;
        otherwise returns value unchanged.
        """
        if value is None:
            raise ValueError(
                "Invalid value for `%s`, must not be `None`" % name
            )  # noqa: E501
        if not re.search(pattern, value):
            raise ValueError(
                "Invalid value for `%s`, must be a follow pattern or equal to `/%s/`"  # noqa: E501
                % (name, pattern)
            )
        return value

    def __init__(
        self,
        maker_address=None,
        taker_address=None,
        maker_asset_amount=None,
        taker_asset_amount=None,
        maker_asset_data=None,
        taker_asset_data=None,
        exchange_address=None,
        expiration_time_seconds=None,
    ):  # noqa: E501
        """RelayerApiOrderConfigPayloadSchema - a model defined in OpenAPI

        All arguments are effectively required: leaving one as None makes
        the corresponding property setter raise ValueError.
        """
        self.discriminator = None
        # Assign through the property setters so every value is validated.
        self.maker_address = maker_address
        self.taker_address = taker_address
        self.maker_asset_amount = maker_asset_amount
        self.taker_asset_amount = taker_asset_amount
        self.maker_asset_data = maker_asset_data
        self.taker_asset_data = taker_asset_data
        self.exchange_address = exchange_address
        self.expiration_time_seconds = expiration_time_seconds

    @property
    def maker_address(self):
        """Maker address (lowercase 0x-prefixed 20-byte hex string)."""
        return self._maker_address

    @maker_address.setter
    def maker_address(self, maker_address):
        """Set maker_address; must match the address pattern."""
        self._maker_address = self._checked(
            "maker_address", maker_address, self._ADDRESS_PATTERN)

    @property
    def taker_address(self):
        """Taker address (lowercase 0x-prefixed 20-byte hex string)."""
        return self._taker_address

    @taker_address.setter
    def taker_address(self, taker_address):
        """Set taker_address; must match the address pattern."""
        self._taker_address = self._checked(
            "taker_address", taker_address, self._ADDRESS_PATTERN)

    @property
    def maker_asset_amount(self):
        """Maker asset amount (unsigned decimal string)."""
        return self._maker_asset_amount

    @maker_asset_amount.setter
    def maker_asset_amount(self, maker_asset_amount):
        """Set maker_asset_amount; must be an unsigned decimal string."""
        self._maker_asset_amount = self._checked(
            "maker_asset_amount", maker_asset_amount, self._AMOUNT_PATTERN)

    @property
    def taker_asset_amount(self):
        """Taker asset amount (unsigned decimal string)."""
        return self._taker_asset_amount

    @taker_asset_amount.setter
    def taker_asset_amount(self, taker_asset_amount):
        """Set taker_asset_amount; must be an unsigned decimal string."""
        self._taker_asset_amount = self._checked(
            "taker_asset_amount", taker_asset_amount, self._AMOUNT_PATTERN)

    @property
    def maker_asset_data(self):
        """Maker asset data (0x-prefixed even-length hex string)."""
        return self._maker_asset_data

    @maker_asset_data.setter
    def maker_asset_data(self, maker_asset_data):
        """Set maker_asset_data; must match the asset-data pattern."""
        self._maker_asset_data = self._checked(
            "maker_asset_data", maker_asset_data, self._ASSET_DATA_PATTERN)

    @property
    def taker_asset_data(self):
        """Taker asset data (0x-prefixed even-length hex string)."""
        return self._taker_asset_data

    @taker_asset_data.setter
    def taker_asset_data(self, taker_asset_data):
        """Set taker_asset_data; must match the asset-data pattern."""
        self._taker_asset_data = self._checked(
            "taker_asset_data", taker_asset_data, self._ASSET_DATA_PATTERN)

    @property
    def exchange_address(self):
        """Exchange contract address (lowercase 0x-prefixed hex string)."""
        return self._exchange_address

    @exchange_address.setter
    def exchange_address(self, exchange_address):
        """Set exchange_address; must match the address pattern."""
        self._exchange_address = self._checked(
            "exchange_address", exchange_address, self._ADDRESS_PATTERN)

    @property
    def expiration_time_seconds(self):
        """Order expiration time in seconds (unsigned decimal string)."""
        return self._expiration_time_seconds

    @expiration_time_seconds.setter
    def expiration_time_seconds(self, expiration_time_seconds):
        """Set expiration_time_seconds; must be an unsigned decimal string."""
        self._expiration_time_seconds = self._checked(
            "expiration_time_seconds", expiration_time_seconds,
            self._AMOUNT_PATTERN)

    def to_dict(self):
        """Returns the model properties as a dict"""
        # Same traversal as the generated original, without the `six`
        # dependency: dict.items() works on both Python 2 and 3.
        result = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    x.to_dict() if hasattr(x, "to_dict") else x
                    for x in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    k: v.to_dict() if hasattr(v, "to_dict") else v
                    for k, v in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, RelayerApiOrderConfigPayloadSchema):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 34.493188 | 124 | 0.61711 |
acebf6d06d74eb1829973ec98c0d7857adf4f063 | 4,521 | py | Python | GPL/traffic_profiles/trex/trex-stl-3n-ethip4-ip4dst60000-2cnf.py | nidhyanandhan/csit | 2156583b4e66f2c3c35903c854b1823b76a4e9a6 | [
"Apache-2.0"
] | null | null | null | GPL/traffic_profiles/trex/trex-stl-3n-ethip4-ip4dst60000-2cnf.py | nidhyanandhan/csit | 2156583b4e66f2c3c35903c854b1823b76a4e9a6 | [
"Apache-2.0"
] | null | null | null | GPL/traffic_profiles/trex/trex-stl-3n-ethip4-ip4dst60000-2cnf.py | nidhyanandhan/csit | 2156583b4e66f2c3c35903c854b1823b76a4e9a6 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2020 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Stream profile for T-rex traffic generator.
Stream profile:
- Two streams sent in directions 0 --> 1 and 1 --> 0 at the same time.
- Packet: ETH / IP /
- Direction 0 --> 1:
- Source IP address range: 10.0.0.1
- Destination IP address range: 20.0.0.0 - 20.0.234.95
- Direction 1 --> 0:
- Source IP address range: 20.0.0.1
- Destination IP address range: 10.0.0.0 - 10.0.234.95
"""
from trex.stl.api import *
from profile_trex_stateless_base_class import TrafficStreamsBaseClass
class TrafficStreams(TrafficStreamsBaseClass):
    """Stream profile."""

    def __init__(self):
        """Initialization and setting of streams' parameters."""
        # NOTE(review): super() is given the *base* class, so by MRO this
        # resolves past TrafficStreamsBaseClass (to object.__init__) and
        # the base initializer is skipped -- confirm this is intentional.
        super(TrafficStreamsBaseClass, self).__init__()

        # Destination MACs; the flow-variable programs in define_packets()
        # rewrite their last byte (pkt_offset=5) between 0 and 1.
        self.p1_dst_start_mac = u"02:02:00:00:12:00"
        self.p2_dst_start_mac = u"02:02:00:00:02:00"

        # IPs used in packet headers.
        # Direction 0 --> 1: fixed source, destination swept over a range.
        self.p1_src_start_ip = u"10.0.0.1"
        self.p1_dst_start_ip = u"20.0.0.0"
        self.p1_dst_end_ip = u"20.0.234.95"

        # Direction 1 --> 0: fixed source, destination swept over a range.
        self.p2_src_start_ip = u"20.0.0.1"
        self.p2_dst_start_ip = u"10.0.0.0"
        self.p2_dst_end_ip = u"10.0.234.95"

    def define_packets(self):
        """Defines the packets to be sent from the traffic generator.

        Packet definition: | ETH | IP |

        :returns: Packets to be sent from the traffic generator.
        :rtype: tuple
        """
        # Direction 0 --> 1
        # proto=61 -- presumably chosen so no L4 header follows the IP
        # header (only ETH/IP are exercised); confirm against CSIT docs.
        base_pkt_a = (
            Ether(
                dst=self.p1_dst_start_mac
            ) /
            IP(
                src=self.p1_src_start_ip,
                dst=self.p1_dst_start_ip,
                proto=61
            )
        )
        # Direction 1 --> 0
        base_pkt_b = (
            Ether(
                dst=self.p2_dst_start_mac
            ) /
            IP(
                src=self.p2_src_start_ip,
                dst=self.p2_dst_start_ip,
                proto=61
            )
        )
        # Direction 0 --> 1
        vm1 = STLScVmRaw(
            [
                # Alternate the byte at offset 5 (last byte of the Ethernet
                # destination MAC) between 0 and 1.
                STLVmFlowVar(
                    name=u"mac_dst",
                    min_value=0,
                    max_value=1,
                    size=1,
                    op=u"inc"
                ),
                STLVmWrFlowVar(
                    fv_name=u"mac_dst",
                    pkt_offset=5
                ),
                # Sweep the IPv4 destination across the configured range.
                STLVmFlowVar(
                    name=u"dst",
                    min_value=self.p1_dst_start_ip,
                    max_value=self.p1_dst_end_ip,
                    size=4,
                    op=u"inc"
                ),
                STLVmWrFlowVar(
                    fv_name=u"dst",
                    pkt_offset=u"IP.dst"
                ),
                # Recompute the IPv4 header checksum after the rewrites.
                STLVmFixIpv4(
                    offset=u"IP"
                )
            ]
        )
        # Direction 1 --> 0 (same program, using the p2 ranges)
        vm2 = STLScVmRaw(
            [
                STLVmFlowVar(
                    name=u"mac_dst",
                    min_value=0,
                    max_value=1,
                    size=1,
                    op=u"inc"
                ),
                STLVmWrFlowVar(
                    fv_name=u"mac_dst",
                    pkt_offset=5
                ),
                STLVmFlowVar(
                    name=u"dst",
                    min_value=self.p2_dst_start_ip,
                    max_value=self.p2_dst_end_ip,
                    size=4,
                    op=u"inc"
                ),
                STLVmWrFlowVar(
                    fv_name=u"dst",
                    pkt_offset=u"IP.dst"
                ),
                STLVmFixIpv4(
                    offset=u"IP"
                )
            ]
        )

        return base_pkt_a, base_pkt_b, vm1, vm2
def register():
    """Register this traffic profile to T-rex.

    Do not change this function.

    :return: Traffic streams.
    :rtype: Object
    """
    # Module-level hook invoked by the T-Rex profile loader -- presumably
    # looked up by this exact name; keep name and signature unchanged.
    return TrafficStreams()
| 28.613924 | 74 | 0.490157 |
acebf7b6f9a01417d215ed4ebb67be3f9f43c70d | 4,703 | py | Python | book/tutorials/geospatial/ATL06_to_dataframe.py | tsutterley/website2022 | 95e31e5989c387c734c1b4ee91e3c5c99e0d8ef7 | [
"MIT"
] | 8 | 2022-02-01T16:54:29.000Z | 2022-03-22T18:09:31.000Z | book/tutorials/geospatial/ATL06_to_dataframe.py | tsutterley/website2022 | 95e31e5989c387c734c1b4ee91e3c5c99e0d8ef7 | [
"MIT"
] | 99 | 2022-01-27T22:01:05.000Z | 2022-03-31T19:42:28.000Z | book/tutorials/geospatial/ATL06_to_dataframe.py | tsutterley/website2022 | 95e31e5989c387c734c1b4ee91e3c5c99e0d8ef7 | [
"MIT"
] | 25 | 2022-02-02T00:58:27.000Z | 2022-03-24T20:59:57.000Z | #!/usr/bin/env python
u"""
ATL06_to_dataframe.py (03/2022)
Read ICESat-2 ATL06 (Land Ice Along-Track Height Product) data files
derived from https://github.com/tsutterley/read-ICESat-2/
PYTHON DEPENDENCIES:
numpy: Scientific Computing Tools For Python
https://numpy.org
https://numpy.org/doc/stable/user/numpy-for-matlab-users.html
h5py: Python interface for Hierarchal Data Format 5 (HDF5)
https://www.h5py.org/
geopandas: Python tools for geographic data
http://geopandas.readthedocs.io/
"""
from __future__ import print_function
import geopandas as gpd
import h5py
import io
import logging
import numpy as np
import os
import re
# default beams to read from ATL06
DEFAULT_BEAMS = ['gt1l','gt1r','gt2l','gt2r','gt3l','gt3r']
# default groups to read from ATL06
DEFAULT_GROUPS = []
DEFAULT_GROUPS.append('bias_correction')
DEFAULT_GROUPS.append('dem')
DEFAULT_GROUPS.append('fit_statistics')
DEFAULT_GROUPS.append('geophysical')
DEFAULT_GROUPS.append('ground_track')
# PURPOSE: read ICESat-2 ATL06 HDF5 data files
def ATL06_to_dataframe(FILENAME,
    beams=DEFAULT_BEAMS,
    groups=DEFAULT_GROUPS,
    **kwargs):
    """
    Reads ICESat-2 ATL06 (Land Ice Along-Track Height Product) data files

    Arguments
    ---------
    FILENAME: full path to ATL06 file (or an already-open file-like object)

    Keyword Arguments
    -----------------
    beams: ATLAS beam groups to read
    groups: land_ice_segments sub-groups to flatten into columns
    crs: Coordinate Reference System for dataframe

    Returns
    -------
    gdf: geodataframe with ATL06 variables
    """
    # set default EPSG
    kwargs.setdefault('crs', 'EPSG:4326')
    # Open the HDF5 file for reading (file-like objects pass straight through)
    if isinstance(FILENAME, io.IOBase):
        fileID = h5py.File(FILENAME, 'r')
    else:
        fileID = h5py.File(os.path.expanduser(FILENAME), 'r')
    # Output HDF5 file information
    logging.info(fileID.filename)
    logging.info(list(fileID.keys()))
    # output GeoDataFrame for ICESat-2 ATL06 variables
    gdf = gpd.GeoDataFrame(geometry=gpd.points_from_xy([], []), crs=kwargs['crs'])
    # find each requested beam group that actually contains land ice data
    IS2_atl06_beams = []
    for gtx in [k for k in fileID.keys() if re.match(r'gt\d[lr]', k) and k in beams]:
        # check if subsetted beam contains land ice data
        try:
            fileID[gtx]['land_ice_segments']['segment_id']
        except KeyError:
            pass
        else:
            IS2_atl06_beams.append(gtx)
    # read each input beam within the file
    for gtx in IS2_atl06_beams:
        # get each HDF5 variable in ICESat-2 land_ice_segments Group
        columns = {}
        for key, val in fileID[gtx]['land_ice_segments'].items():
            if isinstance(val, h5py.Dataset):
                # BUGFIX: test for the attribute's presence rather than its
                # truthiness -- a fill value of 0/0.0 is falsy and would
                # previously skip the NaN masking entirely.
                if '_FillValue' in val.attrs:
                    columns[key] = val[:].astype('f')
                    columns[key][val[:] == val.fillvalue] = np.nan
                else:
                    columns[key] = val[:]
            elif isinstance(val, h5py.Group) and (val.name.split('/')[-1] in groups):
                # flatten the requested sub-groups into top-level columns
                for k, v in val.items():
                    if '_FillValue' in v.attrs:
                        columns[k] = v[:].astype('f')
                        columns[k][v[:] == v.fillvalue] = np.nan
                    else:
                        columns[k] = v[:]
        # number of segments
        n_seg = fileID[gtx]['land_ice_segments']['h_li'].size
        # generate derived variables (constant per granule/beam)
        columns['rgt'] = np.full((n_seg), fileID['orbit_info']['rgt'][0])
        columns['cycle_number'] = np.full((n_seg), fileID['orbit_info']['cycle_number'][0])
        BP, LR = re.findall(r'gt(\d)([lr])', gtx).pop()
        columns['BP'] = np.full((n_seg), int(BP))
        columns['LR'] = [LR] * n_seg
        beam_type = fileID[gtx].attrs['atlas_beam_type'].decode('utf-8')
        columns['beam_type'] = [beam_type] * n_seg
        columns['spot'] = np.full((n_seg), fileID[gtx].attrs['atlas_spot_number'])
        # convert delta_time (seconds since the ATLAS SDP epoch) to datetimes
        delta_time = (columns['delta_time'] * 1e9).astype('timedelta64[ns]')
        atlas_sdp_epoch = np.datetime64('2018-01-01T00:00:00Z')
        columns['time'] = gpd.pd.to_datetime(atlas_sdp_epoch + delta_time)
        # generate geometry column; lon/lat are consumed by the geometry
        geometry = gpd.points_from_xy(columns['longitude'], columns['latitude'])
        del columns['longitude']
        del columns['latitude']
        # create Pandas DataFrame object
        df = gpd.pd.DataFrame(columns)
        # append to GeoDataFrame
        # NOTE(review): DataFrame.append is deprecated/removed in newer
        # pandas; switch to pd.concat when upgrading dependencies.
        gdf = gdf.append(gpd.GeoDataFrame(df, geometry=geometry, crs=kwargs['crs']))
    # Closing the HDF5 file
    fileID.close()
    # Return the geodataframe
    return gdf
| 35.628788 | 90 | 0.623644 |
acebf7e79a1827c9f6a235d65038cc672c5acf30 | 22,603 | py | Python | electrum/bitcoin.py | GZR0/electrum | ab24613a858897b9ff426b648c78512c35a0c8ef | [
"MIT"
] | null | null | null | electrum/bitcoin.py | GZR0/electrum | ab24613a858897b9ff426b648c78512c35a0c8ef | [
"MIT"
] | 4 | 2019-10-05T18:59:03.000Z | 2020-09-15T19:53:00.000Z | electrum/bitcoin.py | GZR0/electrum | ab24613a858897b9ff426b648c78512c35a0c8ef | [
"MIT"
] | 3 | 2019-09-23T13:07:26.000Z | 2020-01-29T16:13:38.000Z | # -*- coding: utf-8 -*-
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import hashlib
from typing import List, Tuple, TYPE_CHECKING, Optional, Union
import enum
from enum import IntEnum, Enum
from .util import bfh, bh2u, BitcoinException, assert_bytes, to_bytes, inv_dict
from . import version
from . import segwit_addr
from . import constants
from . import ecc
from .crypto import sha256d, sha256, hash_160, hmac_oneshot
if TYPE_CHECKING:
from .network import Network
################################## transactions
COINBASE_MATURITY = 20
COIN = 100000000
TOTAL_COIN_SUPPLY_LIMIT_IN_BTC = 6268656716
NLOCKTIME_MIN = 0
NLOCKTIME_BLOCKHEIGHT_MAX = 500_000_000 - 1
NLOCKTIME_MAX = 2 ** 32 - 1
# supported types of transaction outputs
# TODO kill these with fire
TYPE_ADDRESS = 0
TYPE_PUBKEY = 1
TYPE_SCRIPT = 2
class opcodes(IntEnum):
# push value
OP_0 = 0x00
OP_FALSE = OP_0
OP_PUSHDATA1 = 0x4c
OP_PUSHDATA2 = 0x4d
OP_PUSHDATA4 = 0x4e
OP_1NEGATE = 0x4f
OP_RESERVED = 0x50
OP_1 = 0x51
OP_TRUE = OP_1
OP_2 = 0x52
OP_3 = 0x53
OP_4 = 0x54
OP_5 = 0x55
OP_6 = 0x56
OP_7 = 0x57
OP_8 = 0x58
OP_9 = 0x59
OP_10 = 0x5a
OP_11 = 0x5b
OP_12 = 0x5c
OP_13 = 0x5d
OP_14 = 0x5e
OP_15 = 0x5f
OP_16 = 0x60
# control
OP_NOP = 0x61
OP_VER = 0x62
OP_IF = 0x63
OP_NOTIF = 0x64
OP_VERIF = 0x65
OP_VERNOTIF = 0x66
OP_ELSE = 0x67
OP_ENDIF = 0x68
OP_VERIFY = 0x69
OP_RETURN = 0x6a
# stack ops
OP_TOALTSTACK = 0x6b
OP_FROMALTSTACK = 0x6c
OP_2DROP = 0x6d
OP_2DUP = 0x6e
OP_3DUP = 0x6f
OP_2OVER = 0x70
OP_2ROT = 0x71
OP_2SWAP = 0x72
OP_IFDUP = 0x73
OP_DEPTH = 0x74
OP_DROP = 0x75
OP_DUP = 0x76
OP_NIP = 0x77
OP_OVER = 0x78
OP_PICK = 0x79
OP_ROLL = 0x7a
OP_ROT = 0x7b
OP_SWAP = 0x7c
OP_TUCK = 0x7d
# splice ops
OP_CAT = 0x7e
OP_SUBSTR = 0x7f
OP_LEFT = 0x80
OP_RIGHT = 0x81
OP_SIZE = 0x82
# bit logic
OP_INVERT = 0x83
OP_AND = 0x84
OP_OR = 0x85
OP_XOR = 0x86
OP_EQUAL = 0x87
OP_EQUALVERIFY = 0x88
OP_RESERVED1 = 0x89
OP_RESERVED2 = 0x8a
# numeric
OP_1ADD = 0x8b
OP_1SUB = 0x8c
OP_2MUL = 0x8d
OP_2DIV = 0x8e
OP_NEGATE = 0x8f
OP_ABS = 0x90
OP_NOT = 0x91
OP_0NOTEQUAL = 0x92
OP_ADD = 0x93
OP_SUB = 0x94
OP_MUL = 0x95
OP_DIV = 0x96
OP_MOD = 0x97
OP_LSHIFT = 0x98
OP_RSHIFT = 0x99
OP_BOOLAND = 0x9a
OP_BOOLOR = 0x9b
OP_NUMEQUAL = 0x9c
OP_NUMEQUALVERIFY = 0x9d
OP_NUMNOTEQUAL = 0x9e
OP_LESSTHAN = 0x9f
OP_GREATERTHAN = 0xa0
OP_LESSTHANOREQUAL = 0xa1
OP_GREATERTHANOREQUAL = 0xa2
OP_MIN = 0xa3
OP_MAX = 0xa4
OP_WITHIN = 0xa5
# crypto
OP_RIPEMD160 = 0xa6
OP_SHA1 = 0xa7
OP_SHA256 = 0xa8
OP_HASH160 = 0xa9
OP_HASH256 = 0xaa
OP_CODESEPARATOR = 0xab
OP_CHECKSIG = 0xac
OP_CHECKSIGVERIFY = 0xad
OP_CHECKMULTISIG = 0xae
OP_CHECKMULTISIGVERIFY = 0xaf
# expansion
OP_NOP1 = 0xb0
OP_CHECKLOCKTIMEVERIFY = 0xb1
OP_NOP2 = OP_CHECKLOCKTIMEVERIFY
OP_CHECKSEQUENCEVERIFY = 0xb2
OP_NOP3 = OP_CHECKSEQUENCEVERIFY
OP_NOP4 = 0xb3
OP_NOP5 = 0xb4
OP_NOP6 = 0xb5
OP_NOP7 = 0xb6
OP_NOP8 = 0xb7
OP_NOP9 = 0xb8
OP_NOP10 = 0xb9
OP_INVALIDOPCODE = 0xff
def hex(self) -> str:
return bytes([self]).hex()
def rev_hex(s: str) -> str:
    """Reverse the byte order of a hex string (hex -> hex)."""
    raw = bfh(s)
    return bh2u(bytes(reversed(raw)))
def int_to_hex(i: int, length: int=1) -> str:
    """Converts int to little-endian hex string.

    `length` is the number of bytes available; negative values are
    encoded in two's complement within that width.

    Raises TypeError for non-int input and OverflowError when the value
    does not fit in `length` bytes.
    """
    if not isinstance(i, int):
        raise TypeError('{} instead of int'.format(i))
    range_size = pow(256, length)
    # accept signed values down to the most negative two's-complement
    # value representable in `length` bytes
    if i < -(range_size//2) or i >= range_size:
        raise OverflowError('cannot convert int {} to hex ({} bytes)'.format(i, length))
    if i < 0:
        # two's complement
        i = range_size + i
    # Python 2 longs rendered with a trailing 'L'; stripping it is a
    # harmless no-op on Python 3
    s = hex(i)[2:].rstrip('L')
    # zero-pad to the full width before reversing byte order
    s = "0"*(2*length - len(s)) + s
    return rev_hex(s)
def script_num_to_hex(i: int) -> str:
    """See CScriptNum in Bitcoin Core.
    Encodes an integer as hex, to be used in script.

    The encoding is little-endian sign-magnitude: magnitude bytes are
    emitted least-significant first and the sign lives in the high bit
    of the final byte (an extra byte is appended when that bit is
    already occupied by the magnitude).

    ported from https://github.com/bitcoin/bitcoin/blob/8cbc5c4be4be22aca228074f087a374a7ec38be8/src/script/script.h#L326
    """
    if i == 0:
        # zero encodes as the empty string
        return ''
    result = bytearray()
    neg = i < 0
    absvalue = abs(i)
    # emit magnitude bytes, least significant first
    while absvalue > 0:
        result.append(absvalue & 0xff)
        absvalue >>= 8
    if result[-1] & 0x80:
        # the top magnitude byte already uses the sign bit, so the sign
        # needs a byte of its own
        result.append(0x80 if neg else 0x00)
    elif neg:
        # set the sign bit on the top magnitude byte
        result[-1] |= 0x80
    return bh2u(result)
def var_int(i: int) -> str:
    """Serialize a non-negative int as a Bitcoin "CompactSize" varint (hex).

    https://en.bitcoin.it/wiki/Protocol_specification#Variable_length_integer
    https://github.com/bitcoin/bitcoin/blob/efe1ee0d8d7f82150789f1f6840f139289628a2b/src/serialize.h#L247
    """
    assert i >= 0, i
    if i < 0xfd:
        return int_to_hex(i)
    if i <= 0xffff:
        return "fd" + int_to_hex(i, 2)
    if i <= 0xffffffff:
        return "fe" + int_to_hex(i, 4)
    return "ff" + int_to_hex(i, 8)
def witness_push(item: str) -> str:
    """Return `item` in the form it appears inside a tx witness.

    hex -> hex: a CompactSize byte-length prefix followed by the data.
    """
    num_bytes = len(item) // 2
    return var_int(num_bytes) + item
def _op_push(i: int) -> str:
    """Return the hex push-opcode prefix for a push of `i` bytes."""
    if i < opcodes.OP_PUSHDATA1:
        # lengths below OP_PUSHDATA1 are encoded directly as the opcode byte
        return int_to_hex(i)
    if i <= 0xff:
        return opcodes.OP_PUSHDATA1.hex() + int_to_hex(i, 1)
    if i <= 0xffff:
        return opcodes.OP_PUSHDATA2.hex() + int_to_hex(i, 2)
    return opcodes.OP_PUSHDATA4.hex() + int_to_hex(i, 4)
def push_script(data: str) -> str:
    """Returns pushed data to the script, automatically
    choosing canonical opcodes depending on the length of the data.
    hex -> hex

    Minimal-push rules: empty data (or a single zero byte) becomes OP_0,
    a single byte 1..16 becomes OP_1..OP_16, a single 0x81 byte becomes
    OP_1NEGATE; everything else gets a length-prefixed push via _op_push.

    ported from https://github.com/btcsuite/btcd/blob/fdc2bc867bda6b351191b5872d2da8270df00d13/txscript/scriptbuilder.go#L128
    """
    data = bfh(data)
    data_len = len(data)
    # "small integer" opcodes
    if data_len == 0 or data_len == 1 and data[0] == 0:
        return opcodes.OP_0.hex()
    elif data_len == 1 and data[0] <= 16:
        # OP_1..OP_16 encode the value directly
        return bh2u(bytes([opcodes.OP_1 - 1 + data[0]]))
    elif data_len == 1 and data[0] == 0x81:
        return opcodes.OP_1NEGATE.hex()
    return _op_push(data_len) + bh2u(data)
def add_number_to_script(i: int) -> bytes:
return bfh(push_script(script_num_to_hex(i)))
def relayfee(network: 'Network' = None) -> int:
"""Returns feerate in sat/kbyte."""
from .simple_config import FEERATE_DEFAULT_RELAY, FEERATE_MAX_RELAY
if network and network.relay_fee is not None:
fee = network.relay_fee
else:
fee = FEERATE_DEFAULT_RELAY
# sanity safeguards, as network.relay_fee is coming from a server:
fee = min(fee, FEERATE_MAX_RELAY)
fee = max(fee, FEERATE_DEFAULT_RELAY)
return fee
# see https://github.com/bitcoin/bitcoin/blob/a62f0ed64f8bbbdfe6467ac5ce92ef5b5222d1bd/src/policy/policy.cpp#L14
DUST_LIMIT_DEFAULT_SAT_LEGACY = 546
DUST_LIMIT_DEFAULT_SAT_SEGWIT = 294
def dust_threshold(network: 'Network' = None) -> int:
    """Returns the dust limit in satoshis."""
    # Change <= dust threshold is added to the tx fee
    dust_lim_msat = 182 * 3 * relayfee(network)  # in msat
    # convert msat -> sat, rounding up
    sat, remainder = divmod(dust_lim_msat, 1000)
    return sat + (1 if remainder else 0)
def hash_encode(x: bytes) -> str:
return bh2u(x[::-1])
def hash_decode(x: str) -> bytes:
return bfh(x)[::-1]
############ functions from pywallet #####################
def hash160_to_b58_address(h160: bytes, addrtype: int) -> str:
s = bytes([addrtype]) + h160
s = s + sha256d(s)[0:4]
return base_encode(s, base=58)
def b58_address_to_hash160(addr: str) -> Tuple[int, bytes]:
addr = to_bytes(addr, 'ascii')
_bytes = DecodeBase58Check(addr)
if len(_bytes) != 21:
raise Exception(f'expected 21 payload bytes in base58 address. got: {len(_bytes)}')
return _bytes[0], _bytes[1:21]
def hash160_to_p2pkh(h160: bytes, *, net=None) -> str:
if net is None: net = constants.net
return hash160_to_b58_address(h160, net.ADDRTYPE_P2PKH)
def hash160_to_p2sh(h160: bytes, *, net=None) -> str:
if net is None: net = constants.net
return hash160_to_b58_address(h160, net.ADDRTYPE_P2SH)
def public_key_to_p2pkh(public_key: bytes, *, net=None) -> str:
if net is None: net = constants.net
return hash160_to_p2pkh(hash_160(public_key), net=net)
def hash_to_segwit_addr(h: bytes, witver: int, *, net=None) -> str:
if net is None: net = constants.net
return segwit_addr.encode(net.SEGWIT_HRP, witver, h)
def public_key_to_p2wpkh(public_key: bytes, *, net=None) -> str:
if net is None: net = constants.net
return hash_to_segwit_addr(hash_160(public_key), witver=0, net=net)
def script_to_p2wsh(script: str, *, net=None) -> str:
if net is None: net = constants.net
return hash_to_segwit_addr(sha256(bfh(script)), witver=0, net=net)
def p2wpkh_nested_script(pubkey: str) -> str:
pkh = bh2u(hash_160(bfh(pubkey)))
return '00' + push_script(pkh)
def p2wsh_nested_script(witness_script: str) -> str:
wsh = bh2u(sha256(bfh(witness_script)))
return '00' + push_script(wsh)
def pubkey_to_address(txin_type: str, pubkey: str, *, net=None) -> str:
if net is None: net = constants.net
if txin_type == 'p2pkh':
return public_key_to_p2pkh(bfh(pubkey), net=net)
elif txin_type == 'p2wpkh':
return public_key_to_p2wpkh(bfh(pubkey), net=net)
elif txin_type == 'p2wpkh-p2sh':
scriptSig = p2wpkh_nested_script(pubkey)
return hash160_to_p2sh(hash_160(bfh(scriptSig)), net=net)
else:
raise NotImplementedError(txin_type)
# TODO this method is confusingly named
def redeem_script_to_address(txin_type: str, scriptcode: str, *, net=None) -> str:
if net is None: net = constants.net
if txin_type == 'p2sh':
# given scriptcode is a redeem_script
return hash160_to_p2sh(hash_160(bfh(scriptcode)), net=net)
elif txin_type == 'p2wsh':
# given scriptcode is a witness_script
return script_to_p2wsh(scriptcode, net=net)
elif txin_type == 'p2wsh-p2sh':
# given scriptcode is a witness_script
redeem_script = p2wsh_nested_script(scriptcode)
return hash160_to_p2sh(hash_160(bfh(redeem_script)), net=net)
else:
raise NotImplementedError(txin_type)
def script_to_address(script: str, *, net=None) -> str:
from .transaction import get_address_from_output_script
return get_address_from_output_script(bfh(script), net=net)
def address_to_script(addr: str, *, net=None) -> str:
if net is None: net = constants.net
if not is_address(addr, net=net):
raise BitcoinException(f"invalid bitcoin address: {addr}")
witver, witprog = segwit_addr.decode(net.SEGWIT_HRP, addr)
if witprog is not None:
if not (0 <= witver <= 16):
raise BitcoinException(f'impossible witness version: {witver}')
script = bh2u(add_number_to_script(witver))
script += push_script(bh2u(bytes(witprog)))
return script
addrtype, hash_160_ = b58_address_to_hash160(addr)
if addrtype == net.ADDRTYPE_P2PKH:
script = pubkeyhash_to_p2pkh_script(bh2u(hash_160_))
elif addrtype == net.ADDRTYPE_P2SH:
script = opcodes.OP_HASH160.hex()
script += push_script(bh2u(hash_160_))
script += opcodes.OP_EQUAL.hex()
else:
raise BitcoinException(f'unknown address type: {addrtype}')
return script
class OnchainOutputType(Enum):
"""Opaque types of scriptPubKeys.
In case of p2sh, p2wsh and similar, no knowledge of redeem script, etc.
"""
P2PKH = enum.auto()
P2SH = enum.auto()
WITVER0_P2WPKH = enum.auto()
WITVER0_P2WSH = enum.auto()
def address_to_hash(addr: str, *, net=None) -> Tuple[OnchainOutputType, bytes]:
"""Return (type, pubkey hash / witness program) for an address."""
if net is None: net = constants.net
if not is_address(addr, net=net):
raise BitcoinException(f"invalid bitcoin address: {addr}")
witver, witprog = segwit_addr.decode(net.SEGWIT_HRP, addr)
if witprog is not None:
if witver != 0:
raise BitcoinException(f"not implemented handling for witver={witver}")
if len(witprog) == 20:
return OnchainOutputType.WITVER0_P2WPKH, bytes(witprog)
elif len(witprog) == 32:
return OnchainOutputType.WITVER0_P2WSH, bytes(witprog)
else:
raise BitcoinException(f"unexpected length for segwit witver=0 witprog: len={len(witprog)}")
addrtype, hash_160_ = b58_address_to_hash160(addr)
if addrtype == net.ADDRTYPE_P2PKH:
return OnchainOutputType.P2PKH, hash_160_
elif addrtype == net.ADDRTYPE_P2SH:
return OnchainOutputType.P2SH, hash_160_
raise BitcoinException(f"unknown address type: {addrtype}")
def address_to_scripthash(addr: str) -> str:
script = address_to_script(addr)
return script_to_scripthash(script)
def script_to_scripthash(script: str) -> str:
h = sha256(bfh(script))[0:32]
return bh2u(bytes(reversed(h)))
def public_key_to_p2pk_script(pubkey: str) -> str:
return push_script(pubkey) + opcodes.OP_CHECKSIG.hex()
def pubkeyhash_to_p2pkh_script(pubkey_hash160: str) -> str:
script = bytes([opcodes.OP_DUP, opcodes.OP_HASH160]).hex()
script += push_script(pubkey_hash160)
script += bytes([opcodes.OP_EQUALVERIFY, opcodes.OP_CHECKSIG]).hex()
return script
__b58chars = b'123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
assert len(__b58chars) == 58
__b43chars = b'0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ$*+-./:'
assert len(__b43chars) == 43
class BaseDecodeError(BitcoinException): pass
def base_encode(v: bytes, *, base: int) -> str:
    """Encode the byte string v in base58 (or base43)."""
    assert_bytes(v)
    if base not in (58, 43):
        raise ValueError('not supported base: {}'.format(base))
    chars = __b43chars if base == 43 else __b58chars
    # interpret the bytes as one big-endian integer
    num = int.from_bytes(bytes(v), 'big')
    encoded = bytearray()
    while num >= base:
        num, digit = divmod(num, base)
        encoded.append(chars[digit])
    encoded.append(chars[num])
    # Bitcoin does a little leading-zero-compression:
    # leading 0-bytes in the input become leading-1s
    n_leading_zeros = len(v) - len(bytes(v).lstrip(b'\x00'))
    encoded.extend(chars[0] for _ in range(n_leading_zeros))
    encoded.reverse()
    return encoded.decode('ascii')
def base_decode(v: Union[bytes, str], *, base: int, length: int = None) -> Optional[bytes]:
""" decode v into a string of len bytes."""
# assert_bytes(v)
v = to_bytes(v, 'ascii')
if base not in (58, 43):
raise ValueError('not supported base: {}'.format(base))
chars = __b58chars
if base == 43:
chars = __b43chars
long_value = 0
power_of_base = 1
for c in v[::-1]:
digit = chars.find(bytes([c]))
if digit == -1:
raise BaseDecodeError('Forbidden character {} for base {}'.format(c, base))
# naive but slow variant: long_value += digit * (base**i)
long_value += digit * power_of_base
power_of_base *= base
result = bytearray()
while long_value >= 256:
div, mod = divmod(long_value, 256)
result.append(mod)
long_value = div
result.append(long_value)
nPad = 0
for c in v:
if c == chars[0]:
nPad += 1
else:
break
result.extend(b'\x00' * nPad)
if length is not None and len(result) != length:
return None
result.reverse()
return bytes(result)
class InvalidChecksum(BaseDecodeError):
pass
def EncodeBase58Check(vchIn: bytes) -> str:
hash = sha256d(vchIn)
return base_encode(vchIn + hash[0:4], base=58)
def DecodeBase58Check(psz: Union[bytes, str]) -> bytes:
vchRet = base_decode(psz, base=58)
payload = vchRet[0:-4]
csum_found = vchRet[-4:]
csum_calculated = sha256d(payload)[0:4]
if csum_calculated != csum_found:
raise InvalidChecksum(f'calculated {bh2u(csum_calculated)}, found {bh2u(csum_found)}')
else:
return payload
# backwards compat
# extended WIF for segwit (used in 3.0.x; but still used internally)
# the keys in this dict should be a superset of what Imported Wallets can import
WIF_SCRIPT_TYPES = {
'p2pkh':38,
'p2wpkh':1,
'p2wpkh-p2sh':2,
'p2sh':85,
'p2wsh':6,
'p2wsh-p2sh':7
}
WIF_SCRIPT_TYPES_INV = inv_dict(WIF_SCRIPT_TYPES)
def is_segwit_script_type(txin_type: str) -> bool:
return txin_type in ('p2wpkh', 'p2wpkh-p2sh', 'p2wsh', 'p2wsh-p2sh')
def serialize_privkey(secret: bytes, compressed: bool, txin_type: str, *,
internal_use: bool = False) -> str:
# we only export secrets inside curve range
secret = ecc.ECPrivkey.normalize_secret_bytes(secret)
if internal_use:
prefix = bytes([(WIF_SCRIPT_TYPES[txin_type] + constants.net.WIF_PREFIX) & 255])
else:
prefix = bytes([constants.net.WIF_PREFIX])
suffix = b'\01' if compressed else b''
vchIn = prefix + secret + suffix
base58_wif = EncodeBase58Check(vchIn)
if internal_use:
return base58_wif
else:
return '{}:{}'.format(txin_type, base58_wif)
def deserialize_privkey(key: str) -> Tuple[str, bytes, bool]:
if is_minikey(key):
return 'p2pkh', minikey_to_private_key(key), False
txin_type = None
if ':' in key:
txin_type, key = key.split(sep=':', maxsplit=1)
if txin_type not in WIF_SCRIPT_TYPES:
raise BitcoinException('unknown script type: {}'.format(txin_type))
try:
vch = DecodeBase58Check(key)
except Exception as e:
neutered_privkey = str(key)[:3] + '..' + str(key)[-2:]
raise BaseDecodeError(f"cannot deserialize privkey {neutered_privkey}") from e
if txin_type is None:
# keys exported in version 3.0.x encoded script type in first byte
prefix_value = vch[0] - constants.net.WIF_PREFIX
try:
txin_type = WIF_SCRIPT_TYPES_INV[prefix_value]
except KeyError as e:
raise BitcoinException('invalid prefix ({}) for WIF key (1)'.format(vch[0])) from None
else:
# all other keys must have a fixed first byte
if vch[0] != constants.net.WIF_PREFIX:
raise BitcoinException('invalid prefix ({}) for WIF key (2)'.format(vch[0]))
if len(vch) not in [33, 34]:
raise BitcoinException('invalid vch len for WIF key: {}'.format(len(vch)))
compressed = False
if len(vch) == 34:
if vch[33] == 0x01:
compressed = True
else:
raise BitcoinException(f'invalid WIF key. length suggests compressed pubkey, '
f'but last byte is {vch[33]} != 0x01')
if is_segwit_script_type(txin_type) and not compressed:
raise BitcoinException('only compressed public keys can be used in segwit scripts')
secret_bytes = vch[1:33]
# we accept secrets outside curve range; cast into range here:
secret_bytes = ecc.ECPrivkey.normalize_secret_bytes(secret_bytes)
return txin_type, secret_bytes, compressed
def is_compressed_privkey(sec: str) -> bool:
return deserialize_privkey(sec)[2]
def address_from_private_key(sec: str) -> str:
txin_type, privkey, compressed = deserialize_privkey(sec)
public_key = ecc.ECPrivkey(privkey).get_public_key_hex(compressed=compressed)
return pubkey_to_address(txin_type, public_key)
def is_segwit_address(addr: str, *, net=None) -> bool:
if net is None: net = constants.net
try:
witver, witprog = segwit_addr.decode(net.SEGWIT_HRP, addr)
except Exception as e:
return False
return witprog is not None
def is_b58_address(addr: str, *, net=None) -> bool:
if net is None: net = constants.net
try:
# test length, checksum, encoding:
addrtype, h = b58_address_to_hash160(addr)
except Exception as e:
return False
if addrtype not in [net.ADDRTYPE_P2PKH, net.ADDRTYPE_P2SH]:
return False
return True
def is_address(addr: str, *, net=None) -> bool:
if net is None: net = constants.net
return is_segwit_address(addr, net=net) \
or is_b58_address(addr, net=net)
def is_private_key(key: str, *, raise_on_error=False) -> bool:
try:
deserialize_privkey(key)
return True
except BaseException as e:
if raise_on_error:
raise
return False
########### end pywallet functions #######################
def is_minikey(text: str) -> bool:
# Minikeys are typically 22 or 30 characters, but this routine
# permits any length of 20 or more provided the minikey is valid.
# A valid minikey must begin with an 'S', be in base58, and when
# suffixed with '?' have its SHA256 hash begin with a zero byte.
# They are widely used in Casascius physical bitcoins.
return (len(text) >= 20 and text[0] == 'S'
and all(ord(c) in __b58chars for c in text)
and sha256(text + '?')[0] == 0x00)
def minikey_to_private_key(text: str) -> bytes:
return sha256(text)
| 30.878415 | 125 | 0.658762 |
acebf85c58b3cee11ef7dff34e25bd6cd3c8a699 | 4,822 | py | Python | vocal_similarity.py | jackgoffinet/vocal_similarity | 31c409a7e29aa4f50e0f00e373409be771bbe0c7 | [
"MIT"
] | null | null | null | vocal_similarity.py | jackgoffinet/vocal_similarity | 31c409a7e29aa4f50e0f00e373409be771bbe0c7 | [
"MIT"
] | null | null | null | vocal_similarity.py | jackgoffinet/vocal_similarity | 31c409a7e29aa4f50e0f00e373409be771bbe0c7 | [
"MIT"
] | null | null | null | from __future__ import print_function, division
"""
An implementation of the vocal similarity model of chord consonance described
in "Vocal similarity predicts the relative attraction of musical chords" by
Daniel L. Bowling, Dale Purves, and Kamraan Z. Gill (December 2017).
Reference:
@article{bowling2017vocal,
title={Vocal similarity predicts the relative attraction of musical
chords},
author={Bowling, Daniel L and Purves, Dale and Gill, Kamraan Z},
journal={Proceedings of the National Academy of Sciences},
pages={201713206},
year={2017},
publisher={National Acad Sciences}
note={Available at: https://doi.org/10.1073/pnas.1713206115}
}
Usage:
$ python vocal_similarity.py
Notes:
The main method is <get_consonance_score>.
"""
__author__ = "Jack Goffinet"
__date__ = "March 2018"
from fractions import gcd
from itertools import combinations
# Numerators & Denominators of the just intonation ratios used.
JI_NUMS = [1, 16, 9, 6, 5, 4, 7, 3, 8, 5, 9, 15, 2]
JI_DENOMS = [1, 15, 8, 5, 4, 3, 5, 2, 5, 3, 5, 8, 1]
MIDDLE_C = 220.0 * 2.0 ** (3 / 12) # Middle C in Hertz
def get_gcd(numbers):
    """Return the greatest common divisor of the given integers.

    :param numbers: non-empty iterable of ints
    :return: int, the GCD of all values
    """
    # BUGFIX: on Python 3 `reduce` is not a builtin (it was never imported
    # here) and `fractions.gcd` was removed in 3.9 -- import both locally
    # so this works regardless of the module-level py2/py3 imports.
    from functools import reduce
    from math import gcd as _gcd
    return reduce(_gcd, numbers)
def get_lcm(numbers):
    """Return the lowest common multiple of the given integers.

    :param numbers: iterable of ints
    :return: int, the LCM of all values (1 for an empty iterable)
    """
    # BUGFIX: `reduce` is a builtin only on Python 2, and `fractions.gcd`
    # (imported at module level) was removed in Python 3.9 -- use the
    # functools/math equivalents locally.
    from functools import reduce
    from math import gcd as _gcd

    def pair_lcm(a, b):
        return (a * b) // _gcd(a, b)

    return reduce(pair_lcm, numbers, 1)
def chord_to_freq_ratios(chord):
    """Return the frequency ratios of the pitches in <chord>

    Each pitch's just-intonation fraction is rescaled to a common
    denominator, so the returned numerators are integer frequency ratios.

    Args:
        chord (tuple of ints): see <get_consonance_score>.

    Returns:
        (list of ints, int): the per-pitch numerators and their common
            denominator (the LCM of the individual denominators).
    """
    numerators = [JI_NUMS[i] for i in chord]
    denoms = [JI_DENOMS[i] for i in chord]
    denominator = get_lcm(denoms)
    # rescale each fraction onto the common denominator (exact, since the
    # LCM is divisible by every individual denominator)
    numerators = [(numerators[i] * denominator) // denoms[i] for i in \
        range(len(numerators))]
    return numerators, denominator
def harmonic_metric(chord):
    """Calculate the harmonic metric described in (Bowling et al., 2017).

    Roughly, the fraction of the harmonic series of the pitches' GCD that
    coincides with a harmonic of at least one pitch in the chord.

    Args:
        chord (tuple of ints): see <get_consonance_score>.

    Returns:
        float: The harmonic metric of the given chord.
    """
    ratios, _ = chord_to_freq_ratios(chord)
    ratio_gcd = get_gcd(ratios)
    ratio_lcm = get_lcm(ratios)
    # harmonic series of the GCD, up to (and including) the LCM
    gcd_harmonics = range(ratio_gcd, ratio_lcm + 1, ratio_gcd)
    # count GCD harmonics that land on some pitch's harmonic spectrum
    overlap = sum(
        1 for harmonic in gcd_harmonics
        if any(harmonic % ratio == 0 for ratio in ratios)
    )
    return overlap / (ratio_lcm // ratio_gcd)
def interval_metric(chord):
    """Return the minimum fundamental-frequency gap among pitches in <chord>.

    The chord is first transposed so that the average fundamental frequency
    of its pitches equals <MIDDLE_C>.

    Args:
        chord (tuple of ints): see <get_consonance_score>.

    Returns:
        float: The minimum difference between adjacent fundamentals, in Hz.
    """
    ratios, denom = chord_to_freq_ratios(chord)
    relative = [r / denom for r in ratios]
    mean_freq = sum(relative) / len(relative)
    # transpose so the chord's mean fundamental sits at middle C
    freqs = [f / mean_freq * MIDDLE_C for f in relative]
    return min(hi - lo for lo, hi in zip(freqs, freqs[1:]))
def get_consonance_score(chord, cutoff=50.0):
    """Combine the interval and harmonic metrics into a single score.

    Scores are only intended for ranking and pairwise comparison (relative
    score matters, not absolute differences). Chords whose minimum
    fundamental spacing falls below <cutoff> get a negative score from the
    interval metric; all others get a positive score from the harmonic
    metric.

    The argument <chord> is a sorted tuple of ints in the range [0,12],
    each a semitone offset -- e.g. (0, 4, 7) is a major triad.

    Args:
        chord (tuple of ints): A sorted tuple representing a chord.
        cutoff (float, optional): Vocal range limit, in Hertz. Defaults to 50.0.

    Returns:
        float: The chord's consonance score.
    """
    min_gap = interval_metric(chord)
    if min_gap >= cutoff:
        return harmonic_metric(chord)
    return (-cutoff - 1.0 + min_gap) / (cutoff + 1.0)
return harmonic_metric(chord)
if __name__ == '__main__':
major_triad = (0,4,7)
minor_triad = (0,3,7)
print(get_consonance_score(major_triad))
print(get_consonance_score(minor_triad))
| 31.51634 | 80 | 0.672335 |
acebf8a04e10285764d6168d0610a53e7d345747 | 4,651 | py | Python | sdk/python/pulumi_azure_native/netapp/v20200601/get_account.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/netapp/v20200601/get_account.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/netapp/v20200601/get_account.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetAccountResult',
'AwaitableGetAccountResult',
'get_account',
]
@pulumi.output_type
class GetAccountResult:
"""
NetApp account resource
"""
def __init__(__self__, active_directories=None, id=None, location=None, name=None, provisioning_state=None, tags=None, type=None):
if active_directories and not isinstance(active_directories, list):
raise TypeError("Expected argument 'active_directories' to be a list")
pulumi.set(__self__, "active_directories", active_directories)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="activeDirectories")
def active_directories(self) -> Optional[Sequence['outputs.ActiveDirectoryResponse']]:
"""
Active Directories
"""
return pulumi.get(self, "active_directories")
@property
@pulumi.getter
def id(self) -> str:
"""
Resource Id
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> str:
"""
Resource location
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
Azure lifecycle management
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type
"""
return pulumi.get(self, "type")
class AwaitableGetAccountResult(GetAccountResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetAccountResult(
active_directories=self.active_directories,
id=self.id,
location=self.location,
name=self.name,
provisioning_state=self.provisioning_state,
tags=self.tags,
type=self.type)
def get_account(account_name: Optional[str] = None,
                resource_group_name: Optional[str] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetAccountResult:
    """
    NetApp account resource

    :param str account_name: The name of the NetApp account
    :param str resource_group_name: The name of the resource group.
    """
    # Arguments are keyed by the provider's camelCase property names.
    invoke_args = {
        'accountName': account_name,
        'resourceGroupName': resource_group_name,
    }
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    raw = pulumi.runtime.invoke('azure-native:netapp/v20200601:getAccount', invoke_args, opts=opts, typ=GetAccountResult).value

    # Re-wrap the plain result so callers may either use it directly or await it.
    return AwaitableGetAccountResult(
        active_directories=raw.active_directories,
        id=raw.id,
        location=raw.location,
        name=raw.name,
        provisioning_state=raw.provisioning_state,
        tags=raw.tags,
        type=raw.type)
| 32.075862 | 134 | 0.637282 |
acebf91183cbfab930cfdad0f1a13b5fc7041163 | 4,690 | py | Python | sdk/python/pulumi_newrelic/get_alert_policy.py | bob-bins/pulumi-newrelic | f8a121fb7d6e6ad979d3ccf72467b9e89769e305 | [
"ECL-2.0",
"Apache-2.0"
] | 6 | 2019-09-17T20:41:26.000Z | 2022-01-13T23:54:14.000Z | sdk/python/pulumi_newrelic/get_alert_policy.py | bob-bins/pulumi-newrelic | f8a121fb7d6e6ad979d3ccf72467b9e89769e305 | [
"ECL-2.0",
"Apache-2.0"
] | 136 | 2019-04-29T21:34:57.000Z | 2022-03-30T17:07:03.000Z | sdk/python/pulumi_newrelic/get_alert_policy.py | bob-bins/pulumi-newrelic | f8a121fb7d6e6ad979d3ccf72467b9e89769e305 | [
"ECL-2.0",
"Apache-2.0"
] | 3 | 2019-10-05T10:33:59.000Z | 2021-06-15T16:37:49.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = [
'GetAlertPolicyResult',
'AwaitableGetAlertPolicyResult',
'get_alert_policy',
]
@pulumi.output_type
class GetAlertPolicyResult:
    """
    A collection of values returned by getAlertPolicy.
    """
    def __init__(__self__, account_id=None, created_at=None, id=None, incident_preference=None, name=None, updated_at=None):
        # Validate each invoke output, then store it in the output-type's
        # backing store (pulumi.set) keyed by the property name below.
        if account_id and not isinstance(account_id, int):
            raise TypeError("Expected argument 'account_id' to be a int")
        pulumi.set(__self__, "account_id", account_id)
        if created_at and not isinstance(created_at, str):
            raise TypeError("Expected argument 'created_at' to be a str")
        pulumi.set(__self__, "created_at", created_at)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if incident_preference and not isinstance(incident_preference, str):
            raise TypeError("Expected argument 'incident_preference' to be a str")
        pulumi.set(__self__, "incident_preference", incident_preference)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if updated_at and not isinstance(updated_at, str):
            raise TypeError("Expected argument 'updated_at' to be a str")
        pulumi.set(__self__, "updated_at", updated_at)
    @property
    @pulumi.getter(name="accountId")
    def account_id(self) -> int:
        """
        The account ID the alert policy is scoped to.
        """
        return pulumi.get(self, "account_id")
    @property
    @pulumi.getter(name="createdAt")
    def created_at(self) -> str:
        """
        The time the policy was created.
        """
        return pulumi.get(self, "created_at")
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The provider-assigned unique ID for this managed resource.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter(name="incidentPreference")
    def incident_preference(self) -> Optional[str]:
        """
        The rollup strategy for the policy. Options include: PER_POLICY, PER_CONDITION, or PER_CONDITION_AND_TARGET. The default is PER_POLICY.
        """
        return pulumi.get(self, "incident_preference")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the alert policy.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="updatedAt")
    def updated_at(self) -> str:
        """
        The time the policy was last updated.
        """
        return pulumi.get(self, "updated_at")
class AwaitableGetAlertPolicyResult(GetAlertPolicyResult):
    """Awaitable wrapper so ``get_alert_policy`` results work with ``await``."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable ``yield`` turns this method into a generator, which
        # is what makes instances awaitable; awaiting one immediately returns
        # a plain GetAlertPolicyResult copied from this instance's fields.
        if False:
            yield self
        return GetAlertPolicyResult(
            account_id=self.account_id,
            created_at=self.created_at,
            id=self.id,
            incident_preference=self.incident_preference,
            name=self.name,
            updated_at=self.updated_at)
def get_alert_policy(account_id: Optional[int] = None,
                     incident_preference: Optional[str] = None,
                     name: Optional[str] = None,
                     opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetAlertPolicyResult:
    """
    Use this data source to get information about a specific alert policy in New Relic that already exists.

    :param str incident_preference: The rollup strategy for the policy. Options include: PER_POLICY, PER_CONDITION, or PER_CONDITION_AND_TARGET. The default is PER_POLICY.
    :param str name: The name of the alert policy in New Relic.
    """
    # Arguments are keyed by the provider's camelCase property names.
    invoke_args = {
        'accountId': account_id,
        'incidentPreference': incident_preference,
        'name': name,
    }
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    raw = pulumi.runtime.invoke('newrelic:index/getAlertPolicy:getAlertPolicy', invoke_args, opts=opts, typ=GetAlertPolicyResult).value

    # Re-wrap the plain result so callers may either use it directly or await it.
    return AwaitableGetAlertPolicyResult(
        account_id=raw.account_id,
        created_at=raw.created_at,
        id=raw.id,
        incident_preference=raw.incident_preference,
        name=raw.name,
        updated_at=raw.updated_at)
| 36.929134 | 171 | 0.664606 |
acebfa0152deffd6f49b386059fecd8ff58aab66 | 129 | py | Python | FullContact/urls.py | KamalAwasthi/FullContact | fa2e9f29079064b015848d980ddbb8da51f323c9 | [
"Apache-2.0"
] | 2 | 2018-05-31T16:21:06.000Z | 2019-11-28T11:58:12.000Z | FullContact/urls.py | KamalAwasthi/FullContact | fa2e9f29079064b015848d980ddbb8da51f323c9 | [
"Apache-2.0"
] | null | null | null | FullContact/urls.py | KamalAwasthi/FullContact | fa2e9f29079064b015848d980ddbb8da51f323c9 | [
"Apache-2.0"
] | 2 | 2018-02-12T16:37:08.000Z | 2019-11-28T11:58:24.000Z | from django.conf.urls import url
from . import views
urlpatterns = [
    # App root -> getFullContact view (reversible by the name 'FullContact').
    url(r'^$', views.getFullContact, name='FullContact'),
]
acebfc86d6b351efa1529e02dc915f33c93318e2 | 1,520 | py | Python | experiments/one_net_sbm/train_model.py | cassianobecker/msbm | 6ce22f93f63071dc3ca722d499db376ea678eb23 | [
"MIT"
] | null | null | null | experiments/one_net_sbm/train_model.py | cassianobecker/msbm | 6ce22f93f63071dc3ca722d499db376ea678eb23 | [
"MIT"
] | null | null | null | experiments/one_net_sbm/train_model.py | cassianobecker/msbm | 6ce22f93f63071dc3ca722d499db376ea678eb23 | [
"MIT"
] | null | null | null | # Experiment that generates several sets of networks of varying CH-divergence types
# then trains an msbm of a single type in a "consensus" type of way. Then we report the
# average rand_index and average entropy of the z variables, which are indicators of how well
# the algorithm is learning the true model.
import sys, os
import pickle
import pdb
sys.path.insert(0, '../..')
import util as ut
import init_msbm_vi as im
import varinf
def main():
file_list = sorted(os.listdir('data'))
for data_file in file_list:
# load data
file_url = os.path.join('data', data_file)
data = ut.load_data(file_url)
prior = dict()
prior['ALPHA_0'] = 0.5
prior['BETA_0'] = 0.5
prior['NU_0'] = 0.5
prior['ZETA_0'] = 0.5
# assigning hyper-parameters from ground truth (cheating)
hyper = dict()
hyper['M'] = data['M']
hyper['Q'] = data['Q']
hyper['init_TAU'] = 'distance'
# initialize moments
mom = im.init_moments(data, hyper)
par = dict()
par['MAX_ITER'] = 1000
par['TOL_ELBO'] = 1.e-16
par['ALG'] = 'cavi'
results_mom, elbo_seq = varinf.infer(data, prior, hyper, mom, par)
print('Saving file to {:s} ... '.format('models/model_' + data_file))
out_file_url = os.path.join('models', 'model_' + data_file)
pickle.dump({'results_mom': results_mom, 'elbo_seq': elbo_seq}, open(out_file_url, 'wb'))
if __name__ == '__main__':
main()
| 28.679245 | 97 | 0.614474 |
acebfcb046d17ba329664cf491625a114875db26 | 16,394 | py | Python | build/lib/vengeance/excel_com/classes/excel_levity_cls.py | michael-ross-ven/vengeance | 53c6eefba0573936d22a55ba5900744ac701f4b9 | [
"MIT"
] | 1 | 2020-01-18T18:23:26.000Z | 2020-01-18T18:23:26.000Z | build/lib/vengeance/excel_com/classes/excel_levity_cls.py | michael-ross-ven/vengeance | 53c6eefba0573936d22a55ba5900744ac701f4b9 | [
"MIT"
] | null | null | null | build/lib/vengeance/excel_com/classes/excel_levity_cls.py | michael-ross-ven/vengeance | 53c6eefba0573936d22a55ba5900744ac701f4b9 | [
"MIT"
] | null | null | null |
import re
# noinspection PyUnresolvedReferences
from pythoncom import com_error as pythoncom_error
from typing import Generator
from typing import List
from typing import Any
from .lev_row_cls import lev_row_cls
from .. import excel_address
from .. import worksheet
from .. excel_constants import *
from ... util.iter import iterator_to_collection
from ... util.iter import map_values_to_enum
from ... util.iter import modify_iteration_depth
from ... util.text import object_name
from ... conditional import ordereddict
class excel_levity_cls:
allow_focus = False
def __init__(self, ws, *,
first_c=None,
last_c=None,
meta_r=0,
header_r=0,
first_r=0,
last_r=0):
if (not isinstance(meta_r, int) or
not isinstance(header_r, int) or
not isinstance(first_r, int) or
not isinstance(last_r, int)):
raise TypeError('row references must be integers')
self.ws = ws
if hasattr(ws, 'Name'):
self.ws_name = ws.Name
else:
self.ws_name = "(no 'Name' attribute)"
self.headers = ordereddict()
self.m_headers = ordereddict()
self._named_ranges = {}
self._fixed_columns = (first_c, last_c)
self._fixed_rows = (first_r, last_r)
self.first_c = first_c
self.last_c = last_c
self.meta_r = meta_r
self.header_r = header_r
self.first_r = first_r
self.last_r = last_r
self.set_range_boundaries(index_meta=True,
index_header=True)
@property
def is_worksheet_type(self):
""" ie,
a chart that has been moved to its own worksheet will not be
a true worksheet object
"""
return worksheet.is_win32_worksheet_instance(self.ws)
@staticmethod
def col_letter_offset(col_str, offset):
return excel_address.col_letter_offset(col_str, offset)
@staticmethod
def col_letter(col_int):
return excel_address.col_letter(col_int)
@staticmethod
def col_number(col_str):
return excel_address.col_number(col_str)
@property
def application(self):
return self.ws.Application
@property
def workbook(self):
return self.ws.Parent
@property
def named_ranges(self):
if not self._named_ranges:
self._named_ranges = _named_ranges_in_workbook(self.workbook)
return self._named_ranges
@property
def worksheet(self):
return self.ws
@property
def worksheet_name(self):
return self.ws_name
@property
def meta_headers(self):
return self.m_headers
def header_names(self):
return list(self.headers.keys())
def meta_header_names(self):
return list(self.m_headers.keys())
@property
def has_headers(self):
if self.is_empty():
return False
return bool(self.headers) or bool(self.m_headers)
@property
def has_filter(self):
if not self.is_worksheet_type:
return False
return bool(self.ws.AutoFilter)
@property
def first_empty_row(self):
if self.is_empty():
return self.header_r or self.meta_r or 1
a = '{}{}:{}{}'.format(self.first_c, self.first_r,
self.last_c, self.first_r)
first_data_row = self.ws.Range(a)
if worksheet.is_range_empty(first_data_row):
r = self.first_r
else:
r = self.last_r + 1
return r
@property
def first_empty_column(self):
""" determines the first available empty column in sheet """
if self.is_empty():
c = self.first_c
else:
c = excel_address.col_letter_offset(self.last_c, 1)
return c
@property
def num_cols(self):
return excel_address.col_number(self.last_c) - excel_address.col_number(self.first_c) + 1
@property
def num_rows(self):
return int(self.last_r) - int(self.first_r) + 1
def is_empty(self):
if self.last_r > self.first_r:
return False
r_1 = self.header_r or self.meta_r or 1
r_2 = self.last_r
a = '{}{}:{}{}'.format(self.first_c, r_1, self.last_c, r_2)
return worksheet.is_range_empty(self.ws.Range(a))
def rows(self, r_1='*h', r_2='*l') -> Generator[List, Any, Any]:
if self.is_empty():
return ([] for _ in range(1))
a = '*f {}:*l {}'.format(r_1, r_2)
excel_range = self.range(a)
return (row for row in worksheet.escape_excel_range_errors(excel_range))
def lev_rows(self, r_1='*h', r_2='*l') -> Generator[lev_row_cls, Any, Any]:
if self.headers:
headers = map_values_to_enum(self.headers.keys())
elif self.m_headers:
headers = map_values_to_enum(self.m_headers.keys())
else:
headers = ordereddict()
if self.is_empty():
return (lev_row_cls(headers, [], '') for _ in range(1))
reserved = headers.keys() & set(lev_row_cls.reserved_names())
if reserved:
raise NameError("reserved name(s) {} found in header row {}"
.format(list(reserved), list(headers.keys())))
a = '*f {}:*l {}'.format(r_1, r_2)
excel_range = self.range(a)
r_1 = excel_range.Row
c_1, c_2 = self.first_c, self.last_c
for r, row in enumerate(worksheet.escape_excel_range_errors(excel_range), r_1):
a = '${}${}:${}${}'.format(c_1, r, c_2, r)
yield lev_row_cls(headers, row, a)
def activate(self):
if self.allow_focus:
worksheet.activate_worksheet(self.ws)
def clear_filter(self):
if not self.has_filter:
return
if worksheet.is_filtered(self.ws):
self.set_range_boundaries()
def remove_filter(self):
if not self.has_filter:
return
self.ws.AutoFilterMode = False
def reapply_filter(self, c_1='*f', c_2='*l'):
if self.header_r > 0:
r = '*h'
else:
r = '*f'
a = '{} {}:{} {}'.format(c_1, r, c_2, r)
excel_range = self.range(a)
if excel_range.Cells.Count == 1:
a = '{} {}:{} {}'.format(c_1, r, c_2, self.last_r + 1)
excel_range = self.range(a)
self.remove_filter()
excel_range.AutoFilter(*(1,))
def calculate(self):
self.range('*f *h: *l *l').Calculate()
def clear(self, reference,
clear_values=True,
clear_colors=False):
excel_range = self.range(reference)
_, r_1, _, r_2 = worksheet.parse_range(excel_range)
if clear_values:
excel_range.ClearContents()
index_meta = (r_1 <= self.meta_r)
index_header = (r_1 <= self.header_r)
self.set_range_boundaries(index_meta, index_header)
if clear_colors:
excel_range.Interior.Color = xlNone
def set_range_boundaries(self, index_meta=True, index_header=True):
""" find the edges of data in worksheet
worksheet filter MUST be cleared from worksheet to
determine these boundaries correctly
"""
if not self.is_worksheet_type:
self.first_c = ''
self.last_c = ''
self.first_r = 0
self.last_r = 0
return
worksheet.clear_worksheet_filter(self.ws)
self.__range_boundaries()
if index_meta:
self.__index_meta_columns()
if index_header:
self.__index_header_columns()
def __range_boundaries(self):
used_range = self.ws.UsedRange
first_c, last_c = self._fixed_columns
first_r, last_r = self._fixed_rows
self.first_c = first_c or worksheet.first_col(used_range)
self.last_c = last_c or worksheet.last_col(used_range, default=self.first_c)
r_1 = max(self.meta_r, self.header_r) + 1
r_2 = used_range.Rows.Count
a = '{}{}:{}{}'.format(self.first_c, r_1,
self.last_c, r_2)
excel_range = self.ws.Range(a)
self.first_r = first_r or worksheet.first_row(excel_range, default=r_1)
self.last_r = last_r or worksheet.last_row(excel_range, default=self.first_r)
self.first_c = excel_address.col_letter(self.first_c)
self.last_c = excel_address.col_letter(self.last_c)
self.first_r = int(self.first_r)
self.last_r = int(self.last_r)
@classmethod
def index_headers(cls, ws, row_int=None):
if ws.__class__.__name__ != '_Worksheet':
return {}
if row_int is None:
row_int = worksheet.first_row(ws)
c = excel_address.col_letter(ws.UsedRange.Columns.Count)
a = '{}{}:{}{}'.format('A', row_int, c, row_int)
excel_range = ws.Range(a)
return cls.__index_row_headers(excel_range)
@classmethod
def __index_row_headers(cls, excel_range):
row = excel_range.Rows(1)
row = worksheet.escape_excel_range_errors(row)[0]
if not any(row):
return ordereddict()
c_1 = excel_range.Column
headers = map_values_to_enum(row, c_1)
headers = ordereddict((h, excel_address.col_letter(v)) for h, v in headers.items())
return headers
def __index_headers(self, row_ref):
a = '*f {} :*l {}'.format(row_ref, row_ref)
excel_range = self.range(a)
return self.__index_row_headers(excel_range)
def __index_meta_columns(self):
if self.meta_r == 0:
return
self.m_headers = self.__index_headers('meta_r')
def __index_header_columns(self):
if self.header_r == 0:
return
self.headers = self.__index_headers('header_r')
def range(self, reference):
if not self.is_worksheet_type:
ws_type = object_name(self.ws)
raise TypeError('{} is not an Excel worksheet '.format(ws_type))
try:
a = self.excel_address(reference)
excel_range = self.ws.Range(a)
except pythoncom_error:
excel_range = self.named_ranges.get(reference)
if excel_range is None:
raise ValueError("Invalid Range reference '{}'".format(reference))
return excel_range
    def excel_address(self, reference):
        """Translate a levity reference (anchor tokens like '*f *h', header
        names resolved via lev lookups, or literal 'A1'-style text; optionally
        a 'start:end' pair) into an absolute address such as '$A$1:$B$2'."""
        if ':' in reference:
            # Resolve each side of the range separately.
            a_1, a_2 = reference.split(':')
            c_1, r_1 = _reference_to_col_row(self, a_1)
            c_2, r_2 = _reference_to_col_row(self, a_2)
            a = '${}${}:${}${}'.format(c_1, r_1, c_2, r_2)
        else:
            c_1, r_1 = _reference_to_col_row(self, reference)
            a = '${}${}'.format(c_1, r_1)
        return a
def __getitem__(self, reference):
return self.range(reference)
def __setitem__(self, reference, v):
""" write value(s) to excel range """
excel_range = self.range(reference)
m = self.__validate_matrix_within_range_boundaries(v, excel_range)
was_filtered = self.has_filter
worksheet.write_to_excel_range(m, excel_range)
r = excel_range.Row
self.set_range_boundaries(index_meta=(r <= self.meta_r),
index_header=(r <= self.header_r))
if was_filtered:
self.reapply_filter()
def __iter__(self) -> Generator[lev_row_cls, Any, Any]:
return self.lev_rows('*f')
def __repr__(self):
if not self.is_worksheet_type:
return "{{}}: '{}'".format(self.ws.__class__.__name__, self.ws_name)
if self.first_c and self.last_c:
r = max(self.header_r, self.header_r)
if r == 0:
r = self.first_r
a = "{}{}:{}{}".format(self.first_c,
r,
self.last_c,
self.last_r)
else:
a = '{unknown address}'
return "'{}' {}".format(self.ws_name, a)
    def __validate_matrix_within_range_boundaries(self, v, excel_range):
        """
        if lev has fixed columns or rows, these should not be exceeded
        make sure matrix fits in allowed destination space

        Returns *v* normalized to a list-of-rows matrix; raises ValueError
        when the data would overflow a fixed destination range.
        """
        m = iterator_to_collection(v)
        m = modify_iteration_depth(m, depth=2)
        # Without fixed bounds, the data's own size is the limit (no-op checks).
        col_max = num_cols = len(m[0])
        row_max = num_rows = len(m)
        first_c, last_c = self._fixed_columns
        first_r, last_r = self._fixed_rows
        c_1 = excel_range.Column
        r_1 = excel_range.Row
        if last_c:
            # excel_address here is the module import (letter -> number),
            # not this class's excel_address method.
            first_c = excel_address.col_number(first_c) or excel_address.col_number(c_1)
            last_c = excel_address.col_number(last_c)
            col_max = (last_c - first_c) + 1
        if last_r:
            # Fall back to the destination range's own first row when the
            # fixed first row is falsy.
            first_r = int(first_r) or int(r_1)
            last_r = int(last_r)
            row_max = (last_r - first_r) + 1
        if num_cols > col_max:
            raise ValueError('Number of columns in data exceeds fixed destination range')
        if num_rows > row_max:
            raise ValueError('Number of rows in data exceeds fixed destination range')
        return m
def _named_ranges_in_workbook(wb):
    """Return {name: Range} for the workbook's visible defined names.

    Names whose ``RefersToRange`` raises a COM error (presumably names that
    do not refer to an actual range) are silently skipped.
    """
    named_ranges = {}
    for nr in wb.Names:
        if nr.Visible:
            try:
                named_ranges[nr.Name] = nr.RefersToRange
            except pythoncom_error:
                continue
    return named_ranges
def _reference_to_col_row(lev, reference):
    """Resolve one half of a range reference to a (column, row) pair.

    Anchor tokens ('*f', '*h', ...) are first expanded to lev property names,
    those names are resolved to concrete values against *lev*, and anything
    still unresolved is parsed as a literal 'A1'-style column/row pair.
    """
    reference = __reference_to_property_names(reference)
    col, row = __property_names_to_value(lev, reference)
    if col is None or row is None:
        col, row = __parse_characters_from_digits(reference, col, row)
    return col, row
def __reference_to_property_names(reference):
    """Expand '*' anchor tokens in a reference into lev property names.

    A leading column anchor (*f, *l, *a) and/or a trailing row anchor
    (*m, *h, *f, *l, *a) are rewritten, e.g.::

        'first_c header_r' == __reference_to_property_names('*f *h')
    """
    anchor_names = {'*m': 'meta',
                    '*h': 'header',
                    '*f': 'first',
                    '*l': 'last'}
    # Column anchors only match at the start; row anchors only at the end.
    anchor_re = re.compile(r'(?P<col>^[*][fla])|(?P<row>[*][mhfla]$)', re.I)

    reference = reference.strip()
    for match in anchor_re.finditer(reference):
        token = match.group(0)
        if match.lastgroup == 'col':
            if token == '*a':
                expansion = 'first_empty_column '
            else:
                expansion = anchor_names[token] + '_c '
            reference = reference.replace(token, expansion, 1)
        elif match.lastgroup == 'row':
            if token == '*a':
                expansion = 'first_empty_row '
            else:
                expansion = ' ' + anchor_names[token] + '_r'
            reference = reference.replace(token, expansion, 1)

    # collapse runs of whitespace into single spaces
    reference = ' '.join(reference.split())

    return reference
def __property_names_to_value(lev, reference):
    """Resolve a 'col_name row_name' pair against *lev*.

    Returns (None, None) when the reference contains no space, i.e. it is
    not a two-part name that could be looked up.
    """
    if ' ' not in reference:
        return None, None
    # replace multiple spaces with single space
    reference = ' '.join(reference.split())
    splits = reference.split(' ')
    col = __col_row_to_value(lev, splits[0])
    row = __col_row_to_value(lev, splits[1])
    return col, row
def __col_row_to_value(lev, reference):
    """Look up a single name on *lev*, trying (in order): worksheet headers,
    meta headers, instance attributes, then class attributes (evaluated via
    getattr so properties are computed); None when nothing matches."""
    if reference in lev.headers:
        literal = lev.headers[reference]
    elif reference in lev.m_headers:
        literal = lev.m_headers[reference]
    elif reference in lev.__dict__:
        literal = lev.__dict__[reference]
    elif reference in lev.__class__.__dict__:
        literal = getattr(lev, reference)
    else:
        literal = None
    return literal
def __parse_characters_from_digits(reference, col, row):
    """Fill missing col/row from a literal 'A1'-style reference.

    Only the positions still unresolved (None) are filled; values already
    resolved from lev properties are kept untouched.
    """
    # Column letters must appear at the start (followed by a digit, '*' or
    # space); the row number must be the trailing run of digits.
    address_re = re.compile(
        r'(?P<col>^[$]?[a-z]{1,2})(?=[\d* ])|(?P<row>[$]?[\d]+$)', re.I)

    reference = reference.replace('$', '')
    for match in address_re.finditer(reference):
        token = match.group(0)
        if match.lastgroup == 'col':
            if col is None:
                col = token
        elif row is None:
            row = token

    return col, row
| 28.363322 | 97 | 0.581493 |
acebfcb072ed8710993a6b07f4216884e01de391 | 475 | py | Python | app/test/test_article.py | sharon002/News-highlights | f48911d04b325490b80f549a5799513b5b0efb65 | [
"MIT"
] | 1 | 2021-06-13T13:14:40.000Z | 2021-06-13T13:14:40.000Z | app/test/test_article.py | sharon002/News-highlights | f48911d04b325490b80f549a5799513b5b0efb65 | [
"MIT"
] | null | null | null | app/test/test_article.py | sharon002/News-highlights | f48911d04b325490b80f549a5799513b5b0efb65 | [
"MIT"
] | null | null | null | import unittest
from app.models import Article
class ArticleTest(unittest.TestCase):
    '''
    Test Class to test the behaviour of the Article class
    '''
    def setUp(self):
        '''
        Set up method that will run before every Test
        '''
        self.new_article = Article('Test author','Test title','Test description','Test url','Test image','Test publishedAt')
    def test_instance(self):
        '''
        Test that new_article is an instance of the Article class
        '''
        self.assertTrue(isinstance(self.new_article,Article))
| 29.6875 | 124 | 0.667368 |
acebfd142222a422907a262a765f0b9d98da99f6 | 329 | py | Python | examples/models/create_triplet_siamese.py | gugarosa/dualing | a625476946bded8e6c9211d83fe79dc16b3d8f16 | [
"Apache-2.0"
] | 2 | 2020-08-03T08:02:48.000Z | 2020-11-21T04:11:45.000Z | examples/models/create_triplet_siamese.py | gugarosa/dualing | a625476946bded8e6c9211d83fe79dc16b3d8f16 | [
"Apache-2.0"
] | 1 | 2020-09-29T12:58:33.000Z | 2020-09-29T12:58:33.000Z | examples/models/create_triplet_siamese.py | gugarosa/dualing | a625476946bded8e6c9211d83fe79dc16b3d8f16 | [
"Apache-2.0"
] | null | null | null | from dualing.models import TripletSiamese
from dualing.models.base import CNN
# Creates the base architecture
cnn = CNN(n_blocks=3, init_kernel=5, n_output=128, activation='linear')
# Creates the triplet siamese network
s = TripletSiamese(cnn, loss='hard', margin=0.5, soft=False, distance_metric='L2', name='triplet_siamese')
| 36.555556 | 106 | 0.781155 |
acebfec0407706908237d483dec901dd937c8409 | 1,551 | py | Python | makibot/plugins/screencapture.py | ThinkinCoin/custom-tg-bot | 3a7780941faa2e8c2723ac137ff65f8b9660dca7 | [
"MIT"
] | null | null | null | makibot/plugins/screencapture.py | ThinkinCoin/custom-tg-bot | 3a7780941faa2e8c2723ac137ff65f8b9660dca7 | [
"MIT"
] | null | null | null | makibot/plugins/screencapture.py | ThinkinCoin/custom-tg-bot | 3a7780941faa2e8c2723ac137ff65f8b9660dca7 | [
"MIT"
] | null | null | null | """Take screenshot of any website
Syntax: .screencapture <Website URL>"""
import io
import requests
from telethon import events
from makibot.utils import admin_cmd
@borg.on(admin_cmd("screencapture (.*)"))
async def _(event):
    """Reply with a screenshot of the given URL via the screenshotlayer API."""
    # Ignore forwarded copies of the command.
    if event.fwd_from:
        return
    if Config.SCREEN_SHOT_LAYER_ACCESS_KEY is None:
        await event.edit("Need to get an API key from https://screenshotlayer.com/product \nModule stopping!")
        return
    await event.edit("Processing ...")
    sample_url = "https://api.screenshotlayer.com/api/capture?access_key={}&url={}&fullpage={}&viewport={}&format={}&force={}"
    input_str = event.pattern_match.group(1)
    # NOTE(review): requests.get is a blocking call inside an async handler;
    # it stalls the event loop for the duration of the capture.
    response_api = requests.get(sample_url.format(
        Config.SCREEN_SHOT_LAYER_ACCESS_KEY,
        input_str,
        "1",
        "2560x1440",
        "PNG",
        "1"
    ))
    # https://stackoverflow.com/a/23718458/4723940
    contentType = response_api.headers['content-type']
    if "image" in contentType:
        # Success: the API returned image bytes -- upload them as a document.
        with io.BytesIO(response_api.content) as screenshot_image:
            screenshot_image.name = "screencapture.png"
            try:
                await borg.send_file(
                    event.chat_id,
                    screenshot_image,
                    caption=input_str,
                    force_document=True,
                    reply_to=event.message.reply_to_msg_id
                )
                await event.delete()
            except Exception as e:
                await event.edit(str(e))
    else:
        # A non-image response means the API reported an error; show its text.
        await event.edit(response_api.text)
| 33.717391 | 126 | 0.612508 |
acebff8384e49ba82bf88d5b1483064712ae00da | 1,010 | py | Python | tests/test_utils.py | jalvaradosegura/changelog-pre-commit | f47871b4e3b06eee35f02a46fb9140cd496b2215 | [
"MIT"
] | null | null | null | tests/test_utils.py | jalvaradosegura/changelog-pre-commit | f47871b4e3b06eee35f02a46fb9140cd496b2215 | [
"MIT"
] | null | null | null | tests/test_utils.py | jalvaradosegura/changelog-pre-commit | f47871b4e3b06eee35f02a46fb9140cd496b2215 | [
"MIT"
] | null | null | null | import pytest
from changelog_pre_commit.utils import contains_changelog_file
@pytest.mark.parametrize(
"files, changelog_name",
[
(["a.py", "b.rb"], "changelog"),
(["a.py", "b.rb", "log"], "changelog"),
(["a.py", "changelog", "log"], "here_are_my_changes"),
],
)
def test_check_for_changelog_file_and_there_is_no_changelog(
files, changelog_name
):
result = contains_changelog_file(files, changelog_name)
assert result is False
@pytest.mark.parametrize(
"files, changelog_name",
[
(["a.py", "b.rb", "changelog"], "changelog"),
(["a.py", "Changelog.md", "log"], "changelog"),
(["a.py", "CHANGELOG.MD", "log"], "changelog"),
(["a.py", "my_changes.md", "log"], "my_changes"),
(["a.py", "b.js", "hi_changelog.py"], "changelog"),
],
)
def test_check_for_changelog_file_and_there_is_a_changelog(
files, changelog_name
):
result = contains_changelog_file(files, changelog_name)
assert result is True
| 28.055556 | 62 | 0.629703 |
acec00a03d96f2963d5069bf0947cccbb69b162d | 4,933 | py | Python | vega/search_space/networks/pytorch/ops/fmdunit.py | Lzc06/vega | 852d2f57e21caed11473ddc96397124561eacf8a | [
"MIT"
] | 12 | 2020-12-13T08:34:24.000Z | 2022-03-20T15:17:17.000Z | vega/search_space/networks/pytorch/ops/fmdunit.py | Lzc06/vega | 852d2f57e21caed11473ddc96397124561eacf8a | [
"MIT"
] | 3 | 2021-03-31T20:15:40.000Z | 2022-02-09T23:50:46.000Z | built-in/TensorFlow/Research/cv/image_classification/Darts_for_TensorFlow/automl/vega/search_space/networks/pytorch/ops/fmdunit.py | Huawei-Ascend/modelzoo | df51ed9c1d6dbde1deef63f2a037a369f8554406 | [
"Apache-2.0"
] | 2 | 2021-07-10T12:40:46.000Z | 2021-12-17T07:55:15.000Z | # -*- coding: utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""Defined FMD Unit."""
import torch
import torch.nn.functional as F
import numpy as np
from vega.search_space.networks.pytorch.network import Network
from vega.search_space.networks.net_utils import NetTypes
from vega.search_space.networks.network_factory import NetworkFactory
@NetworkFactory.register(NetTypes.OPS)
class FMDUnit(Network):
    """Basic class for feature map distortion.
    :param drop_prob: probability of an element to be dropped.
    :type drop_prob: float
    :param block_size: size of the block to drop.
    :type block_size: int
    """
    def __init__(self, drop_prob, block_size, args=None):
        """Init FMDUnit."""
        super(FMDUnit, self).__init__()
        self.drop_prob = drop_prob
        # weight_behind is expected to be attached externally (weights of the
        # layer following this unit); weight_record caches the statistic
        # derived from it on each training forward pass.
        self.weight_behind = None
        self.weight_record = None
        # NOTE(review): args must expose an ``alpha`` attribute (noise scale);
        # the ``args=None`` default would raise AttributeError here.
        self.alpha = args.alpha
        self.block_size = block_size
    def forward(self, x):
        """Forward."""
        # Identity at eval time: the distortion is a training-only regularizer.
        if not self.training:
            return x
        else:
            width = x.size(3)
            # Per-position seed rate rescaled (as in DropBlock) so the expected
            # dropped fraction matches drop_prob; assumes square feature maps
            # (x.size(2) == x.size(3)) -- TODO confirm.
            seed_drop_rate = self.drop_prob * width ** 2 / \
                self.block_size ** 2 / (width - self.block_size + 1) ** 2
            # Positions whose surrounding block fits fully inside the map.
            valid_block_center = torch.zeros(
                width, width, device=x.device).float()
            valid_block_center[int(self.block_size // 2):(width - (self.block_size - 1) // 2),
                               int(self.block_size // 2):(width - (self.block_size - 1) // 2)] = 1.0
            valid_block_center = valid_block_center.unsqueeze(0).unsqueeze(0)
            randnoise = torch.rand(x.shape, device=x.device)
            # Binary mask: 0 marks a sampled block seed, 1 everywhere else.
            block_pattern = (
                (1 - valid_block_center + float(1 - seed_drop_rate) + randnoise) >= 1).float()
            if self.block_size == width:
                # Block covers the whole map: drop entire channels at once.
                block_pattern = torch.min(block_pattern.view(x.size(0), x.size(1),
                                                             x.size(2) * x.size(3)), dim=2)[0].unsqueeze(-1).unsqueeze(
                    -1)
            else:
                # Grow each zero seed into a block_size x block_size zero patch
                # (min-pool implemented as a negated max-pool).
                block_pattern = -F.max_pool2d(input=-block_pattern, kernel_size=(self.block_size, self.block_size),
                                              stride=(1, 1), padding=self.block_size // 2)
                if self.block_size % 2 == 0:
                    block_pattern = block_pattern[:, :, :-1, :-1]
            # Fraction of kept activations, used to renormalize the output.
            percent_ones = block_pattern.sum() / float(block_pattern.numel())
            if not (self.weight_behind is None) and not (len(self.weight_behind) == 0):
                # Sign-randomized, spatially averaged magnitude of the next
                # layer's weights; scales the injected noise below.
                wtsize = self.weight_behind.size(3)
                weight_max = self.weight_behind.max(dim=0, keepdim=True)[0]
                sig = torch.ones(weight_max.size(), device=weight_max.device)
                sig[torch.rand(weight_max.size(), device=sig.device) < 0.5] = -1
                weight_max = weight_max * sig
                weight_mean = weight_max.mean(dim=(2, 3), keepdim=True)
                if wtsize == 1:
                    # 1x1 kernels get a smaller scale.
                    weight_mean = 0.1 * weight_mean
                self.weight_record = weight_mean
            # Detached input variance: noise amplitude tracks activation scale
            # without contributing gradients.
            var = torch.var(x).clone().detach()
            if not (self.weight_behind is None) and not (len(self.weight_behind) == 0):
                noise = self.alpha * weight_mean * \
                    (var ** 0.5) * torch.randn(*x.shape, device=x.device)
            else:
                noise = self.alpha * 0.01 * \
                    (var ** 0.5) * torch.randn(*x.shape, device=x.device)
            # Zero the dropped blocks, fill them with scaled Gaussian noise,
            # then renormalize by the kept fraction (dropout-style).
            x = x * block_pattern
            noise = noise * (1 - block_pattern)
            x = x + noise
            x = x / percent_ones
            return x
@NetworkFactory.register(NetTypes.OPS)
class LinearScheduler(Network):
    """Linearly anneal the drop rate of a wrapped FMD block.
    :param fmdblock: block whose ``drop_prob`` is scheduled.
    :type fmdblock: nn.Module
    :param start_value: drop rate start value.
    :type start_value: float
    :param stop_value: drop rate stop value.
    :type stop_value: float
    :param nr_steps: drop rate decay steps.
    :type nr_steps: int
    """
    def __init__(self, fmdblock, start_value, stop_value, nr_steps):
        super(LinearScheduler, self).__init__()
        self.fmdblock = fmdblock
        # Index of the next scheduled value step() will apply.
        self.i = 0
        # NOTE(review): int() truncates start/stop, so fractional drop rates
        # (e.g. 0.1) collapse to 0 here -- confirm callers pass integer values.
        self.dis_values = np.linspace(
            start=int(start_value), stop=int(stop_value), num=int(nr_steps))
    def forward(self, x):
        """Forward."""
        # Pure delegation; the schedule only changes the block's drop_prob.
        return self.fmdblock(x)
    def step(self):
        """Step."""
        # Write the next scheduled value into the wrapped block's drop_prob
        # until all nr_steps values have been consumed, then stay constant.
        if self.i < len(self.dis_values):
            self.fmdblock.drop_prob = self.dis_values[self.i]
            self.i += 1
| 41.453782 | 119 | 0.588283 |
acec01048185b5f2c707304b179f21876888e55c | 577 | py | Python | CreateHelmetsFunction.py | edgecdec/TobaccoAwarenessMC | 2fce9181ae3bb8b65dd4dcbfb4f3363e42e3dd90 | [
"MIT"
] | null | null | null | CreateHelmetsFunction.py | edgecdec/TobaccoAwarenessMC | 2fce9181ae3bb8b65dd4dcbfb4f3363e42e3dd90 | [
"MIT"
] | null | null | null | CreateHelmetsFunction.py | edgecdec/TobaccoAwarenessMC | 2fce9181ae3bb8b65dd4dcbfb4f3363e42e3dd90 | [
"MIT"
] | null | null | null | from Team import Team
def createHelmetsFunction(teams, fileLocation):
    """Write a Minecraft .mcfunction file that equips each team's helmet.

    For every team one ``/execute`` command is emitted that replaces the
    head slot of a player on that team with an unbreakable,
    curse-of-binding leather helmet dyed in the team's color.

    :param teams: sequence of team objects exposing getNumber() and getHexCode()
    :param fileLocation: path of the function file to (over)write
    """
    with open(fileLocation, "w") as outfile:
        # Iterate the teams directly instead of indexing via range(len(...)).
        for curTeam in teams:
            teamNum = curTeam.getNumber()
            teamColor = curTeam.getHexCode()
            # NOTE(review): @p targets only the nearest matching player, not
            # every member of the team -- confirm this is intended.
            command = f"execute as @a run item replace entity @p[team=team{teamNum}] armor.head with leather_helmet{{Unbreakable:1,Enchantments:[{{id:binding_curse,lvl:1}}],display:{{color:{teamColor}}}}} 1\n\n"
            outfile.write(f"#Put team{teamNum} helmet on\n")
            outfile.write(command)
acec01605708dfdd28ee123b8e6daae79153c973 | 114 | py | Python | TheBasics/conversion.py | 1302580MK/Udemy_Python | c7aef0645ae15a954c2356ba96288deaa087fb32 | [
"MIT"
] | null | null | null | TheBasics/conversion.py | 1302580MK/Udemy_Python | c7aef0645ae15a954c2356ba96288deaa087fb32 | [
"MIT"
] | null | null | null | TheBasics/conversion.py | 1302580MK/Udemy_Python | c7aef0645ae15a954c2356ba96288deaa087fb32 | [
"MIT"
] | null | null | null | # 26.05.2019
# Demo: float-to-int truncation and negative list indexing.
var1 = 6.7
awesomelist = ["test", 1, 2, "test2"]
# Print the float, its truncated int form, and the last list element,
# one value per line (same output as three separate print calls).
for shown in (var1, int(var1), awesomelist[-1]):
    print(shown)
acec018a81dca447d98e709983e99c12e4d07602 | 2,019 | py | Python | FRAS/Capture_Image.py | ciroiriarte/Face-Recognition-Attendance-System | b838bcdab25a0fca6bc64599418b5c63f194331e | [
"MIT"
] | 1 | 2022-03-03T16:19:25.000Z | 2022-03-03T16:19:25.000Z | FRAS/Capture_Image.py | ciroiriarte/Face-Recognition-Attendance-System | b838bcdab25a0fca6bc64599418b5c63f194331e | [
"MIT"
] | 3 | 2021-06-08T20:49:52.000Z | 2022-01-13T02:06:09.000Z | FRAS/Capture_Image.py | ciroiriarte/Face-Recognition-Attendance-System | b838bcdab25a0fca6bc64599418b5c63f194331e | [
"MIT"
] | null | null | null | import csv
import cv2
import os
# counting the numbers
def is_number(s):
try:
float(s)
return True
except ValueError:
pass
try:
import unicodedata
unicodedata.numeric(s)
return True
except (TypeError, ValueError):
pass
return False
# Capture webcam face images for a new student and record their details.
def takeImages():
    """Capture face images from the webcam for a new student.

    Prompts for a numeric ID and an alphabetic name, detects faces with a
    Haar cascade, saves up to 60 cropped grayscale face crops into the
    ``TrainingImage`` folder and appends the student's details to
    ``StudentDetails/StudentDetails.csv``.
    """
    Id = input("Enter Your Id: ")
    name = input("Enter Your Name: ")
    if is_number(Id) and name.isalpha():
        cam = cv2.VideoCapture(0)
        harcascadePath = "haarcascade_frontalface_default.xml"
        detector = cv2.CascadeClassifier(harcascadePath)
        sampleNum = 0
        while True:
            ret, img = cam.read()
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            faces = detector.detectMultiScale(gray, 1.3, 5)
            for (x, y, w, h) in faces:
                cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
                # incrementing sample number
                sampleNum = sampleNum + 1
                # saving the captured face in the dataset folder TrainingImage
                cv2.imwrite("TrainingImage" + os.sep + name + "." + Id + '.' +
                            str(sampleNum) + ".jpg", gray[y:y+h, x:x+w])
            # BUG FIX: display and termination checks must live in the
            # while-loop, not inside the faces loop -- otherwise 'break'
            # only exits the inner for-loop and the capture never stops,
            # and no frame/key handling happens when no face is detected.
            cv2.imshow('frame', img)
            # wait 100 ms for a keypress; 'q' aborts the capture early
            if cv2.waitKey(100) & 0xFF == ord('q'):
                break
            # stop once 60 samples have been collected (comment previously
            # said 100, but the threshold has always been 60)
            elif sampleNum > 60:
                break
        cam.release()
        cv2.destroyAllWindows()
        # NOTE(review): res is built but never displayed -- presumably it was
        # meant to be printed; confirm before removing or printing it.
        res = "Images Saved for ID : " + Id + " Name : " + name
        row = [Id, name]
        # 'with' closes the file automatically; no explicit close() needed
        with open("StudentDetails" + os.sep + "StudentDetails.csv", 'a+') as csvFile:
            writer = csv.writer(csvFile)
            writer.writerow(row)
    else:
        # Report every invalid field. The original printed a message only
        # when the *other* field happened to be valid, so entering two bad
        # values produced no feedback at all.
        if not is_number(Id):
            print("Enter Numeric ID")
        if not name.isalpha():
            print("Enter Alphabetical Name")
acec0212a2bb471887de12d8f138f32218750a0c | 1,781 | py | Python | dev-files/targets/RP2040/generators-unused-irq/endless-loop/unused_irq_generator.py | pierremolinaro/real-time-kernel-pi-pico | 581360dd1135e17fe0c4ddabbe74052a366de7d6 | [
"MIT"
] | 3 | 2021-05-05T19:40:01.000Z | 2021-05-08T06:40:35.000Z | dev-files/targets/RP2040/generators-unused-irq/endless-loop/unused_irq_generator.py | pierremolinaro/real-time-kernel-pi-pico | 581360dd1135e17fe0c4ddabbe74052a366de7d6 | [
"MIT"
] | null | null | null | dev-files/targets/RP2040/generators-unused-irq/endless-loop/unused_irq_generator.py | pierremolinaro/real-time-kernel-pi-pico | 581360dd1135e17fe0c4ddabbe74052a366de7d6 | [
"MIT"
] | null | null | null | #! /usr/bin/env python3
# -*- coding: UTF-8 -*-
#---------------------------------------------------------------------------------------------------
def asSeparator () :
  """Return a '//----' banner line (80 characters) terminated by a newline."""
  dashes = "-" * 78
  return "//" + dashes + "\n"
#---------------------------------------------------------------------------------------------------
def generateUnusedInterrupt (unusedInterruptName, unusedInterruptNumber) :
  """Return a (cppCode, sCode) pair for one unused interrupt.

  cppCode is always the empty string for this generator; sCode is an
  assembly stub whose handler branches to itself (endless loop), placed
  in its own .text.interrupt.<name> section.
  NOTE(review): unusedInterruptNumber is currently unused -- presumably
  kept for signature compatibility with sibling generators; confirm
  before removing.
  """
  # Banner comment identifying the unused interrupt.
  sCode = asSeparator ()
  sCode += "// INTERRUPT - UNUSED: " + unusedInterruptName + "\n"
  sCode += asSeparator () + "\n"
  # Section/type directives so the linker can locate and GC the stub.
  sCode += "  .section .text.interrupt." + unusedInterruptName + ", \"ax\", %progbits\n\n"
  sCode += "  .align 1\n"
  sCode += "  .type interrupt." + unusedInterruptName + ", %function\n"
  sCode += "  .global interrupt." + unusedInterruptName + "\n\n"
  # The handler body: an unconditional branch to its own label.
  sCode += "interrupt." + unusedInterruptName + ":\n"
  sCode += "  b interrupt." + unusedInterruptName + "\n\n"
  return ("", sCode)
#---------------------------------------------------------------------------------------------------
# ENTRY POINT
#---------------------------------------------------------------------------------------------------
def buildUnusedInterruptCode (unusedInterruptDictionary):
  """Generate C++ and assembly sources for every unused interrupt.

  unusedInterruptDictionary maps an interrupt name to its interrupt
  number. Returns a (cppFile, sFile) pair of concatenated source strings
  (cppFile stays empty as long as generateUnusedInterrupt emits no C++).
  """
  #------------------------------ Destination file strings
  cppFile = ""
  sFile = ""
  #------------------------------ Iterate on unused interrupt list
  # items() yields name and number together, avoiding the per-key lookup
  # of iterating keys() and indexing the dictionary again.
  for unusedInterruptName, unusedInterruptNumber in unusedInterruptDictionary.items () :
    (cppCode, sCode) = generateUnusedInterrupt (unusedInterruptName, unusedInterruptNumber)
    cppFile += cppCode
    sFile += sCode
  #------------------------------ Return
  return (cppFile, sFile)
#---------------------------------------------------------------------------------------------------
| 44.525 | 100 | 0.44301 |
acec02daecc95aa520de0f445ce3a8c4c032f163 | 10,249 | py | Python | Source Code DES/client.py | BryanYehuda/KriptografiETS | 11c3fe6b33065ba9998f0fd70b13ecaaf5d538f2 | [
"MIT"
] | 1 | 2021-06-11T13:40:45.000Z | 2021-06-11T13:40:45.000Z | Source Code DES/client.py | BryanYehuda/KriptografiETS | 11c3fe6b33065ba9998f0fd70b13ecaaf5d538f2 | [
"MIT"
] | null | null | null | Source Code DES/client.py | BryanYehuda/KriptografiETS | 11c3fe6b33065ba9998f0fd70b13ecaaf5d538f2 | [
"MIT"
] | null | null | null | import socket
from functools import reduce
def string2hex(s):
List=[]
for ch in s:
st2hx = hex(ord(ch)).replace('0x','')
if(len(st2hx)==1): st2hx = '0' + st2hx
List.append(st2hx)
return reduce(lambda i, j: i+j, List)
def xor(a: str, b: str) -> str:
return bin(int(a,2) ^ int(b,2))[2:].rjust(len(a), '0')
def split(msg: str, n: int) -> str:
return ' '.join(msg[i:i+n] for i in range(0, len(msg), n))
def shuffle(key: str, table: tuple) -> str:
return "".join(key[i-1] for i in table)
def hex_to_bin(h: str) -> str:
return "".join(map(lambda x: bin(int(x, 16))[2:].rjust(4, '0'), list(h)))
def bin_to_hex(b: str) -> str:
return "".join(map(lambda x: hex(int(x, 2))[2:], split(b, 4).split()))
class Feistel:
def __init__(self, left: str, right: str, keys: list, f, debug=True):
self.L = [left]
self.R = [right]
self.new_r = []
self.kr = []
self.boxes = []
self.keys = keys
self.f = f
self.p_table = (
16, 7, 20, 21,
29, 12, 28, 17,
1, 15, 23, 26,
5, 18, 31, 10,
2, 8, 24, 14,
32, 27, 3, 9,
19, 13, 30, 6,
22, 11, 4, 25
)
self.e_bit_selection_table = (
32, 1, 2, 3, 4, 5,
4, 5, 6, 7, 8, 9,
8, 9, 10, 11, 12, 13,
12, 13, 14, 15, 16, 17,
16, 17, 18, 19, 20, 21,
20, 21, 22, 23, 24, 25,
24, 25, 26, 27, 28, 29,
28, 29, 30, 31, 32, 1
)
self.SBox = (
(
(14, 4, 13, 1, 2, 15, 11, 8, 3, 10, 6, 12, 5, 9, 0, 7),
(0, 15, 7, 4, 14, 2, 13, 1, 10, 6, 12, 11, 9, 5, 3, 8),
(4, 1, 14, 8, 13, 6, 2, 11, 15, 12, 9, 7, 3, 10, 5, 0),
(15, 12, 8, 2, 4, 9, 1, 7, 5, 11, 3, 14, 10, 0, 6, 13)
),
(
(15, 1, 8, 14, 6, 11, 3, 4, 9, 7, 2, 13, 12, 0, 5, 10),
(3, 13, 4, 7, 15, 2, 8, 14, 12, 0, 1, 10, 6, 9, 11, 5),
(0, 14, 7, 11, 10, 4, 13, 1, 5, 8, 12, 6, 9, 3, 2, 15),
(13, 8, 10, 1, 3, 15, 4, 2, 11, 6, 7, 12, 0, 5, 14, 9)
),
(
(10, 0, 9, 14, 6, 3, 15, 5, 1, 13, 12, 7, 11, 4, 2, 8),
(13, 7, 0, 9, 3, 4, 6, 10, 2, 8, 5, 14, 12, 11, 15, 1),
(13, 6, 4, 9, 8, 15, 3, 0, 11, 1, 2, 12, 5, 10, 14, 7),
(1, 10, 13, 0, 6, 9, 8, 7, 4, 15, 14, 3, 11, 5, 2, 12)
),
(
(7, 13, 14, 3, 0, 6, 9, 10, 1, 2, 8, 5, 11, 12, 4, 15),
(13, 8, 11, 5, 6, 15, 0, 3, 4, 7, 2, 12, 1, 10, 14, 9),
(10, 6, 9, 0, 12, 11, 7, 13, 15, 1, 3, 14, 5, 2, 8, 4),
(3, 15, 0, 6, 10, 1, 13, 8, 9, 4, 5, 11, 12, 7, 2, 14)
),
(
(2, 12, 4, 1, 7, 10, 11, 6, 8, 5, 3, 15, 13, 0, 14, 9),
(14, 11, 2, 12, 4, 7, 13, 1, 5, 0, 15, 10, 3, 9, 8, 6),
(4, 2, 1, 11, 10, 13, 7, 8, 15, 9, 12, 5, 6, 3, 0, 14),
(11, 8, 12, 7, 1, 14, 2, 13, 6, 15, 0, 9, 10, 4, 5, 3)
),
(
(12, 1, 10, 15, 9, 2, 6, 8, 0, 13, 3, 4, 14, 7, 5, 11),
(10, 15, 4, 2, 7, 12, 9, 5, 6, 1, 13, 14, 0, 11, 3, 8),
(9, 14, 15, 5, 2, 8, 12, 3, 7, 0, 4, 10, 1, 13, 11, 6),
(4, 3, 2, 12, 9, 5, 15, 10, 11, 14, 1, 7, 6, 0, 8, 13)
),
(
(4, 11, 2, 14, 15, 0, 8, 13, 3, 12, 9, 7, 5, 10, 6, 1),
(13, 0, 11, 7, 4, 9, 1, 10, 14, 3, 5, 12, 2, 15, 8, 6),
(1, 4, 11, 13, 12, 3, 7, 14, 10, 15, 6, 8, 0, 5, 9, 2),
(6, 11, 13, 8, 1, 4, 10, 7, 9, 5, 0, 15, 14, 2, 3, 12)
),
(
(13, 2, 8, 4, 6, 15, 11, 1, 10, 9, 3, 14, 5, 0, 12, 7),
(1, 15, 13, 8, 10, 3, 7, 4, 12, 5, 6, 11, 0, 14, 9, 2),
(7, 11, 4, 1, 9, 12, 14, 2, 0, 6, 10, 13, 15, 3, 5, 8),
(2, 1, 14, 7, 4, 10, 8, 13, 15, 12, 9, 0, 3, 5, 6, 11)
)
)
self.debug = debug
def round16(self) -> str:
for i in range(16):
self.L.append(self.R[i])
self.new_r.append(shuffle(self.R[i], self.e_bit_selection_table))
self.kr.append(self.f(self.new_r[i], self.keys[i+1]))
self.boxes.append(split(self.kr[i], 6).split())
for idx, b in enumerate(self.boxes[-1]):
row = int(b[0]+b[5], 2)
col = int(b[1:5], 2)
self.boxes[-1][idx] = bin(self.SBox[idx][row][col])[2:].rjust(4, '0')
self.R.append(xor(self.L[i], shuffle(''.join(self.boxes[i]), self.p_table)))
if self.debug:
print("="*96+"\n")
print("Feistel\n")
for i in range(17):
print(f"L{i}\t\t: {split(self.L[i], 4)}")
print(f"R{i}\t\t: {split(self.R[i], 4)}")
print()
if i < 16:
print(f"#Round {i+1}")
print(f"E(R{i})\t\t: {split(self.new_r[i], 6)}")
print(f"K{i+1}\t\t: {split(self.keys[i+1], 6)}")
print(f"K{i+1}+E(R{i})\t: {split(self.kr[i], 6)}")
print(f"S-Box\t\t: {' '.join(self.boxes[i])}")
print(f"S-Box-P\t\t: {split(shuffle(''.join(self.boxes[i]), self.p_table), 4)}")
return self.R[16] + self.L[16]
class DES:
    """DES block cipher over hex-string blocks.

    *key* is a 16-hex-digit (64-bit) key; the 16 round subkeys are
    derived once in the constructor. encrypt/decrypt operate on one
    64-bit block given as a hex string and return a hex string.
    """

    def __init__(self, key: str, debug=False):
        self.key = hex_to_bin(key)
        # Permuted choice 1: selects 56 key bits (drops parity bits).
        self.pc1 = (
            57, 49, 41, 33, 25, 17, 9,
            1, 58, 50, 42, 34, 26, 18,
            10, 2, 59, 51, 43, 35, 27,
            19, 11, 3, 60, 52, 44, 36,
            63, 55, 47, 39, 31, 23, 15,
            7, 62, 54, 46, 38, 30, 22,
            14, 6, 61, 53, 45, 37, 29,
            21, 13, 5, 28, 20, 12, 4,
        )
        # Permuted choice 2: compresses 56 rotated key bits to a 48-bit subkey.
        self.pc2 = (
            14, 17, 11, 24, 1, 5,
            3, 28, 15, 6, 21, 10,
            23, 19, 12, 4, 26, 8,
            16, 7, 27, 20, 13, 2,
            41, 52, 31, 37, 47, 55,
            30, 40, 51, 45, 33, 48,
            44, 49, 39, 56, 34, 53,
            46, 42, 50, 36, 29, 32,
        )
        # Initial permutation applied to the 64-bit input block.
        self.ip_table = (
            58, 50, 42, 34, 26, 18, 10, 2,
            60, 52, 44, 36, 28, 20, 12, 4,
            62, 54, 46, 38, 30, 22, 14, 6,
            64, 56, 48, 40, 32, 24, 16, 8,
            57, 49, 41, 33, 25, 17, 9, 1,
            59, 51, 43, 35, 27, 19, 11, 3,
            61, 53, 45, 37, 29, 21, 13, 5,
            63, 55, 47, 39, 31, 23, 15, 7,
        )
        # Left-rotation amounts for the key halves, one entry per round.
        self.shift_table = (1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 1)
        # Inverse of ip_table, applied after the 16 rounds.
        self.reverse_ip_table = (
            40, 8, 48, 16, 56, 24, 64, 32,
            39, 7, 47, 15, 55, 23, 63, 31,
            38, 6, 46, 14, 54, 22, 62, 30,
            37, 5, 45, 13, 53, 21, 61, 29,
            36, 4, 44, 12, 52, 20, 60, 28,
            35, 3, 43, 11, 51, 19, 59, 27,
            34, 2, 42, 10, 50, 18, 58, 26,
            33, 1, 41, 9, 49, 17, 57, 25
        )
        self.debug = debug
        # K[0] is the 56-bit permuted key; K[1..16] become the round subkeys.
        self.K = [shuffle(self.key, self.pc1)]
        self.C = [self.K[0][:28]]  # left key half per round
        self.D = [self.K[0][28:]]  # right key half per round
        self.generate_subkeys()

    def encrypt(self, msg: str) -> str:
        """Encrypt one 64-bit block given as a 16-hex-digit string."""
        msg = hex_to_bin(msg)
        # print(f"msg: {split(msg, 8)}\nkey: {split(self.key, 8)}")
        msg = shuffle(msg, self.ip_table)
        if self.debug:
            print(f"shuffled msg: {split(msg, 8)}\n")
            print("="*96+"\n")
            print(f"K0\t: {split(self.K[0], 7)}")
            print(f"C0\t: {split(self.C[0], 7)}")
            print(f"D0\t: {split(self.D[0], 7)}")
            print()
            for i in range(1, 17):
                print(f"C{i}\t: {split(self.C[i], 7)}")
                print(f"D{i}\t: {split(self.D[i], 7)}")
                print(f"C{i}D{i}\t: {split(self.C[i]+self.D[i], 7)}")
                print(f"K{i}\t: {split(self.K[i], 6)}")
                print()
        left = msg[:32]
        right = msg[32:]
        feistel = Feistel(left, right, self.K, xor, self.debug)
        res = feistel.round16()
        if self.debug:
            print(f"R16L16\t\t: {split(res, 8)}")
        # Final (inverse initial) permutation produces the ciphertext.
        res = shuffle(res, self.reverse_ip_table)
        if self.debug:
            print(f"Reverse IP\t: {split(res, 8)}")
            print()
            print("="*96)
            print()
        return bin_to_hex(res)

    def decrypt(self, ciphertext) -> str:
        """Decrypt one 64-bit block: same network with subkeys reversed."""
        ciphertext = hex_to_bin(ciphertext)
        ciphertext = shuffle(ciphertext, self.ip_table)
        left = ciphertext[:32]
        right = ciphertext[32:]
        # Subkeys K16..K1 (K[0] kept as the unused placeholder slot).
        feistel = Feistel(left, right, [self.K[0]]+self.K[1:][::-1], xor, self.debug)
        res = feistel.round16()
        res = shuffle(res, self.reverse_ip_table)
        return bin_to_hex(res)

    def generate_subkeys(self) -> None:
        """Derive the 16 round subkeys by rotating and compressing key halves."""
        for i in range(16):
            shift = self.shift_table[i]
            # Rotate each 28-bit half left by this round's shift amount.
            self.C.append(self.C[i][shift:]+self.C[i][:shift])
            self.D.append(self.D[i][shift:]+self.D[i][:shift])
        for i in range(16):
            # PC-2 compresses the joined halves into a 48-bit subkey.
            self.K.append(shuffle(self.C[i+1] + self.D[i+1], self.pc2))
# msg = "0123456712332332"
# Protocol constants -- must match the server's values.
HEADER = 64                          # width of the ASCII length prefix, bytes
PORT = 5051
FORMAT = 'utf-8'
DISCONNECT_MESSAGE = "!DISCONNECT"   # sentinel telling the server to close
# Server is assumed to run on this same machine.
SERVER = socket.gethostbyname(socket.gethostname())
ADDR = (SERVER, PORT)
# NOTE: the TCP connection is opened at import time.
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect(ADDR)
def send(msg):
    """Send *msg* to the server with a fixed-width length prefix.

    The payload length is sent first as an ASCII field padded with
    spaces to HEADER bytes, followed by the payload itself; the
    server's reply is printed to stdout.
    """
    payload = msg.encode(FORMAT)
    length_field = str(len(payload)).encode(FORMAT)
    length_field += b' ' * (HEADER - len(length_field))
    client.send(length_field)
    client.send(payload)
    print(client.recv(2048).decode(FORMAT))
def pad(text):
    """Right-pad *text* with spaces to a multiple of 8 characters.

    8 characters = one 64-bit DES block. Computes the padding in one
    step instead of the previous one-space-at-a-time while loop, which
    rebuilt the string on every iteration.
    """
    return text + ' ' * (-len(text) % 8)
# Interactive loop: read a line, DES-encrypt it, send it to the server.
# NOTE(review): typing 'quit' is still padded, encrypted and sent once
# before the loop exits -- confirm the server expects that.
x = ' '
while x != 'quit':
    # Read the next message from the user (the key is fixed per session).
    key = "myChiper"
    x = ' '
    x = input()
    pad_text = pad(x)
    msg = string2hex(pad_text)
    # Key is re-hexed every iteration; it never changes, so this is
    # redundant but harmless.
    key = string2hex(key)
    des = DES(key, False)
    # NOTE(review): DES.encrypt permutes only 64 bits, so input longer
    # than 8 characters appears to be truncated -- verify against server.
    enc = des.encrypt(msg).upper()
    print(f"encrypted: {enc}")
    send(enc)
# Tell the server we are done.
send(DISCONNECT_MESSAGE)
acec02f9fb136ad3624baa32a8aec39d26b4f0c7 | 6,669 | py | Python | spikeinterface/toolkit/preprocessing/phase_shift.py | vncntprvst/spikeinterface | dd5ae94f85fe5d9082b45321d2c96ba316eb4b77 | [
"MIT"
] | null | null | null | spikeinterface/toolkit/preprocessing/phase_shift.py | vncntprvst/spikeinterface | dd5ae94f85fe5d9082b45321d2c96ba316eb4b77 | [
"MIT"
] | null | null | null | spikeinterface/toolkit/preprocessing/phase_shift.py | vncntprvst/spikeinterface | dd5ae94f85fe5d9082b45321d2c96ba316eb4b77 | [
"MIT"
] | null | null | null | import numpy as np
from scipy.fft import rfft, irfft
from spikeinterface.core.core_tools import define_function_from_class
from .tools import get_chunk_with_margin
from .basepreprocessor import BasePreprocessor, BasePreprocessorSegment
class PhaseShiftRecording(BasePreprocessor):
"""
This apply a phase shift to a recording to cancel the small sampling
delay across for some recording system.
This is particularly relevant for neuropixel recording.
This code is inspired from from IBL lib.
https://github.com/int-brain-lab/ibllib/blob/master/ibllib/dsp/fourier.py
and also the one from spikeglx
https://billkarsh.github.io/SpikeGLX/help/dmx_vs_gbl/dmx_vs_gbl/
Parameters
----------
recording: Recording
The recording. It need to have "inter_sample_shift" in properties.
margin_ms: float (default 40)
margin in ms for computation
40ms ensure a very small error when doing chunk processing
inter_sample_shift: None or numpy array
If "inter_sample_shift" is not in recording.properties
we can externaly provide one.
Returns
-------
filter_recording: PhaseShiftRecording
The phase shifted recording object
"""
name = 'phase_shift'
def __init__(self, recording, margin_ms=40., inter_sample_shift=None, dtype=None):
if inter_sample_shift is None:
assert "inter_sample_shift" in recording.get_property_keys(), "'inter_sample_shift' is not a property!"
sample_shifts = recording.get_property("inter_sample_shift")
else:
assert len(inter_sample_shift) == recording.get_num_channels(), "sample "
sample_shifts = np.asarray(inter_sample_shift)
margin = int(margin_ms * recording.get_sampling_frequency() / 1000.)
force_dtype_back = None
if dtype is None:
if recording.get_dtype().kind in ('i', 'u'):
# because of the tapper on margin we need to force the computation to float32
# and then go back to the original buffer unless dtype is explicitly forced
dtype = np.dtype('float32')
force_dtype_back = recording.get_dtype()
BasePreprocessor.__init__(self, recording, dtype=dtype)
for parent_segment in recording._recording_segments:
rec_segment = PhaseShiftRecordingSegment(parent_segment, sample_shifts, margin, dtype, force_dtype_back)
self.add_recording_segment(rec_segment)
# for dumpability
if inter_sample_shift is not None:
inter_sample_shift = list(inter_sample_shift)
self._kwargs = dict(recording=recording.to_dict(), margin_ms=float(margin_ms),
inter_sample_shift=inter_sample_shift)
class PhaseShiftRecordingSegment(BasePreprocessorSegment):
def __init__(self, parent_recording_segment, sample_shifts, margin, dtype, force_dtype_back):
BasePreprocessorSegment.__init__(self, parent_recording_segment)
self.sample_shifts = sample_shifts
self.margin = margin
self.dtype = dtype
self.force_dtype_back = force_dtype_back
def get_traces(self, start_frame, end_frame, channel_indices):
if start_frame is None:
start_frame = 0
if end_frame is None:
end_frame = self.get_num_samples()
# this return a copy with margin + taper on border always
traces_chunk, left_margin, right_margin = get_chunk_with_margin(self.parent_recording_segment,
start_frame, end_frame, channel_indices, self.margin,
dtype=self.dtype, add_zeros=True, window_on_margin=True)
traces_shift = apply_fshift_sam(traces_chunk, self.sample_shifts, axis=0)
# traces_shift = apply_fshift_ibl(traces_chunk, self.sample_shifts, axis=0)
traces_shift = traces_shift[left_margin:-right_margin, :]
if self.force_dtype_back:
traces_shift = traces_shift.astype(self.force_dtype_back)
return traces_shift
# function for API
phase_shift = define_function_from_class(source_class=PhaseShiftRecording, name="phase_shift")
def apply_fshift_sam(sig, sample_shifts, axis=0):
"""
Apply the shift on a traces buffer.
"""
n = sig.shape[axis]
sig_f = np.fft.rfft(sig, axis=axis)
if n % 2 == 0:
# n is even sig_f[-1] is nyquist and so pi
omega = np.linspace(0, np.pi, sig_f.shape[axis])
else:
# n is odd sig_f[-1] is exactly nyquist!! we need (n-1) / n factor!!
omega = np.linspace(0, np.pi * (n - 1) / n, sig_f.shape[axis])
# broadcast omega and sample_shifts depend the axis
if axis == 0:
shifts = omega[:, np.newaxis] * sample_shifts[np.newaxis, :]
else:
shifts = omega[np.newaxis, :] * sample_shifts[:, np.newaxis]
sig_shift = np.fft.irfft(sig_f * np.exp(- 1j * shifts), n=n, axis=axis)
return sig_shift
apply_fshift = apply_fshift_sam
def apply_fshift_ibl(w, s, axis=0, ns=None):
"""
Function from IBLIB: https://github.com/int-brain-lab/ibllib/blob/master/ibllib/dsp/fourier.py
Shifts a 1D or 2D signal in frequency domain, to allow for accurate non-integer shifts
:param w: input signal (if complex, need to provide ns too)
:param s: shift in samples, positive shifts forward
:param axis: axis along which to shift (last axis by default)
:param axis: axis along which to shift (last axis by default)
:param ns: if a rfft frequency domain array is provided, give a number of samples as there
is an ambiguity
:return: w
"""
# create a vector that contains a 1 sample shift on the axis
ns = ns or w.shape[axis]
shape = np.array(w.shape) * 0 + 1
shape[axis] = ns
dephas = np.zeros(shape)
# np.put(dephas, 1, 1)
dephas[1] = 1
dephas = rfft(dephas, axis=axis)
# fft the data along the axis and the dephas
do_fft = np.invert(np.iscomplexobj(w))
if do_fft:
W = rfft(w, axis=axis)
else:
W = w
# if multiple shifts, broadcast along the other dimensions, otherwise keep a single vector
if not np.isscalar(s):
s_shape = np.array(w.shape)
s_shape[axis] = 1
s = s.reshape(s_shape)
# apply the shift (s) to the fft angle to get the phase shift and broadcast
W *= np.exp(1j * np.angle(dephas) * s)
if do_fft:
W = np.real(irfft(W, ns, axis=axis))
W = W.astype(w.dtype)
return W
| 40.418182 | 129 | 0.65842 |
acec031f78554b7336f2bcd300b6fbd22ac1594d | 496 | py | Python | setup.py | opensourcepj/ndbutil | 784476f0d148bb2e8c93575e2c96c9351e3be5bf | [
"MIT"
] | null | null | null | setup.py | opensourcepj/ndbutil | 784476f0d148bb2e8c93575e2c96c9351e3be5bf | [
"MIT"
] | null | null | null | setup.py | opensourcepj/ndbutil | 784476f0d148bb2e8c93575e2c96c9351e3be5bf | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os
from distutils.core import setup
from distutils.command.clean import clean
from distutils.command.install import install
from distutils.dir_util import remove_tree
class InstallNdbUtils(install):
def run(self):
install.run(self)
remove_tree('build')
setup(name='ndbutils',
version='0.31',
description="Helper functions for working with gae ndb.",
py_modules=['ndbutils'],
cmdclass={'install': InstallNdbUtils}
)
| 24.8 | 63 | 0.711694 |
acec04847cc0bcd53f295b8ea106a84d21006bee | 10,848 | py | Python | modelzoo/utils/param_flops.py | LicharYuan/modelzoo.pytorch | c4f5fbcfa387c514b007ea889670b6fbdfef0a57 | [
"MIT"
] | null | null | null | modelzoo/utils/param_flops.py | LicharYuan/modelzoo.pytorch | c4f5fbcfa387c514b007ea889670b6fbdfef0a57 | [
"MIT"
] | null | null | null | modelzoo/utils/param_flops.py | LicharYuan/modelzoo.pytorch | c4f5fbcfa387c514b007ea889670b6fbdfef0a57 | [
"MIT"
] | null | null | null | import torch.nn as nn
import torch
import numpy as np
"""match offical version flops and parameters"""
def get_model_complexity_info(model, input_res, print_per_layer_stat=True, as_strings=True):
assert type(input_res) is tuple
assert len(input_res) == 3
batch = torch.FloatTensor(1, *input_res)
flops_model = add_flops_counting_methods(model)
flops_model.eval().start_flops_count()
out = flops_model(batch)
if print_per_layer_stat:
print_model_with_flops(flops_model, units=None, precision=6)
flops_count = flops_model.compute_average_flops_cost()
params_count = get_model_parameters_number(flops_model)
flops_model.stop_flops_count()
if as_strings:
return flops_to_string(flops_count), params_to_string(params_count)
return flops_count, params_count
def flops_to_string(flops, units='GMac', precision=6):
if units is None:
if flops // 10**9 > 0:
return str(round(flops / 10.**9, precision)) + ' GMac'
elif flops // 10**6 > 0:
return str(round(flops / 10.**6, precision)) + ' MMac'
elif flops // 10**3 > 0:
return str(round(flops / 10.**3, precision)) + ' KMac'
else:
return str(flops) + ' Mac'
else:
if units == 'GMac':
return str(round(flops / 10.**9, precision)) + ' ' + units
elif units == 'MMac':
return str(round(flops / 10.**6, precision)) + ' ' + units
elif units == 'KMac':
return str(round(flops / 10.**3, precision)) + ' ' + units
else:
return str(flops) + ' Mac'
def params_to_string(params_num):
if params_num // 10 ** 6 > 0:
return str(round(params_num / 10 ** 6, 4)) + ' M'
elif params_num // 10 ** 3:
return str(round(params_num / 10 ** 3, 4)) + ' k'
def print_model_with_flops(model, units='GMac', precision=6):
total_flops = model.compute_average_flops_cost()
def accumulate_flops(self):
if is_supported_instance(self):
return self.__flops__ / model.__batch_counter__
else:
sum = 0
for m in self.children():
sum += m.accumulate_flops()
return sum
def flops_repr(self):
accumulated_flops_cost = self.accumulate_flops()
return ', '.join([flops_to_string(accumulated_flops_cost, units=units, precision=precision),
'{:.3%} MACs'.format(accumulated_flops_cost / total_flops),
self.original_extra_repr()])
def add_extra_repr(m):
m.accumulate_flops = accumulate_flops.__get__(m)
flops_extra_repr = flops_repr.__get__(m)
if m.extra_repr != flops_extra_repr:
m.original_extra_repr = m.extra_repr
m.extra_repr = flops_extra_repr
assert m.extra_repr != m.original_extra_repr
def del_extra_repr(m):
if hasattr(m, 'original_extra_repr'):
m.extra_repr = m.original_extra_repr
del m.original_extra_repr
if hasattr(m, 'accumulate_flops'):
del m.accumulate_flops
model.apply(add_extra_repr)
print(model)
model.apply(del_extra_repr)
def get_model_parameters_number(model):
params_num = sum(p.numel() for p in model.parameters() if p.requires_grad)
return params_num
def add_flops_counting_methods(net_main_module):
# adding additional methods to the existing module object,
# this is done this way so that each function has access to self object
net_main_module.start_flops_count = start_flops_count.__get__(net_main_module)
net_main_module.stop_flops_count = stop_flops_count.__get__(net_main_module)
net_main_module.reset_flops_count = reset_flops_count.__get__(net_main_module)
net_main_module.compute_average_flops_cost = compute_average_flops_cost.__get__(net_main_module)
net_main_module.reset_flops_count()
# Adding variables necessary for masked flops computation
net_main_module.apply(add_flops_mask_variable_or_reset)
return net_main_module
def compute_average_flops_cost(self):
"""
A method that will be available after add_flops_counting_methods() is called
on a desired net object.
Returns current mean flops consumption per image.
"""
batches_count = self.__batch_counter__
flops_sum = 0
for module in self.modules():
if is_supported_instance(module):
flops_sum += module.__flops__
return flops_sum / batches_count
def start_flops_count(self):
"""
A method that will be available after add_flops_counting_methods() is called
on a desired net object.
Activates the computation of mean flops consumption per image.
Call it before you run the network.
"""
add_batch_counter_hook_function(self)
self.apply(add_flops_counter_hook_function)
def stop_flops_count(self):
"""
A method that will be available after add_flops_counting_methods() is called
on a desired net object.
Stops computing the mean flops consumption per image.
Call whenever you want to pause the computation.
"""
remove_batch_counter_hook_function(self)
self.apply(remove_flops_counter_hook_function)
def reset_flops_count(self):
"""
A method that will be available after add_flops_counting_methods() is called
on a desired net object.
Resets statistics computed so far.
"""
add_batch_counter_variables_or_reset(self)
self.apply(add_flops_counter_variable_or_reset)
def add_flops_mask(module, mask):
def add_flops_mask_func(module):
if isinstance(module, torch.nn.Conv2d):
module.__mask__ = mask
module.apply(add_flops_mask_func)
def remove_flops_mask(module):
module.apply(add_flops_mask_variable_or_reset)
# ---- Internal functions
def is_supported_instance(module):
if isinstance(module, (torch.nn.Conv2d, torch.nn.ReLU, torch.nn.PReLU, torch.nn.ELU, \
torch.nn.LeakyReLU, torch.nn.ReLU6, torch.nn.Linear, \
torch.nn.MaxPool2d, torch.nn.AvgPool2d, torch.nn.BatchNorm2d, \
torch.nn.Upsample, nn.AdaptiveMaxPool2d, nn.AdaptiveAvgPool2d)):
return True
return False
def empty_flops_counter_hook(module, input, output):
module.__flops__ += 0
def upsample_flops_counter_hook(module, input, output):
output_size = output[0]
batch_size = output_size.shape[0]
output_elements_count = batch_size
for val in output_size.shape[1:]:
output_elements_count *= val
module.__flops__ += output_elements_count
def relu_flops_counter_hook(module, input, output):
active_elements_count = output.numel()
module.__flops__ += active_elements_count
def linear_flops_counter_hook(module, input, output):
input = input[0]
batch_size = input.shape[0]
module.__flops__ += batch_size * input.shape[1] * output.shape[1]
def pool_flops_counter_hook(module, input, output):
input = input[0]
module.__flops__ += np.prod(input.shape)
def bn_flops_counter_hook(module, input, output):
module.affine
input = input[0]
batch_flops = np.prod(input.shape)
if module.affine:
batch_flops *= 2
module.__flops__ += batch_flops
def conv_flops_counter_hook(conv_module, input, output):
# Can have multiple inputs, getting the first one
input = input[0]
batch_size = input.shape[0]
output_height, output_width = output.shape[2:]
kernel_height, kernel_width = conv_module.kernel_size
in_channels = conv_module.in_channels
out_channels = conv_module.out_channels
groups = conv_module.groups
filters_per_channel = out_channels // groups
conv_per_position_flops = kernel_height * kernel_width * in_channels * filters_per_channel
active_elements_count = batch_size * output_height * output_width
if conv_module.__mask__ is not None:
# (b, 1, h, w)
flops_mask = conv_module.__mask__.expand(batch_size, 1, output_height, output_width)
active_elements_count = flops_mask.sum()
overall_conv_flops = conv_per_position_flops * active_elements_count
bias_flops = 0
if conv_module.bias is not None:
bias_flops = out_channels * active_elements_count
overall_flops = overall_conv_flops + bias_flops
conv_module.__flops__ += overall_flops
def batch_counter_hook(module, input, output):
# Can have multiple inputs, getting the first one
input = input[0]
batch_size = input.shape[0]
module.__batch_counter__ += batch_size
def add_batch_counter_variables_or_reset(module):
module.__batch_counter__ = 0
def add_batch_counter_hook_function(module):
if hasattr(module, '__batch_counter_handle__'):
return
handle = module.register_forward_hook(batch_counter_hook)
module.__batch_counter_handle__ = handle
def remove_batch_counter_hook_function(module):
if hasattr(module, '__batch_counter_handle__'):
module.__batch_counter_handle__.remove()
del module.__batch_counter_handle__
def add_flops_counter_variable_or_reset(module):
if is_supported_instance(module):
module.__flops__ = 0
def add_flops_counter_hook_function(module):
if is_supported_instance(module):
if hasattr(module, '__flops_handle__'):
return
if isinstance(module, torch.nn.Conv2d):
handle = module.register_forward_hook(conv_flops_counter_hook)
elif isinstance(module, (torch.nn.ReLU, torch.nn.PReLU, torch.nn.ELU, \
torch.nn.LeakyReLU, torch.nn.ReLU6)):
handle = module.register_forward_hook(relu_flops_counter_hook)
elif isinstance(module, torch.nn.Linear):
handle = module.register_forward_hook(linear_flops_counter_hook)
elif isinstance(module, (torch.nn.AvgPool2d, torch.nn.MaxPool2d, nn.AdaptiveMaxPool2d, \
nn.AdaptiveAvgPool2d)):
handle = module.register_forward_hook(pool_flops_counter_hook)
elif isinstance(module, torch.nn.BatchNorm2d):
handle = module.register_forward_hook(bn_flops_counter_hook)
elif isinstance(module, torch.nn.Upsample):
handle = module.register_forward_hook(upsample_flops_counter_hook)
else:
handle = module.register_forward_hook(empty_flops_counter_hook)
module.__flops_handle__ = handle
def remove_flops_counter_hook_function(module):
if is_supported_instance(module):
if hasattr(module, '__flops_handle__'):
module.__flops_handle__.remove()
del module.__flops_handle__
# --- Masked flops counting
# Also being run in the initialization
def add_flops_mask_variable_or_reset(module):
if is_supported_instance(module):
module.__mask__ = None
| 34.22082 | 100 | 0.696995 |
acec048e333f1664ad489da77b8837aac15c03d4 | 1,480 | py | Python | netbox/settings/staging.py | moonbirddk/networked-toolbox | b059b77bfda173794b3cad55874cb06edc70d0e1 | [
"BSD-2-Clause-FreeBSD"
] | 2 | 2016-09-02T12:45:31.000Z | 2018-02-10T10:18:11.000Z | netbox/settings/staging.py | Socialsquare/networked-toolbox | b059b77bfda173794b3cad55874cb06edc70d0e1 | [
"BSD-2-Clause-FreeBSD"
] | 17 | 2020-03-24T15:58:05.000Z | 2022-02-10T08:08:57.000Z | netbox/settings/staging.py | moonbirddk/networked-toolbox | b059b77bfda173794b3cad55874cb06edc70d0e1 | [
"BSD-2-Clause-FreeBSD"
] | 1 | 2018-01-05T08:02:18.000Z | 2018-01-05T08:02:18.000Z | import os
import dj_database_url
from .base import *
from .s3_settings import *
# Staging settings: DEBUG stays on so template errors surface on the
# staging host (do not reuse for production).
DEBUG = True
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG
SITE_ID = 1
# Host names this deployment answers to.
DOMAIN = 'staging.networkedtoolbox.com'
ALLOWED_HOSTS = [DOMAIN, 'network-toolbox-staging.herokuapp.com']
# We're using S3 for media and static storage.
# STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
# Database configured entirely from the DATABASE_URL environment variable
# via dj-database-url.
DATABASES = {
    'default': dj_database_url.config()
}
#DATABASES['default']['CONN_MAX_AGE'] = 5
# Pool tuning — presumably consumed by the app's DB pooling setup; it is
# not referenced elsewhere in this settings module.
DATABASE_POOL_ARGS = {
    'max_overflow': 10,
    'pool_size': 5,
    'recycle': 300
}
# Secrets and mail credentials come from the environment; a missing
# variable fails fast with a KeyError at start-up.
SECRET_KEY = os.environ['DJANGO_SECRET_KEY']
DEFAULT_FROM_EMAIL = 'Reflection Action <info@{0}>'.format(DOMAIN)
SERVER_EMAIL = 'Reflection Action <alerts@{0}>'.format(DOMAIN)
# Recipients of Django error e-mails.
ADMINS = (
    ('Kræn', 'kraen+network-toolbox-staging@socialsquare.dk', ),
    ('Andreas', 'andreas+network-toolbox-staging@socialsquare.dk', ),
)
SITE_ADMIN_EMAIL = 'networkedtoolbox@actionaid.dk'
# Outgoing mail goes through Mailgun's SMTP relay.
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = os.environ['MAILGUN_SMTP_SERVER']
EMAIL_HOST_USER = os.environ['MAILGUN_SMTP_LOGIN']
EMAIL_HOST_PASSWORD = os.environ['MAILGUN_SMTP_PASSWORD']
EMAIL_PORT = os.environ['MAILGUN_SMTP_PORT']
EMAIL_USE_TLS = True
# Full-text search backend: Elasticsearch, URL supplied by the BONSAI_URL
# environment variable.
HAYSTACK_CONNECTIONS = {
    'default': {
        'ENGINE': 'haystack.backends.elasticsearch_backend.ElasticsearchSearchEngine',
        'URL': os.environ['BONSAI_URL'],
        'INDEX_NAME': 'haystack',
    },
}
GOOGLE_ANALYTICS_ID = 'UA-71138728-2'
| 27.407407 | 86 | 0.732432 |
acec04ec501bc8d604acf331a8afdb8f1da467b8 | 1,320 | py | Python | shape_calculator.py | RickyDonne/Polygon-area-calculator | 5f7238cb40e12028aadc9191b0face38c5eeda2d | [
"MIT"
] | null | null | null | shape_calculator.py | RickyDonne/Polygon-area-calculator | 5f7238cb40e12028aadc9191b0face38c5eeda2d | [
"MIT"
] | null | null | null | shape_calculator.py | RickyDonne/Polygon-area-calculator | 5f7238cb40e12028aadc9191b0face38c5eeda2d | [
"MIT"
class Rectangle:
    """A width x height rectangle with area, perimeter, diagonal and
    ASCII-picture helpers."""

    def __init__(self, width, height):
        self.width = width
        self.height = height

    def set_width(self, width):
        self.width = width

    def set_height(self, height):
        self.height = height

    def get_area(self):
        """Return width * height."""
        return self.height * self.width

    def get_perimeter(self):
        """Return the perimeter 2*w + 2*h."""
        return 2 * self.width + 2 * self.height

    def get_diagonal(self):
        """Return the diagonal length sqrt(w**2 + h**2)."""
        return (self.width ** 2 + self.height ** 2) ** .5

    def get_picture(self):
        """Return the shape drawn with '*' characters, one row per line,
        or a refusal string when either side exceeds 50."""
        if self.width > 50 or self.height > 50:
            return "Too big for picture."
        row = "*" * self.width + "\n"
        return row * self.height

    def get_amount_inside(self, other):
        """Return how many copies of *other* fit inside without rotation."""
        fit_across = self.width // other.width
        fit_down = self.height // other.height
        return fit_across * fit_down

    def __str__(self):
        return f"Rectangle(width={self.width}, height={self.height})"
class Square(Rectangle):
    """A square: a rectangle whose width and height always stay equal."""

    def __init__(self, side):
        self.set_side(side)

    def set_side(self, side):
        # Keep both dimensions in lockstep.
        self.width = side
        self.height = side

    def set_height(self, side):
        # Resizing either dimension resizes the whole square.
        self.set_side(side)

    def set_width(self, side):
        self.set_side(side)

    def __str__(self):
        return f"Square(side={self.width})"
| 24 | 76 | 0.573485 |
acec05e1f9cdd7e206c0328a10db33f699662b36 | 13,510 | py | Python | base/lib/pythonbin/pymongo/operations.py | threefoldtech/sandbox_osx | e2a5ea812c3789dea40113719dbad6d6ee7cd720 | [
"Apache-2.0"
] | 46 | 2019-03-01T02:19:18.000Z | 2021-12-18T12:37:02.000Z | venv/lib/python3.6/site-packages/pymongo/operations.py | HeyWeiPan/vnpy_crypto | 844381797a475a01c05a4e162592a5a6e3a48032 | [
"MIT"
] | 9 | 2020-03-24T16:56:41.000Z | 2022-03-11T23:45:08.000Z | env/lib/python3.6/site-packages/pymongo/operations.py | bcss-pm/incidents | 927a102104b5718fe118bceb307d3cd633d6699b | [
"MIT"
] | 67 | 2018-10-29T09:50:49.000Z | 2022-01-06T07:35:56.000Z | # Copyright 2015-present MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Operation class definitions."""
from pymongo.common import validate_boolean, validate_is_mapping, validate_list
from pymongo.collation import validate_collation_or_none
from pymongo.helpers import _gen_index_name, _index_document, _index_list
class InsertOne(object):
    """A single-document insert, for use with
    :meth:`~pymongo.collection.Collection.bulk_write`."""

    __slots__ = ("_doc",)

    def __init__(self, document):
        """Create an InsertOne instance.

        :Parameters:
          - `document`: The document to insert. If the document is missing
            an _id field one will be added.
        """
        self._doc = document

    def _add_to_bulk(self, bulkobj):
        """Queue this insert on the given _Bulk instance."""
        bulkobj.add_insert(self._doc)

    def __repr__(self):
        return "InsertOne(%r)" % (self._doc,)

    def __eq__(self, other):
        if type(other) is not type(self):
            return NotImplemented
        return other._doc == self._doc

    def __ne__(self, other):
        return not self == other
class DeleteOne(object):
    """Delete a single matching document, for use with
    :meth:`~pymongo.collection.Collection.bulk_write`."""

    __slots__ = ("_filter", "_collation")

    def __init__(self, filter, collation=None):
        """Create a DeleteOne instance.

        :Parameters:
          - `filter`: A query that matches the document to delete.
          - `collation` (optional): An instance of
            :class:`~pymongo.collation.Collation` (MongoDB 3.4+ only).
        """
        if filter is not None:
            validate_is_mapping("filter", filter)
        self._filter = filter
        self._collation = collation

    def _add_to_bulk(self, bulkobj):
        """Queue this delete (limit 1) on the given _Bulk instance."""
        bulkobj.add_delete(self._filter, 1, collation=self._collation)

    def __repr__(self):
        return "DeleteOne(%r, %r)" % (self._filter, self._collation)

    def __eq__(self, other):
        if type(other) is not type(self):
            return NotImplemented
        return ((other._filter, other._collation) ==
                (self._filter, self._collation))

    def __ne__(self, other):
        return not self == other
class DeleteMany(object):
    """Delete every matching document, for use with
    :meth:`~pymongo.collection.Collection.bulk_write`."""

    __slots__ = ("_filter", "_collation")

    def __init__(self, filter, collation=None):
        """Create a DeleteMany instance.

        :Parameters:
          - `filter`: A query that matches the documents to delete.
          - `collation` (optional): An instance of
            :class:`~pymongo.collation.Collation` (MongoDB 3.4+ only).
        """
        if filter is not None:
            validate_is_mapping("filter", filter)
        self._filter = filter
        self._collation = collation

    def _add_to_bulk(self, bulkobj):
        """Queue this delete (limit 0 == unlimited) on the _Bulk instance."""
        bulkobj.add_delete(self._filter, 0, collation=self._collation)

    def __repr__(self):
        return "DeleteMany(%r, %r)" % (self._filter, self._collation)

    def __eq__(self, other):
        if type(other) is not type(self):
            return NotImplemented
        return ((other._filter, other._collation) ==
                (self._filter, self._collation))

    def __ne__(self, other):
        return not self == other
class ReplaceOne(object):
    """Replace a single matching document, for use with
    :meth:`~pymongo.collection.Collection.bulk_write`."""

    __slots__ = ("_filter", "_doc", "_upsert", "_collation")

    def __init__(self, filter, replacement, upsert=False, collation=None):
        """Create a ReplaceOne instance.

        :Parameters:
          - `filter`: A query that matches the document to replace.
          - `replacement`: The new document.
          - `upsert` (optional): If ``True``, perform an insert if no
            documents match the filter.
          - `collation` (optional): An instance of
            :class:`~pymongo.collation.Collation` (MongoDB 3.4+ only).
        """
        if filter is not None:
            validate_is_mapping("filter", filter)
        if upsert is not None:
            validate_boolean("upsert", upsert)
        self._filter = filter
        self._doc = replacement
        self._upsert = upsert
        self._collation = collation

    def _add_to_bulk(self, bulkobj):
        """Queue this replace on the given _Bulk instance."""
        bulkobj.add_replace(self._filter, self._doc, self._upsert,
                            collation=self._collation)

    def __eq__(self, other):
        if type(other) is not type(self):
            return NotImplemented
        return (
            (other._filter, other._doc, other._upsert, other._collation) ==
            (self._filter, self._doc, self._upsert, self._collation))

    def __ne__(self, other):
        return not self == other

    def __repr__(self):
        return "%s(%r, %r, %r, %r)" % (
            self.__class__.__name__, self._filter, self._doc, self._upsert,
            self._collation)
class _UpdateOp(object):
    """Private base class holding shared state and validation for
    UpdateOne and UpdateMany."""

    __slots__ = ("_filter", "_doc", "_upsert", "_collation", "_array_filters")

    def __init__(self, filter, doc, upsert, collation, array_filters):
        # Each optional argument is validated only when supplied.
        if filter is not None:
            validate_is_mapping("filter", filter)
        if upsert is not None:
            validate_boolean("upsert", upsert)
        if array_filters is not None:
            validate_list("array_filters", array_filters)
        self._filter = filter
        self._doc = doc
        self._upsert = upsert
        self._collation = collation
        self._array_filters = array_filters

    def __eq__(self, other):
        if type(other) is not type(self):
            return NotImplemented
        return (
            (other._filter, other._doc, other._upsert, other._collation,
             other._array_filters) ==
            (self._filter, self._doc, self._upsert, self._collation,
             self._array_filters))

    def __ne__(self, other):
        return not self == other

    def __repr__(self):
        return "%s(%r, %r, %r, %r, %r)" % (
            self.__class__.__name__, self._filter, self._doc, self._upsert,
            self._collation, self._array_filters)
class UpdateOne(_UpdateOp):
    """Update a single matching document, for use with
    :meth:`~pymongo.collection.Collection.bulk_write`."""

    __slots__ = ()

    def __init__(self, filter, update, upsert=False, collation=None,
                 array_filters=None):
        """Create an UpdateOne instance.

        :Parameters:
          - `filter`: A query that matches the document to update.
          - `update`: The modifications to apply.
          - `upsert` (optional): If ``True``, perform an insert if no
            documents match the filter.
          - `collation` (optional): An instance of
            :class:`~pymongo.collation.Collation` (MongoDB 3.4+ only).
          - `array_filters` (optional): A list of filters specifying which
            array elements an update should apply (MongoDB 3.6+ only).
        """
        super(UpdateOne, self).__init__(filter, update, upsert, collation,
                                        array_filters)

    def _add_to_bulk(self, bulkobj):
        """Queue this single-document update (multi=False) on the _Bulk."""
        bulkobj.add_update(self._filter, self._doc, False, self._upsert,
                           collation=self._collation,
                           array_filters=self._array_filters)
class UpdateMany(_UpdateOp):
    """Update every matching document, for use with
    :meth:`~pymongo.collection.Collection.bulk_write`."""

    __slots__ = ()

    def __init__(self, filter, update, upsert=False, collation=None,
                 array_filters=None):
        """Create an UpdateMany instance.

        :Parameters:
          - `filter`: A query that matches the documents to update.
          - `update`: The modifications to apply.
          - `upsert` (optional): If ``True``, perform an insert if no
            documents match the filter.
          - `collation` (optional): An instance of
            :class:`~pymongo.collation.Collation` (MongoDB 3.4+ only).
          - `array_filters` (optional): A list of filters specifying which
            array elements an update should apply (MongoDB 3.6+ only).
        """
        super(UpdateMany, self).__init__(filter, update, upsert, collation,
                                         array_filters)

    def _add_to_bulk(self, bulkobj):
        """Queue this multi-document update (multi=True) on the _Bulk."""
        bulkobj.add_update(self._filter, self._doc, True, self._upsert,
                           collation=self._collation,
                           array_filters=self._array_filters)
class IndexModel(object):
    """A description of a single index, for use with
    :meth:`~pymongo.collection.Collection.create_indexes`."""

    __slots__ = ("__document",)

    def __init__(self, keys, **kwargs):
        """Create an IndexModel instance.

        Takes either a single key name or a list of (key, direction)
        pairs. Keys must be strings; directions are the constants exposed
        by :mod:`pymongo` (ASCENDING, DESCENDING, GEO2D, GEOHAYSTACK,
        GEOSPHERE, HASHED, TEXT).

        Commonly used keyword options (see the MongoDB documentation for
        the full, server-version-dependent list):

          - `name`: custom index name; generated from the keys if omitted.
          - `unique`: if ``True`` creates a uniqueness constraint.
          - `background`: if ``True`` the index is built in the background.
          - `sparse`: if ``True`` documents lacking the indexed field are
            omitted from the index.
          - `bucketSize`: grouping proximity for geoHaystack indexes.
          - `min` / `max`: key bounds for a GEO2D index.
          - `expireAfterSeconds`: TTL in seconds for an expiring (TTL)
            collection; the indexed field must be a UTC datetime.
          - `partialFilterExpression`: filter document for a partial index
            (requires server >= 3.2).
          - `collation`: a :class:`~pymongo.collation.Collation`
            (requires server >= 3.4).

        :Parameters:
          - `keys`: a single key or a list of (key, direction) pairs
            specifying the index to create
          - `**kwargs` (optional): any additional index creation options
            passed as keyword arguments
        """
        keys = _index_list(keys)
        if "name" not in kwargs:
            # Derive a default name from the key specification.
            kwargs["name"] = _gen_index_name(keys)
        kwargs["key"] = _index_document(keys)
        # Collation is validated separately and only stored when supplied.
        collation = validate_collation_or_none(kwargs.pop('collation', None))
        document = kwargs
        if collation is not None:
            document['collation'] = collation
        self.__document = document

    @property
    def document(self):
        """An index document suitable for passing to the createIndexes
        command."""
        return self.__document
acec06b779489c00f801ddc1259e0a893c5592d1 | 10,815 | py | Python | core/extractor.py | neuroailab/RAFT | 03e532cdcad35d3582b053035ec10257c73cbaaa | [
"BSD-3-Clause"
] | null | null | null | core/extractor.py | neuroailab/RAFT | 03e532cdcad35d3582b053035ec10257c73cbaaa | [
"BSD-3-Clause"
] | null | null | null | core/extractor.py | neuroailab/RAFT | 03e532cdcad35d3582b053035ec10257c73cbaaa | [
"BSD-3-Clause"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
class ResidualBlock(nn.Module):
    """Two same-size convolutions with a selectable normalization layer and
    an optional residual (skip) connection.

    ``norm_fn`` is one of 'group', 'batch', 'instance' or 'none'. When the
    stride is not 1 or ``downsample=True``, a 1x1 projection shortcut
    (``self.downsample``) with its own norm (``self.norm3``) is built so the
    skip path matches the main path's shape.
    """
    def __init__(self, in_planes, planes, norm_fn='group', kernel_size=3, stride=1, residual=True, bias=True, downsample=False):
        super(ResidualBlock, self).__init__()
        # 3x3 kernels keep spatial size via padding 1; other sizes get none.
        padding = 1 if kernel_size == 3 else 0
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=kernel_size, bias=bias, padding=padding, stride=stride)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=kernel_size, bias=bias, padding=padding)
        self.relu = nn.ReLU(inplace=True)

        # Only relevant for GroupNorm; assumes planes is a multiple of 8.
        num_groups = planes // 8

        # norm3 is created only when the projection shortcut will use it.
        if norm_fn == 'group':
            self.norm1 = nn.GroupNorm(num_groups=num_groups, num_channels=planes)
            self.norm2 = nn.GroupNorm(num_groups=num_groups, num_channels=planes)
            if not stride == 1 or downsample is True:
                self.norm3 = nn.GroupNorm(num_groups=num_groups, num_channels=planes)

        elif norm_fn == 'batch':
            self.norm1 = nn.BatchNorm2d(planes)
            self.norm2 = nn.BatchNorm2d(planes)
            if not stride == 1 or downsample is True:
                self.norm3 = nn.BatchNorm2d(planes)

        elif norm_fn == 'instance':
            self.norm1 = nn.InstanceNorm2d(planes)
            self.norm2 = nn.InstanceNorm2d(planes)
            if not stride == 1 or downsample is True:
                self.norm3 = nn.InstanceNorm2d(planes)

        elif norm_fn == 'none':
            self.norm1 = nn.Sequential()
            self.norm2 = nn.Sequential()
            if not stride == 1 or downsample is True:
                self.norm3 = nn.Sequential()

        if stride == 1 and not downsample:
            self.downsample = None

        else:
            self.downsample = nn.Sequential(
                nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=bias), self.norm3)

        # When False, forward() returns the main path only (no skip add).
        self.residual = residual

    def forward(self, x):
        y = x
        y = self.relu(self.norm1(self.conv1(y)))
        y = self.relu(self.norm2(self.conv2(y)))

        if not self.residual:
            return y

        # Project the input when shapes differ, then add the skip connection.
        if self.downsample is not None:
            x = self.downsample(x)

        return self.relu(x+y)
class BottleneckBlock(nn.Module):
    """Residual bottleneck: 1x1 reduce (planes//4) -> 3x3 -> 1x1 expand,
    with a selectable normalization layer.

    When stride != 1 a 1x1 projection shortcut (``self.downsample``) with
    norm4 matches the skip path to the main path's shape.
    """
    def __init__(self, in_planes, planes, norm_fn='group', stride=1):
        super(BottleneckBlock, self).__init__()

        self.conv1 = nn.Conv2d(in_planes, planes//4, kernel_size=1, padding=0)
        self.conv2 = nn.Conv2d(planes//4, planes//4, kernel_size=3, padding=1, stride=stride)
        self.conv3 = nn.Conv2d(planes//4, planes, kernel_size=1, padding=0)
        self.relu = nn.ReLU(inplace=True)

        # Only relevant for GroupNorm; assumes planes is a multiple of 8.
        num_groups = planes // 8

        if norm_fn == 'group':
            self.norm1 = nn.GroupNorm(num_groups=num_groups, num_channels=planes//4)
            self.norm2 = nn.GroupNorm(num_groups=num_groups, num_channels=planes//4)
            self.norm3 = nn.GroupNorm(num_groups=num_groups, num_channels=planes)
            if not stride == 1:
                self.norm4 = nn.GroupNorm(num_groups=num_groups, num_channels=planes)

        elif norm_fn == 'batch':
            self.norm1 = nn.BatchNorm2d(planes//4)
            self.norm2 = nn.BatchNorm2d(planes//4)
            self.norm3 = nn.BatchNorm2d(planes)
            if not stride == 1:
                self.norm4 = nn.BatchNorm2d(planes)

        elif norm_fn == 'instance':
            self.norm1 = nn.InstanceNorm2d(planes//4)
            self.norm2 = nn.InstanceNorm2d(planes//4)
            self.norm3 = nn.InstanceNorm2d(planes)
            if not stride == 1:
                self.norm4 = nn.InstanceNorm2d(planes)

        elif norm_fn == 'none':
            self.norm1 = nn.Sequential()
            self.norm2 = nn.Sequential()
            self.norm3 = nn.Sequential()
            if not stride == 1:
                self.norm4 = nn.Sequential()

        if stride == 1:
            self.downsample = None

        else:
            self.downsample = nn.Sequential(
                nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride), self.norm4)

    def forward(self, x):
        y = x
        y = self.relu(self.norm1(self.conv1(y)))
        y = self.relu(self.norm2(self.conv2(y)))
        y = self.relu(self.norm3(self.conv3(y)))

        # Project the input when shapes differ, then add the skip connection.
        if self.downsample is not None:
            x = self.downsample(x)

        return self.relu(x+y)
class BasicEncoder(nn.Module):
    """RAFT feature encoder built from ResidualBlock pairs.

    Channels progress 64 -> 64 -> 96 -> 128 -> output_dim; with the default
    ``gate_stride=2`` the stem plus the two stride-2 layers give an overall
    spatial stride of 8. Accepts a single tensor or a pair (tuple/list) of
    same-batch tensors, which are concatenated along the batch dimension
    and split again on return.
    """
    def __init__(self,
                 output_dim=128,
                 norm_fn='batch',
                 dropout=0.0,
                 gate_stride=2
                 ):
        super(BasicEncoder, self).__init__()
        self.norm_fn = norm_fn

        # Norm for the stem convolution; residual layers get their own.
        if self.norm_fn == 'group':
            self.norm1 = nn.GroupNorm(num_groups=8, num_channels=64)

        elif self.norm_fn == 'batch':
            self.norm1 = nn.BatchNorm2d(64)

        elif self.norm_fn == 'instance':
            self.norm1 = nn.InstanceNorm2d(64)

        elif self.norm_fn == 'none':
            self.norm1 = nn.Sequential()

        # Stem: 7x7 convolution over RGB input.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7,
                               stride=gate_stride, padding=3)
        self.relu1 = nn.ReLU(inplace=True)

        # _make_layer reads and updates in_planes as layers are stacked.
        self.in_planes = 64
        self.layer1 = self._make_layer(64, stride=1)
        self.layer2 = self._make_layer(96, stride=2)
        self.layer3 = self._make_layer(128, stride=2)

        # output convolution
        self.conv2 = nn.Conv2d(128, output_dim, kernel_size=1)

        self.dropout = None
        if dropout > 0:
            self.dropout = nn.Dropout2d(p=dropout)

        # Kaiming init for convs; norms reset to weight=1, bias=0.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.InstanceNorm2d, nn.GroupNorm)):
                if m.weight is not None:
                    nn.init.constant_(m.weight, 1)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)

    def _make_layer(self, dim, stride=1):
        """Stack two ResidualBlocks; only the first may change stride/width."""
        layer1 = ResidualBlock(self.in_planes, dim, self.norm_fn, stride=stride)
        layer2 = ResidualBlock(dim, dim, self.norm_fn, stride=1)
        layers = (layer1, layer2)

        self.in_planes = dim
        return nn.Sequential(*layers)

    def forward(self, x):

        # if input is list, combine batch dimension
        is_list = isinstance(x, tuple) or isinstance(x, list)
        if is_list:
            batch_dim = x[0].shape[0]
            x = torch.cat(x, dim=0)

        x = self.conv1(x)
        x = self.norm1(x)
        x = self.relu1(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.conv2(x)

        # Dropout is applied only in training mode.
        if self.training and self.dropout is not None:
            x = self.dropout(x)

        # Undo the batch concatenation for paired inputs.
        if is_list:
            x = torch.split(x, [batch_dim, batch_dim], dim=0)

        return x
class SmallEncoder(nn.Module):
    """Compact RAFT feature encoder built from BottleneckBlock pairs.

    Channels progress 32 -> 32 -> 64 -> 96 -> output_dim; with the default
    ``gate_stride=2`` the overall spatial stride is 8. Accepts a single
    tensor or a pair (tuple/list) of same-batch tensors, concatenated along
    the batch dimension and split again on return.
    """
    def __init__(self,
                 output_dim=128,
                 norm_fn='batch',
                 dropout=0.0,
                 gate_stride=2
                 ):
        super(SmallEncoder, self).__init__()
        self.norm_fn = norm_fn

        # Norm for the stem convolution; bottleneck layers get their own.
        if self.norm_fn == 'group':
            self.norm1 = nn.GroupNorm(num_groups=8, num_channels=32)

        elif self.norm_fn == 'batch':
            self.norm1 = nn.BatchNorm2d(32)

        elif self.norm_fn == 'instance':
            self.norm1 = nn.InstanceNorm2d(32)

        elif self.norm_fn == 'none':
            self.norm1 = nn.Sequential()

        # Stem: 7x7 convolution over RGB input.
        self.conv1 = nn.Conv2d(3, 32, kernel_size=7, stride=gate_stride, padding=3)
        self.relu1 = nn.ReLU(inplace=True)

        # _make_layer reads and updates in_planes as layers are stacked.
        self.in_planes = 32
        self.layer1 = self._make_layer(32, stride=1)
        self.layer2 = self._make_layer(64, stride=2)
        self.layer3 = self._make_layer(96, stride=2)

        self.dropout = None
        if dropout > 0:
            self.dropout = nn.Dropout2d(p=dropout)

        # Output projection to the requested feature dimension.
        self.conv2 = nn.Conv2d(96, output_dim, kernel_size=1)

        # Kaiming init for convs; norms reset to weight=1, bias=0.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.InstanceNorm2d, nn.GroupNorm)):
                if m.weight is not None:
                    nn.init.constant_(m.weight, 1)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)

    def _make_layer(self, dim, stride=1):
        """Stack two BottleneckBlocks; only the first may change stride/width."""
        layer1 = BottleneckBlock(self.in_planes, dim, self.norm_fn, stride=stride)
        layer2 = BottleneckBlock(dim, dim, self.norm_fn, stride=1)
        layers = (layer1, layer2)

        self.in_planes = dim
        return nn.Sequential(*layers)

    def forward(self, x):

        # if input is list, combine batch dimension
        is_list = isinstance(x, tuple) or isinstance(x, list)
        if is_list:
            batch_dim = x[0].shape[0]
            x = torch.cat(x, dim=0)

        x = self.conv1(x)
        x = self.norm1(x)
        x = self.relu1(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.conv2(x)

        # Dropout is applied only in training mode.
        if self.training and self.dropout is not None:
            x = self.dropout(x)

        # Undo the batch concatenation for paired inputs.
        if is_list:
            x = torch.split(x, [batch_dim, batch_dim], dim=0)

        return x
class KeyQueryExtractor(nn.Module):
    """Project an input feature map into attention key and query maps.

    A shared convolution maps input_dim -> kq_dim, then two parallel
    branches (ResidualBlock into latent_dim, then a 1x1 conv back to
    kq_dim) produce the keys and queries respectively.
    """
    def __init__(self, input_dim, kq_dim, latent_dim, kernel_size=3, norm_fn='batch', downsample=False):
        super(KeyQueryExtractor, self).__init__()

        # Shared feature projection feeding both branches.
        self.conv = nn.Conv2d(input_dim, kq_dim, kernel_size=kernel_size, bias=True, padding='same')
        self.key = nn.Sequential(
            ResidualBlock(kq_dim, latent_dim, norm_fn, kernel_size=kernel_size, stride=1, residual=False, downsample=downsample),
            nn.Conv2d(latent_dim, kq_dim, kernel_size=1, bias=True, padding='same'))
        self.query = nn.Sequential(
            ResidualBlock(kq_dim, latent_dim, norm_fn, kernel_size=kernel_size, stride=1, residual=False, downsample=downsample),
            nn.Conv2d(latent_dim, kq_dim, kernel_size=1, bias=True, padding='same'))

        # Kaiming init for convs; norms reset to weight=1, bias=0.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.InstanceNorm2d, nn.GroupNorm)):
                if m.weight is not None:
                    nn.init.constant_(m.weight, 1)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)

    def forward(self, x):

        # feature projection
        feats = self.conv(x) # [B, C, H, W]

        # key & query projection
        keys = self.key(feats) # [B, C, H, W]
        queries = self.query(feats) # [B, C, H, W]

        return keys, queries
acec08c9c69ccbd058ec9e6a44fb9c616b8ae5b2 | 1,050 | py | Python | P1/Nivel.py | eliiags/gestion_master | d9416788529008799c3de42987515d82c2d1da86 | [
"Apache-2.0"
] | null | null | null | P1/Nivel.py | eliiags/gestion_master | d9416788529008799c3de42987515d82c2d1da86 | [
"Apache-2.0"
] | null | null | null | P1/Nivel.py | eliiags/gestion_master | d9416788529008799c3de42987515d82c2d1da86 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: Elianni Aguero, Angelica Guerrero, Cynthia Quintana
"""
"""
Clase Nivel
- Representa los niveles del juego
"""
class Nivel():
    """A difficulty level of the guessing game: the [MIN, MAX] range the
    secret number lives in, plus the number of guesses allowed."""

    def __init__(self, MIN, MAX, intentos):
        self._n_intentos = intentos  # guesses allowed at this level
        self._MAX = MAX              # upper bound of the guessing range
        self._MIN = MIN              # lower bound of the guessing range

    def getIntentos(self):
        """Return the total number of guesses for this level."""
        return self._n_intentos

    def setIntentos(self, intentos):
        self._n_intentos = intentos

    def getMAX(self):
        """Return the upper bound of the range to guess within."""
        return self._MAX

    def setMAX(self, MAX):
        self._MAX = MAX

    def getMIN(self):
        """Return the lower bound of the range to guess within."""
        return self._MIN

    def setMIN(self, MIN):
        self._MIN = MIN
""" FIN NIVEL JUEGO """
| 22.826087 | 62 | 0.609524 |
acec0b0b714b01916fd833569d53a0cee77b2de8 | 1,712 | py | Python | tools/telemetry/telemetry/page/page_runner_repeat.py | MIPS/external-chromium_org | e31b3128a419654fd14003d6117caa8da32697e7 | [
"BSD-3-Clause"
] | 2 | 2018-11-24T07:58:44.000Z | 2019-02-22T21:02:46.000Z | tools/telemetry/telemetry/page/page_runner_repeat.py | carlosavignano/android_external_chromium_org | 2b5652f7889ccad0fbdb1d52b04bad4c23769547 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | tools/telemetry/telemetry/page/page_runner_repeat.py | carlosavignano/android_external_chromium_org | 2b5652f7889ccad0fbdb1d52b04bad4c23769547 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 3 | 2017-07-31T19:09:52.000Z | 2019-01-04T18:48:50.000Z | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import time
class PageRunnerRepeatState(object):
    """Tracks how many times (and for how long) a page and a page set have
    been run, and decides whether another repetition is needed.

    ``repeat_options`` supplies *_repeat_secs (time budget, takes priority
    when truthy) and *_repeat_iters (iteration budget) for both scopes.
    """

    def __init__(self, repeat_options):
        self.pageset_start_time = None
        self.pageset_iters = None
        self.page_start_time = None
        self.page_iters = None
        self.options = repeat_options

    def WillRunPage(self):
        """Reset the per-page clock and counter before repeating a page."""
        self.page_start_time = time.time()
        self.page_iters = 0

    def WillRunPageSet(self):
        """Reset the per-pageset clock and counter before repeating a pageset."""
        self.pageset_start_time = time.time()
        self.pageset_iters = 0

    def DidRunPage(self):
        """Record the completion of one page iteration."""
        self.page_iters += 1

    def DidRunPageSet(self):
        """Record the completion of one pageset iteration."""
        self.pageset_iters += 1

    def ShouldRepeatPageSet(self):
        """Return True while the pageset's time or iteration budget remains."""
        opts = self.options
        if opts.pageset_repeat_secs:
            elapsed = time.time() - self.pageset_start_time
            return elapsed <= opts.pageset_repeat_secs
        return self.pageset_iters < opts.pageset_repeat_iters

    def ShouldRepeatPage(self):
        """Return True while the page's time or iteration budget remains."""
        opts = self.options
        if opts.page_repeat_secs:
            elapsed = time.time() - self.page_start_time
            return elapsed <= opts.page_repeat_secs
        return self.page_iters < opts.page_repeat_iters
| 31.127273 | 76 | 0.704439 |
acec0c5ccf611bc1279c28f6e556b2f330be79c5 | 200 | py | Python | recursiveCount.py | ClarkGH/pythonCS | 3ee0002dca0a5eb3e705fc52120f222a62d05e17 | [
"MIT"
] | null | null | null | recursiveCount.py | ClarkGH/pythonCS | 3ee0002dca0a5eb3e705fc52120f222a62d05e17 | [
"MIT"
] | null | null | null | recursiveCount.py | ClarkGH/pythonCS | 3ee0002dca0a5eb3e705fc52120f222a62d05e17 | [
"MIT"
numberList = [1, 2, 3, 4, 5]


def recursiveCount(numList):
    """Recursively count the elements of numList.

    Index 0 contributes 1; the odd-indexed slice numList[1::2] and the
    even-indexed-from-2 slice numList[2::2] together cover every other
    element exactly once, so the sum equals len(numList).
    """
    if not numList:
        return 0
    return 1 + recursiveCount(numList[1::2]) + recursiveCount(numList[2::2])


# Fixed: the original used the Python 2 print statement, a SyntaxError
# under Python 3.
print(recursiveCount(numberList))
acec0ced85d6c19676b25fb6293674bf35c64a41 | 1,460 | py | Python | algorithm/loss.py | soroushtaslimi/nih_lung_joint_multilabel_classification | 44ffd2c9e15e6ef299229eb710d43a4606fa2106 | [
"MIT"
] | null | null | null | algorithm/loss.py | soroushtaslimi/nih_lung_joint_multilabel_classification | 44ffd2c9e15e6ef299229eb710d43a4606fa2106 | [
"MIT"
] | null | null | null | algorithm/loss.py | soroushtaslimi/nih_lung_joint_multilabel_classification | 44ffd2c9e15e6ef299229eb710d43a4606fa2106 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import torch.nn.functional as F
def kl_loss_compute(pred, soft_targets, reduce=True):
    """KL(softmax(soft_targets) || softmax(pred)) summed over the class dim.

    Returns the batch mean as a scalar when ``reduce`` is True, otherwise a
    per-sample vector of KL divergences.
    """
    kl = F.kl_div(F.log_softmax(pred, dim=1),
                  F.softmax(soft_targets, dim=1),
                  reduction='none')
    if reduce:
        return torch.mean(torch.sum(kl, dim=1))
    else:
        return torch.sum(kl, 1)


def loss_jocor(y_1, y_2, t, forget_rate, ind, noise_or_not=None, co_lambda=0.1, class_weights=None):
    """JoCoR loss: joint training of two networks with co-regularization.

    Per sample, combines both networks' (1 - co_lambda)-weighted
    cross-entropy with a symmetric KL agreement term, then keeps only the
    (1 - forget_rate) fraction of samples with the smallest joint loss
    (small-loss selection) and averages over them.

    :Parameters:
      - `y_1`, `y_2`: logits from the two networks, shape (N, C).
      - `t`: ground-truth class indices, shape (N,).
      - `forget_rate`: fraction of (presumed noisy) samples to drop.
      - `ind`: dataset indices of this batch; used only with
        `noise_or_not` to compute the pure ratio.
      - `noise_or_not` (optional): per-dataset-sample clean-label flags;
        when None the pure ratio is reported as 0.0.
      - `co_lambda` (optional): weight of the KL co-regularization term.
      - `class_weights` (optional): per-class cross-entropy weights.

    Returns ``(loss, loss, pure_ratio, pure_ratio)`` — both networks share
    the same joint loss and statistic.
    """
    if class_weights is not None:
        # Fixed: build the weight tensor on the logits' device/dtype instead
        # of hard-coding .cuda(), so CPU-only runs no longer crash.
        class_weights = torch.as_tensor(class_weights, dtype=y_1.dtype,
                                        device=y_1.device)
    loss_pick_1 = F.cross_entropy(y_1, t, reduction='none', weight=class_weights) * (1 - co_lambda)
    loss_pick_2 = F.cross_entropy(y_2, t, reduction='none', weight=class_weights) * (1 - co_lambda)
    # Per-sample joint loss: both supervised terms plus symmetric KL agreement.
    loss_pick = (loss_pick_1 + loss_pick_2
                 + co_lambda * kl_loss_compute(y_1, y_2, reduce=False)
                 + co_lambda * kl_loss_compute(y_2, y_1, reduce=False)).cpu()

    # Small-loss selection: keep the (1 - forget_rate) lowest-loss samples.
    ind_sorted = np.argsort(loss_pick.data)
    remember_rate = 1 - forget_rate
    num_remember = int(remember_rate * len(loss_pick))

    if noise_or_not is None:
        pure_ratio = 0.0
    else:
        # Fraction of remembered samples whose labels are actually clean.
        pure_ratio = np.sum(noise_or_not[ind[ind_sorted[:num_remember]]]) / float(num_remember)

    ind_update = ind_sorted[:num_remember]
    loss = torch.mean(loss_pick[ind_update])

    return loss, loss, pure_ratio, pure_ratio
| 29.795918 | 156 | 0.712329 |
acec0d1b93c1c85c550c7bedd8251d1e9cdf789d | 3,247 | py | Python | robot_controller/src/robot_control.py | tubleronchik/Baxter_simulation_controller | 4858d940a022df1dd5dda72557d6884d77f3ebe2 | [
"Apache-2.0"
] | null | null | null | robot_controller/src/robot_control.py | tubleronchik/Baxter_simulation_controller | 4858d940a022df1dd5dda72557d6884d77f3ebe2 | [
"Apache-2.0"
] | null | null | null | robot_controller/src/robot_control.py | tubleronchik/Baxter_simulation_controller | 4858d940a022df1dd5dda72557d6884d77f3ebe2 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import ipfshttpclient
import subprocess
import rospy
import time
import cv2
import os
from sensor_msgs.msg import LaserScan
from geometry_msgs.msg import Twist
from sensor_msgs.msg import Image
from cv_bridge import CvBridge
from threading import Thread
from std_msgs.msg import String
# --- Activation: bring the ROS node up and show the "idle" face -----------
rospy.init_node('robot_control', anonymous=False)
rospy.loginfo("Activation")
br = CvBridge()  # converts between OpenCV images and ROS Image messages
dirname = os.path.dirname(__file__)
# configuration.txt: first line is the private key, second the address
# (read here but not used further in this script).
path = dirname + "/configuration.txt"
conf = open(path, 'r')
my_private_key = conf.readline()
my_adress = conf.readline()
conf.close()
# The robot's head display; face images are published to it to signal state.
face_publisher = rospy.Publisher('/robot/xdisplay', Image, queue_size=1)
sad_picture = dirname + "/sad_face.png"
face = cv2.imread(sad_picture, 1)
face_msg = br.cv2_to_imgmsg(face, "bgr8")
rospy.loginfo("Activation complete. Ready for a job")
face_publisher.publish(face_msg)
# end of activation
global ForExit  # NOTE(review): no-op at module level; ForExit is already global
ForExit = True
# TON topic
def subscriber():
    # Listen for a job announcement; moj_callback clears ForExit so the
    # waiting loop below can exit.
    rospy.Subscriber("task", String, moj_callback)
def moj_callback(data):
    # A "task" message arrived: leave the waiting loop.
    global ForExit
    ForExit = False
pub_js = rospy.Publisher('result_pub', String, queue_size=10)
# waiting for a job: keep the sad face on the display until a "task"
# message arrives (moj_callback flips ForExit to False).
while ForExit:
    try:
        while not rospy.is_shutdown():
            face_publisher.publish(face_msg)
            break
        subscriber()
        #rospy.loginfo(ForExit)
    except KeyboardInterrupt:
        exit()
# start working
rospy.loginfo("Start working")
result = ""  # accumulates stringified laser-scan messages
happy_picture = dirname + "/happy_smile.jpg"
face = cv2.imread(happy_picture, 1)
face_msg = br.cv2_to_imgmsg(face, "bgr8")
face_publisher.publish(face_msg)
i = time.time()       # timestamp of the last saved camera frame
result_picture = []   # paths of saved camera screenshots
global stop_publish  # NOTE(review): no-op at module level
stop_publish = False
def callback_head(data):
    """Save a head-camera frame to disk at most once every 2 seconds."""
    global i
    global result_picture
    global stop_publish
    if(not stop_publish):
        if(time.time() - i > 2):
            # NOTE(review): "scrennshot" is a typo in the filename, kept
            # for compatibility with anything consuming these paths.
            path = dirname + "/scrennshot" + str(int(i)) + ".png"
            result_picture.append(path)
            image = br.imgmsg_to_cv2(data)
            cv2.imwrite(path, image)
            i = time.time()
def callback(data):
    """Append one laser-scan message (stringified) to the text result."""
    global result
    result = result + (str(data) + "\n")
def listener():
    """Subscribe to the camera and range topics, and keep publishing the
    current face until the main thread sets stop_publish."""
    global stop_publish
    rate = rospy.Rate(2)
    rospy.Subscriber('/cameras/head_camera/image', Image, callback_head)
    rospy.Subscriber('/sim/laserscan/left_hand_range/state', LaserScan, callback)
    while not rospy.is_shutdown():
        face_publisher.publish(face_msg)
        if stop_publish:
            break
        rate.sleep()
publish = Thread(target = listener)
publish.start()
rospy.sleep(7)
stop_publish = True
publish.join()
try:
path = dirname + "/result.txt"
result_file = open(path, "w")
for f in result:
result_file.write(f)
finally:
result_file.close()
rospy.loginfo("End of work")
done_picture = dirname + "/accept.png"
face = cv2.imread(done_picture, 1)
face_msg = br.cv2_to_imgmsg(face, "bgr8")
#push to ipfs
rospy.loginfo("Push to IPFS")
client = ipfshttpclient.connect()
while not rospy.is_shutdown():
face_publisher.publish(face_msg)
break
res = client.add(dirname + '/' + "result.txt")
pub_js.publish(String(res.values()[0].encode('utf8')))
rospy.loginfo("Pushed, the IPFS hash is " + res.values()[0].encode('utf8'))
| 24.231343 | 81 | 0.692639 |
acec0d786557eb634ad76155fd658ff338305955 | 4,802 | py | Python | src/traininfojp/traininfojp.py | ichigozero/traininfojp | cb6f4888369681cca3acac7d8e781dda4df4c935 | [
"MIT"
] | null | null | null | src/traininfojp/traininfojp.py | ichigozero/traininfojp | cb6f4888369681cca3acac7d8e781dda4df4c935 | [
"MIT"
] | null | null | null | src/traininfojp/traininfojp.py | ichigozero/traininfojp | cb6f4888369681cca3acac7d8e781dda4df4c935 | [
"MIT"
] | null | null | null | import copy
import re
import urllib.parse
import requests
from bs4 import BeautifulSoup
BASE_URL = 'https://transit.yahoo.co.jp'
TRAIN_INFO_URL = 'https://transit.yahoo.co.jp/traininfo/top'
def _exc_attr_err(func):
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except AttributeError:
return None
return wrapper
class TrainType:
    """Row/section indices of the three train categories on Yahoo's status page."""
    REGULAR, BULLET_TRAIN, RAPID = 0, 1, 2
class BaseClass:
    """Shared fetch/parse state for the scraper classes.

    ``_parsed_html`` holds the BeautifulSoup tree of the last fetched page
    (None until a successful fetch); ``fetch_status`` is '' initially,
    'OK' after a successful request and 'ERR' after a request failure.
    """

    def __init__(self):
        self._parsed_html = None
        self.fetch_status = ''

    def fetch_parse_html_source(self, page_url):
        """Download *page_url*, parse it, and record the outcome in fetch_status."""
        try:
            html_text = requests.get(page_url).text
        except requests.exceptions.RequestException:
            self.fetch_status = 'ERR'
        else:
            self._parsed_html = BeautifulSoup(html_text, 'html.parser')
            self.fetch_status = 'OK'
class RailList(BaseClass):
    """Scraper for the Yahoo transit top page listing all rail lines."""

    def fetch_parse_html_source(self):
        """Fetch the fixed top page; status handling is inherited.

        Delegates to BaseClass instead of duplicating the request/parse/
        status logic that used to be copy-pasted here.
        """
        super(RailList, self).fetch_parse_html_source(TRAIN_INFO_URL)

    def get_regular_train_title(self):
        """Section title for regular (conventional) lines."""
        return self._get_train_type_title(TrainType.REGULAR)

    def get_bullet_train_title(self):
        """Section title for bullet-train (shinkansen) lines."""
        return self._get_train_type_title(TrainType.BULLET_TRAIN)

    def get_rapid_train_title(self):
        """Section title for rapid-transit lines."""
        return self._get_train_type_title(TrainType.RAPID)

    @_exc_attr_err
    def _get_train_type_title(self, train_type):
        """Extract the <th> caption of the given section, minus its <span> badge."""
        div = self._parsed_html.find('div', class_='elmTblLstTrain')
        # Copy before mutating so the parsed tree is left intact.
        th = copy.copy(div.find_all('th')[train_type])
        th.span.decompose()
        return th.text

    def get_regular_train_summary_page_urls(self):
        return self._get_train_page_urls(TrainType.REGULAR)

    def get_bullet_train_details_page_urls(self):
        return self._get_train_page_urls(TrainType.BULLET_TRAIN)

    def get_rapid_train_summary_page_urls(self):
        return self._get_train_page_urls(TrainType.RAPID)

    @_exc_attr_err
    def _get_train_page_urls(self, train_type):
        """Return [{'title': ..., 'url': <absolute URL>}, ...] for one section."""
        div = self._parsed_html.find('div', class_='elmTblLstTrain')
        ul = div.find_all('ul')[train_type]
        train_urls = list()
        for li in ul.find_all('li'):
            anchor = li.find('a')
            train_urls.append({
                'title': anchor.text,
                'url': urllib.parse.urljoin(BASE_URL, anchor['href'])
            })
        return train_urls
class RailSummary(BaseClass):
    """Scraper for a per-category summary page (companies, lines, statuses)."""

    @_exc_attr_err
    def get_rail_company_names(self):
        """All railway-company section titles on the page."""
        names = list()
        for h3 in self._parsed_html.find_all('h3', class_='title'):
            names.append(h3.text)
        return names

    @_exc_attr_err
    def get_line_names_by_rail_company(self, company_name):
        """Line names listed under the section titled *company_name*."""
        div = self._parsed_html.find(
            'h3', class_='title', string=company_name).parent
        next_div = div.find_next_sibling('div', class_='elmTblLstLine')
        names = list()
        for anchor in next_div.find_all('a'):
            names.append(anchor.text)
        return names

    @_exc_attr_err
    def get_line_status(self, line_name):
        """Status text in the cell next to *line_name*.

        When a status icon <span class="icn..."> is present, the text is the
        second child node; otherwise the cell text itself is the status.
        """
        td = self._parsed_html.find('td', string=line_name)
        next_td = td.find_next_sibling()
        if next_td.find('span', class_=re.compile('icn.*')) is not None:
            return next_td.contents[1].text
        return next_td.text

    @_exc_attr_err
    def get_line_status_details(self, line_name):
        """Detailed status message two cells to the right of the line name."""
        td = self._parsed_html.find('td', string=line_name)
        next_td = td.find_next_sibling().find_next_sibling()
        return next_td.text

    @_exc_attr_err
    def get_line_details_page_url(self, line_name):
        """href of the link in the *line_name* row (may be site-relative)."""
        td = self._parsed_html.find('td', string=line_name)
        return td.find('a')['href']
class RailDetails(BaseClass):
    """Scraper for a single line's detail page."""

    @_exc_attr_err
    def get_line_kanji_name(self):
        """Line name in kanji, from the page header."""
        div = self._parsed_html.find('div', class_='labelLarge')
        return div.find('h1', class_='title').text

    @_exc_attr_err
    def get_line_kana_name(self):
        """Kana reading of the line name."""
        div = self._parsed_html.find('div', class_='labelLarge')
        return div.find('span', class_='staKana').text

    @_exc_attr_err
    def get_last_updated_time(self):
        """Text of the "last updated" timestamp in the header."""
        div = self._parsed_html.find('div', class_='labelLarge')
        return div.find('span', class_='subText').text

    @_exc_attr_err
    def get_line_status(self):
        """Short status label with its icon <span> removed."""
        div = self._parsed_html.find('div', id='mdServiceStatus')
        # Copy before mutating so the parsed tree is left intact.
        dt = copy.copy(div.find('dt'))
        dt.span.decompose()
        return dt.text.strip()

    @_exc_attr_err
    def get_line_status_details(self):
        """Full status description paragraph."""
        div = self._parsed_html.find('div', id='mdServiceStatus')
        return div.find('p').text
| 28.754491 | 75 | 0.650979 |
acec0e5656bb614fe1a98327c4034186549c1ada | 1,359 | py | Python | typediff/jpn2yy.py | ned2/grammalytics | af2d91c3221182ddb0c8cf55db4127c5c5587544 | [
"MIT"
] | 2 | 2015-09-01T04:14:54.000Z | 2019-12-10T03:12:39.000Z | typediff/jpn2yy.py | ned2/grammalytics | af2d91c3221182ddb0c8cf55db4127c5c5587544 | [
"MIT"
] | 2 | 2018-03-28T20:25:41.000Z | 2018-04-04T14:44:48.000Z | typediff/jpn2yy.py | ned2/grammalytics | af2d91c3221182ddb0c8cf55db4127c5c5587544 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -* coding:utf-8 -*-
# Author: Francis Bond
import MeCab
import sys
m = MeCab.Tagger('-Ochasen')
punct = "!\"!&'()*+,-−./;<=>?@[\]^_`{|}~。!?…. ○●◎*☆★◇◆"
def jp2yy(sent):
    """take a Japanese sentence encoded in UTF8 and convert to YY-mode
    using mecab

    NOTE(review): Python 2 only -- calls .decode('utf-8') on byte strings
    and returns a UTF-8 encoded byte string.
    """
    ### (id, start, end, [link,] path+, form [surface], ipos, lrule+[, {pos p}+])
    ### set ipos as lemma (just for fun)
    ### fixme: do the full lattice
    yid = 0
    start = 0
    cfrom = 0
    cto = 0
    yy = list()
    sent_decoded = sent.decode('utf-8')
    # mecab -Ochasen output: one tab-separated token per line, "EOS" last.
    # NOTE(review): assumed column order (surface, reading, lemma, pos1..3)
    # -- confirm against the local mecab/chasen configuration.
    for tok in m.parse(sent).split('\n'):
        if tok and tok != 'EOS':
            (form, p, lemma, p1, p2, p3) = tok.decode('utf-8').split('\t')
            if form in punct:
                continue  # drop punctuation tokens entirely
            p2 = p2 or 'n'  # default empty POS subfields to 'n'
            p3 = p3 or 'n'
            pos = "%s:%s-%s" % (p1, p2, p3)  ## weird format jacy requires
            cfrom = sent_decoded.find(form, cto)  ## first instance after last token
            cto = cfrom + len(form)  ## find the end
            yy.append('(%d, %d, %d, <%d:%d>, 1, "%s", %s, "null", "%s" 1.0)' % \
                      (yid, start, start + 1, cfrom, cto, form, 0, pos))
            yid += 1
            start += 1
    return "".join(yy).encode('utf-8')
if __name__ == "__main__":
    # CLI entry point: first argument is the raw UTF-8 sentence to convert.
    sent = sys.argv[1]
    # NOTE(review): jp2yy already returns a single encoded string, so the
    # surrounding "".join() rebuilds it character by character (a no-op).
    print("".join(jp2yy(sent)))
| 30.2 | 84 | 0.483444 |
acec0edc970e8860fcf0885f05193163ebf9cea0 | 10,922 | py | Python | sdk/python/pulumi_azure_native/network/v20180601/get_virtual_network_gateway.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/network/v20180601/get_virtual_network_gateway.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/network/v20180601/get_virtual_network_gateway.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetVirtualNetworkGatewayResult',
'AwaitableGetVirtualNetworkGatewayResult',
'get_virtual_network_gateway',
]
@pulumi.output_type
class GetVirtualNetworkGatewayResult:
    """
    A common class for general resource information

    NOTE: generated by the Pulumi SDK generator -- regenerate rather than
    hand-editing if the resource shape changes.
    """
    def __init__(__self__, active_active=None, bgp_settings=None, enable_bgp=None, etag=None, gateway_default_site=None, gateway_type=None, id=None, ip_configurations=None, location=None, name=None, provisioning_state=None, resource_guid=None, sku=None, tags=None, type=None, vpn_client_configuration=None, vpn_type=None):
        # Generated constructor: validate each argument's runtime type and
        # store it with pulumi.set so the @pulumi.getter properties below
        # can read it back.
        if active_active and not isinstance(active_active, bool):
            raise TypeError("Expected argument 'active_active' to be a bool")
        pulumi.set(__self__, "active_active", active_active)
        if bgp_settings and not isinstance(bgp_settings, dict):
            raise TypeError("Expected argument 'bgp_settings' to be a dict")
        pulumi.set(__self__, "bgp_settings", bgp_settings)
        if enable_bgp and not isinstance(enable_bgp, bool):
            raise TypeError("Expected argument 'enable_bgp' to be a bool")
        pulumi.set(__self__, "enable_bgp", enable_bgp)
        if etag and not isinstance(etag, str):
            raise TypeError("Expected argument 'etag' to be a str")
        pulumi.set(__self__, "etag", etag)
        if gateway_default_site and not isinstance(gateway_default_site, dict):
            raise TypeError("Expected argument 'gateway_default_site' to be a dict")
        pulumi.set(__self__, "gateway_default_site", gateway_default_site)
        if gateway_type and not isinstance(gateway_type, str):
            raise TypeError("Expected argument 'gateway_type' to be a str")
        pulumi.set(__self__, "gateway_type", gateway_type)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if ip_configurations and not isinstance(ip_configurations, list):
            raise TypeError("Expected argument 'ip_configurations' to be a list")
        pulumi.set(__self__, "ip_configurations", ip_configurations)
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", location)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if provisioning_state and not isinstance(provisioning_state, str):
            raise TypeError("Expected argument 'provisioning_state' to be a str")
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        if resource_guid and not isinstance(resource_guid, str):
            raise TypeError("Expected argument 'resource_guid' to be a str")
        pulumi.set(__self__, "resource_guid", resource_guid)
        if sku and not isinstance(sku, dict):
            raise TypeError("Expected argument 'sku' to be a dict")
        pulumi.set(__self__, "sku", sku)
        if tags and not isinstance(tags, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        pulumi.set(__self__, "tags", tags)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
        if vpn_client_configuration and not isinstance(vpn_client_configuration, dict):
            raise TypeError("Expected argument 'vpn_client_configuration' to be a dict")
        pulumi.set(__self__, "vpn_client_configuration", vpn_client_configuration)
        if vpn_type and not isinstance(vpn_type, str):
            raise TypeError("Expected argument 'vpn_type' to be a str")
        pulumi.set(__self__, "vpn_type", vpn_type)

    @property
    @pulumi.getter(name="activeActive")
    def active_active(self) -> Optional[bool]:
        """
        ActiveActive flag
        """
        return pulumi.get(self, "active_active")

    @property
    @pulumi.getter(name="bgpSettings")
    def bgp_settings(self) -> Optional['outputs.BgpSettingsResponse']:
        """
        Virtual network gateway's BGP speaker settings.
        """
        return pulumi.get(self, "bgp_settings")

    @property
    @pulumi.getter(name="enableBgp")
    def enable_bgp(self) -> Optional[bool]:
        """
        Whether BGP is enabled for this virtual network gateway or not.
        """
        return pulumi.get(self, "enable_bgp")

    @property
    @pulumi.getter
    def etag(self) -> Optional[str]:
        """
        Gets a unique read-only string that changes whenever the resource is updated.
        """
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter(name="gatewayDefaultSite")
    def gateway_default_site(self) -> Optional['outputs.SubResourceResponse']:
        """
        The reference of the LocalNetworkGateway resource which represents local network site having default routes. Assign Null value in case of removing existing default site setting.
        """
        return pulumi.get(self, "gateway_default_site")

    @property
    @pulumi.getter(name="gatewayType")
    def gateway_type(self) -> Optional[str]:
        """
        The type of this virtual network gateway. Possible values are: 'Vpn' and 'ExpressRoute'.
        """
        return pulumi.get(self, "gateway_type")

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter(name="ipConfigurations")
    def ip_configurations(self) -> Optional[Sequence['outputs.VirtualNetworkGatewayIPConfigurationResponse']]:
        """
        IP configurations for virtual network gateway.
        """
        return pulumi.get(self, "ip_configurations")

    @property
    @pulumi.getter
    def location(self) -> Optional[str]:
        """
        Resource location.
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Resource name.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        The provisioning state of the VirtualNetworkGateway resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
        """
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter(name="resourceGuid")
    def resource_guid(self) -> Optional[str]:
        """
        The resource GUID property of the VirtualNetworkGateway resource.
        """
        return pulumi.get(self, "resource_guid")

    @property
    @pulumi.getter
    def sku(self) -> Optional['outputs.VirtualNetworkGatewaySkuResponse']:
        """
        The reference of the VirtualNetworkGatewaySku resource which represents the SKU selected for Virtual network gateway.
        """
        return pulumi.get(self, "sku")

    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type.
        """
        return pulumi.get(self, "type")

    @property
    @pulumi.getter(name="vpnClientConfiguration")
    def vpn_client_configuration(self) -> Optional['outputs.VpnClientConfigurationResponse']:
        """
        The reference of the VpnClientConfiguration resource which represents the P2S VpnClient configurations.
        """
        return pulumi.get(self, "vpn_client_configuration")

    @property
    @pulumi.getter(name="vpnType")
    def vpn_type(self) -> Optional[str]:
        """
        The type of this virtual network gateway. Possible values are: 'PolicyBased' and 'RouteBased'.
        """
        return pulumi.get(self, "vpn_type")
class AwaitableGetVirtualNetworkGatewayResult(GetVirtualNetworkGatewayResult):
    # Awaitable wrapper: lets callers ``await`` the invoke result even though
    # the value is already resolved.  __await__ yields nothing (the dead
    # ``if False`` branch only marks the method as a generator) and returns
    # a plain GetVirtualNetworkGatewayResult immediately.
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetVirtualNetworkGatewayResult(
            active_active=self.active_active,
            bgp_settings=self.bgp_settings,
            enable_bgp=self.enable_bgp,
            etag=self.etag,
            gateway_default_site=self.gateway_default_site,
            gateway_type=self.gateway_type,
            id=self.id,
            ip_configurations=self.ip_configurations,
            location=self.location,
            name=self.name,
            provisioning_state=self.provisioning_state,
            resource_guid=self.resource_guid,
            sku=self.sku,
            tags=self.tags,
            type=self.type,
            vpn_client_configuration=self.vpn_client_configuration,
            vpn_type=self.vpn_type)
def get_virtual_network_gateway(resource_group_name: Optional[str] = None,
                                virtual_network_gateway_name: Optional[str] = None,
                                opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetVirtualNetworkGatewayResult:
    """
    Look up an Azure virtual network gateway (API version 2018-06-01) and
    return its properties as an awaitable result object.

    :param str resource_group_name: The name of the resource group.
    :param str virtual_network_gateway_name: The name of the virtual network gateway.
    """
    __args__ = dict()
    __args__['resourceGroupName'] = resource_group_name
    __args__['virtualNetworkGatewayName'] = virtual_network_gateway_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Synchronous provider invoke; the typed result is re-wrapped below so
    # it can also be awaited.
    __ret__ = pulumi.runtime.invoke('azure-native:network/v20180601:getVirtualNetworkGateway', __args__, opts=opts, typ=GetVirtualNetworkGatewayResult).value

    return AwaitableGetVirtualNetworkGatewayResult(
        active_active=__ret__.active_active,
        bgp_settings=__ret__.bgp_settings,
        enable_bgp=__ret__.enable_bgp,
        etag=__ret__.etag,
        gateway_default_site=__ret__.gateway_default_site,
        gateway_type=__ret__.gateway_type,
        id=__ret__.id,
        ip_configurations=__ret__.ip_configurations,
        location=__ret__.location,
        name=__ret__.name,
        provisioning_state=__ret__.provisioning_state,
        resource_guid=__ret__.resource_guid,
        sku=__ret__.sku,
        tags=__ret__.tags,
        type=__ret__.type,
        vpn_client_configuration=__ret__.vpn_client_configuration,
        vpn_type=__ret__.vpn_type)
acec0fd1928e24767d2ffe2ee63ab6adea09bcb8 | 3,157 | py | Python | iotchain/settings.py | IoTchain-ASD-01-2018/iotchain | 09633d1a1dfc10f708c9d480ffb3e2bdb2855f0c | [
"MIT"
] | null | null | null | iotchain/settings.py | IoTchain-ASD-01-2018/iotchain | 09633d1a1dfc10f708c9d480ffb3e2bdb2855f0c | [
"MIT"
] | null | null | null | iotchain/settings.py | IoTchain-ASD-01-2018/iotchain | 09633d1a1dfc10f708c9d480ffb3e2bdb2855f0c | [
"MIT"
] | null | null | null | """
Django settings for iotchain project.
Generated by 'django-admin startproject' using Django 1.11.13.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '+bj!252jppnyisoi40f9rg%&%^rs+9+316knn+d9n9=2+565=0'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blockchain',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
#'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'iotchain.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'iotchain.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'pt-br'
TIME_ZONE = 'America/Sao_Paulo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
APPEND_SLASH = False | 25.666667 | 91 | 0.698131 |
acec115a3b4b08950790a064ed0c14a2a5326983 | 29,845 | py | Python | dqn/RLutils/tabulate.py | Theling/Delayed_MDP | db1a8808a05917a5915220947cf65268f0524fa9 | [
"MIT"
] | null | null | null | dqn/RLutils/tabulate.py | Theling/Delayed_MDP | db1a8808a05917a5915220947cf65268f0524fa9 | [
"MIT"
] | null | null | null | dqn/RLutils/tabulate.py | Theling/Delayed_MDP | db1a8808a05917a5915220947cf65268f0524fa9 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Taken from John's code
"""Pretty-print tabular data."""
from collections import namedtuple
from platform import python_version_tuple
import re
# Python 2/3 compatibility shims: bind one set of names for the zip_longest
# iterator and for the scalar types used by the type-inference helpers below.
# The version check compares strings ("2" < "3"), which is fine for 2.x/3.x.
if python_version_tuple()[0] < "3":
    from itertools import izip_longest
    from functools import partial
    _none_type = type(None)
    _int_type = int
    _float_type = float
    _text_type = str      # NOTE(review): on Py2 this is the byte string type
    _binary_type = str
else:
    # NOTE(review): only the alias ``izip_longest`` is bound here, yet code
    # further down calls bare ``zip_longest`` -- that raises NameError on
    # Python 3; confirm and bind both names if needed.
    from itertools import zip_longest as izip_longest
    from functools import reduce, partial
    _none_type = type(None)
    _int_type = int
    _float_type = float
    _text_type = str
    _binary_type = bytes
__all__ = ["tabulate", "tabulate_formats", "simple_separated_format"]
__version__ = "0.7.2"
# A horizontal rule: begin/end delimiters, the fill character, and the
# separator drawn between columns.
Line = namedtuple("Line", ["begin", "hline", "sep", "end"])


# A data or header row: begin/end delimiters and the inter-column separator.
DataRow = namedtuple("DataRow", ["begin", "sep", "end"])


# A table structure is supposed to be:
#
#     --- lineabove ---------
#         headerrow
#     --- linebelowheader ---
#         datarow
#     --- linebetweenrows ---
#     ... (more datarows) ...
#     --- linebetweenrows ---
#         last datarow
#     --- linebelow ---------
#
# TableFormat's line* elements can be
#
#   - either None, if the element is not used,
#   - or a Line tuple,
#   - or a function: [col_widths], [col_alignments] -> string.
#
# TableFormat's *row elements can be
#
#   - either None, if the element is not used,
#   - or a DataRow tuple,
#   - or a function: [cell_values], [col_widths], [col_alignments] -> string.
#
# padding (an integer) is the amount of white space around data values.
#
# with_header_hide:
#
#   - either None, to display all table elements unconditionally,
#   - or a list of elements not to be displayed if the table has column headers.
#
TableFormat = namedtuple("TableFormat", ["lineabove", "linebelowheader",
                                         "linebetweenrows", "linebelow",
                                         "headerrow", "datarow",
                                         "padding", "with_header_hide"])
def _pipe_segment_with_colons(align, colwidth):
"""Return a segment of a horizontal line with optional colons which
indicate column's alignment (as in `pipe` output format)."""
w = colwidth
if align in ["right", "decimal"]:
return ('-' * (w - 1)) + ":"
elif align == "center":
return ":" + ('-' * (w - 2)) + ":"
elif align == "left":
return ":" + ('-' * (w - 1))
else:
return '-' * w
def _pipe_line_with_colons(colwidths, colaligns):
    """Return a horizontal line with optional colons to indicate column's
    alignment (as in `pipe` output format)."""
    segments = (_pipe_segment_with_colons(align, width)
                for align, width in zip(colaligns, colwidths))
    return "|{0}|".format("|".join(segments))
def _mediawiki_row_with_attrs(separator, cell_values, colwidths, colaligns):
alignment = { "left": '',
"right": 'align="right"| ',
"center": 'align="center"| ',
"decimal": 'align="right"| ' }
# hard-coded padding _around_ align attribute and value together
# rather than padding parameter which affects only the value
values_with_attrs = [' ' + alignment.get(a, '') + c + ' '
for c, a in zip(cell_values, colaligns)]
colsep = separator*2
return (separator + colsep.join(values_with_attrs)).rstrip()
def _latex_line_begin_tabular(colwidths, colaligns):
alignment = { "left": "l", "right": "r", "center": "c", "decimal": "r" }
tabular_columns_fmt = "".join([alignment.get(a, "l") for a in colaligns])
return "\\begin{tabular}{" + tabular_columns_fmt + "}\n\hline"
# Built-in output formats, keyed by the ``tablefmt`` name.  See the block
# comment above TableFormat for the meaning of each field.  The two-space
# separators in "simple"/"plain"/"rst" are tabulate's signature column gap.
_table_formats = {"simple":
                  TableFormat(lineabove=Line("", "-", "  ", ""),
                              linebelowheader=Line("", "-", "  ", ""),
                              linebetweenrows=None,
                              linebelow=Line("", "-", "  ", ""),
                              headerrow=DataRow("", "  ", ""),
                              datarow=DataRow("", "  ", ""),
                              padding=0,
                              with_header_hide=["lineabove", "linebelow"]),
                  "plain":
                  TableFormat(lineabove=None, linebelowheader=None,
                              linebetweenrows=None, linebelow=None,
                              headerrow=DataRow("", "  ", ""),
                              datarow=DataRow("", "  ", ""),
                              padding=0, with_header_hide=None),
                  "grid":
                  TableFormat(lineabove=Line("+", "-", "+", "+"),
                              linebelowheader=Line("+", "=", "+", "+"),
                              linebetweenrows=Line("+", "-", "+", "+"),
                              linebelow=Line("+", "-", "+", "+"),
                              headerrow=DataRow("|", "|", "|"),
                              datarow=DataRow("|", "|", "|"),
                              padding=1, with_header_hide=None),
                  "pipe":
                  TableFormat(lineabove=_pipe_line_with_colons,
                              linebelowheader=_pipe_line_with_colons,
                              linebetweenrows=None,
                              linebelow=None,
                              headerrow=DataRow("|", "|", "|"),
                              datarow=DataRow("|", "|", "|"),
                              padding=1,
                              with_header_hide=["lineabove"]),
                  "orgtbl":
                  TableFormat(lineabove=None,
                              linebelowheader=Line("|", "-", "+", "|"),
                              linebetweenrows=None,
                              linebelow=None,
                              headerrow=DataRow("|", "|", "|"),
                              datarow=DataRow("|", "|", "|"),
                              padding=1, with_header_hide=None),
                  "rst":
                  TableFormat(lineabove=Line("", "=", "  ", ""),
                              linebelowheader=Line("", "=", "  ", ""),
                              linebetweenrows=None,
                              linebelow=Line("", "=", "  ", ""),
                              headerrow=DataRow("", "  ", ""),
                              datarow=DataRow("", "  ", ""),
                              padding=0, with_header_hide=None),
                  "mediawiki":
                  TableFormat(lineabove=Line("{| class=\"wikitable\" style=\"text-align: left;\"",
                                             "", "", "\n|+ <!-- caption -->\n|-"),
                              linebelowheader=Line("|-", "", "", ""),
                              linebetweenrows=Line("|-", "", "", ""),
                              linebelow=Line("|}", "", "", ""),
                              headerrow=partial(_mediawiki_row_with_attrs, "!"),
                              datarow=partial(_mediawiki_row_with_attrs, "|"),
                              padding=0, with_header_hide=None),
                  "latex":
                  TableFormat(lineabove=_latex_line_begin_tabular,
                              linebelowheader=Line("\\hline", "", "", ""),
                              linebetweenrows=None,
                              linebelow=Line("\\hline\n\\end{tabular}", "", "", ""),
                              headerrow=DataRow("", "&", "\\\\"),
                              datarow=DataRow("", "&", "\\\\"),
                              padding=1, with_header_hide=None),
                  "tsv":
                  TableFormat(lineabove=None, linebelowheader=None,
                              linebetweenrows=None, linebelow=None,
                              headerrow=DataRow("", "\t", ""),
                              datarow=DataRow("", "\t", ""),
                              padding=0, with_header_hide=None)}


tabulate_formats = list(sorted(_table_formats.keys()))


# ANSI color escape sequences (e.g. "\x1b[31m"), used to compute visible
# widths.  The backslashes before '[' and 'd' are now properly doubled:
# "\[" / "\d" are invalid string escapes that only worked by accident and
# warn (eventually error) on modern Pythons; the pattern value is unchanged.
_invisible_codes = re.compile("\x1b\\[\\d*m")  # ANSI color codes
_invisible_codes_bytes = re.compile(b"\x1b\\[\\d*m")  # ANSI color codes
def simple_separated_format(separator):
    """Construct a simple TableFormat with columns separated by a separator.

    >>> tsv = simple_separated_format("\\t") ; \
        tabulate([["foo", 1], ["spam", 23]], tablefmt=tsv) == 'foo \\t 1\\nspam\\t23'
    True
    """
    return TableFormat(lineabove=None, linebelowheader=None,
                       linebetweenrows=None, linebelow=None,
                       headerrow=DataRow('', separator, ''),
                       datarow=DataRow('', separator, ''),
                       padding=0, with_header_hide=None)
def _isconvertible(conv, string):
try:
n = conv(string)
return True
except ValueError:
return False
def _isnumber(string):
"""
>>> _isnumber("123.45")
True
>>> _isnumber("123")
True
>>> _isnumber("spam")
False
"""
return _isconvertible(float, string)
def _isint(string):
"""
>>> _isint("123")
True
>>> _isint("123.45")
False
"""
return type(string) is int or \
(isinstance(string, _binary_type) or isinstance(string, _text_type)) and \
_isconvertible(int, string)
def _type(string, has_invisible=True):
    """The least generic type (type(None), int, float, str, unicode).

    >>> _type(None) is type(None)
    True
    >>> _type("foo") is type("")
    True
    >>> _type("1") is type(1)
    True
    >>> _type('\x1b[31m42\x1b[0m') is type(42)
    True
    """
    # ANSI color codes around a value must not change its inferred type.
    if has_invisible and isinstance(string, (_text_type, _binary_type)):
        string = _strip_invisible(string)

    if string is None:
        return _none_type
    if hasattr(string, "isoformat"):  # datetime.datetime, date, and time
        return _text_type
    if _isint(string):
        return int
    if _isnumber(string):
        return float
    if isinstance(string, _binary_type):
        return _binary_type
    return _text_type
def _afterpoint(string):
    """Symbols after a decimal point, -1 if the string lacks the decimal point.

    >>> _afterpoint("123.45")
    2
    >>> _afterpoint("1001")
    -1
    >>> _afterpoint("eggs")
    -1
    >>> _afterpoint("123e45")
    2
    """
    if not _isnumber(string):
        return -1  # not a number
    if _isint(string):
        return -1  # integers have no fractional part
    # Prefer the last '.'; fall back to the exponent marker for 1e23-style
    # numbers, whose trailing digits are counted after the 'e'.
    pos = string.rfind(".")
    if pos < 0:
        pos = string.lower().rfind("e")
    if pos < 0:
        return -1  # no point
    return len(string) - pos - 1
def _padleft(width, s, has_invisible=True):
"""Flush right.
>>> _padleft(6, '\u044f\u0439\u0446\u0430') == ' \u044f\u0439\u0446\u0430'
True
"""
iwidth = width + len(s) - len(_strip_invisible(s)) if has_invisible else width
fmt = "{0:>%ds}" % iwidth
return fmt.format(s)
def _padright(width, s, has_invisible=True):
"""Flush left.
>>> _padright(6, '\u044f\u0439\u0446\u0430') == '\u044f\u0439\u0446\u0430 '
True
"""
iwidth = width + len(s) - len(_strip_invisible(s)) if has_invisible else width
fmt = "{0:<%ds}" % iwidth
return fmt.format(s)
def _padboth(width, s, has_invisible=True):
"""Center string.
>>> _padboth(6, '\u044f\u0439\u0446\u0430') == ' \u044f\u0439\u0446\u0430 '
True
"""
iwidth = width + len(s) - len(_strip_invisible(s)) if has_invisible else width
fmt = "{0:^%ds}" % iwidth
return fmt.format(s)
def _strip_invisible(s):
    """Remove invisible ANSI color codes from *s* (text or bytes)."""
    if isinstance(s, _text_type):
        return _invisible_codes.sub("", s)
    return _invisible_codes_bytes.sub("", s)  # a bytestring
def _visible_width(s):
    """Visible width of a printed string. ANSI color codes are removed.

    >>> _visible_width('\x1b[31mhello\x1b[0m'), _visible_width("world")
    (5, 5)
    """
    if isinstance(s, (_text_type, _binary_type)):
        return len(_strip_invisible(s))
    # Non-string cells are measured by their text representation.
    return len(_text_type(s))
def _align_column(strings, alignment, minwidth=0, has_invisible=True):
"""[string] -> [padded_string]
>>> list(map(str,_align_column(["12.345", "-1234.5", "1.23", "1234.5", "1e+234", "1.0e234"], "decimal")))
[' 12.345 ', '-1234.5 ', ' 1.23 ', ' 1234.5 ', ' 1e+234 ', ' 1.0e234']
>>> list(map(str,_align_column(['123.4', '56.7890'], None)))
['123.4', '56.7890']
"""
if alignment == "right":
strings = [s.strip() for s in strings]
padfn = _padleft
elif alignment == "center":
strings = [s.strip() for s in strings]
padfn = _padboth
elif alignment == "decimal":
decimals = [_afterpoint(s) for s in strings]
maxdecimals = max(decimals)
strings = [s + (maxdecimals - decs) * " "
for s, decs in zip(strings, decimals)]
padfn = _padleft
elif not alignment:
return strings
else:
strings = [s.strip() for s in strings]
padfn = _padright
if has_invisible:
width_fn = _visible_width
else:
width_fn = len
maxwidth = max(max(list(map(width_fn, strings))), minwidth)
padded_strings = [padfn(maxwidth, s, has_invisible) for s in strings]
return padded_strings
def _more_generic(type1, type2):
types = { _none_type: 0, int: 1, float: 2, _binary_type: 3, _text_type: 4 }
invtypes = { 4: _text_type, 3: _binary_type, 2: float, 1: int, 0: _none_type }
moregeneric = max(types.get(type1, 4), types.get(type2, 4))
return invtypes[moregeneric]
def _column_type(strings, has_invisible=True):
    """The least generic type all column values are convertible to.

    >>> _column_type(["1", "2"]) is _int_type
    True
    >>> _column_type(["1", "2.3"]) is _float_type
    True
    >>> _column_type(["1", "2.3", "four"]) is _text_type
    True
    >>> _column_type([1, 2, None]) is _int_type
    True
    """
    common = int
    for s in strings:
        common = _more_generic(common, _type(s, has_invisible))
    return common
def _format(val, valtype, floatfmt, missingval=""):
"""Format a value accoding to its type.
Unicode is supported:
>>> hrow = ['\u0431\u0443\u043a\u0432\u0430', '\u0446\u0438\u0444\u0440\u0430'] ; \
tbl = [['\u0430\u0437', 2], ['\u0431\u0443\u043a\u0438', 4]] ; \
good_result = '\\u0431\\u0443\\u043a\\u0432\\u0430 \\u0446\\u0438\\u0444\\u0440\\u0430\\n------- -------\\n\\u0430\\u0437 2\\n\\u0431\\u0443\\u043a\\u0438 4' ; \
tabulate(tbl, headers=hrow) == good_result
True
"""
if val is None:
return missingval
if valtype in [int, _text_type]:
return "{0}".format(val)
elif valtype is _binary_type:
return _text_type(val, "ascii")
elif valtype is float:
return format(float(val), floatfmt)
else:
return "{0}".format(val)
def _align_header(header, alignment, width):
if alignment == "left":
return _padright(width, header)
elif alignment == "center":
return _padboth(width, header)
elif not alignment:
return "{0}".format(header)
else:
return _padleft(width, header)
def _normalize_tabular_data(tabular_data, headers):
    """Transform a supported data type to a list of lists, and a list of headers.

    Supported tabular data types:

    * list-of-lists or another iterable of iterables

    * list of named tuples (usually used with headers="keys")

    * 2D NumPy arrays

    * NumPy record arrays (usually used with headers="keys")

    * dict of iterables (usually used with headers="keys")

    * pandas.DataFrame (usually used with headers="keys")

    The first row can be used as headers if headers="firstrow",
    column indices can be used as headers if headers="keys".

    Returns a ``(rows, headers)`` pair: ``rows`` is a list of lists of
    cell values, ``headers`` is a (possibly empty) list of strings.
    """
    if hasattr(tabular_data, "keys") and hasattr(tabular_data, "values"):
        # dict-like and pandas.DataFrame?
        if hasattr(tabular_data.values, "__call__"):
            # likely a conventional dict: .values is a bound method there,
            # while on a DataFrame it is a property (the data matrix)
            keys = list(tabular_data.keys())
            rows = list(zip_longest(*list(tabular_data.values()))) # columns have to be transposed
        elif hasattr(tabular_data, "index"):
            # values is a property, has .index => it's likely a pandas.DataFrame (pandas 0.11.0)
            keys = list(tabular_data.keys())
            vals = tabular_data.values # values matrix doesn't need to be transposed
            names = tabular_data.index
            # prepend the index label to every data row
            rows = [[v]+list(row) for v,row in zip(names, vals)]
        else:
            raise ValueError("tabular data doesn't appear to be a dict or a DataFrame")
        if headers == "keys":
            headers = list(map(_text_type,keys)) # headers should be strings
    else: # it's a usual an iterable of iterables, or a NumPy array
        rows = list(tabular_data)
        if (headers == "keys" and
            hasattr(tabular_data, "dtype") and
            getattr(tabular_data.dtype, "names")):
            # numpy record array
            headers = tabular_data.dtype.names
        elif (headers == "keys"
              and len(rows) > 0
              and isinstance(rows[0], tuple)
              and hasattr(rows[0], "_fields")): # namedtuple
            headers = list(map(_text_type, rows[0]._fields))
        elif headers == "keys" and len(rows) > 0: # keys are column indices
            headers = list(map(_text_type, list(range(len(rows[0])))))
    # take headers from the first row if necessary
    if headers == "firstrow" and len(rows) > 0:
        headers = list(map(_text_type, rows[0])) # headers should be strings
        rows = rows[1:]
    headers = list(headers)
    rows = list(map(list,rows))
    # pad with empty headers for initial columns if necessary
    if headers and len(rows) > 0:
        nhs = len(headers)
        ncols = len(rows[0])
        if nhs < ncols:
            # missing headers name the *last* columns (R / pandas convention)
            headers = [""]*(ncols - nhs) + headers
    return rows, headers
def tabulate(tabular_data, headers=(), tablefmt="simple",
             floatfmt="g", numalign="decimal", stralign="left",
             missingval=""):
    """Format a fixed width table for pretty printing.

    >>> print(tabulate([[1, 2.34], [-56, "8.999"], ["2", "10001"]]))
    ---  ---------
      1      2.34
    -56      8.999
      2  10001
    ---  ---------

    The first required argument (`tabular_data`) can be a
    list-of-lists (or another iterable of iterables), a list of named
    tuples, a dictionary of iterables, a two-dimensional NumPy array,
    NumPy record array, or a Pandas' dataframe.


    Table headers
    -------------

    To print nice column headers, supply the second argument (`headers`):

      - `headers` can be an explicit list of column headers
      - if `headers="firstrow"`, then the first row of data is used
      - if `headers="keys"`, then dictionary keys or column indices are used

    Otherwise a headerless table is produced.

    If the number of headers is less than the number of columns, they
    are supposed to be names of the last columns. This is consistent
    with the plain-text format of R and Pandas' dataframes.

    >>> print(tabulate([["sex","age"],["Alice","F",24],["Bob","M",19]],
    ...       headers="firstrow"))
           sex      age
    -----  -----  -----
    Alice  F         24
    Bob    M         19


    Column alignment
    ----------------

    `tabulate` tries to detect column types automatically, and aligns
    the values properly. By default it aligns decimal points of the
    numbers (or flushes integer numbers to the right), and flushes
    everything else to the left. Possible column alignments
    (`numalign`, `stralign`) are: "right", "center", "left", "decimal"
    (only for `numalign`), and None (to disable alignment).


    Table formats
    -------------

    `floatfmt` is a format specification used for columns which
    contain numeric data with a decimal point.

    `None` values are replaced with a `missingval` string:

    >>> print(tabulate([["spam", 1, None],
    ...                 ["eggs", 42, 3.14],
    ...                 ["other", None, 2.7]], missingval="?"))
    -----  --  ----
    spam    1  ?
    eggs   42  3.14
    other   ?  2.7
    -----  --  ----

    Various plain-text table formats (`tablefmt`) are supported:
    'plain', 'simple', 'grid', 'pipe', 'orgtbl', 'rst', 'mediawiki',
    and 'latex'. Variable `tabulate_formats` contains the list of
    currently supported formats.

    "plain" format doesn't use any pseudographics to draw tables,
    it separates columns with a double space:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                 ["strings", "numbers"], "plain"))
    strings      numbers
    spam         41.9999
    eggs        451

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="plain"))
    spam   41.9999
    eggs  451

    "simple" format is like Pandoc simple_tables:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                 ["strings", "numbers"], "simple"))
    strings      numbers
    ---------  ---------
    spam         41.9999
    eggs        451

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="simple"))
    ----  --------
    spam   41.9999
    eggs  451
    ----  --------

    "grid" is similar to tables produced by Emacs table.el package or
    Pandoc grid_tables:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                ["strings", "numbers"], "grid"))
    +-----------+-----------+
    | strings   |   numbers |
    +===========+===========+
    | spam      |   41.9999 |
    +-----------+-----------+
    | eggs      |  451      |
    +-----------+-----------+

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="grid"))
    +------+----------+
    | spam |  41.9999 |
    +------+----------+
    | eggs | 451      |
    +------+----------+

    "pipe" is like tables in PHP Markdown Extra extension or Pandoc
    pipe_tables:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                ["strings", "numbers"], "pipe"))
    | strings   |   numbers |
    |:----------|----------:|
    | spam      |   41.9999 |
    | eggs      |  451      |

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="pipe"))
    |:-----|---------:|
    | spam |  41.9999 |
    | eggs | 451      |

    "orgtbl" is like tables in Emacs org-mode and orgtbl-mode. They
    are slightly different from "pipe" format by not using colons to
    define column alignment, and using a "+" sign to indicate line
    intersections:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                ["strings", "numbers"], "orgtbl"))
    | strings   |   numbers |
    |-----------+-----------|
    | spam      |   41.9999 |
    | eggs      |  451      |

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="orgtbl"))
    | spam |  41.9999 |
    | eggs | 451      |

    "rst" is like a simple table format from reStructuredText; please
    note that reStructuredText accepts also "grid" tables:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                ["strings", "numbers"], "rst"))
    =========  =========
    strings      numbers
    =========  =========
    spam         41.9999
    eggs        451
    =========  =========

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="rst"))
    ====  ========
    spam   41.9999
    eggs  451
    ====  ========

    "mediawiki" produces a table markup used in Wikipedia and on other
    MediaWiki-based sites:

    >>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]],
    ...                headers="firstrow", tablefmt="mediawiki"))
    {| class="wikitable" style="text-align: left;"
    |+ <!-- caption -->
    |-
    ! strings   !! align="right"|   numbers
    |-
    | spam      || align="right"|   41.9999
    |-
    | eggs      || align="right"|  451
    |}

    "latex" produces a tabular environment of LaTeX document markup:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex"))
    \\begin{tabular}{lr}
    \\hline
     spam &  41.9999 \\\\
     eggs & 451      \\\\
    \\hline
    \\end{tabular}

    """
    # NOTE: `headers` defaults to an immutable tuple rather than the
    # former mutable `[]` default (a classic Python pitfall); callers
    # passing lists, tuples, or the "keys"/"firstrow" strings are
    # unaffected because `_normalize_tabular_data` copies it anyway.
    list_of_lists, headers = _normalize_tabular_data(tabular_data, headers)

    # optimization: look for ANSI control codes once,
    # enable smart width functions only if a control code is found
    plain_text = '\n'.join(
        ['\t'.join(map(_text_type, headers))] +
        ['\t'.join(map(_text_type, row)) for row in list_of_lists])
    has_invisible = re.search(_invisible_codes, plain_text) is not None
    if has_invisible:
        width_fn = _visible_width
    else:
        width_fn = len

    # format rows and columns, convert numeric values to strings
    cols = list(zip(*list_of_lists))
    coltypes = list(map(_column_type, cols))
    cols = [[_format(v, ct, floatfmt, missingval) for v in c]
            for c, ct in zip(cols, coltypes)]

    # align columns: numbers get numalign, everything else stralign;
    # header widths (+2 for padding) act as the minimum column width
    aligns = [numalign if ct in [int, float] else stralign for ct in coltypes]
    minwidths = [width_fn(h) + 2 for h in headers] if headers else [0] * len(cols)
    cols = [_align_column(c, a, minw, has_invisible)
            for c, a, minw in zip(cols, aligns, minwidths)]

    if headers:
        # align headers and add headers
        minwidths = [max(minw, width_fn(c[0])) for minw, c in zip(minwidths, cols)]
        headers = [_align_header(h, a, minw)
                   for h, a, minw in zip(headers, aligns, minwidths)]
        rows = list(zip(*cols))
    else:
        minwidths = [width_fn(c[0]) for c in cols]
        rows = list(zip(*cols))

    # resolve a format name to a TableFormat, falling back to "simple"
    if not isinstance(tablefmt, TableFormat):
        tablefmt = _table_formats.get(tablefmt, _table_formats["simple"])

    return _format_table(tablefmt, headers, rows, minwidths, aligns)
def _build_simple_row(padded_cells, rowfmt):
"Format row according to DataRow format without padding."
begin, sep, end = rowfmt
return (begin + sep.join(padded_cells) + end).rstrip()
def _build_row(padded_cells, colwidths, colaligns, rowfmt):
"Return a string which represents a row of data cells."
if not rowfmt:
return None
if hasattr(rowfmt, "__call__"):
return rowfmt(padded_cells, colwidths, colaligns)
else:
return _build_simple_row(padded_cells, rowfmt)
def _build_line(colwidths, colaligns, linefmt):
"Return a string which represents a horizontal line."
if not linefmt:
return None
if hasattr(linefmt, "__call__"):
return linefmt(colwidths, colaligns)
else:
begin, fill, sep, end = linefmt
cells = [fill*w for w in colwidths]
return _build_simple_row(cells, (begin, sep, end))
def _pad_row(cells, padding):
if cells:
pad = " "*padding
padded_cells = [pad + cell + pad for cell in cells]
return padded_cells
else:
return cells
def _format_table(fmt, headers, rows, colwidths, colaligns):
    """Assemble the final plain-text table from pre-aligned cells.

    *fmt* is a TableFormat describing the decorations; any element named
    in ``fmt.with_header_hide`` is suppressed when headers are present.
    """
    pad = fmt.padding
    suppressed = fmt.with_header_hide if (headers and fmt.with_header_hide) else []
    cell_widths = [width + 2 * pad for width in colwidths]
    header_cells = _pad_row(headers, pad)
    body_rows = [_pad_row(row, pad) for row in rows]

    output = []
    if fmt.lineabove and "lineabove" not in suppressed:
        output.append(_build_line(cell_widths, colaligns, fmt.lineabove))
    if header_cells:
        output.append(_build_row(header_cells, cell_widths, colaligns, fmt.headerrow))
    if fmt.linebelowheader and "linebelowheader" not in suppressed:
        output.append(_build_line(cell_widths, colaligns, fmt.linebelowheader))
    if body_rows and fmt.linebetweenrows and "linebetweenrows" not in suppressed:
        # every row but the last is followed by a separator line
        for row in body_rows[:-1]:
            output.append(_build_row(row, cell_widths, colaligns, fmt.datarow))
            output.append(_build_line(cell_widths, colaligns, fmt.linebetweenrows))
        # the last row without a line below
        output.append(_build_row(body_rows[-1], cell_widths, colaligns, fmt.datarow))
    else:
        for row in body_rows:
            output.append(_build_row(row, cell_widths, colaligns, fmt.datarow))
    if fmt.linebelow and "linebelow" not in suppressed:
        output.append(_build_line(cell_widths, colaligns, fmt.linebelow))
    return "\n".join(output)
| 35.070505 | 198 | 0.534729 |
acec11fc936754af09ea00a5b275a4d4b0fdde50 | 850 | py | Python | ardana_configurationprocessor/cp/model/JsonHelper.py | eapodaca/ardana-configuration-processor | 70ea25a138cf65d9fb6baf75b48d82f12bbc6d8b | [
"Apache-2.0"
] | 2 | 2018-01-24T20:21:23.000Z | 2019-07-29T07:55:21.000Z | ardana_configurationprocessor/cp/model/JsonHelper.py | eapodaca/ardana-configuration-processor | 70ea25a138cf65d9fb6baf75b48d82f12bbc6d8b | [
"Apache-2.0"
] | null | null | null | ardana_configurationprocessor/cp/model/JsonHelper.py | eapodaca/ardana-configuration-processor | 70ea25a138cf65d9fb6baf75b48d82f12bbc6d8b | [
"Apache-2.0"
] | 3 | 2018-03-09T19:51:06.000Z | 2019-02-26T19:58:11.000Z | #
# (c) Copyright 2015 Hewlett Packard Enterprise Development LP
# (c) Copyright 2017-2018 SUSE LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import simplejson as json
class JsonEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
return json.JSONEncoder.default(self, obj)
| 34 | 75 | 0.738824 |
acec1382d4278b1c77af94c34b59691cb7c52299 | 505 | py | Python | pytf2/mp_deal.py | mninc/pytf | 3dbf93f6c6b8edd7366f9fdb8f46ad84df3ac819 | [
"MIT"
] | 7 | 2018-03-09T22:50:31.000Z | 2021-08-09T10:17:45.000Z | pytf2/mp_deal.py | mninc/pytf | 3dbf93f6c6b8edd7366f9fdb8f46ad84df3ac819 | [
"MIT"
] | 2 | 2019-06-19T19:18:34.000Z | 2020-08-20T11:10:09.000Z | pytf2/mp_deal.py | mninc/pytf | 3dbf93f6c6b8edd7366f9fdb8f46ad84df3ac819 | [
"MIT"
] | 1 | 2019-08-15T08:23:32.000Z | 2019-08-15T08:23:32.000Z | class Item:
def __init__(self, data):
self.sku = data.get("sku")
self.name = data.get("name")
self.defindex = data.get("defindex")
self.quality = data.get("quality")
self.num_for_sale = data.get("num_for_sale")
self.lowest_price = data.get("lowest_price")
class Deal:
def __init__(self, data):
self.item = Item(data["item"])
self.deal_pct_off = data.get("deal_pct_off")
self.deal_dollars_off = data.get("deal_dollars_off")
| 31.5625 | 60 | 0.617822 |
acec1473831940fa9e720f51a6f140d55cc78eb3 | 5,026 | py | Python | Python/SimpleNeuralNetwork.py | FlorentCLMichel/learning_data_science | d9ccc0a85609406b2c77a91db96dba8c97fc9ac4 | [
"MIT"
] | null | null | null | Python/SimpleNeuralNetwork.py | FlorentCLMichel/learning_data_science | d9ccc0a85609406b2c77a91db96dba8c97fc9ac4 | [
"MIT"
] | null | null | null | Python/SimpleNeuralNetwork.py | FlorentCLMichel/learning_data_science | d9ccc0a85609406b2c77a91db96dba8c97fc9ac4 | [
"MIT"
] | null | null | null | '''
In this file we provide a simple implementation of a neural network.
We use a sigmoid as activation function and lean square as loss function.
This impleentation is not efficient for a large number of neurons and should
thus not be used for real-world applications. Much more efficient
implementations are provided, for instance, by the Tensorflow, PyTorch, and
Keras libraries. The main goal of the present file is to show the inner
workings of a basic neural network in a more transparent way.
'''
import numpy as np
def sigmoid(x):
return 1. / (1. + np.exp(-x))
class NeuralNetwork:
'''
Assume the last layer has exactly one neuron
Assume the first hidden layer as has many neurons as there are inputs
We use that the derivative of the sigmoid is f' = (1-f) f
'''
def __init__(self, layers, weights = None, bias = None, learn_rate = 0.1, epochs = 1000):
self.layers = layers
self.learn_rate = learn_rate
self.epochs = epochs
# If the bias are not given, choose them randomly
if bias is None:
bias = []
for i in range(len(layers)):
bias.append([])
for j in range(layers[i]):
bias[-1].append(np.random.normal())
# If the weights are not given, choose them randomly
if weights is None:
weights = []
for i in range(len(layers)):
weights.append([])
if i > 0:
for j in range(layers[i]):
weights[-1].append([np.random.normal() for k in range(layers[i-1])])
else:
for j in range(layers[i]):
weights[-1].append([np.random.normal() for k in range(layers[0])])
self.weights = weights
self.bias = bias
def feedforward(self, x):
for i in range(len(layers)):
x = [sigmoid(np.dot(self.weights[i][j], x) + self.bias[i][j]) for j in range(self.layers[i])]
return x
def train(self, data, y_true_all, learn_rate = 0, epochs = 0):
# If the learning rate and number of epochs are not given explicitly, take those of the instance
if learn_rate == 0:
learn_rate = self.learn_rate
if epochs == 0:
epochs = self.epochs
# to avoid writing "self" all the time
layers = self.layers
weights = self.weights
bias = self.bias
for epoch in range(epochs):
for x, y_true in zip(data, y_true_all): # run over the data
# feedfoward, retaining the state of each neuron
states = [x[:]]
for i in range(len(layers)):
x = [sigmoid(np.dot(weights[i][j], x) + bias[i][j]) for j in range(layers[i])]
states.append(x[:])
# the predicted value is the state of the last neuron
y_pred = states[-1][0]
# derivative of the loss function with respect to y_pred
d_L_d_ypred = 2 * (y_pred - y_true)
# partial derivatives for the output layer
state = states[-1][0]
# partial derivative with respect to the input
d_ypred_d_x = [[[weights[-1][0][k]*state*(1.-state) for k in range(layers[-2])]]]
# partial derivative with respect to the weights
d_ypred_d_weights = [[[states[-2][k]*state*(1.-state) for k in range(layers[-2])]]]
# partial derivative with respect to the bias
d_ypred_d_bias = [[state*(1.-state)]]
# partial derivatives for the other layers
for i in range(2, len(layers)+1): # running backward over the layers
d_ypred_d_x.insert(0,[])
d_ypred_d_weights.insert(0,[])
d_ypred_d_bias.insert(0,[])
for j in range(layers[-i]):
# derivative of y_pred with respect to the output of this neuron
d_ypred_d_yint = np.sum([d_ypred_d_x[1][k][j] for k in range(layers[-i+1])])
state = states[-i][j]
d_ypred_d_x[0].append([weights[-i][j][k]*state*(1.-state)*d_ypred_d_yint
for k in range(len(weights[-i][j]))])
d_ypred_d_weights[0].append([states[-i-1][k]*state*(1.-state)*d_ypred_d_yint
for k in range(len(weights[-i][j]))])
d_ypred_d_bias[0].append(state*(1.-state)*d_ypred_d_yint)
# update weights and bias
for i in range(len(layers)):
for j in range(layers[i]):
for k in range(len(weights[i][j])):
weights[i][j][k] -= learn_rate * d_L_d_ypred * d_ypred_d_weights[i][j][k]
bias[i][j] -= learn_rate * d_L_d_ypred * d_ypred_d_bias[i][j]
# evaluate the loss function
def loss(self, data, y_true_all):
res = 0.
for x, y_true in zip(data, y_true_all):
res = res + (self.feedforward(x) - y_true)**2
return res[0]/len(data)
# save the neural networ in a npy file
def save(self, name_file):
np.save(name_file+'.npy', [self.layers, self.weights, self.bias,
self.learn_rate, self.epochs])
# load a neural network from a npy file
def load_NN_1(name_file):
list_parameters = np.load(name_file+'.npy', allow_pickle=True)
return NeuralNetwork_1(*list_parameters)
# Example use (data taken from https://victorzhou.com/blog/intro-to-neural-networks/):
layers = [2, 1]
data = np.array([
[-2, -1],
[25, 6],
[17, 4],
[-15, -6],
])
y_true_all = np.array([
1,
0,
0,
1,
])
myNetwork = NeuralNetwork(layers)
myNetwork.train(data, y_true_all)
print(myNetwork.loss(data, y_true_all))
| 32.425806 | 98 | 0.660963 |
acec152efbe598323a28189415b2b1ba0e51a674 | 5,587 | py | Python | tripoll.py | pamelia/tripoll | 9f3fd60407d9837ee03f5646135b2f35c05c0310 | [
"MIT"
] | null | null | null | tripoll.py | pamelia/tripoll | 9f3fd60407d9837ee03f5646135b2f35c05c0310 | [
"MIT"
] | null | null | null | tripoll.py | pamelia/tripoll | 9f3fd60407d9837ee03f5646135b2f35c05c0310 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
Usage: tripoll.py --config=config [--debug]
"""
from docopt import docopt
import logging
import yaml
import sys
import time
import threading
import re
from datetime import datetime
from pysnmp.hlapi import *
from pysnmp.entity.rfc3413.oneliner import cmdgen
from influxdb import InfluxDBClient
LOG_FORMAT = '[%(levelname)s] - %(asctime)s (%(threadName)-10s) %(message)s'
debug = False
logger = logging.getLogger('tripoll')
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter(LOG_FORMAT)
ch.setFormatter(formatter)
logger.addHandler(ch)
def debug_log(msg):
if debug:
logger.debug(msg)
def load_config(config_file):
try:
with open(config_file, 'r') as f:
return yaml.load(f)
except Exception as e:
print('Failed to read config {}: {}'.format(config_file, e))
sys.exit(1)
def build_json(measurement, hostname, interface, timestamp, value):
json_body = [
{
"measurement": "{}".format(measurement),
"tags": {
"host": "{}".format(hostname),
"interface": "{}".format(interface)
},
"time": "{}".format(timestamp),
"fields": {
"value": value
}
}
]
return json_body
def poll(what, interface, snmp_engine, community_data, transport_target):
err_indication, err_status, err_index, var_binds = next(
getCmd(snmp_engine,
community_data,
transport_target,
ContextData(),
ObjectType(ObjectIdentity('IF-MIB', what, interface)))
)
if err_indication:
logger.warning("Poll failed: " + str(err_indication))
return err_indication, 0
elif err_status:
print('%s at %s' % (
err_status.prettyPrint(),
err_index and var_binds[int(err_index) - 1][0] or '?'))
else:
return 'success', long(var_binds[0][1])
def get_interface_id_from_mib(oid):
return oid.split('.')[-1]
def get_current_time():
return datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')
def get_interface_ids(config):
for host in config['hosts']:
logger.info('getting interface ids for %s' % host)
cmd_gen = cmdgen.CommandGenerator()
interfaces = []
err_indication, err_status, err_index, varbindtable = cmd_gen.nextCmd(
cmdgen.CommunityData(config['snmp']['community']),
cmdgen.UdpTransportTarget((host, config['snmp']['port'])),
'1.3.6.1.2.1.2.2.1.2'
)
if err_indication:
logger.warning(err_indication)
else:
if err_status:
print('%s at %s' % (
err_status.prettyPrint(),
err_index and varbindtable[-1][int(err_index)-1] or '?'))
else:
for varBindTableRow in varbindtable:
for name, val in varBindTableRow:
for interface in config['hosts'][host]['interfaces']:
regex = re.compile("%s$" % interface)
m = re.search(regex, str(val.prettyPrint()))
if m:
interface_id = get_interface_id_from_mib(str(name))
interface_data = (interface, interface_id)
interfaces.append(interface_data)
break
config['hosts'][host]['interfaces'] = interfaces
return config
def worker(cfg, host):
influx = InfluxDBClient(cfg['influx']['hostname'],
cfg['influx']['port'],
cfg['influx']['username'],
cfg['influx']['password'],
cfg['influx']['database'])
snmp_engine = SnmpEngine()
community_data = CommunityData(cfg['snmp']['community'], mpModel=1)
transport_target = UdpTransportTarget((host, cfg['snmp']['port']))
while True:
for interface in cfg['hosts'][host]['interfaces']:
for what in ['ifHCInOctets', 'ifHCOutOctets']:
status, value = poll(what,
interface[1],
snmp_engine,
community_data,
transport_target)
if status == 'success':
current_time = get_current_time()
json_data = build_json(what,
host,
interface[0],
current_time,
value)
debug_log(json_data)
influx.write_points(json_data)
time.sleep(10)
def main():
args = docopt(__doc__)
logger.info('tripoll starting')
config = load_config(args['--config'])
global debug
if args['--debug']:
debug = True
cfg = get_interface_ids(config)
threads = []
for host in cfg['hosts']:
thread_name = 'poller-%s' % host
t = threading.Thread(name=thread_name,
target=worker,
args=(cfg, host,))
t.daemon = True
threads.append(t)
t.start()
while True:
logger.info('tripoll is alive')
time.sleep(60)
if __name__ == '__main__':
main()
| 30.36413 | 83 | 0.525327 |
acec1559f9cff11443bfe3d03e3ac46e358e3954 | 255 | py | Python | requirementsto.py | DavidLutton/Fragments | 20a14ea6b421e3e3e64883082733e70b060f7083 | [
"MIT"
] | null | null | null | requirementsto.py | DavidLutton/Fragments | 20a14ea6b421e3e3e64883082733e70b060f7083 | [
"MIT"
] | null | null | null | requirementsto.py | DavidLutton/Fragments | 20a14ea6b421e3e3e64883082733e70b060f7083 | [
"MIT"
] | null | null | null | data = ""
with open('requirements.txt', 'r') as f:
data += f.read()
# print(data)
for n in data.splitlines(True):
if n.startswith("# "):
print(n)
else:
x = n.split(" # ")
if len(x) > 1:
print("- " + x[1])
| 19.615385 | 40 | 0.458824 |
acec15c15fca7cab53020902b8d38fdb60fc84cb | 2,033 | py | Python | causal_networkx/algorithms/d_separation.py | adam2392/causal-networkx | aba5355d2e900b30dd3d99916981674f3c0074e9 | [
"BSD-3-Clause"
] | null | null | null | causal_networkx/algorithms/d_separation.py | adam2392/causal-networkx | aba5355d2e900b30dd3d99916981674f3c0074e9 | [
"BSD-3-Clause"
] | null | null | null | causal_networkx/algorithms/d_separation.py | adam2392/causal-networkx | aba5355d2e900b30dd3d99916981674f3c0074e9 | [
"BSD-3-Clause"
] | null | null | null | from typing import Union
import numpy as np
from networkx.algorithms import d_separated as nx_d_separated
from causal_networkx.cgm import ADMG, PAG
def d_separated(G: Union[ADMG, PAG], x, y, z):
"""Check d-separation among 'x' and 'y' given 'z' in graph G.
This algorithm wraps ``networkx.algorithms.d_separated``, but
allows one to pass in a ``ADMG`` instance instead.
It first converts all bidirected edges into explicit unobserved
confounding nodes in an explicit ``networkx.DiGraph``, which then
calls ``networkx.algorithms.d_separated`` to determine d-separation.
This inherently increases the runtime cost if there are many
bidirected edges, because many nodes must be added.
Parameters
----------
G : ADMG
Causal graph.
x : set
First set of nodes in ``G``.
y : set
Second set of nodes in ``G``.
z : set
Set of conditioning nodes in ``G``. Can be empty set.
See Also
--------
causal_networkx.ADMG
networkx.algorithms.d_separation.d_separated
Notes
-----
This wraps the networkx implementation, which only allows DAGs. Since
``ADMG`` is not represented.
"""
# get the full graph by converting bidirected edges into latent confounders
# and keeping the directed edges
explicit_G = G.compute_full_graph(to_networkx=True)
# run d-separation
if isinstance(x, np.ndarray):
x = set(list(x))
elif isinstance(x, str):
x = set([x])
elif type(x) == int or float:
x = set([x])
if isinstance(y, np.ndarray):
y = set(list(y))
elif isinstance(y, str):
y = set([y])
elif type(y) == int or float:
y = set([y])
if isinstance(z, np.ndarray):
z = set(list(z))
elif isinstance(z, str):
z = set([z])
elif type(z) in (int, float):
z = set([z])
# make sure there are always conditioned on the conditioning set
z = z.union(G._cond_set)
return nx_d_separated(explicit_G, x, y, z)
| 28.633803 | 79 | 0.632563 |
acec1667d7defb2d7ff82a0a5c68d50e4823ef92 | 5,733 | py | Python | processdata/getdata.py | TheSecret3003/skkweb | 2a00c7b1b3fe00fcd2e929d18bab8d0630d47469 | [
"MIT"
] | null | null | null | processdata/getdata.py | TheSecret3003/skkweb | 2a00c7b1b3fe00fcd2e929d18bab8d0630d47469 | [
"MIT"
] | null | null | null | processdata/getdata.py | TheSecret3003/skkweb | 2a00c7b1b3fe00fcd2e929d18bab8d0630d47469 | [
"MIT"
] | null | null | null | import datetime
import platform
import csv
import joblib
import numpy as np
import pandas as pd
# model_scaler = joblib.load("scalerprofileproduction.save")
# model_gmfileprod = joblib.load("gmprofileproduction.save")
def get_database1():
df = pd.read_excel("processdata/database1.xlsx", engine="openpyxl")
return df
def get_database2():
df = pd.read_excel("processdata/database2.xlsx", engine="openpyxl", sheet_name=None)
return df
def get_database3():
df = pd.read_excel("processdata/database3.xlsx", engine="openpyxl")
return df
def func1(rfpred, qd2, field_ioip):
fname=[]
rf=[]
rfdif=[]
rfqpeak=[]
qpeak=[]
rfmax=[]
b=[]
di=[]
for k in qd2.index:
fname.append(qd2['Field Name'][k])
rf.append(qd2['EUR/IOIP'][k])
rfqpeak.append(qd2['RF@qpeak'][k])
qpeak.append(qd2['qpeak, stb/m'][k]/30)
rfmax.append(qd2['RF Max'][k])
b.append(qd2['b'][k])
di.append(qd2['Di'][k])
rfdif.append(abs(rfpred-qd2['EUR/IOIP'][k]))
rf_dif=pd.DataFrame()
rf_dif['FIELD_NAME']=fname
rf_dif['RF']=rf
rf_dif['RF@qpeak']=rfqpeak
rf_dif['qpeak']=qpeak
rf_dif['RF Max']=rfmax
rf_dif['b']=b
rf_dif['Di']=di
rf_dif['RF_DIFFERENCE']=rfdif
rf_dif.sort_values(by=['RF_DIFFERENCE'], inplace=True)
rf_dif.reset_index(drop=True, inplace=True)
qcummax=rfpred*field_ioip*(10**6)
return rf_dif, qcummax
def func2(rfpred, rf_dif, database3, field_ioip, model_cluster, model_scaling):
rf_qpeak=[]
qcum_peak=[]
rf_cluster_pred=[]
b=[]
di=[]
rfqpeak=-1
k=0
while rfqpeak<0:
low=rf_dif.iloc[k,:]
up=rf_dif.iloc[k+1,:]
rfqpeak=low['RF@qpeak']+((rfpred-low['RF'])/(up['RF']-low['RF']))*(up['RF@qpeak']-low['RF@qpeak'])
k+=1
qcum_peak=rfqpeak*field_ioip*(10**6)
rf_qpeak=rfqpeak
clust=model_cluster.predict(model_scaling.transform([[rfqpeak,rfpred]]))[0]
b=database3['b'][clust]
di=database3['Di'][clust]/100
rf_cluster_pred=clust
return rf_qpeak, qcum_peak, rf_cluster_pred, b, di
def func3(rf_dif, df_ita, rf_qpeak):
for k in rf_dif.index:
if rf_qpeak<=3/6*rf_dif['RF Max'][k] and rf_qpeak>=1/6*rf_dif['RF Max'][k]:
#if k>=0:
rfqp=rf_qpeak
fn=rf_dif['FIELD_NAME'][k]
tbasis=df_ita[fn][df_ita[fn]['RF']<=rfqp]
ttpeak=tbasis['t/tpeak'][len(tbasis)-1]
tcum=tbasis['tcum'][len(tbasis)-1]
tpeak=tcum/ttpeak
qpeak=rf_dif['qpeak'][k]
break
return tpeak
# Defining Function
def f(x, t, N):
return (t**x)/x - N
# Defining derivative of function
def g(x, t):
return ((np.log(t)*x-1)*(t**x))/(x**2)
# Implementing Newton Raphson Method
def newtonRaphson(x0,e,niter,t,N):
#print('\n\n*** NEWTON RAPHSON METHOD IMPLEMENTATION ***')
step = 1
flag = 1
condition = True
while condition:
if g(x0,t) == 0.0:
print('Divide by zero error!')
break
x1 = x0 - f(x0, t,N)/g(x0,t)
#print('Iteration-%d, x1 = %0.6f and f(x1) = %0.6f' % (step, x1, f(x1,t,N)))
x0 = x1
step = step + 1
if step > niter:
flag = 0
break
condition = abs(f(x1,t,N)) > e
#if flag==1:
#print('\nRequired root is: %0.8f' % x1)
#else:
#print('\nNot Convergent.')
return x1
def func4 (tpeak, qcum_peak):
x0=3
e=0.0001
t=tpeak
N=qcum_peak
niter=1000
q_exp=abs(newtonRaphson(x0,e,niter,t,N)-1)
return q_exp
def func(x, qexp):
y=x**(qexp)
return y
def cum_prod(x,y):
cumprod=0
for i in range (len(x)):
cumprod=cumprod+y[i]
return cumprod
def forecast(qi, b, Di, t0, qcumpeak, qcummax):
def q(qi, b, Di, t, t0):
if b==0:
q=qi*np.exp((-Di) * (t-t0))
else:
q=qi/((1 + (b * Di*(t-t0)))**(1/b))
return q
condition=True
i=1
qcum=qcumpeak
x=[]
x.append(t0)
y=[]
y.append(qi)
while condition:
x.append(t0+i)
y.append(q(y[0],b,Di,x[i],x[0]))
qcum=qcum+y[i]
i=i+1
if qcum>=qcummax or (t0+i)>=365*30:
condition=False
return x, y
def plot_prod(tpeak, qexp, fieldname,qcumpeak,qcummax,b,di):
xi=[i for i in range (1,int(tpeak+1))]
yi=[]
for i in xi:
yi.append(func(i, qexp))
x=[]
y=[]
nmon=np.ceil(tpeak/30)
for i in range(1,int(nmon+1)):
x.append(i)
qmon=0
j=1
if i!=nmon:
while j<=30:
qmon+=yi[j+(i-1)*30-1]
j+=1
y.append(qmon)
else:
while (j+(i-1)*30-1)<tpeak-1:
qmon+=yi[j+(i-1)*30-1]
j+=1
y.append(qmon)
x_forecast, y_forecast=forecast(max(y),b,di,nmon,qcumpeak,qcummax)
# plt.figure(figsize=(16,8))
# plt.plot(x,y)
# plt.fill_between(np.array(x),np.array(y), color='r')
# plt.plot(x_forecast,y_forecast)
# plt.xlabel('Time, Month', fontstyle='italic',fontsize=12,color='dimgrey')
# plt.ylabel('Oil Production, STB/M', fontstyle='italic',fontsize=12,color='dimgrey')
# plt.title(fieldname, fontweight='bold',fontsize = 15)
# plt.xticks(fontsize=15)
# plt.xlim(0,500)
# plt.yticks(fontsize=15)
# plt.show()
df1 = pd.DataFrame( columns =['X_up', 'Y_up'])
df2 = pd.DataFrame( columns =['X_down', 'Y_down'])
df1['X_up'] = list(x)
df1['Y_up'] = list(y)
df2['X_down'] = list(x_forecast)
df2['Y_down'] = list(y_forecast)
return df1,df2 # return x, y
| 26.298165 | 114 | 0.556777 |
acec16d0ede537849efab53bd084e395dd619657 | 1,456 | py | Python | deepext/data/transforms/classification.py | pei223/deepext | 54874594f7f94715bd00f6a3fac0ce609fefabe6 | [
"MIT"
] | 2 | 2020-06-17T02:47:23.000Z | 2020-09-10T14:34:39.000Z | deepext/data/transforms/classification.py | pei223/deepext | 54874594f7f94715bd00f6a3fac0ce609fefabe6 | [
"MIT"
] | 2 | 2021-06-08T21:28:45.000Z | 2022-01-13T02:41:12.000Z | deepext/data/transforms/classification.py | pei223/deepext | 54874594f7f94715bd00f6a3fac0ce609fefabe6 | [
"MIT"
] | 1 | 2020-06-17T02:57:14.000Z | 2020-06-17T02:57:14.000Z | import numpy as np
from PIL import Image
import albumentations as A
import cv2
class AlbumentationsClsWrapperTransform:
    """Adapter that applies an albumentations pipeline to (image, label)
    classification samples, optionally scaling pixels to [0, 1]."""

    def __init__(self, albumentations_transforms: A.Compose, is_image_normalize=True):
        self._albumentations_transforms = albumentations_transforms
        self._is_image_normalize = is_image_normalize

    def __call__(self, image: Image.Image or np.ndarray, label: int):
        # Accept PIL images (or anything array-like) by converting up front.
        if not isinstance(image, np.ndarray):
            image = np.array(image)
        # OpenCV arrays are BGR; albumentations pipelines expect RGB.
        rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        augmented = self._albumentations_transforms(image=rgb)["image"]
        if self._is_image_normalize:
            augmented = augmented / 255.
        return augmented, label
class AlbumentationsOnlyImageWrapperTransform:
    """Adapter that applies an albumentations pipeline to a bare image
    (no label), optionally scaling pixels to [0, 1]."""

    def __init__(self, albumentations_transforms: A.Compose, is_image_normalize=True):
        self._albumentations_transforms = albumentations_transforms
        self._is_image_normalize = is_image_normalize

    def __call__(self, image: Image.Image or np.ndarray):
        # Accept PIL images (or anything array-like) by converting up front.
        if not isinstance(image, np.ndarray):
            image = np.array(image)
        # OpenCV arrays are BGR; albumentations pipelines expect RGB.
        rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        augmented = self._albumentations_transforms(image=rgb)["image"]
        if self._is_image_normalize:
            augmented = augmented / 255.
        return augmented
acec170b4a7326878668716a21d0ffa9a657c460 | 652 | py | Python | python_framework/api/src/service/SchedulerManager.py | SamuelJansen/python_framework | a3e57def47c13edd67319f9bbca32be2bbb00f43 | [
"MIT"
] | 5 | 2020-09-02T20:05:44.000Z | 2022-03-04T21:02:13.000Z | python_framework/api/src/service/SchedulerManager.py | SamuelJansen/python_framework | a3e57def47c13edd67319f9bbca32be2bbb00f43 | [
"MIT"
] | 1 | 2021-05-23T22:55:58.000Z | 2021-05-24T15:33:50.000Z | python_framework/api/src/service/SchedulerManager.py | SamuelJansen/python_framework | a3e57def47c13edd67319f9bbca32be2bbb00f43 | [
"MIT"
] | 3 | 2020-11-01T01:13:09.000Z | 2022-02-22T15:01:19.000Z | from python_helper import Constant as c
from python_helper import EnvironmentHelper
from flask_apscheduler import APScheduler
def addScheduler(api, app) :
    """Create an APScheduler, enable it per the API settings, attach it to
    the api object and return it."""
    enabled = api.globals.getApiSetting('api.scheduler.enable')
    scheduler = APScheduler()
    # The setting may be a real boolean or the framework's TRUE constant.
    scheduler.api_enabled = enabled is True or c.TRUE == enabled
    api.scheduler = scheduler
    return scheduler
def initialize(api, app) :
    """Bind the previously attached scheduler to the Flask app and start it."""
    scheduler = api.scheduler
    scheduler.init_app(app)
    scheduler.start()
def shutdown(api, app) :
    """Arrange for the scheduler to be stopped when the interpreter exits."""
    import atexit

    def _stop_scheduler():
        api.scheduler.shutdown(wait=False)

    atexit.register(_stop_scheduler)
| 31.047619 | 64 | 0.742331 |
acec1714dab94e7c9b869e069031d9a29f712537 | 8,338 | py | Python | evalml/tests/automl_tests/test_automl_utils.py | BlockchainClimateInstitute/price_microservice | 11d1cff8965fe1befc997e9da3dc09efceed4579 | [
"BSD-3-Clause"
] | null | null | null | evalml/tests/automl_tests/test_automl_utils.py | BlockchainClimateInstitute/price_microservice | 11d1cff8965fe1befc997e9da3dc09efceed4579 | [
"BSD-3-Clause"
] | null | null | null | evalml/tests/automl_tests/test_automl_utils.py | BlockchainClimateInstitute/price_microservice | 11d1cff8965fe1befc997e9da3dc09efceed4579 | [
"BSD-3-Clause"
] | null | null | null | import warnings
from unittest.mock import patch
import pandas as pd
import pytest
from sklearn.model_selection import KFold
from evalml.automl.utils import (
_LARGE_DATA_PERCENT_VALIDATION,
_LARGE_DATA_ROW_THRESHOLD,
get_default_primary_search_objective,
make_data_splitter,
tune_binary_threshold
)
from evalml.objectives import F1, R2, LogLossBinary, LogLossMulticlass
from evalml.preprocessing.data_splitters import (
BalancedClassificationDataCVSplit,
BalancedClassificationDataTVSplit,
TimeSeriesSplit,
TrainingValidationSplit
)
from evalml.problem_types import ProblemTypes
from evalml.utils.woodwork_utils import infer_feature_types
def test_get_default_primary_search_objective():
    """Default search objective is log loss for (time series) classification
    and R2 for regression, for both string and ProblemTypes inputs; unknown
    names surface as a KeyError from the lookup."""
    assert isinstance(get_default_primary_search_objective("binary"), LogLossBinary)
    assert isinstance(get_default_primary_search_objective(ProblemTypes.BINARY), LogLossBinary)
    assert isinstance(get_default_primary_search_objective("multiclass"), LogLossMulticlass)
    assert isinstance(get_default_primary_search_objective(ProblemTypes.MULTICLASS), LogLossMulticlass)
    assert isinstance(get_default_primary_search_objective("regression"), R2)
    assert isinstance(get_default_primary_search_objective(ProblemTypes.REGRESSION), R2)
    assert isinstance(get_default_primary_search_objective('time series binary'), LogLossBinary)
    assert isinstance(get_default_primary_search_objective('time series multiclass'), LogLossMulticlass)
    with pytest.raises(KeyError, match="Problem type 'auto' does not exist"):
        get_default_primary_search_objective("auto")
@pytest.mark.parametrize("problem_type", ProblemTypes.all_problem_types)
@pytest.mark.parametrize("large_data", [False, True])
def test_make_data_splitter_default(problem_type, large_data):
    """make_data_splitter picks the expected splitter class and default
    settings for every problem type, and switches to single
    train/validation splits once the row count crosses the large-data
    threshold."""
    n = 10
    if large_data:
        # One row past the threshold triggers the large-data code path.
        n = _LARGE_DATA_ROW_THRESHOLD + 1
    X = pd.DataFrame({'col_0': list(range(n)),
                      'target': list(range(n))})
    y = X.pop('target')

    # Time series problems require gap/max_delay configuration.
    problem_configuration = None
    if problem_type in [ProblemTypes.TIME_SERIES_REGRESSION,
                        ProblemTypes.TIME_SERIES_BINARY,
                        ProblemTypes.TIME_SERIES_MULTICLASS]:
        problem_configuration = {'gap': 1, 'max_delay': 7}

    data_splitter = make_data_splitter(X, y, problem_type, problem_configuration=problem_configuration)
    if large_data and problem_type in [ProblemTypes.REGRESSION, ProblemTypes.BINARY, ProblemTypes.MULTICLASS]:
        # Large data: TV split instead of cross-validation.
        if problem_type == ProblemTypes.REGRESSION:
            assert isinstance(data_splitter, TrainingValidationSplit)
            assert data_splitter.stratify is None
            assert data_splitter.random_state == 0
        else:
            assert isinstance(data_splitter, BalancedClassificationDataTVSplit)
            assert data_splitter.random_seed == 0

        assert data_splitter.shuffle
        assert data_splitter.test_size == _LARGE_DATA_PERCENT_VALIDATION
        return

    # Small data: 3-fold cross-validation splitters.
    if problem_type == ProblemTypes.REGRESSION:
        assert isinstance(data_splitter, KFold)
        assert data_splitter.n_splits == 3
        assert data_splitter.shuffle
        assert data_splitter.random_state == 0

    if problem_type in [ProblemTypes.BINARY, ProblemTypes.MULTICLASS]:
        assert isinstance(data_splitter, BalancedClassificationDataCVSplit)
        assert data_splitter.n_splits == 3
        assert data_splitter.shuffle
        assert data_splitter.random_seed == 0

    if problem_type in [ProblemTypes.TIME_SERIES_REGRESSION,
                        ProblemTypes.TIME_SERIES_BINARY,
                        ProblemTypes.TIME_SERIES_MULTICLASS]:
        assert isinstance(data_splitter, TimeSeriesSplit)
        assert data_splitter.n_splits == 3
        assert data_splitter.gap == 1
        assert data_splitter.max_delay == 7
@pytest.mark.parametrize("problem_type, expected_data_splitter", [(ProblemTypes.REGRESSION, KFold),
                                                                  (ProblemTypes.BINARY, BalancedClassificationDataCVSplit),
                                                                  (ProblemTypes.MULTICLASS, BalancedClassificationDataCVSplit)])
def test_make_data_splitter_parameters(problem_type, expected_data_splitter):
    """n_splits and random_seed are forwarded to the chosen splitter."""
    n = 10
    X = pd.DataFrame({'col_0': list(range(n)),
                      'target': list(range(n))})
    y = X.pop('target')
    random_seed = 42

    data_splitter = make_data_splitter(X, y, problem_type, n_splits=5, random_seed=random_seed)
    assert isinstance(data_splitter, expected_data_splitter)
    assert data_splitter.n_splits == 5
    assert data_splitter.shuffle
    # sklearn's KFold exposes random_state; evalml splitters expose random_seed.
    if str(problem_type) == 'regression':
        assert data_splitter.random_state == random_seed
    else:
        assert data_splitter.random_seed == random_seed
def test_make_data_splitter_parameters_time_series():
    """Time series splitters receive n_splits plus the gap/max_delay values
    from problem_configuration."""
    n = 10
    X = pd.DataFrame({'col_0': list(range(n)),
                      'target': list(range(n))})
    y = X.pop('target')

    for problem_type in [ProblemTypes.TIME_SERIES_REGRESSION, ProblemTypes.TIME_SERIES_BINARY, ProblemTypes.TIME_SERIES_MULTICLASS]:
        data_splitter = make_data_splitter(X, y, problem_type, problem_configuration={'gap': 1, 'max_delay': 7}, n_splits=5, shuffle=False)
        assert isinstance(data_splitter, TimeSeriesSplit)
        assert data_splitter.n_splits == 5
        assert data_splitter.gap == 1
        assert data_splitter.max_delay == 7
def test_make_data_splitter_error():
    """Missing time series configuration raises ValueError; an unknown
    problem type name raises KeyError."""
    n = 10
    X = pd.DataFrame({'col_0': list(range(n)),
                      'target': list(range(n))})
    y = X.pop('target')

    with pytest.raises(ValueError, match="problem_configuration is required for time series problem types"):
        make_data_splitter(X, y, ProblemTypes.TIME_SERIES_REGRESSION)
    with pytest.raises(KeyError, match="Problem type 'XYZ' does not exist"):
        make_data_splitter(X, y, 'XYZ')
@pytest.mark.parametrize("problem_type", [ProblemTypes.REGRESSION, ProblemTypes.BINARY, ProblemTypes.MULTICLASS])
@pytest.mark.parametrize("large_data", [True, False])
def test_make_data_splitter_error_shuffle_random_state(problem_type, large_data):
    """Passing a random seed together with shuffle=False is rejected for the
    CV splitters (small data) but accepted by the TV splitters (large data)."""
    n = 10
    if large_data:
        n = _LARGE_DATA_ROW_THRESHOLD + 1
    X = pd.DataFrame({'col_0': list(range(n)),
                      'target': list(range(n))})
    y = X.pop('target')

    if large_data:
        # Should not raise on the large-data path.
        make_data_splitter(X, y, problem_type, n_splits=5, shuffle=False, random_seed=42)
    else:
        with pytest.raises(ValueError, match="Setting a random_state has no effect since shuffle is False."):
            make_data_splitter(X, y, problem_type, n_splits=5, shuffle=False, random_seed=42)
def test_make_data_splitter_raises_deprecated_random_state_warning(X_y_binary):
    """The legacy random_state argument still works but emits a deprecation
    warning pointing at random_seed."""
    X, y = X_y_binary
    with warnings.catch_warnings(record=True) as warn:
        warnings.simplefilter("always")
        splitter = make_data_splitter(X, y, "binary", n_splits=5, shuffle=True, random_state=15)
        # The deprecated value must still be honoured.
        assert splitter.random_seed == 15
        assert str(warn[0].message).startswith(
            "Argument 'random_state' has been deprecated in favor of 'random_seed'")
@patch('evalml.objectives.BinaryClassificationObjective.optimize_threshold')
@patch('evalml.pipelines.BinaryClassificationPipeline._encode_targets', side_effect=lambda y: y)
@patch('evalml.pipelines.BinaryClassificationPipeline.predict_proba')
@patch('evalml.pipelines.BinaryClassificationPipeline.score')
@patch('evalml.pipelines.BinaryClassificationPipeline.fit')
def test_tune_binary_threshold(mock_fit, mock_score, mock_predict_proba, mock_encode_targets, mock_optimize_threshold,
                               dummy_binary_pipeline_class, X_y_binary):
    """tune_binary_threshold sets the optimized threshold for binary problems
    with data, falls back to 0.5 without data, and leaves non-binary
    pipelines untouched.

    Note: @patch decorators apply bottom-up, so the bottom patch (fit) binds
    to the first mock parameter.
    """
    mock_optimize_threshold.return_value = 0.42
    mock_score.return_value = {'F1': 1.0}
    X, y = X_y_binary
    X = infer_feature_types(X)
    y = infer_feature_types(y)

    pipeline = dummy_binary_pipeline_class({})
    tune_binary_threshold(pipeline, F1(), 'binary', X, y)
    assert pipeline.threshold == 0.42

    # No data available: default threshold of 0.5 is used.
    pipeline = dummy_binary_pipeline_class({})
    tune_binary_threshold(pipeline, F1(), 'binary', None, None)
    assert pipeline.threshold == 0.5

    # Non-binary problem types are not thresholded.
    pipeline = dummy_binary_pipeline_class({})
    tune_binary_threshold(pipeline, F1(), 'multiclass', X, y)
    assert pipeline.threshold is None
| 44.827957 | 139 | 0.722236 |
acec176d4d0e89e561c8c3dccf11427f0228d717 | 755 | py | Python | igvc_ws/src/igvc_nav/src/utilities/dstar_viewer.py | SoonerRobotics/igvc_software_2022 | 906e6a4fca22d2b0c06ef1b8a4a3a9df7f1d17dd | [
"MIT"
] | 4 | 2020-07-07T14:56:56.000Z | 2021-08-13T23:31:07.000Z | igvc_ws/src/igvc_nav/src/utilities/dstar_viewer.py | pradumn203/igvc-winners-2021 | 658233609054eafac59603a77b2a092dc002e145 | [
"MIT"
] | 4 | 2020-09-22T01:53:48.000Z | 2021-07-17T01:02:31.000Z | igvc_ws/src/igvc_nav/src/utilities/dstar_viewer.py | pradumn203/igvc-winners-2021 | 658233609054eafac59603a77b2a092dc002e145 | [
"MIT"
] | 3 | 2021-06-29T05:21:18.000Z | 2021-08-23T05:03:27.000Z | import matplotlib as mpl
import numpy as np
from matplotlib import pyplot as plt
def setup_pyplot():
    """Enable pyplot interactive (non-blocking) mode and show the figure
    window so subsequent draw calls update it live."""
    plt.ion()
    plt.show()
def draw_dstar(start_pos, goal_pos, cost_map, path, fig_num=1):
    """Render D* planning state (cost map, path, start/goal) on a pyplot figure."""
    plt.figure(fig_num)
    plt.clf()
    # The cost map is stored flat; the grid is assumed to be 200x200 cells.
    if cost_map:
        plt.imshow(np.reshape(cost_map, (200, 200)), interpolation='nearest', origin='lower')
    # Path waypoints in red.
    for waypoint in (path or []):
        plt.plot(waypoint[0], waypoint[1], '.', markersize=8, color="red")
    if start_pos:
        plt.plot(start_pos[0], start_pos[1], '.', markersize=8, color="black")
    if goal_pos:
        plt.plot(goal_pos[0], goal_pos[1], '.', markersize=8, color="pink")
    plt.draw()
    # A tiny pause lets the GUI event loop process the redraw.
    plt.pause(0.00000000001)
acec17fa68fee308f3bfa59ba7d15f0e3e49491a | 45,148 | py | Python | wrappers/Python/CoolProp/Plots/Common.py | rileycarr/CoolPropJava | 9c7fd4acea23e1354f816d2d60e35046c0966570 | [
"MIT"
] | null | null | null | wrappers/Python/CoolProp/Plots/Common.py | rileycarr/CoolPropJava | 9c7fd4acea23e1354f816d2d60e35046c0966570 | [
"MIT"
] | null | null | null | wrappers/Python/CoolProp/Plots/Common.py | rileycarr/CoolPropJava | 9c7fd4acea23e1354f816d2d60e35046c0966570 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import matplotlib.pyplot as plt
import numpy as np
from abc import ABCMeta
from six import with_metaclass
import warnings
import CoolProp
from CoolProp import AbstractState
from CoolProp import CoolProp as CP
from CoolProp.CoolProp import PropsSI, extract_backend, extract_fractions, PyCriticalState
def get_critical_point(state):
    """Return a new AbstractState positioned at the critical point of *state*.

    For pure fluids the critical accessors are used directly; for mixtures
    the stable critical point with the highest temperature from
    ``all_critical_points`` is selected. The returned state uses the same
    backend, fluids and mass fractions as the input.

    Raises
    ------
    ValueError
        If no critical point data can be computed, or if the new state
        cannot be updated with any of the available input pairs.
    """
    crit_state = PyCriticalState()
    crit_state.T = np.nan
    crit_state.p = np.nan
    crit_state.rhomolar = np.nan
    crit_state.stable = False
    try:
        crit_state.T = state.T_critical()
        crit_state.p = state.p_critical()
        crit_state.rhomolar = state.rhomolar_critical()
        crit_state.stable = True
    except Exception:
        # Mixtures may not support the single-point accessors; fall back to
        # scanning all critical points for the stable one with the highest T.
        try:
            for crit_state_tmp in state.all_critical_points():
                if crit_state_tmp.stable and (crit_state_tmp.T > crit_state.T or not np.isfinite(crit_state.T)):
                    crit_state.T = crit_state_tmp.T
                    crit_state.p = crit_state_tmp.p
                    crit_state.rhomolar = crit_state_tmp.rhomolar
                    crit_state.stable = crit_state_tmp.stable
        except Exception:
            raise ValueError("Could not calculate the critical point data.")

    new_state = AbstractState(state.backend_name(), '&'.join(state.fluid_names()))
    masses = state.get_mass_fractions()
    if len(masses) > 1:
        new_state.set_mass_fractions(masses)  # Uses mass fraction to work with incompressibles

    # Try each finite input pair, first with the phase pinned to the
    # critical point and then without, keeping the first that succeeds.
    candidates = []
    if np.isfinite(crit_state.p) and np.isfinite(crit_state.T):
        candidates.append((CoolProp.PT_INPUTS, crit_state.p, crit_state.T))
    if np.isfinite(crit_state.rhomolar) and np.isfinite(crit_state.T):
        candidates.append((CoolProp.DmolarT_INPUTS, crit_state.rhomolar, crit_state.T))

    msg = ""
    for pair, in1, in2 in candidates:
        for pin_phase in (True, False):
            try:
                if pin_phase:
                    new_state.specify_phase(CoolProp.iphase_critical_point)
                new_state.update(pair, in1, in2)
                return new_state
            except Exception as e:
                msg += str(e)+" - "
    raise ValueError("Could not calculate the critical point data. "+msg)
def interpolate_values_1d(x, y, x_points=None, kind='linear'):
    """Interpolate *y* sampled at *x*, evaluated at *x_points*.

    If *x_points* is None the finite entries of *x* are used as evaluation
    points. When SciPy is not installed, falls back to ``numpy.interp``
    (linear only, requires increasing *x*) and warns if a non-linear *kind*
    was requested.

    Parameters
    ----------
    x, y : array_like
        Sample locations and values.
    x_points : array_like, optional
        Locations at which to evaluate the interpolant.
    kind : str
        Interpolation kind forwarded to ``scipy.interpolate.interp1d``.

    Returns
    -------
    numpy.ndarray
    """
    try:
        # Import from the public API: the private scipy.interpolate.interpolate
        # module path was deprecated and removed in SciPy 1.11.
        from scipy.interpolate import interp1d
        if x_points is None:
            return interp1d(x, y, kind=kind)(x[np.isfinite(x)])
        else:
            return interp1d(x, y, kind=kind)(x_points)
    except ImportError:
        if kind != 'linear':
            warnings.warn(
                "You requested a non-linear interpolation, but SciPy is not available. Falling back to linear interpolation.",
                UserWarning)
        if x_points is None:
            return np.interp((x[np.isfinite(x)]), x, y)
        else:
            return np.interp(x_points, x, y)
def is_string(in_obj):
    """Return True when *in_obj* is a text string.

    Works on Python 2 (``basestring`` covers str/unicode) and Python 3
    (plain ``str``)."""
    try:
        text_types = basestring
    except NameError:  # Python 3 has no basestring
        text_types = str
    return isinstance(in_obj, text_types)
def process_fluid_state(fluid_ref, fractions='mole'):
    """Check input for state object or fluid string

    Parameters
    ----------
    fluid_ref : str, CoolProp.AbstractState
    fractions : str, switch to set mass, volu or mole fractions

    Returns
    -------
    CoolProp.AbstractState
    """
    # Process the fluid and set self._state
    if is_string(fluid_ref):
        backend, fluids = extract_backend(fluid_ref)
        # BUGFIX: keep the numeric fraction values in their own variable
        # instead of shadowing the 'fractions' switch parameter; previously
        # the 'mass'/'volu' branches below could never be taken.
        fluids, fraction_values = extract_fractions(fluids)

        state = AbstractState(backend, '&'.join(fluids))
        if len(fluids) > 1 and len(fluids) == len(fraction_values):
            if fractions == 'mass':
                state.set_mass_fractions(fraction_values)
            elif fractions == 'volu':
                state.set_volu_fractions(fraction_values)
            else:
                state.set_mole_fractions(fraction_values)
        return state
    elif isinstance(fluid_ref, AbstractState):
        return fluid_ref
    raise TypeError("Invalid fluid_ref input, expected a string or an abstract state instance.")
def _get_index(prop):
if is_string(prop):
return CP.get_parameter_index(prop)
elif isinstance(prop, int):
return prop
else:
raise ValueError("Invalid input, expected a string or an int, not {0:s}.".format(str(prop)))
class BaseQuantity(object):
    """A very basic property that can convert an input to and from a
    given unit system. Conversion from SI applies the offset first, then
    the scale, then the additive shift:

        from_SI(v) = (v + off_SI) * mul_SI + add_SI
        to_SI(v)   = (v - add_SI) / mul_SI - off_SI

    Examples with temperature:
        celsius = BaseQuantity(add_SI=-273.15)
        fahrenheit = BaseQuantity(add_SI=32.0, mul_SI=1.8, off_SI=-273.15)
    Examples with pressure:
        bar = BaseQuantity(mul_SI=1e-5)
        psi = BaseQuantity(mul_SI=0.000145037738)
    """

    def __init__(self, add_SI=0.0, mul_SI=1.0, off_SI=0.0):
        self._add_SI = add_SI
        self._mul_SI = mul_SI
        self._off_SI = off_SI

    @property
    def add_SI(self):
        return self._add_SI

    @add_SI.setter
    def add_SI(self, value):
        self._add_SI = value

    @property
    def mul_SI(self):
        return self._mul_SI

    @mul_SI.setter
    def mul_SI(self, value):
        self._mul_SI = value

    @property
    def off_SI(self):
        return self._off_SI

    @off_SI.setter
    def off_SI(self, value):
        self._off_SI = value

    def from_SI(self, value):
        """Convert an SI value to this unit."""
        return (value + self.off_SI) * self.mul_SI + self.add_SI

    def to_SI(self, value):
        """Convert a value in this unit back to SI."""
        return (value - self.add_SI) / self.mul_SI - self.off_SI
class BaseDimension(BaseQuantity):
    """Extends BaseQuantity with a human-readable label, a symbol and a
    unit string, for annotating plot axes."""

    def __init__(self, add_SI=0.0, mul_SI=1.0, off_SI=0.0, label='', symbol='', unit=''):
        super(BaseDimension, self).__init__(add_SI=add_SI, mul_SI=mul_SI, off_SI=off_SI)
        self._label = label
        self._symbol = symbol
        self._unit = unit

    @property
    def label(self):
        return self._label

    @label.setter
    def label(self, value):
        self._label = value

    @property
    def symbol(self):
        return self._symbol

    @symbol.setter
    def symbol(self, value):
        self._symbol = value

    @property
    def unit(self):
        return self._unit

    @unit.setter
    def unit(self, value):
        self._unit = value
class PropertyDict(with_metaclass(ABCMeta),object):
    """A collection of dimensions for all the required quantities"""
    def __init__(self):
        # One BaseDimension (or None) per supported quantity; concrete
        # subclasses such as SIunits populate these.
        self._D = None
        self._H = None
        self._P = None
        self._S = None
        self._T = None
        self._U = None
        self._Q = None

    @property
    def D(self): return self._D
    @D.setter
    def D(self, value): self._D = value

    @property
    def H(self): return self._H
    @H.setter
    def H(self, value): self._H = value

    @property
    def P(self): return self._P
    @P.setter
    def P(self, value): self._P = value

    @property
    def S(self): return self._S
    @S.setter
    def S(self, value): self._S = value

    @property
    def T(self): return self._T
    @T.setter
    def T(self, value): self._T = value

    @property
    def U(self): return self._U
    @U.setter
    def U(self, value): self._U = value

    @property
    def Q(self): return self._Q
    @Q.setter
    def Q(self, value): self._Q = value

    @property
    def dimensions(self):
        # Map CoolProp parameter indices to the dimension objects.
        return {
          CoolProp.iDmass : self._D,
          CoolProp.iHmass : self._H,
          CoolProp.iP     : self._P,
          CoolProp.iSmass : self._S,
          CoolProp.iT     : self._T,
          CoolProp.iUmass : self._U,
          CoolProp.iQ     : self._Q
        }

    def __getitem__(self, index):
        """Allow for property access via square brackets"""
        # Accepts either a property name string or a CoolProp index.
        idx = _get_index(index)
        if idx == CoolProp.iDmass : return self.D
        elif idx == CoolProp.iHmass : return self.H
        elif idx == CoolProp.iP : return self.P
        elif idx == CoolProp.iSmass : return self.S
        elif idx == CoolProp.iT : return self.T
        elif idx == CoolProp.iUmass : return self.U
        elif idx == CoolProp.iQ : return self.Q
        else: raise IndexError("Unknown index \"{0:s}\".".format(str(index)))

    def __setitem__(self, index, value):
        """Allow for property access via square brackets"""
        idx = _get_index(index)
        if idx == CoolProp.iDmass : self.D = value
        elif idx == CoolProp.iHmass : self.H = value
        elif idx == CoolProp.iP : self.P = value
        elif idx == CoolProp.iSmass : self.S = value
        elif idx == CoolProp.iT : self.T = value
        elif idx == CoolProp.iUmass : self.U = value
        elif idx == CoolProp.iQ : self.Q = value
        else: raise IndexError("Unknown index \"{0:s}\".".format(str(index)))
class SIunits(PropertyDict):
    """Dimension set for plain SI units (kg/m3, J/kg, Pa, K)."""

    def __init__(self):
        # (attribute, label, symbol, unit) — all with identity SI conversion.
        specs = [
            ('_D', 'Density', u'd', u'kg/m3'),
            ('_H', 'Specific Enthalpy', u'h', u'J/kg'),
            ('_P', 'Pressure', u'p', u'Pa'),
            ('_S', 'Specific Entropy', u's', u'J/kg/K'),
            ('_T', 'Temperature', u'T', u'K'),
            ('_U', 'Specific Internal Energy', u'u', u'J/kg'),
            ('_Q', 'Vapour Quality', u'x', u''),
        ]
        for attr, label, symbol, unit in specs:
            setattr(self, attr, BaseDimension(add_SI=0.0, mul_SI=1.0, off_SI=0.0,
                                              label=label, symbol=symbol, unit=unit))
class KSIunits(SIunits):
    """SI units with energy-related quantities in kJ and pressure in kPa."""

    def __init__(self):
        super(KSIunits, self).__init__()
        # Scale enthalpy, pressure, entropy and internal energy by 1e-3.
        for dim, unit in ((self.H, u'kJ/kg'),
                          (self.P, u'kPa'),
                          (self.S, u'kJ/kg/K'),
                          (self.U, u'kJ/kg')):
            dim.mul_SI = 1e-3
            dim.unit = unit
class EURunits(KSIunits):
    """kSI units with pressure in bar and temperature in degrees Celsius."""

    def __init__(self):
        super(EURunits, self).__init__()
        # Pressure: Pa -> bar.
        self.P.mul_SI = 1e-5
        self.P.unit = u'bar'
        # Temperature: K -> deg C (additive shift applied after scaling).
        self.T.add_SI = -273.15
        self.T.unit = u'deg C'
class Base2DObject(with_metaclass(ABCMeta),object):
    """A container for shared settings and constants for the
    isolines and the property plots."""

    # Supported plot types, encoded as y_index*10 + x_index so that one
    # integer identifies the property pair on a diagram's axes.
    TS = CoolProp.iT*10 + CoolProp.iSmass
    PH = CoolProp.iP*10 + CoolProp.iHmass
    HS = CoolProp.iHmass*10 + CoolProp.iSmass
    PS = CoolProp.iP*10 + CoolProp.iSmass
    PD = CoolProp.iP*10 + CoolProp.iDmass
    TD = CoolProp.iT*10 + CoolProp.iDmass
    PT = CoolProp.iP*10 + CoolProp.iT
    PU = CoolProp.iP*10 + CoolProp.iUmass

    PLOTS = {
        'TS': TS,
        'PH': PH,
        'HS': HS,
        'PS': PS,
        'PD': PD,
        'TD': TD,
        'PT': PT,
    }
    # Reverse lookup: plot type code -> name.
    PLOTS_INV = {v: k for k, v in PLOTS.items()}

# #     A list of supported plot
# @property
# def TS(self): return type(self).TS
# @property
# def PH(self): return CoolProp.iP*10 + CoolProp.iHmass
# @property
# def HS(self): return CoolProp.iHmass*10 + CoolProp.iSmass
# @property
# def PS(self): return CoolProp.iP*10 + CoolProp.iSmass
# @property
# def PD(self): return CoolProp.iP*10 + CoolProp.iDmass
# @property
# def TD(self): return CoolProp.iT*10 + CoolProp.iDmass
# @property
# def PT(self): return CoolProp.iP*10 + CoolProp.iT
# @property
# def PU(self): return CoolProp.iP*10 + CoolProp.iUmass

    def __init__(self, x_type, y_type, state=None, small=None):
        # Resolve axis property names/indices once up front.
        self._x_index = _get_index(x_type)
        self._y_index = _get_index(y_type)
        self._critical_state = None
        # Relative tolerance used to keep saturation sweeps away from the
        # critical point and the triple point.
        if small is not None: self._small = small
        else: self._small = 1e-7
        if state is not None: self.state = state
        else: self._state = None

    # Read-only indices of the x- and y-axis properties.
    @property
    def x_index(self): return self._x_index

    @property
    def y_index(self): return self._y_index

    @property
    def critical_state(self):
        # Lazily computed from the current state and cached.
        if self._critical_state is None and self._state is not None:
            self._critical_state = get_critical_point(self._state)
        return self._critical_state

    @property
    def state(self): return self._state
    @state.setter
    def state(self, value):
        # Accepts a fluid string or an AbstractState (see process_fluid_state).
        self._state = process_fluid_state(value)
        #try: self._state.build_phase_envelope("dummy")
        #except: pass
        # Invalidate the cached critical point; it is recomputed below via
        # the critical_state property when the tolerances are evaluated.
        self._critical_state = None
        #self._T_small = self._state.trivial_keyed_output(CoolProp.iT_critical)*self._small
        #self._P_small = self._state.trivial_keyed_output(CoolProp.iP_critical)*self._small
        self._T_small = self.critical_state.keyed_output(CoolProp.iT)*self._small
        self._P_small = self.critical_state.keyed_output(CoolProp.iP)*self._small

    def _get_sat_bounds(self, kind, smin=None, smax=None):
        """Generates limits for the saturation line in either T or p determined
        by 'kind'. If smin or smax are provided, values will be checked
        against the allowable range for the EOS and a warning might be
        generated. Returns a tuple containing (xmin, xmax)"""
        # TODO: REFPROP backend does not have ptriple.
        T_triple = self._state.trivial_keyed_output(CoolProp.iT_triple)
        try:
            T_min = self._state.trivial_keyed_output(CoolProp.iT_min)
        except:
            T_min = T_triple
        # Anchor the state just above the lowest admissible temperature on
        # the saturated liquid line.
        self._state.update(CoolProp.QT_INPUTS, 0, max([T_triple,T_min])+self._T_small)
        kind = _get_index(kind)
        if kind == CoolProp.iP:
            fluid_min = self._state.keyed_output(CoolProp.iP)+self._P_small
            fluid_max = self.critical_state.keyed_output(CoolProp.iP)-self._P_small
        elif kind == CoolProp.iT:
            fluid_min = self._state.keyed_output(CoolProp.iT)+self._T_small
            fluid_max = self.critical_state.keyed_output(CoolProp.iT)-self._T_small
        else:
            raise ValueError("Saturation boundaries have to be defined in T or P, but not in {0:s}".format(str(kind)))

        # NOTE(review): self.PROPERTIES is not defined anywhere in this class
        # in this file chunk — presumably provided by a subclass; if it is
        # missing, the warnings below would raise AttributeError. Verify.
        if smin is not None:
            if fluid_min < smin < fluid_max:
                sat_min = smin
            else:
                warnings.warn(
                  "Your minimum {0:s} has been ignored, {1:f} is not between {2:f} and {3:f}".format(self.PROPERTIES[kind],smin,fluid_min,fluid_max),
                  UserWarning)
                sat_min = fluid_min
        else:
            sat_min = fluid_min

        if smax is not None:
            if fluid_min < smax < fluid_max:
                sat_max = smax
            else:
                warnings.warn(
                  "Your maximum {0:s} has been ignored, {1:f} is not between {2:f} and {3:f}".format(self.PROPERTIES[kind],smax,fluid_min,fluid_max),
                  UserWarning)
                sat_max = fluid_max
        else:
            sat_max = fluid_max

        return sat_min, sat_max
class IsoLine(Base2DObject):
    """An object that holds the functions to calculate a line of
    a constant property in the dimensions of a property plot. This
    class only uses SI units."""

    # Normally we calculate a sweep in x-dimensions, but
    # sometimes a sweep in y-dimensions is better.
    # Per constant property (outer key) and plot type (inner key):
    # False = sweep along x, True = sweep along y, None = not supported.
    XY_SWITCH = {
        CoolProp.iDmass: {Base2DObject.TS: True,  Base2DObject.PH: True,  Base2DObject.HS: False, Base2DObject.PS: True,  Base2DObject.PD: None,  Base2DObject.TD: None,  Base2DObject.PT: False},
        CoolProp.iHmass: {Base2DObject.TS: False, Base2DObject.PH: None,  Base2DObject.HS: None,  Base2DObject.PS: True,  Base2DObject.PD: True,  Base2DObject.TD: False, Base2DObject.PT: False},
        CoolProp.iP:     {Base2DObject.TS: False, Base2DObject.PH: None,  Base2DObject.HS: False, Base2DObject.PS: None,  Base2DObject.PD: None,  Base2DObject.TD: False, Base2DObject.PT: None},
        CoolProp.iSmass: {Base2DObject.TS: None,  Base2DObject.PH: True,  Base2DObject.HS: None,  Base2DObject.PS: None,  Base2DObject.PD: True,  Base2DObject.TD: False, Base2DObject.PT: True},
        CoolProp.iT:     {Base2DObject.TS: None,  Base2DObject.PH: True,  Base2DObject.HS: False, Base2DObject.PS: False, Base2DObject.PD: False, Base2DObject.TD: None,  Base2DObject.PT: None},
        CoolProp.iQ:     {Base2DObject.TS: True,  Base2DObject.PH: True,  Base2DObject.HS: True,  Base2DObject.PS: True,  Base2DObject.PD: True,  Base2DObject.TD: True,  Base2DObject.PT: False}
    }

    # Abort interpolation if there are not enough
    # valid entries.
    VALID_REQ = 5.0/100.0

    def __init__(self, i_index, x_index, y_index, value=0.0, state=None):
        super(IsoLine, self).__init__(x_index, y_index, state)
        # The property held constant along the line.
        self._i_index = _get_index(i_index)
        if value is not None:
            self.value = value
        else:
            self._value = None
        self._x = None
        self._y = None

    @property
    def i_index(self): return self._i_index

    @property
    def value(self): return self._value
    @value.setter
    def value(self, value): self._value = float(value)

    @property
    def x(self): return self._x
    @x.setter
    def x(self, value): self._x = np.array(value)

    @property
    def y(self): return self._y
    @y.setter
    def y(self, value): self._y = np.array(value)

    def get_update_pair(self):
        """Processes the values for the isoproperty and the graph dimensions
        to figure which should be used as inputs to the state update. Returns
        a tuple with the indices for the update call and the property constant.
        For an isobar in a Ts-diagram it returns the default order and the
        correct constant for the update pair:
        get_update_pair(CoolProp.iP,CoolProp.iSmass,CoolProp.iT) -> (0,1,2,CoolProp.PSmass_INPUTS)
        other values require switching and swapping.
        """
        # Figure out if x or y-dimension should be used
        switch = self.XY_SWITCH[self.i_index][self.y_index*10+self.x_index]
        if switch is None:
            raise ValueError("This isoline cannot be calculated!")
        elif switch is False:
            pair, out1, _ = CP.generate_update_pair(self.i_index, 0.0, self.x_index, 1.0)
        elif switch is True:
            pair, out1, _ = CP.generate_update_pair(self.i_index, 0.0, self.y_index, 1.0)
        else:
            raise ValueError("Unknown error!")
        # generate_update_pair may reorder the inputs; out1==0.0 means the
        # isoproperty stayed in the first slot.
        if out1 == 0.0:  # Correct order
            swap = False
        else:  # Wrong order
            swap = True
        if not switch and not swap:
            return 0, 1, 2, pair
        elif switch and not swap:
            return 0, 2, 1, pair
        elif not switch and swap:
            return 1, 0, 2, pair
        elif switch and swap:
            return 1, 2, 0, pair
        else:
            raise ValueError("Check the code, this should not happen!")

    def calc_sat_range(self, Trange=None, Prange=None, num=200):
        """Calculate saturation/isoquality data over a temperature or
        pressure range; defaults to a sweep of *num* points over the full
        saturation temperature range. Results are stored in self.x/self.y."""
        if Trange is not None:
            two = np.array(Trange)
            one = np.resize(np.array(self.value), two.shape)
            pair = CoolProp.QT_INPUTS
        elif Prange is not None:
            one = np.array(Prange)
            two = np.resize(np.array(self.value), one.shape)
            pair = CoolProp.PQ_INPUTS
        else:
            T_lo, T_hi = self._get_sat_bounds(CoolProp.iT)
            two = np.linspace(T_lo, T_hi, num)
            one = np.resize(np.array(self.value), two.shape)
            pair = CoolProp.QT_INPUTS

        Tcrit = self.critical_state.keyed_output(CoolProp.iT)
        Pcrit = self.critical_state.keyed_output(CoolProp.iP)
        try:
            xcrit = self.critical_state.keyed_output(self._x_index)
            ycrit = self.critical_state.keyed_output(self._y_index)
        except Exception:
            warnings.warn(
                "An error occurred for the critical inputs, skipping it.",
                UserWarning)
            xcrit = np.NaN
            ycrit = np.NaN

        X = np.empty_like(one)
        Y = np.empty_like(one)
        for index, _ in np.ndenumerate(one):
            try:
                self.state.update(pair, one[index], two[index])
                X[index] = self.state.keyed_output(self._x_index)
                Y[index] = self.state.keyed_output(self._y_index)
            except Exception as e:
                # Near the critical point the EOS often fails to converge;
                # substitute the precomputed critical values and move on.
                if (pair == CoolProp.QT_INPUTS and abs(two[index]-Tcrit) < 1e0) or \
                   (pair == CoolProp.PQ_INPUTS and abs(one[index]-Pcrit) < 1e2):
                    X[index] = xcrit
                    Y[index] = ycrit
                    warnings.warn(
                        "An error occurred for near critical inputs {0:f}, {1:f} with index {2:s}: {3:s}".format(one[index], two[index], str(index), str(e)),
                        UserWarning)
                    # BUGFIX: previously execution fell through here and
                    # overwrote the critical values with NaN (and warned twice).
                    continue
                warnings.warn(
                    "An error occurred for inputs {0:f}, {1:f} with index {2:s}: {3:s}".format(one[index], two[index], str(index), str(e)),
                    UserWarning)
                X[index] = np.NaN
                Y[index] = np.NaN
        self.x = X
        self.y = Y
        return

    def calc_range(self, xvals=None, yvals=None):
        """Calculate the isoline over the given x- or y-axis sample points
        and store the results in self.x/self.y. Isoquality lines are
        delegated to calc_sat_range."""
        if self.i_index == CoolProp.iQ:
            warnings.warn(
                "Please use \"calc_sat_range\" to calculate saturation and isoquality lines. Input ranges are discarded.",
                UserWarning)
            if xvals is not None:
                self.calc_sat_range(num=xvals.size)
            elif yvals is not None:
                self.calc_sat_range(num=yvals.size)
            else:
                self.calc_sat_range()
            return

        ipos, xpos, ypos, pair = self.get_update_pair()

        # Reorder (isovalue, xvals, yvals) into the order required by the
        # update pair; idxs carries the matching property indices.
        order = [ipos, xpos, ypos]
        idxs = [v for (_, v) in sorted(zip(order, [self.i_index, self.x_index, self.y_index]))]
        vals = [v for (_, v) in sorted(zip(order, [np.array(self.value), xvals, yvals]))]

        if vals[0] is None or vals[1] is None:
            raise ValueError("One required input is missing, make sure to supply the correct xvals ({0:s}) or yvals ({1:s}).".format(str(xvals), str(yvals)))

        if vals[0].size > vals[1].size:
            vals[1] = np.resize(vals[1], vals[0].shape)
        elif vals[0].size < vals[1].size:
            vals[0] = np.resize(vals[0], vals[1].shape)

        vals[2] = np.empty_like(vals[0])
        guesses = CoolProp.CoolProp.PyGuessesStructure()
        # Only the enthalpy/pressure input pairs benefit from carrying the
        # previous solution forward as a guess.
        if pair == CoolProp.HmolarP_INPUTS \
           or pair == CoolProp.HmassP_INPUTS:
            use_guesses = True
        else:
            use_guesses = False
        for index, _ in np.ndenumerate(vals[0]):
            try:
                if use_guesses:
                    if np.isfinite(guesses.rhomolar):
                        self.state.update_with_guesses(pair, vals[0][index], vals[1][index], guesses)
                    else:
                        self.state.update(pair, vals[0][index], vals[1][index])
                    guesses.rhomolar = self.state.rhomolar()
                    guesses.T = self.state.T()
                else:
                    self.state.update(pair, vals[0][index], vals[1][index])
                vals[2][index] = self.state.keyed_output(idxs[2])
            except Exception as e:
                warnings.warn(
                    "An error occurred for inputs {0:f}, {1:f} with index {2:s}: {3:s}".format(vals[0][index], vals[1][index], str(index), str(e)),
                    UserWarning)
                vals[2][index] = np.NaN
                # Reset the guesses after a failure.
                guesses.rhomolar = np.NaN
                guesses.T = np.NaN

        for i, v in enumerate(idxs):
            if v == self.x_index: self.x = vals[i]
            if v == self.y_index: self.y = vals[i]

    def sanitize_data(self):
        """Fill the series via interpolation"""
        validx = None; validy = None
        countx = None; county = None
        if self.x is not None:
            validx = np.isfinite(self.x)
            countx = float(self.x.size)
        else:
            raise ValueError("The x-axis is not populated, calculate values before you interpolate.")
        if self.y is not None:
            validy = np.isfinite(self.y)
            county = float(self.y.size)
        else:
            raise ValueError("The y-axis is not populated, calculate values before you interpolate.")

        if min([np.sum(validx)/countx, np.sum(validy)/county]) < self.VALID_REQ:
            warnings.warn(
                "Poor data quality, there are not enough valid entries for x ({0:f}/{1:f}) or y ({2:f}/{3:f}).".format(np.sum(validx), countx, np.sum(validy), county),
                UserWarning)
        # TODO: use filter and cubic splines!
        # Interpolate the sparser axis over the denser one.
        if np.sum(validy) > np.sum(validx):
            self.x = interpolate_values_1d(self.y, self.x, x_points=self.y[validy])
            self.y = self.y[validy]
        else:
            self.y = interpolate_values_1d(self.x, self.y, x_points=self.x[validx])
            self.x = self.x[validx]
class BasePlot(Base2DObject):
    """The base class for all plots. It can be instantiated itself, but provides many
    general facilities to be used in the different plots.

    It holds the fluid state object, the matplotlib figure/axis pair, the unit
    system used for display and the temperature/pressure limits that frame the
    plotted region.
    """
    # Map CoolProp parameter indices to human-readable property names
    PROPERTIES = {
        CoolProp.iDmass: 'density',
        CoolProp.iHmass: 'specific enthalpy',
        CoolProp.iP: 'pressure',
        CoolProp.iSmass: 'specific entropy',
        CoolProp.iT: 'temperature',
        CoolProp.iUmass: 'specific internal energy'
    }
    # Define the unit systems
    UNIT_SYSTEMS = {
        'SI': SIunits(),
        'KSI': KSIunits(),
        'EUR': EURunits()
    }
    # Default matplotlib line properties per isoline type
    LINE_PROPS = {
        CoolProp.iT: dict(color='Darkred', lw=0.25),
        CoolProp.iP: dict(color='DarkCyan', lw=0.25),
        CoolProp.iHmass: dict(color='DarkGreen', lw=0.25),
        CoolProp.iDmass: dict(color='DarkBlue', lw=0.25),
        CoolProp.iSmass: dict(color='DarkOrange', lw=0.25),
        CoolProp.iQ: dict(color='black', lw=0.25)
    }
    ID_FACTOR = 10.0  # Values below this number are interpreted as factors
    HI_FACTOR = 2.25  # Upper default limits: HI_FACTOR*T_crit and HI_FACTOR*p_crit
    LO_FACTOR = 1.01  # Lower default limits: LO_FACTOR*T_triple and LO_FACTOR*p_triple
    TP_LIMITS = {
        'NONE': [None, None, None, None],
        'DEF': [LO_FACTOR, HI_FACTOR, LO_FACTOR, HI_FACTOR],
        'ACHP': [173.15, 493.15, 0.25e5, HI_FACTOR],
        'ORC': [273.15, 673.15, 0.25e5, HI_FACTOR]
    }

    def __init__(self, fluid_ref, graph_type, unit_system='KSI', tp_limits='DEF', **kwargs):
        # Process the graph_type and set self._x_type and self._y_type
        graph_type = graph_type.upper()
        graph_type = graph_type.replace(r'RHO', r'D')
        if graph_type not in Base2DObject.PLOTS:
            raise ValueError("Invalid graph_type input, expected a string from {0:s}".format(str(self.PLOTS)))
        # Process the unit_system and set self._system
        self.system = unit_system
        # Process the plotting range based on T and p
        self.limits = tp_limits
        # Other properties.  The defaults are created lazily: the previous
        # implementation evaluated plt.figure(...)/add_subplot(111) as eager
        # kwargs.pop defaults, which opened a spurious figure (and added an
        # extra axes) even when the caller supplied their own.
        figure = kwargs.pop('figure', None)
        self.figure = figure if figure is not None else plt.figure(tight_layout=True)
        axis = kwargs.pop('axis', None)
        self.axis = axis if axis is not None else self.figure.add_subplot(111)
        self.props = kwargs.pop('props', None)
        # call the base class
        state = process_fluid_state(fluid_ref)
        Base2DObject.__init__(self, graph_type[1], graph_type[0], state, **kwargs)

    @property
    def system(self):
        """Active unit system object used for conversions and labels."""
        return self._system

    @system.setter
    def system(self, value):
        value = value.upper()
        if value in self.UNIT_SYSTEMS:
            self._system = self.UNIT_SYSTEMS[value]
        else:
            raise ValueError("Invalid input, expected a string from {0:s}".format(str(self.UNIT_SYSTEMS.keys())))

    @property
    def limits(self):
        """Returns [Tmin,Tmax,pmin,pmax] as value or factors"""
        return self._limits

    @limits.setter
    def limits(self, value):
        if is_string(value):
            value = value.upper()
        # Only test dict membership for (hashable) strings; the previous code
        # did `value in self.TP_LIMITS` unconditionally, which raised
        # TypeError("unhashable type") for list inputs.
        if is_string(value) and value in self.TP_LIMITS:
            self._limits = self.TP_LIMITS[value]
        elif len(value) == 4:
            self._limits = value
        else:
            raise ValueError("Invalid input, expected a list with 4 items or a string from {0:s}".format(str(self.TP_LIMITS.keys())))

    @property
    def figure(self):
        """The matplotlib figure this plot draws into."""
        return self._figure

    @figure.setter
    def figure(self, value):
        self._figure = value

    @property
    def axis(self):
        """The matplotlib axes this plot draws into."""
        return self._axis

    @axis.setter
    def axis(self, value):
        self._axis = value

    @property
    def props(self):
        """Per-isoline matplotlib line properties, merged over LINE_PROPS."""
        return self._props

    @props.setter
    def props(self, value):
        self._props = self.LINE_PROPS.copy()
        if value is not None:
            self._props.update(value)

    def __sat_bounds(self, kind, smin=None, smax=None):
        warnings.warn(
          "You called the deprecated function \"__sat_bounds\", \
consider replacing it with \"_get_sat_bounds\".",
          DeprecationWarning)
        return self._get_sat_bounds(kind, smin, smax)

    def _get_iso_label(self, isoline, unit=True):
        """Return a LaTeX label like '$T=300.0$ K' for *isoline*; the unit is
        omitted when *unit* is False."""
        if self._system is not None:
            dim = self._system[isoline.i_index]
            label = r"$" + dim.symbol + "=" + str(dim.from_SI(isoline.value)) + "$"
            # NOTE: the previous implementation mis-parenthesised the inline
            # conditional and returned a bare "$" whenever unit was falsy.
            if unit:
                label += " " + dim.unit
            return label.strip()
        return str(isoline.value).strip()

    def _plot_default_annotations(self):
        """Apply default log scales and axis labels derived from the active
        unit system, without overwriting labels the user has already set."""
        # Density and pressure axes are conventionally logarithmic.
        if self._x_index in [CoolProp.iDmass, CoolProp.iP]:
            self.axis.set_xscale('log')
        if self._y_index in [CoolProp.iDmass, CoolProp.iP]:
            self.axis.set_yscale('log')
        if not self.axis.get_xlabel():
            dim = self._system[self._x_index]
            self.xlabel((dim.label + u" $" + dim.symbol + u"$ / " + dim.unit).strip())
        if not self.axis.get_ylabel():
            dim = self._system[self._y_index]
            self.ylabel((dim.label + u" $" + dim.symbol + u"$ / " + dim.unit).strip())

    def title(self, title):
        self.axis.set_title(title)

    def xlabel(self, xlabel):
        self.axis.set_xlabel(xlabel)

    def ylabel(self, ylabel):
        self.axis.set_ylabel(ylabel)

    def grid(self, b=None, **kwargs):
        """Toggle the axis grid; *b* may be 'on' or 'off' (case-insensitive).
        Extra keyword arguments are forwarded to matplotlib."""
        g_map = {'on': True, 'off': False}
        if b is not None:
            b = g_map[b.lower()]
        if not kwargs:  # len=0
            self.axis.grid(b)
        else:
            # Forward the keyword arguments properly; the previous code passed
            # the kwargs dict as the positional on/off flag, silently ignoring
            # all of them.
            self.axis.grid(b, **kwargs)

    def set_Tp_limits(self, limits):
        """Set the limits for the graphs in temperature and pressure, based on
        the active units: [Tmin, Tmax, pmin, pmax]"""
        # NOTE: mutates the list passed by the caller in place.
        dim = self._system[CoolProp.iT]
        limits[0] = dim.to_SI(limits[0])
        limits[1] = dim.to_SI(limits[1])
        dim = self._system[CoolProp.iP]
        limits[2] = dim.to_SI(limits[2])
        limits[3] = dim.to_SI(limits[3])
        self.limits = limits

    def get_Tp_limits(self):
        """Get the limits for the graphs in temperature and pressure, based on
        the active units: [Tmin, Tmax, pmin, pmax]"""
        limits = self._get_Tp_limits()
        dim = self._system[CoolProp.iT]
        limits[0] = dim.from_SI(limits[0])
        limits[1] = dim.from_SI(limits[1])
        dim = self._system[CoolProp.iP]
        limits[2] = dim.from_SI(limits[2])
        limits[3] = dim.from_SI(limits[3])
        return limits

    def _get_Tp_limits(self):
        """Get the limits for the graphs in temperature and pressure, based on
        SI units: [Tmin, Tmax, pmin, pmax]"""
        T_lo, T_hi, P_lo, P_hi = self.limits
        Ts_lo, Ts_hi = self._get_sat_bounds(CoolProp.iT)
        Ps_lo, Ps_hi = self._get_sat_bounds(CoolProp.iP)
        # Values below ID_FACTOR are interpreted as multiples of the
        # saturation bounds, larger values as absolute limits.
        if T_lo is None: T_lo = 0.0
        elif T_lo < self.ID_FACTOR: T_lo *= Ts_lo
        if T_hi is None: T_hi = 1e6
        elif T_hi < self.ID_FACTOR: T_hi *= Ts_hi
        if P_lo is None: P_lo = 0.0
        elif P_lo < self.ID_FACTOR: P_lo *= Ps_lo
        if P_hi is None: P_hi = 1e10
        elif P_hi < self.ID_FACTOR: P_hi *= Ps_hi
        # Clamp to the fluid's validity range when the backend provides it;
        # best-effort, some backends do not implement these outputs.
        try: T_lo = np.nanmax([T_lo, self._state.trivial_keyed_output(CoolProp.iT_min)])
        except Exception: pass
        try: T_hi = np.nanmin([T_hi, self._state.trivial_keyed_output(CoolProp.iT_max)])
        except Exception: pass
        try: P_lo = np.nanmax([P_lo, self._state.trivial_keyed_output(CoolProp.iP_min)])
        except Exception: pass
        try: P_hi = np.nanmin([P_hi, self._state.trivial_keyed_output(CoolProp.iP_max)])
        except Exception: pass
        return [T_lo, T_hi, P_lo, P_hi]

    def set_axis_limits(self, limits):
        """Set the limits of the internal axis object based on the active units,
        takes [xmin, xmax, ymin, ymax]"""
        self.axis.set_xlim([limits[0], limits[1]])
        self.axis.set_ylim([limits[2], limits[3]])

    def _set_axis_limits(self, limits):
        """Set the limits of the internal axis object based on SI units,
        takes [xmin, xmax, ymin, ymax]"""
        dim = self._system[self._x_index]
        self.axis.set_xlim([dim.from_SI(limits[0]), dim.from_SI(limits[1])])
        dim = self._system[self._y_index]
        self.axis.set_ylim([dim.from_SI(limits[2]), dim.from_SI(limits[3])])

    def get_axis_limits(self, x_index=None, y_index=None):
        """Returns the previously set limits or generates them and
        converts the default values to the selected unit system.
        Returns a list containing [xmin, xmax, ymin, ymax]"""
        if x_index is None: x_index = self._x_index
        if y_index is None: y_index = self._y_index
        if x_index != self.x_index or y_index != self.y_index or \
           self.axis.get_autoscalex_on() or self.axis.get_autoscaley_on():
            # One of them is not set or we work on a different set of axes:
            # evaluate the state at the four T,p corner points.
            T_lo, T_hi, P_lo, P_hi = self._get_Tp_limits()
            X = [0.0] * 4; Y = [0.0] * 4
            i = -1
            for T in [T_lo, T_hi]:
                for P in [P_lo, P_hi]:
                    i += 1
                    try:
                        self._state.update(CoolProp.PT_INPUTS, P, T)
                        # TODO: include a check for P and T?
                        X[i] = self._state.keyed_output(x_index)
                        Y[i] = self._state.keyed_output(y_index)
                    except Exception:
                        X[i] = np.nan; Y[i] = np.nan
            # Figure out what to update
            dim = self._system[x_index]
            x_lim = [dim.from_SI(np.nanmin(X)), dim.from_SI(np.nanmax(X))]
            dim = self._system[y_index]
            y_lim = [dim.from_SI(np.nanmin(Y)), dim.from_SI(np.nanmax(Y))]
            # Either update the axes limits or get them
            if x_index == self._x_index:
                if self.axis.get_autoscalex_on():
                    self.axis.set_xlim(x_lim)
                else:
                    x_lim = self.axis.get_xlim()
            if y_index == self._y_index:
                if self.axis.get_autoscaley_on():
                    self.axis.set_ylim(y_lim)
                else:
                    y_lim = self.axis.get_ylim()
        else:  # We only asked for the real axes limits and they are set already
            x_lim = self.axis.get_xlim()
            y_lim = self.axis.get_ylim()
        return [x_lim[0], x_lim[1], y_lim[0], y_lim[1]]

    def _get_axis_limits(self, x_index=None, y_index=None):
        """Get the limits of the internal axis object in SI units
        Returns a list containing [xmin, xmax, ymin, ymax]"""
        if x_index is None: x_index = self._x_index
        if y_index is None: y_index = self._y_index
        limits = self.get_axis_limits(x_index, y_index)
        dim = self._system[x_index]
        limits[0] = dim.to_SI(limits[0])
        limits[1] = dim.to_SI(limits[1])
        dim = self._system[y_index]
        limits[2] = dim.to_SI(limits[2])
        limits[3] = dim.to_SI(limits[3])
        return limits

    @staticmethod
    def generate_ranges(itype, imin, imax, num):
        """Generate a range for a certain property"""
        # Pressure and density are sampled logarithmically, others linearly.
        if itype in [CoolProp.iP, CoolProp.iDmass]:
            return np.logspace(np.log2(imin), np.log2(imax), num=num, base=2.)
        return np.linspace(imin, imax, num=num)

    def _get_conversion_data(self):
        """Collect axis data ranges and figure geometry needed to convert
        between data coordinates and pixel coordinates."""
        [Axmin, Axmax, Aymin, Aymax] = self._get_axis_limits()
        DELTAX_axis = Axmax - Axmin
        DELTAY_axis = Aymax - Aymin
        width = self.figure.get_figwidth()
        height = self.figure.get_figheight()
        pos = self.axis.get_position().get_points()
        [[Fxmin, Fymin], [Fxmax, Fymax]] = pos
        DELTAX_fig = width * (Fxmax - Fxmin)
        DELTAY_fig = height * (Fymax - Fymin)
        return [[Axmin, Axmax, Aymin, Aymax, Fxmin, Fxmax, Fymin, Fymax], [DELTAX_axis, DELTAY_axis, DELTAX_fig, DELTAY_fig]]

    def _to_pixel_coords(self, xv, yv):
        """Convert data coordinates to pixel coordinates."""
        [[Axmin, Axmax, Aymin, Aymax, Fxmin, Fxmax, Fymin, Fymax], [DELTAX_axis, DELTAY_axis, DELTAX_fig, DELTAY_fig]] = self._get_conversion_data()
        x = (xv - Axmin) / DELTAX_axis * DELTAX_fig + Fxmin
        y = (yv - Aymin) / DELTAY_axis * DELTAY_fig + Fymin
        return x, y

    def _to_data_coords(self, xv, yv):
        """Convert pixel coordinates back to data coordinates."""
        [[Axmin, Axmax, Aymin, Aymax, Fxmin, Fxmax, Fymin, Fymax], [DELTAX_axis, DELTAY_axis, DELTAX_fig, DELTAY_fig]] = self._get_conversion_data()
        x = (xv - Fxmin) / DELTAX_fig * DELTAX_axis + Axmin
        y = (yv - Fymin) / DELTAY_fig * DELTAY_axis + Aymin
        return x, y

    @staticmethod
    def get_x_y_dydx(xv, yv, x):
        """Get x and y coordinates and the linear interpolation derivative"""
        if len(xv) == len(yv) and len(yv) > 1:  # assure same length
            if len(xv) == len(yv) and len(yv) == 2:  # only two points
                if np.min(xv) < x < np.max(xv):
                    dx = xv[1] - xv[0]
                    dy = yv[1] - yv[0]
                    dydx = dy / dx
                    y = yv[0] + dydx * (x - xv[0])
                    return x, y, dydx
                else:
                    raise ValueError("Your coordinate has to be between the input values.")
            else:
                # Reduce the series to the two points bracketing x and recurse.
                limit = 1e-10  # avoid hitting a point directly
                diff = np.array(xv) - x  # get differences
                index = np.argmin(diff * diff)  # nearest neighbour
                if (xv[index] < x < xv[index + 1]  # nearest below, positive inclination
                   or xv[index] > x > xv[index + 1]):  # nearest above, negative inclination
                    if diff[index] < limit:
                        index = [index - 1, index + 1]
                    else:
                        index = [index, index + 1]
                elif (xv[index - 1] < x < xv[index]  # nearest above, positive inclination
                   or xv[index - 1] > x > xv[index]):  # nearest below, negative inclination
                    if diff[index] < limit:
                        index = [index - 1, index + 1]
                    else:
                        index = [index - 1, index]
                xvnew = xv[index]
                yvnew = yv[index]
                return BasePlot.get_x_y_dydx(xvnew, yvnew, x)  # Allow for a single recursion
        else:
            raise ValueError("You have to provide the same amount of x- and y-pairs with at least two entries each.")

    def _inline_label(self, xv, yv, x=None, y=None):
        """
        This will give the coordinates and rotation required to align a label with
        a line on a plot in SI units.
        """
        if y is None and x is not None:
            trash = 0
            (xv, yv) = self._to_pixel_coords(xv, yv)
            # x is provided but y isn't
            (x, trash) = self._to_pixel_coords(x, trash)
            # Get the rotation angle and y-value
            x, y, dy_dx = BasePlot.get_x_y_dydx(xv, yv, x)
            rot = np.arctan(dy_dx) / np.pi * 180.
        elif x is None and y is not None:
            # y is provided, but x isn't: find x by interpolation
            x = interpolate_values_1d(yv, xv, x_points=y)
            trash = 0
            (xv, yv) = self._to_pixel_coords(xv, yv)
            (x, trash) = self._to_pixel_coords(x, trash)
            # Get the rotation angle and y-value
            x, y, dy_dx = BasePlot.get_x_y_dydx(xv, yv, x)
            rot = np.arctan(dy_dx) / np.pi * 180.
        (x, y) = self._to_data_coords(x, y)
        return (x, y, rot)

    def inline_label(self, xv, yv, x=None, y=None):
        """
        This will give the coordinates and rotation required to align a label with
        a line on a plot in axis units.
        """
        dimx = self._system[self._x_index]
        xv = dimx.to_SI(xv)
        if x is not None:
            x = dimx.to_SI(x)
        dimy = self._system[self._y_index]
        yv = dimy.to_SI(yv)
        if y is not None:
            y = dimy.to_SI(y)  # was dimx: y-values must use the y-dimension
        (x, y, rot) = self._inline_label(xv, yv, x, y)
        x = dimx.from_SI(x)
        y = dimy.from_SI(y)  # was dimx
        return (x, y, rot)

    def show(self):
        plt.show()

    def savefig(self, *args, **kwargs):
        self.figure.savefig(*args, **kwargs)
if __name__ == "__main__":
    # Smoke test: exercise the unit systems, isoline calculations and a basic
    # plot when this module is executed directly.
    for sys in [SIunits(), KSIunits(), EURunits()]:
        print(sys.H.label)
        print(sys.H.to_SI(20))
        print(sys.P.label)
        print(sys.P.to_SI(20))
    #i_index, x_index, y_index, value=None, state=None)
    iso = IsoLine('T','H','P')
    print(iso.get_update_pair())
    state = AbstractState("HEOS","water")
    iso = IsoLine('T','H','P', 300.0, state)
    # Sample enthalpy/pressure grids around ambient conditions for water.
    hr = PropsSI("H","T",[290,310],"P",[1e5,1e5],"water")
    pr = np.linspace(0.9e5,1.1e5,3)
    iso.calc_range(hr,pr)
    print(iso.x,iso.y)
    # Saturated liquid (Q=0) and saturated vapour (Q=1) isolines.
    iso = IsoLine('Q','H','P', 0.0, state)
    iso.calc_range(hr,pr); print(iso.x,iso.y)
    iso = IsoLine('Q','H','P', 1.0, state)
    iso.calc_range(hr,pr); print(iso.x,iso.y)
    #bp = BasePlot(fluid_ref, graph_type, unit_system = 'KSI', **kwargs):
    bp = BasePlot('n-Pentane', 'PH', unit_system='EUR')
    #print(bp._get_sat_bounds('P'))
    #print(bp._get_iso_label(iso))
    print(bp.get_axis_limits())
# get_update_pair(CoolProp.iP,CoolProp.iSmass,CoolProp.iT) -> (0,1,2,CoolProp.PSmass_INPUTS)
#other values require switching and swapping
#get_update_pair(CoolProp.iSmass,CoolProp.iP,CoolProp.iHmass) -> (1,0,2,CoolProp.PSmass_INPUTS)
| 38.65411 | 186 | 0.595663 |
acec19553aa1efba89b5555bf7869a743401644f | 51,222 | py | Python | python/ccxt/coinbase.py | z-brain/ccxt | dde32cfb5e0e2e2889ead60687d6fd0fdf5e3f02 | [
"MIT"
] | 4 | 2021-01-10T09:14:17.000Z | 2022-02-15T19:09:52.000Z | python/ccxt/coinbase.py | z-brain/ccxt | dde32cfb5e0e2e2889ead60687d6fd0fdf5e3f02 | [
"MIT"
] | 2 | 2020-05-12T12:53:48.000Z | 2020-07-05T12:59:52.000Z | python/ccxt/coinbase.py | z-brain/ccxt | dde32cfb5e0e2e2889ead60687d6fd0fdf5e3f02 | [
"MIT"
] | 4 | 2021-06-02T16:40:35.000Z | 2022-03-14T04:50:31.000Z | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import RateLimitExceeded
class coinbase(Exchange):
    def describe(self):
        """Return the static exchange metadata (capabilities, endpoints, rate
        limit, fee links and error-code mappings) merged over the defaults
        provided by the base Exchange class."""
        return self.deep_extend(super(coinbase, self).describe(), {
            'id': 'coinbase',
            'name': 'Coinbase',
            'countries': ['US'],
            'rateLimit': 400,  # 10k calls per hour
            'version': 'v2',
            'userAgent': self.userAgents['chrome'],
            'headers': {
                'CB-VERSION': '2018-05-30',
            },
            # Unified-API capability flags
            'has': {
                'CORS': True,
                'cancelOrder': False,
                'createDepositAddress': True,
                'createOrder': False,
                'deposit': False,
                'fetchBalance': True,
                'fetchClosedOrders': False,
                'fetchCurrencies': True,
                'fetchDepositAddress': False,
                'fetchMarkets': True,
                'fetchMyTrades': False,
                'fetchOHLCV': False,
                'fetchOpenOrders': False,
                'fetchOrder': False,
                'fetchOrderBook': False,
                'fetchL2OrderBook': False,
                'fetchLedger': True,
                'fetchOrders': False,
                'fetchTicker': True,
                'fetchTickers': False,
                'fetchTime': True,
                'fetchBidsAsks': False,
                'fetchTrades': False,
                'withdraw': False,
                'fetchTransactions': False,
                'fetchDeposits': True,
                'fetchWithdrawals': True,
                'fetchMySells': True,
                'fetchMyBuys': True,
            },
            'urls': {
                'logo': 'https://user-images.githubusercontent.com/1294454/40811661-b6eceae2-653a-11e8-829e-10bfadb078cf.jpg',
                'api': 'https://api.coinbase.com',
                'www': 'https://www.coinbase.com',
                'doc': 'https://developers.coinbase.com/api/v2',
                'fees': 'https://support.coinbase.com/customer/portal/articles/2109597-buy-sell-bank-transfer-fees',
                'referral': 'https://www.coinbase.com/join/58cbe25a355148797479dbd2',
            },
            'requiredCredentials': {
                'apiKey': True,
                'secret': True,
            },
            # REST endpoints grouped by access level and HTTP verb
            'api': {
                'public': {
                    'get': [
                        'currencies',
                        'time',
                        'exchange-rates',
                        'users/{user_id}',
                        'prices/{symbol}/buy',
                        'prices/{symbol}/sell',
                        'prices/{symbol}/spot',
                    ],
                },
                'private': {
                    'get': [
                        'accounts',
                        'accounts/{account_id}',
                        'accounts/{account_id}/addresses',
                        'accounts/{account_id}/addresses/{address_id}',
                        'accounts/{account_id}/addresses/{address_id}/transactions',
                        'accounts/{account_id}/transactions',
                        'accounts/{account_id}/transactions/{transaction_id}',
                        'accounts/{account_id}/buys',
                        'accounts/{account_id}/buys/{buy_id}',
                        'accounts/{account_id}/sells',
                        'accounts/{account_id}/sells/{sell_id}',
                        'accounts/{account_id}/deposits',
                        'accounts/{account_id}/deposits/{deposit_id}',
                        'accounts/{account_id}/withdrawals',
                        'accounts/{account_id}/withdrawals/{withdrawal_id}',
                        'payment-methods',
                        'payment-methods/{payment_method_id}',
                        'user',
                        'user/auth',
                    ],
                    'post': [
                        'accounts',
                        'accounts/{account_id}/primary',
                        'accounts/{account_id}/addresses',
                        'accounts/{account_id}/transactions',
                        'accounts/{account_id}/transactions/{transaction_id}/complete',
                        'accounts/{account_id}/transactions/{transaction_id}/resend',
                        'accounts/{account_id}/buys',
                        'accounts/{account_id}/buys/{buy_id}/commit',
                        'accounts/{account_id}/sells',
                        'accounts/{account_id}/sells/{sell_id}/commit',
                        'accounts/{account_id}/deposists',
                        'accounts/{account_id}/deposists/{deposit_id}/commit',
                        'accounts/{account_id}/withdrawals',
                        'accounts/{account_id}/withdrawals/{withdrawal_id}/commit',
                    ],
                    'put': [
                        'accounts/{account_id}',
                        'user',
                    ],
                    'delete': [
                        'accounts/{id}',
                        'accounts/{account_id}/transactions/{transaction_id}',
                    ],
                },
            },
            # Map Coinbase error identifiers onto ccxt exception classes
            'exceptions': {
                'two_factor_required': AuthenticationError,  # 402 When sending money over 2fa limit
                'param_required': ExchangeError,  # 400 Missing parameter
                'validation_error': ExchangeError,  # 400 Unable to validate POST/PUT
                'invalid_request': ExchangeError,  # 400 Invalid request
                'personal_details_required': AuthenticationError,  # 400 User’s personal detail required to complete self request
                'identity_verification_required': AuthenticationError,  # 400 Identity verification is required to complete self request
                'jumio_verification_required': AuthenticationError,  # 400 Document verification is required to complete self request
                'jumio_face_match_verification_required': AuthenticationError,  # 400 Document verification including face match is required to complete self request
                'unverified_email': AuthenticationError,  # 400 User has not verified their email
                'authentication_error': AuthenticationError,  # 401 Invalid auth(generic)
                'invalid_token': AuthenticationError,  # 401 Invalid Oauth token
                'revoked_token': AuthenticationError,  # 401 Revoked Oauth token
                'expired_token': AuthenticationError,  # 401 Expired Oauth token
                'invalid_scope': AuthenticationError,  # 403 User hasn’t authenticated necessary scope
                'not_found': ExchangeError,  # 404 Resource not found
                'rate_limit_exceeded': RateLimitExceeded,  # 429 Rate limit exceeded
                'internal_server_error': ExchangeError,  # 500 Internal server error
            },
            'options': {
                'fetchCurrencies': {
                    'expires': 5000,
                },
                # Account types considered by balance/account helpers
                'accounts': [
                    'wallet',
                    'fiat',
                    # 'vault',
                ],
            },
        })
def fetch_time(self, params={}):
response = self.publicGetTime(params)
#
# {
# "data": {
# "epoch": 1589295679,
# "iso": "2020-05-12T15:01:19Z"
# }
# }
#
data = self.safe_value(response, 'data', {})
return self.safe_timestamp(data, 'epoch')
def fetch_accounts(self, params={}):
response = self.privateGetAccounts(params)
#
# {
# "id": "XLM",
# "name": "XLM Wallet",
# "primary": False,
# "type": "wallet",
# "currency": {
# "code": "XLM",
# "name": "Stellar Lumens",
# "color": "#000000",
# "sort_index": 127,
# "exponent": 7,
# "type": "crypto",
# "address_regex": "^G[A-Z2-7]{55}$",
# "asset_id": "13b83335-5ede-595b-821e-5bcdfa80560f",
# "destination_tag_name": "XLM Memo ID",
# "destination_tag_regex": "^[-~]{1,28}$"
# },
# "balance": {
# "amount": "0.0000000",
# "currency": "XLM"
# },
# "created_at": null,
# "updated_at": null,
# "resource": "account",
# "resource_path": "/v2/accounts/XLM",
# "allow_deposits": True,
# "allow_withdrawals": True
# }
#
data = self.safe_value(response, 'data', [])
result = []
for i in range(0, len(data)):
account = data[i]
currency = self.safe_value(account, 'currency', {})
currencyId = self.safe_string(currency, 'code')
code = self.safe_currency_code(currencyId)
result.append({
'id': self.safe_string(account, 'id'),
'type': self.safe_string(account, 'type'),
'code': code,
'info': account,
})
return result
def create_deposit_address(self, code, params={}):
accountId = self.safe_string(params, 'account_id')
params = self.omit(params, 'account_id')
if accountId is None:
self.load_accounts()
for i in range(0, len(self.accounts)):
account = self.accounts[i]
if account['code'] == code and account['type'] == 'wallet':
accountId = account['id']
break
if accountId is None:
raise ExchangeError(self.id + ' createDepositAddress could not find the account with matching currency code, specify an `account_id` extra param')
request = {
'account_id': accountId,
}
response = self.privatePostAccountsAccountIdAddresses(self.extend(request, params))
#
# {
# "data": {
# "id": "05b1ebbf-9438-5dd4-b297-2ddedc98d0e4",
# "address": "coinbasebase",
# "address_info": {
# "address": "coinbasebase",
# "destination_tag": "287594668"
# },
# "name": null,
# "created_at": "2019-07-01T14:39:29Z",
# "updated_at": "2019-07-01T14:39:29Z",
# "network": "eosio",
# "uri_scheme": "eosio",
# "resource": "address",
# "resource_path": "/v2/accounts/14cfc769-e852-52f3-b831-711c104d194c/addresses/05b1ebbf-9438-5dd4-b297-2ddedc98d0e4",
# "warnings": [
# {
# "title": "Only send EOS(EOS) to self address",
# "details": "Sending any other cryptocurrency will result in permanent loss.",
# "image_url": "https://dynamic-assets.coinbase.com/deaca3d47b10ed4a91a872e9618706eec34081127762d88f2476ac8e99ada4b48525a9565cf2206d18c04053f278f693434af4d4629ca084a9d01b7a286a7e26/asset_icons/1f8489bb280fb0a0fd643c1161312ba49655040e9aaaced5f9ad3eeaf868eadc.png"
# },
# {
# "title": "Both an address and EOS memo are required to receive EOS",
# "details": "If you send funds without an EOS memo or with an incorrect EOS memo, your funds cannot be credited to your account.",
# "image_url": "https://www.coinbase.com/assets/receive-warning-2f3269d83547a7748fb39d6e0c1c393aee26669bfea6b9f12718094a1abff155.png"
# }
# ],
# "warning_title": "Only send EOS(EOS) to self address",
# "warning_details": "Sending any other cryptocurrency will result in permanent loss.",
# "destination_tag": "287594668",
# "deposit_uri": "eosio:coinbasebase?dt=287594668",
# "callback_url": null
# }
# }
#
data = self.safe_value(response, 'data', {})
tag = self.safe_string(data, 'destination_tag')
address = self.safe_string(data, 'address')
return {
'currency': code,
'tag': tag,
'address': address,
'info': response,
}
def fetch_my_sells(self, symbol=None, since=None, limit=None, params={}):
# they don't have an endpoint for all historical trades
request = self.prepare_account_request(limit, params)
self.load_markets()
query = self.omit(params, ['account_id', 'accountId'])
sells = self.privateGetAccountsAccountIdSells(self.extend(request, query))
return self.parse_trades(sells['data'], None, since, limit)
def fetch_my_buys(self, symbol=None, since=None, limit=None, params={}):
# they don't have an endpoint for all historical trades
request = self.prepare_account_request(limit, params)
self.load_markets()
query = self.omit(params, ['account_id', 'accountId'])
buys = self.privateGetAccountsAccountIdBuys(self.extend(request, query))
return self.parse_trades(buys['data'], None, since, limit)
def fetch_transactions_with_method(self, method, code=None, since=None, limit=None, params={}):
request = self.prepare_account_request_with_currency_code(code, limit, params)
self.load_markets()
query = self.omit(params, ['account_id', 'accountId'])
response = getattr(self, method)(self.extend(request, query))
return self.parse_transactions(response['data'], None, since, limit)
def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
# fiat only, for crypto transactions use fetchLedger
return self.fetch_transactions_with_method('privateGetAccountsAccountIdWithdrawals', code, since, limit, params)
def fetch_deposits(self, code=None, since=None, limit=None, params={}):
# fiat only, for crypto transactions use fetchLedger
return self.fetch_transactions_with_method('privateGetAccountsAccountIdDeposits', code, since, limit, params)
def parse_transaction_status(self, status):
statuses = {
'created': 'pending',
'completed': 'ok',
'canceled': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, market=None):
#
# fiat deposit
#
# {
# "id": "f34c19f3-b730-5e3d-9f72",
# "status": "completed",
# "payment_method": {
# "id": "a022b31d-f9c7-5043-98f2",
# "resource": "payment_method",
# "resource_path": "/v2/payment-methods/a022b31d-f9c7-5043-98f2"
# },
# "transaction": {
# "id": "04ed4113-3732-5b0c-af86-b1d2146977d0",
# "resource": "transaction",
# "resource_path": "/v2/accounts/91cd2d36-3a91-55b6-a5d4-0124cf105483/transactions/04ed4113-3732-5b0c-af86"
# },
# "user_reference": "2VTYTH",
# "created_at": "2017-02-09T07:01:18Z",
# "updated_at": "2017-02-09T07:01:26Z",
# "resource": "deposit",
# "resource_path": "/v2/accounts/91cd2d36-3a91-55b6-a5d4-0124cf105483/deposits/f34c19f3-b730-5e3d-9f72",
# "committed": True,
# "payout_at": "2017-02-12T07:01:17Z",
# "instant": False,
# "fee": {"amount": "0.00", "currency": "EUR"},
# "amount": {"amount": "114.02", "currency": "EUR"},
# "subtotal": {"amount": "114.02", "currency": "EUR"},
# "hold_until": null,
# "hold_days": 0,
# "hold_business_days": 0,
# "next_step": null
# }
#
# fiat_withdrawal
#
# {
# "id": "cfcc3b4a-eeb6-5e8c-8058",
# "status": "completed",
# "payment_method": {
# "id": "8b94cfa4-f7fd-5a12-a76a",
# "resource": "payment_method",
# "resource_path": "/v2/payment-methods/8b94cfa4-f7fd-5a12-a76a"
# },
# "transaction": {
# "id": "fcc2550b-5104-5f83-a444",
# "resource": "transaction",
# "resource_path": "/v2/accounts/91cd2d36-3a91-55b6-a5d4-0124cf105483/transactions/fcc2550b-5104-5f83-a444"
# },
# "user_reference": "MEUGK",
# "created_at": "2018-07-26T08:55:12Z",
# "updated_at": "2018-07-26T08:58:18Z",
# "resource": "withdrawal",
# "resource_path": "/v2/accounts/91cd2d36-3a91-55b6-a5d4-0124cf105483/withdrawals/cfcc3b4a-eeb6-5e8c-8058",
# "committed": True,
# "payout_at": "2018-07-31T08:55:12Z",
# "instant": False,
# "fee": {"amount": "0.15", "currency": "EUR"},
# "amount": {"amount": "13130.69", "currency": "EUR"},
# "subtotal": {"amount": "13130.84", "currency": "EUR"},
# "idem": "e549dee5-63ed-4e79-8a96",
# "next_step": null
# }
#
subtotalObject = self.safe_value(transaction, 'subtotal', {})
feeObject = self.safe_value(transaction, 'fee', {})
id = self.safe_string(transaction, 'id')
timestamp = self.parse8601(self.safe_value(transaction, 'created_at'))
updated = self.parse8601(self.safe_value(transaction, 'updated_at'))
type = self.safe_string(transaction, 'resource')
amount = self.safe_float(subtotalObject, 'amount')
currencyId = self.safe_string(subtotalObject, 'currency')
currency = self.safe_currency_code(currencyId)
feeCost = self.safe_float(feeObject, 'amount')
feeCurrencyId = self.safe_string(feeObject, 'currency')
feeCurrency = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCost,
'currency': feeCurrency,
}
status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
if status is None:
committed = self.safe_value(transaction, 'committed')
status = 'ok' if committed else 'pending'
return {
'info': transaction,
'id': id,
'txid': id,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'address': None,
'tag': None,
'type': type,
'amount': amount,
'currency': currency,
'status': status,
'updated': updated,
'fee': fee,
}
def parse_trade(self, trade, market=None):
    #
    # {
    #     "id": "67e0eaec-07d7-54c4-a72c-2e92826897df",
    #     "status": "completed",
    #     "payment_method": {
    #         "id": "83562370-3e5c-51db-87da-752af5ab9559",
    #         "resource": "payment_method",
    #         "resource_path": "/v2/payment-methods/83562370-3e5c-51db-87da-752af5ab9559"
    #     },
    #     "transaction": {
    #         "id": "441b9494-b3f0-5b98-b9b0-4d82c21c252a",
    #         "resource": "transaction",
    #         "resource_path": "/v2/accounts/2bbf394c-193b-5b2a-9155-3b4732659ede/transactions/441b9494-b3f0-5b98-b9b0-4d82c21c252a"
    #     },
    #     "amount": {"amount": "1.00000000", "currency": "BTC"},
    #     "total": {"amount": "10.25", "currency": "USD"},
    #     "subtotal": {"amount": "10.10", "currency": "USD"},
    #     "created_at": "2015-01-31T20:49:02Z",
    #     "updated_at": "2015-02-11T16:54:02-08:00",
    #     "resource": "buy",
    #     "resource_path": "/v2/accounts/2bbf394c-193b-5b2a-9155-3b4732659ede/buys/67e0eaec-07d7-54c4-a72c-2e92826897df",
    #     "committed": True,
    #     "instant": False,
    #     "fee": {"amount": "0.15", "currency": "USD"},
    #     "payout_at": "2015-02-18T16:54:00-08:00"
    # }
    #
    symbol = None
    totalObject = self.safe_value(trade, 'total', {})
    amountObject = self.safe_value(trade, 'amount', {})
    subtotalObject = self.safe_value(trade, 'subtotal', {})
    feeObject = self.safe_value(trade, 'fee', {})
    id = self.safe_string(trade, 'id')
    timestamp = self.parse8601(self.safe_value(trade, 'created_at'))
    if market is None:
        # in the payload above 'amount' carries the base currency(BTC)
        # and 'total' carries the quote currency(USD) - the previous code
        # had these two swapped and produced inverted symbols
        baseId = self.safe_string(amountObject, 'currency')
        quoteId = self.safe_string(totalObject, 'currency')
        if (baseId is not None) and (quoteId is not None):
            base = self.safe_currency_code(baseId)
            quote = self.safe_currency_code(quoteId)
            symbol = base + '/' + quote
    orderId = None
    side = self.safe_string(trade, 'resource')  # 'buy' or 'sell'
    type = None
    cost = self.safe_float(subtotalObject, 'amount')
    amount = self.safe_float(amountObject, 'amount')
    price = None
    # derive the price, guarding against a missing or zero amount
    if cost is not None:
        if (amount is not None) and (amount != 0):
            price = cost / amount
    feeCost = self.safe_float(feeObject, 'amount')
    feeCurrencyId = self.safe_string(feeObject, 'currency')
    feeCurrency = self.safe_currency_code(feeCurrencyId)
    fee = {
        'cost': feeCost,
        'currency': feeCurrency,
    }
    return {
        'info': trade,
        'id': id,
        'order': orderId,
        'timestamp': timestamp,
        'datetime': self.iso8601(timestamp),
        'symbol': symbol,
        'type': type,
        'side': side,
        'takerOrMaker': None,
        'price': price,
        'amount': amount,
        'cost': cost,
        'fee': fee,
    }
def fetch_markets(self, params={}):
    """Assemble market structures from the cached currency list and exchange rates.

    Every id present in the exchange-rates table that is NOT a known fiat
    currency is treated as a crypto base and paired with every fiat quote.
    """
    cache = self.fetch_currencies_from_cache(params)
    currencies = self.safe_value(cache, 'currencies', {})
    exchangeRates = self.safe_value(cache, 'exchangeRates', {})
    currencyData = self.safe_value(currencies, 'data', [])
    fiatById = self.index_by(currencyData, 'id')
    rates = self.safe_value(self.safe_value(exchangeRates, 'data', {}), 'rates', {})
    result = []
    for baseId in list(rates.keys()):
        # https://github.com/ccxt/ccxt/issues/6066 - skip fiat bases
        if baseId in fiatById:
            continue
        base = self.safe_currency_code(baseId)
        for quoteCurrency in currencyData:
            quoteId = self.safe_string(quoteCurrency, 'id')
            quote = self.safe_currency_code(quoteId)
            result.append({
                'id': baseId + '-' + quoteId,
                'symbol': base + '/' + quote,
                'base': base,
                'quote': quote,
                'baseId': baseId,
                'quoteId': quoteId,
                'active': None,
                'info': quoteCurrency,
                'precision': {
                    'amount': None,
                    'price': None,
                },
                'limits': {
                    'amount': {
                        'min': None,
                        'max': None,
                    },
                    'price': {
                        'min': None,
                        'max': None,
                    },
                    'cost': {
                        'min': self.safe_float(quoteCurrency, 'min_size'),
                        'max': None,
                    },
                },
            })
    return result
def fetch_currencies_from_cache(self, params={}):
    """Return the cached currencies/exchange-rates payloads, refreshing on expiry.

    The cache lives in self.options['fetchCurrencies'] together with the
    timestamp of the last refresh and the expiry window in milliseconds.
    """
    options = self.safe_value(self.options, 'fetchCurrencies', {})
    timestamp = self.safe_integer(options, 'timestamp')
    expires = self.safe_integer(options, 'expires', 1000)
    now = self.milliseconds()
    stale = (timestamp is None) or ((now - timestamp) > expires)
    if stale:
        # both endpoints are refreshed together so they stay consistent
        self.options['fetchCurrencies'] = self.extend(options, {
            'currencies': self.publicGetCurrencies(params),
            'exchangeRates': self.publicGetExchangeRates(params),
            'timestamp': now,
        })
    return self.safe_value(self.options, 'fetchCurrencies', {})
def fetch_currencies(self, params={}):
    """Fetch the unified currencies dictionary.

    Fiat entries come from /v2/currencies, shaped like:
        {"data": [{"id": "AED", "name": "...", "min_size": "0.01000000"}, ...]}
    Crypto ids come from the /v2/exchange-rates table, shaped like:
        {"data": {"currency": "USD", "rates": {"AED": "3.67", "BTC": "...", ...}}}
    Any rate key that is not in the fiat table is classified as crypto.
    """
    cache = self.fetch_currencies_from_cache(params)
    currencies = self.safe_value(cache, 'currencies', {})
    exchangeRates = self.safe_value(cache, 'exchangeRates', {})
    data = self.safe_value(currencies, 'data', [])
    fiatById = self.index_by(data, 'id')
    rates = self.safe_value(self.safe_value(exchangeRates, 'data', {}), 'rates', {})
    result = {}
    for key in list(rates.keys()):
        currencyType = 'fiat' if (key in fiatById) else 'crypto'
        currency = self.safe_value(fiatById, key, {})
        currencyId = self.safe_string(currency, 'id', key)
        code = self.safe_currency_code(currencyId)
        result[code] = {
            'id': currencyId,
            'code': code,
            'info': currency,  # the original payload
            'type': currencyType,
            'name': self.safe_string(currency, 'name'),
            'active': True,
            'fee': None,
            'precision': None,
            'limits': {
                'amount': {
                    'min': self.safe_float(currency, 'min_size'),
                    'max': None,
                },
                'price': {
                    'min': None,
                    'max': None,
                },
                'cost': {
                    'min': None,
                    'max': None,
                },
                'withdraw': {
                    'min': None,
                    'max': None,
                },
            },
        }
    return result
def fetch_ticker(self, symbol, params={}):
    """Build a unified ticker from the buy/sell/spot price endpoints.

    Coinbase has no single ticker endpoint, so three requests are issued;
    most unified fields (high/low/volume/...) are therefore unavailable.
    """
    self.load_markets()
    timestamp = self.seconds()
    market = self.market(symbol)
    request = self.extend({'symbol': market['id']}, params)
    buy = self.publicGetPricesSymbolBuy(request)
    sell = self.publicGetPricesSymbolSell(request)
    spot = self.publicGetPricesSymbolSpot(request)
    last = self.safe_float(spot['data'], 'amount')
    return {
        'symbol': symbol,
        'timestamp': timestamp,
        'datetime': self.iso8601(timestamp),
        'bid': self.safe_float(sell['data'], 'amount'),
        'ask': self.safe_float(buy['data'], 'amount'),
        'last': last,
        'high': None,
        'low': None,
        'bidVolume': None,
        'askVolume': None,
        'vwap': None,
        'open': None,
        'close': last,
        'previousClose': None,
        'change': None,
        'percentage': None,
        'average': None,
        'baseVolume': None,
        'quoteVolume': None,
        'info': {
            'buy': buy,
            'sell': sell,
            'spot': spot,
        },
    }
def fetch_balance(self, params={}):
    """Fetch balances across the configured account types.

    Only accounts whose 'type' is in params['type'] (or the
    self.options['accounts'] default) are included; several accounts
    holding the same currency are aggregated.
    """
    self.load_markets()
    response = self.privateGetAccounts(params)
    balances = self.safe_value(response, 'data')
    accounts = self.safe_value(params, 'type', self.options['accounts'])
    result = {'info': response}
    for balance in balances:
        if not self.in_array(balance['type'], accounts):
            continue
        currencyId = self.safe_string(balance['balance'], 'currency')
        code = self.safe_currency_code(currencyId)
        total = self.safe_float(balance['balance'], 'amount')
        if code in result:
            # same currency held in more than one account - sum it up
            result[code]['free'] = self.sum(result[code]['free'], total)
            result[code]['total'] = self.sum(result[code]['total'], total)
        else:
            result[code] = {
                'free': total,  # no locked funds info, everything counts as free
                'used': None,
                'total': total,
            }
    return self.parse_balance(result)
def fetch_ledger(self, code=None, since=None, limit=None, params={}):
    """Fetch ledger entries for the account selected by currency code or account_id."""
    self.load_markets()
    currency = self.currency(code) if (code is not None) else None
    request = self.prepare_account_request_with_currency_code(code, limit, params)
    query = self.omit(params, ['account_id', 'accountId'])
    # for pagination use parameter 'starting_after'
    # the value for the next page can be obtained from the result of the previous call in the 'pagination' field
    # eg: instance.last_json_response.pagination.next_starting_after
    response = self.privateGetAccountsAccountIdTransactions(self.extend(request, query))
    return self.parse_ledger(response['data'], currency, since, limit)
def parse_ledger_entry_status(self, status):
    """Translate an exchange ledger status into the unified form.

    Unknown statuses pass through unchanged.
    """
    statuses = {
        'completed': 'ok',
    }
    return self.safe_string(statuses, status, status)
def parse_ledger_entry_type(self, type):
    """Translate an exchange transaction type into a unified ledger type.

    'buy'/'sell' are trades; everything else is a fund movement:
    fiat_deposit / fiat_withdrawal, exchange_deposit (fiat withdrawal to
    coinbasepro), exchange_withdrawal (fiat deposit from coinbasepro),
    send (crypto deposit OR withdrawal), pro_deposit (crypto withdrawal
    to coinbasepro), pro_withdrawal (crypto deposit from coinbasepro).
    Unknown types pass through unchanged.
    """
    mapping = {}
    for tradeType in ('buy', 'sell'):
        mapping[tradeType] = 'trade'
    transactionTypes = (
        'fiat_deposit',
        'fiat_withdrawal',
        'exchange_deposit',
        'exchange_withdrawal',
        'send',
        'pro_deposit',
        'pro_withdrawal',
    )
    for transactionType in transactionTypes:
        mapping[transactionType] = 'transaction'
    return self.safe_string(mapping, type, type)
def parse_ledger_entry(self, item, currency=None):
    #
    # all transaction types(send, buy, sell, fiat_deposit, fiat_withdrawal,
    # exchange_deposit, exchange_withdrawal, pro_deposit, pro_withdrawal)
    # share one shape - a representative crypto withdrawal:
    #
    #     {
    #         id: '459aad99-2c41-5698-ac71-b6b81a05196c',
    #         type: 'send',
    #         status: 'completed',
    #         amount: {amount: '-0.36775642', currency: 'BTC'},
    #         native_amount: {amount: '-1111.65', currency: 'GBP'},
    #         description: null,
    #         created_at: '2019-03-20T08:37:07Z',
    #         updated_at: '2019-03-20T08:49:33Z',
    #         resource: 'transaction',
    #         resource_path: '/v2/accounts/c6afbd34-4bd0-501e-8616-4862c193cd84/transactions/459aad99-2c41-5698-ac71-b6b81a05196c',
    #         instant_exchange: False,
    #         network: {
    #             status: 'confirmed',
    #             hash: '2732bbcf35c69217c47b36dce64933d103895277fe25738ffb9284092701e05b',
    #             transaction_fee: {amount: '0.00000000', currency: 'BTC'},
    #             transaction_amount: {amount: '0.36775642', currency: 'BTC'},
    #             confirmations: 15682
    #         },
    #         details: {title: 'Sent Bitcoin', subtitle: 'To Bitcoin address'}
    #     }
    #
    # notes on the variations:
    # - outgoing entries(withdrawals, sells, pro/exchange deposits) carry a
    #   negative 'amount'
    # - 'network' only appears for on-chain transfers, and 'transaction_fee'
    #   inside it only for outgoing crypto transactions in the observed samples
    #
    amountInfo = self.safe_value(item, 'amount', {})
    amount = self.safe_float(amountInfo, 'amount')
    direction = None
    # a negative amount means funds leaving the account
    # (guard against a missing amount - safe_float can return None)
    if (amount is not None) and (amount < 0):
        direction = 'out'
        amount = -amount
    else:
        direction = 'in'
    currencyId = self.safe_string(amountInfo, 'currency')
    code = self.safe_currency_code(currencyId, currency)
    #
    # the address and txid do not belong to the unified ledger structure
    #
    # address = None
    # if item['to']:
    #     address = self.safe_string(item['to'], 'address')
    # }
    # txid = None
    #
    fee = None
    networkInfo = self.safe_value(item, 'network', {})
    # txid = network['hash']  # txid does not belong to the unified ledger structure
    feeInfo = self.safe_value(networkInfo, 'transaction_fee')
    if feeInfo is not None:
        feeCurrencyId = self.safe_string(feeInfo, 'currency')
        feeCurrencyCode = self.safe_currency_code(feeCurrencyId, currency)
        feeAmount = self.safe_float(feeInfo, 'amount')
        fee = {
            'cost': feeAmount,
            'currency': feeCurrencyCode,
        }
    timestamp = self.parse8601(self.safe_value(item, 'created_at'))
    id = self.safe_string(item, 'id')
    type = self.parse_ledger_entry_type(self.safe_string(item, 'type'))
    status = self.parse_ledger_entry_status(self.safe_string(item, 'status'))
    path = self.safe_string(item, 'resource_path')
    accountId = None
    if path is not None:
        # resource_path looks like /v2/accounts/{account_id}/transactions/{id}
        parts = path.split('/')
        numParts = len(parts)
        if numParts > 3:
            accountId = parts[3]
    return {
        'info': item,
        'id': id,
        'timestamp': timestamp,
        'datetime': self.iso8601(timestamp),
        'direction': direction,
        'account': accountId,
        'referenceId': None,
        'referenceAccount': None,
        'type': type,
        'currency': code,
        'amount': amount,
        'before': None,
        'after': None,
        'status': status,
        'fee': fee,
    }
def find_account_id(self, code):
    """Return the id of the first account whose currency code matches, else None."""
    self.load_markets()
    self.load_accounts()
    for account in self.accounts:
        if account['code'] == code:
            return account['id']
    return None
def prepare_account_request(self, limit=None, params={}):
    """Build the request dict for account endpoints; account_id is mandatory."""
    accountId = self.safe_string_2(params, 'account_id', 'accountId')
    if accountId is None:
        raise ArgumentsRequired(self.id + ' method requires an account_id(or accountId) parameter')
    request = {'account_id': accountId}
    if limit is not None:
        request['limit'] = limit
    return request
def prepare_account_request_with_currency_code(self, code=None, limit=None, params={}):
    """Build the request dict, resolving the account id from a currency code if needed."""
    accountId = self.safe_string_2(params, 'account_id', 'accountId')
    if accountId is None:
        if code is None:
            raise ArgumentsRequired(self.id + ' method requires an account_id(or accountId) parameter OR a currency code argument')
        # fall back to looking the account up by its currency code
        accountId = self.find_account_id(code)
        if accountId is None:
            raise ExchangeError(self.id + ' could not find account id for ' + code)
    request = {'account_id': accountId}
    if limit is not None:
        request['limit'] = limit
    return request
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
    """Build the URL, body and auth headers for a request to the exchange.

    Private calls carry CB-ACCESS-* headers where the signature is produced
    by self.hmac over the concatenation nonce + method + path + payload,
    so the signed string must exactly match the bytes that get sent.
    Returns a dict consumed by the transport layer.
    """
    fullPath = '/' + self.version + '/' + self.implode_params(path, params)
    # params interpolated into the path must not be repeated in the query
    query = self.omit(params, self.extract_params(path))
    if method == 'GET':
        if query:
            fullPath += '?' + self.urlencode(query)
    url = self.urls['api'] + fullPath
    if api == 'private':
        self.check_required_credentials()
        nonce = str(self.nonce())
        payload = ''
        if method != 'GET':
            if query:
                # non-GET requests send the query as a JSON body,
                # which is also part of the signed payload
                body = self.json(query)
                payload = body
        auth = nonce + method + fullPath + payload
        signature = self.hmac(self.encode(auth), self.encode(self.secret))
        headers = {
            'CB-ACCESS-KEY': self.apiKey,
            'CB-ACCESS-SIGN': signature,
            'CB-ACCESS-TIMESTAMP': nonce,
            'Content-Type': 'application/json',
        }
    return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
    """Raise a unified exception when the response carries an error payload.

    Two error shapes are handled:
        {"error": "invalid_request", "error_description": "..."}
        {"errors": [{"id": "not_found", "message": "Not found"}]}
    A response without a 'data' field is considered malformed.
    """
    if response is None:
        return  # fallback to default error handler
    feedback = self.id + ' ' + body
    # single-error shape
    errorCode = self.safe_string(response, 'error')
    if errorCode is not None:
        self.throw_exactly_matched_exception(self.exceptions, errorCode, feedback)
        raise ExchangeError(feedback)
    # multi-error shape - only the first entry is used for matching
    errors = self.safe_value(response, 'errors')
    if errors is not None:
        if isinstance(errors, list):
            if len(errors) > 0:
                firstErrorCode = self.safe_string(errors[0], 'id')
                if firstErrorCode is not None:
                    self.throw_exactly_matched_exception(self.exceptions, firstErrorCode, feedback)
                    raise ExchangeError(feedback)
    data = self.safe_value(response, 'data')
    if data is None:
        raise ExchangeError(self.id + ' failed due to a malformed response ' + self.json(response))
| 45.289125 | 290 | 0.498282 |
acec196dbdc0c732afe27d5abe2d7e90b70918b7 | 21 | py | Python | atomate/__init__.py | srshivani/atomate | 1e851d70a5f107736e3b9c6775e2e9e4a2de7a5d | [
"BSD-3-Clause-LBNL"
] | 3 | 2021-08-02T09:19:20.000Z | 2022-03-28T17:37:47.000Z | atomate/__init__.py | srshivani/atomate | 1e851d70a5f107736e3b9c6775e2e9e4a2de7a5d | [
"BSD-3-Clause-LBNL"
] | null | null | null | atomate/__init__.py | srshivani/atomate | 1e851d70a5f107736e3b9c6775e2e9e4a2de7a5d | [
"BSD-3-Clause-LBNL"
] | 2 | 2017-11-14T21:38:29.000Z | 2017-11-14T21:42:14.000Z | __version__ = "0.9.4" | 21 | 21 | 0.666667 |
acec1bba04064b3de291c3c79b6d36a3317519f0 | 5,019 | py | Python | Linked_Lists/5SumLists.py | RoKu1/cracking-the-coding-interview | ce2fabba75f1edf69b81a80022eb9ebac8a09af2 | [
"Apache-2.0"
] | null | null | null | Linked_Lists/5SumLists.py | RoKu1/cracking-the-coding-interview | ce2fabba75f1edf69b81a80022eb9ebac8a09af2 | [
"Apache-2.0"
] | null | null | null | Linked_Lists/5SumLists.py | RoKu1/cracking-the-coding-interview | ce2fabba75f1edf69b81a80022eb9ebac8a09af2 | [
"Apache-2.0"
] | null | null | null | class Node:
def __init__(self, data):
self.data = data
self.next = None
class LList:
    """Minimal singly-linked list wrapper holding only the head node."""

    def __init__(self):
        self.head = None

    def givehead(self):
        """Return the head node, or None for an empty list."""
        return self.head

    def printlist(self):
        """Print the list as 'a -->b -->c' followed by a newline."""
        temp = self.head
        while temp is not None:
            if temp.next is not None:
                print(temp.data, end=" -->")
            else:
                print(temp.data)
            # bug fix: advance via 'next' - nodes have no 'prev' attribute,
            # so the original 'temp = temp.prev' raised AttributeError
            temp = temp.next
def createfromlist(Arr):
    """Build an LList from a python sequence, preserving element order."""
    llist = LList()
    tail = None
    for value in Arr:
        node = Node(value)
        if tail is None:
            # first element becomes the head
            llist.head = node
        else:
            tail.next = node
        tail = node
    return llist
"""
2.5 Sum Lists: You have two numbers represented by a linked list, where each node contains a single
digit. The digits are stored in reverse order, such that the 1's digit is at the head of the list. Write a
function that adds the two numbers and returns the sum as a linked list.
EXAMPLE
Input: (7 -> 1 -> 6) + (5 -> 9 -> 2). That is, 617 + 295.
Output: 2 -> 1 -> 9. That is, 912.
FOLLOW UP
Suppose the digits are stored in forward order. Repeat the above problem.
EXAMPLE
Input: (6 -> 1 -> 7) + (2 -> 9 -> 5). That is, 617 + 295.
Output: 9 -> 1 -> 2. That is, 912.
Hints: #7, #30, #71 #95, #109
"""
"""
Solution --> for Reverse order
It is easy -> as we know that we can start adding from the head__
TimeComplx --> O(max(n,m)) --> n and m being the length
"""
def sumlist(head1, head2):
    """Add two numbers stored as reversed-digit linked lists.

    Both arguments are head Nodes whose lists store the least significant
    digit first; the result is returned as a new LList in the same format.

    Bug fixes vs. the original:
    - the tail-copying loops never advanced the output cursor, so every
      remaining digit overwrote the previous one and the final carry node
      could clobber the last digit
    - the 'if s == 10' carry special-case was redundant(s // 10 covers it)
    """
    result = LList()
    tail = None
    carry = 0
    # keep going while either list has digits or a carry is pending
    while head1 is not None or head2 is not None or carry:
        s = carry
        if head1 is not None:
            s += head1.data
            head1 = head1.next
        if head2 is not None:
            s += head2.data
            head2 = head2.next
        carry = s // 10
        node = Node(s % 10)
        if result.head is None:
            result.head = node
        else:
            tail.next = node
        tail = node
    return result
# demo: digits are least-significant first, so these lists encode 3617 + 6395
sumlst = sumlist(createfromlist([7, 1, 6, 3]).head, createfromlist([5, 9, 3, 6]).head)
# sumlst.printlist()
"""
Solution for forward order ==>
Just reverse both linked lists --> max O(n)+O(m) for that and O(n) of above
2O(n)+O(m) --> O(n)
"""
def revlist(llist):
    """Reverse an LList in place and return it.

    Bug fix: the original dereferenced llist.head.next up front and
    therefore crashed on an empty list; this version handles empty and
    single-element lists naturally.
    """
    prev = None
    curr = llist.head
    while curr is not None:
        nxt = curr.next
        curr.next = prev  # flip the pointer
        prev = curr
        curr = nxt
    llist.head = prev
    return llist
# demo for forward-order input: reverse both lists, add, then reverse the sum back
l1r = revlist(createfromlist([3, 6, 1, 7]))
l2r = revlist(createfromlist([6, 3, 9, 5]))
sumlst = revlist(sumlist(l1r.head, l2r.head))
# sumlst.printlist()
"""
Doing the forward one with a recursive function
1--> first we need to pad the shorter list with zeros
2--> do the rest
"""
sumlst2 = LList()
def length(temp):
    """Return the number of nodes reachable from 'temp' (None counts as 0).

    Bug fix: the original walked 'temp.prev', but Node only defines
    'next', so any non-empty list raised AttributeError.
    """
    count = 0
    while temp is not None:
        count += 1
        temp = temp.next
    return count
def sumlist2(llist1, llist2):
    """Add two forward-order digit lists using the recursive helper.

    Pads the shorter list with leading zero nodes so both have equal
    length, then delegates to recuaddition, which writes the result into
    the module-level 'sumlst2' list.  Both input lists are mutated
    (padded) and the result is printed rather than returned.
    NOTE(review): the printlist()/print(carry) calls look like debug
    output left in place - confirm before relying on stdout.
    """
    l1 = length(llist1.head)
    l2 = length(llist2.head)
    # We equalize the lengths by padding zeros at start of lists
    if l1 != l2:
        if l1 < l2:
            missingnodes = l2 - l1
            for i in range(0, missingnodes):
                node = Node(0)
                node.next = llist1.head
                llist1.head = node
        else:
            missingnodes = l1 - l2
            for i in range(0, missingnodes):
                node = Node(0)
                node.next = llist2.head
                llist2.head = node
    llist1.printlist()
    llist2.printlist()
    carry, sumhead = recuaddition(llist1.head, llist2.head)
    print(carry)
    # a final carry means the sum has one more digit than the inputs
    if carry:
        node = Node(1)
        node.next = sumhead
        sumlst2.head = node
    sumlst2.printlist()
def recuaddition(h1, h2):
    """Recursively add two equal-length, forward-order digit lists.

    Recurses to the tail (least significant digits), adds on the way back
    so the carry propagates toward the head, and prepends each digit node
    to the module-level 'sumlst2'.  Returns (carry, head of partial sum).

    Bug fix: the original used 'h1.prev'/'h2.prev' for the base-case test
    and the recursive step, but Node is singly linked and only has 'next',
    so every call raised AttributeError.
    """
    if h1.next is None and h2.next is None:
        # base case: the last nodes hold the least significant digits
        total = h1.data + h2.data
        carry = total // 10
        s = total % 10
        sumlst2.head = Node(s)
        print("Node -> " + str(s) + "Carry is --> " + str(carry))
        return carry, sumlst2.head
    carry, rest = recuaddition(h1.next, h2.next)
    total = h1.data + h2.data + carry
    carry = total // 10
    s = total % 10
    node = Node(s)
    node.next = rest
    sumlst2.head = node
    print("Node -> " + str(s) + "Carry is --> " + str(carry))
    return carry, sumlst2.head
sumlist2(createfromlist([9, 9, 9, 9]), createfromlist([9, 9, 9]))
| 25.348485 | 105 | 0.543535 |
acec1cd39aba18978ac64f1a8230c9cb0f87f73f | 7,290 | py | Python | argo/workflows/client/models/v1_config_map_volume_source.py | ButterflyNetwork/argo-client-python | 00e50bb5eb6c64cfa76eb57c4a29a5fdd856611f | [
"Apache-2.0"
] | null | null | null | argo/workflows/client/models/v1_config_map_volume_source.py | ButterflyNetwork/argo-client-python | 00e50bb5eb6c64cfa76eb57c4a29a5fdd856611f | [
"Apache-2.0"
] | null | null | null | argo/workflows/client/models/v1_config_map_volume_source.py | ButterflyNetwork/argo-client-python | 00e50bb5eb6c64cfa76eb57c4a29a5fdd856611f | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Argo
Python client for Argo Workflows # noqa: E501
OpenAPI spec version: release-2.8
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class V1ConfigMapVolumeSource(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'default_mode': 'int',
'items': 'list[V1KeyToPath]',
'name': 'str',
'optional': 'bool'
}
attribute_map = {
'default_mode': 'defaultMode',
'items': 'items',
'name': 'name',
'optional': 'optional'
}
def __init__(self, default_mode=None, items=None, name=None, optional=None): # noqa: E501
"""V1ConfigMapVolumeSource - a model defined in Swagger""" # noqa: E501
self._default_mode = None
self._items = None
self._name = None
self._optional = None
self.discriminator = None
if default_mode is not None:
self.default_mode = default_mode
if items is not None:
self.items = items
if name is not None:
self.name = name
if optional is not None:
self.optional = optional
@property
def default_mode(self):
"""Gets the default_mode of this V1ConfigMapVolumeSource. # noqa: E501
Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. # noqa: E501
:return: The default_mode of this V1ConfigMapVolumeSource. # noqa: E501
:rtype: int
"""
return self._default_mode
@default_mode.setter
def default_mode(self, default_mode):
"""Sets the default_mode of this V1ConfigMapVolumeSource.
Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. # noqa: E501
:param default_mode: The default_mode of this V1ConfigMapVolumeSource. # noqa: E501
:type: int
"""
self._default_mode = default_mode
@property
def items(self):
"""Gets the items of this V1ConfigMapVolumeSource. # noqa: E501
If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. # noqa: E501
:return: The items of this V1ConfigMapVolumeSource. # noqa: E501
:rtype: list[V1KeyToPath]
"""
return self._items
@items.setter
def items(self, items):
"""Sets the items of this V1ConfigMapVolumeSource.
If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. # noqa: E501
:param items: The items of this V1ConfigMapVolumeSource. # noqa: E501
:type: list[V1KeyToPath]
"""
self._items = items
@property
def name(self):
"""Gets the name of this V1ConfigMapVolumeSource. # noqa: E501
Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names # noqa: E501
:return: The name of this V1ConfigMapVolumeSource. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this V1ConfigMapVolumeSource.
Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names # noqa: E501
:param name: The name of this V1ConfigMapVolumeSource. # noqa: E501
:type: str
"""
self._name = name
@property
def optional(self):
    """Gets the optional of this V1ConfigMapVolumeSource.

    Whether the ConfigMap or its keys must be defined for volume setup to
    succeed.

    :return: The optional of this V1ConfigMapVolumeSource.
    :rtype: bool
    """
    return self._optional
@optional.setter
def optional(self, optional):
    """Sets the optional of this V1ConfigMapVolumeSource.

    Whether the ConfigMap or its keys must be defined for volume setup to
    succeed.

    :param optional: The optional of this V1ConfigMapVolumeSource.
    :type: bool
    """
    self._optional = optional
def to_dict(self):
    """Return the model's declared properties as a plain ``dict``.

    Nested models (anything exposing ``to_dict``) are serialized
    recursively, including models stored inside lists and dict values.
    """
    def _serialize(value):
        # Order of checks mirrors the generated client convention:
        # lists first, then model objects, then dicts, then scalars.
        if isinstance(value, list):
            return [item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value]
        if hasattr(value, "to_dict"):
            return value.to_dict()
        if isinstance(value, dict):
            return {key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()}
        return value

    result = {attr: _serialize(getattr(self, attr))
              for attr in self.swagger_types}
    # Generated-code quirk: if the model subclasses dict, merge its items in.
    if issubclass(V1ConfigMapVolumeSource, dict):
        for key, value in self.items():
            result[key] = value
    return result
def to_str(self):
    """Return a pretty-printed string form of :meth:`to_dict`."""
    properties = self.to_dict()
    return pprint.pformat(properties)
def __repr__(self):
    """Delegate to :meth:`to_str` so `print`/`pprint` show the model dict."""
    return self.to_str()
def __eq__(self, other):
    """Two V1ConfigMapVolumeSource objects are equal iff all attributes match."""
    # Non-model operands are never equal; otherwise compare attribute dicts.
    return (isinstance(other, V1ConfigMapVolumeSource)
            and self.__dict__ == other.__dict__)
def __ne__(self, other):
    """Inverse of ``==`` (kept explicit for the generated-client style)."""
    equal = self == other
    return not equal
| 36.089109 | 504 | 0.627023 |
acec1e562e7e24bef2997ccade8ce7c4a3347db3 | 5,851 | py | Python | lambda/qldb/create_iot.py | UBC-CIC/VaccineDistribution | 3bd8d507eaabcf3a3213ec7c1d25127dd221c537 | [
"MIT"
] | null | null | null | lambda/qldb/create_iot.py | UBC-CIC/VaccineDistribution | 3bd8d507eaabcf3a3213ec7c1d25127dd221c537 | [
"MIT"
] | null | null | null | lambda/qldb/create_iot.py | UBC-CIC/VaccineDistribution | 3bd8d507eaabcf3a3213ec7c1d25127dd221c537 | [
"MIT"
] | null | null | null | ## In real-life scenarios IoT's will already be mapped to the containers
## And in "Initiate shipment" -- instead of creating a container by manually entering data into the containers table,
## logic similar to placing batches in cases will be followed: out of all the containers, one that is empty and free will be found and assigned.
from logging import basicConfig, getLogger, INFO
from connect_to_ledger import create_qldb_driver
from amazon.ion.simpleion import dumps, loads
logger = getLogger(__name__)
basicConfig(level=INFO)
from pyion2json import ion_cursor_to_json
from constants import Constants
from register_person import get_index_number
from insert_document import insert_documents
from register_person import get_scentityid_from_personid
from sampledata.sample_data import convert_object_to_ion, get_value_from_documentid,document_exist,update_document,get_document
from accept_requests_for_admin import person_is_superadmin
# Maps the numeric IoTType field to a human-readable sensor name.
_IOT_TYPE_NAMES = {
    1: "Temperature Sensor",
    2: "Humidity Sensor",
    3: "Location Sensor",
}


def register_iot(transaction_executor, person_id, iot):
    """Register a new IoT device document in the QLDB IoT table.

    Only super admins may register devices. The `iot` dict is mutated in
    place: it gains IoTNumber (next sequential index), IoTName (derived
    from IoTType) and IoTId (the inserted document id).

    :param transaction_executor: QLDB transaction executor.
    :param person_id: Id of the person making the request.
    :param iot: Dict describing the device; must contain an 'IoTType' key.
    :return: Lambda-style response dict; 200 with the enriched IoT on
        success, 400 with an error message if the caller is not a super admin.
    """
    if not person_is_superadmin(transaction_executor, person_id):
        return {
            'statusCode': 400,
            'body': "You are not a Super admin"}
    iot_number = get_index_number(transaction_executor, Constants.IOT_TABLE_NAME, "IoTNumber")
    iot.update({"IoTNumber": iot_number})
    # Fix: the fallback name used to be the typo "UnkownSensor"; it now reads
    # "Unknown Sensor", consistent with the other sensor names.
    iot_name = _IOT_TYPE_NAMES.get(iot['IoTType'], "Unknown Sensor")
    iot.update({"IoTName": iot_name})
    logger.info("iot_number is :%s", iot_number)
    iot_id = insert_documents(transaction_executor, Constants.IOT_TABLE_NAME, [iot])
    iot.update({"IoTId": iot_id})
    # Replaced a leftover debug print() with proper logging.
    logger.info("Registered IoT with document id %s", iot_id)
    return {
        'statusCode': 200,
        'body': {
            "IoT": iot
        }
    }
# create iot in iot table
## update iot ids in containers
def assign_iot(transaction_executor, iot_id, container_id, person_id):
    """Attach a registered IoT device to a container.

    The caller must belong to the carrier company that owns the container.
    On success the IoT document's ContainerId is updated and the IoT id is
    appended to the container's IotIds list.
    """
    requester_entity_id = get_scentityid_from_personid(transaction_executor, person_id)
    carrier_id = get_value_from_documentid(transaction_executor, Constants.CONTAINER_TABLE_NAME, container_id, "CarrierCompanyId")
    # Guard clauses: authorization first, then container existence.
    if requester_entity_id != carrier_id[0]:
        return {
            'statusCode': 400,
            'body': "Not authorized!"}
    if not document_exist(transaction_executor, Constants.CONTAINER_TABLE_NAME, container_id):
        return {
            'statusCode': 400,
            'body': "Container not found"}
    update_document(transaction_executor, Constants.IOT_TABLE_NAME, "ContainerId", iot_id, container_id)
    statement = "FROM {} AS s by id WHERE id = '{}' INSERT INTO s.IotIds VALUE ?".format(
        Constants.CONTAINER_TABLE_NAME, container_id)
    cursor = transaction_executor.execute_statement(statement, iot_id)
    iot_document = fetch_iot(transaction_executor, iot_id)["body"]
    try:
        # Advancing the cursor confirms the INSERT actually produced a result.
        next(cursor)
        message = " ========== I o T ========= C R E A T E D ========== A N D ====== A D D E D =========T O === {}".format(container_id)
        return {
            'statusCode': 200,
            'body': {
                "IoT": iot_document,
                "Message": message
            }}
    except:
        return {
            'statusCode': 400,
            'body': "Problem in Iot assignment"}
def fetch_iot(transaction_executor, iot_id):
    """Look up a single IoT document by its document id.

    Returns a 200 response wrapping the first matching document, or a 400
    response when the lookup fails (bad id, empty result, parse error).
    """
    try:
        cursor = get_document(transaction_executor, Constants.IOT_TABLE_NAME, iot_id)
        documents = ion_cursor_to_json(cursor)
        return {
            'statusCode': 200,
            'body': {
                "IoT": documents[0]
            }
        }
    except:
        # Any failure is reported to the caller as a client error.
        return {
            'statusCode': 400,
            'body': "Check IoT Id"}
######################################################################################
def create_iot(event):
    """Lambda entry point: register a new IoT device.

    Expects event keys "IoT" (device dict) and "PersonId" (requester).
    """
    try:
        with create_qldb_driver() as driver:
            iot_payload = event["IoT"]
            requester_id = event["PersonId"]
            return driver.execute_lambda(
                lambda executor: register_iot(executor, requester_id, iot_payload))
    except Exception:
        return {
            'statusCode': 400,
            'body': 'Error registering IoT.'}
def get_iot(event):
    """Lambda entry point: fetch an IoT document by id.

    Expects event key "IoTId".
    """
    try:
        with create_qldb_driver() as driver:
            requested_iot_id = event["IoTId"]
            return driver.execute_lambda(
                lambda executor: fetch_iot(executor, requested_iot_id))
    except Exception:
        return {
            'statusCode': 400,
            'body': 'Error getting IoT.'}
def assign_iot_to_container(event):
    """Lambda entry point: attach an IoT device to a container.

    Expects event keys "ContainerId", "PersonId" and "IoTId".
    """
    try:
        with create_qldb_driver() as driver:
            target_container_id = event["ContainerId"]
            requester_id = event["PersonId"]
            device_id = event["IoTId"]
            return driver.execute_lambda(
                lambda executor: assign_iot(executor, device_id, target_container_id, requester_id))
    except Exception:
        return {
            'statusCode': 400,
            'body': 'Error assigning IoT.'}
| 37.50641 | 162 | 0.597505 |
acec1e62867e77ee313cd397f94bb7b8579439f4 | 20,715 | py | Python | tensorflow/lite/python/convert.py | droidiyann/tensorflow_tensorflow | ed10660cf38306921faaa67ddbc3f369441bcb6d | [
"Apache-2.0"
] | null | null | null | tensorflow/lite/python/convert.py | droidiyann/tensorflow_tensorflow | ed10660cf38306921faaa67ddbc3f369441bcb6d | [
"Apache-2.0"
] | null | null | null | tensorflow/lite/python/convert.py | droidiyann/tensorflow_tensorflow | ed10660cf38306921faaa67ddbc3f369441bcb6d | [
"Apache-2.0"
] | 1 | 2019-03-20T01:09:39.000Z | 2019-03-20T01:09:39.000Z | # Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts a frozen graph into a TFLite FlatBuffer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import enum # pylint: disable=g-bad-import-order
import os as _os
import platform as _platform
import subprocess as _subprocess
import tempfile as _tempfile
import six
from six.moves import map
from tensorflow.lite.python import lite_constants
from tensorflow.lite.python import util
from tensorflow.lite.python import wrap_toco
from tensorflow.lite.toco import model_flags_pb2 as _model_flags_pb2
from tensorflow.lite.toco import toco_flags_pb2 as _toco_flags_pb2
from tensorflow.lite.toco import types_pb2 as _types_pb2
from tensorflow.python.platform import resource_loader as _resource_loader
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export as _tf_export
# Find the toco_from_protos binary using the resource loader if using from
# bazel, otherwise we are in a pip where console_scripts already has
# the toco_from_protos tool.
if lite_constants.EXPERIMENTAL_USE_TOCO_API_DIRECTLY:
  # Empty path means: convert in-process through the pybind wrapper
  # (wrap_toco) instead of shelling out to a separate binary.
  _toco_from_proto_bin = ""
else:
  _toco_from_proto_bin = _resource_loader.get_path_to_datafile(
      "../toco/python/toco_from_protos")

if _toco_from_proto_bin and not _os.path.exists(_toco_from_proto_bin):
  # Resource-loader path does not exist (pip install); fall back to the
  # console-script name and rely on PATH lookup at subprocess time.
  _toco_from_proto_bin = "toco_from_protos"
def _try_convert_to_unicode(output):
  """Best-effort conversion of subprocess output to text.

  None becomes the empty string; bytes are decoded as UTF-8 when possible;
  anything that cannot be decoded is returned unchanged.
  """
  if output is None:
    return u""
  if isinstance(output, bytes):
    try:
      return output.decode("utf-8")
    except UnicodeDecodeError:
      # Not valid UTF-8 -- hand the raw bytes back to the caller.
      pass
  return output
@_tf_export("lite.OpsSet")
class OpsSet(enum.Enum):
"""Enum class defining the sets of ops available to generate TFLite models.
WARNING: Experimental interface, subject to change.
"""
# Convert model using TensorFlow Lite builtin ops.
TFLITE_BUILTINS = "TFLITE_BUILTINS"
# Convert model using TensorFlow ops. Not all TensorFlow ops are available.
# WARNING: Experimental interface, subject to change.
SELECT_TF_OPS = "SELECT_TF_OPS"
# Convert model using only TensorFlow Lite quantized int8 operations.
# Specifying this will throw an error for operations that do not yet have
# quantized implementations.
TFLITE_BUILTINS_INT8 = "TFLITE_BUILTINS_INT8"
def __str__(self):
return self.value
@staticmethod
def get_options():
"""Returns a list of OpsSet options as a list of strings."""
return [str(option) for option in list(OpsSet)]
class ConverterError(Exception):
  """Raised when an error occurs during model conversion."""
def toco_convert_protos(model_flags_str,
                        toco_flags_str,
                        input_data_str,
                        debug_info_str=None,
                        enable_mlir_converter=False):
  """Convert `input_data_str` according to model and toco parameters.

  Unless you know what you are doing consider using
  the more friendly `tf.compat.v1.lite.toco_convert`.

  Args:
    model_flags_str: Serialized proto describing model properties, see
      `toco/model_flags.proto`.
    toco_flags_str: Serialized proto describing conversion properties, see
      `toco/toco_flags.proto`.
    input_data_str: Input data in serialized form (e.g. a graphdef is common)
    debug_info_str: Serialized `GraphDebugInfo` proto describing logging
      information. (default None)
    enable_mlir_converter: Enables MLIR-based conversion instead of the default
      TOCO conversion. (default False)

  Returns:
    Converted model in serialized form (e.g. a TFLITE model is common).

  Raises:
    ConverterError: When conversion fails in TFLiteConverter, usually due to
      ops not being supported.
    RuntimeError: When conversion fails, an exception is raised with the error
      message embedded.
  """
  # TODO(aselle): When toco does not use fatal errors for failure, we can
  # switch this on.
  if not _toco_from_proto_bin:
    # In-process conversion path (no external binary configured).
    try:
      model_str = wrap_toco.wrapped_toco_convert(model_flags_str,
                                                 toco_flags_str, input_data_str,
                                                 debug_info_str,
                                                 enable_mlir_converter)
      return model_str
    except Exception as e:
      raise ConverterError(str(e))

  # Windows and TemporaryFile are not that useful together,
  # since you cannot have two readers/writers. So we have to
  # make the temporaries and close and delete them explicitly.
  # All names are initialized to None so the cleanup loop in `finally` is
  # safe even if an exception fires before a given file is created.
  toco_filename, model_filename, input_filename, output_filename = (
      None, None, None, None)
  debug_filename = None
  try:
    # Build all input files
    with _tempfile.NamedTemporaryFile(delete=False) as fp_toco, \
             _tempfile.NamedTemporaryFile(delete=False) as fp_model, \
             _tempfile.NamedTemporaryFile(delete=False) as fp_input, \
             _tempfile.NamedTemporaryFile(delete=False) as fp_debug:
      toco_filename = fp_toco.name
      input_filename = fp_input.name
      model_filename = fp_model.name
      debug_filename = fp_debug.name

      fp_model.write(model_flags_str)
      fp_toco.write(toco_flags_str)
      fp_input.write(six.ensure_binary(input_data_str))
      debug_info_str = debug_info_str if debug_info_str else ""
      # if debug_info_str contains a "string value", then the call to
      # fp_debug.write(debug_info_str) will fail with the following error
      #
      # TypeError: a bytes-like object is required, not 'str'
      #
      # Some of the subtests within the "convert_test" unit-test fail
      # with the error shown above. So watch out for that scenario and
      # convert debug_info_str to bytes where needed
      if not isinstance(debug_info_str, bytes):
        fp_debug.write(debug_info_str.encode("utf-8"))
      else:
        fp_debug.write(debug_info_str)

    # Reserve an output file
    with _tempfile.NamedTemporaryFile(delete=False) as fp:
      output_filename = fp.name

    # Run
    cmd = [
        _toco_from_proto_bin,
        model_filename,
        toco_filename,
        input_filename,
        output_filename,
        "--debug_proto_file={}".format(debug_filename),
    ]
    if enable_mlir_converter:
      cmd.append("--enable_mlir_converter")
    cmdline = " ".join(cmd)
    is_windows = _platform.system() == "Windows"
    proc = _subprocess.Popen(
        cmdline,
        shell=True,
        stdout=_subprocess.PIPE,
        stderr=_subprocess.STDOUT,
        close_fds=not is_windows)
    stdout, stderr = proc.communicate()
    exitcode = proc.returncode
    if exitcode == 0:
      with open(output_filename, "rb") as fp:
        return fp.read()
    else:
      stdout = _try_convert_to_unicode(stdout)
      stderr = _try_convert_to_unicode(stderr)
      raise ConverterError("See console for info.\n%s\n%s\n" % (stdout, stderr))
  finally:
    # Must manually cleanup files.
    # Fix: debug_filename was previously missing from this list, leaking one
    # temporary file per conversion.
    for filename in [
        toco_filename, input_filename, model_filename, output_filename,
        debug_filename]:
      try:
        _os.unlink(filename)
      except (OSError, TypeError):
        pass
def build_toco_convert_protos(input_tensors,
                              output_tensors,
                              inference_type=lite_constants.FLOAT,
                              inference_input_type=None,
                              input_format=lite_constants.TENSORFLOW_GRAPHDEF,
                              input_shapes=None,
                              output_format=lite_constants.TFLITE,
                              quantized_input_stats=None,
                              default_ranges_stats=None,
                              drop_control_dependency=True,
                              reorder_across_fake_quant=False,
                              allow_custom_ops=False,
                              change_concat_input_ranges=False,
                              post_training_quantize=False,
                              quantize_to_float16=False,
                              dump_graphviz_dir=None,
                              dump_graphviz_video=False,
                              target_ops=None,
                              allow_nonexistent_arrays=False,
                              debug_info=None,
                              conversion_summary_dir=None):
  """Builds protocol buffers describing a conversion of a model using TOCO.

  Typically this is to convert from TensorFlow GraphDef to TFLite, in which
  case the default `input_format` and `output_format` are sufficient.

  Args:
    input_tensors: List of input tensors. Type and shape are computed using
      `foo.shape` and `foo.dtype`.
    output_tensors: List of output tensors (only .name is used from this).
    inference_type: Target data type of real-number arrays in the output file.
      Must be `{tf.float32, tf.uint8}`. (default tf.float32)
    inference_input_type: Target data type of real-number input arrays. Allows
      for a different type for input arrays in the case of quantization.
      Must be `{tf.float32, tf.uint8}`. (default `inference_type`)
    input_format: Type of data to read Currently must be
      `{TENSORFLOW_GRAPHDEF}`. (default TENSORFLOW_GRAPHDEF)
    input_shapes: Input array shape. It needs to be a list of the same length
      as `input_tensors`, or None. (default None)
    output_format: Output file format. Currently must be `{TFLITE,
      GRAPHVIZ_DOT}`. (default TFLITE)
    quantized_input_stats: List of tuples of floats representing the mean and
      standard deviation. Each tuple maps to the corresponding input tensor.
      Only need if `inference_input_type` is `QUANTIZED_UINT8`.
      real_input_value = (quantized_input_value - mean_value) / std_dev_value.
      (default None)
    default_ranges_stats: Tuple of integers representing (min, max) range values
      for all arrays without a specified range. Intended for experimenting with
      quantization via "dummy quantization". (default None)
    drop_control_dependency: Boolean indicating whether to drop control
      dependencies silently. This is due to TFLite not supporting control
      dependencies. (default True)
    reorder_across_fake_quant: Boolean indicating whether to reorder FakeQuant
      nodes in unexpected locations. Used when the location of the FakeQuant
      nodes is preventing graph transformations necessary to convert the graph.
      Results in a graph that differs from the quantized training graph,
      potentially causing differing arithmetic behavior. (default False)
    allow_custom_ops: Boolean indicating whether to allow custom operations.
      When false any unknown operation is an error. When true, custom ops are
      created for any op that is unknown. The developer will need to provide
      these to the TensorFlow Lite runtime with a custom resolver.
      (default False)
    change_concat_input_ranges: Boolean to change behavior of min/max ranges for
      inputs and outputs of the concat operator for quantized models. Changes
      the ranges of concat operator overlap when true. (default False)
    post_training_quantize: Boolean indicating whether to quantize the weights
      of the converted float model. Model size will be reduced and there will be
      latency improvements (at the cost of accuracy).
      (default False)
    quantize_to_float16: Boolean indicating whether to convert float buffers
      to float16. (default False)
    dump_graphviz_dir: Full filepath of folder to dump the graphs at various
      stages of processing GraphViz .dot files. Preferred over
      --output_format=GRAPHVIZ_DOT in order to keep the requirements of the
      output file. (default None)
    dump_graphviz_video: Boolean indicating whether to dump the graph after
      every graph transformation. (default False)
    target_ops: Experimental flag, subject to change. Set of OpsSet
      options indicating which converter to use.
      (default set([OpsSet.TFLITE_BUILTINS]))
    allow_nonexistent_arrays: Allow specifying array names that don't exist
      or are unused in the final graph. (default False)
    debug_info: `GraphDebugInfo` proto containing the stack traces for the
      original nodes referred by the converted graph.
    conversion_summary_dir: A string, the path to the generated conversion logs.

  Returns:
    model_flags, toco_flags, debug_info: three protocol buffers describing the
      conversion process and debug information.

  Raises:
    ValueError:
      If the input tensor type is unknown
      Missing mean_values or std_dev_values
    RuntimeError: If TOCO fails to convert (in which case the runtime error's
      error text will contain the TOCO error log)
  """
  # Converter-level flags.
  toco = _toco_flags_pb2.TocoFlags()
  toco.input_format = input_format
  toco.output_format = output_format
  toco.inference_type = util.convert_dtype_to_tflite_type(inference_type)
  if inference_input_type:
    toco.inference_input_type = util.convert_dtype_to_tflite_type(
        inference_input_type)
  else:
    # Input type defaults to the overall inference type.
    toco.inference_input_type = toco.inference_type
  toco.drop_control_dependency = drop_control_dependency
  toco.reorder_across_fake_quant = reorder_across_fake_quant
  toco.allow_custom_ops = allow_custom_ops
  toco.post_training_quantize = post_training_quantize
  toco.quantize_to_float16 = quantize_to_float16
  if default_ranges_stats:
    toco.default_ranges_min = default_ranges_stats[0]
    toco.default_ranges_max = default_ranges_stats[1]
  if dump_graphviz_dir:
    toco.dump_graphviz_dir = dump_graphviz_dir
  toco.dump_graphviz_include_video = dump_graphviz_video
  if conversion_summary_dir:
    toco.conversion_summary_dir = conversion_summary_dir
  if target_ops:
    # {TFLITE_BUILTINS, SELECT_TF_OPS} enables the TF-ops fallback;
    # {SELECT_TF_OPS} alone additionally forces every op through it.
    if set(target_ops) == set([OpsSet.TFLITE_BUILTINS, OpsSet.SELECT_TF_OPS]):
      toco.enable_select_tf_ops = True
    elif set(target_ops) == set([OpsSet.SELECT_TF_OPS]):
      toco.enable_select_tf_ops = True
      toco.force_select_tf_ops = True

  # Model-level flags: one input_arrays entry per input tensor.
  model = _model_flags_pb2.ModelFlags()
  model.change_concat_input_ranges = change_concat_input_ranges
  for idx, input_tensor in enumerate(input_tensors):
    input_array = model.input_arrays.add()
    input_array.name = util.get_tensor_name(input_tensor)
    input_array.data_type = util.convert_dtype_to_tflite_type(
        input_tensor.dtype)

    if toco.inference_input_type == _types_pb2.QUANTIZED_UINT8:
      # Quantized inputs require per-tensor (mean, std_dev) stats.
      if not quantized_input_stats:
        raise ValueError("std_dev and mean must be defined when "
                         "inference_input_type is QUANTIZED_UINT8.")
      input_array.mean_value, input_array.std_value = quantized_input_stats[idx]
    if input_shapes is None:
      shape = input_tensor.shape
    else:
      shape = input_shapes[idx]
    input_array.shape.dims.extend(list(map(int, shape)))

  for output_tensor in output_tensors:
    model.output_arrays.append(util.get_tensor_name(output_tensor))

  model.allow_nonexistent_arrays = allow_nonexistent_arrays

  return model, toco, debug_info
def toco_convert_graph_def(input_data, input_arrays_with_shape, output_arrays,
                           enable_mlir_converter, *args, **kwargs):
  """"Convert a model using TOCO.

  This function is used to convert GraphDefs that cannot be loaded into
  TensorFlow to TFLite. Conversion can be customized by providing arguments
  that are forwarded to `build_toco_convert_protos` (see documentation for
  details).

  Args:
    input_data: Input data (i.e. often `sess.graph_def`),
    input_arrays_with_shape: Tuple of strings representing input tensor names
      and list of integers representing input shapes
      (e.g., [("foo" : [1, 16, 16, 3])]). Use only when graph cannot be loaded
      into TensorFlow and when `input_tensors` is None. (default None)
    output_arrays: List of output tensors to freeze graph with. Use only when
      graph cannot be loaded into TensorFlow and when `output_tensors` is None.
      (default None)
    enable_mlir_converter: Enables MLIR-based conversion instead of TOCO
      conversion.
    *args: See `build_toco_convert_protos`,
    **kwargs: See `build_toco_convert_protos`.

  Returns:
    The converted data. For example if TFLite was the destination, then
    this will be a tflite flatbuffer in a bytes array.

  Raises:
    Defined in `build_toco_convert_protos`.
  """
  model_flags, toco_flags, _ = build_toco_convert_protos(
      input_tensors=[], output_tensors=[], *args, **kwargs)

  # Quantized inputs require caller-supplied (mean, std_dev) statistics.
  needs_quant_stats = (
      toco_flags.inference_input_type == _types_pb2.QUANTIZED_UINT8)
  for index, (array_name, array_shape) in enumerate(input_arrays_with_shape):
    input_array = model_flags.input_arrays.add()
    if needs_quant_stats:
      if not kwargs.get("quantized_input_stats"):
        raise ValueError("std_dev and mean must be defined when "
                         "inference_input_type is QUANTIZED_UINT8.")
      input_array.mean_value, input_array.std_value = kwargs[
          "quantized_input_stats"][index]
    input_array.name = array_name
    input_array.shape.dims.extend([int(dim) for dim in array_shape])

  for array_name in output_arrays:
    model_flags.output_arrays.append(array_name)

  return toco_convert_protos(
      model_flags.SerializeToString(),
      toco_flags.SerializeToString(),
      input_data.SerializeToString(),
      enable_mlir_converter=enable_mlir_converter)
def toco_convert_impl(input_data, input_tensors, output_tensors,
                      enable_mlir_converter, *args, **kwargs):
  """"Convert a model using TOCO.

  Typically this function is used to convert from TensorFlow GraphDef to TFLite.
  Conversion can be customized by providing arguments that are forwarded to
  `build_toco_convert_protos` (see documentation for details).

  Args:
    input_data: Input data (i.e. often `sess.graph_def`),
    input_tensors: List of input tensors. Type and shape are computed using
      `foo.shape` and `foo.dtype`.
    output_tensors: List of output tensors (only .name is used from this).
    enable_mlir_converter: Enables MLIR-based conversion instead of TOCO
      conversion.
    *args: See `build_toco_convert_protos`,
    **kwargs: See `build_toco_convert_protos`.

  Returns:
    The converted data. For example if TFLite was the destination, then
    this will be a tflite flatbuffer in a bytes array.

  Raises:
    Defined in `build_toco_convert_protos`.
  """
  model_flags, toco_flags, debug_info = build_toco_convert_protos(
      input_tensors, output_tensors, *args, **kwargs)
  # Debug info is optional; serialize it only when present.
  if debug_info:
    serialized_debug_info = debug_info.SerializeToString()
  else:
    serialized_debug_info = None
  return toco_convert_protos(
      model_flags.SerializeToString(),
      toco_flags.SerializeToString(),
      input_data.SerializeToString(),
      debug_info_str=serialized_debug_info,
      enable_mlir_converter=enable_mlir_converter)
@_tf_export(v1=["lite.toco_convert"])
@deprecation.deprecated(None, "Use `lite.TFLiteConverter` instead.")
def toco_convert(input_data, input_tensors, output_tensors, *args, **kwargs):
"""Convert a model using TOCO.
Typically this function is used to convert from TensorFlow GraphDef to TFLite.
Conversion can be customized by providing arguments that are forwarded to
`build_toco_convert_protos` (see documentation for details). This function has
been deprecated. Please use `lite.TFLiteConverter` instead.
Args:
input_data: Input data (i.e. often `sess.graph_def`),
input_tensors: List of input tensors. Type and shape are computed using
`foo.shape` and `foo.dtype`.
output_tensors: List of output tensors (only .name is used from this).
*args: See `build_toco_convert_protos`,
**kwargs: See `build_toco_convert_protos`.
Returns:
The converted data. For example if TFLite was the destination, then
this will be a tflite flatbuffer in a bytes array.
Raises:
Defined in `build_toco_convert_protos`.
"""
enable_mlir_converter = kwargs.get("enable_mlir_converter", False)
return toco_convert_impl(input_data, input_tensors, output_tensors,
enable_mlir_converter, *args, **kwargs)
| 42.44877 | 80 | 0.714844 |
acec1f243f0343231afc6119d23f3f29228e43be | 2,242 | py | Python | config/settings/local.py | allyjweir/daily-q-a | f5ec240f55776b6555c9f1e62e169067717a98c7 | [
"MIT"
] | null | null | null | config/settings/local.py | allyjweir/daily-q-a | f5ec240f55776b6555c9f1e62e169067717a98c7 | [
"MIT"
] | null | null | null | config/settings/local.py | allyjweir/daily-q-a | f5ec240f55776b6555c9f1e62e169067717a98c7 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Local settings
- Run in Debug mode
- Use console backend for emails
- Add Django Debug Toolbar
- Add django-extensions as app
"""
import socket
import os
from .common import * # noqa
# DEBUG
# ------------------------------------------------------------------------------
DEBUG = env.bool('DJANGO_DEBUG', default=True)
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG

# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key only used for development and testing.
SECRET_KEY = env('DJANGO_SECRET_KEY', default='vb@)ci2&yfyura(x1b-1iorlj6k0y_350u7pvju1%b56%yw752')

# Mail settings
# ------------------------------------------------------------------------------
# Port 1025 is the conventional port of local SMTP debugging servers
# (e.g. MailHog / maildump); the console backend prints mail to stdout anyway.
EMAIL_PORT = 1025

EMAIL_HOST = 'localhost'
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND',
                    default='django.core.mail.backends.console.EmailBackend')


# CACHING
# ------------------------------------------------------------------------------
# In-memory cache: per-process, cleared on restart -- fine for development.
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        'LOCATION': ''
    }
}

# django-debug-toolbar
# ------------------------------------------------------------------------------
MIDDLEWARE += ('debug_toolbar.middleware.DebugToolbarMiddleware',)
INSTALLED_APPS += ('debug_toolbar', )

# 10.0.2.2 is the host address as seen from a VirtualBox/Vagrant guest.
INTERNAL_IPS = ['127.0.0.1', '10.0.2.2', ]
# tricks to have debug toolbar when developing with docker
if os.environ.get('USE_DOCKER') == 'yes':
    ip = socket.gethostbyname(socket.gethostname())
    # NOTE(review): replaces the last character of the container IP with "1"
    # to reach the docker bridge gateway -- assumes a x.y.z.<single-digit>
    # container address; verify against your docker network setup.
    INTERNAL_IPS += [ip[:-1] + "1"]

DEBUG_TOOLBAR_CONFIG = {
    'DISABLE_PANELS': [
        'debug_toolbar.panels.redirects.RedirectsPanel',
    ],
    'SHOW_TEMPLATE_CONTEXT': True,
}

# django-extensions
# ------------------------------------------------------------------------------
INSTALLED_APPS += ('django_extensions', )

# TESTING
# ------------------------------------------------------------------------------
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
| 29.893333 | 99 | 0.498662 |
acec1f27352a152ddbb1462e45cb762bc0a61858 | 2,911 | py | Python | book_figures/chapter5/fig_bayes_blocks.py | StKyr/astroML_figures | 45e9748335e0cd854d09319dff0e43ecd70e7b61 | [
"BSD-2-Clause"
] | 6 | 2019-08-31T16:43:43.000Z | 2021-07-10T06:06:20.000Z | book_figures/chapter5/fig_bayes_blocks.py | StKyr/astroML_figures | 45e9748335e0cd854d09319dff0e43ecd70e7b61 | [
"BSD-2-Clause"
] | 34 | 2018-09-10T22:35:07.000Z | 2022-02-08T21:17:39.000Z | book_figures/chapter5/fig_bayes_blocks.py | StKyr/astroML_figures | 45e9748335e0cd854d09319dff0e43ecd70e7b61 | [
"BSD-2-Clause"
] | 10 | 2017-06-22T09:21:19.000Z | 2020-01-26T03:54:26.000Z | """
Distribution Representation Comparison
--------------------------------------
Figure 5.21
Comparison of Knuth's histogram and a Bayesian blocks histogram. The adaptive
bin widths of the Bayesian blocks histogram yield a better representation of
the underlying data, especially with fewer points.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
from scipy import stats
from astropy.visualization import hist
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
if "setup_text_plots" not in globals():
    from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)

#------------------------------------------------------------
# Generate our data: a mix of several Cauchy distributions
# Fixed seed keeps the generated figure reproducible.
np.random.seed(0)
N = 10000
# Each tuple is (mu, gamma, f): location, scale and mixture weight of one
# Cauchy component.
mu_gamma_f = [(5, 1.0, 0.1),
              (7, 0.5, 0.5),
              (9, 0.1, 0.1),
              (12, 0.5, 0.2),
              (14, 1.0, 0.1)]
true_pdf = lambda x: sum([f * stats.cauchy(mu, gamma).pdf(x)
                          for (mu, gamma, f) in mu_gamma_f])
# Draw int(f * N) samples from each component and pool them.
x = np.concatenate([stats.cauchy(mu, gamma).rvs(int(f * N))
                    for (mu, gamma, f) in mu_gamma_f])
np.random.shuffle(x)
# Truncate the heavy Cauchy tails to a finite window.
x = x[x > -10]
x = x[x < 30]

#------------------------------------------------------------
# plot the results
fig = plt.figure(figsize=(5, 5))
fig.subplots_adjust(bottom=0.08, top=0.95, right=0.95, hspace=0.1)

# Two panels: the same comparison with 500 and with 5000 points.
N_values = (500, 5000)
subplots = (211, 212)

for N, subplot in zip(N_values, subplots):
    ax = fig.add_subplot(subplot)
    xN = x[:N]
    t = np.linspace(-10, 30, 1000)

    # plot the results
    # Tick marks just below y=0 show the raw sample positions ("rug plot").
    ax.plot(xN, -0.005 * np.ones(len(xN)), '|k')
    hist(xN, bins='knuth', ax=ax, density=True,
         histtype='stepfilled', alpha=0.3,
         label='Knuth Histogram')
    hist(xN, bins='blocks', ax=ax, density=True,
         histtype='step', color='k',
         label="Bayesian Blocks")
    ax.plot(t, true_pdf(t), '-', color='black',
            label="Generating Distribution")

    # label the plot
    ax.text(0.02, 0.95, "%i points" % N, ha='left', va='top',
            transform=ax.transAxes)
    ax.set_ylabel('$p(x)$')
    ax.legend(loc='upper right', prop=dict(size=8))

    # Only the bottom panel carries the shared x-axis label.
    if subplot == 212:
        ax.set_xlabel('$x$')

    ax.set_xlim(0, 20)
    ax.set_ylim(-0.01, 0.4001)

plt.show()
| 34.247059 | 79 | 0.595672 |
acec1f2f5fa2eb050fd3567888a07d9783862d5e | 5,799 | py | Python | scripts/3.0-am-pl-mlp-char-disaster-tweets.py | adityamangal410/nlp_with_pytorch | 81919102339ee483210f366aeaec0dd30273a846 | [
"MIT"
] | 2 | 2020-12-26T07:32:42.000Z | 2021-01-16T19:07:13.000Z | scripts/3.0-am-pl-mlp-char-disaster-tweets.py | adityamangal410/nlp_with_pytorch | 81919102339ee483210f366aeaec0dd30273a846 | [
"MIT"
] | null | null | null | scripts/3.0-am-pl-mlp-char-disaster-tweets.py | adityamangal410/nlp_with_pytorch | 81919102339ee483210f366aeaec0dd30273a846 | [
"MIT"
] | null | null | null | from argparse import ArgumentParser
import numpy as np
import pandas as pd
import pytorch_lightning as pl
import torch
import torch.utils.data
from torch.utils.data import Dataset
import sh
from sklearn.metrics import f1_score, accuracy_score, roc_auc_score
# Remove stale TensorBoard logs from previous runs so each run starts clean.
sh.rm('-r', '-f', 'lightning_logs/disaster_tweets')
class DisasterTweetsClassifier(pl.LightningModule):
    """4-layer MLP that classifies disaster tweets from a-z letter counts.

    Each tweet is featurized as a 26-dimensional vector of lowercase-letter
    frequencies; the network outputs a 2-class logit vector.

    NOTE(review): the dataloaders read the module-level ``args`` namespace
    (batch_size, num_workers, learning_rate) parsed in ``__main__``.
    """

    def __init__(self):
        super().__init__()
        # 26 letter counts -> 32 -> 16 -> 8 -> 2 logits.
        self.fc1 = torch.nn.Linear(26, 32)
        self.fc2 = torch.nn.Linear(32, 16)
        self.fc3 = torch.nn.Linear(16, 8)
        self.fc4 = torch.nn.Linear(8, 2)
        # reduction='none' keeps per-sample losses; the step/epoch hooks
        # aggregate them explicitly.
        self.loss = torch.nn.CrossEntropyLoss(reduction='none')

    def setup(self, stage):
        """Build train/val/test datasets from the pre-split CSV on disk."""

        class TweetsDataset(Dataset):
            """In-memory dataset of (letter-count vector, target) pairs."""

            def __init__(self, split='train'):
                df = pd.read_csv('../data/processed/nlp_with_disaster_tweets/train_with_splits.csv')
                df = df[df.split == split]

                def _get_char_counts(text):
                    # 26-d histogram of a-z occurrences in `text`.
                    vec = np.zeros(26)
                    for word in text.split(' '):
                        for letter in word:
                            if letter.isalpha():
                                # Lower-case and bounds-check: an uppercase or
                                # non-ASCII alphabetic char would otherwise give
                                # a negative index (IndexError or a silently
                                # wrong bin via negative-index wraparound).
                                idx = ord(letter.lower()) - ord('a')
                                if 0 <= idx < 26:
                                    vec[idx] += 1
                    return vec

                X = df.text.apply(_get_char_counts).apply(pd.Series).values
                y = df.target.values
                # Convert to tensors
                self.X = torch.tensor(X, dtype=torch.float32)
                self.y = torch.tensor(y)

            def __len__(self):
                return len(self.y)

            def __getitem__(self, idx):
                return {'x_data': self.X[idx], 'y_target': self.y[idx]}

        self.train_ds = TweetsDataset()
        self.val_ds = TweetsDataset(split='val')
        self.test_ds = TweetsDataset(split='test')

    def train_dataloader(self):
        # Shuffle and drop the ragged last batch for stable training steps.
        return torch.utils.data.DataLoader(
            self.train_ds,
            batch_size=args.batch_size,
            shuffle=True,
            drop_last=True,
            num_workers=args.num_workers
        )

    def val_dataloader(self):
        return torch.utils.data.DataLoader(
            self.val_ds,
            batch_size=args.batch_size,
            shuffle=False,
            drop_last=False,
            num_workers=args.num_workers
        )

    def forward(self, batch):
        """Map a (batch, 26) count tensor to (batch, 2) class logits."""
        int1 = torch.nn.functional.relu(self.fc1(batch.float()))
        int2 = torch.nn.functional.relu(self.fc2(int1))
        int3 = torch.nn.functional.relu(self.fc3(int2))
        output = self.fc4(int3)
        return output

    def training_step(self, batch, batch_idx):
        y_pred = self(batch['x_data'])
        # Per-sample losses reduced to a scalar for backprop.
        loss = self.loss(y_pred, batch['y_target']).mean()
        return {'loss': loss, 'log': {'train_loss': loss}}

    def validation_step(self, batch, batch_idx):
        # Keep per-sample loss/accuracy tensors; the epoch hook averages them.
        y_pred = self(batch['x_data'])
        loss = self.loss(y_pred, batch['y_target'])
        acc = (y_pred.argmax(-1) == batch['y_target']).float()
        return {'loss': loss, 'acc': acc}

    def validation_epoch_end(self, outputs):
        loss = torch.cat([o['loss'] for o in outputs], 0).mean()
        acc = torch.cat([o['acc'] for o in outputs], 0).mean()
        out = {'val_loss': loss, 'val_acc': acc}
        return {**out, 'log': out}

    def configure_optimizers(self):
        return torch.optim.Adam(self.parameters(), lr=args.learning_rate)

    def test_dataloader(self):
        return torch.utils.data.DataLoader(
            self.test_ds,
            batch_size=args.batch_size,
            shuffle=False,
            drop_last=False,
            num_workers=args.num_workers
        )

    def test_step(self, batch, batch_idx):
        y_pred = self(batch['x_data'])
        loss = self.loss(y_pred, batch['y_target'])
        acc = (y_pred.argmax(-1) == batch['y_target']).float()
        return {'loss': loss, 'acc': acc}

    def test_epoch_end(self, outputs):
        loss = torch.cat([o['loss'] for o in outputs], 0).mean()
        acc = torch.cat([o['acc'] for o in outputs], 0).mean()
        out = {'test_loss': loss, 'test_acc': acc}
        return {**out, 'log': out}
def predict_target(text, classifier, text_vector=None):
    """Classify a single tweet with a trained classifier.

    Args:
        text: raw tweet string; ignored when ``text_vector`` is supplied.
        classifier: callable mapping a (1, 26) float tensor to (1, 2) logits.
        text_vector: optional precomputed 26-d letter-count tensor.

    Returns:
        dict with 'pred' (predicted class index) and 'probability'
        (softmax confidence of that class).
    """
    if text_vector is None:
        vec = np.zeros(26)
        for word in text.split(' '):
            for letter in word:
                if letter.isalpha():
                    # Lower-case and bounds-check: uppercase letters would
                    # otherwise yield negative indices ('A'-'F' raise
                    # IndexError; 'G'-'Z' silently wrap to the wrong bin).
                    idx = ord(letter.lower()) - ord('a')
                    if 0 <= idx < 26:
                        vec[idx] += 1
        text_vector = torch.tensor(vec, dtype=torch.float32)
    pred = torch.nn.functional.softmax(classifier(text_vector.unsqueeze(dim=0)), dim=1)
    probability, target = pred.max(dim=1)
    return {'pred': target.item(), 'probability': probability.item()}
def predict_on_dataset(classifier, ds):
    """Run ``predict_target`` on every sample of ``ds`` and print metrics.

    Args:
        classifier: callable mapping a (1, 26) float tensor to (1, 2) logits.
        ds: iterable of samples shaped like {'x_data': tensor, 'y_target': tensor}.

    Returns:
        DataFrame with one row per sample: target, pred, probability.
    """
    # Accumulate rows in a plain list: DataFrame.append was deprecated in
    # pandas 1.4 and removed in 2.0, and row-by-row appends are quadratic.
    records = []
    for sample in iter(ds):
        result = predict_target(text=None, classifier=classifier, text_vector=sample['x_data'])
        result['target'] = sample['y_target'].item()
        records.append(result)
    df = pd.DataFrame(records, columns=["target", "pred", "probability"])
    f1 = f1_score(df.target, df.pred)
    acc = accuracy_score(df.target, df.pred)
    roc_auc = roc_auc_score(df.target, df.probability)
    print("Result metrics - \n Accuracy={} \n F1-Score={} \n ROC-AUC={}".format(acc, f1, roc_auc))
    return df
if __name__ == '__main__':
    # Build the CLI: Lightning's Trainer flags plus our own hyperparameters.
    parser = pl.Trainer.add_argparse_args(ArgumentParser())
    parser.add_argument('--batch_size', default=8, type=int)
    parser.add_argument('--learning_rate', default=1e-4, type=float)
    parser.add_argument('--num_workers', default=0, type=int)
    # `args` must stay module-level: the LightningModule's dataloaders and
    # optimizer read it directly.
    args = parser.parse_args()

    classifier = DisasterTweetsClassifier()
    trainer = pl.Trainer.from_argparse_args(args)
    tb_logger = pl.loggers.TensorBoardLogger('lightning_logs/', name='disaster_tweets')
    trainer.configure_logger(tb_logger)
    trainer.fit(classifier)
| 34.724551 | 101 | 0.599069 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.