# -*- coding: utf-8 -*-
# *****************************************************************************
# Copyright (c) 2021, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
import numba
import numpy as np
import pandas as pd
import unittest
from itertools import (combinations_with_replacement, product, combinations, )
from numba.core import types
from sdc.tests.indexes.index_datagens import (
test_global_index_names,
_generate_multi_indexes_fixed,
_generate_multi_index_levels_unique,
_generate_multi_index_levels,
_generate_multi_indexes,
_get_multi_index_base_index,
get_sample_index,
get_codes_from_levels,
)
from sdc.tests.test_base import TestCase
from sdc.datatypes.indexes import *
from sdc.tests.test_utils import skip_numba_jit, assert_pandas_exception
class TestMultiIndex(TestCase):
def test_multi_index_type_inferred(self):
for index, name in product(_generate_multi_indexes(),
test_global_index_names):
with self.subTest(index=index):
native_index_type = numba.typeof(index)
self.assertIsInstance(native_index_type, MultiIndexType)
index.name = name
with self.subTest(index=index):
native_index_type = numba.typeof(index)
self.assertIsInstance(native_index_type, MultiIndexType)
def test_multi_index_create_and_box(self):
def test_impl(levels, codes):
return pd.MultiIndex(levels, codes)
sdc_func = self.jit(test_impl)
n = 11
np.random.seed(0)
for data in _generate_multi_index_levels_unique():
# creating pd.MultiIndex is only supported with levels and codes as tuples
levels = tuple(data)
codes = tuple(get_codes_from_levels(n, levels))
with self.subTest(levels=levels, codes=codes):
result = sdc_func(levels, codes)
result_ref = test_impl(levels, codes)
pd.testing.assert_index_equal(result, result_ref)
def test_multi_index_create_invalid_inputs(self):
def test_impl(levels, codes):
return pd.MultiIndex(levels, codes)
sdc_func = self.jit(test_impl)
level_and_codes = [
(['a', 'b', 'c'], [3, 0, 1, 2, 2]), # code 3 is out of bounds
(['a', 'b', 'c'], [1, 0, 1, -2, 2]), # code -2 is out of bounds
(['a', 'b', 'c', 'a', 'b'], [1, 0, 1, 2, 2]) # duplicate labels in level
]
exc_strs = [
"On one of the levels code max >= length of level.",
"On one of the levels code value < -1",
"Level values must be unique",
]
for i, level_codes_pair in enumerate(level_and_codes):
levels, codes = (level_codes_pair[0], ), (level_codes_pair[1], )
test_msg = f"Inconsistent codes: levels={levels}, codes={codes}"
sdc_exc_str = exc_strs[i]
assert_pandas_exception(self, test_msg, sdc_exc_str, test_impl, sdc_func, (levels, codes))
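# Illustrative sketch (not part of the original suite, assumes plain pandas): each code is a
# positional index into its level, with -1 reserved for missing values, so for a level of
# length k the valid code range is [-1, k - 1] and level values must be unique, e.g.
#
#     pd.MultiIndex(levels=(['a', 'b', 'c'], ), codes=([0, 2, -1, 1], ))   # valid
#     pd.MultiIndex(levels=(['a', 'b', 'c'], ), codes=([3, 0, 1], ))       # raises ValueError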
def test_multi_index_create_from_tuples(self):
def test_impl():
codes_max = 5
levels = (
['a', 'b', 'c', 'd', 'e'],
np.arange(codes_max)
)
codes = (
np.arange(0, codes_max),
np.arange(codes_max, 0, -1) - 1,
)
return pd.MultiIndex(levels, codes)
sdc_func = self.jit(test_impl)
result = sdc_func()
result_ref = test_impl()
pd.testing.assert_index_equal(result, result_ref)
@skip_numba_jit("MultiIndexType ctor supports levels and codes as tuples only")
def test_multi_index_create_from_lists(self):
def test_impl():
codes_max = 5
levels = [
['a', 'b', 'c', 'd', 'e'],
np.arange(codes_max),
]
codes = [
np.arange(0, codes_max),
np.arange(codes_max, 0, -1) - 1,
]
return pd.MultiIndex(levels, codes)
sdc_func = self.jit(test_impl)
result = sdc_func()
result_ref = test_impl()
pd.testing.assert_index_equal(result, result_ref)
def test_multi_index_create_param_names(self):
# using keyword arguments in the typeref ctor is not supported due to a limitation of the __call__ overload,
# TO-DO: refactor this after @overload is supported for typerefs (see FIXME_Numba#7111):
def test_impl(levels, codes, names):
# return pd.MultiIndex(levels, codes, name=names)
return pd.MultiIndex(levels, codes, None, None, None, False, names)
sdc_func = self.jit(test_impl)
n = 11
max_codes = 5
all_levels = [
[5, 2, 1, 4, 3],
np.arange(max_codes),
pd.RangeIndex(max_codes),
pd.RangeIndex(max_codes, name='abc'),
pd.Int64Index([5, 2, 1, 4, 3]),
pd.Int64Index([5, 2, 1, 4, 3], name='bce'),
]
for data, names in product(combinations(all_levels, 2),
combinations_with_replacement(test_global_index_names, 2)):
# all parameters are supported as tuples only in pd.MultiIndex ctor
levels = tuple(data)
codes = tuple(get_codes_from_levels(n, levels))
_names = tuple(names)
with self.subTest(levels=levels, codes=codes, names=_names):
result = sdc_func(levels, codes, _names)
result_ref = test_impl(levels, codes, _names)
pd.testing.assert_index_equal(result, result_ref)
def test_multi_index_unbox_and_box(self):
def test_impl(index):
return index
sdc_func = self.jit(test_impl)
np.random.seed(0)
for index in _generate_multi_indexes():
with self.subTest(index=index):
result = sdc_func(index)
result_ref = test_impl(index)
pd.testing.assert_index_equal(result, result_ref)
def test_multi_index_attribute_dtype(self):
from numba.typed import List
# index dtype cannot be returned (boxed), so this test only checks that it can be used
def test_impl(index):
return List.empty_list(index.dtype)
sdc_func = self.jit(test_impl)
n = 11
index = get_sample_index(n, MultiIndexType)
result = sdc_func(index)
expected = types.Tuple.from_types([types.unicode_type, types.intp])
self.assertEqual(result._dtype, expected)
def test_multi_index_attribute_name(self):
def test_impl(index):
return index.name
sdc_func = self.jit(test_impl)
n = 11
index = get_sample_index(n, MultiIndexType)
for name in test_global_index_names:
index.name = name
with self.subTest(name=name):
result = sdc_func(index)
result_ref = test_impl(index)
self.assertEqual(result, result_ref)
@skip_numba_jit("StringArrayType as index has no name. TO-DO: StringIndexType")
def test_multi_index_attribute_names(self):
def test_impl(index):
return index.names
sdc_func = self.jit(test_impl)
np.random.seed(0)
for index in _generate_multi_indexes():
for names in combinations_with_replacement(
test_global_index_names,
index.nlevels):
index.names = names
with self.subTest(index=index):
result = sdc_func(index)
result_ref = test_impl(index)
self.assertEqual(result, result_ref)
def test_multi_index_attribute_nlevels(self):
def test_impl(index):
return index.nlevels
sdc_func = self.jit(test_impl)
np.random.seed(0)
for index in _generate_multi_indexes():
with self.subTest(index=index):
result = sdc_func(index)
result_ref = test_impl(index)
self.assertEqual(result, result_ref)
def test_multi_index_len(self):
def test_impl(index):
return len(index)
sdc_func = self.jit(test_impl)
np.random.seed(0)
for index in _generate_multi_indexes():
with self.subTest(index=index):
result = sdc_func(index)
result_ref = test_impl(index)
self.assertEqual(result, result_ref)
def test_multi_index_attribute_values(self):
def test_impl(index):
return index.values
sdc_func = self.jit(test_impl)
np.random.seed(0)
for index in _generate_multi_indexes():
with self.subTest(index_data=index):
result = sdc_func(index)
result_ref = test_impl(index)
# SDC MultiIndex.values returns a list, not a numpy array
self.assertEqual(result, list(result_ref))
def test_multi_index_attribute_levels(self):
def test_impl(index):
return index.levels
sdc_func = self.jit(test_impl)
np.random.seed(0)
for index in _generate_multi_indexes():
with self.subTest(index_data=index):
result = sdc_func(index)
result_ref = test_impl(index)
# SDC MultiIndex.levels returns a tuple of levels, not a list
error_msg = f"Indexes' levels are different:\nresult={result},\nresult_ref={result_ref}"
self.assertEqual(len(result), len(result_ref), error_msg)
for left, right in zip(result, result_ref):
    pd.testing.assert_index_equal(left, right)
def test_multi_index_attribute_codes(self):
def test_impl(index):
return index.codes
sdc_func = self.jit(test_impl)
np.random.seed(0)
for index in _generate_multi_indexes():
with self.subTest(index_data=index):
result = sdc_func(index)
result_ref = test_impl(index)
# SDC MultiIndex.codes returns a tuple of code arrays, not a list
error_msg = f"Indexes' codes are different:\nresult={result},\nresult_ref={result_ref}"
self.assertEqual(len(result), len(result_ref), error_msg)
for left, right in zip(result, result_ref):
    np.testing.assert_array_equal(left, right)
def test_multi_index_contains(self):
def test_impl(index, value):
return value in index
sdc_func = self.jit(test_impl)
n = 11
index = get_sample_index(n, MultiIndexType)
values_to_test = [('a', 1), ('a', 4), ('e', 1), ('x', 5)]
for value in values_to_test:
with self.subTest(value=value):
result = sdc_func(index, value)
result_ref = test_impl(index, value)
np.testing.assert_array_equal(result, result_ref)
def test_multi_index_getitem_scalar(self):
def test_impl(index, idx):
return index[idx]
sdc_func = self.jit(test_impl)
n = 11
index = get_sample_index(n, MultiIndexType)
idxs_to_test = [0, n // 2, n - 1, -1]
for idx in idxs_to_test:
with self.subTest(idx=idx):
result = sdc_func(index, idx)
result_ref = test_impl(index, idx)
self.assertEqual(result, result_ref)
def test_multi_index_getitem_scalar_idx_bounds(self):
def test_impl(index, idx):
return index[idx]
sdc_func = self.jit(test_impl)
n = 11
index = get_sample_index(n, MultiIndexType)
idxs_to_test = [-(n + 1), n]
for idx in idxs_to_test:
with self.subTest(idx=idx):
with self.assertRaises(Exception) as context:
test_impl(index, idx)
pandas_exception = context.exception
with self.assertRaises(type(pandas_exception)) as context:
sdc_func(index, idx)
sdc_exception = context.exception
self.assertIsInstance(sdc_exception, type(pandas_exception))
self.assertIn("out of bounds", str(sdc_exception))
def test_multi_index_getitem_slice(self):
def test_impl(index, idx):
return index[idx]
sdc_func = self.jit(test_impl)
n = 17
index = get_sample_index(n, MultiIndexType)
slices_params = combinations_with_replacement(
[None, 0, -1, n // 2, n, n - 3, n + 3, -(n + 3)],
2
)
for slice_start, slice_stop in slices_params:
for slice_step in [1, -1, 2]:
idx = slice(slice_start, slice_stop, slice_step)
with self.subTest(idx=idx):
result = sdc_func(index, idx)
result_ref = test_impl(index, idx)
pd.testing.assert_index_equal(result, result_ref)
def test_multi_index_iterator_1(self):
def test_impl(index):
res = []
for i, label in enumerate(index):
res.append((i, label))
return res
sdc_func = self.jit(test_impl)
n = 11
index = get_sample_index(n, MultiIndexType)
result = sdc_func(index)
result_ref = test_impl(index)
self.assertEqual(result, result_ref)
def test_multi_index_iterator_2(self):
def test_impl(index):
res = []
for label in index:
str_part, _ = label
if str_part == 'a':
res.append(label)
return res
sdc_func = self.jit(test_impl)
n = 11
index = get_sample_index(n, MultiIndexType)
result = sdc_func(index)
result_ref = test_impl(index)
self.assertEqual(result, result_ref)
@skip_numba_jit("Requires np.array of complex dtypes (tuples) support in Numba")
def test_multi_index_nparray(self):
def test_impl(index):
return np.array(index)
sdc_func = self.jit(test_impl)
n = 11
index = get_sample_index(n, MultiIndexType)
result = sdc_func(index)
result_ref = test_impl(index)
np.testing.assert_array_equal(result, result_ref)
def test_multi_index_operator_eq_index(self):
def test_impl(index1, index2):
return index1 == index2
sdc_func = self.jit(test_impl)
n = 11
np.random.seed(0)
indexes_to_test = list(_generate_multi_indexes_fixed(n))
for index1, index2 in combinations_with_replacement(indexes_to_test, 2):
with self.subTest(index1=index1, index2=index2):
result = np.asarray(sdc_func(index1, index2)) # FIXME_Numba#5157: remove np.asarray
result_ref = test_impl(index1, index2)
np.testing.assert_array_equal(result, result_ref)
def test_multi_index_operator_eq_scalar(self):
def test_impl(A, B):
return A == B
sdc_func = self.jit(test_impl)
n = 11
A = get_sample_index(n, MultiIndexType)
scalars_to_test = [('a', 1), ('a', 4), ('e', 1), ('x', 5)]
for B in scalars_to_test:
for swap_operands in (False, True):
if swap_operands:
A, B = B, A
with self.subTest(left=A, right=B):
result = np.asarray(sdc_func(A, B)) # FIXME_Numba#5157: remove np.asarray
result_ref = test_impl(A, B)
np.testing.assert_array_equal(result, result_ref)
@skip_numba_jit("Requires np.array of complex dtypes (tuples) support in Numba")
def test_multi_index_operator_eq_nparray(self):
def test_impl(A, B):
return A == B
sdc_func = self.jit(test_impl)
n = 11
for A, B in product(
_generate_multi_indexes_fixed(n),
map(lambda x: np.array(x), _generate_multi_indexes_fixed(n))
):
for swap_operands in (False, True):
if swap_operands:
A, B = B, A
with self.subTest(left=A, right=B):
result = np.asarray(sdc_func(A, B)) # FIXME_Numba#5157: remove np.asarray
result_ref = test_impl(A, B)
np.testing.assert_array_equal(result, result_ref)
def test_multi_index_operator_ne_index(self):
def test_impl(index1, index2):
return index1 != index2
sdc_func = self.jit(test_impl)
n = 11
np.random.seed(0)
indexes_to_test = list(_generate_multi_indexes_fixed(n))
for index1, index2 in combinations_with_replacement(indexes_to_test, 2):
with self.subTest(index1=index1, index2=index2):
result = np.asarray(sdc_func(index1, index2)) # FIXME_Numba#5157: remove np.asarray
result_ref = test_impl(index1, index2)
np.testing.assert_array_equal(result, result_ref)
def test_multi_index_operator_is_nounbox(self):
def test_impl_1():
index1 = pd.MultiIndex(
levels=(['a', 'b', 'c'], [1, 2, 3]),
codes=([0, 1, 0, 1, 2], [0, 0, 1, 1, 2])
)
index2 = index1
return index1 is index2
sdc_func_1 = self.jit(test_impl_1)
def test_impl_2():
index1 = pd.MultiIndex(
levels=(['a', 'b', 'c'], [1, 2, 3]),
codes=([0, 1, 0, 1, 2], [0, 0, 1, 1, 2])
)
index2 = pd.MultiIndex(
levels=(['a', 'b', 'c'], [1, 2, 3]),
codes=([0, 1, 0, 1, 2], [0, 0, 1, 1, 2])
)
return index1 is index2
sdc_func_2 = self.jit(test_impl_2)
# positive testcase
with self.subTest(subtest="same indexes"):
result = sdc_func_1()
result_ref = test_impl_1()
self.assertEqual(result, result_ref)
self.assertEqual(result, True)
# negative testcase
with self.subTest(subtest="not same indexes"):
result = sdc_func_2()
result_ref = test_impl_2()
self.assertEqual(result, result_ref)
self.assertEqual(result, False)
def test_multi_index_getitem_by_mask(self):
def test_impl(index, mask):
return index[mask]
sdc_func = self.jit(test_impl)
n = 11
np.random.seed(0)
mask = np.random.choice([True, False], n)
for index in _generate_multi_indexes_fixed(n):
result = sdc_func(index, mask)
result_ref = test_impl(index, mask)
pd.testing.assert_index_equal(result, result_ref)
def test_multi_index_getitem_by_array(self):
def test_impl(index, idx):
return index[idx]
sdc_func = self.jit(test_impl)
n, k = 11, 7
np.random.seed(0)
idx = np.random.choice(np.arange(n), k)
for index in _generate_multi_indexes_fixed(n):
result = sdc_func(index, idx)
result_ref = test_impl(index, idx)
pd.testing.assert_index_equal(result, result_ref)
def test_multi_index_reindex_equal_indexes(self):
def test_func(index1, index2):
return index1.reindex(index2)
sdc_func = self.jit(test_func)
n = 10
index1 = get_sample_index(n, MultiIndexType)
index2 = index1.copy(deep=True)
result = sdc_func(index1, index2)
result_ref = test_func(index1, index2)
pd.testing.assert_index_equal(result[0], result_ref[0])
np.testing.assert_array_equal(result[1], result_ref[1])
def test_multi_index_reindex(self):
def test_impl(index1, index2):
return index1.reindex(index2)
sdc_func = self.jit(test_impl)
n = 11
np.random.seed(0)
base_index = _get_multi_index_base_index(n)
index1 = base_index[:n]
size_range = np.arange(len(index1))
reindex_by = list(map(
lambda x: base_index.take(x),
[
size_range, # same index as index1
np.random.choice(size_range, n), # random values from index1 with duplicates
np.random.choice(size_range, n, replace=False), # random unique values from index1
np.random.choice(np.arange(len(base_index)), n), # random values from larger set
size_range[:n // 2], # shorter index
np.random.choice(size_range, 2*n), # longer index
]
))
for index2 in reindex_by:
with self.subTest(index2=index2):
result = sdc_func(index1, index2)
result_ref = test_impl(index1, index2)
pd.testing.assert_index_equal(result[0], result_ref[0])
np.testing.assert_array_equal(result[1], result_ref[1])
def test_multi_index_equals(self):
def test_impl(index1, index2):
return index1.equals(index2)
sdc_func = self.jit(test_impl)
n = 11
np.random.seed(0)
indexes_to_test = list(_generate_multi_indexes_fixed(n))
for index1, index2 in combinations_with_replacement(indexes_to_test, 2):
with self.subTest(index1=index1, index2=index2):
result = sdc_func(index1, index2)
result_ref = test_impl(index1, index2)
self.assertEqual(result, result_ref)
def test_multi_index_ravel(self):
def test_impl(index):
return index.ravel()
sdc_func = self.jit(test_impl)
n = 11
index = get_sample_index(n, MultiIndexType)
result = sdc_func(index)
result_ref = test_impl(index)
# SDC MultiIndex.values returns a list, not a numpy array
np.testing.assert_array_equal(result, list(result_ref))
def test_multi_index_take(self):
def test_impl(index, value):
return index.take(value)
sdc_func = self.jit(test_impl)
n = 11
np.random.seed(0)
index_pos = np.arange(n)
values_to_test = [
np.random.choice(index_pos, 2*n),
list(np.random.choice(index_pos, n, replace=False)),
pd.RangeIndex(n // 2),
pd.Int64Index(index_pos[n // 2:])
]
for index, value in product(_generate_multi_indexes_fixed(n), values_to_test):
with self.subTest(index=index, value=value):
result = sdc_func(index, value)
result_ref = test_impl(index, value)
pd.testing.assert_index_equal(result, result_ref)
def test_multi_index_append(self):
def test_impl(index, other):
return index.append(other)
sdc_func = self.jit(test_impl)
index = pd.MultiIndex.from_product([['a', 'b'], [1, 2]])
other = pd.MultiIndex.from_tuples(
[('a', 3), ('c', 1), ('c', 3), ('b', 2), ('b', 3)])
result = sdc_func(index, other)
result_ref = test_impl(index, other)
pd.testing.assert_index_equal(result, result_ref)
@skip_numba_jit("MultiIndexType.join is not implemented yet")
def test_multi_index_join(self):
def test_impl(index, other):
return index.join(other, 'outer', return_indexers=True)
sdc_func = self.jit(test_impl)
n = 11
np.random.seed(0)
indexes_to_test = list(_generate_multi_indexes_fixed(n))
for index, other in combinations_with_replacement(indexes_to_test, 2):
with self.subTest(index=index, other=other):
result = sdc_func(index, other)
result_ref = test_impl(index, other)
# check_names=False, since pandas behavior is not type-stable
pd.testing.assert_index_equal(result[0], result_ref[0], check_names=False)
np.testing.assert_array_equal(result[1], result_ref[1])
np.testing.assert_array_equal(result[2], result_ref[2])
def test_multi_index_from_product(self):
def test_impl(levels):
return pd.MultiIndex.from_product(levels)
sdc_func = self.jit(test_impl)
np.random.seed(0)
for data in _generate_multi_index_levels():
# creating pd.MultiIndex is only supported with levels and codes as tuples
levels = tuple(data)
with self.subTest(levels=levels):
result = sdc_func(levels)
result_ref = test_impl(levels)
pd.testing.assert_index_equal(result, result_ref)
def test_multi_index_from_tuples(self):
def test_impl(data):
return pd.MultiIndex.from_tuples(data)
sdc_func = self.jit(test_impl)
n = 100
np.random.seed(0)
for index in _generate_multi_indexes_fixed(n):
data = list(index.values)
with self.subTest(data=data):
result = sdc_func(data)
result_ref = test_impl(data)
pd.testing.assert_index_equal(result, result_ref)
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python
"""Copyright (c) 2014, Thomas Skowron
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."""
import os
import sys
import subprocess
import argparse
from math import radians, cos, sin, asin, sqrt
try:
from imposm.parser import OSMParser
except ImportError as e:
print("""It appears that imposm.parser is not installed.
In order to install dependencies, type: python setup.py install""")
sys.exit(0)
class OSMMeter(object):
def __init__(self, filepath, silent=True):
self.filepath = filepath
self.way_distances = []
self.coords = {}
self.ways = []
self.silent = silent
def calc_all_ways(self):
p = OSMParser(concurrency=4, coords_callback=self._read_coords,
ways_callback=self._read_ways)
if not self.silent:
print("Reading file")
p.parse(self.filepath)
if not self.silent:
print("Summing")
self._calc_ways()
return sum(self.way_distances)
def _read_coords(self, coordlist):
for coord in coordlist:
self.coords[coord[0]] = (coord[1], coord[2])
def _read_ways(self, waylist):
for way in waylist:
self.ways.append(way)
def _calc_ways(self):
for way in self.ways:
last_node = None
way_length = 0
for node_id in way[2]:
node = self.coords[node_id]
if last_node is not None:
way_length += self.haversine(last_node[0], last_node[1],
node[0], node[1])
last_node = (node[0], node[1])
self.way_distances.append(way_length)
def _read_ways_and_calc(self, waylist):
for way in waylist:
last_node = None
way_length = 0
for node_id in way[2]:
node = self.coords[node_id]
if last_node is not None:
way_length += self.haversine(last_node[0], last_node[1],
node[0], node[1])
last_node = (node[0], node[1])
self.way_distances.append(way_length)
def haversine(self, lon1, lat1, lon2, lat2):
EARTHRAD = 6367
lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2
c = 2 * asin(sqrt(a))
return EARTHRAD * c
def filter_and_process(filepath, filterargs, silent):
converted = False
if filepath.endswith(".pbf"):
o5mfile = "osmmesser_temp.o5m"
if not silent:
print("Converting file")
subprocess.call("osmconvert {} -o={}".format(filepath, o5mfile), shell=True)
converted = True
else:
o5mfile = filepath
outputpath = "osmmesser_temp.osm"
if not silent:
print("Filtering file")
subprocess.call("""osmfilter {} --keep="{}" -o={}""".format(o5mfile, filterargs, outputpath), shell=True)
o = OSMMeter(outputpath)
print(str(o.calc_all_ways()) + " km")
if converted:
os.remove(o5mfile)
os.remove(outputpath)
def main():
parser = argparse.ArgumentParser(description="Measuring way lengths in OSM files.")
parser.add_argument('--filter', help="""filtering arguments for osmfilter, as specified in http://wiki.openstreetmap.org/wiki/Osmfilter, e.g. "highway=track" """)
parser.add_argument('osmfile', help="file to be processed")
parser.add_argument('-s', '--silent', action='store_true', help='prints out only the final result; no progress will be shown while processing')
args = parser.parse_args()
if args.filter:
filter_and_process(args.osmfile, args.filter, args.silent)
else:
o = OSMMeter(args.osmfile)
print(str(o.calc_all_ways()) + " km")
if __name__ == "__main__":
main()
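# Illustrative usage sketch (not part of the original script): programmatic use of
# OSMMeter on an already-filtered OSM XML file. The file name below is a placeholder,
# and the helper name _example_direct_usage is hypothetical.
def _example_direct_usage(path="filtered_extract.osm"):
    meter = OSMMeter(path, silent=False)
    total_km = meter.calc_all_ways()
    # haversine() works in kilometres; identical coordinates give a distance of exactly 0.0
    assert meter.haversine(13.4, 52.5, 13.4, 52.5) == 0.0
    return total_km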
|
import tweepy
import pymysql as MySQLdb
from time import sleep
import conexao
def erro_limite():
print('API rate limit reached... waiting')
sleep(60 * 3)
api = conexao.get_api()
con = conexao.get_mysql()
cursor = con.cursor()
while True:
paginas = ['@G1','@sbtjornalismo','@VEJA','@folha','@portalR7']
for pag in paginas:
try:
resultados = api.user_timeline(screen_name=pag)
for tweet in resultados:
try:
cursor.execute('INSERT INTO tweet_paginas (nome, tweet, data, id_tweet) VALUES (%s, %s, %s, %s)', (tweet.user.screen_name, tweet.text, tweet.created_at, tweet.id))
print('Added')
except:
# this exception occurs if the user already exists in the database
print('Not added')
continue
con.commit()
except tweepy.error.RateLimitError:
erro_limite()
sleep(5*60)  # wait 5 minutes for the pages to update their feeds
con.close()
# http://docs.tweepy.org/en/v3.5.0/api.html
# https://www.geeksforgeeks.org/python-status-object-in-tweepy/
# https://gist.github.com/marcoscastro/bc43e1741b4af47fda0ef289093aae01
# https://developer.twitter.com/en/docs/twitter-api/v1/tweets/search/api-reference/get-search-tweets
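# Hedged alternative sketch (not part of the original script): assuming a UNIQUE constraint
# on id_tweet (an assumption about the schema, which is not shown here), the duplicate case
# can be caught explicitly via pymysql's IntegrityError instead of a bare except. The helper
# name insere_tweet is hypothetical.
def insere_tweet(cursor, tweet):
    try:
        cursor.execute(
            'INSERT INTO tweet_paginas (nome, tweet, data, id_tweet) VALUES (%s, %s, %s, %s)',
            (tweet.user.screen_name, tweet.text, tweet.created_at, tweet.id))
        return True
    except MySQLdb.IntegrityError:
        # duplicate row already stored; skip it
        return False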
|
# coding: utf-8
###
# feature_extractor.py -
# This file implements the extraction pipeline to obtain the expected features from a standardized data set of patients
# and lesions.
# This script outputs a csv file containing the computed feature per patient.
#
# Pseudo code Implementation scheme:
#
# Create an empty list of patients
# For each patient directory in the data directory :
# Create a new Patient object
# Add the patient to the patient list
# For each lesion directory in the patient directory:
# Create a new Lesion object
# Extract the features from the lesion and the patient PET scans
# Run through the patients' list and create a CSV containing the computed features per patient
#
# Author : François Paupier - francois.paupier@gmail.com
#
# Created on : 16/02/2018
###
import os
import pandas as pd
import radiomics
import SimpleITK as sitk  # provides sitk.sitkBSpline used in the extraction settings below
from code.model.Lesion import Lesion
from code.model.Patient import Patient
def run_extraction_pipe(PATH_TO_DATA, PATH_TO_FEATURES_CSV, PATH_TO_EXTRACTION_PARAMS):
"""
Pipe function takes the path to the standardized patients dataset and the path to the csv containing the
extracted features. If no CSV path is provided a CSV file will be created in the parent directory of the patient
data set.
Warning : If a CSV with the same name already exists it will be overwritten
"""
print("Patients data are loaded from : %s \nFeatures values will be written at: %s"
% (PATH_TO_DATA, PATH_TO_FEATURES_CSV))
list_patients = []
for refPatient in os.listdir(PATH_TO_DATA):
if not refPatient.startswith('.'):
print("Processing patients %s ..." % refPatient)
patient = Patient(refPatient, PATH_TO_DATA)
list_patients.append(patient)
for directoryName in os.listdir(os.path.join(PATH_TO_DATA, patient.ref)):
if directoryName != 'dcm' and 'l' in directoryName:
print(" Processing lesion %s ..." % directoryName)
masksPath = os.path.join(PATH_TO_DATA, refPatient, directoryName)
lesion = Lesion(directoryName, masksPath)
patient.list_lesions.append(lesion)
extract_features(PATH_TO_EXTRACTION_PARAMS, lesion, patient.image)
patients_dataFrame = convert_patients_list_to_dataFrame(list_patients)
patients_dataFrame.to_csv(PATH_TO_FEATURES_CSV, sep=',', encoding='utf-8')
def extract_features(PATH_TO_EXTRACTION_PARAMS, lesion, image):
"""
Extract the features specified in the .yaml parameter file.
Check radiomics extraction parameter for further
information about extraction parameters. Extracted features are recorded in the dict_features of the
lesion object
"""
# Extraction of wanted features one by one
settings = {'binWidth': 0.3, 'interpolator': sitk.sitkBSpline, 'resampledPixelSpacing': None, 'delta': 1}
# First order features
extractor = radiomics.firstorder.RadiomicsFirstOrder(image, lesion.mask, **settings)
maximum = extractor.getMaximumFeatureValue()
# GLCM features
extractor = radiomics.glcm.RadiomicsGLCM(image, lesion.mask, **settings)
homogenity = extractor.getIdFeatureValue()
dissimilarity = extractor.getDifferenceAverageFeatureValue()
entropy = extractor.getSumEntropyFeatureValue()
# GLRLM features
extractor = radiomics.glrlm.RadiomicsGLRLM(image, lesion.mask, **settings)
HGLRE = extractor.getHighGrayLevelRunEmphasisFeatureValue()
# GLSZM features
extractor = radiomics.glszm.RadiomicsGLSZM(image, lesion.mask, **settings)
ZLNU = extractor.getSizeZoneNonUniformityFeatureValue()
SZHGE = extractor.getSmallAreaHighGrayLevelEmphasisFeatureValue()
ZP = extractor.getZonePercentageFeatureValue()
# Add features in the lesion dictionary
lesion.dict_features['entropy'] = entropy
lesion.dict_features['homogenity'] = homogenity
lesion.dict_features['dissimilarity'] = dissimilarity
lesion.dict_features['HGLRE'] = HGLRE
lesion.dict_features['ZLNU'] = ZLNU
lesion.dict_features['SZHGE'] = SZHGE
lesion.dict_features['ZP'] = ZP
lesion.dict_features['maximum'] = maximum
def convert_patients_list_to_dataFrame(list_patients):
"""
Take a patient list containing each patient's lesions and their associated features, and output a pandas data frame.
Each row contains the features extracted from one patient lesion.
"""
list_series = []
for patient in list_patients:
for lesion in patient.list_lesions:
serieIndex = patient.ref + " " + lesion.ref
lesion.dict_features.update({"Index": serieIndex})
lesion.dict_features.move_to_end('Index', last=False)
list_series.append(lesion.dict_features)
patients_dataFrame = pd.DataFrame(list_series)
return patients_dataFrame
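# Illustrative usage sketch (not part of the original module): the paths below are
# placeholders for the standardized patient data set, the output CSV and the pyradiomics
# parameter file, and must be adapted to the local layout.
if __name__ == "__main__":
    run_extraction_pipe(
        PATH_TO_DATA="./data/patients",
        PATH_TO_FEATURES_CSV="./features.csv",
        PATH_TO_EXTRACTION_PARAMS="./extraction_params.yaml",
    )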
|
#!/usr/bin/env python3
with open("input") as infile:
starting_state = [int(x) for x in infile.read().strip().split(",")]
state = []
for i in range(9):
state.append(0)
# state holds the count of lanternfish per timer value; the index is the number of days
# until reproduction
for st in starting_state:
state[st] += 1
for _ in range(80):
new_fish = state[0]
state = state[1:]
state.append(new_fish)
state[6] += new_fish
print(sum(state))
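# Hedged generalization sketch (not part of the original solution): the same rotating-count
# idea expressed with collections.deque, which also handles a 256-day run without growing
# any list. The helper name simulate is hypothetical.
from collections import deque

def simulate(initial_timers, days):
    counts = deque([0] * 9)
    for t in initial_timers:
        counts[t] += 1
    for _ in range(days):
        counts.rotate(-1)        # every timer decreases by one; timer-0 fish wrap to slot 8 as newborns
        counts[6] += counts[8]   # the fish that just spawned reset their own timer to 6
    return sum(counts)

# e.g. simulate(starting_state, 80) reproduces the total printed above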
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import print_function
import time
import os.path
import keyring
import numpy as np
import re
import tarfile
import string
import requests
from requests import HTTPError
import sys
from pkg_resources import resource_filename
from bs4 import BeautifulSoup
from astropy.extern.six.moves.urllib_parse import urljoin, urlparse
from astropy.extern.six import iteritems, StringIO
from astropy.extern import six
from astropy.table import Table, Column
from astropy import log
from astropy.utils.console import ProgressBar
from astropy import units as u
import astropy.coordinates as coord
import astropy.io.votable as votable
from ..exceptions import (RemoteServiceError, TableParseError,
InvalidQueryError, LoginError)
from ..utils import commons, url_helpers
from ..utils.process_asyncs import async_to_sync
from ..query import QueryWithLogin
from . import conf
__doctest_skip__ = ['AlmaClass.*']
@async_to_sync
class AlmaClass(QueryWithLogin):
TIMEOUT = conf.timeout
archive_url = conf.archive_url
USERNAME = conf.username
def __init__(self):
super(AlmaClass, self).__init__()
def query_object_async(self, object_name, cache=True, public=True,
science=True, payload=None, **kwargs):
"""
Query the archive with a source name
Parameters
----------
object_name : str
The object name. Will be parsed by SESAME on the ALMA servers.
cache : bool
Cache the query?
public : bool
Return only publicly available datasets?
science : bool
Return only data marked as "science" in the archive?
payload : dict
Dictionary of additional keywords. See `help`.
kwargs : dict
Passed to `query_async`
"""
if payload is None:
payload = {}
payload.update({'source_name_resolver': object_name, })
return self.query_async(payload, cache=cache, public=public,
science=science, **kwargs)
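# Illustrative usage sketch (not part of this module): the synchronous wrapper generated
# by @async_to_sync is the usual entry point, e.g.
#
#     from astroquery.alma import Alma
#     results = Alma.query_object('M83', public=True, science=True)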
def query_region_async(self, coordinate, radius, cache=True, public=True,
science=True, payload=None, **kwargs):
"""
Query the ALMA archive around a source name or position, within a given radius
Parameters
----------
coordinates : str / `astropy.coordinates`
the identifier or coordinates around which to query.
radius : str / `~astropy.units.Quantity`, optional
the radius of the region
cache : bool
Cache the query?
public : bool
Return only publicly available datasets?
science : bool
Return only data marked as "science" in the archive?
payload : dict
Dictionary of additional keywords. See `help`.
kwargs : dict
Passed to `query_async`
"""
coordinate = commons.parse_coordinates(coordinate)
cstr = coordinate.fk5.to_string(style='hmsdms', sep=':')
rdc = "{cstr}, {rad}".format(cstr=cstr, rad=coord.Angle(radius).deg)
if payload is None:
payload = {}
payload.update({'ra_dec': rdc})
return self.query_async(payload, cache=cache, public=public,
science=science, **kwargs)
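# Illustrative usage sketch (not part of this module): a cone search around a position;
# the coordinates and radius below are placeholders, e.g.
#
#     import astropy.units as u
#     results = Alma.query_region('04h33m11.1s +22d53m08s', radius=0.03 * u.deg)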
def query_async(self, payload, cache=True, public=True, science=True,
max_retries=5,
get_html_version=False, get_query_payload=False, **kwargs):
"""
Perform a generic query with user-specified payload
Parameters
----------
payload : dict
A dictionary of payload keywords that are accepted by the ALMA
archive system. You can look these up by examining the forms at
http://almascience.org/aq or using the `help` method
cache : bool
Cache the query?
(note: HTML queries *cannot* be cached using the standard caching
mechanism because the URLs are different each time)
public : bool
Return only publicly available datasets?
science : bool
Return only data marked as "science" in the archive?
"""
url = urljoin(self._get_dataarchive_url(), 'aq/')
payload.update(kwargs)
if get_html_version:
payload.update({'result_view': 'observation', 'format': 'URL',
'download': 'true'})
else:
payload.update({'result_view': 'raw', 'format': 'VOTABLE',
'download': 'true'})
if public:
payload['public_data'] = 'public'
if science:
payload['science_observations'] = '=%TARGET%'
self.validate_query(payload)
if get_query_payload:
return payload
response = self._request('GET', url, params=payload,
timeout=self.TIMEOUT,
cache=cache and not get_html_version)
self._last_response = response
response.raise_for_status()
if get_html_version:
if 'run' not in response.text:
if max_retries > 0:
log.info("Failed query. Retrying up to {0} more times"
.format(max_retries))
return self.query_async(payload=payload, cache=False,
public=public, science=science,
max_retries=max_retries-1,
get_html_version=get_html_version,
get_query_payload=get_query_payload,
**kwargs)
raise RemoteServiceError("Incorrect return from HTML table query.")
response2 = self._request('GET',
"{0}/{1}/{2}".format(
self._get_dataarchive_url(), 'aq',
response.text),
params={'query_url':
response.url.split("?")[-1]},
timeout=self.TIMEOUT,
cache=False,
)
self._last_response = response2
response2.raise_for_status()
if len(response2.text) == 0:
if max_retries > 0:
log.info("Failed (empty) query. Retrying up to {0} more times"
.format(max_retries))
return self.query_async(payload=payload, cache=cache,
public=public, science=science,
max_retries=max_retries-1,
get_html_version=get_html_version,
get_query_payload=get_query_payload,
**kwargs)
raise RemoteServiceError("Empty return.")
return response2
else:
return response
def validate_query(self, payload, cache=True):
"""
Use the ALMA query validator service to check whether the keywords are
valid
"""
# Check that the keywords specified are allowed
self._validate_payload(payload)
vurl = self._get_dataarchive_url() + '/aq/validate'
bad_kws = {}
for kw in payload:
vpayload = {'field': kw,
kw: payload[kw]}
response = self._request('GET', vurl, params=vpayload, cache=cache,
timeout=self.TIMEOUT)
if response.content:
bad_kws[kw] = response.content
if bad_kws:
raise InvalidQueryError("Invalid query parameters: "
"{0}".format(bad_kws))
def _get_dataarchive_url(self):
"""
If the generic ALMA URL is used, query it to determine which mirror to
access for querying data
"""
if not hasattr(self, 'dataarchive_url'):
if self.archive_url in ('http://almascience.org', 'https://almascience.org'):
response = self._request('GET', self.archive_url + "/aq",
cache=False)
response.raise_for_status()
# Jan 2017: we have to force https because the archive doesn't
# tell us it needs https.
self.dataarchive_url = response.url.replace("/aq/", "").replace("http://", "https://")
else:
self.dataarchive_url = self.archive_url
return self.dataarchive_url
def stage_data(self, uids):
"""
Stage ALMA data
Parameters
----------
uids : list or str
A list of valid UIDs or a single UID.
UIDs should have the form: 'uid://A002/X391d0b/X7b'
Returns
-------
data_file_table : Table
A table containing 3 columns: the UID, the file URL (for future
downloading), and the file size
"""
"""
With log.set_level(10)
INFO: Staging files... [astroquery.alma.core]
DEBUG: First request URL: https://almascience.eso.org/rh/submission [astroquery.alma.core]
DEBUG: First request payload: {'dataset': [u'ALMA+uid___A002_X3b3400_X90f']} [astroquery.alma.core]
DEBUG: First response URL: https://almascience.eso.org/rh/checkAuthenticationStatus/3f98de33-197e-4692-9afa-496842032ea9/submission [astroquery.alma.core]
DEBUG: Request ID: 3f98de33-197e-4692-9afa-496842032ea9 [astroquery.alma.core]
DEBUG: Submission URL: https://almascience.eso.org/rh/submission/3f98de33-197e-4692-9afa-496842032ea9 [astroquery.alma.core]
.DEBUG: Data list URL: https://almascience.eso.org/rh/requests/anonymous/786823226 [astroquery.alma.core]
"""
if isinstance(uids, six.string_types + (np.bytes_,)):
uids = [uids]
if not isinstance(uids, (list, tuple, np.ndarray)):
raise TypeError("Datasets must be given as a list of strings.")
log.info("Staging files...")
self._get_dataarchive_url()
url = urljoin(self.dataarchive_url, 'rh/submission')
log.debug("First request URL: {0}".format(url))
# 'ALMA+uid___A002_X391d0b_X7b'
payload = {'dataset': ['ALMA+' + clean_uid(uid) for uid in uids]}
log.debug("First request payload: {0}".format(payload))
self._staging_log = {'first_post_url': url}
# Request staging for the UIDs
# This component cannot be cached, since the returned data can change
# if new data are uploaded
response = self._request('POST', url, data=payload,
timeout=self.TIMEOUT, cache=False)
self._staging_log['initial_response'] = response
log.debug("First response URL: {0}".format(response.url))
if 'login' in response.url:
raise ValueError("You must login before downloading this data set.")
if response.status_code == 405:
if hasattr(self, '_last_successful_staging_log'):
log.warning("Error 405 received. If you have previously staged "
"the same UIDs, the result returned is probably "
"correct, otherwise you may need to create a fresh "
"astroquery.Alma instance.")
return self._last_successful_staging_log['result']
else:
raise HTTPError("Received an error 405: this may indicate you "
"have already staged the data. Try downloading "
"the file URLs directly with download_files.")
response.raise_for_status()
if 'j_spring_cas_security_check' in response.url:
time.sleep(1)
# CANNOT cache this stage: it is not a real data page! Caching it results in
# infinite loops
response = self._request('POST', url, data=payload,
timeout=self.TIMEOUT, cache=False)
self._staging_log['initial_response'] = response
if 'j_spring_cas_security_check' in response.url:
log.warning("Staging request was not successful. Try again?")
response.raise_for_status()
if 'j_spring_cas_security_check' in response.url:
raise RemoteServiceError("Could not access data. This error "
"can arise if the data are private and "
"you do not have access rights or are "
"not logged in.")
request_id = response.url.split("/")[-2]
self._staging_log['request_id'] = request_id
log.debug("Request ID: {0}".format(request_id))
# Submit a request for the specific request ID identified above
submission_url = urljoin(self.dataarchive_url,
url_helpers.join('rh/submission', request_id))
log.debug("Submission URL: {0}".format(submission_url))
self._staging_log['submission_url'] = submission_url
staging_submission = self._request('GET', submission_url, cache=True)
self._staging_log['staging_submission'] = staging_submission
staging_submission.raise_for_status()
data_page_url = staging_submission.url
self._staging_log['data_page_url'] = data_page_url
dpid = data_page_url.split("/")[-1]
self._staging_log['staging_page_id'] = dpid
# CANNOT cache this step: please_wait will happen infinitely
data_page = self._request('GET', data_page_url, cache=False)
self._staging_log['data_page'] = data_page
data_page.raise_for_status()
has_completed = False
while not has_completed:
time.sleep(1)
summary = self._request('GET', url_helpers.join(data_page_url,
'summary'),
cache=False)
summary.raise_for_status()
print(".", end='')
sys.stdout.flush()
has_completed = summary.json()['complete']
self._staging_log['summary'] = summary
summary.raise_for_status()
self._staging_log['json_data'] = json_data = summary.json()
username = self.USERNAME if self.USERNAME else 'anonymous'
# templates:
# https://almascience.eso.org/dataPortal/requests/keflavich/946895898/ALMA/
# 2013.1.00308.S_uid___A001_X196_X93_001_of_001.tar/2013.1.00308.S_uid___A001_X196_X93_001_of_001.tar
# uid___A002_X9ee74a_X26f0/2013.1.00308.S_uid___A002_X9ee74a_X26f0.asdm.sdm.tar
url_decomposed = urlparse(data_page_url)
base_url = ('{uri.scheme}://{uri.netloc}/'
'dataPortal/requests/{username}/'
'{staging_page_id}/ALMA'.format(uri=url_decomposed,
staging_page_id=dpid,
username=username,
))
tbl = self._json_summary_to_table(json_data, base_url=base_url)
self._staging_log['result'] = tbl
self._staging_log['file_urls'] = tbl['URL']
self._last_successful_staging_log = self._staging_log
return tbl
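# Illustrative usage sketch (not part of this module): the staging table feeds directly
# into download_files, e.g.
#
#     staged = Alma.stage_data('uid://A002/X391d0b/X7b')
#     local_paths = Alma.download_files(staged['URL'])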
def _HEADER_data_size(self, files):
"""
Given a list of file URLs, return the data size. This is useful for
assessing how much data you might be downloading!
(This is discouraged by the ALMA archive, as it puts unnecessary load
on their system)
"""
totalsize = 0 * u.B
data_sizes = {}
pb = ProgressBar(len(files))
for ii, fileLink in enumerate(files):
response = self._request('HEAD', fileLink, stream=False,
cache=False, timeout=self.TIMEOUT)
filesize = (int(response.headers['content-length']) * u.B).to(u.GB)
totalsize += filesize
data_sizes[fileLink] = filesize
log.debug("File {0}: size {1}".format(fileLink, filesize))
pb.update(ii + 1)
response.raise_for_status()
return data_sizes, totalsize.to(u.GB)
def download_files(self, files, savedir=None, cache=True, continuation=True):
"""
Given a list of file URLs, download them
Note: Given a list with repeated URLs, each will only be downloaded
once, so the return may have a different length than the input list
"""
downloaded_files = []
if savedir is None:
savedir = self.cache_location
for fileLink in unique(files):
try:
filename = self._request("GET", fileLink, save=True,
savedir=savedir,
timeout=self.TIMEOUT, cache=cache,
continuation=continuation)
downloaded_files.append(filename)
except requests.HTTPError as ex:
if ex.response.status_code == 401:
log.info("Access denied to {url}. Skipping to"
" next file".format(url=fileLink))
continue
else:
raise ex
return downloaded_files
def retrieve_data_from_uid(self, uids, cache=True):
"""
Stage & Download ALMA data. Will print out the expected file size
before attempting the download.
Parameters
----------
uids : list or str
A list of valid UIDs or a single UID.
UIDs should have the form: 'uid://A002/X391d0b/X7b'
cache : bool
Whether to cache the downloads.
Returns
-------
downloaded_files : list
A list of the downloaded file paths
"""
if isinstance(uids, six.string_types + (np.bytes_,)):
uids = [uids]
if not isinstance(uids, (list, tuple, np.ndarray)):
raise TypeError("Datasets must be given as a list of strings.")
files = self.stage_data(uids)
file_urls = files['URL']
totalsize = files['size'].sum() * files['size'].unit
# each_size, totalsize = self.data_size(files)
log.info("Downloading files of size {0}...".format(totalsize.to(u.GB)))
# TODO: Add cache=cache keyword here. Currently would have no effect.
downloaded_files = self.download_files(file_urls)
return downloaded_files
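# Illustrative usage sketch (not part of this module): the one-call convenience wrapper
# that stages and downloads in a single step, e.g.
#
#     files = Alma.retrieve_data_from_uid('uid://A002/X391d0b/X7b')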
def _parse_result(self, response, verbose=False):
"""
Parse a VOtable response
"""
if not verbose:
commons.suppress_vo_warnings()
if 'run?' in response.url:
if response.text == "":
raise RemoteServiceError("Empty return.")
# this is a CSV-like table returned via a direct browser request
import pandas
table = Table.from_pandas(pandas.read_csv(StringIO(response.text)))
else:
fixed_content = self._hack_bad_arraysize_vofix(response.content)
tf = six.BytesIO(fixed_content)
vo_tree = votable.parse(tf, pedantic=False, invalid='mask')
first_table = vo_tree.get_first_table()
table = first_table.to_table(use_names_over_ids=True)
return table
def _hack_bad_arraysize_vofix(self, text):
"""
Hack to fix an error in the ALMA votables present in most 2016 and 2017 queries.
The problem is that this entry:
' <FIELD name="Band" datatype="char" ID="32817" xtype="adql:VARCHAR" arraysize="0*">\r',
has an invalid ``arraysize`` entry. Also, it returns a char, but it
should be an int.
Since that problem was discovered and fixed, many other entries have
the same error.
According to the IVOA, the tables are wrong, not astropy.io.votable:
http://www.ivoa.net/documents/VOTable/20130315/PR-VOTable-1.3-20130315.html#ToC11
"""
lines = text.split(b"\n")
newlines = []
for ln in lines:
if b'FIELD name="Band"' in ln:
ln = ln.replace(b'arraysize="0*"', b'arraysize="1*"')
ln = ln.replace(b'datatype="char"', b'datatype="int"')
elif b'arraysize="0*"' in ln:
ln = ln.replace(b'arraysize="0*"', b'arraysize="*"')
newlines.append(ln)
return b"\n".join(newlines)
def _login(self, username=None, store_password=False,
reenter_password=False):
"""
Login to the ALMA Science Portal.
Parameters
----------
username : str, optional
Username to the ALMA Science Portal. If not given, it should be
specified in the config file.
store_password : bool, optional
Stores the password securely in your keyring. Default is False.
reenter_password : bool, optional
Asks for the password even if it is already stored in the
keyring. This is the way to overwrite an already stored password
on the keyring. Default is False.
"""
if username is None:
if not self.USERNAME:
raise LoginError("If you do not pass a username to login(), "
"you should configure a default one!")
else:
username = self.USERNAME
# Check if already logged in
loginpage = self._request("GET", "https://asa.alma.cl/cas/login",
cache=False)
root = BeautifulSoup(loginpage.content, 'html5lib')
if root.find('div', class_='success'):
log.info("Already logged in.")
return True
# Get password from keyring or prompt
password, password_from_keyring = self._get_password(
"astroquery:asa.alma.cl", username, reenter=reenter_password)
# Authenticate
log.info("Authenticating {0} on asa.alma.cl ...".format(username))
# Do not cache pieces of the login process
data = {kw: root.find('input', {'name': kw})['value']
for kw in ('lt', '_eventId', 'execution')}
data['username'] = username
data['password'] = password
login_response = self._request("POST", "https://asa.alma.cl/cas/login",
params={'service':
urljoin(self.archive_url,
'rh/login')},
data=data,
cache=False)
authenticated = ('You have successfully logged in' in
login_response.text)
if authenticated:
log.info("Authentication successful!")
self.USERNAME = username
else:
log.exception("Authentication failed!")
# When authenticated, save password in keyring if needed
if authenticated and password_from_keyring is None and store_password:
keyring.set_password("astroquery:asa.alma.cl", username, password)
return authenticated
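# Illustrative usage sketch (not part of this module): proprietary data requires logging
# in first through the public login() wrapper provided by QueryWithLogin, e.g.
#
#     Alma.login('my_alma_username')  # the username here is a placeholder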
def get_cycle0_uid_contents(self, uid):
"""
List the file contents of a UID from Cycle 0. Will raise an error
if the UID is from cycle 1+, since those data have been released in
a different and more consistent format. See
http://almascience.org/documents-and-tools/cycle-2/ALMAQA2Productsv1.01.pdf
for details.
"""
# First, check if UID is in the Cycle 0 listing
if uid in self.cycle0_table['uid']:
cycle0id = self.cycle0_table[
self.cycle0_table['uid'] == uid][0]['ID']
contents = [row['Files']
for row in self._cycle0_tarfile_content
if cycle0id in row['ID']]
return contents
else:
info_url = urljoin(
self._get_dataarchive_url(),
'documents-and-tools/cycle-2/ALMAQA2Productsv1.01.pdf')
raise ValueError("Not a Cycle 0 UID. See {0} for details about "
"cycle 1+ data release formats.".format(info_url))
@property
def _cycle0_tarfile_content(self):
"""
In principle, this is a static file, but we'll retrieve it just in case
"""
if not hasattr(self, '_cycle0_tarfile_content_table'):
url = urljoin(self._get_dataarchive_url(),
'alma-data/archive/cycle-0-tarfile-content')
response = self._request('GET', url, cache=True)
# html.parser is needed because some <tr>'s have form:
# <tr width="blah"> which the default parser does not pick up
root = BeautifulSoup(response.content, 'html.parser')
html_table = root.find('table', class_='grid listing')
data = list(zip(*[(x.findAll('td')[0].text,
x.findAll('td')[1].text)
for x in html_table.findAll('tr')]))
columns = [Column(data=data[0], name='ID'),
Column(data=data[1], name='Files')]
tbl = Table(columns)
assert len(tbl) == 8497
self._cycle0_tarfile_content_table = tbl
else:
tbl = self._cycle0_tarfile_content_table
return tbl
@property
def cycle0_table(self):
"""
Return a table of Cycle 0 Project IDs and associated UIDs.
The table is distributed with astroquery and was provided by Felix
Stoehr.
"""
if not hasattr(self, '_cycle0_table'):
filename = resource_filename(
'astroquery.alma', 'data/cycle0_delivery_asdm_mapping.txt')
self._cycle0_table = Table.read(filename, format='ascii.no_header')
self._cycle0_table.rename_column('col1', 'ID')
self._cycle0_table.rename_column('col2', 'uid')
return self._cycle0_table
def get_files_from_tarballs(self, downloaded_files, regex=r'.*\.fits$',
path='cache_path', verbose=True):
"""
Given a list of successfully downloaded tarballs, extract files
with names matching a specified regular expression. The default
is to extract all FITS files
Parameters
----------
downloaded_files : list
A list of downloaded files. These should be paths on your local
machine.
regex : str
A valid regular expression
path : 'cache_path' or str
If 'cache_path', will use the astroquery.Alma cache directory
(``Alma.cache_location``), otherwise will use the specified path.
Note that the subdirectory structure of the tarball will be
maintained.
Returns
-------
filelist : list
A list of the extracted file locations on disk
"""
if path == 'cache_path':
path = self.cache_location
elif not os.path.isdir(path):
raise OSError("Specified an invalid path {0}.".format(path))
fitsre = re.compile(regex)
filelist = []
for fn in downloaded_files:
tf = tarfile.open(fn)
for member in tf.getmembers():
if fitsre.match(member.name):
if verbose:
log.info("Extracting {0} to {1}".format(member.name,
path))
tf.extract(member, path)
filelist.append(os.path.join(path, member.name))
return filelist
def download_and_extract_files(self, urls, delete=True, regex=r'.*\.fits$',
include_asdm=False, path='cache_path',
verbose=True):
"""
Given a list of tarball URLs:
1. Download the tarball
2. Extract all FITS files (or whatever matches the regex)
3. Delete the downloaded tarball
See ``Alma.get_files_from_tarballs`` for details
Parameters
----------
urls : str or list
A single URL or a list of URLs
include_asdm : bool
Only affects cycle 1+ data. If set, the ASDM files will be
downloaded in addition to the script and log files. By default,
though, this file will be downloaded and deleted without extracting
any information: you must change the regex if you want to extract
data from an ASDM tarball
"""
if isinstance(urls, six.string_types):
urls = [urls]
if not isinstance(urls, (list, tuple, np.ndarray)):
raise TypeError("Datasets must be given as a list of strings.")
all_files = []
for url in urls:
if url[-4:] != '.tar':
raise ValueError("URLs should be links to tarballs.")
tarfile_name = os.path.split(url)[-1]
if tarfile_name in self._cycle0_tarfile_content['ID']:
# It is a cycle 0 file: need to check if it contains FITS
match = (self._cycle0_tarfile_content['ID'] == tarfile_name)
if not any(re.match(regex, x) for x in
self._cycle0_tarfile_content['Files'][match]):
log.info("No FITS files found in {0}".format(tarfile_name))
continue
else:
if 'asdm' in tarfile_name and not include_asdm:
log.info("ASDM tarballs do not contain FITS files; "
"skipping.")
continue
try:
tarball_name = self._request('GET', url, save=True,
timeout=self.TIMEOUT)
except requests.ConnectionError as ex:
self.partial_file_list = all_files
log.error("There was an error downloading the file. "
"A partially completed download list is "
"in Alma.partial_file_list")
raise ex
except requests.HTTPError as ex:
if ex.response.status_code == 401:
log.info("Access denied to {url}. Skipping to"
" next file".format(url=url))
continue
else:
raise ex
fitsfilelist = self.get_files_from_tarballs([tarball_name],
regex=regex, path=path,
verbose=verbose)
if delete:
log.info("Deleting {0}".format(tarball_name))
os.remove(tarball_name)
all_files += fitsfilelist
return all_files
def help(self, cache=True):
"""
Return the valid query parameters
"""
help_list = self._get_help_page(cache=cache)
print("Valid ALMA keywords. Left column is the description, right "
"column is the name of the keyword to pass to astroquery.alma"
" queries:")
for title, section in help_list:
print()
print(title)
for row in section:
if len(row) == 2: # text value
name, payload_keyword = row
print(" {0:33s}: {1:35s}".format(name, payload_keyword))
# elif len(row) == 3: # radio button
# name,payload_keyword,value = row
# print(" {0:33s}: {1:20s} = {2:15s}".format(name,
# payload_keyword,
# value))
elif len(row) == 4: # radio button or checkbox
name, payload_keyword, checkbox, value = row
if isinstance(checkbox, list):
checkbox_str = ", ".join(["{0}={1}".format(x, y)
for x, y in zip(checkbox, value)])
print(" {0:33s}: {1:20s} -> {2}"
.format(name, payload_keyword, checkbox_str))
else:
print(" {2} {0:29s}: {1:20s} = {3:15s}"
.format(name, payload_keyword, checkbox, value))
else:
raise ValueError("Wrong number of rows - ALMA query page"
" did not parse properly.")
def _get_help_page(self, cache=True):
if not hasattr(self, '_help_list') or not self._help_list:
querypage = self._request(
'GET', self._get_dataarchive_url() + "/aq/",
cache=cache, timeout=self.TIMEOUT)
root = BeautifulSoup(querypage.content, "html5lib")
sections = root.findAll('td', class_='category')
whitespace = re.compile(r"\s+")
help_list = []
for section in sections:
title = section.find(
'div', class_='categorytitle').text.lstrip()
help_section = (title, [])
for inp in section.findAll('div', class_='inputdiv'):
sp = inp.find('span')
buttons = inp.findAll('input')
for b in buttons:
# old version:for=id=rawView; name=viewFormat
# new version:for=id=rawView; name=result_view
payload_keyword = b.attrs['name']
bid = b.attrs['id']
label = inp.find('label')
if sp is not None:
name = whitespace.sub(" ", sp.text)
elif label.attrs['for'] == bid:
name = whitespace.sub(" ", label.text)
else:
raise TableParseError("ALMA query page has"
" an unrecognized entry")
if b.attrs['type'] == 'text':
help_section[1].append((name, payload_keyword))
elif b.attrs['type'] == 'radio':
value = b.attrs['value']
if 'checked' in b.attrs:
checked = b.attrs['checked'] == 'checked'
checkbox = "(x)" if checked else "( )"
else:
checkbox = "( )"
help_section[1].append((name, payload_keyword,
checkbox, value))
elif b.attrs['type'] == 'checkbox':
if 'checked' in b.attrs:
checked = b.attrs['checked'] == 'checked'
else:
checked = False
value = b.attrs['value']
checkbox = "[x]" if checked else "[ ]"
help_section[1].append((name, payload_keyword,
checkbox, value))
select = inp.find('select')
if select is not None:
options = [("".join(filter_printable(option.text)),
option.attrs['value'])
for option in select.findAll('option')]
if sp is not None:
name = whitespace.sub(" ", sp.text)
else:
name = select.attrs['name']
checkbox = [o[0] for o in options]
value = [o[1] for o in options]
option_str = select.attrs['name']
help_section[1].append((name, option_str, checkbox, value))
help_list.append(help_section)
self._help_list = help_list
return self._help_list
def _validate_payload(self, payload):
if not hasattr(self, '_valid_params'):
help_list = self._get_help_page(cache=False)
self._valid_params = [row[1]
for title, section in help_list
for row in section]
if len(self._valid_params) == 0:
raise ValueError("The query validation failed for unknown "
"reasons. Try again?")
# These parameters are entirely hidden, but Felix says they are
# allowed
self._valid_params.append('download')
self._valid_params.append('format')
self._valid_params.append('member_ous_id')
invalid_params = [k for k in payload if k not in self._valid_params]
if len(invalid_params) > 0:
raise InvalidQueryError("The following parameters are not accepted"
" by the ALMA query service:"
" {0}".format(invalid_params))
def _parse_staging_request_page(self, data_list_page):
"""
Parse pages like this one:
https://almascience.eso.org/rh/requests/anonymous/786572566
that include links to data sets that have been requested and staged
Parameters
----------
data_list_page : requests.Response object
"""
root = BeautifulSoup(data_list_page.content, 'html5lib')
data_table = root.findAll('table', class_='list', id='report')[0]
columns = {'uid': [], 'URL': [], 'size': []}
for tr in data_table.findAll('tr'):
tds = tr.findAll('td')
# Cannot check class if it is not defined
cl = 'class' in tr.attrs
if (len(tds) > 1 and 'uid' in tds[0].text and
(cl and 'Level' in tr['class'][0])):
# New Style
text = tds[0].text.strip().split()
if text[0] in ('Asdm', 'Member'):
uid = text[-1]
elif len(tds) > 1 and 'uid' in tds[1].text:
# Old Style
uid = tds[1].text.strip()
elif cl and tr['class'] == 'Level_1':
raise ValueError("Heading was found when parsing the download "
"page but it was not parsed correctly")
if len(tds) > 3 and (cl and tr['class'][0] == 'fileRow'):
# New Style
size, unit = re.search(r'(-|[0-9\.]*)([A-Za-z]*)',
tds[2].text).groups()
href = tds[1].find('a')
if size == '':
# this is a header row
continue
authorized = ('access_authorized.png' in
tds[3].findChild('img')['src'])
if authorized:
columns['uid'].append(uid)
if href and 'href' in href.attrs:
columns['URL'].append(href.attrs['href'])
else:
columns['URL'].append('None_Found')
unit = (u.Unit(unit) if unit in ('GB', 'MB')
else u.Unit('kB') if 'kb' in unit.lower()
else 1)
try:
columns['size'].append(float(size) * u.Unit(unit))
except ValueError:
# size is probably a string?
columns['size'].append(-1 * u.byte)
log.log(level=5, msg="Found a new-style entry. "
"size={0} uid={1} url={2}"
.format(size, uid, columns['URL'][-1]))
else:
log.warning("Access to {0} is not authorized.".format(uid))
elif len(tds) > 3 and tds[2].find('a'):
# Old Style
href = tds[2].find('a')
size, unit = re.search(r'([0-9\.]*)([A-Za-z]*)',
tds[3].text).groups()
columns['uid'].append(uid)
columns['URL'].append(href.attrs['href'])
unit = (u.Unit(unit) if unit in ('GB', 'MB')
else u.Unit('kB') if 'kb' in unit.lower()
else 1)
columns['size'].append(float(size) * u.Unit(unit))
log.log(level=5, msg="Found an old-style entry. "
"size={0} uid={1} url={2}".format(size, uid,
columns['URL'][-1]))
columns['size'] = u.Quantity(columns['size'], u.Gbyte)
if len(columns['uid']) == 0:
raise RemoteServiceError(
"No valid UIDs were found in the staged data table. "
"Please include {0} in a bug report."
.format(self._staging_log['data_list_url']))
tbl = Table([Column(name=k, data=v) for k, v in iteritems(columns)])
return tbl
def _json_summary_to_table(self, data, base_url):
"""
"""
columns = {'uid': [], 'URL': [], 'size': []}
for entry in data['node_data']:
# de_type can be useful (e.g., MOUS), but it is not necessarily
# specified
# file_name and file_key *must* be specified.
is_file = (entry['file_name'] != 'null' and
entry['file_key'] != 'null')
if is_file:
# "de_name": "ALMA+uid://A001/X122/X35e",
columns['uid'].append(entry['de_name'][5:])
if entry['file_size'] == 'null':
columns['size'].append(np.nan * u.Gbyte)
else:
columns['size'].append(
(int(entry['file_size']) * u.B).to(u.Gbyte))
# example template for constructing url:
# https://almascience.eso.org/dataPortal/requests/keflavich/940238268/ALMA/
# uid___A002_X9d6f4c_X154/2013.1.00546.S_uid___A002_X9d6f4c_X154.asdm.sdm.tar
# above is WRONG... except for ASDMs, when it's right
# should be:
# 2013.1.00546.S_uid___A002_X9d6f4c_X154.asdm.sdm.tar/2013.1.00546.S_uid___A002_X9d6f4c_X154.asdm.sdm.tar
#
# apparently ASDMs are different from others:
# templates:
# https://almascience.eso.org/dataPortal/requests/keflavich/946895898/ALMA/
# 2013.1.00308.S_uid___A001_X196_X93_001_of_001.tar/2013.1.00308.S_uid___A001_X196_X93_001_of_001.tar
# uid___A002_X9ee74a_X26f0/2013.1.00308.S_uid___A002_X9ee74a_X26f0.asdm.sdm.tar
url = url_helpers.join(base_url,
entry['file_key'],
entry['file_name'])
if 'null' in url:
raise ValueError("The URL {0} was created containing "
"'null', which is invalid.".format(url))
columns['URL'].append(url)
columns['size'] = u.Quantity(columns['size'], u.Gbyte)
tbl = Table([Column(name=k, data=v) for k, v in iteritems(columns)])
return tbl
Alma = AlmaClass()
def clean_uid(uid):
"""
Return a uid with all unacceptable characters replaced with underscores
"""
if not hasattr(uid, 'replace'):
return clean_uid(str(uid.astype('S')))
try:
return uid.decode('utf-8').replace(u"/", u"_").replace(u":", u"_")
except AttributeError:
return uid.replace("/", "_").replace(":", "_")
def reform_uid(uid):
"""
Convert a uid with underscores to the original format
"""
return uid[:3] + "://" + "/".join(uid[6:].split("_"))
def unique(seq):
"""
Return unique elements of a list, preserving order
"""
seen = set()
seen_add = seen.add
return [x for x in seq if not (x in seen or seen_add(x))]
def filter_printable(s):
""" extract printable characters from a string """
return filter(lambda x: x in string.printable, s)
|
#!/usr/bin/env python
'''
Write a Python program using ciscoconfparse that parses the 'cisco_ipsec.txt'
config file. Note, this config file is not fully valid (i.e. parts of the
configuration are missing).
The script should find all of the crypto map entries in the file (lines that
begin with 'crypto map CRYPTO') and print out the children of each crypto map.
'''
from ciscoconfparse import CiscoConfParse
cisco_cfg = CiscoConfParse("cisco_ipsec.txt")
print('\n')
print(cisco_cfg)
print('\n')
crypto = cisco_cfg.find_objects(r"^crypto map CRYPTO")
print(crypto)
print('\n')
for i in crypto:
    print(i.text)
    for child in i.children:
        print(child.text)
    print('\n')
|
from collections import OrderedDict
from tqdm import tqdm
def load_flickr_set(images, text, file, test):
"""
    Produces an ordered dict of tuples of data and labels from the ids defined in file.
Train Structure:
{
id: (id, img_data, word vector)
}
Test Structure:
{
id: (id, img_data, (c1, c2, c3, c4, c5))
}
    :param images: preprocessed data as ordered dict
    :param text: preprocessed text as pandas dataframe with columns 'image_idx', 'caption', 'caption_idx'
:param file: Flickr_8k.*Images.txt
:param test: indicates if we want a test dataset
:return: ordered dict
"""
dataset = OrderedDict()
set_ids = read_ids(file)
for pic_id in tqdm(set_ids, desc="Generating Flickr dataset"):
img_data = images[pic_id]
text_data = text[text['image_idx'] == pic_id]
# For testing data, we want all available captions as true labels
# result -> (id, x, (y1, y2, .., y5))
if test:
labels = ()
for idx, row in text_data.iterrows():
labels += (row['caption'],)
dataset[pic_id] = (pic_id, img_data, labels)
# For training data, we want to use the captions to generate new training objects
# result -> (id, x, y)
else:
for idx, row in text_data.iterrows():
new_name = pic_id + '-' + row['caption_idx']
dataset[new_name] = (new_name, img_data, row['word2vec'])
return dataset
def read_ids(file):
set_ids = []
with open(file, 'r', encoding='UTF-8') as f:
# Read the data
for line in f:
# Strip the ending newline char
set_ids.append(line.strip("\n"))
return set_ids
def get_caption_set(text, file):
""" Returns all captions of the pictures ids defined in file
:param text: preproccessed captions as dataframe
:param file: Flickr_8k.*Images.txt
:return: list of captions
"""
set_ids = read_ids(file)
result = []
for pic_id in tqdm(set_ids, desc="Generating captions training set"):
text_data = text[text['image_idx'] == pic_id]
for _, row in text_data.iterrows():
result.append(row['caption'])
return result
|
"""
This module is home to the Location class
"""
from pyecobee.ecobee_object import EcobeeObject
class Location(EcobeeObject):
"""
This class has been auto generated by scraping
https://www.ecobee.com/home/developer/api/documentation/v1/objects/Location.shtml
Attribute names have been generated by converting ecobee property
names from camelCase to snake_case.
A getter property has been generated for each attribute.
A setter property has been generated for each attribute whose value
of READONLY is "no".
An __init__ argument without a default value has been generated if
the value of REQUIRED is "yes".
An __init__ argument with a default value of None has been generated
if the value of REQUIRED is "no".
"""
__slots__ = [
'_time_zone_offset_minutes',
'_time_zone',
'_is_daylight_saving',
'_street_address',
'_city',
'_province_state',
'_country',
'_postal_code',
'_phone_number',
'_map_coordinates',
]
attribute_name_map = {
'time_zone_offset_minutes': 'timeZoneOffsetMinutes',
'timeZoneOffsetMinutes': 'time_zone_offset_minutes',
'time_zone': 'timeZone',
'timeZone': 'time_zone',
'is_daylight_saving': 'isDaylightSaving',
'isDaylightSaving': 'is_daylight_saving',
'street_address': 'streetAddress',
'streetAddress': 'street_address',
'city': 'city',
'province_state': 'provinceState',
'provinceState': 'province_state',
'country': 'country',
'postal_code': 'postalCode',
'postalCode': 'postal_code',
'phone_number': 'phoneNumber',
'phoneNumber': 'phone_number',
'map_coordinates': 'mapCoordinates',
'mapCoordinates': 'map_coordinates',
}
attribute_type_map = {
'time_zone_offset_minutes': 'int',
'time_zone': 'six.text_type',
'is_daylight_saving': 'bool',
'street_address': 'six.text_type',
'city': 'six.text_type',
'province_state': 'six.text_type',
'country': 'six.text_type',
'postal_code': 'six.text_type',
'phone_number': 'six.text_type',
'map_coordinates': 'six.text_type',
}
def __init__(
self,
time_zone_offset_minutes=None,
time_zone=None,
is_daylight_saving=None,
street_address=None,
city=None,
province_state=None,
country=None,
postal_code=None,
phone_number=None,
map_coordinates=None,
):
"""
Construct a Location instance
"""
self._time_zone_offset_minutes = time_zone_offset_minutes
self._time_zone = time_zone
self._is_daylight_saving = is_daylight_saving
self._street_address = street_address
self._city = city
self._province_state = province_state
self._country = country
self._postal_code = postal_code
self._phone_number = phone_number
self._map_coordinates = map_coordinates
@property
def time_zone_offset_minutes(self):
"""
Gets the time_zone_offset_minutes attribute of this Location
instance.
:return: The value of the time_zone_offset_minutes attribute of
this Location instance.
:rtype: int
"""
return self._time_zone_offset_minutes
@property
def time_zone(self):
"""
Gets the time_zone attribute of this Location instance.
:return: The value of the time_zone attribute of this Location
instance.
:rtype: six.text_type
"""
return self._time_zone
@time_zone.setter
def time_zone(self, time_zone):
"""
Sets the time_zone attribute of this Location instance.
:param time_zone: The time_zone value to set for the time_zone
attribute of this Location instance.
:type: six.text_type
"""
self._time_zone = time_zone
@property
def is_daylight_saving(self):
"""
Gets the is_daylight_saving attribute of this Location instance.
:return: The value of the is_daylight_saving attribute of this
Location instance.
:rtype: bool
"""
return self._is_daylight_saving
@is_daylight_saving.setter
def is_daylight_saving(self, is_daylight_saving):
"""
Sets the is_daylight_saving attribute of this Location instance.
:param is_daylight_saving: The is_daylight_saving value to set
for the is_daylight_saving attribute of this Location instance.
:type: bool
"""
self._is_daylight_saving = is_daylight_saving
@property
def street_address(self):
"""
Gets the street_address attribute of this Location instance.
:return: The value of the street_address attribute of this
Location instance.
:rtype: six.text_type
"""
return self._street_address
@street_address.setter
def street_address(self, street_address):
"""
Sets the street_address attribute of this Location instance.
:param street_address: The street_address value to set for the
street_address attribute of this Location instance.
:type: six.text_type
"""
self._street_address = street_address
@property
def city(self):
"""
Gets the city attribute of this Location instance.
:return: The value of the city attribute of this Location
instance.
:rtype: six.text_type
"""
return self._city
@city.setter
def city(self, city):
"""
Sets the city attribute of this Location instance.
:param city: The city value to set for the city attribute of
this Location instance.
:type: six.text_type
"""
self._city = city
@property
def province_state(self):
"""
Gets the province_state attribute of this Location instance.
:return: The value of the province_state attribute of this
Location instance.
:rtype: six.text_type
"""
return self._province_state
@province_state.setter
def province_state(self, province_state):
"""
Sets the province_state attribute of this Location instance.
:param province_state: The province_state value to set for the
province_state attribute of this Location instance.
:type: six.text_type
"""
self._province_state = province_state
@property
def country(self):
"""
Gets the country attribute of this Location instance.
:return: The value of the country attribute of this Location
instance.
:rtype: six.text_type
"""
return self._country
@country.setter
def country(self, country):
"""
Sets the country attribute of this Location instance.
:param country: The country value to set for the country
attribute of this Location instance.
:type: six.text_type
"""
self._country = country
@property
def postal_code(self):
"""
Gets the postal_code attribute of this Location instance.
:return: The value of the postal_code attribute of this Location
instance.
:rtype: six.text_type
"""
return self._postal_code
@postal_code.setter
def postal_code(self, postal_code):
"""
Sets the postal_code attribute of this Location instance.
:param postal_code: The postal_code value to set for the
postal_code attribute of this Location instance.
:type: six.text_type
"""
self._postal_code = postal_code
@property
def phone_number(self):
"""
Gets the phone_number attribute of this Location instance.
:return: The value of the phone_number attribute of this
Location instance.
:rtype: six.text_type
"""
return self._phone_number
@phone_number.setter
def phone_number(self, phone_number):
"""
Sets the phone_number attribute of this Location instance.
:param phone_number: The phone_number value to set for the
phone_number attribute of this Location instance.
:type: six.text_type
"""
self._phone_number = phone_number
@property
def map_coordinates(self):
"""
Gets the map_coordinates attribute of this Location instance.
:return: The value of the map_coordinates attribute of this
Location instance.
:rtype: six.text_type
"""
return self._map_coordinates
@map_coordinates.setter
def map_coordinates(self, map_coordinates):
"""
Sets the map_coordinates attribute of this Location instance.
:param map_coordinates: The map_coordinates value to set for the
map_coordinates attribute of this Location instance.
:type: six.text_type
"""
self._map_coordinates = map_coordinates
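# A minimal usage sketch (illustrative values, not part of the generated class):
#
#     location = Location(time_zone='Australia/Melbourne', city='Melbourne')
#     location.city                      # -> 'Melbourne'
#     location.city = 'Sydney'           # city has a setter (its READONLY value is "no")
#     location.time_zone_offset_minutes  # read-only: no setter was generated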
|
#coding:utf-8
#
# id: bugs.core_3475
# title: Parameters inside the CAST function are described as not nullable
# decription:
# tracker_id: CORE-3475
# min_versions: ['3.0']
# versions: 3.0
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 3.0
# resources: None
substitutions_1 = [('^((?!sqltype).)*$', ''), ('[ ]+', ' '), ('[\t]*', ' ')]
init_script_1 = """"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
test_script_1 = """
set planonly;
set sqlda_display;
select cast(null as int) v1, cast(? as int) v2 from rdb$database;
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
01: sqltype: 496 LONG Nullable scale: 0 subtype: 0 len: 4
01: sqltype: 496 LONG Nullable scale: 0 subtype: 0 len: 4
02: sqltype: 496 LONG Nullable scale: 0 subtype: 0 len: 4
"""
@pytest.mark.version('>=3.0')
def test_1(act_1: Action):
act_1.expected_stdout = expected_stdout_1
act_1.execute()
assert act_1.clean_stdout == act_1.clean_expected_stdout
|
import ssl
import asyncio
from ..client import Client
from ..common import hexlify
from . import to_int
async def subscriber(host,
port,
cafile,
check_hostname,
client_id,
topic,
keep_alive_s,
session_expiry_interval):
if cafile:
print(f"CA File: '{cafile}'")
print(f"Check hostname: {check_hostname}")
context = ssl.create_default_context(cafile=cafile)
context.check_hostname = check_hostname
else:
context = None
client = Client(host,
port,
client_id,
keep_alive_s=keep_alive_s,
session_expiry_interval=session_expiry_interval,
subscriptions=[topic],
topic_alias_maximum=10,
ssl=context)
while True:
print(f"Connecting to '{host}:{port}'.")
await client.start()
print('Connected.')
while True:
topic, message = await client.messages.get()
if topic is None:
print('Broker connection lost!')
break
print(f'Topic: {topic}')
print(f'Message: {hexlify(message)}')
await client.stop()
def _do_subscribe(args):
asyncio.run(subscriber(args.host,
args.port,
args.cafile,
not args.no_check_hostname,
args.client_id,
args.topic,
args.keep_alive,
args.session_expiry_interval))
def add_subparser(subparsers):
subparser = subparsers.add_parser('subscribe',
description='Subscribe for given topic.')
subparser.add_argument('--host',
default='localhost',
help='Broker host (default: %(default)s).')
subparser.add_argument('--port',
type=int,
default=1883,
help='Broker port (default: %(default)s).')
subparser.add_argument('--client-id',
help='Client id (default: mqttools-<UUID[0..14]>).')
subparser.add_argument('--keep-alive',
type=int,
default=0,
help=('Keep alive time in seconds (default: '
'%(default)s). Give as 0 to disable keep '
'alive.'))
subparser.add_argument(
'--session-expiry-interval',
default=0,
type=to_int,
help='Session expiry interval in the range 0..0xffffffff (default: %(default)s).')
subparser.add_argument(
'--cafile',
default='',
help='CA file.')
subparser.add_argument(
'--no-check-hostname',
action='store_true',
help='Do not check certificate hostname.')
subparser.add_argument('topic', help='Topic to subscribe for.')
subparser.set_defaults(func=_do_subscribe)
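# Example invocation (assumes this subparser is registered under the mqttools
# command line entry point; host and topic are illustrative):
#
#     mqttools subscribe --host localhost --port 1883 '/my/topic/#'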
|
from urllib.parse import urlparse
from ckan_cloud_operator import kubectl
from ckan_cloud_operator import logs
from ckan_cloud_operator.providers.routers import manager as routers_manager
def get_datapusher_url(instance_datapusher_url):
if instance_datapusher_url and len(instance_datapusher_url) > 10:
hostname = urlparse(instance_datapusher_url).hostname
if hostname.endswith('.l3.ckan.io'):
datapusher_name = hostname.replace('.l3.ckan.io', '')
elif hostname.endswith('.ckan.io'):
datapusher_name = hostname.replace('.ckan.io', '')
else:
logs.warning(f'failed to parse datapusher url from instance datapusher url: {instance_datapusher_url}')
datapusher_name = None
if datapusher_name:
routes = kubectl.get(
f'CkanCloudRoute -l ckan-cloud/route-datapusher-name={datapusher_name},ckan-cloud/route-type=datapusher-subdomain',
required=False
)
if routes:
routes = routes.get('items', [])
if len(routes) > 0:
assert len(routes) == 1
route = routes[0]
sub_domain = route['spec']['sub-domain']
root_domain = route['spec']['root-domain']
assert sub_domain and sub_domain != 'default', f'invalid sub_domain: {sub_domain}'
if not root_domain or root_domain == 'default':
default_root_domain = routers_manager.get_default_root_domain()
assert default_root_domain, 'missing routers default root domain'
root_domain = default_root_domain
return 'https://{}.{}/'.format(sub_domain, root_domain)
else:
logs.warning(f'failed to find route for datapusher: {datapusher_name}')
else:
logs.warning(f'failed to find route for datapusher: {datapusher_name}')
return None
|
"""
Copyright (c) 2018, Toby Slight. All rights reserved.
ISC License (ISCL) - see LICENSE file for details.
"""
import os
if os.name == 'nt':
CHARS = ['/', '"', ':', '<', '>', '^', '|', '*', '?']
else:
CHARS = ['\\', '"', ':', '<', '>', '^', '|', '*', '?']
def mknames(name):
"""
    Iterate over the char array to build names containing invalid chars.
"""
names = []
    for n, c in enumerate(CHARS):
        newname = name + str(n)
newname = c + " . " + "๐" + newname
newname = newname + "๐ " + " . " + c
newname = " . " + newname + " . "
names.append(newname)
return names
def mknodes(path):
"""
Use returned arrays from mknames to instantiate new filesystem nodes.
"""
dirs = mknames("testdir")
files = mknames("testfile")
for d in dirs:
dirpath = os.path.join(path, d)
if not os.path.exists(dirpath):
os.mkdir(dirpath)
for f in files:
filepath = os.path.join(path, f)
if not os.path.exists(filepath):
open(filepath, 'a').close()
def create(path, count, limit):
"""
Descend into directories to create children of specified depth.
"""
mknodes(path)
for root, dirs, files in os.walk(path):
if limit < 5: # creates depth level recursively in all directories....
if count < limit:
count = count + 1
for d in dirs:
create(os.path.join(root, d), count, limit)
# if more than 4, limit to one directory, as it will take too long...
else:
if count < limit:
count = count + 1
create(root, count, limit)
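# A minimal usage sketch (illustrative; builds the awkwardly named test tree inside
# a throwaway temporary directory, two shallow levels deep):
if __name__ == '__main__':
    import tempfile
    create(tempfile.mkdtemp(), 0, 1)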
|
import re
import gnupg
from django.conf import settings
from . import exceptions
GPG = None
def is_enabled():
return settings.SNOOP_GPG_HOME and settings.SNOOP_GPG_BINARY
def _get_gpg():
global GPG
if is_enabled():
if not GPG:
GPG = gnupg.GPG(gnupghome=settings.SNOOP_GPG_HOME,
gpgbinary=settings.SNOOP_GPG_BINARY)
return GPG
else:
raise RuntimeError("MALDINI_GPG_BINARY or MALDINI_GPG_HOME not set")
class DecryptionError(exceptions.BrokenDocument):
flag = 'pgp_decryption_failed'
def extract_pgp_block(content):
if isinstance(content, bytes):
content = content.decode('latin-1')
m = re.search(
r'-----BEGIN PGP MESSAGE-----[^-]+-----END PGP MESSAGE-----',
content, re.DOTALL)
if m:
return m.group(0)
else:
return None
def contains_pgp_block(content):
if isinstance(content, bytes):
try:
content = content.decode('latin-1')
except ValueError:
return False
m = re.search(r'-----BEGIN PGP MESSAGE-----', content)
return bool(m)
def decrypt_pgp_block(content, passphrase=None):
text_block = extract_pgp_block(content)
if not text_block:
return content
gpg = _get_gpg()
if passphrase:
decrypt = gpg.decrypt(text_block, passphrase=passphrase)
else:
decrypt = gpg.decrypt(text_block)
if decrypt.ok:
return decrypt.data
raise DecryptionError(decrypt.status)
|
#!/usr/bin/env python
import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
install_requires = [
'simplejson',
'pyaml',
'requests'
]
test_requires = [
'testtools',
'nose',
'mock',
]
setup(name='qubell-api-python-client',
version='1.32.30.3', # versionising: <major>.<minor>.<platform major>.<platform minor>
description='Qubell platform client library',
long_description=open('README').read(),
author='Vasyl Khomenko',
author_email='vkhomenko@qubell.com',
license=open('LICENSE').read(),
url='https://github.com/qubell/contrib-python-qubell-client',
packages=find_packages(exclude=['test_qubell_client', 'stories']),
package_data={'': ['LICENSE', 'README']},
include_package_data=True,
install_requires=install_requires,
tests_require=test_requires,
test_suite="nosetests",
)
|
from django.apps import AppConfig
class DjAuthConfig(AppConfig):
name = 'dj_auth'
def ready(self):
import dj_auth.signals
|
''' Helper class and functions for loading KITTI objects
Author: Charles R. Qi
Date: September 2017
Modified by Yurong You
Date: June 2019
'''
import os
import data_utils.kitti_util as utils
class kitti_object(object):
'''Load and parse object data into a usable format.'''
    def __init__(self, root_dir=None,
                 lidar_dir='velodyne',
                 label_dir='label_2', calib_dir='calib',
                 image_dir='image_2', right_image_dir='image_3'):
        # 'image_3' holds the right-camera images in the standard KITTI layout
        # (assumption; the original code used self.right_image_dir without defining it)
        self.image_dir = os.path.join(root_dir, image_dir) \
            if root_dir is not None else image_dir
        self.right_image_dir = os.path.join(root_dir, right_image_dir) \
            if root_dir is not None else right_image_dir
        self.label_dir = os.path.join(root_dir, label_dir) \
            if root_dir is not None else label_dir
        self.calib_dir = os.path.join(root_dir, calib_dir) \
            if root_dir is not None else calib_dir
        self.lidar_dir = os.path.join(root_dir, lidar_dir) \
            if root_dir is not None else lidar_dir
def get_image(self, idx):
img_filename = os.path.join(self.image_dir, '%06d.png'%(idx))
return utils.load_image(img_filename)
def get_right_image(self, idx):
img_filename = os.path.join(self.right_image_dir, '%06d.png'%(idx))
return utils.load_image(img_filename)
def get_lidar(self, idx):
lidar_filename = os.path.join(self.lidar_dir, '%06d.bin'%(idx))
return utils.load_velo_scan(lidar_filename)
def get_calibration(self, idx):
calib_filename = os.path.join(self.calib_dir, '%06d.txt'%(idx))
return utils.Calibration(calib_filename)
def get_label_objects(self, idx):
label_filename = os.path.join(self.label_dir, '%06d.txt'%(idx))
return utils.read_label(label_filename)
def get_lidar_in_image_fov(pc_velo, calib, xmin, ymin, xmax, ymax,
return_more=False, clip_distance=2.0):
''' Filter lidar points, keep those in image FOV '''
pts_2d = calib.project_velo_to_image(pc_velo)
fov_inds = (pts_2d[:,0]<xmax) & (pts_2d[:,0]>=xmin) & \
(pts_2d[:,1]<ymax) & (pts_2d[:,1]>=ymin)
fov_inds = fov_inds & (pc_velo[:,0]>clip_distance)
imgfov_pc_velo = pc_velo[fov_inds,:]
if return_more:
return imgfov_pc_velo, pts_2d, fov_inds
else:
return imgfov_pc_velo
def get_rect_in_image_fov(pc_rect, calib, xmin, ymin, xmax, ymax,
return_more=False, clip_distance=2.0):
''' Filter lidar points, keep those in image FOV '''
pts_2d = calib.project_rect_to_image(pc_rect)
fov_inds = (pts_2d[:,0]<xmax) & (pts_2d[:,0]>=xmin) & \
(pts_2d[:,1]<ymax) & (pts_2d[:,1]>=ymin)
fov_inds = fov_inds & (pc_rect[:,2]>clip_distance)
imgfov_pc = pc_rect[fov_inds,:]
if return_more:
return imgfov_pc, pts_2d, fov_inds
else:
return imgfov_pc
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Text RNN model stored as a SavedModel."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
import tensorflow.compat.v2 as tf
FLAGS = flags.FLAGS
flags.DEFINE_string("export_dir", None, "Directory to export SavedModel.")
class TextRnnModel(tf.train.Checkpoint):
"""Text RNN model.
A full generative text RNN model that can train and decode sentences from a
starting word.
"""
def __init__(self, vocab, emb_dim, buckets, state_size):
super(TextRnnModel, self).__init__()
self._buckets = buckets
self._lstm_cell = tf.keras.layers.LSTMCell(units=state_size)
self._rnn_layer = tf.keras.layers.RNN(
self._lstm_cell, return_sequences=True)
self._embeddings = tf.Variable(tf.random.uniform(shape=[buckets, emb_dim]))
self._logit_layer = tf.keras.layers.Dense(buckets)
self._set_up_vocab(vocab)
def _tokenize(self, sentences):
# Perform a minimalistic text preprocessing by removing punctuation and
# splitting on spaces.
normalized_sentences = tf.strings.regex_replace(
input=sentences, pattern=r"\pP", rewrite="")
sparse_tokens = tf.strings.split(normalized_sentences, " ").to_sparse()
# Deal with a corner case: there is one empty sentence.
sparse_tokens, _ = tf.sparse.fill_empty_rows(sparse_tokens, tf.constant(""))
# Deal with a corner case: all sentences are empty.
sparse_tokens = tf.sparse.reset_shape(sparse_tokens)
return (sparse_tokens.indices, sparse_tokens.values,
sparse_tokens.dense_shape)
def _set_up_vocab(self, vocab_tokens):
# TODO(vbardiovsky): Currently there is no real vocabulary, because
# saved_model serialization does not support trackable resources. Add a real
# vocabulary when it does.
vocab_list = ["UNK"] * self._buckets
for vocab_token in vocab_tokens:
index = self._words_to_indices(vocab_token).numpy()
vocab_list[index] = vocab_token
# This is a variable representing an inverse index.
self._vocab_tensor = tf.Variable(vocab_list)
def _indices_to_words(self, indices):
return tf.gather(self._vocab_tensor, indices)
def _words_to_indices(self, words):
return tf.strings.to_hash_bucket(words, self._buckets)
@tf.function(input_signature=[tf.TensorSpec([None], tf.dtypes.string)])
def train(self, sentences):
token_ids, token_values, token_dense_shape = self._tokenize(sentences)
tokens_sparse = tf.sparse.SparseTensor(
indices=token_ids, values=token_values, dense_shape=token_dense_shape)
tokens = tf.sparse.to_dense(tokens_sparse, default_value="")
sparse_lookup_ids = tf.sparse.SparseTensor(
indices=tokens_sparse.indices,
values=self._words_to_indices(tokens_sparse.values),
dense_shape=tokens_sparse.dense_shape)
lookup_ids = tf.sparse.to_dense(sparse_lookup_ids, default_value=0)
# Targets are the next word for each word of the sentence.
tokens_ids_seq = lookup_ids[:, 0:-1]
tokens_ids_target = lookup_ids[:, 1:]
tokens_prefix = tokens[:, 0:-1]
# Mask determining which positions we care about for a loss: all positions
# that have a valid non-terminal token.
mask = tf.logical_and(
tf.logical_not(tf.equal(tokens_prefix, "")),
tf.logical_not(tf.equal(tokens_prefix, "<E>")))
input_mask = tf.cast(mask, tf.int32)
with tf.GradientTape() as t:
sentence_embeddings = tf.nn.embedding_lookup(self._embeddings,
tokens_ids_seq)
lstm_initial_state = self._lstm_cell.get_initial_state(
sentence_embeddings)
lstm_output = self._rnn_layer(
inputs=sentence_embeddings, initial_state=lstm_initial_state)
# Stack LSTM outputs into a batch instead of a 2D array.
lstm_output = tf.reshape(lstm_output, [-1, self._lstm_cell.output_size])
logits = self._logit_layer(lstm_output)
targets = tf.reshape(tokens_ids_target, [-1])
weights = tf.cast(tf.reshape(input_mask, [-1]), tf.float32)
losses = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=targets, logits=logits)
# Final loss is the mean loss for all token losses.
final_loss = tf.math.divide(
tf.reduce_sum(tf.multiply(losses, weights)),
tf.reduce_sum(weights),
name="final_loss")
watched = t.watched_variables()
gradients = t.gradient(final_loss, watched)
for w, g in zip(watched, gradients):
w.assign_sub(g)
return final_loss
@tf.function
def decode_greedy(self, sequence_length, first_word):
initial_state = self._lstm_cell.get_initial_state(
dtype=tf.float32, batch_size=1)
sequence = [first_word]
current_word = first_word
current_id = tf.expand_dims(self._words_to_indices(current_word), 0)
current_state = initial_state
for _ in range(sequence_length):
token_embeddings = tf.nn.embedding_lookup(self._embeddings, current_id)
lstm_outputs, current_state = self._lstm_cell(token_embeddings,
current_state)
lstm_outputs = tf.reshape(lstm_outputs, [-1, self._lstm_cell.output_size])
logits = self._logit_layer(lstm_outputs)
softmax = tf.nn.softmax(logits)
next_ids = tf.math.argmax(softmax, axis=1)
next_words = self._indices_to_words(next_ids)[0]
current_id = next_ids
current_word = next_words
sequence.append(current_word)
return sequence
def main(argv):
del argv
sentences = ["<S> hello there <E>", "<S> how are you doing today <E>"]
vocab = [
"<S>", "<E>", "hello", "there", "how", "are", "you", "doing", "today"
]
module = TextRnnModel(vocab=vocab, emb_dim=10, buckets=100, state_size=128)
for _ in range(100):
_ = module.train(tf.constant(sentences))
# We have to call this function explicitly if we want it exported, because it
# has no input_signature in the @tf.function decorator.
decoded = module.decode_greedy(
sequence_length=10, first_word=tf.constant("<S>"))
_ = [d.numpy() for d in decoded]
tf.saved_model.save(module, FLAGS.export_dir)
if __name__ == "__main__":
app.run(main)
|
"""Functions related to CJK characters"""
import unicodedata
def count_cjk_chars(s):
"""Count numbers of CJK characters in a string.
Arg:
s (str): The string contains CJK characters.
Returns:
int: The number of CJK characters.
"""
    if not isinstance(s, str):
        raise TypeError("count_cjk_chars only accepts strings.")
counts = 0
for c in s:
if unicodedata.east_asian_width(c) in 'WF':
counts += 1
return counts
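# A minimal usage sketch; the sample strings below are illustrative assumptions,
# not taken from the original module.
if __name__ == "__main__":
    # Wide ('W') and fullwidth ('F') characters each count once.
    print(count_cjk_chars("漢字 abc"))  # expected: 2
    # Halfwidth katakana is classified 'H', so it is not counted.
    print(count_cjk_chars("ｶﾀｶﾅ"))  # expected: 0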
|
# coding=utf-8
__title__ = 'gmusicapi_scripts'
__version__ = "0.5.0"
__license__ = 'MIT'
__copyright__ = 'Copyright 2016 thebigmunch <mail@thebigmunch.me>'
|
import argparse
from scipy import signal
import pandas as pd
import numpy as np
def argument_parser():
parser = argparse.ArgumentParser(description='resample time series data')
parser.add_argument(
'filename', type=argparse.FileType('r'),
help='name of the file to convert')
    parser.add_argument('rate', type=int, help='sampling rate in seconds')
parser.add_argument(
'--output', '-o', type=argparse.FileType('w'), help='output file'
)
parser.add_argument(
'--format', '-f', help='timestamp format'
)
return parser
def main(argv=None):
args = argument_parser().parse_args(argv)
df = pd.read_csv(
args.filename,
parse_dates=['timestamp']
)
interval = (df.loc[1].timestamp - df.loc[0].timestamp).seconds
new_size = len(df) * interval // args.rate
new_timestamp = pd.date_range(
df.loc[0].timestamp,
periods=new_size,
freq=f'{args.rate}S'
)
if args.format:
new_timestamp = new_timestamp.map(lambda x: x.strftime(args.format))
new_value = signal.resample(df['value'], new_size)
new_df = pd.DataFrame({'timestamp': new_timestamp, 'value': new_value})
if args.output:
new_df.to_csv(args.output)
if __name__ == '__main__':
main()
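# Example invocation (illustrative file names; assumes the input CSV has
# 'timestamp' and 'value' columns and should be resampled to one row per 60 s):
#
#     python resample.py data.csv 60 -o resampled.csv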
|
def initials_only(first, middle, last):
initials = first[0]+middle[0]+last[0]
return initials
firstn = input("put your first name: ")
middlen = input("put your middle name: ")
lastn = input("put your last name: ")
initials=initials_only(firstn,middlen,lastn)
print("This is your initials,",initials)
|
# Generated by Django 3.1.5 on 2021-01-11 12:27
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('project_core', '0159_call_part_introductory_text_blank'),
]
operations = [
migrations.AlterField(
model_name='proposalscientificcluster',
name='title',
field=models.CharField(help_text='Title of the scientific cluster', max_length=500),
),
]
|
import numpy as np
A = np.array([[n+m*10 for n in range(5)] for m in range(5)])
np.dot(A, A)
"""
array([[ 300, 310, 320, 330, 340],
[1300, 1360, 1420, 1480, 1540],
[2300, 2410, 2520, 2630, 2740],
[3300, 3460, 3620, 3780, 3940],
[4300, 4510, 4720, 4930, 5140]])
"""
v1 = np.arange(0, 5)
np.dot(A, v1) # array([ 30, 130, 230, 330, 430])
np.dot(v1, v1) # 30
# We can also cast to the matrix type. That changes the behavior of the +, -, * operators to use matrix algebra
M = np.matrix(A)
v = np.matrix(v1).T  # column vector
v
"""
matrix([[0],
[1],
[2],
[3],
[4]])
"""
M * M
"""matrix([[ 300, 310, 320, 330, 340],
[1300, 1360, 1420, 1480, 1540],
[2300, 2410, 2520, 2630, 2740],
[3300, 3460, 3620, 3780, 3940],
[4300, 4510, 4720, 4930, 5140]])
"""
M * v
"""
matrix([[ 30],
[130],
[230],
[330],
[430]])
"""
v.T * v # matrix([[30]])
v + M*v
"""
matrix([[ 30],
[131],
[232],
[333],
[434]])
"""
# If we try to operate on elements with incompatible dimensions, we get an error
v = np.matrix([1,2,3,4,5,6]).T
v
"""
matrix([[1],
[2],
[3],
[4],
[5],
[6]])
"""
M * v # Error: ValueError: shapes (5,5) and (6,1) not aligned: 5 (dim 1) != 6 (dim 0)
|
import sys
args = sys.argv
length = len(args)
def wrong():
print('Available Arguments:')
    print(' [-h, --help, help] Open rx7 Documentation Page (PyPI Page)')
    print(' [color, colors] Open an HTML Page That Contains All Colors and Information About the style Class')
print(' (Works Offline & Online)')
print('-------')
print('More Features Will be Added Soon...')
if length != 2: #not length or
wrong()
elif args[1] in ('color','colors'):
import webbrowser
webbrowser.open_new_tab(f'{str(__file__)[:-11]}COLORS.html')
elif args[1] in ('-h','--help','help'):
import webbrowser
webbrowser.open_new_tab(f'https://pypi.org/project/rx7')
else:
wrong()
|
import numpy as np
from sklearn import preprocessing, neighbors
from sklearn.model_selection import train_test_split
import pandas as pd
df = pd.read_csv('breast-cancer-wisconsin.csv')
df.replace('?', -99999, inplace=True)
df.drop(['id'], axis=1, inplace=True)
X = np.array(df.drop(['class'], axis=1))
y = np.array(df['class'])
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
clf = neighbors.KNeighborsClassifier()
clf.fit(X_train, y_train)
accuracy = clf.score(X_test, y_test)
print(accuracy*100)
|
"""Contains logic to acquire urls from a CSV file
This file can be imported as a module and contains the following classes:
* CsvUrlsProvider - Provides access to urls from a CSV file
"""
import csv
import io
import typing
import pydantic
from .file_urls_provider import FileUrlsProvider
from ..models import UrlModel
class CsvUrlsProvider(FileUrlsProvider):
"""Provides access to urls from a CSV file
Methods
-------
generator()
        Property that returns a urls generator. The urls will be generated
        one at a time when iterated over.
_parse_csv_lines()
Yields one line at a time from the input CSV file.
_map_dict_to_model()
        Instantiate a model from a dict with a website's metadata.
"""
URL_COLUMN_NAME = "url"
REGEXP_COLUMN_NAME = "regexp"
DELIMITER = ","
def __init__(self, file_path: str, *args, **kwargs):
"""
Parameters
----------
file_path : str
Full path of the CSV file with the urls
        *args : typing.Tuple
            Non-keyword variable-length arguments
        **kwargs:
            Keyword variable-length arguments
"""
super().__init__(file_path, *args, **kwargs)
self._logger.debug("%s.__init__(...)", self.__class__.__name__)
self._regexp_column_name = kwargs.get(
self.REGEXP_COLUMN_NAME, self.REGEXP_COLUMN_NAME
)
self._url_column_name = kwargs.get(self.URL_COLUMN_NAME, self.URL_COLUMN_NAME)
self._delimiter = kwargs.get("delimiter", self.DELIMITER)
@property
def generator(self) -> typing.Generator[typing.Optional[UrlModel], None, None]:
self._logger.debug("%s.generator()", self.__class__.__name__)
return self._parse_csv_lines()
def _parse_csv_lines(
self,
) -> typing.Generator[typing.Optional[UrlModel], None, None]:
"""Yields one line at a time from the input CSV file."""
self._logger.debug("%s._parse_csv_lines()", self.__class__.__name__)
        # The CSV file could be huge, so it must be parsed one line at a time
        # to avoid running out of memory:
io_file = typing.cast(io.TextIOWrapper, self._file)
csv_reader = csv.DictReader(io_file, delimiter=self._delimiter)
line_index = 1
processed_lines_count = 0
for row in csv_reader:
self._logger.debug("row: %s", row)
data = {
"url": row[self._url_column_name],
"regexp": row[self._regexp_column_name],
}
url_model: typing.Optional[UrlModel] = self._map_dict_to_model(
data, line_index
)
line_index += 1
if url_model is None:
                # The row doesn't contain a valid url:
continue
# Process the row with the url metadata:
yield url_model
processed_lines_count += 1
self._logger.debug("The end of the file was found at line #'%s'", line_index)
self._logger.info(
"In total, there were processed %s urls rows",
processed_lines_count,
)
def _map_dict_to_model(
self, row: typing.Dict[str, str], line_index: int
) -> typing.Optional[UrlModel]:
"""Instantiate a model from a dict with an website's metadata.
After validate the website's metadata, it will initialize a UrlModel
with it.
Parameters
----------
row : typing.Dict[str, str]
            A dict that should contain url-related metadata
line_index : int
            Represents the 0-based index of the line in the input file
Returns
-------
typing.Optional[UrlModel]
            If a row could be loaded from the line and it contains the expected
            fields, a UrlModel instance with the data will be returned.
            If not, None will be returned.
"""
self._logger.debug("%s.__parse_row(...)", self.__class__.__name__)
try:
# Validate the row structure, and use the model to access its
# content:
return UrlModel(**row)
except pydantic.ValidationError:
self._logger.warning(
"Row in line '%s' is invalid or malformed. It must contain "
"all/just the expected fields. It will be ignored",
line_index,
)
return None
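# A minimal usage sketch (illustrative; the file name and CSV layout below are
# assumptions that simply mirror URL_COLUMN_NAME and REGEXP_COLUMN_NAME):
#
#     # urls.csv
#     # url,regexp
#     # https://example.com,Example Domain
#
#     provider = CsvUrlsProvider("urls.csv")
#     for url_model in provider.generator:
#         ...  # each item is a validated UrlModel; malformed rows are skipped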
|
from flask import Request as BaseRequest
from flex.utils.decorators import cached_property
class Request(BaseRequest):
@property
def input(self):
"""The submitted data. If the mimetype is :mimetype:`application/json`
this will contain the parsed JSON data or ``None``. Otherwise, returns
        the :attr:`form` attribute
"""
return self.get_json(cache=True) if self.is_json else self.form
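# A minimal usage sketch (illustrative; assumes a Flask app wired to use this
# subclass via `app.request_class = Request`):
#
#     @app.route('/submit', methods=['POST'])
#     def submit():
#         data = request.input  # parsed JSON for application/json, form data otherwise
#         ...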
|
"""The rules for skipping a recurring task."""
from dataclasses import dataclass
from typing import Optional
from jupiter.framework.errors import InputValidationError
from jupiter.framework.value import Value
@dataclass(frozen=True)
class RecurringTaskSkipRule(Value):
"""The rules for skipping a recurring task."""
_skip_rule: str
@staticmethod
def from_raw(recurring_task_skip_rule_raw: Optional[str]) -> 'RecurringTaskSkipRule':
"""Validate and clean the recurring task skip rule."""
if not recurring_task_skip_rule_raw:
raise InputValidationError("Expected the skip rule info to be non-null")
return RecurringTaskSkipRule(recurring_task_skip_rule_raw.strip().lower())
def __str__(self) -> str:
"""Transform this to a string version."""
return self._skip_rule
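# A minimal usage sketch (illustrative raw value, not taken from the jupiter codebase):
#
#     rule = RecurringTaskSkipRule.from_raw("  Every Other Week  ")
#     str(rule)  # -> "every other week" (input is stripped and lower-cased)
#
# Passing None or an empty string raises InputValidationError.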
|
import os
import shutil
import json
from utils import download_file, extract_file, copy_directory, remove_inner_ear_landmarks, crop_and_resize_image
with open('config.json', 'r') as f:
config = json.load(f)
data_dir = config['data_dir']
downloads_path = os.path.join(data_dir, config['downloads_dir'])
original_path = os.path.join(data_dir, config['original_dir'])
clean_path = os.path.join(data_dir, config['clean_dir'])
removed_path = os.path.join(data_dir, config['removed_dir'])
os.makedirs(data_dir, exist_ok=True)
# Download all
os.makedirs(downloads_path, exist_ok=True)
for filename, meta in config['dataset_urls'].items():
file_path = os.path.join(downloads_path, filename)
download_file(meta['url'], file_path, meta['md5'])
# Extract all
for filename in config['dataset_urls'].keys():
if filename.endswith('.zip'):
file_path = os.path.join(downloads_path, filename)
extract_file(file_path, original_path)
# Replace incorrect file
for replace_file in config['replace']:
print('Replacing "%s" with "%s"' % (os.path.join(replace_file['dir'], replace_file['filename_from']),
os.path.join(replace_file['dir'], replace_file['filename_to'])))
os.remove(os.path.join(original_path, replace_file['dir'], replace_file['filename_from']))
shutil.copyfile(os.path.join(downloads_path, replace_file['filename_to']),
os.path.join(original_path, replace_file['dir'], replace_file['filename_to']))
print('done.')
# Make a copy of the original dataset
copy_directory(original_path, clean_path)
# Remove duplicates, pictures depicting more than one cat, etc...
print('Cleaning...')
os.makedirs(removed_path)
for subdir in config['remove']:
for filename in config['remove'][subdir]:
path_from = os.path.join(clean_path, subdir, filename)
path_to = os.path.join(removed_path, subdir + '_' + filename)
os.rename(path_from, path_to)
os.rename(path_from + '.cat', path_to + '.cat')
print('done.')
# Remove landmarks 3, 5, 6, 8 (zero-based) - 2 inner points of each ear
print('Removing inner ear landmarks...')
cnt = 0
total = sum([len([f for f in os.listdir(os.path.join(clean_path, subdir)) if f.endswith('.cat')])
for subdir in os.listdir(clean_path)])
for i_subdir, subdir in enumerate(os.listdir(clean_path)):
subdir_path = os.path.join(clean_path, subdir)
for filename in os.listdir(subdir_path):
if filename.endswith('.cat'):
file_path = os.path.join(subdir_path, filename)
remove_inner_ear_landmarks(file_path)
cnt += 1
if not cnt % 100:
percent = cnt / total * 100
print('\r%.2f%% of %d' % (percent, total), end='')
print('\r100.00%% of %d' % total)
print('Splitting data into training/validation/test sets...')
cnt = 0
total = sum([len([fn for fn in os.listdir(os.path.join(clean_path, subdir)) if fn[-4:] in ('.cat', '.jpg')])
for subset in config['split'] for subdir in config['split'][subset]['subdirs']])
for subset in config['split']:
subset_path = os.path.join(clean_path, subset)
os.makedirs(subset_path)
for i_subdir, subdir in enumerate(config['split'][subset]['subdirs']):
subdir_path = os.path.join(clean_path, subdir)
operation = config['split'][subset]['operation']
for filename in os.listdir(subdir_path):
if filename[-4:] in ('.cat', '.jpg'):
file_path = os.path.join(subdir_path, filename)
file_path_subset = os.path.join(subset_path, subdir + '_' + filename)
if operation == 'move':
os.rename(file_path, file_path_subset)
elif operation == 'copy':
shutil.copyfile(file_path, file_path_subset)
cnt += 1
if not cnt % 100:
percent = cnt / total * 100
print('\r%.2f%% of %d' % (percent, total), end='')
if operation == 'move':
shutil.rmtree(subdir_path, ignore_errors=True)
print('\r100.00%% of %d' % total)
# Crop images in validation and test datasets to obtain uniformly distributed scales
print('Cropping subsets...')
cnt = 0
total = sum([len(l) for l in config['crop'].values()])
for subdir in config['crop']:
for filename, bounding_box in config['crop'][subdir].items():
file_path = os.path.join(clean_path, subdir, filename)
crop_and_resize_image(file_path, bounding_box, None, 'jpeg')
cnt += 1
if not cnt % 10:
percent = cnt / total * 100
print('\r%.2f%% of %d' % (percent, total), end='')
print('\r100.00%% of %d' % total)
# Crop and resize images in validation and test datasets for landmarks in ROI detection
print('Cropping and resizing landmarks subsets...')
cnt = 0
total = sum([len(l) for l in config['crop_landmarks'].values()])
for subdir in config['crop_landmarks']:
for filename, bounding_box in config['crop_landmarks'][subdir].items():
file_path = os.path.join(clean_path, subdir, filename)
crop_and_resize_image(file_path, bounding_box, config['img_size'], 'bmp')
cnt += 1
if not cnt % 10:
percent = cnt / total * 100
print('\r%.2f%% of %d' % (percent, total), end='')
print('\r100.00%% of %d' % total)
print('done.')
|
class Solution:
def findPivot(self, nums):
# find the total sum of nums
total = 0
for num in nums:
total += num
# keep a track of left sum
leftsum = 0
for i in range(len(nums)):
# if total - sum of all elements to left of current element - current element value == leftsum, we have a pivot index
if leftsum == total - leftsum - nums[i]:
return i
leftsum += nums[i]
return -1
def main():
mySol = Solution()
nums = [1, 2, -1, 5, 8, -6, 1, 12, 4, -3, -2, 1]
print("For the array " + str(nums) + " the pivot index is " + str(mySol.findPivot(nums)))
if __name__ == "__main__":
main()
|
import datetime
import numpy as np
import pandas as pd
if __name__ == '__main__':
# Load and process ratings
names = ['user', 'item', 'rating', 'timestamp']
dtype = {'user': str, 'item': str, 'rating': np.float64}
def date_parser(timestamp):
return datetime.datetime.fromtimestamp(float(timestamp))
df = pd.read_csv('ml-100k/u.data', sep='\t', names=names, dtype=dtype,
parse_dates=['timestamp'], date_parser=date_parser)
df['timestamp'] = df['timestamp'].map(lambda x: x.value)
df.sort_values(by='timestamp', inplace=True)
df.reset_index(drop=True, inplace=True)
# Load and process item infos
names = [
'item',
'title',
'release_date',
'video_release_date',
'imdb_url',
'unknown',
'action',
'adventure',
'animation',
'children_s',
'comedy',
'crime',
'documentary',
'drama',
'fantasy',
'film_noir',
'horror',
'musical',
'mystery',
'romance',
'sci_fi',
'thriller',
'war',
'western'
]
to_remove = ['video_release_date', 'imdb_url']
usecols = [name for name in names if name not in to_remove]
dtype = {name: np.uint8 for name in names[5:]}
dtype['item'] = str
item_infos = pd.read_csv('ml-100k/u.item', sep='|', engine='python', names=names, dtype=dtype,
usecols=usecols, parse_dates=['release_date'])
item_infos['release_date'] = item_infos['release_date'].map(lambda x: x.value)
item_infos['title'].replace('unknown', np.nan, inplace=True)
genres = item_infos.drop(columns=['item', 'title', 'release_date'])
item_infos['genres'] = genres.apply(
lambda row: ', '.join([genre for genre, occurs in zip(genres.columns, row) if occurs]),
axis=1
)
item_infos = item_infos[['item', 'title', 'release_date', 'genres']]
# Load and process user infos
names = ['user', 'age', 'gender', 'occupation', 'zip_code']
dtype = {'user': str, 'age': np.uint8}
user_infos = pd.read_csv('ml-100k/u.user', sep='|', names=names, dtype=dtype)
user_infos['occupation'].replace('none', np.nan, inplace=True)
# Merge everything together and save to csv file
df = df.merge(item_infos, how='left', on='item')
df = df.merge(user_infos, how='left', on='user')
df.to_csv('ml_100k.csv', sep='\t', index=False)
|
import torch
import torch.nn as nn
class encoder3(nn.Module):
def __init__(self, W, v2):
super(encoder3,self).__init__() # W - width
# vgg
# 224 x 224
self.conv1 = nn.Conv2d(3,3,1,1,0)
self.reflecPad1 = nn.ZeroPad2d((1,1,1,1))
# 226 x 226
self.conv2 = nn.Conv2d(3,32 if v2 else int(64*W),3,1,0)
self.relu2 = nn.ReLU(inplace=True)
# 224 x 224
self.reflecPad3 = nn.ZeroPad2d((1,1,1,1))
self.conv3 = nn.Conv2d(32 if v2 else int(64*W),int(64*W),3,1,0)
self.relu3 = nn.ReLU(inplace=True)
# 224 x 224
self.maxPool = nn.MaxPool2d(kernel_size=2,stride=2,return_indices = False)
# 112 x 112
self.reflecPad4 = nn.ZeroPad2d((1,1,1,1))
self.conv4 = nn.Conv2d(int(64*W),int(128*W),3,1,0)
self.relu4 = nn.ReLU(inplace=True)
# 112 x 112
self.reflecPad5 = nn.ZeroPad2d((1,1,1,1))
self.conv5 = nn.Conv2d(int(128*W),int(128*W),3,1,0)
self.relu5 = nn.ReLU(inplace=True)
# 112 x 112
self.maxPool2 = nn.MaxPool2d(kernel_size=2,stride=2,return_indices = False)
# 56 x 56
self.reflecPad6 = nn.ZeroPad2d((1,1,1,1))
self.conv6 = nn.Conv2d(int(128*W),int(256*W),3,1,0)
self.relu6 = nn.ReLU(inplace=True)
# 56 x 56
def forward(self,x):
x = x / 255.0
out = self.conv1(x)
out = self.reflecPad1(out)
out = self.conv2(out)
out = self.relu2(out)
out = self.reflecPad3(out)
out = self.conv3(out)
pool1 = self.relu3(out)
out = self.maxPool(pool1)
out = self.reflecPad4(out)
out = self.conv4(out)
out = self.relu4(out)
out = self.reflecPad5(out)
out = self.conv5(out)
pool2 = self.relu5(out)
out = self.maxPool2(pool2)
out = self.reflecPad6(out)
out = self.conv6(out)
out = self.relu6(out)
return out
class decoder3(nn.Module):
def __init__(self, W, v2):
super(decoder3,self).__init__()
# decoder
self.reflecPad7 = nn.ZeroPad2d((1,1,1,1))
self.conv7 = nn.Conv2d(int(256*W),int(128*W),3,1,0)
self.relu7 = nn.ReLU(inplace=True)
# 56 x 56
self.unpool = nn.UpsamplingNearest2d(scale_factor=2)
# 112 x 112
self.reflecPad8 = nn.ZeroPad2d((1,1,1,1))
self.conv8 = nn.Conv2d(int(128*W),int(128*W),3,1,0)
self.relu8 = nn.ReLU(inplace=True)
# 112 x 112
self.reflecPad9 = nn.ZeroPad2d((1,1,1,1))
self.conv9 = nn.Conv2d(int(128*W),int(64*W),3,1,0)
self.relu9 = nn.ReLU(inplace=True)
self.unpool2 = nn.UpsamplingNearest2d(scale_factor=2)
# 224 x 224
self.reflecPad10 = nn.ZeroPad2d((1,1,1,1))
self.conv10 = nn.Conv2d(int(64*W),32 if v2 else int(64*W),3,1,0)
self.relu10 = nn.ReLU(inplace=True)
self.reflecPad11 = nn.ZeroPad2d((1,1,1,1))
self.conv11 = nn.Conv2d(32 if v2 else int(64*W),3,3,1,0)
def forward(self,x):
output = {}
out = self.reflecPad7(x)
out = self.conv7(out)
out = self.relu7(out)
out = self.unpool(out)
out = self.reflecPad8(out)
out = self.conv8(out)
out = self.relu8(out)
out = self.reflecPad9(out)
out = self.conv9(out)
out = self.relu9(out)
out = self.unpool2(out)
out = self.reflecPad10(out)
out = self.conv10(out)
out = self.relu10(out)
out = self.reflecPad11(out)
out = self.conv11(out)
out = out.clamp(0,1)*255
return out
class CNN(nn.Module):
def __init__(self,W,matrixSize=32):
super(CNN,self).__init__()
# 256x64x64
self.convs = nn.Sequential(nn.Conv2d(int(256*W),int(128*W),3,1,1),
nn.ReLU(inplace=True),
nn.Conv2d(int(128*W),int(64*W),3,1,1),
nn.ReLU(inplace=True),
nn.Conv2d(int(64*W),matrixSize,3,1,1))
# 32x8x8
self.fc = nn.Linear(matrixSize*matrixSize,matrixSize*matrixSize)
def forward(self,x):
out = self.convs(x)
# 32x8x8
#b,c,h,w = out.size()
#print(1, b,c,h,w)
out = out.view(1,32, -1)
# 32x64
out = torch.bmm(out,out.transpose(1,2)).div(144*256)
#print(2,out.size())
# 32x32
out = out.view(1,-1)
return self.fc(out)
class MulLayer(nn.Module):
def __init__(self,W,matrixSize=32):
super(MulLayer,self).__init__()
self.snet = CNN(W,matrixSize)
self.cnet = CNN(W,matrixSize)
self.matrixSize = matrixSize
self.compress = nn.Conv2d(int(256*W),matrixSize,1,1,0)
self.unzip = nn.Conv2d(matrixSize,int(256*W),1,1,0)
self.transmatrix = None
def forward(self, cF, sF, alpha=1.0, trans=True):
#cFBK = cF.clone()
#cb, cc, ch, cw = cF.size()
cFF = cF.view(1, 64, -1)
cMean = torch.mean(cFF,dim=2,keepdim=True)
cMean = cMean.unsqueeze(3)
cF = cF - cMean
#sb, sc, sh, sw = sF.size()
sFF = sF.view(1, 64, -1)
sMean = torch.mean(sFF,dim=2,keepdim=True)
sMean = sMean.unsqueeze(3)
#self.sMeanC = sMean.expand_as(cF)
#sMeanS = sMean.expand_as(sF)
sF = sF - sMean
sF = sF * alpha + (1-alpha) * cF
compress_content = self.compress(cF)
#b,c,h,w = compress_content.size()
compress_content = compress_content.view(1,32,-1)
cMatrix = self.cnet(cF)
sMatrix = self.snet(sF)
sMatrix = sMatrix.view(1,self.matrixSize,self.matrixSize)
cMatrix = cMatrix.view(1,self.matrixSize,self.matrixSize)
self.transmatrix = torch.bmm(sMatrix,cMatrix)
transfeature = torch.bmm(self.transmatrix,compress_content).view(1,32,256,144)
out = self.unzip(transfeature.view(1,32,256,144))
out = out + sMean
return out
|
import z3
import tempfile
import random
from ..wire import Input, Output, Register, Const, WireVector
from ..fuzz.aflMutators import int2bin
from ..core import Block
from ..memory import RomBlock, MemBlock
def transfer_to_bin(value, bitwidth):
return "#b" + int2bin(value, bitwidth)
def translate_to_smt(block, output_file, circle=1, rom_blocks=None):
consts = dict()
for wire in list(block.wirevector_subset()):
if type(wire) == Const:
            # some consts have names of the form const_0_1'b1; is this a legal operation?
wire.name = wire.name.split("'").pop(0)
consts[wire.name] = wire
Declare = []
# write "Main"
# node_cntr = 0
initializedMem = []
##################################6/2
# if there are rom blocks, need to be initialized
if rom_blocks is not None:
for x in rom_blocks:
if x.name not in initializedMem:
initializedMem.append(x.name)
output_file.write("(declare-const %s (Array (_ BitVec %s) (_ BitVec %s)))\n" % (x.name, x.addrwidth, x.bitwidth))
# if rom data is a function, calculate the data first
if callable(x.data):
romdata = [x.data(i) for i in range(2 ** x.addrwidth)]
x.data = romdata
# write rom block initialization data
for i in range(len(x.data)):
output_file.write("(assert (= (store %s %s %s) %s))\n" % (x.name, transfer_to_bin(i, x.addrwidth), transfer_to_bin(x.data[i], x.bitwidth), x.name))
##################################
if circle == 1:
for log_net in list(block.logic_subset()):
if log_net.op == '&':
if log_net.dests[0].name not in Declare:
output_file.write("(declare-const %s (_ BitVec %s))\n" % (log_net.dests[0].name, log_net.dests[0].bitwidth))
Declare.append(log_net.dests[0].name)
if log_net.args[0].name not in Declare:
output_file.write("(declare-const %s (_ BitVec %s))\n" % (log_net.args[0].name, log_net.args[0].bitwidth))
Declare.append(log_net.args[0].name)
if log_net.args[1].name not in Declare:
output_file.write("(declare-const %s (_ BitVec %s))\n" % (log_net.args[1].name, log_net.args[1].bitwidth))
Declare.append(log_net.args[1].name)
output_file.write("(assert (= %s (bvand %s %s)))\n" % (
log_net.dests[0].name, log_net.args[0].name, log_net.args[1].name))
elif log_net.op == '|':
if log_net.dests[0].name not in Declare:
output_file.write("(declare-const %s (_ BitVec %s))\n" % (log_net.dests[0].name, log_net.dests[0].bitwidth))
Declare.append(log_net.dests[0].name)
if log_net.args[0].name not in Declare:
output_file.write("(declare-const %s (_ BitVec %s))\n" % (log_net.args[0].name, log_net.args[0].bitwidth))
Declare.append(log_net.args[0].name)
if log_net.args[1].name not in Declare:
output_file.write("(declare-const %s (_ BitVec %s))\n" % (log_net.args[1].name, log_net.args[1].bitwidth))
Declare.append(log_net.args[1].name)
output_file.write("(assert (= %s (bvor %s %s)))\n" % (
log_net.dests[0].name, log_net.args[0].name, log_net.args[1].name))
elif log_net.op == '^':
if log_net.dests[0].name not in Declare:
output_file.write("(declare-const %s (_ BitVec %s))\n" % (log_net.dests[0].name, log_net.dests[0].bitwidth))
Declare.append(log_net.dests[0].name)
if log_net.args[0].name not in Declare:
output_file.write("(declare-const %s (_ BitVec %s))\n" % (log_net.args[0].name, log_net.args[0].bitwidth))
Declare.append(log_net.args[0].name)
if log_net.args[1].name not in Declare:
output_file.write("(declare-const %s (_ BitVec %s))\n" % (log_net.args[1].name, log_net.args[1].bitwidth))
Declare.append(log_net.args[1].name)
output_file.write("(assert (= %s (bvxor %s %s)))\n" % (
log_net.dests[0].name, log_net.args[0].name, log_net.args[1].name))
elif log_net.op == 'n':
if log_net.dests[0].name not in Declare:
output_file.write("(declare-const %s (_ BitVec %s))\n" % (log_net.dests[0].name, log_net.dests[0].bitwidth))
Declare.append(log_net.dests[0].name)
if log_net.args[0].name not in Declare:
output_file.write("(declare-const %s (_ BitVec %s))\n" % (log_net.args[0].name, log_net.args[0].bitwidth))
Declare.append(log_net.args[0].name)
if log_net.args[1].name not in Declare:
output_file.write("(declare-const %s (_ BitVec %s))\n" % (log_net.args[1].name, log_net.args[1].bitwidth))
Declare.append(log_net.args[1].name)
output_file.write("(assert (= %s (bvnand %s %s)))\n" % (
log_net.dests[0].name, log_net.args[0].name, log_net.args[1].name))
elif log_net.op == '~':
if log_net.dests[0].name not in Declare:
output_file.write("(declare-const %s (_ BitVec %s))\n" % (log_net.dests[0].name, log_net.dests[0].bitwidth))
Declare.append(log_net.dests[0].name)
if log_net.args[0].name not in Declare:
output_file.write("(declare-const %s (_ BitVec %s))\n" % (log_net.args[0].name, log_net.args[0].bitwidth))
Declare.append(log_net.args[0].name)
output_file.write("(assert (= %s (bvnot %s)))\n" % (log_net.dests[0].name, log_net.args[0].name))
elif log_net.op == '+':
if log_net.dests[0].name not in Declare:
output_file.write("(declare-const %s (_ BitVec %s))\n" % (log_net.dests[0].name, log_net.dests[0].bitwidth))
Declare.append(log_net.dests[0].name)
if log_net.args[0].name not in Declare:
output_file.write("(declare-const %s (_ BitVec %s))\n" % (log_net.args[0].name, log_net.args[0].bitwidth))
Declare.append(log_net.args[0].name)
if log_net.args[1].name not in Declare:
output_file.write("(declare-const %s (_ BitVec %s))\n" % (log_net.args[1].name, log_net.args[1].bitwidth))
Declare.append(log_net.args[1].name)
a = ''
for i in range(0, 2):
if (log_net.args[i].name in consts) and (log_net.args[i].signed):
a = a + " (concat #b1 " + log_net.args[i].name + ") "
else:
a = a + " ((_ zero_extend 1) " + log_net.args[i].name + ") "
output_file.write("(assert (= %s (bvadd %s)))\n" % (log_net.dests[0].name, a))
elif log_net.op == '-':
if log_net.dests[0].name not in Declare:
output_file.write("(declare-const %s (_ BitVec %s))\n" % (log_net.dests[0].name, log_net.dests[0].bitwidth))
Declare.append(log_net.dests[0].name)
if log_net.args[0].name not in Declare:
output_file.write("(declare-const %s (_ BitVec %s))\n" % (log_net.args[0].name, log_net.args[0].bitwidth))
Declare.append(log_net.args[0].name)
if log_net.args[1].name not in Declare:
output_file.write("(declare-const %s (_ BitVec %s))\n" % (log_net.args[1].name, log_net.args[1].bitwidth))
Declare.append(log_net.args[1].name)
sub = ''
for i in range(0, 2):
if (log_net.args[i].name in consts) and (log_net.args[i].signed):
sub = sub + " (concat #b1 " + log_net.args[i].name + ") "
else:
sub = sub + " ((_ zero_extend 1) " + log_net.args[i].name + ") "
output_file.write("(assert (= %s (bvsub %s)))\n" % (log_net.dests[0].name, sub))
elif log_net.op == '*':
if log_net.dests[0].name not in Declare:
output_file.write("(declare-const %s (_ BitVec %s))\n" % (log_net.dests[0].name, log_net.dests[0].bitwidth))
Declare.append(log_net.dests[0].name)
if log_net.args[0].name not in Declare:
output_file.write("(declare-const %s (_ BitVec %s))\n" % (log_net.args[0].name, log_net.args[0].bitwidth))
Declare.append(log_net.args[0].name)
if log_net.args[1].name not in Declare:
output_file.write("(declare-const %s (_ BitVec %s))\n" % (log_net.args[1].name, log_net.args[1].bitwidth))
Declare.append(log_net.args[1].name)
mul = ''
for i in range(0, 2):
if (log_net.args[i].name in consts) and (log_net.args[i].signed):
mu = ''
for j in range(0, log_net.args[i].bitwidth):
mu = mu + '1'
mul = mul + " (concat #b" + mu + " " + log_net.args[i].name + ") "
else:
mul = mul + " ((_ zero_extend " + str(log_net.args[i].bitwidth) + ") " + log_net.args[
i].name + ") "
output_file.write("(assert (= %s (bvmul %s)))\n" % (log_net.dests[0].name, mul))
elif log_net.op == '=':
if log_net.dests[0].name not in Declare:
output_file.write("(declare-const %s (_ BitVec %s))\n" % (log_net.dests[0].name, log_net.dests[0].bitwidth))
Declare.append(log_net.dests[0].name)
if log_net.args[0].name not in Declare:
output_file.write("(declare-const %s (_ BitVec %s))\n" % (log_net.args[0].name, log_net.args[0].bitwidth))
Declare.append(log_net.args[0].name)
if log_net.args[1].name not in Declare:
output_file.write("(declare-const %s (_ BitVec %s))\n" % (log_net.args[1].name, log_net.args[1].bitwidth))
Declare.append(log_net.args[1].name)
output_file.write("(assert (ite (= %s %s) (= %s #b1) (= %s #b0)))\n" % (
log_net.args[0].name, log_net.args[1].name, log_net.dests[0].name, log_net.dests[0].name))
elif log_net.op == '<':
if log_net.dests[0].name not in Declare:
output_file.write("(declare-const %s (_ BitVec %s))\n" % (log_net.dests[0].name, log_net.dests[0].bitwidth))
Declare.append(log_net.dests[0].name)
if log_net.args[0].name not in Declare:
output_file.write("(declare-const %s (_ BitVec %s))\n" % (log_net.args[0].name, log_net.args[0].bitwidth))
Declare.append(log_net.args[0].name)
if log_net.args[1].name not in Declare:
output_file.write("(declare-const %s (_ BitVec %s))\n" % (log_net.args[1].name, log_net.args[1].bitwidth))
Declare.append(log_net.args[1].name)
output_file.write("(assert (ite (bvult %s %s) (= %s #b1) (= %s #b0)))\n" % (
log_net.args[0].name, log_net.args[1].name, log_net.dests[0].name, log_net.dests[0].name))
elif log_net.op == '>':
if log_net.dests[0].name not in Declare:
output_file.write("(declare-const %s (_ BitVec %s))\n" % (log_net.dests[0].name, log_net.dests[0].bitwidth))
Declare.append(log_net.dests[0].name)
if log_net.args[0].name not in Declare:
output_file.write("(declare-const %s (_ BitVec %s))\n" % (log_net.args[0].name, log_net.args[0].bitwidth))
Declare.append(log_net.args[0].name)
if log_net.args[1].name not in Declare:
output_file.write("(declare-const %s (_ BitVec %s))\n" % (log_net.args[1].name, log_net.args[1].bitwidth))
Declare.append(log_net.args[1].name)
output_file.write("(assert (ite (bvugt %s %s) (= %s #b1) (= %s #b0)))\n" % (
log_net.args[0].name, log_net.args[1].name, log_net.dests[0].name, log_net.dests[0].name))
elif log_net.op == 'w':
if log_net.dests[0].name not in Declare:
output_file.write("(declare-const %s (_ BitVec %s))\n" % (log_net.dests[0].name, log_net.dests[0].bitwidth))
Declare.append(log_net.dests[0].name)
if log_net.args[0].name not in Declare:
output_file.write("(declare-const %s (_ BitVec %s))\n" % (log_net.args[0].name, log_net.args[0].bitwidth))
Declare.append(log_net.args[0].name)
output_file.write("(assert (= %s %s))\n" % (log_net.dests[0].name, log_net.args[0].name))
elif log_net.op == 'x':
if log_net.dests[0].name not in Declare:
output_file.write("(declare-const %s (_ BitVec %s))\n" % (log_net.dests[0].name, log_net.dests[0].bitwidth))
Declare.append(log_net.dests[0].name)
if log_net.args[0].name not in Declare:
output_file.write("(declare-const %s (_ BitVec %s))\n" % (log_net.args[0].name, log_net.args[0].bitwidth))
Declare.append(log_net.args[0].name)
if log_net.args[1].name not in Declare:
output_file.write("(declare-const %s (_ BitVec %s))\n" % (log_net.args[1].name, log_net.args[1].bitwidth))
Declare.append(log_net.args[1].name)
if log_net.args[2].name not in Declare:
output_file.write("(declare-const %s (_ BitVec %s))\n" % (log_net.args[2].name, log_net.args[2].bitwidth))
Declare.append(log_net.args[2].name)
output_file.write("(assert (ite (= %s #b0) (= %s %s) (= %s %s)))\n" % (
log_net.args[0].name, log_net.dests[0].name, log_net.args[1].name, log_net.dests[0].name,
log_net.args[2].name))
elif log_net.op == 'c':
if log_net.dests[0].name not in Declare:
output_file.write("(declare-const %s (_ BitVec %s))\n" % (log_net.dests[0].name, log_net.dests[0].bitwidth))
Declare.append(log_net.dests[0].name)
c = ''
for i in range(len(log_net.args)):
if log_net.args[i].name not in Declare:
output_file.write("(declare-const %s (_ BitVec %s))\n" % (log_net.args[i].name, log_net.args[i].bitwidth))
Declare.append(log_net.args[i].name)
c = c + ' ' + log_net.args[i].name
output_file.write("(assert (= %s (concat %s)))\n" % (log_net.dests[0].name, c))
elif log_net.op == 's':
if log_net.dests[0].name not in Declare:
output_file.write("(declare-const %s (_ BitVec %s))\n" % (log_net.dests[0].name, log_net.dests[0].bitwidth))
Declare.append(log_net.dests[0].name)
if log_net.args[0].name not in Declare:
output_file.write("(declare-const %s (_ BitVec %s))\n" % (log_net.args[0].name, log_net.args[0].bitwidth))
Declare.append(log_net.args[0].name)
string = ''
for i in log_net.op_param[::-1]:
string = string + "((_ extract " + str(i) + " " + str(i) + ")" + " " + log_net.args[0].name + ") "
output_file.write("(assert (= %s (concat %s)))\n" % (log_net.dests[0].name, string))
elif log_net.op == 'm': ########6/2
if not log_net.op_param[1].name in initializedMem:
initializedMem.append(log_net.op_param[1].name)
output_file.write("(declare-const %s (Array (_ BitVec %s) (_ BitVec %s)))\n" % (
log_net.op_param[1].name, log_net.op_param[1].addrwidth,
log_net.op_param[1].bitwidth))
if log_net.dests[0].name not in Declare:
output_file.write(
"(declare-const %s (_ BitVec %s))\n" % (log_net.dests[0].name, log_net.dests[0].bitwidth))
Declare.append(log_net.dests[0].name)
if log_net.args[0].name not in Declare:
output_file.write("(declare-const %s (_ BitVec %s))\n" % (log_net.args[0].name, log_net.args[0].bitwidth))
Declare.append(log_net.args[0].name)
output_file.write("(assert (= (select %s %s) %s))\n" % (
log_net.op_param[1].name, log_net.args[0].name, log_net.dests[0].name))
# node_cntr += 1
elif log_net.op == '@':
if not log_net.op_param[1].name in initializedMem:
initializedMem.append(log_net.op_param[1].name)
output_file.write("(declare-const %s (Array (_ BitVec %s) (_ BitVec %s)))\n" % (
log_net.op_param[1].name, log_net.op_param[1].addrwidth,
log_net.op_param[1].bitwidth))
if log_net.args[0].name not in Declare:
output_file.write("(declare-const %s (_ BitVec %s))\n" % (log_net.args[0].name, log_net.args[0].bitwidth))
Declare.append(log_net.args[0].name)
if log_net.args[1].name not in Declare:
output_file.write("(declare-const %s (_ BitVec %s))\n" % (log_net.args[1].name, log_net.args[1].bitwidth))
Declare.append(log_net.args[1].name)
if log_net.args[2].name not in Declare:
output_file.write("(declare-const %s (_ BitVec %s))\n" % (log_net.args[2].name, log_net.args[2].bitwidth))
Declare.append(log_net.args[2].name)
output_file.write("(assert (ite (= %s #b1) (= (store %s %s %s) %s) (= %s %s)))\n" % (log_net.args[2].name, log_net.op_param[1].name, log_net.args[0].name, log_net.args[1].name, log_net.op_param[1].name, log_net.op_param[1].name, log_net.op_param[1].name))
# node_cntr += 1
else:
pass
else:
for cir in range(0, circle):
for log_net in list(block.logic_subset()):
if log_net.op == '&':
if log_net.dests[0].name + '_' + str(cir) not in Declare:
output_file.write("(declare-const %s_%s (_ BitVec %s))\n" % (
log_net.dests[0].name, cir, log_net.dests[0].bitwidth))
Declare.append(log_net.dests[0].name + '_' + str(cir))
if log_net.args[0].name + '_' + str(cir) not in Declare:
output_file.write("(declare-const %s_%s (_ BitVec %s))\n" % (
log_net.args[0].name, cir, log_net.args[0].bitwidth))
Declare.append(log_net.args[0].name + '_' + str(cir))
if log_net.args[1].name + '_' + str(cir) not in Declare:
output_file.write("(declare-const %s_%s (_ BitVec %s))\n" % (
log_net.args[1].name, cir, log_net.args[1].bitwidth))
Declare.append(log_net.args[1].name + '_' + str(cir))
output_file.write("(assert (= %s_%s (bvand %s_%s %s_%s)))\n" % (log_net.dests[0].name, cir, log_net.args[0].name, cir, log_net.args[1].name, cir))
elif log_net.op == '|':
if log_net.dests[0].name + '_' + str(cir) not in Declare:
output_file.write("(declare-const %s_%s (_ BitVec %s))\n" % (
log_net.dests[0].name, cir, log_net.dests[0].bitwidth))
Declare.append(log_net.dests[0].name + '_' + str(cir))
if log_net.args[0].name + '_' + str(cir) not in Declare:
output_file.write("(declare-const %s_%s (_ BitVec %s))\n" % (
log_net.args[0].name, cir, log_net.args[0].bitwidth))
Declare.append(log_net.args[0].name + '_' + str(cir))
if log_net.args[1].name + '_' + str(cir) not in Declare:
output_file.write("(declare-const %s_%s (_ BitVec %s))\n" % (
log_net.args[1].name, cir, log_net.args[1].bitwidth))
Declare.append(log_net.args[1].name + '_' + str(cir))
output_file.write("(assert (= %s_%s (bvor %s_%s %s_%s)))\n" % (log_net.dests[0].name, cir, log_net.args[0].name, cir, log_net.args[1].name, cir))
elif log_net.op == '^':
if log_net.dests[0].name + '_' + str(cir) not in Declare:
output_file.write("(declare-const %s_%s (_ BitVec %s))\n" % (
log_net.dests[0].name, cir, log_net.dests[0].bitwidth))
Declare.append(log_net.dests[0].name + '_' + str(cir))
if log_net.args[0].name + '_' + str(cir) not in Declare:
output_file.write("(declare-const %s_%s (_ BitVec %s))\n" % (
log_net.args[0].name, cir, log_net.args[0].bitwidth))
Declare.append(log_net.args[0].name + '_' + str(cir))
if log_net.args[1].name + '_' + str(cir) not in Declare:
output_file.write("(declare-const %s_%s (_ BitVec %s))\n" % (
log_net.args[1].name, cir, log_net.args[1].bitwidth))
Declare.append(log_net.args[1].name + '_' + str(cir))
output_file.write("(assert (= %s_%s (bvxor %s_%s %s_%s)))\n" % (log_net.dests[0].name, cir, log_net.args[0].name, cir, log_net.args[1].name, cir))
elif log_net.op == 'n':
if log_net.dests[0].name + '_' + str(cir) not in Declare:
output_file.write("(declare-const %s_%s (_ BitVec %s))\n" % (
log_net.dests[0].name, cir, log_net.dests[0].bitwidth))
Declare.append(log_net.dests[0].name + '_' + str(cir))
if log_net.args[0].name + '_' + str(cir) not in Declare:
output_file.write("(declare-const %s_%s (_ BitVec %s))\n" % (
log_net.args[0].name, cir, log_net.args[0].bitwidth))
Declare.append(log_net.args[0].name + '_' + str(cir))
if log_net.args[1].name + '_' + str(cir) not in Declare:
output_file.write("(declare-const %s_%s (_ BitVec %s))\n" % (
log_net.args[1].name, cir, log_net.args[1].bitwidth))
Declare.append(log_net.args[1].name + '_' + str(cir))
output_file.write("(assert (= %s_%s (bvnand %s_%s %s_%s)))\n" % (log_net.dests[0].name, cir, log_net.args[0].name, cir, log_net.args[1].name, cir))
elif log_net.op == '~':
if log_net.dests[0].name + '_' + str(cir) not in Declare:
output_file.write("(declare-const %s_%s (_ BitVec %s))\n" % (
log_net.dests[0].name, cir, log_net.dests[0].bitwidth))
Declare.append(log_net.dests[0].name + '_' + str(cir))
if log_net.args[0].name + '_' + str(cir) not in Declare:
output_file.write("(declare-const %s_%s (_ BitVec %s))\n" % (
log_net.args[0].name, cir, log_net.args[0].bitwidth))
Declare.append(log_net.args[0].name + '_' + str(cir))
output_file.write("(assert (= %s_%s (bvnot %s_%s)))\n" % (log_net.dests[0].name, cir, log_net.args[0].name, cir))
elif log_net.op == '+':
a = ''
for i in range(0, 2):
if (log_net.args[i].name in consts) and (log_net.args[i].signed):
a = a + " (concat #b1 " + log_net.args[i].name + '_' + str(cir) + ") "
else:
a = a + " ((_ zero_extend 1) " + log_net.args[i].name + '_' + str(cir) + ") "
if log_net.dests[0].name + '_' + str(cir) not in Declare:
output_file.write("(declare-const %s_%s (_ BitVec %s))\n" % (
log_net.dests[0].name, cir, log_net.dests[0].bitwidth))
Declare.append(log_net.dests[0].name + '_' + str(cir))
if log_net.args[0].name + '_' + str(cir) not in Declare:
output_file.write("(declare-const %s_%s (_ BitVec %s))\n" % (
log_net.args[0].name, cir, log_net.args[0].bitwidth))
Declare.append(log_net.args[0].name + '_' + str(cir))
if log_net.args[1].name + '_' + str(cir) not in Declare:
output_file.write("(declare-const %s_%s (_ BitVec %s))\n" % (
log_net.args[1].name, cir, log_net.args[1].bitwidth))
Declare.append(log_net.args[1].name + '_' + str(cir))
output_file.write("(assert (= %s_%s (bvadd %s)))\n" % (log_net.dests[0].name, cir, a))
elif log_net.op == '-':
sub = ''
for i in range(0, 2):
if (log_net.args[i].name in consts) and (log_net.args[i].signed):
sub = sub + " (concat #b1 " + log_net.args[i].name + '_' + str(cir) + ") "
else:
sub = sub + " ((_ zero_extend 1) " + log_net.args[i].name + '_' + str(cir) + ") "
if log_net.dests[0].name + '_' + str(cir) not in Declare:
output_file.write("(declare-const %s_%s (_ BitVec %s))\n" % (
log_net.dests[0].name, cir, log_net.dests[0].bitwidth))
Declare.append(log_net.dests[0].name + '_' + str(cir))
if log_net.args[0].name + '_' + str(cir) not in Declare:
output_file.write("(declare-const %s_%s (_ BitVec %s))\n" % (
log_net.args[0].name, cir, log_net.args[0].bitwidth))
Declare.append(log_net.args[0].name + '_' + str(cir))
if log_net.args[1].name + '_' + str(cir) not in Declare:
output_file.write("(declare-const %s_%s (_ BitVec %s))\n" % (
log_net.args[1].name, cir, log_net.args[1].bitwidth))
Declare.append(log_net.args[1].name + '_' + str(cir))
output_file.write("(assert (= %s_%s (bvsub %s)))\n" % (log_net.dests[0].name, cir, sub))
elif log_net.op == '*':
mul = ''
for i in range(0, 2):
if (log_net.args[i].name in consts) and (log_net.args[i].signed):
mu = ''
for j in range(0, log_net.args[i].bitwidth):
mu = mu + '1'
mul = mul + " (concat #b" + mu + " " + log_net.args[i].name + '_' + str(cir) + ") "
else:
mul = mul + " ((_ zero_extend " + str(log_net.args[i].bitwidth) + ") " + log_net.args[
i].name + '_' + str(cir) + ") "
if log_net.dests[0].name + '_' + str(cir) not in Declare:
output_file.write("(declare-const %s_%s (_ BitVec %s))\n" % (
log_net.dests[0].name, cir, log_net.dests[0].bitwidth))
Declare.append(log_net.dests[0].name + '_' + str(cir))
if log_net.args[0].name + '_' + str(cir) not in Declare:
output_file.write("(declare-const %s_%s (_ BitVec %s))\n" % (
log_net.args[0].name, cir, log_net.args[0].bitwidth))
Declare.append(log_net.args[0].name + '_' + str(cir))
if log_net.args[1].name + '_' + str(cir) not in Declare:
output_file.write("(declare-const %s_%s (_ BitVec %s))\n" % (
log_net.args[1].name, cir, log_net.args[1].bitwidth))
Declare.append(log_net.args[1].name + '_' + str(cir))
output_file.write("(assert (= %s_%s (bvmul %s)))\n" % (log_net.dests[0].name, cir, mul))
elif log_net.op == '=':
if log_net.dests[0].name + '_' + str(cir) not in Declare:
output_file.write("(declare-const %s_%s (_ BitVec %s))\n" % (
log_net.dests[0].name, cir, log_net.dests[0].bitwidth))
Declare.append(log_net.dests[0].name + '_' + str(cir))
if log_net.args[0].name + '_' + str(cir) not in Declare:
output_file.write("(declare-const %s_%s (_ BitVec %s))\n" % (
log_net.args[0].name, cir, log_net.args[0].bitwidth))
Declare.append(log_net.args[0].name + '_' + str(cir))
if log_net.args[1].name + '_' + str(cir) not in Declare:
output_file.write("(declare-const %s_%s (_ BitVec %s))\n" % (
log_net.args[1].name, cir, log_net.args[1].bitwidth))
Declare.append(log_net.args[1].name + '_' + str(cir))
output_file.write("(assert (ite (= %s_%s %s_%s) (= %s_%s #b1) (= %s_%s #b0)))\n" % (log_net.args[0].name, cir, log_net.args[1].name, cir, log_net.dests[0].name, cir,
log_net.dests[0].name, cir))
elif log_net.op == '<':
if log_net.dests[0].name + '_' + str(cir) not in Declare:
output_file.write("(declare-const %s_%s (_ BitVec %s))\n" % (
log_net.dests[0].name, cir, log_net.dests[0].bitwidth))
Declare.append(log_net.dests[0].name + '_' + str(cir))
if log_net.args[0].name + '_' + str(cir) not in Declare:
output_file.write("(declare-const %s_%s (_ BitVec %s))\n" % (
log_net.args[0].name, cir, log_net.args[0].bitwidth))
Declare.append(log_net.args[0].name + '_' + str(cir))
if log_net.args[1].name + '_' + str(cir) not in Declare:
output_file.write("(declare-const %s_%s (_ BitVec %s))\n" % (
log_net.args[1].name, cir, log_net.args[1].bitwidth))
Declare.append(log_net.args[1].name + '_' + str(cir))
output_file.write("(assert (ite (bvult %s_%s %s_%s) (= %s_%s #b1) (= %s_%s #b0)))\n" % (log_net.args[0].name, cir, log_net.args[1].name, cir, log_net.dests[0].name, cir,
log_net.dests[0].name, cir))
elif log_net.op == '>':
if log_net.dests[0].name + '_' + str(cir) not in Declare:
output_file.write("(declare-const %s_%s (_ BitVec %s))\n" % (
log_net.dests[0].name, cir, log_net.dests[0].bitwidth))
Declare.append(log_net.dests[0].name + '_' + str(cir))
if log_net.args[0].name + '_' + str(cir) not in Declare:
output_file.write("(declare-const %s_%s (_ BitVec %s))\n" % (
log_net.args[0].name, cir, log_net.args[0].bitwidth))
Declare.append(log_net.args[0].name + '_' + str(cir))
if log_net.args[1].name + '_' + str(cir) not in Declare:
output_file.write("(declare-const %s_%s (_ BitVec %s))\n" % (
log_net.args[1].name, cir, log_net.args[1].bitwidth))
Declare.append(log_net.args[1].name + '_' + str(cir))
output_file.write("(assert (ite (bvugt %s_%s %s_%s) (= %s_%s #b1) (= %s_%s #b0)))\n" % (log_net.args[0].name, cir, log_net.args[1].name, cir, log_net.dests[0].name, cir,
log_net.dests[0].name, cir))
elif log_net.op == 'w':
if log_net.dests[0].name + '_' + str(cir) not in Declare:
output_file.write("(declare-const %s_%s (_ BitVec %s))\n" % (
log_net.dests[0].name, cir, log_net.dests[0].bitwidth))
Declare.append(log_net.dests[0].name + '_' + str(cir))
if log_net.args[0].name + '_' + str(cir) not in Declare:
output_file.write("(declare-const %s_%s (_ BitVec %s))\n" % (
log_net.args[0].name, cir, log_net.args[0].bitwidth))
Declare.append(log_net.args[0].name + '_' + str(cir))
output_file.write("(assert (= %s_%s %s_%s))\n" % (log_net.dests[0].name, cir, log_net.args[0].name, cir))
elif log_net.op == 'x':
if log_net.dests[0].name + '_' + str(cir) not in Declare:
output_file.write("(declare-const %s_%s (_ BitVec %s))\n" % (
log_net.dests[0].name, cir, log_net.dests[0].bitwidth))
Declare.append(log_net.dests[0].name + '_' + str(cir))
if log_net.args[0].name + '_' + str(cir) not in Declare:
output_file.write("(declare-const %s_%s (_ BitVec %s))\n" % (
log_net.args[0].name, cir, log_net.args[0].bitwidth))
Declare.append(log_net.args[0].name + '_' + str(cir))
if log_net.args[1].name + '_' + str(cir) not in Declare:
output_file.write("(declare-const %s_%s (_ BitVec %s))\n" % (
log_net.args[1].name, cir, log_net.args[1].bitwidth))
Declare.append(log_net.args[1].name + '_' + str(cir))
if log_net.args[2].name + '_' + str(cir) not in Declare:
output_file.write("(declare-const %s_%s (_ BitVec %s))\n" % (
log_net.args[2].name, cir, log_net.args[2].bitwidth))
Declare.append(log_net.args[2].name + '_' + str(cir))
output_file.write("(assert (ite (= %s_%s #b0) (= %s_%s %s_%s) (= %s_%s %s_%s)))\n" % (log_net.args[0].name, cir, log_net.dests[0].name, cir, log_net.args[1].name, cir,log_net.dests[0].name, cir, log_net.args[2].name, cir))
elif log_net.op == 'c':
if log_net.dests[0].name + '_' + str(cir) not in Declare:
output_file.write("(declare-const %s_%s (_ BitVec %s))\n" % (
log_net.dests[0].name, cir, log_net.dests[0].bitwidth))
Declare.append(log_net.dests[0].name + '_' + str(cir))
c = ''
for i in range(len(log_net.args)):
if log_net.args[i].name + '_' + str(cir) not in Declare:
output_file.write("(declare-const %s_%s (_ BitVec %s))\n" % (log_net.args[i].name, str(cir), log_net.args[i].bitwidth))
Declare.append(log_net.args[i].name + '_' + str(cir))
c = c + ' ' + log_net.args[i].name + '_' + str(cir)
output_file.write("(assert (= %s_%s (concat %s)))\n" % (log_net.dests[0].name, str(cir), c))
elif log_net.op == 's':
string = ''
for i in log_net.op_param[::-1]:
string = string + "((_ extract " + str(i) + " " + str(i) + ")" + " " + log_net.args[
0].name + '_' + str(cir) + ") "
if log_net.dests[0].name + '_' + str(cir) not in Declare:
output_file.write("(declare-const %s_%s (_ BitVec %s))\n" % (
log_net.dests[0].name, cir, log_net.dests[0].bitwidth))
Declare.append(log_net.dests[0].name + '_' + str(cir))
if log_net.args[0].name + '_' + str(cir) not in Declare:
output_file.write("(declare-const %s_%s (_ BitVec %s))\n" % (
log_net.args[0].name, cir, log_net.args[0].bitwidth))
Declare.append(log_net.args[0].name + '_' + str(cir))
output_file.write("(assert (= %s_%s (concat %s)))\n" % (log_net.dests[0].name, cir, string))
elif log_net.op == 'r':
if log_net.dests[0].name + '_' + str(cir) not in Declare:
output_file.write("(declare-const %s_%s (_ BitVec %s))\n" % (
log_net.dests[0].name, cir, log_net.dests[0].bitwidth))
Declare.append(log_net.dests[0].name + '_' + str(cir))
if log_net.args[0].name + '_' + str(cir) not in Declare:
output_file.write("(declare-const %s_%s (_ BitVec %s))\n" % (
log_net.args[0].name, cir, log_net.args[0].bitwidth))
Declare.append(log_net.args[0].name + '_' + str(cir))
if cir == 0:
pass
else:
output_file.write(
"(assert (= %s_%s %s_%s))\n" % (log_net.dests[0].name, cir, log_net.args[0].name, cir - 1))
elif log_net.op == 'm': #####6/2
# mem.append(log_net.op_param[1].name + "_" + str(cir))
if log_net.op_param[1].name not in initializedMem:
initializedMem.append(log_net.op_param[1].name)
output_file.write("(declare-const %s (Array (_ BitVec %s) (_ BitVec %s)))\n" % (
log_net.op_param[1].name, log_net.op_param[1].addrwidth,
log_net.op_param[1].bitwidth))
if log_net.dests[0].name + '_' + str(cir) not in Declare:
output_file.write("(declare-const %s_%s (_ BitVec %s))\n" % (
log_net.dests[0].name, cir, log_net.dests[0].bitwidth))
Declare.append(log_net.dests[0].name + '_' + str(cir))
if log_net.args[0].name + '_' + str(cir) not in Declare:
output_file.write("(declare-const %s_%s (_ BitVec %s))\n" % (
log_net.args[0].name, cir, log_net.args[0].bitwidth))
Declare.append(log_net.args[0].name + '_' + str(cir))
output_file.write("(assert (= (select %s %s_%s) %s_%s))\n" % (log_net.op_param[1].name, log_net.args[0].name, cir, log_net.dests[0].name, cir))
# node_cntr += 1
elif log_net.op == '@':
                    if not log_net.op_param[1].name in initializedMem:
                        initializedMem.append(log_net.op_param[1].name)
output_file.write("(declare-const %s (Array (_ BitVec %s) (_ BitVec %s)))\n" % (
log_net.op_param[1].name, log_net.op_param[1].addrwidth,
log_net.op_param[1].bitwidth))
if log_net.args[0].name + '_' + str(cir) not in Declare:
output_file.write("(declare-const %s_%s (_ BitVec %s))\n" % (
log_net.args[0].name, cir, log_net.args[0].bitwidth))
Declare.append(log_net.args[0].name + '_' + str(cir))
if log_net.args[1].name + '_' + str(cir) not in Declare:
output_file.write("(declare-const %s_%s (_ BitVec %s))\n" % (
log_net.args[1].name, cir, log_net.args[1].bitwidth))
Declare.append(log_net.args[1].name + '_' + str(cir))
if log_net.args[2].name + '_' + str(cir) not in Declare:
output_file.write("(declare-const %s_%s (_ BitVec %s))\n" % (
log_net.args[2].name, cir, log_net.args[2].bitwidth))
Declare.append(log_net.args[2].name + '_' + str(cir))
output_file.write("(assert (ite (= %s_%s #b1) (= (store %s %s_%s %s_%s) %s)) (= %s %s))\n" % (log_net.args[2].name, cir, log_net.op_param[1].name, log_net.args[0].name, cir, log_net.args[1].name, cir, log_net.op_param[1].name, log_net.op_param[1].name, log_net.op_param[1].name))
# node_cntr += 1
else:
pass
if circle == 1:
for i in consts:
if consts[i].signed:
con = bin(pow(2, consts[i].bitwidth) - consts[i].val)
zero = ""
for j in range(0, consts[i].bitwidth - len(con) + 2):
zero = zero + "0"
output_file.write("(assert (= %s (bvneg %s)))\n" % (consts[i].name, "#b" + zero + con[2:]))
else:
con = bin(consts[i].val)
zero = ""
for j in range(0, consts[i].bitwidth - len(con) + 2):
zero = zero + "0"
output_file.write("(assert (= %s %s))\n" % (consts[i].name, "#b" + zero + bin(consts[i].val)[2:]))
else:
for cir in range(0, circle):
for i in consts:
if consts[i].signed:
con = bin(pow(2, consts[i].bitwidth) - consts[i].val)
zero = ""
for j in range(0, consts[i].bitwidth - len(con) + 2):
zero = zero + "0"
output_file.write("(assert (= %s_%s (bvneg %s)))\n" % (consts[i].name, cir, "#b" + zero + con[2:]))
else:
con = bin(consts[i].val)
zero = ""
for j in range(0, consts[i].bitwidth - len(con) + 2):
zero = zero + "0"
output_file.write("(assert (= %s_%s %s))\n" % (consts[i].name, cir, "#b" + zero + bin(consts[i].val)[2:]))
return 0
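# For example, a single '&' net with arguments a, b and destination d (4 bits each)
# produces SMT-LIB of roughly this shape when circle == 1:
#   (declare-const d (_ BitVec 4))
#   (declare-const a (_ BitVec 4))
#   (declare-const b (_ BitVec 4))
#   (assert (= d (bvand a b)))
# With circle > 1 every wire name gets a "_<cycle>" suffix, and each register
# ('r' op) at cycle i is constrained to equal its input at cycle i-1.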
##################################################################
# get inputs for n cycles
##################################################################
def gen_inputs_for_n_cycles(block, n=1):
inps_cycles = []
for i in block.wirevector_subset(Input):
if n == 1:
inp_cycle = i.name
inps_cycles.append(inp_cycle)
else:
for cycle in range(n):
inp_cycle = i.name + "_%s" % str(cycle)
inps_cycles.append(inp_cycle)
return inps_cycles
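# Hypothetical example: for a block with Input wires a and b, n=2 yields names
# like ['a_0', 'a_1', 'b_0', 'b_1'] (order follows the block's wire set), while
# n=1 returns the bare names ['a', 'b'].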
def gen_outputs_for_n_cycles(block, n=1):
otps_cycles = []
for i in block.wirevector_subset(Output):
if n == 1:
otp_cycle = i.name
otps_cycles.append(otp_cycle)
else:
for cycle in range(n):
otp_cycle = i.name + "_%s" % str(cycle)
otps_cycles.append(otp_cycle)
return otps_cycles
def get_value_name(value):
s = value.split('_')[0:-1]
if len(s) == 1:
return s[0]
else:
signal = s[0]
for i in range(1, len(s)):
signal = signal + '_' + s[i]
return signal
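# Examples (assuming the "<wire_name>_<cycle>" suffix convention used above):
#   get_value_name('a_0')           -> 'a'
#   get_value_name('counter_reg_3') -> 'counter_reg'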
def get_value_bitwidth(block, value):
for i in block.wirevector_subset():
if get_value_name(value) == i.name:
return i.bitwidth
    print('error: %s is not in block.' % get_value_name(value))
return 0
# mem=[]
# mux={mux1:[name, bitwidth], mux2:...}
# mux_clock = {mux1:[0,1,0,1,...], mux2:[1,0,1,0,...]}
# initial_values={a_0:'0', b_0:'1',...}
def solve_smt(block, mux, mux_clock, cycle, initial_values=None, rom_blocks=None):
inputs = gen_inputs_for_n_cycles(block, cycle)
with tempfile.TemporaryFile(mode='w+') as output_file:
translate_to_smt(block, output_file, cycle, rom_blocks)
for i in mux:
if cycle == 1:
output_file.write("(assert (= %s %s))\n" % (mux[i][0], transfer_to_bin(mux_clock[i][0], mux[i][1])))
else:
for c in range(0, cycle):
output_file.write("(assert (= %s_%s %s))\n" % (mux[i][0], c, transfer_to_bin(mux_clock[i][c], mux[i][1])))
if initial_values is None:
for i in block.wirevector_subset(Register):
output_file.write("(assert (= %s_0 %s))\n" % (i.name, transfer_to_bin(0, i.bitwidth)))
else:
for i in initial_values:
output_file.write("(assert (= %s %s))\n" % (i, transfer_to_bin(initial_values[i], get_value_bitwidth(block, i))))
output_file.seek(0)
l = output_file.read()
inps = dict()
otps = dict()
s = z3.Solver()
s.add(z3.parse_smt2_string(l))
if s.check() == z3.sat:
m = s.model()
for i in range(0, len(m)):
if m[i].name() in inputs:
inps[m[i].name()] = m[m[i]].as_long()
for i in inputs:
if i not in inps:
#inps[i] = random.randint(0, 2**get_value_bitwidth(block, i) - 1)
inps[i] = 0
#outputs = gen_outputs_for_n_cycles(block, cycle)
#for i in range(0, len(m)):
# if m[i].name() in outputs:
# otps[m[i].name()] = m[m[i]].as_long()
#print(otps)
# for i in range(0, len(m)):
# if m[i].name() in mem:
# otps[m[i].name()] = m[m[i]].as_long()
# print(otps)
return inps
else:
return {}
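# Hypothetical usage sketch, following the mux / mux_clock / initial_values layout
# documented in the comments above solve_smt (all names are made up for illustration):
#   mux = {'mux1': ['sel', 1]}          # selector wire name and its bitwidth
#   mux_clock = {'mux1': [0, 1, 0]}     # selector value for each of 3 cycles
#   initial_values = {'r_0': 0}         # register r starts at 0 in cycle 0
#   inps = solve_smt(block, mux, mux_clock, 3, initial_values)
# solve_smt returns a dict mapping (per-cycle) input wire names to satisfying
# integer values, or an empty dict when the constraints are unsatisfiable.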
|
#server code
import socket, cv2, pickle,struct,imutils
# Socket Create
server_socket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
# host_ip = '169.254.250.37'
host_name = socket.gethostname()
host_ip = socket.gethostbyname(host_name)
print('HOST IP:',host_ip)
port = 9999
socket_address = (host_ip,port)
# Socket Bind
server_socket.bind(socket_address)
# Socket Listen
server_socket.listen(5)
print("LISTENING AT:",socket_address)
# Socket Accept
while True:
client_socket,addr = server_socket.accept()
print('Connection From:',addr)
if client_socket:
vid = cv2.VideoCapture(0)
while(vid.isOpened()):
            ret,frame = vid.read()  # first return value is a success flag, not an image
            if not ret:
                break
            frame = imutils.resize(frame,width=320)
            a = pickle.dumps(frame)
            message = struct.pack("Q",len(a))+a
            client_socket.sendall(message)
            cv2.imshow('Video Being Transmitted',frame)
            key = cv2.waitKey(1) & 0xFF
            if key ==ord('q'):
                client_socket.close()
                break
#client_socket.close()
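# A matching receiver would read the 8-byte "Q" length prefix first, keep
# receiving until it has that many payload bytes, then unpickle them into a
# frame (sketch only, not part of this script):
#   payload_size = struct.calcsize("Q")          # 8 bytes
#   data = b""
#   while len(data) < payload_size:
#       data += client_socket.recv(4096)
#   msg_size = struct.unpack("Q", data[:payload_size])[0]
#   # ...then read msg_size more bytes and pass them to pickle.loads().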
|
from .s3_book_image import S3BookImageStorage
|
"""Unit test package for smart_pandas."""
|
# -*- coding: utf-8 -*-
"""urls.py: messages extends"""
from django.conf.urls import patterns, url
urlpatterns = patterns('',
url(r'^mark_read/(?P<message_id>\d+)/$', 'messages_extends.views.message_mark_read', name='message_mark_read'),
url(r'^mark_read/all/$', 'messages_extends.views.message_mark_all_read', name='message_mark_all_read'),
)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import json
import os
from tqdm import tqdm
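# Reads a file with one JSON object per line (each expected to contain at least a
# "question" field) and writes a single JSON file mapping 1-based line numbers to
# the corresponding question strings.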
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--question_file', type=str)
parser.add_argument('-o', '--out_file', type=str)
args = parser.parse_args()
question_file = args.question_file
q_dict = dict()
for n, line in tqdm(enumerate(open(question_file), 1)):
data = json.loads(line)
question = data['question']
q_dict[n] = question
doc_base, doc_ext = os.path.splitext(question_file)
out_file = args.out_file or doc_base + '.questions' + doc_ext
with open(out_file, 'w', encoding='utf-8') as f:
f.write(json.dumps(q_dict, sort_keys=True, indent=2))
print('all done.')
|
from assurvey.survey.models import *
import csv
s = Survey.objects.get(slug='furry-philosophy')
all_questions = [
'How old are you?',
'What gender do you identify as?',
'Does your gender identity align with your sex assigned at birth?',
'What species is your primary character?',
'Where in the world are you located?',
'What religion do you identify with?',
'If we cannot observe something, it may as well not exist.',
'There is/are no God, gods, or deities.',
'There is no such thing as truth, only opinions.',
'Morality is a social construct.',
'Sometimes, it is better for individuals to have rights taken away, in order to protect others within a society.',
'When in Rome, do as the Romans do.',
'People require a national identity to understand themselves.',
'The only things we can be said to know are things we have experienced.',
'Science is the surest path to knowledge.',
'Some races require different treatment than others.',
'It is impossible for us to know if other beings are self-aware.',
'Without a belief in God, any action becomes permissible.',
'It is better to hold false but comforting beliefs, than to know something disturbing.',
'The ends can justify the means.',
'Everything has a scientific, naturalistic explanation.',
'There is an objective reality, which exists independently from us.',
'The most important goal of life is to become happy.',
'People posses free will, and are able to make choices as individual agents.',
'Certain things, such as racism, sexism, and homophobia, are always wrong, regardless of context.',
'The media positively contributes to liberal democracy.',
'We posses no knowledge at birth.',
'Our perceptions accurately represent reality.',
'Free market economics is the best way to distribute wealth.',
'Different societies ought to keep to themselves.',
]
philosophy_questions = [
u'If we cannot observe something, it may as well not exist.',
u'There is/are no God, gods, or deities.',
u'There is no such thing as truth, only opinions.',
u'Morality is a social construct.',
u'Sometimes, it is better for individuals to have rights taken away, in order to protect others within a society.',
u'When in Rome, do as the Romans do.',
u'People require a national identity to understand themselves.',
u'The only things we can be said to know are things we have experienced.',
u'Science is the surest path to knowledge.',
u'Some races require different treatment than others.',
u'It is impossible for us to know if other beings are self-aware.',
u'Without a belief in God, any action becomes permissible.',
u'It is better to hold false but comforting beliefs, than to know something disturbing.',
u'The ends can justify the means.',
u'Everything has a scientific, naturalistic explanation.',
u'There is an objective reality, which exists independently from us.',
u'The most important goal of life is to become happy.',
u'People posses free will, and are able to make choices as individual agents.',
u'Certain things, such as racism, sexism, and homophobia, are always wrong, regardless of context.',
u'The media positively contributes to liberal democracy.',
u'We posses no knowledge at birth.',
u'Our perceptions accurately represent reality.',
u'Free market economics is the best way to distribute wealth.',
u'Different societies ought to keep to themselves.',
]
with open('furry-philosophy.csv', 'wb') as f:
w = csv.DictWriter(f, fieldnames=all_questions)
w.writeheader()
for r in s.surveyresponse_set.all():
if 'c' in map(lambda x: x.action, r.responsetouchpoint_set.all()):
p_r = map(lambda x: {'text': x.question.text.encode('ascii', 'ignore'), 'answer': x.value}, r.answer_set.all())
response = {}
for p in p_r:
if p['answer'] and p['answer'] != '_other_':
if p['text'] in response:
response[p['text']] += '-' + p['answer']
else:
response[p['text']] = p['answer']
ignore = False
for q in philosophy_questions:
if q not in response:
ignore = True
break
if not ignore:
w.writerow(response)
|
# Generated by Django 3.1.6 on 2021-02-07 07:59
import multiselectfield.db.fields
from django.conf import settings
from django.db import migrations, models
try:
if getattr(settings, "ADMIN_CHARTS_USE_JSONFIELD", True):
from django.db.models import JSONField
else:
from jsonfield.fields import JSONField
except ImportError:
from jsonfield.fields import JSONField
class Migration(migrations.Migration):
dependencies = [
("admin_tools_stats", "0011_auto_20210204_1206"),
]
operations = [
migrations.AddField(
model_name="dashboardstats",
name="allowed_type_operation_field_name",
field=multiselectfield.db.fields.MultiSelectField(
blank=True,
choices=[
("Count", "Count"),
("Sum", "Sum"),
("Avg", "Avgerage"),
("AvgCountPerInstance", "Avgerage count per active model instance"),
("Max", "Max"),
("Min", "Min"),
("StdDev", "StdDev"),
("Variance", "Variance"),
],
help_text="choose the type operation what you want to aggregate, ex. Sum",
max_length=1000,
null=True,
verbose_name="Choose Type operation",
),
),
migrations.AlterField(
model_name="dashboardstats",
name="allowed_time_scales",
field=multiselectfield.db.fields.MultiSelectField(
choices=[
("hours", "Hours"),
("days", "Days"),
("weeks", "Weeks"),
("months", "Months"),
("years", "Years"),
],
default=("hours", "days", "weeks", "months", "years"),
max_length=1000,
verbose_name="Allowed time scales",
),
),
migrations.AlterField(
model_name="dashboardstatscriteria",
name="criteria_dynamic_mapping",
field=JSONField(
blank=True,
help_text='a JSON dictionary with records in two following possible formats:<br/>"key_value": "name"<br/>"key": [value, "name"]<br/>use blank key for no filter<br/>Example:<br/><pre>{<br/> "": [null, "All"],<br/> "True": [true, "True"],<br/> "False": [false, "False"]<br/>}</pre><br/>Left blank to exploit all choices of CharField with choices',
null=True,
verbose_name="dynamic criteria / value",
),
),
migrations.AlterField(
model_name="dashboardstatscriteria",
name="criteria_fix_mapping",
field=JSONField(
blank=True,
help_text="a JSON dictionary of key-value pairs that will be used for the criteria",
null=True,
verbose_name="fixed criteria / value",
),
),
]
|
""""
``test fstab``
================
"""
from insights.parsers import fstab
from insights.tests import context_wrap
FS_TAB_DATA = ['#',
'# /etc/fstab',
'# Created by anaconda on Fri May 6 19:51:54 2016',
'#',
'/dev/mapper/rhel_hadoop--test--1-root / xfs defaults 0 0',
'UUID=2c839365-37c7-4bd5-ac47-040fba761735 /boot xfs defaults 0 0',
'/dev/mapper/rhel_hadoop--test--1-home /home xfs defaults 0 0',
'/dev/mapper/rhel_hadoop--test--1-swap swap swap defaults 0 0',
' ',
'/dev/sdb1 /hdfs/data1 xfs rw,relatime,seclabel,attr2,inode64,noquota 0 0',
'/dev/sdc1 /hdfs/data2 xfs rw,relatime,seclabel,attr2,inode64,noquota 0 0',
'/dev/sdd1 /hdfs/data3 xfs rw,relatime,seclabel,attr2,inode64,noquota 0 0',
'localhost:/ /mnt/hdfs nfs rw,vers=3,proto=tcp,nolock,timeo=600 0 0',
' ',
'/dev/mapper/vg0-lv2 /test1 ext4 defaults,data=writeback 1 1',
'nfs_hostname.example.com:/nfs_share/data /srv/rdu/data/000 nfs ro,defaults,hard,intr,bg,noatime,nodev,nosuid,nfsvers=3,tcp,rsize=32768,wsize=32768 0']
content_fstab_without_mntopts = """
#
# /etc/fstab
# Created by anaconda on Mon Dec 5 14:53:47 2016
#
# Accessible filesystems, by reference, are maintained under '/dev/disk'
# See man pages fstab(5), findfs(8), mount(8) and/or blkid(8) for more info
#
/dev/mapper/vg_osbase-lv_root / ext4 defaults 1 1
UUID=05ce4fc3-04c3-4111-xxxx /boot ext4 defaults 1 2
/dev/mapper/vg_osbase-lv_home /home ext4 defaults 1 2
/dev/mapper/vg_osbase-lv_tmp /tmp ext4 defaults 1 2
## default mount options##
/dev/foo /foo somefs
###SIMBOX MOUNT###
192.168.48.65:/cellSiteData /ceSiteData nfs
/dev/vg_data/lv_pg /var/opt/rh/rh-postgresql95/lib/pgsql xfs rw,noatime 0 0
"""
def test_fstab():
context = context_wrap(FS_TAB_DATA)
results = fstab.FSTab(context)
assert results is not None
assert len(results) == 10
sdb1 = None
nfs_host = None
for result in results:
if result.fs_spec == "/dev/sdb1":
sdb1 = result
elif result.fs_spec.startswith("nfs_hostname.example.com:"):
nfs_host = result
elif result.fs_spec.startswith("/dev/mapper/vg0"):
dev_vg0 = result
assert sdb1 is not None
assert sdb1.fs_file == "/hdfs/data1"
assert sdb1.fs_vfstype == "xfs"
assert sdb1.fs_mntops.rw
assert sdb1.fs_mntops.relatime
assert 'noquota' in sdb1.fs_mntops
assert sdb1.fs_freq == 0
assert sdb1.fs_passno == 0
assert sdb1.raw == '/dev/sdb1 /hdfs/data1 xfs rw,relatime,seclabel,attr2,inode64,noquota 0 0'
assert nfs_host is not None
assert nfs_host.fs_spec == "nfs_hostname.example.com:/nfs_share/data"
assert nfs_host.fs_file == "/srv/rdu/data/000"
assert nfs_host.fs_vfstype == "nfs"
assert nfs_host.fs_mntops.ro
assert nfs_host.fs_mntops.hard
assert 'bg' in nfs_host.fs_mntops
assert nfs_host.fs_mntops.rsize == "32768"
assert nfs_host.fs_freq == 0
assert nfs_host.fs_passno == 0
assert dev_vg0.fs_mntops.data == 'writeback'
assert dev_vg0.raw == '/dev/mapper/vg0-lv2 /test1 ext4 defaults,data=writeback 1 1'
for opt, v in dev_vg0.fs_mntops.items():
if opt.startswith('data'):
assert v == 'writeback'
assert results.mounted_on['/hdfs/data1'] == sdb1
assert results.mounted_on['/srv/rdu/data/000'] == nfs_host
# Test keyword searches - from examples
assert results.search(fs_file='/') == [l for l in results if l.fs_file == '/']
assert results.search(fs_spec__startswith='LABEL=') == [l for l in results if l.fs_spec.startswith('LABEL')]
assert results.search(fs_mntops__contains='uid') == [l for l in results if 'uid' in l.fs_mntops]
assert results.search(fs_vfstype='xfs', fs_mntops__contains='relatime') == [l for l in results if l.fs_vfstype == 'xfs' and 'relatime' in l.fs_mntops]
results = fstab.FSTab(context_wrap(content_fstab_without_mntopts))
sitedata_mount_list = [result for result in results if result.fs_file == "/ceSiteData"]
assert len(sitedata_mount_list) == 1
sitedata_mount = sitedata_mount_list[0]
assert sitedata_mount.fs_mntops['defaults'] is True
assert sitedata_mount.fs_vfstype == "nfs"
assert sitedata_mount.fs_spec == "192.168.48.65:/cellSiteData"
FSTAB_WITH_BLANK_IN_PATH = [
r'/dev/sda2 / ext4 1 1 # work',
r'/dev/sdb3 /var/crash ext4 defaults 1 1',
r'/dev/sdb5 /l\040ok/at ext4 defaults 1 1',
r'/dev/sdb7 /sdb7ok/at ext4 defaults',
r'/dev/sdba /sdbal\040ok/ab\040ta ext4,a,b defaults,c,d 1 1',
]
def test_fstab_with_blank_in_path():
fstab_info = fstab.FSTab(context_wrap(FSTAB_WITH_BLANK_IN_PATH))
assert ([l.fs_file for l in fstab_info.search(fs_file__contains='ok')] ==
['/l ok/at', '/sdb7ok/at', '/sdbal ok/ab ta'])
FSTAB_DEVICE_PATH_TEST_INFO = [
r'/dev/sda2 / ext4 defaults 1 1',
r'/dev/sdb2 /var ext4 defaults 1 1',
r'/dev/sdb3 /var/crash ext4 defaults 1 1',
r'/dev/sdb4 /abc/def ext4 defaults 1 1',
r'/dev/mapper/VolGroup-lv_usr /usr ext4 defaults 1 1',
r'UUID=qX0bSg-p8CN-cWER-i8qY-cETN-jiZL-LDt93V /kdump ext4 defaults 1 2',
r'/dev/mapper/VolGroup-lv_swap swap swap defaults 0 0',
r'proc /proc proc defaults 0 0',
r'/dev/mapper/vgext-lv--test /lv_test ext3 defaults 0 0',
r'/dev/sdb5 /l\040ok/at ext4 defaults 1 1',
]
def test_fsspec_of_path():
fstab_info = fstab.FSTab(context_wrap(FSTAB_DEVICE_PATH_TEST_INFO))
path_device_map = {'/var/crash': '/dev/sdb3',
'/var/some/path': '/dev/sdb2',
'/var/crash_xxx': '/dev/sdb2',
'/kdump/crash': 'UUID=qX0bSg-p8CN-cWER-i8qY-cETN-jiZL-LDt93V',
'/some/path': '/dev/sda2',
'/lv_test': '/dev/mapper/vgext-lv--test',
'/lv': '/dev/sda2',
'/': '/dev/sda2',
'error': None,
'/abc': '/dev/sda2',
'/abc/xxx': '/dev/sda2',
'/tmp/vm tools': '/dev/sda2',
'/l ok/at/you': '/dev/sdb5',
'/l ok': '/dev/sda2', # dict treat '/l\040ok' same as '/l ok'
r'/l\040ok': '/dev/sda2',
}
for path, dev in path_device_map.items():
assert dev == fstab_info.fsspec_of_path(path)
|
from constants import *
import os
import numpy as np
import pandas as pd
from sklearn import preprocessing
from sklearn.metrics import classification_report
def can_ignore(file, key):
if key in file:
return True
return False
def flatten(binary_labels):
return np.argmax(binary_labels, axis=1)
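# Example: converts one-hot (or probability) rows back to class indices, e.g.
#   flatten(np.array([[0, 1, 0], [1, 0, 0]])) -> array([1, 0])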
def test(labels, features, metadata, model, clazzes, title="test"):
probabilities = model.predict(features, verbose=0)
expected = flatten(labels)
actual = flatten(probabilities)
print("\n## {title}\n".format(title=title))
max_probabilities = np.amax(probabilities, axis=1)
print("Average confidence: {average}\n".format(
average=np.mean(max_probabilities)))
errors = pd.DataFrame(np.zeros((len(clazzes), len(GENDERS)), dtype=int),
index=clazzes, columns=GENDERS)
threshold_errors = pd.DataFrame(
np.zeros((len(clazzes), len(GENDERS)), dtype=int),
index=clazzes,
columns=GENDERS)
threshold_scores = pd.DataFrame(
np.zeros((len(clazzes), len(GENDERS)), dtype=int),
index=clazzes,
columns=GENDERS)
for index in range(len(actual)):
clazz = metadata[index][LANGUAGE_INDEX]
gender = metadata[index][GENDER_INDEX]
if actual[index] != expected[index]:
errors[gender][clazz] += 1
        # compare the winning class probability (not the class index) to the threshold
        if max_probabilities[index] >= THRESHOLD:
if actual[index] != expected[index]:
threshold_errors[gender][clazz] += 1
if actual[index] == expected[index]:
threshold_scores[gender][clazz] += 1
print("Amount of errors by gender:")
print(errors, "\n")
print("Amount of errors by gender (threshold {0}):".format(THRESHOLD))
print(threshold_errors, "\n")
print("Amount of scores by gender (threshold {0}):".format(THRESHOLD))
print(threshold_scores, "\n")
print(classification_report(expected, actual, target_names=clazzes))
def load_data(label_binarizer, input_dir, group, fold_indexes, input_shape):
all_metadata = []
all_features = []
for fold_index in fold_indexes:
filename = "{group}_metadata.fold{index}.npy".format(
group=group, index=fold_index)
metadata = np.load(os.path.join(input_dir, filename))
filename = "{group}_data.fold{index}.npy".format(
group=group, index=fold_index)
features = np.memmap(
os.path.join(input_dir, filename),
dtype=DATA_TYPE,
mode='r',
shape=(len(metadata),) + input_shape)
all_metadata.append(metadata)
all_features.append(features)
all_metadata = np.concatenate(all_metadata)
all_features = np.concatenate(all_features)
all_labels = label_binarizer.transform(all_metadata[:, 0])
print("[{group}] labels: {labels}, features: {features}".format(
group=group, labels=all_labels.shape, features=all_features.shape))
return all_labels, all_features, all_metadata
def build_label_binarizer():
label_binarizer = preprocessing.LabelBinarizer()
label_binarizer.fit(LANGUAGES)
clazzes = list(label_binarizer.classes_)
print("Classes:", clazzes)
return label_binarizer, clazzes
def train_generator(fold_count, input_dir, input_shape, max_iterations=1):
label_binarizer, clazzes = build_label_binarizer()
fold_indexes = list(range(1, fold_count + 1))
iteration = 0
for fold_index in fold_indexes:
train_fold_indexes = fold_indexes.copy()
train_fold_indexes.remove(fold_index)
train_labels, train_features, train_metadata = load_data(
label_binarizer,
input_dir,
'train',
train_fold_indexes,
input_shape)
test_fold_indexes = [fold_index]
test_labels, test_features, test_metadata = load_data(
label_binarizer,
input_dir,
'train',
test_fold_indexes,
input_shape)
yield (train_labels, train_features, test_labels,
test_features, test_metadata, clazzes)
del train_labels
del train_features
del train_metadata
del test_labels
del test_features
del test_metadata
iteration += 1
if iteration == max_iterations:
return
def remove_extension(file):
return os.path.splitext(file)[0]
def get_filename(file):
return os.path.basename(remove_extension(file))
def group_uids(files):
uids = dict()
    # initialize empty sets
for language in LANGUAGES:
uids[language] = dict()
for gender in GENDERS:
uids[language][gender] = set()
# extract uids and append to language/gender sets
for file in files:
info = get_filename(file).split('_')
language = info[0]
gender = info[1]
uid = info[2].split('.')[0]
uids[language][gender].add(uid)
# convert sets to lists
for language in LANGUAGES:
for gender in GENDERS:
uids[language][gender] = sorted(list(uids[language][gender]))
return uids
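# Hypothetical example, assuming filenames follow "<language>_<gender>_<uid>.<ext>"
# and that the language/gender values appear in the LANGUAGES/GENDERS constants:
#   group_uids(['english_female_001.npy', 'english_male_002.npy'])
#   -> {'english': {'female': ['001'], 'male': ['002'], ...}, ...}
# (every language/gender combination is present; unmatched ones stay empty lists)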
if __name__ == "__main__":
generator = train_generator(3, 'fb', (FB_HEIGHT, WIDTH, COLOR_DEPTH))
    for (train_labels, train_features, test_labels,
         test_features, test_metadata, clazzes) in generator:
print(train_labels.shape)
|
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
def main():
extras_require = {
'dev': ['pytest', 'tox'],
'test': ['pytest', 'tox'],
}
install_requires = [
'py>=3.5.0',
'setuptools',
'numpy',
'scipy',
'pandas',
'gym>=0.10.0'
]
setup(
name='wizluk',
version='0.1.0',
description='wizluk: width-based lookaheads Python library',
long_description=long_description,
url='https://github.com/miquelramirez/width-lookaheads-python',
author="Stefan O'Toole and Miquel Ramirez",
author_email='-',
keywords='planning reinforcement-learning gym',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
            'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
# You can just specify package directories manually here if your project is
# simple. Or you can use find_packages().
#
# Alternatively, if you just want to distribute a single Python file, use
# the `py_modules` argument instead as follows, which will expect a file
# called `my_module.py` to exist:
#
# py_modules=["my_module"],
#
packages=find_packages('src'), # include all packages under src
package_dir={'': 'src'}, # tell distutils packages are under src
# This field lists other packages that your project depends on to run.
# Any package you put here will be installed by pip when your project is
# installed, so they must be valid existing projects.
#
# For an analysis of "install_requires" vs pip's requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=install_requires,
# List additional groups of dependencies here (e.g. development
# dependencies). Users will be able to install these using the "extras"
# syntax, for example:
#
# $ pip install sampleproject[dev]
#
extras_require=extras_require,
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# `pip` to create the appropriate form of executable for the target
# platform.
#
# For example, the following would provide a command called `sample` which
# executes the function `main` from this package when invoked:
# entry_points={
# 'console_scripts': [
# 'sample=sample:main',
# ],
# },
# This will include non-code files specified in the manifest, see e.g.
# http://python-packaging.readthedocs.io/en/latest/non-code-files.html
include_package_data=True
)
if __name__ == '__main__':
main()
|
#
# PySNMP MIB module DVMRP-STD-MIB-JUNI (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/DVMRP-STD-MIB-JUNI
# Produced by pysmi-0.3.4 at Wed May 1 12:55:05 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ValueSizeConstraint, ConstraintsUnion, ConstraintsIntersection, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ValueSizeConstraint", "ConstraintsUnion", "ConstraintsIntersection", "SingleValueConstraint")
InterfaceIndexOrZero, InterfaceIndex = mibBuilder.importSymbols("IF-MIB", "InterfaceIndexOrZero", "InterfaceIndex")
juniDvmrpExperiment, = mibBuilder.importSymbols("Juniper-Experiment", "juniDvmrpExperiment")
SnmpAdminString, = mibBuilder.importSymbols("SNMP-FRAMEWORK-MIB", "SnmpAdminString")
NotificationGroup, ModuleCompliance, ObjectGroup = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance", "ObjectGroup")
Counter64, TimeTicks, Gauge32, NotificationType, iso, IpAddress, MibScalar, MibTable, MibTableRow, MibTableColumn, ModuleIdentity, Counter32, MibIdentifier, Unsigned32, Integer32, Bits, ObjectIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "Counter64", "TimeTicks", "Gauge32", "NotificationType", "iso", "IpAddress", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ModuleIdentity", "Counter32", "MibIdentifier", "Unsigned32", "Integer32", "Bits", "ObjectIdentity")
RowStatus, DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "RowStatus", "DisplayString", "TextualConvention")
junidDvmrpStdMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1))
junidDvmrpStdMIB.setRevisions(('1999-10-19 12:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: junidDvmrpStdMIB.setRevisionsDescriptions(('Initial version, published as RFC xxxx (to be filled in by RFC-Editor).',))
if mibBuilder.loadTexts: junidDvmrpStdMIB.setLastUpdated('9910191200Z')
if mibBuilder.loadTexts: junidDvmrpStdMIB.setOrganization('IETF IDMR Working Group.')
if mibBuilder.loadTexts: junidDvmrpStdMIB.setContactInfo(' Dave Thaler Microsoft One Microsoft Way Redmond, WA 98052-6399 EMail: dthaler@microsoft.com')
if mibBuilder.loadTexts: junidDvmrpStdMIB.setDescription('The MIB module for management of DVMRP routers.')
junidDvmrpMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1))
junidDvmrp = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1))
junidDvmrpScalar = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 1))
junidDvmrpVersionString = MibScalar((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 1, 1), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: junidDvmrpVersionString.setStatus('current')
if mibBuilder.loadTexts: junidDvmrpVersionString.setDescription("The router's DVMRP version information. Similar to sysDescr in MIB-II, this is a free-form field which can be used to display vendor-specific information.")
junidDvmrpGenerationId = MibScalar((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: junidDvmrpGenerationId.setStatus('current')
if mibBuilder.loadTexts: junidDvmrpGenerationId.setDescription('The generation identifier for the routing process. This is used by neighboring routers to detect whether the DVMRP routing table should be resent.')
junidDvmrpNumRoutes = MibScalar((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 1, 3), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: junidDvmrpNumRoutes.setStatus('current')
if mibBuilder.loadTexts: junidDvmrpNumRoutes.setDescription('The number of entries in the routing table. This can be used to monitor the routing table size to detect illegal advertisements of unicast routes.')
junidDvmrpReachableRoutes = MibScalar((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 1, 4), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: junidDvmrpReachableRoutes.setStatus('current')
if mibBuilder.loadTexts: junidDvmrpReachableRoutes.setDescription('The number of entries in the routing table with non-infinite metrics. This can be used to detect network partitions by observing the ratio of reachable routes to total routes.')
junidDvmrpInterfaceTable = MibTable((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 2), )
if mibBuilder.loadTexts: junidDvmrpInterfaceTable.setStatus('current')
if mibBuilder.loadTexts: junidDvmrpInterfaceTable.setDescription("The (conceptual) table listing the router's multicast-capable interfaces.")
junidDvmrpInterfaceEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 2, 1), ).setIndexNames((0, "DVMRP-STD-MIB-JUNI", "junidDvmrpInterfaceIfIndex"))
if mibBuilder.loadTexts: junidDvmrpInterfaceEntry.setStatus('current')
if mibBuilder.loadTexts: junidDvmrpInterfaceEntry.setDescription('An entry (conceptual row) in the junidDvmrpInterfaceTable. This row augments ipMRouteInterfaceEntry in the IP Multicast MIB, where the threshold object resides.')
junidDvmrpInterfaceIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 2, 1, 1), InterfaceIndex())
if mibBuilder.loadTexts: junidDvmrpInterfaceIfIndex.setStatus('current')
if mibBuilder.loadTexts: junidDvmrpInterfaceIfIndex.setDescription('The ifIndex value of the interface for which DVMRP is enabled.')
junidDvmrpInterfaceLocalAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 2, 1, 2), IpAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: junidDvmrpInterfaceLocalAddress.setStatus('current')
if mibBuilder.loadTexts: junidDvmrpInterfaceLocalAddress.setDescription('The IP address this system will use as a source address on this interface. On unnumbered interfaces, it must be the same value as junidDvmrpInterfaceLocalAddress for some interface on the system.')
junidDvmrpInterfaceMetric = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 2, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 31)).clone(1)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: junidDvmrpInterfaceMetric.setStatus('current')
if mibBuilder.loadTexts: junidDvmrpInterfaceMetric.setDescription('The distance metric for this interface which is used to calculate distance vectors.')
junidDvmrpInterfaceStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 2, 1, 4), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: junidDvmrpInterfaceStatus.setStatus('current')
if mibBuilder.loadTexts: junidDvmrpInterfaceStatus.setDescription('The status of this entry. Creating the entry enables DVMRP on the virtual interface; destroying the entry or setting it to notInService disables DVMRP on the virtual interface.')
junidDvmrpInterfaceRcvBadPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 2, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: junidDvmrpInterfaceRcvBadPkts.setStatus('current')
if mibBuilder.loadTexts: junidDvmrpInterfaceRcvBadPkts.setDescription('The number of DVMRP messages received on the interface by the DVMRP process which were subsequently discarded as invalid (e.g. invalid packet format, or a route report from an unknown neighbor).')
junidDvmrpInterfaceRcvBadRoutes = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 2, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: junidDvmrpInterfaceRcvBadRoutes.setStatus('current')
if mibBuilder.loadTexts: junidDvmrpInterfaceRcvBadRoutes.setDescription('The number of routes, in valid DVMRP packets, which were ignored because the entry was invalid.')
junidDvmrpInterfaceSentRoutes = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 2, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: junidDvmrpInterfaceSentRoutes.setStatus('current')
if mibBuilder.loadTexts: junidDvmrpInterfaceSentRoutes.setDescription('The number of routes, in DVMRP Report packets, which have been sent on this interface. Together with junidDvmrpNeighborRcvRoutes at a peer, this object is useful for detecting routes being lost.')
junidDvmrpInterfaceInterfaceKey = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 2, 1, 8), SnmpAdminString()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: junidDvmrpInterfaceInterfaceKey.setStatus('current')
if mibBuilder.loadTexts: junidDvmrpInterfaceInterfaceKey.setDescription('The (shared) key for authenticating neighbors on this interface. This object is intended solely for the purpose of setting the interface key, and MUST be accessible only via requests using both authentication and privacy. The agent MAY report an empty string in response to get, get-next, get-bulk requests.')
junidDvmrpInterfaceInterfaceKeyVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 2, 1, 9), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: junidDvmrpInterfaceInterfaceKeyVersion.setStatus('current')
if mibBuilder.loadTexts: junidDvmrpInterfaceInterfaceKeyVersion.setDescription('The highest version number of all known interface keys for this interface used for authenticating neighbors.')
junidDvmrpNeighborTable = MibTable((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 3), )
if mibBuilder.loadTexts: junidDvmrpNeighborTable.setStatus('current')
if mibBuilder.loadTexts: junidDvmrpNeighborTable.setDescription("The (conceptual) table listing the router's DVMRP neighbors, as discovered by receiving DVMRP messages.")
junidDvmrpNeighborEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 3, 1), ).setIndexNames((0, "DVMRP-STD-MIB-JUNI", "junidDvmrpNeighborIfIndex"), (0, "DVMRP-STD-MIB-JUNI", "junidDvmrpNeighborAddress"))
if mibBuilder.loadTexts: junidDvmrpNeighborEntry.setStatus('current')
if mibBuilder.loadTexts: junidDvmrpNeighborEntry.setDescription('An entry (conceptual row) in the junidDvmrpNeighborTable.')
junidDvmrpNeighborIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 3, 1, 1), InterfaceIndex())
if mibBuilder.loadTexts: junidDvmrpNeighborIfIndex.setStatus('current')
if mibBuilder.loadTexts: junidDvmrpNeighborIfIndex.setDescription('The value of ifIndex for the virtual interface used to reach this DVMRP neighbor.')
junidDvmrpNeighborAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 3, 1, 2), IpAddress())
if mibBuilder.loadTexts: junidDvmrpNeighborAddress.setStatus('current')
if mibBuilder.loadTexts: junidDvmrpNeighborAddress.setDescription('The IP address of the DVMRP neighbor for which this entry contains information.')
junidDvmrpNeighborUpTime = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 3, 1, 3), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: junidDvmrpNeighborUpTime.setStatus('current')
if mibBuilder.loadTexts: junidDvmrpNeighborUpTime.setDescription('The time since this DVMRP neighbor (last) became a neighbor of the local router.')
junidDvmrpNeighborExpiryTime = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 3, 1, 4), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: junidDvmrpNeighborExpiryTime.setStatus('current')
if mibBuilder.loadTexts: junidDvmrpNeighborExpiryTime.setDescription('The minimum time remaining before this DVMRP neighbor will be aged out.')
junidDvmrpNeighborGenerationId = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 3, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: junidDvmrpNeighborGenerationId.setStatus('current')
if mibBuilder.loadTexts: junidDvmrpNeighborGenerationId.setDescription("The neighboring router's generation identifier.")
junidDvmrpNeighborMajorVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 3, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: junidDvmrpNeighborMajorVersion.setStatus('current')
if mibBuilder.loadTexts: junidDvmrpNeighborMajorVersion.setDescription("The neighboring router's major DVMRP version number.")
junidDvmrpNeighborMinorVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 3, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: junidDvmrpNeighborMinorVersion.setStatus('current')
if mibBuilder.loadTexts: junidDvmrpNeighborMinorVersion.setDescription("The neighboring router's minor DVMRP version number.")
junidDvmrpNeighborCapabilities = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 3, 1, 8), Bits().clone(namedValues=NamedValues(("leaf", 0), ("prune", 1), ("generationID", 2), ("mtrace", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: junidDvmrpNeighborCapabilities.setStatus('current')
if mibBuilder.loadTexts: junidDvmrpNeighborCapabilities.setDescription("This object describes the neighboring router's capabilities. The leaf bit indicates that the neighbor has only one interface with neighbors. The prune bit indicates that the neighbor supports pruning. The generationID bit indicates that the neighbor sends its generationID in Probe messages. The mtrace bit indicates that the neighbor can handle mtrace requests.")
junidDvmrpNeighborRcvRoutes = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 3, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: junidDvmrpNeighborRcvRoutes.setStatus('current')
if mibBuilder.loadTexts: junidDvmrpNeighborRcvRoutes.setDescription('The total number of routes received in valid DVMRP packets received from this neighbor. This can be used to diagnose problems such as unicast route injection, as well as giving an indication of the level of DVMRP route exchange activity.')
junidDvmrpNeighborRcvBadPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 3, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: junidDvmrpNeighborRcvBadPkts.setStatus('current')
if mibBuilder.loadTexts: junidDvmrpNeighborRcvBadPkts.setDescription('The number of packets received from this neighbor which were discarded as invalid.')
junidDvmrpNeighborRcvBadRoutes = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 3, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: junidDvmrpNeighborRcvBadRoutes.setStatus('current')
if mibBuilder.loadTexts: junidDvmrpNeighborRcvBadRoutes.setDescription('The number of routes, in valid DVMRP packets received from this neighbor, which were ignored because the entry was invalid.')
junidDvmrpNeighborState = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 3, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("oneway", 1), ("active", 2), ("ignoring", 3), ("down", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: junidDvmrpNeighborState.setStatus('current')
if mibBuilder.loadTexts: junidDvmrpNeighborState.setDescription('State of the neighbor adjacency.')
junidDvmrpRouteTable = MibTable((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 4), )
if mibBuilder.loadTexts: junidDvmrpRouteTable.setStatus('current')
if mibBuilder.loadTexts: junidDvmrpRouteTable.setDescription('The table of routes learned through DVMRP route exchange.')
junidDvmrpRouteEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 4, 1), ).setIndexNames((0, "DVMRP-STD-MIB-JUNI", "junidDvmrpRouteSource"), (0, "DVMRP-STD-MIB-JUNI", "junidDvmrpRouteSourceMask"))
if mibBuilder.loadTexts: junidDvmrpRouteEntry.setStatus('current')
if mibBuilder.loadTexts: junidDvmrpRouteEntry.setDescription('An entry (conceptual row) containing the multicast routing information used by DVMRP in place of the unicast routing information.')
junidDvmrpRouteSource = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 4, 1, 1), IpAddress())
if mibBuilder.loadTexts: junidDvmrpRouteSource.setStatus('current')
if mibBuilder.loadTexts: junidDvmrpRouteSource.setDescription('The network address which when combined with the corresponding value of junidDvmrpRouteSourceMask identifies the sources for which this entry contains multicast routing information.')
junidDvmrpRouteSourceMask = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 4, 1, 2), IpAddress())
if mibBuilder.loadTexts: junidDvmrpRouteSourceMask.setStatus('current')
if mibBuilder.loadTexts: junidDvmrpRouteSourceMask.setDescription('The network mask which when combined with the corresponding value of junidDvmrpRouteSource identifies the sources for which this entry contains multicast routing information.')
junidDvmrpRouteUpstreamNeighbor = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 4, 1, 3), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: junidDvmrpRouteUpstreamNeighbor.setStatus('current')
if mibBuilder.loadTexts: junidDvmrpRouteUpstreamNeighbor.setDescription('The address of the upstream neighbor (e.g., RPF neighbor) from which IP datagrams from these sources are received.')
junidDvmrpRouteIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 4, 1, 4), InterfaceIndexOrZero()).setMaxAccess("readonly")
if mibBuilder.loadTexts: junidDvmrpRouteIfIndex.setStatus('current')
if mibBuilder.loadTexts: junidDvmrpRouteIfIndex.setDescription('The value of ifIndex for the interface on which IP datagrams sent by these sources are received. A value of 0 typically means the route is an aggregate for which no next-hop interface exists.')
junidDvmrpRouteMetric = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 4, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: junidDvmrpRouteMetric.setStatus('current')
if mibBuilder.loadTexts: junidDvmrpRouteMetric.setDescription('The distance in hops to the source subnet.')
junidDvmrpRouteExpiryTime = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 4, 1, 6), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: junidDvmrpRouteExpiryTime.setStatus('current')
if mibBuilder.loadTexts: junidDvmrpRouteExpiryTime.setDescription('The minimum amount of time remaining before this entry will be aged out.')
junidDvmrpRouteUpTime = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 4, 1, 7), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: junidDvmrpRouteUpTime.setStatus('current')
if mibBuilder.loadTexts: junidDvmrpRouteUpTime.setDescription('The time since the route represented by this entry was learned by the router.')
junidDvmrpRouteNextHopTable = MibTable((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 5), )
if mibBuilder.loadTexts: junidDvmrpRouteNextHopTable.setStatus('current')
if mibBuilder.loadTexts: junidDvmrpRouteNextHopTable.setDescription('The (conceptual) table containing information on the next hops on outgoing interfaces for routing IP multicast datagrams.')
junidDvmrpRouteNextHopEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 5, 1), ).setIndexNames((0, "DVMRP-STD-MIB-JUNI", "junidDvmrpRouteNextHopSource"), (0, "DVMRP-STD-MIB-JUNI", "junidDvmrpRouteNextHopSourceMask"), (0, "DVMRP-STD-MIB-JUNI", "junidDvmrpRouteNextHopIfIndex"))
if mibBuilder.loadTexts: junidDvmrpRouteNextHopEntry.setStatus('current')
if mibBuilder.loadTexts: junidDvmrpRouteNextHopEntry.setDescription('An entry (conceptual row) in the list of next hops on outgoing interfaces to which IP multicast datagrams from particular sources are routed.')
junidDvmrpRouteNextHopSource = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 5, 1, 1), IpAddress())
if mibBuilder.loadTexts: junidDvmrpRouteNextHopSource.setStatus('current')
if mibBuilder.loadTexts: junidDvmrpRouteNextHopSource.setDescription('The network address which when combined with the corresponding value of junidDvmrpRouteNextHopSourceMask identifies the sources for which this entry specifies a next hop on an outgoing interface.')
junidDvmrpRouteNextHopSourceMask = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 5, 1, 2), IpAddress())
if mibBuilder.loadTexts: junidDvmrpRouteNextHopSourceMask.setStatus('current')
if mibBuilder.loadTexts: junidDvmrpRouteNextHopSourceMask.setDescription('The network mask which when combined with the corresponding value of junidDvmrpRouteNextHopSource identifies the sources for which this entry specifies a next hop on an outgoing interface.')
junidDvmrpRouteNextHopIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 5, 1, 3), InterfaceIndex())
if mibBuilder.loadTexts: junidDvmrpRouteNextHopIfIndex.setStatus('current')
if mibBuilder.loadTexts: junidDvmrpRouteNextHopIfIndex.setDescription('The ifIndex value of the interface for the outgoing interface for this next hop.')
junidDvmrpRouteNextHopType = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 5, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("leaf", 1), ("branch", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: junidDvmrpRouteNextHopType.setStatus('current')
if mibBuilder.loadTexts: junidDvmrpRouteNextHopType.setDescription('Type is leaf if no downstream dependent neighbors exist on the outgoing virtual interface. Otherwise, type is branch.')
junidDvmrpPruneTable = MibTable((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 6), )
if mibBuilder.loadTexts: junidDvmrpPruneTable.setStatus('current')
if mibBuilder.loadTexts: junidDvmrpPruneTable.setDescription("The (conceptual) table listing the router's upstream prune state.")
junidDvmrpPruneEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 6, 1), ).setIndexNames((0, "DVMRP-STD-MIB-JUNI", "junidDvmrpPruneGroup"), (0, "DVMRP-STD-MIB-JUNI", "junidDvmrpPruneSource"), (0, "DVMRP-STD-MIB-JUNI", "junidDvmrpPruneSourceMask"))
if mibBuilder.loadTexts: junidDvmrpPruneEntry.setStatus('current')
if mibBuilder.loadTexts: junidDvmrpPruneEntry.setDescription('An entry (conceptual row) in the junidDvmrpPruneTable.')
junidDvmrpPruneGroup = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 6, 1, 1), IpAddress())
if mibBuilder.loadTexts: junidDvmrpPruneGroup.setStatus('current')
if mibBuilder.loadTexts: junidDvmrpPruneGroup.setDescription('The group address which has been pruned.')
junidDvmrpPruneSource = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 6, 1, 2), IpAddress())
if mibBuilder.loadTexts: junidDvmrpPruneSource.setStatus('current')
if mibBuilder.loadTexts: junidDvmrpPruneSource.setDescription('The address of the source or source network which has been pruned.')
junidDvmrpPruneSourceMask = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 6, 1, 3), IpAddress())
if mibBuilder.loadTexts: junidDvmrpPruneSourceMask.setStatus('current')
if mibBuilder.loadTexts: junidDvmrpPruneSourceMask.setDescription("The address of the source or source network which has been pruned. The mask must either be all 1's, or else junidDvmrpPruneSource and junidDvmrpPruneSourceMask must match junidDvmrpRouteSource and junidDvmrpRouteSourceMask for some entry in the junidDvmrpRouteTable.")
junidDvmrpPruneExpiryTime = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 6, 1, 4), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: junidDvmrpPruneExpiryTime.setStatus('current')
if mibBuilder.loadTexts: junidDvmrpPruneExpiryTime.setDescription("The amount of time remaining before this prune should expire at the upstream neighbor. This value should be the minimum of the default prune lifetime and the remaining prune lifetimes of the local router's downstream neighbors, if any.")
junidDvmrpTraps = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 0))
junidDvmrpNeighborLoss = NotificationType((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 0, 1)).setObjects(("DVMRP-STD-MIB-JUNI", "junidDvmrpInterfaceLocalAddress"), ("DVMRP-STD-MIB-JUNI", "junidDvmrpNeighborState"))
if mibBuilder.loadTexts: junidDvmrpNeighborLoss.setStatus('current')
if mibBuilder.loadTexts: junidDvmrpNeighborLoss.setDescription('A junidDvmrpNeighborLoss trap signifies the loss of a 2-way adjacency with a neighbor. This trap should be generated when the neighbor state changes from active to one-way, ignoring, or down. The trap should be generated only if the router has no other neighbors on the same interface with a lower IP address than itself.')
junidDvmrpNeighborNotPruning = NotificationType((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 1, 1, 0, 2)).setObjects(("DVMRP-STD-MIB-JUNI", "junidDvmrpInterfaceLocalAddress"), ("DVMRP-STD-MIB-JUNI", "junidDvmrpNeighborCapabilities"))
if mibBuilder.loadTexts: junidDvmrpNeighborNotPruning.setStatus('current')
if mibBuilder.loadTexts: junidDvmrpNeighborNotPruning.setDescription('A junidDvmrpNeighborNotPruning trap signifies that a non-pruning neighbor has been detected (in an implementation-dependent manner). This trap should be generated at most once per generation ID of the neighbor. For example, it should be generated at the time a neighbor is first heard from if the prune bit is not set in its capabilities. It should also be generated if the local system has the ability to tell that a neighbor which sets the prune bit in its capabilities is not pruning any branches over an extended period of time. The trap should be generated only if the router has no other neighbors on the same interface with a lower IP address than itself.')
junidDvmrpMIBConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 2))
junidDvmrpMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 2, 1))
junidDvmrpMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 2, 2))
junidDvmrpMIBCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 2, 1, 1)).setObjects(("DVMRP-STD-MIB-JUNI", "junidDvmrpGeneralGroup"), ("DVMRP-STD-MIB-JUNI", "junidDvmrpInterfaceGroup"), ("DVMRP-STD-MIB-JUNI", "junidDvmrpNeighborGroup"), ("DVMRP-STD-MIB-JUNI", "junidDvmrpRoutingGroup"), ("DVMRP-STD-MIB-JUNI", "junidDvmrpTreeGroup"), ("DVMRP-STD-MIB-JUNI", "junidDvmrpSecurityGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
junidDvmrpMIBCompliance = junidDvmrpMIBCompliance.setStatus('current')
if mibBuilder.loadTexts: junidDvmrpMIBCompliance.setDescription('The compliance statement for the DVMRP MIB.')
junidDvmrpGeneralGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 2, 2, 2)).setObjects(("DVMRP-STD-MIB-JUNI", "junidDvmrpVersionString"), ("DVMRP-STD-MIB-JUNI", "junidDvmrpGenerationId"), ("DVMRP-STD-MIB-JUNI", "junidDvmrpNumRoutes"), ("DVMRP-STD-MIB-JUNI", "junidDvmrpReachableRoutes"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
junidDvmrpGeneralGroup = junidDvmrpGeneralGroup.setStatus('current')
if mibBuilder.loadTexts: junidDvmrpGeneralGroup.setDescription('A collection of objects used to describe general DVMRP configuration information.')
junidDvmrpInterfaceGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 2, 2, 3)).setObjects(("DVMRP-STD-MIB-JUNI", "junidDvmrpInterfaceLocalAddress"), ("DVMRP-STD-MIB-JUNI", "junidDvmrpInterfaceMetric"), ("DVMRP-STD-MIB-JUNI", "junidDvmrpInterfaceStatus"), ("DVMRP-STD-MIB-JUNI", "junidDvmrpInterfaceRcvBadPkts"), ("DVMRP-STD-MIB-JUNI", "junidDvmrpInterfaceRcvBadRoutes"), ("DVMRP-STD-MIB-JUNI", "junidDvmrpInterfaceSentRoutes"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
junidDvmrpInterfaceGroup = junidDvmrpInterfaceGroup.setStatus('current')
if mibBuilder.loadTexts: junidDvmrpInterfaceGroup.setDescription('A collection of objects used to describe DVMRP interface configuration and statistics.')
junidDvmrpNeighborGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 2, 2, 4)).setObjects(("DVMRP-STD-MIB-JUNI", "junidDvmrpNeighborUpTime"), ("DVMRP-STD-MIB-JUNI", "junidDvmrpNeighborExpiryTime"), ("DVMRP-STD-MIB-JUNI", "junidDvmrpNeighborGenerationId"), ("DVMRP-STD-MIB-JUNI", "junidDvmrpNeighborMajorVersion"), ("DVMRP-STD-MIB-JUNI", "junidDvmrpNeighborMinorVersion"), ("DVMRP-STD-MIB-JUNI", "junidDvmrpNeighborCapabilities"), ("DVMRP-STD-MIB-JUNI", "junidDvmrpNeighborRcvRoutes"), ("DVMRP-STD-MIB-JUNI", "junidDvmrpNeighborRcvBadPkts"), ("DVMRP-STD-MIB-JUNI", "junidDvmrpNeighborRcvBadRoutes"), ("DVMRP-STD-MIB-JUNI", "junidDvmrpNeighborState"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
junidDvmrpNeighborGroup = junidDvmrpNeighborGroup.setStatus('current')
if mibBuilder.loadTexts: junidDvmrpNeighborGroup.setDescription('A collection of objects used to describe DVMRP peer configuration and statistics.')
junidDvmrpRoutingGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 2, 2, 5)).setObjects(("DVMRP-STD-MIB-JUNI", "junidDvmrpRouteUpstreamNeighbor"), ("DVMRP-STD-MIB-JUNI", "junidDvmrpRouteIfIndex"), ("DVMRP-STD-MIB-JUNI", "junidDvmrpRouteMetric"), ("DVMRP-STD-MIB-JUNI", "junidDvmrpRouteExpiryTime"), ("DVMRP-STD-MIB-JUNI", "junidDvmrpRouteUpTime"), ("DVMRP-STD-MIB-JUNI", "junidDvmrpRouteNextHopType"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
junidDvmrpRoutingGroup = junidDvmrpRoutingGroup.setStatus('current')
if mibBuilder.loadTexts: junidDvmrpRoutingGroup.setDescription('A collection of objects used to store the DVMRP routing table.')
junidDvmrpSecurityGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 2, 2, 6)).setObjects(("DVMRP-STD-MIB-JUNI", "junidDvmrpInterfaceInterfaceKey"), ("DVMRP-STD-MIB-JUNI", "junidDvmrpInterfaceInterfaceKeyVersion"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
junidDvmrpSecurityGroup = junidDvmrpSecurityGroup.setStatus('current')
if mibBuilder.loadTexts: junidDvmrpSecurityGroup.setDescription('A collection of objects used to store information related to DVMRP security.')
junidDvmrpTreeGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 2, 2, 7)).setObjects(("DVMRP-STD-MIB-JUNI", "junidDvmrpPruneExpiryTime"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
junidDvmrpTreeGroup = junidDvmrpTreeGroup.setStatus('current')
if mibBuilder.loadTexts: junidDvmrpTreeGroup.setDescription('A collection of objects used to store information related to DVMRP prune state.')
junidDvmrpNotificationGroup = NotificationGroup((1, 3, 6, 1, 4, 1, 4874, 3, 2, 1, 1, 2, 2, 8)).setObjects(("DVMRP-STD-MIB-JUNI", "junidDvmrpNeighborLoss"), ("DVMRP-STD-MIB-JUNI", "junidDvmrpNeighborNotPruning"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
junidDvmrpNotificationGroup = junidDvmrpNotificationGroup.setStatus('current')
if mibBuilder.loadTexts: junidDvmrpNotificationGroup.setDescription('A collection of notifications for signaling important DVMRP events.')
mibBuilder.exportSymbols("DVMRP-STD-MIB-JUNI", junidDvmrpSecurityGroup=junidDvmrpSecurityGroup, junidDvmrpNeighborMinorVersion=junidDvmrpNeighborMinorVersion, junidDvmrpNeighborState=junidDvmrpNeighborState, junidDvmrpRouteNextHopSourceMask=junidDvmrpRouteNextHopSourceMask, junidDvmrpRouteIfIndex=junidDvmrpRouteIfIndex, junidDvmrpPruneExpiryTime=junidDvmrpPruneExpiryTime, junidDvmrpTreeGroup=junidDvmrpTreeGroup, junidDvmrpTraps=junidDvmrpTraps, junidDvmrpNeighborGroup=junidDvmrpNeighborGroup, junidDvmrpPruneSourceMask=junidDvmrpPruneSourceMask, junidDvmrpRouteSourceMask=junidDvmrpRouteSourceMask, junidDvmrpNeighborTable=junidDvmrpNeighborTable, junidDvmrpRouteNextHopEntry=junidDvmrpRouteNextHopEntry, junidDvmrpPruneEntry=junidDvmrpPruneEntry, junidDvmrpInterfaceSentRoutes=junidDvmrpInterfaceSentRoutes, junidDvmrpMIBCompliances=junidDvmrpMIBCompliances, PYSNMP_MODULE_ID=junidDvmrpStdMIB, junidDvmrpMIBCompliance=junidDvmrpMIBCompliance, junidDvmrpGeneralGroup=junidDvmrpGeneralGroup, junidDvmrpInterfaceTable=junidDvmrpInterfaceTable, junidDvmrp=junidDvmrp, junidDvmrpInterfaceInterfaceKeyVersion=junidDvmrpInterfaceInterfaceKeyVersion, junidDvmrpNeighborRcvBadRoutes=junidDvmrpNeighborRcvBadRoutes, junidDvmrpNeighborEntry=junidDvmrpNeighborEntry, junidDvmrpRouteUpstreamNeighbor=junidDvmrpRouteUpstreamNeighbor, junidDvmrpNeighborUpTime=junidDvmrpNeighborUpTime, junidDvmrpRouteExpiryTime=junidDvmrpRouteExpiryTime, junidDvmrpInterfaceLocalAddress=junidDvmrpInterfaceLocalAddress, junidDvmrpNotificationGroup=junidDvmrpNotificationGroup, junidDvmrpRouteMetric=junidDvmrpRouteMetric, junidDvmrpVersionString=junidDvmrpVersionString, junidDvmrpPruneTable=junidDvmrpPruneTable, junidDvmrpInterfaceInterfaceKey=junidDvmrpInterfaceInterfaceKey, junidDvmrpInterfaceStatus=junidDvmrpInterfaceStatus, junidDvmrpInterfaceMetric=junidDvmrpInterfaceMetric, junidDvmrpMIBObjects=junidDvmrpMIBObjects, junidDvmrpNumRoutes=junidDvmrpNumRoutes, junidDvmrpNeighborGenerationId=junidDvmrpNeighborGenerationId, junidDvmrpNeighborRcvRoutes=junidDvmrpNeighborRcvRoutes, junidDvmrpInterfaceRcvBadRoutes=junidDvmrpInterfaceRcvBadRoutes, junidDvmrpNeighborLoss=junidDvmrpNeighborLoss, junidDvmrpNeighborNotPruning=junidDvmrpNeighborNotPruning, junidDvmrpNeighborAddress=junidDvmrpNeighborAddress, junidDvmrpNeighborMajorVersion=junidDvmrpNeighborMajorVersion, junidDvmrpRoutingGroup=junidDvmrpRoutingGroup, junidDvmrpReachableRoutes=junidDvmrpReachableRoutes, junidDvmrpNeighborCapabilities=junidDvmrpNeighborCapabilities, junidDvmrpRouteNextHopSource=junidDvmrpRouteNextHopSource, junidDvmrpMIBGroups=junidDvmrpMIBGroups, junidDvmrpRouteSource=junidDvmrpRouteSource, junidDvmrpRouteNextHopTable=junidDvmrpRouteNextHopTable, junidDvmrpPruneGroup=junidDvmrpPruneGroup, junidDvmrpInterfaceGroup=junidDvmrpInterfaceGroup, junidDvmrpInterfaceIfIndex=junidDvmrpInterfaceIfIndex, junidDvmrpRouteTable=junidDvmrpRouteTable, junidDvmrpInterfaceRcvBadPkts=junidDvmrpInterfaceRcvBadPkts, junidDvmrpNeighborRcvBadPkts=junidDvmrpNeighborRcvBadPkts, junidDvmrpRouteUpTime=junidDvmrpRouteUpTime, junidDvmrpPruneSource=junidDvmrpPruneSource, junidDvmrpNeighborExpiryTime=junidDvmrpNeighborExpiryTime, junidDvmrpGenerationId=junidDvmrpGenerationId, junidDvmrpInterfaceEntry=junidDvmrpInterfaceEntry, junidDvmrpNeighborIfIndex=junidDvmrpNeighborIfIndex, junidDvmrpRouteNextHopType=junidDvmrpRouteNextHopType, junidDvmrpRouteEntry=junidDvmrpRouteEntry, junidDvmrpMIBConformance=junidDvmrpMIBConformance, junidDvmrpScalar=junidDvmrpScalar, junidDvmrpStdMIB=junidDvmrpStdMIB, 
junidDvmrpRouteNextHopIfIndex=junidDvmrpRouteNextHopIfIndex)
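# --- Hedged usage sketch (not part of the generated module above) ---
# A minimal example of loading this pysmi/pysnmp generated module with pysnmp's
# MibBuilder. It assumes the file is saved as DVMRP-STD-MIB-JUNI.py in the current
# directory; the module and symbol names come from the exportSymbols call above.
# Kept as comments because the module body relies on the loader-provided mibBuilder
# and is not meant to be executed directly.
#
#     from pysnmp.smi import builder
#
#     mib_builder = builder.MibBuilder()
#     mib_builder.addMibSources(builder.DirMibSource('.'))
#     mib_builder.loadModules('DVMRP-STD-MIB-JUNI')
#     (num_routes,) = mib_builder.importSymbols('DVMRP-STD-MIB-JUNI', 'junidDvmrpNumRoutes')
#     print(num_routes.getName())  # the OID tuple starting with (1, 3, 6, 1, 4, 1, 4874, ...)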
|
import unittest
from chainer import dataset
from chainer import testing
class SimpleDataset(dataset.DatasetMixin):
def __init__(self, values):
self.values = values
def __len__(self):
return len(self.values)
def get_example(self, i):
return self.values[i]
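# Hedged aside (not part of the original test module): DatasetMixin derives
# __getitem__, including the slice handling exercised below, from just __len__ and
# get_example, so a dataset only needs to implement those two methods. For example:
#
#     ds = SimpleDataset([10, 20, 30])
#     ds[0]     # -> 10, via get_example(0)
#     ds[::-1]  # -> [30, 20, 10], a list built from repeated get_example calls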
class TestDatasetMixin(unittest.TestCase):
def setUp(self):
self.ds = SimpleDataset([1, 2, 3, 4, 5])
def test_getitem(self):
for i in range(len(self.ds.values)):
self.assertEqual(self.ds[i], self.ds.values[i])
def test_slice(self):
ds = self.ds
self.assertEqual(ds[:], ds.values)
self.assertEqual(ds[1:], ds.values[1:])
self.assertEqual(ds[2:], ds.values[2:])
self.assertEqual(ds[1:4], ds.values[1:4])
self.assertEqual(ds[0:4], ds.values[0:4])
self.assertEqual(ds[1:5], ds.values[1:5])
self.assertEqual(ds[:-1], ds.values[:-1])
self.assertEqual(ds[1:-2], ds.values[1:-2])
self.assertEqual(ds[-4:-1], ds.values[-4:-1])
self.assertEqual(ds[::-1], ds.values[::-1])
self.assertEqual(ds[4::-1], ds.values[4::-1])
self.assertEqual(ds[:2:-1], ds.values[:2:-1])
self.assertEqual(ds[-1::-1], ds.values[-1::-1])
self.assertEqual(ds[:-3:-1], ds.values[:-3:-1])
self.assertEqual(ds[-1:-3:-1], ds.values[-1:-3:-1])
self.assertEqual(ds[4:1:-1], ds.values[4:1:-1])
self.assertEqual(ds[-1:1:-1], ds.values[-1:1:-1])
self.assertEqual(ds[4:-3:-1], ds.values[4:-3:-1])
self.assertEqual(ds[-2:-4:-1], ds.values[-2:-4:-1])
self.assertEqual(ds[::2], ds.values[::2])
self.assertEqual(ds[1::2], ds.values[1::2])
self.assertEqual(ds[:3:2], ds.values[:3:2])
self.assertEqual(ds[1:4:2], ds.values[1:4:2])
self.assertEqual(ds[::-2], ds.values[::-2])
self.assertEqual(ds[:10], ds.values[:10])
testing.run_module(__name__, __file__)
|
#!/usr/bin/env python
#-----------------------------------------------------------------------------
# Copyright (c) 2020, Greg Landrum
# All rights reserved.
#
# The full license is in the LICENSE file, distributed with this software.
#-----------------------------------------------------------------------------
from setuptools import setup, find_packages
INSTALL_REQUIRES = ['intake >=0.5.2']
setup(
name='intake-rdkit',
version="0.2.0",
description='rdkit plugins for Intake',
url='https://github.com/greglandrum/intake-rdkit',
maintainer='greg landrum',
maintainer_email='greg.landrum@t5informatics.com',
license='BSD',
py_modules=['intake_rdkit'],
packages=find_packages(),
entry_points={
'intake.drivers': [
'sdf = intake_rdkit.sdf:SDFSource',
'smiles = intake_rdkit.smiles:SmilesSource',
]
},
package_data={'': ['*.csv', '*.yml', '*.html']},
include_package_data=True,
install_requires=INSTALL_REQUIRES,
long_description=open('README.md').read(),
long_description_content_type="text/markdown",
zip_safe=False,
)
|
from flask import Flask, render_template, url_for, Response
from model import Face
app = Flask(__name__, static_folder='static')
@app.route('/', methods=["GET"])
def index():
return render_template('index.html')
def livestream(source):
while True:
frame = source.input()
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
@app.route('/video_feed')
def video_feed():
return Response(livestream(Face()), mimetype='multipart/x-mixed-replace; boundary=frame')
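# Hedged testing aid (not part of the original app): a stand-in for the Face model
# exposing the same .input() interface that livestream() relies on, returning a fixed
# JPEG byte string. The class name is an assumption; it is only meant for exercising
# /video_feed locally without a camera, e.g. Response(livestream(_StaticFrameSource(jpeg_bytes)), ...).
class _StaticFrameSource:
    def __init__(self, frame_bytes):
        self._frame = frame_bytes
    def input(self):
        # livestream() concatenates these raw JPEG bytes into each multipart frame
        return self._frame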
if __name__ == '__main__':
app.run(host="localhost", debug=True)
|
from dataclasses import dataclass
from raiden.constants import EMPTY_ADDRESS, UINT256_MAX
from raiden.utils.formatting import to_checksum_address
from raiden.utils.typing import (
Address,
ChainID,
ChannelID,
T_Address,
T_ChainID,
T_ChannelID,
TokenNetworkAddress,
typecheck,
)
@dataclass(frozen=True, order=True)
class CanonicalIdentifier:
chain_identifier: ChainID
token_network_address: TokenNetworkAddress
channel_identifier: ChannelID
def validate(self) -> None:
typecheck(self.chain_identifier, T_ChainID)
typecheck(self.token_network_address, T_Address)
typecheck(self.channel_identifier, T_ChannelID)
if self.channel_identifier < 0 or self.channel_identifier > UINT256_MAX:
raise ValueError("channel id is invalid")
def __str__(self) -> str:
return (
"CanonicalIdentifier("
f"chain_identifier={self.chain_identifier}, "
f"token_network_address={to_checksum_address(self.token_network_address)}, "
f"channel_identifier={self.channel_identifier}"
")"
)
@dataclass(frozen=True)
class QueueIdentifier:
recipient: Address
canonical_identifier: CanonicalIdentifier
def __str__(self) -> str:
return (
"QueueIdentifier("
f"recipient={to_checksum_address(self.recipient)}, "
f"canonical_identifier={self.canonical_identifier}"
")"
)
CANONICAL_IDENTIFIER_UNORDERED_QUEUE = CanonicalIdentifier(
ChainID(0), TokenNetworkAddress(EMPTY_ADDRESS), ChannelID(0)
)
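# Hedged usage sketch (not part of the original module): building and validating an
# identifier. The chain id, address, and channel id below are placeholder values.
if __name__ == "__main__":
    demo_identifier = CanonicalIdentifier(
        chain_identifier=ChainID(1),
        token_network_address=TokenNetworkAddress(EMPTY_ADDRESS),
        channel_identifier=ChannelID(1),
    )
    demo_identifier.validate()  # raises ValueError if channel_identifier is outside [0, UINT256_MAX]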
|
import datetime
from django.contrib import admin
from django.core import serializers
from django.http import HttpResponse
from django.utils.translation import ugettext_lazy as _
from bento.models import TextBox, ImageBox
class TextBoxAdmin(admin.ModelAdmin):
list_display = ('name', 'modification_date')
actions = ['export_selected_objects']
def export_selected_objects(self, request, queryset):
response = HttpResponse(mimetype='application/json')
filename = '%(model)s-%(date)s.json' % {
'model': self.opts.module_name,
'date': datetime.datetime.now().strftime('%Y%m%d%H%M%S'),
}
response['Content-Disposition'] = u'attachment; filename=%s' % filename
serializers.serialize('json', queryset, stream=response)
return response
export_selected_objects.short_description = 'Export to JSON'
class ImageBoxAdmin(TextBoxAdmin):
list_display = ('name', 'preview', 'modification_date')
def preview(self, obj):
template = u"""<img src="{url}" style="max-height: 48px;" />"""
url = obj.image.url if obj.image else ''
return template.format(url=url)
preview.short_description=_('preview')
preview.allow_tags = True
admin.site.register(TextBox, TextBoxAdmin)
admin.site.register(ImageBox, ImageBoxAdmin)
|
"""
API Response Objects
These are JSON Responses from APIs
"""
import datetime
from typing import Any, Dict, Iterator, List, Optional, Union
from camply.config.api_config import RecreationBookingConfig
from camply.containers.base_container import CamplyModel
class _CampsiteEquipment(CamplyModel):
EquipmentName: str
MaxLength: float
class _CampsiteAttribute(CamplyModel):
AttributeName: str
AttributeValue: str
class CampsiteResponse(CamplyModel):
"""
https://ridb.recreation.gov/api/v1/campsites/<CAMPSITE ID>
"""
CampsiteID: int
FacilityID: int
CampsiteName: str
CampsiteType: str
TypeOfUse: str
Loop: str
CampsiteAccessible: bool
CampsiteReservable: bool
CampsiteLongitude: float
CampsiteLatitude: float
CreatedDate: datetime.date
LastUpdatedDate: datetime.date
PERMITTEDEQUIPMENT: List[_CampsiteEquipment]
ATTRIBUTES: List[_CampsiteAttribute]
class UnawareDatetime(datetime.datetime):
"""
Datetime Unaware Timestamp Parsing
"""
@classmethod
def __get_validators__(cls) -> Iterator:
"""
Generate Validators
"""
yield cls.validate
@classmethod
def validate(cls, v: Union[str, datetime.datetime]) -> datetime.datetime:
"""
        Validate Date Strings Into Datetime Objects
Parameters
----------
v: Union[str, datetime.datetime]
Returns
-------
datetime.datetime
"""
if isinstance(v, str):
return datetime.datetime.strptime(v, "%Y-%m-%dT%H:%M:%SZ")
elif isinstance(v, datetime.datetime):
return v.replace(tzinfo=None)
else:
raise ValueError("You Must Provide a Parsable Datetime String or Object")
class _CampsiteAvailabilityCampsiteResponse(CamplyModel):
"""
https://ridb.recreation.gov/api/v1/campsites/<CAMPSITE ID>
"""
availabilities: Dict[UnawareDatetime, str] = {}
loop: str = RecreationBookingConfig.CAMPSITE_LOCATION_LOOP_DEFAULT
campsite_type: Optional[str]
max_num_people: int = 1
min_num_people: int = 1
type_of_use: Optional[str]
site: str = RecreationBookingConfig.CAMPSITE_LOCATION_SITE_DEFAULT
class CampsiteAvailabilityResponse(CamplyModel):
"""
https://ridb.recreation.gov/api/v1/campsites/<CAMPSITE ID>
"""
campsites: Dict[int, _CampsiteAvailabilityCampsiteResponse]
class _RecAreaAddress(CamplyModel):
"""
Recreation Area Address Field
"""
AddressStateCode: str
class RecreationAreaResponse(CamplyModel):
"""
https://ridb.recreation.gov/api/v1/campsites/<CAMPSITE ID>
"""
RecAreaID: int
RecAreaName: str
RECAREAADDRESS: List[_RecAreaAddress]
class _FacilityAddress(_RecAreaAddress):
"""
Facility Address aka RecArea Address
"""
class _FacilityRecArea(CamplyModel):
"""
Recreation Area inside of Facility
"""
RecAreaID: int
RecAreaName: str
class FacilityResponse(CamplyModel):
"""
/api/v1/facilities/<Facility ID>
"""
FacilityID: int
FacilityName: str
FacilityTypeDescription: str
Enabled: bool
Reservable: bool
FACILITYADDRESS: Optional[List[_FacilityAddress]]
RECAREA: Optional[List[_FacilityRecArea]]
class _PaginationCountResponse(CamplyModel):
"""
Pagination Counters
"""
CURRENT_COUNT: int
TOTAL_COUNT: int
class _PaginationMetadataResponse(CamplyModel):
"""
Pagination Metadata
"""
RESULTS: _PaginationCountResponse
class GenericResponse(CamplyModel):
"""
Generic Response to Be Paginated
"""
RECDATA: Any
METADATA: _PaginationMetadataResponse
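# Hedged usage sketch (not part of the original module): UnawareDatetime accepts either
# a "%Y-%m-%dT%H:%M:%SZ" string or a datetime object and always yields a naive datetime.
# The sample timestamp below is arbitrary.
if __name__ == "__main__":
    parsed = UnawareDatetime.validate("2022-07-04T00:00:00Z")
    aware = datetime.datetime(2022, 7, 4, tzinfo=datetime.timezone.utc)
    stripped = UnawareDatetime.validate(aware)
    assert parsed.tzinfo is None and stripped.tzinfo is None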
|
# -*- coding: utf-8 -*-
import requests
from pymongo import MongoClient
import time
from redis import StrictRedis
import traceback
from parsel import Selector
import urlparse
#redis config
import json
import re
from multiprocessing import Pool
from collections import Counter
redis_setting = {
'dev': {
'host': 'localhost',
'port': 6379,
'max_connections': 200,
'db': 1,
},
}
REDIS_CLIENT = StrictRedis(**redis_setting['dev'])
MONGO_CLIENT = MongoClient('secret')
db = MONGO_CLIENT['nodebb']
articles_coll = db['articles']
tags_coll = db['tags']
headers = {
"User-Agent": 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/51.0.2704.79 Chrome/51.0.2704.79 Safari/537.36',
'Host': '36kr.com',
'Cookie': 'aliyungf_tc=AQAAAMGySVIwkwgAgvgS2msFHJgu8btb; gr_user_id=2e2031d6-a43e-4df9-945d-97d4ed397646; kr_stat_uuid=ovuey24484520; c_name=point; gr_session_id_76d36bd044527820a1787b198651e2f1=7205350b-dc5f-42d2-9031-c6917d91e3cf; Hm_lvt_713123c60a0e86982326bae1a51083e1=1469071201; Hm_lpvt_713123c60a0e86982326bae1a51083e1=1469073547; krchoasss=eyJpdiI6IkpqS2tSbWVmcEtmMDBRVlkwOFdGNFE9PSIsInZhbHVlIjoiUVo2QkdLOUpKZ0pRYjRETnNTR2I0XC9ZSVJcL3FaTHhKd0JPUXBXK2MreFJ4ZGEzVzV1MFRndTdmRXNGMnNhalphWk4xVHZoSlhoTGlTNTRQcHdqSFwvMWc9PSIsIm1hYyI6Ijk0NmFjOTIwZGVkMjcyOWNlYTY4ZGZiYTBlMjY4M2I4MTM4Y2FjMjYxYTU5YmI2NTIxNTAzNjY1ZjZhZGMxNzIifQ%3D%3D'
}
to_get_tags = []
body_x='//script'
API='http://36kr.com/p/%s.html'
API_ID='http://36kr.com/api/info-flow/main_site/posts?column_id=&b_id=%s&per_page=%s'
set_name='html_id_set'
tag_x='//meta[@name="keywords"]/@content'
def parse_value(response, selector, all=True):
if all:
rlts = filter(lambda value: value.strip() != '',
response.xpath(selector).extract())
return map(lambda rlt: rlt.strip(), rlts)
else:
rlt = response.xpath(selector).extract_first()
if rlt:
return rlt.strip()
return ''
def save_article(id, data):
if not (articles_coll.find_one({'id': id})):
print('>>>>> add new: %s' % id)
articles_coll.insert_one(data)
else:
print('%s existed'%id)
def save_tags(id, item):
if not (tags_coll.find_one({'id': id})):
print('>>>>> add new: %s' % id)
tags_coll.insert_one(item)
else:
print('%s existed'%id)
def parse_tags(id_get):
if not (tags_coll.find_one({'id': id_get})):
url = API%id_get
resp = requests.get(url, headers=headers, timeout=10)
hxs = Selector(text=resp.text)
tags=parse_value(hxs,tag_x)[0]
print tags
item = {
'id':id_get,
'tags':tags
}
save_tags(id_get, item)
return item
else:
item = tags_coll.find_one({'id': id_get})
return item
def parse(id_get):
url = API%id_get
resp = requests.get(url, headers=headers, timeout=10)
hxs = Selector(text=resp.text)
body=parse_value(hxs,body_x)
set=body[5].split(',locationnal=')
text=set[0].replace('<script>var props=','')
data = json.loads(text)
detail=data['detailArticle|post']
id=detail['id']
'''date=detail['published_at']
content=re.sub(r'<([^<>]*)>', '', detail['content'])
tags=detail['extraction_tags']
related_company_type=detail['related_company_type']
catch_title=detail['catch_title']
summary=detail['summary']
title=detail['title']
author=detail['user']['name']
answer = {
'id': id,
'content':content,
'author':author,
'tags':tags,
'related_company_type':related_company_type,
'catch_title':catch_title,
'summary': summary,
'answer_content': text
}'''
    print data
    # save before returning; the original returned first, so the article was never stored
    save_article(id, detail)
    return detail
def get_id(in_id, id_num):
url_now = API_ID%(in_id, id_num)
resp = requests.get(url_now, headers=headers)
items = json.loads(resp.content)['data']['items']
for item in items:
REDIS_CLIENT.sadd(set_name,item['id'])
def get_article():
while True:
try:
id=REDIS_CLIENT.spop(set_name)
parse(id)
print('<<<< success')
except Exception,e:
print e
REDIS_CLIENT.sadd(set_name, id)
traceback.print_exc()
print('xxxx Failed')
def get_tags(id_list):
fail_list = []
while REDIS_CLIENT.scard(set_name) != 0:
try:
in_id = REDIS_CLIENT.spop(set_name)
id_list.append(in_id)
parse_tags(in_id)
except Exception, e:
if in_id not in fail_list:
                REDIS_CLIENT.sadd(set_name, in_id)
print('xxxx Failed')
fail_list.append(in_id)
print e
else:
print e
def retrieve_tags(in_id):
if (tags_coll.find_one({'id': in_id})):
item = tags_coll.find_one({'id': in_id})
tags = item['tags']
return tags
else:
item = parse_tags(in_id)
tags = item['tags']
return tags
def get_tag_list(in_id, id_num):
tag_list = []
id_list = []
get_id(in_id, id_num)
    # get_tags drains the redis id set itself and appends to id_list, so it is called
    # directly here; the original passed its return value (None) to Pool.map, which
    # could not work and would also lose id_list updates across worker processes.
    get_tags(id_list)
for id in id_list:
print id
tags = retrieve_tags(id)
        tag_list.extend(tags.replace(u'创业资讯,科技新闻,', '').split(','))  # strip the boilerplate "startup news, tech news" tags
counter1 = Counter(tag_list)
return counter1
if __name__ == '__main__':
'''
pool = Pool()
pool.map(get_article(),range(10))
pool.close()
pool.join()
'''
counter1 = get_tag_list(5058359, 100)
print counter1
list1 = counter1.most_common(5)
for i in range(5):
print list1[i][0], list1[i][1]
#print retrieve_tags(303769651)
#taglist = parse_tag(5058359)
#print retrieve_tags(5058255)
|
#!/usr/bin/env python
import sys
from intcomputer import Intcomputer
def read_input_to_list(path):
with open(path) as file:
return [int(x) for x in file.readline().split(",")]
if __name__ == "__main__":
puzzle = sys.argv[1]
input = read_input_to_list(sys.argv[2])
if puzzle == "1":
intcomputer = Intcomputer(input)
out = []
intcomputer.run(input=[1], output=out)
print(out)
elif puzzle == "2":
intcomputer = Intcomputer(input)
out = []
intcomputer.run(input=[5], output=out)
print(out)
else:
print("Input argument 1 needs to be 1 or 2", file=sys.stderr)
exit(1)
|
"""
Loxone Cover
For more details about this component, please refer to the documentation at
https://github.com/JoDehli/PyLoxone
"""
import logging
from typing import Any
import random
from homeassistant.components.cover import (ATTR_POSITION, ATTR_TILT_POSITION,
DEVICE_CLASS_AWNING,
DEVICE_CLASS_BLIND,
DEVICE_CLASS_CURTAIN,
DEVICE_CLASS_DOOR,
DEVICE_CLASS_GARAGE,
DEVICE_CLASS_SHUTTER,
DEVICE_CLASS_WINDOW, SUPPORT_CLOSE,
SUPPORT_OPEN, CoverEntity)
from homeassistant.const import STATE_OFF, STATE_ON
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.event import track_utc_time_change
from . import LoxoneEntity, get_miniserver_from_config_entry
from .const import (DOMAIN, SENDDOMAIN, SUPPORT_CLOSE_TILT, SUPPORT_OPEN_TILT,
SUPPORT_SET_POSITION, SUPPORT_SET_TILT_POSITION,
SUPPORT_STOP, SUPPORT_STOP_TILT)
from .helpers import (get_all_covers, get_cat_name_from_cat_uuid,
get_room_name_from_room_uuid, map_range)
_LOGGER = logging.getLogger(__name__)
NEW_COVERS = "covers"
async def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
"""Set up the Loxone covers."""
return True
async def async_setup_entry(hass, config_entry, async_add_entites):
"""Set Loxone covers."""
miniserver = get_miniserver_from_config_entry(hass, config_entry)
loxconfig = miniserver.loxone_config
covers = []
for cover in get_all_covers(loxconfig):
cover.update(
{
"hass": hass,
"room": get_room_name_from_room_uuid(loxconfig, cover.get("room", "")),
"cat": get_cat_name_from_cat_uuid(loxconfig, cover.get("cat", "")),
}
)
if cover["type"] == "Gate":
new_gate = LoxoneGate(**cover)
covers.append(new_gate)
elif cover["type"] == "Window":
new_window = LoxoneWindow(**cover)
covers.append(new_window)
else:
new_jalousie = LoxoneJalousie(**cover)
covers.append(new_jalousie)
@callback
def async_add_covers(_):
async_add_entites(_)
# miniserver.listeners.append(
# async_dispatcher_connect(
# hass, miniserver.async_signal_new_device(NEW_COVERS), async_add_entites
# )
# )
async_add_entites(covers)
class LoxoneGate(LoxoneEntity, CoverEntity):
"""Loxone Gate"""
def __init__(self, **kwargs):
LoxoneEntity.__init__(self, **kwargs)
self.hass = kwargs["hass"]
self._position_uuid = kwargs["states"]["position"]
self._state_uuid = kwargs["states"]["active"]
self._position = None
self._is_opening = False
self._is_closing = False
if self._position is None:
self._closed = True
else:
self._closed = self.current_cover_position <= 0
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_OPEN | SUPPORT_CLOSE | SUPPORT_STOP
@property
def should_poll(self):
"""No polling needed for a demo cover."""
return False
@property
def device_class(self):
"""Return the class of this device, from component DEVICE_CLASSES."""
if self.animation == 0:
return DEVICE_CLASS_GARAGE
elif self.animation in [1, 2, 3, 4, 5]:
return DEVICE_CLASS_DOOR
return self.type
@property
def animation(self):
return self.details["animation"]
@property
def current_cover_position(self):
"""Return the current position of the cover."""
return self._position
@property
def is_closed(self):
"""Return if the cover is closed."""
return self._closed
@property
def is_closing(self):
"""Return if the cover is closing."""
return self._is_closing
@property
def is_opening(self):
"""Return if the cover is opening."""
return self._is_opening
def open_cover(self, **kwargs):
"""Open the cover."""
if self._position == 100.0:
return
self.hass.bus.async_fire(SENDDOMAIN, dict(uuid=self.uuidAction, value="open"))
self.schedule_update_ha_state()
def close_cover(self, **kwargs):
"""Close the cover."""
if self._position == 0:
return
self.hass.bus.async_fire(SENDDOMAIN, dict(uuid=self.uuidAction, value="close"))
self.schedule_update_ha_state()
def stop_cover(self, **kwargs):
"""Stop the cover."""
if self.is_closing:
self.hass.bus.async_fire(
SENDDOMAIN, dict(uuid=self.uuidAction, value="open")
)
return
if self.is_opening:
self.hass.bus.async_fire(
SENDDOMAIN, dict(uuid=self.uuidAction, value="close")
)
return
async def event_handler(self, event):
if self.states["position"] in event.data or self._state_uuid in event.data:
if self.states["position"] in event.data:
self._position = float(event.data[self.states["position"]]) * 100.0
if self._position == 0:
self._closed = True
else:
self._closed = False
if self._state_uuid in event.data:
self._is_closing = False
self._is_opening = False
                # -1 reports the gate moving towards closed, 1 towards open
                # (same convention as the direction state handled by LoxoneWindow)
                if event.data[self._state_uuid] == -1:
                    self._is_closing = True
                elif event.data[self._state_uuid] == 1:
                    self._is_opening = True
self.schedule_update_ha_state()
@property
def extra_state_attributes(self):
"""Return device specific state attributes.
Implemented by platform classes.
"""
return {
"uuid": self.uuidAction,
"device_typ": self.type,
"category": self.cat,
"platform": "loxone",
}
@property
def device_info(self):
return {
"identifiers": {(DOMAIN, self.unique_id)},
"name": self.name,
"manufacturer": "Loxone",
"model": "Gate",
"type": self.type,
"suggested_area": self.room,
}
class LoxoneWindow(LoxoneEntity, CoverEntity):
# pylint: disable=no-self-use
def __init__(self, **kwargs):
LoxoneEntity.__init__(self, **kwargs)
self.hass = kwargs["hass"]
self._position = None
self._closed = True
self._direction = 0
async def event_handler(self, e):
if self.states["position"] in e.data or self.states["direction"] in e.data:
if self.states["position"] in e.data:
self._position = float(e.data[self.states["position"]]) * 100.0
if self._position == 0:
self._closed = True
else:
self._closed = False
if self.states["direction"] in e.data:
self._direction = e.data[self.states["direction"]]
self.schedule_update_ha_state()
@property
def current_cover_position(self):
"""Return current position of cover.
None is unknown, 0 is closed, 100 is fully open.
"""
return self._position
@property
def extra_state_attributes(self):
"""
Return device specific state attributes.
Implemented by platform classes.
"""
device_att = {
"uuid": self.uuidAction,
"device_typ": self.type,
"platform": "loxone",
"room": self.room,
"category": self.cat,
}
return device_att
@property
def device_class(self):
"""Return the class of this device, from component DEVICE_CLASSES."""
return DEVICE_CLASS_WINDOW
@property
def is_closing(self):
"""Return if the cover is closing."""
if self._direction == -1:
return True
return False
@property
def is_opening(self):
"""Return if the cover is opening."""
if self._direction == 1:
return True
return False
@property
def is_closed(self):
return self._closed
def open_cover(self, **kwargs: Any) -> None:
self.hass.bus.async_fire(
SENDDOMAIN, dict(uuid=self.uuidAction, value="fullopen")
)
def close_cover(self, **kwargs: Any) -> None:
self.hass.bus.async_fire(
SENDDOMAIN, dict(uuid=self.uuidAction, value="fullclose")
)
def stop_cover(self, **kwargs):
"""Stop the cover."""
if self.is_closing:
self.hass.bus.async_fire(
SENDDOMAIN, dict(uuid=self.uuidAction, value="fullopen")
)
elif self.is_opening:
self.hass.bus.async_fire(
SENDDOMAIN, dict(uuid=self.uuidAction, value="fullclose")
)
def set_cover_position(self, **kwargs):
"""Return the current tilt position of the cover."""
position = kwargs.get(ATTR_POSITION)
self.hass.bus.async_fire(
SENDDOMAIN,
dict(uuid=self.uuidAction, value="moveToPosition/{}".format(position)),
)
@property
def device_info(self):
return {
"identifiers": {(DOMAIN, self.unique_id)},
"name": self.name,
"manufacturer": "Loxone",
"model": "Window",
"suggested_area": self.room,
}
class LoxoneJalousie(LoxoneEntity, CoverEntity):
"""Loxone Jalousie"""
# pylint: disable=no-self-use
def __init__(self, **kwargs):
LoxoneEntity.__init__(self, **kwargs)
self.hass = kwargs["hass"]
if "autoInfoText" not in self.states:
self.states["autoInfoText"] = ""
if "autoState" not in self.states:
self.states["autoState"] = ""
self._position = 0
self._position_loxone = -1
self._tilt_position_loxone = 1
self._set_position = None
self._set_tilt_position = None
self._tilt_position = 0
self._requested_closing = True
self._unsub_listener_cover = None
self._unsub_listener_cover_tilt = None
self._is_opening = False
self._is_closing = False
self._animation = 0
self._is_automatic = False
self._auto_text = ""
self._auto_state = 0
if "isAutomatic" in self.details:
self._is_automatic = self.details["isAutomatic"]
if "animation" in self.details:
self._animation = self.details["animation"]
if self._position is None:
self._closed = True
else:
self._closed = self.current_cover_position <= 0
@property
def name(self):
return self._name
@name.setter
def name(self, n):
self._name = n
@property
def supported_features(self):
"""Flag supported features."""
supported_features = SUPPORT_OPEN | SUPPORT_CLOSE | SUPPORT_STOP
if self.current_cover_position is not None:
supported_features |= SUPPORT_SET_POSITION
if self.current_cover_tilt_position is not None:
supported_features |= (
SUPPORT_OPEN_TILT
| SUPPORT_CLOSE_TILT
| SUPPORT_SET_TILT_POSITION
)
return supported_features
async def event_handler(self, e):
if (
self.states["position"] in e.data
or self.states["shadePosition"] in e.data
or self.states["up"] in e.data
or self.states["down"] in e.data
or self.states["autoInfoText"] in e.data
or self.states["autoState"] in e.data
):
if self.states["position"] in e.data:
self._position_loxone = float(e.data[self.states["position"]]) * 100.0
self._position = map_range(self._position_loxone, 0, 100, 100, 0)
if self._position == 0:
self._closed = True
else:
self._closed = False
if self.states["shadePosition"] in e.data:
self._tilt_position_loxone = float(e.data[self.states["shadePosition"]]) * 100.0
self._tilt_position = map_range(self._tilt_position_loxone, 0, 100, 100, 0)
if self.states["up"] in e.data:
self._is_opening = e.data[self.states["up"]]
if self.states["down"] in e.data:
self._is_closing = e.data[self.states["down"]]
if self.states["autoInfoText"] in e.data:
self._auto_text = e.data[self.states["autoInfoText"]]
if self.states["autoState"] in e.data:
self._auto_state = e.data[self.states["autoState"]]
self.schedule_update_ha_state()
@property
def should_poll(self):
"""No polling needed for a demo cover."""
return False
@property
def current_cover_position(self):
"""Return the current position of the cover."""
return self._position
@property
def current_cover_tilt_position(self):
"""Return the current tilt position of the cover."""
return self._tilt_position
@property
def is_closed(self):
"""Return if the cover is closed."""
return self._closed
@property
def is_closing(self):
"""Return if the cover is closing."""
return self._is_closing
@property
def is_opening(self):
"""Return if the cover is opening."""
return self._is_opening
@property
def device_class(self):
"""Return the class of this device, from component DEVICE_CLASSES."""
if self.animation in [0, 1]:
return DEVICE_CLASS_BLIND
elif self.animation in [2, 4, 5]:
return DEVICE_CLASS_CURTAIN
elif self.animation == 3:
return DEVICE_CLASS_SHUTTER
elif self.animation == 6:
return DEVICE_CLASS_AWNING
@property
def animation(self):
return self.details["animation"]
@property
def is_automatic(self):
return self._is_automatic
@property
def auto(self):
if self._is_automatic and self._auto_state:
return STATE_ON
else:
return STATE_OFF
@property
def shade_postion_as_text(self):
"""Returns shade postionn as text"""
if self.current_cover_tilt_position == 100 and self.current_cover_position < 10:
return "shading on"
else:
return " "
@property
def extra_state_attributes(self):
"""
Return device specific state attributes.
Implemented by platform classes.
"""
device_att = {
"uuid": self.uuidAction,
"device_typ": self.type,
"platform": "loxone",
"room": self.room,
"category": self.cat,
"current_position": self.current_cover_position,
"current_shade_mode": self.shade_postion_as_text,
"current_position_loxone_style": round(self._position_loxone, 0),
}
if self._is_automatic:
device_att.update(
{"automatic_text": self._auto_text, "auto_state": self.auto}
)
return device_att
def close_cover(self, **kwargs):
"""Close the cover."""
if self._position == 0:
return
elif self._position is None:
self._closed = True
self.schedule_update_ha_state()
return
self.hass.bus.async_fire(
SENDDOMAIN, dict(uuid=self.uuidAction, value="FullDown")
)
self.schedule_update_ha_state()
def open_cover(self, **kwargs):
"""Open the cover."""
if self._position == 100.0:
return
elif self._position is None:
self._closed = False
self.schedule_update_ha_state()
return
self.hass.bus.async_fire(SENDDOMAIN, dict(uuid=self.uuidAction, value="FullUp"))
self.schedule_update_ha_state()
def stop_cover(self, **kwargs):
"""Stop the cover."""
self.hass.bus.async_fire(
SENDDOMAIN, dict(uuid=self.uuidAction, value="stop")
)
def set_cover_position(self, **kwargs):
"""Return the current tilt position of the cover."""
position = kwargs.get(ATTR_POSITION)
mapped_pos = map_range(position, 0, 100, 100, 0)
self.hass.bus.async_fire(SENDDOMAIN, dict(uuid=self.uuidAction, value=f"manualPosition/{mapped_pos}"))
def open_cover_tilt(self, **kwargs):
"""Close the cover tilt."""
position = 0.0 + random.uniform(0.000000001, 0.00900000)
self.hass.bus.async_fire(SENDDOMAIN, dict(uuid=self.uuidAction, value=f"manualLamelle/{position}"))
def stop_cover_tilt(self, **kwargs):
"""Stop the cover."""
self.hass.bus.async_fire(
SENDDOMAIN, dict(uuid=self.uuidAction, value="stop")
)
def close_cover_tilt(self, **kwargs):
"""Close the cover tilt."""
position = 100.0 + random.uniform(0.000000001, 0.00900000)
self.hass.bus.async_fire(SENDDOMAIN, dict(uuid=self.uuidAction, value=f"manualLamelle/{position}"))
def set_cover_tilt_position(self, **kwargs):
"""Move the cover tilt to a specific position."""
tilt_position = kwargs.get(ATTR_TILT_POSITION)
mapped_pos = map_range(tilt_position, 0, 100, 100, 0)
position = mapped_pos + random.uniform(0.000000001, 0.00900000)
self.hass.bus.async_fire(SENDDOMAIN, dict(uuid=self.uuidAction, value=f"manualLamelle/{position}"))
@property
def device_info(self):
return {
"identifiers": {(DOMAIN, self.unique_id)},
"name": self.name,
"manufacturer": "Loxone",
"model": "Jalousie",
"type": self.type,
"suggested_area": self.room,
}
|
from libs.primelib import Prime
import time
# The sum of the squares of the first ten natural numbers is
# 1^2 + 2^2 + ... + 10^2 = 385.
# The square of the sum of the first ten natural numbers is
# (1 + 2 + ... + 10)^2 = 55^2 = 3025.
# Hence the difference between the sum of the squares of the first ten natural numbers and the square of the sum is 3025 - 385 = 2640.
# Find the difference between the sum of the squares of the first one hundred natural numbers and the square of the sum.
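# Closed-form cross-check (illustrative sketch, not used by main() below): with the
# identities 1 + 2 + ... + n = n(n+1)/2 and 1^2 + 2^2 + ... + n^2 = n(n+1)(2n+1)/6,
# n = 100 gives 5050^2 = 25502500 and 338350, so the difference is 25164150.
def closed_form_difference(n):
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares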
LIMIT = 100
def main():
print('script started ...')
    start = time.perf_counter()  # time.clock() was removed in Python 3.8
sumOfSquares = 0
squaresOfSum = 0
for x in range(1, LIMIT + 1):
sumOfSquares += x**2
squaresOfSum += x
squaresOfSum = squaresOfSum**2
answer = squaresOfSum - sumOfSquares
    end = time.perf_counter()
print('answer: ' + str(answer))
print('time: ' + str(end - start))
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# --------------------------------------------------------
# Attributes2Classname: A discriminative model for attribute-based unsupervised zero-shot learning.
# Written by berkan
# Contact: demirelberkan@gmail.com
# --------------------------------------------------------
import numpy as np
import tensorflow as tf
import scipy.io as sio
from scipy import spatial
import master, results
import tflearn, itertools
import pickle
from datetime import datetime
FLAGS = tf.app.flags.FLAGS
def extractData(data, dataName):
dataContent = sio.loadmat(data)
dataContent = dataContent[dataName]
# Return feature matrix.
return dataContent
def flatten(listOfLists):
"Flatten one level of nesting"
return itertools.chain.from_iterable(listOfLists)
def generateAverageWordVectors( wordVectors, vectorWeights):
return vectorWeights.dot(wordVectors)
def generatePerturbedExamples( predicateMatrix, corruptionLevel ):
newData = predicateMatrix
for i in xrange(corruptionLevel-1):
tmpPredicateMatrix = predicateMatrix
r = np.random.random((len(predicateMatrix), len(predicateMatrix[0])))
si = np.argsort(r)
si = si[:, range(0,i)]
for j in xrange(len(predicateMatrix)):
tmpPredicateMatrix[j, si[j,:]] = np.logical_not(tmpPredicateMatrix[j, si[j,:]])
newData = np.concatenate((newData, tmpPredicateMatrix), axis=0)
return newData
def lossFunction( classVec, attributeVec, wrongClassVec, correctPredicateBasedAttrVec, wrongPredicateBasedAttrVec, hammingDistance ):
classVec = classVec/tf.sqrt(tf.reduce_sum(tf.square(classVec), 1, keep_dims=True))
attributeVec = attributeVec / tf.sqrt(tf.reduce_sum(tf.square(attributeVec), 1, keep_dims=True))
correctPredicateBasedAttrVec = correctPredicateBasedAttrVec / tf.sqrt(tf.reduce_sum(tf.square(correctPredicateBasedAttrVec), 1, keep_dims=True))
wrongPredicateBasedAttrVec = wrongPredicateBasedAttrVec / tf.sqrt(tf.reduce_sum(tf.square(wrongPredicateBasedAttrVec), 1, keep_dims=True))
wrongClassVec = wrongClassVec / tf.sqrt(tf.reduce_sum(tf.square(wrongClassVec), 1, keep_dims=True))
correctComb = tf.matmul(classVec, attributeVec, transpose_b=True)
wrongComb = tf.matmul(wrongClassVec, attributeVec, transpose_b=True)
predicateBasedCorrectAttributeComb = tf.matmul(classVec, correctPredicateBasedAttrVec, transpose_b=True)
predicateBasedWrongAttributeComb = tf.matmul(classVec, wrongPredicateBasedAttrVec, transpose_b=True)
if master.applyLossType == master.lossType[0]: #predicate matrix based
return tf.maximum((predicateBasedWrongAttributeComb + hammingDistance) - predicateBasedCorrectAttributeComb, 0)
elif master.applyLossType == master.lossType[1]: #image based
return tf.maximum((wrongComb + hammingDistance) - correctComb, 0)
else: #combined
return tf.maximum((predicateBasedWrongAttributeComb + hammingDistance) - predicateBasedCorrectAttributeComb, 0) \
+ tf.maximum((wrongComb + hammingDistance) - correctComb, 0 )
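# Note on lossFunction: every branch above is a margin ranking loss of the form
# max(0, margin + similarity(wrong pairing) - similarity(correct pairing)), where the
# margin passed in as hammingDistance grows with the number of attributes in which the
# two classes differ, so more dissimilar classes must be separated by a larger gap.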
def evalFunction( classVec, attributeVec, groundTruthLabels ):
classVec = classVec/tf.sqrt(tf.reduce_sum(tf.square(classVec), 1, keep_dims=True))
attributeVec = attributeVec / tf.sqrt(tf.reduce_sum(tf.square(attributeVec), 1, keep_dims=True))
similarity = tf.matmul(classVec, attributeVec, transpose_b=True)
return similarity
def batch_norm(x, n_out, phase_train, scope='bn'):
with tf.variable_scope(scope):
beta = tf.Variable(tf.constant(0.0, shape=[n_out]),
name='beta', trainable=True)
gamma = tf.Variable(tf.constant(1.0, shape=[n_out]),
name='gamma', trainable=True)
batch_mean, batch_var = tf.nn.moments(x, [0,1], name='moments')
ema = tf.train.ExponentialMovingAverage(decay=0.5)
def mean_var_with_update():
ema_apply_op = ema.apply([batch_mean, batch_var])
with tf.control_dependencies([ema_apply_op]):
return tf.identity(batch_mean), tf.identity(batch_var)
mean, var = tf.cond(phase_train,
mean_var_with_update,
lambda: (ema.average(batch_mean), ema.average(batch_var)))
normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3)
return normed
def main(argv=None):
with open('objs.pickle') as f:
__C = pickle.load(f)
# Get the data.
train_classes_filename = __C.get('TRAIN_CLASS_PATH')
test_classes_filename = __C.get('TEST_CLASS_PATH')
attribute_vectors_filename = __C.get('ATTRIBUTE_VECTOR_PATH')
predicate_matrix_filename = __C.get('PREDICATE_MATRIX_PATH')
attr_classifiers_filename = __C.get('ATTR_CLASSIFIER_RESULTS_PATH')
groundtruth_labels_filename = __C.get('GROUND_TRUTH_LABELS')
train_image_labels_filename = __C.get('TRAIN_IMAGE_LABELS')
train_scores_filename = __C.get('TRAIN_SCORES')
logFileName = __C.get('LOG_FILE')
tmpFileName = __C.get('TMP_FILENAME')
plotAccuracyPerNIter = __C.get('PLOT_ACC_PER_N_ITER')
networkModel = __C.get('CURR_MODEL')
# Get the number of epochs for training.
num_epochs = __C.get('NUM_EPOCH')
#Get the verbose status
verbose = __C.get('VERBOSE')
# Get the size of layer one.
num_hidden = __C.get('CURR_HIDDEN')
# Get the status of hand-crafted examples
perturbed_examples = __C.get('PERTURBED_EXAMPLES')
#Get the corruption level of hand-crafted examples
corruption_level = __C.get('PERTURBED_EXAMPLE_CORRLEVEL')
#get batch size
batch_size = __C.get('MAX_BATCH_SIZE')-1
trainClasses = extractData(train_classes_filename, 'trainClasses')
testClasses = extractData(test_classes_filename, 'testClasses')
attributeVectors = extractData(attribute_vectors_filename, 'attributeVectors')
predicateMatrix = extractData(predicate_matrix_filename, 'predicateMatrix')
attributeClassifierResults = extractData(attr_classifiers_filename, 'attClassifierResults')
groundTruthLabels = extractData(groundtruth_labels_filename, 'groundTruthLabels')
trainImageLabels = extractData(train_image_labels_filename, 'trainImageLabels')
trainScores = extractData(train_scores_filename, 'trainScores')
# XXX TEMPORARY
#trainClasses = trainClasses / np.linalg.norm(trainClasses, axis = 1, keepdims=True)
#testClasses = testClasses / np.linalg.norm(testClasses, axis = 1, keepdims=True)
#attributeVectors = attributeVectors / np.linalg.norm(attributeVectors, axis = 1, keepdims=True)
# XXX TEMPORARY
#const_scale=0.4
#attributeVectors = attributeVectors*const_scale
#trainClasses = trainClasses*const_scale
#testClasses = testClasses*const_scale
# Get the shape of the training data.
train_size,num_features = trainClasses.shape
# Get the shape of the training images.
image_size, _ = predicateMatrix.shape
# Get Average word vectors
averageTrainAttributeVectors = generateAverageWordVectors( attributeVectors, trainScores )
averageTrainPredicateMatrixBasedAttributeVectors = generateAverageWordVectors(attributeVectors, predicateMatrix)
averageTestAttributeVectors = generateAverageWordVectors( attributeVectors, attributeClassifierResults )
# This is where training samples and labels are fed to the graph.
# These placeholder nodes will be fed a batch of training data at each
# training step using the {feed_dict} argument to the Run() call below.
classVecInput = tf.placeholder("float", shape=[None, num_features], name='CC')
correctAttributeVecInput = tf.placeholder("float", shape=[None, num_features], name='CA')
wrongPredicateBasedAttributeVecInput = tf.placeholder("float", shape=[None, num_features], name='WPA')
correctPredicateBasedAttributeVecInput = tf.placeholder("float", shape=[None, num_features], name='CPA')
hammingDistanceInput = tf.placeholder("float", shape=[None, None], name='HD')
wrongClassVecInput = tf.placeholder("float", shape=[None, num_features], name='WC')
groundTruthLabelsInput = tf.constant(groundTruthLabels.T, 'float')
# hamming distance between class vectors.
hammingDistClasses = np.zeros((len(predicateMatrix),len(predicateMatrix)), dtype=float)
for i in xrange(len(predicateMatrix)):
for j in xrange(len(predicateMatrix)):
hammingDistClasses[i,j] = spatial.distance.hamming( predicateMatrix[i,:], predicateMatrix[j,:] )
# Initialize the hidden weights and pass inputs
with tf.variable_scope("wScope", reuse=False):
wHidden = tf.get_variable('W1',
shape=[num_features, num_hidden],
initializer=tflearn.initializations.uniform_scaling(shape=None, factor=1.0, dtype=tf.float32, seed=0))
wHidden2 = tf.get_variable('W2',
shape=[num_hidden, num_hidden],
initializer=tflearn.initializations.uniform_scaling(shape=None, factor=1.0, dtype=tf.float32, seed=0))
firstLayer = tf.nn.tanh(tf.matmul(classVecInput, wHidden))
correctClassOutput = tf.nn.sigmoid(tf.matmul(firstLayer, wHidden2))
with tf.variable_scope("wScope", reuse=True):
wHidden = tf.get_variable('W1')
wHidden2 = tf.get_variable('W2')
firstLayer = tf.nn.tanh(tf.matmul(correctAttributeVecInput, wHidden))
correctAttributeOutput = tf.nn.sigmoid(tf.matmul(firstLayer, wHidden2))
with tf.variable_scope("wScope", reuse=True):
wHidden = tf.get_variable('W1')
wHidden2 = tf.get_variable('W2')
firstLayer = tf.nn.tanh(tf.matmul(wrongClassVecInput, wHidden))
wrongClassOutput = tf.nn.sigmoid(tf.matmul(firstLayer, wHidden2))
with tf.variable_scope("wScope", reuse=True):
wHidden = tf.get_variable('W1')
wHidden2 = tf.get_variable('W2')
firstLayer = tf.nn.tanh(tf.matmul(correctPredicateBasedAttributeVecInput, wHidden))
correctPredicateBasedAttributeOutput = tf.nn.sigmoid(tf.matmul(firstLayer, wHidden2))
with tf.variable_scope("wScope", reuse=True):
wHidden = tf.get_variable('W1')
wHidden2 = tf.get_variable('W2')
firstLayer = tf.nn.tanh(tf.matmul(wrongPredicateBasedAttributeVecInput, wHidden))
wrongPredicateBasedAttributeOutput = tf.nn.sigmoid(tf.matmul(firstLayer, wHidden2))
loss = tf.reduce_sum(
lossFunction(correctClassOutput, correctAttributeOutput, wrongClassOutput,
correctPredicateBasedAttributeOutput, wrongPredicateBasedAttributeOutput, hammingDistanceInput))
# Optimization.
train = tf.train.AdamOptimizer(1e-4).minimize(loss)
accuracy = evalFunction( correctClassOutput, correctAttributeOutput, groundTruthLabelsInput )
classVectorsTensor = correctClassOutput
attributeVectorsTensor = correctAttributeOutput
#write results to the tmp file.
file_ = open( tmpFileName, 'a' )
logFile = open(logFileName, 'a')
saver = tf.train.Saver()
randomnessFlag = False
timeStamp = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
loggedTrainData = []
loggedTestData = []
initializationFlag = False
# Create a local session to run this computation.
with tf.Session() as s:
# Run all the initializers to prepare the trainable parameters.
try:
if __C.get('SAVE_MODEL') == True:
saver.restore(s, __C.get('LEARNED_MODEL_PATH')+str(num_hidden)+".ckpt")
else:
tf.initialize_all_variables().run()
except:
tf.initialize_all_variables().run()
totalLoss = 0
numberOfVectorPerIter = len( trainImageLabels )
# Iterate and train.
for step in xrange( num_epochs * image_size):
offset = step % train_size
currClassIndices = [i for i, x in enumerate(trainImageLabels) if x == offset+1] #is this class valid for training set?
if currClassIndices != []:
currTrainClass = trainClasses[offset:(offset + 1), :] # word vector of current training class
            # determine the average word vector of the attributes valid for the current training class
currTrainAttributes = averageTrainAttributeVectors[currClassIndices, :]
validIndices = range(0, numberOfVectorPerIter)
validIndices = list(set(validIndices) - set(currClassIndices)) # find valid training indices for another classes
invalidClasses = np.unique(trainImageLabels[validIndices]) # determine another classes
wrongTrainClasses = trainClasses[invalidClasses-1, :] # word vectors of another classes
currPredicateBasedTrainAttributes = averageTrainPredicateMatrixBasedAttributeVectors[np.unique(trainImageLabels[currClassIndices])-1,:]
wrongPredicateBasedTrainAttributes = averageTrainPredicateMatrixBasedAttributeVectors[np.unique(invalidClasses-1,),:]
if master.applyLossType == master.lossType[2]:
currPredicateBasedTrainAttributes = \
np.repeat(currPredicateBasedTrainAttributes, len(currTrainAttributes), axis=0)
repeatTimes = len(currTrainAttributes) / len(wrongPredicateBasedTrainAttributes)
wrongPredicateBasedTrainAttributes = \
np.repeat(wrongPredicateBasedTrainAttributes, repeatTimes+1, axis=0)
wrongPredicateBasedTrainAttributes = wrongPredicateBasedTrainAttributes[0:len(currTrainAttributes),:]
currentHammingDistance = hammingDistClasses[offset:(offset + 1), invalidClasses-1]
#forward pass
_, curr_loss = s.run([train, loss], feed_dict={classVecInput: currTrainClass,
correctAttributeVecInput: currTrainAttributes,
wrongClassVecInput: wrongTrainClasses,
correctPredicateBasedAttributeVecInput: currPredicateBasedTrainAttributes,
wrongPredicateBasedAttributeVecInput: wrongPredicateBasedTrainAttributes,
hammingDistanceInput: currentHammingDistance.T})
totalLoss = curr_loss + totalLoss
if offset == 0:
if verbose:
print 'Loss: ', totalLoss
trainAccuracy = 0
testAccuracy = 0
accuracyFlag = False
if (step % plotAccuracyPerNIter) == 0:
#evaluate network results
trainScores = \
accuracy.eval(feed_dict={classVecInput: trainClasses[np.unique(trainImageLabels)-1,:],
correctAttributeVecInput: averageTrainAttributeVectors})
trainAccuracy = results.getResults(trainImageLabels, trainScores)
print 'train Accuracy: ' + str(trainAccuracy)
accuracyFlag = True
testScores = \
accuracy.eval(feed_dict={classVecInput: testClasses,
correctAttributeVecInput: averageTestAttributeVectors})
testAccuracy = results.getResults(groundTruthLabels, testScores, False)
print 'Test Accuracy: ' + str(testAccuracy)
if initializationFlag == False:
if master.saveWordVectors == True:
initialTestClasses = \
classVectorsTensor.eval(feed_dict={classVecInput: testClasses,
correctAttributeVecInput: averageTestAttributeVectors})
initialAttributes = \
attributeVectorsTensor.eval(feed_dict={classVecInput: testClasses,
correctAttributeVecInput: averageTestAttributeVectors})
initialTrainClasses = \
classVectorsTensor.eval(feed_dict={classVecInput: trainClasses[np.unique(trainImageLabels) - 1, :],
correctAttributeVecInput: averageTrainAttributeVectors})
initialTestScores = testScores
initializationFlag = True
if accuracyFlag == True:
loggedTrainData.append(trainAccuracy*100)
loggedTestData.append(testAccuracy*100)
logFile.write('#HiddenUnit:'+ str(__C.get('CURR_HIDDEN'))
+',Step:'+str(step)+',Accuracy:'+str(testAccuracy*100) + '\n')
if master.applyCrossValidation == False:
results.drawAccuracyCurves(loggedTrainData, loggedTestData, timeStamp)
if (totalLoss <= __C.get('OVERFITTING_THRESHOLD') or __C.get('STOP_ITER') <= step) and step !=0:
testAccuracy = results.getResults(groundTruthLabels, testScores, False)
file_.write(str(testAccuracy) + '\n')
file_.close()
logFile.close()
results.getResults(groundTruthLabels, testScores, False, True)
if __C.get('SAVE_MODEL') == True:
saver.save(s, __C.get('LEARNED_MODEL_PATH')+str(num_hidden)+".ckpt")
if master.saveWordVectors == True:
wordVectorsSavePath = __C.get('WORD_VECTORS')
finalTrainClasses = \
classVectorsTensor.eval(feed_dict={classVecInput: trainClasses[np.unique(trainImageLabels) - 1, :],
correctAttributeVecInput: averageTrainAttributeVectors})
finalTestClasses = \
classVectorsTensor.eval(feed_dict={classVecInput: testClasses,
correctAttributeVecInput: averageTestAttributeVectors})
finalAttributes = \
attributeVectorsTensor.eval(feed_dict={classVecInput: testClasses,
correctAttributeVecInput: averageTestAttributeVectors})
finalTestScores = testScores
sio.savemat(wordVectorsSavePath+'initialTestClasses.mat', {'initialTestClasses': initialTestClasses})
sio.savemat(wordVectorsSavePath+'finalTestClasses.mat', {'finalTestClasses': finalTestClasses})
sio.savemat(wordVectorsSavePath+'initialAttributes.mat', {'initialAttributes': initialAttributes})
sio.savemat(wordVectorsSavePath+'finalAttributes.mat', {'finalAttributes': finalAttributes})
sio.savemat(wordVectorsSavePath + 'initialTrainClasses.mat',{'initialTrainClasses': initialTrainClasses})
sio.savemat(wordVectorsSavePath + 'finalTrainClasses.mat',{'finalTrainClasses': finalTrainClasses})
sio.savemat(wordVectorsSavePath + 'initialTestScores.mat',{'initialTestScores': initialTestScores})
sio.savemat(wordVectorsSavePath + 'finalTestScores.mat',{'finalTestScores': finalTestScores})
return
totalLoss = 0
if __name__ == '__main__':
tf.app.run()
|
import config
from numba.core import types
from numba.typed import Dict
import numpy as np
import numba
numba.config.THREADING_LAYER = 'safe'
def to_typed_dict_rule_tensor(untyped_d, dimension, pi=False):
if dimension == 1:
t = types.float64[:]
elif dimension == 2:
t = types.float64[:, :]
elif dimension == 3:
t = types.float64[:, :, :]
typed_d = Dict.empty(key_type=types.int64, value_type=t)
if pi:
        for nonterm, tensor in untyped_d.items():
typed_d[nonterm] = tensor.astype(np.float64)
else:
for rule, tensor in untyped_d.items():
assert (hash(rule) not in typed_d)
typed_d[hash(rule)] = tensor.astype(np.float64)
return typed_d
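# Illustrative sketch (hypothetical grammar rule): an untyped dict such as
# {('S', 'NP', 'VP'): np.ones((2, 2, 2))} converted with dimension=3 becomes a numba
# typed Dict keyed by hash(('S', 'NP', 'VP')) holding a float64[:, :, :] array, so
# jitted code can look rules up without boxing Python objects.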
def to_typed_dict_nonterm_rules(untyped_d):
typed_d = Dict.empty(key_type=types.int64, value_type=types.int64[:])
for nonterm, rules in untyped_d.items():
np_rules = np.array([hash(rule) for rule in rules], dtype=np.int64)
typed_d[nonterm] = np_rules
return typed_d
def to_typed_dict_rule_float(untyped_d, pi=False):
typed_d = Dict.empty(key_type=types.int64, value_type=types.float64)
if pi:
for nonterm, prob in untyped_d.items():
typed_d[nonterm] = prob
else:
for rule, prob in untyped_d.items():
assert(hash(rule) not in typed_d)
typed_d[hash(rule)] = prob
return typed_d
config.rule3s_full = to_typed_dict_rule_tensor(config.lpcfg.rule3s, 3)
config.rule1s_full = to_typed_dict_rule_tensor(config.lpcfg.rule1s, 1)
config.pi_full = to_typed_dict_rule_tensor(config.lpcfg.pi, 1, pi=True)
config.rule3s_prune = to_typed_dict_rule_float(config.pcfg.rule3s)
config.rule1s_prune = to_typed_dict_rule_float(config.pcfg.rule1s)
config.pi_prune = to_typed_dict_rule_float(config.pcfg.pi, pi=True)
config.rule3s_lookupC = to_typed_dict_nonterm_rules(config.rule3s_lookupC)
config.rule1s_lookup = to_typed_dict_nonterm_rules(config.rule1s_lookup)
config.numba_ready = True
|
from main import decompose
from main import decompose2
def test(benchmark):
assert benchmark(decompose, 5) == [3, 4]
def test2(benchmark):
assert benchmark(decompose2, 5) == [3, 4]
'''
---------------------------------------------------------------------------------- benchmark: 2 tests ----------------------------------------------------------------------------------
Name (time in us) Min Max Mean StdDev Median IQR Outliers OPS (Kops/s) Rounds Iterations
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
test 1.1290 (1.0) 12.9650 (1.0) 1.2070 (1.0) 0.2362 (1.0) 1.1900 (1.0) 0.0230 (1.0) 1203;4325 828.4878 (1.0) 122026 1
test2 2.0980 (1.86) 32.3090 (2.49) 2.2388 (1.85) 0.4605 (1.95) 2.1970 (1.85) 0.0300 (1.30) 2014;6523 446.6719 (0.54) 143103 1
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Legend:
Outliers: 1 Standard Deviation from Mean; 1.5 IQR (InterQuartile Range) from 1st Quartile and 3rd Quartile.
OPS: Operations Per Second, computed as 1 / Mean
============================================================================ 2 passed in 2.68 seconds =================================================
'''
|
from django.contrib import admin
from watchlist_app.models import Review, MovieList, StreamPlatform, Genre
# Register your models here.
admin.site.register(Genre)
admin.site.register(Review)
admin.site.register(MovieList)
admin.site.register(StreamPlatform)
|
# -*- coding: utf-8 -*-
"""Advent of Code 2020 - Day 10 - Adapter Array."""
import argparse
import pdb
import traceback
from itertools import combinations
from typing import Dict, List
def read_adapters(fname: str) -> List[int]:
with open(fname, "rt") as inf:
return [0] + list(sorted(map(int, inf.read().splitlines())))
def get_candidates(target: int, joltages: List[int]) -> List[int]:
results: List[int] = []
for joltage in joltages:
if joltage <= target + 3:
results.append(joltage)
return results
def solve(adapters: List[int]):
diffs: Dict[int, int] = { 1: 0, 2: 0, 3: 1}
for i in range(1, len(adapters)):
diffs[adapters[i] - adapters[i-1]] += 1
one = diffs[1] * diffs[3]
paths = [1] + [0] * (len(adapters) - 1)
for i, adapter in enumerate(adapters):
        for j in range(max(0, i - 3), i):  # clamp so early indices don't wrap to the end of the list
if adapter - adapters[j] <= 3:
paths[i] += paths[j]
two = paths[-1]
return (one, two)
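# Part two above is a path-counting DP: paths[i] is the number of valid adapter chains
# ending at adapters[i], and each adapter inherits the counts of its (at most three)
# predecessors within 3 jolts. Worked example: for adapters [0, 1, 2, 3] the counts are
# [1, 1, 2, 4], matching the four arrangements {1,2,3}, {1,3}, {2,3} and {3}.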
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Advent of Code - 2020 - Day 10 - Adapter Array.")
parser.add_argument(
"input",
type=str,
default="input.txt",
nargs="?",
help="The puzzle input. (Default %(default)s)",
)
args = parser.parse_args()
try:
data = read_adapters(args.input)
print(solve(data))
except Exception:
traceback.print_exc()
pdb.post_mortem()
|
from colosseum.agents.episodic.q_learning.agent import QLearningEpisodic
Agent = QLearningEpisodic
|
import numpy
import torch
import torch.nn as nn
import torch.nn.functional as F
from . import TORCH
def parameter_count(model: nn.Module):
total = 0
for parameter in model.parameters():
total += numpy.prod(parameter.shape)
return int(total)
def dense_net(in_channels: int,
out_channels: int,
layers: tuple or list,
batch_norm=False) -> nn.Module:
if batch_norm:
raise NotImplementedError("only batch_norm=False currently supported")
layers = [in_channels, *layers, out_channels]
return DenseNet(layers)
class DenseNet(nn.Module):
def __init__(self,
layers: list):
super(DenseNet, self).__init__()
self._layers = layers
for i, (s1, s2) in enumerate(zip(layers[:-1], layers[1:])):
self.add_module(f'linear{i}', nn.Linear(s1, s2, bias=True))
def forward(self, x):
for i in range(len(self._layers) - 2):
x = F.relu(getattr(self, f'linear{i}')(x))
x = getattr(self, f'linear{len(self._layers) - 2}')(x)
return x
def u_net(in_channels: int,
out_channels: int,
levels: int = 4,
filters: int or tuple or list = 16,
batch_norm=True) -> nn.Module:
if not batch_norm:
raise NotImplementedError("only batch_norm=True currently supported")
if isinstance(filters, (tuple, list)):
assert len(filters) == levels, f"List of filters has length {len(filters)} but u-net has {levels} levels."
else:
filters = (filters,) * levels
net = UNet(in_channels, out_channels, filters)
net = net.to(TORCH.get_default_device().ref)
return net
class UNet(nn.Module):
def __init__(self,
in_channels: int,
out_channels: int,
filter_counts: tuple,
batch_norm=True):
super(UNet, self).__init__()
assert batch_norm, "Not yet implemented" # TODO
self._levels = len(filter_counts)
self.inc = DoubleConv(in_channels, filter_counts[0])
for i in range(1, self._levels):
self.add_module(f'down{i}', Down(filter_counts[i - 1], filter_counts[i]))
self.add_module(f'up{i}', Up(filter_counts[i] + filter_counts[i-1], filter_counts[i - 1]))
self.outc = OutConv(filter_counts[0], out_channels)
def forward(self, x):
x = self.inc(x)
xs = [x]
for i in range(1, self._levels):
xs.insert(0, getattr(self, f'down{i}')(x))
x = xs[0]
for i in range(1, self._levels):
x = getattr(self, f'up{i}')(x, xs[i])
x = self.outc(x)
return x
class DoubleConv(nn.Module):
"""(convolution => [BN] => ReLU) * 2"""
def __init__(self, in_channels, out_channels, mid_channels=None):
super().__init__()
if not mid_channels:
mid_channels = out_channels
self.double_conv = nn.Sequential(
nn.Conv2d(in_channels, mid_channels, kernel_size=3, padding=1),
nn.BatchNorm2d(mid_channels),
nn.ReLU(inplace=True),
nn.Conv2d(mid_channels, out_channels, kernel_size=3, padding=1),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True)
)
def forward(self, x):
return self.double_conv(x)
class Down(nn.Module):
"""Downscaling with maxpool then double conv"""
def __init__(self, in_channels, out_channels):
super().__init__()
self.maxpool_conv = nn.Sequential(
nn.MaxPool2d(2),
DoubleConv(in_channels, out_channels)
)
def forward(self, x):
return self.maxpool_conv(x)
class Up(nn.Module):
"""Upscaling then double conv"""
def __init__(self, in_channels, out_channels, bilinear=True):
super().__init__()
# if bilinear, use the normal convolutions to reduce the number of channels
if bilinear:
self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
self.conv = DoubleConv(in_channels, out_channels, in_channels // 2)
else:
self.up = nn.ConvTranspose2d(in_channels, in_channels // 2, kernel_size=2, stride=2)
self.conv = DoubleConv(in_channels, out_channels)
def forward(self, x1, x2):
x1 = self.up(x1)
# input is CHW
diffY = x2.size()[2] - x1.size()[2]
diffX = x2.size()[3] - x1.size()[3]
x1 = F.pad(x1, [diffX // 2, diffX - diffX // 2,
diffY // 2, diffY - diffY // 2])
# if you have padding issues, see
# https://github.com/HaiyongJiang/U-Net-Pytorch-Unstructured-Buggy/commit/0e854509c2cea854e247a9c615f175f76fbb2e3a
# https://github.com/xiaopeng-liao/Pytorch-UNet/commit/8ebac70e633bac59fc22bb5195e513d5832fb3bd
x = torch.cat([x2, x1], dim=1)
return self.conv(x)
class OutConv(nn.Module):
def __init__(self, in_channels, out_channels):
super(OutConv, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1)
def forward(self, x):
return self.conv(x)
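# Minimal usage sketch (illustrative; assumes this module is imported as part of its
# package so the relative TORCH import resolves):
#
#     mlp = dense_net(in_channels=3, out_channels=2, layers=[16, 16])
#     mlp(torch.zeros(4, 3)).shape             # -> torch.Size([4, 2])
#
#     unet = UNet(in_channels=1, out_channels=1, filter_counts=(8, 16))
#     unet(torch.zeros(1, 1, 32, 32)).shape    # -> torch.Size([1, 1, 32, 32])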
|
import asyncio
import hashlib
import hmac
from time import time
from typing import Optional
import stomper
import websockets
import requests
class LatokenClient:
baseWS = 'wss://api.latoken.com/stomp'
baseAPI = 'https://api.latoken.com'
# REST (calls)
# Basic info
user_info_call = '/v2/auth/user' # PRIVATE
time_call = '/v2/time' # PUBLIC
# Balances (all PRIVATE)
account_balances_call = '/v2/auth/account'
currency_balance_by_type_call = '/v2/auth/account/currency/{currency}/{accountType}'
# Orders (all PRIVATE)
order_place_call = '/v2/auth/order/place'
order_cancel_all_call = '/v2/auth/order/cancelAll'
order_cancel_id_call = '/v2/auth/order/cancel'
order_cancel_pair_call = '/v2/auth/order/cancelAll/{currency}/{quote}'
order_status_call = '/v2/auth/order/getOrder/{}' # Get order by id
order_pair_active_call = '/v2/auth/order/pair/{currency}/{quote}/active'
order_pair_all_call = '/v2/auth/order/pair/{currency}/{quote}'
order_all_call = '/v2/auth/order' # Provides orders history (closed, cancelled, placed orders)
# Fees
fee_levels_call = '/v2/trade/feeLevels' # PUBLIC
fee_scheme_per_pair_call = '/v2/trade/fee/{currency}/{quote}' # PUBLIC
fee_scheme_par_pair_and_user_call = '/v2/auth/trade/fee/{currency}/{quote}' # PRIVATE
# Trades
trades_user_call = '/v2/auth/trade' # PRIVATE
trades_user_pair_call = '/v2/auth/trade/pair/{currency}/{quote}' # PRIVATE
trades_all_call = '/v2/trade/history/{currency}/{quote}' # PUBLIC
# Books (all PUBLIC)
orderbook_call = '/v2/book/{currency}/{quote}'
# Tickers (all PUBLIC)
tickers_call = '/v2/ticker'
tickers_per_pair_call = '/v2/ticker/{currency}/{quote}'
# Currencies and pairs (all PUBLIC)
active_currency_call = '/v2/currency' # Available path param not implemented as it returns the same as this endpoint
currency_call = '/v2/currency/{currency}'
quote_currency_call = '/v2/currency/quotes'
active_pairs_call = '/v2/pair' # Available path param not implemented as it returns the same as this endpoint
# Historic prices (all PUBLIC)
weekly_chart_call = '/v2/chart/week'
weekly_chart_by_pair_call = '/v2/chart/week/{currency}/{quote}'
candles_call = '/v2/tradingview/history?symbol={currency}%2F{quote}&resolution={resolution}&from={from}&to={to}'
# Spot transfers (all Private)
deposit_spot_call = '/v2/auth/spot/deposit'
withdraw_spot_call = '/v2/auth/spot/withdraw'
# Transfers (all Private)
transfer_by_id_call = '/v2/auth/transfer/id'
transfer_by_phone_call = '/v2/auth/transfer/phone'
transfer_by_email_call = '/v2/auth/transfer/email'
transfer_get_all_call = '/v2/auth/transfer'
# Bindings data (for deposits and withdrawals)
bindings_active_call = '/v2/transaction/bindings' # PUBLIC
bindings_active_currencies_call = '/v2/auth/transaction/bindings' # PRIVATE
bindings_currency_call = '/v2/auth/transaction/bindings/{currency}' # PRIVATE
# Transactions (all Private)
deposit_address_call = '/v2/auth/transaction/depositAddress'
withdrawal_request_call = '/v2/auth/transaction/withdraw'
withdrawal_cancel_call = '/v2/auth/transaction/withdraw/cancel'
withdrawal_confirmation_call = '/v2/auth/transaction/withdraw/confirm'
withdrawal_code_resend_call = '/v2/auth/transaction/withdraw/resendCode'
transaction_all_call = '/v2/auth/transaction'
transaction_by_id_call = '/v2/auth/transaction/{id}'
# WS (streams)
# Public
book_stream = '/v1/book/{currency}/{quote}'
trades_stream = '/v1/trade/{currency}/{quote}'
currencies_stream = '/v1/currency' # All available currencies
pairs_stream = '/v1/pair' # All available pairs
ticker_all_stream = '/v1/ticker'
tickers_pair_stream = '/v1/ticker/{currency}/{quote}' # 24h and 7d volume and change + last price for pairs
rates_stream = '/v1/rate/{currency}/{quote}'
rates_quote_stream = '/v1/rate/{quote}'
# Private
orders_stream = '/user/{user}/v1/order'
accounts_stream = '/user/{user}/v1/account/total' # Returns all accounts of a user including empty ones
account_stream = '/user/{user}/v1/account'
transactions_stream = '/user/{user}/v1/transaction' # Returns external transactions (deposits and withdrawals)
transfers_stream = '/user/{user}/v1/transfers' # Returns internal transfers on the platform (inter_user, ...)
topics = list()
# INITIALISATION
def __init__(self, apiKey: Optional[str] = None, apiSecret: Optional[str] = None,
baseAPI: str = baseAPI, baseWS: str = baseWS, topics: list = topics):
self.apiKey = apiKey
self.apiSecret = apiSecret
self.baseAPI = baseAPI
self.baseWS = baseWS
self.topics = topics
# CONTROLLERS
def _inputController(self, currency: Optional[str] = None, quote: Optional[str] = None,
pair: Optional[str] = None, currency_name: Optional[str] = 'currency',
quote_name: Optional[str] = 'quote') -> dict:
"""Converting lower case currency tag into upper case as required"""
def controller(arg):
if len(arg) == 36:
return arg
else:
return arg.upper()
if pair:
currency = pair.split('/')[0]
quote = pair.split('/')[1]
pathParams = {
str(currency_name): controller(currency),
str(quote_name): controller(quote)
}
return pathParams
elif currency and quote:
pathParams = {
str(currency_name): controller(currency),
str(quote_name): controller(quote)
}
return pathParams
elif currency:
pathParams = {
str(currency_name): controller(currency)
}
return pathParams
# SIGNATURES
def _APIsigned(self, endpoint: str, params: dict = None, request_type: Optional[str] = 'get'):
"""Signing get and post private calls by api key and secret by HMAC-SHA512"""
if params:
serializeFunc = map(lambda it: it[0] + '=' + str(it[1]), params.items())
queryParams = '&'.join(serializeFunc)
else:
queryParams = ''
if request_type == 'get':
signature = hmac.new(
                self.apiSecret.encode('ascii'),  # hmac.new requires a bytes key
('GET' + endpoint + queryParams).encode('ascii'),
hashlib.sha512
)
url = self.baseAPI + endpoint + '?' + queryParams
response = requests.get(
url,
headers = {
'X-LA-APIKEY': self.apiKey,
'X-LA-SIGNATURE': signature.hexdigest(),
'X-LA-DIGEST': 'HMAC-SHA512'
}
)
elif request_type == 'post':
signature = hmac.new(
                self.apiSecret.encode('ascii'),  # hmac.new requires a bytes key
('POST' + endpoint + queryParams).encode('ascii'),
hashlib.sha512
)
url = self.baseAPI + endpoint
response = requests.post(
url,
headers = {
'Content-Type': 'application/json',
'X-LA-APIKEY': self.apiKey,
'X-LA-SIGNATURE': signature.hexdigest(),
'X-LA-DIGEST': 'HMAC-SHA512'
},
json = params
)
return response.json()
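    # Illustrative example of what gets signed (assumed values): a GET to
    # '/v2/auth/account' with params {'zeros': 'false'} signs the string
    # 'GET/v2/auth/accountzeros=false'; the hex digest goes in the X-LA-SIGNATURE
    # header alongside X-LA-APIKEY and X-LA-DIGEST = 'HMAC-SHA512'.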
# EXCHANGE ENDPOINTS
def getUserInfo(self) -> dict:
"""Returns information about the authenticated user
:returns: dict - dict of personal data
.. code-block:: python
{
'id': 'a44444aa-4444-44a4-444a-44444a444aaa', # User id (unique for each user)
'status': 'ACTIVE', # Account status (ACTIVE, DISABLED, FROZEN)
'role': 'INVESTOR', # Can be ignored
'email': 'example@email.com', # Email address on user account
'phone': '', # Phone number on user account
            'authorities': [..., 'VIEW_TRANSACTIONS', 'PLACE_ORDER', ...], # List of account privileges
'forceChangePassword': None, # Can be ignored
'authType': 'API_KEY', # Can be ignored
'socials': [] # Can be ignored
}
"""
return self._APIsigned(endpoint = self.user_info_call)
def getServerTime(self) -> dict:
"""Returns the currenct server time
:returns: dict
.. code-block:: python
{
'serverTime': 1628934753710
}
"""
return requests.get(self.baseAPI + self.time_call).json()
def getAccountBalances(self, currency: Optional[str] = None, account_type: Optional[str] = None,
zeros: Optional[bool] = False):
"""Returns account balances for all/specific currency and wallet type
A request for a specific currency and wallet type has a priority over all-currencies request
:param currency: required for one-currency request, can be currency tag or currency id
:param account_type: required for one-currency request
:param zeros: required for all-currencies request, default is False (doesn't return zero balances)
:type zeros: string (method argument accepts boolean for user convenience)
:returns: list - list of dictionaries per currency and wallet, if all-currencies request, otherwise one dict
.. code block:: python
[...,
{
'id': 'a44444aa-4444-44a4-444a-44444a444aaa', # Account id (unique for each account of a user)
'status': 'ACCOUNT_STATUS_ACTIVE', # Currency account status (ACTIVE, DISABLED, FROZEN)
'type': 'ACCOUNT_TYPE_SPOT', # Account type (SPOT, FUTURES, WALLET, CROWDSALE)
            'timestamp': 1628381804961, # Timestamp when the server returned the response
'currency': '0c3a106d-bde3-4c13-a26e-3fd2394529e5', # Currency id
'available': '100.830064349760000000', # Currently available on the account (excludes blocked funds)
'blocked': '0.000000' # Currently blocked (orders placed, for example)
},
...
]
"""
if currency and account_type:
pathParams = self._inputController(currency = currency)
pathParams.update({
'accountType': str(account_type)
})
return self._APIsigned(endpoint = self.currency_balance_by_type_call.format(**pathParams))
else:
queryParams = {'zeros': str(zeros).lower()}
return self._APIsigned(endpoint = self.account_balances_call, params = queryParams)
def getOrders(self, order_id: Optional[str] = None, pair: Optional[str] = None, active: Optional[bool] = False,
limit: Optional[int] = 100, timestamp: Optional[str] = None):
"""Returns user orders history
A request for the order by id has a priority over a request for orders by pair
that itself has a priority over a request for all orders
:param order_id: required for a particular order request (other arguments will be ignored)
:param pair: required for request for orders in a specific pair (should be of format ***/***)
:param active: optional, defaults to False (returns all orders, otherwise only active are returned)
:param limit: optional, defaults to 100
:type limit: string (method argument accepts integer for user convenience)
:param timestamp: optional, defaults to current (orders before this timestamp are returned)
:returns: list - list of dictionaries for each order, otherwise dict if only 1 order exists
.. code block:: python
[...,
{
'id': 'a44444aa-4444-44a4-444a-44444a444aaa',
'status': 'ORDER_STATUS_CLOSED',
'side': 'ORDER_SIDE_SELL',
'condition': 'ORDER_CONDITION_GOOD_TILL_CANCELLED',
'type': 'ORDER_TYPE_LIMIT',
'baseCurrency': 'd286007b-03eb-454e-936f-296c4c6e3be9',
'quoteCurrency': '0c3a106d-bde3-4c13-a26e-3fd2394529e5',
'clientOrderId': 'my order 1',
'price': '3.6200',
'quantity': '100.000',
'cost': '362.0000000',
'filled': '100.000',
'trader': 'a44444aa-4444-44a4-444a-44444a444aaa', # User id
'timestamp': 1624804464728
},
...
]
"""
if order_id:
return self._APIsigned(endpoint = self.order_status_call.format(order_id))
elif pair:
queryParams = {
'from': str(timestamp),
'limit': str(limit)
}
queryParams = {x: y for x, y in queryParams.items() if y != 'None'}
pathParams = self._inputController(pair = pair)
if active:
return self._APIsigned(endpoint = self.order_pair_active_call.format(**pathParams), params = queryParams)
else:
return self._APIsigned(endpoint = self.order_pair_all_call.format(**pathParams), params = queryParams)
else:
queryParams = {
'from': str(timestamp),
'limit': str(limit)
}
queryParams = {x: y for x, y in queryParams.items() if y != 'None'}
return self._APIsigned(endpoint = self.order_all_call, params = queryParams)
def placeOrder(self, pair: str, side: str, client_message: str, price: float, quantity: float,
timestamp: int, condition: str = 'GTC', order_type: str = 'LIMIT') -> dict:
"""Places an order
:param pair: max 20 characters, can be any combination of currency id or currency tag (format ***/***)
:param side: max 10 characters, can be "BUY", "BID", "SELL", "ASK"
:param client_message: max 50 characters, write whatever you want here
:param price: max 50 characters
:type price: string (method argument accepts float for user convenience)
:param quantity: max 50 characters
:type quantity: string (method argument accepts float for user convenience)
:param timestamp: required for correct signature
:param condition: max 30 characters, can be "GTC" (default), "GOOD_TILL_CANCELLED",
"IOC", "IMMEDIATE_OR_CANCEL", "FOK", "FILL_OR_KILL", "AON", "ALL_OR_NONE"
:param order_type: max 30 characters, can be "LIMIT" (default), "MARKET"
        :returns: dict - dict with response
.. code block:: python
{
'message': 'order accepted for placing',
'status': 'SUCCESS',
'id': 'a44444aa-4444-44a4-444a-44444a444aaa' # Order id
}
"""
requestBodyParams = self._inputController(pair = pair, currency_name = 'baseCurrency', quote_name = 'quoteCurrency')
requestBodyParams.update({
'side': str(side.upper()),
'condition': str(condition.upper()),
'type': str(order_type.upper()),
'clientOrderId': str(client_message),
'price': str(price),
'quantity': str(quantity),
'timestamp': int(timestamp)
})
return self._APIsigned(endpoint = self.order_place_call, params = requestBodyParams, request_type = 'post')
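    # Usage sketch (hypothetical client instance and pair, illustrative values only):
    #
    #     client = LatokenClient(apiKey='...', apiSecret='...')
    #     client.placeOrder(pair='BASE/QUOTE', side='SELL', client_message='my order 1',
    #                       price=3.62, quantity=100, timestamp=int(time() * 1000))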
def cancelOrder(self, order_id: Optional[str] = None, pair: Optional[str] = None, cancel_all: Optional[bool] = False) -> dict:
"""Cancels orders
A request to cancel order by id has a priority over a request to cancel orders by pair
that itself has a priority over a request to cancel all orders
:param order_id: required for a particular order cancellation request (other arguments will be ignored)
:param pair: required for cancel orders in a specific pair (should be of format ***/***)
:param cancel_all: optional, defaults to False (you should explicitly set it to True to cancel all orders)
        :returns: dict - dict with response
.. code block:: python
{
'message': 'cancellation request successfully submitted',
'status': 'SUCCESS',
'id': 'a44444aa-4444-44a4-444a-44444a444aaa' # Only returned if a specific order is cancelled
}
"""
if order_id:
requestBodyParams = {'id': str(order_id)}
return self._APIsigned(endpoint = self.order_cancel_id_call, params = requestBodyParams, request_type = 'post')
elif pair:
pathParams = self._inputController(pair = pair)
return self._APIsigned(endpoint = self.order_cancel_pair_call.format(**pathParams), request_type = 'post')
elif cancel_all:
return self._APIsigned(endpoint = self.order_cancel_all_call, request_type = 'post')
def getTrades(self, pair: Optional[str] = None, user: bool = False, limit: Optional[int] = 100, timestamp: Optional[str] = None):
"""Returns user trades history
A request for user trades by pair has a priority over a request all user trades
that itself has a priority over a request for all trades in the market.
        :param user: required for requesting trades by user and by user in a specific pair. Defaults to False,
            which means all market trades are returned regardless of the user.
:param pair: required for request for trade of the user in a specific pair (should be of format ***/***)
:param limit: optional, defaults to 100
:type limit: string (method argument accepts integer for user convenience)
:param timestamp: optional, defaults to current (orders before this timestamp are returned)
:returns: list - list of dictionaries for each trade, otherwise dict if only 1 trade exists
.. code block:: python
[...,
{
'id': 'a44444aa-4444-44a4-444a-44444a444aaa',
'isMakerBuyer': False,
'direction': 'TRADE_DIRECTION_SELL',
'baseCurrency': '92151d82-df98-4d88-9a4d-284fa9eca49f',
'quoteCurrency': '0c3a106d-bde3-4c13-a26e-3fd2394529e5',
'price': '30000.00',
'quantity': '0.03500',
'cost': '1050.00',
'fee': '4.095000000000000000', # Omitted from public trades (given in quoteCurrency)
'order': 'a44444aa-4444-44a4-444a-44444a444aaa', # Omitted from public trades
'timestamp': 1624373391929,
'makerBuyer': False
},
...
]
"""
queryParams = {
'from': str(timestamp),
'limit': str(limit)
}
queryParams = {x: y for x, y in queryParams.items() if y != 'None'}
if user and pair: # PRIVATE
pathParams = self._inputController(pair = pair)
return self._APIsigned(endpoint = self.trades_user_pair_call.format(**pathParams), params = queryParams)
elif user: # PRIVATE
return self._APIsigned(endpoint = self.trades_user_call, params = queryParams)
elif pair: # PUBLIC
serializeFunc = map(lambda it: it[0] + '=' + str(it[1]), queryParams.items())
queryParams = '&'.join(serializeFunc)
pathParams = self._inputController(pair = pair)
return requests.get(self.baseAPI + self.trades_all_call.format(**pathParams) + '?' + queryParams).json()
def transferSpot(self, amount: float, currency_id: str, deposit: bool = True) -> dict:
"""Transfers between Spot and Wallet accounts
:param amount: should be >= 0
:type amount: string (method argument accepts float for user convenience)
        :param currency_id: unlike other methods, this one only accepts a currency id (a currency tag will return an error)
:param deposit: defaults to True (deposit to Spot from Wallet), False means withdraw from Spot to Wallet
:returns: dict - dict with the transfer result
.. code block:: python
{
'id': 'a44444aa-4444-44a4-444a-44444a444aaa',
'status': 'TRANSFER_STATUS_PENDING',
'type': 'TRANSFER_TYPE_DEPOSIT_SPOT', # Will be TRANSFER_TYPE_WITHDRAW_SPOT, if deposit set to False
'fromAccount': 'a44444aa-4444-44a4-444a-44444a444aaa',
'toAccount': 'a44444aa-4444-44a4-444a-44444a444aaa',
'transferringFunds': '10',
'usdValue': '0',
'rejectReason': '',
'timestamp': 1629537163208,
'direction': 'INTERNAL',
'method': 'TRANSFER_METHOD_UNKNOWN',
'recipient': '',
'sender': '',
'currency': '0c3a106d-bde3-4c13-a26e-3fd2394529e5',
'codeRequired': False,
'fromUser': 'a44444aa-4444-44a4-444a-44444a444aaa', # This is the authenticated user id
'toUser': 'a44444aa-4444-44a4-444a-44444a444aaa', # This is the authenticated user id (same as fromUser)
'fee': '0'
}
"""
requestBodyParams = {
'value': str(amount),
'currency': str(currency_id)
}
if deposit:
return self._APIsigned(endpoint = self.deposit_spot_call, params = requestBodyParams, request_type = 'post')
elif deposit == False:
return self._APIsigned(endpoint = self.withdraw_spot_call, params = requestBodyParams, request_type = 'post')
def transferAccount(self, amount: float, currency_id: str, user_id: Optional[str] = None,
phone: Optional[str] = None, email: Optional[str] = None) -> dict:
"""Transfers between external to the user accounts (within exchange)
A request for transfer by user_id has a priority over the request for transfer by phone
that itself has a priority over the request for transfer by email
:param amount: should be >= 0
:type amount: string (method argument accepts float for user convenience)
        :param currency_id: unlike other methods, this one only accepts a currency id (a currency tag will return an error)
:param user_id: required for transfer by user_id, other arguments (phone and email) will be ignored
:param phone: required for transfer by phone, other argument (email) will be ignored
:param email: required for transfer by email, will only be used if other arguments are not present
:returns: dict - dict with the transfer result
.. code block:: python
{
'id': 'a44444aa-4444-44a4-444a-44444a444aaa',
'status': 'TRANSFER_STATUS_UNVERIFIED',
'type': 'TRANSFER_TYPE_INTER_USER',
'fromAccount': None,
'toAccount': None,
'transferringFunds': '10',
'usdValue': '0',
'rejectReason': None,
'timestamp': 1629539250161,
'direction': 'OUTCOME',
'method': 'TRANSFER_METHOD_DIRECT',
'recipient': 'a44444aa-4444-44a4-444a-44444a444aaa',
'sender': 'exampleemail@email.com',
'currency': '0c3a106d-bde3-4c13-a26e-3fd2394529e5',
'codeRequired': False,
'fromUser': 'a44444aa-4444-44a4-444a-44444a444aaa',
'toUser': 'b44444aa-4444-44b4-444a-33333a444bbb',
'fee': None
}
"""
requestBodyParams = {
'value': str(amount),
'currency': str(currency_id)
}
if user_id:
requestBodyParams.update({'recipient': str(user_id)})
return self._APIsigned(endpoint = self.transfer_by_id_call, params = requestBodyParams, request_type = 'post')
elif phone:
requestBodyParams.update({'recipient': str(phone)})
return self._APIsigned(endpoint = self.transfer_by_phone_call, params = requestBodyParams, request_type = 'post')
elif email:
requestBodyParams.update({'recipient': str(email)})
return self._APIsigned(endpoint = self.transfer_by_email_call, params = requestBodyParams, request_type = 'post')
else:
print('No transfer method provided')
def getTransfers(self, page: Optional[int] = 0, size: Optional[int] = 10) -> dict:
"""Returns history of user transfers without their account and to other users
:param page: should be >= 0
:param size: should be 1-1000 (defaults to 10), number of results returned per page
:returns: dict - dict with transfers history (from the most recent to the least recent)
.. code block:: python
{
'hasNext': True, # Means that it has the next page
'content': [
{
'id': 'a44444aa-4444-44a4-444a-44444a444aaa',
'status': 'TRANSFER_STATUS_UNVERIFIED',
'type': 'TRANSFER_TYPE_INTER_USER',
'fromAccount': None,
'toAccount': None,
'transferringFunds': '10',
'usdValue': '0',
'rejectReason': None,
'timestamp': 1629539250161,
'direction': 'OUTCOME',
'method': 'TRANSFER_METHOD_DIRECT',
'recipient': 'a44444aa-4444-44a4-444a-44444a444aaa',
'sender': 'exampleemail@email.com',
'currency': '0c3a106d-bde3-4c13-a26e-3fd2394529e5',
'codeRequired': False,
'fromUser': 'a44444aa-4444-44a4-444a-44444a444aaa',
'toUser': 'a44444aa-4444-44a4-444a-44444a444bbb',
'fee': None
},
{
'id': 'a44444aa-4444-44a4-444a-44444a444aaa',
'status': 'TRANSFER_STATUS_PENDING',
'type': 'TRANSFER_TYPE_DEPOSIT_SPOT',
'fromAccount': 'a44444aa-4444-44a4-444a-44444a444aaa',
'toAccount': 'a44444aa-4444-44a4-444a-44444a444aaa',
'transferringFunds': '10',
'usdValue': '0',
'rejectReason': '',
'timestamp': 1629537163208,
'direction': 'INTERNAL',
'method': 'TRANSFER_METHOD_UNKNOWN',
'recipient': '',
'sender': '',
'currency': '0c3a106d-bde3-4c13-a26e-3fd2394529e5',
'codeRequired': False,
'fromUser': 'a44444aa-4444-44a4-444a-44444a444aaa',
'toUser': 'a44444aa-4444-44a4-444a-44444a444aaa',
                'fee': '0.000000000000000000'
}],
'first': True, # Means that this is the first page and there is no page before
'pageSize': 1,
'hasContent': True # Means that page is not empty
}
"""
queryParams = {
'page': str(page),
'size': str(size)
}
return self._APIsigned(endpoint = self.transfer_get_all_call, params = queryParams)
def makeWithdrawal(self, currency_binding_id: str, amount: float, address: str, memo: Optional[str] = None,
twoFaCode: Optional[str] = None) -> dict:
"""Makes a withdrawal from LATOKEN
:param currency_binding_id: LATOKEN internal OUTPUT binding id (each currency has a separate INPUT and OUTPUT binding per each provider)
:type amount: string (method argument accepts float for user convenience)
:returns: dict - dict with the transaction result
.. code block:: python
{
'withdrawalId': 'a44444aa-4444-44a4-444a-44444a444aaa',
'codeRequired': False,
'transaction': {
'id': 'a44444aa-4444-44a4-444a-44444a444aaa',
'status': 'TRANSACTION_STATUS_PENDING',
'type': 'TRANSACTION_TYPE_WITHDRAWAL',
'senderAddress': None,
'recipientAddress': 'TTccMcccM8ccMcMMc46KHzv6MeMeeeeeee', # Address to send withdrawal to
'amount': '20',
'transactionFee': '3', # Fee in sent currency
'timestamp': 1629561656227,
'transactionHash': None, # Not present in response as status is pending
'blockHeight': None, # Not present in response as status is pending
'currency': '0c3a106d-bde3-4c13-a26e-3fd2394529e5',
'memo': None,
'paymentProvider': None, # LATOKEN payment provider id
'requiresCode': False
}
}
"""
requestBodyParams = {
'currencyBinding': str(currency_binding_id),
'amount': str(amount),
'recipientAddress': str(address),
'memo': str(memo),
'twoFaCode': str(twoFaCode)
}
requestBodyParams = {x: y for x, y in requestBodyParams.items() if y != 'None'}
return self._APIsigned(endpoint = self.withdrawal_request_call, params = requestBodyParams, request_type = 'post')
def cancelWithdrawal(self, withdrawal_id: str) -> dict:
"""Cancel UNVERIFIED withdrawal
:returns: dict - dict with the cancellation result
"""
requestBodyParams = {'id': str(withdrawal_id)}
return self._APIsigned(endpoint = self.withdrawal_cancel_call, params = requestBodyParams, request_type = 'post')
def confirmWithdrawal(self, withdrawal_id: str, code: str) -> dict:
"""Confirm UNVERIFIED withdrawal
:returns: dict - dict with the confirmation result
"""
requestBodyParams = {
'id': str(withdrawal_id),
'confirmationCode': str(code)
}
return self._APIsigned(endpoint = self.withdrawal_confirmation_call, params = requestBodyParams, request_type = 'post')
def resendCode(self, withdrawal_id: str) -> dict:
"""Resends verification code for UNVERIFIED withdrawal confirmation
:returns: dict - dict with the code result
"""
requestBodyParams = {'id': str(withdrawal_id)}
return self._APIsigned(endpoint = self.withdrawal_code_resend_call, params = requestBodyParams, request_type = 'post')
def getDepositAddress(self, currency_binding_id: str) -> dict:
"""Returns a deposit address
:param currency_binding_id: LATOKEN internal INPUT binding id
:returns: dict - dict with the operation message and deposit address
.. code block:: python
{
'message': 'address generated',
'status': 'SUCCESS',
'depositAccount': {
'address': '0x55bb55b5b555bbb5bbb5b02555bbbb5bb5555bbb',
'memo': ''
}
}
"""
requestBodyParams = {'currencyBinding': str(currency_binding_id)}
return self._APIsigned(endpoint = self.deposit_address_call, params = requestBodyParams, request_type = 'post')
def getWithdrawalBindings(self) -> list:
"""Returns a list of OUTPUT bindings
.. code block:: python
[{
'id': '230a4acf-e1c6-440d-a59f-607a5fb1c390', # Currency id
'tag': 'ARX',
'bindings': [{
'minAmount': '144.000000000000000000', # In transacted currency
'fee': '48.000000000000000000', # In transacted currency
'percentFee': '1.000000000000000000', # In %
'providerName': 'ERC20', # Protocol that currency supports (once currency can have multiple providers)
'id': 'dbd3d401-8564-4d5d-9881-6d4b70d439b0', # OUTPUT currency binding id
'currencyProvider': '35607b89-df9e-47bd-974c-d7ca378fe4e6'
}]
},
...
]
"""
return requests.get(self.baseAPI + self.bindings_active_call).json()
def getActiveCurrencyBindings(self) -> dict:
"""Returns active currency bindings
:returns: dict - dict with currency ids for active INPUT (deposits) and OUTPUT (withdrawals) bindings
.. code block:: python
{
'inputs': [
'bf7cfeb8-2a8b-4356-a600-2b2f34c85fc9',
'ceb03f7c-2bcf-4775-9e6d-8dd95610abb7',
...
],
'outputs':[
'2e72c082-1de2-4010-bda9-d28aac11755d',
'6984a559-3ec0-4f84-bd25-166fbff69a7a',
...
]
}
"""
return self._APIsigned(endpoint = self.bindings_active_currencies_call)
def getCurrencyBindings(self, currency: str) -> list:
"""Returns all bindings of a specific currencies
:param currency: can be either currency id or currency tag
:returns: list - list with dict per each currency binding (both active and inactive)
.. code block:: python
[{
'id': '7d28ec03-6d1a-4586-b38d-df4b334cec1c',
'currencyProvider': '9899d208-a3e5-46bc-a594-3048b1a982bc',
'status': 'CURRENCY_BINDING_STATUS_ACTIVE',
'type': 'CURRENCY_BINDING_TYPE_OUTPUT',
'currency': '92151d82-df98-4d88-9a4d-284fa9eca49f',
'minAmount': '0.001000000000000000',
'fee': '0.000500000000000000',
'percentFee': '1.000000000000000000',
'warning': '',
'feeCurrency': '92151d82-df98-4d88-9a4d-284fa9eca49f',
'title': 'BTC Wallet',
'confirmationBlocks': 2,
'memoSupported': False,
'decimals': 6,
'config': {},
'providerName': 'BTC',
'restrictedCountries': []
},
{
'id': '3a29a9cb-3f10-46e9-a8af-52c3ca8f3cab',
'currencyProvider': '9899d208-a3e5-46bc-a594-3048b1a982bc',
'status': 'CURRENCY_BINDING_STATUS_ACTIVE',
'type': 'CURRENCY_BINDING_TYPE_INPUT',
'currency': '92151d82-df98-4d88-9a4d-284fa9eca49f',
'minAmount': '0.000500000000000000',
'fee': '0',
'percentFee': '0',
'warning': '',
'feeCurrency': '92151d82-df98-4d88-9a4d-284fa9eca49f',
'title': 'BTC Wallet ',
'confirmationBlocks': 2,
'memoSupported': False,
'decimals': 6,
'config': {},
'providerName': 'BTC',
'restrictedCountries': []
},
{
'id': 'e225e53e-6756-4f2b-bc2c-ebc4dc60b2d9',
'currencyProvider': 'bf169c61-26cd-49a0-a6e1-a8781d1d4058',
'status': 'CURRENCY_BINDING_STATUS_DISABLED',
'type': 'CURRENCY_BINDING_TYPE_INPUT',
'currency': '92151d82-df98-4d88-9a4d-284fa9eca49f',
'minAmount': '0.000300000000000000',
'fee': '0',
'percentFee': '0',
'warning': '',
'feeCurrency': '92151d82-df98-4d88-9a4d-284fa9eca49f',
'title': 'BTCB Wallet BEP-20',
'confirmationBlocks': 15,
'memoSupported': False,
'decimals': 18,
'config': {
'address': '0x7130d2A12B9BCbFAe4f2634d864A1Ee1Ce3Ead9c'
},
'providerName': 'BSC_TOKEN',
'restrictedCountries': []
},
{
'id': 'c65cd18f-6d9c-40f3-acca-072d4d1977fd',
'currencyProvider': 'bf169c61-26cd-49a0-a6e1-a8781d1d4058',
'status': 'CURRENCY_BINDING_STATUS_DISABLED',
'type': 'CURRENCY_BINDING_TYPE_OUTPUT',
'currency': '92151d82-df98-4d88-9a4d-284fa9eca49f',
'minAmount': '0.000300000000000000',
'fee': '0.000300000000000000',
'percentFee': '1.000000000000000000',
'warning': '',
'feeCurrency': '92151d82-df98-4d88-9a4d-284fa9eca49f',
'title': 'BTCB Wallet BEP-20',
'confirmationBlocks': 15,
'memoSupported': False,
'decimals': 18,
'config': {
'address': '0x7130d2A12B9BCbFAe4f2634d864A1Ee1Ce3Ead9c'
},
'providerName': 'BSC_TOKEN',
'restrictedCountries': []
}
]
"""
pathParams = self._inputController(currency = currency)
return self._APIsigned(endpoint = self.bindings_currency_call.format(**pathParams))
def getTransactions(self, transaction_id: Optional[str] = None, page: Optional[int] = 0, size: Optional[int] = 10) -> dict:
"""Returns a history of user transactions
        A request for a transaction by id has priority over the request for all transactions
:param transaction_id: required, if request a specific transaction
:param page: should be >= 0
:param size: should be 1-1000 (defaults to 10), number of results returned per page
        .. code block:: python
{
'hasNext': True,
'content': [{
'id': 'a44444aa-4444-44a4-444a-44444a444aaa',
'status': 'TRANSACTION_STATUS_CONFIRMED',
'type': 'TRANSACTION_TYPE_WITHDRAWAL',
'senderAddress': '',
'recipientAddress': 'TTccMcccM8ccMcMMc46KHzv6MeMeeeeeee',
'amount': '20.000000000000000000',
'transactionFee': '3.000000000000000000',
'timestamp': 1629561656406,
'transactionHash': '900a0000000a0cc647a2aa10555a555555233aaa065a5a6369600000000000',
'blockHeight': 0,
'currency': '0c3a106d-bde3-4c13-a26e-3fd2394529e5',
'memo': None,
'paymentProvider': '4732c7cc-5f53-4f12-a757-96c7c6ba2e8e',
'requiresCode': False
},
{
...
}],
'first': True,
'pageSize': 1,
'hasContent': True
}
"""
if transaction_id:
pathParams = {'id': str(transaction_id)}
return self._APIsigned(endpoint = self.transaction_by_id_call.format(**pathParams))
else:
queryParams = {
'page': str(page),
'size': str(size)
}
return self._APIsigned(endpoint = self.transaction_all_call, params = queryParams)
def getCurrencies(self, currency: Optional[str] = None, get_all: bool = True):
"""Returns currency data
:param currency: can be either currency tag or currency id
:returns: dict - dict with currency information (if requesting one currency), list with currencies otherwise
.. code block:: python
[{
'id': '92151d82-df98-4d88-9a4d-284fa9eca49f',
'status': 'CURRENCY_STATUS_ACTIVE',
'type': 'CURRENCY_TYPE_CRYPTO',
'name': 'Bitcoin',
'tag': 'BTC',
'description': '',
'logo': '',
'decimals': 8,
'created': 1572912000000,
'tier': 1,
'assetClass': 'ASSET_CLASS_UNKNOWN',
'minTransferAmount': 0
},
...
]
"""
if currency:
pathParams = self._inputController(currency = currency)
return requests.get(self.baseAPI + self.currency_call.format(**pathParams)).json()
elif get_all:
return requests.get(self.baseAPI + self.active_currency_call).json()
def getQuoteCurrencies(self) -> list:
"""Returns quote currencies
:returns: list - list of currencies used as quote on LATOKEN
.. code block:: python
[
'0c3a106d-bde3-4c13-a26e-3fd2394529e5',
'92151d82-df98-4d88-9a4d-284fa9eca49f',
'620f2019-33c0-423b-8a9d-cde4d7f8ef7f',
'34629b4b-753c-4537-865f-4b62ff1a31d6',
'707ccdf1-af98-4e09-95fc-e685ed0ae4c6',
'd286007b-03eb-454e-936f-296c4c6e3be9'
]
"""
return requests.get(self.baseAPI + self.quote_currency_call).json()
def getActivePairs(self) -> list:
"""Returns active pairs
:returns: list - list of active pairs information
.. code block:: python
[...,
{
'id': '752896cd-b656-4d9a-814d-b97686246350',
'status': 'PAIR_STATUS_ACTIVE',
'baseCurrency': 'c9f5bf11-92ec-461b-877c-49e32f133e13',
'quoteCurrency': '0c3a106d-bde3-4c13-a26e-3fd2394529e5',
'priceTick': '0.000000000010000000',
'priceDecimals': 11,
'quantityTick': '0.010000000',
'quantityDecimals': 2,
'costDisplayDecimals': 9,
'created': 1625153024491,
'minOrderQuantity': '0',
'maxOrderCostUsd': '999999999999999999',
'minOrderCostUsd': '0',
'externalSymbol': ''
},
...]
"""
return requests.get(self.baseAPI + self.active_pairs_call).json()
def getOrderbook(self, pair: str, limit: Optional[int] = 1000) -> dict:
"""Returns orderbook for a specific pair
        :param pair: can be either currency tag or currency id (should be of format ***/***)
        :param limit: number of price levels returned in bids and asks, defaults to 1000
:returns: dict - dict with asks and bids that contain information for each price level
        .. code block:: python
{
'ask':
[{
'price': '46566.69',
'quantity': '0.0081',
'cost': '377.190189',
'accumulated': '377.190189'
}],
'bid':
[{
'price': '46561.91',
'quantity': '0.0061',
'cost': '284.027651',
                'accumulated': '284.027651'
}],
'totalAsk': '3.4354', # In base currency
'totalBid': '204967.154792' # In quote currency
}
"""
pathParams = self._inputController(pair = pair)
queryParams = f'limit={limit}'
return requests.get(self.baseAPI + self.orderbook_call.format(**pathParams) + '?' + queryParams).json()
def getTickers(self, pair: Optional[str] = None, get_all: bool = True):
"""Returns tickers
        :param pair: can be either currency tag or currency id (should be of format ***/***)
:param get_all: defaults to True (returns tickers for all pairs)
:returns: list - list of dicts with pairs' tickers (by default), dict with one pair ticker otherwise
.. code block:: python
[...,
{
'symbol': 'SNX/USDT',
'baseCurrency': 'c4624bdb-1148-440d-803d-7b55031d481d',
'quoteCurrency': '0c3a106d-bde3-4c13-a26e-3fd2394529e5',
'volume24h': '1177568.232859658500000000',
'volume7d': '1177568.232859658500000000',
'change24h': '0',
'change7d': '0',
'lastPrice': '12.02347082'
},
...
]
"""
if pair:
pathParams = self._inputController(pair = pair)
return requests.get(self.baseAPI + self.tickers_per_pair_call.format(**pathParams)).json()
elif get_all:
return requests.get(self.baseAPI + self.tickers_call).json()
def getFeeLevels(self) -> list:
"""Returns fee levels
:returns: list - list of dicts with maker and taker fee per each volume level (30d accumulated volume)
.. code block:: python
[
{'makerFee': '0.0049', 'takerFee': '0.0049', 'volume': '0'},
{'makerFee': '0.0039', 'takerFee': '0.0039', 'volume': '10000'},
{'makerFee': '0.0029', 'takerFee': '0.0029', 'volume': '50000'},
{'makerFee': '0.0012', 'takerFee': '0.0019', 'volume': '100000'},
{'makerFee': '0.0007', 'takerFee': '0.0011', 'volume': '250000'},
{'makerFee': '0.0006', 'takerFee': '0.0009', 'volume': '1000000'},
{'makerFee': '0.0004', 'takerFee': '0.0007', 'volume': '2500000'},
{'makerFee': '0.0002', 'takerFee': '0.0005', 'volume': '10000000'},
{'makerFee': '0', 'takerFee': '0.0004', 'volume': '20000000'}
]
"""
return requests.get(self.baseAPI + self.fee_levels_call).json()
def getFeeScheme(self, pair: str, user: Optional[bool] = False) -> dict:
"""Returns fee scheme for a particular pair
:param user: defaults to False (returns fee scheme per pair for all users, for particular user otherwise)
.. code block:: python
{
'makerFee': '0.004900000000000000', # Proportion (not %)
'takerFee': '0.004900000000000000', # Proportion (not %)
'type': 'FEE_SCHEME_TYPE_PERCENT_QUOTE',
'take': 'FEE_SCHEME_TAKE_PROPORTION'
}
"""
pathParams = self._inputController(pair = pair)
if pair and user:
return self._APIsigned(endpoint = self.fee_scheme_par_pair_and_user_call.format(**pathParams))
elif pair:
return requests.get(self.baseAPI + self.fee_scheme_per_pair_call.format(**pathParams)).json()
def getChart(self, pair: Optional[str] = None):
"""Returns charts
        :param pair: can be either currency tag or currency id (should be of format ***/***)
:returns: if no arguments specified, the dict is returned with currency ids as keys
        and list of 169 weekly prices as values, otherwise a single list is returned
        .. code block:: python
{
'30a1032d-1e3e-4c28-8ca7-b60f3406fc3e': [..., 1.375e-05, 1.382e-05, 1.358e-05, ...],
'd8958071-c13f-40fb-bd54-d2f64c36e15b': [..., 0.0001049, 0.000104, 0.0001045, ...],
...
}
"""
if pair:
pathParams = self._inputController(pair = pair)
return requests.get(self.baseAPI + self.weekly_chart_by_pair_call.format(**pathParams)).json()
else:
return requests.get(self.baseAPI + self.weekly_chart_call).json()
def getCandles(self, start: str, end: str, pair: str = None, resolution: str = '1h') -> dict:
"""Returns charts
        :param pair: can be either currency tag or currency id (should be of format ***/***)
        :param resolution: can be 1m, 1h (default), 4h, 6h, 12h, 1d, 7d or 1w, 30d or 1M
        :param start: timestamp in seconds (included in response)
        :param end: timestamp in seconds (not included in response)
:returns: dict - the dict with open, close, low, high, time, volume as keys and list of values
        .. code block:: python
{
"o":["49926.320000000000000000", ..., "49853.580000000000000000"],
"c":["50193.230000000000000000", ..., "49948.57"],
"l":["49777.000000000000000000", ...,"49810.200000000000000000"],
"h":["50555.000000000000000000", ...,"49997.350000000000000000"],
"t":[1630800000, ..., 1630828800],
"v":["2257782.696156400000000000", ..., "811505.269468400000000000"],
"s":"ok"
}
"""
pathParams = self._inputController(pair = pair)
pathParams.update({
'resolution': str(resolution),
'from': str(start),
'to': str(end)
})
return requests.get(self.baseAPI + self.candles_call.format(**pathParams)).json()
# WEBSOCKETS
def _WSsigned(self) -> dict:
timestamp = str(int(float(time()) * 1000))
# We should sign a timestamp in milliseconds by the api secret
signature = hmac.new(
self.apiSecret,
timestamp.encode('ascii'),
hashlib.sha512
)
return {
'X-LA-APIKEY': self.apiKey,
'X-LA-SIGNATURE': signature.hexdigest(),
'X-LA-DIGEST': 'HMAC-SHA512',
'X-LA-SIGDATA': timestamp
}
async def connect(self, streams: list = topics, signed: bool = False, on_message = None):
async with websockets.connect(self.baseWS) as websocket:
msg = stomper.Frame()
msg.cmd = "CONNECT"
msg.headers = {"accept-version": "1.1", "heart-beat": "0,0"}
            # If the request is for a private (signed) stream, add signature headers
if signed:
msg.headers.update(self._WSsigned())
await websocket.send(msg.pack())
await websocket.recv()
# Subscribing to streams, subscription id is assigned as an index in topics list
for stream in streams:
msg = stomper.subscribe(stream, streams.index(stream), ack="auto")
await websocket.send(msg)
# Telling the application to execute a business logic (consumer()) on each message from the server
while True:
message = await websocket.recv()
message = stomper.unpack_frame(message.decode())
await on_message(message)
def run(self, connect):
loop = asyncio.get_event_loop()
loop.run_until_complete(connect)
# Websocket streams
def streamAccounts(self) -> dict:
"""Returns all user currency balances
:returns: dict - dict with all user balances by wallet type
.. code block:: python
{
'cmd': 'MESSAGE',
'headers': {
'destination': '/user/a44444aa-4444-44a4-444a-44444a444aaa/v1/account',
'message-id': 'a44444aa-4444-44a4-444a-44444a444aaa',
'content-length': '22090',
'subscription': '0'
},
'body': '{
"payload":[
{
"id":"a44444aa-4444-44a4-444a-44444a444aaa",
"status":"ACCOUNT_STATUS_ACTIVE",
"type":"ACCOUNT_TYPE_FUTURES",
"timestamp":1594198124804,
"currency":"ebf4eb8a-06ec-4955-bd81-85a7860764b9",
"available":"31.265578482497400000",
"blocked":"0",
"user":"a44444aa-4444-44a4-444a-44444a444aaa"
},
...
],
"nonce":0,
"timestamp":1630172200117
}'
}
"""
user_id = self.getUserInfo()['id']
pathParams = {'user': str(user_id)}
accounts_topics = self.account_stream.format(**pathParams)
return self.topics.append(accounts_topics)
def streamTransactions(self):
"""Stream returns user transactions (to/from outside LATOKEN) history, function only returns a subscription endpoint
.. code block:: python
{
'cmd': 'MESSAGE',
'headers': {
'destination': '/user/a44444aa-4444-44a4-444a-44444a444aaa/v1/transaction',
'message-id': 'a44444aa-4444-44a4-444a-44444a444aaa',
'content-length': '13608',
'subscription': '0'
},
'body': '{
"payload":[
{
"id":"a44444aa-4444-44a4-444a-44444a444aaa",
"status":"TRANSACTION_STATUS_CONFIRMED",
"type":"TRANSACTION_TYPE_WITHDRAWAL",
"senderAddress":"",
"recipientAddress":"TTccMcccM8ccMcMMc46KHzv6MeMeeeeeee",
"transferredAmount":"20.000000000000000000",
"timestamp":1629561656404,
"transactionHash":"000000rrrrr000c647a27f7f7f7777ff9052338bf065000000fffffff7iiii88",
"blockHeight":0,
"transactionFee":"3.000000000000000000",
"currency":"0c3a106d-bde3-4c13-a26e-3fd2394529e5",
"user":"a44444aa-4444-44a4-444a-44444a444aaa",
"paymentProvider":"4732c7cc-5f53-4f12-a757-96c7c6ba2e8e",
"requiresCode":false
},
...
],
"nonce":0,
"timestamp":1630182304943
}'
}
"""
user_id = self.getUserInfo()['id']
pathParams = {'user': str(user_id)}
transactions_topics = self.transactions_stream.format(**pathParams)
return self.topics.append(transactions_topics)
def streamTransfers(self):
"""Stream returns user transfers (within LATOKEN) history, function only returns a subscription endpoint
.. code block:: python
"""
user_id = self.getUserInfo()['id']
pathParams = {'user': str(user_id)}
transfers_topics = self.transfers_stream.format(**pathParams)
return self.topics.append(transfers_topics)
def streamOrders(self):
"""Stream returns user orders history, function only returns a subscription endpoint
.. code block:: python
{
'cmd': 'MESSAGE',
'headers': {
'destination': '/user/a44444aa-4444-44a4-444a-44444a444aaa/v1/order',
'message-id': 'a44444aa-4444-44a4-444a-44444a444aaa',
'content-length': '27246',
'subscription': '0'
},
'body': '{
"payload":[
{
"id":"a44444aa-4444-44a4-444a-44444a444aaa",
"user":"a44444aa-4444-44a4-444a-44444a444aaa",
"changeType":"ORDER_CHANGE_TYPE_UNCHANGED",
"status":"ORDER_STATUS_CANCELLED",
"side":"ORDER_SIDE_BUY",
"condition":"ORDER_CONDITION_GOOD_TILL_CANCELLED",
"type":"ORDER_TYPE_LIMIT",
"baseCurrency":"92151d82-df98-4d88-9a4d-284fa9eca49f",
"quoteCurrency":"0c3a106d-bde3-4c13-a26e-3fd2394529e5",
"clientOrderId":"test1",
"price":"39000",
"quantity":"0.001",
"cost":"39.000000000000000000",
"filled":"0.000000000000000000",
"deltaFilled":"0",
"timestamp":1629039302489,
"rejectError":null,
"rejectComment":null
},
...
],
"nonce":0,
"timestamp":1630180898279
}'
}
"""
user_id = self.getUserInfo()['id']
pathParams = {'user': str(user_id)}
orders_topics = self.orders_stream.format(**pathParams)
return self.topics.append(orders_topics)
def streamCurrencies(self):
"""Stream returns currencies information, function only returns a subscription endpoint
.. code block:: python
{
'cmd': 'MESSAGE',
'headers': {
'destination': '/v1/currency',
'message-id': 'a44444aa-4444-44a4-444a-44444a444aaa',
'content-length': '265186',
'subscription': '0'
},
'body': '{
"payload":[
{
"id":"af544ebf-630b-4bac-89c1-35ee5caca50b",
"status":"CURRENCY_STATUS_ACTIVE",
"type":"CURRENCY_TYPE_CRYPTO",
"name":"Javvy Crypto Solution",
"description":"",
"decimals":18,
"tag":"JVY",
"logo":"",
"minTransferAmount":"",
"assetClass":"ASSET_CLASS_UNKNOWN"
},
...
],
"nonce":0,
"timestamp":1630180614787
}'
}
"""
return self.topics.append(self.currencies_stream)
def streamPairs(self):
"""Stream returns pairs information, function only returns a subscription endpoint
.. code block:: python
{
'cmd': 'MESSAGE',
'headers': {
'destination': '/v1/pair',
'message-id': 'a44444aa-4444-44a4-444a-44444a444aaa',
'content-length': '293954',
'subscription': '0'
},
'body': '{
"payload":[
{
"id":"c49baa32-88f0-4f7b-adca-ab66afadc75e",
"status":"PAIR_STATUS_ACTIVE",
"baseCurrency":"59c87258-af77-4c15-ae12-12da8cadc545",
"quoteCurrency":"620f2019-33c0-423b-8a9d-cde4d7f8ef7f",
"priceTick":"0.000000010000000000",
"quantityTick":"1.000000000",
"costDisplayDecimals":8,
"quantityDecimals":0,
"priceDecimals":8,
"externalSymbol":"",
"minOrderQuantity":"0.000000000000000000",
"maxOrderCostUsd":"999999999999999999.000000000000000000",
"minOrderCostUsd":"0.000000000000000000"
},
...
],
"nonce":0,
"timestamp":1630180179490
}'
}
"""
return self.topics.append(self.pairs_stream)
def streamTickers(self):
"""Stream returns tickers for all pairs, function only returns a subscription endpoint
.. code block:: python
{
'cmd': 'MESSAGE',
'headers': {
'destination': '/v1/ticker',
'message-id': 'a44444aa-4444-44a4-444a-44444a444aaa',
'content-length': '260547',
'subscription': '0'
},
'body': '{
"payload":[
{
"baseCurrency":"1cbcbd8f-74e6-4476-aaa1-e883a467ee3f",
"quoteCurrency":"92151d82-df98-4d88-9a4d-284fa9eca49f",
"volume24h":"0",
"volume7d":"0",
"change24h":"0",
"change7d":"0",
"lastPrice":"0.0000012"
},
...
],
"nonce":1,
"timestamp":1630179152495
}'
}
"""
return self.topics.append(self.ticker_all_stream)
def streamBook(self, pairs: list):
"""Stream returns orderbook of a specific pair, function only returns a subscription endpoint
:param pairs: should consist of currency_ids only, otherwise will return nothing, pair should be of format ***/***
:returns: dict - dict for each requested pair as a separate message
.. code block:: python
{
'cmd': 'MESSAGE',
'headers': {
'destination': '/v1/book/620f2019-33c0-423b-8a9d-cde4d7f8ef7f/0c3a106d-bde3-4c13-a26e-3fd2394529e5',
'message-id': 'a44444aa-4444-44a4-444a-44444a444aaa',
'content-length': '184',
'subscription': '1'
},
'body': '{
"payload":{
"ask":[],
"bid":[
{
"price":"3218.07",
"quantityChange":"1.63351",
"costChange":"5256.7495257",
"quantity":"1.63351",
"cost":"5256.7495257"
},
...
]
},
"nonce":1,
"timestamp":1630178170860
}'
}
"""
pathParams = [self._inputController(pair = pair) for pair in pairs]
book_topics = [self.book_stream.format(**pathParam) for pathParam in pathParams]
return [self.topics.append(book_topic) for book_topic in book_topics]
def streamPairTickers(self, pairs: list):
"""Stream returns pairs' volume and price changes, function only returns a subscription endpoint
:param pairs: should consist of currency_ids only, otherwise will return nothing, pair should be of format ***/***
:returns: dict - dict for each requested pair as a separate message
.. code block:: python
{
'cmd': 'MESSAGE',
'headers': {
'destination': '/v1/ticker/620f2019-33c0-423b-8a9d-cde4d7f8ef7f/0c3a106d-bde3-4c13-a26e-3fd2394529e5',
'message-id': '0a44444aa-4444-44a4-444a-44444a444aaa',
'content-length': '277',
'subscription': '1'
},
'body': '{
"payload":{
"baseCurrency":"620f2019-33c0-423b-8a9d-cde4d7f8ef7f",
"quoteCurrency":"0c3a106d-bde3-4c13-a26e-3fd2394529e5",
"volume24h":"37874830.2350945",
"volume7d":"183513541.8285953",
"change24h":"0.28",
"change7d":"-0.71",
"lastPrice":"3239"
},
"nonce":0,
"timestamp":1630177120904
}'
}
"""
pathParams = [self._inputController(pair = pair) for pair in pairs]
pair_tickers_topics = [self.tickers_pair_stream.format(**pathParam) for pathParam in pathParams]
return [self.topics.append(pair_tickers_topic) for pair_tickers_topic in pair_tickers_topics]
def streamTrades(self, pairs: list):
"""Stream returns market trades, function only returns a subscription endpoint
:param pairs: should consist of currency_ids only, otherwise will return an empty message, pair should be of format ***/***
.. code block:: python
{
'cmd': 'MESSAGE',
'headers': {
'destination': '/v1/trade/620f2019-33c0-423b-8a9d-cde4d7f8ef7f/0c3a106d-bde3-4c13-a26e-3fd2394529e5',
'message-id': 'a44444aa-4444-44a4-444a-44444a444aaa',
'content-length': '30105',
'subscription': '1'
},
'body': '{
"payload":[
{
"id":"a44444aa-4444-44a4-444a-44444a444aaa",
"timestamp":1630175902267,
"baseCurrency":"620f2019-33c0-423b-8a9d-cde4d7f8ef7f",
"quoteCurrency":"0c3a106d-bde3-4c13-a26e-3fd2394529e5",
"direction":null,
"price":"3243.14",
"quantity":"0.44887",
"cost":"1455.748251800000000000",
"order":null,
"makerBuyer":false
},
...
],
"nonce":0,
"timestamp":1630175907898
}'
}
"""
pathParams = [self._inputController(pair = pair) for pair in pairs]
trades_topics = [self.trades_stream.format(**pathParam) for pathParam in pathParams]
return [self.topics.append(trades_topic) for trades_topic in trades_topics]
def streamRates(self, pairs: list):
"""Stream returns rate for specified pairs, function only returns a subscription endpoint
:param pairs: can consist of currency_ids or currency tag, pair should be of format ***/***
:returns: dict - dict for each requested pair as a separate message
.. code block:: python
{
'cmd': 'MESSAGE',
'headers': {
                'destination': '/v1/rate/BTC/USDT', # Echoes the pair in the format you requested it (can be a mixture of currency id and tag)
'message-id': 'a44444aa-4444-44a4-444a-44444a444aaa',
'content-length': '87',
'subscription': '0'
},
'body': '{
"payload":[
{
"symbol":"BTC/USDT",
"rate":48984.99
}
],
"nonce":0,
"timestamp":1630173083252
}'
}
"""
pathParams = [self._inputController(pair = pair) for pair in pairs]
rates_topics = [self.rates_stream.format(**pathParam) for pathParam in pathParams]
return [self.topics.append(rates_topic) for rates_topic in rates_topics]
def streamQuoteRates(self, quotes: list):
"""Stream returns rates for all currencies quoted to specified quotes, function only returns a subscription endpoint
        :param quotes: a list of quote currencies, each of which can be either a currency tag or a currency id
.. code block:: python
{
'cmd': 'MESSAGE',
'headers': {
'destination': '/v1/rate/USDT',
'message-id': 'a44444aa-4444-44a4-444a-44444a444aaa',
'content-length': '49986',
'subscription': '0'
},
'body': '{
"payload":[
{"symbol":"USDN/USDT","rate":0.9988},
{"symbol":"USDJ/USDT","rate":0.98020001},
...,
{"symbol":"VTHO/USDT","rate":0.011324}
],
"nonce":0,
"timestamp":1630171197332
}'
}
"""
pathParams = [self._inputController(currency = quote, currency_name = 'quote') for quote in quotes]
quote_rates_topics = [self.rates_quote_stream.format(**pathParam) for pathParam in pathParams]
return [self.topics.append(quote_rates_topic) for quote_rates_topic in quote_rates_topics]
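# ---------------------------------------------------------------------------
# Usage sketch for the websocket streams above.  The stream* methods only
# append STOMP topics to self.topics; connect()/run() then subscribe to every
# collected topic and hand each decoded frame to the on_message callback.
# The class name `LatokenAPI` and its constructor arguments are assumptions
# for illustration only; substitute the actual client class defined earlier
# in this module.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    async def print_message(message):
        # Minimal consumer: print the destination header and part of the frame body.
        print(message['headers'].get('destination'), str(message['body'])[:120])

    client = LatokenAPI(apiKey='YOUR_API_KEY', apiSecret='YOUR_API_SECRET')  # assumed name/signature
    client.streamTickers()            # public stream: tickers for all pairs
    client.streamRates(['BTC/USDT'])  # public stream: rate for one pair
    # client.streamAccounts()         # private stream, would need signed=True below
    client.run(client.connect(streams=client.topics, signed=False, on_message=print_message))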
|
import tensorflow as tf
import sys
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
path = sys.path[0]
class Cifar(object):
def __init__(self):
        # image dimensions
self.height = 32
self.width = 32
self.channels = 3
        # byte counts per record
self.image_bytes = self.height * self.width * self.channels
self.label_bytes = 1
self.all_bytes = self.image_bytes + self.label_bytes
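        # Each record in the CIFAR-10 binary files is 1 label byte followed by
        # 32*32*3 = 3072 image bytes stored plane by plane (R, G, B); that is
        # why read_binary() reshapes to (channels, height, width) first and
        # then transposes to (height, width, channels).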
def read_binary(self):
file_name = os.listdir('cifar-10')
#print('file_name:\n', file_name)
file_list = [os.path.join('./cifar-10/', file) for file in file_name if file[-3:] == 'bin']
#print('file_list:\n', file_list)
        # 1. Build the filename queue
file_queue = tf.train.string_input_producer(file_list)
        # 2. Read and decode
        # reading stage
reader = tf.FixedLengthRecordReader(self.all_bytes)
        # key: file name, value: one sample
key, value = reader.read(file_queue)
print('key:\n', key)
print('value:\n', value)
        # decoding stage
decoded = tf.decode_raw(value, tf.uint8)
print('decoded:\n', decoded)
        # split the label and the image features apart
label = tf.slice(decoded, [0], [self.label_bytes])
image = tf.slice(decoded, [self.label_bytes], [self.image_bytes])
print('label:\n', label)
print('image:\n', image)
        # adjust the image shape
image_reshaped = tf.reshape(image, shape = [self.channels, self.height, self.width])
print('image_reshaped:\n', image_reshaped)
        # transpose to (height, width, channels)
image_transposed = tf.transpose(image_reshaped, [1, 2, 0])
print('image_transposed:\n', image_transposed)
        # cast the image to float32
image_cast = tf.cast(image_transposed, tf.float32)
print('image_cast:\n', image_cast)
        # 3. Batch processing
label_batch, image_batch = tf.train.batch([label, image_cast], batch_size = 100, num_threads = 1, capacity = 100)
print('label_batch:\n', label_batch)
print('image_batch:\n', image_batch)
        # start a session
with tf.Session() as sess:
            # start the queue-runner threads
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess = sess, coord = coord)
key_new, value_new, decoded_new, label_new, image_new, image_reshaped_new, image_transposed_new, label_value, image_value = sess.run([key, value, decoded, label, image, image_reshaped, image_transposed, label_batch, image_batch])
#print('key_new:\n', key_new)
#print('value_new:\n', value_new)
#print('decoded_new:\n', decoded_new)
#print('label_new:\n', label_new)
#print('image_new:\n', image_new)
#print('image_reshaped_new:\n', image_reshaped_new)
#print('image_transposed_new:\n', image_transposed_new)
            # stop and join the threads
coord.request_stop()
coord.join(threads)
return image_value, label_value
def write_to_tfrecords(self, image_batch, label_batch):
"""
ๅฐๆ ทๆฌ็นๅพๅผๅ็ฎๆ ๅผไธ่ตทๅๅ
ฅtfrecordsๆไปถ
"""
        with tf.python_io.TFRecordWriter(os.path.join(path, 'cifar10.tfrecords')) as write:
            # loop: build an example object per sample, serialize it and write it to the file
for i in range(100):
image = image_batch[i].tostring()
label = label_batch[i][0]
#print('tfrecords_image:\n', image)
#print('tfrecords_label:\n', label)
example = tf.train.Example(features = tf.train.Features(feature = {
'image':tf.train.Feature(bytes_list = tf.train.BytesList(value = [image])),
'label':tf.train.Feature(int64_list = tf.train.Int64List(value = [label])),
                }))
                # write the serialized example to the file
write.write(example.SerializeToString())
return None
def read_tfrecords(self):
"""
่ฏปๅtfrecordsๆไปถ
"""
        # 1. Build the filename queue
        file_queue = tf.train.string_input_producer([os.path.join(path, 'cifar10.tfrecords')])
        # 2. Read and decode
        # read
reader = tf.TFRecordReader()
key, value = reader.read(file_queue)
        # parse the example
feature = tf.parse_single_example(value, features = {
'image':tf.FixedLenFeature([], tf.string),
'label':tf.FixedLenFeature([], tf.int64),
})
image = feature['image']
label = feature['label']
print('read_tf_image:\n', image)
print('read_tf_label:\n', label)
        # decode
image_decoded = tf.decode_raw(image, tf.uint8)
print('image_decoded:\n', image_decoded)
        # adjust the image shape
image_reshaped = tf.reshape(image_decoded, [self.height, self.width, self.channels])
print('image_reshaped:\n', image_reshaped)
        # 3. Build the batch-processing queue
image_batch, label_batch = tf.train.batch([image_reshaped, label], batch_size = 100, num_threads = 1, capacity = 100)
print('image_batch:\n', image_batch)
print('label_batch:\n', label_batch)
        # start a session
with tf.Session() as sess:
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess = sess, coord = coord)
image_value, label_value = sess.run([image, label])
#print('image_value:\n', image_value)
#print('label_value:\n', label_value)
coord.request_stop()
coord.join(threads)
if __name__ == '__main__':
    # instantiate Cifar
cifar = Cifar()
#image_value, label_value = cifar.read_binary()
#cifar.write_to_tfrecords(image_value, label_value)
cifar.read_tfrecords()
|
import tkinter, sys
from math import sqrt, pi, atan2
from cmath import exp
from optparse import OptionParser
def get_parent_indices(i, j, nbran):
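    """Return the (generation, branch) indices of the parent of node (i, j).

    Every node of generation i-1 spawns nbran children in generation i, so
    child j descends from branch j // nbran of the previous generation.
    """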
return (i-1, j//nbran)
class fractal_tree:
def __init__(self, ratio=0.8, angle=pi/2., ngen=3, nbran=2):
"""
ratio: ratio of branch length with respect to previous generation
        angle: fan angle, i.e. how widely the branches spread (0 < angle < 2*pi)
ngen: number of generations.
nbran: number of new branches.
"""
# coords in complex notation
self.ratio = ratio
self.angle = angle
self.ngen = ngen
self.nbran = nbran
# the root is at (0,0)
# the top of the trunk is at position (0,1), this will be the
# starting node
self.pts2xy = {(-1,0): 0j, (0,0): 1j}
self.xmax, self.ymax = 0, 1
alpha_0 = angle/2.
delta_alpha = angle / float(nbran-1)
for i in range(1, ngen):
for j in range(nbran**i):
# child
ij = (i,j)
# parent
kl = get_parent_indices(i, j, nbran)
k, l = kl
xy_kl = self.pts2xy[kl]
xy_mn = self.pts2xy[(k-1, l//nbran)]
ph_ij = atan2(xy_kl.imag - xy_mn.imag, xy_kl.real - xy_mn.real)
self.pts2xy[ij] = self.pts2xy[kl] + \
ratio**i * \
exp(1j*(ph_ij+ alpha_0 - (j%nbran)*delta_alpha))
x, y = self.pts2xy[ij].real, self.pts2xy[ij].imag
self.xmax = max(x, self.xmax)
self.ymax = max(y, self.ymax)
def get_pixel_coordinates(self, x, y, width, height):
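        """Map tree coordinates to canvas pixels.

        x in [-xmax, xmax] is centred horizontally onto [0, width] and
        y in [0, ymax] is flipped onto [height, 0], so the root sits at the
        bottom of the tkinter canvas.
        """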
xpix = 0.5 * width * (1 + x/self.xmax)
ypix = height * (1. - y/self.ymax)
return (xpix, ypix)
def draw(self, root, height=600):
"""
Draw the tree using tkinter
"""
aspect_ratio = 2*self.xmax/self.ymax
width = aspect_ratio*height
canvas = tkinter.Canvas(root, height=height, width=width,
background='white')
canvas.pack()
for i in range(0, self.ngen):
for j in range(max(1, self.nbran**i)):
ij = (i,j)
x, y = self.pts2xy[ij].real, self.pts2xy[ij].imag
xpix, ypix = self.get_pixel_coordinates(x,y,
width=width,
height=height)
# parent
kl = get_parent_indices(i,j, self.nbran)
u, v = self.pts2xy[kl].real, self.pts2xy[kl].imag
upix, vpix = self.get_pixel_coordinates(u, v,
width=width,
height=height)
canvas.create_line(upix,vpix, xpix,ypix, fill='black')
##############################################################################
def main():
parser = OptionParser()
parser.add_option('-r', '--ratio', action='store', type="float",
dest="ratio",
help='Ratio of branch length with respect to previous generation (<1.).',
default=0.5,
)
parser.add_option('-n', '--ngen', action='store', type="int",
dest="ngen",
help='Number of generations (>1).',
default=4,
)
parser.add_option('-y', '--ysize', action='store', type="int",
dest="ysize",
help='Number of vertical pixels.',
default=400,
)
parser.add_option('-a', '--angle', action='store', type="float",
dest="angle",
help='Fan angle between group of branches in deg.',
default=90,
)
parser.add_option('-N', '--Nbran', action='store', type="int",
dest="nbran",
help='Number of new branches.',
default=2,
)
options, args = parser.parse_args(sys.argv)
t = fractal_tree(ngen=options.ngen, ratio=options.ratio,
angle=options.angle*pi/180.0, nbran=options.nbran)
root = tkinter.Tk()
t.draw(root, height=options.ysize)
root.mainloop()
if __name__=='__main__': main()
|
from ..utils.parsing_utils import sanitize_text
class DoorStyle:
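    """Groups the text lines of one door style into door and drawer rows.

    text_lines are assumed to be pdfminer-style objects exposing get_text(),
    x0 and y0.  Rows are bucketed by their y0 coordinate and classified as
    doors or drawer fronts by the type codes found on that row ("BE", "WE",
    "S", "P" vs "FF", "DF"); the inside/outside edge profile lines are parsed
    separately.
    """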
def __init__(
self, style_name: str, species: str, y_range: range, text_lines: list
) -> None:
self.name = style_name
self.species = species
self.ypos_range = y_range
self.text_lines = text_lines
self.doors = []
self.drawers = []
self.inside_profile = ""
self.outside_profile = ""
self._door_type_defs = ("BE", "WE", "S", "P")
self._drawer_type_defs = ("FF", "DF")
self._door_ypos = []
self._drawer_ypos = []
self._door_text_lines = set()
self._drawer_text_lines = set()
self._get_door_drawer_ypos()
self._get_door_drawer_text_lines()
self._group_data_into_dicts_by_ypos()
self._get_outside_profile()
self._get_inside_profile()
def _get_door_drawer_ypos(self) -> None:
self._door_ypos = set(
[
line.y0
for line in self.text_lines
if line.get_text().replace("\n", "") in self._door_type_defs
]
)
self._drawer_ypos = set(
[
line.y0
for line in self.text_lines
if line.get_text().replace("\n", "") in self._drawer_type_defs
]
)
def _get_door_drawer_text_lines(self) -> None:
self._door_text_lines = [
line for line in self.text_lines if line.y0 in self._door_ypos
]
self._drawer_text_lines = [
line for line in self.text_lines if line.y0 in self._drawer_ypos
]
def _group_data_into_dicts_by_ypos(self) -> None:
door_dict = {}
drawer_dict = {}
for line in self._door_text_lines:
if line.y0 not in door_dict:
door_dict[line.y0] = [line]
else:
door_dict[line.y0].append(line)
for line in self._drawer_text_lines:
if line.y0 not in drawer_dict:
drawer_dict[line.y0] = [line]
else:
drawer_dict[line.y0].append(line)
for ypos in door_dict:
door = {"size": "", "qty": ""}
for line in door_dict[ypos]:
text = sanitize_text(line)
if line.x0 > 280:
continue
if "x" in text:
door["size"] = text
else:
door["qty"] = text
door_dict[ypos] = door
for ypos in drawer_dict:
drawer = {"size": "", "qty": ""}
for line in drawer_dict[ypos]:
text = sanitize_text(line)
if line.x0 > 280:
continue
if "x" in text:
drawer["size"] = text
else:
drawer["qty"] = text
drawer_dict[ypos] = drawer
self.doors = list(door_dict.values())
self.drawers = list(drawer_dict.values())
def _get_outside_profile(self) -> None:
for line in self.text_lines:
if "Outside Edge Profile" in line.get_text():
self.outside_profile = (
line.get_text().replace("\n", "").split(":")[1].strip()
)
def _get_inside_profile(self) -> None:
for line in self.text_lines:
if "Inside Edge Profile" in line.get_text():
self.inside_profile = (
line.get_text().replace("\n", "").split(":")[1].strip()
)
|
# -*- coding: utf-8 -*-
from qcloudsdkcore.request import Request
class CdbTdsqlRenameInstanceRequest(Request):
def __init__(self):
super(CdbTdsqlRenameInstanceRequest, self).__init__(
'tdsql', 'qcloudcliV1', 'CdbTdsqlRenameInstance', 'tdsql.api.qcloud.com')
def get_cdbInstanceId(self):
return self.get_params().get('cdbInstanceId')
def set_cdbInstanceId(self, cdbInstanceId):
self.add_param('cdbInstanceId', cdbInstanceId)
def get_name(self):
return self.get_params().get('name')
def set_name(self, name):
self.add_param('name', name)
|
from hashlib import md5
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy import select, func
from flask_security import RoleMixin, UserMixin
from src import db, BaseMixin, ReprMixin
from src.utils.serializer_helper import serialize_data
roles_users = db.Table('roles_users',
db.Column('user_id', db.Integer, db.ForeignKey('user.id')),
db.Column('role_id', db.Integer, db.ForeignKey('role.id')))
class Role(db.Model, RoleMixin, ReprMixin):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(80), unique=True)
description = db.Column(db.String(255))
class User(db.Model, BaseMixin, ReprMixin, UserMixin):
email = db.Column(db.String(127), unique=True, nullable=False)
password = db.Column(db.String(255), default='', nullable=False)
number = db.Column(db.BigInteger)
active = db.Column(db.Boolean())
confirmed_at = db.Column(db.DateTime())
last_login_at = db.Column(db.DateTime())
current_login_at = db.Column(db.DateTime())
last_login_ip = db.Column(db.String(45))
current_login_ip = db.Column(db.String(45))
login_count = db.Column(db.Integer)
shop_id = db.Column(db.Integer, db.ForeignKey('shop.id'), unique=True)
roles = db.relationship('Role', secondary=roles_users,
backref=db.backref('users', lazy='dynamic'))
user_profile = db.relationship('UserProfile', uselist=False, backref='user')
addresses = db.relationship('Address', uselist=True, backref='user', lazy='dynamic')
coupons = db.relationship('CouponUserMapping', back_populates='customer')
@staticmethod
def hash_md5(data):
return md5(data.encode('utf-8')).hexdigest()
def get_auth_token(self):
pass
def generate_auth_token(self):
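        # The token is the serialized pair (user id, md5 of the stored password
        # value), so changing the password should invalidate previously issued
        # tokens, assuming verification recomputes and compares this pair.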
token = serialize_data([str(self.id), self.hash_md5(self.password)])
return token
@hybrid_property
def is_admin(self):
return self.has_role('admin') or self.has_role('shop_owner')
@hybrid_property
def first_name(self):
return self.user_profile.first_name or ''
@hybrid_property
def last_name(self):
return self.user_profile.last_name or ''
@first_name.expression
def first_name(cls):
return select([func.lower(UserProfile.first_name)]).where(UserProfile.user_id == cls.id).as_scalar()
@last_name.expression
def last_name(cls):
return select([UserProfile.last_name]).where(UserProfile.user_id == cls.id).as_scalar()
@hybrid_property
def authentication_token(self):
return self.generate_auth_token()
class UserProfile(db.Model, BaseMixin):
first_name = db.Column(db.String(255))
last_name = db.Column(db.String(255))
# dob = db.Column(db.DateTime, default=db.func.current_timestamp(), nullable=True)
profile_picture = db.Column(db.String(512), nullable=True)
address = db.Column(db.Integer)
user_id = db.Column(db.Integer, db.ForeignKey('user.id', ondelete='CASCADE'), unique=True)
class Address(db.Model, BaseMixin):
address_line1 = db.Column(db.String(255), nullable=False)
address_line2 = db.Column(db.String(255), nullable=True)
pin_code = db.Column(db.Integer)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
locality = db.Column(db.String(255), nullable=True)
city = db.Column(db.String(255), nullable=True)
# locality_id = db.Column(db.Integer, db.ForeignKey('locality.id'))
#
# locality = db.relationship('Locality', uselist=False, backref='address')
#
# @hybrid_property
# def locality_name(self):
# return self.locality.name
#
# @locality_name.expression
# def locality_name(cls):
# return select([Locality.name]).where(Locality.id == cls.locality_id).as_scalar()
class Locality(db.Model, BaseMixin):
name = db.Column(db.String(127), nullable=False)
city = db.Column(db.Integer, db.ForeignKey('city.id'))
class City(db.Model, BaseMixin):
name = db.Column(db.String(55))
class Coupon(db.Model, BaseMixin, ReprMixin):
name = db.Column(db.String(255))
discount_type = db.Column(db.Enum('value', 'percentage'), default='percentage')
discount = db.Column(db.Float(precision=2))
max_usage = db.Column(db.SmallInteger)
min_usage = db.Column(db.SmallInteger)
for_all = db.Column(db.Boolean(True))
expiry = db.Column(db.DateTime)
is_taxable = db.Column(db.Boolean(True))
customers = db.relationship('CouponUserMapping', back_populates='coupon')
class CouponUserMapping(db.Model, BaseMixin):
id = db.Column(db.Integer, primary_key=True)
used = db.Column(db.SmallInteger, primary_key=True)
coupon_id = db.Column(db.Integer, db.ForeignKey('coupon.id'))
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
customer = db.relationship('User', back_populates='coupons')
coupon = db.relationship('Coupon', back_populates='customers')
|
import importlib
import grpc
import web3
from snet.snet_cli.utils.utils import RESOURCES_PATH, add_to_path
class ConcurrencyManager:
def __init__(self, concurrent_calls):
self.__concurrent_calls = concurrent_calls
self.__token = ''
self.__planned_amount = 0
self.__used_amount = 0
@property
def concurrent_calls(self):
return self.__concurrent_calls
def get_token(self, service_client, channel, service_call_price):
if len(self.__token) == 0:
self.__token = self.__get_token(service_client, channel, service_call_price)
elif self.__used_amount >= self.__planned_amount:
self.__token = self.__get_token(service_client, channel, service_call_price, new_token=True)
return self.__token
def __get_token(self, service_client, channel, service_call_price, new_token=False):
if not new_token:
amount = channel.state["last_signed_amount"]
if amount != 0:
try:
token_reply = self.__get_token_for_amount(service_client, channel, amount)
planned_amount = token_reply.planned_amount
used_amount = token_reply.used_amount
if planned_amount - used_amount > 0:
self.__used_amount = used_amount
self.__planned_amount = planned_amount
return token_reply.token
except grpc.RpcError as e:
if e.details() != "Unable to retrieve planned Amount ":
raise
amount = channel.state["last_signed_amount"] + service_call_price
token_reply = self.__get_token_for_amount(service_client, channel, amount)
self.__used_amount = token_reply.used_amount
self.__planned_amount = token_reply.planned_amount
return token_reply.token
def __get_stub_for_get_token(self, service_client):
grpc_channel = service_client.get_grpc_base_channel()
with add_to_path(str(RESOURCES_PATH.joinpath("proto"))):
token_service_pb2_grpc = importlib.import_module("token_service_pb2_grpc")
return token_service_pb2_grpc.TokenServiceStub(grpc_channel)
def __get_token_for_amount(self, service_client, channel, amount):
nonce = channel.state["nonce"]
stub = self.__get_stub_for_get_token(service_client)
with add_to_path(str(RESOURCES_PATH.joinpath("proto"))):
token_service_pb2 = importlib.import_module("token_service_pb2")
current_block_number = service_client.sdk_web3.eth.getBlock("latest").number
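            # Two signatures are produced: first the MPE claim message
            # (mpe address, channel id, nonce, signed amount) is signed, then
            # that signature is bound to the latest block number, tying the
            # token request to a recent block.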
message = web3.Web3.soliditySha3(
["string", "address", "uint256", "uint256", "uint256"],
["__MPE_claim_message", service_client.mpe_address, channel.channel_id, nonce, amount]
)
mpe_signature = service_client.generate_signature(message)
message = web3.Web3.soliditySha3(
["bytes", "uint256"],
[mpe_signature, current_block_number]
)
sign_mpe_signature = service_client.generate_signature(message)
request = token_service_pb2.TokenRequest(
channel_id=channel.channel_id, current_nonce=nonce, signed_amount=amount,
signature=bytes(sign_mpe_signature), claim_signature=bytes(mpe_signature),
current_block=current_block_number)
token_reply = stub.GetToken(request)
return token_reply
def record_successful_call(self):
self.__used_amount += 1
|
from http.client import RemoteDisconnected
from xmlrpc.client import Fault
from fastapi import APIRouter, HTTPException
from XenAPI.XenAPI import Failure
from XenGarden.session import create_session
from XenGarden.VIF import VIF
from API.v1.Common import xenapi_failure_jsonify
from app.settings import Settings
from .model import IPAddressesModel, IPAddressModel
router = APIRouter()
@router.get("/{cluster_id}/vif/{vif_uuid}/ipv4/allowed")
async def vif_get_ipv4_by_uuid(cluster_id: str, vif_uuid: str):
"""Get VIF Allowed IPv4 by UUID"""
try:
session = create_session(
_id=cluster_id, get_xen_clusters=Settings.get_xen_clusters()
)
vif: VIF = VIF.get_by_uuid(session=session, uuid=vif_uuid)
ret = dict(
success=True,
data=vif.get_allowed_address_v4(),
)
session.xenapi.session.logout()
return ret
except Failure as xenapi_error:
raise HTTPException(
status_code=500, detail=xenapi_failure_jsonify(xenapi_error)
)
except Fault as xml_rpc_error:
raise HTTPException(
status_code=int(xml_rpc_error.faultCode),
detail=xml_rpc_error.faultString,
)
except RemoteDisconnected as rd_error:
raise HTTPException(status_code=500, detail=rd_error.strerror)
@router.post("/{cluster_id}/vif/{vif_uuid}/ipv4/allowed")
async def vif_add_ipv4_by_uuid(cluster_id: str, vif_uuid: str, address: IPAddressModel):
"""Add VIF Allowed IPv4 by UUID"""
try:
session = create_session(
_id=cluster_id, get_xen_clusters=Settings.get_xen_clusters()
)
vif: VIF = VIF.get_by_uuid(session=session, uuid=vif_uuid)
vif.add_allowed_address_v4(address.address)
ret = dict(
success=True,
)
session.xenapi.session.logout()
return ret
except Failure as xenapi_error:
raise HTTPException(
status_code=500, detail=xenapi_failure_jsonify(xenapi_error)
)
except Fault as xml_rpc_error:
raise HTTPException(
status_code=int(xml_rpc_error.faultCode),
detail=xml_rpc_error.faultString,
)
except RemoteDisconnected as rd_error:
raise HTTPException(status_code=500, detail=rd_error.strerror)
@router.put("/{cluster_id}/vif/{vif_uuid}/ipv4/allowed")
async def vif_set_ipv4_by_uuid(
cluster_id: str, vif_uuid: str, addresses: IPAddressesModel
):
"""Set VIF Allowed IPv4 by UUID"""
try:
session = create_session(
_id=cluster_id, get_xen_clusters=Settings.get_xen_clusters()
)
vif: VIF = VIF.get_by_uuid(session=session, uuid=vif_uuid)
vif.set_allowed_address_v4(addresses.address)
ret = dict(
success=True,
)
session.xenapi.session.logout()
return ret
except Failure as xenapi_error:
raise HTTPException(
status_code=500, detail=xenapi_failure_jsonify(xenapi_error)
)
except Fault as xml_rpc_error:
raise HTTPException(
status_code=int(xml_rpc_error.faultCode),
detail=xml_rpc_error.faultString,
)
except RemoteDisconnected as rd_error:
raise HTTPException(status_code=500, detail=rd_error.strerror)
@router.delete("/{cluster_id}/vif/{vif_uuid}/ipv4/allowed")
async def vif_reset_ipv4_by_uuid(
cluster_id: str, vif_uuid: str, address: IPAddressModel
):
"""Set VIF Allowed IPv4 by UUID"""
try:
session = create_session(
_id=cluster_id, get_xen_clusters=Settings.get_xen_clusters()
)
vif: VIF = VIF.get_by_uuid(session=session, uuid=vif_uuid)
vif.delete_allowed_address_v4(address.address)
ret = dict(
success=True,
)
session.xenapi.session.logout()
return ret
except Failure as xenapi_error:
raise HTTPException(
status_code=500, detail=xenapi_failure_jsonify(xenapi_error)
)
except Fault as xml_rpc_error:
raise HTTPException(
status_code=int(xml_rpc_error.faultCode),
detail=xml_rpc_error.faultString,
)
except RemoteDisconnected as rd_error:
raise HTTPException(status_code=500, detail=rd_error.strerror)
|
"""
Bosons.
Package:
RoadNarrows elemenpy package.
File:
boson.py
Link:
https://github.com/roadnarrows-robotics/
Copyright:
(c) 2019. RoadNarrows LLC
http://www.roadnarrows.com
All Rights Reserved
License:
MIT
"""
from copy import copy
from enum import Enum
from elemenpy.core.common import (isderivedclass)
from elemenpy.core.format import (Format, default_encoder)
from elemenpy.core.prettyprint import (print2cols)
from elemenpy.sm.standardmodel import (StandardModel as sm, SubatomicParticle)
from elemenpy.sm.spin import (SpinQuantumNumber)
from elemenpy.sm.electriccharge import (ElectricCharge)
from elemenpy.sm.colorcharge import (ColorCharge)
# -----------------------------------------------------------------------------
# Boson Base Class
# -----------------------------------------------------------------------------
class Boson(SubatomicParticle):
""" Boson base class. """
class BosonSubfamily(Enum):
""" Boson subfamily enumeration. """
UNKNOWN = 0
SCALAR = 1 # scalar
VECTOR = 2 # vector
Classification = sm.Classification.BOSON
Family = sm.Family.BOSON
Statistics = sm.Statistics.BOSONIC
Name = 'boson'
Symbol = 'boson'
Subfamily = BosonSubfamily.UNKNOWN
# registered boson subclasses by the @Boson.subclass decorator
Subclasses = {}
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
# Class Methods
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
@classmethod
def subclass(klass):
"""
Boson subclass decorator to add a subclass to an internal list.
"""
def wrap(D):
"""
Store derived subclass.
Parameters:
D Prospective derived class.
"""
if isderivedclass(D, klass):
klass.Subclasses[D.__name__] = D
return D
return wrap
@classmethod
def finalize_boson_family(klass):
"""
Finalize all registered boson subclass attributes.
Bosons are interdependent.
"""
for qname, qklass in klass.Subclasses.items():
qklass.finalize_boson()
@classmethod
def boson_family(klass):
"""
Get the dictionary of all registered boson subclasses.
Returns:
{qname: qclass, ...}
"""
return klass.Subclasses
@classmethod
def boson_class(klass, qname):
"""
Get the boson subclass.
Parameters:
qname Boson subclass name.
Returns:
qclass
"""
return klass.Subclasses[qname]
@classmethod
def subfamily(klass):
""" Return boson subfamily. """
return klass.Subfamily
@classmethod
def print_boson_properties(klass, indent=0, **print_kwargs):
"""
    Print fixed boson particle properties to output stream.
Parameters:
indent Line indentation.
print_kwargs Print control keyword arguments.
"""
klass.print_subatomic_properties(indent=indent, **print_kwargs)
#print(f"{'':<{indent+2}}Boson", **print_kwargs)
print2cols([
('Subfamily', klass.Subfamily.name),],
c1width=16, indent=indent, **print_kwargs)
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
# Class Instance Methods
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
def __init__(self):
""" Boson initializer. """
SubatomicParticle.__init__(self)
def __repr__(self):
return f"{self.__module__}.{self.__class__.__name__}"\
f"()"
def __str__(self):
return self.name
@property
def subfamily(self):
""" Return boson subfamily. """
return self.Subfamily
def print_state(self, indent=0, **print_kwargs):
"""
Print boson state to output stream using default encoder.
Parameters:
indent Line indentation.
print_kwargs Print control keyword arguments.
"""
SubatomicParticle.print_state(self, indent=indent, **print_kwargs)
# -----------------------------------------------------------------------------
# Photon Class
# -----------------------------------------------------------------------------
@Boson.subclass()
class Photon(Boson):
""" Photon class. """
#
# Class Fixed Properties
#
Pid = sm.ParticleId.PHOTON
Name = "photon"
Symbol = default_encoder('$sm(gamma)')
RestMass = 0.0
ElecCharge = ElectricCharge(0)
QSpin = SpinQuantumNumber(1) # intrinsic spin number
Subfamily = Boson.BosonSubfamily.VECTOR
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
# Class Methods
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
@classmethod
def finalize_boson(klass):
"""
    Finalize boson's class attributes.
Finalization can only proceed when all boson classes have been
defined due to interdependencies.
"""
klass.AntiParticle = klass.boson_class('Photon')
@classmethod
def print_properties(klass, indent=0, **print_kwargs):
klass.print_boson_properties(indent=indent, **print_kwargs)
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
# Class Instance Methods
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
def __init__(self):
""" Photon initializer. """
Boson.__init__(self)
def __repr__(self):
return f"{self.__module__}.{self.__class__.__name__}"\
f"()"
def __str__(self):
return self.Name
def print_state(self, indent=0, **print_kwargs):
"""
Print photon state to output stream using default encoder.
Parameters:
indent Line indentation.
print_kwargs Print control keyword arguments.
"""
Boson.print_state(self, indent=indent, **print_kwargs)
# -----------------------------------------------------------------------------
# WBosonN Class
# -----------------------------------------------------------------------------
@Boson.subclass()
class WBosonN(Boson):
""" WBosonN class. """
#
# Class Fixed Properties
#
Pid = sm.ParticleId.W_BOSON_N
Name = "W-boson-"
Symbol = default_encoder('$sm(W-)')
RestMass = 80.385e3
ElecCharge = ElectricCharge(-1)
QSpin = SpinQuantumNumber(1) # intrinsic spin number
Subfamily = Boson.BosonSubfamily.VECTOR
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
# Class Methods
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
@classmethod
def finalize_boson(klass):
"""
    Finalize boson's class attributes.
Finalization can only proceed when all boson classes have been
defined due to interdependencies.
"""
klass.AntiParticle = klass.boson_class('WBosonP')
@classmethod
def print_properties(klass, indent=0, **print_kwargs):
klass.print_boson_properties(indent=indent, **print_kwargs)
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
# Class Instance Methods
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
def __init__(self):
""" W- boson initializer. """
Boson.__init__(self)
def __repr__(self):
return f"{self.__module__}.{self.__class__.__name__}"\
f"()"
def __str__(self):
return self.Name
def print_state(self, indent=0, **print_kwargs):
"""
Print W- boson state to output stream using default encoder.
Parameters:
indent Line indentation.
print_kwargs Print control keyword arguments.
"""
Boson.print_state(self, indent=indent, **print_kwargs)
# -----------------------------------------------------------------------------
# WBosonP Class
# -----------------------------------------------------------------------------
@Boson.subclass()
class WBosonP(Boson):
""" WBosonP class. """
#
# Class Fixed Properties
#
Pid = sm.ParticleId.W_BOSON_P
Name = "W-boson+"
Symbol = default_encoder('$sm(W+)')
RestMass = 80.385e3
ElecCharge = ElectricCharge(1)
QSpin = SpinQuantumNumber(1) # intrinsic spin number
Subfamily = Boson.BosonSubfamily.VECTOR
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
# Class Methods
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
@classmethod
def finalize_boson(klass):
"""
    Finalize boson's class attributes.
Finalization can only proceed when all boson classes have been
defined due to interdependencies.
"""
klass.AntiParticle = klass.boson_class('WBosonN')
@classmethod
def print_properties(klass, indent=0, **print_kwargs):
klass.print_boson_properties(indent=indent, **print_kwargs)
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
# Class Instance Methods
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
def __init__(self):
""" W+ boson initializer. """
Boson.__init__(self)
def __repr__(self):
return f"{self.__module__}.{self.__class__.__name__}"\
f"()"
def __str__(self):
return self.Name
def print_state(self, indent=0, **print_kwargs):
"""
Print W+ boson state to output stream using default encoder.
Parameters:
indent Line indentation.
print_kwargs Print control keyword arguments.
"""
Boson.print_state(self, indent=indent, **print_kwargs)
# -----------------------------------------------------------------------------
# ZBoson Class
# -----------------------------------------------------------------------------
@Boson.subclass()
class ZBoson(Boson):
""" ZBoson class. """
#
# Class Fixed Properties
#
Pid = sm.ParticleId.Z_BOSON
Name = "Z-boson"
Symbol = default_encoder('$sm(Z)')
RestMass = 91.1875e3
ElecCharge = ElectricCharge(0)
QSpin = SpinQuantumNumber(1) # intrinsic spin number
Subfamily = Boson.BosonSubfamily.VECTOR
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
# Class Methods
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
@classmethod
def finalize_boson(klass):
"""
    Finalize boson's class attributes.
Finalization can only proceed when all boson classes have been
defined due to interdependencies.
"""
klass.AntiParticle = klass.boson_class('ZBoson')
@classmethod
def print_properties(klass, indent=0, **print_kwargs):
klass.print_boson_properties(indent=indent, **print_kwargs)
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
# Class Instance Methods
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
def __init__(self):
""" Z boson initializer. """
Boson.__init__(self)
def __repr__(self):
return f"{self.__module__}.{self.__class__.__name__}"\
f"()"
def __str__(self):
return self.Name
def print_state(self, indent=0, **print_kwargs):
"""
Print Z boson state to output stream using default encoder.
Parameters:
indent Line indentation.
print_kwargs Print control keyword arguments.
"""
Boson.print_state(self, indent=indent, **print_kwargs)
# -----------------------------------------------------------------------------
# Gluon Class
# -----------------------------------------------------------------------------
@Boson.subclass()
class Gluon(Boson):
""" Gluon class. """
#
# Class Fixed Properties
#
Pid = sm.ParticleId.GLUON
Name = "gluon"
Symbol = default_encoder('$sm(g)')
RestMass = 0.0
ElecCharge = ElectricCharge(0)
QSpin = SpinQuantumNumber(1) # intrinsic spin number
Subfamily = Boson.BosonSubfamily.VECTOR
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
# Class Methods
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
@classmethod
def finalize_boson(klass):
"""
    Finalize boson's class attributes.
Finalization can only proceed when all boson classes have been
defined due to interdependencies.
"""
klass.AntiParticle = klass.boson_class('Gluon')
@classmethod
def print_properties(klass, indent=0, **print_kwargs):
klass.print_boson_properties(indent=indent, **print_kwargs)
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
# Class Instance Methods
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
def __init__(self, color, anticolor):
"""
Gluon initializer.
Parameters:
color Primary color charge.
anticolor Anticolor charge.
"""
Boson.__init__(self)
self._color_charge = ColorCharge(color)
self._anticolor_charge = ColorCharge(anticolor)
if not self.color_charge.is_primary_color():
raise ValueError(
f"{self.name} '{self.color_charge.name}' is not a primary color")
if not self.anticolor_charge.is_anticolor():
raise ValueError(
f"{self.name} '{self.anticolor_charge.name}' is not an anticolor")
if self.color_charge == self.anticolor_charge.complement:
raise ValueError(f"{self.name} " +
f"'{self.color_charge.name}-{self.anticolor_charge.name}' " +
"defines a meson")
def __repr__(self):
return f"{self.__module__}.{self.__class__.__name__}"\
f"{self.color_charge!r}, {self.anticolor_charge!r})"
def __str__(self):
return self.fqname
def __eq__(self, gluon):
"""
Equal to. self == gluon.
Two gluons are considered equal if they are of the same kind.
That is, gluons with the same color charges.
"""
return self.color_charge == gluon.color_charge and \
self.anticolor_charge == gluon.anticolor_charge
def __ne__(self, gluon):
"""
Not equal to. self != gluon.
Two gluons are considered not equal if they are not of the same kind.
That is, gluons that do not have the same color charges.
"""
return self.color_charge != gluon.color_charge or \
self.anticolor_charge != gluon.anticolor_charge
@property
def fqname(self):
return f"{self.color_charge.name}-{self.anticolor_charge.name} {self.name}"
@property
def color_charge(self):
""" Return primary color charge. """
return self._color_charge
@property
def anticolor_charge(self):
""" Return anticolor charge. """
return self._anticolor_charge
def print_state(self, indent=0, **print_kwargs):
"""
Print gluon state to output stream using default encoder.
Parameters:
indent Line indentation.
print_kwargs Print control keyword arguments.
"""
Boson.print_state(self, indent=indent, **print_kwargs)
print2cols([
('FQ Name', self.fqname),
('Color Charge',
f"{self.color_charge.symbol} {self.color_charge.name}"),
('Anticolor Charge',
f"{self.anticolor_charge.symbol} {self.anticolor_charge.name}"),],
indent=indent, **print_kwargs)
# -----------------------------------------------------------------------------
# HiggsBoson Class
# -----------------------------------------------------------------------------
@Boson.subclass()
class HiggsBoson(Boson):
""" HiggsBoson class. """
#
# Class Fixed Properties
#
Pid = sm.ParticleId.HIGGS_BOSON
Name = "higgs-boson"
Symbol = default_encoder('$sm(H0)')
RestMass = 125.09e3
ElecCharge = ElectricCharge(0)
QSpin = SpinQuantumNumber(0)
Subfamily = Boson.BosonSubfamily.SCALAR
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
# Class Methods
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
@classmethod
def finalize_boson(klass):
"""
    Finalize boson's class attributes.
Finalization can only proceed when all boson classes have been
defined due to interdependencies.
"""
klass.AntiParticle = klass.boson_class('HiggsBoson')
@classmethod
def print_properties(klass, indent=0, **print_kwargs):
klass.print_boson_properties(indent=indent, **print_kwargs)
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
# Class Instance Methods
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
def __init__(self):
""" Higgs boson initializer. """
Boson.__init__(self)
def __repr__(self):
return f"{self.__module__}.{self.__class__.__name__}"\
f"()"
def __str__(self):
return self.Name
def print_state(self, indent=0, **print_kwargs):
"""
Print Higgs boson state to output stream using default encoder.
Parameters:
indent Line indentation.
print_kwargs Print control keyword arguments.
"""
Boson.print_state(self, indent=indent, **print_kwargs)
# -----------------------------------------------------------------------------
# On module load execution
# -----------------------------------------------------------------------------
Boson.finalize_boson_family()
# -----------------------------------------------------------------------------
# Unit tests
# -----------------------------------------------------------------------------
if __name__ == "__main__":
import sys
import tests.utboson as ut
sys.exit(ut.utmain())
|
import asyncio
import unittest
from asgi.stream import AsgiHttpRequest, AsgiHttpResponse
from fakes import StreamReader, StreamWriter
class TestAsgiHttpRequest(unittest.TestCase):
def test_reading_scope(self):
raw_request = (
b"GET /api/v1/ HTTP/1.0\r\n"
b"User-Agent: curl/7.54.0\r\n"
b"Host: localhost:8000\r\n"
b"\r\n"
)
request = AsgiHttpRequest(StreamReader(raw_request))
scope = asyncio.run(request.scope())
self.assertDictEqual(
scope,
{
"type": "http",
"asgi": {"version": "3.0", "spec_version": "2.0",},
"http_version": "1.0",
"method": "GET",
"scheme": "http",
"path": "/api/v1/",
"query_string": "",
"headers": [
[b"user-agent", b"curl/7.54.0"],
[b"host", b"localhost:8000"],
],
},
)
def test_reading_entire_request(self):
raw_request = (
b"POST /api/v1/ HTTP/1.0\r\n"
b"User-Agent: curl/7.54.0\r\n"
b"Host: localhost:8000\r\n"
b"Content-Type: application/json\r\n"
b"Content-Length: 44\r\n"
b"\r\n"
b'{"first_name":"paul","last_name":"atreides"}'
)
async def read_request():
request = AsgiHttpRequest(StreamReader(raw_request))
scope = await request.scope()
event = await request.read()
return event
        event = asyncio.run(read_request())
        self.assertDictEqual(
            event,
{
"type": "http.request",
"body": b'{"first_name":"paul","last_name":"atreides"}',
"more_body": False,
},
)
class TestAsgiHttpResponse(unittest.TestCase):
def test_writing_headers(self):
message = {
"type": "http.response.start",
"status": 200,
"headers": [
[b"content-type", b"application/json"],
[b"content-length", b"44"],
],
}
writer = StreamWriter()
response = AsgiHttpResponse(writer)
asyncio.run(response.write(message))
self.assertEqual(
writer.stream,
(
b"HTTP/1.0 200 OK\r\n"
b"content-type: application/json\r\n"
b"content-length: 44\r\n"
b"\r\n"
),
)
def test_writing_headers_and_body(self):
messages = [
{
"type": "http.response.start",
"status": 200,
"headers": [
[b"content-type", b"application/json"],
[b"content-length", b"44"],
],
},
{
"type": "http.response.body",
"body": b'{"first_name":"paul","last_name":"atreides"}',
"more_body": False,
},
]
writer = StreamWriter()
response = AsgiHttpResponse(writer)
for message in messages:
asyncio.run(response.write(message))
self.assertEqual(
writer.stream,
(
b"HTTP/1.0 200 OK\r\n"
b"content-type: application/json\r\n"
b"content-length: 44\r\n"
b"\r\n"
b'{"first_name":"paul","last_name":"atreides"}'
),
)
|
# coding: utf-8
import numpy as np
import re
import copy
import sys
import networkx as nx
#import matplotlib.pyplot as plt
#import operator
#from collections import defaultdict
from collections import Counter
from collections import deque
import time
def parse_input(ii, DBG=True):
world = {}
index = 0
init_state = ii[0]
for c in range(15, len(init_state)):
if init_state[c]=="#":
world[index]=1
index = index+1
elif init_state[c]==".":
world[index]=0
index = index+1
else:
print("**"+init_state[c]+"**")
sys.exit()
if DBG: print(ii[0], world)
rules = []
rr = len(ii)
for ri in range(2,rr):
raw_rule = ii[ri]
rule_lhs=[]
for idx in range(0,5):
if raw_rule[idx]=="#":
rule_lhs.append(1)
elif raw_rule[idx]==".":
rule_lhs.append(0)
else:
print("**"+raw_rule[idx]+"**")
sys.exit()
if raw_rule[9]=="#":
rule_rhs=1
elif raw_rule[9]==".":
rule_rhs=0
rules.append((rule_lhs,rule_rhs))
if DBG: print(raw_rule, rule_lhs,rule_rhs)
return (world,rules)
def match(world, index, rule, DBG = True):
world_five_array = []
if DBG: print(rule)
for idx in range(-2,3):
if (index+idx) in world:
world_five_array.append(world[index+idx])
else:
world_five_array.append(0)
if DBG: print(world_five_array)
if world_five_array == rule[0]:
return (True, rule[1])
else:
return (False, None)
def count_plants(world):
count =0
sum_idx = 0
for k in world:
if world[k] == 1:
count = count+1
sum_idx = sum_idx + k
return(count, sum_idx)
def world_str(world):
out = ""
for k in world:
if (world[k]==1):
out = out +"#"
elif (world[k]==0):
out = out +"."
(ct,sum_idx) = count_plants(world)
return out + " " + str(ct) + " "+ str(sum_idx)
def function(ii, DBG = True):
# parse initial state into world
# parse rules
(world, rules) = parse_input(ii, DBG)
#if DBG: print(match(world,0,rules[1]))
if DBG: print("0: "+world_str(world))
# loop on xxx generations
# apply rules left to right, into a new world
# copy new world into world
# --> print out sum of indexes, and delta from previous generation
# (print out state, count plants)
# ** we see that delta is always the same after some generations
# so we can apply a formula (see below)
last_sum_idx = count_plants(world)[1]
for gen in range(1,135):
new_world = {}
for index in range(-2, len(world)+1):
#new_world[index] = 0
for rule in rules:
mm = match(world,index,rule,False)
if mm[0]==True:
new_world[index] = mm[1]
continue
#if DBG: print(str(gen)+": "+world_str(new_world))
world = new_world
(count, sum_idx) = count_plants(world)
delta = sum_idx-last_sum_idx
last_sum_idx = sum_idx
print(gen, count, sum_idx, delta)
# gen 128 count 78 sum_idx 12196 delta 247
# gen 129 count 78 sum_idx 12274 delta 78
# gen 130 count 78 sum_idx 12352 delta 78
# sum_idx(gen) = 12196 + ((gen-128) * 78)
return count_plants(world)[1]
def test(cc=None, expected=None, DBG = False):
start_millis = int(round(time.time() * 1000))
result = function(cc,DBG)
stop_millis = int(round(time.time() * 1000))
result = str(result)
expected = str(expected)
flag = (result == expected)
if(expected=="None"):
print("*** "+str(cc) + " *** -> Result = "+str(result))
else:
print("*** "+str(cc) + " *** -> Result = "+str(result), " -> success = "+ str(flag) + " -> expected " + expected)
print((stop_millis-start_millis),"ms",int((stop_millis-start_millis)/1000),"s",int((stop_millis-start_millis)/1000/60),"min")
return flag
t1="""initial state: #..#.#..##......###...###
...## => #
..#.. => #
.#... => #
.#.#. => #
.#.## => #
.##.. => #
.#### => #
#.#.# => #
#.### => #
##.#. => #
##.## => #
###.. => #
###.# => #
####. => #"""
tt1 = t1.splitlines()
#test(tt1,325,True)
#sys.exit()
INPUT_FILE="input-d12.txt"
f = open(INPUT_FILE, "r")
contents = f.read()
puzzle_input = contents.splitlines()
f.close()
ret = function(puzzle_input, True)
print(ret)
print(12196 + ( (50000000000-128) * 78))
# 3900000002212
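
# A minimal sketch of the closed-form answer described in the comments above: once the
# per-generation delta stabilizes (observed here as +78 per generation from generation 128,
# where the index sum was 12196), later generations follow a straight line. The constants
# come from the observed run above; the helper name and defaults are ours.
def extrapolate_sum_idx(gen, base_gen=128, base_sum=12196, delta=78):
    """Return the extrapolated sum of plant indexes at generation `gen`."""
    return base_sum + (gen - base_gen) * delta

# e.g. extrapolate_sum_idx(50000000000) == 3900000002212, matching the print above.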
|
import os
from PIL import Image
def get_img_size(file_name: str):
im = Image.open(file_name)
return im.size
types = {"ๅธฆ็ต่ฏๅ
็ตๅฎ": "core", "ไธๅธฆ็ต่ฏๅ
็ตๅฎ": "coreless"}
template = """
<object>
<name>{}</name>
<pose>Frontal</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<bndbox>
<xmin>{}</xmin>
<ymin>{}</ymin>
<xmax>{}</xmax>
<ymax>{}</ymax>
</bndbox>
</object>
"""
def parse_to_xml(image_root, annotation_root, img_id):
txt_path = annotation_root + os.sep + img_id + ".txt"
img_path = image_root + os.sep + img_id + ".jpg"
objects = []
filename = img_id
# read txt
with open(txt_path, "r", encoding="utf-8") as f:
for line in f:
line = line.rstrip()
t, x1, y1, x2, y2 = line.split(" ")[1:6]
t = types.get(t, "unknow")
objects.append({"t": t, "x1": x1, "x2": x2, "y1": y1, "y2": y2})
img_size = get_img_size(img_path)
size_w, size_h = img_size[0], img_size[1]
unknow = True
object_str = ""
for o in objects:
x1 = o["x1"]
x2 = o["x2"]
y1 = o["y1"]
y2 = o["y2"]
if o["t"] != "unknow":
unknow = False
object_str += template.format(o["t"], x1, y1, x2, y2)
if unknow:
return None
content = f"""
<annotation>
<folder>ml</folder>
<filename>{filename}.jpg</filename>
<source>
<database>imgs</database>
<annotation>imgs_anno</annotation>
<image>flickr</image>
<flickrid>{filename}.jpg</flickrid>
</source>
<owner>
<flickrid>buaa</flickrid>
<name>soft</name>
</owner>
<size>
<width>{size_w}</width>
<height>{size_h}</height>
<depth>3</depth>
</size>
<segmented>0</segmented>
{object_str}
</annotation>
"""
return content
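

# Hedged usage sketch (not part of the original script): the directory names and image id
# below are illustrative assumptions only.
if __name__ == "__main__":
    xml = parse_to_xml("./images", "./annotations", "00001")
    if xml is not None:
        with open("00001.xml", "w", encoding="utf-8") as out:
            out.write(xml)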
|
'''
Generative Adversarial Network
version: 1.1.1
date: Jan/13/2019 version: 1.0.0
Jan/14/2019 1.1.0 deal with STL10 dataset
Jan/16/2019 1.1.1 bug fix
'''
import argparse
import os
import torch.nn as nn
import torch.optim as optim
import torchvision.utils as vutils
from utils.DatasetConverter import DatasetConverter
from utils.TrainGAN import TrainGAN
from utils.discriminators.DiscriminatorDCGAN import DiscriminatorDCGAN
from utils.discriminators.DiscriminatorGAN import DiscriminatorGAN
from utils.generators.GeneratorDCGAN import GeneratorDCGAN
from utils.generators.GeneratorGAN import GeneratorGAN
parser = argparse.ArgumentParser(add_help=True)
parser.add_argument('-b', '--batch', help='batch size', type=int, default=100)
parser.add_argument('-e', '--epoch', help='number of epochs', type=int, default=20)
parser.add_argument('-d', '--dataset', help='select dataset: mnist, cifar10, stl10', type=str, default='mnist')
parser.add_argument('-gt', '--gantype', help='select gan type: gcgan, gan', type=str, default='gcgan')
parser.add_argument('-ngf', type=int, default=64)
parser.add_argument('-ndf', type=int, default=64)
args = parser.parse_args()
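# Example invocation (the script filename below is an assumption; the flags are the ones
# defined above, shown with their long forms):
#   python train_gan.py --dataset cifar10 --gantype gan --epoch 50 --batch 100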
BATCH_SIZE = args.batch
NGF = args.ngf
NDF = args.ndf
MAX_EPOCH = args.epoch
DATA_SET = args.dataset
GAN_TYPE = args.gantype
CHANNEL_SIZE = 1
BASE_DIR = './output_images/'
SAVE_DIR = BASE_DIR + DATA_SET + '_' + GAN_TYPE
os.makedirs(SAVE_DIR, exist_ok=True)
if DATA_SET == 'cifar10' or DATA_SET == 'stl10':
CHANNEL_SIZE = 3
netD = DiscriminatorDCGAN(CHANNEL_SIZE, NDF)
netG = GeneratorDCGAN(100, NGF, CHANNEL_SIZE)
criterion = nn.BCELoss()
if GAN_TYPE == 'gan':
netD = DiscriminatorGAN(64 * 64 * CHANNEL_SIZE, 1)
netG = GeneratorGAN(100, 64 * 64 * CHANNEL_SIZE, nc=CHANNEL_SIZE)
criterion = nn.BCELoss()
print('BATCH SIZE:', BATCH_SIZE)
print('EPOCHS:', MAX_EPOCH)
print('DATASET:', DATA_SET.upper())
print('GAN type:', GAN_TYPE.upper())
optimizerG = optim.Adam(netG.parameters(), lr=0.0002, betas=(0.5, 0.999))
optimizerD = optim.Adam(netD.parameters(), lr=0.0002, betas=(0.5, 0.999))
train_gan = TrainGAN(BATCH_SIZE)
train_gan.networks(netD, netG)
train_gan.optimizers(optimizerD, optimizerG)
train_gan.loss(criterion)
(train_loader, _) = DatasetConverter(args.dataset, batch_size=BATCH_SIZE).run()
for epoch in range(MAX_EPOCH):
for i, (data, _) in enumerate(train_loader):
real_img = data
errD, _, _ = train_gan.update_discriminator(real_img)
errG, _ = train_gan.update_generator()
if i % 100 == 0:
print('[{}/{}][{}/{}] Loss_D: {:.3f} Loss_G: {:.3f}'.format(epoch + 1, MAX_EPOCH, i + 1, len(train_loader),
errD.item(), errG.item()))
if epoch == 0 and i == 0:
vutils.save_image(real_img, SAVE_DIR + '/real_sample.png', normalize=True, nrow=10)
fake_img = train_gan.outimg()
vutils.save_image(fake_img.detach(), SAVE_DIR + '/fake_samples_epoch_{:03d}.png'.format(epoch+1), normalize=True,
nrow=10)
|
import json
import typing as t
from datetime import datetime
from aio_anyrun import const as cst
class BaseCollection:
def __init__(self, raw_data: dict):
self.raw_data = raw_data
self._ignores = ['items', 'json', 'raw_data', 'keys', 'values']
self.properties = [prop for prop in dir(self) if not prop.startswith('_') and prop not in self._ignores]
def json(self):
return json.dumps(self.raw_data, indent=4)
def __str__(self):
        return f'{self.__class__.__name__}({ ", ".join([f"{k}={v}" for k, v in self.items()]) })'
def __repr__(self):
return self.__str__()
def __getitem__(self, key):
return self.raw_data.get(key)
def items(self):
for prop in self.properties:
yield prop, getattr(self, prop)
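    # Note: because `properties` is discovered via dir() in __init__, any @property defined
    # on a subclass (e.g. Task below) is picked up by items() and __str__ automatically,
    # without the subclass having to register anything.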
class Task(BaseCollection):
@property
def threat_level(self) -> int:
return self.raw_data['scores']['verdict']['threat_level']
@property
def verdict(self) -> str:
level = self.threat_level
for k, v in cst.VERDICTS.data.items():
if v == level:
return k
return ''
@property
def tags(self) -> t.List[str]:
return self.raw_data['tags']
@property
def task_uuid(self) -> str:
return self.raw_data['uuid']
@property
def os_version(self) -> dict:
return self.raw_data['public']['environment']['OS']
@property
def run_type(self) -> str:
return self.raw_data['public']['objects']['runType']
@property
def main_object(self) -> dict:
return self.raw_data['public']['objects']['mainObject']
@property
def hashes(self) -> dict:
return self.main_object['hashes']
@property
def md5(self) -> str:
return self.hashes['md5']
@property
def sha1(self) -> str:
return self.hashes['sha1']
@property
def sha256(self) -> str:
return self.hashes['sha256']
@property
def object_uuid(self) -> str:
return self.main_object['uuid']
@property
def names(self) -> dict:
return self.main_object['names']
@property
def name(self) -> str:
if self.run_type == 'file':
return self.names['basename']
else:
return self.names['url']
@property
def info(self) -> dict:
return self.main_object['info']
@property
def file_type(self) -> t.Optional[str]:
if self.run_type != 'url':
return self.info['meta']['file']
@property
def mime_type(self) -> t.Optional[str]:
if self.run_type != 'url':
return self.info['meta']['mime']
@property
def exif(self) -> t.Optional[dict]:
if self.run_type != 'url':
return self.info['meta']['exif']
@property
def ole(self) -> t.Optional[str]:
if self.run_type != 'url':
return self.info['meta']['ole']
@property
def is_downloadable(self) -> bool:
return self.run_type != 'url'
StrOrInt = t.Union[int, str]
REPUTATION_TABLE: t.Dict[int, str] = {
0: 'unknown',
1: 'suspicious',
2: 'malicious',
3: 'whitelisted',
4: 'unsafe'
}
class IoCObject(BaseCollection):
@property
def category(self):
return self.raw_data.get('category')
@property
def types(self):
return self.raw_data.get('type')
@property
def ioc(self):
return self.raw_data.get('ioc')
@property
def reputation(self):
return REPUTATION_TABLE[self.raw_data['reputation']]
@property
def name(self):
return self.raw_data.get('name')
class IoC(BaseCollection):
''' Class to represent IoC information.
'''
@staticmethod
def _parse(obj: t.Optional[dict]) -> t.List[IoCObject]:
if obj is None:
return []
return [IoCObject(o) for o in obj]
@property
def main_objects(self) -> t.List[IoCObject]:
return self._parse(self.raw_data['Main object'])
@property
def dropped_files(self) -> t.List[IoCObject]:
return self._parse(self.raw_data.get('Dropped executable file'))
@property
def dns(self) -> t.List[IoCObject]:
return self._parse(self.raw_data.get('DNS requests'))
@property
def connections(self) -> t.List[IoCObject]:
return self._parse(self.raw_data.get('Connections'))
class MITRE_Attack(BaseCollection):
@property
def _external_references(self) -> t.Optional[t.List[dict]]:
return self.raw_data.get('external_references')
@property
def mitre_url(self) -> t.Optional[str]:
        for ref in self._external_references or []:
if ref.get('source_name') == 'mitre-attack':
return ref.get('url')
return ''
@property
def technique(self) -> t.Optional[str]:
return self.raw_data.get('technique')
@property
def name(self) -> t.Optional[str]:
return self.raw_data.get('name')
@property
def mitre_detection(self) -> t.Optional[str]:
return self.raw_data.get('x_mitre_detection')
@property
def platforms(self) -> t.Optional[t.List[str]]:
return self.raw_data.get('x_mitre_platforms')
@property
def kill_chain_phases(self) -> t.Optional[t.List[dict]]:
return self.raw_data.get('kill_chain_phases')
@property
def description(self) -> t.Optional[str]:
return self.raw_data.get('description')
@property
def mitre_data_sources(self) -> t.Optional[t.List[str]]:
return self.raw_data.get('x_mitre_data_sources')
@property
def created(self) -> t.Optional[datetime]:
if self.raw_data.get('created'):
return datetime.strptime(self.raw_data['created'], '%Y-%m-%dT%H:%M:%S.%f%z')
|
class PatchServerException(Exception):
pass
class Unauthorized(PatchServerException):
pass
class InvalidPatchDefinitionError(PatchServerException):
pass
class SoftwareTitleNotFound(PatchServerException):
pass
class InvalidWebhook(PatchServerException):
pass
class PatchArchiveRestoreFailure(PatchServerException):
pass
|
from pandas import Timestamp
from google.protobuf import timestamp_pb2 as pr_ts
from zipline.assets import ExchangeInfo, Equity, Future
from zipline.finance.order import Order
from zipline.finance.transaction import Transaction
from zipline.finance.position import Position
from zipline import protocol
from protos import assets_pb2 as pr_asset
from protos import controller_pb2 as ctl
from protos import finance_pb2 as fin
from protos import metrics_pb2 as metrics
from protos import protocol_pb2 as pr
from protos import broker_pb2
def datetime_from_bytes(bytes_):
    ts = pr_ts.Timestamp()
    ts.ParseFromString(bytes_)
    return to_datetime(ts)
def to_proto_timestamp(dt):
"""
Parameters
----------
dt: pandas.Timestamp
Returns
-------
google.protobuf.timestamp_pb2.Timestamp
"""
ts = pr_ts.Timestamp()
ts.FromDatetime(dt)
return ts
def to_datetime(proto_ts):
return Timestamp(proto_ts.ToDatetime(), tz='UTC')
def to_pandas_timestamp(protos_ts, tz=None):
return Timestamp(to_datetime(protos_ts)).tz_localize(tz)
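# Hedged round-trip sketch (comment only; the specific timestamp is an illustrative
# assumption, not from this module):
#   proto = to_proto_timestamp(Timestamp("2020-01-02 03:04:05", tz="UTC"))
#   assert to_datetime(proto) == Timestamp("2020-01-02 03:04:05", tz="UTC")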
def to_proto_commission(zp_commission):
return broker_pb2.Commission(
asset=to_proto_asset(zp_commission['asset']),
order=to_proto_order(zp_commission['order']),
cost=zp_commission['cost']
)
def from_proto_commission(proto_commission):
return {
'asset': to_zp_asset(proto_commission.asset),
        'order': to_zp_order(proto_commission.order),
'cost': proto_commission.cost
}
_proto_asset_types = {
Equity: 'equity',
Future: 'future'
}
_zp_asset_types = {
'equity': Equity,
'future': Future
}
def to_proto_asset(zp_asset):
asset_type = type(zp_asset)
kwargs = {
'type': _proto_asset_types[type(zp_asset)],
'sid': zp_asset.sid,
'symbol': zp_asset.symbol,
'asset_name': zp_asset.asset_name,
'start_date': to_proto_timestamp(zp_asset.start_date),
'end_date': to_proto_timestamp(zp_asset.end_date),
'first_traded': to_proto_timestamp(zp_asset.start_date),
'auto_close_date': to_proto_timestamp(zp_asset.auto_close_date),
'exchange': zp_asset.exchange,
'exchange_full': zp_asset.exchange_full,
'country_code': zp_asset.country_code,
'tick_size': zp_asset.tick_size,
'multiplier': zp_asset.price_multiplier
}
if asset_type == Future:
kwargs.update({
'root_symbol': zp_asset.root_symbol,
'expiration_date': to_proto_timestamp(zp_asset.expiration_date),
'notice_date': to_proto_timestamp(zp_asset.notice_date)
})
return pr_asset.Asset(**kwargs)
def to_zp_asset(pr_asset):
kwargs = {
'sid': pr_asset.sid,
'exchange_info': ExchangeInfo(
pr_asset.exchange_full,
pr_asset.exchange,
pr_asset.country_code
),
'symbol': pr_asset.symbol,
'asset_name': pr_asset.asset_name,
'start_date': to_datetime(pr_asset.start_date),
'end_date': to_datetime(pr_asset.end_date),
'first_traded': to_datetime(pr_asset.first_traded),
'auto_close_date': to_datetime(pr_asset.auto_close_date),
'tick_size': pr_asset.tick_size,
'multiplier': pr_asset.multiplier
}
asset_type = pr_asset.type
if asset_type == 'future':
kwargs.update(
{
'root_symbol': pr_asset.root_symbol,
                'expiration_date': to_datetime(pr_asset.expiration_date),
                'notice_date': to_datetime(pr_asset.notice_date)
}
)
return _zp_asset_types[asset_type](**kwargs)
# def to_zp_execution_style(proto_order_params):
# style = proto_order_params.style
# asset = proto_order_params.asset
# if style == 'stop_limit':
# return execution.StopLimitOrder(
# proto_order_params.limit_price,
# proto_order_params.stop_price,
# to_zp_asset(asset),
# exchange=proto_order_params.exchange)
# elif style == 'market':
# return execution.MarketOrder(
# exchange=proto_order_params.exchange)
# elif style == 'limit':
# return execution.LimitOrder(
# proto_order_params.limit_price,
# to_zp_asset(asset),
# exchange=proto_order_params.exchange)
# elif style == 'stop':
# return execution.StopOrder(
# proto_order_params.stop_price,
# asset=to_zp_asset(asset)
# )
# else:
# raise ValueError('Unexpected order style {}'.format(style))
# def to_proto_order_params(asset, amount, style, order_id=None):
# ast = to_proto_asset(asset)
# if type(style) == execution.LimitOrder:
# return broker_pb2.OrderParams(
# asset=ast,
# style='limit',
# amount=amount,
# limit_price=style.limit_price,
# exchange=style.exchange)
# elif type(style) == execution.MarketOrder:
# return broker_pb2.OrderParams(
# asset=ast,
# style='market',
# amount=amount,
# exchange=style.exchange)
# elif type(style) == execution.StopOrder:
# return broker_pb2.OrderParams(
# asset=ast,
# style='stop',
# amount=amount,
# stop_price=style.stop_price,
# exchange=style.exchange)
# elif type(style) == execution.StopLimitOrder:
# return broker_pb2.OrderParams(
# asset=ast,
# style='stop_limit',
# amount=amount,
# stop_price=style.stop_price,
# limit_price=style.limit_price)
# else:
# raise ValueError('Unexpected order style {}'.format(type(style)))
def to_zp_order(proto_order):
stop = proto_order.stop
limit = proto_order.limit
return Order(
dt=to_datetime(proto_order.dt),
asset=to_zp_asset(proto_order.asset),
amount=proto_order.amount,
stop=stop.value if stop.is_set else None,
limit=limit.value if limit.is_set else None,
filled=proto_order.filled,
commission=proto_order.commission,
id=proto_order.id
)
def to_zp_transaction(proto_transaction):
return Transaction(
to_zp_asset(proto_transaction.asset),
proto_transaction.amount,
to_datetime(proto_transaction.dt),
proto_transaction.price,
proto_transaction.order_id)
def to_zp_position(proto_position):
return Position(
to_zp_asset(proto_position.asset),
proto_position.amount,
proto_position.cost_basis,
proto_position.last_sale_price,
to_datetime(proto_position.last_sale_date)
).to_dict()
def to_zp_portfolio(proto_portfolio):
portfolio = protocol.MutableView(protocol.Portfolio(to_datetime(proto_portfolio.start_date)))
portfolio.cash = proto_portfolio.cash
portfolio.cash_flow = proto_portfolio.cash_flow
portfolio.starting_cash = proto_portfolio.starting_cash
portfolio.pnl = proto_portfolio.pnl
portfolio.portfolio_value = proto_portfolio.portfolio_value
portfolio.returns = proto_portfolio.returns
    portfolio.positions = {
        to_zp_asset(pair.key): to_zp_position(pair.position)
        for pair in proto_portfolio.positions
    }
    portfolio.positions_value = proto_portfolio.positions_value
    portfolio.positions_exposure = proto_portfolio.positions_exposure
    return portfolio
def to_zp_account(proto_account):
pass
def to_proto_position(zp_position):
"""
Parameters
----------
zp_position : zipline.finance.position.Position
Returns
-------
"""
return pr.Position(
asset=to_proto_asset(zp_position['sid']),
amount=zp_position['amount'],
cost_basis=zp_position['cost_basis'],
last_sale_price=zp_position['last_sale_price']
)
def to_proto_portfolio(zp_portfolio):
"""
Parameters
----------
zp_portfolio : protocol.Portfolio
Returns
-------
"""
return pr.Portfolio(
cash_flow=zp_portfolio.cash_flow,
starting_cash=zp_portfolio.starting_cash,
portfolio_value=zp_portfolio.portfolio_value,
pnl=zp_portfolio.pnl,
returns=zp_portfolio.returns,
cash=zp_portfolio.cash,
positions=[
pr.AssetPositionPair(
key=to_proto_asset(asset),
position=to_proto_position(position))
for asset, position in zp_portfolio.positions.items()],
start_date=to_proto_timestamp(zp_portfolio.start_date),
positions_value=zp_portfolio.positions_value,
positions_exposure=zp_portfolio.positions_exposure)
def to_proto_account(zp_account):
"""
Parameters
----------
zp_account : protocol.Account
Returns
-------
"""
return pr.Account(
)
def to_proto_order(zp_order):
limit = zp_order['limit']
stop = zp_order['stop']
return pr.Order(
dt=to_proto_timestamp(zp_order['dt']),
asset=to_proto_asset(zp_order['sid']),
amount=zp_order['amount'],
        stop=pr.Order.Stop(
            is_set=stop is not None,
            value=stop if stop is not None else 0.0
        ),
        limit=pr.Order.Limit(
            is_set=limit is not None,
            value=limit if limit is not None else 0.0
        ),
filled=zp_order['filled'],
commission=zp_order['commission'],
id=zp_order['id']
)
def to_proto_transaction(zp_transaction):
return fin.Transaction(
asset=to_proto_asset(zp_transaction['sid']),
amount=zp_transaction['amount'],
dt=to_proto_timestamp(zp_transaction['dt']),
price=zp_transaction['price'],
order_id=zp_transaction['order_id'])
def from_proto_cum_metrics(cum_metrics):
return {
'period_open': to_datetime(cum_metrics.period_open),
'period_close': to_datetime(cum_metrics.period_close),
'returns': cum_metrics.returns,
'pnl': cum_metrics.pnl,
# 'cash_flow': cum_metrics.cash_flow,
'capital_used': cum_metrics.capital_used,
'starting_exposure': cum_metrics.starting_exposure,
'starting_value': cum_metrics.starting_value,
'ending_value': cum_metrics.ending_value,
'ending_exposure': cum_metrics.ending_exposure,
'starting_cash': cum_metrics.starting_cash,
'ending_cash': cum_metrics.ending_cash,
'portfolio_value': cum_metrics.portfolio_value,
'longs_count': cum_metrics.longs_count,
'shorts_count': cum_metrics.shorts_count,
'long_value': cum_metrics.long_value,
'short_value': cum_metrics.short_value,
'long_exposure': cum_metrics.long_exposure,
'short_exposure': cum_metrics.short_exposure,
'gross_leverage': cum_metrics.gross_leverage,
'net_leverage': cum_metrics.net_leverage
}
def from_proto_cum_risk_metrics(cum_risk_metrics):
return {
'algo_volatility': cum_risk_metrics.algo_volatility,
'benchmark_period_return': cum_risk_metrics.benchmark_period_return,
'benchmark_volatility': cum_risk_metrics.benchmark_volatility,
'algorithm_period_return': cum_risk_metrics.algorithm_period_return,
'alpha': cum_risk_metrics.alpha,
'beta': cum_risk_metrics.beta,
'sharpe': cum_risk_metrics.sharpe,
'sortino': cum_risk_metrics.sortino,
'max_drawdown': cum_risk_metrics.max_drawdown,
'max_leverage': cum_risk_metrics.max_leverage,
'trading_days': cum_risk_metrics.trading_days,
'period_label': cum_risk_metrics.period_label,
'excess_return': cum_risk_metrics.excess_return,
'treasury_period_return': cum_risk_metrics.treasury_period_return
}
def from_proto_period_metrics(period_metrics):
return {
'orders': [to_zp_order(order) for order in period_metrics.orders],
'transactions': [to_zp_transaction(trx) for trx in period_metrics.transactions],
'positions': [to_zp_position(pos) for pos in period_metrics.positions],
'period_open': to_datetime(period_metrics.period_open),
'period_close': to_datetime(period_metrics.period_close),
'capital_used': period_metrics.capital_used,
'starting_exposure': period_metrics.starting_exposure,
'ending_exposure': period_metrics.ending_exposure,
'starting_value': period_metrics.starting_value,
'starting_cash': period_metrics.starting_cash,
'returns': period_metrics.returns,
'pnl': period_metrics.pnl
}
def from_proto_performance_packet(proto_perf_packet):
return {
'cumulative_perf': from_proto_cum_metrics(proto_perf_packet.cumulative_perf),
proto_perf_packet.packet_type: from_proto_period_metrics(proto_perf_packet.period_perf),
'cumulative_risk_metrics': from_proto_cum_risk_metrics(proto_perf_packet.cumulative_risk_metrics)
}
def to_proto_cum_metrics(cum_perf):
return metrics.CumulativeMetrics(
period_open=to_proto_timestamp(cum_perf['period_open']),
period_close=to_proto_timestamp(cum_perf['period_close']),
returns=cum_perf['returns'],
pnl=cum_perf['pnl'],
capital_used=cum_perf['capital_used'],
# cash_flow=cum_perf['cash_flow'],
starting_exposure=cum_perf['starting_exposure'],
ending_exposure=cum_perf['ending_exposure'],
starting_value=cum_perf['starting_value'],
ending_value=cum_perf['ending_value'],
starting_cash=cum_perf['starting_cash'],
ending_cash=cum_perf['ending_cash'],
portfolio_value=cum_perf['portfolio_value'],
longs_count=cum_perf['longs_count'],
shorts_count=cum_perf['shorts_count'],
long_value=cum_perf['long_value'],
short_value=cum_perf['short_value'],
long_exposure=cum_perf['long_exposure'],
short_exposure=cum_perf['short_exposure'],
gross_leverage=cum_perf['gross_leverage'],
net_leverage=cum_perf['net_leverage']
)
def to_proto_period_perf(period_perf):
return metrics.PeriodMetrics(
orders=[to_proto_order(order) for order in period_perf['orders']],
transactions=[to_proto_transaction(trc) for trc in period_perf['transactions']],
positions=[to_proto_position(pos) for pos in period_perf['positions']],
period_open=to_proto_timestamp(period_perf['period_open']),
period_close=to_proto_timestamp(period_perf['period_close']),
capital_used=period_perf['capital_used'],
starting_exposure=period_perf['starting_exposure'],
ending_exposure=period_perf['ending_exposure'],
starting_value=period_perf['starting_value'],
starting_cash=period_perf['starting_cash'],
returns=period_perf['returns'],
pnl=period_perf['pnl']
)
def to_proto_cum_risk_metrics(cum_risk_metrics):
return metrics.CumulativeRiskMetrics(
algo_volatility=cum_risk_metrics['algo_volatility'],
benchmark_period_return=cum_risk_metrics['benchmark_period_return'],
benchmark_volatility=cum_risk_metrics['benchmark_volatility'],
algorithm_period_return=cum_risk_metrics['algorithm_period_return'],
alpha=cum_risk_metrics['alpha'],
beta=cum_risk_metrics['beta'],
sharpe=cum_risk_metrics['sharpe'],
sortino=cum_risk_metrics['sortino'],
max_drawdown=cum_risk_metrics['max_drawdown'],
max_leverage=cum_risk_metrics['max_leverage'],
trading_days=cum_risk_metrics['trading_days'],
period_label=cum_risk_metrics['period_label'],
excess_return=cum_risk_metrics['excess_return'],
treasury_period_return=cum_risk_metrics['treasury_period_return']
)
def to_proto_performance_packet(perf_packet):
period_perf = perf_packet.get('daily_perf', None)
key = 'daily_perf'
if not period_perf:
period_perf = perf_packet.get('minute_perf')
key = 'minute_perf'
return ctl.PerformancePacket(
cumulative_perf=to_proto_cum_metrics(perf_packet['cumulative_perf']),
period_perf=to_proto_period_perf(period_perf),
cumulative_risk_metrics=to_proto_cum_risk_metrics(perf_packet['cumulative_risk_metrics']),
packet_type=key
)
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for fairness_indicators.examples.util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
from fairness_indicators.examples import util
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
class UtilTest(tf.test.TestCase):
def _create_example(self):
example = text_format.Parse(
"""
features {
feature { key: "comment_text"
value { bytes_list { value: [ "comment 1" ] }}
}
feature { key: "toxicity" value { float_list { value: [ 0.1 ] }}}
feature { key: "heterosexual" value { float_list { value: [ 0.1 ] }}}
feature { key: "homosexual_gay_or_lesbian"
value { float_list { value: [ 0.1 ] }}
}
feature { key: "bisexual" value { float_list { value: [ 0.5 ] }}}
feature { key: "other_sexual_orientation"
value { float_list { value: [ 0.1 ] }}
}
feature { key: "male" value { float_list { value: [ 0.1 ] }}}
feature { key: "female" value { float_list { value: [ 0.2 ] }}}
feature { key: "transgender" value { float_list { value: [ 0.3 ] }}}
feature { key: "other_gender" value { float_list { value: [ 0.4 ] }}}
feature { key: "christian" value { float_list { value: [ 0.0 ] }}}
feature { key: "jewish" value { float_list { value: [ 0.1 ] }}}
feature { key: "muslim" value { float_list { value: [ 0.2 ] }}}
feature { key: "hindu" value { float_list { value: [ 0.3 ] }}}
feature { key: "buddhist" value { float_list { value: [ 0.4 ] }}}
feature { key: "atheist" value { float_list { value: [ 0.5 ] }}}
feature { key: "other_religion"
value { float_list { value: [ 0.6 ] }}
}
feature { key: "black" value { float_list { value: [ 0.1 ] }}}
feature { key: "white" value { float_list { value: [ 0.2 ] }}}
feature { key: "asian" value { float_list { value: [ 0.3 ] }}}
feature { key: "latino" value { float_list { value: [ 0.4 ] }}}
feature { key: "other_race_or_ethnicity"
value { float_list { value: [ 0.5 ] }}
}
feature { key: "physical_disability"
value { float_list { value: [ 0.6 ] }}
}
feature { key: "intellectual_or_learning_disability"
value { float_list { value: [ 0.7 ] }}
}
feature { key: "psychiatric_or_mental_illness"
value { float_list { value: [ 0.8 ] }}
}
feature { key: "other_disability"
value { float_list { value: [ 1.0 ] }}
}
}
""", tf.train.Example())
empty_comment_example = text_format.Parse(
"""
features {
feature { key: "comment_text"
value { bytes_list {} }
}
feature { key: "toxicity" value { float_list { value: [ 0.1 ] }}}
}
""", tf.train.Example())
return [example, empty_comment_example]
def _write_tf_records(self, examples):
filename = os.path.join(tempfile.mkdtemp(), 'input')
with tf.io.TFRecordWriter(filename) as writer:
for e in examples:
writer.write(e.SerializeToString())
return filename
def test_convert_data(self):
input_file = self._write_tf_records(self._create_example())
output_file = util.convert_comments_data(input_file)
output_example_list = []
for serialized in tf.data.TFRecordDataset(filenames=[output_file]):
output_example = tf.train.Example()
output_example.ParseFromString(serialized.numpy())
output_example_list.append(output_example)
self.assertEqual(len(output_example_list), 1)
self.assertEqual(
output_example_list[0],
text_format.Parse(
"""
features {
feature { key: "comment_text"
value { bytes_list {value: [ "comment 1" ] }}
}
feature { key: "toxicity" value { float_list { value: [ 0.0 ] }}}
feature { key: "sexual_orientation"
value { bytes_list { value: ["bisexual"] }}
}
feature { key: "gender" value { bytes_list { }}}
feature { key: "race"
value { bytes_list { value: [ "other_race_or_ethnicity" ] }}
}
feature { key: "religion"
value { bytes_list {
value: [ "atheist", "other_religion" ] }
}
}
feature { key: "disability" value { bytes_list {
value: [
"physical_disability",
"intellectual_or_learning_disability",
"psychiatric_or_mental_illness",
"other_disability"] }}
}
}
""", tf.train.Example()))
if __name__ == '__main__':
tf.enable_eager_execution()
tf.test.main()
|
T2_VERSION = "0.5.0-SNAPSHOT"
|
import enum
class RecordsetType(enum.Enum):
worksheet = 'worksheet'
table = 'table'
view = 'view'
|
import numpy as np
from .VariableUnitTest import VariableUnitTest
from gwlfe.MultiUse_Fxns.Runoff import AgRunoff
class TestAgRunoff(VariableUnitTest):
# @skip("not ready")
def test_AgRunoff(self):
z = self.z
np.testing.assert_array_almost_equal(
AgRunoff.AgRunoff_f(z.NYrs, z.DaysMonth, z.Temp, z.InitSnow_0, z.Prec, z.NRur, z.CN, z.AntMoist_0, z.NUrb,
z.Grow_0, z.Landuse, z.Area),
AgRunoff.AgRunoff(z.NYrs, z.DaysMonth, z.Temp, z.InitSnow_0, z.Prec, z.NRur, z.CN, z.AntMoist_0, z.NUrb,
z.Grow_0, z.Landuse, z.Area), decimal=7)
|
#!/usr/bin/python
import sys
import Quartz
d = Quartz.CGSessionCopyCurrentDictionary()
# we want to return 0, not 1, if a session is active
sys.exit(not (d and
d.get("CGSSessionScreenIsLocked", 0) == 0 and
d.get("kCGSSessionOnConsoleKey", 0) == 1))
|
print("Why is Rohan such a smooth criminal?")
|
from setuptools import setup, find_packages
with open("DOCUMENTATION.md", "r") as fh:
long_description = fh.read()
setup(name="neowise",
version='0.1.0',
description="A Deep Learning library built from scratch using Python and NumPy",
author="Pranav Sastry",
author_email="pranava.sri@gmail.com",
long_description=long_description,
long_description_content_type='text/markdown',
maintainer="Pranav Sastry",
url="https://github.com/pranavsastry/neowise",
packages=find_packages(),
install_requires=['numpy', 'matplotlib', 'hdfdict', 'prettytable', 'tqdm'],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License"
])
|
import cv2
class Webcam(object):
def __init__(self, cam_id=0):
self.cam = cv2.VideoCapture(cam_id)
def getFrame(self):
return self.cam.read()
|
"""
Common type operations.
"""
from typing import Any, Callable, Union
import warnings
import numpy as np
from pandas._libs import algos
from pandas._libs.tslibs import conversion
from pandas._typing import ArrayLike, DtypeObj
from pandas.core.dtypes.dtypes import (
CategoricalDtype,
DatetimeTZDtype,
ExtensionDtype,
IntervalDtype,
PeriodDtype,
registry,
)
from pandas.core.dtypes.generic import ABCCategorical, ABCIndexClass
from pandas.core.dtypes.inference import ( # noqa:F401
is_array_like,
is_bool,
is_complex,
is_dataclass,
is_decimal,
is_dict_like,
is_file_like,
is_float,
is_hashable,
is_integer,
is_interval,
is_iterator,
is_list_like,
is_named_tuple,
is_nested_list_like,
is_number,
is_re,
is_re_compilable,
is_scalar,
is_sequence,
)
_POSSIBLY_CAST_DTYPES = {
np.dtype(t).name
for t in [
"O",
"int8",
"uint8",
"int16",
"uint16",
"int32",
"uint32",
"int64",
"uint64",
]
}
DT64NS_DTYPE = conversion.DT64NS_DTYPE
TD64NS_DTYPE = conversion.TD64NS_DTYPE
_INT64_DTYPE = np.dtype(np.int64)
# oh the troubles to reduce import time
_is_scipy_sparse = None
ensure_float64 = algos.ensure_float64
ensure_float32 = algos.ensure_float32
_ensure_datetime64ns = conversion.ensure_datetime64ns
_ensure_timedelta64ns = conversion.ensure_timedelta64ns
def ensure_float(arr):
"""
Ensure that an array object has a float dtype if possible.
Parameters
----------
arr : array-like
The array whose data type we want to enforce as float.
Returns
-------
float_arr : The original array cast to the float dtype if
possible. Otherwise, the original array is returned.
"""
if issubclass(arr.dtype.type, (np.integer, np.bool_)):
arr = arr.astype(float)
return arr
ensure_uint64 = algos.ensure_uint64
ensure_int64 = algos.ensure_int64
ensure_int32 = algos.ensure_int32
ensure_int16 = algos.ensure_int16
ensure_int8 = algos.ensure_int8
ensure_platform_int = algos.ensure_platform_int
ensure_object = algos.ensure_object
def ensure_str(value: Union[bytes, Any]) -> str:
"""
Ensure that bytes and non-strings get converted into ``str`` objects.
"""
if isinstance(value, bytes):
value = value.decode("utf-8")
elif not isinstance(value, str):
value = str(value)
return value
def ensure_categorical(arr):
"""
Ensure that an array-like object is a Categorical (if not already).
Parameters
----------
arr : array-like
The array that we want to convert into a Categorical.
Returns
-------
cat_arr : The original array cast as a Categorical. If it already
is a Categorical, we return as is.
"""
if not is_categorical_dtype(arr.dtype):
from pandas import Categorical
arr = Categorical(arr)
return arr
def ensure_int_or_float(arr: ArrayLike, copy: bool = False) -> np.ndarray:
"""
    Ensure that an array of some integer dtype
has an int64 dtype if possible.
If it's not possible, potentially because of overflow,
convert the array to float64 instead.
Parameters
----------
arr : array-like
The array whose data type we want to enforce.
copy: bool
Whether to copy the original array or reuse
it in place, if possible.
Returns
-------
out_arr : The input array cast as int64 if
possible without overflow.
Otherwise the input array cast to float64.
Notes
-----
If the array is explicitly of type uint64 the type
will remain unchanged.
"""
# TODO: GH27506 potential bug with ExtensionArrays
try:
return arr.astype("int64", copy=copy, casting="safe") # type: ignore
except TypeError:
pass
try:
return arr.astype("uint64", copy=copy, casting="safe") # type: ignore
except TypeError:
if is_extension_array_dtype(arr.dtype):
return arr.to_numpy(dtype="float64", na_value=np.nan)
return arr.astype("float64", copy=copy)
def ensure_python_int(value: Union[int, np.integer]) -> int:
"""
Ensure that a value is a python int.
Parameters
----------
value: int or numpy.integer
Returns
-------
int
Raises
------
TypeError: if the value isn't an int or can't be converted to one.
"""
if not is_scalar(value):
raise TypeError(
f"Value needs to be a scalar value, was type {type(value).__name__}"
)
try:
new_value = int(value)
assert new_value == value
except (TypeError, ValueError, AssertionError) as err:
raise TypeError(f"Wrong type {type(value)} for value {value}") from err
return new_value
def classes(*klasses) -> Callable:
""" evaluate if the tipo is a subclass of the klasses """
return lambda tipo: issubclass(tipo, klasses)
def classes_and_not_datetimelike(*klasses) -> Callable:
"""
evaluate if the tipo is a subclass of the klasses
and not a datetimelike
"""
return lambda tipo: (
issubclass(tipo, klasses)
and not issubclass(tipo, (np.datetime64, np.timedelta64))
)
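# Note: these two helpers build the dtype predicates handed to _is_dtype_type further
# below, e.g. is_object_dtype uses classes(np.object_) while is_integer_dtype uses
# classes_and_not_datetimelike(np.integer).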
def is_object_dtype(arr_or_dtype) -> bool:
"""
Check whether an array-like or dtype is of the object dtype.
Parameters
----------
arr_or_dtype : array-like
The array-like or dtype to check.
Returns
-------
boolean
Whether or not the array-like or dtype is of the object dtype.
Examples
--------
>>> is_object_dtype(object)
True
>>> is_object_dtype(int)
False
>>> is_object_dtype(np.array([], dtype=object))
True
>>> is_object_dtype(np.array([], dtype=int))
False
>>> is_object_dtype([1, 2, 3])
False
"""
return _is_dtype_type(arr_or_dtype, classes(np.object_))
def is_sparse(arr) -> bool:
"""
Check whether an array-like is a 1-D pandas sparse array.
Check that the one-dimensional array-like is a pandas sparse array.
Returns True if it is a pandas sparse array, not another type of
sparse array.
Parameters
----------
arr : array-like
Array-like to check.
Returns
-------
bool
Whether or not the array-like is a pandas sparse array.
Examples
--------
Returns `True` if the parameter is a 1-D pandas sparse array.
>>> is_sparse(pd.arrays.SparseArray([0, 0, 1, 0]))
True
>>> is_sparse(pd.Series(pd.arrays.SparseArray([0, 0, 1, 0])))
True
Returns `False` if the parameter is not sparse.
>>> is_sparse(np.array([0, 0, 1, 0]))
False
>>> is_sparse(pd.Series([0, 1, 0, 0]))
False
Returns `False` if the parameter is not a pandas sparse array.
>>> from scipy.sparse import bsr_matrix
>>> is_sparse(bsr_matrix([0, 1, 0, 0]))
False
Returns `False` if the parameter has more than one dimension.
"""
from pandas.core.arrays.sparse import SparseDtype
dtype = getattr(arr, "dtype", arr)
return isinstance(dtype, SparseDtype)
def is_scipy_sparse(arr) -> bool:
"""
Check whether an array-like is a scipy.sparse.spmatrix instance.
Parameters
----------
arr : array-like
The array-like to check.
Returns
-------
boolean
Whether or not the array-like is a scipy.sparse.spmatrix instance.
Notes
-----
If scipy is not installed, this function will always return False.
Examples
--------
>>> from scipy.sparse import bsr_matrix
>>> is_scipy_sparse(bsr_matrix([1, 2, 3]))
True
>>> is_scipy_sparse(pd.arrays.SparseArray([1, 2, 3]))
False
"""
global _is_scipy_sparse
if _is_scipy_sparse is None:
try:
from scipy.sparse import issparse as _is_scipy_sparse
except ImportError:
_is_scipy_sparse = lambda _: False
assert _is_scipy_sparse is not None
return _is_scipy_sparse(arr)
def is_categorical(arr) -> bool:
"""
Check whether an array-like is a Categorical instance.
Parameters
----------
arr : array-like
The array-like to check.
Returns
-------
boolean
Whether or not the array-like is of a Categorical instance.
Examples
--------
>>> is_categorical([1, 2, 3])
False
Categoricals, Series Categoricals, and CategoricalIndex will return True.
>>> cat = pd.Categorical([1, 2, 3])
>>> is_categorical(cat)
True
>>> is_categorical(pd.Series(cat))
True
>>> is_categorical(pd.CategoricalIndex([1, 2, 3]))
True
"""
warnings.warn(
"is_categorical is deprecated and will be removed in a future version. "
"Use is_categorical_dtype instead",
FutureWarning,
stacklevel=2,
)
return isinstance(arr, ABCCategorical) or is_categorical_dtype(arr)
def is_datetime64_dtype(arr_or_dtype) -> bool:
"""
Check whether an array-like or dtype is of the datetime64 dtype.
Parameters
----------
arr_or_dtype : array-like
The array-like or dtype to check.
Returns
-------
boolean
Whether or not the array-like or dtype is of the datetime64 dtype.
Examples
--------
>>> is_datetime64_dtype(object)
False
>>> is_datetime64_dtype(np.datetime64)
True
>>> is_datetime64_dtype(np.array([], dtype=int))
False
>>> is_datetime64_dtype(np.array([], dtype=np.datetime64))
True
>>> is_datetime64_dtype([1, 2, 3])
False
"""
return _is_dtype_type(arr_or_dtype, classes(np.datetime64))
def is_datetime64tz_dtype(arr_or_dtype) -> bool:
"""
Check whether an array-like or dtype is of a DatetimeTZDtype dtype.
Parameters
----------
arr_or_dtype : array-like
The array-like or dtype to check.
Returns
-------
boolean
Whether or not the array-like or dtype is of a DatetimeTZDtype dtype.
Examples
--------
>>> is_datetime64tz_dtype(object)
False
>>> is_datetime64tz_dtype([1, 2, 3])
False
>>> is_datetime64tz_dtype(pd.DatetimeIndex([1, 2, 3])) # tz-naive
False
>>> is_datetime64tz_dtype(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern"))
True
>>> dtype = DatetimeTZDtype("ns", tz="US/Eastern")
>>> s = pd.Series([], dtype=dtype)
>>> is_datetime64tz_dtype(dtype)
True
>>> is_datetime64tz_dtype(s)
True
"""
if arr_or_dtype is None:
return False
return DatetimeTZDtype.is_dtype(arr_or_dtype)
def is_timedelta64_dtype(arr_or_dtype) -> bool:
"""
Check whether an array-like or dtype is of the timedelta64 dtype.
Parameters
----------
arr_or_dtype : array-like
The array-like or dtype to check.
Returns
-------
boolean
Whether or not the array-like or dtype is of the timedelta64 dtype.
Examples
--------
>>> is_timedelta64_dtype(object)
False
>>> is_timedelta64_dtype(np.timedelta64)
True
>>> is_timedelta64_dtype([1, 2, 3])
False
>>> is_timedelta64_dtype(pd.Series([], dtype="timedelta64[ns]"))
True
>>> is_timedelta64_dtype('0 days')
False
"""
return _is_dtype_type(arr_or_dtype, classes(np.timedelta64))
def is_period_dtype(arr_or_dtype) -> bool:
"""
Check whether an array-like or dtype is of the Period dtype.
Parameters
----------
arr_or_dtype : array-like
The array-like or dtype to check.
Returns
-------
boolean
Whether or not the array-like or dtype is of the Period dtype.
Examples
--------
>>> is_period_dtype(object)
False
>>> is_period_dtype(PeriodDtype(freq="D"))
True
>>> is_period_dtype([1, 2, 3])
False
>>> is_period_dtype(pd.Period("2017-01-01"))
False
>>> is_period_dtype(pd.PeriodIndex([], freq="A"))
True
"""
# TODO: Consider making Period an instance of PeriodDtype
if arr_or_dtype is None:
return False
return PeriodDtype.is_dtype(arr_or_dtype)
def is_interval_dtype(arr_or_dtype) -> bool:
"""
Check whether an array-like or dtype is of the Interval dtype.
Parameters
----------
arr_or_dtype : array-like
The array-like or dtype to check.
Returns
-------
boolean
Whether or not the array-like or dtype is of the Interval dtype.
Examples
--------
>>> is_interval_dtype(object)
False
>>> is_interval_dtype(IntervalDtype())
True
>>> is_interval_dtype([1, 2, 3])
False
>>>
>>> interval = pd.Interval(1, 2, closed="right")
>>> is_interval_dtype(interval)
False
>>> is_interval_dtype(pd.IntervalIndex([interval]))
True
"""
# TODO: Consider making Interval an instance of IntervalDtype
if arr_or_dtype is None:
return False
return IntervalDtype.is_dtype(arr_or_dtype)
def is_categorical_dtype(arr_or_dtype) -> bool:
"""
Check whether an array-like or dtype is of the Categorical dtype.
Parameters
----------
arr_or_dtype : array-like
The array-like or dtype to check.
Returns
-------
boolean
Whether or not the array-like or dtype is of the Categorical dtype.
Examples
--------
>>> is_categorical_dtype(object)
False
>>> is_categorical_dtype(CategoricalDtype())
True
>>> is_categorical_dtype([1, 2, 3])
False
>>> is_categorical_dtype(pd.Categorical([1, 2, 3]))
True
>>> is_categorical_dtype(pd.CategoricalIndex([1, 2, 3]))
True
"""
if arr_or_dtype is None:
return False
return CategoricalDtype.is_dtype(arr_or_dtype)
def is_string_dtype(arr_or_dtype) -> bool:
"""
Check whether the provided array or dtype is of the string dtype.
Parameters
----------
arr_or_dtype : array-like
The array or dtype to check.
Returns
-------
boolean
Whether or not the array or dtype is of the string dtype.
Examples
--------
>>> is_string_dtype(str)
True
>>> is_string_dtype(object)
True
>>> is_string_dtype(int)
False
>>>
>>> is_string_dtype(np.array(['a', 'b']))
True
>>> is_string_dtype(pd.Series([1, 2]))
False
"""
# TODO: gh-15585: consider making the checks stricter.
def condition(dtype) -> bool:
return dtype.kind in ("O", "S", "U") and not is_excluded_dtype(dtype)
def is_excluded_dtype(dtype) -> bool:
"""
These have kind = "O" but aren't string dtypes so need to be explicitly excluded
"""
is_excluded_checks = (is_period_dtype, is_interval_dtype)
return any(is_excluded(dtype) for is_excluded in is_excluded_checks)
return _is_dtype(arr_or_dtype, condition)
def is_dtype_equal(source, target) -> bool:
"""
Check if two dtypes are equal.
Parameters
----------
source : The first dtype to compare
target : The second dtype to compare
Returns
-------
boolean
Whether or not the two dtypes are equal.
Examples
--------
>>> is_dtype_equal(int, float)
False
>>> is_dtype_equal("int", int)
True
>>> is_dtype_equal(object, "category")
False
>>> is_dtype_equal(CategoricalDtype(), "category")
True
>>> is_dtype_equal(DatetimeTZDtype(tz="UTC"), "datetime64")
False
"""
try:
source = _get_dtype(source)
target = _get_dtype(target)
return source == target
except (TypeError, AttributeError):
# invalid comparison
# object == category will hit this
return False
def is_any_int_dtype(arr_or_dtype) -> bool:
"""
Check whether the provided array or dtype is of an integer dtype.
In this function, timedelta64 instances are also considered "any-integer"
type objects and will return True.
This function is internal and should not be exposed in the public API.
.. versionchanged:: 0.24.0
The nullable Integer dtypes (e.g. pandas.Int64Dtype) are also considered
as integer by this function.
Parameters
----------
arr_or_dtype : array-like
The array or dtype to check.
Returns
-------
boolean
Whether or not the array or dtype is of an integer dtype.
Examples
--------
>>> is_any_int_dtype(str)
False
>>> is_any_int_dtype(int)
True
>>> is_any_int_dtype(float)
False
>>> is_any_int_dtype(np.uint64)
True
>>> is_any_int_dtype(np.datetime64)
False
>>> is_any_int_dtype(np.timedelta64)
True
>>> is_any_int_dtype(np.array(['a', 'b']))
False
>>> is_any_int_dtype(pd.Series([1, 2]))
True
>>> is_any_int_dtype(np.array([], dtype=np.timedelta64))
True
>>> is_any_int_dtype(pd.Index([1, 2.])) # float
False
"""
return _is_dtype_type(arr_or_dtype, classes(np.integer, np.timedelta64))
def is_integer_dtype(arr_or_dtype) -> bool:
"""
Check whether the provided array or dtype is of an integer dtype.
    Unlike in `is_any_int_dtype`, timedelta64 instances will return False.
.. versionchanged:: 0.24.0
The nullable Integer dtypes (e.g. pandas.Int64Dtype) are also considered
as integer by this function.
Parameters
----------
arr_or_dtype : array-like
The array or dtype to check.
Returns
-------
boolean
Whether or not the array or dtype is of an integer dtype and
not an instance of timedelta64.
Examples
--------
>>> is_integer_dtype(str)
False
>>> is_integer_dtype(int)
True
>>> is_integer_dtype(float)
False
>>> is_integer_dtype(np.uint64)
True
>>> is_integer_dtype('int8')
True
>>> is_integer_dtype('Int8')
True
>>> is_integer_dtype(pd.Int8Dtype)
True
>>> is_integer_dtype(np.datetime64)
False
>>> is_integer_dtype(np.timedelta64)
False
>>> is_integer_dtype(np.array(['a', 'b']))
False
>>> is_integer_dtype(pd.Series([1, 2]))
True
>>> is_integer_dtype(np.array([], dtype=np.timedelta64))
False
>>> is_integer_dtype(pd.Index([1, 2.])) # float
False
"""
return _is_dtype_type(arr_or_dtype, classes_and_not_datetimelike(np.integer))
def is_signed_integer_dtype(arr_or_dtype) -> bool:
"""
Check whether the provided array or dtype is of a signed integer dtype.
    Unlike in `is_any_int_dtype`, timedelta64 instances will return False.
.. versionchanged:: 0.24.0
The nullable Integer dtypes (e.g. pandas.Int64Dtype) are also considered
as integer by this function.
Parameters
----------
arr_or_dtype : array-like
The array or dtype to check.
Returns
-------
boolean
Whether or not the array or dtype is of a signed integer dtype
and not an instance of timedelta64.
Examples
--------
>>> is_signed_integer_dtype(str)
False
>>> is_signed_integer_dtype(int)
True
>>> is_signed_integer_dtype(float)
False
>>> is_signed_integer_dtype(np.uint64) # unsigned
False
>>> is_signed_integer_dtype('int8')
True
>>> is_signed_integer_dtype('Int8')
True
>>> is_signed_integer_dtype(pd.Int8Dtype)
True
>>> is_signed_integer_dtype(np.datetime64)
False
>>> is_signed_integer_dtype(np.timedelta64)
False
>>> is_signed_integer_dtype(np.array(['a', 'b']))
False
>>> is_signed_integer_dtype(pd.Series([1, 2]))
True
>>> is_signed_integer_dtype(np.array([], dtype=np.timedelta64))
False
>>> is_signed_integer_dtype(pd.Index([1, 2.])) # float
False
>>> is_signed_integer_dtype(np.array([1, 2], dtype=np.uint32)) # unsigned
False
"""
return _is_dtype_type(arr_or_dtype, classes_and_not_datetimelike(np.signedinteger))
def is_unsigned_integer_dtype(arr_or_dtype) -> bool:
"""
Check whether the provided array or dtype is of an unsigned integer dtype.
.. versionchanged:: 0.24.0
The nullable Integer dtypes (e.g. pandas.UInt64Dtype) are also
considered as integer by this function.
Parameters
----------
arr_or_dtype : array-like
The array or dtype to check.
Returns
-------
boolean
Whether or not the array or dtype is of an unsigned integer dtype.
Examples
--------
>>> is_unsigned_integer_dtype(str)
False
>>> is_unsigned_integer_dtype(int) # signed
False
>>> is_unsigned_integer_dtype(float)
False
>>> is_unsigned_integer_dtype(np.uint64)
True
>>> is_unsigned_integer_dtype('uint8')
True
>>> is_unsigned_integer_dtype('UInt8')
True
>>> is_unsigned_integer_dtype(pd.UInt8Dtype)
True
>>> is_unsigned_integer_dtype(np.array(['a', 'b']))
False
>>> is_unsigned_integer_dtype(pd.Series([1, 2])) # signed
False
>>> is_unsigned_integer_dtype(pd.Index([1, 2.])) # float
False
>>> is_unsigned_integer_dtype(np.array([1, 2], dtype=np.uint32))
True
"""
return _is_dtype_type(
arr_or_dtype, classes_and_not_datetimelike(np.unsignedinteger)
)
def is_int64_dtype(arr_or_dtype) -> bool:
"""
Check whether the provided array or dtype is of the int64 dtype.
Parameters
----------
arr_or_dtype : array-like
The array or dtype to check.
Returns
-------
boolean
Whether or not the array or dtype is of the int64 dtype.
Notes
-----
Depending on system architecture, the return value of `is_int64_dtype(
int)` will be True if the OS uses 64-bit integers and False if the OS
uses 32-bit integers.
Examples
--------
>>> is_int64_dtype(str)
False
>>> is_int64_dtype(np.int32)
False
>>> is_int64_dtype(np.int64)
True
>>> is_int64_dtype('int8')
False
>>> is_int64_dtype('Int8')
False
>>> is_int64_dtype(pd.Int64Dtype)
True
>>> is_int64_dtype(float)
False
>>> is_int64_dtype(np.uint64) # unsigned
False
>>> is_int64_dtype(np.array(['a', 'b']))
False
>>> is_int64_dtype(np.array([1, 2], dtype=np.int64))
True
>>> is_int64_dtype(pd.Index([1, 2.])) # float
False
>>> is_int64_dtype(np.array([1, 2], dtype=np.uint32)) # unsigned
False
"""
return _is_dtype_type(arr_or_dtype, classes(np.int64))
def is_datetime64_any_dtype(arr_or_dtype) -> bool:
"""
Check whether the provided array or dtype is of the datetime64 dtype.
Parameters
----------
arr_or_dtype : array-like
The array or dtype to check.
Returns
-------
bool
Whether or not the array or dtype is of the datetime64 dtype.
Examples
--------
>>> is_datetime64_any_dtype(str)
False
>>> is_datetime64_any_dtype(int)
False
>>> is_datetime64_any_dtype(np.datetime64) # can be tz-naive
True
>>> is_datetime64_any_dtype(DatetimeTZDtype("ns", "US/Eastern"))
True
>>> is_datetime64_any_dtype(np.array(['a', 'b']))
False
>>> is_datetime64_any_dtype(np.array([1, 2]))
False
>>> is_datetime64_any_dtype(np.array([], dtype="datetime64[ns]"))
True
>>> is_datetime64_any_dtype(pd.DatetimeIndex([1, 2, 3], dtype="datetime64[ns]"))
True
"""
if arr_or_dtype is None:
return False
return is_datetime64_dtype(arr_or_dtype) or is_datetime64tz_dtype(arr_or_dtype)
def is_datetime64_ns_dtype(arr_or_dtype) -> bool:
"""
Check whether the provided array or dtype is of the datetime64[ns] dtype.
Parameters
----------
arr_or_dtype : array-like
The array or dtype to check.
Returns
-------
bool
Whether or not the array or dtype is of the datetime64[ns] dtype.
Examples
--------
>>> is_datetime64_ns_dtype(str)
False
>>> is_datetime64_ns_dtype(int)
False
>>> is_datetime64_ns_dtype(np.datetime64) # no unit
False
>>> is_datetime64_ns_dtype(DatetimeTZDtype("ns", "US/Eastern"))
True
>>> is_datetime64_ns_dtype(np.array(['a', 'b']))
False
>>> is_datetime64_ns_dtype(np.array([1, 2]))
False
>>> is_datetime64_ns_dtype(np.array([], dtype="datetime64")) # no unit
False
>>> is_datetime64_ns_dtype(np.array([], dtype="datetime64[ps]")) # wrong unit
False
>>> is_datetime64_ns_dtype(pd.DatetimeIndex([1, 2, 3], dtype="datetime64[ns]"))
True
"""
if arr_or_dtype is None:
return False
try:
tipo = _get_dtype(arr_or_dtype)
except TypeError:
if is_datetime64tz_dtype(arr_or_dtype):
tipo = _get_dtype(arr_or_dtype.dtype)
else:
return False
return tipo == DT64NS_DTYPE or getattr(tipo, "base", None) == DT64NS_DTYPE
def is_timedelta64_ns_dtype(arr_or_dtype) -> bool:
"""
Check whether the provided array or dtype is of the timedelta64[ns] dtype.
This is a very specific dtype, so generic ones like `np.timedelta64`
will return False if passed into this function.
Parameters
----------
arr_or_dtype : array-like
The array or dtype to check.
Returns
-------
boolean
Whether or not the array or dtype is of the timedelta64[ns] dtype.
Examples
--------
>>> is_timedelta64_ns_dtype(np.dtype('m8[ns]'))
True
>>> is_timedelta64_ns_dtype(np.dtype('m8[ps]')) # Wrong frequency
False
>>> is_timedelta64_ns_dtype(np.array([1, 2], dtype='m8[ns]'))
True
>>> is_timedelta64_ns_dtype(np.array([1, 2], dtype=np.timedelta64))
False
"""
return _is_dtype(arr_or_dtype, lambda dtype: dtype == TD64NS_DTYPE)
def is_datetime_or_timedelta_dtype(arr_or_dtype) -> bool:
"""
Check whether the provided array or dtype is of
a timedelta64 or datetime64 dtype.
Parameters
----------
arr_or_dtype : array-like
The array or dtype to check.
Returns
-------
boolean
Whether or not the array or dtype is of a timedelta64,
or datetime64 dtype.
Examples
--------
>>> is_datetime_or_timedelta_dtype(str)
False
>>> is_datetime_or_timedelta_dtype(int)
False
>>> is_datetime_or_timedelta_dtype(np.datetime64)
True
>>> is_datetime_or_timedelta_dtype(np.timedelta64)
True
>>> is_datetime_or_timedelta_dtype(np.array(['a', 'b']))
False
>>> is_datetime_or_timedelta_dtype(pd.Series([1, 2]))
False
>>> is_datetime_or_timedelta_dtype(np.array([], dtype=np.timedelta64))
True
>>> is_datetime_or_timedelta_dtype(np.array([], dtype=np.datetime64))
True
"""
return _is_dtype_type(arr_or_dtype, classes(np.datetime64, np.timedelta64))
def _is_unorderable_exception(e: TypeError) -> bool:
"""
Check if the exception raised is an unorderable exception.
Parameters
----------
e : Exception or sub-class
The exception object to check.
Returns
-------
bool
Whether or not the exception raised is an unorderable exception.
"""
return "'>' not supported between instances of" in str(e)
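# Illustrative sketch (added for clarity, not part of the original module):
# this helper recognises the TypeError raised by Python 3 for comparisons
# between unorderable types, e.g.
#   try:
#       1 > "a"
#   except TypeError as err:
#       assert _is_unorderable_exception(err)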
# This exists to silence numpy deprecation warnings, see GH#29553
def is_numeric_v_string_like(a, b):
"""
Check if we are comparing a string-like object to a numeric ndarray.
NumPy doesn't like to compare such objects, especially numeric arrays
and scalar string-likes.
Parameters
----------
a : array-like, scalar
The first object to check.
b : array-like, scalar
The second object to check.
Returns
-------
boolean
        Whether we are comparing a string-like object to a numeric array.
Examples
--------
>>> is_numeric_v_string_like(1, 1)
False
>>> is_numeric_v_string_like("foo", "foo")
False
>>> is_numeric_v_string_like(1, "foo") # non-array numeric
False
>>> is_numeric_v_string_like(np.array([1]), "foo")
True
>>> is_numeric_v_string_like("foo", np.array([1])) # symmetric check
True
>>> is_numeric_v_string_like(np.array([1, 2]), np.array(["foo"]))
True
>>> is_numeric_v_string_like(np.array(["foo"]), np.array([1, 2]))
True
>>> is_numeric_v_string_like(np.array([1]), np.array([2]))
False
>>> is_numeric_v_string_like(np.array(["foo"]), np.array(["foo"]))
False
"""
is_a_array = isinstance(a, np.ndarray)
is_b_array = isinstance(b, np.ndarray)
is_a_numeric_array = is_a_array and is_numeric_dtype(a)
is_b_numeric_array = is_b_array and is_numeric_dtype(b)
is_a_string_array = is_a_array and is_string_like_dtype(a)
is_b_string_array = is_b_array and is_string_like_dtype(b)
is_a_scalar_string_like = not is_a_array and isinstance(a, str)
is_b_scalar_string_like = not is_b_array and isinstance(b, str)
return (
(is_a_numeric_array and is_b_scalar_string_like)
or (is_b_numeric_array and is_a_scalar_string_like)
or (is_a_numeric_array and is_b_string_array)
or (is_b_numeric_array and is_a_string_array)
)
# This exists to silence numpy deprecation warnings, see GH#29553
def is_datetimelike_v_numeric(a, b):
"""
Check if we are comparing a datetime-like object to a numeric object.
By "numeric," we mean an object that is either of an int or float dtype.
Parameters
----------
a : array-like, scalar
The first object to check.
b : array-like, scalar
The second object to check.
Returns
-------
boolean
        Whether we are comparing a datetime-like object to a numeric object.
Examples
--------
>>> from datetime import datetime
>>> dt = np.datetime64(datetime(2017, 1, 1))
>>>
>>> is_datetimelike_v_numeric(1, 1)
False
>>> is_datetimelike_v_numeric(dt, dt)
False
>>> is_datetimelike_v_numeric(1, dt)
True
>>> is_datetimelike_v_numeric(dt, 1) # symmetric check
True
>>> is_datetimelike_v_numeric(np.array([dt]), 1)
True
>>> is_datetimelike_v_numeric(np.array([1]), dt)
True
>>> is_datetimelike_v_numeric(np.array([dt]), np.array([1]))
True
>>> is_datetimelike_v_numeric(np.array([1]), np.array([2]))
False
>>> is_datetimelike_v_numeric(np.array([dt]), np.array([dt]))
False
"""
if not hasattr(a, "dtype"):
a = np.asarray(a)
if not hasattr(b, "dtype"):
b = np.asarray(b)
def is_numeric(x):
"""
Check if an object has a numeric dtype (i.e. integer or float).
"""
return is_integer_dtype(x) or is_float_dtype(x)
return (needs_i8_conversion(a) and is_numeric(b)) or (
needs_i8_conversion(b) and is_numeric(a)
)
def needs_i8_conversion(arr_or_dtype) -> bool:
"""
Check whether the array or dtype should be converted to int64.
An array-like or dtype "needs" such a conversion if the array-like
    or dtype is of a datetime-like dtype.
Parameters
----------
arr_or_dtype : array-like
The array or dtype to check.
Returns
-------
boolean
Whether or not the array or dtype should be converted to int64.
Examples
--------
>>> needs_i8_conversion(str)
False
>>> needs_i8_conversion(np.int64)
False
>>> needs_i8_conversion(np.datetime64)
True
>>> needs_i8_conversion(np.array(['a', 'b']))
False
>>> needs_i8_conversion(pd.Series([1, 2]))
False
>>> needs_i8_conversion(pd.Series([], dtype="timedelta64[ns]"))
True
>>> needs_i8_conversion(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern"))
True
"""
if arr_or_dtype is None:
return False
return (
is_datetime_or_timedelta_dtype(arr_or_dtype)
or is_datetime64tz_dtype(arr_or_dtype)
or is_period_dtype(arr_or_dtype)
)
def is_numeric_dtype(arr_or_dtype) -> bool:
"""
Check whether the provided array or dtype is of a numeric dtype.
Parameters
----------
arr_or_dtype : array-like
The array or dtype to check.
Returns
-------
boolean
Whether or not the array or dtype is of a numeric dtype.
Examples
--------
>>> is_numeric_dtype(str)
False
>>> is_numeric_dtype(int)
True
>>> is_numeric_dtype(float)
True
>>> is_numeric_dtype(np.uint64)
True
>>> is_numeric_dtype(np.datetime64)
False
>>> is_numeric_dtype(np.timedelta64)
False
>>> is_numeric_dtype(np.array(['a', 'b']))
False
>>> is_numeric_dtype(pd.Series([1, 2]))
True
>>> is_numeric_dtype(pd.Index([1, 2.]))
True
>>> is_numeric_dtype(np.array([], dtype=np.timedelta64))
False
"""
return _is_dtype_type(
arr_or_dtype, classes_and_not_datetimelike(np.number, np.bool_)
)
def is_string_like_dtype(arr_or_dtype) -> bool:
"""
Check whether the provided array or dtype is of a string-like dtype.
Unlike `is_string_dtype`, the object dtype is excluded because it
is a mixed dtype.
Parameters
----------
arr_or_dtype : array-like
The array or dtype to check.
Returns
-------
boolean
Whether or not the array or dtype is of the string dtype.
Examples
--------
>>> is_string_like_dtype(str)
True
>>> is_string_like_dtype(object)
False
>>> is_string_like_dtype(np.array(['a', 'b']))
True
>>> is_string_like_dtype(pd.Series([1, 2]))
False
"""
return _is_dtype(arr_or_dtype, lambda dtype: dtype.kind in ("S", "U"))
def is_float_dtype(arr_or_dtype) -> bool:
"""
Check whether the provided array or dtype is of a float dtype.
This function is internal and should not be exposed in the public API.
Parameters
----------
arr_or_dtype : array-like
The array or dtype to check.
Returns
-------
boolean
Whether or not the array or dtype is of a float dtype.
Examples
--------
>>> is_float_dtype(str)
False
>>> is_float_dtype(int)
False
>>> is_float_dtype(float)
True
>>> is_float_dtype(np.array(['a', 'b']))
False
>>> is_float_dtype(pd.Series([1, 2]))
False
>>> is_float_dtype(pd.Index([1, 2.]))
True
"""
return _is_dtype_type(arr_or_dtype, classes(np.floating))
def is_bool_dtype(arr_or_dtype) -> bool:
"""
Check whether the provided array or dtype is of a boolean dtype.
Parameters
----------
arr_or_dtype : array-like
The array or dtype to check.
Returns
-------
boolean
Whether or not the array or dtype is of a boolean dtype.
Notes
-----
An ExtensionArray is considered boolean when the ``_is_boolean``
attribute is set to True.
Examples
--------
>>> is_bool_dtype(str)
False
>>> is_bool_dtype(int)
False
>>> is_bool_dtype(bool)
True
>>> is_bool_dtype(np.bool)
True
>>> is_bool_dtype(np.array(['a', 'b']))
False
>>> is_bool_dtype(pd.Series([1, 2]))
False
>>> is_bool_dtype(np.array([True, False]))
True
>>> is_bool_dtype(pd.Categorical([True, False]))
True
>>> is_bool_dtype(pd.arrays.SparseArray([True, False]))
True
"""
if arr_or_dtype is None:
return False
try:
dtype = _get_dtype(arr_or_dtype)
except TypeError:
return False
if isinstance(arr_or_dtype, CategoricalDtype):
arr_or_dtype = arr_or_dtype.categories
# now we use the special definition for Index
if isinstance(arr_or_dtype, ABCIndexClass):
# TODO(jreback)
# we don't have a boolean Index class
        # so it's object dtype; we need to infer to
# guess this
return arr_or_dtype.is_object and arr_or_dtype.inferred_type == "boolean"
elif is_extension_array_dtype(arr_or_dtype):
dtype = getattr(arr_or_dtype, "dtype", arr_or_dtype)
return dtype._is_boolean
return issubclass(dtype.type, np.bool_)
def is_extension_type(arr) -> bool:
"""
Check whether an array-like is of a pandas extension class instance.
.. deprecated:: 1.0.0
Use ``is_extension_array_dtype`` instead.
Extension classes include categoricals, pandas sparse objects (i.e.
classes represented within the pandas library and not ones external
to it like scipy sparse matrices), and datetime-like arrays.
Parameters
----------
arr : array-like
The array-like to check.
Returns
-------
boolean
Whether or not the array-like is of a pandas extension class instance.
Examples
--------
>>> is_extension_type([1, 2, 3])
False
>>> is_extension_type(np.array([1, 2, 3]))
False
>>>
>>> cat = pd.Categorical([1, 2, 3])
>>>
>>> is_extension_type(cat)
True
>>> is_extension_type(pd.Series(cat))
True
>>> is_extension_type(pd.arrays.SparseArray([1, 2, 3]))
True
>>> from scipy.sparse import bsr_matrix
>>> is_extension_type(bsr_matrix([1, 2, 3]))
False
>>> is_extension_type(pd.DatetimeIndex([1, 2, 3]))
False
>>> is_extension_type(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern"))
True
>>>
>>> dtype = DatetimeTZDtype("ns", tz="US/Eastern")
>>> s = pd.Series([], dtype=dtype)
>>> is_extension_type(s)
True
"""
warnings.warn(
"'is_extension_type' is deprecated and will be removed in a future "
"version. Use 'is_extension_array_dtype' instead.",
FutureWarning,
stacklevel=2,
)
if is_categorical_dtype(arr):
return True
elif is_sparse(arr):
return True
elif is_datetime64tz_dtype(arr):
return True
return False
def is_extension_array_dtype(arr_or_dtype) -> bool:
"""
Check if an object is a pandas extension array type.
    See the :ref:`User Guide <extending.extension-types>` for more.
Parameters
----------
arr_or_dtype : object
For array-like input, the ``.dtype`` attribute will
be extracted.
Returns
-------
bool
Whether the `arr_or_dtype` is an extension array type.
Notes
-----
This checks whether an object implements the pandas extension
array interface. In pandas, this includes:
* Categorical
* Sparse
* Interval
* Period
* DatetimeArray
* TimedeltaArray
Third-party libraries may implement arrays or types satisfying
this interface as well.
Examples
--------
>>> from pandas.api.types import is_extension_array_dtype
>>> arr = pd.Categorical(['a', 'b'])
>>> is_extension_array_dtype(arr)
True
>>> is_extension_array_dtype(arr.dtype)
True
>>> arr = np.array(['a', 'b'])
>>> is_extension_array_dtype(arr.dtype)
False
"""
dtype = getattr(arr_or_dtype, "dtype", arr_or_dtype)
return isinstance(dtype, ExtensionDtype) or registry.find(dtype) is not None
def is_complex_dtype(arr_or_dtype) -> bool:
"""
Check whether the provided array or dtype is of a complex dtype.
Parameters
----------
arr_or_dtype : array-like
The array or dtype to check.
Returns
-------
boolean
Whether or not the array or dtype is of a complex dtype.
Examples
--------
>>> is_complex_dtype(str)
False
>>> is_complex_dtype(int)
False
>>> is_complex_dtype(np.complex)
True
>>> is_complex_dtype(np.array(['a', 'b']))
False
>>> is_complex_dtype(pd.Series([1, 2]))
False
>>> is_complex_dtype(np.array([1 + 1j, 5]))
True
"""
return _is_dtype_type(arr_or_dtype, classes(np.complexfloating))
def _is_dtype(arr_or_dtype, condition) -> bool:
"""
Return a boolean if the condition is satisfied for the arr_or_dtype.
Parameters
----------
arr_or_dtype : array-like, str, np.dtype, or ExtensionArrayType
The array-like or dtype object whose dtype we want to extract.
condition : callable[Union[np.dtype, ExtensionDtype]]
Returns
-------
bool
"""
if arr_or_dtype is None:
return False
try:
dtype = _get_dtype(arr_or_dtype)
except (TypeError, ValueError, UnicodeEncodeError):
return False
return condition(dtype)
def _get_dtype(arr_or_dtype) -> DtypeObj:
"""
Get the dtype instance associated with an array
or dtype object.
Parameters
----------
arr_or_dtype : array-like
The array-like or dtype object whose dtype we want to extract.
Returns
-------
    obj_dtype : The extracted dtype instance from the
                passed-in array or dtype object.
Raises
------
TypeError : The passed in object is None.
"""
if arr_or_dtype is None:
raise TypeError("Cannot deduce dtype from null object")
# fastpath
elif isinstance(arr_or_dtype, np.dtype):
return arr_or_dtype
elif isinstance(arr_or_dtype, type):
return np.dtype(arr_or_dtype)
# if we have an array-like
elif hasattr(arr_or_dtype, "dtype"):
arr_or_dtype = arr_or_dtype.dtype
return pandas_dtype(arr_or_dtype)
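# Illustrative sketch (added for clarity): _get_dtype normalises several kinds
# of input to a dtype instance, e.g.
#   _get_dtype(np.int64)              # -> dtype('int64')
#   _get_dtype(np.array([1.0, 2.0]))  # -> dtype('float64')
#   _get_dtype("category")            # -> CategoricalDtype(...)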
def _is_dtype_type(arr_or_dtype, condition) -> bool:
"""
Return a boolean if the condition is satisfied for the arr_or_dtype.
Parameters
----------
arr_or_dtype : array-like
The array-like or dtype object whose dtype we want to extract.
condition : callable[Union[np.dtype, ExtensionDtypeType]]
Returns
-------
    bool : Whether the condition is satisfied for the arr_or_dtype.
"""
if arr_or_dtype is None:
return condition(type(None))
# fastpath
if isinstance(arr_or_dtype, np.dtype):
return condition(arr_or_dtype.type)
elif isinstance(arr_or_dtype, type):
if issubclass(arr_or_dtype, ExtensionDtype):
arr_or_dtype = arr_or_dtype.type
return condition(np.dtype(arr_or_dtype).type)
# if we have an array-like
if hasattr(arr_or_dtype, "dtype"):
arr_or_dtype = arr_or_dtype.dtype
# we are not possibly a dtype
elif is_list_like(arr_or_dtype):
return condition(type(None))
try:
tipo = pandas_dtype(arr_or_dtype).type
except (TypeError, ValueError, UnicodeEncodeError):
if is_scalar(arr_or_dtype):
return condition(type(None))
return False
return condition(tipo)
def infer_dtype_from_object(dtype):
"""
Get a numpy dtype.type-style object for a dtype object.
    This method also includes handling of the datetime64[ns] and
datetime64[ns, TZ] objects.
If no dtype can be found, we return ``object``.
Parameters
----------
dtype : dtype, type
The dtype object whose numpy dtype.type-style
object we want to extract.
Returns
-------
dtype_object : The extracted numpy dtype.type-style object.
"""
if isinstance(dtype, type) and issubclass(dtype, np.generic):
# Type object from a dtype
return dtype
elif isinstance(dtype, (np.dtype, ExtensionDtype)):
# dtype object
try:
_validate_date_like_dtype(dtype)
except TypeError:
# Should still pass if we don't have a date-like
pass
return dtype.type
try:
dtype = pandas_dtype(dtype)
except TypeError:
pass
if is_extension_array_dtype(dtype):
return dtype.type
elif isinstance(dtype, str):
# TODO(jreback)
# should deprecate these
if dtype in ["datetimetz", "datetime64tz"]:
return DatetimeTZDtype.type
elif dtype in ["period"]:
raise NotImplementedError
if dtype == "datetime" or dtype == "timedelta":
dtype += "64"
try:
return infer_dtype_from_object(getattr(np, dtype))
except (AttributeError, TypeError):
# Handles cases like _get_dtype(int) i.e.,
# Python objects that are valid dtypes
# (unlike user-defined types, in general)
#
# TypeError handles the float16 type code of 'e'
# further handle internal types
pass
return infer_dtype_from_object(np.dtype(dtype))
def _validate_date_like_dtype(dtype) -> None:
"""
Check whether the dtype is a date-like dtype. Raises an error if invalid.
Parameters
----------
dtype : dtype, type
The dtype to check.
Raises
------
    TypeError : The dtype could not be cast to a date-like dtype.
    ValueError : The dtype is an illegal date-like dtype (e.g. the
        frequency provided is too specific)
"""
try:
typ = np.datetime_data(dtype)[0]
except ValueError as e:
raise TypeError(e) from e
if typ != "generic" and typ != "ns":
raise ValueError(
f"{repr(dtype.name)} is too specific of a frequency, "
f"try passing {repr(dtype.type.__name__)}"
)
def pandas_dtype(dtype) -> DtypeObj:
"""
Convert input into a pandas only dtype object or a numpy dtype object.
Parameters
----------
dtype : object to be converted
Returns
-------
np.dtype or a pandas dtype
Raises
------
TypeError if not a dtype
"""
# short-circuit
if isinstance(dtype, np.ndarray):
return dtype.dtype
elif isinstance(dtype, (np.dtype, ExtensionDtype)):
return dtype
# registered extension types
result = registry.find(dtype)
if result is not None:
return result
# try a numpy dtype
# raise a consistent TypeError if failed
try:
npdtype = np.dtype(dtype)
except SyntaxError as err:
# np.dtype uses `eval` which can raise SyntaxError
raise TypeError(f"data type '{dtype}' not understood") from err
# Any invalid dtype (such as pd.Timestamp) should raise an error.
# np.dtype(invalid_type).kind = 0 for such objects. However, this will
# also catch some valid dtypes such as object, np.object_ and 'object'
# which we safeguard against by catching them earlier and returning
# np.dtype(valid_dtype) before this condition is evaluated.
if is_hashable(dtype) and dtype in [object, np.object_, "object", "O"]:
# check hashability to avoid errors/DeprecationWarning when we get
# here and `dtype` is an array
return npdtype
elif npdtype.kind == "O":
raise TypeError(f"dtype '{dtype}' not understood")
return npdtype
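# Illustrative sketch (added for clarity): pandas_dtype accepts numpy dtypes,
# dtype strings and registered extension dtype names, e.g.
#   pandas_dtype("int64")                # -> dtype('int64')
#   pandas_dtype(np.float32)             # -> dtype('float32')
#   pandas_dtype("datetime64[ns, UTC]")  # -> DatetimeTZDtype(unit='ns', tz='UTC')
#   pandas_dtype(pd.Timestamp)           # raises TypeError (see comment above)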
|
__all__ = [
"AlexNet", "DeeplabV1", "DeeplabV2", "DeeplabV3", "DenseASPP", "FCN", "GoogLeNet", "LeNet5", "ResNet", "UNet", "VGG"
]
from .AlexNet import AlexNet
from .DenseASPP import DenseASPP
from .GoogLeNet import GoogLeNet
from .LeNet5 import LeNet5
from .VGG import VGG11, VGG13, VGG16, VGG19
from .ResNet import ResNet18, ResNet34, ResNet50, ResNet101, ResNet152
from .UNet import UNet
from .FCN import FCN8s, FCN16s, FCN32s
from .DeeplabV1 import DeeplabV1
from .DeeplabV2 import DeeplabV2
from .DeeplabV3 import DeeplabV3
|
#
# Story Time App
# Integration tests for the story_time_service functions
#
from storytime import story_time_service
def test_get_stories_by_category_id():
category_funny = story_time_service.get_category_by_label('Funny')
stories = story_time_service.get_published_stories_by_category_id(category_funny.id)
assert stories.count() == 2
assert any(story.title == 'Fresh Prince' for story in stories)
assert any(story.title == 'Animal Escape' for story in stories)
|
def pretty_table(headers, rows):
"""
:param headers: A list, the column names.
:param rows: A list of lists, the row data.
"""
if not all([len(headers) == len(row) for row in rows]):
        return "Incorrect number of columns in one or more rows."
rows = [[stringify(s) for s in row] for row in rows]
headers = [stringify(s) for s in headers]
widths = col_widths(headers, rows)
top = [x.ljust(widths[i]) for i, x in enumerate(headers)]
separator = ["-" * widths[i] for i in range(len(headers))]
rest = []
for item in rows:
row = []
for i, x in enumerate(item):
if isnumeric(x):
row.append(x.rjust(widths[i]))
else:
row.append(x.ljust(widths[i]))
rest.append(row)
top = " | ".join(top)
separator = "-+-".join(separator)
rest = [" | ".join(r) for r in rest]
top += "\n"
separator += "\n"
rest = "\n".join(rest)
return top + separator + rest + "\n"
def col_widths(headers, rows):
l = [headers] + rows
transpose = [list(x) for x in zip(*l)]
return [len(max(x, key=len)) for x in transpose]
def isnumeric(s):
for c in s:
if not c.isdigit() and c != ".":
return False
return s.count(".") < 2
def stringify(s):
if s is None:
return ""
try:
unicode
except NameError:
return str(s)
else:
if isinstance(s, unicode):
return s.encode("utf8")
return str(s)
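if __name__ == "__main__":
    # Minimal usage sketch (added for illustration): render a small table.
    # Numeric-looking cells are right-justified, everything else left-justified.
    demo_headers = ["name", "count", "ratio"]
    demo_rows = [["alpha", 10, 0.5], ["beta", 2, 0.25]]
    print(pretty_table(demo_headers, demo_rows))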
|
"""Auto-generated file, do not edit by hand. GA metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_GA = PhoneMetadata(id='GA', country_code=None, international_prefix=None,
general_desc=PhoneNumberDesc(national_number_pattern='1\\d(?:\\d{2})?', possible_length=(2, 4)),
toll_free=PhoneNumberDesc(national_number_pattern='1(?:(?:3\\d|73)\\d|8)', example_number='18', possible_length=(2, 4)),
emergency=PhoneNumberDesc(national_number_pattern='1(?:(?:3\\d|73)\\d|8)', example_number='18', possible_length=(2, 4)),
short_code=PhoneNumberDesc(national_number_pattern='1(?:3\\d\\d|730|8)', example_number='18', possible_length=(2, 4)),
short_data=True)
|
import tensorflow as tf
from utility import draw_toolbox
from utility import anchor_manipulator
import cv2
import numpy as np
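# Overview (comments added for clarity): this script runs in two stages.
#   1. A frozen SSD300/VGG16 graph is loaded and executed on the MLU device to
#      produce raw class scores and box regressions for all 8732 anchors.
#   2. A second graph (g2, pinned to the CPU) decodes those predictions against
#      the anchor grid, then filters, sorts and applies per-class NMS before
#      drawing the surviving boxes on the input image.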
def load_graph(model_file):
graph = tf.Graph()
graph_def = tf.GraphDef()
with open(model_file, "rb") as f:
graph_def.ParseFromString(f.read())
with graph.as_default():
tf.import_graph_def(graph_def)
return graph
model_file = 'model/ssd300_vgg16_short_mlu.pb'
graph = load_graph(model_file)
#for tensor in tf.get_default_graph().as_graph_def().node: print(tensor.name)
config = tf.ConfigProto(allow_soft_placement=True, inter_op_parallelism_threads=1, intra_op_parallelism_threads=1)
config.mlu_options.data_parallelism = 1
config.mlu_options.model_parallelism = 1
config.mlu_options.core_num = 1
config.mlu_options.core_version = 'MLU270'
config.mlu_options.precision = 'int8'
with tf.Session(config = config, graph = graph) as sess:
init = tf.global_variables_initializer()
sess.run(init)
#for op in tf.get_default_graph().get_operations(): print(str(op.name))
for tensor in tf.get_default_graph().as_graph_def().node: print(tensor.name)
image_input = graph.get_tensor_by_name('import/define_input/image_input:0')
cls_pred = graph.get_tensor_by_name("import/ssd300/cls_pred/concat:0" )
location_pred = graph.get_tensor_by_name("import/ssd300/location_pred/concat:0" )
_R_MEAN = 123.68
_G_MEAN = 116.78
_B_MEAN = 103.94
means = [_B_MEAN, _G_MEAN, _R_MEAN, ]
np_image = cv2.imread('demo/test.jpg')
np_image = np.float32(np_image)
image = cv2.resize(np_image, (300, 300))
#cv2.imwrite('demo/test_out2.jpg', image)
image = (image - means)# / 255.0
#print('image', type(image), image.shape, image)
image = np.expand_dims(image, axis=0)
#print('image', type(image), image.shape, image)
cls_pred_, location_pred_ = sess.run([cls_pred, location_pred], feed_dict = {image_input : image})
print('cls_pred', type(cls_pred_), cls_pred_.shape)
print('location_pred', type(location_pred_), location_pred_.shape)
g2 = tf.Graph()
with g2.as_default():
with tf.device('/cpu:0'):
def select_bboxes(scores_pred, bboxes_pred, num_classes, select_threshold):
selected_bboxes = {}
selected_scores = {}
with tf.name_scope('select_bboxes', values = [scores_pred, bboxes_pred]):
for class_ind in range(1, num_classes):
class_scores = scores_pred[:, class_ind]
select_mask = class_scores > select_threshold
select_mask = tf.cast(select_mask, tf.float32)
selected_bboxes[class_ind] = tf.multiply(bboxes_pred, tf.expand_dims(select_mask, axis=-1))
selected_scores[class_ind] = tf.multiply(class_scores, select_mask)
return selected_bboxes, selected_scores
def clip_bboxes(ymin, xmin, ymax, xmax, name):
with tf.name_scope(name, 'clip_bboxes', [ymin, xmin, ymax, xmax]):
ymin = tf.maximum(ymin, 0.)
xmin = tf.maximum(xmin, 0.)
ymax = tf.minimum(ymax, 1.)
xmax = tf.minimum(xmax, 1.)
ymin = tf.minimum(ymin, ymax)
xmin = tf.minimum(xmin, xmax)
return ymin, xmin, ymax, xmax
def filter_bboxes(scores_pred, ymin, xmin, ymax, xmax, min_size, name):
with tf.name_scope(name, 'filter_bboxes', [scores_pred, ymin, xmin, ymax, xmax]):
width = xmax - xmin
height = ymax - ymin
filter_mask = tf.logical_and(width > min_size, height > min_size)
filter_mask = tf.cast(filter_mask, tf.float32)
return tf.multiply(ymin, filter_mask), tf.multiply(xmin, filter_mask), \
tf.multiply(ymax, filter_mask), tf.multiply(xmax, filter_mask), tf.multiply(scores_pred, filter_mask)
def sort_bboxes(scores_pred, ymin, xmin, ymax, xmax, keep_topk, name):
with tf.name_scope(name, 'sort_bboxes', [scores_pred, ymin, xmin, ymax, xmax]):
cur_bboxes = tf.shape(scores_pred)[0]
scores, idxes = tf.nn.top_k(scores_pred, k=tf.minimum(keep_topk, cur_bboxes), sorted=True)
ymin, xmin, ymax, xmax = tf.gather(ymin, idxes), tf.gather(xmin, idxes), \
tf.gather(ymax, idxes), tf.gather(xmax, idxes)
paddings_scores = tf.expand_dims(tf.stack([0, tf.maximum(keep_topk-cur_bboxes, 0)], axis=0), axis=0)
return tf.pad(ymin, paddings_scores, "CONSTANT"), tf.pad(xmin, paddings_scores, "CONSTANT"),\
tf.pad(ymax, paddings_scores, "CONSTANT"), tf.pad(xmax, paddings_scores, "CONSTANT"),\
tf.pad(scores, paddings_scores, "CONSTANT")
def nms_bboxes(scores_pred, bboxes_pred, nms_topk, nms_threshold, name):
with tf.name_scope(name, 'nms_bboxes', [scores_pred, bboxes_pred]):
idxes = tf.image.non_max_suppression(bboxes_pred, scores_pred, nms_topk, nms_threshold)
return tf.gather(scores_pred, idxes), tf.gather(bboxes_pred, idxes)
def parse_by_class(cls_pred, bboxes_pred, num_classes, select_threshold, min_size,
keep_topk, nms_topk, nms_threshold):
with tf.name_scope('select_bboxes', values = [cls_pred, bboxes_pred]):
scores_pred = tf.nn.softmax(cls_pred)
selected_bboxes, selected_scores = select_bboxes(scores_pred, bboxes_pred, num_classes, select_threshold)
for class_ind in range(1, num_classes):
ymin, xmin, ymax, xmax = tf.unstack(selected_bboxes[class_ind], 4, axis=-1)
#ymin, xmin, ymax, xmax = tf.squeeze(ymin), tf.squeeze(xmin), tf.squeeze(ymax), tf.squeeze(xmax)
ymin, xmin, ymax, xmax = clip_bboxes(ymin, xmin, ymax, xmax, 'clip_bboxes_{}'.format(class_ind))
ymin, xmin, ymax, xmax, selected_scores[class_ind] = filter_bboxes(
selected_scores[class_ind], ymin, xmin, ymax, xmax, min_size, 'filter_bboxes_{}'.format(class_ind))
ymin, xmin, ymax, xmax, selected_scores[class_ind] = sort_bboxes(
selected_scores[class_ind], ymin, xmin, ymax, xmax, keep_topk, 'sort_bboxes_{}'.format(class_ind))
selected_bboxes[class_ind] = tf.stack([ymin, xmin, ymax, xmax], axis=-1)
selected_scores[class_ind], selected_bboxes[class_ind] = nms_bboxes(
selected_scores[class_ind], selected_bboxes[class_ind], nms_topk,
nms_threshold, 'nms_bboxes_{}'.format(class_ind))
return selected_bboxes, selected_scores
out_shape = [300] * 2
anchor_creator = anchor_manipulator.AnchorCreator(
out_shape,
layers_shapes = [(38, 38), (19, 19), (10, 10), (5, 5), (3, 3), (1, 1)],
anchor_scales = [(0.1,), (0.2,), (0.375,), (0.55,), (0.725,), (0.9,)],
extra_anchor_scales = [(0.1414,), (0.2739,), (0.4541,), (0.6315,), (0.8078,), (0.9836,)],
anchor_ratios = [(1., 2., .5), (1., 2., 3., .5, 0.3333), (1., 2., 3., .5, 0.3333),
(1., 2., 3., .5, 0.3333), (1., 2., .5), (1., 2., .5)],
#anchor_ratios = [(2., .5), (2., 3., .5, 0.3333), (2., 3., .5, 0.3333),
#(2., 3., .5, 0.3333), (2., .5), (2., .5)],
layer_steps = [8, 16, 32, 64, 100, 300])
all_anchors, all_num_anchors_depth, all_num_anchors_spatial = anchor_creator.get_all_anchors()
anchor_encoder_decoder = anchor_manipulator.AnchorEncoder(allowed_borders = [1.0] * 6,
positive_threshold = None,
ignore_threshold = None,
prior_scaling=[0.1, 0.1, 0.2, 0.2])
def decode_fn(pred):
return anchor_encoder_decoder.ext_decode_all_anchors(
pred, all_anchors, all_num_anchors_depth, all_num_anchors_spatial)
with tf.name_scope('g2_cls_pred'):
g2_cls_pred = tf.placeholder(tf.float32, shape=(8732, 21), name='g2_cls_pred')
with tf.name_scope('g2_location_pred'):
g2_location_pred = tf.placeholder(tf.float32, shape=(8732, 4), name='g2_location_pred')
bboxes_pred = decode_fn(g2_location_pred)
bboxes_pred = tf.concat(bboxes_pred, axis=0)
num_classes = 21
select_threshold = 0.2
min_size = 0.03
keep_topk = 200
nms_topk = 20
nms_threshold = 0.45
selected_bboxes, selected_scores = parse_by_class(g2_cls_pred, bboxes_pred,
num_classes, select_threshold, min_size,
keep_topk, nms_topk, nms_threshold)
labels_list = []
scores_list = []
bboxes_list = []
for k, v in selected_scores.items():
labels_list.append(tf.ones_like(v, tf.int32) * k)
scores_list.append(v)
bboxes_list.append(selected_bboxes[k])
all_labels = tf.concat(labels_list, axis=0)
all_scores = tf.concat(scores_list, axis=0)
all_bboxes = tf.concat(bboxes_list, axis=0)
print('sess2 start')
with tf.Session(graph=g2) as sess2:
print('sess2 end')
labels_, scores_, bboxes_ = sess2.run([all_labels, all_scores, all_bboxes],
feed_dict = {g2_cls_pred: cls_pred_, g2_location_pred: location_pred_})
#print('labels_', labels_, type(labels_), labels_.shape)
#print('scores_', scores_, type(scores_), scores_.shape)
#print('bboxes_', bboxes_, type(bboxes_), bboxes_.shape, bboxes_.shape[0])
img_to_draw = draw_toolbox.bboxes_draw_on_img(np_image, labels_, scores_, bboxes_, thickness=2)
cv2.imwrite('demo/test_out.jpg', img_to_draw)
|
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 6 11:29:45 2021
@author: sefaaksungu
"""
def MLearning(x1,x2,x3,x4):
# Python version
import sys
#print('Python: {}'.format(sys.version))
# scipy
import scipy
#print('scipy: {}'.format(scipy.__version__))
# numpy
import numpy
#print('numpy: {}'.format(numpy.__version__))
# matplotlib
import matplotlib
#print('matplotlib: {}'.format(matplotlib.__version__))
# pandas
import pandas
#print('pandas: {}'.format(pandas.__version__))
# scikit-learn
import sklearn
#print('sklearn: {}'.format(sklearn.__version__))
# Load libraries
from pandas.plotting import scatter_matrix
from matplotlib import pyplot
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
url = r"C:\Users\HP\Desktop\IrisSpeciesPrediction\IrisData.csv"
names = ['sepal-length', 'sepal-width', 'petal-length', 'petal-width', 'class']
dataset = pandas.read_csv(url, names=names)
# # shape
# #print(dataset.shape)
# # head
# #print(dataset.head(20))
# # descriptions
# #print(dataset.describe())
# # class distribution
# #print(dataset.groupby('class').size())
# # # box and whisker plots
#dataset.plot(kind='box', subplots=True, layout=(2,2), sharex=False, sharey=False)
#pyplot.show()
# # # histograms
#dataset.hist()
#pyplot.show()
# # # scatter plot matrix
#scatter_matrix(dataset)
#pyplot.show()
# # # Split-out validation dataset
array = dataset.values
X = array[:,0:4]
y = array[:,4]
X_train, X_validation, Y_train, Y_validation = train_test_split(X, y, test_size=0.20, random_state=1)
# # Spot Check Algorithms
models = []
models.append(('LR', LogisticRegression(solver='liblinear', multi_class='ovr')))
models.append(('LDA', LinearDiscriminantAnalysis()))
models.append(('KNN', KNeighborsClassifier()))
models.append(('CART', DecisionTreeClassifier()))
models.append(('NB', GaussianNB()))
models.append(('SVM', SVC(gamma='auto')))
# # evaluate each model in turn
results = []
names = []
for name, model in models:
kfold = StratifiedKFold(n_splits=10, random_state=1, shuffle=True)
cv_results = cross_val_score(model, X_train, Y_train, cv=kfold, scoring='accuracy')
results.append(cv_results)
names.append(name)
# # #print('%s: %f (%f)' % (name, cv_results.mean(), cv_results.std()))
# # # # Compare Algorithms
# pyplot.boxplot(results, labels=names)
# pyplot.title('Algorithm Comparison')
# pyplot.show()
# # # Make predictions on validation dataset
model = SVC(gamma='auto')
model.fit(X_train, Y_train)
predictions = model.predict(X_validation)
# # # Evaluate predictions
# # #print(accuracy_score(Y_validation, predictions))
# # #print(confusion_matrix(Y_validation, predictions))
# # #print(classification_report(Y_validation, predictions))
x_pred = numpy.array([[x1, x2, x3, x4]], dtype=object)
pred2 = model.predict(x_pred)
return pred2[0]
# Example call: MLearning(5.2, 3.5, 1.5, 0.2)
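if __name__ == "__main__":
    # Illustrative call (assumes the hard-coded IrisData.csv path above exists
    # on the machine running this script):
    print(MLearning(5.2, 3.5, 1.5, 0.2))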
|
# the revised Cambridge Reference Sequence (rCRS)
# http://www.ncbi.nlm.nih.gov/entrez/viewer.fcgi?db=nucleotide&val=115315570
# see http://www.mitomap.org/mitoseq.html for explanation
rCRS = "GATCACAGGTCTATCACCCTATTAACCACTCACGGGAGCTCTCCATGCATTTGGTATTTTCGTCTGGGGGGTATGCACGCGATAGCATTGCGAGACGCTGGAGCCGGAGCACCCTATGTCGCAGTATCTGTCTTTGATTCCTGCCTCATCCTATTATTTATCGCACCTACGTTCAATATTACAGGCGAACATACTTACTAAAGTGTGTTAATTAATTAATGCTTGTAGGACATAATAATAACAATTGAATGTCTGCACAGCCACTTTCCACACAGACATCATAACAAAAAATTTCCACCAAACCCCCCCTCCCCCGCTTCTGGCCACAGCACTTAAACACATCTCTGCCAAACCCCAAAAACAAAGAACCCTAACACCAGCCTAACCAGATTTCAAATTTTATCTTTTGGCGGTATGCACTTTTAACAGTCACCCCCCAACTAACACATTATTTTCCCCTCCCACTCCCATACTACTAATCTCATCAATACAACCCCCGCCCATCCTACCCAGCACACACACACCGCTGCTAACCCCATACCCCGAACCAACCAAACCCCAAAGACACCCCCCACAGTTTATGTAGCTTACCTCCTCAAAGCAATACACTGAAAATGTTTAGACGGGCTCACATCACCCCATAAACAAATAGGTTTGGTCCTAGCCTTTCTATTAGCTCTTAGTAAGATTACACATGCAAGCATCCCCGTTCCAGTGAGTTCACCCTCTAAATCACCACGATCAAAAGGAACAAGCATCAAGCACGCAGCAATGCAGCTCAAAACGCTTAGCCTAGCCACACCCCCACGGGAAACAGCAGTGATTAACCTTTAGCAATAAACGAAAGTTTAACTAAGCTATACTAACCCCAGGGTTGGTCAATTTCGTGCCAGCCACCGCGGTCACACGATTAACCCAAGTCAATAGAAGCCGGCGTAAAGAGTGTTTTAGATCACCCCCTCCCCAATAAAGCTAAAACTCACCTGAGTTGTAAAAAACTCCAGTTGACACAAAATAGACTACGAAAGTGGCTTTAACATATCTGAACACACAATAGCTAAGACCCAAACTGGGATTAGATACCCCACTATGCTTAGCCCTAAACCTCAACAGTTAAATCAACAAAACTGCTCGCCAGAACACTACGAGCCACAGCTTAAAACTCAAAGGACCTGGCGGTGCTTCATATCCCTCTAGAGGAGCCTGTTCTGTAATCGATAAACCCCGATCAACCTCACCACCTCTTGCTCAGCCTATATACCGCCATCTTCAGCAAACCCTGATGAAGGCTACAAAGTAAGCGCAAGTACCCACGTAAAGACGTTAGGTCAAGGTGTAGCCCATGAGGTGGCAAGAAATGGGCTACATTTTCTACCCCAGAAAACTACGATAGCCCTTATGAAACTTAAGGGTCGAAGGTGGATTTAGCAGTAAACTAAGAGTAGAGTGCTTAGTTGAACAGGGCCCTGAAGCGCGTACACACCGCCCGTCACCCTCCTCAAGTATACTTCAAAGGACATTTAACTAAAACCCCTACGCATTTATATAGAGGAGACAAGTCGTAACATGGTAAGTGTACTGGAAAGTGCACTTGGACGAACCAGAGTGTAGCTTAACACAAAGCACCCAACTTACACTTAGGAGATTTCAACTTAACTTGACCGCTCTGAGCTAAACCTAGCCCCAAACCCACTCCACCTTACTACCAGACAACCTTAGCCAAACCATTTACCCAAATAAAGTATAGGCGATAGAAATTGAAACCTGGCGCAATAGATATAGTACCGCAAGGGAAAGATGAAAAATTATAACCAAGCATAATATAGCAAGGACTAACCCCTATACCTTCTGCATAATGAATTAACTAGAAATAACTTTGCAAGGAGAGCCAAAGCTAAGACCCCCGAAACCAGACGAGCTACCTAAGAACAGCTAAAAGAGCACACCCGTCTATGTAGCAAAATAGTGGGAAGATTTATAGGTAGAGGCGACAAACCTACCGAGCCTGGTGATAGCTGGTTGTCCAAGATAGAATCTTAGTTCAACTTTAAATTTGCCCACAGAACCCTCTAAATCCCCTTGTAAATTTAACTGTTAGTCCAAAGAGGAACAGCTCTTTGGACACTAGGAAAAAACCTTGTAGAGAGAGTAAAAAATTTAACACCCATAGTAGGCCTAAAAGCAGCCACCAATTAAGAAAGCGTTCAAGCTCAACACCCACTACCTAAAAAATCCCAAACATATAACTGAACTCCTCACACCCAATTGGACCAATCTATCACCCTATAGAAGAACTAATGTTAGTATAAGTAACATGAAAACATTCTCCTCCGCATAAGCCTGCGTCAGATTAAAACACTGAACTGACAATTAACAGCCCAATATCTACAATCAACCAACAAGTCATTATTACCCTCACTGTCAACCCAACACAGGCATGCTCATAAGGAAAGGTTAAAAAAAGTAAAAGGAACTCGGCAAATCTTACCCCGCCTGTTTACCAAAAACATCACCTCTAGCATCACCAGTATTAGAGGCACCGCCTGCCCAGTGACACATGTTTAACGGCCGCGGTACCCTAACCGTGCAAAGGTAGCATAATCACTTGTTCCTTAAATAGGGACCTGTATGAATGGCTCCACGAGGGTTCAGCTGTCTCTTACTTTTAACCAGTGAAATTGACCTGCCCGTGAAGAGGCGGGCATAACACAGCAAGACGAGAAGACCCTATGGAGCTTTAATTTATTAATGCAAACAGTACCTAACAAACCCACAGGTCCTAAACTACCAAACCTGCATTAAAAATTTCGGTTGGGGCGACCTCGGAGCAGAACCCAACCTCCGAGCAGTACATGCTAAGACTTCACCAGTCAAAGCGAACTACTATACTCAATTGATCCAATAACTTGACCAACGGAACAAGTTACCCTAGGGATAACAGCGCAATCCTATTCTAGAGTCCATATCAACAATAGGGTTTACGACCTCGATGTTGGATCAGGACATCCCGATGGTGCAGCCGCTATTAAAGGTTCGTTTGTTCAACGATTAAAGTCCTACGTGATCTGAGTTCAGACCGGAGTAATCCAGGTCGGTTTCTATCTACNTTCAAATTCCTCCCTGTACGAAAGGACAAGAGAAATAAGGCCTACTTCACAAAGCGCCTTCCCCCGTAAATGATATCATCTCAACTTAGTATTATACCCACACCCACCCAAGAACAGGGTTTGTTAAGATGGCAGAGCCCGGTAATCGCATAAAACTTAAAACTTTACAGTCAGAGGTTCAATTCCTCTTCTTAACAACATACCCATGGCCAACCTCCTACTCCTCATTGTACCCATTCTAATCGCAATGGCATTCCTAATGCTTACCGAACGAAAAATTCTAGGCTATATACAACTACGCAAAGGCCCCAACGTTGTAGGCCCCTACGGGCTACTACAACCCTTCGCTGACGCCATAAAACTCTTCACCAAAGAGCCCCTAAAACCCGCCACATCTACCATCACCCTCTACATCACCGCCCCGACCTTAGCTCTCACCA
TCGCTCTTCTACTATGAACCCCCCTCCCCATACCCAACCCCCTGGTCAACCTCAACCTAGGCCTCCTATTTATTCTAGCCACCTCTAGCCTAGCCGTTTACTCAATCCTCTGATCAGGGTGAGCATCAAACTCAAACTACGCCCTGATCGGCGCACTGCGAGCAGTAGCCCAAACAATCTCATATGAAGTCACCCTAGCCATCATTCTACTATCAACATTACTAATAAGTGGCTCCTTTAACCTCTCCACCCTTATCACAACACAAGAACACCTCTGATTACTCCTGCCATCATGACCCTTGGCCATAATATGATTTATCTCCACACTAGCAGAGACCAACCGAACCCCCTTCGACCTTGCCGAAGGGGAGTCCGAACTAGTCTCAGGCTTCAACATCGAATACGCCGCAGGCCCCTTCGCCCTATTCTTCATAGCCGAATACACAAACATTATTATAATAAACACCCTCACCACTACAATCTTCCTAGGAACAACATATGACGCACTCTCCCCTGAACTCTACACAACATATTTTGTCACCAAGACCCTACTTCTAACCTCCCTGTTCTTATGAATTCGAACAGCATACCCCCGATTCCGCTACGACCAACTCATACACCTCCTATGAAAAAACTTCCTACCACTCACCCTAGCATTACTTATATGATATGTCTCCATACCCATTACAATCTCCAGCATTCCCCCTCAAACCTAAGAAATATGTCTGATAAAAGAGTTACTTTGATAGAGTAAATAATAGGAGCTTAAACCCCCTTATTTCTAGGACTATGAGAATCGAACCCATCCCTGAGAATCCAAAATTCTCCGTGCCACCTATCACACCCCATCCTAAAGTAAGGTCAGCTAAATAAGCTATCGGGCCCATACCCCGAAAATGTTGGTTATACCCTTCCCGTACTAATTAATCCCCTGGCCCAACCCGTCATCTACTCTACCATCTTTGCAGGCACACTCATCACAGCGCTAAGCTCGCACTGATTTTTTACCTGAGTAGGCCTAGAAATAAACATGCTAGCTTTTATTCCAGTTCTAACCAAAAAAATAAACCCTCGTTCCACAGAAGCTGCCATCAAGTATTTCCTCACGCAAGCAACCGCATCCATAATCCTTCTAATAGCTATCCTCTTCAACAATATACTCTCCGGACAATGAACCATAACCAATACTACCAATCAATACTCATCATTAATAATCATAATAGCTATAGCAATAAAACTAGGAATAGCCCCCTTTCACTTCTGAGTCCCAGAGGTTACCCAAGGCACCCCTCTGACATCCGGCCTGCTTCTTCTCACATGACAAAAACTAGCCCCCATCTCAATCATATACCAAATCTCTCCCTCACTAAACGTAAGCCTTCTCCTCACTCTCTCAATCTTATCCATCATAGCAGGCAGTTGAGGTGGATTAAACCAAACCCAGCTACGCAAAATCTTAGCATACTCCTCAATTACCCACATAGGATGAATAATAGCAGTTCTACCGTACAACCCTAACATAACCATTCTTAATTTAACTATTTATATTATCCTAACTACTACCGCATTCCTACTACTCAACTTAAACTCCAGCACCACGACCCTACTACTATCTCGCACCTGAAACAAGCTAACATGACTAACACCCTTAATTCCATCCACCCTCCTCTCCCTAGGAGGCCTGCCCCCGCTAACCGGCTTTTTGCCCAAATGGGCCATTATCGAAGAATTCACAAAAAACAATAGCCTCATCATCCCCACCATCATAGCCACCATCACCCTCCTTAACCTCTACTTCTACCTACGCCTAATCTACTCCACCTCAATCACACTACTCCCCATATCTAACAACGTAAAAATAAAATGACAGTTTGAACATACAAAACCCACCCCATTCCTCCCCACACTCATCGCCCTTACCACGCTACTCCTACCTATCTCCCCTTTTATACTAATAATCTTATAGAAATTTAGGTTAAATACAGACCAAGAGCCTTCAAAGCCCTCAGTAAGTTGCAATACTTAATTTCTGTAACAGCTAAGGACTGCAAAACCCCACTCTGCATCAACTGAACGCAAATCAGCCACTTTAATTAAGCTAAGCCCTTACTAGACCAATGGGACTTAAACCCACAAACACTTAGTTAACAGCTAAGCACCCTAATCAACTGGCTTCAATCTACTTCTCCCGCCGCCGGGAAAAAAGGCGGGAGAAGCCCCGGCAGGTTTGAAGCTGCTTCTTCGAATTTGCAATTCAATATGAAAATCACCTCGGAGCTGGTAAAAAGAGGCCTAACCCCTGTCTTTAGATTTACAGTCCAATGCTTCACTCAGCCATTTTACCTCACCCCCACTGATGTTCGCCGACCGTTGACTATTCTCTACAAACCACAAAGACATTGGAACACTATACCTATTATTCGGCGCATGAGCTGGAGTCCTAGGCACAGCTCTAAGCCTCCTTATTCGAGCCGAGCTGGGCCAGCCAGGCAACCTTCTAGGTAACGACCACATCTACAACGTTATCGTCACAGCCCATGCATTTGTAATAATCTTCTTCATAGTAATACCCATCATAATCGGAGGCTTTGGCAACTGACTAGTTCCCCTAATAATCGGTGCCCCCGATATGGCGTTTCCCCGCATAAACAACATAAGCTTCTGACTCTTACCTCCCTCTCTCCTACTCCTGCTCGCATCTGCTATAGTGGAGGCCGGAGCAGGAACAGGTTGAACAGTCTACCCTCCCTTAGCAGGGAACTACTCCCACCCTGGAGCCTCCGTAGACCTAACCATCTTCTCCTTACACCTAGCAGGTGTCTCCTCTATCTTAGGGGCCATCAATTTCATCACAACAATTATCAATATAAAACCCCCTGCCATAACCCAATACCAAACGCCCCTCTTCGTCTGATCCGTCCTAATCACAGCAGTCCTACTTCTCCTATCTCTCCCAGTCCTAGCTGCTGGCATCACTATACTACTAACAGACCGCAACCTCAACACCACCTTCTTCGACCCCGCCGGAGGAGGAGACCCCATTCTATACCAACACCTATTCTGATTTTTCGGTCACCCTGAAGTTTATATTCTTATCCTACCAGGCTTCGGAATAATCTCCCATATTGTAACTTACTACTCCGGAAAAAAAGAACCATTTGGATACATAGGTATGGTCTGAGCTATGATATCAATTGGCTTCCTAGGGTTTATCGTGTGAGCACACCATATATTTACAGTAGGAATAGACGTAGACACACGAGCATATTTCACCTCCGCTACCATAATCATCGCTATCCCCACCGGCGTCAAAGTATTTAGCTGACTCGCCACACTCCACGGAAGCAATATGAAATGATCTGCTGCAGTGCTCTGAGCCCTAGGATTCATCTTTCTTTTCACCGTAGGTGGCCTGACTGGCATTGTATTAGCAAACTCATCACTAGACATCGTACTACACGACACGTACTACGTTGTAGCCCACTTCCACTATGTCCTATCAATAGGAGCTGTATTTGCCATCATAGGAGGCTTCATTCACTGATTTCCCCTATT
CTCAGGCTACACCCTAGACCAAACCTACGCCAAAATCCATTTCACTATCATATTCATCGGCGTAAATCTAACTTTCTTCCCACAACACTTTCTCGGCCTATCCGGAATGCCCCGACGTTACTCGGACTACCCCGATGCATACACCACATGAAACATCCTATCATCTGTAGGCTCATTCATTTCTCTAACAGCAGTAATATTAATAATTTTCATGATTTGAGAAGCCTTCGCTTCGAAGCGAAAAGTCCTAATAGTAGAAGAACCCTCCATAAACCTGGAGTGACTATATGGATGCCCCCCACCCTACCACACATTCGAAGAACCCGTATACATAAAATCTAGACAAAAAAGGAAGGAATCGAACCCCCCAAAGCTGGTTTCAAGCCAACCCCATGGCCTCCATGACTTTTTCAAAAAGGTATTAGAAAAACCATTTCATAACTTTGTCAAAGTTAAATTATAGGCTAAATCCTATATATCTTAATGGCACATGCAGCGCAAGTAGGTCTACAAGACGCTACTTCCCCTATCATAGAAGAGCTTATCACCTTTCATGATCACGCCCTCATAATCATTTTCCTTATCTGCTTCCTAGTCCTGTATGCCCTTTTCCTAACACTCACAACAAAACTAACTAATACTAACATCTCAGACGCTCAGGAAATAGAAACCGTCTGAACTATCCTGCCCGCCATCATCCTAGTCCTCATCGCCCTCCCATCCCTACGCATCCTTTACATAACAGACGAGGTCAACGATCCCTCCCTTACCATCAAATCAATTGGCCACCAATGGTACTGAACCTACGAGTACACCGACTACGGCGGACTAATCTTCAACTCCTACATACTTCCCCCATTATTCCTAGAACCAGGCGACCTGCGACTCCTTGACGTTGACAATCGAGTAGTACTCCCGATTGAAGCCCCCATTCGTATAATAATTACATCACAAGACGTCTTGCACTCATGAGCTGTCCCCACATTAGGCTTAAAAACAGATGCAATTCCCGGACGTCTAAACCAAACCACTTTCACCGCTACACGACCGGGGGTATACTACGGTCAATGCTCTGAAATCTGTGGAGCAAACCACAGTTTCATGCCCATCGTCCTAGAATTAATTCCCCTAAAAATCTTTGAAATAGGGCCCGTATTTACCCTATAGCACCCCCTCTACCCCCTCTAGAGCCCACTGTAAAGCTAACTTAGCATTAACCTTTTAAGTTAAAGATTAAGAGAACCAACACCTCTTTACAGTGAAATGCCCCAACTAAATACTACCGTATGGCCCACCATAATTACCCCCATACTCCTTACACTATTCCTCATCACCCAACTAAAAATATTAAACACAAACTACCACCTACCTCCCTCACCAAAGCCCATAAAAATAAAAAATTATAACAAACCCTGAGAACCAAAATGAACGAAAATCTGTTCGCTTCATTCATTGCCCCCACAATCCTAGGCCTACCCGCCGCAGTACTGATCATTCTATTTCCCCCTCTATTGATCCCCACCTCCAAATATCTCATCAACAACCGACTAATCACCACCCAACAATGACTAATCAAACTAACCTCAAAACAAATGATAACCATACACAACACTAAAGGACGAACCTGATCTCTTATACTAGTATCCTTAATCATTTTTATTGCCACAACTAACCTCCTCGGACTCCTGCCTCACTCATTTACACCAACCACCCAACTATCTATAAACCTAGCCATGGCCATCCCCTTATGAGCGGGCACAGTGATTATAGGCTTTCGCTCTAAGATTAAAAATGCCCTAGCCCACTTCTTACCACAAGGCACACCTACACCCCTTATCCCCATACTAGTTATTATCGAAACCATCAGCCTACTCATTCAACCAATAGCCCTGGCCGTACGCCTAACCGCTAACATTACTGCAGGCCACCTACTCATGCACCTAATTGGAAGCGCCACCCTAGCAATATCAACCATTAACCTTCCCTCTACACTTATCATCTTCACAATTCTAATTCTACTGACTATCCTAGAAATCGCTGTCGCCTTAATCCAAGCCTACGTTTTCACACTTCTAGTAAGCCTCTACCTGCACGACAACACATAATGACCCACCAATCACATGCCTATCATATAGTAAAACCCAGCCCATGACCCCTAACAGGGGCCCTCTCAGCCCTCCTAATGACCTCCGGCCTAGCCATGTGATTTCACTTCCACTCCATAACGCTCCTCATACTAGGCCTACTAACCAACACACTAACCATATACCAATGATGGCGCGATGTAACACGAGAAAGCACATACCAAGGCCACCACACACCACCTGTCCAAAAAGGCCTTCGATACGGGATAATCCTATTTATTACCTCAGAAGTTTTTTTCTTCGCAGGATTTTTCTGAGCCTTTTACCACTCCAGCCTAGCCCCTACCCCCCAATTAGGAGGGCACTGGCCCCCAACAGGCATCACCCCGCTAAATCCCCTAGAAGTCCCACTCCTAAACACATCCGTATTACTCGCATCAGGAGTATCAATCACCTGAGCTCACCATAGTCTAATAGAAAACAACCGAAACCAAATAATTCAAGCACTGCTTATTACAATTTTACTGGGTCTCTATTTTACCCTCCTACAAGCCTCAGAGTACTTCGAGTCTCCCTTCACCATTTCCGACGGCATCTACGGCTCAACATTTTTTGTAGCCACAGGCTTCCACGGACTTCACGTCATTATTGGCTCAACTTTCCTCACTATCTGCTTCATCCGCCAACTAATATTTCACTTTACATCCAAACATCACTTTGGCTTCGAAGCCGCCGCCTGATACTGGCATTTTGTAGATGTGGTTTGACTATTTCTGTATGTCTCCATCTATTGATGAGGGTCTTACTCTTTTAGTATAAATAGTACCGTTAACTTCCAATTAACTAGTTTTGACAACATTCAAAAAAGAGTAATAAACTTCGCCTTAATTTTAATAATCAACACCCTCCTAGCCTTACTACTAATAATTATTACATTTTGACTACCACAACTCAACGGCTACATAGAAAAATCCACCCCTTACGAGTGCGGCTTCGACCCTATATCCCCCGCCCGCGTCCCTTTCTCCATAAAATTCTTCTTAGTAGCTATTACCTTCTTATTATTTGATCTAGAAATTGCCCTCCTTTTACCCCTACCATGAGCCCTACAAACAACTAACCTGCCACTAATAGTTATGTCATCCCTCTTATTAATCATCATCCTAGCCCTAAGTCTGGCCTATGAGTGACTACAAAAAGGATTAGACTGAACCGAATTGGTATATAGTTTAAACAAAACGAATGATTTCGACTCATTAAATTATGATAATCATATTTACCAAATGCCCCTCATTTACATAAATATTATACTAGCATTTACCATCTCACTTCTAGGAATACTAGTATATCGCTCACACCTCATATCCTCCCTACTATGCCTAGAAGGAATAATACTATCGCTGTTCATTATAGCTACTCTCATAACCCTCAACACCCACTCCCTCTTAGCCAATATTGTGCCTATTGCCAT
ACTAGTCTTTGCCGCCTGCGAAGCAGCGGTGGGCCTAGCCCTACTAGTCTCAATCTCCAACACATATGGCCTAGACTACGTACATAACCTAAACCTACTCCAATGCTAAAACTAATCGTCCCAACAATTATATTACTACCACTGACATGACTTTCCAAAAAACACATAATTTGAATCAACACAACCACCCACAGCCTAATTATTAGCATCATCCCTCTACTATTTTTTAACCAAATCAACAACAACCTATTTAGCTGTTCCCCAACCTTTTCCTCCGACCCCCTAACAACCCCCCTCCTAATACTAACTACCTGACTCCTACCCCTCACAATCATGGCAAGCCAACGCCACTTATCCAGTGAACCACTATCACGAAAAAAACTCTACCTCTCTATACTAATCTCCCTACAAATCTCCTTAATTATAACATTCACAGCCACAGAACTAATCATATTTTATATCTTCTTCGAAACCACACTTATCCCCACCTTGGCTATCATCACCCGATGAGGCAACCAGCCAGAACGCCTGAACGCAGGCACATACTTCCTATTCTACACCCTAGTAGGCTCCCTTCCCCTACTCATCGCACTAATTTACACTCACAACACCCTAGGCTCACTAAACATTCTACTACTCACTCTCACTGCCCAAGAACTATCAAACTCCTGAGCCAACAACTTAATATGACTAGCTTACACAATAGCTTTTATAGTAAAGATACCTCTTTACGGACTCCACTTATGACTCCCTAAAGCCCATGTCGAAGCCCCCATCGCTGGGTCAATAGTACTTGCCGCAGTACTCTTAAAACTAGGCGGCTATGGTATAATACGCCTCACACTCATTCTCAACCCCCTGACAAAACACATAGCCTACCCCTTCCTTGTACTATCCCTATGAGGCATAATTATAACAAGCTCCATCTGCCTACGACAAACAGACCTAAAATCGCTCATTGCATACTCTTCAATCAGCCACATAGCCCTCGTAGTAACAGCCATTCTCATCCAAACCCCCTGAAGCTTCACCGGCGCAGTCATTCTCATAATCGCCCACGGGCTTACATCCTCATTACTATTCTGCCTAGCAAACTCAAACTACGAACGCACTCACAGTCGCATCATAATCCTCTCTCAAGGACTTCAAACTCTACTCCCACTAATAGCTTTTTGATGACTTCTAGCAAGCCTCGCTAACCTCGCCTTACCCCCCACTATTAACCTACTGGGAGAACTCTCTGTGCTAGTAACCACGTTCTCCTGATCAAATATCACTCTCCTACTTACAGGACTCAACATACTAGTCACAGCCCTATACTCCCTCTACATATTTACCACAACACAATGGGGCTCACTCACCCACCACATTAACAACATAAAACCCTCATTCACACGAGAAAACACCCTCATGTTCATACACCTATCCCCCATTCTCCTCCTATCCCTCAACCCCGACATCATTACCGGGTTTTCCTCTTGTAAATATAGTTTAACCAAAACATCAGATTGTGAATCTGACAACAGAGGCTTACGACCCCTTATTTACCGAGAAAGCTCACAAGAACTGCTAACTCATGCCCCCATGTCTAACAACATGGCTTTCTCAACTTTTAAAGGATAACAGCTATCCATTGGTCTTAGGCCCCAAAAATTTTGGTGCAACTCCAAATAAAAGTAATAACCATGCACACTACTATAACCACCCTAACCCTGACTTCCCTAATTCCCCCCATCCTTACCACCCTCGTTAACCCTAACAAAAAAAACTCATACCCCCATTATGTAAAATCCATTGTCGCATCCACCTTTATTATCAGTCTCTTCCCCACAACAATATTCATGTGCCTAGACCAAGAAGTTATTATCTCGAACTGACACTGAGCCACAACCCAAACAACCCAGCTCTCCCTAAGCTTCAAACTAGACTACTTCTCCATAATATTCATCCCTGTAGCATTGTTCGTTACATGGTCCATCATAGAATTCTCACTGTGATATATAAACTCAGACCCAAACATTAATCAGTTCTTCAAATATCTACTCATCTTCCTAATTACCATACTAATCTTAGTTACCGCTAACAACCTATTCCAACTGTTCATCGGCTGAGAGGGCGTAGGAATTATATCCTTCTTGCTCATCAGTTGATGATACGCCCGAGCAGATGCCAACACAGCAGCCATTCAAGCAATCCTATACAACCGTATCGGCGATATCGGTTTCATCCTCGCCTTAGCATGATTTATCCTACACTCCAACTCATGAGACCCACAACAAATAGCCCTTCTAAACGCTAATCCAAGCCTCACCCCACTACTAGGCCTCCTCCTAGCAGCAGCAGGCAAATCAGCCCAATTAGGTCTCCACCCCTGACTCCCCTCAGCCATAGAAGGCCCCACCCCAGTCTCAGCCCTACTCCACTCAAGCACTATAGTTGTAGCAGGAATCTTCTTACTCATCCGCTTCCACCCCCTAGCAGAAAATAGCCCACTAATCCAAACTCTAACACTATGCTTAGGCGCTATCACCACTCTGTTCGCAGCAGTCTGCGCCCTTACACAAAATGACATCAAAAAAATCGTAGCCTTCTCCACTTCAAGTCAACTAGGACTCATAATAGTTACAATCGGCATCAACCAACCACACCTAGCATTCCTGCACATCTGTACCCACGCCTTCTTCAAAGCCATACTATTTATGTGCTCCGGGTCCATCATCCACAACCTTAACAATGAACAAGATATTCGAAAAATAGGAGGACTACTCAAAACCATACCTCTCACTTCAACCTCCCTCACCATTGGCAGCCTAGCATTAGCAGGAATACCTTTCCTCACAGGTTTCTACTCCAAAGACCACATCATCGAAACCGCAAACATATCATACACAAACGCCTGAGCCCTATCTATTACTCTCATCGCTACCTCCCTGACAAGCGCCTATAGCACTCGAATAATTCTTCTCACCCTAACAGGTCAACCTCGCTTCCCCACCCTTACTAACATTAACGAAAATAACCCCACCCTACTAAACCCCATTAAACGCCTGGCAGCCGGAAGCCTATTCGCAGGATTTCTCATTACTAACAACATTTCCCCCGCATCCCCCTTCCAAACAACAATCCCCCTCTACCTAAAACTCACAGCCCTCGCTGTCACTTTCCTAGGACTTCTAACAGCCCTAGACCTCAACTACCTAACCAACAAACTTAAAATAAAATCCCCACTATGCACATTTTATTTCTCCAACATACTCGGATTCTACCCTAGCATCACACACCGCACAATCCCCTATCTAGGCCTTCTTACGAGCCAAAACCTGCCCCTACTCCTCCTAGACCTAACCTGACTAGAAAAGCTATTACCTAAAACAATTTCACAGCACCAAATCTCCACCTCCATCATCACCTCAACCCAAAAAGGCATAATTAAACTTTACTTCCTCTCTTTCTTCTTCCCACTCATCCTAACCCTACTCCTAATCACATAACCTATTCCCCCGAGCAATCTCAATTACAATATATACACCAACAAACAATGTTCAACCAGTAACT
ACTACTAATCAACGCCCATAATCATACAAAGCCCCCGCACCAATAGGATCCTCCCGAATCAACCCTGACCCCTCTCCTTCATAAATTATTCAGCTTCCTACACTATTAAAGTTTACCACAACCACCACCCCATCATACTCTTTCACCCACAGCACCAATCCTACCTCCATCGCTAACCCCACTAAAACACTCACCAAGACCTCAACCCCTGACCCCCATGCCTCAGGATACTCCTCAATAGCCATCGCTGTAGTATATCCAAAGACAACCATCATTCCCCCTAAATAAATTAAAAAAACTATTAAACCCATATAACCTCCCCCAAAATTCAGAATAATAACACACCCGACCACACCGCTAACAATCAATACTAAACCCCCATAAATAGGAGAAGGCTTAGAAGAAAACCCCACAAACCCCATTACTAAACCCACACTCAACAGAAACAAAGCATACATCATTATTCTCGCACGGACTACAACCACGACCAATGATATGAAAAACCATCGTTGTATTTCAACTACAAGAACACCAATGACCCCAATACGCAAAACTAACCCCCTAATAAAATTAATTAACCACTCATTCATCGACCTCCCCACCCCATCCAACATCTCCGCATGATGAAACTTCGGCTCACTCCTTGGCGCCTGCCTGATCCTCCAAATCACCACAGGACTATTCCTAGCCATGCACTACTCACCAGACGCCTCAACCGCCTTTTCATCAATCGCCCACATCACTCGAGACGTAAATTATGGCTGAATCATCCGCTACCTTCACGCCAATGGCGCCTCAATATTCTTTATCTGCCTCTTCCTACACATCGGGCGAGGCCTATATTACGGATCATTTCTCTACTCAGAAACCTGAAACATCGGCATTATCCTCCTGCTTGCAACTATAGCAACAGCCTTCATAGGCTATGTCCTCCCGTGAGGCCAAATATCATTCTGAGGGGCCACAGTAATTACAAACTTACTATCCGCCATCCCATACATTGGGACAGACCTAGTTCAATGAATCTGAGGAGGCTACTCAGTAGACAGTCCCACCCTCACACGATTCTTTACCTTTCACTTCATCTTGCCCTTCATTATTGCAGCCCTAGCAACACTCCACCTCCTATTCTTGCACGAAACGGGATCAAACAACCCCCTAGGAATCACCTCCCATTCCGATAAAATCACCTTCCACCCTTACTACACAATCAAAGACGCCCTCGGCTTACTTCTCTTCCTTCTCTCCTTAATGACATTAACACTATTCTCACCAGACCTCCTAGGCGACCCAGACAATTATACCCTAGCCAACCCCTTAAACACCCCTCCCCACATCAAGCCCGAATGATATTTCCTATTCGCCTACACAATTCTCCGATCCGTCCCTAACAAACTAGGAGGCGTCCTTGCCCTATTACTATCCATCCTCATCCTAGCAATAATCCCCATCCTCCATATATCCAAACAACAAAGCATAATATTTCGCCCACTAAGCCAATCACTTTATTGACTCCTAGCCGCAGACCTCCTCATTCTAACCTGAATCGGAGGACAACCAGTAAGCTACCCTTTTACCATCATTGGACAAGTAGCATCCGTACTATACTTCACAACAATCCTAATCCTAATACCAACTATCTCCCTAATTGAAAACAAAATACTCAAATGGGCCTGTCCTTGTAGTATAAACTAATACACCAGTCTTGTAAACCGGAGATGAAAACCTTTTTCCAAGGACAAATCAGAGAAAAAGTCTTTAACTCCACCATTAGCACCCAAAGCTAAGATTCTAATTTAAACTATTCTCTGTTCTTTCATGGGGAAGCAGATTTGGGTACCACCCAAGTATTGACTCACCCATCAACAACCGCTATGTATTTCGTACATTACTGCCAGCCACCATGAATATTGTACGGTACCATAAATACTTGACCACCTGTAGTACATAAAAACCCAATCCACATCAAAACCCCCTCCCCATGCTTACAAGCAAGTACAGCAATCAACCCTCAACTATCACACATCAACTGCAACTCCAAAGCCACCCCTCACCCACTAGGATACCAACAAACCTACCCACCCTTAACAGTACATAGTACATAAAGCCATTTACCGTACATAGCACATTACAGTCAAATCCCTTCTCGTCCCCATGGATGACCCCCCTCAGATAGGGGTCCCTTGACCACCATCCTCCGTGAAATCAATATCCCGCACAAGAGTGCTACTCTCCTCGCTCCGGGCCCATAACACTTGGGGGTAGCTAAAGTGAACTGTATCCGACATCTGGTTCCTACTTCAGGGTCATAAAGCCTAAATAGCCCACACGTTCCCCTTAAATAAGACATCACGATG"
# rCRS_slices, rCRSplus, and rCRSplus_positions are all used in
# seq2sites. The 'plus' part reflects the fact that this sequence
# starts before the beginning and ends after the end.
rCRS_slices = [slice(15500,16569),slice(0,16569),slice(0,1000)]
rCRSplus = ''.join(rCRS[x] for x in rCRS_slices)
rCRSplus_positions = []
for slyce in rCRS_slices:
rCRSplus_positions += range(slyce.start, slyce.stop)
# the rCRS sequence exploded into a list with biological numbering
rCRSlist = list(rCRS)
# add a spacer to the beginning so that indexing starts at 1
rCRSlist.insert(0,'#')
# indices of these regions in rCRSlist
HVR1_indices = list(x for x in range(16024,16366))
HVR2_indices = list(x for x in range(73,341))
HVR1and2_indices = HVR1_indices + HVR2_indices
HVR1to2_indices = list(x for x in range(16024,16570))+list(x for x in range(1,341))
coding_indices = list(x for x in range(577,15993))
all_indices = list(x for x in range(1,16570))
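# Illustrative sketch (added for clarity, not part of the original data module):
# rCRSplus_positions maps an index in rCRSplus back to its 0-based coordinate
# in rCRS, so the two strings can be compared character by character, e.g.
#   i = 1500                      # arbitrary index into rCRSplus
#   pos = rCRSplus_positions[i]   # corresponding position in rCRS
#   assert rCRSplus[i] == rCRS[pos]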
|
import DoublyLinkedLists
unitTests = True
class Deque:
def __init__(self):
self.items = DoublyLinkedLists.DoublyLinkedList()
self.head = self.items.head
self.tail = self.items.tail
self.empty = True
def enqueueLeft(self, item):
self.items.insert(item)
self.tail = self.items.tail
self.head = self.items.head
self.empty = False
def enqueueRight(self, item):
self.items.insertRight(item)
self.head = self.items.head
self.tail = self.items.tail
self.empty = False
def dequeueLeft(self):
if not self.empty:
temp = self.head
self.head = temp.nextNode
self.items.delete(temp)
            if self.head is None:
                self.tail = None
                self.empty = True
return temp
else:
print("Error: tried to dequeueLeft from an empty Deque.")
return None
def dequeueRight(self):
if not self.empty:
temp = self.tail
self.tail = temp.prevNode
self.items.delete(temp)
if self.items.head == None:
self.head = None
self.tail = None
self.empty = True
return temp
else:
print("Error: tried to dequeueRight from an empty Deque.")
return None
if unitTests:
testDeque = Deque()
testDeque.enqueueLeft(DoublyLinkedLists.Node(1))
testDeque.enqueueLeft(DoublyLinkedLists.Node(2))
testDeque.enqueueLeft(DoublyLinkedLists.Node(3))
print("Deque tests. Expected no errors above.")
print("Expected 1, 2, 3. Got: " + str(testDeque.dequeueRight().value)
+ ", " + str(testDeque.dequeueRight().value)
+ ", " + str(testDeque.dequeueRight().value))
print("Now we try to dequeue from an empty queue."
+ " We should get two printed errors below. ")
testDeque.dequeueRight()
testDeque.dequeueLeft()
print("Finally, try things the other direction.")
print("(Right to left this time)")
testDeque.enqueueRight(DoublyLinkedLists.Node(1))
testDeque.enqueueRight(DoublyLinkedLists.Node(2))
testDeque.enqueueRight(DoublyLinkedLists.Node(3))
print("Expected 1, 2, 3. Got: " + str(testDeque.dequeueLeft().value)
+ ", " + str(testDeque.dequeueLeft().value)
+ ", " + str(testDeque.dequeueLeft().value))
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
'''
* __ __
* ____ / /_ ____ ___ __ __ ____ ____ ____ / /
* / __ \/ __ \ / __ `__ \/ / / / / __ \/ __ \/ __ \ / /
* / /_/ / / / / / / / / / / /_/ / / /_/ / /_/ / /_/ / /_/
* \____/_/ /_/ /_/ /_/ /_/\__, / \__ /\__ _/_____/ __
* /____/ /___/ /_/
 * (ASCII-art mascot removed here: a "divine beast" drawing captioned
 *  "Code is far away from bug with the animal protecting" --
 *  i.e. "may the divine beast keep the code bug-free".)
'''
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, PasswordField, BooleanField, SelectField
from wtforms.validators import DataRequired, Length, Email, Regexp, EqualTo
from flask_pagedown.fields import PageDownField
class RegisterForm(FlaskForm):
    username = StringField('Username', validators=[DataRequired(), Length(1, 64),
                                                   Regexp('^(?!_)(?!.*?_$)[a-zA-Z0-9_'
                                                          '\u4e00-\u9fa5]+$', 0,
                                                          message='Invalid username')])
    email = StringField('Email', validators=[DataRequired(), Length(1, 64), Email()])
    password = PasswordField('Password', validators=[DataRequired(), EqualTo('cfm_password', message='Passwords must match')])
    cfm_password = PasswordField('Confirm password', validators=[DataRequired()])
    submit = SubmitField('Register')
class LoginForm(FlaskForm):
    username = StringField('Username', validators=[DataRequired(), Length(1, 64),
                                                   Regexp('^(?!_)(?!.*?_$)[a-zA-Z0-9_'
                                                          '\u4e00-\u9fa5]+$', 0,
                                                          message='Invalid username')])
    password = PasswordField('Password', validators=[DataRequired()])
    remember_me = BooleanField('Remember me')
    submit = SubmitField('Log in')
class EditForm(FlaskForm):
    article = PageDownField("Your post", validators=[DataRequired()])
    title = StringField('Title', validators=[DataRequired()])
    pythonTag = BooleanField('Python')
    JsTag = BooleanField('JavaScript')
    MachineTag = BooleanField('Machine Learning')
    CssTag = BooleanField('Css')
    articleType = SelectField('Article type', choices=[('1', 'Python'), ('2', 'Essay'), ('3', 'Fiction'), ('4', 'Front-end')], validators=[DataRequired()])
    submit = SubmitField('Save')
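# A minimal usage sketch (illustrative only; the route, view and template names
# below are assumptions and do not appear in this module):
#
#   from flask import render_template, redirect, url_for
#
#   @app.route('/register', methods=['GET', 'POST'])
#   def register():
#       form = RegisterForm()
#       if form.validate_on_submit():
#           ...  # create the user from form.username.data / form.password.data
#           return redirect(url_for('login'))
#       return render_template('register.html', form=form)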
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import quadpy
import src.fem_base.master.nodal_basis_2D as nb2d
import src.fem_base.master.barycentric_coord_tools as bct
import src.fem_base.master.master_1D as m1d
class Master2D(object):
def mk_shap_and_dshap_at_pts(self, pts):
shap = self.basis.shape_functions_at_pts(pts)
dshap = self.basis.shape_function_derivatives_at_pts(pts)
return shap, dshap
class Master2DTriangle(Master2D):
""" note vertex definitions in nodal_basis_2D.py """
def __init__(self, p, nquad_pts=None, *args, **kwargs):
self.name = "TRIANGLE"
self.p, self.dim = p, 2
self.basis = nb2d.NodalBasis2DTriangle(self.p, **kwargs)
self.nb, self.verts, self.n_ed = self.basis.nb, self.basis.verts, 3
self.nodal_pts = self.basis.nodal_pts
self.nq = 2*self.p +2 if nquad_pts is None else nquad_pts
self.quad_pts, self.wghts = triangle_quadrature(self.nq, self.verts)
# shape functions at nodal and quadrature points
self.shap_quad, self.dshap_quad = self.mk_shap_and_dshap_at_pts(self.quad_pts)
_, self.dshap_nodal = self.mk_shap_and_dshap_at_pts(self.nodal_pts)
self.nodal_barycentric_coords = bct.cart2bary(self.verts, self.nodal_pts.T)
# mass, stiffness matrices
self.M, self.S, self.K = self.mk_M(), self.mk_S(), self.mk_K()
self.Minv = np.linalg.inv(self.M)
# edge data structures, master edge, nodes on the edge, lifting matrix, normals
self.master_edge = [m1d.Master1D(p=self.p)]
self.ids_ed = self.find_nodes_on_edges()
self.nr = sum([len(ids) for ids in self.ids_ed])
self.L = self.mk_L()
self.edge_normals = np.array([[1/np.sqrt(2), 1/np.sqrt(2)], [-1, 0], [0, -1]])
def mk_M(self):
""" the mass matrix, M_ij = (phi_i, phi_j) """
shapw = np.dot(np.diag(self.wghts), self.shap_quad)
M = np.dot(self.shap_quad.T, shapw)
return M
    def mk_S(self):
        r""" the stiffness matrix, S[k]_ij = (phi_i, \frac{d\phi_j}{dx_k})
returns a list indexed by coordinate direction on the master element
"""
S = [None, None]
for i in range(self.dim):
dshapw = np.dot(np.diag(self.wghts), self.dshap_quad[i])
S[i] = np.dot(self.shap_quad.T, dshapw)
return S
    def mk_K(self):
        r""" the stiffness matrix, K_ij = (\frac{d\phi_i}{dx_k}, \frac{d\phi_j}{dx_k})
returns a list indexed by coordinate direction on the master element
"""
K = [None, None]
for i in range(self.dim):
dshapw = np.dot(np.diag(self.wghts), self.dshap_quad[i])
K[i] = np.dot(self.dshap_quad[i].T, dshapw)
return K
def find_nodes_on_edges(self):
""" computes the node numbers (ids) on each edge
the i^th barycentric coord of a point on a tri edge will be 0, find these pts
@retval ids_ed list of vectors indexed by edge number
NOTE: we manually flip edges 0 and 2 to ensure CCW ordering
of ed dof around the element
"""
ids_ed = [None, None, None]
bary_coords = bct.cart2bary(self.verts, self.nodal_pts.T)
ids_ed[0] = np.where( np.isclose(bary_coords[0, :], 0.) )[0][::-1]
ids_ed[1] = np.where( np.isclose(bary_coords[1, :], 0.) )[0]
ids_ed[2] = np.where( np.isclose(bary_coords[2, :], 0.) )[0][::-1]
return ids_ed
def mk_L(self):
""" makes the elemental lifting matrix """
L = np.zeros((self.nb, self.nr), dtype=int)
for ed_dof, interior_dof in enumerate(np.hstack(self.ids_ed)):
L[interior_dof, ed_dof] = 1
return L
def map_to_physical_space(self, tri_verts):
""" uses barycentric coords to map nodal pts to 2D physical space element
@param tri_verts np array shape (3, 2) with xy coords of phys space tri
"""
x, y = bct.bary2cart(tuple(tri_verts), self.nodal_barycentric_coords)
return np.vstack((x, y)).T
class Master2DQuad(Master2D): pass
def triangle_quadrature(n, verts):
""" look up / compute quadrature rule over the triangle, order n
@param n the order of polynomial which should be integrated exactly
@param verts tuple of tuples defining the master element
NOTE: leverages quadpy, 2*weights
"""
    if n > 50:
        raise NotImplementedError("Xiao-Gimbutas triangle rules are only tabulated up to degree 50")
qr = quadpy.triangle.xiao_gimbutas.XiaoGimbutas(degree=n)
bary, weights = qr.bary, qr.weights
xq, yq = bct.bary2cart(verts=verts, _lambda=bary.T)
points = np.vstack((xq, yq)).T
return points, 2*weights
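# --- Hedged sanity-check sketch (added for illustration, not part of the original module) ---
# Assumes quadpy and the src.fem_base package are importable. The expected value of 2 for
# both sums assumes the bi-unit master triangle implied by the edge normals above.
if __name__ == '__main__':
    master = Master2DTriangle(p=3)
    # the quadrature weights should sum to the master-element area
    print('sum of quadrature weights:', master.wghts.sum())
    # for a nodal (partition-of-unity) basis the mass-matrix entries sum to the same area
    print('sum of mass-matrix entries:', master.M.sum())
    # the lifting matrix places exactly one edge dof into each column
    print('lifting-matrix column sums:', master.L.sum(axis=0))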
|
def get_pandigital_multiple(n):
tmp_digit_list = []
multiplicator = 1
    # concatenate n*1, n*2, ... while the collected digits stay unique and fewer than nine
while (len(set(tmp_digit_list)) == len(tmp_digit_list)) and (len(tmp_digit_list)<9):
product = str(n * multiplicator)
digit_list = list(product)
tmp_digit_list.extend(digit_list)
if '0' in tmp_digit_list:
return False
multiplicator += 1
    # conditions for a valid 1-to-9 pandigital concatenated product
    check_length = len(tmp_digit_list) == 9  # exactly nine digits collected
    check_duplicates = len(set(tmp_digit_list)) == len(tmp_digit_list)  # all digits distinct
    check_iteration = (multiplicator != 2)  # at least n*1 and n*2 were concatenated
    if check_length and check_duplicates and check_iteration:
result = int(''.join(tmp_digit_list))
return result
return False
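# Added note on the search space (reasoning, not original code): 9 * (1, 2, 3, 4, 5)
# already gives the pandigital 918273645, so any larger concatenated product must start
# with the digit 9. Only a 4-digit multiplicand 9abc concatenated with 2 * 9abc produces
# exactly nine digits, hence main() scans numbers of the form int('9' + str(p)) whose
# remaining digits avoid 0 and 9.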
def main():
    # baseline: 9 * (1, 2, 3, 4, 5) yields the pandigital 918273645
    pandigital_multiple = get_pandigital_multiple(9)
    # scan 4-digit candidates of the form 9abc whose trailing digits avoid 0 and 9
for p in range(123, 877):
if ('9' in str(p)) or ('0' in str(p)):
continue
N = int('9' + str(p))
        # a candidate counts only if it yields a pandigital multiple
        current_pandigital_multiple = get_pandigital_multiple(N)
        # keep it when it beats the best result found so far
        if current_pandigital_multiple and current_pandigital_multiple > pandigital_multiple:
            pandigital_multiple = current_pandigital_multiple
return pandigital_multiple
if __name__ == "__main__":
print(main())
|
"""
ENCODERS: given a set of images, return their encoded representation.
"""
from __future__ import print_function, division
__author__ = 'Vlad Popovici'
__version__ = 0.1
from abc import ABCMeta, abstractmethod
from future.utils import bytes_to_native_str as nstr
import gzip
import pickle
import numpy as np
import theano.misc.pkl_utils
from lasagne.layers import get_output
## ENCODER: abstract class declaring the basic functionality
class Encoder:
__metaclass__ = ABCMeta
name = nstr(b'Encoder')
@abstractmethod
def encode(self, X):
pass
@abstractmethod
def loadmodel(self, filename):
pass
## SdAEncoder: stacked denoising autoencoder (see http://deeplearning.net/tutorial/SdA.html)
class SdAEncoder(Encoder):
name = nstr(b'SdAEncoder')
def __init__(self, filename=None):
self.model = None
self.input_dim = 0
self.output_dim = 0
if filename is not None:
self.loadmodel(filename)
def loadmodel(self, filename):
with open(filename, 'rb') as f:
self.model = theano.misc.pkl_utils.load(f)
# get the input / output dimensions
if self.model.n_layers == 0:
raise RuntimeError('The encoder model does not contain any layer!')
self.input_dim = self.model.params[0].eval().shape[0]
self.output_dim = self.model.params[2*(self.model.n_layers-1)].eval().shape[1]
def encode(self, X):
assert(X.ndim == 2)
assert(self.model is not None)
n, dim = X.shape
if dim != self.input_dim:
raise RuntimeError('The given data dimension does not match the model')
# Construct the encoding chain (stack)
y = [X] # to make the calls uniform
for k in np.arange(self.model.n_layers):
y.append(self.model.dA_layers[k].get_hidden_values(y[k]))
# last element contains the final encoding:
return y[self.model.n_layers].eval()
## CNNEncoder: convolutional neural network encoder
class CNNEncoder(Encoder):
name = nstr(b'CNNEncoder')
def __init__(self, filename=None):
self.model = None
self.input_dim = 0
self.output_dim = 0
if filename is not None:
self.loadmodel(filename)
return
def loadmodel(self, filename):
with gzip.open(filename, 'rb') as f:
self.model = pickle.load(f)
if len(self.model.layers) == 0:
raise RuntimeError('The encoder model does not contain any layer!')
# find the encoding layer, called "encode"
for i, l in enumerate(self.model.get_all_layers()):
if l.name == 'encode':
self.encode_layer = l
self.encode_layer_idx = i
self.input_dim = self.model.layers[0][1]['shape'][1:] # get the tensor shape, skip the no. of samples (1st dim)
self.output_dim = self.model.layers[self.encode_layer_idx][1]['num_units']
return
def encode(self, X):
if X.shape[1:] != self.input_dim:
raise RuntimeError('The given data dimension does not match the model')
z = get_output(self.encode_layer, inputs=X)
return z.eval()
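# --- Hedged usage sketch (added for illustration, not part of the original module) ---
# Both encoders expect a model already serialized to disk; the file names below are
# placeholders, not files shipped with this code.
#
#   enc = CNNEncoder('cnn_model.pkl.gz')    # gzip'd pickle containing a layer named "encode"
#   codes = enc.encode(images)              # images.shape[1:] must equal enc.input_dim
#
#   sda = SdAEncoder('sda_model.zip')       # theano.misc.pkl_utils dump of a stacked dA
#   codes = sda.encode(features)            # features: 2-D array, one row per sample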
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import tensorflow as tf
import numpy as np
from libs.configs import cfgs
def bbox_transform_inv(boxes, deltas, scale_factors=None):
dx = deltas[:, 0]
dy = deltas[:, 1]
dw = deltas[:, 2]
dh = deltas[:, 3]
if scale_factors:
dx /= scale_factors[0]
dy /= scale_factors[1]
dw /= scale_factors[2]
dh /= scale_factors[3]
widths = boxes[:, 2] - boxes[:, 0] + 1.0
heights = boxes[:, 3] - boxes[:, 1] + 1.0
ctr_x = boxes[:, 0] + 0.5 * widths
ctr_y = boxes[:, 1] + 0.5 * heights
pred_ctr_x = dx * widths + ctr_x
pred_ctr_y = dy * heights + ctr_y
pred_w = tf.exp(dw) * widths
pred_h = tf.exp(dh) * heights
predict_xmin = pred_ctr_x - 0.5 * pred_w
predict_xmax = pred_ctr_x + 0.5 * pred_w
predict_ymin = pred_ctr_y - 0.5 * pred_h
predict_ymax = pred_ctr_y + 0.5 * pred_h
return tf.transpose(tf.stack([predict_xmin, predict_ymin,
predict_xmax, predict_ymax]))
def bbox_transform(ex_rois, gt_rois, scale_factors=None):
ex_widths = ex_rois[:, 2] - ex_rois[:, 0] + 1.0
ex_heights = ex_rois[:, 3] - ex_rois[:, 1] + 1.0
ex_ctr_x = ex_rois[:, 0] + 0.5 * ex_widths
ex_ctr_y = ex_rois[:, 1] + 0.5 * ex_heights
gt_widths = gt_rois[:, 2] - gt_rois[:, 0] + 1.0
gt_heights = gt_rois[:, 3] - gt_rois[:, 1] + 1.0
gt_ctr_x = gt_rois[:, 0] + 0.5 * gt_widths
gt_ctr_y = gt_rois[:, 1] + 0.5 * gt_heights
targets_dx = (gt_ctr_x - ex_ctr_x) / ex_widths
targets_dy = (gt_ctr_y - ex_ctr_y) / ex_heights
targets_dw = np.log(gt_widths / ex_widths + 1e-5)
targets_dh = np.log(gt_heights / ex_heights + 1e-5)
if scale_factors:
targets_dx *= scale_factors[0]
targets_dy *= scale_factors[1]
targets_dw *= scale_factors[2]
targets_dh *= scale_factors[3]
targets = np.vstack((targets_dx, targets_dy, targets_dw, targets_dh)).transpose()
return targets
def rbbox_transform_inv(boxes, deltas, scale_factors=None):
dx = deltas[:, 0]
dy = deltas[:, 1]
dw = deltas[:, 2]
dh = deltas[:, 3]
dtheta = deltas[:, 4]
if scale_factors:
dx /= scale_factors[0]
dy /= scale_factors[1]
dw /= scale_factors[2]
dh /= scale_factors[3]
dtheta /= scale_factors[4]
# BBOX_XFORM_CLIP = tf.log(cfgs.IMG_SHORT_SIDE_LEN / 16.)
# dw = tf.minimum(dw, BBOX_XFORM_CLIP)
# dh = tf.minimum(dh, BBOX_XFORM_CLIP)
pred_ctr_x = dx * boxes[:, 2] + boxes[:, 0]
pred_ctr_y = dy * boxes[:, 3] + boxes[:, 1]
pred_w = tf.exp(dw) * boxes[:, 2]
pred_h = tf.exp(dh) * boxes[:, 3]
pred_theta = dtheta * 180 / np.pi + boxes[:, 4]
return tf.transpose(tf.stack([pred_ctr_x, pred_ctr_y,
pred_w, pred_h, pred_theta]))
def rbbox_transform(ex_rois, gt_rois, scale_factors=None):
targets_dx = (gt_rois[:, 0] - ex_rois[:, 0]) / (ex_rois[:, 2] + 1)
targets_dy = (gt_rois[:, 1] - ex_rois[:, 1]) / (ex_rois[:, 3] + 1)
targets_dw = np.log(gt_rois[:, 2] / (ex_rois[:, 2] + 1))
targets_dh = np.log(gt_rois[:, 3] / (ex_rois[:, 3] + 1))
targets_dtheta = (gt_rois[:, 4] - ex_rois[:, 4]) * np.pi / 180
if scale_factors:
targets_dx *= scale_factors[0]
targets_dy *= scale_factors[1]
targets_dw *= scale_factors[2]
targets_dh *= scale_factors[3]
targets_dtheta *= scale_factors[4]
targets = np.vstack((targets_dx, targets_dy, targets_dw, targets_dh, targets_dtheta)).transpose()
return targets
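# --- Hedged round-trip sketch (added for illustration, not part of the original module) ---
# Encodes a ground-truth box against an anchor with bbox_transform (NumPy) and decodes it
# again with bbox_transform_inv (TensorFlow). Boxes are assumed to be (xmin, ymin, xmax, ymax),
# as the width/height arithmetic above implies, and the libs.configs import at the top of this
# file must resolve for the module to run. Under TF2 eager execution the result can be read
# with .numpy(); under TF1 graph mode a session would be needed instead. Note the decoded
# xmax/ymax land one pixel past the originals because the encoder uses the +1 width/height
# convention while the decoder does not subtract it back.
if __name__ == '__main__':
    anchors = np.array([[10., 10., 50., 90.]], dtype=np.float32)
    gt_boxes = np.array([[12., 8., 48., 100.]], dtype=np.float32)
    deltas = bbox_transform(anchors, gt_boxes).astype(np.float32)
    decoded = bbox_transform_inv(tf.constant(anchors), tf.constant(deltas))
    print('ground truth:', gt_boxes)
    print('decoded     :', decoded.numpy() if tf.executing_eagerly() else decoded)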
|